code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Create v3 client object
from watson_machine_learning_client import WatsonMachineLearningAPIClient

# Credentials for the legacy (v3) service. instance_id identifies the v1
# instance plan/id where the assets to be migrated currently live.
v3_wml_credentials = {
    "apikey":"<your apikey>",
    "url": "https://wml-fvt.ml.test.cloud.ibm.com",
    "instance_id": "a5a0e69c-cb3d-4b81-b357-3d3f5d8eb600" # v1 instance plan/id where assets are
}
v3_client = WatsonMachineLearningAPIClient(v3_wml_credentials)
print('Using cluster', v3_wml_credentials['url'])

# Create v4ga client object. Note: instance_id not required. instance_id will be picked up from space/project to which the instance is associated with
from ibm_watson_machine_learning import APIClient
v4_wml_credentials = {
    "apikey":"<your apikey>",
    "url": "https://wml-fvt.ml.test.cloud.ibm.com"
}
v4ga_client = APIClient(v4_wml_credentials)
print('Using cluster', v4_wml_credentials['url'])

# List the v3 assets that are candidates for migration.
v3_client.repository.list_models()
print(v3_client.repository.list_experiments())

# Space should be created before migration apis can be invoked. Assets from the old instance will be cloned
# under this space. In v4ga, space/project is mandatory to be set to access/create assets
space_id = '70f0952e-9947-4592-bd99-54e7890ce836'
# +
# Use help to know more about the api details. Eg: help(client.v4ga_cloud_migration.start), help(client.v4ga_cloud_migration.delete)
# Migration job configuration: which old instance to read from, which space to
# clone into, and which assets to migrate (explicit id lists, or "all").
meta_props = {
    v4ga_client.v4ga_cloud_migration.ConfigurationMetaNames.DESCRIPTION: "Migration of assets from v3 to v4ga",
    v4ga_client.v4ga_cloud_migration.ConfigurationMetaNames.OLD_INSTANCE_ID: "a5a0e69c-cb3d-4b81-b357-3d3f5d8eb600",
    v4ga_client.v4ga_cloud_migration.ConfigurationMetaNames.SPACE_ID: space_id,
    v4ga_client.v4ga_cloud_migration.ConfigurationMetaNames.MODEL_IDS: ["5e56a59d-334a-456e-aa2c-a1558b034ae1"],
    v4ga_client.v4ga_cloud_migration.ConfigurationMetaNames.EXPERIMENT_IDS: ["all"]
}
details = v4ga_client.v4ga_cloud_migration.start(meta_props=meta_props)
migration_id = details[u'migration_id']  # handle used below to poll and manage this job
print(details)
# +
# start is an async process. We need to monitor the status
import time
count = 0
# Poll every 15 s until the job reports 'completed', giving up after 10 polls
# (~2.5 minutes) so the cell cannot hang forever.
while True:
    time.sleep(15)
    details = v4ga_client.v4ga_cloud_migration.get_details(migration_id, space_id=space_id)
    count = count + 1
    if details[u'status'] == 'completed' or count == 10:
        break
print("DONE")
# -
print("get_details: ", v4ga_client.v4ga_cloud_migration.get_details(migration_id, space_id=space_id))
# Set the target space as default so the repository call below resolves inside it.
v4ga_client.set.default_space(space_id)
print(v4ga_client.repository.get_details('151b9587-7775-45bb-b44e-fc781d388ecd'))
print(v4ga_client.v4ga_cloud_migration.list(space_id=space_id))
# NOTE(review): delete() uses a hard-coded migration_id rather than the one
# created above — presumably cleaning up an earlier run; confirm before reuse.
v4ga_client.v4ga_cloud_migration.delete(migration_id='bafc6c77-0a4d-4517-82c5-678892cc654a', space_id=space_id)
|
cloud/notebooks/python_sdk/migration/v3_to_v4ga_cloud_assets_migration_sample.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
x=10
y=20
z=x+y
print(z)
x="India is my Country"
x[0]
x[1]
x[-1]
x[0:5]
# Beginner exercise: read two numbers and an operation name, then print the
# result of the chosen arithmetic operation.
x=input("Enter Your Name :")
y=int(input("Enter the first Number :"))
z=int(input("Enter the Second Number :"))
d=input("Enter the Operation :")
if d=="Add":
    a=y+z
    print(a)
if d=="Substract":
    a=y-z
    print(a)
if d=="Multiply":
    a=y*z
    print(a)
if d=="Divide":
    a=y/z
    print(a)
# Sign check: classify an integer as zero, positive, or negative.
x=int(input("Enter a Number : "))
if x>=0:
    if x==0:
        print("The Number is zero")
    else:
        print("The Number is positive")
if x<0:
    print("The Number is Negative")
x="We are BBA students"
x[11]
x.upper()
x.lower()
x.partition("BBA")
x.split("students")
x.startswith('W')
x.count("s")
x.index("e")
x.count("b",3,6)
x.isalnum()
x,y,z="AA","BB","CC"
x
y
import math as mt
mt.cos(1)
mt.sin(59)
# Trig calculator: reads a value and an operation name, then prints the result.
# math.sin/cos/tan treat the input as radians.
x=int(input("Enter the First Number :"))
z=input("Enter the Operation :")
if z=="sin":
    a=mt.sin(x)
    print(a)
if z=="cos":
    a=mt.cos(x)
    print(a)
if z=="tan":
    a=mt.tan(x)
    print(a)
# +
x=int(input("Enter The First Number :"))
z=input("Enter the the operation :")
y=int(input("Enter the Second Number :"))
def add(x, y):
    """Return the sum of x and y."""
    return x + y
def sub(x, y):
    """Return the difference x - y."""
    return x - y
def mul(x, y):
    """Return the product of x and y."""
    return x * y
def div(x, y):
    """Return the true-division quotient x / y (raises ZeroDivisionError for y == 0)."""
    return x / y
# Dispatch on the operator symbol read above; each branch calls the matching
# helper defined in this cell.
if z=="+":
    K=add(x,y)
if z=="-":
    K=sub(x,y)
if z=="*":
    K=mul(x,y)
if z=="/":
    K=div(x,y)
# NOTE(review): if z is none of the four symbols, K is never assigned and this
# print raises NameError — confirm whether a default/else branch was intended.
print(K)
# +
x=int(input("Enter a number"))
def operations(a, b, c, d):
    """Compute a bracketed amount: c percent of (a - b), plus the flat amount d."""
    excess = a - b
    return excess * c / 100 + d
# Simple two-bracket tax computation on the value read above.
if x<250000:
    t=operations(0,0,0,0)  # below the first bracket: tax is 0
if (x>=250000) and (x<500000):
    t=operations(x,250000,5,12500)  # 5% of the amount over 250000, plus 12500
# NOTE(review): for x >= 500000 no branch assigns t, so this print raises
# NameError — presumably a missing higher bracket; confirm intended ranges.
print(t)
# -
x=[2,4,6,8,10,12,14]
y=[20,30,40,50,60,70,80]
import matplotlib.pyplot as plt
plt.plot(x,y,"o:r",ms=5,mec='black',mfc='yellow')
plt.xlabel('Days',fontdict= {'color':'red','size':20})
plt.ylabel('Sales',fontdict={'color':'red','size':20})
x=[0,1,2,3,4,5]
x[-1]=6
x
x.insert(5,7)
5,7
|
My revision1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 139} colab_type="code" id="uRAytyJbItqO" outputId="ee8e0c8a-484d-4e48-bcc3-dcc5d6093c40"
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from sklearn.metrics import precision_recall_curve, average_precision_score
# + colab={} colab_type="code" id="7vcf755WKTn0"
domain_data = pd.read_csv('https://raw.githubusercontent.com/sudo-rushil/dga-intel/master/datasets/domain_data.csv')
model = tf.keras.models.load_model('domain_classifier_model.h5')
char2idx = {'-': 0, '.': 1, '0': 2, '1': 3, '2': 4, '3': 5,
'4': 6, '5': 7, '6': 8, '7': 9, '8': 10, '9': 11,
'_': 12, 'a': 13, 'b': 14, 'c': 15, 'd': 16, 'e': 17,
'f': 18, 'g': 19, 'h': 20, 'i': 21, 'j': 22, 'k': 23,
'l': 24, 'm': 25, 'n': 26, 'o': 27, 'p': 28, 'q': 29,
'r': 30, 's': 31, 't': 32, 'u': 33, 'v': 34, 'w': 35,
'x': 36, 'y': 37, 'z': 38}
# + colab={"base_uri": "https://localhost:8080/", "height": 391} colab_type="code" id="dm4K2F4-KbY7" outputId="0f0d3287-cd9a-4c45-f029-2e3a377e1729"
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="yWsBtBgMK1_w" outputId="09556cda-8c5b-4a6f-9c3f-0345f799f194"
domain_data.head(5)
# + colab={} colab_type="code" id="cgjkQ-cZJwqN"
def load_test(domains):
    """Vectorize a slice of the domain dataset for the classifier.

    Encodes each domain name (column 0) as integer character codes using the
    module-level char2idx mapping, left-aligned into a fixed-width array of
    82 columns, and pulls the integer labels from column 1.
    """
    features = np.zeros((domains.shape[0], 82))
    labels = np.array(domains.iloc[:, 1], dtype=np.int32)
    for row, domain in enumerate(domains.iloc[:, 0]):
        # Positions past the end of the name stay zero (padding).
        features[row, :len(domain)] = np.array([char2idx[ch] for ch in domain])
    return features, labels
# + colab={} colab_type="code" id="FrC5TVMAKjPp"
inputs, y_test = load_test(domain_data[1600000:1610000])
# + colab={} colab_type="code" id="hitkbm6ELAOF"
output = model(inputs)
# + colab={} colab_type="code" id="YPOmd_gdLk3-"
def read_outputs(output):
    """Convert model outputs to hard 0/1 class labels.

    Parameters
    ----------
    output : object exposing .numpy() (e.g. a TF tensor) whose array holds
        the class score in column 0.

    Returns
    -------
    numpy.ndarray of float64, shape (n,): 1.0 where score > 0.5, else 0.0.
    """
    scores = output.numpy()
    # Vectorized threshold replaces the original per-row Python loop;
    # strictly greater than 0.5 maps to 1.0, matching the prior behavior.
    return (scores[:, 0] > 0.5).astype(np.float64)
# + colab={} colab_type="code" id="J7DgJ4EyNc6d"
def read_outputs_raw(output):
    """Extract raw column-0 scores as a float64 vector.

    Same input contract as read_outputs, but returns the untouched scores
    (useful for ROC / precision-recall curves).
    """
    scores = output.numpy()
    # astype always copies, so the caller gets an independent float64 array —
    # exactly what the original element-by-element loop produced.
    return scores[:, 0].astype(np.float64)
# + colab={} colab_type="code" id="Rm24CW1aMTrM"
y_pred = read_outputs(output)
y_pred_prob = read_outputs_raw(output)
# + colab={} colab_type="code" id="Qn_WJto2MaPt"
C = np.array(confusion_matrix(y_test, y_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="YC0WRJ8kMcIj" outputId="dee320ff-7eda-4549-944e-4bed965cc687"
C
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="Pi3pDHmFMwyB" outputId="48d56f29-8e8e-4a84-e421-ce7a16c689c2"
C/C.sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="8opt4-d2MyCv" outputId="378cbd07-a568-4ba3-c35a-7358d136b799"
print(classification_report(y_test, y_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="TGl6bVvbM_d0" outputId="9bd6aba9-0668-4f8e-edd5-8802dcbaf476"
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
# create plot
plt.plot(fpr, tpr, label='ROC curve')
plt.plot([0, 1], [0, 1], 'k--', label='Random guess')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.xlim([-0.02, 1])
plt.ylim([0, 1.02])
plt.legend(loc="lower right")
plt.savefig('roc.svg')
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="fouzfzsoNYx_" outputId="1ec28e9b-0a64-4018-cb9d-ab65f8cc0f8e"
roc_auc_score(y_test, y_pred_prob)
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="fuTm6SaWN7so" outputId="bc3ae3c2-d4c9-4db4-9e10-08a3f779486c"
precision, recall, thresholds = precision_recall_curve(y_test, y_pred_prob)
# create plot
plt.plot(precision, recall, label='Precision-recall curve')
plt.xlabel('Precision')
plt.ylabel('Recall')
plt.title('Precision-recall curve')
plt.legend(loc="lower left")
plt.savefig('prc.svg')
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="_HjYBQU8OFJW" outputId="046ff2cd-4f59-4ce1-c941-cf3d5bf5c997"
average_precision_score(y_test, y_pred_prob)
# + colab={} colab_type="code" id="VF8U1LJsOJdp"
|
notebooks/.ipynb_checkpoints/domain_test-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="MClslDBOi199" colab_type="text"
# # Desafios
# + [markdown] id="kwPpG_bi4MG1" colab_type="text"
# ## Desafio 1 da [<NAME>](https://twitter.com/thais_tandre)
#
# Se a pessoa não teve presença, preencha a nota dela com algum número. A nota 0? A nota média? A mediana?
# + id="Xuq52DOWptlr" colab_type="code" outputId="c2a6e3d0-46f9-4535-8476-757927da79b9" executionInfo={"status": "ok", "timestamp": 1587670794954, "user_tz": 180, "elapsed": 21810, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhoED2MwRclSsAx59Pnc82nRT9ubmUmNT2GT_EV=s64", "userId": "02671300746629988104"}} colab={"base_uri": "https://localhost:8080/", "height": 204}
# Dados do dicionário do ENEM
faltou = 0
presente = 1
eliminado = 2
dados_nota = dados.copy()
dados_nota.loc[dados['TP_PRESENCA_CN'].isin([faltou, eliminado]), 'NU_NOTA_CN'] = 0 # dados[dados['TP_PRESENCA_CN'] == presente]['NU_NOTA_CN'].mean()
dados_nota.loc[dados['TP_PRESENCA_CH'].isin([faltou, eliminado]), 'NU_NOTA_CH'] = 0
dados_nota.loc[dados['TP_PRESENCA_LC'].isin([faltou, eliminado]), 'NU_NOTA_LC'] = 0
dados_nota.loc[dados['TP_PRESENCA_MT'].isin([faltou, eliminado]), 'NU_NOTA_MT'] = 0
dados_nota[['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT']].head()
# + [markdown] id="Y8Tk3fr_6vDR" colab_type="text"
# ## Desafio 2 do [<NAME>](https://twitter.com/tgcsantos)
#
# A matriz de correlação está feiosa, vamos deixar mais bonita? :) Não se esqueça de manter os valores dentro delas.
# + id="mBYRvQI2tce1" colab_type="code" outputId="cd2fdbdc-4e1e-4424-c93e-90814ee0ca56" executionInfo={"status": "ok", "timestamp": 1587670794957, "user_tz": 180, "elapsed": 21802, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhoED2MwRclSsAx59Pnc82nRT9ubmUmNT2GT_EV=s64", "userId": "02671300746629988104"}} colab={"base_uri": "https://localhost:8080/", "height": 513}
sns.set(font_scale=1.2)
labels = ['Ciências da Natureza', 'Ciências Humanas', 'Linguagens e Códigos', 'Matemática', 'Redação']
plt.figure(figsize=(6, 6))
ax = sns.heatmap(
corr,
annot=True,
cmap="YlGnBu",
cbar=False,
xticklabels=labels,
yticklabels=labels,
# mask=np.triu(np.ones_like(corr, dtype=np.bool))
)
plt.show()
# Reseta as configurações para o padrão
sns.set()
# + [markdown] id="13DD9vRM8u9y" colab_type="text"
# ## Desafio 3 do [<NAME>](https://twitter.com/paulo_caelum)
#
#
# Pairplot dos acertos de cada categoria (CN, CH, MT, LC, nota pura da redação). Usar o gabarito e as respostas
# + id="nAEa-Ryv2SLn" colab_type="code" colab={}
def conta_acertos(aluno, materia):
    """Count how many answers match the answer key for one subject.

    Returns 0 when either the responses or the key is missing / not a string
    (e.g. NaN for rows dropped from the answer-key columns).
    """
    respostas = aluno.get(f'TX_RESPOSTAS_{materia}')
    gabarito = aluno.get(f'TX_GABARITO_{materia}')
    if type(respostas) != str or type(gabarito) != str:
        return 0
    acertos = 0
    for dada, correta in zip(respostas, gabarito):
        if dada == correta:
            acertos += 1
    return acertos
# + id="m520zVb1whp-" colab_type="code" colab={}
dados_nota_2 = dados_nota.copy()
# dados_nota_2.dropna(subset=['TX_GABARITO_CN', 'TX_GABARITO_CH', 'TX_GABARITO_LC', 'TX_GABARITO_MT'], inplace=True)
dados_nota_2['NU_RESPOSTAS_CORRETAS_CN'] = dados_nota_2.apply(conta_acertos, materia='CN', axis=1)
dados_nota_2['NU_RESPOSTAS_CORRETAS_CH'] = dados_nota_2.apply(conta_acertos, materia='CH', axis=1)
dados_nota_2['NU_RESPOSTAS_CORRETAS_LC'] = dados_nota_2.apply(conta_acertos, materia='LC', axis=1)
dados_nota_2['NU_RESPOSTAS_CORRETAS_MT'] = dados_nota_2.apply(conta_acertos, materia='MT', axis=1)
# + id="QC3h5iflHqzL" colab_type="code" outputId="fa628add-15da-4b9b-c5ad-4ea7ac48169a" executionInfo={"status": "ok", "timestamp": 1587670833033, "user_tz": 180, "elapsed": 59848, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhoED2MwRclSsAx59Pnc82nRT9ubmUmNT2GT_EV=s64", "userId": "02671300746629988104"}} colab={"base_uri": "https://localhost:8080/", "height": 942}
num_questoes_acertadas = dados_nota_2[['NU_RESPOSTAS_CORRETAS_CN', 'NU_RESPOSTAS_CORRETAS_CH', 'NU_RESPOSTAS_CORRETAS_LC', 'NU_RESPOSTAS_CORRETAS_MT', 'NU_NOTA_REDACAO']]
num_questoes_acertadas.columns = ['Ciências da Natureza', 'Ciências Humanas', 'Linguagens e Códigos', 'Matemática', 'Redação']
sns.set()
sns.pairplot(num_questoes_acertadas)
# + [markdown] id="WHrQGdesBDJp" colab_type="text"
# ## Desafio 4 do [<NAME>](https://twitter.com/guilhermecaelum)
#
# Remover todos os zeros. Tomar o cuidado que no desafio 1 já tomamos decisões ligadas a limpeza dos dados também. Você também pode exportar para outro CSV se quiser.
# + id="vsSKA4uIIsrr" colab_type="code" outputId="4ff799d6-00f1-4fad-d567-68769ad2b5ab" executionInfo={"status": "ok", "timestamp": 1587670833036, "user_tz": 180, "elapsed": 59839, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhoED2MwRclSsAx59Pnc82nRT9ubmUmNT2GT_EV=s64", "userId": "02671300746629988104"}} colab={"base_uri": "https://localhost:8080/", "height": 204}
# Seguindo a dica da Thais
import numpy as np
dados_nota_sem_0 = dados_nota_2.copy()
# dados_nota_sem_0['NU_RESPOSTAS_CORRETAS_CN'] = dados_nota_2['NU_RESPOSTAS_CORRETAS_CN'].replace(0, None)
# dados_nota_sem_0['NU_RESPOSTAS_CORRETAS_CH'] = dados_nota_2['NU_RESPOSTAS_CORRETAS_CH'].replace(0, None)
# dados_nota_sem_0['NU_RESPOSTAS_CORRETAS_LC'] = dados_nota_2['NU_RESPOSTAS_CORRETAS_LC'].replace(0, None)
# dados_nota_sem_0['NU_RESPOSTAS_CORRETAS_MT'] = dados_nota_2['NU_RESPOSTAS_CORRETAS_MT'].replace(0, None)
dados_nota_sem_0['NU_NOTA_CN'] = dados_nota_2['NU_NOTA_CN'].replace(0., np.NAN)
dados_nota_sem_0['NU_NOTA_CH'] = dados_nota_2['NU_NOTA_CH'].replace(0., np.NAN)
dados_nota_sem_0['NU_NOTA_LC'] = dados_nota_2['NU_NOTA_LC'].replace(0., np.NAN)
dados_nota_sem_0['NU_NOTA_MT'] = dados_nota_2['NU_NOTA_MT'].replace(0., np.NAN)
dados_nota_sem_0.dropna(subset=['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT'], inplace=True)
dados_nota_sem_0[['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT']].head()
# + [markdown] id="w5nybh8yBOT3" colab_type="text"
# ## Desafio 5 do [<NAME>](https://twitter.com/tgcsantos)
#
# Quais questões tiveram mais erros (análise sobre o gabarito x acertos x erros)
# + id="lDaxavnJaz_y" colab_type="code" colab={}
def corrige_questoes(aluno, materia):
    """Grade one student's answers question by question.

    Returns a pandas Series holding 1 for each correct answer and 0
    otherwise, indexed by question position.
    """
    respostas = aluno.get(f'TX_RESPOSTAS_{materia}')
    gabarito = aluno.get(f'TX_GABARITO_{materia}')
    notas = [int(dada == correta) for dada, correta in zip(respostas, gabarito)]
    return pd.Series(notas)
# + id="_VdCUIwcbbYs" colab_type="code" outputId="f0e460f9-d593-4d56-fc5e-576c489dfc6c" executionInfo={"status": "ok", "timestamp": 1587670842132, "user_tz": 180, "elapsed": 68917, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhoED2MwRclSsAx59Pnc82nRT9ubmUmNT2GT_EV=s64", "userId": "02671300746629988104"}} colab={"base_uri": "https://localhost:8080/", "height": 419}
# Analisando a prova azul de Ciências da Natureza
prova_cn_azul = dados.query('CO_PROVA_CN == 447')
matriz_acertos = prova_cn_azul.apply(corrige_questoes, materia='CN', axis=1)
matriz_acertos
# + id="7w1jU3ddXPCc" colab_type="code" outputId="0a8f4509-d11d-4fdf-d386-cb3f494cc548" executionInfo={"status": "ok", "timestamp": 1587670842971, "user_tz": 180, "elapsed": 69741, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhoED2MwRclSsAx59Pnc82nRT9ubmUmNT2GT_EV=s64", "userId": "02671300746629988104"}} colab={"base_uri": "https://localhost:8080/", "height": 521}
total_acertos_por_questao = pd.DataFrame(matriz_acertos.sum())
total_acertos_por_questao.reset_index(inplace=True)
total_acertos_por_questao.columns = ['Questão', 'Número de Acertos']
total_acertos_por_questao['Questão'] = total_acertos_por_questao['Questão'] + 1
plt.figure(figsize=(18, 8))
ax = sns.barplot(x='Questão', y='Número de Acertos', data=total_acertos_por_questao)
ax.set_title('Número de acertos por questão da prova azul de Ciências da Natureza', fontsize=16)
plt.show()
# + [markdown] id="prjz3IZIB_Xa" colab_type="text"
# ## Desafio 6 do [<NAME>](https://twitter.com/allanspadini)
#
# Estudar o que as pessoas que estudam o assunto estão discutindo e conclusões que já chegaram sobre a utilização de informações (principalmente sensíveis) para machine learning e data science. Podcast do datahackers também sobre o assunto.
# + [markdown] id="yN729smrZJUO" colab_type="text"
# #Não esqueça de compartilhar a solução dos seus desafios com nossos instrutores, seja no Twitter, seja LinkedIn. Boa sorte!
|
aula-04/Copy of Desafio aula04.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Science, Tirgul 1
# ## Smaller
# ### Great python numpy tutorials in Jupyter lab:
# [Link to Jupyter Lab Tutorials](https://www.youtube.com/watch?v=7wfPqAyYADY&list=PLcD0MjpSMpGRoZgeb509_yICnabm4PYqc)
# # Hebrew tutorial for Jupyter Notebook (the old version before Jupyter Lab)
# [Link](https://www.youtube.com/watch?v=g9EglM7Ymns)
# # Installation instructions:
# # https://www.youtube.com/watch?v=uOwCiZKj2rg
#
# # Data science income:
#
# + [Nisha](https://www.nisha.co.il/%D7%98%D7%91%D7%9C%D7%90%D7%95%D7%AA-%D7%A9%D7%9B%D7%A8)
# + [All Jobs](https://www.alljobs.co.il/User/SalaryCompound/position/?JobCategoryID=1733)
# # Same Python commands work here too..
print("Hello Jupyter Lab")
# # Jupiter Lab keyboard shortcuts
# ### https://gist.github.com/discdiver/9e00618756d120a8c9fa344ac1c375ac
#
# #### Keyboard shortcuts greatly increase productivity; here are some common ones:
# + note there are modes: Command mode (outside the cells) and edit mode (inside a cell)
# + Edit mode -> command mode: Esc
# + Command mode -> edit mode: Enter
# + Toggle hide/show left sidebar: Ctrl+B
# + toggle comment: Ctrl + /
# + Indent: Ctrl + ]
# + Dedent: Ctrl + [
# # Jupiter lab has a markdown \ code option mode.
# # (Header 1, title)
# ## (Header 2, major headings)
# ### (Header 3, subheadings)
# #### (Header 4)
# ##### (Header 5)
# ###### (Header 6)
# ## Markdown Cheat-sheet
# [Link](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
# ## Using LaTex equations in Jupyter
# $ x^2 + y^2 = z^2 $
# +
# cmd view point denoted by ! sign
# !pip install pandas
# Needs to be done only once
# +
# Read file using pandas
# Pandas is a fast, powerful, flexible and easy to use open-source data analysis in python
import pandas as pd
data = pd.read_csv('titanic_short.csv')
data
# -
# ## Self exercise: Suggest how to display only survivors (denoted by 1)
max_age = data.age.max()
min_age = data.age.min()
max_age
max_age_srs = (data.age >30) & (data.age <50)
max_age_srs
data.dropna(axis=0)
# ## Self exercise: Suggest how to display only female passengers
data[data['sex']=='female']
# ## Self exercise: Suggest how to display only female survivors
d = data[(data['survived'] == 1) & (data['sex']=='female')]
d[['pclass','age']]
# ### Display some of the columns
# Emphasis, aka italics, with *asterisks* or _underscores_.
#
# Strong emphasis, aka bold, with **asterisks** or __underscores__.
#
# Combined emphasis with **asterisks and _underscores_**.
#
# Strikethrough uses two tildes. ~~Scratch this.~~
data[ [ 'pclass','age'] ]
# !pip install numpy
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
# . product
print(arr*arr)
#### vector multiplication
print("arr.dot(arr) =\n",arr.dot(arr))
# or by
print("arr@arr =\n",arr@arr)
|
tirgul1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (py39)
# language: python
# name: py39
# ---
x = 42
type(x)
print(x)
pi = 3.14159
pi
type(pi)
1+2+3+4+5
2*3.14159
2**10
int_2=2
type(int_2)
int_2/int_2
type(int_2/int_2)
101/2
101//2
int(101/2)
100%2
101%2
100.5%2
x = None
print(x)
type(x)
my_name="<NAME>"
my_name
type(my_name)
course = 'DSCI 511'
course
type(course)
sentence="It's a rainy day."
sentence
type(sentence)
quote='Donald Knuth: "Premature optimization is the root of all evil."'
quote
type(quote)
the_truth=True
the_truth
type(the_truth)
lies=False
lies
type(lies)
2<3
"Deep learning" == "Solve all the world's problems"
2 != "2"
2 is 2
2 == 2.0
True and True
True and False
True or False
False or False
("Python 2" != "Python 3") and (2 <= 3)
True
not True
not not True
x=5.0
type(x)
x=int(5.0)
x
type(x)
x=str(5.0)
x
type(x)
str(5.0)==5.0
int(5.3)
float("hello")
my_list=[1,2,"THREE",4,0.5]
my_list
type(my_list)
another_list=[1,"two",[3,4,"five"],True, None,{"key":"value"}]
another_list
len(my_list)
len(another_list)
today=(1,2,"THREE",4,0.5)
today
type(today)
len(today)
my_list
my_list[0]
my_list[2]
len(my_list)
my_list[5]
my_list[-1]
my_list[-2]
my_list[1:3]
my_list[1:4]
alphabet="abcdefghijklmnopqrstuvwxyz"
alphabet[0]
alphabet[-1]
alphabet[-3]
alphabet[:5]
alphabet[12:20]
primes=[2,3,5,7,11]
primes
len(primes)
primes.append(13)
primes
s={2,3,5,11}
s
{1,2,3} == {3,2,1}
[1,2,3] == [3,2,1]
s.add(2)
s
s[0]
names_list=["Indiana", "Fang", "Linsey"]
names_list
names_list[0] = "Cool guy"
names_list
names_tuple=("Indiana", "Fang", "Linsey")
names_tuple
names_tuple[0]="Not cool guy"
my_name="Karyn"
my_name[-1] = "q"
x=([1,2,3],5)
x[1]=7
x
x[0][1]=4
x
all_caps="HOW ARE YOU TODAY?"
all_caps
new_str=all_caps.lower()
new_str
all_caps
all_caps.split()
all_caps.count("O")
caps_list=list(all_caps)
caps_list
"".join(caps_list)
"-".join(caps_list)
"".join(caps_list).lower().split(" ")
name="<NAME>"
age=4/12
day=10
month=6
year=2020
template_new=f"Hello,my name is {name}. I am {age:.2f} years old. I was born {day}/{month:02}/{year}."
template_new
# +
house={
"bedrooms":3,
"bathrooms":2,
"city": "Vancouver",
"price": 2499999,
"date_sold":(1,3,2015),
}
condo={
"bedrooms":2,
"bathrooms": 1,
"city": "Burnaby",
"price": 699999,
"date_sold": (27,8,2011),
}
# -
house["price"]
condo["city"]
condo["price"]=5
condo
condo["flooring"]="wood"
condo
del condo["city"]
condo
condo[5]=443345
condo
condo[(1,2,3)]=777
condo
condo["not-here"]
lst=list()
lst
lst=[]
lst
tup=tuple()
tup
tup=()
tup
dic=dict()
dic
dic={}
dic
st=set()
st
# +
name="Tom"
if name.lower() == "tom":
print("That's my name too!")
elif name.lower() == "santa":
print("That's a funny name.")
else:
print(f"Hello {name}! That's a cool name!")
print("Nice to meet you!")
# +
name = "<NAME>"
if name.lower() == "tom":
print("That's my name too!")
elif name.lower() == "santa":
print("That's a funny name.")
else:
print(f"Hello {name}! That's a cool name.")
if name.lower().startswith("super"):
print("Do you really have superpowers?")
print("Nice to meet you!")
# -
words=["the","list","of","words"]
x="long list" if len(words) >10 else "short list"
x
if len(words) >10:
x="long list"
else:
x="short list"
x
# +
x=1
if x:
print("I'm truthy!")
else:
print("I'm falsey!")
# +
x=False
if x:
print("I'm truthy!")
else:
print("I'm falsey")
# -
x=[]
if x:
print("I'm truthy!")
else:
print("I'm falsey!")
fake_variable
True or fake_variable
True and fake_variable
False and fake_variable
|
notebooks/plotExamples/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib
matplotlib.use("nbagg")
import math
import matplotlib.animation as anm
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
# -
class World:
    """Collects simulation objects and renders them with matplotlib."""

    def __init__(self, debug=False):
        self.objects = []   # objects registered for drawing/stepping each frame
        self.debug = debug  # True: run steps synchronously instead of animating

    def append(self, obj):  # function for registering an object with the world
        self.objects.append(obj)

    def draw(self):
        """Build the figure and run the animation (or a fixed debug loop)."""
        fig = plt.figure(figsize=(4, 4))  # prepare a 4x4 inch figure
        ax = fig.add_subplot(111)         # prepare the subplot
        ax.set_aspect("equal")            # match aspect ratio to coordinate values
        ax.set_xlim(-5, 5)                # draw the X axis over -5m .. 5m
        ax.set_ylim(-5, 5)                # likewise for Y
        ax.set_xlabel("X", fontsize=10)   # label the X axis
        ax.set_ylabel("Y", fontsize=10)   # and the Y axis
        elems = []                        # per-frame artists, cleared each step
        if self.debug:
            for i in range(1000):
                self.one_step(i, elems, ax)
        else:
            self.ani = anm.FuncAnimation(
                fig,
                self.one_step,
                fargs=(elems, ax),
                frames=100,
                interval=1000,
                repeat=False,
            )
            plt.show()

    def one_step(self, i, elems, ax):  ### fig:one_step_add_one_step
        """Clear last frame's artists, then redraw and advance every object."""
        while elems:
            elems.pop().remove()  # detach the previous frame's artists
        elems.append(ax.text(-4.4, 4.5, "t = " + str(i), fontsize=10))
        for obj in self.objects:
            obj.draw(ax, elems)
            if hasattr(obj, "one_step"):
                obj.one_step(1.0)  # advance by 1.0 s per frame (added)
class IdealRobot:  ### fig:rewrite_init_for_agent
    """Noise-free robot model: a pose, optionally driven by an agent."""

    def __init__(self, pose, agent=None, color="black"):  # agent argument added
        self.pose = pose   # (x, y, theta); theta in radians (used with math.cos/sin)
        self.r = 0.2       # body radius used for drawing [m]
        self.color = color
        self.agent = agent  # supplies (nu, omega) commands; may be None (added)
        self.poses = [pose]  # trajectory history for drawing (added)

    def draw(self, ax, elems):  ###idealrobot6draw
        """Draw the robot body, its heading line, and the trajectory so far."""
        x, y, theta = self.pose  # this part is unchanged from the earlier version
        xn = x + self.r * math.cos(theta)  # heading-line endpoint on the body edge
        yn = y + self.r * math.sin(theta)
        elems += ax.plot([x, xn], [y, yn], color=self.color)
        c = patches.Circle(xy=(x, y), radius=self.r, fill=False, color=self.color)
        elems.append(ax.add_patch(c))
        self.poses.append(self.pose)  # added below: record pose, draw trajectory
        elems += ax.plot(
            [e[0] for e in self.poses],
            [e[1] for e in self.poses],
            linewidth=0.5,
            color="black",
        )

    @classmethod
    def state_transition(cls, nu, omega, time, pose):  ### fig:state_transition
        """Return the pose after moving for `time` seconds at forward speed nu
        and angular velocity omega, starting from `pose`."""
        t0 = pose[2]
        # Split on angular velocity being (almost) zero versus not:
        if math.fabs(omega) < 1e-10:
            # Straight-line motion; avoids dividing by omega below.
            return pose + np.array([nu * math.cos(t0), nu * math.sin(t0), omega]) * time
        else:
            # Exact integration along a circular arc of radius nu/omega.
            return pose + np.array(
                [
                    nu / omega * (math.sin(t0 + omega * time) - math.sin(t0)),
                    nu / omega * (-math.cos(t0 + omega * time) + math.cos(t0)),
                    omega * time,
                ]
            )

    def one_step(self, time_interval):  ### fig:robot_one_step
        """Advance the pose by one control step; a robot without an agent stays put."""
        if not self.agent:
            return
        nu, omega = self.agent.decision()
        self.pose = self.state_transition(nu, omega, time_interval, self.pose)
class Agent:  ### fig:Agent
    """Constant-command agent: always returns the same (nu, omega) pair."""

    def __init__(self, nu, omega):
        # Forward velocity [m/s] and angular velocity [rad/s] to command.
        self.nu = nu
        self.omega = omega

    def decision(self, observation=None):
        """Return the fixed command; the observation argument is ignored."""
        return self.nu, self.omega
world = World() ### fig:rewrite_robot_for_agent
straight = Agent(0.2, 0.0) # 0.2[m/s]で直進
circling = Agent(0.2, 10.0 / 180 * math.pi) # 0.2[m/s], 10[deg/s](円を描く)
robot1 = IdealRobot(np.array([2, 3, math.pi / 6]).T, straight)
robot2 = IdealRobot(np.array([-2, -1, math.pi / 5 * 6]).T, circling, "red")
robot3 = IdealRobot(np.array([0, 0, 0]).T, color="blue") # エージェントを与えないロボット
world.append(robot1)
world.append(robot2)
world.append(robot3)
world.draw()
## 原点から0.1[m/s]で1[s]直進 ## ### fig:using_state_transition(セル6まで)
IdealRobot.state_transition(0.1, 0.0, 1.0, np.array([0, 0, 0]).T)
## 原点から0.1[m/s], 10[deg/s]で9[s]移動 ##
IdealRobot.state_transition(0.1, 10.0 / 180 * math.pi, 9.0, np.array([0, 0, 0]).T)
## 原点から0.1[m/s], 10[deg/s]で18[s]移動 ##
IdealRobot.state_transition(0.1, 10.0 / 180 * math.pi, 18.0, np.array([0, 0, 0]).T)
|
section_robot/ideal_robot5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
# -
# Checking the number of mice in the DataFrame.
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# Optional: Get all the data for the duplicate mouse ID.
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
# Checking the number of mice in the clean DataFrame.
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# This method is the most straightforward, creating multiple series and putting them all together at the end.
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# -
# ## Bar Plots
# +
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pandas.
# +
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pyplot.
# -
# ## Pie Plots
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# -
# ## Quartiles, Outliers and Boxplots
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. Calculate the IQR and quantitatively determine if there are any potential outliers.
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# -
# ## Line and Scatter Plots
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
# +
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
# -
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
|
Pymaceuticals/pymaceuticals_starter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:eu-west-1:470317259841:image/datascience-1.0
# ---
# # Targeting Direct Marketing with Amazon SageMaker XGBoost
# _**Supervised Learning with Gradient Boosted Trees: A Binary Prediction Problem With Unbalanced Classes**_
#
#
# ## Background
# Direct marketing, either through mail, email, phone, etc., is a common tactic to acquire customers. Because resources and a customer's attention is limited, the goal is to only target the subset of prospects who are likely to engage with a specific offer. Predicting those potential customers based on readily available information like demographics, past interactions, and environmental factors is a common machine learning problem.
#
# This notebook presents an example problem to predict if a customer will enroll for a term deposit at a bank, after one or more phone calls. The steps include:
#
# * Preparing your Amazon SageMaker notebook
# * Downloading data from the internet into Amazon SageMaker
# * Investigating and transforming the data so that it can be fed to Amazon SageMaker algorithms
# * Estimating a model using the Gradient Boosting algorithm
# * Evaluating the effectiveness of the model
# * Setting the model up to make on-going predictions
#
# ---
#
# ## Preparation
#
# _This notebook was created and tested on an ml.m4.xlarge notebook instance._
#
# Let's start by specifying:
#
# - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
# - The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with a the appropriate full IAM role arn string(s).
# + isConfigCell=true tags=["parameters"]
# cell 01
# Define IAM role
import boto3
import sagemaker
import re
from sagemaker import get_execution_role
# Region of the current boto3 session; training/hosting run in this region.
region = boto3.Session().region_name
# SageMaker session wraps interactions with the SageMaker APIs and S3.
session = sagemaker.Session()
# Per-account/per-region default bucket; all artifacts are stored under `prefix`.
bucket = session.default_bucket()
prefix = 'sagemaker/DEMO-xgboost-dm'
# Execution role that grants SageMaker jobs access to the data in S3.
role = get_execution_role()
# -
# Now let's bring in the Python libraries that we'll use throughout the analysis
# cell 02
import numpy as np # For matrix operations and numerical processing
import pandas as pd # For munging tabular data
import matplotlib.pyplot as plt # For charts and visualizations
from IPython.display import Image # For displaying images in the notebook
from IPython.display import display # For displaying outputs in the notebook
from time import gmtime, strftime # For labeling SageMaker models, endpoints, etc.
import sys # For writing outputs to notebook
import math # For ceiling function
import json # For parsing hosting outputs
import os # For manipulating filepath names
import sagemaker # Amazon SageMaker's Python SDK provides many helper functions
from sagemaker.predictor import csv_serializer # Converts strings for HTTP POST requests on inference
# ! python -m pip install smdebug
# ---
#
# ## Data
# Let's start by downloading the [direct marketing dataset](https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip) from the sample data s3 bucket.
#
# \[Moro et al., 2014\] S. Moro, P. Cortez and P. Rita. A Data-Driven Approach to Predict the Success of Bank Telemarketing. Decision Support Systems, Elsevier, 62:22-31, June 2014
#
# cell 03
# !wget https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip
# !conda install -y -c conda-forge unzip
# !unzip -o bank-additional.zip
# Now let's read this into a Pandas data frame and take a look.
# cell 04
# Load the full direct-marketing dataset unpacked above.
data = pd.read_csv('./bank-additional/bank-additional-full.csv')
# Limit DataFrame display to 10 rows so the preview below stays compact.
pd.set_option('display.max_rows',10)
data
# Let's talk about the data. At a high level, we can see:
#
# * We have a little over 40K customer records, and 20 features for each customer
# * The features are mixed; some numeric, some categorical
# * The data appears to be sorted, at least by `time` and `contact`, maybe more
#
# _**Specifics on each of the features:**_
#
# *Demographics:*
# * `age`: Customer's age (numeric)
# * `job`: Type of job (categorical: 'admin.', 'services', ...)
# * `marital`: Marital status (categorical: 'married', 'single', ...)
# * `education`: Level of education (categorical: 'basic.4y', 'high.school', ...)
#
# *Past customer events:*
# * `default`: Has credit in default? (categorical: 'no', 'unknown', ...)
# * `housing`: Has housing loan? (categorical: 'no', 'yes', ...)
# * `loan`: Has personal loan? (categorical: 'no', 'yes', ...)
#
# *Past direct marketing contacts:*
# * `contact`: Contact communication type (categorical: 'cellular', 'telephone', ...)
# * `month`: Last contact month of year (categorical: 'may', 'nov', ...)
# * `day_of_week`: Last contact day of the week (categorical: 'mon', 'fri', ...)
# * `duration`: Last contact duration, in seconds (numeric). Important note: If duration = 0 then `y` = 'no'.
#
# *Campaign information:*
# * `campaign`: Number of contacts performed during this campaign and for this client (numeric, includes last contact)
# * `pdays`: Number of days that passed by after the client was last contacted from a previous campaign (numeric)
# * `previous`: Number of contacts performed before this campaign and for this client (numeric)
# * `poutcome`: Outcome of the previous marketing campaign (categorical: 'nonexistent','success', ...)
#
# *External environment factors:*
# * `emp.var.rate`: Employment variation rate - quarterly indicator (numeric)
# * `cons.price.idx`: Consumer price index - monthly indicator (numeric)
# * `cons.conf.idx`: Consumer confidence index - monthly indicator (numeric)
# * `euribor3m`: Euribor 3 month rate - daily indicator (numeric)
# * `nr.employed`: Number of employees - quarterly indicator (numeric)
#
# *Target variable:*
# * `y`: Has the client subscribed a term deposit? (binary: 'yes','no')
# ### Transformation
#
# Cleaning up data is part of nearly every machine learning project. It arguably presents the biggest risk if done incorrectly and is one of the more subjective aspects in the process. Several common techniques include:
#
# * Handling missing values: Some machine learning algorithms are capable of handling missing values, but most would rather not. Options include:
# * Removing observations with missing values: This works well if only a very small fraction of observations have incomplete information.
# * Removing features with missing values: This works well if there are a small number of features which have a large number of missing values.
# * Imputing missing values: Entire [books](https://www.amazon.com/Flexible-Imputation-Missing-Interdisciplinary-Statistics/dp/1439868247) have been written on this topic, but common choices are replacing the missing value with the mode or mean of that column's non-missing values.
# * Converting categorical to numeric: The most common method is one hot encoding, which for each feature maps every distinct value of that column to its own feature which takes a value of 1 when the categorical feature is equal to that value, and 0 otherwise.
# * Oddly distributed data: Although for non-linear models like Gradient Boosted Trees, this has very limited implications, parametric models like regression can produce wildly inaccurate estimates when fed highly skewed data. In some cases, simply taking the natural log of the features is sufficient to produce more normally distributed data. In others, bucketing values into discrete ranges is helpful. These buckets can then be treated as categorical variables and included in the model when one hot encoded.
# * Handling more complicated data types: Manipulating images, text, or data at varying grains is left for other notebook templates.
#
# Luckily, some of these aspects have already been handled for us, and the algorithm we are showcasing tends to do well at handling sparse or oddly distributed data. Therefore, let's keep pre-processing simple.
# cell 05
# Indicator variable to capture when pdays takes a value of 999
data['no_previous_contact'] = np.where(data['pdays'] == 999, 1, 0)
# Indicator for individuals not actively employed. np.isin replaces the legacy
# np.in1d (deprecated since NumPy 1.25) and is element-wise equivalent here.
data['not_working'] = np.where(np.isin(data['job'], ['student', 'retired', 'unemployed']), 1, 0)
# Convert categorical variables to sets of one-hot indicator columns.
model_data = pd.get_dummies(data)
# Another question to ask yourself before building a model is whether certain features will add value in your final use case. For example, if your goal is to deliver the best prediction, then will you have access to that data at the moment of prediction? Knowing it's raining is highly predictive for umbrella sales, but forecasting weather far enough out to plan inventory on umbrellas is probably just as difficult as forecasting umbrella sales without knowledge of the weather. So, including this in your model may give you a false sense of precision.
#
# Following this logic, let's remove the economic features and `duration` from our data as they would need to be forecasted with high precision to use as inputs in future predictions.
#
# Even if we were to use values of the economic indicators from the previous quarter, this value is likely not as relevant for prospects contacted early in the next quarter as those contacted later on.
# cell 06
# Drop `duration` plus the macro-economic indicators: they would need to be
# forecast with high precision to be usable as inputs at prediction time.
model_data = model_data.drop(['duration', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed'], axis=1)
# When building a model whose primary goal is to predict a target value on new data, it is important to understand overfitting. Supervised learning models are designed to minimize error between their predictions of the target value and actuals, in the data they are given. This last part is key, as frequently in their quest for greater accuracy, machine learning models bias themselves toward picking up on minor idiosyncrasies within the data they are shown. These idiosyncrasies then don't repeat themselves in subsequent data, meaning those predictions can actually be made less accurate, at the expense of more accurate predictions in the training phase.
#
# The most common way of preventing this is to build models with the concept that a model shouldn't only be judged on its fit to the data it was trained on, but also on "new" data. There are several different ways of operationalizing this, holdout validation, cross-validation, leave-one-out validation, etc. For our purposes, we'll simply randomly split the data into 3 uneven groups. The model will be trained on 70% of data, it will then be evaluated on 20% of data to give us an estimate of the accuracy we hope to have on "new" data, and 10% will be held back as a final testing dataset which will be used later on.
# cell 07
# Shuffle with a fixed seed, then split 70% train / 20% validation / 10% test.
train_data, validation_data, test_data = np.split(model_data.sample(frac=1, random_state=1729), [int(0.7 * len(model_data)), int(0.9 * len(model_data))])  # Randomly sort the data then split out first 70%, second 20%, and last 10%
# Amazon SageMaker's XGBoost container expects data in the libSVM or CSV data format. For this example, we'll stick to CSV. Note that the first column must be the target variable and the CSV should not include headers. Also, notice that although repetitive it's easiest to do this after the train|validation|test split rather than before. This avoids any misalignment issues due to random reordering.
# cell 08
# Write CSVs with the target (y_yes) as the first column and no header,
# as required by the SageMaker XGBoost container.
pd.concat([train_data['y_yes'], train_data.drop(['y_no', 'y_yes'], axis=1)], axis=1).to_csv('train.csv', index=False, header=False)
pd.concat([validation_data['y_yes'], validation_data.drop(['y_no', 'y_yes'], axis=1)], axis=1).to_csv('validation.csv', index=False, header=False)
# Now we'll copy the file to S3 for Amazon SageMaker's managed training to pickup.
# cell 09
# Upload both CSVs under the prefix that the training channels point at.
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/train.csv')).upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation/validation.csv')).upload_file('validation.csv')
# ---
#
# ## Training
# Now we know that most of our features have skewed distributions, some are highly correlated with one another, and some appear to have non-linear relationships with our target variable. Also, for targeting future prospects, good predictive accuracy is preferred to being able to explain why that prospect was targeted. Taken together, these aspects make gradient boosted trees a good candidate algorithm.
#
# There are several intricacies to understanding the algorithm, but at a high level, gradient boosted trees works by combining predictions from many simple models, each of which tries to address the weaknesses of the previous models. By doing this the collection of simple models can actually outperform large, complex models. Other Amazon SageMaker notebooks elaborate on gradient boosting trees further and how they differ from similar algorithms.
#
# `xgboost` is an extremely popular, open-source package for gradient boosted trees. It is computationally powerful, fully featured, and has been successfully used in many machine learning competitions. Let's start with a simple `xgboost` model, trained using Amazon SageMaker's managed, distributed training framework.
#
# First we'll need to specify the ECR container location for Amazon SageMaker's implementation of XGBoost.
# cell 10
# Resolve the ECR image URI for SageMaker's built-in XGBoost (version 1.0-1).
# The legacy `from sagemaker.amazon.amazon_estimator import get_image_uri` import
# was removed: it is deprecated and unused — `sagemaker.image_uris.retrieve`
# is the current API and is what is actually called here.
container = sagemaker.image_uris.retrieve(region=boto3.Session().region_name, framework='xgboost', version='1.0-1')
# Then, because we're training with the CSV file format, we'll create `s3_input`s that our training function can use as a pointer to the files in S3, which also specify that the content type is CSV.
# cell 11
# TrainingInput pointers to the CSVs uploaded above; content_type tells the
# algorithm to parse them as CSV rather than libSVM.
s3_input_train = sagemaker.TrainingInput(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='csv')
s3_input_validation = sagemaker.TrainingInput(s3_data='s3://{}/{}/validation/'.format(bucket, prefix), content_type='csv')
# cell 12
# Job-name prefix and the S3 location for model artifacts / debugger output.
base_job_name = "demo-smdebug-xgboost-regression"
bucket_path='s3://{}/{}/output'.format(bucket, prefix)
# ### Enabling Debugger in Estimator object
#
#
# #### DebuggerHookConfig
#
# Enabling Amazon SageMaker Debugger in training job can be accomplished by adding its configuration into Estimator object constructor:
#
# ```python
# from sagemaker.debugger import DebuggerHookConfig, CollectionConfig
#
# estimator = Estimator(
# ...,
# debugger_hook_config = DebuggerHookConfig(
# s3_output_path="s3://{bucket_name}/{location_in_bucket}", # Required
# collection_configs=[
# CollectionConfig(
# name="metrics",
# parameters={
# "save_interval": "10"
# }
# )
# ]
# )
# )
# ```
# Here, the `DebuggerHookConfig` object instructs `Estimator` what data we are interested in.
# Two parameters are provided in the example:
#
# - `s3_output_path`: it points to S3 bucket/path where we intend to store our debugging tensors.
# Amount of data saved depends on multiple factors, major ones are: training job / data set / model / frequency of saving tensors.
# This bucket should be in your AWS account, and you should have full access control over it.
# **Important Note**: this s3 bucket should be originally created in the same region where your training job will be running, otherwise you might run into problems with cross region access.
#
# - `collection_configs`: it enumerates named collections of tensors we want to save.
# Collections are a convenient way to organize relevant tensors under the same umbrella to make it easy to navigate them during analysis.
# In this particular example, you are instructing Amazon SageMaker Debugger that you are interested in a single collection named `metrics`.
# We also instructed Amazon SageMaker Debugger to save metrics every 10 iterations.
# See [Collection](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md#collection) documentation for all parameters that are supported by Collections and DebuggerConfig documentation for more details about all parameters DebuggerConfig supports.
#
# #### Rules
#
# Enabling Rules in training job can be accomplished by adding the `rules` configuration into Estimator object constructor.
#
# - `rules`: This new parameter will accept a list of rules you wish to evaluate against the tensors output by this training job.
# For rules, Amazon SageMaker Debugger supports two types:
# - SageMaker Rules: These are rules specially curated by the data science and engineering teams in Amazon SageMaker which you can opt to evaluate against your training job.
# - Custom Rules: You can optionally choose to write your own rule as a Python source file and have it evaluated against your training job.
# To have Amazon SageMaker Debugger evaluate this rule, you would have to provide the S3 location of the rule source and the evaluator image.
#
# In this example, you will use Amazon SageMaker's LossNotDecreasing rule, which helps you identify if you are running into a situation where the training loss is not going down.
#
# ```python
# from sagemaker.debugger import rule_configs, Rule
#
# estimator = Estimator(
# ...,
# rules=[
# Rule.sagemaker(
# rule_configs.loss_not_decreasing(),
# rule_parameters={
# "collection_names": "metrics",
# "num_steps": "10",
# },
# ),
# ],
# )
# ```
#
# - `rule_parameters`: In this parameter, you provide the runtime values of the parameter in your constructor.
# You can still choose to pass in other values which may be necessary for your rule to be evaluated.
# In this example, you will use Amazon SageMaker's LossNotDecreasing rule to monitor the `metrics` collection.
# The rule will alert you if the tensors in `metrics` have not decreased for more than 10 steps.
# First we'll need to specify training parameters to the estimator. This includes:
# 1. The `xgboost` algorithm container
# 1. The IAM role to use
# 1. Training instance type and count
# 1. S3 location for output data
# 1. Algorithm hyperparameters
#
# And then a `.fit()` function which specifies:
# 1. S3 location for output data. In this case we have both a training and validation set which are passed in.
# +
# cell 13
from sagemaker.debugger import rule_configs, Rule, DebuggerHookConfig, CollectionConfig
from sagemaker.estimator import Estimator
sess = sagemaker.Session()
# Debugger saves each requested tensor collection every `save_interval` iterations.
save_interval = 5
# Estimator for the built-in XGBoost container with Amazon SageMaker Debugger
# enabled via DebuggerHookConfig; four tensor collections are recorded.
xgboost_estimator = Estimator(
    role=role,
    base_job_name=base_job_name,
    instance_count=1,
    instance_type='ml.m5.4xlarge',
    image_uri=container,
    max_run=1800,  # hard cap (seconds) on training runtime
    sagemaker_session=sess,
    debugger_hook_config=DebuggerHookConfig(
        s3_output_path=bucket_path,  # Required
        collection_configs=[
            # Training/validation evaluation metrics per save step.
            CollectionConfig(
                name="metrics",
                parameters={
                    "save_interval": str(save_interval)
                }
            ),
            # Model predictions per save step.
            CollectionConfig(
                name="predictions",
                parameters={
                    "save_interval": str(save_interval)
                }
            ),
            # Feature-importance scores per save step.
            CollectionConfig(
                name="feature_importance",
                parameters={
                    "save_interval": str(save_interval)
                }
            ),
            # Average SHAP values per save step.
            CollectionConfig(
                name="average_shap",
                parameters={
                    "save_interval": str(save_interval)
                }
            )
        ],
    )
)
# -
# cell 14
# XGBoost hyperparameters for this binary classification task.
xgboost_estimator.set_hyperparameters(
    max_depth=5,        # maximum tree depth; controls model complexity
    eta=0.2,            # learning rate (step-size shrinkage)
    gamma=4,            # minimum loss reduction required to make a split
    min_child_weight=6, # minimum sum of instance weight needed in a child
    subsample=0.8,      # fraction of rows sampled per tree
    silent=0,           # NOTE(review): `silent` is deprecated in XGBoost 1.0 in favor of `verbosity` — confirm against the 1.0-1 container
    objective='binary:logistic',
    num_round=100)      # number of boosting rounds
# +
# cell 15
# Launch training with the train and validation channels defined earlier.
xgboost_estimator.fit(
    {"train": s3_input_train, "validation": s3_input_validation},
    # This is a fire-and-forget call. By setting wait=False, you submit the job to run in the background.
    # Amazon SageMaker starts one training job and releases control to the next cells in the notebook.
    # Follow this notebook to see the status of the training job.
    wait=False
)
# -
# ### Result
#
# As a result of the above command, Amazon SageMaker starts one training job and one rule job for you. The first one is the job that produces the tensors to be analyzed. The second one analyzes the tensors to check if `train-rmse` and `validation-rmse` are not decreasing at any point during training.
#
# Check the status of the training job below.
# After your training job is started, Amazon SageMaker starts a rule-execution job to run the LossNotDecreasing rule.
#
# **Note that the next cell blocks until the rule execution job ends. You can stop it at any point to proceed to the rest of the notebook. Once it says Rule Evaluation Status is Started, and shows the `RuleEvaluationJobArn`, you can look at the status of the rule being monitored.**
# +
# cell 16
import time
from time import gmtime, strftime
# Look up the name of the training job just launched and report its status.
job_name = xgboost_estimator.latest_training_job.name
client = xgboost_estimator.sagemaker_session.sagemaker_client
description = client.describe_training_job(TrainingJobName=job_name)
print('Training job name: ' + job_name)
print(description['TrainingJobStatus'])
if description['TrainingJobStatus'] != 'Completed':
    # Poll until the job either starts producing tensors ('Training') or reaches a
    # terminal state. 'Failed' and 'Stopped' are included so the loop terminates
    # instead of spinning forever when the job dies before the 'Training' phase.
    while description['SecondaryStatus'] not in ['Training', 'Completed', 'Failed', 'Stopped']:
        description = client.describe_training_job(TrainingJobName=job_name)
        primary_status = description['TrainingJobStatus']
        secondary_status = description['SecondaryStatus']
        print("{}: {}, {}".format(strftime('%X', gmtime()), primary_status, secondary_status))
        time.sleep(15)
# -
# ## Data Analysis - Manual
#
# Now that you've trained the system, analyze the data.
# Here, you focus on after-the-fact analysis.
#
# You import a basic analysis library, which defines the concept of trial, which represents a single training run.
# +
# cell 17
from smdebug.trials import create_trial
description = client.describe_training_job(TrainingJobName=job_name)
# S3 path where the debugger artifacts (saved tensors) for the latest job live.
s3_output_path = xgboost_estimator.latest_job_debugger_artifacts_path()
# This is where we create a Trial object that allows access to saved tensors.
trial = create_trial(s3_output_path)
# -
# You can list all the tensors that you know something about. Each one of these names is the name of a tensor. The name is a combination of the feature name, which in these cases, is auto-assigned by XGBoost, and whether it's an evaluation metric, feature importance, or SHAP value.
# cell 18
# List the names of all tensors that were saved for this trial.
trial.tensor_names()
# For each tensor, ask for the steps where you have data. In this case, every five steps.
# cell 19
# Mapping of saved step -> tensor value for the "predictions" tensor.
trial.tensor("predictions").values()
# You can obtain each tensor at each step as a NumPy array.
# cell 20
type(trial.tensor("predictions").value(10))
# ### Performance metrics
#
# You can also create a simple function that visualizes the training and validation errors as the training progresses.
# Each gradient should get smaller over time, as the system converges to a good solution.
# Remember that this is an interactive analysis. You are showing these tensors to give an idea of the data.
# +
# cell 21
import matplotlib.pyplot as plt
import seaborn as sns
import re
def get_data(trial, tname):
    """Fetch the saved steps and values for one tensor.

    Walks through every iteration for which the trial has data for
    the tensor named `tname` and collects its value at each step.

    Returns a tuple of (steps, values).
    """
    tensor = trial.tensor(tname)
    recorded_steps = tensor.steps()
    values = [tensor.value(step) for step in recorded_steps]
    return recorded_steps, values
def plot_collection(trial, collection_name, regex='.*', figsize=(8, 6)):
    """Plot every tensor in a named collection whose name matches `regex`.

    Draws one line per matching tensor, with the saved step on the
    x-axis and the tensor value on the y-axis.
    """
    figure, axis = plt.subplots(figsize=figsize)
    sns.despine()
    all_names = trial.collection(collection_name).tensor_names
    matching = [name for name in sorted(all_names) if re.match(regex, name)]
    for name in matching:
        xs, ys = get_data(trial, name)
        axis.plot(xs, ys, label=name)
    axis.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    axis.set_xlabel('Iteration')
# -
# cell 22
# Plot the train/validation error curves saved in the "metrics" collection.
plot_collection(trial, "metrics")
# ### Feature importances
#
# You can also visualize the feature priorities as determined by
# [xgboost.get_score()](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.get_score).
# If you instructed Estimator to log the `feature_importance` collection, all five importance types supported by `xgboost.get_score()` will be available in the collection.
# cell 23
def plot_feature_importance(trial, importance_type="weight"):
    """Plot the feature_importance collection for one importance type.

    Raises ValueError when `importance_type` is not one of the five
    types produced by xgboost.get_score().
    """
    valid_types = {"weight", "gain", "cover", "total_gain", "total_cover"}
    if importance_type not in valid_types:
        raise ValueError(f"{importance_type} is not one of the supported importance types.")
    plot_collection(
        trial,
        "feature_importance",
        regex=f"feature_importance/{importance_type}/.*")
# cell 24
# Plot feature importances for the default and one alternative importance type.
plot_feature_importance(trial)
# cell 25
plot_feature_importance(trial, importance_type="cover")
# ### SHAP
#
# [SHAP](https://github.com/slundberg/shap) (SHapley Additive exPlanations) is
# another approach to explain the output of machine learning models.
# SHAP values represent a feature's contribution to a change in the model output.
# You instructed Estimator to log the average SHAP values in this example so the SHAP values (as calculated by [xgboost.predict(pred_contribs=True)](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.predict)) will be available the `average_shap` collection.
# cell 26
# Plot the average SHAP values saved during training.
plot_collection(trial,"average_shap")
# cell 27
|
xgboost_debugger_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prepare the Data
import pandas as pd
import numpy as np
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
import seaborn as sns
import matplotlib.pyplot as plt
# Load industry equity total returns, indexed by date.
equity = pd.read_excel('../data/industry_equity_data.xlsx',sheet_name='total returns')
equity.set_index('Date',inplace=True)
# Load Fama-French factor data; the first (unnamed) column is the date index.
ff = pd.read_excel('../data/fama_french_data.xlsx',sheet_name='total returns')
ff.set_index('Unnamed: 0',inplace=True)
display(equity.head())
display(ff.head())
# Convert total returns to excess returns by subtracting the risk-free rate.
equity = equity.subtract(ff['RF'],axis=0)
ff = ff[['MKT','SMB','HML','UMD']].subtract(ff['RF'],axis=0)
# Note that the Fama-French factors are typically published as excess returns, meaning you would not need to subtract the risk-free rate.
#
# I gave you a version of the data that was in total returns, so you need to subtract the risk-free rate.
#
# But if you are ever accessing this data directly from the source, you'll find that the factors are already in excess returns.
# # 2. Pricing Factor Stats
# Annualized mean factor returns (monthly data, so multiply by 12).
ff.mean() * 12
# Factor correlation matrix.
ff.corr()
# ### Note:
#
# The homework said to combine the assets and factors.
#
# SPY and MKT are over 98% correlated, so it makes sense to drop one of them to get better estimations.
#
# Below, set dropSPY to True if you want to estimate without SPY.
#
# Though it would be bad practice to leave both included, it doesn't change the conclusions of this homework meaningfully.
# +
dropSPY = False
# Combine asset and factor excess returns on common dates.
assets = equity.join(ff,how='inner')
if dropSPY:
    # SPY and MKT are over 98% correlated; dropping SPY conditions the estimates better.
    assets = assets.drop(columns=['SPY'])
# -
display(assets.head())
assets.corr()
sns.heatmap(assets.corr(), cmap="PiYG");
# +
# Tangency (mean-variance) portfolio weights: w proportional to inv(Sigma) @ mu,
# normalized so the weights sum to one.
sig = assets.cov().to_numpy()
mu = assets.mean().to_numpy()
w_tan = (np.linalg.inv(sig) @ mu) / (np.ones(sig.shape[0]) @ np.linalg.inv(sig) @ mu)
weights = pd.DataFrame(w_tan.reshape(1,-1),columns = assets.columns,index=['MV Weights'])
display(weights)
# -
# # 3. Testing Pricing Models with Alpha
def get_ols_metrics(regressors,targets,annualization=1):
    """Time-series OLS of each target column on the given regressors.

    For every column of `targets`, fits target ~ const + regressors and
    reports the (annualized) alpha, the factor betas, the r-squared, and
    the information ratio (alpha / residual volatility, annualized).

    `annualization` multiplies the intercept and sqrt-scales the info
    ratio (e.g. 12 for monthly data); betas and r-squared are scale-free
    and are not annualized.
    """
    # ensure regressors and targets are pandas dataframes, as expected
    if not isinstance(regressors,pd.DataFrame):
        regressors = regressors.to_frame()
    if not isinstance(targets,pd.DataFrame):
        targets = targets.to_frame()
    # align the targets and regressors on the same dates
    # NOTE(review): lsuffix renames any target column that clashes with a regressor
    # column, which would break the `targets.columns` lookup below — this assumes
    # the two frames have disjoint column names. Confirm for new inputs.
    df_aligned = targets.join(regressors,how='inner',lsuffix='y ')
    Y = df_aligned[targets.columns]
    X = df_aligned[regressors.columns]
    reg = pd.DataFrame(index=targets.columns)
    for col in Y.columns:
        y = Y[col]
        model = LinearRegression().fit(X,y)
        reg.loc[col,'alpha'] = model.intercept_ * annualization
        reg.loc[col,regressors.columns] = model.coef_
        reg.loc[col,'r-squared'] = model.score(X,y)
        # sklearn does not return the residuals, so we need to build them
        yfit = model.predict(X)
        residuals = y - yfit
        reg.loc[col,'Info Ratio'] = (model.intercept_ / residuals.std()) * np.sqrt(annualization)
    return reg
# ### FF 4-factor model
# Time-series regressions of each asset on all four factors.
regs_ff = get_ols_metrics(ff,equity)
display(regs_ff)
# Mean absolute pricing error (alpha) across assets — smaller favors the model.
MAE = regs_ff['alpha'].abs().mean()
print(f'MAE of alphas is: {MAE:0.4f}')
# ### CAPM
# Same test with the market factor only.
regs_capm = get_ols_metrics(ff['MKT'],equity)
display(regs_capm)
MAE = regs_capm['alpha'].abs().mean()
print(f'MAE of alphas is: {MAE:0.4f}')
# ### Not using any pricing model
# With no factors, the "alpha" is just the mean excess return of each asset.
print(f'MAE of alphas is: {equity.mean().abs().mean():0.4f}')
# ### Plot CAPM Fit
#
# Plotting market beta against historic mean.
#
# If CAPM fit, the scatter plot would (nearly) fit the line.
# +
# Scatter of market beta vs. historic mean excess return, with a fitted line.
df = pd.DataFrame()
df['historic mean'] = equity.mean()
df['mkt beta'] = regs_capm[['MKT']]
df.reset_index(inplace=True)
ax = sns.regplot(x='mkt beta',y='historic mean',data=df)
# Label each point with its asset name (the reset 'index' column).
df[['mkt beta','historic mean','index']].apply(lambda x: ax.text(*x),axis=1);
plt.gcf().set_size_inches(10,5)
plt.show()
# -
# # 4 Testing Pricing Models with Cross-Sectional Regressions
#
# Regress $\overline{r}^i$ on $\hat{\boldsymbol{\beta}^i}$ from the previous time-series regressions.
# Cross-sectional regression: mean excess returns on estimated betas (with intercept).
X = regs_ff[['MKT','SMB','HML','UMD']]
y = equity.mean()
reg_cs = sm.OLS(y,sm.add_constant(X)).fit()
print(reg_cs.summary())
# We see that the R-squared is much less than 1, which means the betas do not fully explain expected returns.
#
# The estimated intercept is near zero (and statistically insignificant), so the issue is not that the expected returns are too high/low. Rather, it is that their variation does not correspond to variation in betas.
#
# The estimated factor premium for HML is **negative**! And it is the most statistically significant factor. This throws doubt on the idea that the value factor is a source of premium--maybe we should be considering the growth factor!
# ### Plotting multifactor models
#
# Given that there are multiple factors, we can't plot a single beta against the historic mean returns.
#
# Rather, we can use the cross-sectional regression to fit the model-expected-return and plot that against the historic mean returns.
#
# If the model fit, all the points would line up on the 45-degree line (one-for-one).
# +
# Compare model-predicted returns (from the cross-sectional fit) to historic means.
df = pd.DataFrame(columns=['mean return','predicted return'])
df['mean return'] = y
df['predicted return'] = reg_cs.predict(sm.add_constant(X))
df.reset_index(inplace=True)
ax = df.plot(x='predicted return',y='mean return',kind='scatter',color='blue',figsize=(7,7),xlim=(0,.03),ylim=(0,.03))
# Label each point with its asset name (the reset 'index' column).
df[['predicted return','mean return','index']].apply(lambda x: ax.text(*x),axis=1);
# 45-degree reference line: points on it are priced exactly by the model.
y_line = np.arange(0, .04, .005)
x_line = y_line
ax.plot(y_line, x_line);
# -
# # Advanced (Optional) Insight:
# ## Weight on the Tangency portfolio
#
# Above we looked at the tangency portfolio weights of the securities and factors. Are these weights "significant"? That is, are we sure that these weights are not just the 4 factors plus random noise?
#
# It turns out that running OLS of y=1 on X = returns gives us the tangency weights! (They are a scaled version, but that doesn't matter.) Since this comes from regression betas, we also automatically get the t-stats and p-values! If the p-values are less than .05, then we say the regressor is statistically significant at the 5th percentile.
#
# Let's see whether the factors are the only significant weights when included with the equities.
# Regress a constant (y=1) on the asset returns: the resulting OLS betas are a
# scaled version of the tangency-portfolio weights, giving t-stats/p-values for free.
temp = sm.add_constant(assets)
X = temp.drop(columns=['const'])
y = temp[['const']]
mod = sm.OLS(y,X).fit()
pvals = mod.pvalues.to_frame()
pvals.columns = ['p-values']
pvals.T
# Not even close! In fact, the p-values for the four factors are all greater than .05, saying they're not significant in testing these equities. Instead, we find significance in the weights for AAPL, AMZN, and XLP.
#
# This is not a surprise. In fact, testing individual equities is very hard for these pricing models. Typically, they are used to price portfolios, and they are tested on portfolios.
# ### Do the extra 3 factors beyond the CAPM help much?
#
# We could see whether the tangency portfolio is improved much by using the four factors (versus just using MKT.)
# +
# Same y=1 regression using only the four factors: tests whether SMB/HML/UMD
# carry significant tangency-portfolio weight beyond MKT alone.
temp = sm.add_constant(ff)
X = temp[['MKT','SMB','HML','UMD']]
y = temp[['const']]
mod = sm.OLS(y,X).fit()
pvals = mod.pvalues.to_frame()
pvals.columns = ['p-values']
pvals.T
# -
# Nope--seems the only significant weight is that on MKT, which says that the other 3 factors are not expanding the frontier significantly--at least relative to just MKT.
#
# So why are these factors used in pricing? They seem to help when we test them on many other returns.
#
# But hopefully this also helps illustrate why CAPM is still the baseline for many applications.
|
solutions/HW3_Solutions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Fire up graphlab create
import graphlab
# # Load some house sales data
# House-sales data stored as a GraphLab SFrame on disk.
sales = graphlab.SFrame('home_data.sframe')
sales
# # Exploring the data for housing sales
sales.show()
# Render GraphLab Canvas visualizations inline in the notebook.
graphlab.canvas.set_target('ipynb')
sales.show(view = 'Scatter Plot', x = 'sqft_living', y = 'price')
# # Create a simple regression model for sqft_living vs price
# 80/20 train/test split with a fixed seed for reproducibility.
train_data, test_data = sales.random_split(0.8, seed = 0)
# # Build the regression model
# Single-feature linear regression: price ~ sqft_living.
sqft_model = graphlab.linear_regression.create(train_data, target = 'price', features = ['sqft_living'])
# # Evaluate the simple model
# Baseline (mean price) followed by the model's test-set error metrics.
print(test_data['price'].mean())
print(sqft_model.evaluate(test_data))
# # Let's show how our predictions look like
import matplotlib.pyplot as plt
# %matplotlib inline
# Scatter of actual prices alongside the fitted regression line.
plt.plot(test_data['sqft_living'], test_data['price'], '.',
         test_data['sqft_living'], sqft_model.predict(test_data), '-')
sqft_model.get('coefficients')
# # Exploring other features in our data
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
sales[my_features].show()
sales.show(view = 'BoxWhisker Plot', x = 'zipcode', y = 'price')
# Multi-feature model; compare its test error against the sqft-only model below.
my_features_model = graphlab.linear_regression.create(train_data, target = 'price' , features = my_features)
print(sqft_model.evaluate(test_data))
print(my_features_model.evaluate(test_data))
# # Applying learned models to predict prices of 3 houses
house1 = sales[sales['id'] == '5309101200']
house1
# <img src = 'house_images/house1.png' width = 50% height = 50%>
print(sqft_model.predict(house1))
print(my_features_model.predict(house1))
# ## Prediction for a second, fancier house
house2 = sales[sales['id'] == '1925069082'] # Fancier House than above
# <img src = "house_images/house2.jpg" width = 50% height = 50%>
print(sqft_model.predict(house2))
print(my_features_model.predict(house2))
# ## Last House, Super Fancy
bill_gates = { # Even more fancier
'bedrooms': [8],
'bathrooms': [25],
'sqft_living': [50000],
'sqft_lot': [225000],
'floors': [4],
'zipcode': ['98039'],
'condition': [10],
'grade': [10],
'waterfront': [1],
'view': [4],
'sqft_above': [37500],
'sqft_basement': [12500],
'yr_built': [1994],
'yr_renovated': [2010],
'lat': [47.627606],
'long': [-122.242054],
'sqft_living15': [5000],
'sqft_lot15': [40000]
}
# <img src = "house_images/bill_gates.png" width = 50% height = 50%>
print(my_features_model.predict(graphlab.SFrame(bill_gates)))
|
Predicting House Prices.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hamiltonian MCMC demo
#
# Hamiltonian Monte-Carlo (HMC) is an MCMC algorithm which is able to efficiently sample from complex PDFs which present difficulty for other algorithms, such as those with strong non-linear correlations.
# However, HMC requires not only the log-posterior probability but also its gradient with respect to the model parameters.
# In cases where this gradient can be calculated efficiently, HMC can be very effective.
import matplotlib.pyplot as plt
from numpy import sqrt, exp, array
# Define a 3D torus-shaped posterior distribution as our test-case, with a `gradient()` method to calculate the gradient of the log-probability analytically.
class ToroidalGaussian(object):
    """Log-posterior of a Gaussian concentrated on a torus in 3D.

    The log-probability is Gaussian in the distance from the torus
    centre-line, with major radius ``R0`` and tube width ``R0 / ar``.
    """
    def __init__(self):
        self.R0 = 1.   # torus major radius
        self.ar = 10.  # torus aspect ratio (major radius / tube width)
        self.w2 = (self.R0 / self.ar)**2  # squared tube width

    def __call__(self, theta):
        """Return the log-probability at position ``theta = (x, y, z)``."""
        x, y, z = theta
        # In-plane offset from the centre-line circle of radius R0.
        axial = sqrt(x**2 + y**2) - self.R0
        dist = sqrt(z**2 + axial**2)
        return -0.5 * dist**2 / self.w2

    def gradient(self, theta):
        """Return the gradient of the log-probability at ``theta``."""
        x, y, z = theta
        radius = sqrt(x**2 + y**2)
        shrink = 1 - self.R0 / radius
        return -array([shrink * x, shrink * y, z]) / self.w2
# HMC is implemented via the `inference.mcmc.HamiltonianChain` class:
# +
# create an instance of our posterior class
posterior = ToroidalGaussian()
# create the chain object; HMC needs both the log-posterior and its gradient
from inference.mcmc import HamiltonianChain
chain = HamiltonianChain(posterior = posterior, grad = posterior.gradient, start = [1,0.1,0.1])
# advance the chain to generate the sample
chain.advance(6000)
# -
# choose how many samples will be thrown away from the start of the chain as 'burn-in'
chain.burn = 1000
# The log-probability of each sample can be obtained using the `get_probabilities()` method, and the values for individual parameters can be obtained using `get_parameter()`:
# extract sample and probability data from the chain
probs = chain.get_probabilities()
xs, ys, zs = [ chain.get_parameter(i) for i in [0,1,2] ]
# Now lets plot the sample as a 3D scatterplot:
# +
colors = exp(probs - max(probs)) # color the points by probability
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize = (10,10))
ax = fig.add_subplot(111, projection='3d')
L = 1.5
ax.set_xlim([-L,L]); ax.set_ylim([-L,L]); ax.set_zlim([-L,L])
ax.set_xlabel('x'); ax.set_ylabel('y'); ax.set_zlabel('z')
ax.scatter(xs, ys, zs, c=colors)
plt.tight_layout()
plt.show()
# -
# We can plot all the possible 1D and 2D marginal distributions of the posterior (or a sub-set of them) using the ``matrix_plot()`` method:
chain.matrix_plot(labels = ['x','y','z'])
|
demos/hamiltonian_mcmc_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Train an SVM on the sleep-study CSV and compare per-row predictions with
# the actual "Hours" values on a 40-row hold-out at the end of the file.
import pandas as pd
data_frame = pd.read_csv("dataset/SleepStudyDataForAnalysis.csv", index_col = 0)
data_frame.head()
data_frame.fillna(0.0, inplace=True)
# NOTE(review): this isnull check runs AFTER fillna, so bool_series is always
# all-False and the selection below is always empty — likely dead code.
bool_series = pd.isnull(data_frame["Hours"])
data_frame[bool_series]
import sklearn
from sklearn import svm, preprocessing
Xs = data_frame.drop('Hours', axis = 1).values
Ys = data_frame['Hours'].values
test_size=40
# +
# Unshuffled split: the last 40 rows become the test set.
Xs_train = Xs[:-test_size]
Ys_train = Ys[:-test_size]
Xs_test = Xs[-test_size:]
Ys_test = Ys[-test_size:]
# -
# NOTE(review): SVC is a classifier, so "Hours" is treated as a set of
# discrete class labels rather than a continuous target — confirm intended.
classifier = svm.SVC (kernel='rbf')
classifier.fit(Xs_train, Ys_train)
classifier.score(Xs_test, Ys_test)
for Xs_iterator, Ys_iterator in zip(Xs_test, Ys_test):
    print(f"Model {classifier.predict([Xs_iterator])[0]}, Actual {Ys_iterator}")
|
svc_predictor.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('..')
sys.path
# +
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from scipy.misc import imread, imresize
from sklearn.externals import joblib
import numpy as np
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
from helpers import convert, show_images, put_boxes, box_boundaries
from featuresourcer import FeatureSourcer
from binaryclassifier import BinaryClassifier
from slider import Slider
from heatmap import HeatMap
# -
def hot_to_rgb(data):
    """Map an array onto the 'hot' colormap and return only the RGB planes.

    Values are min-max normalised to [0, 1] before the colour lookup; the
    alpha channel produced by the colormap is discarded.
    """
    lo = np.min(data)
    hi = np.max(data)
    normalised = (data - lo) / (hi - lo)
    colour_map = plt.cm.get_cmap('hot')
    rgba = colour_map(normalised)
    return np.dstack((rgba[:, :, 0], rgba[:, :, 1], rgba[:, :, 2]))
# +
# Load the pre-trained SVM and feature scaler, then build the feature
# sourcer / classifier / sliding-window objects used by the video pipeline.
svc = joblib.load('svc2.pkl')
scaler = joblib.load('scaler2.pkl')
# HOG / colour-histogram extraction parameters fed to FeatureSourcer.
sourcer_params = {
    'color_model': 'yuv',                # hls, hsv, yuv, ycrcb
    'bounding_box_size': 64,             #
    'number_of_orientations': 11,        # 6 - 12
    'pixels_per_cell': 16,               # 8, 16
    'cells_per_block': 2,                # 1, 2
    'do_transform_sqrt': True
}
cls = BinaryClassifier(svc, scaler)
# The sample image fixes the frame geometry the sourcer expects.
src = FeatureSourcer(sourcer_params, imread("../vehicles/KITTI_extracted/5364.png"))
slider = Slider(sourcer = src, classifier = cls, increment = 8)
# -
def verbose_pipeline(this_frame):
    """Detect vehicles in one video frame and overlay a heat-map thumbnail.

    Relies on module-level state: `ws` (window sizes), `wp` (window y
    positions), `slider` (sliding-window detector) and `heatmap`
    (accumulator) — all defined in the surrounding cells.
    """
    # Scan the frame at each (window size, vertical position) scale and
    # accumulate detections into the shared heat map.
    for sz, pos in zip(ws, wp):
        bounding_boxes = slider.locate(frame = this_frame, window_size = sz, window_position = pos)
        heatmap.update(bounding_boxes)
    mp, _, _ = heatmap.get()
    labeled_img = heatmap.draw(this_frame)
    # Down-scale the heat map and paste it as a picture-in-picture overlay.
    rgb_img = imresize(hot_to_rgb(mp), 0.25)
    labeled_img[20:200, 20:340] = rgb_img
    return labeled_img
# +
ws = 80, 120, 150, 180
wp = 410, 390, 380, 380
temp_frame = imread("../test_images/test1.jpg")
heatmap = HeatMap(frame = temp_frame, thresh = 25, memory = 40)
project_output = '../files/test_video_output.mp4'
clip1 = VideoFileClip("../files/test_video.mp4");
white_clip = clip1.fl_image(verbose_pipeline)
# %time white_clip.write_videofile(project_output, audio = False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(project_output))
# +
ws = 80, 120, 150
wp = 410, 390, 380
temp_frame = imread("../test_images/test1.jpg")
heatmap = HeatMap(frame = temp_frame, thresh = 20, memory = 30)
project_output = '../files/project_video_output.mp4'
clip1 = VideoFileClip("../files/project_video.mp4");
white_clip = clip1.fl_image(verbose_pipeline)
# %time white_clip.write_videofile(project_output, audio = False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(project_output))
# -
|
ipynb/pipeline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting movie's IMDB rating
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score
from sklearn.model_selection import learning_curve, train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import PolynomialFeatures
from random import randrange
# ## Import and process CSV
# * Remove row which contain empty cell
# * Reset Index for easier preprocessing step
# Load the IMDB CSV, drop incomplete rows, and sort by rating (descending)
# so that the reset index orders movies best-first.
url = ('./src/IMDB-Movie-Data.csv')
csv = pd.read_csv(url, sep=",")
csv = csv.dropna(axis="index", how="any")
csv = csv.sort_values(by=["Rating"], ascending=False)
csv = csv.reset_index(drop=True)
csv.head()
# ## Feature & target selection
# * Select relevant feature (excluding XYZ)
# * Feature "Description" was removed because NLP (Natural Language Processing) isn't used
# * Feature "Metascore" was selected because Metascore usually comes out before movie release
# * On data pre-processing step we will remove some feature
# +
x = csv[["Genre", "Director", "Actors",
         "Runtime (Minutes)", "Revenue (Millions)", "Votes", "Metascore", "Year"]]
y = csv["Rating"]
x.head()
# -
# ## Data pre-processing #1
# * Convert feature "Genre" & "Actors" data type from string to list because a movie could have more than a genre and an actor
# * Change feature "Year" into movie age (since release year)
# * Rename column for convenience sake
# +
list_genre = []
for i in range(0, x["Genre"].size):
list_genre.append(x["Genre"][i].split(','))
x["Genre"].update(pd.Series(list_genre))
list_actors = []
for i in range(0, x["Actors"].size):
list_actors.append(x["Actors"][i].split(','))
x["Actors"].update(pd.Series(list_actors))
# Column position, column name, column value
x.insert(3, "Age", [2019] - x["Year"])
x = x.rename(columns={"Revenue (Millions)": "Revenue"})
x = x.rename(columns={"Runtime (Minutes)": "Runtime"})
x = x.drop(columns=["Year"])
x.head()
# -
# ## Data pre-processing #2
# Perform normalization to all features which use numbers/decimals. Normalized values usually lie in the range -1 to 1 or 0 to 1.
# +
# Rescale each numeric feature from [0, its max] onto a chosen target range
# via linear interpolation (np.interp), then write the scaled values back.
ndarray_runtime = np.array(x["Runtime"])
ndarray_metascore = np.array(x["Metascore"])
ndarray_age = np.array(x["Age"])
ndarray_votes = np.array(x["Votes"])
ndarray_revenue = np.array(x["Revenue"])
# NOTE(review): target upper bounds (3, 5, 1, 4, 1) act as hand-tuned
# per-feature weights rather than a uniform [0, 1] normalisation.
interp_runtime = np.interp(
    ndarray_runtime, (0, ndarray_runtime.max()), (0, +3))
interp_metascore = np.interp(
    ndarray_metascore, (0, ndarray_metascore.max()), (0, +5))
interp_age = np.interp(ndarray_age, (0, ndarray_age.max()), (0, +1))
interp_votes = np.interp(ndarray_votes, (0, ndarray_votes.max()), (0, +4))
interp_revenue = np.interp(
    ndarray_revenue, (0, ndarray_revenue.max()), (0, +1))
x["Runtime"].update(pd.Series(interp_runtime))
x["Metascore"].update(pd.Series(interp_metascore))
x["Age"].update(pd.Series(interp_age))
x["Votes"].update(pd.Series(interp_votes))
x["Revenue"].update(pd.Series(interp_revenue))
x.head()
# -
# -
# ## Data pre-processing #3
# Using one-hot encode technique for feature which uses string
# +
# One-hot encode the string features. Genre/Actors hold lists per row, so
# each list is expanded to its own Series before get_dummies, and duplicate
# dummy columns are merged by summing at level 0.
ohe_director = pd.get_dummies(x["Director"])
series_genre = pd.Series()
for i in range(0, x["Genre"].size):
    tmp_series = pd.Series(x["Genre"][i])
    series_genre.at[i] = tmp_series
ohe_genre = pd.get_dummies(series_genre.apply(
    pd.Series), prefix='', prefix_sep='').sum(level=0, axis=1)
series_actors = pd.Series()
for i in range(0, x["Actors"].size):
    tmp_series = pd.Series(x["Actors"][i])
    series_actors.at[i] = tmp_series
ohe_actors = pd.get_dummies(series_actors.apply(
    pd.Series), prefix='', prefix_sep='').sum(level=0, axis=1)
print(ohe_director.sample(5))
print(ohe_genre.sample(5))
print(ohe_actors.sample(5))
# -
# -
# ## Data pre-processing #4
# Remove all row where frequency of one-hot encoded feature is too low or have extremely low correlation with target
#
# This method is used to improve accuracy of the trained model and is only used with a small amount of training data
# +
# Drop one-hot columns whose frequency or absolute correlation with the
# rating falls below per-feature thresholds; count and report the survivors.
min_freq_director = 4
min_freq_genre = 6
min_freq_actors = 4
min_corr_director = 0.12
min_corr_genre = 0.11
min_corr_actors = 0.10
corr_director = []
corr_genre = []
corr_actors = []
ctr = 0
# Iterating a snapshot of .columns while reassigning the DataFrame is safe:
# the loop walks the original Index, not the shrinking frame.
for col in ohe_director.columns:
    freq = np.sum(np.array(ohe_director[col]))
    corr = np.abs(ohe_director[col].corr(y))
    corr_director.append(corr)
    if freq < min_freq_director or corr < min_corr_director:
        ohe_director = ohe_director.drop(columns=[col])
    else:
        ctr += 1
print(
    f"Count of eligable feature 'Director' (>= {min_freq_director} && >= {min_corr_director * 100}%) : {ctr}")
print(
    f"Average correlation for feature 'Director' : {np.average(np.array(corr_director))}")
ctr = 0
for col in ohe_genre.columns:
    freq = np.sum(np.array(ohe_genre[col]))
    corr = np.abs(ohe_genre[col].corr(y))
    corr_genre.append(corr)
    if freq < min_freq_genre or corr < min_corr_genre:
        ohe_genre = ohe_genre.drop(columns=[col])
    else:
        ctr += 1
print(
    f"Count of eligable feature 'Genre' (>= {min_freq_genre} && >= {min_corr_genre * 100}%) : {ctr}")
print(
    f"Average correlation for feature 'Genre' : {np.average(np.array(corr_genre))}")
ctr = 0
for col in ohe_actors.columns:
    freq = np.sum(np.array(ohe_actors[col]))
    corr = abs(ohe_actors[col].corr(y))
    corr_actors.append(corr)
    if freq < min_freq_actors or corr < min_corr_actors:
        ohe_actors = ohe_actors.drop(columns=[col])
    else:
        ctr += 1
print(
    f"Count of eligable feature 'Actors' (>= {min_freq_actors} && >= {min_corr_actors * 100}%) : {ctr}")
print(
    f"Average correlation for feature 'Actors' : {np.average(np.array(corr_actors))}")
# -
# -
# ## Data pre-processing #5A
# * Add filtered one-hot encoded feature into feature DataFrame
# * See correlation between each feature & target
# +
# Optionally include the one-hot columns in the correlation matrix shown.
show_ohe_corr = True
corr = pd.concat([x, ohe_director, ohe_genre, ohe_actors, y], axis=1,
                 sort=False) if show_ohe_corr else pd.concat([x, y], axis=1, sort=False)
corr.corr(method='pearson')
# -
# -
# ## Data pre-processing #5B
# * Remove all irrelevant features
# * Add polynomial features degree 2
# +
# Drop the raw string/list columns (their one-hot versions replace them),
# optionally expand degree-2 polynomial features, then append the one-hots.
use_polynomial = True
x = x.drop(columns=["Director", "Genre", "Actors"])
x = x.drop(columns=["Age"])
if use_polynomial:
    x_poly = PolynomialFeatures(
        2, include_bias=True, interaction_only=False).fit_transform(x)
    x = pd.concat([x, pd.DataFrame(x_poly)], axis=1, sort=False)
# x = pd.concat([x, ohe_genre], axis=1, sort=False)
x = pd.concat([x, ohe_director, ohe_genre, ohe_actors], axis=1, sort=False)
print(f"Total feature : {x.shape[1]}")
x.head()
# -
# -
# ## Split training & test data
# * Split into 70/30 due to small training data
# * 80/20 or lower is preferred if there is more training data
# * Sort x_train for visualization convenience
# +
# 70/30 split; the test partition is re-sorted by rating so the final plots
# show a monotone actual-rating curve.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
x_test = pd.concat([x_test, y_test], axis=1)
x_test = x_test.sort_values(by="Rating")
# NOTE(review): column index 28 is hard-coded as the position of "Rating"
# after concat — fragile if the feature count changes; confirm it still holds.
y_test = x_test.take([28], axis=1)
y_test = y_test.iloc[:, 0]
x_test = x_test.drop(columns=["Rating"])
# -
# -
# ## Training phase
# 3 layers were used with dynamic amount of node
# +
# MLP regressor with three hidden layers sized proportionally to the
# feature count (100%, 70%, 49%).
hls = (x.shape[1], int(x.shape[1]*0.7), int(x.shape[1]*0.49))
print(f'Hidden layer/node : {hls}')
mlpr = MLPRegressor(
    hidden_layer_sizes=hls,
    activation='relu',
    solver='adam',
    alpha=0.00005,
    learning_rate='adaptive',
    learning_rate_init=0.0005,
    max_iter=5000,
    shuffle=False,
    tol=0.00005,
    momentum=0.9,
    verbose=False
)
mlpr_model = mlpr.fit(x_train, y_train)
print(f"Training iteration : {mlpr_model.n_iter_}")
# -
# -
# ## Testing phase & result
# * Show MSE score of train phase
# * Show MSE, R2 and variance score of test phase
# +
# Score the trained model on the hold-out set: MSE, R2 and explained variance.
mlpr_predict = mlpr.predict(x_test)
mse = mean_squared_error(y_test, mlpr_predict)
r2 = r2_score(y_test, mlpr_predict)
evs = explained_variance_score(y_test, mlpr_predict)
print(f"MSE train : {mlpr_model.loss_}")
print(f"MSE test : {mse}")
print(f"R2 score : {r2}")
print(f"Variance score : {evs}")
# -
# -
# ## Training Loss Curve
# Plot the per-iteration training loss recorded by the MLP.
plt.style.use('seaborn')
plt.plot([i for i in range(mlpr_model.n_iter_)],
         mlpr_model.loss_curve_, label='Training error')
plt.ylabel('MSE', fontsize=14)
plt.xlabel('Iteration', fontsize=14)
plt.title('Training Loss Curve', fontsize=18, y=1.03)
plt.legend()
# ## MSE score with different training size and cross-validation method
# +
# Learning curve: retrain with 10%-80% of the data under 5-fold CV and plot
# train vs. validation MSE (scores are negated back from sklearn's
# neg_mean_squared_error convention).
# NOTE(review): train_sizes is built as a (1, 8) 2-D array — learning_curve
# expects a 1-D array-like; confirm this shape is accepted by the sklearn
# version in use.
train_sizes = ((np.zeros((1, 8)) + x.shape[0]) * np.array(
    [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])).astype(int)
train_sizes, train_scores, validation_scores = learning_curve(
    estimator=mlpr,
    X=x,
    y=y,
    train_sizes=train_sizes,
    cv=5,
    n_jobs=-1,
    scoring='neg_mean_squared_error',
    verbose=0)
train_scores_mean = -train_scores.mean(axis=1)
validation_scores_mean = -validation_scores.mean(axis=1)
plt.style.use('seaborn')
plt.plot(train_sizes / x.shape[0], train_scores_mean, label='Training error')
plt.plot(train_sizes / x.shape[0],
         validation_scores_mean, label='Validation error')
plt.ylabel('MSE', fontsize=14)
plt.xlabel('Training size', fontsize=14)
plt.title('Learning curves for MLPR', fontsize=18, y=1.03)
plt.legend()
# -
# -
# ## Comparison of predicted and actual target value (table)
# +
def percentage_diff(predict, actual):
    """Absolute difference between ``predict`` and ``actual`` expressed as a
    percentage of ``actual``, rounded to 3 decimal places.

    Returns 0 (int) when the two values are equal or when either value is
    zero, so a zero actual rating never divides by zero and never yields an
    inflated percentage.
    """
    if predict == actual or actual == 0 or predict == 0:
        return 0
    # actual == 0 is handled above, so this division can never raise
    # ZeroDivisionError — the original try/except around it was unreachable.
    return round((abs(predict - actual) / actual) * 100.0, 3)
# Build a side-by-side table of actual vs. predicted ratings with absolute
# and percentage differences, then report the averages.
comparison = pd.concat(
    [pd.Series(np.array(y_test)), pd.Series(mlpr_predict)], axis=1, sort=False)
comparison[2] = np.absolute(comparison[0] - comparison[1])
percent = []
for i in range(0, comparison[0].size):
    percent.append(float(percentage_diff(comparison[1][i], comparison[0][i])))
comparison[3] = percent
comparison.rename(columns={0: 'Actual Rating', 1: 'Predicted Rating',
                           2: 'Difference', 3: '% Diff.'}, inplace=True)
comparison.sample(10)
# -
print(f"Average diff. : {np.average(comparison['Difference'])}")
print(f"Average diff. percentage : {np.average(comparison['% Diff.'])}")
# ## Comparison of predicted and actual target value (Curve Graph)
# * Red point : actual rating
# * Blue line : predicted rating
# +
# Scatter the (sorted) actual ratings in red and overlay the predicted
# ratings as a blue line, indexed by position in the sorted test set.
index_x = [i for i in range(0, y_test.size)]
plt.style.use('seaborn')
plt.scatter(index_x, y_test, color='red')
# plt.scatter(index_x, y_test, color='red')
plt.plot(index_x, mlpr_predict, color='blue')
# plt.scatter(index_x, mlpr_predict, color='blue')
plt.title('Rating graph')
plt.xlabel('Movie index')
plt.ylabel('Rating')
plt.show()
|
imdb_rating.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/perfectpanda-works/machine-learning/blob/master/LEARNING_PYTORCH_WITH_EXAMPLES5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="gW2LA15k-atB" colab_type="text"
# #optimパッケージ
# + [markdown] id="HlaqAxoGZ9eP" colab_type="text"
# これまで、チュートリアルでは、学習可能なパラメーターを保持するテンソルを手動で変更することにより、モデルの重みを更新しました。
#
# (autogradで履歴の追跡を回避するためにtorch.no_grad()または.dataを使用)。
#
# これは、確率的勾配降下法などの単純な最適化アルゴリズムにとって大きな負担ではありませんが、実際には、AdaGrad、RMSProp、Adamなどのより高度な最適化手法を使用してニューラルネットワークをトレーニングすることがよくあります。特に、Adamは使われているところをよくみます。
#
# PyTorchのoptimパッケージで、最適化アルゴリズムの概念を抽象化し、一般的に使用される最適化アルゴリズムを利用することができます。
#
# この例では、以前のようにnnパッケージを使用してモデルを定義しますが、optimパッケージによって提供されるAdamアルゴリズムを使用してモデルを最適化します。
# + id="_jy5fh2l_o7y" colab_type="code" colab={}
# -*- coding: utf-8 -*-
# Two-layer fully-connected network on random data, used to demonstrate the
# torch.optim package (comments translated from Japanese).
import torch
# N     : batch size
# D_in  : input dimension
# H     : hidden-layer dimension
# D_out : output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and target data
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Define the neural network and the loss function
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
# + [markdown] id="hfVFh6hs_-xp" colab_type="text"
# 次のプログラムで、最適化手法として、Adamオプティマイザーを設定します。optimizerの設定では、引数として、モデルのパラメータを渡します。この様にして、更新する重みを渡しています。
# + id="e3JekU57aGId" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="c6e19200-6318-4d65-c742-48e6a0f3d174"
learning_rate = 1e-4
# Configure Adam as the optimisation method; passing model.parameters()
# tells the optimizer which weights to update.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(500):
    # forward pass
    y_pred = model(x)
    # compute the loss
    loss = loss_fn(y_pred, y)
    if t % 100 == 99:
        print(t, loss.item())
    # reset the gradients via the optimizer
    optimizer.zero_grad()
    # backward pass
    loss.backward()
    # update the weights via the optimizer
    optimizer.step()
# + id="-bLRSn7CnzUi" colab_type="code" colab={}
|
LEARNING_PYTORCH_WITH_EXAMPLES5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## underfit-overfit
# Interpretation by linear regression
# +
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon
# Synthetic cubic-polynomial data: y = 1.2x - 3.4x^2 + 5.6x^3 + 5 + noise.
num_train = 100
num_test = 100
true_w = [1.2, -3.4, 5.6]
true_b = 5.0
# +
x = nd.random.normal(shape=(num_train + num_test, 1))
# X stacks [x, x^2, x^3] so a linear model can fit the cubic exactly.
X = nd.concat(x, nd.power(x, 2), nd.power(x, 3))
y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_w[2] * X[:, 2] + true_b
y += .1 * nd.random.normal(shape=y.shape)
('x:', x[:5], 'X:', X[:5], 'y:', y[:5])
# +
# %matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.dpi']= 120
import matplotlib.pyplot as plt
def train(X_train, X_test, y_train, y_test):
# 线性回归模型
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(1))
net.initialize()
# 设一些默认参数
learning_rate = 0.01
epochs = 100
batch_size = min(10, y_train.shape[0])
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(
dataset_train, batch_size, shuffle=True)
# 默认SGD和均方误差
trainer = gluon.Trainer(net.collect_params(), 'sgd', {
'learning_rate': learning_rate})
square_loss = gluon.loss.L2Loss()
# 保存训练和测试损失
train_loss = []
test_loss = []
for e in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
train_loss.append(square_loss(
net(X_train), y_train).mean().asscalar())
test_loss.append(square_loss(
net(X_test), y_test).mean().asscalar())
# 打印结果
plt.plot(train_loss)
plt.plot(test_loss)
plt.legend(['train','test'])
plt.show()
return ('learned weight', net[0].weight.data(),
'learned bias', net[0].bias.data())
# -
# fit: full cubic features, enough data — the model recovers the true curve
train(X[:num_train, :], X[num_train:, :], y[:num_train], y[num_train:])
# underfit: only the raw x feature, so the cubic target cannot be captured
train(x[:num_train, :], x[num_train:, :], y[:num_train], y[num_train:])
# overfit: full features but just two training examples
train(X[0:2, :], X[num_train:, :], y[0:2], y[num_train:])
# ## regularization-scratch
# Interpretation by logistic regression
# +
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon
import mxnet as mx
# High-dimensional linear data with few training examples (20 samples vs.
# 200 inputs) — a setting designed to overfit without regularisation.
num_train = 20
num_test = 100
num_inputs = 200
true_w = nd.ones((num_inputs, 1)) * 0.01
true_b = 0.05
X = nd.random.normal(shape=(num_train + num_test, num_inputs))
y = nd.dot(X, true_w) + true_b
y += .01 * nd.random.normal(shape=y.shape)
X_train, X_test = X[:num_train, :], X[num_train:, :]
y_train, y_test = y[:num_train], y[num_train:]
# -
import random
batch_size = 1
def data_iter(num_examples):
    """Yield (data, label) minibatches of the module-level X/y in a fresh
    random order each call; the last batch may be smaller."""
    idx = list(range(num_examples))
    random.shuffle(idx)
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i+batch_size,num_examples)])
        yield X.take(j), y.take(j)
def init_params():
    """Return freshly initialised [w, b] with gradients attached; w is drawn
    from N(0, 1) with shape (num_inputs, 1) and b starts at zero."""
    w = nd.random_normal(scale=1, shape=(num_inputs, 1))
    b = nd.zeros(shape=(1,))
    params = [w, b]
    for param in params:
        param.attach_grad()
    return params
def L2_penalty(w, b):
    """Return the L2 (weight-decay) penalty ``(||w||^2 + b^2) / 2``."""
    squared_norm = (w**2).sum() + b**2
    return squared_norm / 2
# +
# %matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.dpi']= 120
import matplotlib.pyplot as plt
import numpy as np
def net(X, w, b):
    """Linear model: X @ w + b."""
    return nd.dot(X, w) + b
def square_loss(yhat, y):
    """Per-element squared error divided by 2 (matches gluon's L2Loss)."""
    return (yhat - y.reshape(yhat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
    """In-place SGD step: param -= lr * grad / batch_size."""
    for param in params:
        param[:] = param - lr * param.grad / batch_size
def test(net, params, X, y):
    """Mean squared-loss of `net(X, *params)` against `y` as a Python float."""
    return square_loss(net(X, *params), y).mean().asscalar()
    #return np.mean(square_loss(net(X, *params), y).asnumpy())
def train(lambd):
    """Train the scratch linear model with an explicit L2 penalty weighted by
    ``lambd``; plot train/test loss and return the learned parameters."""
    epochs = 10
    learning_rate = 0.005
    w, b = params = init_params()
    train_loss = []
    test_loss = []
    for e in range(epochs):
        for data, label in data_iter(num_train):
            with autograd.record():
                output = net(data, *params)
                # add the weight-decay penalty directly to the loss
                loss = square_loss(
                    output, label) + lambd * L2_penalty(*params)
            loss.backward()
            sgd(params, learning_rate, batch_size)
        train_loss.append(test(net, params, X_train, y_train))
        test_loss.append(test(net, params, X_test, y_test))
    plt.plot(train_loss)
    plt.plot(test_loss)
    plt.legend(['train', 'test'])
    plt.show()
    return 'learned w[:10]:', w[:10].T, 'learned b:', b
# -
# without regularization (overfits), then with lambda = 5
train(0)
train(5)
# ## regularization-gluon
# +
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon
import mxnet as mx
# Same overfitting-prone synthetic setup as the scratch version above:
# 20 training samples, 200 input dimensions.
num_train = 20
num_test = 100
num_inputs = 200
true_w = nd.ones((num_inputs, 1)) * 0.01
true_b = 0.05
X = nd.random.normal(shape=(num_train + num_test, num_inputs))
y = nd.dot(X, true_w) + true_b
y += .01 * nd.random.normal(shape=y.shape)
X_train, X_test = X[:num_train, :], X[num_train:, :]
y_train, y_test = y[:num_train], y[num_train:]
# +
# %matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.dpi']= 120
import matplotlib.pyplot as plt
import numpy as np
batch_size = 1
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)
square_loss = gluon.loss.L2Loss()
def test(net, X, y):
    """Mean L2 loss of the gluon net on (X, y) as a Python float."""
    return square_loss(net(X), y).mean().asscalar()
def train(weight_decay):
    """Train a gluon linear model using the trainer's built-in 'wd'
    (weight-decay) option instead of an explicit penalty term; plot the
    train/test loss and return the learned parameters.
    (Comment translated from Chinese.)
    """
    epochs = 10
    learning_rate = 0.005
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(1))
    net.collect_params().initialize(mx.init.Normal(sigma=1))
    # note the 'wd' argument here — gluon applies L2 weight decay itself
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {
        'learning_rate': learning_rate, 'wd': weight_decay})
    train_loss = []
    test_loss = []
    for e in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
        train_loss.append(test(net, X_train, y_train))
        test_loss.append(test(net, X_test, y_test))
    plt.plot(train_loss)
    plt.plot(test_loss)
    plt.legend(['train','test'])
    plt.show()
    return ('learned w[:10]:', net[0].weight.data()[:,:10],
            'learned b:', net[0].bias.data())
# -
# no weight decay (overfits), then weight decay = 5
train(0)
train(5)
# ## dropout
# Interpretation by mlp
# +
from mxnet import nd
def dropout(X, drop_probability):
    """Inverted dropout: zero each element of ``X`` with probability
    ``drop_probability`` and rescale survivors so E[dropout(X)] == X.
    (Comments translated from Chinese.)
    """
    keep_probability = 1 - drop_probability
    assert 0 <= keep_probability <= 1
    # In this case drop every element.
    if keep_probability == 0:
        return X.zeros_like()
    # Randomly select a subset of this layer's outputs to drop.
    mask = nd.random.uniform(
        0, 1.0, X.shape, ctx=X.context) < keep_probability
    # Guarantee E[dropout(X)] == X
    scale = 1 / keep_probability
    return mask * X * scale
# -
# Sanity-check dropout at keep-all, half, and drop-all probabilities.
A = nd.arange(20).reshape((5,4))
dropout(A, 0.0)
dropout(A, 0.5)
dropout(A, 1.0)
import numpy as np
class DataLoader(object):
    """Similar to gluon.data.DataLoader, but might be faster.

    The main difference is that this data loader tries to read more examples
    each time. But the limits are 1) all examples in the dataset must have
    the same shape, 2) the data transformer needs to process multiple
    examples at each call.
    """
    def __init__(self, dataset, batch_size, shuffle, transform=None):
        self.dataset = dataset        # source dataset (indexable)
        self.batch_size = batch_size  # examples per yielded batch
        self.shuffle = shuffle        # reshuffle on every iteration pass
        self.transform = transform    # optional batch-wise (data, label) transform
    def __iter__(self):
        # Materialise the whole dataset once, then slice batches out of it.
        data = self.dataset[:]
        X = data[0]
        y = nd.array(data[1])
        n = X.shape[0]
        if self.shuffle:
            # Shuffle via numpy fancy-indexing, then convert back to ndarray.
            idx = np.arange(n)
            np.random.shuffle(idx)
            X = nd.array(X.asnumpy()[idx])
            y = nd.array(y.asnumpy()[idx])
        # NOTE: the trailing partial batch (n % batch_size examples) is dropped.
        for i in range(n//self.batch_size):
            if self.transform is not None:
                yield self.transform(X[i*self.batch_size:(i+1)*self.batch_size],
                                     y[i*self.batch_size:(i+1)*self.batch_size])
            else:
                yield (X[i*self.batch_size:(i+1)*self.batch_size],
                       y[i*self.batch_size:(i+1)*self.batch_size])
    def __len__(self):
        return len(self.dataset)//self.batch_size
def load_data_fashion_mnist(batch_size, resize=None, root="~/.mxnet/datasets/fashion-mnist"):
    """Download the Fashion-MNIST dataset and load it into memory.

    Parameters:
        batch_size -- examples per batch in the returned loaders.
        resize     -- optional target height/width; images are resized when set.
        root       -- local cache directory for the dataset files.
    Returns (train_data, test_data) as DataLoader instances that transform
    batches lazily (to avoid a memory explosion at load time).
    """
    def transform_mnist(data, label):
        # Transform a batch of examples.
        if resize:
            n = data.shape[0]
            new_data = nd.zeros((n, resize, resize, data.shape[3]))
            for i in range(n):
                # FIX: the original called `image.imresize`, but no `image`
                # name is defined in this file — use the mxnet namespace.
                new_data[i] = mx.image.imresize(data[i], resize, resize)
            data = new_data
        # change data from batch x height x width x channel to batch x channel x height x width
        return nd.transpose(data.astype('float32'), (0,3,1,2))/255, label.astype('float32')
    mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)
    # Transform later to avoid memory explosion.
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return (train_data, test_data)
batch_size = 256
train_data, test_data = load_data_fashion_mnist(batch_size)
# +
# Parameters for a 784 -> 256 -> 256 -> 10 MLP, initialised with small
# Gaussian weights and zero biases; gradients attached for autograd.
num_inputs = 28*28
num_outputs = 10
num_hidden1 = 256
num_hidden2 = 256
weight_scale = .01
W1 = nd.random_normal(shape=(num_inputs, num_hidden1), scale=weight_scale)
b1 = nd.zeros(num_hidden1)
W2 = nd.random_normal(shape=(num_hidden1, num_hidden2), scale=weight_scale)
b2 = nd.zeros(num_hidden2)
W3 = nd.random_normal(shape=(num_hidden2, num_outputs), scale=weight_scale)
b3 = nd.zeros(num_outputs)
params = [W1, b1, W2, b2, W3, b3]
for param in params:
    param.attach_grad()
# +
drop_prob1 = 0.2
drop_prob2 = 0.5
def net(X):
    """Two-hidden-layer MLP with dropout after each hidden layer.
    (Comments translated from Chinese.)"""
    X = X.reshape((-1, num_inputs))
    # first fully-connected layer
    h1 = nd.relu(nd.dot(X, W1) + b1)
    # apply dropout after the first fully-connected layer
    h1 = dropout(h1, drop_prob1)
    # second fully-connected layer
    h2 = nd.relu(nd.dot(h1, W2) + b2)
    # apply dropout after the second fully-connected layer
    h2 = dropout(h2, drop_prob2)
    return nd.dot(h2, W3) + b3
# -
# for test
def net2(X):
    """Same MLP as `net` (shares W1..b3) but without dropout — used at
    evaluation time so the full network is applied."""
    X = X.reshape((-1, num_inputs))
    h1 = nd.relu(nd.dot(X, W1) + b1)
    h2 = nd.relu(nd.dot(h1, W2) + b2)
    return nd.dot(h2, W3) + b3
def SGD(params, lr):
    """In-place vanilla SGD step: param -= lr * param.grad."""
    for param in params:
        param[:] = param - lr * param.grad
# +
import mxnet as mx
def accuracy(output, label):
    """Fraction of rows in `output` whose argmax equals `label`."""
    return nd.mean(output.argmax(axis=1)==label).asscalar()
def _get_batch(batch, ctx):
    """return data and label on ctx"""
    if isinstance(batch, mx.io.DataBatch):
        data = batch.data[0]
        label = batch.label[0]
    else:
        data, label = batch
    return (gluon.utils.split_and_load(data, ctx),
            gluon.utils.split_and_load(label, ctx),
            data.shape[0])
def evaluate_accuracy(data_iterator, net, ctx=[mx.cpu()]):
    """Average classification accuracy of `net` over a whole data iterator,
    optionally split across multiple contexts.
    NOTE(review): the mutable default `ctx=[mx.cpu()]` is never mutated here,
    so it is safe, but a tuple default would be more defensive."""
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    acc = nd.array([0])
    n = 0.
    if isinstance(data_iterator, mx.io.MXDataIter):
        data_iterator.reset()
    for batch in data_iterator:
        data, label, batch_size = _get_batch(batch, ctx)
        for X, y in zip(data, label):
            acc += nd.sum(net(X).argmax(axis=1)==y).copyto(mx.cpu())
            n += y.size
        acc.wait_to_read() # don't push too many operators into backend
    return acc.asscalar() / n
# +
from mxnet import autograd
from mxnet import gluon
# Train the scratch MLP (with dropout) for 5 epochs; evaluation here also
# uses `net`, i.e. dropout stays active at test time.
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
learning_rate = .5
for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        SGD(params, learning_rate/batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss/len(train_data),
        train_acc/len(train_data), test_acc))
# +
from mxnet import autograd
from mxnet import gluon
# Same training loop, but test accuracy is measured with `net2` (no
# dropout, shared weights) — the standard train-with/evaluate-without setup.
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
learning_rate = .5
for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        SGD(params, learning_rate/batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)
    test_acc = evaluate_accuracy(test_data, net2)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss/len(train_data),
        train_acc/len(train_data), test_acc))
# +
def transform(data, label):
    """Scale image pixels into [0, 1] float32 and cast labels to float32."""
    scaled = data.astype('float32') / 255
    return scaled, label.astype('float32')
def show_images(images):
    """Render a row of 28x28 images (mxnet NDArrays) with matplotlib."""
    n = images.shape[0]
    _, figs = plt.subplots(1, n, figsize=(15, 15))
    for i in range(n):
        # .asnumpy() converts the mxnet NDArray into something imshow accepts.
        figs[i].imshow(images[i].reshape((28, 28)).asnumpy())
        # Hide axis ticks — these are thumbnails, not plots.
        figs[i].axes.get_xaxis().set_visible(False)
        figs[i].axes.get_yaxis().set_visible(False)
    plt.show()
def get_text_labels(label):
    """Map numeric Fashion-MNIST class indices to their text names.

    `label` is any iterable of numbers; each is truncated to int and used
    as an index into the 10-class label table.
    BUG FIX: class 3 was ``'dress,'`` (stray comma inside the string).
    """
    text_labels = [
        't-shirt', 'trouser', 'pullover', 'dress', 'coat',
        'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot'
    ]
    return [text_labels[int(i)] for i in label]
# Visual sanity check: show 9 test images with true vs. predicted labels.
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)
data, label = mnist_test[0:9]
show_images(data)
print('true labels')
print(get_text_labels(label))
# Predicted class = arg-max over the 10 output scores of `net`.
predicted_labels = net(data).argmax(axis=1)
print('predicted labels')
print(get_text_labels(predicted_labels.asnumpy()))
# -
# ## Dropout-gluon
# +
from mxnet.gluon import nn
# MLP with a dropout layer after each of the two hidden layers.
net = nn.Sequential()
drop_prob1 = 0.2
drop_prob2 = 0.5
with net.name_scope():
    net.add(nn.Flatten())
    # First fully-connected layer.
    net.add(nn.Dense(256, activation="relu"))
    # Dropout layer after the first fully-connected layer.
    net.add(nn.Dropout(drop_prob1))
    # Second fully-connected layer.
    net.add(nn.Dense(256, activation="relu"))
    # Dropout layer after the second fully-connected layer.
    net.add(nn.Dropout(drop_prob2))
    # Output layer: 10 class scores.
    net.add(nn.Dense(10))
net.initialize()
# +
from mxnet import nd
from mxnet import autograd
from mxnet import gluon
batch_size = 256
train_data, test_data = load_data_fashion_mnist(batch_size)
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# gluon.Trainer replaces the hand-rolled SGD of the earlier cells.
trainer = gluon.Trainer(net.collect_params(),
                        'sgd', {'learning_rate': 0.5})
for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        # Pass batch_size so the update is scaled like the manual loop above.
        trainer.step(batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss/len(train_data),
        train_acc/len(train_data), test_acc))
# -
|
underfit-overfit-regularization-dropout.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercises
# ## import
# +
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
# -
# ### EXERCISE 1:
# Given a list of integers, without using any package or built-in function, compute and print:
# - mean of the list
# - number of negative and positive numbers in the list
# - two lists that contain positives and negatives in the original list
# +
"""
Created on Tue Feb 19 08:59:26 2019
@author: francesco
"""
input = [-2,2,-3,3,10]
# -
# ##### mean of the list
# +
def mean_of_list(list):
    """Mean of the numbers in `list`, computed without library helpers.

    The exercise explicitly forbids packages/built-in functions, so the
    original call to ``np.sum`` is replaced with an explicit loop.
    NOTE: the parameter name shadows the ``list`` builtin; kept unchanged
    for call-site compatibility.
    """
    tot = 0
    for n in list:
        tot += n
    return tot/len(list)
mean_of_list(input)
# -
# ##### number of negative and positive numbers in the list
# +
def signs_of_list(list):
    """Count entries by sign; returns (non_negatives, negatives).

    Zero is counted on the positive side, matching the exercise's lists.
    """
    positive = 0
    negative = 0
    for value in list:
        if value < 0:
            negative += 1
        else:
            positive += 1
    return positive, negative
signs_of_list(input)
# -
# ##### two lists that contain positives and negatives in the original list
# +
def signedList_of_list(list):
    """Split `list` into (non_negatives, negatives), preserving order.

    BUG FIX: the loop iterated the global ``input`` instead of the
    ``list`` parameter, so the argument was silently ignored.
    NOTE: the parameter name shadows the ``list`` builtin; kept unchanged
    for call-site compatibility.
    """
    positive = []
    negative = []
    for n in list:
        if n >= 0:
            positive.append(n)
        else:
            negative.append(n)
    return positive,negative
signedList_of_list(input)
# -
# ### EXERCISE 2:
# Given a list of integers, without using any package or built-in function, compute and print:
# - a dictionary where:
# - keys are unique numbers contained in the list
# - values count the occurrencies of unique numbers in the list
# TIP: you can use dictionary functions
#
# +
"""
Created on Tue Feb 19 08:59:26 2019
@author: francesco
"""
input = [1,2,3,4,2,3,1,2,3,4,2,1,3]

def create_dict(list):
    """Return {unique value: occurrence count} for the given list.

    NOTE: the parameter name shadows the ``list`` builtin; kept unchanged
    for call-site compatibility.
    """
    counts = {}
    for n in list:
        # dict.get replaces the original membership-test-then-index pattern.
        counts[n] = counts.get(n, 0) + 1
    return counts

# BUG FIX: the original print call was missing its closing parenthesis.
print(create_dict(input))
# -
# ### EXERCISE 3:
# Given an unordered list of numbers, without using any package or built-in function, define functions to (and print results):
#
# - swap the values of two elements in the list
# - order ascendently the list
# - find mean and median of the list
#
# +
"""
Created on Tue Feb 19 09:37:34 2019
@author: francesco
"""
input = [30,10,40,20,50]
# -
# ##### swap the values of two elements in the list
# +
#swap the values of two elements in the list
def swap(list, x, y):
    """Swap the first occurrences of values x and y in `list`, in place."""
    i = list.index(x)
    j = list.index(y)
    list[i], list[j] = y, x
swap(input, 10, 40)
print(input)
# -
# ##### order ascendently the list
#
# +
#order ascendently the list
def sort_ascendently(list):
"""sort a list and reverse it"""
list.sort();
list.reverse();
sort_ascendently(input)
print(input)
# -
# ##### values count the occurrencies of unique numbers in the list
# +
#find mean and median of the lis
def find_mean(list):
    """Arithmetic mean of the numbers in `list` (no library helpers)."""
    total = 0
    for value in list:
        total = total + value
    return total / len(list)
def find_median(list):
    """Median of an already *sorted* list of numbers.

    BUG FIX: the original indexed ``len//2 + 1`` for even lengths (off by
    one, and no averaging); the standard definition averages the two
    middle elements for even-length lists.
    NOTE(review): like the original, this assumes the caller sorted the
    list first — confirm against the surrounding exercise flow.
    """
    mid = len(list) // 2
    if len(list) % 2 == 0:
        return (list[mid - 1] + list[mid]) / 2
    return list[mid]
print(find_mean(input))
print(find_median(input))
# -
# ### EXERCISE 4:
# Given a list of 2-dimensional tuples, without using any package
# or built-in function, define functions to (and print results):
# - find the Euclidean distance between two tuples t1 and t2
# - find the Euclidean distance between all tuples of the list
# - to compute the coordinates of the centroid of the list of tuples
# TIP: you can use the math module (use `import math`)
#
# +
"""
Created on Tue Feb 19 09:37:47 2019
@author: francesco
"""
input = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0), (4.0, 4.0)]
# -
# ##### find the Euclidean distance between two tuples t1 and t2
# +
def euclidean_distance(start, stop):
    """Euclidean distance between two 2-D points given as (x, y) tuples."""
    dx = stop[0] - start[0]
    dy = stop[1] - start[1]
    return math.sqrt(dx * dx + dy * dy)
euclidean_distance(input[0], input[1])
# -
# ##### find the Euclidean distance between all tuples of the list
# +
def euclidean_distance_all(points):
    """Print the Euclidean distance for every ordered pair of points.

    BUG FIX: the original computed each distance but discarded the return
    value, printing only the pair of points with no distance.
    """
    for start_point in points:
        for stop_point in points:
            print("point", start_point, "point", stop_point,
                  "distance", euclidean_distance(start_point, stop_point))
euclidean_distance_all(input)
# -
# ##### to compute the coordinates of the centroid of the list of tuples
# +
def compute_centroid(points):
    """Print and return the centroid (mean x, mean y) of 2-D tuples.

    Returning the coordinates (the original only printed them) makes the
    helper reusable; existing callers that ignored the None return are
    unaffected.
    """
    x_sum = 0
    y_sum = 0
    for coordinate in points:
        x_sum += coordinate[0]
        y_sum += coordinate[1]
    centroid = (x_sum/len(points), y_sum/len(points))
    print("centroid : ", centroid[0], centroid[1])
    return centroid
compute_centroid(input)
# -
|
exercise/Exercises 01-09.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Computer Vision Project
from scipy.spatial import distance as dist
import cv2
import numpy as np
import math
import os
import sys
import copy
from matplotlib import pyplot as plt
# +
cards = {
# Jokers in no light
'00279': ['Joker', None, 0],
'00390': ['Joker', None, 0],
# Spades in no light
'00526': ['Ace', 'Spades', 1],
'00665': ['2', 'Spades', 2],
'00790': ['3', 'Spades', 3],
'00929': ['4', 'Spades', 4],
'01079': ['5', 'Spades', 5],
'01211': ['6', 'Spades', 6],
'01408': ['7', 'Spades', 7],
'01543': ['8', 'Spades', 8],
'01680': ['9', 'Spades', 9],
'01824': ['10', 'Spades', 10],
'01962': ['Jack', 'Spades', 10],
'02088': ['Queen', 'Spades', 10],
'02230': ['King', 'Spades', 10],
# Diamonds in no light
'02366': ['Ace', 'Diamonds', 1],
'02472': ['2', 'Diamonds', 2],
'02631': ['3', 'Diamonds', 3],
'02744': ['4', 'Diamonds', 4],
'02921': ['5', 'Diamonds', 5],
'03093': ['6', 'Diamonds', 6],
'03211': ['7', 'Diamonds', 7],
'03376': ['8', 'Diamonds', 8],
'03495': ['9', 'Diamonds', 9],
'03632': ['10', 'Diamonds', 10],
'03724': ['Jack', 'Diamonds', 10],
'03844': ['Queen', 'Diamonds', 10],
'03957': ['King', 'Diamonds', 10],
# Clubs in no light
'04088': ['King', 'Clubs', 10],
'04217': ['Queen', 'Clubs', 10],
'04317': ['Jack', 'Clubs', 10],
'04425': ['10', 'Clubs', 10],
'04521': ['9', 'Clubs', 9],
'04609': ['8', 'Clubs', 8],
'04733': ['7', 'Clubs', 7],
'04873': ['6', 'Clubs', 6],
'04965': ['5', 'Clubs', 5],
'05097': ['4', 'Clubs', 4],
'05209': ['3', 'Clubs', 3],
'05329': ['2', 'Clubs', 2],
'05449': ['Ace', 'Clubs', 1],
# Hearts in no light
'05553': ['King', 'Hearts', 10],
'05653': ['Queen', 'Hearts', 10],
'05749': ['Jack', 'Hearts', 10],
'05841': ['10', 'Hearts', 10],
'05933': ['9', 'Hearts', 9],
'06041': ['8', 'Hearts', 8],
'06153': ['7', 'Hearts', 7],
'06245': ['6', 'Hearts', 6],
'06349': ['5', 'Hearts', 5],
'06417': ['4', 'Hearts', 4],
'06549': ['3', 'Hearts', 3],
'06685': ['2', 'Hearts', 2],
'06869': ['Ace', 'Hearts', 1],
# Desk in no light
'06904': ['Desk', 'Desk' , 0],
'07064': ['Desk', 'Desk' , 0],
# Desk in light
'07324': ['Desk', 'Desk' , 0],
'07344': ['Desk', 'Desk' , 0],
# Hearts in light
'07544': ['Ace', 'Hearts', 1],
'07630': ['2', 'Hearts', 2],
'07738': ['3', 'Hearts', 3],
'07834': ['4', 'Hearts', 4],
'07934': ['5', 'Hearts', 5],
'08026': ['6', 'Hearts', 6],
'08126': ['7', 'Hearts', 7],
'08234': ['8', 'Hearts', 8],
'08342': ['9', 'Hearts', 9],
'08442': ['10', 'Hearts', 10],
'08546': ['Jack', 'Hearts', 10],
'08658': ['Queen', 'Hearts', 10],
'08754': ['King', 'Hearts', 10],
# Clubs in light
'08862': ['Ace', 'Clubs', 1],
'08970': ['2', 'Clubs', 2],
'09074': ['3', 'Clubs', 3],
'09186': ['4', 'Clubs', 4],
'09282': ['5', 'Clubs', 5],
'09378': ['6', 'Clubs', 6],
'09490': ['7', 'Clubs', 7],
'09610': ['8', 'Clubs', 8],
'09722': ['9', 'Clubs', 9],
'09810': ['10', 'Clubs', 10],
'09926': ['Jack', 'Clubs', 10],
'10026': ['Queen', 'Clubs', 10],
'10142': ['King', 'Clubs', 10],
# Diamonds in light
'10250': ['King', 'Diamonds', 10],
'10342': ['Queen', 'Diamonds', 10],
'10426': ['Jack', 'Diamonds', 10],
'10546': ['10', 'Diamonds', 10],
'10646': ['9', 'Diamonds', 9],
'10890': ['8', 'Diamonds', 8],
'10734': ['7', 'Diamonds', 7],
'11066': ['6', 'Diamonds', 6],
'11158': ['5', 'Diamonds', 5],
'11258': ['4', 'Diamonds', 4],
'11366': ['3', 'Diamonds', 3],
'11486': ['2', 'Diamonds', 2],
'11598': ['Ace', 'Diamonds', 1],
# Spades in light
'11714': ['King', 'Spades', 10],
'11814': ['Queen', 'Spades', 10],
'11930': ['Jack', 'Spades', 10],
'12050': ['10', 'Spades', 10],
'12158': ['9', 'Spades', 9],
'12258': ['8', 'Spades', 8],
'12370': ['7', 'Spades', 7],
'12478': ['6', 'Spades', 6],
'12610': ['5', 'Spades', 5],
'12718': ['4', 'Spades', 4],
'12877': ['3', 'Spades', 3],
'12978': ['2', 'Spades', 2],
'13088': ['Ace', 'Spades', 1],
# Jokers in no light
'13220': ['Joker', None, 0],
'13312': ['Joker', None, 0]
}
Train_overlap = {
'00001': [['Desk', 'Desk' , 0]],
'00165': [['King', 'Hearts', 10]],
'00277': [['King', 'Hearts', 10], ['7', 'Hearts', 7]],
'00445': [['King', 'Hearts', 10], ['7', 'Hearts', 7], ['8', 'Clubs', 8]],
'00653': [['3', 'Hearts', 3]],
'00753': [['3', 'Hearts', 3], ['5', 'Hearts', 5]],
'00849': [['3', 'Hearts', 3], ['5', 'Hearts', 5], ['Ace', 'Clubs', 1]],
'01049': [['3', 'Hearts', 3], ['5', 'Hearts', 5], ['Ace', 'Clubs', 1], ['Queen', 'Clubs', 10]],
'01305': [['3', 'Hearts', 3], ['5', 'Hearts', 5], ['Ace', 'Clubs', 1], ['Queen', 'Clubs', 10], ['6', 'Diamonds', 6]],
'01481': [['Ace', 'Diamonds', 1]],
'01545': [['Ace', 'Diamonds', 1], ['King', 'Diamonds', 10]],
'01641': [['Ace', 'Diamonds', 1], ['King', 'Diamonds', 10], ['5', 'Spades', 5]],
'01765': [['Ace', 'Diamonds', 1], ['King', 'Diamonds', 10], ['5', 'Spades', 5], ['10', 'Clubs', 10]],
'01941': [['10', 'Hearts', 10]],
'02027': [['10', 'Hearts', 10], ['King', 'Clubs', 10]],
'02176': [['6', 'Spades', 6]],
'02251': [['6', 'Spades', 6], ['3', 'Clubs', 3]],
'02344': [['6', 'Spades', 6], ['3', 'Clubs', 3], ['10', 'Spades', 10]],
'02464': [['6', 'Spades', 6], ['3', 'Clubs', 3], ['10', 'Spades', 10], ['8', 'Diamonds', 8]],
'02611': [['6', 'Hearts', 6]],
'02704': [['6', 'Hearts', 6], ['10', 'Diamonds', 10]],
'02794': [['6', 'Hearts', 6], ['10', 'Diamonds', 10], ['6', 'Clubs', 6]],
'02986': [['4', 'Spades', 4]],
'03091': [['4', 'Spades', 4], ['5', 'Diamonds', 5]],
'03172': [['4', 'Spades', 4], ['5', 'Diamonds', 5], ['2', 'Clubs', 2]],
'03301': [['4', 'Spades', 4], ['5', 'Diamonds', 5], ['2', 'Clubs', 2], ['Queen', 'Spades', 10]],
'03577': [['9', 'Spades', 9]],
'03685': [['9', 'Spades', 9], ['3', 'Diamonds', 3]],
'03748': [['9', 'Spades', 9], ['3', 'Diamonds', 3], ['Jack', 'Hearts', 10]],
'03850': [['9', 'Spades', 9], ['3', 'Diamonds', 3], ['Jack', 'Hearts', 10], ['7', 'Diamonds', 7]],
'03919': [['9', 'Spades', 9], ['3', 'Diamonds', 3], ['Jack', 'Hearts', 10], ['7', 'Diamonds', 7]],
'04132': [['8', 'Spades', 8]],
'04198': [['8', 'Spades', 8], ['Jack', 'Clubs', 10]],
'04351': [['7', 'Spades', 7]],
'04414': [['7', 'Spades', 7], ['7', 'Clubs', 7]],
'04528': [['7', 'Spades', 7], ['7', 'Clubs', 7], ['Queen', 'Hearts', 10]],
'04705': [['4', 'Diamonds', 4]],
'04780': [['4', 'Diamonds', 4], ['Ace', 'Hearts', 1]],
'05005': [['4', 'Diamonds', 4], ['Ace', 'Hearts', 1], ['Jack', 'Spades', 10]],
'05152': [['4', 'Diamonds', 4], ['Ace', 'Hearts', 1], ['Jack', 'Spades', 10], ['2', 'Hearts', 2]],
'05224': [['4', 'Diamonds', 4], ['Ace', 'Hearts', 1], ['Jack', 'Spades', 10], ['2', 'Hearts', 2], ['5', 'Hearts', 5]],
'05395': [['9', 'Hearts', 9]],
'05467': [['9', 'Hearts', 9], ['4', 'Hearts', 4]],
'05545': [['9', 'Hearts', 9], ['4', 'Hearts', 4], ['Queen', 'Diamonds', 10]],
'05692': [['2', 'Diamonds', 2]],
'05782': [['2', 'Diamonds', 2], ['9', 'Clubs', 9]],
'05869': [['2', 'Diamonds', 2], ['9', 'Clubs', 9], ['9', 'Diamonds', 9]],
'05950': [['2', 'Diamonds', 2], ['9', 'Clubs', 9], ['9', 'Diamonds', 9], ['Ace', 'Spades', 1]],
'06202': [['4', 'Clubs', 4]],
'06268': [['4', 'Clubs', 4], ['2', 'Spades', 2]],
'06333': [['4', 'Clubs', 4], ['2', 'Spades', 2], ['King', 'Spades', 10]],
'06469': [['4', 'Clubs', 4], ['2', 'Spades', 2], ['King', 'Spades', 10], ['Jack', 'Diamonds', 10]],
'06597': [['3', 'Spades', 3]],
'06673': [['3', 'Spades', 3], ['8', 'Hearts', 8]],
'06801': [['Desk', 'Desk' , 0]]
}
Train_flat = {
'00001': [],#Desk
'00157': [['8', 'Clubs', 8]],
'00293': [['8', 'Clubs', 8], ['5', 'Hearts', 5]],
'00529': [['8', 'Clubs', 8], ['5', 'Hearts', 5], ['Ace', 'Spades', 1]],
'00673': [['8', 'Clubs', 8], ['5', 'Hearts', 5], ['Ace', 'Spades', 1], ['Jack', 'Clubs', 10]],
'01265': [],#Desk
'01481': [['4', 'Clubs', 4]],
'01573': [['4', 'Clubs', 4], ['Ace', 'Clubs', 1]],
'01741': [['4', 'Clubs', 4], ['Ace', 'Clubs', 1], ['Ace', 'Diamonds', 1]],
'02053': [['4', 'Clubs', 4], ['Ace', 'Clubs', 1], ['Ace', 'Diamonds', 1], ['10', 'Diamonds', 10]],
'02213': [['4', 'Clubs', 4], ['Ace', 'Clubs', 1], ['Ace', 'Diamonds', 1], ['10', 'Diamonds', 10], ['6', 'Clubs', 6]],
'02629': [['8', 'Spades', 8]],
'02901': [['8', 'Spades', 8], ['2', 'Spades', 2]],
'02945': [['8', 'Spades', 8], ['2', 'Spades', 2]],
'03041': [['8', 'Spades', 8], ['2', 'Spades', 2], ['King', 'Diamonds', 10]],
'03313': [['8', 'Spades', 8]],
'03401': [['8', 'Spades', 8], ['2', 'Spades', 2]],
'03477': [['8', 'Spades', 8], ['2', 'Spades', 2], ['King', 'Diamonds', 10]],
'03717': [['King', 'Spades', 10]],
'03885': [['King', 'Spades', 10], ['7', 'Spades', 7]],
'04445': [['6', 'Spades', 6]],
'04573': [['6', 'Spades', 6], ['Queen', 'Clubs', 10]],
'04657': [['6', 'Spades', 6], ['Queen', 'Clubs', 10]],
'04821': [['6', 'Spades', 6], ['Queen', 'Clubs', 10], ['3', 'Clubs', 3]],
'05261': [['Ace', 'Hearts', 1]],
'05337': [['Ace', 'Hearts', 1], ['10', 'Clubs', 10]],
'05821': [['King', 'Clubs', 10]],
'05913': [['King', 'Clubs', 10], ['Jack', 'Spades', 10]],
'06115': [['7', 'Clubs', 7]],
'06187': [['7', 'Clubs', 7], ['4', 'Hearts', 4]],
'06375': [['7', 'Clubs', 7], ['4', 'Hearts', 4], ['10', 'Spades', 10]],
'06891': [['5', 'Diamonds', 5]],
'06975': [['5', 'Diamonds', 5], ['5', 'Spades', 5]],
'07115': [['5', 'Diamonds', 5], ['5', 'Spades', 5], ['Jack', 'Diamonds', 10]],
'07303': [['4', 'Diamonds', 4]],
'07355': [['4', 'Diamonds', 4], ['3', 'Spades', 3]],
'07431': [['4', 'Diamonds', 4], ['3', 'Spades', 3], ['King', 'Hearts', 10]],
'07919': [['8', 'Hearts', 8]],
'07975': [['8', 'Hearts', 8], ['Queen', 'Diamonds', 10]],
'08343': [['8', 'Hearts', 8], ['Queen', 'Diamonds', 10], ['2', 'Clubs', 2]],
'08667': [['9', 'Spades', 9]],
'08735': [['9', 'Spades', 9], ['2', 'Hearts', 2]],
'08811': [['9', 'Spades', 9], ['2', 'Hearts', 2], ['Jack', 'Hearts', 10]],
'09419': [['5', 'Clubs', 5]],
'09551': [['5', 'Clubs', 5], ['10', 'Hearts', 10]],
'09903': [['5', 'Clubs', 5], ['10', 'Hearts', 10], ['4', 'Spades', 4]],
'10255': [['Queen', 'Hearts', 10]],
'10355': [['Queen', 'Hearts', 10], ['2', 'Diamonds', 2]],
'10443': [['Queen', 'Hearts', 10], ['2', 'Diamonds', 2], ['9', 'Hearts', 9]],
'10971': [['9', 'Clubs', 9]],
'11055': [['9', 'Clubs', 9], ['6', 'Diamonds', 6]],
'11823': [['9', 'Clubs', 9], ['6', 'Diamonds', 6], ['9', 'Diamonds', 9]],
'12107': [['8', 'Diamonds', 8]],
'12183': [['8', 'Diamonds', 8], ['3', 'Hearts', 3]],
'12263': [['8', 'Diamonds', 8], ['3', 'Hearts', 3], ['7', 'Hearts', 7]],
'12871': [['Queen', 'Spades', 10]],
'12951': [['Queen', 'Spades', 10], ['7', 'Diamonds', 7]],
'13123': [['Queen', 'Spades', 10], ['7', 'Diamonds', 7], ['3', 'Diamonds', 3]],
'13415': [['6', 'Hearts', 6]],
'13483': [['6', 'Hearts', 6], ['9', 'Diamonds', 9]],
'13667': [['6', 'Diamonds', 6], ['6', 'Hearts', 6], ['9', 'Diamonds', 9]],
}
# -
def show_image(image, image_name='image'):
    """Display `image` in a window titled `image_name` until a key is pressed.

    BUG FIX: the window title parameter was ignored ('image' was
    hard-coded in the cv2.imshow call).
    """
    cv2.imshow(image_name, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Extra waitKey calls pump the GUI event loop so the window actually
    # closes on some platforms.
    for i in range(1, 5):
        cv2.waitKey(1)
def preprocess_image_grad(image):
    """Binarize `image` by thresholding its gradient magnitude.

    Returns a float mask in which strong edges (e.g. card borders) are 1
    and flat regions are 0.
    """
    image = np.float32(image) / 255.0
    # Calculate gradient
    gx = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=1)
    gy = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=1)
    # Per-pixel gradient magnitude and direction; only the magnitude is used.
    mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
    gray = cv2.cvtColor(mag,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),0)
    # Values are still in [0, 1] here, hence the 0.20 threshold.
    retval,thresh = cv2.threshold(blur,0.20,1,cv2.THRESH_BINARY)
    return thresh
def valid_contours(min_, max_, min_area, max_area, contours):
    """Indices of contours whose perimeter lies in (min_, max_) and whose
    area lies in (min_area, max_area), both bounds exclusive."""
    return [
        idx for idx, contour in enumerate(contours)
        if min_ < cv2.arcLength(contour, False) < max_
        and min_area < cv2.contourArea(contour, False) < max_area
    ]
def test_contours(image):
    """External contours of `image` after a fixed binary threshold at 127."""
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    retval,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
    # NOTE(review): the 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy) — confirm the pinned version.
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
    return contours
def order_points(pts):
    """Order four 2-D points as [top-right, bottom-right, bottom-left, top-left].

    The two leftmost points are separated by y to get top-left/bottom-left;
    the right pair is ordered by distance from the top-left corner.
    """
    by_x = pts[np.argsort(pts[:, 0]), :]
    left = by_x[:2, :]
    right = by_x[2:, :]
    left = left[np.argsort(left[:, 1]), :]
    tl, bl = left
    distances = dist.cdist(tl[np.newaxis], right, "euclidean")[0]
    br, tr = right[np.argsort(distances)[::-1], :]
    return np.array([tr, br, bl, tl], dtype="float32")
def train_contours(folder, filename):
    """External contours of the label-mask image `<folder>/<filename>-lbl.png`."""
    path = os.path.join(folder, filename + '-lbl.png')
    image = cv2.imread(path)
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    retval,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
    # NOTE(review): 3-value return is the OpenCV 3.x findContours API.
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
    return contours
def affine_transform(folder=None, filename=None, contour=None, image=None):
    """Perspective-warp the card under `contour` to a flat 450x450 image.

    Either pass `image` directly, or `folder`/`filename` to load
    `<folder>/<filename>.png` from disk.
    """
    if image is None:
        path = os.path.join(folder, filename + '.png')
        image = cv2.imread(path)
    # Approximate the contour with a polygon (1% of perimeter tolerance).
    epsilon = 0.01*cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, epsilon, True)
    # BUG FIX: `len(approx) is not 4` compared object identity, not value;
    # use `!=` for integer comparison.
    if len(approx) != 4:
        # Not a clean quadrilateral: fall back to the min-area bounding box.
        rect = cv2.minAreaRect(approx)
        approx = cv2.boxPoints(rect)
    else:
        approx = np.reshape(approx, (4,2))
    approx = order_points(approx)
    # Destination corners of the flattened 450x450 card.
    h = np.array([ [0,0],[449,0],[449,449],[0,449] ],np.float32)
    transform = cv2.getPerspectiveTransform(approx, h)
    warp = cv2.warpPerspective(image,transform,(450,450))
    return warp
class Deck(object):
    """Reference deck: one Single_card per labelled training frame."""
    def __init__(self):
        # All reference cards, in load order.
        self.cards = []
    def load_deck(self, cards, folder):
        """Populate the deck from a {frame_id: [rank, suit, value]} mapping.

        Each labelled frame's contour is warped to a flat 450x450 image and
        SIFT keypoints/descriptors are precomputed for later matching.
        """
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()
        for key in cards:
            card = Single_card()
            card.filename = key
            card.rank = cards[key][0]
            card.suit = cards[key][1]
            card.value = cards[key][2]
            card.contour = train_contours(folder, key)
            # Empty contour list means no card mask was found in this frame.
            if card.contour:
                card.card = np.array(affine_transform(folder=folder, contour=card.contour[0], filename=key))
                card.sift = sift.detectAndCompute(card.card,None)
            else:
                card.card = np.array([])
            self.cards.append(card)
class Single_card(object):
    """One playing card: identity, geometry, and precomputed features."""
    def __init__(self):
        self.rank = None      # e.g. 'Ace', '2', ..., 'King', 'Joker', 'Back'
        self.suit = None      # e.g. 'Spades'; None for jokers
        self.value = 0        # blackjack value (face cards = 10, ace = 1)
        self.contour = None   # OpenCV contour(s) outlining the card
        self.card = None      # warped 450x450 image of the card face
        self.height = 0
        self.width = 0
        self.sift = None      # (keypoints, descriptors) from SIFT, or None
        self.filename = None  # source frame id, e.g. '00526'
class black_jack_round(object):
    """Ground-truth set of cards visible in a single labelled frame."""
    def __init__(self):
        self.round = []       # Single_card objects expected in the frame
        self.filename = None  # frame id the labels belong to
class training_set(object):
    """Collection of labelled blackjack rounds used as ground truth."""
    def __init__(self):
        # One black_jack_round per labelled frame.
        self.set = []
    def load_training_set(self, training_set):
        """Build rounds from a {frame_id: [[rank, suit, value], ...]} mapping.

        (Removed the unused ``enumerate`` index from the inner loop.)
        """
        for key in training_set:
            black_jack = black_jack_round()
            for card_ in training_set[key]:
                card = Single_card()
                card.rank = card_[0]
                card.suit = card_[1]
                card.value = card_[2]
                black_jack.round.append(card)
            black_jack.filename = key
            self.set.append(black_jack)
sing_cards ='CV-BlackjackStudent/GOPR0317.MP4-lbl/'
lap_cards ='CV-BlackjackStudent/GOPR0318.MP4-lbl/'
ind_cards ='CV-BlackjackStudent/GOPR0319.MP4-lbl/'
# Preview: gradient mask, detected contours, and each warped candidate card.
image = cv2.imread('CV-BlackjackStudent/GOPR0319.MP4-lbl/08343.png')
thresh = preprocess_image_grad(image)
imS = cv2.resize(thresh, (960, 540))
show_image(imS)
thresh = cv2.convertScaleAbs(thresh)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
valid_conts = [contours[i] for i in valid_contours(250, 2000, 500, 100000, contours)]
# BUG FIX: this assignment was commented out, so the `drawn` used two lines
# below raised a NameError.
drawn = cv2.drawContours(image.copy(), valid_conts, -1, (0,255,0), 1)
imS = cv2.resize(drawn, (960, 540))
show_image(imS)
for i in range(len(valid_conts)):
    warp = affine_transform(image=image, contour=valid_conts[i])
    show_image(warp)
deck = Deck()
deck.load_deck(cards, sing_cards)
len(deck.cards)
# +
image = cv2.imread('CV-BlackjackStudent/GOPR0319.MP4-lbl/07431.png')
thresh = preprocess_image_grad(image)
thresh = cv2.convertScaleAbs(thresh)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
valid_conts = [contours[i] for i in valid_contours(250, 2000, 500, 100000, contours)]
#for i in range(len(valid_conts)):
img1 = affine_transform(image=image, contour=valid_conts[1])
card = Single_card()
card.filename = '07431'
card.rank = 'Back'
card.suit = 'of Card'
card.value = 0
card.contour = [valid_conts[1]]
if card.contour:
card.card = np.array(affine_transform(folder='CV-BlackjackStudent/GOPR0319.MP4-lbl', contour=card.contour[0], filename=card.filename))
card.sift = sift.detectAndCompute(card.card,None)
else:
card.card = np.array([])
deck.cards.append(card)
# -
print(deck.cards[2].sift[1])
training_set_overlap = training_set()
training_set_overlap.load_training_set(Train_overlap)
training_set_flat = training_set()
training_set_flat.load_training_set(Train_flat)
warp = affine_transform(image=thresh, contour=valid_conts[5])
show_image(warp)
image_cards = training_set_flat.set[3].round
print(image_cards)
# +
path = os.path.join(ind_cards, training_set_flat.set[3].filename + '.png')
image = cv2.imread(path)
thresh = preprocess_image_grad(image)
thresh = cv2.convertScaleAbs(thresh)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
valid_conts = [contours[i] for i in valid_contours(250, 2000, 500, 100000, contours)]
for i in range(len(valid_conts)):
warp = affine_transform(image=image, contour=valid_conts[i])
show_image(warp)
# imS = cv2.resize(image, (960, 540))
# show_image(imS)
# -
def preprocess_(image):
    """Gradient-magnitude binarization variant returning a 0/255 mask."""
    image = np.float32(image) / 255.0
    # Calculate gradient
    gx = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=1)
    gy = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=1)
    mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
    gray = cv2.cvtColor(mag,cv2.COLOR_BGR2GRAY)
    # NOTE(review): `blur` is computed but never used — the threshold below
    # operates on the rescaled `gray`; confirm whether blurring was intended.
    blur = cv2.GaussianBlur(gray,(5,5),0)
    # Rescale to the 0..255 range before the fixed threshold at 20.
    gray = np.float32(gray) * 255.0
    retval,thresh = cv2.threshold(gray,20,255,cv2.THRESH_BINARY)
    #thresh = np.float32(thresh) * 255.0
    return thresh
warp = affine_transform(image=image, contour=valid_conts[1])
warp_ = affine_transform(image=image, contour=valid_conts[2])
pros_im = preprocess_(warp)
show_image(pros_im)
print(pros_im.max())
diff = cv2.absdiff(preprocess_(warp),preprocess_(warp_))
show_image(diff)
print(np.sum(diff))
cards_detected = []
for i in range(len(valid_conts)):
diffs = []
warp = affine_transform(image=image, contour=valid_conts[i])
for j in range(len(deck.cards)):
if len(deck.cards[j].card) > 0:
diff = cv2.absdiff(preprocess_(warp),preprocess_(deck.cards[j].card))
diff = cv2.GaussianBlur(diff,(5,5),5)
flag, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)
diffs.append(np.sum(diff))
else:
diffs.append((warp.shape[0]*warp.shape[1])**2)
indx = np.argmin(diffs)
if diffs[indx] <= 500000:
cards_detected.append(deck.cards[indx])
print('card:' + str(deck.cards[indx].rank) +" " + str(deck.cards[indx].suit) +" / diff:" + str(diffs[indx]))
show_image(warp)
def preprocess(img):
    """Adaptive-threshold preprocessing: gray -> blur -> adaptive threshold -> blur."""
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),2 )
    # Adaptive threshold handles the uneven table lighting across the frame.
    thresh = cv2.adaptiveThreshold(blur,255,1,1,11,1)
    blur_thresh = cv2.GaussianBlur(thresh,(5,5),5)
    return blur_thresh
def accuracyDiff(folder, training_set):
    """Evaluate the absolute-difference card matcher over a training set.

    Returns totals (correct, expected, reported) aggregated via
    correctCount over all frames.  Relies on the module-level `deck`
    of reference cards.
    """
    total_c_cards = 0
    total_t_cards = 0
    total_f_cards = 0
    iterations = len(training_set.set)
    for i in range(iterations):
        # Progress indicator rewritten in place with '\r'.
        sys.stdout.write("\rTraining Iteration: " + str(i+1) + "/" + str(iterations))
        image_cards = training_set.set[i].round
        path = os.path.join(folder, training_set.set[i].filename + '.png')
        image = cv2.imread(path)
        thresh = preprocess_image_grad(image)
        thresh = cv2.convertScaleAbs(thresh)
        im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
        valid_conts = [contours[i] for i in valid_contours(250, 2000, 500, 100000, contours)]
        cards_detected = []
        for j in range(len(valid_conts)):
            diffs = []
            warp = affine_transform(image=image, contour=valid_conts[j])
            for k in range(len(deck.cards)):
                if len(deck.cards[k].card) > 0:
                    # Pixel-wise difference between candidate and reference card.
                    diff = cv2.absdiff(preprocess_(warp),preprocess_(deck.cards[k].card))
                    diff = cv2.GaussianBlur(diff,(5,5),5)
                    flag, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)
                    diffs.append(np.sum(diff))
                else:
                    # No reference image: assign a prohibitively large diff.
                    diffs.append((warp.shape[0]*warp.shape[1])**2)
            indx = np.argmin(diffs)
            # NOTE(review): the acceptance threshold is 5,000,000 here but
            # 500,000 in the interactive cell above — confirm which is intended.
            if diffs[indx] <= 5000000:
                cards_detected.append(deck.cards[indx])
        c_cards, t_cards, f_cards = correctCount(image_cards, cards_detected)
        total_c_cards += c_cards
        total_t_cards += t_cards
        total_f_cards += f_cards
    return (total_c_cards, total_t_cards, total_f_cards)
def correctCount(image_cards, detected_cards):
    """Compare detections against ground truth for one frame.

    Returns (correct, expected, reported): `correct` is the number of
    detections matching a ground-truth card by rank and suit, `expected`
    is the ground-truth count, and `reported` is the detection count
    excluding cards recognised as backs.
    """
    correct = 0
    reported = len(detected_cards)
    expected = len(image_cards)
    for detected in detected_cards:
        if detected.rank == 'Back':
            reported -= 1
        for truth in image_cards:
            if detected.suit == truth.suit and detected.rank == truth.rank:
                correct += 1
    return (correct, expected, reported)
correct_cards, total_cards, found_cards = accuracyDiff(ind_cards, training_set_flat)
print(correct_cards/ total_cards)
print(correct_cards, total_cards, found_cards)
# +
image = cv2.imread('CV-BlackjackStudent/GOPR0319.MP4-lbl/07431.png')
thresh = preprocess_image_grad(image)
thresh = cv2.convertScaleAbs(thresh)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
valid_conts = [contours[i] for i in valid_contours(250, 2000, 500, 100000, contours)]
#for i in range(len(valid_conts)):
img1 = affine_transform(image=image, contour=valid_conts[1])
show_image(img1)
# +
image = cv2.imread('CV-BlackjackStudent/GOPR0317.MP4-lbl/03211.png')
thresh = preprocess_image_grad(image)
thresh = cv2.convertScaleAbs(thresh)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
valid_conts = [contours[i] for i in valid_contours(250, 2000, 500, 100000, contours)]
#for i in range(len(valid_conts)):
img2 = affine_transform(image=image, contour=valid_conts[1])
show_image(img2)
# +
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# queryKP,queryDesc=detector.detectAndCompute(QueryImg,None)
# # BFMatcher with default params
# bf = cv2.BFMatcher()
# matches = bf.knnMatch(des1,des2, k=2)
# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1,des2, k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < 0.75*n.distance:
good.append([m])
# cv2.drawMatchesKnn expects list of lists as matches.
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good, None,flags=2)
print(len(good))
plt.imshow(img3),plt.show()
# -
def accuracySift(folder, training_set):
    """Evaluate the SIFT-based `card_detector` over a training set.

    Returns totals (correct, expected, reported) aggregated via
    correctCount over all frames.  Relies on the module-level `deck`
    of reference cards.
    """
    total_c_cards = 0
    total_t_cards = 0
    total_f_cards = 0
    iterations = len(training_set.set)
    for i in range(iterations):
        # Progress indicator rewritten in place with '\r'.
        sys.stdout.write("\rTraining Iteration: " + str(i+1) + "/" + str(iterations))
        image_cards = training_set.set[i].round
        path = os.path.join(folder, training_set.set[i].filename + '.png')
        cards_detected = card_detector(path, deck)
        c_cards, t_cards, f_cards = correctCount(image_cards, cards_detected)
        total_c_cards += c_cards
        total_t_cards += t_cards
        total_f_cards += f_cards
    return (total_c_cards, total_t_cards, total_f_cards)
correct_cards, total_cards, found_cards = accuracySift(ind_cards, training_set_flat)
print(correct_cards/ total_cards)
# +
# 61 cards are not part of the mask. but are present in the image
# -
print(correct_cards, total_cards, found_cards)
# previous run output (pasted into the notebook): 99 122 235
def card_detector(path, deck):
    """Detect and identify the cards in the image at `path`.

    Each candidate contour is warped flat and SIFT-matched against the
    reference `deck`; the best match above 15 good matches is kept,
    deduplicating repeated identifications by match count.  Returns a
    list of deep-copied Single_card objects with .contour and .matches set.
    NOTE(review): relies on the module-level `sift` detector existing
    before this is called — confirm the notebook cell execution order.
    """
    image = cv2.imread(path)
    thresh = preprocess_image_grad(image)
    thresh = cv2.convertScaleAbs(thresh)
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_NONE)
    valid_conts = [contours[i] for i in valid_contours(250, 2000, 500, 100000, contours)]
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    cards_detected = []
    for j in range(len(valid_conts)):
        diffs = []
        warp = affine_transform(image=image, contour=valid_conts[j])
        kp1, des1 = sift.detectAndCompute(warp,None)
        for k in range(len(deck.cards)):
            # BUG FIX: was `== None`; identity comparison is the correct check.
            if deck.cards[k].sift is None:
                diffs.append(0)
            else:
                matches = bf.knnMatch(des1,deck.cards[k].sift[1], k=2)
                # Lowe's ratio test keeps only unambiguous matches.
                good = []
                for m,n in matches:
                    if m.distance < 0.75*n.distance:
                        good.append([m])
                diffs.append(len(good))
        indx = np.argmax(diffs)
        if diffs[indx] > 15:
            Flag_Safe_Add = True
            for l in range(len(cards_detected)):
                if cards_detected[l].suit == deck.cards[indx].suit and cards_detected[l].rank == deck.cards[indx].rank:
                    max_match = max(cards_detected[l].matches, diffs[indx])
                    # BUG FIX: was `max_match is diffs[indx]` — identity on
                    # integers is unreliable; compare by value.
                    if max_match == diffs[indx]:
                        # New detection is stronger: drop the old one.
                        cards_detected.pop(l)
                        break
                    else:
                        Flag_Safe_Add = False
            if Flag_Safe_Add:
                card = copy.deepcopy(deck.cards[indx])
                card.contour = valid_conts[j]
                card.matches = diffs[indx]
                cards_detected.append(card)
    return cards_detected
cards = card_detector('CV-BlackjackStudent/GOPR0319.MP4-lbl/08811.png', deck)
im = cv2.imread('CV-BlackjackStudent/GOPR0319.MP4-lbl/08811.png')
for i in range(len(cards)):
# compute the center of the contour
M = cv2.moments(cards[i].contour)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
text = 'card:' + str(cards[i].rank) +" " + str(cards[i].suit)
cv2.putText(im, text, (cX - 40, cY + 20), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 59, 174), 3)
cv2.drawContours(im, [cards[i].contour], 0, (0,255,0), 2)
imS = cv2.resize(im, (960, 540))
show_image(imS)
# ## surf
# ## orb
# ## kaze
|
blackJack.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Лабораторная работа по машинному обучению
#
#
# Задание №1: Выбрать задачу и проделать с ней все, что мы делали с данными adult.
#
# Задание №2: Для данных применить PCA для сокращения размерности пространства признаков. Изобразить точки, сократив размерность пространства до 2. К новым данным применить классификатор ближайших соседей (с наилучшим значением к-ва соседей, полученнных ранее). Построить графики зависимости ошибки на тестовой и обучающей выборки в зависимости от количества используемых компонент. Найти наилучшее значение количества используемых главных компонент.
#
# Задание №3: Обучить на ваших данных полносвязную (неглубокую) нейронную сеть. В качестве образца см. 17-1. MLP - Распознавание рукописных цифр.ipynb в репозитории https://github.com/NikolaiZolotykh/MachineLearningCourse. Попробуйте 1-2 скрытых слоя. Постройте графики зависимости ошибки на обучающей выборке и тестовой выборках от параметра alpha и от количества нейронов. Наблюдается ли переобучение/недообучение? Сделайте выводы. Обучить на ваших данных 1) random forests, 2) gradient boosted trees. Постройте графики зависимости ошибки (на тестовой и обучающейся выборках, а также oob-ошибку) от количества используемых деревьев. Наблюдается ли переобучение/недообучение? Сделайте выводы. Сделайте общие выводы по всем лабам. Какой метод оказался лучшим. Почему? Все нужно продолжать в том же ноутбуке, в котором вы делали задание №2.
# **Ссылка на данные*:** https://www.kaggle.com/rush4ratio/video-game-sales-with-ratings
#
# *Примечание: сменила базу данных в связи с отсутствием информативности получаемых результатов (методы просто не справляются с такой тяжелой базой данных, нужны более тяжеловесные методы), удостоверилась, что новые данные не были больше никем выбраны; с прошлой базой данных были проделаны задания 1-2, эти результаты также можно найти в репозитории.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
# Load the video-game sales dataset (Kaggle: video-game-sales-with-ratings).
csv_path = 'Video_Games_Sales_as_at_22_Dec_2016.csv'
data = pd.read_csv(csv_path, engine='python')
data.head()
# Данные содержат следующую информацию:
#
# * Name - название видеоигры;
# * Platform - платформа;
# * Year_of_Release - дата выпуска;
# * Genre - жанр;
# * Publisher - издательство;
# * NA_Sales - продажи в Северной Америке;
# * EU_Sales - продажи в Европе;
# * JP_Sales - продажи в Японии;
# * Other_Sales - продажи в других регионах;
# * Global_Sales - общая сводка о продажах в миллионах штук;
# * Critic_Score - средняя оценка критиков;
# * Critic_Count - число оценок критиков;
# * User_Score - оценка пользователей;
# * User_Count - число оценок пользователей;
# * Developer - разработчик;
# * Rating - возрастной рейтинг.
#
# Поставим перед собой задачу научить машину **определять пользовательский рейтинг игры** на основе этих данных.
#
# Для начала заметим, что приведенное количество информации избыточно. Отбрасывание столбцов основано на информативности категорий, дублируемости, а также на необходимости отбрасывания признаков, имеющих слишком большое количество принимаемых значений.
#
# Оставим следующий набор столбцов:
#
# * Platform - платформа;
# * Year_of_Release - дата выпуска;
# * Genre - жанр;
# * Global_Sales - общая сводка о продажах;
# * Critic_Score - средняя оценка критиков;
# * User_Score - оценка пользователей;
# * Rating - возрастной рейтинг.
# Keep only the informative, non-redundant columns for the rating-prediction task.
keep_col = ['Platform', 'Year_of_Release', 'Genre', 'Global_Sales', 'Critic_Score', 'User_Score', 'Rating']
# Bug fix: .copy() detaches the subset from `data`; the original kept a slice,
# so every later in-place edit triggered SettingWithCopyWarning and could
# silently fail to write through.
new_data = data[keep_col].copy()
new_data.to_csv("newDataFile.csv", index=False)
new_data.head()
# **Подготовим наши данные к работе.**
#
# Выделим категориальные и числовые столбцы.
# Partition the columns into categorical (object dtype) and numerical ones.
categorical_columns = []
numerical_columns = []
for col in new_data.columns:
    if new_data[col].dtype.name == 'object':
        categorical_columns.append(col)
    else:
        numerical_columns.append(col)
print(categorical_columns)
print(numerical_columns)
# Заметим, что пользовательская оценка не относится к числовым столбцам, хотя должна. Дело в заполнении этого столбца разными способами. Оценка не указывается либо через пустую ячейку, либо через сокращение tbd = to be developted. Нужно удалить это обозначение и сделать столбец числовым.
# 'User_Score' mixes numbers with the placeholder "tbd" (to be determined),
# so pandas loaded it as object dtype: strip the placeholder, then convert.
new_data.loc[:,'User_Score'] = new_data['User_Score'].str.replace("tbd",'')
# Bug fix: convert the cleaned column from `new_data`; the original converted
# data['User_Score'] (the untouched frame), silently discarding the cleanup
# above.  errors='coerce' turns the now-empty strings into NaN.
new_data.loc[:,'User_Score'] = pd.to_numeric(new_data['User_Score'], errors='coerce')
# Re-derive the categorical/numerical split now that User_Score is numeric.
categorical_columns = [c for c in new_data.columns if new_data[c].dtype.name == 'object']
numerical_columns = [c for c in new_data.columns if new_data[c].dtype.name != 'object']
print(categorical_columns)
print(numerical_columns)
new_data.head()
# Inspect the value set of every remaining categorical column.
for c in categorical_columns:
    print(c, new_data[c].unique())
# **Заполним пропущенные значения в наших данных.**
#
# Заметим, что есть два тривиальных варианта решения проблемы пропущенных данных:
# * удалить столбцы (data.dropna(axis = 1)),
# * удалить строки с такими значениями (data.dropna(axis = 0)).
#
# Но это плохое решение (что тогда останется от наших данных?)
# В связи с этим применим метод заполнения пропущенных значений для каждой количественной переменной их средним или медианным значением. Для категориальных признаков пропущенные значения заполним самым популярным значением признака.
# +
#new_data['Year_of_Release'].isnull().sum()
# +
#new_data.loc[:,'Year_of_Release']=new_data['Year_of_Release'].fillna(0)
# +
#new_data['Year_of_Release'].isnull().sum()
# -
# Show the per-column medians that will be used to fill numeric gaps.
new_data.median(axis = 0)
# Fill numeric NaNs with the column median (object columns are untouched here).
new_data = new_data.fillna(new_data.median(axis = 0), axis = 0)
new_data[numerical_columns].count(axis = 0)
new_data.isnull().sum()
new_data[categorical_columns].count(axis = 0)
# Inspect the categorical columns; .describe() exposes the modal value as `top`.
data['Rating'].describe()
data['Genre'].describe()
data['Platform'].describe()
# Fill each categorical column with its most frequent value.
new_data['Platform'] = new_data['Platform'].fillna(new_data['Platform'].describe().top)
new_data[categorical_columns].count(axis = 0)
new_data['Genre'] = new_data['Genre'].fillna(new_data['Genre'].describe().top)
new_data[categorical_columns].count(axis = 0)
new_data['Rating'] = new_data['Rating'].fillna(new_data['Rating'].describe().top)
new_data[categorical_columns].count(axis = 0)
new_data.isnull().sum()
new_data.describe(include = [object])
data_describe = new_data.describe(include = [object])
# Generic mode-fill over all categorical columns; this repeats the three
# explicit fills above, which is redundant but idempotent (no-op here).
for c in categorical_columns:
    new_data[c] = new_data[c].fillna(data_describe[c]['top'])
new_data.describe(include = [object])
new_data.dtypes
# Cast the release year to int (safe now: NaNs were median-filled above).
new_data['Year_of_Release'] = new_data['Year_of_Release'].astype(int)
new_data.dtypes
new_data.describe()
# Теперь можно перейти к построению каких-нибудь интересных зависимостей.
# Pairwise correlations between the numeric columns.
new_data.corr()
from pandas.plotting import scatter_matrix
# All-pairs scatter plots; low alpha makes dense regions visible.
scatter_matrix(new_data, alpha = .01, figsize = (30, 30))
pass
# User score vs. global sales (axis labels are in Russian by design).
plt.plot(new_data['Global_Sales'], new_data['User_Score'], 'o', alpha = 0.1)
plt.xlabel('Число продаж, млн ед.')
plt.ylabel('Пользовательский рейтинг')
plt.title('Рейтинг видеоигры')
pass
# User score vs. critic score — visibly the strongest relationship.
plt.plot(new_data['Critic_Score'], new_data['User_Score'], 'o', alpha = 0.1)
plt.xlabel('Рейтинг критиков')
plt.ylabel('Пользовательский рейтинг')
plt.title('Влияние рейтинга критиков на пользовательский рейтинг')
pass
# Создадим признак, исходя из следующих соображений:
#
# 10 - рейтинг больший 9.5
#
# 9 - рейтинг от 8.5 до 9.5
#
# 8 - рейтинг от 7.5 до 8.5
#
# 7 - рейтинг от 6.5 до 7.5
#
# ...
#
# 1 - рейтинг меньше 1.5
# Discretize User_Score into integer grades 1..10:
#   >= 9.5      -> 10
#   [8.5, 9.5)  -> 9
#   ...
#   < 1.5       -> 1
# i.e. round half up, clipped to [1, 10].  One vectorized pass replaces the
# original chain of ten copy-pasted .loc mask assignments and produces
# identical values (NaN would pass through unchanged, as before).
new_data['User_Score'] = np.clip(np.floor(new_data['User_Score'] + 0.5), 1, 10)
new_data.head()
# **Векторизация.**
#
# В нашем случае бинарных категориальных или числовых признаков нет.
#
# К небинарными признакам, которых у нас три, применим *метод векторизации*. Признак A, принимающий n значений, заменим на n признаков, принимащих значения 0 или 1, в зависимости от того, чему равно значение исходного признака A.
# Split categoricals into binary (2 levels) and non-binary (>2 levels) columns.
binary_columns = [c for c in categorical_columns if data_describe[c]['unique'] == 2]
nonbinary_columns = [c for c in categorical_columns if data_describe[c]['unique'] > 2]
print(binary_columns, nonbinary_columns)
print(numerical_columns, binary_columns, nonbinary_columns)
new_data['Platform'].unique()
new_data['Genre'].unique()
new_data['Rating'].unique()
# One-hot encode the non-binary categoricals (one 0/1 column per level).
data_nonbinary = pd.get_dummies(new_data[nonbinary_columns])
data_nonbinary.head()
# Standardize the numeric features (zero mean, unit std) for KNN, which is
# sensitive to feature scale.  The target User_Score is set aside first so it
# keeps its original 1..10 grades.
new_data.dtypes
data_numerical = new_data[numerical_columns]
data_numerical.describe()
User_Score = data_numerical['User_Score']
# Bug fix: the positional `axis` argument to drop() was deprecated and removed
# in pandas 2.0; drop(columns=...) is the equivalent, future-proof spelling.
data_numerical = data_numerical.drop(columns='User_Score')
data_numerical = (data_numerical - data_numerical.mean(axis = 0))/data_numerical.std(axis = 0)
data_numerical = data_numerical.assign(User_Score=User_Score.values)
data_numerical.describe()
data_nonbinary.describe()
User_Score.describe()
# Assemble the final modeling table: scaled numerics + dummies (+ binaries,
# an empty list for this dataset).
new_data = pd.concat((data_numerical, data_nonbinary, new_data[binary_columns]), axis = 1)
print(new_data.shape)
new_data.describe()
# Можно переходить к выполнению основных задач курса по "Машинному обучению".
#
# # Обучение на тестовой выборке
#
# Выбросим столбец с пользовательскими оценками.
# Target/feature split: predict the discretized user score from everything else.
y = new_data['User_Score']
X = new_data.drop(columns='User_Score')
feature_names = X.columns
feature_names
# Dataset dimensions.
print(X.shape)
print(y.shape)
N, d = X.shape
type(X)
type(y)
# +
from sklearn.model_selection import train_test_split
# Hold out 30% of the rows for testing; a fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
N_train = X_train.shape[0]
N_test = X_test.shape[0]
print(N_train, N_test)
# -
N_train + N_test
# В scikit-learn реализованы многие методы машинного обучения. Нас интересует метод ближайщих соседей. Он расположен в классе sklearn.neighbors.KNeighborsClassifier.
#
# Применим **МЕТОД БЛИЖАЙШИХ СОСЕДЕЙ** к обучающей выборке.
X_train.head()
y_train.head()
# +
from sklearn.neighbors import KNeighborsClassifier
# Baseline k-NN classifier with an arbitrary k = 5.
knn = KNeighborsClassifier(n_neighbors = 5)
#knn.set_params(n_neighbors=10)
# Fit on the training split.
knn.fit(X_train, y_train)
# +
# Misclassification rate on both splits (fraction of wrong labels).
y_train_predict = knn.predict(X_train)
y_test_predict = knn.predict(X_test)
err_train = np.mean(y_train != y_train_predict)
err_test = np.mean(y_test != y_test_predict)
print(err_train, err_test)
# -
# Alternative way to compute the test error via accuracy.
err_test = 1 - knn.score(X_test, y_test)
err_test
# **Переобучение**! Ошибка на тестовой выборке больше, чем на обучающей.
#
# Осуществим попытку подбора параметров.
# +
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import GridSearchCV
# 10-fold cross-validated search for the best number of neighbors.
nnb = [1, 3, 5, 10, 15, 20, 25, 35, 45, 55]
knn = KNeighborsClassifier()
grid = GridSearchCV(knn, param_grid = {'n_neighbors': nnb}, cv=10)
grid.fit(X_train, y_train)
best_cv_err = 1 - grid.best_score_
best_n_neighbors = grid.best_estimator_.n_neighbors
print(best_cv_err, best_n_neighbors)
# +
# Refit with the tuned k and report train/test error.
knn = KNeighborsClassifier(n_neighbors = best_n_neighbors).fit(X_train, y_train)
err_train = np.mean(y_train != knn.predict(X_train))
err_test = np.mean(y_test != knn.predict(X_test))
print(err_train, err_test)
# -
# Bug fix: refresh the test predictions with the tuned model — the notebook
# previously reused y_test_predict from the unoptimized 5-NN fit, so the
# confusion matrix and classification report described the wrong model.
y_test_predict = knn.predict(X_test)
from sklearn.metrics import confusion_matrix
# Bug fix: sklearn's convention is confusion_matrix(y_true, y_pred); the
# arguments were transposed, silently flipping rows and columns.
print(confusion_matrix(y_test, y_test_predict))
from sklearn.metrics import classification_report
print(classification_report(y_test, y_test_predict))
# **------------------------------------ Конец выполнения задания №1 ------------------------------------**
# Применим теперь **PCA** для сокращения размерности пространства признаков и построим графики **зависимости ошибок от числа компонент**.
# +
from sklearn import decomposition
from sklearn.neighbors import KNeighborsClassifier
# Sweep the number of principal components and record k-NN error on both splits.
components = range(1, 30)
err_test = []
err_train = []
for component in components:
    pca = decomposition.PCA(n_components = component)
    # Fit PCA on the training split only; the test split is merely projected.
    X_train_reduc = pca.fit_transform(X_train)
    X_test_reduc = pca.transform(X_test)
    # Use the optimal neighbor count (45) found by the grid search above.
    knn = KNeighborsClassifier(n_neighbors = 45).fit(X_train_reduc, y_train)
    err_train.append(np.mean(y_train.values.ravel() != knn.predict(X_train_reduc)))
    err_test.append(np.mean(y_test.values.ravel() != knn.predict(X_test_reduc)))
# -
plt.plot(components, err_train, 'b-o', label = 'train')
plt.plot(components, err_test, 'r-o', label = 'test')
# NOTE: the x-axis is deliberately reversed (max -> min), as elsewhere in the notebook.
plt.xlim([np.max(components), np.min(components)])
plt.title('Error vs. Main components')
plt.xlabel('Count of components')
plt.ylabel('Error')
plt.legend()
pass
# Minimum error reached on each split.
min_train_err = np.min(err_train)
min_test_err = np.min(err_test)
print(min_train_err, min_test_err)
# No overfitting here.  Print the component count (1-based) with minimal test error.
print(err_test.index(np.min(err_test)) + 1)
# Ранее используя метод ближайших соседей мы получили такие ошибки:
#
# 0.2783901563701615 0.2844896331738437
#
# Можно заметить, что ошибки незначительно уменьшились и приблизились друг к другу и даже исчез эффект переобучения. Можно заключить, что для данного метода сокращение размерности пространства признаков особого эффекта не дало, хотя и позволило избавиться от переобучения.
# Применим теперь метод **СЛУЧАЙНЫХ ДЕРЕВЬЕВ (RANDOM FOREST)**.
# +
from sklearn import ensemble
# Random forest baseline with 1000 trees.
rf = ensemble.RandomForestClassifier(n_estimators = 1000)
rf.fit(X_train, y_train)
err_train = np.mean(y_train != rf.predict(X_train))
err_test = np.mean(y_test != rf.predict(X_test))
print(err_train, err_test)
# -
# Test error is still high and the train/test gap shows strong overfitting.
# Feature importances sum to 1 by construction.
np.sum(rf.feature_importances_)
# +
# Rank all d features by importance, most important first.
importances = rf.feature_importances_
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
for f in range(d):
    print("%2d. feature '%5s' (%f)" % (f + 1, feature_names[indices[f]], importances[indices[f]]))
# -
# Stray demo of fancy indexing on the feature index (leftover debug cell).
feature_names[[1,0,1]]
# Bar chart of the top-20 features by importance.
d_first = 20
plt.title("Feature importances")
plt.bar(range(d_first), importances[indices[:d_first]], align = 'center', color = 'r')
plt.xticks(range(d_first), feature_names[indices[:d_first]], rotation = 90)
plt.xlim([-1, d_first])
# Можно сделать вывод, что наибольший влияние на пользовательский рейтинг оказывают такие параметры как оценка критиков, общее число продаж и год выпуска.
#
# Вернёмся к проблеме переобучения при использовании этого метода. Обычно случайный лес не переобучается, но тут явно ошибка на обучающей выборке значительно меньше ошибки на тестовой выборке. Попробуем подобрать более удачное число деревьев, как ранее мы подбирали убачное количество главных компонент для метода ближайших соседий. Затем построим зависимость числа ошибок на тестовой выборке и обучающей выборке в зависимости от числа деревьев. Также необходимо построить OOB-ошибку.
# +
# Sweep the forest size and track train/test error plus the out-of-bag error.
trees_arr = np.arange(50, 200, 5)
test_err = []
train_err = []
oob_err = []
for trees in trees_arr:
    # bootstrap=True is required for oob_score to be computed.
    rf = ensemble.RandomForestClassifier(bootstrap=True, n_estimators = trees, oob_score=True)
    rf.fit(X_train, y_train)
    train_err.append(np.mean(y_train != rf.predict(X_train)))
    test_err.append(np.mean(y_test != rf.predict(X_test)))
    oob_err.append(1 - rf.oob_score_)
plt.plot(trees_arr, train_err, 'b-o', label = 'train')
plt.plot(trees_arr, test_err, 'r-o', label = 'test')
plt.xlim([np.max(trees_arr), np.min(trees_arr)])
plt.title('Error vs. Trees')
plt.xlabel('Trees')
plt.ylabel('Error')
plt.legend()
pass
# -
# OOB error alongside the test error — they should track each other.
plt.plot(trees_arr, test_err, 'r-o', label = 'test')
plt.plot(trees_arr, oob_err, 'g-o', label = 'oob')
plt.xlabel('Trees')
plt.ylabel('Error')
plt.title('Error vs. Trees')
plt.legend()
pass
# Tuning the tree count did not remove the overfitting.  Minimal errors:
min_train_err = np.min(train_err)
min_test_err = np.min(test_err)
print(min_train_err, min_test_err)
# Tree count at which the minimal test error is reached.
# (test_err == min_test_err broadcasts to a boolean mask via the numpy scalar.)
trees_opt = trees_arr[test_err == min_test_err]
print(trees_opt[0])
# **------------------------------------ Конец выполнения задания №2 ------------------------------------**
#
# Сделайте общие выводы по всем лабам. Какой метод оказался лучшим. Почему?
# Попробуем теперь обучить на наших данных **полносвязную нейронную сеть**.
# Начнём с **ОДНОСЛОЙНОЙ СЕТИ**.
# Пусть в скрытом слое находится 100 нейронов.
# +
from sklearn.neural_network import MLPClassifier
# Fully connected net with ONE hidden layer of 100 neurons.
mlp_model = MLPClassifier(hidden_layer_sizes = (100,), solver = 'lbfgs',
                          activation = 'logistic', random_state = 42)
mlp_model.fit(X_train, y_train)
y_train_pred = mlp_model.predict(X_train)
y_test_pred = mlp_model.predict(X_test)
# -
# Error rates on both splits, then the complementary accuracy scores.
print(np.mean(y_train != y_train_pred), np.mean(y_test != y_test_pred))
print(mlp_model.score(X_train, y_train), mlp_model.score(X_test, y_test))
# Errors are close; the slightly higher test error hints at mild overfitting.
# Now TWO hidden layers of 100 neurons each.
# +
mlp_model = MLPClassifier(hidden_layer_sizes = (100, 100), solver = 'lbfgs',
                          activation = 'logistic', random_state = 42)
mlp_model.fit(X_train, y_train)
y_train_pred = mlp_model.predict(X_train)
y_test_pred = mlp_model.predict(X_test)
# -
print(np.mean(y_train != y_train_pred), np.mean(y_test != y_test_pred))
# Ошибки снова близки, более того переобучение больше не наблюдается. Попробуем ещё немного улучшить сеть с двумя скрытыми слоями через подбор параметра регуляризации.
# **Нахождение оптимального параметра alpha**. Подберем оптимальное значение параметра регуляризации alpha и построим графики зависимости ошибки на обучающей выборке и тестовой выборках от параметра alpha и от количества нейронов.
# Tune the L2 regularization strength over a log-spaced grid of 20 alphas.
alpha_arr = np.logspace(-3, 1, 20)
alpha_arr
test_err = []
train_err = []
# +
from sklearn.neural_network import MLPClassifier
# One loop over the grid replaces twenty copy-pasted cells that each
# hard-coded a (rounded) value of alpha_arr; the fit/predict/record logic
# is identical for every alpha.
for alpha in alpha_arr:
    mlp_model = MLPClassifier(alpha = alpha, hidden_layer_sizes = (100, 100),
                              solver = 'lbfgs', activation = 'logistic', random_state = 42)
    mlp_model.fit(X_train, y_train)
    y_train_pred = mlp_model.predict(X_train)
    y_test_pred = mlp_model.predict(X_test)
    train_err.append(np.mean(y_train != y_train_pred))
    test_err.append(np.mean(y_test != y_test_pred))
# -
# Bug fix: the legend labels were swapped (train_err was labeled 'test' and
# test_err labeled 'train'), inverting the reader's conclusions.
plt.semilogx(alpha_arr, train_err, 'b-o', label = 'train')
plt.semilogx(alpha_arr, test_err, 'r-o', label = 'test')
plt.xlim([np.max(alpha_arr), np.min(alpha_arr)])
plt.title('Error vs. alpha')
plt.xlabel('Alpha')
plt.ylabel('Error')
plt.legend()
pass
# Minimal error on each split.
min_train_err = np.min(train_err)
min_test_err = np.min(test_err)
print(min_train_err, min_test_err)
# Alpha value(s) achieving the minimal test error (boolean-mask indexing).
alpha_opt = alpha_arr[test_err == min_test_err]
print(alpha_opt[0])
alpha_opt = alpha_opt[0]
# +
#neirons = [10, 50, 100, 150, 200, 250, 300]
#test_err = []
#train_err = []
#for neiron in neirons:
# mlp_model = MLPClassifier(alpha = alpha_opt, hidden_layer_sizes = (neiron, neiron),
# solver = 'lbfgs', activation = 'logistic', random_state = 42)
# mlp_model.fit(X_train, y_train)
#
# y_train_pred = mlp_model.predict(X_train)
# y_test_pred = mlp_model.predict(X_test)
# train_err.append(np.mean(y_train != y_train_pred))
# test_err.append(np.mean(y_test != y_test_pred))
#plt.semilogx(alpha_arr, train_err, 'b-o', label = 'test')
#plt.semilogx(alpha_arr, test_err, 'r-o', label = 'train')
#plt.xlim([np.max(alpha_arr), np.min(alpha_arr)])
#plt.title('Error vs. alpha')
#plt.xlabel('Alpha')
#plt.ylabel('Error')
#plt.legend()
#pass
# -
# Посмотрим теперь на классификатор с оптимальным параметром.
# +
# Final two-layer net refit with the tuned regularization strength.
mlp_model = MLPClassifier(alpha = alpha_opt, hidden_layer_sizes = (100, 100),
                          solver = 'lbfgs', activation = 'logistic', random_state = 42)
mlp_model.fit(X_train, y_train)
y_train_pred = mlp_model.predict(X_train)
y_test_pred = mlp_model.predict(X_test)
print(np.mean(y_train != y_train_pred), np.mean(y_test != y_test_pred))
# -
# Ошибка на тестовой выборке оказалась немного больше ошибки на обучающей, имеет место переобучение.
# Так как метод Random Forest я рассмотрела ранее, сейчас мы рассмотрим вместо него метод **Extremely Random Forest**.
# +
from sklearn import ensemble
# Extremely Randomized Trees baseline with 100 trees.
ert = ensemble.ExtraTreesClassifier(n_estimators = 100)
ert.fit(X_train, y_train)
err_train = np.mean(y_train != ert.predict(X_train))
err_test = np.mean(y_test != ert.predict(X_test))
print(err_train, err_test)
# -
# Снова видим переобучение, причём разница между ошибками велика.
#
# Построим графики ошибок на тестовой выборке, обучающей выборке и OOB-ошибку в зависимости от числа деревьев.
# Sweep the ensemble size for ExtraTrees, tracking train/test and OOB error.
trees_arr = np.arange(50, 500, 10)
test_err = []
train_err = []
oob_err = []
for trees in trees_arr:
    # bootstrap=True is required for the OOB estimate (ExtraTrees defaults to False).
    ert = ensemble.ExtraTreesClassifier(n_estimators = trees, oob_score=True, bootstrap=True)
    ert.fit(X_train, y_train)
    train_err.append(np.mean(y_train != ert.predict(X_train)))
    test_err.append(np.mean(y_test != ert.predict(X_test)))
    oob_err.append(1 - ert.oob_score_)
plt.plot(trees_arr, train_err, 'b-o', label = 'train')
plt.plot(trees_arr, test_err, 'r-o', label = 'test')
plt.xlim([np.max(trees_arr), np.min(trees_arr)])
plt.title('Error vs. Trees')
plt.xlabel('Trees')
plt.ylabel('Error')
plt.legend()
pass
# Test vs. OOB error on the same axes.
plt.plot(trees_arr, test_err, 'r-o', label = 'test')
plt.plot(trees_arr, oob_err, 'g-o', label = 'oob')
plt.xlabel('Trees')
plt.ylabel('Error')
plt.title('Error vs. Trees')
plt.legend()
pass
# Strong overfitting persists for every tree count in [50, 500).  Best case:
min_train_err = np.min(train_err)
min_test_err = np.min(test_err)
print(min_train_err, min_test_err)
# Tree count achieving the minimal test error.
trees_opt = trees_arr[test_err == min_test_err]
print(trees_opt[0])
# Наконец, рассмотрим **Gradient boosted trees**.
# +
# Gradient boosted trees baseline with 100 boosting stages.
gbt = ensemble.GradientBoostingClassifier(n_estimators = 100)
gbt.fit(X_train, y_train)
err_train = np.mean(y_train != gbt.predict(X_train))
err_test = np.mean(y_test != gbt.predict(X_test))
print(err_train, err_test)
# -
# Decent result with mild overfitting; sweep the stage count to probe it.
trees_arr = np.arange(5, 100, 5)
test_err = []
train_err = []
for trees in trees_arr:
    gbt = ensemble.GradientBoostingClassifier(n_estimators = trees)
    gbt.fit(X_train, y_train)
    train_err.append(np.mean(y_train != gbt.predict(X_train)))
    test_err.append(np.mean(y_test != gbt.predict(X_test)))
plt.plot(trees_arr, train_err, 'b-o', label = 'train')
plt.plot(trees_arr, test_err, 'r-o', label = 'test')
plt.xlim([np.max(trees_arr), np.min(trees_arr)])
plt.title('Error vs. Trees')
plt.xlabel('trees')
plt.ylabel('error')
plt.legend()
pass
# Errors drop as stages grow, but beyond ~10 stages the train/test gap widens
# (overfitting increases with model complexity).  Minimal errors reached:
min_train_err = np.min(train_err)
min_test_err = np.min(test_err)
print(min_train_err, min_test_err)
# Stage count achieving the minimal test error.
trees_opt = trees_arr[test_err == min_test_err]
print(trees_opt[0])
# Попробуем немного интерпретировать результат классификации.
# +
# Interpret the boosted model: rank the top-10 features by importance.
importances = gbt.feature_importances_
indices = np.argsort(importances)[::-1]
print("Feature user score:")
for f in range(min(d, 10)):
    print("%2d. feature '%5s' (%f)" % (f + 1, feature_names[indices[f]], importances[indices[f]]))
# -
# Bar chart of the top features (at most 4).
d_first = min(d, 4)
plt.title("Feature importances")
plt.bar(range(d_first), importances[indices[:d_first]], align = 'center', color = 'r')
plt.xticks(range(d_first), feature_names[indices[:d_first]], rotation = 90)
plt.xlim([-1, d_first])
# Можно сделать вывод, что пользовательский рейтинг по нашему набору данных определяется в основном по оценкам критиков и (!неожиданно) по возрастному рейтингу.
# Наконец все необходимые методы были применены. Можно заключить, что для наших данных наилучший результат был достигнут при использовании двуслойной нейронной сети со 100 нейронами на каждом скрытом слое.
# Также неплохой результат поволило достичь применение РСА к методу ближайших соседий. В данном примере мы также избавились от эффекта переобучения.
|
ML_final_ver_another_data_.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### KMeans Clustering of Netflix Movie Data
#
# Project Leader: <NAME>
# Project Git Repo: [Netflix Study](https://github.com/stevenbowler/netflixstudy)
#
# ##### Go to very bottom of file for heatmap of clusters for 0s and 1s as per homework assignment
#
# Current State of Analysis: The full dataframe/matrix df_p is loaded here but due to its size, 144380 x 5334, KMeans clustering took 24+ hours and did not finish; therefore, to prove out the KMeans below, used df_p_short which just has the first 1,000 rows of df_p. Then for display of the 5 clusters at the bottom of this file, used heatmaps of 70 users x 50 movies.
#
# Next steps: Evaluating cloud options: Google, AWS, Azure; to acquire necessary processing and memory to handle the full dataset.
#
# Attribution: Used helper function to display heat map [from here](https://programming.rhysshea.com/K-means_movie_ratings/)
#
# Next Steps: Evaluating use of collaborative filtering to create recommender table, something like [this](https://github.com/anjanatiha/Movie-Recommendation-Engine-using-User-Based-Collaborative-Filtering)
#
# Need to fire up Jupyter with this to be able to load df_p with its almost 700MM nodes:
#
# <font color=red>jupyter notebook --NotebookApp.iopub_data_rate_limit=1e10
# Import necessary modules
import pandas as pd
import numpy as np
from pandas_profiling import ProfileReport
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.cluster import KMeans
from scipy.sparse import csr_matrix
# Load the pre-built user x movie ratings matrix (full size ~144380 x 5334 per notes above).
df_p = pd.read_csv('../data/processed/df_p.csv') # this was created in netflixstudySecondaryEVA.ipynb
# Only the first 1000 rows are used here so KMeans finishes in reasonable time.
df_p_short = df_p.head(1000)
df_p_short
# create sparse array for load to KMeans, won't run with np.NaN
# sparse_ratings = csr_matrix(pd.DataFrame(df_p).sparse.to_coo())
sparse_ratings = csr_matrix(df_p_short.values)
sparse_ratings.shape
# convert to int to stop crashing in KMeans, convert sparse array from float64 to int, also due to KMeans memory overflow
# with float64
sparse_ratings_int = sparse_ratings.astype(int)
# NOTE(review): algorithm='full' is deprecated in newer scikit-learn (renamed 'lloyd') — verify installed version.
predictions = KMeans(n_clusters=5, algorithm='full').fit_predict(sparse_ratings_int)
# +
# # %load ../src/features/rating_helper.py
# # %load ../src/features/rating_helper.py
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error
import itertools
from sklearn.metrics import silhouette_samples, silhouette_score
def draw_scatterplot(x_data, x_label, y_data, y_label):
    """Scatter-plot two rating series on a fixed 0-5 square with the given axis labels."""
    figure = plt.figure(figsize=(8, 8))
    axes = figure.add_subplot(111)
    plt.xlim(0, 5)
    plt.ylim(0, 5)
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
    axes.scatter(x_data, y_data, s=30)
# plots clusters (with colour) based on the predictions made using the fit_predict method
def draw_clusters(biased_dataset, predictions, cmap='viridis'):
    """Plot each user's (scifi, romance) average ratings, coloured by predicted cluster id."""
    figure = plt.figure(figsize=(8, 8))
    axes = figure.add_subplot(111)
    plt.xlim(0, 5)
    plt.ylim(0, 5)
    axes.set_xlabel('Avg scifi rating')
    axes.set_ylabel('Avg romance rating')
    # Attach each row's cluster label as a 'group' column before plotting.
    labelled = pd.concat([biased_dataset.reset_index(),
                          pd.DataFrame({'group': predictions})], axis=1)
    plt.scatter(labelled['avg_scifi_rating'], labelled['avg_romance_rating'],
                c=labelled['group'], s=20, cmap=cmap)
def clustering_errors(k, data):
    """Fit KMeans with `k` clusters and score the labelling by mean silhouette coefficient."""
    model = KMeans(n_clusters=k).fit(data)
    labels = model.predict(data)
    return silhouette_score(data, labels)
def sparse_clustering_errors(k, data):
    """Fit KMeans with `k` clusters and return the summed MSE of each point to its centroid."""
    model = KMeans(n_clusters=k).fit(data)
    labels = model.predict(data)
    centers = model.cluster_centers_
    return sum(mean_squared_error(point, centers[label])
               for point, label in zip(data, labels))
def get_genre_ratings(ratings, movies, genres, column_names):
    """Return one column per genre with each user's mean rating (rounded to 2 dp).

    `genres` are substring-matched against the movies' 'genres' field;
    `column_names` relabels the resulting columns in the same order.
    """
    per_genre = []
    for genre in genres:
        matching_movies = movies[movies['genres'].str.contains(genre)]
        in_genre = ratings[ratings['movieId'].isin(matching_movies['movieId'])]
        per_genre.append(
            in_genre.loc[:, ['userId', 'rating']]
                    .groupby(['userId'])['rating'].mean().round(2))
    genre_ratings = pd.concat(per_genre, axis=1) if per_genre else pd.DataFrame()
    genre_ratings.columns = column_names
    return genre_ratings
def get_dataset_3(movies, ratings, genre_ratings):
    """Extend romance/scifi genre ratings with an action column and return a biased sample.

    Assumes `genre_ratings` has exactly two columns (romance then scifi), indexed by user.
    """
    # Per-user mean rating over movies whose genre string contains 'Action'.
    action_movies = movies[movies['genres'].str.contains('Action')]
    action_ratings = ratings[ratings['movieId'].isin(action_movies['movieId'])]
    avg_action = (action_ratings.loc[:, ['userId', 'rating']]
                  .groupby(['userId'])['rating'].mean().round(2))
    with_action = pd.concat([genre_ratings, avg_action], axis=1)
    with_action.columns = ['avg_romance_rating',
                           'avg_scifi_rating', 'avg_action_rating']
    # Bias the sample so scifi-vs-romance clusters separate more easily.
    upper, lower = 3.2, 2.5
    prefers_scifi = (with_action['avg_romance_rating'] < upper - 0.2) & (with_action['avg_scifi_rating'] > lower)
    prefers_romance = (with_action['avg_scifi_rating'] < upper) & (with_action['avg_romance_rating'] > lower)
    biased = with_action[prefers_scifi | prefers_romance]
    biased = pd.concat([biased[:300], with_action[:2]])
    return pd.DataFrame(biased.to_records())
def draw_clusters_3d(biased_dataset_3, predictions):
    """Plot (scifi, romance) points per cluster; marker size encodes the action rating.

    `predictions` is the per-row cluster label (e.g. from KMeans.fit_predict);
    points with avg action rating > 3 are drawn larger (s=50 vs s=15).
    """
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    plt.xlim(0, 5)
    plt.ylim(0, 5)
    ax.set_xlabel('Avg scifi rating')
    ax.set_ylabel('Avg romance rating')
    # Attach cluster labels as a 'group' column.
    clustered = pd.concat(
        [biased_dataset_3.reset_index(), pd.DataFrame({'group': predictions})], axis=1)
    # Cycle through matplotlib's default colour cycle, one colour per cluster.
    colors = itertools.cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])
    for g in clustered.group.unique():
        color = next(colors)
        # NOTE(review): per-point scatter calls are slow for large frames; one call
        # per cluster with an s= array would be equivalent — confirm before changing.
        for index, point in clustered[clustered.group == g].iterrows():
            if point['avg_action_rating'].astype(float) > 3:
                size = 50
            else:
                size = 15
            plt.scatter(point['avg_scifi_rating'],
                        point['avg_romance_rating'],
                        s=size,
                        color=color)
def draw_movie_clusters(clustered, max_users, max_movies):
    """Draw one ratings heatmap per cluster (clusters with <= 9 users are skipped).

    `clustered` must contain 'Cust_Id' and 'group' columns plus one column per movie;
    each cluster's submatrix is density-sorted and truncated to max_users x max_movies.
    """
    # `c` was part of a disabled "show only 5 clusters" mechanism (see bottom); unused.
    c = 1
    for cluster_id in clustered.group.unique():
        # To improve visibility, we're showing at most max_users users and max_movies movies per cluster.
        # You can change these values to see more users & movies per cluster
        d = clustered[clustered.group == cluster_id].drop(
            ['Cust_Id', 'group'], axis=1)
        n_users_in_cluster = d.shape[0]
        # Order rows/columns so the densest (most-rated) appear first.
        d = sort_by_rating_density(d, max_movies, max_users)
        # d = d.reindex_axis(d.mean().sort_values(ascending=False).index, axis=1) # commented out by SB 20oct2020
        # d = d.reindex_axis(d.count(axis=1).sort_values(ascending=False).index)
        d = d.iloc[:max_users, :max_movies]
        n_users_in_plot = d.shape[0]
        # We're only selecting to show clusters that have more than 9 users, otherwise, they're less interesting
        if len(d) > 9:
            print('cluster # {}'.format(cluster_id))
            print('# of users in cluster: {}.'.format(n_users_in_cluster),
                  '# of users in plot: {}'.format(n_users_in_plot))
            fig = plt.figure(figsize=(15, 4))
            ax = plt.gca()
            ax.invert_yaxis()
            ax.xaxis.tick_top()
            # Truncate long movie titles so the tick labels stay readable.
            labels = d.columns.str[:40]
            ax.set_yticks(np.arange(d.shape[0]), minor=False)
            ax.set_xticks(np.arange(d.shape[1]), minor=False)
            ax.set_xticklabels(labels, minor=False)
            ax.get_yaxis().set_visible(False)
            # Heatmap
            heatmap = plt.imshow(d, vmin=0, vmax=5, aspect='auto')
            ax.set_xlabel('movies')
            ax.set_ylabel('User id')
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            # Color bar
            cbar = fig.colorbar(heatmap, ticks=[5, 4, 3, 2, 1, 0], cax=cax)
            cbar.ax.set_yticklabels(
                ['5 stars', '4 stars', '3 stars', '2 stars', '1 stars', '0 stars'])
            plt.setp(ax.get_xticklabels(), rotation=90, fontsize=9)
            # NOTE(review): newer matplotlib expects booleans, not 'off', for these
            # tick_params arguments — verify against the pinned matplotlib version.
            plt.tick_params(axis='both', which='both', bottom='off',
                            top='off', left='off', labelbottom='off', labelleft='off')
            # print('cluster # {} \n(Showing at most {} users and {} movies)'.format(cluster_id, max_users, max_movies))
            plt.show()
            # Let's only show 5 clusters
            # Remove the next three lines if you want to see all the clusters
            # Contribution welcomed: Pythonic way of achieving this
            # c = c+1
            # if c > 6:
            # break
def get_most_rated_movies(user_movie_ratings, max_number_of_movies):
    """Return the `max_number_of_movies` columns (movies) with the most ratings.

    Columns are ordered by their non-NaN count, descending. Row labels are
    reset to 0..n-1 (this mirrors the original `ignore_index=True` behavior).

    Fix: the original used `DataFrame.append`, which was removed in pandas 2.0;
    the counts row is now stacked on with `pd.concat`.
    """
    # 1- Count: non-NaN ratings per movie, appended as an extra (last) row.
    rating_counts = user_movie_ratings.count()
    stacked = pd.concat([user_movie_ratings, rating_counts.to_frame().T],
                        ignore_index=True)
    # 2- sort: order columns by the counts row (index len-1), most-rated first.
    sorted_cols = stacked.sort_values(len(stacked) - 1, axis=1, ascending=False)
    # Drop the helper counts row again.
    sorted_cols = sorted_cols.drop(sorted_cols.tail(1).index)
    # 3- slice: keep only the most-rated movies.
    return sorted_cols.iloc[:, :max_number_of_movies]
def get_users_who_rate_the_most(most_rated_movies, max_number_of_movies):
    """Return the rows (users) with the most ratings, most active first.

    Note: despite its name, `max_number_of_movies` limits the number of *rows*
    returned (kept for interface compatibility with existing callers).

    Fix: the original added a temporary 'counts' column to the caller's
    DataFrame in place (mutating the argument and triggering
    SettingWithCopy warnings); counts are now computed in a separate Series.
    """
    # 1- Count non-NaN ratings per user (row).
    ratings_per_user = most_rated_movies.count(axis=1)
    # 2- Sort rows by that count, descending.
    ordered_index = ratings_per_user.sort_values(ascending=False).index
    # 3- Slice to the most active users; column set is returned unchanged.
    return most_rated_movies.loc[ordered_index].iloc[:max_number_of_movies, :]
def sort_by_rating_density(user_movie_ratings, n_movies, n_users):
    """Order the ratings matrix by densest movies first, then densest users."""
    densest_movies = get_most_rated_movies(user_movie_ratings, n_movies)
    return get_users_who_rate_the_most(densest_movies, n_users)
def draw_movies_heatmap(most_rated_movies_users_selection, axis_labels=True):
    """Show a single users-x-movies ratings heatmap (0-5 scale) with a star colorbar.

    When `axis_labels` is True, movie titles (truncated to 40 chars) label the
    columns and the frame's index labels the rows; otherwise both axes are hidden.
    """
    # Reverse to match the order of the printed dataframe
    #most_rated_movies_users_selection = most_rated_movies_users_selection.iloc[::-1]
    fig = plt.figure(figsize=(15, 4))
    ax = plt.gca()
    # Draw heatmap
    heatmap = ax.imshow(most_rated_movies_users_selection,
                        interpolation='nearest', vmin=0, vmax=5, aspect='auto')
    if axis_labels:
        ax.set_yticks(
            np.arange(most_rated_movies_users_selection.shape[0]), minor=False)
        ax.set_xticks(
            np.arange(most_rated_movies_users_selection.shape[1]), minor=False)
        ax.invert_yaxis()
        ax.xaxis.tick_top()
        # Truncate long movie titles so tick labels stay readable.
        labels = most_rated_movies_users_selection.columns.str[:40]
        ax.set_xticklabels(labels, minor=False)
        ax.set_yticklabels(
            most_rated_movies_users_selection.index, minor=False)
        plt.setp(ax.get_xticklabels(), rotation=90)
    else:
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    ax.grid(False)
    ax.set_ylabel('User id')
    # Separate heatmap from color bar
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    # Color bar
    cbar = fig.colorbar(heatmap, ticks=[5, 4, 3, 2, 1, 0], cax=cax)
    cbar.ax.set_yticklabels(
        ['5 stars', '4 stars', '3 stars', '2 stars', '1 stars', '0 stars'])
    plt.show()
# breaks the data up so we don't have both genres highly rates, more like one or the other
def bias_genre_rating_dataset(genre_ratings, score_limit_1, score_limit_2):
    """Keep users who clearly prefer one of the two genres, so clusters separate.

    A row is kept when it favours scifi (romance < limit1 - 0.2 and scifi > limit2)
    or favours romance (scifi < limit1 and romance > limit2). The first 300 biased
    rows are returned with the first two original rows re-appended, as a flat frame.
    """
    romance = genre_ratings['avg_romance_rating']
    scifi = genre_ratings['avg_scifi_rating']
    prefers_scifi = (romance < score_limit_1 - 0.2) & (scifi > score_limit_2)
    prefers_romance = (scifi < score_limit_1) & (romance > score_limit_2)
    biased = genre_ratings[prefers_scifi | prefers_romance]
    biased = pd.concat([biased[:300], genre_ratings[:2]])
    return pd.DataFrame(biased.to_records())
# -
import recommendation_helper2 # this works when recommendation_helper2.py is in this directory, don't need %load above
# +
max_users = 70
max_movies = 50
# not sure use df_p or sparse_ratings
# Attach the KMeans labels computed above to the 1000-row sample and plot per-cluster heatmaps.
clustered = pd.concat([df_p_short.reset_index(), pd.DataFrame({'group':predictions})], axis=1)
# helper.draw_movie_clusters(clustered, max_users, max_movies)
draw_movie_clusters(clustered, max_users, max_movies)
# -
# ### Restructure dataset to 0s and 1s for scatter plot and cluster
# create array of 0 and 1 for scatter plot
# Boolean matrix: True wherever a rating exists (not NaN).
array_df_p_not_nan = np.invert(np.isnan(df_p.values))
# this is the matrix of 0s and 1s called out in the assignment, will now manipulate
array_df_p_not_nan
# create array w dimensions of df_p where if np.NaN then False, else if !np.NaN then True
# NOTE(review): count_nan recomputes the exact same matrix as array_df_p_not_nan above.
count_nan = np.invert(np.isnan(df_p.values))
count_nan
# create dataframe with boolean matrix
df_p_not_nan = pd.DataFrame(count_nan)
# add Cust_Id as index
df_p_not_nan.set_index(df_p["Cust_Id"], inplace=True)
df_p_not_nan
df_p.columns
# set columns same as original df_p
df_p_not_nan.columns = df_p.columns
df_p_not_nan
# drop Cust_Id column
df_p_not_nan.drop('Cust_Id',axis=1, inplace=True)
# convert boolean to int
df_p_not_nan_int = df_p_not_nan.astype(int)
# dataframe per homework request for KMeans clustering where 1 = Movie_Id was rated by this Cust_Id
df_p_not_nan_int
# again due to size of dataset, use first 1000 rows, then run full dataset up on cloud with extra CPUs and Memory
df_p_short2 = df_p_not_nan_int.head(1000)
# ### <font color=red>Map clusters with only 0s and 1s (even though heatmap shows up to 5 stars, only 2
#
# BUG FIX: this assignment was commented out, so `predictions2` below raised a
# NameError; cluster the 0/1 rated-or-not matrix here before plotting.
predictions2 = KMeans(n_clusters=5, algorithm='full').fit_predict(df_p_short2)
# +
# same as with ratings, show cluster heatmaps for 70 users and 50 movies
max_users = 70
max_movies = 50
# not sure use df_p or sparse_ratings
clustered = pd.concat([df_p_short2.reset_index(), pd.DataFrame({'group':predictions2})], axis=1)
# helper.draw_movie_clusters(clustered, max_users, max_movies)
draw_movie_clusters(clustered, max_users, max_movies)
|
notebooks/netflixstudyEDAv3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MetAtlas 2.7
# language: python
# name: metatlas
# ---
# +
import warnings
warnings.filterwarnings('ignore')
import get_compounds_each_provider as import_compounds
import pandas as pd
import sys
sys.path.append('/global/project/projectdirs/openmsi/jupyterhub_libs/anaconda/lib/python2.7/site-packages')
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import rdMolDescriptors
import numpy as np
# -
# # WikiData
#
# From here: https://github.com/yuvipanda/python-wdqs/blob/master/wdqs/client.py
#
# ##Compound properties table
# From here: https://www.wikidata.org/wiki/Wikidata:List_of_properties/Terms
#
# 24305 compounds
#
# 15413 with a unique structural identifier
#
# Has relationships that link to the largest database on earth
#
# Often no structural description exists, but a PubChemCI does.
#
# Canonical column set shared across providers; every provider frame is cut down to these
# before being concatenated into the master `df`.
terms_to_keep = ['smiles','inchi','source_database','ROMol','common_name','Definition', 'synonyms','pubchem_compound_id','lipidmaps_id','metacyc_id','hmdb_id','img_abc_id','chebi_id','kegg_id']
import_compounds = reload(import_compounds)
wikidata = import_compounds.get_wikidata(terms_to_keep)
# Master compound table; each provider section below appends its rows.
df = wikidata[terms_to_keep]
wikidata.head()
# compoundLabel
# # Add the 590 MetAtlas compounds that are missing from all these databases
# +
# See below, I did it at the end.
# metatlas_added = pd.read_csv('metatlas_compounds_not_found.csv',index_col=0)
# metatlas_added = metatlas_added[['inchi','name']]
# df = pd.concat([df,metatlas_added], axis=0, ignore_index=True)
# metatlas_added.head()
# -
# # miBIG (using notebook and pubchem API)
# +
def strip_whitespace(x):
    """Trim leading and trailing whitespace from *x* (used to clean InChI strings)."""
    return x.lstrip().rstrip()
# Load the miBIG pickle (built in a separate notebook via the PubChem API) and append it.
miBIG = pd.read_pickle('miBIG.pkl')
miBIG = miBIG[~miBIG.common_name.str.contains('Status: 404')] #catch any defunct ids and throw them away here
miBIG.inchi = miBIG.inchi.apply(strip_whitespace)
miBIG['source_database'] = 'miBIG'
df = pd.concat([df,miBIG], axis=0, ignore_index=True)
miBIG.head()
# -
# # get names for ones that are missing names (determined from first pass through)
# +
# Names resolved from PubChem CIDs on a previous pass; same 404 filter and cleanup.
no_name_cid = pd.read_pickle('no_names_with_cid.pkl')
no_name_cid = no_name_cid[~no_name_cid.common_name.str.contains('Status: 404')] #catch any defunct ids and throw them away here
no_name_cid.inchi = no_name_cid.inchi.apply(strip_whitespace)
df = pd.concat([df,no_name_cid], axis=0, ignore_index=True)
no_name_cid.head()
# -
# # get names for missing from inchikey (no CID, determined from first pass through)
# +
# Names resolved from InChIKeys for rows that had no PubChem CID.
no_name_inchikey = pd.read_pickle('no_names_with_inchikey.pkl')
no_name_inchikey = no_name_inchikey[~no_name_inchikey.common_name.str.contains('Status: 404')] #catch any defunct ids and throw them away here
no_name_inchikey.inchi = no_name_inchikey.inchi.apply(strip_whitespace)
df = pd.concat([df,no_name_inchikey], axis=0, ignore_index=True)
no_name_inchikey.head()
# -
# # IMG-ABC
#
# https://img.jgi.doe.gov/cgi-bin/abc/main.cgi?section=NaturalProd&page=smListing
#
# Exported as tab delimited table
#
# 1109 Compounds
#
# 666 Compounds have a unique structure
#
#
# Append the IMG-ABC natural-products export (see counts in the markdown above).
img_abc = import_compounds.get_img(terms_to_keep)
df = pd.concat([df,img_abc[terms_to_keep]], axis=0, ignore_index=True)
img_abc.head()
# Secondary Metabolite (SM) Name
# SM ID
# # ENZO Library
#
# 502 compounds
#
# 501 with unique structures
#
# request table from here:
# http://www.enzolifesciences.com/BML-2865/screen-well-natural-product-library/
# BML-2865.xlsx
#
# I had to save as tab delimted text. The Excel JChem structures messed up the excel import.
enzo = import_compounds.get_enzo(terms_to_keep)
df = pd.concat([df,enzo[terms_to_keep]], axis=0, ignore_index=True)
enzo.head()
# Name
# # MSMLS-Library
import_compounds = reload(import_compounds)
msmls = import_compounds.get_msmls(terms_to_keep)
df = pd.concat([df,msmls[terms_to_keep]], axis=0, ignore_index=True)
msmls.keys()
msmls.head()# CNAME
# PC_CID
# # MetaCyc
#
# 12,370 compounds
#
# 11,256 compounds with unique structure
#
# Login as <EMAIL>
#
# Smart table is called: "MetAtlas Export MetaCyc Compounds"
#
# Has mapping to reactions
#
# Has compound ontology
#
# Save as spreadsheet from their website. Prior to saving, I deleted the first row because I had to open it in excel ad save as "xlsx" for pandas to read it. The first row has columns that are longer than Excel's maximum number of characters and they get wrapped into new rows.
#
# Append the MetaCyc smart-table export (see provenance notes in the markdown above).
import_compounds = reload(import_compounds)
metacyc = import_compounds.get_metacyc(terms_to_keep)
df = pd.concat([df,metacyc[terms_to_keep]], axis=0, ignore_index=True)
metacyc.head()
# KEGG
# PubChem
# Common-Name
# Names
# Object ID
# #Dr. Dukes Phytochemical and Ethnobotanical Database
#
# 29585 Compounds
#
# No structural information or ids from external databases.
# +
# dr_dukes = import_compounds.get_dr_dukes()
# -
# # LipidMaps
#
# 40716 compounds
#
# 40145 with a unique structural description
#
# wget http://lipidmaps.org/resources/downloads/LMSDFDownload28Jun15.tar.gz
#
# Its probably possible to get the enzyme-reaction mapping for these!
# Progress check (Python 2 print statements), then append the LipidMaps SDF download.
print df.shape
print df.source_database.unique().shape
print df.keys()
df.head()
import_compounds = reload(import_compounds)
lipid_maps = import_compounds.get_lipid_maps(terms_to_keep)
df = pd.concat([df,lipid_maps[terms_to_keep]], axis=0, ignore_index=True)
lipid_maps.head()
# PUBCHEM_CID
# KEGG_ID
# COMMON_NAME
# SYNONYMS
# ID
# # HMDB
#
# 41895
#
# 41722 neutral and unique structures
#
#
# wget http://www.hmdb.ca/system/downloads/current/structures.zip
#
# unzip to structures.sdf
#
# has calculated physical properties from jchem
#
# RDKit can't convert the mol for Cyanocobalmin and Hydroxocobalamin.
#
# Append the HMDB structures.sdf export (see caveats in the markdown above).
hmdb = import_compounds.get_hmdb(terms_to_keep)
df = pd.concat([df,hmdb[terms_to_keep]], axis=0, ignore_index=True)
hmdb.head()
# GENERIC_NAME
# SYNONYMS
# HMDB_ID
# # ChEMBL
#
# ftp://ftp.ebi.ac.uk/pub/databases/chembl/ChEMBLdb/releases/chembl_21/chembl_21.sdf.gz
# Quick 5 ppm mass-tolerance window around an example m/z value.
mz = 116.070608
delta_mz = mz*5/1e6
min_mz = mz - delta_mz
max_mz = mz + delta_mz
print mz
print delta_mz
print min_mz
print max_mz
# +
#This needs a metadata file to accompany it.
#chembl = pd.read_pickle('/project/projectdirs/openmsi/projects/compound_data/chembl/chembl.pkl')
#These are the fields in the pkl
#(1583839, 3)
#Index([u'ID', u'ROMol', u'chembl_id'], dtype='object')
# chembl = import_compounds.get_chembl()
# df = pd.concat([df,chembl[terms_to_keep]], axis=0, ignore_index=True)
# has 1.583 million compounds
# tackle this next time
# -
# # ChEBI
#
# wget ftp://ftp.ebi.ac.uk/pub/databases/chebi/SDF/ChEBI_complete_3star.sdf.gz
#
# wget ftp://ftp.ebi.ac.uk/pub/databases/chebi/SDF/ChEBI_complete.sdf.gz
#
# Append the ChEBI 3-star SDF export.
chebi = import_compounds.get_chebi(terms_to_keep)
df = pd.concat([df,chebi[terms_to_keep]], axis=0, ignore_index=True)
chebi.head()
# ChEBI Name
# ChEBI ID
# KEGG COMPOUND Database Links
# Synonyms
# # GNPS
#
# 9312 spectra
#
# 3971 chemical structures
#
# wget ftp://ccms-ftp.ucsd.edu/Spectral_Libraries/ALL_GNPS.mgf
#
# Many of the named compounds don't have structural identifiers or ids that map to other databases. Many of the structural identifiers are formatted wrong or in the wrong field (example: SMILES are used in the InChI field).
#
# Append the GNPS spectral-library metadata (structure fields are known to be messy; see above).
gnps = import_compounds.get_gnps(terms_to_keep)
df = pd.concat([df,gnps[terms_to_keep]], axis=0, ignore_index=True)
gnps.head()
# name
# # BNICE
#
# http://pubs.acs.org/doi/abs/10.1021/acssynbio.6b00054
#
# http://bioinformatics.oxfordjournals.org/content/21/8/1603.short
#
# SDF Files are here:
# http://lincolnpark.chem-eng.northwestern.edu/release/
# Snapshot the concatenated (not yet cleaned) table to pickle.
df.source_database.unique()
# # Load the dataframe and make an rdkit mol for each comound
df.to_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/compounds_fixedStereo_notchembl.pkl')
# +
# df = pd.read_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/compounds_fixedStereo_notchembl.pkl')
# -
# # Replace float(nan) with ''
# Replace float(nan) cells with None so string predicates below behave uniformly.
df = df.where((pd.notnull(df)), None)
# # Missing the "InChI=" at the start
#
# This is how most of the wikidata inchis are stored. GNPS has some like this too.
# NOTE(review): the chained `df['inchi'][idx] = ...` assignments below trigger
# SettingWithCopy warnings on modern pandas; .loc would be the safe form — verify
# before changing, since this notebook targets an old pandas/Python 2 stack.
idx = (df['inchi'].str.startswith('1')) & (df['inchi'])
print sum(idx)
print df.source_database[idx].unique()
df['inchi'][idx] = 'InChI=' + df['inchi']
# # Missing the "I" in "InChI"
idx = (df['inchi'].str.startswith('nChI')) & (df['inchi'])
print sum(idx)
print df.source_database[idx].unique()
df['inchi'][idx] = 'I' + df['inchi']
# #has an inchikey instead of inchi
idx = (df['inchi'].str.endswith('-N')) & (df['inchi'])
print sum(idx)
print df.source_database[idx].unique()
df['inchi'][idx] = ''
# # Has an "N/A"
idx = (df['inchi'].str.startswith('N/A')) & (df['inchi'])
print sum(idx)
print df.source_database[idx].unique()
df['inchi'][idx] = ''
idx = (df['smiles'].str.startswith('N/A')) & (df['smiles'])
print sum(idx)
print df.source_database[idx].unique()
df['smiles'][idx] = ''
# # Has something else in InChI field. Usually this is SMILES string
idx = (df['inchi'].str.contains('^((?!InChI).)*$')) & df['inchi']
print sum(idx)
print df.source_database[idx].unique()
df['smiles'][idx] = df['inchi'][idx]
df['inchi'][idx] = ''
df[idx].head()
# +
def make_mol_from_smiles_and_inchi(row):
    """Build an RDKit mol for rows lacking one, preferring InChI over SMILES.

    Returns the existing ROMol when present; otherwise a freshly parsed mol,
    or None when neither identifier parses (preserves the original fall-through).
    """
    existing = row['ROMol']
    if existing:
        return existing
    parsed = None
    if row['inchi']:
        parsed = Chem.MolFromInchi(row['inchi'].encode('utf-8'))
    elif row['smiles']:
        parsed = Chem.MolFromSmiles(row['smiles'].encode('utf-8'))
    if parsed:
        return parsed
# Fill in missing RDKit mols row-by-row (prefers InChI, falls back to SMILES).
df.ROMol = df.apply(make_mol_from_smiles_and_inchi, axis=1)
# -
# # Take a look at the rows that don't have an rdkit mol.
# * They don't have a SMILES or InChI
# * RDKit could not parse their description
# # Desalt and remove disconnected components
# +
def desalt_compounds_in_dataframe(x):
    """Apply import_compounds.desalt to a multi-atom RDKit mol.

    Returns the desalted mol when desalting changed it, the original mol when
    it did not, and None for missing or single-atom mols (the implicit
    fall-through of the original notebook, preserved here).
    """
    if not x:
        return None
    if x.GetNumAtoms() > 1:
        result = import_compounds.desalt(x)
        return result[0] if result[1] else x
    return None
df.ROMol = df.ROMol.apply(desalt_compounds_in_dataframe)
# -
# # neutralize those that are charged
# +
import_compounds = reload(import_compounds)
def neutralize_compounds_in_dataframe(x):
    """Neutralise a charged RDKit mol via import_compounds.NeutraliseCharges.

    Returns None when the mol is missing/empty or when neutralisation fails
    (the broad except mirrors the original best-effort behavior).
    """
    if not x or x.GetNumAtoms() <= 0:
        return None
    try:
        neutral = import_compounds.NeutraliseCharges(x)[0]
    except:
        return None
    return neutral if neutral else None
df.ROMol = df.ROMol.apply(neutralize_compounds_in_dataframe)
# +
def calculate_num_radicals_in_dataframe(x):
    """Radical-electron count for a mol; 0.0 when the mol is missing."""
    if not x:
        return 0.0
    return Descriptors.NumRadicalElectrons(x)
def calculate_formula_in_dataframe(x):
    """Molecular formula string; '' when the mol is missing."""
    if not x:
        return ''
    return rdMolDescriptors.CalcMolFormula(x)
def calculate_monoisotopic_mw_in_dataframe(x):
    """Exact (monoisotopic) molecular weight; 0.0 when the mol is missing."""
    if not x:
        return 0.0
    return Descriptors.ExactMolWt(x)
def calculate_inchi_in_dataframe(x):
    """InChI string for a mol; '' when missing or when RDKit can't kekulize it."""
    if not x:
        return ''
    try:
        return Chem.MolToInchi(x)
    except:
        return ''  # fails when the mol can't be kekulized
def calculate_flattened_inchi_in_dataframe(x):
    """InChI with stereochemistry stripped (via '@'-free SMILES round-trip)."""
    if not x:
        return ''
    flat_smiles = Chem.MolToSmiles(x).replace('@', '')
    flat_mol = Chem.MolFromSmiles(flat_smiles)
    try:
        return Chem.MolToInchi(flat_mol)
    except:
        return ''  # fails when the mol can't be kekulized
def calculate_inchikey_in_dataframe(x):
    """InChIKey from an InChI string; '' on failure (carbo-cations are the usual culprit)."""
    if not x:
        return ''
    try:
        return Chem.InchiToInchiKey(x)
    except:
        return ''
def calculate_charge_in_dataframe(x):
    """Formal charge of a mol; None (implicit) when the mol is missing."""
    if x:
        return Chem.GetFormalCharge(x)
df['charge'] = df.ROMol.apply(calculate_charge_in_dataframe)
df['formula'] = df.ROMol.apply(calculate_formula_in_dataframe)
df['monoisotopic_mw'] = df.ROMol.apply(calculate_monoisotopic_mw_in_dataframe)
df['num_radicals'] = df.ROMol.apply(calculate_num_radicals_in_dataframe)
df['metatlas_inchi'] = df.ROMol.apply(calculate_inchi_in_dataframe)
df['metatlas_inchi_key'] = df.metatlas_inchi.apply(calculate_inchikey_in_dataframe)
df['flat_inchi'] = df.ROMol.apply(calculate_flattened_inchi_in_dataframe)
df['flat_inchikey'] = df.flat_inchi.apply(calculate_inchikey_in_dataframe)
# -
# # make venn
# +
# dbs = df.source_database.unique().tolist()
# M = np.zeros((len(dbs),len(dbs)))
# for i in range(len(dbs)):
# # df1 = df.loc[df['source_database'] == dbs[i]]
# # M[i,0] = df1.shape[0]
# # print dbs
# for j in range(0,len(dbs)):
# #i is row, j is column
# u1 = df.loc[df['source_database'] == dbs[i],'metatlas_inchi_key'].unique().tolist()
# u2 = df.loc[df['source_database'] == dbs[j],'metatlas_inchi_key'].unique().tolist()
# u1u2 = set(u1).intersection(u2)
# M[i,j] = len(u1u2)
# M.astype(int)
# -
# # create consolidate table from unique compounds
# Checkpoint the descriptor-annotated table, then reload it (lets later cells rerun standalone).
df.to_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/all_stereo_compounds_temp.pkl')
# df.to_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/all_compounds_temp.pkl')
df = pd.read_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/all_stereo_compounds_temp.pkl')
df.source_database.unique()
# +
def strip_non_ascii(string):
    """Return *string* with every character outside the printable ASCII range (1-126) removed."""
    kept = [c for c in string if 0 < ord(c) < 127]
    return ''.join(kept)
def make_strings_consistent(x):
    """Normalise a name/synonym field: join list entries with '///' and strip
    non-ASCII characters; falsy values pass through unchanged."""
    if not x:
        return x
    if type(x) == list:
        return '///'.join([strip_non_ascii(s) for s in x])
    return strip_non_ascii(x)
# -
# Drop rows with no computed InChIKey (no usable structure).
df = df[df.metatlas_inchi_key != '']
# +
# compound.name = unicode(df_row.common_name, "utf-8",errors='ignore')
# def make_strings_consistent(x):
#     try:
#         if type(x) == list:
#             x = [str(s.encode('utf-8')) for x in x]
#         else:
#             x = str(x.encode('utf-8'))
#     except:
#         if type(x) == list:
#             x = [str(s) for x in x]
#         else:
#             x = str(x)
#     return x
# Normalise name fields to plain ASCII '///'-joined strings.
df['common_name'] = df['common_name'].apply(make_strings_consistent)
df['synonyms'] = df['synonyms'].apply(make_strings_consistent)
# -
# All columns except the structure fields are cast to str for the groupby-agg below.
my_keys = df.keys().tolist()
my_keys.remove('smiles')
my_keys.remove('inchi')
my_keys.remove('ROMol')
for k in my_keys:
    print k
    df[k] = df[k].astype(str)#apply(str)
# # Remove URLS from various fields throughout
# Strip HTML/XML tags (e.g. embedded links) from every retained text column.
for k in my_keys:
    print k
    df[k] = df[k].str.replace(r'<[^>]*>', '')
df.synonyms[100000]
# +
#TODO: define my_keys once! then list the ones to pull out that need special attention
# NOTE(review): my_agg_keys aliases my_keys (same list object), so these removals
# also mutate my_keys — harmless here since my_keys isn't reused, but fragile.
my_agg_keys = my_keys
my_agg_keys.remove('pubchem_compound_id') #has to be handled special...
my_agg_keys.remove('metatlas_inchi_key') #its the grouby key
my_agg_dict = {}
def pubchem_combine_fun(x):
    """Merge a group of PubChem CID strings into one '///'-joined string.

    Each value is normalised through float -> int -> str (so '123.0' and '123'
    collapse together); empty and 'None' entries are dropped and duplicates
    removed (set order, as in the original).
    """
    normalised = [str(int(float(d))) for d in x if d and d != 'None']
    return '///'.join(list(set(normalised)))
# Special-case CIDs; every other column just gets a deduplicated '///' join.
my_agg_dict['pubchem_compound_id'] = pubchem_combine_fun
for k in my_agg_keys:
    my_agg_dict[k] = lambda x: '///'.join(list(set([d for d in x if (d) and (d != '') and (d.lower() != 'none') and (d.lower() != 'nan')])))
# Collapse duplicate structures: one row per InChIKey with merged metadata.
gb = df.groupby('metatlas_inchi_key').agg(my_agg_dict)
gb.reset_index(inplace=True)
# import itertools
# import operator
# def most_common(dL):
# # get an iterable of (item, iterable) pairs
# L = dL.split('///')
# SL = sorted((x, i) for i, x in enumerate(L))
# # print 'SL:', SL
# groups = itertools.groupby(SL, key=operator.itemgetter(0))
# # auxiliary function to get "quality" for an item
# def _auxfun(g):
# item, iterable = g
# count = 0
# min_index = len(L)
# for _, where in iterable:
# count += 1
# min_index = min(min_index, where)
# # print 'item %r, count %r, minind %r' % (item, count, min_index)
# return count, -min_index
# # pick the highest-count/earliest item
# return max(groups, key=_auxfun)[0]
# def take_first_common_name(x):
# return sorted(x.split('///'),key=len)[0]
# gb['common_name'] = gb['common_name'].apply(take_first_common_name)
# gb['common_name'] = gb['common_name'].apply(most_common)
# -
gb.head(10)
gb.synonyms.unique()
# Export nameless compounds so their names can be fetched (by CID or InChIKey) offline.
no_name = gb[gb.common_name == '']
no_name[no_name.pubchem_compound_id != ''].pubchem_compound_id.to_csv('pubchem_cid_no_name.csv')
no_name[no_name.pubchem_compound_id == ''].metatlas_inchi_key.to_csv('inchi_key_no_name_no_pubchem.csv')
gb = pd.read_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/unique_compounds.pkl')
# # Add old MetAtlas compounds that are not in any of these databases
metatlas_not_found = pd.read_pickle('metatlas_compounds_not_found.pkl')
metatlas_not_found.drop('inchi',axis=1,inplace=True)
metatlas_not_found.rename(columns = {'name':'common_name'}, inplace = True)
metatlas_not_found['source_database'] = 'MetAtlas'
# Pad the MetAtlas frame with empty strings for every column it lacks, so concat aligns.
for c in set(gb.keys()) - set(metatlas_not_found.keys()):
    metatlas_not_found[c] = ''
metatlas_not_found.head()
print gb.keys()
print ' '
print metatlas_not_found.keys()
print ' '
print set(gb.keys()) - set(metatlas_not_found.keys())
combo = pd.concat([metatlas_not_found, gb], ignore_index=True)
print combo.shape, gb.shape, metatlas_not_found.shape
combo.shape
# Persist the final unique-compound table (pickle + two CSV copies).
combo.to_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/unique_compounds.pkl')
combo.to_csv('/project/projectdirs/metatlas/projects/uniquecompounds.csv')
combo.to_csv('uniquecompounds.csv')
# # Mass values are not high enough precision
# # Mass might not be includign the charge
# Recompute monoisotopic MW at full precision from the canonical InChI.
combo = pd.read_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/unique_compounds.pkl')
# NOTE(review): 'monoisotopoic_mw' is presumably the misspelled column name as
# stored in the pickle — verify before "fixing" the spelling here.
combo.drop('monoisotopoic_mw', axis=1, inplace=True)
def calculate_monoisotopic_mw_from_inchi(row):
    """Exact MW (as a string) parsed from the row's metatlas_inchi; '' when RDKit can't parse it."""
    mw = ''
    rdk_mol = Chem.MolFromInchi(row['metatlas_inchi'])
    if rdk_mol:
        mw = str(float(Descriptors.ExactMolWt(rdk_mol)))
    return mw
combo['monoisotopic_mw'] = combo.apply(calculate_monoisotopic_mw_from_inchi, axis=1)
combo.keys()
# Keep only rows whose InChI yielded a molecular weight.
combo = combo[(combo.monoisotopic_mw!='') ]
# +
# my_str = 'radical'
# gb[gb.common_name.str.contains(r'(?:\s|^)%s(?:\s|$)'%my_str,case=False,regex=True)].head(20)
# +
# from rdkit.Chem import Descriptors
# print Descriptors.NumRadicalElectrons(Chem.MolFromInchi('InChI=1S/C9H11NO3/c10-8(9(12)13)5-6-1-3-7(11)4-2-6/h1-4,8,11H,5,10H2,(H,12,13)/q+1'))
# print Descriptors.NumRadicalElectrons(Chem.MolFromInchi('InChI=1S/C40H46N8S4/c1-25-19-35-33(21-29(25)23-49-39(45(3)4)46(5)6)41-37(51-35)27-11-15-31(16-12-27)43-44-32-17-13-28(14-18-32)38-42-34-22-30(26(2)20-36(34)52-38)24-50-40(47(7)8)48(9)10/h11-22H,23-24H2,1-10H3/q+2/b44-43+'))
# +
# import sys
# sys.path.append('/global/project/projectdirs/openmsi/jupyterhub_libs/anaconda/lib/python2.7/site-packages')
# from rdkit import Chem
# import numpy as np
# import pandas as pd
# from rdkit.Chem import Draw
# from rdkit.Chem import PandasTools
# # %matplotlib inline
# inchis = ['InChI=1S/C3H7NO2/c1-2(4)3(5)6/h2H,4H2,1H3,(H,5,6)/p+1/t2-/m1/s1',
# 'InChI=1S/C18H18N4O6S4/c1-3-21-13-7-5-11(31(23,24)25)9-15(13)29-17(21)19-20-18-22(4-2)14-8-6-12(32(26,27)28)10-16(14)30-18/h5-10H,3-4H2,1-2H3,(H,23,24,25)(H,26,27,28)/q+1/p-1',
# 'InChI=1S/C3H7NO2.H3N/c1-2(4)3(5)6;/h2H,4H2,1H3,(H,5,6);1H3/p+1/t2-;/m0./s1',
# 'InChI=1S/C3H7NO2/c1-2(4)3(5)6/h2H,4H2,1H3,(H,5,6)/t2-/m0/s1',
# 'InChI=1S/C3H7NO2/c1-2(4)3(5)6/h2H,4H2,1H3,(H,5,6)/t2-/m1/s1']
# labels = ['D-Alanine Cation','2,2''-azino-bis-(3-ethylbenzothiazoline-6-sulfonate) radical cation', 'ammonium alanine','L-alanine','D-Alanine']
# +
# mols = [Chem.MolFromInchi(ii) for ii in inchis]
# de_salt = [m[0] for m in [desalt(m) for m in mols]]
# neutralized = [m[0] for m in [NeutraliseCharges(m) for m in de_salt]]
# # sm1 = Chem.MolToSmiles(m)#,isomericSmiles=True)
# # m2 = Chem.MolFromSmiles(sm1)
# # sm1 = Chem.MolToInchi(m2)
# # m3 = Chem.MolFromInchi(sm1)
# +
# Chem.Draw.MolsToGridImage(mols+de_salt+neutralized,
# legends = labels+['desalted %s'%m for m in labels]+['neutralized %s'%m for m in labels],
# molsPerRow = len(inchis))
# -
|
metatlas/interfaces/compounds/not_used/Get compounds from public databases.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CycleGAN model
#
# > Defines the CycleGAN model architecture.
# +
# default_exp models.cyclegan
# -
#export
from fastai.vision.all import *
from fastai.basics import *
from typing import List
from fastai.vision.gan import *
#hide
from upit.models.junyanz import define_G, define_D
#hide
from nbdev.showdoc import *
# We use the models that were introduced in the [cycleGAN paper](https://arxiv.org/abs/1703.10593).
# ## Generator
#export
def convT_norm_relu(ch_in:int, ch_out:int, norm_layer:nn.Module, ks:int=3, stride:int=2, bias:bool=True):
    "Return an upsampling group `[ConvTranspose2d -> norm -> ReLU]` as a list of modules."
    # output_padding=1 so a stride-2 transposed conv exactly doubles the spatial size
    upsample = nn.ConvTranspose2d(ch_in, ch_out, kernel_size=ks, stride=stride,
                                  padding=1, output_padding=1, bias=bias)
    return [upsample, norm_layer(ch_out), nn.ReLU(True)]
show_doc(convT_norm_relu,title_level=3)
#export
def pad_conv_norm_relu(ch_in:int, ch_out:int, pad_mode:str, norm_layer:nn.Module, ks:int=3, bias:bool=True,
                       pad=1, stride:int=1, activ:bool=True, init=nn.init.kaiming_normal_, init_gain:float=0.02)->List[nn.Module]:
    """Return a `[pad -> Conv2d -> norm (-> ReLU)]` group as a list of modules.

    `pad_mode` selects the padding: 'reflection' / 'border' use explicit pad layers,
    'zeros' pads inside the conv itself; any other value means no padding.
    `init` is applied to the conv weight; `init_gain` is the std used when
    `init` is `nn.init.normal_`.  (Fixed: `init_gain` was annotated `int` for a
    float default.)
    """
    layers: List[nn.Module] = []
    if pad_mode == 'reflection': layers.append(nn.ReflectionPad2d(pad))
    elif pad_mode == 'border':   layers.append(nn.ReplicationPad2d(pad))
    # only 'zeros' lets the conv do its own padding
    p = pad if pad_mode == 'zeros' else 0
    conv = nn.Conv2d(ch_in, ch_out, kernel_size=ks, padding=p, stride=stride, bias=bias)
    if init:
        # nn.init.normal_ takes (mean, std); other inits take only the tensor
        if init == nn.init.normal_:
            init(conv.weight, 0.0, init_gain)
        else:
            init(conv.weight)
        # zero the conv bias when present
        if hasattr(conv, 'bias') and hasattr(conv.bias, 'data'): conv.bias.data.fill_(0.)
    layers += [conv, norm_layer(ch_out)]
    if activ: layers.append(nn.ReLU(inplace=True))
    return layers
show_doc(pad_conv_norm_relu,title_level=3)
#export
class ResnetBlock(nn.Module):
    "nn.Module for the ResNet Block"
    def __init__(self, dim:int, pad_mode:str='reflection', norm_layer:nn.Module=None, dropout:float=0., bias:bool=True):
        "Two pad-conv-norm groups (ReLU only after the first) with optional dropout in between."
        super().__init__()
        assert pad_mode in ['zeros', 'reflection', 'border'], f'padding {pad_mode} not implemented.'
        norm_layer = ifnone(norm_layer, nn.InstanceNorm2d)
        block = pad_conv_norm_relu(dim, dim, pad_mode, norm_layer, bias=bias)
        if dropout != 0:
            block.append(nn.Dropout(dropout))
        block += pad_conv_norm_relu(dim, dim, pad_mode, norm_layer, bias=bias, activ=False)
        self.conv_block = nn.Sequential(*block)

    def forward(self, x):
        "Residual connection: the input plus its transformed version."
        return x + self.conv_block(x)
show_doc(ResnetBlock,title_level=3)
#export
def resnet_generator(ch_in:int, ch_out:int, n_ftrs:int=64, norm_layer:nn.Module=None,
                     dropout:float=0., n_blocks:int=9, pad_mode:str='reflection')->nn.Module:
    "Resnet generator: stem conv, two downsamples, `n_blocks` residual blocks, two upsamples, Tanh head."
    norm_layer = ifnone(norm_layer, nn.InstanceNorm2d)
    # InstanceNorm2d has no learnable affine parameters by default, so keep conv biases
    bias = (norm_layer == nn.InstanceNorm2d)
    # stem: 7x7 conv with reflection padding
    modules = pad_conv_norm_relu(ch_in, n_ftrs, 'reflection', norm_layer, pad=3, ks=7, bias=bias)
    # downsampling: double the features, halve the resolution, twice
    for _ in range(2):
        modules += pad_conv_norm_relu(n_ftrs, n_ftrs *2, 'zeros', norm_layer, stride=2, bias=bias)
        n_ftrs *= 2
    # residual trunk
    modules.extend(ResnetBlock(n_ftrs, pad_mode, norm_layer, dropout, bias) for _ in range(n_blocks))
    # upsampling: mirror the two downsampling stages
    for _ in range(2):
        modules += convT_norm_relu(n_ftrs, n_ftrs//2, norm_layer, bias=bias)
        n_ftrs //= 2
    # head: 7x7 conv to `ch_out` channels, squashed to [-1, 1]
    modules += [nn.ReflectionPad2d(3), nn.Conv2d(n_ftrs, ch_out, kernel_size=7, padding=0), nn.Tanh()]
    return nn.Sequential(*modules)
show_doc(resnet_generator,title_level=3)
# ### Test generator
# Let's test for a few things:
# 1. The generator can indeed be initialized correctly
# 2. A random image can be passed into the model successfully with the correct size output
# 3. The CycleGAN generator is equivalent to the [original implementation](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/cycle_gan_model.py)
# First let's create a random batch:
# batch of 4 random 3-channel 256x256 images
img1 = torch.randn(4,3,256,256)
m = resnet_generator(3,3)
with torch.no_grad():
    out1 = m(img1)
out1.shape
# reference generator from the junyanz repo, same configuration (9 resnet blocks, InstanceNorm)
m_junyanz = define_G(3,3,64,'resnet_9blocks', norm='instance')
with torch.no_grad():
    out2 = m_junyanz(img1)
out2.shape
#export
def compare_networks(a,b):
    """A simple function to compare the printed model representations as a proxy for actually comparing two models.

    Raises `AssertionError` (with a descriptive message) on the first mismatch;
    prints "Passed!" and returns True when every pair of layers matches.
    """
    # explicit message on length mismatch instead of a bare assert
    assert len(a) == len(b), f"networks differ in length: {len(a)} vs {len(b)}"
    # zip + enumerate instead of indexing with range(len(...))
    for i, (layer_a, layer_b) in enumerate(zip(a, b)):
        assert repr(layer_a) == repr(layer_b), f"{layer_a} \n and \n {layer_b} \n not equal (position {i})"
    print("Passed!")
    return True
# the generator must preserve the input shape, and our network must match the reference one
test_eq(out1.shape,img1.shape)
test_eq(out2.shape,img1.shape)
assert compare_networks(list(m_junyanz.children())[0],m)
# ## Discriminator
#export
def conv_norm_lr(ch_in:int, ch_out:int, norm_layer:nn.Module=None, ks:int=3, bias:bool=True, pad:int=1, stride:int=1,
                 activ:bool=True, slope:float=0.2, init=nn.init.normal_, init_gain:float=0.02)->List[nn.Module]:
    """Return a `[Conv2d (-> norm) (-> LeakyReLU)]` group as a list of modules.

    `init` is applied to the conv weight; `init_gain` is the std used when
    `init` is `nn.init.normal_`.  The norm layer is skipped when `norm_layer`
    is None.  (Fixed: `init_gain` was annotated `int` for a float default.)
    """
    conv = nn.Conv2d(ch_in, ch_out, kernel_size=ks, padding=pad, stride=stride, bias=bias)
    if init:
        # nn.init.normal_ takes (mean, std); other inits take only the tensor
        if init == nn.init.normal_:
            init(conv.weight, 0.0, init_gain)
        else:
            init(conv.weight)
        # zero the conv bias when present
        if hasattr(conv, 'bias') and hasattr(conv.bias, 'data'): conv.bias.data.fill_(0.)
    layers: List[nn.Module] = [conv]
    if norm_layer is not None: layers.append(norm_layer(ch_out))
    if activ: layers.append(nn.LeakyReLU(slope, inplace=True))
    return layers
show_doc(conv_norm_lr,title_level=3)
#export
def discriminator(ch_in:int, n_ftrs:int=64, n_layers:int=3, norm_layer:nn.Module=None, sigmoid:bool=False)->nn.Module:
    "PatchGAN discriminator: strided conv groups ending in a 1-channel patch map (optionally through a Sigmoid)."
    norm_layer = ifnone(norm_layer, nn.InstanceNorm2d)
    # InstanceNorm2d has no learnable affine parameters by default, so keep conv biases
    bias = (norm_layer == nn.InstanceNorm2d)
    modules = conv_norm_lr(ch_in, n_ftrs, ks=4, stride=2, pad=1)
    for i in range(n_layers-1):
        # double the feature count for the first few layers, then plateau
        nxt_ftrs = 2*n_ftrs if i <= 3 else n_ftrs
        modules += conv_norm_lr(n_ftrs, nxt_ftrs, norm_layer, ks=4, stride=2, pad=1, bias=bias)
        n_ftrs = nxt_ftrs
    nxt_ftrs = 2*n_ftrs if n_layers <=3 else n_ftrs
    # final stride-1 group before the 1-channel prediction conv
    modules += conv_norm_lr(n_ftrs, nxt_ftrs, norm_layer, ks=4, stride=1, pad=1, bias=bias)
    modules.append(nn.Conv2d(nxt_ftrs, 1, kernel_size=4, stride=1, padding=1))
    if sigmoid:
        modules.append(nn.Sigmoid())
    return nn.Sequential(*modules)
show_doc(discriminator,title_level=3)
# ### Test discriminator
# Let's test for similar things:
# 1. The discriminator can indeed be initialized correctly
# 2. A random image can be passed into the discriminator successfully with the correct size output
# 3. The CycleGAN discriminator is equivalent to the [original implementation](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/cycle_gan_model.py)
d = discriminator(3)
with torch.no_grad():
    out1 = d(img1)
out1.shape
img1 = torch.randn(4,3,256,256)
# reference 'basic' (PatchGAN) discriminator from the junyanz repo
d_junyanz = define_D(3,64,'basic',norm='instance')
with torch.no_grad():
    out2 = d_junyanz(img1)
out2.shape
# a 256x256 input should yield a 30x30 patch map from the 3-layer PatchGAN
test_eq(out1.shape,torch.Size([4, 1, 30, 30]))
test_eq(out2.shape,torch.Size([4, 1, 30, 30]))
assert compare_networks(list(d_junyanz.children())[0],d)
# ## Full model
# We group two discriminators and two generators in a single model, then a `Callback` (defined in `02_cyclegan_training.ipynb`) will take care of training them properly.
#export
class CycleGAN(nn.Module):
    """
    CycleGAN model: two generators and two discriminators grouped in one `nn.Module`.

    When called, takes a batch of real images from both domains and returns fake
    images for the opposite domains (via the generators), plus identity images
    obtained by passing each batch through its own-domain generator (needed for
    the identity loss).

    Attributes:
        `G_A` (`nn.Module`): takes real input B and generates fake input A
        `G_B` (`nn.Module`): takes real input A and generates fake input B
        `D_A` (`nn.Module`): trained to tell real input A from fake input A
        `D_B` (`nn.Module`): trained to tell real input B from fake input B
    """
    def __init__(self, ch_in:int=3, ch_out:int=3, n_features:int=64, disc_layers:int=3, gen_blocks:int=9, lsgan:bool=True,
                 drop:float=0., norm_layer:nn.Module=None):
        """
        Constructor for the CycleGAN model.

        Arguments:
            `ch_in` (`int`): number of input channels (default=3)
            `ch_out` (`int`): number of output channels (default=3)
            `n_features` (`int`): number of input features (default=64)
            `disc_layers` (`int`): number of discriminator layers (default=3)
            `gen_blocks` (`int`): number of residual blocks in the generator (default=9)
            `lsgan` (`bool`): LSGAN objective (unnormalized float output) or not (default=True)
            `drop` (`float`): dropout level (default=0)
            `norm_layer` (`nn.Module`): normalization layer type for the submodels (default=None)
        """
        super().__init__()
        # with the LSGAN objective the discriminators output raw scores, otherwise probabilities
        raw_output = not lsgan
        self.D_A = discriminator(ch_in, n_features, disc_layers, norm_layer, sigmoid=raw_output)
        self.D_B = discriminator(ch_in, n_features, disc_layers, norm_layer, sigmoid=raw_output)
        self.G_A = resnet_generator(ch_in, ch_out, n_features, norm_layer, drop, gen_blocks)
        self.G_B = resnet_generator(ch_in, ch_out, n_features, norm_layer, drop, gen_blocks)

    def forward(self, input):
        """Take a tuple of real-image batches (A, B); return [fake_A, fake_B, idt_A, idt_B]."""
        real_A, real_B = input
        fake_A = self.G_A(real_B)
        fake_B = self.G_B(real_A)
        # identity images, needed for the identity loss during training
        idt_A = self.G_A(real_A)
        idt_B = self.G_B(real_B)
        return [fake_A, fake_B, idt_A, idt_B]
show_doc(CycleGAN,title_level=3)
show_doc(CycleGAN.__init__)
show_doc(CycleGAN.forward)
# ### Quick model tests
#
# Again, let's check that the model can be called successfully and outputs the correct shapes.
#
cyclegan_model = CycleGAN()
img1 = torch.randn(4,3,256,256)
img2 = torch.randn(4,3,256,256)
# %%time
with torch.no_grad(): cyclegan_output = cyclegan_model((img1,img2))
# forward returns [fake_A, fake_B, idt_A, idt_B], each the same shape as the inputs
test_eq(len(cyclegan_output),4)
for output_batch in cyclegan_output:
    test_eq(output_batch.shape,img1.shape)
#hide
from nbdev.export import notebook2script
notebook2script()
|
nbs/01_models.cyclegan.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## PyTorch Linear Regression
# Author: <b> <NAME></b>
# +
# import libraries
import torch
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# hyperparameters: one input feature, one output, 600 epochs of full-batch SGD
input_ = 1
output_ = 1
epoch = 600
learning_rate = 0.001
# -
# toy regression data: 15 (x, y) pairs
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]],dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]],dtype = np.float32)
# model: a single linear layer (y = wx + b)
model = torch.nn.Linear(input_,output_)
# loss criterion: mean squared error
criterian = torch.nn.MSELoss()
# optimizer: plain SGD with the learning rate above
optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate)
# +
# training loop: full-batch gradient descent; print the loss every 50 epochs
for e in range(epoch):
    inputs = torch.from_numpy(x_train)
    target = torch.from_numpy(y_train)
    pred = model(inputs)
    loss= criterian(pred,target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if e%50==0:
        print(loss.item())
# -
# inspect the learned slope and intercept
model.weight
model.bias
# +
# plot the results
# -
# NOTE(review): label strings contain typos ('OrignalData', 'fittedLine'); left as-is since they are runtime strings
predicted = model(inputs).detach().numpy()
plt.plot(x_train,y_train,'ro',label='OrignalData')
plt.plot(x_train,predicted,label = 'fittedLine')
plt.legend()
plt.show()
# persist the trained weights
torch.save(model.state_dict(),'modelRegression.ckpt')
|
Basic 2 Pytorch Linear Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Plot # of sets (log scale)
# +
unitsnow = '[log(# of sets)]'
setslist = [wf.sets_log, wf.sets_dfad, wf.sets_afad,
sets_tot, wf.sets_una, wf.sets_oth]
setstitlelist = ['Natural log/debris sets', 'Drifting FAD sets', 'Anchored FAD sets',
'Total associated sets', 'Unassociated sets', 'Other sets']
# - Set proj and define axes
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16,4),
subplot_kw={'projection': ccrs.PlateCarree(central_longitude=200)})
# - Make maps pretty + plot
isp = 0
for irow in range(axes.shape[0]):
for icol in range(axes.shape[1]):
if isp > len(setslist)-1:
axes[irow,icol].set_visible(False)
else:
ax = axes[irow][icol]
exec(open('helper_scripts/create_map_bgs.py').read())
ax.text(-0.08, 1.08, string.ascii_uppercase[isp],
transform=ax.transAxes, size=18, weight='bold')
setsnow = setslist[isp].sum(dim='time')
setsnow = np.log10(setsnow.where(setsnow>0))
setsnow.plot(
ax=ax, transform=ccrs.PlateCarree(),
cmap=plt.cm.get_cmap('viridis',13), vmin=0, vmax=5.433,
cbar_kwargs={'pad': 0.02, 'label': unitsnow})
ax.set_xlabel(''); ax.set_ylabel('')
ax.set_title(setstitlelist[isp])
isp = isp + 1
# - Save fig
fig.savefig(figpath + 'S1_fig.pdf',
bbox_inches='tight', pad_inches = 0, dpi = 300)
fig.savefig(figpath + 'S1_fig.png',
bbox_inches='tight', pad_inches = 0, dpi = 300)
|
python/figure_notebooks/suppfig1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Statistical Downscaling and Bias-Adjustment
#
# `xclim` provides tools and utilities to ease the bias-adjustment process through its `xclim.sdba` module. Almost all adjustment algorithms conform to the `train` - `adjust` scheme, formalized within `TrainAdjust` classes. Given a reference time series (ref), historical simulations (hist) and simulations to be adjusted (sim), any bias-adjustment method would be applied by first estimating the adjustment factors between the historical simulation and the observations series, and then applying these factors to `sim`, which could be a future simulation.
#
# A very simple "Quantile Mapping" approach is available through the "Empirical Quantile Mapping" object. The object is created through the `.train` method of the class, and the simulation is adjusted with `.adjust`.
# +
import numpy as np
import xarray as xr
import cftime
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use("seaborn")
plt.rcParams["figure.figsize"] = (11, 5)
# Create toy data to explore bias adjustment, here fake temperature timeseries:
# a seasonal cycle + noise + a linear trend, on a daily no-leap calendar
t = xr.cftime_range("2000-01-01", "2030-12-31", freq="D", calendar="noleap")
ref = xr.DataArray(
    (
        -20 * np.cos(2 * np.pi * t.dayofyear / 365)
        + 2 * np.random.random_sample((t.size,))
        + 273.15
        + 0.1 * (t - t[0]).days / 365
    ), # "warming" of 1K per decade,
    dims=("time",),
    coords={"time": t},
    attrs={"units": "K"},
)
# the simulation has a weaker seasonal cycle and a slightly stronger trend than the reference
sim = xr.DataArray(
    (
        -18 * np.cos(2 * np.pi * t.dayofyear / 365)
        + 2 * np.random.random_sample((t.size,))
        + 273.15
        + 0.11 * (t - t[0]).days / 365
    ), # "warming" of 1.1K per decade
    dims=("time",),
    coords={"time": t},
    attrs={"units": "K"},
)
# the "historical" period is everything up to 2015; `sim` keeps the future too
ref = ref.sel(time=slice(None, "2015-01-01"))
hist = sim.sel(time=slice(None, "2015-01-01"))
ref.plot(label="Reference")
sim.plot(label="Model")
plt.legend()
# +
from xclim import sdba
# train on (ref, hist), then adjust the full simulation including the future period
QM = sdba.EmpiricalQuantileMapping.train(
    ref, hist, nquantiles=15, group="time", kind="+"
)
scen = QM.adjust(sim, extrapolation="constant", interp="nearest")
ref.groupby("time.dayofyear").mean().plot(label="Reference")
hist.groupby("time.dayofyear").mean().plot(label="Model - biased")
scen.sel(time=slice("2000", "2015")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2000-15", linestyle="--"
)
scen.sel(time=slice("2015", "2030")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2015-30", linestyle="--"
)
plt.legend()
# -
# In the previous example, a simple Quantile Mapping algorithm was used with 15 quantiles and one group of values. The model performs well, but our toy data is also quite smooth and well-behaved so this is not surprising. A more complex example could have a bias distribution varying strongly across months. To perform the adjustment with different factors for each month, one can pass `group='time.month'`. Moreover, to reduce the risk of a sharp change in the adjustment at the interface of the months, `interp='linear'` can be passed to `adjust` and the adjustment factors will be interpolated linearly. Ex: the factors for the 1st of May will be the average of those for April and those for May.
# +
# same adjustment, but with separate factors per calendar month and linear interpolation between them
QM_mo = sdba.EmpiricalQuantileMapping.train(
    ref, hist, nquantiles=15, group="time.month", kind="+"
)
scen = QM_mo.adjust(sim, extrapolation="constant", interp="linear")
ref.groupby("time.dayofyear").mean().plot(label="Reference")
hist.groupby("time.dayofyear").mean().plot(label="Model - biased")
scen.sel(time=slice("2000", "2015")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2000-15", linestyle="--"
)
scen.sel(time=slice("2015", "2030")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2015-30", linestyle="--"
)
plt.legend()
# -
# The training data (here the adjustment factors) is available for inspection in the `ds` attribute of the adjustment object.
QM_mo.ds
QM_mo.ds.af.plot()
# ## Grouping
#
# For basic time period grouping (months, day of year, season), passing a string to the methods needing it is sufficient. Most methods acting on grouped data also accept a `window` int argument to pad the groups with data from adjacent ones. Units of `window` are the sampling frequency of the main grouping dimension (usually `time`). For more complex grouping, or simply for clarity, one can pass a `xclim.sdba.base.Grouper` directly.
#
# Example here with another, simpler, adjustment method. Here we want `sim` to be scaled so that its mean fits the one of `ref`. Scaling factors are to be computed separately for each day of the year, but including 15 days on either side of the day. This means that the factor for the 1st of May is computed including all values from the 16th of April to the 15th of May (of all years).
# +
# day-of-year grouping with a 31-day window (15 days on either side) to smooth the factors
group = sdba.Grouper("time.dayofyear", window=31)
QM_doy = sdba.Scaling.train(ref, hist, group=group, kind="+")
scen = QM_doy.adjust(sim)
ref.groupby("time.dayofyear").mean().plot(label="Reference")
hist.groupby("time.dayofyear").mean().plot(label="Model - biased")
scen.sel(time=slice("2000", "2015")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2000-15", linestyle="--"
)
scen.sel(time=slice("2015", "2030")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2015-30", linestyle="--"
)
plt.legend()
# -
sim
QM_doy.ds.af.plot()
# ## Modular approach
#
# The `sdba` module adopts a modular approach instead of implementing published and named methods directly.
# A generic bias adjustment process is laid out as follows:
#
# - preprocessing on `ref`, `hist` and `sim` (using methods in `xclim.sdba.processing` or `xclim.sdba.detrending`)
# - creating and training the adjustment object `Adj = Adjustment.train(obs, hist, **kwargs)` (from `xclim.sdba.adjustment`)
# - adjustment `scen = Adj.adjust(sim, **kwargs)`
# - post-processing on `scen` (for example: re-trending)
#
# The train-adjust approach allows to inspect the trained adjustment object. The training information is stored in the underlying `Adj.ds` dataset and often has a `af` variable with the adjustment factors. Its layout and the other available variables vary between the different algorithm, refer to their part of the API docs.
#
# For heavy processing, this separation allows the computation and writing to disk of the training dataset before performing the adjustment(s). See the [advanced notebook](sdba-advanced.ipynb).
#
# Parameters needed by the training and the adjustment are saved to the `Adj.ds` dataset as a `adj_params` attribute. Other parameters, those only needed by the adjustment are passed in the `adjust` call and written to the history attribute in the output scenario dataarray.
#
# ### First example : pr and frequency adaptation
#
# The next example generates fake precipitation data and adjusts the `sim` timeseries, but also adds a step where the dry-day frequency of `hist` is adapted so that it fits the one of `ref`. This ensures well-behaved adjustment factors for the smaller quantiles. Note also that we are passing `kind='*'` to use the multiplicative mode. Adjustment factors will be multiplied/divided instead of being added/subtracted.
# +
# fake precipitation data: heavily skewed values with many near-zero ("dry") days
vals = np.random.randint(0, 1000, size=(t.size,)) / 100
vals_ref = (4 ** np.where(vals < 9, vals / 100, vals)) / 3e6
# the simulation has slightly fewer dry days (threshold 9.5 instead of 9) and extra noise
vals_sim = (
    (1 + 0.1 * np.random.random_sample((t.size,)))
    * (4 ** np.where(vals < 9.5, vals / 100, vals))
    / 3e6
)
pr_ref = xr.DataArray(
    vals_ref, coords={"time": t}, dims=("time",), attrs={"units": "mm/day"}
)
pr_ref = pr_ref.sel(time=slice("2000", "2015"))
pr_sim = xr.DataArray(
    vals_sim, coords={"time": t}, dims=("time",), attrs={"units": "mm/day"}
)
pr_hist = pr_sim.sel(time=slice("2000", "2015"))
pr_ref.plot(alpha=0.9, label="Reference")
pr_sim.plot(alpha=0.7, label="Model")
plt.legend()
# +
# 1st try without adapt_freq: multiplicative (kind="*") quantile mapping
QM = sdba.EmpiricalQuantileMapping.train(
    pr_ref, pr_hist, nquantiles=15, kind="*", group="time"
)
scen = QM.adjust(pr_sim)
pr_ref.sel(time="2010").plot(alpha=0.9, label="Reference")
pr_hist.sel(time="2010").plot(alpha=0.7, label="Model - biased")
scen.sel(time="2010").plot(alpha=0.6, label="Model - adjusted")
plt.legend()
# -
# In the figure above, `scen` has small peaks where `sim` is 0. This problem originates from the fact that there are more "dry days" (days with almost no precipitation) in `hist` than in `ref`. The next example works around the problem using frequency-adaptation, as described in [Themeßl et al. (2010)](https://doi.org/10.1007/s10584-011-0224-4).
# +
# 2nd try with adapt_freq: first match the dry-day frequency of sim to ref,
# then train the quantile mapping on the frequency-adapted series
sim_ad, pth, dP0 = sdba.processing.adapt_freq(
    pr_ref, pr_sim, thresh="0.05 mm d-1", group="time"
)
QM_ad = sdba.EmpiricalQuantileMapping.train(
    pr_ref, sim_ad, nquantiles=15, kind="*", group="time"
)
scen_ad = QM_ad.adjust(pr_sim)
pr_ref.sel(time="2010").plot(alpha=0.9, label="Reference")
pr_sim.sel(time="2010").plot(alpha=0.7, label="Model - biased")
scen_ad.sel(time="2010").plot(alpha=0.6, label="Model - adjusted")
plt.legend()
# -
# ### Second example: tas and detrending
#
# The next example reuses the fake temperature timeseries generated at the beginning and applies the same QM adjustment method. However, for a better adjustment, we will scale sim to ref and then detrend the series, assuming the trend is linear. When `sim` (or `sim_scl`) is detrended, its values are now anomalies, so we need to normalize `ref` and `hist` so we can compare similar values.
#
# This process is detailed here to show how the sdba module should be used in custom adjustment processes, but this specific method also exists as `sdba.DetrendedQuantileMapping` and is based on [Cannon et al. 2015](https://doi.org/10.1175/JCLI-D-14-00754.1). However, `DetrendedQuantileMapping` normalizes over a `time.dayofyear` group, regardless of what is passed in the `group` argument. As done here, it is anyway recommended to use `dayofyear` groups when normalizing, especially for variables with strong seasonal variations.
# +
# scale sim toward ref per day-of-year (31-day window), then remove a linear trend
doy_win31 = sdba.Grouper("time.dayofyear", window=15)
Sca = sdba.Scaling.train(ref, hist, group=doy_win31, kind="+")
sim_scl = Sca.adjust(sim)
detrender = sdba.detrending.PolyDetrend(degree=1, group="time.dayofyear", kind="+")
sim_fit = detrender.fit(sim_scl)
sim_detrended = sim_fit.detrend(sim_scl)
# normalize ref and hist so they are comparable to the detrended (anomaly) sim
ref_n = sdba.processing.normalize(ref, group=doy_win31, kind="+")
hist_n = sdba.processing.normalize(hist, group=doy_win31, kind="+")
QM = sdba.EmpiricalQuantileMapping.train(
    ref_n, hist_n, nquantiles=15, group="time.month", kind="+"
)
scen_detrended = QM.adjust(sim_detrended, extrapolation="constant", interp="nearest")
# put the removed trend back onto the adjusted anomalies
scen = sim_fit.retrend(scen_detrended)
ref.groupby("time.dayofyear").mean().plot(label="Reference")
sim.groupby("time.dayofyear").mean().plot(label="Model - biased")
scen.sel(time=slice("2000", "2015")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2000-15", linestyle="--"
)
scen.sel(time=slice("2015", "2030")).groupby("time.dayofyear").mean().plot(
    label="Model - adjusted - 2015-30", linestyle="--"
)
plt.legend()
# -
# ### Third example : Multi-method protocol - Hnilica et al. 2017
# In [their paper of 2017](https://doi.org/10.1002/joc.4890), Hnilica, Hanel and Puš present a bias-adjustment method based on the principles of Principal Components Analysis. The idea is simple : use principal components to define coordinates on the reference and on the simulation and then transform the simulation data from the latter to the former. Spatial correlation can thus be conserved by taking different points as the dimensions of the transform space. The method was demonstrated in the article by bias-adjusting precipitation over different drainage basins.
#
# The same method could be used for multivariate adjustment. The principle would be the same, concatenating the different variables into a single dataset along a new dimension.
#
# Here we show how the modularity of `xclim.sdba` can be used to construct a quite complex adjustment protocol involving two adjustment methods : quantile mapping and principal components. Evidently, as this example uses only 2 years of data, it is not complete. It is meant to show how the adjustment functions and how the API can be used.
# +
# We are using xarray's "air_temperature" dataset
ds = xr.tutorial.open_dataset("air_temperature")
# To get an exaggerated example we select different points
# here "lon" will be our dimension of two "spatially correlated" points
reft = ds.air.isel(lat=21, lon=[40, 52]).drop_vars(["lon", "lat"])
simt = ds.air.isel(lat=18, lon=[17, 35]).drop_vars(["lon", "lat"])
# Principal Components Adj, no grouping and use "lon" as the space dimensions
PCA = sdba.PrincipalComponents.train(reft, simt, group="time", crd_dims=["lon"])
scen1 = PCA.adjust(simt)
# QM applied on top of the PCA output, no grouping, 50 quantiles and additive adjustment
EQM = sdba.EmpiricalQuantileMapping.train(
    reft, scen1, group="time", nquantiles=50, kind="+"
)
scen2 = EQM.adjust(scen1)
# +
# some analysis figures: PC-space scatter, per-point QQ plots, and a timeseries panel
fig = plt.figure(figsize=(12, 16))
gs = plt.matplotlib.gridspec.GridSpec(3, 2, fig)
axPCA = plt.subplot(gs[0, :])
axPCA.scatter(reft.isel(lon=0), reft.isel(lon=1), s=20, label="Reference")
axPCA.scatter(simt.isel(lon=0), simt.isel(lon=1), s=10, label="Simulation")
axPCA.scatter(scen2.isel(lon=0), scen2.isel(lon=1), s=3, label="Adjusted - PCA+EQM")
axPCA.set_xlabel("Point 1")
axPCA.set_ylabel("Point 2")
axPCA.set_title("PC-space")
axPCA.legend()
refQ = reft.quantile(EQM.ds.quantiles, dim="time")
simQ = simt.quantile(EQM.ds.quantiles, dim="time")
scen1Q = scen1.quantile(EQM.ds.quantiles, dim="time")
scen2Q = scen2.quantile(EQM.ds.quantiles, dim="time")
for i in range(2):
    if i == 0:
        axQM = plt.subplot(gs[1, 0])
    else:
        axQM = plt.subplot(gs[1, 1], sharey=axQM)
    axQM.plot(refQ.isel(lon=i), simQ.isel(lon=i), label="No adj")
    axQM.plot(refQ.isel(lon=i), scen1Q.isel(lon=i), label="PCA")
    axQM.plot(refQ.isel(lon=i), scen2Q.isel(lon=i), label="PCA+EQM")
    axQM.plot(
        refQ.isel(lon=i), refQ.isel(lon=i), color="k", linestyle=":", label="Ideal"
    )
    axQM.set_title(f"QQ plot - Point {i + 1}")
    # NOTE(review): set_xlabel is called twice ("Reference" then "Model") — the second
    # call overwrites the first; the intent was probably set_ylabel("Model"). Confirm.
    axQM.set_xlabel("Reference")
    axQM.set_xlabel("Model")
    axQM.legend()
axT = plt.subplot(gs[2, :])
reft.isel(lon=0).plot(ax=axT, label="Reference")
simt.isel(lon=0).plot(ax=axT, label="Unadjusted sim")
# scen1.isel(lon=0).plot(ax=axT, label='PCA only')
scen2.isel(lon=0).plot(ax=axT, label="PCA+EQM")
axT.legend()
axT.set_title("Timeseries - Point 1")
# -
# ### Fourth example : Multivariate bias-adjustment with multiple steps - Cannon 2018
#
# This section replicates the "MBCn" algorithm described by [Cannon (2018)](https://doi.org/10.1007/s00382-017-3580-6). The method relies on some univariate algorithm, an adaption of the N-pdf transform of [Pitié et al. (2005)](https://ieeexplore.ieee.org/document/1544887/) and a final reordering step.
#
# In the following, we use the AHCCD and CanESM2 data as reference and simulation, and we correct both `pr` and `tasmax` together.
# +
from xclim.testing import open_dataset
from xclim.core.units import convert_units_to
# reference: AHCCD observations over 1981-2010, with units harmonized to the model's
dref = open_dataset(
    "sdba/ahccd_1950-2013.nc", chunks={"location": 1}, drop_variables=["lat", "lon"]
).sel(time=slice("1981", "2010"))
dref = dref.assign(
    tasmax=convert_units_to(dref.tasmax, "K"),
    pr=convert_units_to(dref.pr, "kg m-2 s-1"),
)
# simulation: CanESM2; hist = 1981-2010, sim = 2041-2070
dsim = open_dataset(
    "sdba/CanESM2_1950-2100.nc", chunks={"location": 1}, drop_variables=["lat", "lon"]
)
dhist = dsim.sel(time=slice("1981", "2010"))
dsim = dsim.sel(time=slice("2041", "2070"))
dref
# -
# ##### Perform an initial univariate adjustment.
# +
# additive for tasmax
QDMtx = sdba.QuantileDeltaMapping.train(
    dref.tasmax, dhist.tasmax, nquantiles=20, kind="+", group="time"
)
# Adjust both hist and sim, we'll feed both to the Npdf transform.
scenh_tx = QDMtx.adjust(dhist.tasmax)
scens_tx = QDMtx.adjust(dsim.tasmax)
# remove == 0 values in pr (jitter below the threshold so the multiplicative mapping is well-behaved):
dref["pr"] = sdba.processing.jitter_under_thresh(dref.pr, "0.01 mm d-1")
dhist["pr"] = sdba.processing.jitter_under_thresh(dhist.pr, "0.01 mm d-1")
dsim["pr"] = sdba.processing.jitter_under_thresh(dsim.pr, "0.01 mm d-1")
# multiplicative for pr
QDMpr = sdba.QuantileDeltaMapping.train(
    dref.pr, dhist.pr, nquantiles=20, kind="*", group="time"
)
# Adjust both hist and sim, we'll feed both to the Npdf transform.
scenh_pr = QDMpr.adjust(dhist.pr)
scens_pr = QDMpr.adjust(dsim.pr)
scenh = xr.Dataset(dict(tasmax=scenh_tx, pr=scenh_pr))
scens = xr.Dataset(dict(tasmax=scens_tx, pr=scens_pr))
# -
# ##### Stack the variables to multivariate arrays and standardize them
# The standardization process ensures the mean and standard deviation of each column (variable) is 0 and 1 respectively.
#
# `hist` and `sim` are standardized together so the two series are coherent. We keep the mean and standard deviation to be reused when we build the result.
# +
# Stack the variables (tasmax and pr) into a single "multivariate" array
ref = sdba.base.stack_variables(dref)
scenh = sdba.base.stack_variables(scenh)
scens = sdba.base.stack_variables(scens)
# Standardize; keep savg/sstd to un-standardize the results later
ref, _, _ = sdba.processing.standardize(ref)
allsim, savg, sstd = sdba.processing.standardize(xr.concat((scenh, scens), "time"))
hist = allsim.sel(time=scenh.time)
sim = allsim.sel(time=scens.time)
# -
# ##### Perform the N-dimensional probability density function transform
#
# The NpdfTransform will iteratively randomly rotate our arrays in the "variables" space and apply the univariate adjustment before rotating it back. In Cannon (2018) and Pitié et al. (2005), it can be seen that the source array's joint distribution converges toward the target's joint distribution when a large number of iterations is done.
# +
from xclim import set_options
# See the advanced notebook for details on how this option works
with set_options(sdba_extra_output=True):
    out = sdba.adjustment.NpdfTransform.adjust(
        ref,
        hist,
        sim,
        base=sdba.QuantileDeltaMapping,  # Use QDM as the univariate adjustment.
        base_kws={"nquantiles": 20, "group": "time"},
        n_iter=20,  # perform 20 iterations
        n_escore=1000,  # only send 1000 points to the escore metric (it is really slow)
    )
scenh = out.scenh.rename(time_hist="time")  # Bias-adjusted historical period
scens = out.scen  # Bias-adjusted future period
extra = out.drop_vars(["scenh", "scen"])
# Un-standardize (add the mean and the std back)
scenh = sdba.processing.unstandardize(scenh, savg, sstd)
scens = sdba.processing.unstandardize(scens, savg, sstd)
# -
# ##### Restoring the trend
#
# The NpdfT has given us new "hist" and "sim" arrays with a correct rank structure. However, the trend is lost in this process. We reorder the result of the initial adjustment according to the rank structure of the NpdfT outputs to get our final bias-adjusted series.
#
# `sdba.processing.reordering` : 'ref' the argument that provides the order, 'sim' is the argument to reorder.
# restore the rank structure of the NpdfT outputs onto the initially-adjusted series
scenh = sdba.processing.reordering(hist, scenh, group="time")
scens = sdba.processing.reordering(sim, scens, group="time")
# back from the stacked "multivariate" arrays to per-variable datasets
scenh = sdba.base.unstack_variables(scenh, "variables")
scens = sdba.base.unstack_variables(scens, "variables")
# ##### There we are!
#
# Let's trigger all the computations. Here we write the data to disk and use `compute=False` in order to trigger the whole computation tree only once. There seems to be no way in xarray to do the same with a `load` call.
# +
from dask import compute
from dask.diagnostics import ProgressBar
# build the three delayed write tasks, then compute them together so shared
# intermediate results are only evaluated once
tasks = [
    scenh.isel(location=2).to_netcdf("mbcn_scen_hist_loc2.nc", compute=False),
    scens.isel(location=2).to_netcdf("mbcn_scen_sim_loc2.nc", compute=False),
    extra.escores.isel(location=2)
    .to_dataset()
    .to_netcdf("mbcn_escores_loc2.nc", compute=False),
]
with ProgressBar():
    compute(tasks)
# -
# Let's compare the series and look at the distance scores to see how well the Npdf transform has converged.
# +
scenh = xr.open_dataset("mbcn_scen_hist_loc2.nc")
fig, ax = plt.subplots()
dref.isel(location=2).tasmax.plot(ax=ax, label="Reference")
scenh.tasmax.plot(ax=ax, label="Adjusted", alpha=0.65)
dhist.isel(location=2).tasmax.plot(ax=ax, label="Simulated")
ax.legend()
# -
# a decreasing e-score difference indicates the iterations are converging
escores = xr.open_dataarray("mbcn_escores_loc2.nc")
diff_escore = escores.differentiate("iterations")
diff_escore.plot()
plt.title("Difference of the subsequent e-scores.")
plt.ylabel("E-scores difference")
diff_escore
# The tutorial continues in the [advanced notebook](sdba-advanced.ipynb) with more on optimization with dask, other fancier detrending algorithms and an example pipeline for heavy processing.
|
docs/notebooks/sdba.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from scipy.spatial import distance

# All observations used in this manual mean-shift walk-through.
data_points = np.array([
    [1, 1],
    [1, 1.5],
    [2, 2],
    [8, 1],
    [8, 0],
    [8.5, 1],
    [6, 1],
    [1, 10],
    [1.5, 10],
    [1.5, 9.5],
    [10, 10],
    [1.5, 8.5]
])

# Neighborhood (window) radius used for every step below.
r = 2


def _neighbors(center):
    """Return the data points whose Euclidean distance to *center* is <= r."""
    return np.array([p for p in data_points if distance.euclidean(p, center) <= r])


def _centroid(points):
    """Mean x / mean y of a neighborhood -- the shifted window center."""
    return [np.mean(points[:, 0]), np.mean(points[:, 1])]


# First window, started at (1, 1).
P1 = [1, 1]
points1 = _neighbors(P1)
points1
P2 = _centroid(points1)
P2
points2 = _neighbors(P2)
points2
# Second window, started at (8, 1).
P3 = [8, 1]
points3 = _neighbors(P3)
points3
P4 = _centroid(points3)
P4
P5 = [8, 0]
points4 = _neighbors(P5)
points4
P6 = _centroid(points4)
P6
P7 = [8.5, 1]
points5 = _neighbors(P7)
points5
P8 = [6, 1]
points6 = _neighbors(P8)
points6
P9 = _centroid(points6)
P9
points7 = _neighbors(P9)
points7
|
Exercise03/Exercise03.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Word Count - Pinot wines
# ---
# +
import pandas as pd
import numpy

# Load the reviews and keep only rows with a known grape variety.
reviews_df = pd.read_json('winemag-data-130k-v2.json')
reviews_df = reviews_df[reviews_df.variety.notnull()]
# Keep only Pinot wines; collapse whitespace in variety names to underscores.
# Assigning the column (instead of replace(..., inplace=True) on a slice)
# avoids pandas' chained-assignment warning.
pinot_df = reviews_df[reviews_df['variety'].str.contains("Pinot")].reset_index(drop=True)
pinot_df['variety'] = pinot_df['variety'].replace(r'\s+', '_', regex=True)

# Per-variety word frequencies across all review descriptions.
slices = []
for variety in pinot_df.variety.unique():
    temp_df = pinot_df[pinot_df['variety'] == variety]
    slice_df = pd.Series(' '.join(temp_df.description).split()).value_counts().to_frame()
    slice_df.reset_index(inplace=True)
    slice_df.columns = ['word', 'count']
    slice_df['variety'] = variety.replace('_', ' ')  # restore the display name
    slice_df['word'] = slice_df['word'].map(lambda w: w.strip('+-.,').lower())
    slices.append(slice_df)

# DataFrame.append was removed in pandas 2.0 -- build the result once with concat.
if slices:
    counts_df = pd.concat(slices)[['variety', 'word', 'count']]
else:
    counts_df = pd.DataFrame(columns=['variety', 'word', 'count'])
counts_df.to_csv('wordcounts.csv', encoding='utf-8')
# -
|
discarded ideas/data/dashboard_pinot-details/wordcounts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# +
# NOTE(review): this uses the TensorFlow 1.x graph/session API
# (tf.placeholder, tf.Session); it will not run under TF 2.x eager mode.
# create 150 evenly spaced points from -1 to 1, shaped (150, 1)
x_data = np.linspace(-1,1,150)[:,np.newaxis]
# create gaussian noise with the same shape
noise = np.random.normal(0,0.02,x_data.shape)
# create a nonlinear target: y = x^2 + noise
y_data = np.power(x_data,2)+noise
# define two placeholders for the inputs and targets
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])
# create a neural network with a single 20-unit hidden layer
Weights_L1 = tf.Variable(tf.random_normal([1,20]))
biases_L1 = tf.Variable(tf.zeros([1,20]))
Wx_plus_b_L1 = tf.matmul(x,Weights_L1) + biases_L1
# tanh is the activation function
L1 = tf.nn.tanh(Wx_plus_b_L1)
# output layer: 20 -> 1
Weights_L2 = tf.Variable(tf.random_normal([20,1]))
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_b_L2 = tf.matmul(L1,Weights_L2)+biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
# define loss function (mean squared error)
loss = tf.reduce_mean(tf.square(y-prediction))
# define an optimizer based on gradient descent to minimize the loss
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# initialize all global variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for _ in range(2000):
        sess.run(train_step,feed_dict={x:x_data,y:y_data})
    prediction_value = sess.run(prediction,feed_dict={x:x_data})
    # use matplotlib to show the predicted and observed values in the same figure
    plt.figure()
    plt.scatter(x_data,y_data) # observed (noisy) values
    plt.plot(x_data,prediction_value,'r-',lw=5) # predicted values
    plt.show()
# -
|
3-1simple non-linear regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="A--hJ0bCbK-p"
# # Git command execution in colab
# Google ColabでGitコマンドを操作できるファイルです。
# 初期設定を行った後に操作したいセルを実行してください。
#
# # 初期設定
# 1. 以下のセルに共通設定を入力してください。
# - root_ws_dir : Githubレポジトリのclone先ディレクトリ
# - git_repo_url : GithubレポジトリURL
#
# 2. セル実行後に以下の情報を入力してください。
# 各種情報は実行後にファイルを削除するためサーバー内には残りません。
# - git_user_email : Githubユーザーメールアドレス
# - git_user_name : Githubユーザー名
# - github_password : <PASSWORD>
# + colab={"base_uri": "https://localhost:8080/"} id="Ns-yyWFXYYAy" executionInfo={"status": "ok", "timestamp": 1620524569459, "user_tz": -540, "elapsed": 862, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggjdqxj5bE_7B9wnIJlwJoolPPvCMDe7-lsWl19uA=s64", "userId": "10642792699665912885"}} outputId="b68ed3ce-ab0a-421b-919e-c438be094b9d"
import os
import getpass
# Common settings: where repositories are cloned and which repo to operate on.
root_ws_dir = '/content/drive/MyDrive/Github'
git_repo_url = 'https://github.com/naonaorange/developper_utility_in_colab.git'
# Prompt once per session; getpass keeps the values out of the notebook output.
if not 'git_user_email' in globals():
    git_user_email = getpass.getpass(prompt='Please input email address')
if not 'git_user_name' in globals():
    git_user_name = getpass.getpass(prompt='Please input name')
if not 'git_password' in globals():
    git_password = getpass.getpass(prompt='Please input password')
from google.colab import drive
drive.mount("/content/drive")
# Derive the local clone directory from the repo URL: "<root_ws_dir>/<repo-name>".
repo_name = git_repo_url.split('/')[-1]
repo_name = repo_name.split('.')[0]
git_repo_dir = root_ws_dir + '/' + repo_name
def run_command(command, work_dir=None):
    """Write *command* to a throwaway shell script, run it via the Colab
    "!" cell magic below, then delete the script so credentials embedded
    in commands do not linger on disk.

    NOTE(review): the "# !..." lines are Colab magics; in plain Python they
    are inert comments and nothing is executed.
    """
    with open('command.sh', mode='w') as f:
        f.write('#!/bin/sh\n')
        if work_dir is not None:
            f.write('cd ' + work_dir + '\n')
        f.write(command)
    # #!cat command.sh
    # !chmod +x command.sh && sh command.sh
    os.remove('command.sh')
# git initial configuration: the identity used for commits
run_command('git config --global user.email ' + '"' + git_user_email + '"')
run_command('git config --global user.name ' + '"' + git_user_name + '"')
# + [markdown] id="QVX-iuMVc7At"
# # git clone
# + colab={"base_uri": "https://localhost:8080/"} id="eQ0pThZ9Q7tS" executionInfo={"status": "ok", "timestamp": 1620522319147, "user_tz": -540, "elapsed": 1204, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggjdqxj5bE_7B9wnIJlwJoolPPvCMDe7-lsWl19uA=s64", "userId": "10642792699665912885"}} outputId="cb0df2b6-1295-472a-9895-2b3da826216f"
def git_clone():
run_command(work_dir=root_ws_dir, command='git clone ' + git_repo_url)
git_clone()
# + [markdown] id="ZFh1QZ8ay8Oc"
# # git status
# + colab={"base_uri": "https://localhost:8080/"} id="yRTl6zDWy_D8" executionInfo={"status": "ok", "timestamp": 1620524710254, "user_tz": -540, "elapsed": 496, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggjdqxj5bE_7B9wnIJlwJoolPPvCMDe7-lsWl19uA=s64", "userId": "10642792699665912885"}} outputId="7c0152dd-5e80-49ec-8ee8-c1723c2242a4"
run_command(work_dir=git_repo_dir, command='''
git status
''')
# + [markdown] id="YuqrWXCDzB-6"
# # git diff
# + id="ZEBInbMKzDqx"
run_command(work_dir=git_repo_dir, command='''
git diff
''')
# + [markdown] id="jcDqwWsqdBUo"
# # git commit
# + colab={"base_uri": "https://localhost:8080/"} id="Vo_-qpZbRZBY" executionInfo={"status": "ok", "timestamp": 1620524782868, "user_tz": -540, "elapsed": 1015, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggjdqxj5bE_7B9wnIJlwJoolPPvCMDe7-lsWl19uA=s64", "userId": "10642792699665912885"}} outputId="bba47af5-59d5-4414-f060-18653f8bdb49"
insert_index = git_repo_url.find('https://')
insert_index += 8  # length of "https://" -- credentials go right after the scheme
# NOTE(review): embedding "<user>:<password>@" in the remote URL stores the
# password in the repo's git config (and shows it in process listings);
# prefer a credential helper or a scoped personal access token.
remote_url = git_repo_url[:insert_index] + git_user_name + ':' + git_password + '@' + git_repo_url[insert_index:]
run_command('git remote set-url origin ' + remote_url, work_dir=git_repo_dir)
run_command(work_dir=git_repo_dir, command='''
git commit -a -m "add function"
''')
# + [markdown] id="t3bexvKWdGSK"
# # git push
# + colab={"base_uri": "https://localhost:8080/"} id="RT220AQTSx6v" executionInfo={"status": "ok", "timestamp": 1620524787917, "user_tz": -540, "elapsed": 2429, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggjdqxj5bE_7B9wnIJlwJoolPPvCMDe7-lsWl19uA=s64", "userId": "10642792699665912885"}} outputId="00db964c-4918-423b-bd70-20d501fd5324"
run_command(work_dir=git_repo_dir, command='''
git push
''')
# + [markdown] id="CaPTHuC_vlod"
# # git pull
# + id="TLXnQsjZmKO0" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620523792022, "user_tz": -540, "elapsed": 5570, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/<KEY>7-lsWl19uA=s64", "userId": "10642792699665912885"}} outputId="b58a4e0b-2aa9-4d15-8050-26f7724b9f28"
run_command(work_dir=git_repo_dir, command='''
git pull
''')
# + [markdown] id="0_2pkjJtx4jd"
# #Root workspace directory下でのコマンド実行
# + id="d1cpdtzlvqCQ"
run_command(work_dir=root_ws_dir, command='''
ls
''')
# + [markdown] id="X5xFDIOqyKEM"
# # Github repository directory下でのコマンド実行
# + id="KAgk3KFpyNd7"
run_command(work_dir=git_repo_dir, command='''
git status
''')
|
github_command_execution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 3: Aggregation
# The unit of analysis can be changed without losing much of the data's value.
#
# ## 3-1 Computing counts and unique counts
# ### Count and unique count
# +
from load_data.data_loader import load_hotel_reserve
customer_tb, hotel_tb, reserve_tb = load_hotel_reserve()
import pandas as pd
import numpy as np
reserve_tb.head()
# +
# Not Awesome
# choose the aggregation unit with groupby, then get the row count with size()
rsv_cnt_tb = reserve_tb.groupby('hotel_id').size().reset_index()
rsv_cnt_tb.columns = ['hotel_id', 'rsv_cnt']
# size() -> nunique() here because nunique() excludes NaN
cus_cnt_tb = reserve_tb.groupby('hotel_id')['customer_id'].nunique().reset_index()
cus_cnt_tb.columns = ['hotel_id', 'cus_cnt']
rsv_cus_cnt_tb = pd.merge(rsv_cnt_tb, cus_cnt_tb, on='hotel_id')
rsv_cus_cnt_tb.head()
# -
# Awesome
# professionals use agg: one groupby for both aggregations
result = reserve_tb.groupby('hotel_id').agg({'reserve_id' : 'count', 'customer_id' : 'nunique'}).reset_index()
result.columns = ['hotel_id', 'rsv_cnt', 'cst_cnt']
result.head()
# ## 3-2 Computing sums
# ### Sum
# total reservation price per hotel, by number of guests
result = reserve_tb.groupby(['hotel_id', 'people_num'])['total_price'].sum().reset_index()
result = result.rename(columns={'total_price' : 'price_sum'})
result.head()
# ## Computing extrema and representative values
# ### Representative values
# max, min, mean, median and 20th percentile of the reservation price
# Awesome
# describe()
result = reserve_tb.groupby('hotel_id').agg({'total_price' : ['max', 'min', 'mean', 'median', lambda x: np.percentile(x, q=20)]}).reset_index()
result.columns = ['hotel_id', 'max', 'min', 'mean', 'median', 'price_20%']
result.head()
reserve_tb.describe()
# ## 3-4 ばらつき具合の算出
# ### 分散値と標準偏差値
# variance and standard deviation of the reservation price per hotel
result = reserve_tb.groupby('hotel_id').agg({'total_price' : ['var', 'std']}).reset_index()
# flatten the MultiIndex columns produced by agg(); fixes the 'prive_std' typo
result.columns = ['hotel_id', 'price_var', 'price_std']
# var/std are NaN for hotels with a single reservation; treat that as zero spread
result = result.fillna(0)
result.head()
# ## 3-5 Computing the mode
# ### Mode
# works well when values are first binned into categories (here: rounded to the nearest 1000)
reserve_tb['total_price'].round(-3).mode()
# ## 3-6 Computing ranks
# ### Numbering records in time order
reserve_tb['reserve_datetime'] = pd.to_datetime(reserve_tb['reserve_datetime'], format='%Y-%m-%d %H:%M:%S')
# per-customer sequence number in reservation-time order (method='first' breaks ties by row order)
reserve_tb['log_no'] = reserve_tb.groupby('customer_id')['reserve_datetime'].rank(ascending=True, method='first')
reserve_tb.head()
# ### Ranking
# +
# number of reservations per hotel
rsv_cnt_tb = reserve_tb.groupby('hotel_id').size().reset_index()
rsv_cnt_tb.columns = ['hotel_id', 'rsv_cnt']
# rank (descending; method='min' gives tied hotels the same, smallest rank)
rsv_cnt_tb['rsv_cnt_rank'] = rsv_cnt_tb['rsv_cnt'].rank(ascending=False, method='min')
rsv_cnt_tb.head()
# -
# -
|
udoooom/python/3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 6: Caesium-133 D Line
# Caesium is a heavy atom with hyperfine structure. Modelling of the <sup>133</sup>Cs D line is shown in this tutorial using the `LASED` library.
# + tags=[]
import LASED as las
import plotly.graph_objects as go
import time
import numpy as np
# -
# The data for Caesium can be accessed [here](https://steck.us/alkalidata/cesiumnumbers.1.6.pdf). The 6$^2$S$_{1/2}$ to the 6$^2$P$_{3/2}$ transition will be modelled here with F' = 4 to the F = 5 level as the resonant transition and all other terms will be detuned from this resonance. A level diagram of the system being modelled is shown below.
from IPython.display import Image
Image(filename = "LevelDiagrams/Cs-133-6S1_2to6P3_2.jpg")
# ## Setting up the System
# +
# 6^2S_{1/2} -> 6^2P_{3/2}
wavelength_cs = 852.356e-9 # Wavelength in m (852.356 nm; original comment said "nm")
w_e = las.angularFreq(wavelength_cs)
tau_cs = 30.473 # excited-state lifetime, in ns
I_cs = 7/2 # Nuclear spin of 133-Cs (original comment said "sodium"; 7/2 is caesium's)
PI = np.pi
# Energy Splittings
w1 = 9.192631770*2*PI # Splitting of 6^2S_{1/2}(F' = 3) -> (F' = 4) in Grad/s (Exact due to definition of the second)
w2 = 0.15121*2*PI # Splitting between 6^2P_{3/2} F = 2 and F = 3 in Grad/s
w3 = 0.20124*2*PI # Splitting between 6^2P_{3/2} F = 3 and F = 4 in Grad/s
w4 = 0.251*2*PI # Splitting between 6^2P_{3/2} F = 4 and F = 5 in Grad/s
# Detunings relative to the resonant F' = 4 -> F = 5 transition
w_Fp3 = -1*w1
w_F2 = w_e-(w4+w3+w2)
w_F3 = w_e-(w4+w3)
w_F4 = w_e-w4
w_F5 = w_e
# Create states
# 6^2S_{1/2} ground levels
Fp3 = las.generateSubStates(label_from = 1, w = w_Fp3, L = 0, S = 1/2, I = I_cs, F = 3)
Fp4 = las.generateSubStates(label_from = 8, w = 0, L = 0, S = 1/2, I = I_cs, F = 4)
# 6^2P_{3/2} excited levels (original comment said 5^2P_{3/2})
F2 = las.generateSubStates(label_from = 17, w = w_F2, L = 1, S = 1/2, I = I_cs, F = 2)
F3 = las.generateSubStates(label_from = 22, w = w_F3, L = 1, S = 1/2, I = I_cs, F = 3)
F4 = las.generateSubStates(label_from = 29, w = w_F4, L = 1, S = 1/2, I = I_cs, F = 4)
F5 = las.generateSubStates(label_from = 38, w = w_F5, L = 1, S = 1/2, I = I_cs, F = 5)
# Declare excited and ground states
G_cs = Fp3 + Fp4
E_cs = F2 + F3 + F4 + F5
# Laser parameters
intensity_cs = 50 # mW/mm^-2
Q_cs = [0] # polarisation components; 0 corresponds to the pi-excitation shown in the plot titles
# Simulation parameters
start_time = 0
stop_time = 500 # in ns
time_steps = 501
time_cs = np.linspace(start_time, stop_time, time_steps)
# -
# Create a `LaserAtomSystem` object and time evolve the system using `timeEvolution()`.
cs_system = las.LaserAtomSystem(E_cs, G_cs, tau_cs, Q_cs, wavelength_cs, laser_intensity = intensity_cs)
tic = time.perf_counter()
cs_system.timeEvolution(time_cs)
toc = time.perf_counter()
print(f"The code finished in {toc-tic:0.4f} seconds")
# ## Saving and Plotting
cs_system.saveToCSV(f"cs133piExcitationI={intensity_cs}.csv")
# +
def _plot_substate_populations(states, label, file_tag):
    """Plot |rho_ss(t)| for every substate in *states*, save the figure as a
    PNG under SavedPlots/, and return an IPython Image of it.

    This replaces six copy-pasted cells that differed only in the state
    list, the title label, and the output file name.
    """
    fig = go.Figure()
    for s in states:
        fig.add_trace(go.Scatter(x = time_cs,
                                 y = [abs(rho) for rho in cs_system.Rho_t(s, s)],
                                 name = f"m_F = {s.m}",
                                 mode = 'lines'))
    fig.update_layout(title = ("<sup>133</sup>Caesium 6<sup>2</sup>S<sub>1/2</sub> to "
                               "6<sup>2</sup>P<sub>3/2</sub> π-Excitation: Time Evolution of "
                               f"Population in the {label} Substates"),
                     xaxis_title = "Time (ns)",
                     yaxis_title = "Population",
                     font = dict(
                         size = 11))
    path = f"SavedPlots/{file_tag}I={intensity_cs}.png"
    fig.write_image(path)
    return Image(path)


# Same outputs as before: one saved figure per hyperfine level.
for _states, _label, _tag in [
    (F5, "F = 5", "CsF=5"),
    (F4, "F = 4", "CsF=4"),
    (F3, "F = 3", "CsF=3"),
    (F2, "F = 2", "CsF=2"),
    (Fp3, "F' = 3", "CsFp=3"),
    (Fp4, "F' = 4", "CsFp=4"),
]:
    _plot_substate_populations(_states, _label, _tag)
# -
|
docs/source/tutorials/Tutorial6-Caesium133DLine.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
import pandas as pd
from scipy.spatial import distance_matrix
import sys, os, Bio.PDB, warnings
pdb_list = Bio.PDB.PDBList()
pdb_parser = Bio.PDB.PDBParser()
data_dir = '../protein_pfam'
sys.path.append(data_dir)
from parse_pfam import parse_pfam
from Bio import BiopythonWarning
warnings.simplefilter('ignore', BiopythonWarning)
# %matplotlib inline
pfam, pdb_refs = parse_pfam(data_dir)
print 'total MSAs: %i, total PDB refs: %i' % (pfam.shape[0], pdb_refs.shape[0])
# Cas 9 family accesion code
ac = 'PF00186'
# store the family of interest in the 'fam' variable
fam = pfam.loc[ac]
#print 'size rank: %i of %i' % (pfam['size'].rank(ascending=False)[fam.name].astype(int), pfam.shape[0])
fam
# +
# local directory containing data for this MSA
fam_dir = os.path.join(data_dir, 'Pfam-A.full', fam.name)
# the residue symbols array that is the MSA
msa = np.load(os.path.join(fam_dir, 'msa.npy'))
# -
msa.shape
pfam[pfam.index == ac]
# # find pdb reference:
refs = pdb_refs[pdb_refs.index.str.contains(fam.name)]
refs.head()
n_refs = refs.shape[0]
print(n_refs)
for i in range(n_refs):
    ref = refs.iloc[i]
    #print(ref)
    # pdb sequence
    #seq = msa[:,ref.seq]
    seq = msa[:,ref.seq+1] # change j-1 --> j
    #print(seq)
    # drop alignment gaps before comparing lengths against the PDB chain
    gap_pos = seq == '-'
    seq_non_gap = seq[~gap_pos]
    #print(seq_non_gap.shape)
    #print(seq_non_gap)
    # fetch the structure and select the referenced chain from model 0
    pdb_file = pdb_list.retrieve_pdb_file(ref.pdb_id, pdir=fam_dir, file_format='pdb')
    chain = pdb_parser.get_structure(ref.pdb_id, pdb_file)[0][ref.chain]
    # coordinates of every atom in the chain (per atom, not per residue --
    # NOTE(review): so the length comparison below is only a rough check)
    coords = np.array([a.get_coord() for a in chain.get_atoms()])
    #print(coords.shape)
    #print(coords)
    coords_cut = coords[ref.pdb_start-1:ref.pdb_end]
    #print(coords_cut.shape)
    # 0 would mean the gap-free MSA sequence and the PDB span line up
    print(seq_non_gap.shape[0]-coords_cut.shape[0])
|
ref_code/1get_pdb_structure_lowercase-v2_seq+1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Formatting Plots
#
# ## About the Data
# In this notebook, we will be working with 2 datasets:
# - Facebook's stock price throughout 2018 (obtained using the [`stock_analysis` package](https://github.com/stefmolin/stock-analysis))
# - European Centre for Disease Prevention and Control's (ECDC) [daily number of new reported cases of COVID-19 by country worldwide dataset](https://www.ecdc.europa.eu/en/publications-data/download-todays-data-geographic-distribution-covid-19-cases-worldwide) collected on September 19, 2020 via [this link](https://opendata.ecdc.europa.eu/covid19/casedistribution/csv)
#
# ## Setup
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
fb = pd.read_csv(
    'data/fb_stock_prices_2018.csv', index_col='date', parse_dates=True
)
# ECDC data: parse the European-style dates, index by date, shorten the
# USA's country name, and keep the 8 months analyzed in this chapter.
covid = pd.read_csv('data/covid19_cases.csv').assign(
    date=lambda x: pd.to_datetime(x.dateRep, format='%d/%m/%Y')
).set_index('date').replace(
    'United_States_of_America', 'USA'
).sort_index()['2020-01-18':'2020-09-18']
# -
# -
# ## Titles and Axis Labels
# - `plt.suptitle()` adds a title to plots and subplots
# - `plt.title()` adds a title to a single plot. Note if you use subplots, it will only put the title on the last subplot, so you will need to use `plt.suptitle()`
# - `plt.xlabel()` labels the x-axis
# - `plt.ylabel()` labels the y-axis
fb.close.plot()
plt.title('FB Closing Price')
plt.xlabel('date')
plt.ylabel('price ($)')
# ### `plt.suptitle()` vs. `plt.title()`
# Check out what happens when we call `plt.title()` with subplots (and also `plt.ylabel()`):
fb.iloc[:,:4].plot(subplots=True, layout=(2, 2), figsize=(12, 5))
plt.title('Facebook 2018 Stock Data')
plt.ylabel('price ($)')
# Using `plt.suptitle()` will give the figure a title; for the y-axis labels, we need to use the `Axes` objects directly:
axes = fb.iloc[:,:4].plot(subplots=True, layout=(2, 2), figsize=(12, 5))
plt.suptitle('Facebook 2018 Stock Data')
for ax in axes.flatten():
ax.set_ylabel('price ($)')
# ## Legends
# Matplotlib makes it possible to control many aspects of how the legend looks including its appearance and location. Here is a sampling of commonly used parameters, which we will see throughout the book:
#
# | Parameter | Purpose |
# | --- | --- |
# | `loc` | Specify the location of the legend |
# | `bbox_to_anchor` | Used in conjunction with `loc` to specify legend location |
# | `ncol` | Set the number of columns the labels will be broken into, default is 1 |
# | `framealpha` | Control the transparency of the legend's background |
# | `title` | Give the legend a title |
#
# `plt.legend()` adds a legend to the plot. We can specify where to place it with the `loc` parameter:
fb.assign(
ma=lambda x: x.close.rolling(20).mean()
).plot(
y=['close', 'ma'],
title='FB closing price in 2018',
label=['closing price', '20D moving average'],
style=['-', '--']
)
plt.legend(loc='lower left')
plt.ylabel('price ($)')
# By default, `matplotlib` tries to find the best location for the legend, but we can override it as we see fit. Here are the available general locations, but note that you can always provide an (x, y) tuple of where to place the legend:
#
# | Location String | Location Code |
# | --- | --- |
# | `'best'` | `0` |
# | `'upper right'` | `1` |
# | `'upper left'` | `2` |
# | `'lower left'` | `3` |
# | `'lower right'` | `4` |
# | `'right'` | `5` |
# | `'center left'` | `6` |
# | `'center right'` | `7` |
# | `'lower center'` | `8` |
# | `'upper center'` | `9` |
# | `'center'` | `10` |
#
# Note there is also a `legend()` method on `Axes` objects. Let's use it along with the `framealpha` and `ncol` arguments to style a legend. We will take a look at the percentage of the world's new COVID-19 cases that occurred in Brazil, China, India, Italy, Spain, and the USA over the 8-month period from January 18, 2020 through September 18, 2020:
# +
# Daily new cases per country: one column per country, one row per date.
new_cases = covid.reset_index().pivot(
    index='date', columns='countriesAndTerritories', values='cases'
).fillna(0)

# Each country's share of the world's new cases per day.
# DataFrame.div(row_sums, axis=0) replaces the original nested-apply version,
# which recomputed the row-sum series once per column.
pct_new_cases = new_cases.div(new_cases.sum(axis=1), axis=0)[
    ['Italy', 'China', 'Spain', 'USA', 'India', 'Brazil']
].sort_index(axis=1).fillna(0)

ax = pct_new_cases.plot(
    figsize=(12, 7), style=['-'] * 3 + ['--', ':', '-.'],
    title='Percentage of the World\'s New COVID-19 Cases\n(source: ECDC)'
)

# Style the legend, axis labels, and hide the top/right spines.
ax.legend(title='Country', framealpha=0.5, ncol=2)
ax.set_xlabel('')
ax.set_ylabel('percentage of the world\'s COVID-19 cases')

for spine in ['top', 'right']:
    ax.spines[spine].set_visible(False)
# -
# ## Formatting Axes
# ### Specifying axis limits
# `plt.xlim()`/`plt.ylim()` and `Axes.set_xlim()`/`Axes.set_ylim()` can be used to specify the minimum and maximum values for the axis. Passing `None` will have `matplotlib` determine the limit.
# +
ax = pct_new_cases.plot(
figsize=(12, 7), style=['-'] * 3 + ['--', ':', '-.'],
title='Percentage of the World\'s New COVID-19 Cases\n(source: ECDC)'
)
ax.legend(framealpha=0.5, ncol=2)
ax.set_xlabel('')
ax.set_ylabel('percentage of the world\'s COVID-19 cases')
ax.set_ylim(0, None)
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
# -
# ### Formatting the Axis Ticks
# We can use `plt.xticks()` and `plt.yticks()` to provide tick labels and specify, which ticks to show. Here, we show every month on the same day of the month (the 18th):
# +
ax = pct_new_cases.plot(
figsize=(12, 7), style=['-'] * 3 + ['--', ':', '-.'],
title='Percentage of the World\'s New COVID-19 Cases\n(source: ECDC)'
)
tick_locs = covid.index[covid.index.day == 18].unique()
tick_labels = [loc.strftime('%b %d\n%Y') for loc in tick_locs]
plt.xticks(tick_locs, tick_labels)
ax.legend(framealpha=0.5, ncol=2)
ax.set_xlabel('')
ax.set_ylabel('percentage of the world\'s COVID-19 cases')
ax.set_ylim(0, None)
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
# -
# #### The `matplotlib.ticker` module
# ##### `PercentFormatter`
# We can use the `PercentFormatter` class to display the tick labels as percentages. We can specify the denominator (`xmax`) to use when calculating the percentages. This gets passed to the `set_major_formatter()` method of the `xaxis` or `yaxis` on the `Axes`.
# +
from matplotlib.ticker import PercentFormatter
ax = pct_new_cases.plot(
figsize=(12, 7), style=['-'] * 3 + ['--', ':', '-.'],
title='Percentage of the World\'s New COVID-19 Cases\n(source: ECDC)'
)
tick_locs = covid.index[covid.index.day == 18].unique()
tick_labels = [loc.strftime('%b %d\n%Y') for loc in tick_locs]
plt.xticks(tick_locs, tick_labels)
ax.legend(title='', framealpha=0.5, ncol=2)
ax.set_xlabel('')
ax.set_ylabel('percentage of the world\'s COVID-19 cases')
ax.set_ylim(0, None)
ax.yaxis.set_major_formatter(PercentFormatter(xmax=1))
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
# -
# ##### `EngFormatter`
# We can use the `EngFormatter` class to apply engineering notation to the ticks.
# +
from matplotlib.ticker import EngFormatter
ax = covid.query('continentExp != "Other"').groupby([
'continentExp', pd.Grouper(freq='1D')
]).cases.sum().unstack(0).apply('cumsum').plot(
style=['-', '-', '--', ':', '-.'],
title='Cumulative COVID-19 Cases per Continent\n(source: ECDC)'
)
ax.legend(title='', loc='center left')
ax.set(xlabel='', ylabel='total COVID-19 cases')
ax.yaxis.set_major_formatter(EngFormatter())
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
# -
# ##### `MultipleLocator`
# If we plot the daily new COVID-19 cases in New Zealand from April 18, 2020 through September 18, 2020, by default, `matplotlib` shows the *y*-axis in increments of 2.5, but we know that it isn't possible to have fractional cases:
# +
ax = new_cases.New_Zealand['2020-04-18':'2020-09-18'].plot(
title='Daily new COVID-19 cases in New Zealand\n(source: ECDC)'
)
ax.set(xlabel='', ylabel='new COVID-19 cases')
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
# -
# To make sure we only show integer values, we can use the `MultipleLocator` class. This will give ticks for all multiples of a number specified with the `base` parameter:
# +
from matplotlib.ticker import MultipleLocator
ax = new_cases.New_Zealand['2020-04-18':'2020-09-18'].plot(
title='Daily new COVID-19 cases in New Zealand\n(source: ECDC)'
)
ax.set(xlabel='', ylabel='new COVID-19 cases')
ax.yaxis.set_major_locator(MultipleLocator(base=3))
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
# -
# <hr>
# <div>
# <a href="./1-introduction_to_seaborn.ipynb">
# <button>← Previous Notebook</button>
# </a>
# <a href="./3-customizing_visualizations.ipynb">
# <button style="float: right;">Next Notebook →</button>
# </a>
# </div>
# <hr>
|
ch_06/2-formatting_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Softmax
# : 주로 분류(Classification)문제에서 사용되는 함수
# $$y_k = {exp(a_k) \over{\sum_{i=1}^n exp(a_i)}}$$
# +
# example: softmax by hand
import numpy as np  # needed here: numpy is not imported earlier in this notebook

a = np.array([0.3, 2.9, 4.0])
exp_a = np.exp(a)          # exponentiate each score
sum_exp_a = np.sum(exp_a)  # normalization constant
y = exp_a / sum_exp_a
print(y)  # output: [0.01821127 0.24519181 0.73659691]
# -
# softmax implementation (naive version; see the overflow-safe one below)
def softmax(a):
    """Map the score vector `a` to a probability distribution."""
    exponentials = np.exp(a)
    return exponentials / np.sum(exponentials)
# ### Problem : Overflow
# : if the values get too large they exceed the representable range and the result becomes nan
# example : overflow
a = np.array([1010, 1000, 990])
print(np.exp(a) / np.sum(np.exp(a))) # output: [nan nan nan]
# ### Solution : use the maximum of the input signal
# : subtract the maximum of the input from every element (the result is unchanged mathematically)
# example : solution
c = np.max(a) # extract the maximum
print(np.exp(a-c) / np.sum(np.exp(a-c)))
# output: [9.99954600e-01 4.53978686e-05 2.06106005e-09]
# overflow 반영 softmax
def softmax(a):
    """Numerically stable softmax.

    Subtracting max(a) before exponentiating leaves the result unchanged
    (the common factor exp(-max) cancels in the ratio) but prevents
    overflow for large inputs.
    """
    shifted = np.exp(a - np.max(a))
    return shifted / np.sum(shifted)
# ### softmax의 특징
# - softmax 출력값을 확률로 해석할 수 있다.
# - 그래서 문제를 확률적/통계적으로 대응할 수 있다.
# - 그러나 exp(x)가 **단조증가함수**이기 때문에 입력값의 대소관계는 변하지 않는다
# - **단조증가함수** : $a \le b$ 이면, $f(a) \le f(b)$
|
DeepLearning_from_the_bottom/3.Softmax.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Ex1
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
def brownianmotion(X,delta,num_move):
    """Simulate a 2-D Brownian motion starting at X until it leaves the unit circle.

    Parameters:
        X        : (x, y) start point, assumed inside the unit circle.
        delta    : time-step size; each increment is scaled by sqrt(delta).
        num_move : number of steps generated per batch of random draws.

    Returns:
        (xs, ys) : two lists with every visited position, INCLUDING the first
                   position outside the circle (kept on purpose so the caller
                   can draw the crossing segment).

    NOTE(review): uses the global NumPy RNG (non-deterministic unless seeded),
    and loops forever if the walk never exits the circle — statistically it
    exits almost surely, but there is no hard iteration cap.
    """
    x_init=X[0]
    y_init=X[1]
    # accumulated batches of positions (one array per batch)
    x_collet = []
    y_collet = []
    while (1):
        # draw num_move standard-normal increments per axis
        move_x = np.random.normal(0, 1, (num_move))
        move_y = np.random.normal(0, 1, (num_move))
        # cumulative sum gives the path; sqrt(delta) scales to the time step
        pos_x=(np.cumsum(move_x)*np.sqrt(delta))+x_init
        pos_y=(np.cumsum(move_y)*np.sqrt(delta)) +y_init
        # to judge if the point is out of ring
        for i in range(len(pos_x)):
            if pos_x[i]**2+pos_y[i]**2 > 1:
                # keep positions up to and including the first exterior point
                pos_x=pos_x[:i+1]
                pos_y=pos_y[:i+1]
                x_collet.append(pos_x)
                y_collet.append(pos_y)
                x_collet=(np.concatenate(x_collet))
                y_collet=(np.concatenate(y_collet))
                return list(x_collet),list(y_collet)
        # still inside the circle: continue the walk from the last position
        x_init=pos_x[-1]
        y_init=pos_y[-1]
        x_collet.append(pos_x)
        y_collet.append(pos_y)
# -
"""set up the parameter"""
delta=0.0001
X = (0.2,0.4)
num_move = 10
all_collet=[]
# +
plt.figure(figsize=(10,10.1),dpi=125)
plt.plot(X[0],X[1],'*',c='black',label='point start')
"""to indicate Brownian motion you wnat to show by n"""
for i in range(5):
all_collet.append(list(brownianmotion(X,delta,num_move)))
plt.scatter(all_collet[i][0],all_collet[i][1],s=1,label="motion %d" % (i+1))
plt.plot(all_collet[i][0][-2:],all_collet[i][1][-2:],'r') #draw interpolates linearly between the two last positions
"""to draw the ring,title,etc."""
_t = np.arange(0,7,0.1)
_x = np.cos(_t)
_y = np.sin(_t)
plt.plot(_x,_y,'g-')
plt.xlim(-1.1,1.1)
plt.ylim(-1.1,1.1)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Brownian motion')
plt.grid(True)
plt.legend()
plt.show()
# -
# ex2
# +
import cv2
import numpy as np
from matplotlib import pyplot as plt
import lowpassfilter2D as lp2d
# %matplotlib inline
"""read the image and put it in fonction in lowpassfilter2D"""
fc=25
img = cv2.imread('stinkbug.png', 0)
iimg=lp2d.filter_2d(img,fc)
# -
|
TP3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# name: python3
# ---
import pathlib
import os
import pandas as pd
import random
root_dir = '../../../data/data/Desc_dataset/'
style_enc_file = '../../../data/data/Desc_dataset/style_enc.csv'
# ## rename files by giving style index
# Rename every file in root_dir to a sequential '<m>.jpg' name and record a
# numeric style code (derived from the filename prefix before the last '_')
# in a CSV. WARNING: this is destructive — it renames files on disk.
root_path = pathlib.Path(root_dir)
idxs = []          # new sequential file ids
styles = []        # style code per file
style_enc = {}     # style name -> integer code (assigned in discovery order)
n = 1              # next style code to assign
m = 0              # running file counter (becomes the new file name)
for file in root_path.glob('*'):
    m += 1
    # style name = everything before the last underscore in the stem
    st_name = ''.join(file.stem.split("_")[:-1])
    ind = file.stem.split("_")[-1]  # NOTE(review): computed but never used
    if st_name not in style_enc:
        style_enc[st_name] = n
        n+=1
    # NOTE(review): renames while iterating the same directory's glob, and
    # assumes no existing file is already named '<m>.jpg' — a collision would
    # silently overwrite on POSIX. Confirm the directory layout before re-running.
    os.rename(file, os.path.join(file.parent, f'{m}.jpg'))
    idxs.append(m)
    styles.append(style_enc[st_name])
st_df = pd.DataFrame(data={'fname' : idxs, 'style_code' : styles})
st_df.to_csv(style_enc_file, index=False)
# ## build tf dataset and datapipeline
import tensorflow as tf
root_path = pathlib.Path(root_dir)
list_ds = tf.data.Dataset.list_files(str(root_path/'*.jpg'))
stenc_df = pd.read_csv(style_enc_file)['style_code'].tolist()
LEN = len(stenc_df)
LEN
list_ds.element_spec
for f in list_ds.take(5):
print(f.numpy())
def process_path(file_path):
    """Work-in-progress mapper: extract the numeric file stem from a path tensor.

    NOTE(review): this is leftover exploratory code — it returns
    ``type(cur_ind)`` (the Python type object, not the tensor) and is
    immediately shadowed by the redefinition of ``process_path`` below.
    """
    # take the file name (last path component) and strip the extension
    cur_ind = tf.strings.split(tf.strings.split(file_path, os.sep)[-1],'.')[0]
    # cur_style = stenc_df[cur_ind-1]
    # random_num = random.randint(0, LEN)
    # if random_num == int(cur_ind):
    #     random_num = random.randint(0, LEN)
    # sec_style = stenc_df[random_num-1]
    # cur_img = tf.io.read_file(file_path)
    #rand_path = tf.strings.join(inputs=tf.strings.split(file_path, os.sep)[:-1], separator=os.sep)
    #rand_img = tf.io.read_file(tf.strings.join(inputs=[*tf.strings.split(file_path, os.sep)[:-1],os.sep,f"{random_num}.jpg"], seperator=os.sep))
    #cur_ind = tf.strings.split(file_path, os.sep)[-1]
    return type(cur_ind)
def process_path(file_path):
    """Load the raw bytes of the image at ``file_path`` (tf.data map function).

    Returns the undecoded file contents as a string tensor.
    """
    # The previous version also computed a label from the path
    # (tf.strings.split(file_path, os.sep)[-2]) but never used it; the dead
    # computation has been removed.
    return tf.io.read_file(file_path)
# + tags=[]
sample_dt = list_ds.map(process_path)
for f in sample_dt.take(5):
print(f.numpy())
# -
|
src/support/notebooks/desc_pipeline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sistemas Inteligentes
#
# ## Laboratório 2 - Classificação
#
# Neste laboratório serão introduzidos: **Linear Discriminant Analysis (LDA)**, **Support Vector Machine (SVM)** e **Artificial Neural Networks (ANN)**.
# ### Identificação do Aluno
# #### Nome Completo
# <NAME>
# #### RA
# 11090115
import warnings
warnings.filterwarnings('ignore')
# ### Importação e preparação dos dados
# Importe o pacote **datasets** da biblioteca **sklearn**, então carregue o dataset de imagens de dígitos, utilizando a função **load_digits()** do pacote de datasets importado, já atribuindo-o à variável **dataset**.
#
# Tendo feito o processo adequadamente, a variável dataset conterá todo o dataset correspondente, inclusive com informações que não são de nosso interesse para esta atividade. Para selecionar apenas o que é de nosso interesse, atribua à variável X apenas o objeto images de dataset. Analogamente, atribua à variável y o objeto target.
#
# Ainda nesta célula, utilize a função reshape do próprio X para forçar um formato unidimensional para cada imagem dentro do array X, que, originalmente, contém várias imagens com seus pixels distribuidos bidimensionalmente.
# +
# Import the sklearn datasets package
import sklearn.datasets as datasets
# numpy for the reshape
import numpy as np

# Load the handwritten-digits dataset
digits_dataset = datasets.load_digits()
# Keep only the image data...
X = digits_dataset.images
# ...and the target labels
y = digits_dataset.target

# Flatten each 2-D (8x8) image into a 1-D vector of 64 pixels.
# BUG FIX: the original called np.reshape(X[i], -1, 1), which passes 1 as the
# positional 'order' argument — 'order' must be a string ('C'/'F'/'A'), so
# that call raises a TypeError. reshape(-1) is the intended flattening.
X_reshape = [img.reshape(-1) for img in X]
# -
# -
# #### Divisão dos dados em grupos para treinamento e teste.
# Importe e utilize a função **train_test_split** (do pacote **model_selection**, que percente à biblioteca **sklearn**) para dividir os dados de entrada e saída, **digits** e **digits_target**, respectivamente, separando-os em dados de treinamento e de testes para entrada e saída, ou seja, **X_train**, **X_test**, **y_train** e **y_test**.
#
# Separe 25% para testes utilizando o argumento *test_size* da própria função **train_test_split**.
# +
#importa o test split
from sklearn.model_selection import train_test_split
#divide os dados de entrada e saida
X_train, X_test, y_train, y_test = train_test_split(X_reshape, y, test_size=0.25)
# -
# ### Linear Discriminant Analysis (LDA)
# #### Treinamento
# Importe o classificador do LDA, que é uma função chamada **LinearDiscriminantAnalysis** do pacote **discriminant_analysis**, que faz parte da biblioteca **sklearn**. Então crie o classificador com o nome **LDA** atribundo a ele a função **LinearDiscriminantAnalysis**.
#
# Treine o classificador **LDA** por meio de sua própria função **fit()**. Os argumentos da função **fit()** são os dados de entrada e saída de treinamento, ou seja, **X_train** e **y_train**.
# +
#importa o LDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LinearDiscriminantAnalysis
# instancia o LDA
LDA = LinearDiscriminantAnalysis()
# treina o LDA
LDA.fit(X_train, y_train)
# -
# #### Classificação e Resultados
# Então, faça a classificação (também chamada de predição), utilizando a função **predict()** do próprio classificador **LDA**. O argumento da função é apenas a entrada de teste, ou seja, **X_test**. Atribua a saída dessa classificação à variável **y_pred_LDA**.
#
# Para analisar os resultados, precisamos utilizar algumas ferramentas e métricas. A biblioteca **sklearn** possui um grande conjunto de funções para essa finalidade em seu pacote **metrics**. Importe as funções **accuracy_score** e **confusion_matrix**, então imprima a acurácia do classificador lda e sua confusion matrix.
# +
#importa as funções de verificar como foi o treinamento
from sklearn.metrics import accuracy_score as accuracy_score
from sklearn.metrics import confusion_matrix as confusion_matrix
# testa o treinamento que foi feito no LDA
y_pred_LDA = LDA.predict(X_test)
# testa a acuracia do teste e exibe o resultado
print(accuracy_score(y_test, y_pred_LDA))
#testa a matriz de confusão
confusion_matrix(y_test, y_pred_LDA)
# -
# ### Support Vector Machine (SVM)
# #### Treinamento
# Importe a função **LinearSVC**, pertencente ao pacote **svm** da biblioteca **sklearn**, então crie o classificador chamado **SVM**, análogo ao que foi feito com o LDA anteriormente.
#
# Então, treine o classificador **SVM** utilizando a função **fit** com os dados de treinamento, **X_train** e **y_train**, respectivamente.
# +
#importa o SVM
from sklearn.svm import LinearSVC as LinearSVC
# instancia o SVM
SVM = LinearSVC()
# treina o SVM
SVM.fit(X_train, y_train)
# -
# #### Classificação e Resultados
# Faça a classificação dos dados de entrada para testes, **X_test**, utilizando a função **predict** do próprio **SVM** e armazene os resultados em **y_pred_SVM**.
#
# Na mesma célula, verifique a acurácia do classificador **SVM** por meio da função **accuracy_score** e mostre a matriz de confusão do classificador **SVM** utilizando a função **confusion_matrix**.
# +
# testa o treinamento que foi feito no SVM
y_pred_SVM = SVM.predict(X_test)
# testa a acuracia do teste e exibe o resultado
print(accuracy_score(y_test, y_pred_SVM))
#testa a matriz de confusão
confusion_matrix(y_test, y_pred_SVM)
# -
# ### Artificial Neural Networks (ANN)
# #### Treinamento
# Importe a função **MLPClassifier**, pertencente ao pacote **neural_network** da biblioteca **sklearn**, então crie o classificador **ANN** usando a função importada com os parâmetros e **hidden_layer_sizes=(50)**.
#
# Depois, treine o classificador usando os dados de entrada e saída de treinamento.
# +
#importa o mpl
from sklearn.neural_network import MLPClassifier as MLPClassifier
# instancia o ANN
ANN = MLPClassifier(hidden_layer_sizes=(50))
# treina o ANN
ANN.fit(X_train, y_train)
# -
# #### Classificação e Resultados
# Faça a predição com base nos dados de entrada de teste com o classificador **ANN**, atribundo as predições à variável **y_pred_ANN**. Então imprima a acurácia e a matriz de confusão dessa classificação.
# +
# testa o treinamento que foi feito no ANN
y_pred_ANN = ANN.predict(X_test)
# testa a acuracia do teste e exibe o resultado
print(accuracy_score(y_test, y_pred_ANN))
#testa a matriz de confusão
confusion_matrix(y_test, y_pred_ANN)
# -
# #### Análise para diferentes quantidades de neurônios
# Faça, iterativamente, com que seja treinada e testada uma rede neural de uma camada, utilizando uma quantidade crescente de neurônios, indo de 1 a 50. Armazene o valor da acurácia para cada iteração, então imprima as seguintes estatísticas sobre a acurácia: mínima, máxima, média, mediana, moda, variância e desvio padrão.
# +
#importa pacote para calcular a mediana
import statistics as statistics
import math, os, sys
#define função que vai somar os valores
def somar(valores):
    """Return the sum of the values in *valores*.

    Delegates to the built-in ``sum`` instead of a hand-rolled accumulator
    loop; returns 0 for an empty sequence, like the original.
    """
    return sum(valores)
def media(valores):
    """Return the arithmetic mean of *valores*.

    Raises ZeroDivisionError for an empty sequence (same as the original).
    Self-contained (no dependency on ``somar``) so it can be used and tested
    in isolation.
    """
    return sum(valores) / float(len(valores))
def variancia(valores):
    """Return the population variance of *valores* (divides by N, not N-1).

    Self-contained (mean computed inline instead of calling ``media``) so the
    function can be used and tested in isolation; results are identical to
    the original implementation.
    """
    n = float(len(valores))
    _media = sum(valores) / n
    # sum of squared deviations from the mean, divided by N
    return sum((v - _media) ** 2 for v in valores) / n
def desvio_padrao(valores):
    """Return the population standard deviation: sqrt of ``variancia(valores)``."""
    return math.sqrt( variancia(valores) )
# Sweep a single-hidden-layer MLP over 1..50 neurons and collect test accuracy.
arr_acuracia = []
for i in range(50):
    # re-instantiate the ANN with i+1 neurons in its single hidden layer
    ANN = MLPClassifier(hidden_layer_sizes=(i+1))
    # train on the training split
    ANN.fit(X_train, y_train)
    # predict on the held-out test split
    y_pred_ANN_loop = ANN.predict(X_test)
    # record the accuracy for this network size
    arr_acuracia.append(accuracy_score(y_test, y_pred_ANN_loop))
# Report summary statistics (index of the extreme values is also shown).
print("\nMáximo: {} - {}".format(max(arr_acuracia), arr_acuracia.index(max(arr_acuracia))))
print("\nMínimo: {} - {}".format(min(arr_acuracia), arr_acuracia.index(min(arr_acuracia))))
print("\nMédia: {}".format(media(arr_acuracia)))
# BUG FIX: with an even number of samples (50 here), statistics.median()
# returns the average of the two middle values, which is usually NOT an
# element of the list, so arr_acuracia.index(median) raised ValueError.
# Report the median value alone.
print("\nMediana: {}".format(statistics.median(arr_acuracia)))
# NOTE(review): on Python < 3.8, statistics.mode raises StatisticsError when
# there is no unique mode — guard this line if it matters for your runtime.
print("\nModa: {} - {}".format(statistics.mode(arr_acuracia), arr_acuracia.index(statistics.mode(arr_acuracia))))
print("\nVariância: {}".format(variancia(arr_acuracia)))
print("\nDesvio padrão: {}".format(desvio_padrao(arr_acuracia)))
# -
# Utilizando subplots, gere o gráfico da acurácia do classificador para cada quantidade de neurônios na camada da rede neural e o gráfico do erro, considerando como o erro a diferença entre as acurácias das últimas duas iterações para cada iteração.
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
params = {'figure.figsize': [10, 6],
'axes.labelsize': 16,
'axes.titlesize':18,
'font.size': 16,
'legend.fontsize': 12,
'xtick.labelsize': 12,
'ytick.labelsize': 12
}
plt.rcParams.update(params)
plt.plot(range(50), arr_acuracia)
# -
# ## Referências
# [Scikit-Learn - Datasets](http://scikit-learn.org/stable/datasets/index.html)
#
# [Matplotlib - Pyplot](https://matplotlib.org/api/pyplot_summary.html)
#
# [Numpy](https://docs.scipy.org/doc/numpy/reference/)
#
# [Scikit-Learn - Train, Test and Split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
#
# [Scikit-Learn - LinearDiscriminantAnalysis](http://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html)
#
# [Scikit-Learn - SVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC)
#
# [Scikit-Learn - MLPClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html#sklearn.neural_network.MLPClassifier)
#
# [Scikit-Learn - Accuracy Score](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html)
#
# [Scikit-Learn - Confusion Matrix](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html)
|
limitadas/sistemas-inteligentes/RICARDO SUYAMA com LUNEQUE JUNIOR e TITO CACO/q3-2018/lab2/.ipynb_checkpoints/Lab_2-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Number of Discordant Pairs Country
#
# A discordant pair is a pair of texts together with their predictions (e.g. a male-female pair, or names from two countries) such that the sentiment analysis produces different predictions for the two members.
# Example of a discordant pair by country:
#
# `<(name from Indonesia, prediction), (name from USA, prediction)>`
#
# `<(“Paijo is angry”, 1), (“John is angry”, 0)>`
import pandas as pd
import numpy as np
import math
import time
file_name = "occ"
df = pd.read_csv("occ-age/prepared/" + file_name + ".csv")
df["label"] = df["0"]
df = df.iloc[:,2:]
df
def read_txt(fpath):
    """Read one integer per line from *fpath* and return them as a list.

    Uses a ``with`` block so the file handle is closed even if a line fails
    to parse (the original left the file open on a parse error).
    """
    with open(fpath) as fh:
        return [int(line) for line in fh]
# +
output_dir = "occupation"
result_dir = "../result/" + output_dir + "/"
path = result_dir + "results_data.txt"
pred = read_txt(path)
print(len(pred))
# -
df["prediction"] = pred
df
# ### Use Groupby to Group the text by Template
df["original"] = df["original"].astype("category")
df["template_id"] = df["original"].cat.codes
gb = df.groupby("template_id")
gb.count()
len(gb.size())
# #### Create Discordant Pairs based on Country -> not possible, memory limitation
#
# You can create another discordant pairs from another column by changing the `identifier`.
#
# In case you want to calculate discordant pairs from gender, you can try `identifier = "gender"`
# +
start = time.time()
identifier = "occupation"
mutant_text_1 = []
mutant_text_2 = []
prediction_1 = []
prediction_2 = []
identifier_1 = []
identifier_2 = []
template = []
for i in range(len(gb.size())) :
data = gb.get_group(i)
dc = data.groupby(identifier)
for k1, v1 in dict(iter(dc)).items() :
for k2, v2 in dict(iter(dc)).items() :
if k1 != k2 :
for m_1, p_1, i_1, t in zip(v1["mutant"].values, v1["prediction"].values, v1[identifier].values, v1["template"].values) :
for m_2, p_2, i_2 in zip(v2["mutant"].values, v2["prediction"].values, v2[identifier].values) :
mutant_text_1.append(m_1)
prediction_1.append(p_1)
identifier_1.append(i_1)
mutant_text_2.append(m_2)
prediction_2.append(p_2)
identifier_2.append(i_2)
template.append(t)
end = time.time()
print("Execution time: ", end-start)
# +
dfcross = pd.DataFrame(data={"mutant_1" : mutant_text_1, "mutant_2" : mutant_text_2, "prediction_1": prediction_1, "prediction_2" : prediction_2, "identifier_1": identifier_1, "identifier_2" : identifier_2, "template": template})
dfcross
# -
dfcross["discordant"] = dfcross["prediction_1"] != dfcross["prediction_2"]
dfcross
# ### Number of Bias-uncovering Test Case
#
# We divide by 2, because we assume that pair (A,B) is the same with pair (B,A)
print("Number of Discordant Pairs: ", int(len(dfcross[dfcross["discordant"] == True])/2))
print("Discordant Pairs Rate: ", len(dfcross[dfcross["discordant"] == True]) / len(df) )
d = dfcross[dfcross["discordant"] == True]
d = d.drop(columns=["discordant"])
for id, rows in d.iloc[100:101].iterrows():
print()
print("Mutant 1:")
print(rows["mutant_1"])
print("Mutant 2:")
print(rows["mutant_2"])
print("Prediction 1:")
print(rows["prediction_1"])
print("Prediction 2:")
print(rows["prediction_2"])
print("Identifier 1:")
print(rows["identifier_1"])
print("Identifier 2:")
print(rows["identifier_2"])
print("Template:")
print(rows["template"])
# ## Get Number of Discordant Pairs for Each Template by Estimation
#
# A memory limitation prevents us from directly producing the roughly 240M pairs. Fortunately, the number of discordant pairs for each template can be calculated theoretically, without crossing the data to materialize all ~240M pairs. This avoids the memory issue.
#
# For each template, we will give an example of the male mutant and female mutant for user study
df
dft = df.iloc[:,[1,2,6]]
dft
dft = dft.drop_duplicates()
dft = dft.sort_values(by=["template_id"]).reset_index(drop=True)
dft
# ### Data crossing
# +
import time
start = time.time()
identifier = "occupation"
mutant_example = []
mutant_prediction_stat = []
key = []
for i in range(len(gb.size())) :
# for i in range(10) :
data = gb.get_group(i)
dc = data.groupby(identifier)
me = {} # mutant example
mp = {} # mutant prediction
key = []
for k, v in dict(iter(dc)).items() :
key.append(k)
is_first_instance = True
pos_counter = 0 # positive counter
neg_counter = 0 # negative counter
for m, p in zip(v["mutant"].values, v["prediction"].values) :
if is_first_instance :
me[k] = m
is_first_instance = False
if p == 1 :
pos_counter += 1
else :
neg_counter += 1
mp[k] = {"pos": pos_counter, "neg" : neg_counter}
mutant_example.append(me)
mutant_prediction_stat.append(mp)
end = time.time()
print("Execution time: ", end-start)
# -
len(mutant_example)
dft["mutant_example"] = mutant_example
dft["mutant_prediction_stat"] = mutant_prediction_stat
dft
# +
# Count bias-uncovering test cases (btc) and possible pairs per template,
# from the per-identifier positive/negative prediction counts in `mp`.
btcs = []
pairs = []
for mp in dft["mutant_prediction_stat"].values :
    if len(mp) > 0 :
        btc = 0
        pair = 0
        # BUG FIX: the original iterated the leftover `key` list from the
        # previous cell, i.e. the identifier keys of the LAST template only.
        # Templates with a different identifier set would raise KeyError or
        # be counted against the wrong keys. Iterate this template's own keys.
        keys = list(mp)
        for k1 in keys :
            for k2 in keys :
                if k1 != k2 :
                    # a pair is discordant when one side is positive and the other negative
                    btc += ((mp[k1]["pos"] * mp[k2]["neg"]) + (mp[k1]["neg"] * mp[k2]["pos"]))
                    pair += (mp[k1]["pos"] + mp[k1]["neg"]) * (mp[k2]["pos"] + mp[k2]["neg"])
        # (A,B) and (B,A) are both counted here; the notebook divides by 2 later
        btcs.append(btc)
        pairs.append(pair)
    else :
        btcs.append(0)
        pairs.append(0)
# -
dft["btc"] = btcs
dft["possible_pair"] = pairs
dft
# ### Number of Bias-uncovering Test Case
#
# We divide by 2, because we assume that pair (A,B) is the same with pair (B,A)
int(dft["btc"].sum() /2)
# ### BTC Rate
dft["btc"].sum() / dft["possible_pair"].sum()
# ### Get Data that Have number of BTC more than one
d = dft[dft["btc"] > 0]
d.head()
# ### Sort Data based on the number of BTC
d = d.sort_values(["btc", "template"], ascending=False)
d = d.reset_index(drop=True)
d
d.to_csv("occ-age/occ-btc.csv")
d.iloc[0]["mutant_prediction_stat"]
df.groupby("template_id").get_group(1279).to_csv("occ-age/selected-occ.csv")
dft
for x, i in zip(dft["original"], dft["template_id"]) :
if "I rented this film" in x :
print("template_id")
print(i)
|
codes/old-experiments/RQ1-BTC-Occupation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
# %load_ext autoreload
# %autoreload 2
from deeptracktorch.disp import *
from deeptracktorch.simg import *
from deeptracktorch.video import track_single_particle
from deeptracktorch.measures import radialcenter
from fastai.vision import *
from sklearn.metrics import mean_squared_error, mean_absolute_error
# # If training the network
# Set the experimental params
# +
#notest
from numpy.random import normal, uniform, choice, randint
from numpy import pi
exp_dist = {
'cx': lambda: normal(0., 2),
'cy': lambda: normal(0., 2),
'radius': lambda: uniform(2, 3),
'intensities': lambda: [uniform(.7, .9, 1), -uniform(.2, .3, 1)],
'bessel_orders': lambda: [1, 2] ,
'ellip_direction': lambda: uniform(-pi, pi),
'ellipticity': lambda: 1,
'size': lambda: 51,
'bkgd_level': lambda: uniform(.2, .5),
'gradient_intensity': lambda: uniform(0, .8),
'gradient_direction': lambda: uniform(-pi,pi),
'snr': lambda: uniform(5, 100),
}
# -
# Generate the image specs
#notest
df = image_specs_from_dist(exp_dist, 1000)
# Make a data loader and learner
# +
#notest
from deeptracktorch.models import DeepTrackNet
#data = MImageList.from_df(df).split_by_rand_pct(0.2).label_from_df(cols=['cx','cy','r'],label_cls = XYRList).databunch(bs=16)
#learner = Learner(data, DeepTrackNet)
# -
# Trainlearn.fit_one_cycle(4)
# +
#notest
#earner.fit_one_cycle(4)
# -
# # If using previously trained network
# +
#notest
#learner = load_learner('models/E2')
# -
#notest
### Define the video file to be tracked
#video_file_name = '../../DeepTrack 1.0/DeepTrack - Example 2 - Optically Trapped Particle Good.mp4'
# +
#notest
# take the predicted positions and compare to symtry method
# video_file_name = '../../DeepTrack 1.0/DeepTrack - Example 2 - Optically Trapped Particle Bad.mp4'
# estimators = {
# 'deeptrack':lambda image: learner.predict(npimg2tensor(image))[1].numpy(),
# 'symmetry':lambda image: np.array(radialcenter(image)[:3],dtype=np.float32),
# }
# predicted_positions = track_single_particle(
# video_file_name,
# estimators,use_cv2=True)
# -
#notest
# plt.plot(predicted_positions['deeptrack'][:,1])
# plt.plot(predicted_positions['symmetry'][:,1])
# +
#notest
# plt.hist(predicted_positions['deeptrack'][:,1],bins=25)
# +
#notest
# # !ffprobe -v error -select_streams v:0 -show_entries stream=nb_frames -of default=nokey=1:noprint_wrappers=1 "../../DeepTrack 1.0/DeepTrack - Example 2 - Optically Trapped Particle Good.mp4"
# +
#notest
# from PIL import Image, ImageDraw, ImageFont
# def annotate_video_with_predictions(video_file_name, predicted_positions):
# vinput = av.open(video_file_name)
# output = av.open(video_file_name[:-4]+'_a.mp4', mode='w')
# stream = output.add_stream('h264', '23.976')
# stream.width = 120
# stream.height = 120
# stream.pix_fmt = 'yuv420p'
# number_frames_to_be_tracked = len(predicted_positions)
# for i, frame in enumerate(vinput.decode(video=0)):
# if i >= number_frames_to_be_tracked:
# break
# image = frame.to_image()
# p=predicted_positions[i].numpy()*120/51
# # annotate
# draw=ImageDraw.Draw(image)
# draw.rectangle( [60+p[0]-1,60+p[1]-1,60+p[0]+1,60+p[1]+1], fill='#ff0')
# draw.point( [60+p[0],60+p[1]], fill='#000')
# del draw
# # write frame
# new_frame = av.VideoFrame.from_image(image)
# packet = stream.encode(new_frame)
# output.mux(packet)
# # flush
# packet = stream.encode(None)
# output.mux(packet)
# output.close()
# vinput.close()
# # save frame
# -
# +
# video_file_name = '../../DeepTrack 1.0/DeepTrack - Example 2 - Optically Trapped Particle Bad.mp4'
# predicted_positions = track_single_particle(video_file_name, learner)
# annotate_video_with_predictions(video_file_name, predicted_positions)
# +
#notest
# %%HTML
<video width="400" height="400" controls>
<source src="../../DeepTrack 1.0/DeepTrack - Example 6 - Vesicles SNR4 mid.mp4" type="video/mp4">
</video>
# +
# ### Visualize tracked frames (maximum 10 frames at a time)
# from deeptracktorch.video import show_tracked_frames_single_particle
# show_tracked_frames_single_particle(
# min(number_tracked_frames, 10),
# frames,
# predicted_positions)
# +
# import cv2
# fourcc = cv2.VideoWriter_fourcc("MJPG")
# out = cv2.VideoWriter('output.avi',fourcc, 20.0, (120,120))
# for frame,pos in zip(frames, predicted_positions):
# image = cv2.circle(frame, (int(pos[0]),int(pos[1])), radius=0, color=(0, 0, 255), thickness=-1)
# out.write(np.uint8(frame))
# out.release()
|
nbs/E2_Track1fromN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mass spec cori
# language: python
# name: mass_spec_cori
# ---
# +
import os
import pandas as pd
import requests
import json
import requests
import multiprocessing as mp
from random import randint
from time import sleep
from rdkit import Chem
import sys
sys.path.insert(0,'/global/homes/b/bpb/repos/metatlas')
# from metatlas import metatlas_objects as metob
# -
# df = pd.read_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/unique_compounds.pkl')
df = pd.read_csv('/project/projectdirs/metatlas/projects/unique_compounds.csv.gz')
df.rename(columns = {'monoisotopoic_mw':'monoisotopic_mw'},inplace=True)
# +
# all_compounds = metob.retrieve('Compounds',username = '*')
# all_compounds_df = metob.to_dataframe(all_compounds)
# all_compounds_df.keys()
# +
# df = pd.read_pickle('/project/projectdirs/openmsi/projects/ben_run_pactolus/unique_compounds.pkl')
# # df = pd.read_csv('/project/projectdirs/openmsi/projects/compound_data/jgi_molecules/new_jgi_compounds.csv')
# df.rename(columns = {'monoisotopoic_mw':'monoisotopic_mw'},inplace=True)
# -
# `DataFrame.convert_objects` was deprecated in pandas 0.17 and removed in
# 0.23; `apply(pd.to_numeric, errors='ignore')` is the documented
# replacement for "convert the columns that can be converted to numeric,
# leave the rest unchanged".
df = df.apply(pd.to_numeric, errors='ignore')
df.keys()
inchi_keys = df[(~ pd.isnull(df.inchi_key))].inchi_key
out_dir = '/project/projectdirs/metatlas/projects/pubchem_info/'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# # remove those that have already been done
import glob
files = glob.glob(os.path.join(out_dir,'*.json'))
done_inchi_key = []
for f in files:
done_inchi_key.append(os.path.basename(f).split('.')[0])
inchi_keys = list(set(inchi_keys) - set(done_inchi_key))
len(inchi_keys)
# chunks = [inchi_keys[x:x+1000] for x in xrange(0, len(inchi_keys), 1000)]
# +
def write_pubchem_info_to_file(ik):
    """Fetch the PubChem record for InChIKey *ik* and cache it as JSON on disk.

    Tries the 3-D record first; PubChem answers
    {"Fault": {... "PUGREST.NotFound"}} when no 3-D data exists (or the key
    is unknown), in which case we fall back to the default (2-D) record.
    The result is written to <out_dir>/<ik>.json; if both queries fail, the
    file contains the literal text 'ERROR' so the key is skipped on re-runs.

    Depends on the module-level ``out_dir`` and performs network I/O.
    """
    fname = os.path.join(out_dir, ik + '.json')
    url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchikey/%s/json?record_type=3d' % ik
    d = requests.get(url).json()
    if 'Fault' in d:
        # no 3-D record (or unknown InChIKey): fall back to the 2-D record
        url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchikey/%s/json' % ik
        d = requests.get(url).json()
    # BUG FIX: the original only wrote a file inside the Fault branch, so a
    # key whose 3-D query SUCCEEDED was never cached and was re-queried on
    # every run. Write the record (or an ERROR marker) unconditionally.
    if 'Fault' in d:
        with open(fname, 'w') as fid:
            fid.write('ERROR')
    else:
        with open(fname, 'w') as fid:
            json.dump(d, fid)
# -
for ik in inchi_keys:
write_pubchem_info_to_file(ik)
# +
# pool = mp.Pool(processes=20)
# pool.map(write_pubchem_info_to_file, inchi_keys)
# -
# ik = 'JAISCAWSAHCRJE-GJKQCQSZSA-N' #this one is only 2d in pubchem
# ik = 'PARFVHOUOBIHJG-GSHUGGBRSA-N' #this one has 2d and 3d in pubchem
# ik = "NZPACTGCRWDXCJ-UHFFFAOYSA-N" #this one is not in pubchem
# Manual probe of a single InChIKey: try the 3-D record, fall back to 2-D
# when PubChem reports a Fault.
ik = 'AMVODTGMYSRMNP-GNIMZFFESA-N' #this one is messed up for some reason
url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchikey/%s/json?record_type=3d'%ik
# BUG FIX: the bare `print url` statements were Python-2-only syntax; the
# parenthesized form below is valid on both Python 2 and Python 3.
print(url)
response = requests.get(url)
d = response.json()
print(d.keys())
if 'Fault' in d.keys():
    url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchikey/%s/json'%ik
    print(url)
    response = requests.get(url)
    d = response.json()
    print(d.keys())
# +
# response.json()
# +
# url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/%s/json'%'Baumin'
# response = requests.get(url)
# response.json()
# -
|
metatlas/interfaces/compounds/write_pubchem_info_to_file.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# # Basic DataFrame manipulation
#
# Data source information is [here](https://github.com/HeardLibrary/digital-scholarship/tree/master/data/codegraf)
#
# Load Excel spreadsheet into DataFrame
url = 'https://github.com/HeardLibrary/digital-scholarship/raw/master/data/codegraf/co2_state_2016_sector.xlsx'
state_co2_sector = pd.read_excel(url)
# Examine contents of DataFrame
state_co2_sector.head()
state_co2_sector.tail()
# ## Setting the row index
#
# The `.set_index()` method changes one of the columns into the row index.
#
# The `.reset_index()` method changes a row index into a regular column.
# Set the State column as the index
state_co2_sector.set_index('State')
# What happened to the index ???
state_co2_sector.tail()
# Create a new view of the DataFrame
new_df = state_co2_sector.set_index('State')
print(new_df.head())
print()
print(state_co2_sector.head())
# Use the `inplace` argument to change the source DataFrame (no assignment)
state_co2_sector.set_index('State', inplace=True)
state_co2_sector.head()
# ## Removing rows and columns
#
# `.drop()` defaults to rows
state_co2_sector.tail()
state_co2_sector.drop('Total').tail()
# .drop() can be a list
state_co2_sector.drop(['Virginia', 'West Virginia', 'Wyoming']).tail()
# Use inplace argument to change the source table
state_co2_sector.drop('Total', inplace=True)
state_co2_sector.tail()
# Use `axis` argument to drop columns
state_co2_sector.drop('Total', axis='columns').head()
# ## Dealing with missing data
#
#
url = 'https://github.com/HeardLibrary/digital-scholarship/raw/master/data/gis/wg/Metro_Nashville_Schools.csv'
schools = pd.read_csv(url)
schools.head()
# In some cases, cells were empty because the group wasn't represented (i.e. there were zero students). In that case, those `NaN` values should be zeros.
#
# The first argument of the `.fillna()` method can be a single value if it applies to the entire table, or a dictionary if it applies only to certain columns.
schools.fillna({'Native Hawaiian or Other Pacific Islander': 0}, inplace=True)
schools.head()
# In other cases, cells were empty because that column didn't apply to that kind of school (e.g. high schools don't have PreK students). The `.dropna()` method can be used to skip rows with any `NaN` values, but that won't work if you only care about certain columns. In that case, we can filter rows using the `.notnull()` method. The `.isnull()` method can be used to select only rows that have `NaN` valued for a column.
schools[schools['Grade PreK 3yrs'].notnull()]
# ## Sorting rows
#
# Load state CO2 emissions by fuel spreadsheet
url = 'https://github.com/HeardLibrary/digital-scholarship/raw/master/data/codegraf/co2_state_2016_fuel.xlsx'
state_co2_fuel = pd.read_excel(url)
# Set the State column as the row index
state_co2_fuel.set_index('State', inplace=True)
state_co2_fuel.tail()
# Remove the total row
state_co2_fuel.drop('Total', inplace=True)
state_co2_fuel.tail()
# Sort ascending
state_co2_fuel.sort_values(by='Total mmt').head()
# Sort descending, do inplace to modify source table
state_co2_fuel.sort_values(by='Total mmt', ascending=False, inplace=True)
state_co2_fuel.head()
# ## Slicing columns and rows
#
# To slice using labels, need to use the `.loc()` method. To slice columns, we need to specify both indices, with "all rows" (`:`) selected as the first index.
#
# Recall that slicing with labels is inclusive of last label selected.
# Create a slice (view) with only the fractions
state_co2_fuel_fractions = state_co2_fuel.loc[:, 'Coal fraction': 'Natural Gas fraction']
state_co2_fuel_fractions.head()
# To slice rows, only the first index needs to be specified. For integer indices, use the `.iloc()` method.
# Create a slice with only the top four states
top_state_co2_fuel = state_co2_fuel.iloc[:4]
# Note that included rows are 0, 1, 2, and 3 (but not 4).
top_state_co2_fuel
# Combine both slicing operations at once.
top_state_co2_fuel_fraction = state_co2_fuel.iloc[:4].loc[:, 'Coal fraction': 'Natural Gas fraction']
top_state_co2_fuel_fraction
# # Selecting data
#
# Units are million metric tons
url = 'https://github.com/HeardLibrary/digital-scholarship/raw/master/data/codegraf/co2_data.xlsx'
state_co2 = pd.read_excel(url)
state_co2.head(15)
# Performing a boolean operation on a column generates a series of booleans whose index matches the DataFrame rows
state_co2.State=='Alabama'
# The boolean series can be used to filter a subset of rows in the DataFrame.
#
# Notice that the indices for the rows carry through in the selection.
state_co2[state_co2.State=='Alaska']
state_co2[state_co2['Sector']=='Industrial'].head()
# You can assign the selection to a named view (but remember that indices are maintained).
state_co2_industrial = state_co2[state_co2['Sector']=='Industrial']
state_co2_industrial.head()
|
code/codegraf/009/009a.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2iOggnzMbOGl" colab_type="text"
# # Pre-processing and analysis of mixed-species single-cell RNA-seq data with kallisto|bustools.
#
# In this notebook, we will perform pre-processing and analysis of [10x Genomics 1k 1:1 mixture of fresh frozen human and mouse cells](https://support.10xgenomics.com/single-cell-gene-expression/datasets/3.0.2/1k_hgmm_v3) using the **kallisto | bustools** workflow, implemented with a wrapper called `kb`. It was developed by <NAME>, <NAME>, and <NAME>.
# + id="du0Ne17KbHpX" colab_type="code" outputId="3b3d2bf0-ed5c-477a-a838-4d8bbc9852eb" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !date
# + [markdown] id="FNePDFbdbpKt" colab_type="text"
# ## Pre-processing
# + [markdown] id="6vLxz_2YbqbC" colab_type="text"
# ### Download the data
#
# __Note:__ We use the `-O` option for `wget` to rename the files to easily identify them.
# + id="OHOVj4czboE9" colab_type="code" outputId="291ce862-3b84-4842-bc0e-e2090cd01002" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %%time
# !wget https://caltech.box.com/shared/static/8oeuskecfr9ujlufqj3b7frj74rxfzcc.txt -O checksums.txt
# !wget https://caltech.box.com/shared/static/ags4jxbqrceuqewb0zy7kyuuggazqb0j.gz -O 1k_hgmm_v3_S1_L001_R1_001.fastq.gz
# !wget https://caltech.box.com/shared/static/39tknal6wm4lhvozu6bf6vczb475bnuu.gz -O 1k_hgmm_v3_S1_L001_R2_001.fastq.gz
# !wget https://caltech.box.com/shared/static/x2hwq2q3weuggtffjfgd1e8a1m1y7wj9.gz -O 1k_hgmm_v3_S1_L002_R1_001.fastq.gz
# !wget https://caltech.box.com/shared/static/0g7lnuieg8jxlxswrssdtz809gus75ek.gz -O 1k_hgmm_v3_S1_L002_R2_001.fastq.gz
# !wget https://caltech.box.com/shared/static/0avmybuxqcw8haa1hf0n72oyb8zriiuu.gz -O 1k_hgmm_v3_S1_L003_R1_001.fastq.gz
# !wget https://caltech.box.com/shared/static/hp10z2yr8u3lbzoj1qflz83r2v9ohs6q.gz -O 1k_hgmm_v3_S1_L003_R2_001.fastq.gz
# !wget https://caltech.box.com/shared/static/fx8fduedje53dvf3xixyyaqzugn7yy85.gz -O 1k_hgmm_v3_S1_L004_R1_001.fastq.gz
# !wget https://caltech.box.com/shared/static/lpt6uzmueh1l2vx71nvsdj3pwqh8z3ak.gz -O 1k_hgmm_v3_S1_L004_R2_001.fastq.gz
# + [markdown] id="gJJkVVxIcf38" colab_type="text"
# Then, we verify the integrity of the files we downloaded to make sure they were not corrupted during the download.
# + id="oohtwHDLcd3A" colab_type="code" outputId="c7a16c94-893a-4357-fabe-fc9585319c96" colab={"base_uri": "https://localhost:8080/", "height": 153}
# !md5sum -c checksums.txt --ignore-missing
# + [markdown] id="WkYC8XN8ci_3" colab_type="text"
# ### Install `kb`
#
# Install `kb` for running the kallisto|bustools workflow.
# + id="n5PmFRWKchjW" colab_type="code" outputId="441bf958-e83e-46fd-8034-01422bdfcaf0" colab={"base_uri": "https://localhost:8080/", "height": 970}
# !pip install git+https://github.com/pachterlab/kb_python@count-kite
# + [markdown] id="Afdv-S9yGTLv" colab_type="text"
# ### Download human and mouse reference files
#
# We will download the following files from Ensembl:
# * Mouse genome (FASTA)
# * Mouse genome annotations (GTF)
# * Human genome (FASTA)
# * Human genome annotations (GTF)
# + id="WxlR6CuMcl3N" colab_type="code" outputId="b8f83233-c330-446c-dade-8a60f958fa2c" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %%time
# !wget ftp://ftp.ensembl.org/pub/release-98/fasta/mus_musculus/dna/Mus_musculus.GRCm38.dna.primary_assembly.fa.gz
# !wget ftp://ftp.ensembl.org/pub/release-98/gtf/mus_musculus/Mus_musculus.GRCm38.98.gtf.gz
# !wget ftp://ftp.ensembl.org/pub/release-98/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
# !wget ftp://ftp.ensembl.org/pub/release-98/gtf/homo_sapiens/Homo_sapiens.GRCh38.98.gtf.gz
# + [markdown] id="rjCdMN11HB0t" colab_type="text"
# ### Build the mixed species index
#
# `kb` can build a single transcriptome index with multiple references. The FASTAs and GTFs must be passed in as a comma-separated list.
#
# __Note__: Because Google Colab offers limited RAM, we split the index into 4 parts.
# + id="D05OWSFdG8lR" colab_type="code" outputId="7a747c56-3eee-4bfe-9299-021d3945f261" colab={"base_uri": "https://localhost:8080/", "height": 496}
# %%time
# !kb ref -i mixed_index.idx -g mixed_t2g.txt -f1 mixed_cdna.fa -n 4 \
# Mus_musculus.GRCm38.dna.primary_assembly.fa.gz,Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz \
# Mus_musculus.GRCm38.98.gtf.gz,Homo_sapiens.GRCh38.98.gtf.gz
# + [markdown] id="IayU6CNiHlfi" colab_type="text"
# ### Generate an RNA count matrix in H5AD format
#
# The following command will generate an RNA count matrix of cells (rows) by genes (columns) in H5AD format, which is a binary format used to store [Anndata](https://anndata.readthedocs.io/en/stable/) objects. Notice we are providing the index and transcript-to-gene mapping we generated in the previous step to the `-i` and `-g` arguments respectively. Also, these reads were generated with the 10x Genomics Chromium Single Cell v3 Chemistry, hence the `-x 10xv3` argument. To view other supported technologies, run `kb --list`.
#
# __Note:__ If you would like a Loom file instead, replace the `--h5ad` flag with `--loom`. If you want to use the raw matrix output by `kb` instead of their H5AD or Loom converted files, omit these flags.
# + id="MbwwzFexHeXs" colab_type="code" outputId="40986e18-172d-4082-b006-c6a903a39f18" colab={"base_uri": "https://localhost:8080/", "height": 986}
# %%time
# !kb count -i mixed_index.idx.0,mixed_index.idx.1,mixed_index.idx.2,mixed_index.idx.3 \
# -g mixed_t2g.txt -x 10xv3 -o output --h5ad -t 2 \
# 1k_hgmm_v3_S1_L001_R1_001.fastq.gz 1k_hgmm_v3_S1_L001_R2_001.fastq.gz \
# 1k_hgmm_v3_S1_L002_R1_001.fastq.gz 1k_hgmm_v3_S1_L002_R2_001.fastq.gz \
# 1k_hgmm_v3_S1_L003_R1_001.fastq.gz 1k_hgmm_v3_S1_L003_R2_001.fastq.gz \
# 1k_hgmm_v3_S1_L004_R1_001.fastq.gz 1k_hgmm_v3_S1_L004_R2_001.fastq.gz
# + [markdown] id="60VGWSaPKRHM" colab_type="text"
# ## Analysis
#
# See [this notebook](https://github.com/pachterlab/MBGBLHGP_2019/blob/master/Supplementary_Figure_6_7/analysis/hgmm10k_v3_single_gene.Rmd) for how to process and load count matrices for a species mixing experiment.
# + id="FMP2f8wnKPyw" colab_type="code" colab={}
|
docs/tutorials/kb_species_mixing/python/kb_species_mixing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import KNeighborsClassifier
import sklearn.metrics as metrics
from sklearn.metrics import multilabel_confusion_matrix
import pandas as pd
import numpy as np
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
t_label = pd.read_csv('test_label.csv')
#train.head()
a_test = np.array(t_label)
a_test
length = train.Tweets.str.len()
length.mean(), length.std(), length.max()
length.hist();
trait_cols = [ 'Openness', 'Conscientiousness', 'Extraversion', 'Agreeableness', 'Neuroticism']
train['none'] = 1-train[trait_cols].max(axis=1)
train.describe()
len(train),len(test)
TWEET = 'Tweets'
train[TWEET].fillna("unknown", inplace=True)
test[TWEET].fillna("unknown", inplace=True)
n = train.shape[0]
vec = CountVectorizer(ngram_range=(1,2), min_df=3, max_df=0.9, strip_accents='unicode')
train_term_doc = vec.fit_transform(train[TWEET])
test_term_doc = vec.transform(test[TWEET])
train_term_doc, test_term_doc
def pr(y_i, y, x=None):
    """Class-conditional feature counts with Laplace smoothing.

    Parameters
    ----------
    y_i : int
        Class value (0 or 1) to condition on.
    y : array-like of shape (n_samples,)
        Binary labels aligned with the rows of the feature matrix.
    x : matrix of shape (n_samples, n_features), optional
        Term-document matrix. Defaults to the module-level ``train_x``
        for backward compatibility with the original two-argument calls.

    Returns
    -------
    Smoothed per-feature vector proportional to P(feature | y == y_i).
    """
    if x is None:
        x = train_x  # fall back to the notebook's global feature matrix
    mask = (y == y_i)
    p = x[mask].sum(0)
    # +1 Laplace smoothing avoids zeros for features unseen in this class.
    return (p + 1) / (mask.sum() + 1)
train_x = train_term_doc
test_x = test_term_doc
def get_model(y):
    """Fit a 5-NN classifier on NB-log-ratio-scaled features.

    Returns the fitted model together with the per-feature log-count
    ratio ``r`` so callers can apply the same scaling at predict time.
    """
    labels = y.values
    # Per-feature log ratio between positive- and negative-class counts.
    log_ratio = np.log(pr(1, labels) / pr(0, labels))
    scaled_features = train_x.multiply(log_ratio)
    knn = KNeighborsClassifier(n_neighbors=5)
    fitted = knn.fit(scaled_features, labels)
    return fitted, log_ratio
# +
predicts = np.zeros((len(test), len(trait_cols)))
for i, t in enumerate(trait_cols):
print('fit', t)
m, r = get_model(train[t])
predicts[:,i] = m.predict_proba(test_x.multiply(r))[:,1]
# -
# Round predicted probabilities to hard 0/1 labels for multi-label scoring.
preds = np.rint(predicts)
# NOTE(review): the `average=` strategy differs across the next four metrics
# (macro / micro / weighted / macro) — presumably deliberate for comparison,
# but confirm; scores are not directly comparable across averaging modes.
print("Hamming_loss:\t", metrics.hamming_loss(a_test, preds))
print("Precision:\t", metrics.precision_score(a_test, preds, average='macro'))
print("Recall:\t", metrics.recall_score(a_test, preds, average='micro'))
print("F1:\t", metrics.f1_score(a_test, preds, average='weighted'))
print("F_beta:\t", metrics.fbeta_score(a_test, preds, average='macro', beta=0.5))
# Subset accuracy: a sample counts as correct only if ALL trait labels match.
accuracy = metrics.accuracy_score(a_test, preds)
print("The accuracy is " + str(accuracy *100) + "%")
# One 2x2 confusion matrix per trait label.
multilabel_confusion_matrix(a_test, preds)
result = pd.DataFrame(predicts, columns = trait_cols)
result.to_csv('result.csv', index=False)
|
40437535/BoW_KNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="hKSOePcsFseP"
from PIL import Image
import numpy as np
import cv2
# + id="h640KbPZF_3F" outputId="d751b6e3-a688-43d6-9abe-ec54d759a269" colab={"base_uri": "https://localhost:8080/", "height": 272}
# !pip install ImageHash
# + id="5vV5tD3xFseU"
import imagehash
#import scipy.spatial
from scipy.spatial import distance
# + id="ZuXUHUfyGujZ"
# !wget https://raw.githubusercontent.com/duybluemind1988/Mastering-Computer-Vision-with-TensorFlow-2.0/master/Chapter01/car1.png
# !wget https://raw.githubusercontent.com/duybluemind1988/Mastering-Computer-Vision-with-TensorFlow-2.0/master/Chapter01/car2.png
# !wget https://raw.githubusercontent.com/duybluemind1988/Mastering-Computer-Vision-with-TensorFlow-2.0/master/Chapter01/car3.png
# + id="gG1uiIX7FseX" outputId="a97c8f91-0c79-4e20-d7bd-5fe562a2d238" colab={"base_uri": "https://localhost:8080/", "height": 34}
hash1 = imagehash.phash(Image.open('car1.png'))
hash2 = imagehash.phash(Image.open('car2.png'))
hash3 = imagehash.phash(Image.open('car3.png'))
print (hash1, hash2, hash3)
# + id="CkMKJDikFsec" outputId="4f674cc3-c734-4115-a406-188403f48f25" colab={"base_uri": "https://localhost:8080/", "height": 34}
hs1 = str(hash1)
hs2 = str(hash2)
hs3 = str(hash3)
print (hs1, hs2, hs3)
# + id="JEodQUFwFseh"
# we are doing a string compare to find difference
def hamming_distance(h1, h2):
    """Count the positions at which two hash strings differ.

    Iterates over the indices of ``h1``, so ``h2`` must be at least as
    long as ``h1`` (same contract as the original index-by-index compare).
    """
    return sum(1 for idx in range(len(h1)) if h1[idx] != h2[idx])
# + id="6UgggttmFsel" outputId="93117cfb-c3f0-47fc-f1d0-0b78b6059ecc" colab={"base_uri": "https://localhost:8080/", "height": 85}
#print (hamming_distance(hash1,hash2)) - this needs to be commented out to avoid error: object of type 'ImageHash' has no len()
# so hash value is converted to string first
print ("hs1-hs2",hamming_distance(hs1,hs2))
print ("hs1-hs3",hamming_distance(hs1,hs3))
print ("hs2-hs3",hamming_distance(hs2,hs3))
print ("hs2-hs2",hamming_distance(hs2,hs2))
# + id="v4D6EFS1Fseo" outputId="8fc91675-95d3-43ab-a7bd-63abf2bdc4ae" colab={"base_uri": "https://localhost:8080/", "height": 34}
#note the distance.hamming function from scipy.spatial always return 1
print(distance.hamming(hash1, hash2))
# + id="mtTFwg-RFser" outputId="95b8ac9f-bc42-4aa2-b4a4-82b913ecbb58" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(distance.hamming(hs1, hs2))
# + id="8B7oIDAzFseu"
|
Chapter01/Chapter1_hamming_distance.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import print_function
import numpy
import os
from lsst.sims.catalogs.db import fileDBObject
# +
# Remove any stale database file so fileDBObject builds a fresh one from CSV.
dbName = 'myFakeDatabase.db'
if os.path.exists(dbName):
    os.unlink(dbName)
# Ingest hostedSN.csv into table 'testSN' of the new database, using the
# 'id' column as the unique key for catalog lookups.
db = fileDBObject('hostedSN.csv', runtable='testSN',
                 database=dbName, idColKey='id')
# -
# !du -h myFakeDatabase.db
# !ls -ltr hostedSN.csv
# Show how the CSV columns were mapped to database columns.
print(db.show_mapped_columns())
# Run a raw SQL query against the ingested table and print each row.
query = 'select snra from testSN'
results = db.execute_arbitrary(query)
for line in results:
    print(line)
|
Notebooks/fileDBObject.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cheat Sheet: Writing Python 2-3 compatible code
# - **Copyright (c):** 2013-2019 Python Charmers Pty Ltd, Australia.
# - **Author:** <NAME>.
# - **Licence:** Creative Commons Attribution.
#
# A PDF version is here: http://python-future.org/compatible_idioms.pdf
#
# This notebook shows you idioms for writing future-proof code that is compatible with both versions of Python: 2 and 3. It accompanies <NAME>'s talk at PyCon AU 2014, "Writing 2/3 compatible code". (The video is here: <http://www.youtube.com/watch?v=KOqk8j11aAI&t=10m14s>.)
#
# Minimum versions:
#
# - Python 2: 2.6+
# - Python 3: 3.3+
# ## Setup
# The imports below refer to these ``pip``-installable packages on PyPI:
#
# import future # pip install future
# import builtins # pip install future
# import past # pip install future
# import six # pip install six
#
# The following scripts are also ``pip``-installable:
#
# futurize # pip install future
# pasteurize # pip install future
#
# See http://python-future.org and https://pythonhosted.org/six/ for more information.
# ## Essential syntax differences
# ### print
# Python 2 only:
print 'Hello'
# Python 2 and 3:
print('Hello')
# To print multiple strings, import ``print_function`` to prevent Py2 from interpreting it as a tuple:
# Python 2 only:
print 'Hello', 'Guido'
# +
# Python 2 and 3:
from __future__ import print_function # (at top of module)
print('Hello', 'Guido')
# -
# Python 2 only:
print >> sys.stderr, 'Hello'
# +
# Python 2 and 3:
from __future__ import print_function
print('Hello', file=sys.stderr)
# -
# Python 2 only:
print 'Hello',
# +
# Python 2 and 3:
from __future__ import print_function
print('Hello', end='')
# -
# ### Raising exceptions
# Python 2 only:
raise ValueError, "dodgy value"
# Python 2 and 3:
raise ValueError("dodgy value")
# Raising exceptions with a traceback:
# Python 2 only:
traceback = sys.exc_info()[2]
raise ValueError, "dodgy value", traceback
# Python 3 only:
raise ValueError("dodgy value").with_traceback()
# +
# Python 2 and 3: option 1
from six import reraise as raise_
# or
from future.utils import raise_
traceback = sys.exc_info()[2]
raise_(ValueError, "dodgy value", traceback)
# +
# Python 2 and 3: option 2
from future.utils import raise_with_traceback
raise_with_traceback(ValueError("dodgy value"))
# -
# Exception chaining (PEP 3134):
# Setup:
class DatabaseError(Exception):
pass
# Python 3 only
class FileDatabase:
def __init__(self, filename):
try:
self.file = open(filename)
except IOError as exc:
raise DatabaseError('failed to open') from exc
# +
# Python 2 and 3:
from future.utils import raise_from
class FileDatabase:
def __init__(self, filename):
try:
self.file = open(filename)
except IOError as exc:
raise_from(DatabaseError('failed to open'), exc)
# -
# Testing the above:
try:
fd = FileDatabase('non_existent_file.txt')
except Exception as e:
assert isinstance(e.__cause__, IOError) # FileNotFoundError on Py3.3+ inherits from IOError
# ### Catching exceptions
# Python 2 only:
try:
...
except ValueError, e:
...
# Python 2 and 3:
try:
...
except ValueError as e:
...
# ### Division
# Integer division (rounding down):
# Python 2 only:
assert 2 / 3 == 0
# Python 2 and 3:
assert 2 // 3 == 0
# "True division" (float division):
# Python 3 only:
assert 3 / 2 == 1.5
# +
# Python 2 and 3:
from __future__ import division # (at top of module)
assert 3 / 2 == 1.5
# -
# "Old division" (i.e. compatible with Py2 behaviour):
# Python 2 only:
a = b / c # with any types
# +
# Python 2 and 3:
from past.utils import old_div
a = old_div(b, c) # always same as / on Py2
# -
# ### Long integers
# Short integers are gone in Python 3 and ``long`` has become ``int`` (without the trailing ``L`` in the ``repr``).
# +
# Python 2 only
k = 9223372036854775808L
# Python 2 and 3:
k = 9223372036854775808
# +
# Python 2 only
bigint = 1L
# Python 2 and 3
from builtins import int
bigint = int(1)
# -
# To test whether a value is an integer (of any kind):
# +
# Python 2 only:
if isinstance(x, (int, long)):
...
# Python 3 only:
if isinstance(x, int):
...
# Python 2 and 3: option 1
from builtins import int # subclass of long on Py2
if isinstance(x, int): # matches both int and long on Py2
...
# Python 2 and 3: option 2
from past.builtins import long
if isinstance(x, (int, long)):
...
# -
# ### Octal constants
0644 # Python 2 only
0o644 # Python 2 and 3
# ### Backtick repr
`x` # Python 2 only
repr(x) # Python 2 and 3
# ### Metaclasses
# +
class BaseForm(object):
pass
class FormType(type):
pass
# -
# Python 2 only:
class Form(BaseForm):
__metaclass__ = FormType
pass
# Python 3 only:
class Form(BaseForm, metaclass=FormType):
pass
# +
# Python 2 and 3:
from six import with_metaclass
# or
from future.utils import with_metaclass
class Form(with_metaclass(FormType, BaseForm)):
pass
# -
# ## Strings and bytes
# ### Unicode (text) string literals
# If you are upgrading an existing Python 2 codebase, it may be preferable to mark up all string literals as unicode explicitly with ``u`` prefixes:
# +
# Python 2 only
s1 = 'The Zen of Python'
s2 = u'きたないのよりきれいな方がいい\n'
# Python 2 and 3
s1 = u'The Zen of Python'
s2 = u'きたないのよりきれいな方がいい\n'
# -
# The ``futurize`` and ``python-modernize`` tools do not currently offer an option to do this automatically.
# If you are writing code for a new project or new codebase, you can use this idiom to make all string literals in a module unicode strings:
# +
# Python 2 and 3
from __future__ import unicode_literals # at top of module
s1 = 'The Zen of Python'
s2 = 'きたないのよりきれいな方がいい\n'
# -
# See http://python-future.org/unicode_literals.html for more discussion on which style to use.
# ### Byte-string literals
# +
# Python 2 only
s = 'This must be a byte-string'
# Python 2 and 3
s = b'This must be a byte-string'
# -
# To loop over a byte-string with possible high-bit characters, obtaining each character as a byte-string of length 1:
# +
# Python 2 only:
for bytechar in 'byte-string with high-bit chars like \xf9':
...
# Python 3 only:
for myint in b'byte-string with high-bit chars like \xf9':
bytechar = bytes([myint])
# Python 2 and 3:
from builtins import bytes
for myint in bytes(b'byte-string with high-bit chars like \xf9'):
bytechar = bytes([myint])
# -
# As an alternative, ``chr()`` and ``.encode('latin-1')`` can be used to convert an int into a 1-char byte string:
# +
# Python 3 only:
for myint in b'byte-string with high-bit chars like \xf9':
char = chr(myint) # returns a unicode string
bytechar = char.encode('latin-1')
# Python 2 and 3:
from builtins import bytes, chr
for myint in bytes(b'byte-string with high-bit chars like \xf9'):
char = chr(myint) # returns a unicode string
bytechar = char.encode('latin-1') # forces returning a byte str
# -
# ### basestring
# +
# Python 2 only:
a = u'abc'
b = 'def'
assert (isinstance(a, basestring) and isinstance(b, basestring))
# Python 2 and 3: alternative 1
from past.builtins import basestring # pip install future
a = u'abc'
b = b'def'
assert (isinstance(a, basestring) and isinstance(b, basestring))
# +
# Python 2 and 3: alternative 2: refactor the code to avoid considering
# byte-strings as strings.
from builtins import str
a = u'abc'
b = b'def'
c = b.decode()
assert isinstance(a, str) and isinstance(c, str)
# ...
# -
# ### unicode
# Python 2 only:
templates = [u"blog/blog_post_detail_%s.html" % unicode(slug)]
# Python 2 and 3: alternative 1
from builtins import str
templates = [u"blog/blog_post_detail_%s.html" % str(slug)]
# Python 2 and 3: alternative 2
from builtins import str as text
templates = [u"blog/blog_post_detail_%s.html" % text(slug)]
# ### StringIO
# +
# Python 2 only:
from StringIO import StringIO
# or:
from cStringIO import StringIO
# Python 2 and 3:
from io import BytesIO # for handling byte strings
from io import StringIO # for handling unicode strings
# -
# ## Imports relative to a package
# Suppose the package is:
#
# mypackage/
# __init__.py
# submodule1.py
# submodule2.py
#
# and the code below is in ``submodule1.py``:
# Python 2 only:
import submodule2
# Python 2 and 3:
from . import submodule2
# Python 2 and 3:
# To make Py2 code safer (more like Py3) by preventing
# implicit relative imports, you can also add this to the top:
from __future__ import absolute_import
# ## Dictionaries
heights = {'Fred': 175, 'Anne': 166, 'Joe': 192}
# ### Iterating through ``dict`` keys/values/items
# Iterable dict keys:
# Python 2 only:
for key in heights.iterkeys():
...
# Python 2 and 3:
for key in heights:
...
# Iterable dict values:
# Python 2 only:
for value in heights.itervalues():
...
# Idiomatic Python 3
for value in heights.values(): # extra memory overhead on Py2
...
# +
# Python 2 and 3: option 1
from builtins import dict
heights = dict(Fred=175, Anne=166, Joe=192)
# Loop variable renamed: .values() yields values, not keys.
for value in heights.values(): # efficient on Py2 and Py3
    ...
# +
# Python 2 and 3: option 2
from future.utils import itervalues
# or
from six import itervalues
# Loop variable renamed: itervalues() yields values, not keys.
for value in itervalues(heights):
    ...
# -
# Iterable dict items:
# Python 2 only:
for (key, value) in heights.iteritems():
...
# Python 2 and 3: option 1
for (key, value) in heights.items(): # inefficient on Py2
...
# +
# Python 2 and 3: option 2
from future.utils import viewitems
for (key, value) in viewitems(heights): # also behaves like a set
...
# +
# Python 2 and 3: option 3
from future.utils import iteritems
# or
from six import iteritems
for (key, value) in iteritems(heights):
...
# -
# ### dict keys/values/items as a list
# dict keys as a list:
# Python 2 only:
keylist = heights.keys()
assert isinstance(keylist, list)
# Python 2 and 3:
keylist = list(heights)
assert isinstance(keylist, list)
# dict values as a list:
# Python 2 only:
heights = {'Fred': 175, 'Anne': 166, 'Joe': 192}
valuelist = heights.values()
assert isinstance(valuelist, list)
# Python 2 and 3: option 1
valuelist = list(heights.values()) # inefficient on Py2
# +
# Python 2 and 3: option 2
from builtins import dict
heights = dict(Fred=175, Anne=166, Joe=192)
valuelist = list(heights.values())
# +
# Python 2 and 3: option 3
from future.utils import listvalues
valuelist = listvalues(heights)
# +
# Python 2 and 3: option 4
from future.utils import itervalues
# or
from six import itervalues
valuelist = list(itervalues(heights))
# -
# dict items as a list:
# Python 2 and 3: option 1
itemlist = list(heights.items()) # inefficient on Py2
# +
# Python 2 and 3: option 2
from future.utils import listitems
itemlist = listitems(heights)
# +
# Python 2 and 3: option 3
from future.utils import iteritems
# or
from six import iteritems
itemlist = list(iteritems(heights))
# -
# ## Custom class behaviour
# ### Custom iterators
# +
# Python 2 only
class Upper(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # Py2-style
return self._iter.next().upper()
def __iter__(self):
return self
itr = Upper('hello')
assert itr.next() == 'H' # Py2-style
assert list(itr) == list('ELLO')
# +
# Python 2 and 3: option 1
from builtins import object
class Upper(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __next__(self): # Py3-style iterator interface
return next(self._iter).upper() # builtin next() function calls
def __iter__(self):
return self
itr = Upper('hello')
assert next(itr) == 'H' # compatible style
assert list(itr) == list('ELLO')
# +
# Python 2 and 3: option 2
from future.utils import implements_iterator
@implements_iterator
class Upper(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __next__(self): # Py3-style iterator interface
return next(self._iter).upper() # builtin next() function calls
def __iter__(self):
return self
itr = Upper('hello')
assert next(itr) == 'H'
assert list(itr) == list('ELLO')
# -
# ### Custom ``__str__`` methods
# +
# Python 2 only:
class MyClass(object):
def __unicode__(self):
return 'Unicode string: \u5b54\u5b50'
def __str__(self):
return unicode(self).encode('utf-8')
a = MyClass()
print(a) # prints encoded string
# +
# Python 2 and 3:
from future.utils import python_2_unicode_compatible
@python_2_unicode_compatible
class MyClass(object):
def __str__(self):
return u'Unicode string: \u5b54\u5b50'
a = MyClass()
print(a) # prints string encoded as utf-8 on Py2
# -
# ### Custom ``__nonzero__`` vs ``__bool__`` method:
# +
# Python 2 only:
class AllOrNothing(object):
def __init__(self, l):
self.l = l
def __nonzero__(self):
return all(self.l)
container = AllOrNothing([0, 100, 200])
assert not bool(container)
# +
# Python 2 and 3:
from builtins import object
class AllOrNothing(object):
def __init__(self, l):
self.l = l
def __bool__(self):
return all(self.l)
container = AllOrNothing([0, 100, 200])
assert not bool(container)
# -
# ## Lists versus iterators
# ### xrange
# Python 2 only:
for i in xrange(10**8):
...
# NOTE(review): cheat-sheet cells (python-future compatibility idioms). Many
# snippets are Python-2-only ON PURPOSE and are SyntaxErrors under Python 3;
# this file is reference material, never meant to be executed top to bottom.
# Python 2 and 3: forward-compatible
from builtins import range
for i in range(10**8):
    ...
# Python 2 and 3: backward-compatible
from past.builtins import xrange
for i in xrange(10**8):
    ...
# ### range
# Python 2 only
mylist = range(5)
assert mylist == [0, 1, 2, 3, 4]
# Python 2 and 3: forward-compatible: option 1
mylist = list(range(5)) # copies memory on Py2
assert mylist == [0, 1, 2, 3, 4]
# +
# Python 2 and 3: forward-compatible: option 2
from builtins import range
mylist = list(range(5))
assert mylist == [0, 1, 2, 3, 4]
# +
# Python 2 and 3: option 3
from future.utils import lrange
mylist = lrange(5)
assert mylist == [0, 1, 2, 3, 4]
# +
# Python 2 and 3: backward compatible
from past.builtins import range
mylist = range(5)
assert mylist == [0, 1, 2, 3, 4]
# -
# ### map
# Python 2 only:
mynewlist = map(f, myoldlist)
assert mynewlist == [f(x) for x in myoldlist]
# Python 2 and 3: option 1
# Idiomatic Py3, but inefficient on Py2
mynewlist = list(map(f, myoldlist))
assert mynewlist == [f(x) for x in myoldlist]
# +
# Python 2 and 3: option 2
from builtins import map
mynewlist = list(map(f, myoldlist))
assert mynewlist == [f(x) for x in myoldlist]
# +
# Python 2 and 3: option 3
try:
    from itertools import imap as map
except ImportError:
    pass
mynewlist = list(map(f, myoldlist)) # inefficient on Py2
assert mynewlist == [f(x) for x in myoldlist]
# +
# Python 2 and 3: option 4
from future.utils import lmap
mynewlist = lmap(f, myoldlist)
assert mynewlist == [f(x) for x in myoldlist]
# +
# Python 2 and 3: option 5
from past.builtins import map
mynewlist = map(f, myoldlist)
assert mynewlist == [f(x) for x in myoldlist]
# -
# ### imap
# +
# Python 2 only:
from itertools import imap
myiter = imap(func, myoldlist)
# NOTE(review): `iter` is a builtin function, not a type, so
# isinstance(x, iter) raises TypeError -- these asserts are illustrative only.
assert isinstance(myiter, iter)
# -
# Python 3 only:
myiter = map(func, myoldlist)
assert isinstance(myiter, iter)
# +
# Python 2 and 3: option 1
from builtins import map
myiter = map(func, myoldlist)
assert isinstance(myiter, iter)
# +
# Python 2 and 3: option 2
try:
    from itertools import imap as map
except ImportError:
    pass
myiter = map(func, myoldlist)
assert isinstance(myiter, iter)
# -
# ### zip, izip
# As above with ``zip`` and ``itertools.izip``.
# ### filter, ifilter
# As above with ``filter`` and ``itertools.ifilter`` too.
# ## Other builtins
# ### File IO with open()
# +
# Python 2 only
f = open('myfile.txt')
data = f.read() # as a byte string
text = data.decode('utf-8')
# Python 2 and 3: alternative 1
from io import open
f = open('myfile.txt', 'rb')
data = f.read() # as bytes
text = data.decode('utf-8') # unicode, not bytes
# Python 2 and 3: alternative 2
from io import open
f = open('myfile.txt', encoding='utf-8')
text = f.read() # unicode, not bytes
# -
# ### reduce()
# Python 2 only:
assert reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) == 1+2+3+4+5
# +
# Python 2 and 3:
from functools import reduce
assert reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) == 1+2+3+4+5
# -
# ### raw_input()
# Python 2 only:
name = raw_input('What is your name? ')
assert isinstance(name, str) # native str
# +
# Python 2 and 3:
from builtins import input
name = input('What is your name? ')
assert isinstance(name, str) # native str on Py2 and Py3
# -
# ### input()
# Python 2 only:
input("Type something safe please: ")
# Python 2 and 3
from builtins import input
eval(input("Type something safe please: "))
# Warning: using either of these is **unsafe** with untrusted input.
# ### file()
# Python 2 only:
f = file(pathname)
# +
# Python 2 and 3:
f = open(pathname)
# But preferably, use this:
from io import open
f = open(pathname, 'rb') # if f.read() should return bytes
# or
f = open(pathname, 'rt') # if f.read() should return unicode text
# -
# ### exec
# +
# Python 2 only:
exec 'x = 10'
# Python 2 and 3:
exec('x = 10')
# +
# Python 2 only:
g = globals()
exec 'x = 10' in g
# Python 2 and 3:
g = globals()
exec('x = 10', g)
# +
# Python 2 only:
l = locals()
exec 'x = 10' in g, l
# Python 2 and 3:
exec('x = 10', g, l)
# -
# But note that Py3's `exec()` is less powerful (and less dangerous) than Py2's `exec` statement.
# ### execfile()
# Python 2 only:
execfile('myfile.py')
# +
# Python 2 and 3: alternative 1
from past.builtins import execfile
execfile('myfile.py')
# +
# Python 2 and 3: alternative 2
# compile() requires the filename and mode arguments; the original
# one-argument call raises TypeError. 'exec' mode compiles a module-style
# block of statements.
exec(compile(open('myfile.py').read(), 'myfile.py', 'exec'))
# This can sometimes cause this:
# SyntaxError: function ... uses import * and bare exec ...
# See https://github.com/PythonCharmers/python-future/issues/37
# -
# ### unichr()
# Python 2 only:
assert unichr(8364) == '€'
# Python 3 only:
assert chr(8364) == '€'
# Python 2 and 3:
from builtins import chr
assert chr(8364) == '€'
# ### intern()
# Python 2 only:
intern('mystring')
# Python 3 only:
from sys import intern
intern('mystring')
# Python 2 and 3: alternative 1
from past.builtins import intern
intern('mystring')
# Python 2 and 3: alternative 2
from six.moves import intern
intern('mystring')
# Python 2 and 3: alternative 3
from future.standard_library import install_aliases
install_aliases()
from sys import intern
intern('mystring')
# Python 2 and 3: alternative 4 (was mislabelled "alternative 2")
try:
    from sys import intern
except ImportError:
    pass
intern('mystring')
# ### apply()
args = ('a', 'b')
kwargs = {'kwarg1': True}
# Python 2 only:
apply(f, args, kwargs)
# Python 2 and 3: alternative 1
f(*args, **kwargs)
# Python 2 and 3: alternative 2
from past.builtins import apply
apply(f, args, kwargs)
# ### chr()
# Python 2 only:
assert chr(64) == b'@'
assert chr(200) == b'\xc8'
# Python 3 only: option 1
assert chr(64).encode('latin-1') == b'@'
assert chr(0xc8).encode('latin-1') == b'\xc8'
# +
# Python 2 and 3: option 1
from builtins import chr
assert chr(64).encode('latin-1') == b'@'
assert chr(0xc8).encode('latin-1') == b'\xc8'
# -
# Python 3 only: option 2
assert bytes([64]) == b'@'
assert bytes([0xc8]) == b'\xc8'
# +
# Python 2 and 3: option 2
from builtins import bytes
assert bytes([64]) == b'@'
assert bytes([0xc8]) == b'\xc8'
# -
# ### cmp()
# Python 2 only:
assert cmp('a', 'b') < 0 and cmp('b', 'a') > 0 and cmp('c', 'c') == 0
# Python 2 and 3: alternative 1
from past.builtins import cmp
assert cmp('a', 'b') < 0 and cmp('b', 'a') > 0 and cmp('c', 'c') == 0
# Python 2 and 3: alternative 2
# NOTE: Py2's tuple-parameter syntax ``lambda (x, y):`` is a SyntaxError in
# Python 3, so this snippet was not actually "Python 2 and 3" as labelled.
# The portable spelling takes two plain parameters.
cmp = lambda x, y: (x > y) - (x < y)
assert cmp('a', 'b') < 0 and cmp('b', 'a') > 0 and cmp('c', 'c') == 0
# ### reload()
# Python 2 only:
reload(mymodule)
# Python 2 and 3
# NOTE(review): the `imp` module is deprecated and removed in Python 3.12;
# `from importlib import reload` is the modern spelling.
from imp import reload
reload(mymodule)
# ## Standard library
# ### dbm modules
# +
# Python 2 only
import anydbm
import whichdb
import dbm
import dumbdbm
import gdbm
# Python 2 and 3: alternative 1
from future import standard_library
standard_library.install_aliases()
import dbm
import dbm.ndbm
import dbm.dumb
import dbm.gnu
# Python 2 and 3: alternative 2
from future.moves import dbm
from future.moves.dbm import dumb
from future.moves.dbm import ndbm
from future.moves.dbm import gnu
# Python 2 and 3: alternative 3
from six.moves import dbm_gnu
# (others not supported)
# -
# ### commands / subprocess modules
# +
# Python 2 only
from commands import getoutput, getstatusoutput
# Python 2 and 3
from future import standard_library
standard_library.install_aliases()
from subprocess import getoutput, getstatusoutput
# -
# ### subprocess.check_output()
# +
# Python 2.7 and above
from subprocess import check_output
# Python 2.6 and above: alternative 1
from future.moves.subprocess import check_output
# Python 2.6 and above: alternative 2
from future import standard_library
standard_library.install_aliases()
from subprocess import check_output
# -
# ### collections: Counter, OrderedDict, ChainMap
# +
# Python 2.7 and above
# NOTE(review): ChainMap only joined stdlib collections in Python 3.3 --
# on Py2.7 this import of ChainMap fails; confirm against python-future docs.
from collections import Counter, OrderedDict, ChainMap
# Python 2.6 and above: alternative 1
from future.backports import Counter, OrderedDict, ChainMap
# Python 2.6 and above: alternative 2
from future import standard_library
standard_library.install_aliases()
from collections import Counter, OrderedDict, ChainMap
# -
# ### StringIO module
# Python 2 only
from StringIO import StringIO
from cStringIO import StringIO
# Python 2 and 3
from io import BytesIO
# and refactor StringIO() calls to BytesIO() if passing byte-strings
# ### http module
# +
# Python 2 only:
import httplib
import Cookie
import cookielib
import BaseHTTPServer
import SimpleHTTPServer
import CGIHttpServer
# Python 2 and 3 (after ``pip install future``):
import http.client
import http.cookies
import http.cookiejar
import http.server
# -
# ### xmlrpc module
# +
# Python 2 only:
import DocXMLRPCServer
import SimpleXMLRPCServer
# Python 2 and 3 (after ``pip install future``):
import xmlrpc.server
# +
# Python 2 only:
import xmlrpclib
# Python 2 and 3 (after ``pip install future``):
import xmlrpc.client
# -
# ### html escaping and entities
# +
# Python 2 and 3:
# NOTE(review): cgi.escape was removed in Python 3.8; prefer html.escape below.
from cgi import escape
# Safer (Python 2 and 3, after ``pip install future``):
from html import escape
# Python 2 only:
from htmlentitydefs import codepoint2name, entitydefs, name2codepoint
# Python 2 and 3 (after ``pip install future``):
from html.entities import codepoint2name, entitydefs, name2codepoint
# -
# ### html parsing
# +
# Python 2 only:
from HTMLParser import HTMLParser
# Python 2 and 3 (after ``pip install future``)
from html.parser import HTMLParser
# Python 2 and 3 (alternative 2):
from future.moves.html.parser import HTMLParser
# -
# ### urllib module
# ``urllib`` is the hardest module to use from Python 2/3 compatible code. You may like to use Requests (http://python-requests.org) instead.
# Python 2 only:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
# Python 3 only:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
# +
# Python 2 and 3: easiest option
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
# +
# Python 2 and 3: alternative 2
from future.standard_library import hooks
with hooks():
    from urllib.parse import urlparse, urlencode
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
# -
# Python 2 and 3: alternative 3
from future.moves.urllib.parse import urlparse, urlencode
from future.moves.urllib.request import urlopen, Request
from future.moves.urllib.error import HTTPError
# or
from six.moves.urllib.parse import urlparse, urlencode
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
# Python 2 and 3: alternative 4
try:
    from urllib.parse import urlparse, urlencode
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
except ImportError:
    from urlparse import urlparse
    from urllib import urlencode
    from urllib2 import urlopen, Request, HTTPError
# ### Tkinter
# +
# Python 2 only:
import Tkinter
import Dialog
import FileDialog
import ScrolledText
import SimpleDialog
import Tix
import Tkconstants
import Tkdnd
import tkColorChooser
import tkCommonDialog
import tkFileDialog
import tkFont
import tkMessageBox
import tkSimpleDialog
import ttk
# Python 2 and 3 (after ``pip install future``):
import tkinter
import tkinter.dialog
import tkinter.filedialog
import tkinter.scrolledtext
import tkinter.simpledialog
import tkinter.tix
import tkinter.constants
import tkinter.dnd
import tkinter.colorchooser
import tkinter.commondialog
import tkinter.filedialog
import tkinter.font
import tkinter.messagebox
import tkinter.simpledialog
import tkinter.ttk
# -
# ### socketserver
# +
# Python 2 only:
import SocketServer
# Python 2 and 3 (after ``pip install future``):
import socketserver
# -
# ### copy_reg, copyreg
# +
# Python 2 only:
import copy_reg
# Python 2 and 3 (after ``pip install future``):
import copyreg
# -
# ### configparser
# +
# Python 2 only:
from ConfigParser import ConfigParser
# Python 2 and 3 (after ``pip install future``):
from configparser import ConfigParser
# -
# ### queue
# +
# Python 2 only:
from Queue import Queue, heapq, deque
# Python 2 and 3 (after ``pip install future``):
from queue import Queue, heapq, deque
# -
# ### repr, reprlib
# +
# Python 2 only:
from repr import aRepr, repr
# Python 2 and 3 (after ``pip install future``):
from reprlib import aRepr, repr
# -
# ### UserDict, UserList, UserString
# +
# Python 2 only:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString
# Python 3 only:
from collections import UserDict, UserList, UserString
# Python 2 and 3: alternative 1
from future.moves.collections import UserDict, UserList, UserString
# Python 2 and 3: alternative 2
from six.moves import UserDict, UserList, UserString
# Python 2 and 3: alternative 3
from future.standard_library import install_aliases
install_aliases()
from collections import UserDict, UserList, UserString
# -
# ### itertools: filterfalse, zip_longest
# +
# Python 2 only:
from itertools import ifilterfalse, izip_longest
# Python 3 only:
from itertools import filterfalse, zip_longest
# Python 2 and 3: alternative 1
from future.moves.itertools import filterfalse, zip_longest
# Python 2 and 3: alternative 2
from six.moves import filterfalse, zip_longest
# Python 2 and 3: alternative 3
from future.standard_library import install_aliases
install_aliases()
from itertools import filterfalse, zip_longest
|
docs/notebooks/Writing Python 2-3 compatible code.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **관악구 동네 이야기**
# 1. 데이터 불러오기
# 1. 주소를 바탕으로 위도, 경도 데이터 가져오기
# 1. Naver 지도 위에서 해당 내용 Marker (in React.js)
# ## **1 데이터 전처리 작업**
# 데이터를 불러온 뒤, 처리에 용이한 Type 으로 변환하기
# 데이터 CSV 불러오기
# Load the store list from Excel; the sheet's first row is a title row,
# so the real header is row index 1.
import pandas as pd
file_name = "./data/ourtown_store.xlsx"
data_xls = pd.read_excel(file_name, header=1)
# The trailing unnamed column holds free-form notes; rename it to '기타'.
data_columns = data_xls.columns.to_list()
data_columns.pop()
data_xls.columns = data_columns + ['기타']
data_xls.head(3)
# Rows with a NaN serial number ('연번') -- presumably merged-cell
# continuation rows; the forward-fill below supports this (TODO confirm).
data_xls[data_xls["연번"].isnull()]
# +
# Forward-fill every column except the notes column.
# `fillna(method="ffill")` is deprecated since pandas 2.1; `.ffill()` is
# the supported equivalent. Also use a named loop variable instead of `_`.
for col in data_xls.columns.to_list()[:-1]:
    data_xls[col] = data_xls[col].ffill()
# Other field filled with blank text
data_xls["기타"] = data_xls["기타"].fillna('')
# Checking the NaN data (should now be empty)
data_xls[data_xls["연번"].isnull()]
# -
# ## **2 위도, 경도 데이터 생성하기** (build latitude/longitude from addresses)
# 주소 데이터를 활용하여 위도/ 경도 데이터 생성하기
# data_xls.to_csv(file_name.replace("xlsx","csv"), index=None)
print(data_xls['주소'][0])
data_xls.head(3)
data_xls['주소']
# +
# 건물관리번호 : 1144012700115950000000001
# X좌표 : 945959.0381341814
# Y좌표 : 1953851.7348996028
# 건물명 : 한국지역정보개발원(KLID Tower)
|
jupyters/at1_pandas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
### we start with the liar dataset
import numpy as np
import pandas as pd
# -
# get the dataset splits (each has a 'statement' text and a 'label' column)
df_train = pd.read_csv('../../../datasets/liar_tweaked/trainvectordata.csv')
df_test = pd.read_csv('../../../datasets/liar_tweaked/testvectordata.csv')
df_valid = pd.read_csv('../../../datasets/liar_tweaked/validvectordata.csv')
X_train = df_train['statement']
X_test = df_test['statement']
Y_train = df_train['label']
Y_test = df_test['label']
X_valid = df_valid['statement']
Y_valid = df_valid['label']
# fold the validation split into the test split
# (Series.append was removed in pandas 2.0 -- pd.concat is the replacement)
X_test = pd.concat([X_test, X_valid], ignore_index=True)
Y_test = pd.concat([Y_test, Y_valid], ignore_index=True)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
# make necessary imports
import numpy as np
import pandas as pd
import itertools
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
# tfidf: fit on the train split only; transform test with the same vocabulary
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
tfidf_train = tfidf_vectorizer.fit_transform(X_train)
tfidf_test = tfidf_vectorizer.transform(X_test)
tfidf_test.shape
# densify directly with .toarray() -- equivalent to the original
# np.array(x.todense().tolist()) but without two intermediate copies
tfidf_train = tfidf_train.toarray()
tfidf_test = tfidf_test.toarray()
Y_train = np.array(Y_train.tolist())
Y_test = np.array(Y_test.tolist())
# +
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
## derive sizes from the data instead of hard-coding them
## (the TF-IDF vocabulary size changes whenever the corpus does, so a
## hard-coded text_len breaks as soon as the dataset is touched)
n_sample, text_len = tfidf_train.shape
## expand train dimension: pass from 2d to 3d -> (n_sample, 1, text_len)
tfidf_train = np.expand_dims(tfidf_train, axis=1)
tfidf_test = np.expand_dims(tfidf_test, axis=1)
## create model: two 1-D conv blocks with dropout, flatten, softmax head
inp = Input(shape=(1, text_len))
conv2 = Conv1D(filters=128, kernel_size=5, activation='relu', padding='same')(inp)
drop21 = Dropout(0.5)(conv2)
conv22 = Conv1D(filters=64, kernel_size=5, activation='relu', padding='same')(drop21)
drop22 = Dropout(0.5)(conv22)
pool2 = Flatten()(drop22) # this is an option to pass from 3d to 2d
out = Dense(2, activation='softmax')(pool2) # the output dim must be equal to the num of class if u use softmax
model = Model(inp, out)
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(tfidf_train, Y_train, epochs=100,
                    validation_data=(tfidf_test, Y_test))
# +
### now we have the kaggle dataset
# +
df = pd.read_csv('../../../datasets/kaggle/train.csv')
# -
df
# article content = headline + body text
df['content'] = df['title'] + ' ' + df['text']
# Drop rows where content or label is NaN. The original used the
# `df[df[c] == df[c]]` trick (NaN != NaN); dropna says the same thing clearly.
df = df.dropna(subset=['content', 'label'])
df
df.to_csv('../../../datasets/kaggle/final.csv')
df = pd.read_csv('../../../datasets/kaggle/final.csv')
df
X = df['content']
Y = df['label']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=42)
# Re-index each split from 0. drop=True discards the old index instead of
# materialising it as a column and then selecting it away.
X_test = X_test.reset_index(drop=True)
Y_test = Y_test.reset_index(drop=True)
X_train = X_train.reset_index(drop=True)
Y_train = Y_train.reset_index(drop=True)
# tfidf: fit on train only, transform test with the same vocabulary
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
tfidf_train = tfidf_vectorizer.fit_transform(X_train)
tfidf_test = tfidf_vectorizer.transform(X_test)
tfidf_test = tfidf_test.toarray()
tfidf_train = tfidf_train.toarray()
Y_train = np.array(Y_train.tolist())
Y_test = np.array(Y_test.tolist())
tfidf_train.shape
# +
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
## derive sizes from the data instead of hard-coding them
## (the TF-IDF vocabulary size changes whenever the corpus does, so the
## hard-coded 157102 breaks as soon as the dataset is touched)
n_sample, text_len = tfidf_train.shape
## expand train dimension: pass from 2d to 3d -> (n_sample, 1, text_len)
tfidf_train = np.expand_dims(tfidf_train, axis=1)
tfidf_test = np.expand_dims(tfidf_test, axis=1)
## create model: two 1-D conv blocks with dropout, flatten, softmax head
inp = Input(shape=(1, text_len))
conv2 = Conv1D(filters=128, kernel_size=5, activation='relu', padding='same')(inp)
drop21 = Dropout(0.5)(conv2)
conv22 = Conv1D(filters=64, kernel_size=5, activation='relu', padding='same')(drop21)
drop22 = Dropout(0.5)(conv22)
pool2 = Flatten()(drop22) # this is an option to pass from 3d to 2d
out = Dense(2, activation='softmax')(pool2) # the output dim must be equal to the num of class if u use softmax
model = Model(inp, out)
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(tfidf_train, Y_train, epochs=100,
                    validation_data=(tfidf_test, Y_test))
# -
# -
|
models/CNN/TFIDF/TFIDF.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ML
# language: python
# name: ml
# ---
# # Long Text Sentiment
#
# So far, we have restricted the length of the text being fed into our models. Bert in particular is restricted to consuming 512 tokens per sample. For many use-cases, this is most likely not a problem - but in some cases it can be.
#
# If we take the example of Reddit posts on the */r/investing* subreddit, many of the more important posts are **DD** (due-diligence), which often consists of deep dives into why the author thinks a stock is a good investment or not. On these longer pieces of text, the actual sentiment from the author may not be clear from the first 512 tokens. We need to consider the full post.
#
# Before working through the logic that allows us to consider the full post, let's import and define everything we need to make a prediction on a single chunk of text (using much of what we covered in the last section).
# +
from transformers import BertForSequenceClassification, BertTokenizer
import torch
# initialize FinBERT (financial-sentiment BERT) and its tokenizer from the
# Hugging Face hub; the first call downloads and caches the weights
tokenizer = BertTokenizer.from_pretrained('ProsusAI/finbert')
model = BertForSequenceClassification.from_pretrained('ProsusAI/finbert')
# a small helper so that later predictions are a single call
def sentiment(tokens):
    """Run the model on one encoded input and return class probabilities.

    `tokens` is the dict produced by the tokenizer.  The returned tensor
    holds the softmax of the model's logits; argmax is deferred to the
    caller, which may want the raw probabilities.
    """
    logits = model(**tokens)[0]
    return torch.nn.functional.softmax(logits, dim=-1)
# -
# Now let's get to how we apply sentiment to longer pieces of text. There are two approaches that we cover in these notebooks:
#
# * Using neural text summarization to shorten the text to below 512 tokens.
#
# * Iterating through the text using a *window* and calculate the average article sentiment.
#
# In this notebook we will be using the second approach. The window in question will be a subsection of our tokenized text, of length `512`. First, let's define an example and tokenize it.
# +
# Example post from /r/investing -- deliberately long enough (well over
# 512 BERT tokens) to exceed the model's input limit.
txt = """
I would like to get your all thoughts on the bond yield increase this week. I am not worried about the market downturn but the sudden increase in yields. On 2/16 the 10 year bonds yields increased by almost 9 percent and on 2/19 the yield increased by almost 5 percent.
Key Points from the CNBC Article:
* **The “taper tantrum” in 2013 was a sudden spike in Treasury yields due to market panic after the Federal Reserve announced that it would begin tapering its quantitative easing program.**
* **Major central banks around the world have cut interest rates to historic lows and launched unprecedented quantities of asset purchases in a bid to shore up the economy throughout the pandemic.**
* **However, the recent rise in yields suggests that some investors are starting to anticipate a tightening of policy sooner than anticipated to accommodate a potential rise in inflation.**
The recent rise in bond yields and U.S. inflation expectations has some investors wary that a repeat of the 2013 “taper tantrum” could be on the horizon.
The benchmark U.S. 10-year Treasury note climbed above 1.3% for the first time since February 2020 earlier this week, while the 30-year bond also hit its highest level for a year. Yields move inversely to bond prices.
Yields tend to rise in lockstep with inflation expectations, which have reached their highest levels in a decade in the U.S., powered by increased prospects of a large fiscal stimulus package, progress on vaccine rollouts and pent-up consumer demand.
The “taper tantrum” in 2013 was a sudden spike in Treasury yields due to market panic after the Federal Reserve announced that it would begin tapering its quantitative easing program.
Major central banks around the world have cut interest rates to historic lows and launched unprecedented quantities of asset purchases in a bid to shore up the economy throughout the pandemic. The Fed and others have maintained supportive tones in recent policy meetings, vowing to keep financial conditions loose as the global economy looks to emerge from the Covid-19 pandemic.
However, the recent rise in yields suggests that some investors are starting to anticipate a tightening of policy sooner than anticipated to accommodate a potential rise in inflation.
With central bank support removed, bonds usually fall in price which sends yields higher. This can also spill over into stock markets as higher interest rates means more debt servicing for firms, causing traders to reassess the investing environment.
“The supportive stance from policymakers will likely remain in place until the vaccines have paved a way to some return to normality,” said <NAME>, chief investment officer at Beaufort Investment, in a research note this week.
“However, there will be a risk of another ‘taper tantrum’ similar to the one we witnessed in 2013, and this is our main focus for 2021,” Balkham projected, should policymakers begin to unwind this stimulus.
Long-term bond yields in Japan and Europe followed U.S. Treasurys higher toward the end of the week as bondholders shifted their portfolios.
“The fear is that these assets are priced to perfection when the ECB and Fed might eventually taper,” said <NAME>, senior macro strategist at Nordea Asset Management, in a research note entitled “Little taper tantrum.”
“The odds of tapering are helped in the United States by better retail sales after four months of disappointment and the expectation of large issuance from the $1.9 trillion fiscal package.”
Galy suggested the Fed would likely extend the duration on its asset purchases, moderating the upward momentum in inflation.
“Equity markets have reacted negatively to higher yield as it offers an alternative to the dividend yield and a higher discount to long-term cash flows, making them focus more on medium-term growth such as cyclicals” he said. Cyclicals are stocks whose performance tends to align with economic cycles.
Galy expects this process to be more marked in the second half of the year when economic growth picks up, increasing the potential for tapering.
## Tapering in the U.S., but not Europe
Allianz CEO <NAME> told CNBC on Friday that there was a geographical divergence in how the German insurer is thinking about the prospect of interest rate hikes.
“One is Europe, where we continue to have financial repression, where the ECB continues to buy up to the max in order to minimize spreads between the north and the south — the strong balance sheets and the weak ones — and at some point somebody will have to pay the price for that, but in the short term I don’t see any spike in interest rates,” Bäte said, adding that the situation is different stateside.
“Because of the massive programs that have happened, the stimulus that is happening, the dollar being the world’s reserve currency, there is clearly a trend to stoke inflation and it is going to come. Again, I don’t know when and how, but the interest rates have been steepening and they should be steepening further.”
## Rising yields a ‘normal feature’
However, not all analysts are convinced that the rise in bond yields is material for markets. In a note Friday, Barclays Head of European Equity Strategy <NAME> suggested that rising bond yields were overdue, as they had been lagging the improving macroeconomic outlook for the second half of 2021, and said they were a “normal feature” of economic recovery.
“With the key drivers of inflation pointing up, the prospect of even more fiscal stimulus in the U.S. and pent up demand propelled by high excess savings, it seems right for bond yields to catch-up with other more advanced reflation trades,” Cau said, adding that central banks remain “firmly on hold” given the balance of risks.
He argued that the steepening yield curve is “typical at the early stages of the cycle,” and that so long as vaccine rollouts are successful, growth continues to tick upward and central banks remain cautious, reflationary moves across asset classes look “justified” and equities should be able to withstand higher rates.
“Of course, after the strong move of the last few weeks, equities could mark a pause as many sectors that have rallied with yields look overbought, like commodities and banks,” Cau said.
“But at this stage, we think rising yields are more a confirmation of the equity bull market than a threat, so dips should continue to be bought.”
"""
# Tokenize WITHOUT special tokens: [CLS]/[SEP] are added manually per
# window later, so they must not appear inside the raw token stream.
tokens = tokenizer.encode_plus(txt, add_special_tokens=False)
# total token count (expected to be well over the 512-token limit)
len(tokens['input_ids'])
# -
# If we tokenize this longer piece of text we get a total of **1345** tokens, far too many to fit into our BERT model containing a maximum limit of 512 tokens. We will need to split this text into chunks of 512 tokens at a time, and calculate our sentiment probabilities for each chunk seperately.
#
# Because we are taking this slightly different approach, we have encoded our tokens using a different set of parameters to what we have used before. This time, we:
#
# * Avoided adding special tokens `add_special_tokens=False` because this will add *[CLS]* and *[SEP]* tokens to the start and end of the full tokenized tensor of length **1345**, we will instead add them manually later.
#
# * We will not specify `max_length`, `truncation`, or `padding` parameters (as we do not use any of them here).
#
# * We will return standard Python *lists* rather than tensors by not specifying `return_tensors` (it will return lists by default). This will make the following logic steps easier to follow - but we will rewrite them using PyTorch code in the next section.
# with no return_tensors argument, the tokenizer hands back plain Python lists
type(tokens['input_ids'])
# First, we break our tokenized dictionary into `input_ids` and `attention_mask` variables.
input_ids = tokens['input_ids']
attention_mask = tokens['attention_mask']
# We can now access slices of these lists like so:
input_ids[16:32]
# We will be using this to break our lists into smaller sections, let's test it in a simple loop.
# +
# Window demo: walk the token list in fixed-size steps and show each
# chunk's [start, end) indices; the final chunk is clipped to total_len.
start = 0
window_size = 512
total_len = len(input_ids)
while True:
    end = min(start + window_size, total_len)
    print(f"{start=}\n{end=}")
    if end == total_len:
        # final (possibly short) window reached
        break
    # slide the window to the next chunk of tokens
    start = end
# -
# This logic works for shifting our window across the full length of input IDs, so now we can modify it to iterately predict sentiment for each window. There will be a few added steps for us to get this to work:
#
# 1. Extract the window from `input_ids` and `attention_mask`.
#
# 2. Add the start of sequence token `[CLS]`/`101` and seperator token `[SEP]`/`102`.
#
# 3. Add padding (only applicable to final batch).
#
# 4. Format into dictionary containing PyTorch tensors.
#
# 5. Make logits predictions with the model.
#
# 6. Calculate softmax and append softmax vector to a list `probs_list`.
# +
# initialize probabilities list -- one softmax vector per 512-token window
probs_list = []
start = 0
window_size = 510 # we take 2 off here so that we can fit in our [CLS] and [SEP] tokens
loop = True
while loop:
    end = start + window_size
    if end >= total_len:
        loop = False
        end = total_len
    # (1) extract window from input_ids and attention_mask
    input_ids_chunk = input_ids[start:end]
    attention_mask_chunk = attention_mask[start:end]
    # (2) add [CLS] (id 101) and [SEP] (id 102), both attended (mask 1)
    input_ids_chunk = [101] + input_ids_chunk + [102]
    attention_mask_chunk = [1] + attention_mask_chunk + [1]
    # (3) pad up to window_size + 2 (= 512) tokens; this is a no-op for
    #     every window except the final, shorter one
    input_ids_chunk += [0] * (window_size - len(input_ids_chunk) + 2)
    attention_mask_chunk += [0] * (window_size - len(attention_mask_chunk) + 2)
    # (4) format into a PyTorch tensors dictionary (batch of one)
    input_dict = {
        'input_ids': torch.Tensor([input_ids_chunk]).long(),
        'attention_mask': torch.Tensor([attention_mask_chunk]).int()
    }
    # (5) make logits prediction for this window
    outputs = model(**input_dict)
    # (6) softmax over the class dimension, append to the list
    probs = torch.nn.functional.softmax(outputs[0], dim=-1)
    probs_list.append(probs)
    start = end
# let's view the probabilities given
probs_list
# -
# Each section has been assign varying levels of sentiment. The first and section sections both score *negatively* (index *1*) and the final sections scores *positively* (index *0*). To calculate the average sentiment across the full text, we will merge these tensors using the `stack` method:
# stack the per-window probability tensors into a single tensor
stacks = torch.stack(probs_list)
stacks
shape = stacks.shape
shape
# the dims to keep: (num_windows, num_classes)
shape[0], shape[2]
# NOTE(review): this call intentionally raises RuntimeError (resizing a
# tensor that requires grad) -- it is a teaching step; the no_grad block
# below is the working version.
stacks.resize_(shape[0], shape[2])
# +
with torch.no_grad():
    # we must include our stacks operation in here too
    stacks = torch.stack(probs_list)
    # now resize from (n, 1, 3) to (n, 3)
    stacks = stacks.resize_(stacks.shape[0], stacks.shape[2])
    # finally, we can calculate the mean value for each sentiment class
    mean = stacks.mean(dim=0)
mean
# -
# winning class index (0 = positive per the earlier markdown -- confirm
# against the model's id2label mapping)
torch.argmax(mean).item()
|
course/language_classification/03_long_text_sentiment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import psycopg2 as pg
import pandas as pd
# Credentials
# NOTE: shared community database; keep the real password out of the notebook.
host = 'comunidade-ds-postgres.c50pcakiuwi3.us-east-1.rds.amazonaws.com'
port = 5432
database = 'comunidadedsdb'
username = 'member'
password = '<PASSWORD>'
# +
# create the connection object reused by every query cell below
conn = pg.connect( user = username,
                   password = password,
                   host = host,
                   port = port,
                   database = database)
# -
# # 1.0. Schema Query
# +
# create a cursor
cursor = conn.cursor()
# list every schema (namespace) in the database
query_schema = """
SELECT nspname
FROM pg_catalog.pg_namespace
"""
cursor.execute( query_schema )
# extract query result
record = cursor.fetchall()
print(record)
# Close only the cursor: later cells (2.0-4.0) reuse `conn`, so the previous
# `conn.close()` here made every subsequent query fail on a closed connection.
cursor.close()
# -
# # 2.0. Table Query
# +
# create a cursor
cursor = conn.cursor()
# list tables inside the pa004 schema
query_table = """
SELECT tablename
FROM pg_tables
WHERE schemaname = 'pa004'
"""
cursor.execute( query_table )
# extract query result
record = cursor.fetchall()
print(record)
# Close only the cursor; keep `conn` open for the cells below (the previous
# `conn.close()` broke sections 3.0 and 4.0, which still query through it).
cursor.close()
# -
# # 3.0. Collect Data with Psycopg2
# +
# create a cursor
cursor = conn.cursor()
query_table_users = """
SELECT *
FROM pa004.users u
WHERE u.age > 44
LIMIT 10
"""
cursor.execute( query_table_users )
# extract query result
record = cursor.fetchall()
print(record)
# after finishing queries, always close connection!
cursor.close()
conn.close()
# +
data = pd.DataFrame( record )
# bom mas nao preenche colunas direto
data
# -
# # 4.0. Collect Data with Pandas
# To import db into DFs, the best way is to use pandas
import pandas.io.sql as psql
query_table_users = """
SELECT *
FROM pa004.users u LEFT JOIN pa004.vehicle v ON (u.id = v.id)
LEFT JOIN pa004.insurance i ON (u.id = i.id)
WHERE u.age > 44
"""
# pandas runs the query and builds a DataFrame with column names in one call
df = psql.read_sql(query_table_users, conn)
df
# +
query_table_users = """
SELECT *
FROM pa004.users u LEFT JOIN pa004.vehicle v ON (u.id = v.id)
LEFT JOIN pa004.insurance i ON (u.id = i.id)
WHERE u.age > 44
"""
df = pd.read_sql(query_table_users, conn)
# columns 5 and 9 are presumably the duplicated `id` columns produced by the
# two joins -- verify the positions against the actual schema
df = df.drop(df.columns[[5, 9 ]], axis = 1)
df
# -
|
pa004.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Lab 03 - Polynomial Fitting
# In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we
# mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting".
#
# Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial
# of some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is:
# $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$
#
# So our hypothesis class is of the form:
# $$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$
#
# Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two
# examples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
# -
import sys
sys.path.append("../")
from utils import *
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Ground truth: a degree-4 polynomial sampled at 20 of 40 evenly spaced points
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
# Real-world example: position level (the index column) vs. salary
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
    .add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
                 go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
                rows=[1,1], cols=[1,2])\
    .update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
# + [markdown] pycharm={"name": "#%% md\n"}
# As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,
# such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,
# we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.
#
# For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
#
# + pycharm={"is_executing": false, "name": "#%%\n"}
from sklearn.preprocessing import PolynomialFeatures
# show the first m rows of the degree-k design (Vandermonde) matrix
m, k, X = 5, 4, x.reshape(-1, 1)
# the rf-strings leave a literal {0} placeholder which .format() then fills with i
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
             columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
             index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Fitting A Polynomial Of Different Degrees
#
# Next, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.
# We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model
# that describes the data in a better way, reflected by the decrease in the MSE.
#
# *Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
# + pycharm={"is_executing": false, "name": "#%%\n"}
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
# Fit the noise-free data with polynomials of increasing degree, one subplot each
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
    # the x^0 polynomial feature supplies the bias, hence fit_intercept=False
    y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
    fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
    # rf-string keeps literal {0}/{1} placeholders which .format then fills
    fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
                  margin=dict(t=60),
                  yaxis_title=r"$\widehat{y}$",
                  height=300).show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model, after which, as we increase $k$,
# the additional coefficients will be zero.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Fitted coefficient vectors per degree; on the noise-free data the
# coefficients beyond the true degree (4) come out (numerically) zero
coefs = {}
for k in ks:
    fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
    # steps[1][1] is the LinearRegression stage of the pipeline
    coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Fitting Polynomial Of Different Degrees - With Sample Noise
#
# Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).
# This time we observe two things:
# - Even for the correct $k=4$ model we are not able to achieve zero MSE.
# - As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Add standard Gaussian noise to the labels, then refit for k = 2..7
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
    r,c = i//3+1, i%3+1  # subplot grid position (row, col) in the 2x3 layout
    y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
    fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
    fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
# + [markdown] pycharm={"name": "#%% md\n"}
# How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the model
# more "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go after
# the noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.
#
# Later in the course we will learn methods for detection and avoidance of overfitting.
#
#
# ## Fitting Polynomial Over Different Sample Noise Levels
#
# Next, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Though
# we will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as its
# distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can
# observe this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Fix the degree at the true k=4 and vary the noise scale from 0 to 5
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
    r,c = i//3+1, i%3+1  # subplot grid position (row, col)
    y = y_ + np.random.normal(scale=s, size=len(y_))
    y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
    fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
    fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## The Influence Of $k$ And $\sigma^2$ On Error
#
# Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will
# add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select an
# hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each set of $\left(k,\sigma^2\right)$ 10 times and report
# the mean MSE value. Results are seen in heatmap below:
#
# + pycharm={"is_executing": false, "name": "#%%\n"}
from sklearn.model_selection import ParameterGrid
# 10 repetitions per (k, sigma) pair; the groupby-mean below averages them
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
    df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
# + [markdown] pycharm={"name": "#%% md\n"}
# # Time To Think...
#
# In the above figure, we observe the following trends:
# - As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
# - Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
# - For all noise levels, we manage to reduce MSE values by increasing $k$.
#
# So, by choosing a **richer** hypothesis class (i.e. larger and that can express more functions - polynomials of higher
# degree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless to how noisy the data is.
# Try and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e train error)
# we would have calculated it over a **new** set of test samples drawn from the same distribution.
#
# Use the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
# +
from sklearn.model_selection import ParameterGrid
# Same experiment as figure 6, but the MSE is measured on held-out test points
# (the other 20 of the 40 evenly spaced samples).
df = []
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    # Regenerate the noisy TRAIN labels for this noise level. Previously `y`
    # was stale from the preceding cell, so every model was fit to the same
    # unrelated noisy data regardless of `setting["s"]`.
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    # Noisy TEST labels. ravel() flattens the (m, 1) response so adding the
    # (m,) noise yields an (m,) vector instead of broadcasting to (m, m).
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=testX.shape[0])
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(7) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
# -
|
lab/Lab 03 - Polynomial Fitting.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3
# ---
# # Testing different embedding schemes for information retrieval
# ## Step 1: Load sample text
import sys
sys.path.append('..')
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from src.utils import read_txt, split_txt, aiap_qna_quickscore
# Knowledge base: split the fund guide into retrievable passages
text = read_txt('../data/fund_guide_extend.txt')
# text = [x.decode("utf-8") for x in text]
condition_terms = split_txt(text, qa=False)
print(len(condition_terms))
# condition_terms[0]
# # Step 2: Load sample questions
import pandas as pd
df_queries = pd.read_csv('../data/Consolidated emails.csv', encoding='iso-8859-1')
print(len(df_queries))
# Answer collection
# repeat each query three times so the comparison frame can hold the top-3 answers per query
initial_list=df_queries['Email Queries'].values
modified_list=[val for val in initial_list for _ in (0, 1, 2)]
df_comparisons = pd.DataFrame({'queries' : modified_list})
# # Step 3: define similarity function
# The cosine similarity function returns the cosine similarity given a query string, an encoder, and an array of knowledgebase embeddings
def cosine_sim_results(query_str, encoder, kb_embeddings, **kwargs):
    """Return cosine similarity between a query and every knowledge-base entry.

    Args:
        query_str: list of query strings, or a pre-computed embedding row when
            ``encoder`` is a pass-through such as ``dummy_embed_fn``.
        encoder: callable mapping ``query_str`` to an embedding matrix.
        kb_embeddings: (n_entries, dim) matrix of knowledge-base embeddings.
        **kwargs: optional ``tokenize`` flag forwarded to the encoder.

    Returns:
        (n_entries, n_queries) array of cosine similarities.
    """
    if kwargs:
        # Forward `tokenize` by keyword: passed positionally it would land in
        # the encoder's second positional parameter (e.g. InferSent.encode's
        # `bsize`), silently misconfiguring the encoder.
        qn_embedding = encoder(query_str, tokenize=kwargs.get('tokenize', None))
    else:
        qn_embedding = encoder(query_str)
    results = cosine_similarity(kb_embeddings, qn_embedding)
    return results
# # Utility functions
def dummy_embed_fn(vector):
    """Pass-through "encoder": the embedding is already computed, so return it as-is."""
    precomputed = vector
    return precomputed
# # Exp 1: Test InferSent model
from InferSent.models import InferSent
import torch
# version 1 of the pretrained encoder, paired with fastText word vectors
V = 1
MODEL_PATH = '../encoder/infersent%s.pkl' % V
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
                'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
infersent = InferSent(params_model)
infersent.load_state_dict(torch.load(MODEL_PATH))
W2V_PATH = '../fastText/crawl-300d-2M.vec'
infersent.set_w2v_path(W2V_PATH)
# restrict the word-vector vocabulary to words seen in the knowledge base
infersent.build_vocab(condition_terms, tokenize=True)
infersent.update_vocab(condition_terms, tokenize=True)
# pre-compute embeddings for every KB passage and every email query
response_results = infersent.encode(condition_terms, tokenize=True)
questions = list(df_queries['Email Queries'])
question_results = infersent.encode(questions, tokenize=True)
print(response_results.shape, question_results.shape)
# +
# sample question
# question=['how frequent can i get disbursements?']
# condition_terms[cosine_sim_results(question, infersent.encode, embeddings, tokenize=True).argmax()]
# +
# question=['who is RESPONSIBLE FOR DATA CHARGES?']
# sortargs=cosine_sim_results(question, infersent.encode, embeddings, tokenize=True).argsort(axis=0)
# print(sortargs.shape)
# for ii,arg in enumerate(sortargs[::-1,0]):
# print(ii, condition_terms[arg])
# if ii==4:
# break
# +
# list(cosine_sim_results(question, infersent.encode, embeddings, tokenize=True).argsort(axis=0).shape[-2:])
# -
# ## Test with example queries
# For each query, rank KB passages by cosine similarity and record the top 3
responses=[]
for index, ii in enumerate(df_queries.iterrows()):
    print('QN: ', ii[1]['Email Queries'])
    # np.flip reverses the ascending argsort so the best match comes first
    sortargs=np.flip(cosine_sim_results(question_results[index].reshape(1, -1), dummy_embed_fn, response_results).argsort(axis=0))
    for ans in range(3):
        responses.append(condition_terms[sortargs[ans,0]])
        print('ANS: ', condition_terms[sortargs[ans,0]])
df_comparisons['infersent']=responses
# +
# responses=[]
# for ii in df_queries.iterrows():
# print('QN: ', ii[1]['Email Queries'])
# sortargs=np.flip(cosine_sim_results([ii[1]['Email Queries']], infersent.encode, embeddings, tokenize=True).argsort(axis=0))
# for ans in range(3):
# print(sortargs[ans, 0])
# responses.append(condition_terms[sortargs[ans,0]])
# print('ANS: ', condition_terms[sortargs[ans,0]])
# # answer = condition_terms[cosine_sim_results([ii[1]['Email Queries']], infersent.encode, embeddings, tokenize=True).argmax()]
# # print('ANS: ', answer)
# # responses.append(answer)
# # print('\n')
# df_comparisons['infersent']=responses
# -
# # Exp 2: Google universal sentence encoder
# # !pip install tensorflow-gpu
# # !pip install tensorflow-hub
import tensorflow as tf
import tensorflow_hub as hub
# +
#download the model to local so it can be used again and again
# # !mkdir google_use
# Download the module, and uncompress it to the destination folder.
# # !curl -L "https://tfhub.dev/google/universal-sentence-encoder-large/3?tf-hub-format=compressed" | tar -zxvC ./google_use
# -
# load the locally cached USE module (TF1-style hub API)
embed = hub.Module("../google_use")
# +
def use_embed(terms):
    """Embed *terms* with the module-level USE `embed`, using a fresh TF session per call."""
    init_ops = [tf.global_variables_initializer(), tf.tables_initializer()]
    with tf.Session() as sess:
        sess.run(init_ops)
        vectors = sess.run(embed(terms))
    return vectors
# to only load session once.
# def embed_useT(module):
# with tf.Graph().as_default():
# sentences = tf.placeholder(tf.string)
# embed = hub.Module(module)
# embeddings = embed(sentences)
# session = tf.train.MonitoredSession()
# return lambda x: session.run(embeddings, {sentences: x})
# -
# embed the full knowledge base and all email queries with USE
response_results = use_embed(condition_terms)
all_questions = df_queries['Email Queries']
questions=list(all_questions)
question_results = use_embed(questions)
print(response_results.shape, question_results.shape)
# +
# responses=[]
# for ii in df_queries.iterrows():
# print('QN: ', ii[1]['Email Queries'])
# answer = condition_terms[cosine_sim_results([ii[1]['Email Queries']], use_embed, message_embeddings).argmax()]
# print('ANS: ', answer)
# responses.append(answer)
# print('\n')
# df_comparisons['use']=responses
# +
# Rank KB passages for each query with the USE embeddings; record the top 3
responses=[]
for index, ii in enumerate(df_queries.iterrows()):
    print('QN: ', ii[1]['Email Queries'])
    # embeddings are pre-computed, so a pass-through "encoder" is used
    sortargs=np.flip(cosine_sim_results(question_results[index].reshape(1, -1), dummy_embed_fn, response_results).argsort(axis=0))
    for ans in range(3):
        responses.append(condition_terms[sortargs[ans,0]])
        print('ANS: ', condition_terms[sortargs[ans,0]])
df_comparisons['use']=responses
# +
# responses=[]
# for ii in df_queries.iterrows():
# print('QN: ', ii[1]['Email Queries'])
# sortargs=np.flip(cosine_sim_results([ii[1]['Email Queries']], use_embed, message_embeddings).argsort(axis=0))
# for ans in range(3):
# responses.append(condition_terms[sortargs[ans,0]])
# print('ANS: ', condition_terms[sortargs[ans,0]])
# df_comparisons['use']=responses
# -
# # Exp 3: Test the new QnA USE
# # !pip install sentencepiece
# !pip install tf-sentencepiece
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import tf_sentencepiece
def init_model(questions=None, responses=None, response_contexts=None):
    """Build the multilingual USE-QnA graph and return a live session plus its tensors.

    The three parameters are accepted for call-site compatibility -- the
    notebook calls ``init_model(questions, responses, response_contexts)``,
    which previously raised ``TypeError`` against the zero-argument
    signature. They are not used here: the actual inputs must be supplied
    through the returned placeholders via ``feed_dict`` when running the
    embedding ops.

    Returns:
        (session, question_embeddings, response_embeddings,
         question, response, response_context)
    """
    # Set up graph.
    g = tf.Graph()
    with g.as_default():
        embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/1")
        # string placeholders -- fed by the caller at session.run time
        question = tf.placeholder(dtype=tf.string, shape=[None])  # question
        response = tf.placeholder(dtype=tf.string, shape=[None])  # response
        response_context = tf.placeholder(dtype=tf.string, shape=[None])  # response context
        question_embeddings = embed(
            dict(input=question),
            signature="question_encoder", as_dict=True)
        response_embeddings = embed(
            dict(input=response,
                 context=response_context),
            signature="response_encoder", as_dict=True)
        init_op = tf.group([tf.global_variables_initializer(), tf.tables_initializer()])
        g.finalize()
    # Initialize session.
    session = tf.Session(graph=g, config=tf.ConfigProto(log_device_placement=True))
    session.run(init_op)
    return session, question_embeddings, response_embeddings, question, response, response_context
# define inputs
responses = condition_terms
response_contexts = responses  # no separate context available; reuse the responses
all_questions = df_queries['Email Queries']
questions=list(all_questions)
session, question_embeddings, response_embeddings, question, response, response_context = init_model()
# Compute embeddings. The graph exposes string placeholders, so the inputs
# must be fed explicitly -- running the ops without a feed_dict (as before)
# fails on the unfed placeholders.
response_results = session.run(response_embeddings,
                               feed_dict={response: responses, response_context: response_contexts})
question_results = session.run(question_embeddings, feed_dict={question: questions})
session.close()
response_results['outputs'].shape
# +
# responses = []
# for index, ii in enumerate(df_queries.iterrows()):
# print('QN: ', ii[1]['Email Queries'])
# answer=condition_terms[cosine_sim_results(question_results['outputs'][index].reshape(1, -1), dummy_embed_fn, response_results['outputs']).argmax()]
# print('ANS: ', answer)
# responses.append(answer)
# print('\n')
# df_comparisons['use_qa']=responses
# -
# Rank KB passages per query with the QnA encoder outputs; record the top 3
responses=[]
for index, ii in enumerate(df_queries.iterrows()):
    print('QN: ', ii[1]['Email Queries'])
    # the encoder returns a dict; 'outputs' holds the embedding matrix
    sortargs=np.flip(cosine_sim_results(question_results['outputs'][index].reshape(1, -1), dummy_embed_fn, response_results['outputs']).argsort(axis=0))
    for ans in range(3):
        responses.append(condition_terms[sortargs[ans,0]])
        print('ANS: ', condition_terms[sortargs[ans,0]])
df_comparisons['use_qa']=responses
# # Save to csv
df_comparisons.to_csv('../predictions_3_extend_batch2.csv', index=False)
# # Test AIAP qna
# 7/10  (presumably a hand-recorded retrieval score -- confirm)
from src.model import QnaEncoderModel
model=QnaEncoderModel()
# split the AIAP FAQ file into parallel (answer, question) lists
aiap_qa, aiap_questions = split_txt(read_txt('../data/aiap.txt'), qa=True)
# aiap_qa = split_txt(read_txt('./data/aiap.txt'))
print(aiap_qa[0], aiap_questions[0])
# pre-compute embeddings for every stored answer
answer_array = model.predict(aiap_qa)
# +
# return aiap_qa[(lambda x: cosine_similarity(answer_array, model.predict(x, type='query')))([question]).argmax()]
# aiap_qa[np.flip(cosine_sim_results(question_results['outputs'][index].reshape(1, -1), dummy_embed_fn, response_results['outputs']).argsort(axis=0))[:2]]
# +
aiap_qna_quickscore(aiap_questions, answer_array, aiap_qa, model, 1)
# -
# NOTE(review): this cell fails as written -- `aiap_context` and
# `answer_arraym` are undefined in this section (`aiap_questions` and
# `answer_array` above look intended), and a free `aiap_qna` function is only
# defined in later cells with a single-argument signature. Confirm the
# intended helper (possibly `aiap_qna_quickscore`) before running.
for qn in aiap_context:
    print(qn, aiap_qna(qn, answer_arraym, aiap_qa, model, 1), '\n')
# ## USE model
# 2/10  (presumably a hand-recorded retrieval score -- confirm)
from src.model import USEModel
model=USEModel()
aiap_qa, aiap_context = split_txt(read_txt('../data/aiap.txt'), qa=True)
answer_array = model.predict(aiap_qa)
def aiap_qna(question):
    # return the stored answer whose embedding is most similar to the question's
    return aiap_qa[(lambda x: cosine_similarity(answer_array, model.predict(x)))([question]).argmax()]
# retrieve an answer for every question in the FAQ
for qn in aiap_context:
    print(qn, aiap_qna(qn), '\n')
# ## Infersent
# build vocab on context only: 1/10  (presumably a hand-recorded score -- confirm)
from src.model import InferSent
model=InferSent()
aiap_qa, aiap_context = split_txt(read_txt('../data/aiap.txt'), qa=True)
# vocabulary built from the questions only (see the score note above)
model.build_vocab(aiap_context)
answer_array = model.predict(aiap_qa)
def aiap_qna(question):
    # return the stored answer whose embedding is most similar to the question's
    return aiap_qa[(lambda x: cosine_similarity(answer_array, model.predict(x)))([question]).argmax()]
for qn in aiap_context:
    print(qn, aiap_qna(qn), '\n')
|
notebooks/Exploratory_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Jr4cugH3mZ_X" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8ec2bb83-d87e-4bad-b7c5-0d433e185f63"
# LSTM for sequence classification in the IMDB dataset
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
# fix random seed for reproducibility
# NOTE: this seeds NumPy only; the backend's own RNG is not seeded, so runs may still vary
numpy.random.seed(7)
# + id="Kjf2yM5GF4_8" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="7ea1f028-9d40-411e-bc9c-78b0ecb19cb6"
#Refer: https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
# load the dataset but only keep the top n words, zero the rest
top_words = 5000
# NOTE(review): `nb_words` is the Keras 1 spelling; Keras 2 renamed it to
# `num_words` -- confirm the installed Keras version before running.
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words)
# + id="eYEE6ts7GAjC" colab={"base_uri": "https://localhost:8080/", "height": 89} outputId="e28e2718-8b44-4478-d8b8-88c0189fdde8"
# each review is a variable-length list of word indices
print(X_train[1])
print(type(X_train[1]))
print(len(X_train[1]))
# + id="57N6TyKLH-Pc" colab={"base_uri": "https://localhost:8080/", "height": 780} outputId="1dc02095-292e-4c2d-b0b0-9c4b25012ce5"
# truncate and/or pad input sequences
# every review is clipped/zero-padded to exactly 600 word indices
max_review_length = 600
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
print(X_train.shape)
print(X_train[1])
# + id="CquzlqrOIYGn" colab={"base_uri": "https://localhost:8080/", "height": 260} outputId="593c99ca-f15e-4df7-8ac6-0e7d0950415d"
# Build the sentiment classifier: word embedding -> single LSTM layer -> sigmoid output
embedding_dim = 32  # length of each learned word vector
model = Sequential([
    Embedding(top_words + 1, embedding_dim, input_length=max_review_length),
    LSTM(100),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
#Refer: https://datascience.stackexchange.com/questions/10615/number-of-parameters-in-an-lstm-model
# + id="A_FT0dPNIeLP" colab={"base_uri": "https://localhost:8080/", "height": 436} outputId="55c48adc-c76a-462f-c737-7d73bafdb5cb"
# NOTE(review): `nb_epoch` is the Keras 1 kwarg; Keras 2 renamed it `epochs`.
model.fit(X_train, y_train, nb_epoch=10, batch_size=64)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
# scores = [loss, accuracy]; report accuracy as a percentage
print("Accuracy: %.2f%%" % (scores[1]*100))
|
LSTM_IMDB.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Here we go!
#
# * What are the data types in Python?
# * What is a variable and rules to keep in mind while defining a variable?
# * Type of errors in Python?
# * Booleans and Logical Operators
# * Conditional statements: `if`, `elif` and `else` in Python
# Building blocks of programs:
# input
# output
# sequential execution
# conditional execution
# repeated execution
# reuse
#1. A Data Types
# NOTE(review): the next three bare names raise NameError when run --
# presumably kept as a deliberate teaching demo of the three basic types.
String
Integers
Float
#Strings : Immutable objects,
best_cricketer = '<NAME>'
type(best_cricketer)
# quoting the digit makes this a string, not an integer
devine_number = '9'
type(devine_number)
#1. B Floats: numbers with decimal points - mathematical computation is handled
going_great = 90.56
type(going_great)
#1. C Integers: are the whole numbers - mathematical computation is handled here as well
integers = 55
type(integers)
# +
# 2 - What is a variable and rules to keep in mind while defining a variable?
# -
# a variable name should not start with a number
saiioo = 'SENAP'
sai = 8
SAI = 8   # names are case-sensitive: SAI and sai are distinct variables
SAI = 65  # rebinding replaces the previous value
# should not contain an illegal character (leading underscores are allowed)
_country_nameeeeee_ejtidjk = 'Australia'
# should not be a python keyword
# NOTE(review): the next line is a SyntaxError on purpose -- `del` is a keyword
del = 45.98
# djsk
'''ksldjfjdl
fjdlfmdmfld
fjdfdk'''
# Operators (+,-,/,*,and **) PEMDAS
# var = 18 / 6
# print(type(var))
# commenting
var = 18 // 6  # floor division -> int (true division / would give a float)
print(type(var))
# +
# code block commenting
# in line commenting
# +
# commenting -
# -
#3 Types of errors in Python?
# NOTE(review): the list below is prose typed as code and raises SyntaxError --
# presumably kept deliberately as lecture notes.
a - Compilation errors
1 - Syntax error
2 - Name error
3 - Value error
b - Runtime errors
4 - zero division error
# runtime errors are more dangerous as we have to look into our data for figuring out the mistake.
#3.1 Syntax error - code that does not follow the language's grammar (deliberately broken below)
ankush ==1 1
#3.2 Name error - code that tries to use something that does not exist.
dele(1)
#3.3 Value error - int() raises it when the entered text is not a valid number
purchase_price = int(input())
# 3.4 Zero division Error.
times_pro = 4 / 0
#4 Booleans and Logical Operators
# Boolean operators check for the combination of multiple relational operators.
cat=4
dog=2
mamals=(cat==dog) #False because cat and dog hold different values
mamals=(cat==cat) #True because both sides are the same value
#Comparison is by value
hi = 123456789
HI = 123456789
# id() returns the location of a value in memory; the two names may print
# different ids because each name can be bound to its own int object
print(id(HI),id(hi))
print(HI == hi)
#As both of them have the same value, the result is True
# Logical Operators -> Checks if things are true
cow_has_how_many_legs=4
Numers_of_wings_birds_have=2
print(cow_has_how_many_legs>1 and Numers_of_wings_birds_have>1)
#True because both are greater than 1
cow_has_how_many_legs=4
Numers_of_wings_birds_have=2
print(cow_has_how_many_legs<-60 and Numers_of_wings_birds_have<-35)
#False because both comparisons are False
#5. if elif and else In python?
bahubali = float(input("No. of actors in the movie: "))
# branch on the entered number; exactly 15 falls through to the else branch
if bahubali > 15:
    print('Ohh, is it!')
elif bahubali < 15:
    print ('I do not think so')
else:
    print ('bingoo!!!')
# +
# if statement: a conditional control structure that runs a block of code only if a certain condition is true.
# elif statement: like an if statement, always followed by a condition; tested when the preceding checks were false.
# else: a conditional control structure that runs a block of code if all preceding if and elif statements
# have been false.
|
25_Aug_2018/Python_Basics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Results for PCA+LSTM
#
#
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
from keras.models import Sequential, model_from_json
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import warnings
import numpy as np
from collections import OrderedDict
import os
from lob_data_utils import lob, db_result, gdf_pca, model, stocks_numbers
from lob_data_utils.svm_calculation import lob_svm
sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
# -
# Directory holding one CSV of GRU results per experiment; all files are
# concatenated into a single df_res frame.
data_dir = 'res_gru/'
if_should_savefig = False # TODO
frames = []
for f in os.listdir(data_dir):
    d = pd.read_csv(os.path.join(data_dir, f))
    d['filename'] = f  # broadcast the source filename to every row
    # 'unit' looks like "(N: ...)": keep the leading number, dropping the "(".
    d['number_of_units'] = [row['unit'].split(':')[0][1:] for _, row in d.iterrows()]
    frames.append(d)
# pd.concat replaces the per-iteration DataFrame.append call, which was
# removed in pandas 2.0 (and was O(n^2) — it copied the frame each time).
df_res = pd.concat(frames) if frames else pd.DataFrame()
# Overfitting indicator: train score minus validation score.
df_res['diff'] = df_res['train_matthews'] - df_res['matthews']
# Load the QUE+LOG baseline results used for comparison against the LSTM runs.
df_log = pd.read_csv('res_log_que.csv')
columns = ['matthews', 'test_matthews', 'stock', 'unit']
# Best configuration per stock: highest validation MCC ('matthews') wins.
df_best = df_res.sort_values(by='matthews', ascending=False).groupby(['stock']).head(1)
# Join the LSTM best rows with the baseline; overlapping columns get
# '_lstm' / '_log' suffixes.
df_best = pd.merge(df_best, df_log, on='stock', suffixes=['_lstm', '_log'])
df_best.index = df_best['stock']
# Bare expression: notebook display of the selected columns.
df_best[['r', 's', 'unit', 'n_steps', 'kernel_reg', 'train_matthews_lstm',
         'matthews_lstm', 'test_matthews_lstm', 'test_matthews_log', 'stock', 'filename']]
# Notebook display: best row per stock ranked by test MCC instead.
df_res.sort_values(by='test_matthews', ascending=False).groupby(
    'stock').head(1)[['matthews', 'stock', 'test_matthews', 'r', 's', 'number_of_units', 'kernel_reg']]
# One subplot per (r, s) parameter pair, for a single stock (2748).
fig, ax = plt.subplots(1, 5, figsize=(16, 4), sharey=True)
i = 0
for r, s in stocks_numbers.rs_params:
    d = df_res[df_res['r'] == r]
    d = d[d['s'] == s]
    d = d[d['stock'] == 2748]
    # ax[i].axhline(d['test_matthews_log'].values[0])
    # Third positional scatter argument is marker size, so points are
    # sized by 'n_steps'.
    ax[i].scatter(d['number_of_units'], d['matthews'], d['n_steps'])
    ax[i].scatter(d['number_of_units'], d['test_matthews'], d['n_steps'])
    ax[i].scatter(d['number_of_units'], d['train_matthews'], d['n_steps'])
    ax[i].set_title(f'{r} {s}')
    i += 1
df_best[['r', 's', 'train_roc_auc_lstm',
'roc_auc_lstm', 'test_roc_auc_lstm', 'test_roc_auc_log', 'stock', 'filename']]
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 4))
df_best[['train_matthews_lstm', 'matthews_lstm', 'test_matthews_lstm']].plot(kind='bar', ax=ax1)
ax1.legend(['Train', 'Validation', 'Test'])
ax1.set_title('MCC score for GDF+PCA+LSTM')
df_best[['train_roc_auc_lstm', 'roc_auc_lstm', 'test_roc_auc_lstm']].plot(kind='bar', ax=ax2)
ax2.legend(['Train', 'Validation', 'Test'])
ax2.set_ylim(0.5, 0.7)
ax2.set_title('ROC area score for GDF+PCA+LSTM')
plt.tight_layout()
if if_should_savefig:
plt.savefig('gdf_pca_lstm_mcc_roc_scores_bar.png')
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 4))
df_best[['test_matthews_lstm', 'test_matthews_log']].plot(kind='bar', ax=ax1)
ax1.legend(['GDF+PCA+LSTM', 'QUE+LOG'])
ax1.set_title('MCC score for GDF+PCA+LSTM vs QUE+LOG on test data-set')
df_best[['test_roc_auc_lstm', 'test_roc_auc_log']].plot(kind='bar', ax=ax2)
ax2.legend(['GDF+PCA+LSTM', 'QUE+LOG'])
ax2.set_ylim(0.5, 0.7)
ax2.set_title('ROC area score for GDF+PCA+LSTM vs QUE+LOG on test data-set')
plt.tight_layout()
if if_should_savefig:
plt.savefig('gdf_pca_lstm_que_log_mcc_roc_scores_bar.png')
# +
df_best[['train_matthews_lstm', 'matthews_lstm', 'test_matthews_lstm', 'test_matthews_log']].plot(kind='bar', figsize=(16, 4))
plt.legend(['Train', 'Validation', 'Test', 'QUE+LOG Test'])
df_best[['train_roc_auc_lstm', 'roc_auc_lstm', 'test_roc_auc_lstm', 'test_roc_auc_log']].plot(kind='bar', figsize=(16, 4))
plt.legend(['Train', 'Validation', 'Test', 'QUE+LOG Test'])
plt.ylim(0.5, 0.7)
plt.tight_layout()
# -
print(df_best[['train_matthews_lstm', 'matthews_lstm', 'test_matthews_lstm',
'train_roc_auc_lstm', 'roc_auc_lstm', 'test_roc_auc_lstm']].describe().to_latex())
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 4))
sns.distplot(df_best['train_matthews_lstm'], label='Train', ax=ax1)
sns.distplot(df_best['matthews_lstm'], label='Validation', ax=ax1)
sns.distplot(df_best['test_matthews_lstm'], label='Test', ax=ax1)
ax1.legend(['Train', 'Validation', 'Test'])
ax1.set_title('MCC score distribution for GDF+PCA+LSTM')
ax1.set_xlabel('MCC score')
sns.distplot(df_best['train_roc_auc_lstm'], label='Train', ax=ax2)
sns.distplot(df_best['roc_auc_lstm'], label='Validation', ax=ax2)
sns.distplot(df_best['test_roc_auc_lstm'], label='Test', ax=ax2)
ax2.legend(['Train', 'Validation', 'Test'])
ax2.set_title('ROC area score distribution for GDF+PCA+LSTM')
ax2.set_xlabel('ROC area score')
plt.tight_layout()
if if_should_savefig:
plt.savefig('gdf_pca_lstm_score_dist.png')
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
columns = ['stock', 'matthews_lstm', 'roc_auc_lstm',
'test_matthews_lstm', 'test_roc_auc_lstm', 'train_matthews_lstm', 'train_roc_auc_lstm']
df = df_best[columns].copy()
df.rename(columns={
'matthews_lstm': 'Validation', 'test_matthews_lstm': 'Testing', 'train_matthews_lstm': 'Train'}, inplace=True)
df = df.melt(['stock', 'roc_auc_lstm', 'test_roc_auc_lstm', 'train_roc_auc_lstm'])
sns.violinplot(x="variable", y="value", data=df, ax=ax1)
ax1.set_title('Distribution of MCC scores')
ax1.set_xlabel('Data Set')
ax1.set_ylabel('Score')
df = df_best[columns].copy()
df.rename(columns={'roc_auc_lstm': 'Validation', 'test_roc_auc_lstm': 'Testing', 'train_roc_auc_lstm': 'Train'}, inplace=True)
df = df.melt(['stock', 'matthews_lstm', 'test_matthews_lstm', 'train_matthews_lstm'])
ax2.set_title('Distribution of ROC Area scores')
sns.violinplot(x="variable", y="value", data=df, ax=ax2)
ax2.set_xlabel('Data Set')
ax2.set_ylabel('Score')
plt.tight_layout()
if if_should_savefig:
plt.savefig('violin_distribution_scores_gdf_pca_lstm.png')
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 4))
df_best['pca_components'].plot(kind='bar', color=['b'], alpha=0.5, ax=ax1)
ax1.set_title('Number of PCA components')
ax1.set_ylabel('PCA components')
sns.distplot(df_best['pca_components'], ax=ax2, bins=10, kde=False)
ax2.set_title('PCA components histogram')
ax2.set_ylabel('Count')
ax2.set_xlabel('PCA component')
plt.tight_layout()
if if_should_savefig:
plt.savefig('gdf_pca_lstm_pca_components.png')
# -
plt.scatter(x=df_best['r'], y=df_best['s'], c=df_best['pca_components'])
plt.title('Number of PCA components')
plt.ylabel('PCA components')
plt.legend()
plt.tight_layout()
if if_should_savefig:
plt.savefig('gdf_pca_lstm_pca_components.png')
# +
# Group validation MCC scores by each (r, s) GDF-parameter combination.
r_s_dict = OrderedDict()
r_parameters = [0.01, 0.1]
s_parameters = [0.1, 0.5]
for r in r_parameters:
    for s in s_parameters:
        # NOTE(review): chained boolean indexing df[mask1][mask2] works here,
        # but pandas recommends one combined mask:
        # df_best[(df_best['r'] == r) & (df_best['s'] == s)].
        r_s_dict['r={}, s={}'.format(r, s)] = df_best[df_best['r'] == r][df_best['s'] == s][
            'matthews_lstm'].values
plt.figure(figsize=(16, 8))
ax = sns.boxplot(data=list(r_s_dict.values()))
plt.ylabel('MCC score')
plt.xlabel('Parameters r and s')
_ = ax.set_xticklabels(list(r_s_dict.keys()), rotation=45)
plt.title('MCC score distribution for different r and s parameters for validation set')
# -
# ## Comparison with QUE+LOG
# Score differences: positive values mean GDF+PCA+LSTM beats the QUE+LOG baseline.
df_best['diff_test_matthews'] = df_best['test_matthews_lstm'] - df_best['test_matthews_log']
df_best['diff_train_matthews'] = df_best['train_matthews_lstm'] - df_best['train_matthews_log']
df_best['diff_matthews'] = df_best['matthews_lstm'] - df_best['matthews_log']
df_best['diff_test_roc_auc'] = df_best['test_roc_auc_lstm'] - df_best['test_roc_auc_log']
df_best['diff_train_roc_auc'] = df_best['train_roc_auc_lstm'] - df_best['train_roc_auc_log']
df_best['diff_roc_auc'] = df_best['roc_auc_lstm'] - df_best['roc_auc_log']
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 6))
sns.distplot(df_best['diff_train_matthews'], label='Train', ax=ax1)
sns.distplot(df_best['diff_matthews'], label='Validation', ax=ax1)
sns.distplot(df_best['diff_test_matthews'], label='Test', ax=ax1)
ax1.set_title('Dist. plot of differences of MCC score for GDF+PCA+LSTM and QUE+LOG')
ax1.set_xlabel('MCC score')
ax1.legend(['Train', 'Validation', 'Test'])
sns.distplot(df_best['diff_train_roc_auc'], label='Train', ax=ax2)
sns.distplot(df_best['diff_roc_auc'], label='Validation', ax=ax2)
sns.distplot(df_best['diff_test_roc_auc'], label='Test', ax=ax2)
ax2.set_title('Dist.plot of differences of ROC area score for GDF+PCA+LSTM and QUE+LOG')
ax2.legend(['Train', 'Validation', 'Test'])
ax2.set_xlabel('ROC area score')
plt.tight_layout()
if if_should_savefig:
plt.savefig('gdf_pca_lstm_and_que_log_score_diff.png')
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
columns = ['stock', 'diff_matthews', 'diff_roc_auc',
'diff_test_matthews', 'diff_test_roc_auc', 'diff_train_matthews', 'diff_train_roc_auc']
df = df_best[columns].copy()
df.rename(columns={
'diff_matthews': 'Validation', 'diff_test_matthews': 'Testing', 'diff_train_matthews': 'Train'}, inplace=True)
df = df.melt(['stock', 'diff_roc_auc', 'diff_test_roc_auc', 'diff_train_roc_auc'])
sns.violinplot(x="variable", y="value", data=df, ax=ax1)
ax1.set_title('Distribution of differences of MCC scores')
ax1.set_xlabel('Data Set')
ax1.set_ylabel('Score')
df = df_best[columns].copy()
df.rename(
columns={'diff_roc_auc': 'Validation', 'diff_test_roc_auc': 'Testing', 'diff_train_roc_auc': 'Train'},
inplace=True)
df = df.melt(['stock', 'diff_matthews', 'diff_test_matthews', 'diff_train_matthews'])
ax2.set_title('Distribution of differences of ROC Area scores')
sns.violinplot(x="variable", y="value", data=df, ax=ax2)
ax2.set_xlabel('Data Set')
ax2.set_ylabel('Score')
plt.tight_layout()
if if_should_savefig:
plt.savefig('gdf_pca_lstm_and_que_log_violin_score_diff.png')
# -
bad = df_best[df_best['test_matthews_lstm'] < df_best['test_matthews_log']]['stock'].values
df_best[['diff_train_matthews', 'diff_matthews', 'diff_test_matthews',
'diff_train_roc_auc', 'diff_roc_auc', 'diff_test_roc_auc']][df_best['stock'].isin(bad)]
df_best[['diff_train_matthews', 'diff_matthews', 'diff_test_matthews',
'diff_train_roc_auc', 'diff_roc_auc', 'diff_test_roc_auc']][df_best['stock'].isin(bad)].describe()
df_best[['diff_train_matthews', 'diff_matthews', 'diff_test_matthews',
'diff_train_roc_auc', 'diff_roc_auc', 'diff_test_roc_auc']].describe()
print(df_best[['diff_train_matthews', 'diff_matthews', 'diff_test_matthews',
'diff_train_roc_auc', 'diff_roc_auc', 'diff_test_roc_auc']].describe().to_latex())
print(df_best[df_best['test_roc_auc_lstm'] < df_best['test_roc_auc_log']]['stock'].values)
print(list(df_best[df_best['test_matthews_lstm'] < df_best['test_matthews_log']]['stock'].values))
# +
columns = ['stock'] + [c for c in df_best.columns if 'matthews' in c]
df_best[columns + ['arch']]
for i, row in df_best.iterrows():
m = model_from_json(row['arch'])
from keras.utils import plot_model
st = row['stock']
r = row['r']
s = row['s']
if if_should_savefig:
plot_model(m, show_layer_names=True, show_shapes=True, to_file=f'plot_model/model_{st}_r{r}_s{s}.png')
# -
df_best[['r', 's', 'matthews_lstm', 'test_matthews_lstm', 'test_matthews_log', 'stock', 'filename']]
|
gdf_pca/pca_gru_overview.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Otimização
# ## Introdução
#
# Problemas de otimização (POs) são encontrados em diversas situações da Engenharia, em particular na Engenharia de Produção. Em uma linha de produção, por exemplo, a otimização de custos com logística, recursos humanos, matéria-prima são exemplos de onde podemos empregar métodos computacionais para obter soluções _ótimas_. Entretanto, princípios de otimização são a base de muitos algoritmos e aplicações de inteligência artificial, em particular, no aprendizado de máquina. Máquinas de vetor de suporte (_support vector machines_) são um exemplo de onde se usa otimização, já que podem ser formuladas como problemas convexos quadráticos.
#
# Problemas de otimização são comumente tratados como *problemas de minimização*, onde se busca o _mínimo global_ de uma _função objetivo_ (FO) escalar $f(x)$, visto que maximizar $f(x)$ é equivalente a minimizar $-f(x)$.
#
# Entretanto, esses problemas são acompanhados de _restrições_, que podem ser representadas por uma igualdade ou por uma desigualdade. Quando uma restrição é escrita na forma $g(x) = 0$, dizemos que $g(x)$ é uma _restrição de igualdade_; quando escrita na forma $h(x) \leq 0$, dizemos que $h(x)$ é uma _restrição de desigualdade_.
#
# Neste capítulo, faremos uma breve explanação sobre otimização tomando o cálculo de derivadas e pontos críticos como elementos fundamentais. Utilizaremos recursos de computação simbólica para resolver um problema unidimensional e revisitaremos conceitos aprendidos nas disciplinas de Cálculo.
# ### Classificação de problemas de otimização
#
# Problemas de otimização (PO) são classificados com base nas propriedades das funções $f(x)$, $g(x)$ e $h(x)$. Em linhas gerais, um PO pode ser:
#
# - _univariado_ (ou _unidimensional_), se $x$ é escalar, i.e. $x \in \mathbb{R}$;
# - _multivariado_ (ou _multidimensional_), se $x$ é um vetor, i.e. $x \in \mathbb{R}^n$.
# - _linear_: se a FO e as restrições são funções lineares. Neste caso, por razões históricas, diz-se que o problema é de _programação linear_.
# - _não-linear_: se a FO e as restrições são funções não-lineares. Neste caso, diz-se que o problema é de _programação não-linear_.
#
# Com respeito às restrições, um PO pode ainda ser:
#
# - _irrestrito_: quando não se assumem limites para os valores de $x$.
# - _restrito_: quando limites para os valores de $x$ são impostos.
#
# Aqui trataremos apenas de casos em que $x \in \mathbb{R}$.
# ### Problemas convexos
#
# Sabe-se que problemas não-lineares são muito mais difíceis de resolver do que problemas lineares porque eles podem admitir uma ampla variedade de comportamentos. Um PO não-linear pode ter tanto _mínimos locais_ quanto _mínimos globais_. Logo, encontrar o _mínimo global_ de uma função $f(x)$ não-linear exige técnicas aperfeiçoadas.
#
# Neste sentido, uma subclasse de problemas não-lineares que pode ser resolvida eficientemente são os chamados _convexos_. Em problemas convexos, a função $f(x)$ é _convexa_. Mas o que é uma _função convexa_?
#
# Uma função convexa definida em um intervalo $[a,b]$ é aquela em que todos os seus valores estão abaixo da reta secante que passa pelos pontos $(a,f(a))$ e $(b,f(b)$. Isto, por sua vez, garante que ela contenha _somente_ um mínimo global.
# Importaremos os seguintes módulos:
import numpy as np
import matplotlib.pyplot as plt
import sympy as sy
sy.init_printing()
# **Exemplo**: a função $f(x) = 5x^2 - 10.36x - 11.2$ é convexa em $[-2,3]$.
# +
# domínio
a,b = -2,3
x = np.linspace(a,b,100)
# função e valores nos extremos
f = lambda x: 5*x**2 - 10.36*x - 11.2
fa,fb = f(a),f(b)
# reta secante
s = fa + (fb - fa)/(b - a)*(x - a)
# ponto de mínimo: -b/(2a)
xmin = 10.36/10
# plotagem de funções
plt.figure(figsize=(5,3))
plt.plot(x,f(x))
plt.plot(x,s,color='#ffa500')
# pontos da secante
plt.plot(a,f(a),'o',color='#ffa500')
plt.plot(b,f(b),'o',color='#ffa500')
# ponto de mínimo
plt.plot(xmin,f(xmin),'*r',ms=10);
plt.title('Exemplo de função convexa');
# -
# **Exemplo**: a função $p(x) = 10x^2\,\textrm{sen}(6x) - 10.36\,x\exp(x/8) - 11.2$ não é convexa em $[-2,3]$.
# +
# função
p = lambda x: 10*x**2*np.sin(6*x) - 10.36*x*np.exp(x/8) - 11.2
# extremos
pa,pb = p(a),p(b)
# secante
t = pa + (pb - pa)/(b - a)*(x - a)
# plotagem de funções
plt.figure(figsize=(5,3))
plt.plot(x,p(x))
plt.plot(x,t,color='#ffa500')
# pontos da secante
plt.plot(a,p(a),'o',color='#ffa500')
plt.plot(b,p(b),'o',color='#ffa500')
# mínimos locais
xloc = [-1.33868618,0.88811853,1.87451904]
for xl in xloc:
plt.plot(xl,p(xl),'or');
# mínimo global
xmin2 = 2.90547127
plt.plot(xmin2,p(xmin2),'*r',ms=10);
plt.title('Exemplo de função não convexa');
# -
# Como vemos acima, a função $p(x)$ admite 3 mínimos locais e um mínimo global. Pense um pouco sobre que estratégia computacional você utilizaria para encontrar os mínimos locais. Mais adiante mostraremos como localizar o mínimo global para funções univariadas contínuas (quando possível).
# ### Pontos de sela
#
# Como vimos acima, a convexidade de uma função é uma propriedade muito importante para que um mínimo global seja localizado. Como sabemos do Cálculo, pontos de máximo ou mínimo identificam-se como _pontos críticos_ de uma função nos quais a primeira derivada da função se anula.
#
# Casos particulares onde a derivada de uma FO anula-se mas o ponto não pode ser definido como de mínimo ou máximo podem ocorrer. Tais situações implicam a existência dos chamados _pontos de sela_. Uma função com um único ponto de sela, por exemplo, não admitirá mínimo global nem mínimo local. Para testarmos se um ponto crítico é um ponto de sela, devemos verificar o sinal da segunda derivada da função. Uma das seguintes situações deve ser obtida em um ponto crítico $x^*$:
#
# - _ponto de mínimo:_ $f''(x^*) > 0$
# - _ponto de máximo:_ $f''(x^*) < 0$
# - _ponto de sela:_ $f''(x^*) = 0$
# **Exemplo:** qualquer função quadrática admite ou um ponto de mínimo ou de máximo. A função $f(x) = x^3$ possui um ponto de sela em $x^* = 0$.
# +
x = np.linspace(-1,1)
plt.figure(figsize=(10,3))
plt.subplot(131)
plt.plot(x,x**2 + 1)
plt.plot(0,1,'r*',ms=10)
plt.title('mínimo global')
plt.subplot(132)
plt.plot(x,-x**2 + 1)
plt.plot(0,1,'r*',ms=10)
plt.title('máximo global')
plt.subplot(133)
plt.plot(x,x**3 + 1)
plt.plot(0,1,'r*',ms=10)
plt.title('ponto de sela');
# -
# ## Otimização univariada
# Como dissemos anteriormente, a otimização univariada visa resolver um problema de minimização tomando uma FO que depende apenas de uma variável. Matematicamente, podemos descrever este problema da seguinte forma:
#
# $$\text{Encontre } x^{*} = \min f(x), \, \text{sujeito a} \, g(x) = 0, h(x) \leq 0.$$
#
# Em geral, $x$ é uma _variável de decisão_, isto é, uma quantidade que pode ser ajustada livremente (ex. comprimentos, áreas, ângulos etc.).
#
# As técnicas utilizadas para a resolução de um problema desse tipo são baseadas em métodos analíticos (busca pelos zeros das derivadas) ou em métodos computacionais (determinação de raízes por processos iterativos). Métodos chamados de _root finding_ são estudados em um curso introdutório de Métodos Numéricos.
#
# Para exemplificar, usaremos uma abordagem analítica por meio de computação simbólica (módulo `sympy`) para resolver um problema que pode ser exibido como de otimização univariada.
# ### Problema resolvido
#
# Consideremos o seguinte problema: _maximizar a área do retângulo inscrito em uma elipse._
# ### Resolução
#
# Em primeiro lugar, escreveremos este problema em linguagem matemática. Sabemos que a área de um retângulo com vértice esquerdo inferior na origem da elipse e com vértice direito superior no ponto $(x,y)$ da elipse que está no primeiro quadrante é dada por $A_r = xy$. Logo, a área do retângulo inscrito na elipse será $A = 4xy$.
#
# A área $A$ pode ser escrita em termos de $y$. Uma vez que a equação da elipse (centrada na origem) é dada por
#
# $$\frac{x^2}{a^2} + \frac{y^2}{b^2} = 1,$$
#
# podemos resolver a equação da elipse para $x$ (ou $y$) e substituir esta solução na expressão da área para ter uma função $A(x)$ (ou $A(y)$). Se escolhermos $x$, o problema de otimização pode ser escrito como:
#
# $$\text{Encontre } x^{*} = \min \,( -A(x) ), \, \text{sujeito a} \, x > 0.$$
#
# Notemos que maximizar $A(x)$ equivale a minimizar $-A(x)$.
#
# Na busca do ponto de mínimo $x^{*}$, usaremos computação simbólica.
# Primeiramente, criamos variáveis simbólicas que representem as variáveis de interesse do problema e a expressão da área total.
# +
# cria variáveis simbólicas
x,y,a,b = sy.symbols('x,y,a,b')
# área do retângulo no 1o. quadrante é xy
# logo, área total é 4xy
A = -4*x*y
A
# -
# Em seguida, resolvemos a equação da elipse para a variável $y$ utilizando a função `sympy.solve`.
# resolve equação da elipse para y
sol = sy.solve(x**2/a**2 + y**2/b**2 - 1,y)
sol[0],sol[1]
# Duas soluções são possíveis para $y$. Porém, como o nosso ponto de referência sobre a elipse está no primeiro quadrante, tomamos a expressão para $y > 0$ e a substituímos na expressão da área de forma a obter uma expressão univariada $A(x)$.
# substitui expressão de y positivo em A para ter -A(x)
A = A.subs({'y':sol[1]})
A
# Localizaremos o ponto crítico da função a partir da derivada $A'(x)$. Derivando $A$ em relação a $x$, obtemos:
# deriva -A(x) com a,b constantes
dAdx = A.diff(x)
dAdx
# Em seguida, buscamos $x^{*}$ tal que $A'(x^{*}) = \frac{dA}{dx}(x^{*}) = 0$.
# resolve A'(x*) = 0
sol_x = sy.solve(dAdx,x)
sol_x
# Duas soluções, são possíveis, porém, podemos verificar qual ponto de crítico, de fato, é o que minimizará $-A(x)$ através da análise da concavidade. Então, calculamos $A''(x)$, para cada ponto crítico.
# testa A''(x) para os dois pontos
dAdx2 = dAdx.diff(x)
dAdx2.subs(x,sol_x[0]).simplify(),dAdx2.subs(x,sol_x[1]).simplify()
# Uma vez que a segunda solução verifica a concavidade positiva, temos que o ponto crítico $x^{*}$ é:
# concavidade para cima => ponto de mínimo
xs = sol_x[1]
xs
# Usando este valor na equação da elipse, obtemos a ordenada correspondente:
# resolve para y > 0
ys = sy.solve(xs**2/a**2 + y**2/b**2 - 1,y)[1]
ys
# Por fim, substituindo $x^{*}$ na expressão da área, temos que $A_{max}$ é:
# área máxima
A_max = A.subs(x,xs)
A_max
# ou, de forma, simplificada,
# simplificando
A_max.simplify()
# ### Conclusão
#
# A área do retângulo inscrito na elipse será máxima quando $x = \frac{\sqrt{2}}{2}a$ e $y = \frac{\sqrt{2}}{2}b$. Portanto, $A_{max} = 2ab$, para comprimentos $a$ e $b$ de semi-eixo maior e menor.
# ## Estudo paramétrico de geometria
#
# No gráfico abaixo, plotamos a variação das áreas de retângulos inscritos em uma elipse arbitrária com semi-eixos $a$ e $b$ em função do comprimento $x$ da meia-base do retângulo até o limite da meia-base do retângulo de área máxima. Adicionalmente, plotamos a variação do comprimento da diagonal do retângulo. A constante $A_{elip}$ é a área da elipse.
#
# Você pode alterar os parâmetros de construção de elipse, o número de valores para $x$ e realizar uma nova análise dos parâmetros.
# +
# semi-eixos da elipse
a,b = 10,2
# no. de retângulos inscritos
nx = 40
# base variável do retângulo
X = np.linspace(0,np.sqrt(2)/2*a,nx)
# área da elipse
e = np.pi*a*b
# áreas dos retângulos
R = []
H = []
for x in X:
y = b*np.sqrt(1 - x**2/a**2)
r = 4*x*y
h = np.hypot(2*x,2*y) # diagonal do retângulo
R.append(r)
H.append(h)
# plotagem
fig,ax1 = plt.subplots(figsize=(6,4))
ax1.plot(X,R,'sb',mec='w',alpha=0.8,label='$A_{ret}(x)$')
ax1.plot(X,np.full(X.shape,2*a*b),'--r',alpha=0.8,label='$A_{max}$')
ax1.plot(X,np.full(X.shape,e),'-',alpha=0.8,label='$A_{elip}$')
ax1.legend(fontsize=10)
# labels
plt.xlabel('$x$ [compr. base ret. inscrito]')
plt.ylabel('$A$ [áreas]');
ax2 = ax1.twinx()
ax2.plot(X,H,'og',mec='w',alpha=0.8,label='$h_{ret}(x)$')
ax2.legend(loc=5,ncol=1,fontsize=10)
plt.ylabel('$h$ [compr. diag ret.]');
plt.suptitle('Variação de áreas e diagonais: elipse x retângulo inscrito\n');
plt.title(f'Elipse: $x^2/({a:.1f})^2 + y^2/({b:.1f})^2 = 1$',fontsize=10);
|
_build/jupyter_execute/ipynb/15-otimizacao.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Introduction to PyTorch
# ***********************
#
# Introduction to Torch's tensor library
# ======================================
#
# All of deep learning is computations on tensors, which are
# generalizations of a matrix that can be indexed in more than 2
# dimensions. We will see exactly what this means in-depth later. First,
# lets look what we can do with tensors.
#
#
# +
# Author: <NAME>
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
# -
# Creating Tensors
# ~~~~~~~~~~~~~~~~
#
# Tensors can be created from Python lists with the torch.Tensor()
# function.
#
#
#
# +
# torch.tensor(data) creates a torch.Tensor object with the given data.
V_data = [1., 2., 3.]
V = torch.tensor(V_data)
print(V)
# Creates a matrix
M_data = [[1., 2., 3.], [4., 5., 6]]
M = torch.tensor(M_data)
print(M)
# Create a 3D tensor of size 2x2x2.
T_data = [[[1., 2.], [3., 4.]],
[[5., 6.], [7., 8.]]]
T = torch.tensor(T_data)
print(T)
# -
# What is a 3D tensor anyway? Think about it like this. If you have a
# vector, indexing into the vector gives you a scalar. If you have a
# matrix, indexing into the matrix gives you a vector. If you have a 3D
# tensor, then indexing into the tensor gives you a matrix!
#
# A note on terminology:
# when I say "tensor" in this tutorial, it refers
# to any torch.Tensor object. Matrices and vectors are special cases of
# torch.Tensors, where their dimension is 1 and 2 respectively. When I am
# talking about 3D tensors, I will explicitly use the term "3D tensor".
#
#
#
# +
# Index into V and get a scalar (0 dimensional tensor)
print(V[0])
# Get a Python number from it
print(V[0].item())
# Index into M and get a vector
print(M[0])
# Index into T and get a matrix
print(T[0])
# -
# You can also create tensors of other datatypes. The default, as you can
# see, is Float. To create a tensor of integer types, try
# torch.LongTensor(). Check the documentation for more data types, but
# Float and Long will be the most common.
#
#
#
# You can create a tensor with random data and the supplied dimensionality
# with torch.randn()
#
#
#
x = torch.randn((3, 4, 5))
print(x)
# Operations with Tensors
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# You can operate on tensors in the ways you would expect.
#
#
x = torch.tensor([1., 2., 3.])
y = torch.tensor([4., 5., 6.])
z = x + y
z = torch.add(x, y)
print(z)
# See `the documentation <http://pytorch.org/docs/torch.html>`__ for a
# complete list of the massive number of operations available to you. They
# expand beyond just mathematical operations.
#
# One helpful operation that we will make use of later is concatenation.
#
#
#
# +
# By default, torch.cat concatenates along the first axis (stacks rows).
x_1 = torch.randn(2, 5)
y_1 = torch.randn(3, 5)
z_1 = torch.cat([x_1, y_1])  # shape (5, 5)
print(z_1)
print(z_1.shape)
# Concatenate columns:
x_2 = torch.randn(2, 3)
y_2 = torch.randn(2, 5)
# second arg specifies which axis to concat along
z_2 = torch.cat([x_2, y_2], 1)  # shape (2, 8)
print(z_2)
# If your tensors are not compatible, torch will complain.
# Uncomment to see the error — x_1 is (2, 5) and x_2 is (2, 3); they cannot be
# concatenated along dim 0 because their other dimension differs.
# (The original left this line active, so the cell always raised RuntimeError.)
# torch.cat([x_1, x_2])
# -
# Reshaping Tensors
# ~~~~~~~~~~~~~~~~~
#
# Use the .view() method to reshape a tensor. This method receives heavy
# use, because many neural network components expect their inputs to have
# a certain shape. Often you will need to reshape before passing your data
# to the component.
#
#
#
x = torch.randn(2, 3, 4)
print(x)
print(x.view(2, 12)) # Reshape to 2 rows, 12 columns
# Same as above. If one of the dimensions is -1, its size can be inferred
print(x.view(2, -1))
# Computation Graphs and Automatic Differentiation
# ================================================
#
# The concept of a computation graph is essential to efficient deep
# learning programming, because it allows you to not have to write the
# back propagation gradients yourself. A computation graph is simply a
# specification of how your data is combined to give you the output. Since
# the graph totally specifies what parameters were involved with which
# operations, it contains enough information to compute derivatives. This
# probably sounds vague, so let's see what is going on using the
# fundamental flag ``requires_grad``.
#
# First, think from a programmers perspective. What is stored in the
# torch.Tensor objects we were creating above? Obviously the data and the
# shape, and maybe a few other things. But when we added two tensors
# together, we got an output tensor. All this output tensor knows is its
# data and shape. It has no idea that it was the sum of two other tensors
# (it could have been read in from a file, it could be the result of some
# other operation, etc.)
#
# If ``requires_grad=True``, the Tensor object keeps track of how it was
# created. Lets see it in action.
#
#
#
# +
# Tensor factory methods have a ``requires_grad`` flag
x = torch.tensor([1., 2., 3], requires_grad=True)
# With requires_grad=True, you can still do all the operations you previously
# could
y = torch.tensor([4., 5., 6], requires_grad=True)
z = x + y
print(z)
# BUT z knows something extra.
print(z.grad_fn)
x_no_grad = torch.tensor([1., 2., 3.], requires_grad=False)
y_no_grad = torch.tensor([4., 5., 6.], requires_grad=False)
z_no_grad = x_no_grad + y_no_grad
print(z_no_grad)
print(z_no_grad.grad_fn)
# -
# So Tensors know what created them. z knows that it wasn't read in from
# a file, it wasn't the result of a multiplication or exponential or
# whatever. And if you keep following z.grad_fn, you will find yourself at
# x and y.
#
# But how does that help us compute a gradient?
#
#
#
# Lets sum up all the entries in z
s = z.sum()
print(s)
print(s.grad_fn)
# So now, what is the derivative of this sum with respect to the first
# component of x? In math, we want
#
# \begin{align}\frac{\partial s}{\partial x_0}\end{align}
#
# Well, s knows that it was created as a sum of the tensor z. z knows
# that it was the sum x + y. So
#
# \begin{align} s = \overbrace{x_0 + y_0}^\text{$z_0$} + \overbrace{x_1 + y_1}^\text{$z_1$} + \overbrace{x_2 + y_2}^\text{$z_2$} \end{align}
#
# And so s contains enough information to determine that the derivative
# we want is 1!
#
# Of course this glosses over the challenge of how to actually compute
# that derivative. The point here is that s is carrying along enough
# information that it is possible to compute it. In reality, the
# developers of Pytorch program the sum() and + operations to know how to
# compute their gradients, and run the back propagation algorithm. An
# in-depth discussion of that algorithm is beyond the scope of this
# tutorial.
#
#
#
# Lets have Pytorch compute the gradient, and see that we were right:
# (note if you run this block multiple times, the gradient will increment.
# That is because Pytorch *accumulates* the gradient into the .grad
# property, since for many models this is very convenient.)
#
#
#
# calling .backward() on any variable will run backprop, starting from it.
s.backward()
print(x.grad)
# Understanding what is going on in the block below is crucial for being a
# successful programmer in deep learning.
#
#
#
# +
x = torch.randn(2, 2)
y = torch.randn(2, 2)
# By default, user created Tensors have ``requires_grad=False``
print(x.requires_grad, y.requires_grad)
z = x + y
# So you can't backprop through z
print(z.grad_fn)
# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
# flag in-place. The input flag defaults to ``True`` if not given.
x = x.requires_grad_()
y = y.requires_grad_()
# z contains enough information to compute gradients, as we saw above
z = x + y
print(z.grad_fn)
# If any input to an operation has ``requires_grad=True``, so will the output
print(z.requires_grad)
# Now z has the computation history that relates itself to x and y
# Can we just take its values, and **detach** it from its history?
new_z = z.detach()
# ... does new_z have information to backprop to x and y?
# NO!
print(new_z.grad_fn)
# And how could it? ``z.detach()`` returns a tensor that shares the same storage
# as ``z``, but with the computation history forgotten. It doesn't know anything
# about how it was computed.
# In essence, we have broken the Tensor away from its past history
# -
# You can also stop autograd from tracking history on Tensors
# with ``.requires_grad``=True by wrapping the code block in
# ``with torch.no_grad():``
#
#
# +
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
print((x ** 2).requires_grad)
|
pytorch_tutorials/pytorch_tutorial_NLP1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Update scikit-learn to prevent version mismatches
# NOTE: the PyPI package is "scikit-learn"; the bare "sklearn" name is a
# deprecated alias that newer pip versions refuse to install.
# !pip install scikit-learn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing
# !pip install joblib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import MinMaxScaler
# # Read the CSV and Perform Basic Data Cleaning
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
# # Select your features (columns)
# Set features. This will also be used as your x values.
# Every column except the label (`koi_disposition`) is used as a feature.
selected_features = df[df.columns]
selected_features = selected_features.drop(columns='koi_disposition')
X = selected_features
y = df['koi_disposition'].to_numpy()
y
columns = selected_features.columns.to_list()
# Encode the string class labels as integers for the classifier.
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
label_encoder.fit(y)
encoded_y = label_encoder.transform(y)
encoded_y
encoded_y = encoded_y.reshape(-1, 1)
print(X.shape, y.shape)
encoded_y.shape
# # Create a Train Test Split
#
# Use `koi_disposition` for the y values
# +
from sklearn.model_selection import train_test_split
X_train, X_test, encoded_y_train, encoded_y_test = train_test_split(X, encoded_y, random_state=1)
# -
X_train.head()
# # Pre-processing
#
# Scale the data using the MinMaxScaler and perform some feature selection
# Scale your data: fit the scaler on the training split only (avoids leakage),
# then apply the same transform to both splits.
X_scaler = MinMaxScaler().fit(X_train)
X_scaled_train = X_scaler.transform(X_train)
X_scaled_test = X_scaler.transform(X_test)
X_scaled_train
encoded_y_train.ravel()
print(X_scaled_train.shape, encoded_y_train.shape)
# # Train the Model
#
# Flatten the (n, 1) label column to 1-D, as sklearn estimators expect.
encoded_y_train = encoded_y_train.ravel()
# Support vector machine linear classifier
from sklearn.svm import SVC
model2 = SVC(kernel='linear')
model2.fit(X_scaled_train, encoded_y_train.ravel())
lsvc = model2.fit(X_scaled_train, encoded_y_train.ravel())
print(f"Training Data Score: {model2.score(X_scaled_train, encoded_y_train)}")
print(f"Testing Data Score: {model2.score(X_scaled_test, encoded_y_test)}")
model2.coef_[0]
print(model2.coef_, columns)
# # Hyperparameter Tuning
#
# Use `GridSearchCV` to tune the model's parameters
# Create the GridSearchCV model
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [500, 600, 700, 800, 1000, 1100, 1200, 1300],
              'gamma': [1e-16, 1e-15, 1e-14, 1e-13]}
# BUGFIX: the original passed the undefined name `model` here (NameError).
# The estimator being tuned is `model2`, the linear SVC defined above.
# NOTE(review): `gamma` is ignored by SVC(kernel='linear'), so this grid
# effectively only searches over C — confirm whether 'rbf' was intended.
grid2 = GridSearchCV(model2, param_grid, n_jobs=10, verbose=3)
# Train the model with GridSearch
grid2.fit(X_scaled_train, encoded_y_train.ravel())
grid2
sorted(grid2.cv_results_.keys())
print(grid2.best_params_)
print(grid2.best_score_)
# # Save the Model
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'your_name.sav'
joblib.dump(model2, filename)
|
starter_code/.ipynb_checkpoints/model_1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Improvise a Jazz Solo with an LSTM Network
#
# Welcome to your final programming assignment of this week! In this notebook, you will implement a model that uses an LSTM to generate music. You will even be able to listen to your own music at the end of the assignment.
#
# **You will learn to:**
# - Apply an LSTM to music generation.
# - Generate your own jazz music with deep learning.
#
# ## <font color='darkblue'>Updates</font>
#
# #### If you were working on the notebook before this update...
# * The current notebook is version "3a".
# * You can find your original work saved in the notebook with the previous version name ("v3")
# * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#
# #### List of updates
# * `djmodel`
# - Explains `Input` layer and its parameter `shape`.
# - Explains `Lambda` layer and replaces the given solution with hints and sample code (to improve the learning experience).
# - Adds hints for using the Keras `Model`.
# * `music_inference_model`
# - Explains each line of code in the `one_hot` function.
# - Explains how to apply `one_hot` with a Lambda layer instead of giving the code solution (to improve the learning experience).
# - Adds instructions on defining the `Model`.
# * `predict_and_sample`
# - Provides detailed instructions for each step.
# - Clarifies which variable/function to use for inference.
# * Spelling, grammar and wording corrections.
# Please run the following cell to load all the packages required in this assignment. This may take a few minutes.
from __future__ import print_function
import IPython
import sys
from music21 import *
import numpy as np
from grammar import *
from qa import *
from preprocess import *
from music_utils import *
from data_utils import *
from keras.models import load_model, Model
from keras.layers import Dense, Activation, Dropout, Input, LSTM, Reshape, Lambda, RepeatVector
from keras.initializers import glorot_uniform
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras import backend as K
# ## 1 - Problem statement
#
# You would like to create a jazz music piece specially for a friend's birthday. However, you don't know any instruments or music composition. Fortunately, you know deep learning and will solve this problem using an LSTM network.
#
# You will train a network to generate novel jazz solos in a style representative of a body of performed work.
#
# <img src="images/jazz.jpg" style="width:450px;height:300px;">
#
#
# ### 1.1 - Dataset
#
# You will train your algorithm on a corpus of Jazz music. Run the cell below to listen to a snippet of the audio from the training set:
IPython.display.Audio('./data/30s_seq.mp3')
# We have taken care of the preprocessing of the musical data to render it in terms of musical "values."
#
# #### Details about music (optional)
# You can informally think of each "value" as a note, which comprises a pitch and duration. For example, if you press down a specific piano key for 0.5 seconds, then you have just played a note. In music theory, a "value" is actually more complicated than this--specifically, it also captures the information needed to play multiple notes at the same time. For example, when playing a music piece, you might press down two piano keys at the same time (playing multiple notes at the same time generates what's called a "chord"). But we don't need to worry about the details of music theory for this assignment.
#
# #### Music as a sequence of values
# * For the purpose of this assignment, all you need to know is that we will obtain a dataset of values, and will learn an RNN model to generate sequences of values.
# * Our music generation system will use 78 unique values.
#
# Run the following code to load the raw music data and preprocess it into values. This might take a few minutes.
X, Y, n_values, indices_values = load_music_utils()
print('number of training examples:', X.shape[0])
print('Tx (length of sequence):', X.shape[1])
print('total # of unique values:', n_values)
print('shape of X:', X.shape)
print('Shape of Y:', Y.shape)
# You have just loaded the following:
#
# - `X`: This is an (m, $T_x$, 78) dimensional array.
# - We have m training examples, each of which is a snippet of $T_x =30$ musical values.
# - At each time step, the input is one of 78 different possible values, represented as a one-hot vector.
# - For example, X[i,t,:] is a one-hot vector representing the value of the i-th example at time t.
#
# - `Y`: a $(T_y, m, 78)$ dimensional array
# - This is essentially the same as `X`, but shifted one step to the left (to the past).
# - Notice that the data in `Y` is **reordered** to be dimension $(T_y, m, 78)$, where $T_y = T_x$. This format makes it more convenient to feed into the LSTM later.
# - Similar to the dinosaur assignment, we're using the previous values to predict the next value.
# - So our sequence model will try to predict $y^{\langle t \rangle}$ given $x^{\langle 1\rangle}, \ldots, x^{\langle t \rangle}$.
#
# - `n_values`: The number of unique values in this dataset. This should be 78.
#
# - `indices_values`: python dictionary mapping integers 0 through 77 to musical values.
# ### 1.2 - Overview of our model
#
# Here is the architecture of the model we will use. This is similar to the Dinosaurus model, except that you will implement it in Keras.
#
# <img src="images/music_generation.png" style="width:600;height:400px;">
#
#
# * $X = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, \cdots, x^{\langle T_x \rangle})$ is a window of size $T_x$ scanned over the musical corpus.
# * Each $x^{\langle t \rangle}$ is an index corresponding to a value.
# * $\hat{y}^{t}$ is the prediction for the next value.
# * We will be training the model on random snippets of 30 values taken from a much longer piece of music.
# - Thus, we won't bother to set the first input $x^{\langle 1 \rangle} = \vec{0}$, since most of these snippets of audio start somewhere in the middle of a piece of music.
# - We are setting each of the snippets to have the same length $T_x = 30$ to make vectorization easier.
# ## Overview of parts 2 and 3
#
# * We're going to train a model that predicts the next note in a style that is similar to the jazz music that it's trained on. The training is contained in the weights and biases of the model.
# * In Part 3, we're then going to use those weights and biases in a new model which predicts a series of notes, using the previous note to predict the next note.
# * The weights and biases are transferred to the new model using the 'global shared layers' described below.
#
# ## 2 - Building the model
#
# * In this part you will build and train a model that will learn musical patterns.
# * The model takes input X of shape $(m, T_x, 78)$ and labels Y of shape $(T_y, m, 78)$.
# * We will use an LSTM with hidden states that have $n_{a} = 64$ dimensions.
# number of dimensions for the hidden state of each LSTM cell.
n_a = 64
#
# #### Sequence generation uses a for-loop
# * If you're building an RNN where, at test time, the entire input sequence $x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, \ldots, x^{\langle T_x \rangle}$ is given in advance, then Keras has simple built-in functions to build the model.
# * However, for **sequence generation, at test time we don't know all the values of $x^{\langle t\rangle}$ in advance**.
# * Instead we generate them one at a time using $x^{\langle t\rangle} = y^{\langle t-1 \rangle}$.
# * The input at time "t" is the prediction at the previous time step "t-1".
# * So you'll need to implement your own for-loop to iterate over the time steps.
#
# #### Shareable weights
# * The function `djmodel()` will call the LSTM layer $T_x$ times using a for-loop.
# * It is important that all $T_x$ copies have the same weights.
# - The $T_x$ steps should have shared weights that aren't re-initialized.
# * Referencing a globally defined shared layer will utilize the same layer-object instance at each time step.
# * The key steps for implementing layers with shareable weights in Keras are:
# 1. Define the layer objects (we will use global variables for this).
# 2. Call these objects when propagating the input.
#
# #### 3 types of layers
# * We have defined the layers objects you need as global variables.
# * Please run the next cell to create them.
# * Please read the Keras documentation and understand these layers:
# - [Reshape()](https://keras.io/layers/core/#reshape): Reshapes an output to a certain shape.
# - [LSTM()](https://keras.io/layers/recurrent/#lstm): Long Short-Term Memory layer
# - [Dense()](https://keras.io/layers/core/#dense): A regular fully-connected neural network layer.
#
# Globally shared layer objects: because each layer is created ONCE here and
# merely *called* at every unrolled time step, all time steps reuse the same
# weights (this is what makes the weights transferable to the inference model).
n_values = 78 # number of music values
reshapor = Reshape((1, n_values)) # Used in Step 2.B of djmodel(), below: (n_values,) -> (1, n_values)
LSTM_cell = LSTM(n_a, return_state = True) # Used in Step 2.C; return_state exposes (a, c)
densor = Dense(n_values, activation='softmax') # Used in Step 2.D: hidden state -> distribution over values
# * `reshapor`, `LSTM_cell` and `densor` are globally defined layer objects, that you'll use to implement `djmodel()`.
# * In order to propagate a Keras tensor object X through one of these layers, use `layer_object()`.
# - For one input, use `layer_object(X)`
# - For more than one input, put the inputs in a list: `layer_object([X1,X2])`
# **Exercise**: Implement `djmodel()`.
#
# #### Inputs (given)
# * The `Input()` layer is used for defining the input `X` as well as the initial hidden state 'a0' and cell state `c0`.
# * The `shape` parameter takes a tuple that does not include the batch dimension (`m`).
# - For example,
# ```Python
# X = Input(shape=(Tx, n_values)) # X has 3 dimensions and not 2: (m, Tx, n_values)
# ```
# #### Step 1: Outputs (TODO)
# 1. Create an empty list "outputs" to save the outputs of the LSTM Cell at every time step.
# #### Step 2: Loop through time steps (TODO)
# * Loop for $t \in 1, \ldots, T_x$:
#
# #### 2A. Select the 't' time-step vector from X.
# * X has the shape (m, Tx, n_values).
# * The shape of the 't' selection should be (n_values,).
# * Recall that if you were implementing in numpy instead of Keras, you would extract a slice from a 3D numpy array like this:
# ```Python
# var1 = array1[:,1,:]
# ```
#
# #### Lambda layer
# * Since we're using Keras, we need to define this step inside a custom layer.
# * In Keras, this is a Lambda layer [Lambda](https://keras.io/layers/core/#lambda)
# * As an example, a Lambda layer that takes the previous layer and adds '1' looks like this
# ```
# lambda_layer1 = Lambda(lambda z: z + 1)(previous_layer)
# ```
# * The previous layer in this case is `X`.
# * `z` is a local variable of the lambda function.
# * The `previous_layer` gets passed into the parameter `z` in the lowercase `lambda` function.
# * You can choose the name of the variable to be something else if you want.
# * The operation after the colon ':' should be the operation to extract a slice from the previous layer.
# * **Hint**: You'll be using the variable `t` within the definition of the lambda layer even though it isn't passed in as an argument to Lambda.
# #### 2B. Reshape x to be (1,n_values).
# * Use the `reshapor()` layer. It is a function that takes the previous layer as its input argument.
#
# #### 2C. Run x through one step of LSTM_cell.
# * Initialize the `LSTM_cell` with the previous step's hidden state $a$ and cell state $c$.
# * Use the following formatting:
# ```python
# next_hidden_state, _, next_cell_state = LSTM_cell(inputs=input_x, initial_state=[previous_hidden_state, previous_cell_state])
# ```
# * Choose appropriate variables for inputs, hidden state and cell state.
#
# #### 2D. Dense layer
# * Propagate the LSTM's hidden state through a dense+softmax layer using `densor`.
#
# #### 2E. Append output
# * Append the output to the list of "outputs".
#
# #### Step 3: After the loop, create the model
# * Use the Keras `Model` object to create a model.
# * specify the inputs and outputs:
# ```Python
# model = Model(inputs=[input_x, initial_hidden_state, initial_cell_state], outputs=the_outputs)
# ```
# * Choose the appropriate variables for the input tensor, hidden state, cell state, and output.
# * See the documentation for [Model](https://keras.io/models/model/)
# +
# GRADED FUNCTION: djmodel
def djmodel(Tx, n_a, n_values):
    """
    Implement the model

    Arguments:
    Tx -- length of the sequence in a corpus
    n_a -- the number of activations used in our model
    n_values -- number of unique values in the music data

    Returns:
    model -- a keras instance model with n_a activations
    """
    # Define the input layer and specify the shape
    X = Input(shape=(Tx, n_values))
    # Define the initial hidden state a0 and initial cell state c0
    # using `Input`
    a0 = Input(shape=(n_a,), name='a0')
    c0 = Input(shape=(n_a,), name='c0')
    a = a0
    c = c0
    ### START CODE HERE ###
    # Step 1: Create empty list to append the outputs while you iterate (≈1 line)
    outputs = []
    # Step 2: Loop
    for t in range(Tx):
        # Step 2.A: select the "t"th time step vector from X.
        # Lambda turns the slice into a Keras layer; `t` is captured from
        # the enclosing loop scope (late binding is fine because the layer
        # is built immediately at each iteration).
        x = Lambda(lambda z: z[:, t, :])(X)
        # Step 2.B: Use reshapor to reshape x to be (1, n_values) (≈1 line)
        x = reshapor(x)
        # Step 2.C: Perform one step of the LSTM_cell, threading the
        # hidden/cell state (a, c) through the unrolled steps. The shared
        # global LSTM_cell means all steps use the SAME weights.
        a, _, c = LSTM_cell(x, initial_state=[a, c])
        # Step 2.D: Apply densor to the hidden state output of LSTM_Cell
        out = densor(a)
        # Step 2.E: add the output to "outputs"
        outputs.append(out)
    # Step 3: Create model instance
    model = Model(inputs=[X, a0, c0], outputs=outputs)
    ### END CODE HERE ###
    return model
# -
# #### Create the model object
# * Run the following cell to define your model.
# * We will use `Tx=30`, `n_a=64` (the dimension of the LSTM activations), and `n_values=78`.
# * This cell may take a few seconds to run.
model = djmodel(Tx = 30 , n_a = 64, n_values = 78)
# Check your model
model.summary()
# **Expected Output**
# Scroll to the bottom of the output, and you'll see the following:
#
# ```Python
# Total params: 41,678
# Trainable params: 41,678
# Non-trainable params: 0
# ```
# #### Compile the model for training
# * You now need to compile your model to be trained.
# * We will use:
# - optimizer: Adam optimizer
# - Loss function: categorical cross-entropy (for multi-class classification)
# +
# Adam with a decaying learning rate; `lr` is the legacy Keras argument name
# (newer Keras versions call it `learning_rate` — confirm installed version).
opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
# -
# #### Initialize hidden state and cell state
# Finally, let's initialize `a0` and `c0` for the LSTM's initial state to be zero.
m = 60  # number of training examples (snippets) in this dataset
a0 = np.zeros((m, n_a))
c0 = np.zeros((m, n_a))
# #### Train the model
# * Lets now fit the model!
# * We will turn `Y` into a list, since the cost function expects `Y` to be provided in this format
#     - `list(Y)` is a list with 30 items, where each of the list items is of shape (60,78).
#     - Lets train for 100 epochs. This will take a few minutes.
# list(Y) splits the (Ty, m, 78) array into Ty targets, one per model output.
model.fit([X, a0, c0], list(Y), epochs=100)
# #### Expected Output
#
# The model loss will start high, (100 or so), and after 100 epochs, it should be in the single digits. These won't be the exact number that you'll see, due to random initialization of weights.
# For example:
# ```
# Epoch 1/100
# 60/60 [==============================] - 3s - loss: 125.7673
# ...
# ```
# Scroll to the bottom to check Epoch 100
# ```
# ...
# Epoch 100/100
# 60/60 [==============================] - 0s - loss: 6.1861
# ```
#
# Now that you have trained a model, let's go to the final section to implement an inference algorithm, and generate some music!
# ## 3 - Generating music
#
# You now have a trained model which has learned the patterns of the jazz soloist. Lets now use this model to synthesize new music.
#
# #### 3.1 - Predicting & Sampling
#
# <img src="images/music_gen.png" style="width:600;height:400px;">
#
# At each step of sampling, you will:
# * Take as input the activation '`a`' and cell state '`c`' from the previous state of the LSTM.
# * Forward propagate by one step.
# * Get a new output activation as well as cell state.
# * The new activation '`a`' can then be used to generate the output using the fully connected layer, `densor`.
#
# ##### Initialization
# * We will initialize the following to be zeros:
# * `x0`
# * hidden state `a0`
# * cell state `c0`
# **Exercise:**
# * Implement the function below to sample a sequence of musical values.
# * Here are some of the key steps you'll need to implement inside the for-loop that generates the $T_y$ output characters:
#
# * Step 2.A: Use `LSTM_Cell`, which takes in the input layer, as well as the previous step's '`c`' and '`a`' to generate the current step's '`c`' and '`a`'.
# ```Python
# next_hidden_state, _, next_cell_state = LSTM_cell(input_x, initial_state=[previous_hidden_state, previous_cell_state])
# ```
# * Choose the appropriate variables for the input_x, hidden_state, and cell_state
#
# * Step 2.B: Compute the output by applying `densor` to compute a softmax on '`a`' to get the output for the current step.
#
# * Step 2.C: Append the output to the list `outputs`.
#
#
# * Step 2.D: Sample x to be the one-hot version of '`out`'.
# * This allows you to pass it to the next LSTM's step.
# * We have provided the definition of `one_hot(x)` in the 'music_utils.py' file and imported it.
# Here is the definition of `one_hot`
# ```Python
# def one_hot(x):
# x = K.argmax(x)
# x = tf.one_hot(indices=x, depth=78)
# x = RepeatVector(1)(x)
# return x
# ```
# Here is what the `one_hot` function is doing:
# * argmax: within the vector `x`, find the position with the maximum value and return the index of that position.
# * For example: argmax of [-1,0,1] finds that 1 is the maximum value, and returns the index position, which is 2. Read the documentation for [keras.argmax](https://www.tensorflow.org/api_docs/python/tf/keras/backend/argmax).
# * one_hot: takes a list of indices and the depth of the one-hot vector (number of categories, which is 78 in this assignment). It converts each index into the one-hot vector representation. For instance, if the indices is [2], and the depth is 5, then the one-hot vector returned is [0,0,1,0,0]. Check out the documentation for [tf.one_hot](https://www.tensorflow.org/api_docs/python/tf/one_hot) for more examples and explanations.
# * RepeatVector(n): This takes a vector and duplicates it `n` times. Notice that we had it repeat 1 time. This may seem like it's not doing anything. If you look at the documentation for [RepeatVector](https://keras.io/layers/core/#repeatvector), you'll notice that if x is a vector with dimension (m,5) and it gets passed into `RepeatVector(1)`, then the output is (m,1,5). In other words, it adds an additional dimension (of length 1) to the resulting vector.
# * Apply the custom one_hot encoding using the [Lambda](https://keras.io/layers/core/#lambda) layer. You saw earlier that the Lambda layer can be used like this:
# ```Python
# result = Lambda(lambda x: x + 1)(input_var)
# ```
#
# If you pre-define a function, you can do the same thing:
# ```Python
# def add_one(x)
# return x + 1
#
# # use the add_one function inside of the Lambda function
# result = Lambda(add_one)(input_var)
# ```
# #### Step 3: Inference Model:
# This is how to use the Keras `Model`.
# ```Python
# model = Model(inputs=[input_x, initial_hidden_state, initial_cell_state], outputs=the_outputs)
# ```
#
#
# * Choose the appropriate variables for the input tensor, hidden state, cell state, and output.
# * **Hint**: the inputs to the model are the **initial** inputs and states.
# +
# GRADED FUNCTION: music_inference_model
def music_inference_model(LSTM_cell, densor, n_values = 78, n_a = 64, Ty = 100):
    """
    Uses the trained "LSTM_cell" and "densor" from model() to generate a sequence of values.

    Arguments:
    LSTM_cell -- the trained "LSTM_cell" from model(), Keras layer object
    densor -- the trained "densor" from model(), Keras layer object
    n_values -- integer, number of unique values
    n_a -- number of units in the LSTM_cell
    Ty -- integer, number of time steps to generate

    Returns:
    inference_model -- Keras model instance
    """
    # Define the input of your model with a shape
    x0 = Input(shape=(1, n_values))
    # Define s0, initial hidden state for the decoder LSTM
    a0 = Input(shape=(n_a,), name='a0')
    c0 = Input(shape=(n_a,), name='c0')
    a = a0
    c = c0
    x = x0
    ### START CODE HERE ###
    # Step 1: Create an empty list of "outputs" to later store your predicted values (≈1 line)
    outputs = []
    # Step 2: Loop over Ty and generate a value at every time step
    for t in range(Ty):
        # Step 2.A: Perform one step of LSTM_cell (≈1 line)
        a, _, c = LSTM_cell(x, initial_state=[a, c])
        # Step 2.B: Apply Dense layer to the hidden state output of the LSTM_cell (≈1 line)
        out = densor(a)
        # Step 2.C: Append the prediction "out" to "outputs". out.shape = (None, 78) (≈1 line)
        outputs.append(out)
        # Step 2.D: feed the generated value back in as the next input:
        # one_hot (imported from music_utils) argmaxes "out" and re-expands
        # it to the (1, n_values) one-hot shape the LSTM expects.
        x = Lambda(one_hot)(out)
    # Step 3: Create model instance with the correct "inputs" and "outputs" (≈1 line)
    # Note: the inputs are the INITIAL input/state tensors x0, a0, c0.
    inference_model = Model(inputs=[x0, a0, c0], outputs=outputs)
    ### END CODE HERE ###
    return inference_model
# -
# Run the cell below to define your inference model. This model is hard coded to generate 50 values.
inference_model = music_inference_model(LSTM_cell, densor, n_values = 78, n_a = 64, Ty = 50)
# Check the inference model
inference_model.summary()
# ** Expected Output**
# If you scroll to the bottom of the output, you'll see:
# ```
# Total params: 41,678
# Trainable params: 41,678
# Non-trainable params: 0
# ```
# #### Initialize inference model
# The following code creates the zero-valued vectors you will use to initialize `x` and the LSTM state variables `a` and `c`.
x_initializer = np.zeros((1, 1, 78))
a_initializer = np.zeros((1, n_a))
c_initializer = np.zeros((1, n_a))
# **Exercise**: Implement `predict_and_sample()`.
#
# * This function takes many arguments including the inputs [x_initializer, a_initializer, c_initializer].
# * In order to predict the output corresponding to this input, you will need to carry-out 3 steps:
#
#
# #### Step 1
# * Use your inference model to predict an output given your set of inputs. The output `pred` should be a list of length $T_y$ where each element is a numpy-array of shape (1, n_values).
# ```Python
# inference_model.predict([input_x_init, hidden_state_init, cell_state_init])
# ```
# * Choose the appropriate input arguments to `predict` from the input arguments of this `predict_and_sample` function.
#
# #### Step 2
# * Convert `pred` into a numpy array of $T_y$ indices.
# * Each index is computed by taking the `argmax` of an element of the `pred` list.
# * Use [numpy.argmax](https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html).
# * Set the `axis` parameter.
# * Remember that the shape of the prediction is $(m, T_{y}, n_{values})$
#
# #### Step 3
# * Convert the indices into their one-hot vector representations.
# * Use [to_categorical](https://keras.io/utils/#to_categorical).
# * Set the `num_classes` parameter. Note that for grading purposes: you'll need to either:
# * Use a dimension from the given parameters of `predict_and_sample()` (for example, one of the dimensions of x_initializer has the value for the number of distinct classes).
# * Or just hard code the number of distinct classes (will pass the grader as well).
# * Note that using a global variable such as n_values will not work for grading purposes.
# +
# GRADED FUNCTION: predict_and_sample
def predict_and_sample(inference_model, x_initializer = x_initializer, a_initializer = a_initializer,
                       c_initializer = c_initializer):
    """
    Predicts the next value of values using the inference model.

    Arguments:
    inference_model -- Keras model instance for inference time
    x_initializer -- numpy array of shape (1, 1, 78), one-hot vector initializing the values generation
    a_initializer -- numpy array of shape (1, n_a), initializing the hidden state of the LSTM_cell
    c_initializer -- numpy array of shape (1, n_a), initializing the cell state of the LSTM_cell

    Returns:
    results -- numpy-array of shape (Ty, 78), matrix of one-hot vectors representing the values generated
    indices -- numpy-array of shape (Ty, 1), matrix of indices representing the values generated
    """
    ### START CODE HERE ###
    # Step 1: Use your inference model to predict an output sequence given
    # x_initializer, a_initializer and c_initializer.
    # `pred` is a list of Ty arrays, each of shape (1, n_values).
    pred = inference_model.predict([x_initializer, a_initializer, c_initializer])
    # Step 2: Convert "pred" into an np.array() of indices with the maximum probabilities
    indices = np.argmax(pred, axis=-1)
    # Step 3: Convert indices to one-hot vectors. The number of classes is
    # taken from the last dimension of x_initializer (78) rather than a
    # global, so the function stays self-contained.
    results = to_categorical(indices, num_classes=x_initializer.shape[-1])
    ### END CODE HERE ###
    return results, indices
# -
results, indices = predict_and_sample(inference_model, x_initializer, a_initializer, c_initializer)
print("np.argmax(results[12]) =", np.argmax(results[12]))
print("np.argmax(results[17]) =", np.argmax(results[17]))
print("list(indices[12:18]) =", list(indices[12:18]))
# **Expected (Approximate) Output**:
#
# * Your results **may likely differ** because Keras' results are not completely predictable.
# * However, if you have trained your LSTM_cell with model.fit() for exactly 100 epochs as described above:
# * You should very likely observe a sequence of indices that are not all identical.
# * Moreover, you should observe that:
# * np.argmax(results[12]) is the first element of list(indices[12:18])
# * and np.argmax(results[17]) is the last element of list(indices[12:18]).
#
# <table>
# <tr>
# <td>
# **np.argmax(results[12])** =
# </td>
# <td>
# 1
# </td>
# </tr>
# <tr>
# <td>
# **np.argmax(results[17])** =
# </td>
# <td>
# 42
# </td>
# </tr>
# <tr>
# <td>
# **list(indices[12:18])** =
# </td>
# <td>
# [array([1]), array([42]), array([54]), array([17]), array([1]), array([42])]
# </td>
# </tr>
# </table>
# #### 3.3 - Generate music
#
# Finally, you are ready to generate music. Your RNN generates a sequence of values. The following code generates music by first calling your `predict_and_sample()` function. These values are then post-processed into musical chords (meaning that multiple values or notes can be played at the same time).
#
# Most computational music algorithms use some post-processing because it is difficult to generate music that sounds good without such post-processing. The post-processing does things such as clean up the generated audio by making sure the same sound is not repeated too many times, that two successive notes are not too far from each other in pitch, and so on. One could argue that a lot of these post-processing steps are hacks; also, a lot of the music generation literature has also focused on hand-crafting post-processors, and a lot of the output quality depends on the quality of the post-processing and not just the quality of the RNN. But this post-processing does make a huge difference, so let's use it in our implementation as well.
#
# Let's make some music!
# Run the following cell to generate music and record it into your `out_stream`. This can take a couple of minutes.
out_stream = generate_music(inference_model)
# To listen to your music, click File->Open... Then go to "output/" and download "my_music.midi". Either play it on your computer with an application that can read midi files if you have one, or use one of the free online "MIDI to mp3" conversion tools to convert this to mp3.
#
# As a reference, here is a 30 second audio clip we generated using this algorithm.
IPython.display.Audio('./data/30s_trained_model.mp3')
# ### Congratulations!
#
# You have come to the end of the notebook.
#
#
# ## What you should remember
# - A sequence model can be used to generate musical values, which are then post-processed into midi music.
# - Fairly similar models can be used to generate dinosaur names or to generate music, with the major difference being the input fed to the model.
# - In Keras, sequence generation involves defining layers with shared weights, which are then repeated for the different time steps $1, \ldots, T_x$.
# Congratulations on completing this assignment and generating a jazz solo!
# **References**
#
# The ideas presented in this notebook came primarily from three computational music papers cited below. The implementation here also took significant inspiration and used many components from Ji-Sung Kim's GitHub repository.
#
# - <NAME>, 2016, [deepjazz](https://github.com/jisungk/deepjazz)
# - <NAME>, <NAME> and <NAME>, 2009. [Learning Jazz Grammars](http://ai.stanford.edu/~kdtang/papers/smc09-jazzgrammar.pdf)
# - <NAME> and <NAME>, 2007, [A Grammatical Approach to Automatic Improvisation](http://smc07.uoa.gr/SMC07%20Proceedings/SMC07%20Paper%2055.pdf)
# - <NAME>, 1999, [Surprising Harmonies](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.5.7473&rep=rep1&type=pdf)
#
# We're also grateful to <NAME> for valuable feedback.
|
05.Sequence_Models/Week1/Jazz improvisation with LSTM/Improvise_a_Jazz_Solo_with_an_LSTM_Network_v3a.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
jupyter-nbconvert report_ttcs.ipynb --to python; python -u report_ttcs.py
"""
# +
from __future__ import division
import os
import glob
import pickle
from natsort import natsorted
import argparse
import os
import sys
import numpy as np
import tqdm
import pandas as pd
import scipy as sp
import numpy as np
from pprint import pprint
import glob
import sys
sys.path.append('../')
from plume_env import PlumeEnvironment, PlumeFrameStackEnvironment
import config
import agents
import agent_analysis
import os
import sklearn
import sklearn.decomposition as skld
import importlib
import log_analysis
importlib.reload(log_analysis)
# -
import sys

# Detect execution context: inside a Jupyter kernel argv[0] contains
# 'ipykernel_launcher'; anything else is treated as a batch/CLI run.
batchmode = 'ipykernel_launcher' not in sys.argv[0]
if batchmode:
    print("Batch/CLI mode")
else:
    print("Interactive mode")
import argparse
# +
# Common plotting setup: matplotlib/seaborn imports, serif font, report DPI.
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from IPython.display import clear_output
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patches as mpatches
import seaborn as sns
sns.set(style="white")
# print(plt.style.available)
mpl.rcParams['figure.dpi'] = 300
dpi_save = 300  # DPI passed to every plt.savefig() in this report
font = {'family' : 'serif',
        'weight' : 'normal',
        'size'   : 10}
matplotlib.rc('font', **font)
# Interactive vs. CLI
if not batchmode:
    # BUGFIX: this branch previously contained only the commented-out
    # IPython magics below, leaving the `if` with an empty body -- an
    # IndentationError whenever the exported .py script is executed.
    # `pass` keeps the file runnable; the magics only matter in Jupyter.
    # %matplotlib inline
    # # %config InlineBackend.figure_format = 'svg'
    # %config InlineBackend.figure_format = 'retina'
    pass
if batchmode:
    # NOTE(review): matplotlib.use() after pyplot has been imported may be
    # ignored by some matplotlib versions -- confirm Agg actually takes
    # effect in headless runs.
    matplotlib.use("Agg")
pca3d_figsize = (10, 5)  # shared figure size for the 3D PCA plots below
# +
# Common
# Reload config so colormap tweaks are picked up without restarting a kernel;
# these dicts map outcome / behavioral-regime labels to plot colors.
importlib.reload(config)
outcome_colormap = config.outcome_colormap
regime_colormap = config.regime_colormap
# https://seaborn.pydata.org/generated/seaborn.set_color_codes.html#seaborn.set_color_codes
sns.color_palette()
sns.set_color_codes(palette='deep')
# -
if batchmode:
    # CLI arguments (batch mode only): path to the policy checkpoint (.pt).
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--model_fname', default=None)
    # parser.add_argument('--datasets', type=str, nargs='+',
    #                     default=['constantx5b5', 'switch45x5b5', 'noisy3x5b5'])
    # parser.add_argument('--dataset', type=str, default='constantx5b5')
    args = parser.parse_args()
    print(args)
    # NOTE(review): model_fname stays None when --model_fname is omitted,
    # which makes model_fname.replace(...) below raise -- consider
    # required=True. TODO confirm intended CLI contract.
    model_fname = args.model_fname
# +
if not batchmode: # Interactive
    # Default checkpoint when exploring in Jupyter: first VRNN model (by
    # natural sort order) found in the model zoo.
    model_fname = natsorted(glob.glob(f'/home/satsingh/plume/plumezoo/latest/fly/memory/*VRNN*.pt'))[0]
    # model_fname = natsorted(glob.glob(f'/home/satsingh/plume/plumezoo/dynamic/fly/memory/*VRNN*.pt'))[0]
# +
# Derive the per-model working directory and output paths from the
# checkpoint filename, and detect whether the policy is recurrent.
model_dir = model_fname.replace('.pt', '/')
print("model_dir", model_dir)
# The training seed is embedded in the path as ".../seed<digits>/...".
_after_seed = model_dir.split('seed')[-1]
model_seed = _after_seed.split('/')[0]
print("model_seed ---->", model_seed)
outdir = f"{model_dir}/report_common_subspace/"
os.makedirs(outdir, exist_ok=True)
print(outdir)
# Recurrent policies carry a GRU or VRNN tag in the checkpoint path.
is_recurrent = ('GRU' in model_dir) or ('VRNN' in model_dir)
logfiles = natsorted(glob.glob(model_dir + '*.pkl'))
# Notebook echo of the log basenames (no effect when run as a script).
[ x.split('/')[-1] for x in logfiles ]
# -
# use_datasets = ['constantx5b5', 'switch45x5b5', 'noisy6x5b5']
use_datasets = ['constantx5b5', 'switch45x5b5', 'noisy3x5b5']
# Balanced episode sample (60 HOME / 60 other per dataset) used to fit the
# common PCA subspace over hidden-state activity.
selected_df = log_analysis.get_selected_df(model_dir,
                          use_datasets,
                          n_episodes_home=60,
                          n_episodes_other=60,
                          min_ep_steps=0)
print(selected_df.shape)
selected_df
selected_df.groupby(['dataset', 'outcome']).count()
# Fit a single 15-component PCA basis over the hidden states pooled across
# all selected episodes, then cache it alongside the report artifacts.
pca_common = log_analysis.get_pca_common(
    selected_df, n_comp=15, is_recurrent=is_recurrent)
with open(f"{outdir}/pca_common.pkl", 'wb') as fh:
    pickle.dump(pca_common, fh)
# +
# # Plot neural-trajectories on common subspace
# # # %matplotlib notebook
# # importlib.reload(log_analysis)
# # # %config InlineBackend.figure_format = 'retina' # For manuscript
# # mpl.rcParams['figure.dpi'] = 300
# column_to_titlestring = {
# 'odor_lastenc': 'Steps since last\nplume encounter',
# 'stray_distance': 'stray_distance',
# 'odor_01': 'On/off plume',
# 'odor_ma_8': 'Odor moving-average\n(8 step window) [A.U.]',
# 'odor_ma_6': 'Odor moving-average\n(6 step window) [A.U.]',
# 'wind_theta_obs': 'Egocentric\nwind angle [rad]',
# 'agent_angle_ground': r'Head direction [rad]',
# 'turn': 'Turn',
# 'step': 'Step',
# 'neural_velocity': r"$\Delta$h",
# }
# column_ticklabels = {
# 'agent_angle_ground': [r'$-\pi/2$', 0, r'$+\pi/2$'],
# }
# column_ticks = {
# 'agent_angle_ground': [0, 0.5, 1.0],
# }
# from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# def plot_common_subspace_all(selected_df, colorby):
# fig = plt.figure(figsize=pca3d_figsize)
# ax = fig.gca(projection='3d')
# # plot_df = selected_df.groupby(['dataset', 'outcome']).head(5)
# plot_df = selected_df.groupby(['dataset', 'outcome']).tail(5)
# # plot_df = selected_df.groupby(['dataset', 'outcome']).sample(5, replace=True)
# colorbar_is_plotted = False
# for idx, row in plot_df.iterrows():
# outcome = row['outcome']
# ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
# # ep_activity = pd.DataFrame(ep_activity).diff().fillna(0).to_numpy() # if colorby == 'neural_velocity' else ep_activity
# traj_df = log_analysis.get_traj_df(row['log'],
# extended_metadata=True,
# squash_action=True,
# seed=model_seed)
# X_pca = pca_common.transform(ep_activity)
# # X_pca = pd.DataFrame(X_pca).diff().to_numpy()
# ax.plot(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2], linewidth=0.6, c='grey', alpha=1.0)
# if colorby == 'outcome':
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2], s=10,
# c=outcome_colormap[outcome], label='outcome')
# elif colorby == 'regime':
# regime_colors = [ regime_colormap[x] for x in traj_df['regime'] ]
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2], s=10,
# c=regime_colors, alpha=0.5)
# elif colorby in ['wind_theta_obs', 'agent_angle_ground']:
# # Cyclic colormap: https://matplotlib.org/stable/tutorials/colors/colormaps.html
# # Seaborn: https://stackoverflow.com/questions/23712207/cyclic-colormap-without-visual-distortions-for-use-in-phase-angle-plots
# c = traj_df[colorby]
# print(colorby, c.min(), c.max())
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
# s=10, c=c, cmap='twilight', alpha=0.5, vmin=0, vmax=1)
# elif 'odor' in colorby:
# c = traj_df[colorby]
# print(colorby, c.min(), c.max())
# vmax = 28 if colorby == 'odor_lastenc' else 1
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
# s=10, c=c, cmap='copper', alpha=0.5, vmin=0, vmax=vmax)
# elif colorby == 'stray_distance':
# c = traj_df[colorby]
# print(colorby, c.min(), c.max())
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
# s=10, c=c, cmap='copper', alpha=0.5, vmin=0, vmax=2)
# elif colorby in ['step', 'turn']:
# c = traj_df[colorby]
# print(colorby, c.min(), c.max())
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
# s=10, c=c, cmap='copper', alpha=0.5, vmin=0, vmax=1)
# elif colorby == 'neural_velocity':
# c = pd.DataFrame(ep_activity).diff().fillna(0).apply(np.linalg.norm, axis=1)
# # c /= pd.DataFrame(ep_activity).apply(np.linalg.norm, axis=1)
# # c = np.log(1+c)
# # c = np.clip(0, 1.5, c)
# print(colorby, c.min(), c.max())
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
# s=10, c=c, cmap='copper', alpha=0.5, vmin=0)
# else:
# c = traj_df[colorby]
# print(colorby, c.min(), c.max())
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
# s=10, c=c, cmap='copper', alpha=0.5, vmin=0)
# if not batchmode:
# plt.title(f"{colorby} [{model_seed}]]")
# # https://stackoverflow.com/questions/18211967/position-colorbar-inside-figure
# if colorby not in ['outcome', 'regime']:
# # plt.colorbar(sc)
# cbar_ax = inset_axes(ax,
# width="30%",
# height="3%",
# # loc='upper right',
# bbox_to_anchor=(0.0, 0.45, 0.92, 0.4), # (x0, y0, width, height)
# bbox_transform=ax.transAxes,
# )
# clb = plt.colorbar(sc, cbar_ax, orientation='horizontal') # ticks=[0., 1.]
# # cbar_ax.set_title(colorby)
# cbar_ax.set_title(column_to_titlestring[colorby])
# if colorby in column_ticklabels.keys():
# clb.set_ticks(column_ticks[colorby])
# clb.set_ticklabels(column_ticklabels[colorby])
# # https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
# # divider = make_axes_locatable(ax)
# # cax = divider.append_axes("right", size="5%", pad=0.05)
# # plt.colorbar(sc, cax=cax)
# ax.set_xlabel(f'PC1 (VarExp: {pca_common.explained_variance_ratio_[0]:0.2f})')
# ax.set_ylabel(f'PC2 (VarExp: {pca_common.explained_variance_ratio_[1]:0.2f})')
# ax.set_zlabel(f'PC3 (VarExp: {pca_common.explained_variance_ratio_[2]:0.2f})')
# # plt.legend()
# plt.tight_layout()
# if colorby == 'regime':
# handles, labels = plt.gca().get_legend_handles_labels()
# patch1 = mpatches.Patch(color='green', label='Track')
# patch2 = mpatches.Patch(color='slateblue', label='Recover')
# patch3 = mpatches.Patch(color='red', label='Lost')
# handles.extend([patch1, patch2, patch3])
# plt.legend(handles=handles, loc='upper right', bbox_to_anchor=(0.95, 0.9))
# plt.show()
# fname = f"{outdir}/comsub_{colorby}_{model_seed}.png"
# print("Saving:", fname)
# plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# # Just plot colorbar
# # if colorby not in ['outcome', 'regime']:
# # fig = plt.figure()
# # ax = plt.gca()
# # sc = ax.scatter(X_pca[:, 0], X_pca[:, 1],
# # s=10, c=c, cmap='copper', alpha=0.5, vmin=0)
# # plt.colorbar(sc, orientation='horizontal') # ticks=[0., 1.]
# # fname = f"{outdir}/comsub_{colorby}_{model_seed}_colorbar.png"
# # print("Saving:", fname)
# # plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# if batchmode:
# colorbys = [
# 'outcome',
# 'regime',
# 'odor_lastenc',
# 'stray_distance',
# 'odor_01',
# 'odor_ma_8',
# 'odor_ma_6',
# 'wind_theta_obs',
# 'agent_angle_ground',
# 'turn',
# 'step',
# 'neural_velocity']
# else:
# colorbys = [
# # 'outcome',
# # 'regime',
# # 'odor_lastenc',
# # 'stray_distance',
# # 'odor_01',
# # 'odor_ma_8',
# # 'odor_ma_6',
# # 'wind_theta_obs',
# 'agent_angle_ground',
# # 'turn',
# # 'step',
# # 'neural_velocity'
# ]
# for colorby in colorbys:
# plot_common_subspace_all(selected_df, colorby)
# -
# +
# # Plot neural-trajectories on common subspace
# # # %matplotlib notebook
# # importlib.reload(log_analysis)
# def plot_common_subspace_individual(selected_df, plot_df, idxs_to_color):
# for outcome in ['HOME', 'OOB']:
# outcome_colormap = {'HOME': 'g', 'OOB':'r', 'OOT':'b'}
# regime_colormap = {'SEARCH': 'red',
# 'TRACK':'green',
# 'RECOVER':'blue',
# 'WARMUP':'cyan'}
# for idx2color in idxs_to_color[outcome]:
# fig = plt.figure(figsize=pca3d_figsize)
# ax = fig.gca(projection='3d')
# for idx, row in plot_df.iterrows():
# outcome = row['outcome']
# ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
# traj_df = log_analysis.get_traj_df(row['log'],
# extended_metadata=False,
# squash_action=False,
# seed=model_seed)
# X_pca = pca_common.transform(ep_activity)
# ax.plot(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2], linewidth=0.6, c='grey', alpha=0.5)
# # Color trajectory timestep by REGIME
# if row['dataset'] == viz_dataset and row['idx'] == idx2color:
# viz_log = row['log']
# print('Coloring', row['idx'], outcome)
# regime_colors = [ regime_colormap[x] for x in traj_df['regime'] ]
# sc = ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2], s=10, c=regime_colors, alpha=0.5)
# ax.scatter(X_pca[0, 0], X_pca[0, 1], X_pca[0, 2], c='black', marker='o', lw=2, s=100) # Start
# ax.scatter(X_pca[-1, 0], X_pca[-1, 1], X_pca[-1, 2], c='black', marker='x', lw=2, s=100) # End
# # print(idx, row['idx'])
# ax.set_xlabel(f'PC1 (VarExp: {pca_common.explained_variance_ratio_[0]:0.2f})')
# ax.set_ylabel(f'PC2 (VarExp: {pca_common.explained_variance_ratio_[1]:0.2f})')
# ax.set_zlabel(f'PC3 (VarExp: {pca_common.explained_variance_ratio_[2]:0.2f})')
# plt.title(f"{outcome}_ep{idx2color}")
# plt.tight_layout()
# plt.show()
# fname = f"{outdir}/comsub_{model_seed}_{outcome}_ep{idx2color}.png"
# print("Saving:", fname)
# plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# # Plot associated trajectory
# agent_analysis.visualize_episodes([viz_log],
# zoom=2,
# dataset=viz_dataset,
# animate=False,
# colorby='regime'
# )
# plt.xlim(-1, 10.5)
# plt.show()
# title = f"{outcome}_ep{idx2color}"
# fname = f"{outdir}/traj_regime_{model_seed}_{title}.png"
# print("Saving:", fname)
# plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# viz_dataset = 'constantx5b5'
# # viz_dataset = 'switch45x5b5'
# # viz_dataset = 'noisy6x5b5'
# # viz_dataset = 'noisy3x5b5'
# n_background = 5
# n_color = 5
# plot_df = selected_df.groupby(['dataset', 'outcome']).tail(n_background)
# idxs_to_color = {}
# for outcome in plot_df['outcome'].unique():
# idxs_to_color[outcome] = plot_df.query("dataset == @viz_dataset and outcome == @outcome").head(n_color)['idx'].to_list()
# plot_common_subspace_individual(selected_df, plot_df, idxs_to_color)
# +
# # importlib.reload(agent_analysis)
# # importlib.reload(log_analysis)
# def plot_common_subspace_individual_traj(plot_df, idxs_to_color):
# for outcome in ['HOME', 'OOB']:
# for idx, row in plot_df.iterrows():
# if row['dataset'] == viz_dataset and row['idx'] in idxs_to_color[outcome]:
# outcome = row['outcome']
# ep_activity = log_analysis.get_activity(row['log'],
# is_recurrent,
# do_plot=False)
# traj_df = log_analysis.get_traj_df(row['log'],
# extended_metadata=False, squash_action=False, seed=model_seed)
# print('Coloring', row['idx'], outcome, row['dataset'])
# title = f"{outcome}_ep{row['idx']}"
# agent_analysis.visualize_episodes([row['log']],
# zoom=2,
# dataset=row['dataset'],
# animate=False,
# # title=title, # not supported
# colorby='regime'
# )
# plt.xlim(-1, 10.5)
# plt.show()
# fname = f"{outdir}/traj_regime_{title}.png"
# print("Saving:", fname)
# plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# plot_common_subspace_individual_traj(plot_df, idxs_to_color)
# -
# ## Time to surge/time to cast analysis
#
# ### Extract centroids of [HOME] and [OOB] from actual trials
# +
# Estimate one hidden-state centroid per outcome from the activity in the
# final `trail_len` steps of each selected episode.
trail_len = 25
h_outcomes = ['HOME', 'OOB']
hx_centroids = {}
for h_outcome in h_outcomes:
    tail_chunks = []  # hidden activity around end of trial
    # for log in plot_df.query("outcome == @h_outcome")['log']:
    for log in selected_df.query("outcome == @h_outcome")['log']:
        ep_activity = log_analysis.get_activity(log,
                                                is_recurrent,
                                                do_plot=False)
        # HOME episodes always count; OOB episodes must be long enough
        # (> 75 steps, i.e. > 3 s) to represent genuine searching.
        keep = (h_outcome == 'HOME') or (ep_activity.shape[0] > 75)
        if keep:
            tail_chunks.append(ep_activity[-trail_len:, :])
    # (median was tried here too; mean kept as the centroid estimator)
    hx_centroids[h_outcome] = np.mean(np.vstack(tail_chunks), axis=0).reshape((1, -1))
dist_between_centroids = np.linalg.norm(hx_centroids[h_outcomes[0]] - hx_centroids[h_outcomes[1]])
dist_between_centroids
# -
# +
# # Plot extracted centroids
# n_background = 5
# n_color = 1
# plot_df = selected_df.groupby(['dataset', 'outcome']).tail(n_background)
# for h_outcome in h_outcomes:
# fig = plt.figure(figsize=pca3d_figsize)
# ax = fig.gca(projection='3d')
# for idx, row in plot_df.iterrows():
# outcome = row['outcome']
# ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
# traj_df = log_analysis.get_traj_df(row['log'],
# extended_metadata=False, squash_action=False, seed=model_seed)
# X_pca = pca_common.transform(ep_activity)
# ax.plot(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2], linewidth=2, c='grey', alpha=0.2)
# # plot the centroid
# X_pca = pca_common.transform(hx_centroids[h_outcome])
# ax.scatter(X_pca[0, 0], X_pca[0, 1], X_pca[0, 2],
# c=outcome_colormap[h_outcome], marker='x', linewidth=3.0, s=300, alpha=1.0)
# ax.set_xlabel(f'PC1 (VarExp: {pca_common.explained_variance_ratio_[0]:0.2f})')
# ax.set_ylabel(f'PC2 (VarExp: {pca_common.explained_variance_ratio_[1]:0.2f})')
# ax.set_zlabel(f'PC3 (VarExp: {pca_common.explained_variance_ratio_[2]:0.2f})')
# plt.title(f"Centroid: {h_outcome}")
# plt.tight_layout()
# plt.show()
# fname = f"{outdir}/centroids_{model_seed}_{h_outcome}.png"
# print("Saving:", fname)
# plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# -
# Notebook echo of the configured outcome colors (no effect when run as a script).
outcome_colormap.values()
# ### Color by which centroid closer to [2-centroid model]
# +
# 3D PCA view of hidden-state trajectories; every timestep is colored by
# which behavioral centroid (HOME vs OOB/"lost") its hidden state is closer
# to, with the two centroids overplotted in black.
# # %config InlineBackend.figure_format = 'retina'
# mpl.rcParams['figure.dpi'] = 300
n_background = 5
n_color = 1
plot_df = selected_df.groupby(['dataset', 'outcome']).tail(n_background)
fig = plt.figure(figsize=pca3d_figsize)
# fig = plt.figure(figsize=(4,7))
ax = fig.gca(projection='3d')
importlib.reload(config)
ttcs_colormap = config.ttcs_colormap
# Doesn't respect this!
# https://matplotlib.org/3.3.3/gallery/misc/zorder_demo.html
for idx, row in plot_df.iterrows():
    outcome = row['outcome']
    ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
    # NOTE(review): traj_df is computed but unused in this cell; kept in
    # case get_traj_df has needed side effects -- confirm and drop if not.
    traj_df = log_analysis.get_traj_df(row['log'],
                                       extended_metadata=False, squash_action=False, seed=model_seed)
    X_pca = pca_common.transform(ep_activity)
    ax.plot(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
            linewidth=0.6, c='grey', alpha=0.5, zorder=1)
    # Color each timestep by the nearer centroid; distances are computed in
    # the full hidden-state space, not the PCA projection.
    dist_home = np.linalg.norm(ep_activity - hx_centroids['HOME'], axis=1)
    dist_oob = np.linalg.norm(ep_activity - hx_centroids['OOB'], axis=1)
    # IMPROVED: vectorized elementwise comparison replaces the original
    # per-index Python loop (same result, O(n) numpy instead of list work);
    # the dead initial `colors = pd.Series(['grey']*...)` assignment was
    # removed (it was immediately overwritten).
    closer_to_home = dist_home < dist_oob
    colors = [ ttcs_colormap['HOME'] if x else ttcs_colormap['OOB'] for x in closer_to_home ]
    ax.scatter(X_pca[:, 0], X_pca[:, 1], X_pca[:, 2],
               c=colors, alpha=0.3, s=6, zorder=1)
# plot centroids
for h_outcome in hx_centroids.keys():
    X_pca = pca_common.transform(hx_centroids[h_outcome])
    ax.scatter(X_pca[0, 0], X_pca[0, 1], X_pca[0, 2], alpha=1.0,
               c='black', marker='o', lw=3, s=300, zorder=10)
# legend
handles, labels = plt.gca().get_legend_handles_labels()
patch1 = mpatches.Patch(color=ttcs_colormap['HOME'], label='HOME')
patch2 = mpatches.Patch(color=ttcs_colormap['OOB'], label='OOB')
handles.extend([patch1, patch2])
plt.legend(handles=handles,
           loc='lower left',
           framealpha=1.0,
           labelspacing=0.07,
           bbox_to_anchor=(0.02, 0.24),
           )
ax.set_xlabel(f'PC1')
ax.set_ylabel(f'PC2')
ax.set_zlabel(f'PC3')
# ax.set_xlabel(f'PC1 (VarExp: {pca_common.explained_variance_ratio_[0]:0.2f})')
# ax.set_ylabel(f'PC2 (VarExp: {pca_common.explained_variance_ratio_[1]:0.2f})')
# ax.set_zlabel(f'PC3 (VarExp: {pca_common.explained_variance_ratio_[2]:0.2f})')
# if not batchmode:
#     plt.title(f"{model_seed}")
plt.tight_layout()
plt.show()
fname = f"{outdir}/comsub_by_centroid_{model_seed}.png"
print("Saving:", fname)
plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# -
# ## Floris Q: Time to cast vs time to surge
# ### Time to cast
# +
# Find episodes that end in casting (OOB)
# Find time from last_enc=0 to entry into OOB-centroid-range
# However, should have encountered plume at least once i.e. last_enc=0 for multiple rows
# for idx, row in plot_df.query("outcome == 'OOB' and dataset == 'constantx5b5' ").iterrows():
# outcome = row['outcome']
# ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
# traj_df = log_analysis.get_traj_df(row['log'],
# extended_metadata=False, squash_action=False, seed=model_seed)
# if sum(traj_df.odor_lastenc == 0) > 1: # encountered plume at least once
# print("Found")
# break
# +
# # centroid_ranges = {'HOME':dist_between_centroids/3, 'OOB':dist_between_centroids/3}
# centroid_ranges = {'HOME':dist_between_centroids/2, 'OOB':dist_between_centroids/2}
# home_distances = np.linalg.norm(ep_activity - hx_centroids['HOME'], axis=1)
# oob_distances = np.linalg.norm(ep_activity - hx_centroids['OOB'], axis=1)
# oob_distances
# # TODO fix -- Hack to get plot scales same
# max_val = np.max( [np.max(home_distances), np.max(oob_distances)] )
# max_val
# # np.max(home_distances)
# min_val = np.min( [np.min(home_distances), np.min(oob_distances)] )
# print(min_val, max_val)
# #
# # pd.Series(home_distances).describe(), pd.Series(oob_distances).describe()
# pd.Series(home_distances).hist(alpha=0.5) # Blue
# pd.Series(oob_distances).hist(alpha=0.5) # Orange
# plt.show()
# exit_idx = len(traj_df.odor_01) - pd.Series(traj_df.odor_01).iloc[::-1].argmax()
# print(exit_idx)
# pd.Series(home_distances).plot(figsize=(5, 1.5), label=r'd$_{HOME}$') # Blue
# pd.Series(oob_distances).plot(label=r'd$_{OOB}$') # Orange
# for x in ['HOME', 'OOB']:
# plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.legend()
# plt.show()
# pd.Series(traj_df.odor_lastenc).plot(figsize=(5, 1))
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.show()
# pd.Series(traj_df.odor_01).plot(figsize=(5, 1))
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.show()
# agent_analysis.visualize_episodes([row['log']],
# zoom=2,
# dataset=row['dataset'],
# animate=False,
# )
# plt.show()
# for x in ['HOME', 'OOB']:
# plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.legend()
# plt.show()
# pd.Series(traj_df.odor_lastenc).plot(figsize=(5, 1))
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.show()
# pd.Series(traj_df.odor_01).plot(figsize=(5, 1))
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.show()
# agent_analysis.visualize_episodes([row['log']],
# zoom=2,
# dataset=row['dataset'],
# animate=False,
# )
# plt.show()
# +
# pd.Series(traj_df.odor_01).plot(figsize=(5, 1))
# exit_idx = len(traj_df.odor_01) - pd.Series(traj_df.odor_01).iloc[::-1].argmax()
# print(exit_idx)
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.show()
# +
# # Distances in PCA space? -- Basically the same
# ep_pca = pca_common.transform(ep_activity)
# hx_centroids_pca = {}
# for h_outcome in hx_centroids.keys():
# hx_centroids_pca[h_outcome] = pca_common.transform(hx_centroids[h_outcome])
# home_distances = np.linalg.norm(ep_pca - hx_centroids_pca['HOME'], axis=1)
# oob_distances = np.linalg.norm(ep_pca - hx_centroids_pca['OOB'], axis=1)
# # TODO fix -- Hack to get scales same
# max_val = np.max( [np.max(home_distances), np.max(oob_distances)] )
# min_val = np.min( [np.min(home_distances), np.min(oob_distances)] )
# print(min_val, max_val)
# pd.Series(home_distances).hist(alpha=0.5) # Blue
# pd.Series(oob_distances).hist(alpha=0.5) # Orange
# plt.show()
# pd.Series(home_distances).plot(figsize=(5, 1.5))
# pd.Series(oob_distances).plot() # Orange
# +
# # traj_df.loc[:, ['loc_x', 'loc_y']]
# # plt.plot(traj_df['loc_x'], traj_df['loc_y'])
# agent_analysis.visualize_episodes([row['log']],
# zoom=2,
# dataset=row['dataset'],
# animate=False,
# colorby=home_distances,
# vmax=max_val,
# )
# agent_analysis.visualize_episodes([row['log']],
# zoom=2,
# dataset=row['dataset'],
# animate=False,
# colorby=oob_distances,
# vmax=max_val,
# )
# -
# ### Time to surge
# +
# # Find episodes that end in HOME
# # Do a hist. of last_enc_max
# # If bimodal, choose episodes with longer duration mode;
# # else arbitrary threshold
# # Find trajectory snippets from last_enc.diff()<0, to 15(?) timesteps...
# # .... and look for entry into the HOME-centroid-range
# last_encs = []
# # for idx, row in selected_df.query("outcome == 'HOME' and dataset == 'constantx5b5' ").iterrows():
# for idx, row in plot_df.query("outcome == 'HOME' and dataset == 'constantx5b5' ").iterrows():
# outcome = row['outcome']
# ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
# traj_df = log_analysis.get_traj_df(row['log'],
# extended_metadata=False, squash_action=False, seed=model_seed)
# # last_encs.extend( traj_df.odor_lastenc.to_list() )
# last_encs.append( traj_df.odor_lastenc.max() )
# pd.Series(last_encs).hist() # Not bimodal for plot_df, but is for selected_df
# +
# last_encs = []
# # for idx, row in selected_df.query("outcome == 'HOME' and dataset == 'constantx5b5' ").iterrows():
# for idx, row in plot_df.query("outcome == 'HOME' and dataset == 'constantx5b5' ").iterrows():
# outcome = row['outcome']
# ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
# traj_df = log_analysis.get_traj_df(row['log'],
# extended_metadata=False, squash_action=False, seed=model_seed)
# if sum(traj_df.odor_lastenc >= 25) >= 1:
# print("Found", idx)
# break
# pd.Series(traj_df.odor_lastenc).plot(figsize=(5, 1)); plt.show()
# pd.Series(traj_df.odor_01).plot(figsize=(5, 1)); plt.show()
# +
# home_distances = np.linalg.norm(ep_activity - hx_centroids['HOME'], axis=1)
# oob_distances = np.linalg.norm(ep_activity - hx_centroids['OOB'], axis=1)
# oob_distances
# # TODO fix -- Hack to get scales same
# max_val = np.max( [np.max(home_distances), np.max(oob_distances)] )
# min_val = np.min( [np.min(home_distances), np.min(oob_distances)] )
# print(min_val, max_val)
# pd.Series(home_distances).hist(alpha=0.5) # Blue
# pd.Series(oob_distances).hist(alpha=0.5) # Orange
# plt.show()
# +
# entry_idx = traj_df.odor_lastenc.argmax()
# print("entry_idx", entry_idx)
# pd.Series(home_distances).plot(figsize=(5, 1.5), label='d[HOME]') # Blue
# pd.Series(oob_distances).plot(label=r'd$_{OOB}$') # Orange
# for x in ['HOME', 'OOB']:
# plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.legend()
# plt.show()
# # pd.Series(home_distances).plot(figsize=(5, 1.5))
# # pd.Series(oob_distances).plot() # Orange
# # for x in ['HOME', 'OOB']:
# # plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
# # plt.axvline(x=entry_idx, c='grey', ls=':')
# # plt.show()
# pd.Series(traj_df.odor_lastenc).plot(figsize=(5, 1))
# plt.axvline(x=entry_idx, c='grey', ls=':')
# plt.show()
# pd.Series(traj_df.odor_01).plot(figsize=(5, 1))
# plt.axvline(x=entry_idx, c='grey', ls=':')
# plt.show()
# for x in ['HOME', 'OOB']:
# plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
# plt.axvline(x=exit_idx, c='grey', ls=':')
# plt.legend()
# plt.show()
# # pd.Series(home_distances).plot(figsize=(5, 1.5))
# # pd.Series(oob_distances).plot() # Orange
# # for x in ['HOME', 'OOB']:
# # plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
# # plt.axvline(x=entry_idx, c='grey', ls=':')
# # plt.show()
# pd.Series(traj_df.odor_lastenc).plot(figsize=(5, 1))
# plt.axvline(x=entry_idx, c='grey', ls=':')
# plt.show()
# pd.Series(traj_df.odor_01).plot(figsize=(5, 1))
# plt.axvline(x=entry_idx, c='grey', ls=':')
# plt.show()
# +
# # home_distances[entry_idx-3:entry_idx+4]
# pd.Series(home_distances).diff().plot(figsize=(5, 1.5))
# pd.Series(oob_distances).diff().plot() # Orange
# +
# agent_analysis.visualize_episodes([row['log']],
# zoom=2,
# dataset=row['dataset'],
# animate=False,
# colorby=home_distances,
# vmax=max_val,
# )
# agent_analysis.visualize_episodes([row['log']],
# zoom=2,
# dataset=row['dataset'],
# animate=False,
# colorby=oob_distances,
# vmax=max_val,
# )
# -
# ### Scale out TTC and TTS
# Work with all episodes
use_datasets_all = ['constantx5b5', 'switch45x5b5', 'noisy3x5b5']
# Larger, unbalanced episode sample (up to 240 per outcome per dataset) used
# for the time-to-cast / time-to-surge statistics below.
all_df = log_analysis.get_selected_df(model_dir,
                          use_datasets_all,
                          n_episodes_home=240,
                          n_episodes_other=240,
                          balanced=False,
                          oob_only=False,
                          min_ep_steps=0)
# all_df.groupby(['dataset','outcome']).count()
all_df.groupby(['dataset']).count()
# +
# Common
# Decision radius around each centroid: half the inter-centroid distance,
# so the HOME and OOB neighborhoods just touch without overlapping.
centroid_ranges = {'HOME':dist_between_centroids/2, 'OOB':dist_between_centroids/2}
datasets = ['constantx5b5', 'switch45x5b5', 'noisy3x5b5']
# datasets = ['constantx5b5']
# datasets = ['constantx5b5_0.4']
# datasets = use_datasets
# Which per-episode diagnostic plots to render inside the loops below;
# leave empty to disable plotting.
to_plot = []
# to_plot = ['distances']
# to_plot = ['trajectories']
# to_plot = ['distances', 'trajectories']
print(datasets)
print(to_plot)
# +
# Scale out time-to-cast: for every episode ending away from home (OOB),
# measure the number of steps between the agent's final plume exit and its
# hidden state entering the OOB ("lost/search") centroid's neighborhood.
ttc_df = [] # time to cast
snippet_window = 10 # steps of trajectory kept on each side of the exit point
# 1. Select eps that end in OOB
for idx, row in all_df.query("outcome != 'HOME' and dataset in @datasets ").iterrows():
    outcome = row['outcome']
    dataset = row['dataset']
    ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
    traj_df = log_analysis.get_traj_df(row['log'],
                                       extended_metadata=True, squash_action=True, seed=model_seed)
    # 2: ep. encountered plume at least X timesteps before RECOVER/LOST
    min_steps_on_plume = 2 # 1 step is trivially possible because odor_lastenc[0]==0 :(
    steps_on_plume = sum(traj_df.odor_lastenc == 0)
    if steps_on_plume < min_steps_on_plume:
        print(f"{idx}: steps_on_plume {steps_on_plume} < {min_steps_on_plume}")
        continue
    # 3: Get (last) exit_idx -- exit_length counts steps after the final
    # odor encounter (argmax of the reversed 0/1 odor trace).
    exit_length = pd.Series(traj_df.odor_01).iloc[::-1].argmax()
    ep_length = len(traj_df.odor_01)
    ep_len = traj_df.shape[0]
    exit_idx = ep_length - exit_length
    print(ep_length, exit_idx, exit_length, steps_on_plume)
    if exit_length < 25: # exclude episodes where it exits too close to end of episode
        print(f"{idx}: exit_length {exit_length}")
        # BUGFIX: the exclusion described in the comment above was not
        # enforced -- the original only printed and fell through, keeping
        # too-short casts. (Also fixed "exit_lenght" typo in the message.)
        continue
    # 4: Get idx of when gets into OOB range
    home_distances = np.linalg.norm(ep_activity - hx_centroids['HOME'], axis=1)
    oob_distances = np.linalg.norm(ep_activity - hx_centroids['OOB'], axis=1)
    outside_oob = pd.Series(oob_distances > centroid_ranges['OOB'])
    outside_oob[:exit_idx] = True # ignore everything before the plume exit
    oob_idx = outside_oob.argmin() # first in-range index at/after exit_idx
    ttc = oob_idx - exit_idx
    if ttc <= 0: # automation failure -- outlier cases where episodes ends before entering OOB
        continue
    if 'distances' in to_plot:
        pd.Series(home_distances).plot(figsize=(5, 1.5), label=r'd$_{HOME}$') # Blue
        pd.Series(oob_distances).plot(label=r'd$_{OOB}$') # Orange
        for x in ['HOME', 'OOB']:
            plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
        plt.axvline(x=exit_idx, c='black', ls=':')
        plt.axvline(x=oob_idx, c='orange', ls='--')
        plt.title(row['idx'])
        plt.legend()
        plt.show()
    if 'trajectories' in to_plot:
        agent_analysis.visualize_episodes([row['log']],
                                          zoom=2,
                                          dataset=row['dataset'],
                                          animate=False,
                                          )
        plt.show()
    # NOTE(review): exit_idx - snippet_window can go negative for very early
    # exits, silently wrapping the slice -- confirm episodes are long enough.
    ttc_df.append({
        'idx': row['idx'],
        'ep_len': ep_len,
        'outcome': outcome,
        'dataset': dataset,
        'exit_idx': exit_idx,
        'entry_idx': None,
        'oob_idx': oob_idx,
        'home_idx': None,
        'ttc': ttc,
        'tts': None,
        'range_oob': centroid_ranges['OOB'],
        'range_home': centroid_ranges['HOME'],
        # 'log': row['log'],
        'entered_search_mode': True, # for all OOB
        'speed_snippet': traj_df['r_step'][exit_idx-snippet_window:exit_idx+snippet_window+1].to_list(),
        'traj_snippet': traj_df.iloc[exit_idx-snippet_window:exit_idx+snippet_window+1,:],
        'home_distances': home_distances,
        'oob_distances': oob_distances,
    })
ttc_df = pd.DataFrame(ttc_df)
print(ttc_df.shape)
ttc_df.head()
# +
# Scale out time-to-home (time-to-surge): for every episode that ends HOME,
# find the last long off-plume excursion and measure how many steps after
# re-encounter the hidden state needs to re-enter the HOME neighborhood.
tts_df = [] # time to surge
outcome = 'HOME'
# 1. Only ep that end in HOME
for idx, row in all_df.query("outcome == @outcome and dataset in @datasets ").iterrows():
    outcome = row['outcome']
    dataset = row['dataset']
    ep_activity = log_analysis.get_activity(row['log'], is_recurrent, do_plot=False)
    traj_df = log_analysis.get_traj_df(row['log'],
                                       extended_metadata=True, squash_action=True, seed=model_seed)
    # 2. Only ep. where the agent was off-plume >= min_exit_duration steps
    min_exit_duration = 13
    if sum(traj_df.odor_lastenc >= min_exit_duration) == 0:
        # BUGFIX: was `break`, which aborted the scan of ALL remaining
        # episodes at the first one without a long excursion; `continue`
        # skips only this episode, matching the comment's stated intent.
        continue
    # 3. entry_idx: last timestep whose off-plume streak reached the threshold
    ep_len = traj_df.shape[0]
    for entry_idx in reversed(range(ep_len)):
        if traj_df.odor_lastenc[entry_idx] >= min_exit_duration:
            break
    # 4. home_idx: first index at/after entry_idx within HOME centroid range
    home_distances = np.linalg.norm(ep_activity - hx_centroids['HOME'], axis=1)
    oob_distances = np.linalg.norm(ep_activity - hx_centroids['OOB'], axis=1)
    outside_home = pd.Series(home_distances > centroid_ranges[outcome])
    outside_home[:entry_idx] = True # ignore till entry_idx
    home_idx = outside_home.argmin()
    tts = home_idx - entry_idx
    if tts < 0:
        continue # exclude -- ep. identification failure
    # 5. Flag for if this ep was in within OOB range before entry_idx
    entered_search_mode = pd.Series(oob_distances < centroid_ranges['OOB'])[:entry_idx].any()
    if 'distances' in to_plot:
        pd.Series(home_distances).plot(figsize=(5, 1.5), label='d[HOME]') # Blue
        pd.Series(oob_distances).plot(label=r'd$_{OOB}$') # Orange
        for x in ['HOME', 'OOB']:
            plt.axhline(y=centroid_ranges[x], c='grey', ls='--')
        plt.axvline(x=entry_idx, c='grey', ls=':')
        plt.axvline(x=home_idx, c='b', ls='--')
        plt.title(row['idx'])
        plt.legend()
        plt.show()
    if 'trajectories' in to_plot:
        agent_analysis.visualize_episodes([row['log']],
                                          zoom=2,
                                          dataset=row['dataset'],
                                          animate=False,
                                          )
        plt.show()
    tts_df.append({
        'idx': row['idx'],
        'ep_len': ep_len,
        'outcome': outcome,
        'dataset': dataset,
        'exit_idx': None,
        'entry_idx': entry_idx,
        'oob_idx': None,
        'home_idx': home_idx,
        'ttc': None,
        'tts': tts,
        'range_oob': centroid_ranges['OOB'],
        'range_home': centroid_ranges['HOME'],
        'entered_search_mode': entered_search_mode,
        'traj_snippet': traj_df.iloc[entry_idx-snippet_window:entry_idx+snippet_window+1,:],
        'speed_snippet': traj_df['r_step'][entry_idx-snippet_window:entry_idx+snippet_window+1].to_list(),
        # 'log': row['log'],
        'home_distances': home_distances,
        'oob_distances': oob_distances,
    })
tts_df = pd.DataFrame(tts_df)
print(tts_df.shape)
tts_df.head()
# +
# Histograms
# plt.figure(figsize=(5,3))
# tts_df['tts'][~tts_df.entered_search_mode].hist(label='tts_nosearch', alpha=0.5)
# tts_df['tts'][tts_df.entered_search_mode].hist(label='tts_search', alpha=0.5)
# ttc_df['ttc'].hist(label='ttc', alpha=0.5)
# plt.legend()
# plt.title(f'Time to cast/surge [Seed:{model_seed}]')
# plt.tight_layout()
# fname = f"{outdir}/ttcs_hist_{model_seed}.png"
# print("Saving:", fname)
# plt.savefig(fname, dpi=dpi_save, bbox_inches='tight')
# plt.show()
# -
# Swarmplot
# Label episodes for plotting: TTL = time-to-lost (cast); TTT-L / TTT-NL =
# time-to-track with / without a prior visit to the OOB ("lost") neighborhood.
ttc_df['label'] = 'TTL'
ttc_df['timesteps'] = ttc_df['ttc']
tts_df['label'] = 'TTT-L'
tts_df['timesteps'] = tts_df['tts']
# BUGFIX: use .loc instead of chained indexing; the original
# tts_df['label'][mask] = ... raises SettingWithCopyWarning and is not
# guaranteed to write back into tts_df.
tts_df.loc[~tts_df.entered_search_mode, 'label'] = 'TTT-NL'
ttcs_df = pd.concat([tts_df, ttc_df])
ttcs_df.to_csv(f"{outdir}/ttcs_swarm_{model_seed}.csv")
ttcs_df.to_pickle(f"{outdir}/ttcs_swarm_{model_seed}.pkl")
# +
# %config InlineBackend.figure_format = 'retina'
mpl.rcParams['figure.dpi'] = 300
figsz = (3.5,2.2)
# figsz = (3,2)
# Original w/o statannot
# plt.figure(figsize=figsz)
# ax = sns.boxplot(x="label", y="timesteps", data=ttcs_df, palette="Set1")
# # plt.title(f"{datasets} [Seed:{model_seed}]")
# if not batchmode:
#     plt.title(f"[Seed:{model_seed}]")
# plt.tight_layout()
# plt.ylabel("Duration [frames]")
# plt.xlabel(None)
# plt.savefig(f"{outdir}/ttcs_box_{model_seed}.png", dpi=dpi_save, bbox_inches='tight')
# plt.show()
sns.color_palette()
# sns.color_palette("pastel")
# sns.set_palette("pastel")
from statannot import add_stat_annotation
# Box plot of the three duration distributions, annotated with pairwise
# Mann-Whitney significance stars via statannot.
plt.figure(figsize=figsz)
order = ["TTT-NL", "TTT-L", "TTL"]
ax = sns.boxplot(x="label",
                 y="timesteps",
                 data=ttcs_df,
                 order=order,
                 palette="Set1",
                 boxprops=dict(alpha=.7),
                 )
# plt.title(f"{datasets} [Seed:{model_seed}]")
test_results = add_stat_annotation(ax, data=ttcs_df,
                                   x="label",
                                   y="timesteps",
                                   order=order,
                                   box_pairs=[
                                       ("TTT-L", "TTL"),
                                       ("TTT-NL", "TTL"),
                                       ("TTT-NL", "TTT-L"),],
                                   test='Mann-Whitney', text_format='star',
                                   loc='inside', verbose=2)
# if not batchmode:
#     plt.title(f"[Seed:{model_seed}]")
# plt.tight_layout()
plt.ylabel("Duration [frames]")
plt.xlabel(None)
plt.savefig(f"{outdir}/ttcs_box_{model_seed}.png", dpi=dpi_save, bbox_inches='tight')
plt.show()
#Swarm
# Swarm plot of the same data (one point per episode).
plt.figure(figsize=figsz)
ax = sns.swarmplot(x="label", y="timesteps", data=ttcs_df, order=order, color=".25")
# plt.title(f"{datasets} [Seed:{model_seed}]")
plt.xlabel(None)
# if not batchmode:
#     plt.title(f"[Seed:{model_seed}]")
plt.ylabel("Duration [frames]")
# plt.tight_layout()
plt.savefig(f"{outdir}/ttcs_swarm_{model_seed}.png", dpi=dpi_save, bbox_inches='tight')
plt.show()
# -
|
code/ppo/report_ttcs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3.2 Linear Regression Implementation from Scratch
# +
# %matplotlib inline
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
print(torch.__version__)
# -
# ## 3.2.1 Generate the dataset
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
# Features are standard-normal draws; labels follow the ground-truth linear
# model plus small Gaussian observation noise (sigma = 0.01).
features = torch.randn(num_examples, num_inputs,
                       dtype=torch.float32)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()),
                       dtype=torch.float32)
# NOTE(review): this prints all 1000 samples; the upstream tutorial prints
# only the first pair (print(features[0], labels[0])) — confirm intended.
for i in range(num_examples):
    print(features[i], labels[i])
# +
def use_svg_display():
    """Render matplotlib figures as SVG (vector graphics) in the notebook."""
    # Display figures with vector graphics.
    display.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
    """Enable SVG rendering and set the default matplotlib figure size."""
    use_svg_display()
    plt.rcParams.update({'figure.figsize': figsize})
# # After adding the two functions above to ../d2lzh_pytorch they can be
# # imported like this instead:
# import sys
# sys.path.append("..")
# from d2lzh_pytorch import *
set_figsize()
# Scatter the second feature against the labels (marker size 1).
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1);
# -
# ## 3.2.2 读取数据
# Saved in the d2lzh package for later reuse.
def data_iter(batch_size, features, labels):
    """Yield shuffled (features, labels) minibatches of size ``batch_size``.

    Samples are visited in a fresh random order on every call; the final
    batch may be smaller when the sample count is not a multiple of
    ``batch_size``.
    """
    total = len(features)
    order = list(range(total))
    random.shuffle(order)  # randomize the visit order of the samples
    for start in range(0, total, batch_size):
        # Slicing clamps at the end, so no explicit min() is needed.
        picked = torch.LongTensor(order[start:start + batch_size])
        yield features.index_select(0, picked), labels.index_select(0, picked)
# +
batch_size = 10
# Peek at one random minibatch, then stop.
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
# -
# ## 3.2.3 Initialize model parameters
# Weights start from small Gaussian noise, the bias from zero; both are
# marked to track gradients for autograd.
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
# ## 3.2.4 Define the model
def linreg(X, w, b):  # saved in the d2lzh package for later reuse
    """Linear regression model: return Xw + b (bias broadcast over rows)."""
    return X.mm(w) + b
# ## 3.2.5 Define the loss function
def squared_loss(y_hat, y):  # saved in the pytorch_d2lzh package for later reuse
    """Halved elementwise squared error; y is reshaped to y_hat's shape."""
    residual = y_hat - y.view_as(y_hat)
    return residual ** 2 / 2
# ## 3.2.6 Define the optimization algorithm
def sgd(params, lr, batch_size):  # saved in the d2lzh_pytorch package for later reuse
    """One minibatch-SGD step: p <- p - lr * p.grad / batch_size, in place.

    Updates go through .data so they are not tracked by autograd.
    """
    for p in params:
        p.data.sub_(lr * p.grad / batch_size)
# ## 3.2.7 Train the model
# +
lr = 0.03
num_epochs = 10
net = linreg
loss = squared_loss
for epoch in range(num_epochs):  # training takes num_epochs full sweeps
    # Each epoch uses every training sample once (assuming the sample count
    # divides evenly by the batch size); X and y are the minibatch features
    # and labels.
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # scalar loss over the minibatch
        l.backward()  # gradients of the minibatch loss w.r.t. the parameters
        sgd([w, b], lr, batch_size)  # minibatch stochastic gradient descent step
        # Don't forget to zero the gradients — backward() accumulates.
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
# -
print(true_w, '\n', w)
print(true_b, '\n', b)
|
code/chapter03_DL-basics/3.2_linear-regression-scratch.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
import experiment_util as exper
# +
# HetRec 2011 Last.fm dataset: tab-separated files, one header row each.
datafolder = '/home/eliezer/Dropbox/repo-phd/poissoncpp/datasets/hetrec2011/lastfm/'
userfriends = np.loadtxt(datafolder+"user_friends.dat", delimiter='\t',skiprows=1,dtype=int)   # columns: userID, friendID
userartists = np.loadtxt(datafolder+"user_artists.dat", delimiter='\t',skiprows=1,dtype=int)   # columns: userID, artistID, listen count
ua_tag = np.loadtxt(datafolder+"user_taggedartists.dat", delimiter='\t',skiprows=1,dtype=int)  # col 1 = artistID, col 2 = tagID (see usage below)
artists_id = np.unique(userartists[:,1])
users_id = np.unique(userartists[:,0])
# Maximum listen count per user (aligned with users_id).
max_rating_users=np.array([np.max(userartists[userartists[:,0]==x,2]) for x in users_id])
# -
# Dense re-indexing: map raw system IDs to 0-based array indices (Python 2: xrange).
artists_inv_id=dict(zip(artists_id,xrange(len(artists_id))))
users_inv_id=dict(zip(users_id,xrange(len(users_id))))
# +
### Build each user's friend list using array indices rather than the raw
### system IDs (translated from Portuguese).
list_friends_id= [ [users_inv_id[friend_id] for friend_id in userfriends[userfriends[:,0]==u_id][:,1]]\
for u_id in users_id]
### For every artist, collect the IDs of all of its tags.
artists_tags_id= [ ua_tag[ua_tag[:,1]==a_id][:,2] for a_id in artists_id ]
# -
# Re-index (userID, artistID, count) rows to 0-based indices and log-compress
# the listening counts: rating = ceil(ln(count)).
# BUG FIX: the output folder was previously built as
#   datafolder = datafolder + '/home/eliezer/datasets/...'
# i.e. an absolute path appended to the Dropbox path, yielding a nonexistent
# location for the first savetxt; assign the transformed-data folder directly,
# as the rest of this script already does.  A duplicate throwaway computation
# of the same array (bound to "temp") was also removed.
datafolder = '/home/eliezer/datasets/hetrec2011/lastfm/transformed/'
new_userartists_log = np.apply_along_axis( lambda x:[users_inv_id[x[0]],artists_inv_id[x[1]]\
                                         ,int(np.ceil(np.log(x[2])))], axis=1, arr=userartists)
np.savetxt(datafolder+"v1_log_user_artists.dat",new_userartists_log,delimiter='\t')
# Flatten the per-user friend lists into (user index, friend index) pairs.
new_list_friends_id=np.array([ (i, item_i) for i, item in enumerate(list_friends_id) for item_i in item])
np.savetxt(datafolder+"v1_user_friends.dat",new_list_friends_id,delimiter='\t',fmt='%i')
datafolder = '/home/eliezer/datasets/hetrec2011/lastfm/'
# LoadModify is a project-local helper; presumably it loads the raw files and
# builds the matrices/lists used below — verify in load_convert.py.
from load_convert import LoadModify
loader=LoadModify(datafolder)
loader.load()
R = loader.mat_users_artists_train.T   # artists x users training matrix
W = loader.mat_artists_tags            # artists x tags counts
S = loader.list_friends_id             # per-user friend index lists
datafolder = '/home/eliezer/datasets/hetrec2011/lastfm/transformed/'
W = np.array(loader.mat_artists_tags,dtype=int)
# Quick sanity checks of the loaded structures (Python 2 print statements).
print W.shape
W[0]
loader.list_test[0]
loader.list_train[0]
print loader.list_friends_id[0]
print list_friends_id[0]
# +
#import pickle as pk
#pk.dump(loader, open( loader.rootfolder+"loader.pk", "wb" ) )
# -
import load_convert
reload(load_convert)  # Python 2 builtin reload — pick up edits to the module
# +
# Export the train/test splits, friend pairs, and tag counts as the
# tab-separated integer files consumed by the C++ code.
datafolder = '/home/eliezer/datasets/hetrec2011/lastfm/transformed/'
np.savetxt(datafolder+"user_artist_rating.train",loader.list_train,fmt='%i',delimiter='\t')
np.savetxt(datafolder+"user_artist_rating.test",loader.list_test,fmt='%i',delimiter='\t')
new_list_friends_id=np.array([ (i, item_i) for i, item in enumerate(loader.list_friends_id) for item_i in item])
np.savetxt(datafolder+"v1_user_friends.dat",new_list_friends_id,delimiter='\t',fmt='%i')
W = np.array(loader.mat_artists_tags,dtype=int)
np.savetxt(datafolder+"tag_artist_count.dat",W,fmt='%i',delimiter='\t')
# keep a list with tags and index
tags_id_name=np.array(zip([loader.tags_inv_id[x] for x in loader.tags_id],loader.generate_artists_tags(loader.tags_id)))
pd.DataFrame(tags_id_name[:,1],index=tags_id_name[:,0],columns=['tag'])\
.to_csv(datafolder+"tag_id_name.dat",sep='\t',header=False)
# -
# !pwd
# +
#np.savetxt(datafolder+"tag_id_name.dat",tags_id_name,delimiter='\t')
# -
datafolder = '/home/eliezer/Dropbox/repo-phd/poissoncpp/datasets/hetrec2011/lastfm/'
from load_convert import LoadModify
loader=LoadModify(datafolder)
# load_save(p) presumably writes a random train/test split with train
# fraction p into a fresh "pXX_train_test_<rand>/" folder — verify in
# load_convert.py.  Output schema (from the comments below):
#tag_id tag_name {tag_id_name.dat}
# art_id tag_id tag_count {tag_artist_count.dat}
# user_id art_id user_art_count {user_artist_rating.test user_artist_rating.train}
# user_id friend_id {v1_user_friends.dat}
loader.load_save(0.8)
loader.load_save(0.80)
#tag_id tag_name {tag_id_name.dat}
# art_id tag_id tag_count {tag_artist_count.dat}
# user_id art_id user_art_count {user_artist_rating.test user_artist_rating.train}
# user_id friend_id {v1_user_friends.dat}
# Spot-check one generated test split.
userfriends = np.loadtxt("/home/eliezer/datasets/hetrec2011/lastfm/p85_train_test_9208/user_artist_rating.test", delimiter='\t',dtype=int)
userfriends[11685]
userfriends[:,0]
datafolder = '/home/eliezer/Dropbox/repo-phd/poissoncpp/datasets/hetrec2011/lastfm/'
from load_convert import LoadModify
loader=LoadModify(datafolder)
loader.load_save(0.85)
# Generate 30 random splits at 85% train and 30 at 90% train.
for x in range(30):
    datafolder = '/home/eliezer/Dropbox/repo-phd/poissoncpp/datasets/hetrec2011/lastfm/'
    loader=LoadModify(datafolder)
    loader.load_save(0.85)
for x in range(30):
    datafolder = '/home/eliezer/Dropbox/repo-phd/poissoncpp/datasets/hetrec2011/lastfm/'
    loader=LoadModify(datafolder)
    loader.load_save(0.90)
import numpy as np
# Preview of the split folder-name pattern (Python 2 print).
for x in range(10):
    print "p"+str(int(0.8*100))+"_train_test_"+str(np.random.randint(10000))+'/'
# !ls /home/eliezer/Dropbox/repo-phd/poissoncpp/datasets/hetrec2011/lastfm
|
scripts/load_convert.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %reset
import sys, platform, os
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pdb
from astropy.io import fits
from scipy import interpolate
import astropy.units as u
import pickle as pk
from astropy import constants as const
import notebook_calc_3d_to_2d_new as nc
from astropy.cosmology import FlatLambdaCDM
# H0 = 100 means distances below carry units of Mpc/h.
cosmo = FlatLambdaCDM(H0=100, Om0=0.25, Tcmb0=2.725, Ob0=0.0448)
h = 0.7
oneMpc_h = (((10 ** 6) / h) * (u.pc).to(u.m))  # one Mpc/h expressed in metres
import copy
#see http://ipython.readthedocs.io/en/stable/config/extensions/autoreload.html
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
# Cosmology functions
# get comoving distance
def get_Dcom_array(zarray, Omega_m):
    """Comoving distance D_C(z) for a flat LCDM cosmology, per redshift.

    Parameters
    ----------
    zarray : 1-D array of redshifts.
    Omega_m : matter density parameter; Omega_Lambda = 1 - Omega_m assumed.

    Returns
    -------
    ndarray of len(zarray): D_C(z) = int_0^z (c/H(z')) dz'.  With H0 = 100
    km/s/Mpc (as used in this notebook) the result is in Mpc/h.
    """
    Omega_L = 1. - Omega_m
    c = 3 * 10 ** 5  # speed of light, km/s
    Dcom_array = np.zeros(len(zarray))
    # FIX: `xrange` is Python-2-only; `range` behaves identically here and
    # keeps the helper portable.
    for j in range(len(zarray)):
        zf = zarray[j]
        res1 = sp.integrate.quad(
            lambda z: (c / 100) * (1 / (np.sqrt(Omega_L + Omega_m * ((1 + z) ** 3)))), 0, zf)
        Dcom_array[j] = res1[0]
    return Dcom_array
# get 100 times dimensionless hubble constant as a function of redshift
def get_Hz(zarray, Omega_m):
    """Hubble rate H(z) = 100 E(z) for flat LCDM (units of h * 100 km/s/Mpc)."""
    Omega_L = 1 - Omega_m
    evolution = np.sqrt(Omega_L + Omega_m * (1 + zarray) ** 3)  # E(z)
    return 100. * evolution
def get_zmean(zcent, nz_bin):
    """n(z)-weighted mean redshift on a uniformly spaced grid zcent."""
    step = zcent[1] - zcent[0]          # grid spacing (assumed uniform)
    weighted = np.sum(nz_bin * zcent * step)
    norm = np.sum(nz_bin * step)
    return weighted / norm
# +
# wtheta and gammat functions:
# coeff_sigcrit = ((const.c ** 2)/(4 * np.pi * const.G * (1.0 * u.Mpc))).to(u.kg/u.m**2)).value
# # get critical surface density (eq.9)
# def get_sigcrit_zl_zs(zl,zs, Om, Dcom_interp):
# chi_l = Dcom_interp(zl)
# chi_s = Dcom_interp(zs)
# DA_l = (chi_l/(1. + zl))
# DA_s = (chi_s / (1. + zs))
# DA_ls = ( (chi_s- chi_l) / (1. + zs))
# sigcrit = ((coeff_sigcrit * (DA_s)/(DA_l * DA_ls))
# return sigcrit
# get projected correlation function (eq.2)
def get_wprp_from_xi(rp, r_array, xi_array):
    """Projected correlation w_p(r_p) = 2 * int dr r xi(r)/sqrt(r^2 - rp^2).

    Assumes every entry of r_array exceeds rp (callers pre-select that range).
    """
    integrand = (r_array * xi_array) / np.sqrt(r_array ** 2 - rp ** 2)
    return 2. * sp.integrate.simps(integrand, r_array)
# # get w(theta) (eq 3)
# def get_wtheta_from_xi(theta, r_array,xi_array, z_array, ng, chi_array, dchi_dz):
# rp_array = chi_array*theta
# wprp_array = np.zeros(len(rp_array))
# for j in range(len(rp_array)):
# rp = rp_array[j]
# ind_inp = np.where(r_array > rp)[0]
# r_inp = r_array[ind_inp]
# xi_inp = xi_array[j,ind_inp]
# wprp_array[j] = get_wprp_from_xi(rp, r_inp, xi_inp)
# toint = ng**2 * wprp_array / dchi_dz
# val = sp.integrate.simps(toint,z_array)
# return val
# # get gammat for a single lens and source redshift (Eq.12)
# def get_Delta_wp(rp, r_array,xi_array, r_array_full,xi_array_full):
# rp_ti = np.logspace(-2,np.log10(rp),50)
# wprp_ti = np.zeros(len(rp_ti))
# for j in range(len(rp_ti)):
# rp_ti_h = rp_ti[j]
# ind_inp = np.where(r_array_full > rp_ti_h)[0]
# r_inp = r_array_full[ind_inp]
# xi_inp = xi_array_full[j,ind_inp]
# wprp_ti[j] = get_wprp_from_xi(rp_ti_h,r_inp, xi_inp)
# # wp_int = (2./ (rp**2) )*sp.integrate.simps(rp_ti * wprp_ti, rp_ti)
# wp_int = sp.integrate.simps(rp_ti * wprp_ti, rp_ti)/sp.integrate.simps(rp_ti , rp_ti)
# wp_val = get_wprp_from_xi(rp,r_array,xi_array)
# Delta_wp = wp_int - wp_val
# return Delta_wp
# get w(theta) (eq 3)
def get_wtheta_from_xi(theta, r_array, xi_mat, z_array, ng, chi_array, dchi_dz):
    """Angular clustering w(theta) from tabulated xi(z, r).

    For each redshift, w_p is evaluated at r_p = chi(z)*theta by integrating
    2*r*xi/sqrt(r^2 - rp^2) over r (contributions with r <= rp are zeroed),
    then w_p is integrated over z weighted by n(z)^2 / (dchi/dz).
    xi_mat has shape (len(z_array), len(r_array)).
    """
    n_z, n_r = len(z_array), len(r_array)
    rp_array = chi_array * theta
    r_mat = np.tile(r_array[None, :], (n_z, 1))
    rp_mat = np.tile(rp_array[:, None], (1, n_r))
    inv = 1. / (r_mat ** 2 - rp_mat ** 2)
    inv[inv <= 0] = 0.0  # drop r <= rp (kernel undefined there)
    kernel = r_mat * xi_mat * np.sqrt(inv)
    wprp = 2. * sp.integrate.simps(kernel, r_array)
    weights = ng ** 2 * wprp / dchi_dz
    return sp.integrate.simps(weights, z_array)
def get_wtheta_from_Pk(theta,k, Pk,z_array,ng, chi_array, dchi_dz):
rp_array = chi_array*theta
k_mat = np.tile(k.reshape(1,len(k)),(len(z_array),1))
wprp_array = np.zeros(len(rp_array))
rp_mat = np.tile(rp_array.reshape(len(z_array),1),(1,len(k)))
J0_mat = sp.special.jv(0, k_mat * rp_mat)
wprp_array = (sp.integrate.simps(k_mat * Pk * J0_mat, k)) / (2 * np.pi)
toint = ng**2 * wprp_array / dchi_dz
val = sp.integrate.simps(toint,z_array)
return val
# # (1/2*pi)*int dy y J_2(y*rp) P_gm(y) = Delta w_p(r_p)
# def get_Deltawp_from_Pk(rp_array,k, Pk):
# k_mat = np.tile(k.reshape(1,len(k)),(len(rp_array),1))
# rp_mat = np.tile(rp_array.reshape(len(rp_array),1),(1,len(k)))
# J2_mat = sp.special.jv(2, k_mat * rp_mat)
# Deltawprp_array = (sp.integrate.simps(k_mat * Pk * J2_mat, k)) / (2 * np.pi)
# return Deltawprp_array
# def get_Delta_wp(rp_array, r_array,xi_mat, z_array):
# r_mat = np.tile(r_array.reshape(1,len(r_array)),(len(z_array),1))
# rp_mat = np.tile(rp_array.reshape(len(z_array),1),(1,len(r_array)))
# invdenom1 = 1./(r_mat**2 - rp_mat**2)
# ind = np.where(invdenom1 <= 0)
# invdenom1[ind] = 0.0
# integrand = r_mat * xi_mat*(np.sqrt(invdenom1))
# wprp_array = 2.*sp.integrate.simps(integrand,r_array)
# wprp_interp = interpolate.interp1d(rp_array,np.log(wprp_array),fill_value='extrapolate')
# wprp_mean = np.zeros(len(rp_array))
# for j in range(len(rp_array)):
# rp_ti = np.logspace(-3,np.log10(rp_array[j]),5000)
# wprp_ti = np.exp(wprp_interp(rp_ti))
# wprp_mean[j] = sp.integrate.simps(rp_ti * wprp_ti, rp_ti)/sp.integrate.simps(rp_ti , rp_ti)
# Delta_wp = wprp_mean - wprp_array
# return Delta_wp
# +
# setup cosmological calculations
# Distances/rates evaluated on the shared redshift grid from the nc module.
z_array = nc.z_array
chi_array = get_Dcom_array(z_array, cosmo.Om0)   # comoving distance (Mpc/h, since H0=100)
DA_array = chi_array / (1. + z_array)            # angular-diameter distance
dchi_dz_array = (const.c.to(u.km / u.s)).value / (get_Hz(z_array, cosmo.Om0))  # dchi/dz = c/H(z)
# rhom_z = cosmo.Om0 * ((1 + z_array)**3) * (cosmo.critical_density0.to(u.kg/u.m**3)).value
bin_lens = nc.bins_to_fit[0]   # lens tomographic bin analysed below
bin_source = nc.bin_source     # source tomographic bin
# +
# get n(z) of sources and lenses
# Lens n(z) from the spec-z extension of the MICE 2pt file; source n(z) from
# the true-shear extension.  All are re-interpolated (in log, with a 1e-40
# floor to keep the log finite) onto the common z_array grid.
df = fits.open('twopt_3d_to_2d_MICE.fits')
df_zmid = df['nz_pos_zspec'].data['Z_MID']
df_bin = df['nz_pos_zspec'].data['BIN'+ str(bin_lens)]
# ng_lensz, nm_lensz,z_lensz = nc.get_nz_lens()
ng_lensz,z_lensz = df_bin, df_zmid
z_lensz_pz, ng_lensz_pz = nc.get_nz_lens_2pt_pz()
z_lensz_specz, ng_lensz_specz = nc.get_nz_lens_2pt_specz()
df_zmid_s = df['nz_shear_true'].data['Z_MID']
df_bin_s = df['nz_shear_true'].data['BIN'+ str(bin_source)]
z_sourcez, ng_sourcez = df_zmid_s, df_bin_s
ng_interp = interpolate.interp1d(z_lensz, np.log(ng_lensz + 1e-40), fill_value='extrapolate')
ng_array_lens = np.exp(ng_interp(z_array))
ng_interp = interpolate.interp1d(z_lensz_pz, np.log(ng_lensz_pz + 1e-40), fill_value='extrapolate')
ng_array_lens_pz = np.exp(ng_interp(z_array))
# NOTE(review): the spec-z n(z) values below are interpolated against the
# photo-z grid z_lensz_pz, not z_lensz_specz — confirm the grids are
# identical or this is a copy-paste slip.
ng_interp = interpolate.interp1d(z_lensz_pz, np.log(ng_lensz_specz + 1e-40), fill_value='extrapolate')
ng_array_lens_specz = np.exp(ng_interp(z_array))
# nm_interp = interpolate.interp1d(z_lensz, np.log(nm_lensz + 1e-40), fill_value='extrapolate')
# nm_array_lens = np.exp(nm_interp(z_array))
ng_interp = interpolate.interp1d(z_sourcez, np.log(ng_sourcez + 1e-40), fill_value='extrapolate')
ng_array_source = np.exp(ng_interp(z_array))
# Mean lens redshift and the first grid index above it.
zmean_bin = get_zmean(z_array, ng_array_lens)
zmean_ind = np.where(z_array > zmean_bin)[0][0]
print zmean_bin, z_array[zmean_ind]
# +
# Overlay the lens and source redshift distributions on the common grid.
fig, ax_all = plt.subplots(1,1, figsize = (8,6))
ax = ax_all
ax.plot(z_array,ng_array_lens, color = 'r',lw=2, marker='',linestyle='-',label=r'Lenses 3D')
ax.plot(z_array,ng_array_lens_pz, color = 'orange',lw=2, marker='',linestyle='-',label=r'Lenses Photoz')
# ax.plot(z_array,ng_array_lens_specz, color = 'k',lw=2, marker='',linestyle='-',label=r'Lenses Specz')
ax.plot(z_array,ng_array_source, color = 'b',lw=2, marker='',linestyle='-',label=r'Sources')
ax.set_ylabel(r'n(z)', size = 22)
ax.set_xlabel(r'z', size = 22)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
ax.legend(fontsize=20)
plt.tight_layout()
# plt.savefig('nz_comp.png')
# +
# Log-log plot of the lowest-redshift slice of the galaxy-galaxy correlation
# function xi_gg(r).
fig, ax_all = plt.subplots(1,1, figsize = (8,6))
ax = ax_all
ax.plot(nc.r_array,nc.xi_gg[0,:], color = 'r',lw=2, marker='',linestyle='-',label=r'Lenses 3D')
# ax.plot(z_array,ng_array_lens_pz, color = 'orange',lw=2, marker='',linestyle='-',label=r'Lenses Photoz')
# # ax.plot(z_array,ng_array_lens_specz, color = 'k',lw=2, marker='',linestyle='-',label=r'Lenses Specz')
# ax.plot(z_array,ng_array_source, color = 'b',lw=2, marker='',linestyle='-',label=r'Sources')
ax.set_xscale('log')
ax.set_yscale('log')
# FIX: axis labels said n(z) / z (copy-pasted from the previous cell) while
# the data plotted are xi_gg against r.
ax.set_ylabel(r'$\xi_{gg}(r)$', size = 22)
ax.set_xlabel(r'$r$ [Mpc/h]', size = 22)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
ax.legend(fontsize=20)
plt.tight_layout()
# plt.savefig('nz_comp.png')
# +
# Calculate w(theta)
# 20 logarithmic angular bins from 2.5 to 250 arcmin.
theta_arcmin = np.logspace(np.log10(2.5),np.log10(250),20)
theta_rad = theta_arcmin * (1./60.) * (np.pi/180.)
# r_array_hres = np.logspace(np.log10(np.min(nc.r_array)),3,5000)
r_array_hres = np.logspace(-2.8,3.0,2000)   # high-resolution r grid for the projection integrals
xi_hres_th = np.zeros((len(z_array),len(r_array_hres)))
xi_hres_data = np.zeros((len(z_array),len(r_array_hres)))
# Build a "data" xi by rescaling xi_mm with the measured ratio (first 20
# entries of data_obs_new), interpolated in log10(r).
xi_data_interp = interpolate.interp1d(np.log10(nc.r_obs_new[0]), np.log10((nc.data_obs_new[0:20])), fill_value='extrapolate')
xi_data_nc = 10**(xi_data_interp(np.log10(nc.r_array)))
xi_data = (np.tile( (xi_data_nc), (len(z_array),1)))*nc.xi_mm
# Re-grid both theory and data xi onto the high-resolution r grid, per z.
for j in range(len(z_array)):
    xi_interp = interpolate.interp1d(np.log10(nc.r_array), (nc.xi_gg[j,:]), fill_value='extrapolate')
    xi_hres_th[j,:] = (xi_interp(np.log10(r_array_hres)))
    xi_interp = interpolate.interp1d(np.log10(nc.r_array), (xi_data[j,:]), fill_value='extrapolate')
    xi_hres_data[j,:] = (xi_interp(np.log10(r_array_hres)))
wtheta_th = np.zeros(len(theta_rad)) #bestfit theory w(theta)
# wtheta_th_pz = np.zeros(len(theta_rad)) #bestfit theory w(theta)
wtheta_data = np.zeros(len(theta_rad)) #data w(theta)
wtheta_th_pk = np.zeros(len(theta_rad))
for j in range(len(theta_rad)):
    print j
    wtheta_th[j] = get_wtheta_from_xi(theta_rad[j], r_array_hres, xi_hres_th, z_array, ng_array_lens, chi_array, dchi_dz_array)
    # wtheta_th_pz[j] = get_wtheta_from_xi(theta_rad[j], r_array_hres, xi_hres_th, z_array, ng_array_lens_pz, chi_array, dchi_dz_array)
    wtheta_data[j] = get_wtheta_from_xi(theta_rad[j], r_array_hres, xi_hres_data, z_array, ng_array_lens, chi_array, dchi_dz_array)
    # wtheta_th_pk[j] = get_wtheta_from_Pk(theta_rad[j], nc.k_hres, nc.Pk_gg, z_array, ng_array_lens, chi_array, dchi_dz_array)
# +
# # Calculate w(theta) by powerspec directly:
# def get_wtheta_from_Pk_limber(theta_rad, k_array, Pk_array, z_array, ng_array, chi_array, dchi_dz_array):
# j0_integral = np.zeros(len(chi_array))
# for j in range(len(chi_array)):
# chi = chi_array[j]
# j0_integrand = k_array * Pk_array * sp.special.jv(0, k_array * (chi*theta_rad))
# j0_integral[j] = (1./(2.*np.pi)) * sp.integrate.simps(j0_integrand,k_array)
# z_integral = (ng_array ** 2) * j0_integral / dchi_dz_array
# wt_val = sp.integrate.simps(z_integral, z_array)
# return wt_val
# wtheta_th_lim = np.zeros(len(theta_rad))
# for j in range(len(theta_rad)):
# wtheta_th_lim[j] = get_wtheta_from_Pk_limber(theta_rad[j], nc.k_hres, nc.Pk_gg, z_array, ng_array_lens, chi_array, dchi_dz_array)
# +
# wt_mice_des = np.loadtxt('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/wt_mice_des_y1_3_3.txt', delimiter=',')
# theta_mice_des, theta_wt_mice_des = wt_mice_des[:,0],wt_mice_des[:,1]
# wt_des = np.loadtxt('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/wt_des_y1_3_3.txt', delimiter=',')
# theta_des, theta_wt_des = wt_des[:,0],wt_des[:,1]
# +
# fig, ax_all = plt.subplots(1,1, figsize = (8,6))
# ax = ax_all
# ax.plot(theta_arcmin,theta_arcmin*wtheta_th, color = 'r',lw=2, marker='',linestyle='-',label=r'with specz')
# ax.plot(theta_arcmin,theta_arcmin*wtheta_th_pz, color = 'b',lw=2, marker='',linestyle='-',label=r'with photoz')
# ax.plot(theta_des,theta_wt_des, color = 'k',lw=2, marker='',linestyle='--',label=r'DES Y1')
# ax.plot(theta_mice_des,theta_wt_mice_des, color = 'k',lw=2, marker='',linestyle=':',label=r'MICE Y1')
# ax.set_xscale('log')
# # ax.set_yscale('log')
# ax.set_ylabel(r'$\theta \ w(\theta)$', size = 22)
# ax.set_xlabel(r'$\theta$', size = 22)
# ax.tick_params(axis='both', which='major', labelsize=15)
# ax.tick_params(axis='both', which='minor', labelsize=15)
# ax.legend(fontsize=20, frameon=False)
# ax.set_xlim(3,100)
# ax.set_ylim(0.2,2.2)
# plt.tight_layout()
# plt.savefig('wtheta_specz_vs_photoz.png')
# +
# (1/2*pi)*int dy y J_2(y*rp) P_gm(y) = Delta w_p(r_p)
def get_Deltawp_from_Pk(rp_array, k, Pk):
    """Excess projected correlation via a J2 Hankel transform of P(k).

    Delta w_p(r_p) = (1/2pi) int dk k P(k) J2(k r_p), evaluated at every
    entry of rp_array.
    """
    n_rp = len(rp_array)
    k_mat = np.tile(k[None, :], (n_rp, 1))
    rp_mat = np.tile(rp_array[:, None], (1, len(k)))
    bessel2 = sp.special.jv(2, k_mat * rp_mat)
    return sp.integrate.simps(k_mat * Pk * bessel2, k) / (2 * np.pi)
def get_Delta_wp(rp_array, r_array,xi_mat):
r_mat = np.tile(r_array.reshape(1,len(r_array)),(len(rp_array),1))
rp_mat = np.tile(rp_array.reshape(len(rp_array),1),(1,len(r_array)))
invdenom1 = 1./(r_mat**2 - rp_mat**2)
ind = np.where(invdenom1 <= 0)
invdenom1[ind] = 0.0
integrand = r_mat * xi_mat*(np.sqrt(invdenom1))
wprp_array = 2.*sp.integrate.simps(integrand,r_array)
wprp_interp = interpolate.interp1d(rp_array,np.log(wprp_array),fill_value='extrapolate')
wprp_mean = np.zeros(len(rp_array))
for j in range(len(rp_array)):
rp_ti = np.logspace(-3.5,np.log10(rp_array[j]),5000)
wprp_ti = np.exp(wprp_interp(rp_ti))
wprp_mean[j] = sp.integrate.simps(rp_ti * wprp_ti, rp_ti)/sp.integrate.simps(rp_ti , rp_ti)
Delta_wp = wprp_mean - wprp_array
return Delta_wp
# +
# Calculate Sigma_crit
# when lens redshift > source redshift, set sigma_crit to high value so that gamma_t is zero
# Dcom_interp = interpolate.interp1d(z_array,chi_array)
# invsig_crit_rhom = np.zeros((len(z_array),len(z_array)))
# # invcoeff_sigcrit = ((const.c ** 2)/(4 * np.pi * const.G * (1.0 * u.Mpc/h))).to(u.kg/u.m**2).value
# invcoeff_sigcrit = 3*(100**2) * cosmo.Om0 /(2. * ((3*10**5) **2 ) )
# z_lmat = np.tile(z_array.reshape(len(z_array),1), (1,len(z_array)) )
# z_smat = np.tile(z_array.reshape(1,len(z_array)), (len(z_array),1) )
# chi_lmat = np.tile(chi_array.reshape(len(z_array),1), (1,len(z_array)) )
# chi_smat = np.tile(chi_array.reshape(1,len(z_array)), (len(z_array),1) )
# DA_l = (chi_lmat*(1. + z_lmat))
# DA_s = (chi_smat * (1. + z_smat))
# DA_ls = ( (chi_smat- chi_lmat) * (1. + z_smat))
# sig_crit_mat = (coeff_sigcrit * (DA_s)/(DA_l * DA_ls))
# ind_lz = np.where(DA_ls <= 0)
# sig_crit_mat[ind_lz] = 1e180
# Vectorized lensing-efficiency kernel: rows index lens z, columns source z.
chi_lmat = np.tile(chi_array.reshape(len(z_array),1), (1,len(z_array)) )
chi_smat = np.tile(chi_array.reshape(1,len(z_array)), (len(z_array),1) )
num = chi_smat - chi_lmat
ind_lzero = np.where(num <= 0)
num[ind_lzero] = 0   # sources in front of (or at) the lens contribute nothing
# Integrate n_source(z_s) * (chi_s - chi_l)/chi_s over the source redshifts.
ng_array_source_rep = np.tile(ng_array_source.reshape(1,len(z_array)), (len(z_array), 1))
int_sourcez = sp.integrate.simps(ng_array_source_rep * (num/ chi_smat), z_array)
coeff_ints = 3*(100**2) * cosmo.Om0 /(2. * ((3*10**5) **2 ) )   # 3 H0^2 Om / (2 c^2)
Is = coeff_ints * chi_array * (1. + z_array) * int_sourcez      # per-lens-z efficiency kernel
# +
# # Do the integral over the source redshift, last integral in Eq.16
# ng_array_source_rep = np.tile(ng_array_source.reshape(1,len(z_array)), (len(z_array), 1))
# int_sourcez = sp.integrate.simps(ng_array_source_rep / sig_crit_mat, z_array)
# +
# Angular bins and high-resolution r grid for the gamma_t integrals.
theta_arcmin = np.logspace(np.log10(2.5),np.log10(250),20)
theta_rad = theta_arcmin * (1./60.) * (np.pi/180.)
r_array_hres = np.logspace(-2.8,3,2000)
xi_gm_hres_th = np.zeros((len(z_array),len(r_array_hres)))
xi_gm_hres_data = np.zeros((len(z_array),len(r_array_hres)))
# xi_gm_data = (np.tile( (nc.data_obs_new[20:40] ), (len(z_array),1)))*nc.xi_mm
# "Data" galaxy-matter xi: rescale xi_mm by the measured ratio (entries
# 20:40 of data_obs_new), interpolated in log10(r).
xi_gm_data_interp = interpolate.interp1d(np.log10(nc.r_obs_new[1]), np.log10(nc.data_obs_new[20:40]), fill_value='extrapolate')
xi_gm_data_nc = 10**(xi_gm_data_interp(np.log10(nc.r_array)))
xi_gm_data = (np.tile( (xi_gm_data_nc), (len(z_array),1)))*nc.xi_mm
# Re-grid theory and data xi_gm onto the high-resolution r grid, per z.
for j in range(len(z_array)):
    xi_interp = interpolate.interp1d(np.log10(nc.r_array), (nc.xi_gm[j,:]), fill_value='extrapolate')
    xi_gm_hres_th[j,:] = (xi_interp(np.log10(r_array_hres)))
    xi_interp = interpolate.interp1d(np.log10(nc.r_array), (xi_gm_data[j,:]), fill_value='extrapolate')
    xi_gm_hres_data[j,:] = (xi_interp(np.log10(r_array_hres)))
# +
# Calculate gamma_t
# (1/2*pi)*int dy y J_2(y*rp) P_gm(y) = Delta w_p(r_p)
# gtheta_data = np.zeros(len(theta_rad)) #bestfit theory gamma_t
# gtheta_th = np.zeros(len(theta_rad)) #data gamma_t
# # gtheta_th_pk = np.zeros(len(theta_rad)) #data gamma_t
# # gtheta_th_pz = np.zeros(len(theta_rad))
# # Deltawp_th_pk = np.zeros(len(z_array))
# for j1 in range(len(theta_rad)):
# print j1
# rp_array = chi_array*theta_rad[j1]
# # Deltawp_data = get_Delta_wp(rp_array, r_array_hres, xi_gm_hres_data, z_array)
# Deltawp_th = get_Delta_wp(rp_array, r_array_hres, xi_gm_hres_th)
# # Deltawp_th_pk = get_Deltawp_from_Pk(rp_array,nc.k_hres, nc.Pk_gm)
# # gtheta_data[j1] = sp.integrate.simps(ng_array_lens *Is * Deltawp_data, z_array)
# gtheta_th[j1] = sp.integrate.simps(ng_array_lens *Is* Deltawp_th , z_array)
# # gtheta_th_pk[j1] = sp.integrate.simps(ng_array_lens * Is * Deltawp_th_pk , z_array)
# # gtheta_th_pz[j1] = sp.integrate.simps(rhom_z * ng_array_lens_pz * int_sourcez * Deltawp_th * oneMpc_h, z_array)
# +
def get_Delta_wp_from_xi(rp_array, r_array,xi_mat):
r_mat = np.tile(r_array.reshape(1,len(r_array)),(len(rp_array),1))
rp_mat = np.tile(rp_array.reshape(len(rp_array),1),(1,len(r_array)))
invdenom1 = 1./(r_mat**2 - rp_mat**2)
ind = np.where(invdenom1 <= 0)
invdenom1[ind] = 0.0
integrand = r_mat * xi_mat*(np.sqrt(invdenom1))
wprp_array = 2.*sp.integrate.simps(integrand,r_array)
wprp_interp = interpolate.interp1d(rp_array,np.log(wprp_array),fill_value='extrapolate')
wprp_mean = np.zeros(len(rp_array))
for j in range(len(rp_array)):
# if np.mod(j,100) == 0:
# print j
rp_ti = np.logspace(-3.5,np.log10(rp_array[j]),100)
r_mat = np.tile(r_array.reshape(1,len(r_array)),(len(rp_ti),1))
rp_ti_mat = np.tile(rp_ti.reshape(len(rp_ti),1),(1,len(r_array)))
xi_ti_mat = np.tile(xi_mat[j,:].reshape(1,len(r_array)),(len(rp_ti),1))
invdenom1 = 1./(r_mat**2 - rp_ti_mat**2)
ind = np.where(invdenom1 <= 0)
invdenom1[ind] = 0.0
integrand = 2.*r_mat * rp_ti_mat * xi_ti_mat*(np.sqrt(invdenom1))
integrand_intr = sp.integrate.simps(integrand,r_array)
wprp_mean[j] = sp.integrate.simps(integrand_intr, rp_ti)/sp.integrate.simps(rp_ti , rp_ti)
Delta_wp = wprp_mean - wprp_array
return Delta_wp
# Project the 3D correlation functions into gamma_t(theta) for both the
# best-fit theory and the data-derived xi_gm.
gtheta_data = np.zeros(len(theta_rad))  # gamma_t from the data-derived xi_gm
gtheta_th = np.zeros(len(theta_rad))    # gamma_t from the best-fit theory xi_gm
for j1 in range(len(theta_rad)):
    print(j1)  # progress indicator; was Python-2 `print j1`, a SyntaxError on Python 3
    # Projected radii sampled by this angular separation at each lens distance.
    rp_array = chi_array*theta_rad[j1]
    Deltawp_data = get_Delta_wp_from_xi(rp_array, r_array_hres, xi_gm_hres_data)
    Deltawp_th = get_Delta_wp_from_xi(rp_array, r_array_hres, xi_gm_hres_th)
    # Integrate over the lens n(z), weighted by the lensing-efficiency factor Is.
    gtheta_data[j1] = sp.integrate.simps(ng_array_lens *Is * Deltawp_data, z_array)
    gtheta_th[j1] = sp.integrate.simps(ng_array_lens *Is* Deltawp_th , z_array)
# +
# # wt_mice_des = np.loadtxt('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/wt_mice_des_y1_3_3.txt', delimiter=',')
# # theta_mice_des, theta_wt_mice_des = wt_mice_des[:,0],wt_mice_des[:,1]
# # wt_des = np.loadtxt('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/wt_des_y1_3_3.txt', delimiter=',')
# # theta_des, theta_wt_des = wt_des[:,0],wt_des[:,1]
# wt_cosmosis = fits.open('twopt_3d_to_2d_MICE.fits')
# # ind_sel = np.where( (wt_cosmosis['wtheta'].data['BIN1'] == 3) & (wt_cosmosis['wtheta'].data['BIN2']==3))[0]
# ind_sel = np.where( (wt_cosmosis['wtheta'].data['BIN1'] == 2) & (wt_cosmosis['wtheta'].data['BIN2'] == 2))[0]
# wtc = wt_cosmosis['wtheta'].data['VALUE'][ind_sel]
# wta = wt_cosmosis['wtheta'].data['ANG'][ind_sel]
# wtc_interp = interpolate.interp1d(wta,wtc,fill_value='extrapolate')
# fig, ax_all = plt.subplots(1,2, figsize = (14,6))
# for j in range(2):
# ax = ax_all[j]
# if j==0:
# ax.plot(nc.r_obs_new[0],nc.xi_gg[0], color = 'r', lw=2,marker='',linestyle='-',label=r'Theory')
# ax.plot(nc.r_obs_new[0],nc.data_obs_new[0:20]*nc.xi_mm[0], color = 'b',lw=2, marker='',linestyle='-',label=r'Data')
# ax.axvline(8., lw=2, color='k',label='Y1 Scale Cut')
# ax.axvspan(nc.r_obs_new[0][0], 8, alpha=0.1, color='black')
# ax.set_yscale('log')
# ax.set_xscale('log')
# ax.set_xlim(nc.r_obs_new[0][0],nc.r_obs_new[0][-1])
# # ax.set_ylim(1.0,12.0)
# ax.grid()
# # ax.set_xlabel(r'$\theta$', size = 18)
# ax.set_ylabel(r'$\xi^{gg}(r)$', size = 22)
# ax.set_xlabel(r'$r$ (Mpc/h)', size = 22)
# ax.tick_params(axis='both', which='major', labelsize=15)
# ax.tick_params(axis='both', which='minor', labelsize=15)
# else:
# ax.plot(theta_arcmin,theta_arcmin*wtheta_th, color = 'r',lw=2, marker='',linestyle='-',label=r'Theory')
# ax.plot(theta_arcmin,theta_arcmin*wtheta_data, color = 'b',lw=2, marker='',linestyle='-',label=r'Data')
# ax.axvline(20., lw=2, color='k',label='Y1 Scale Cut')
# # ax.plot(theta_des,theta_wt_des, color = 'k',lw=2, marker='',linestyle='--',label=r'DES Y1')
# # ax.plot(theta_mice_des,theta_wt_mice_des, color = 'k',lw=2, marker='',linestyle=':',label=r'MICE Y1')
# ax.axvspan(theta_arcmin[0], 20, alpha=0.1, color='k')
# ax.set_xlim(theta_arcmin[0],theta_arcmin[-1])
# # ax.set_yscale('log')
# ax.set_xscale('log')
# ax.set_xlim(3,100)
# ax.set_ylim(0.2,2.4)
# # ax.set_ylim(1.0,12.0)
# ax.grid()
# # ax.set_xlabel(r'$\theta$', size = 18)
# ax.set_ylabel(r'$\theta \ w(\theta)$', size = 22)
# ax.set_xlabel(r'$\theta$ (arcmin)', size = 22)
# ax.legend(fontsize=17,loc='upper left')
# ax.tick_params(axis='both', which='major', labelsize=15)
# ax.tick_params(axis='both', which='minor', labelsize=15)
# plt.tight_layout()
# # plt.savefig('xigg_3d_to_2d_comp_cy.png')
# +
# Compare the lens n(z) stored in the two-point FITS file against this
# notebook's lens n(z) over the redshift range of the bin.
df = fits.open('twopt_3d_to_2d_MICE.fits')
df_zmid = df['nz_pos_zspec'].data['Z_MID']
# NOTE(review): the variable is named df_bin3 but reads column BIN4 -- confirm
# which tomographic bin is intended here.
df_bin3 = df['nz_pos_zspec'].data['BIN4']
fig, ax_all = plt.subplots(1,1, figsize = (8,6))
ax = ax_all
# NOTE(review): both curves carry the same legend label 'Lenses 3D'.
ax.plot(df_zmid,df_bin3, color = 'r',lw=2, marker='',linestyle='-',label=r'Lenses 3D')
ax.plot(z_array,ng_array_lens, color = 'b',lw=2, marker='',linestyle='-',label=r'Lenses 3D')
ax.set_xlim(0.3,0.8)
ax.set_ylabel(r'n(z)', size = 22)
ax.set_xlabel(r'z', size = 22)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
ax.legend(fontsize=20)
plt.tight_layout()
# -
# Notebook cell output: display the currently selected lens bin index.
bin_lens
# +
# output_data_gg = pk.load(open('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/wtheta_3dcorr_r_1.0_250.0_nr_20_zbin_3_jk_True_njkradec_180_njkz_1_dsg_1.pk', "rb"))
# Ratio of the cosmosis w(theta) prediction to this notebook's 3D->2D w(theta)
# for the lens-bin auto-correlation; a ratio near 1 validates the projection.
wt_cosmosis = fits.open('twopt_3d_to_2d_MICE.fits')
# ind_sel = np.where( (wt_cosmosis['wtheta'].data['BIN1'] == 3) & (wt_cosmosis['wtheta'].data['BIN2']==3))[0]
# Rows of the wtheta table belonging to the selected lens bin (auto-correlation).
ind_sel = np.where( (wt_cosmosis['wtheta'].data['BIN1'] == bin_lens) & (wt_cosmosis['wtheta'].data['BIN2'] == bin_lens))[0]
wtc = wt_cosmosis['wtheta'].data['VALUE'][ind_sel]
wta = wt_cosmosis['wtheta'].data['ANG'][ind_sel]
# Resample the cosmosis curve onto this notebook's theta grid before taking the ratio.
wtc_interp = interpolate.interp1d(wta,wtc,fill_value='extrapolate')
fig, ax_all = plt.subplots(1,1, figsize = (8,6))
ax = ax_all
ax.plot(theta_arcmin,wtc_interp(theta_arcmin) /wtheta_th, color = 'r',lw=2, marker='',linestyle='-', label='With xi')
# ax.plot(theta_arcmin,wtc_interp(theta_arcmin)/wtheta_th_pk, color = 'b',lw=2, marker='',linestyle='-', label='With P(k)')
# Y1 scale cut at 20 arcmin; the shaded band marks scales excluded from fits.
ax.axvline(20., lw=2, color='k')
# ax.errorbar(output_data_gg['r_gg'], output_data_gg['r_gg'] * output_data_gg['xi_gg_full'], output_data_gg['r_gg'] *output_data_gg['sig'], color = 'k',lw=2, marker='',linestyle='--',label=r'Data 2D')
# ax.plot(theta_arcmin,*theta_arcmin, color = 'r',lw=2, marker='',linestyle='--',label=r'Theory Cosmosis' )
ax.axvspan(theta_arcmin[0], 20, alpha=0.1, color='k')
ax.set_xlim(theta_arcmin[0],theta_arcmin[-1])
# ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(3,100)
# ax.set_ylim(0.2,2.4)
# ax.set_ylim(1.0,12.0)
ax.grid()
# ax.set_xlabel(r'$\theta$', size = 18)
ax.set_ylabel(r'$w^{\rm cosmosis}(\theta)/w^{\rm Theory}(\theta)$', size = 22)
ax.set_xlabel(r'$\theta$ (arcmin)', size = 22)
ax.legend(fontsize=17)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
plt.tight_layout()
# plt.savefig('xigg_ratio_bin'+ str(bin_lens) +'.png')
# +
# output_data_gg = pk.load(open('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/wtheta_3dcorr_r_1.0_250.0_nr_20_zbin_3_jk_True_njkradec_180_njkz_1_dsg_1.pk', "rb"))
# Same ratio test as above, but for gamma_t of the (bin_lens, bin_source) pair.
wt_cosmosis = fits.open('twopt_3d_to_2d_MICE.fits')
ind_sel = np.where( (wt_cosmosis['gammat'].data['BIN1'] == bin_lens) & (wt_cosmosis['gammat'].data['BIN2']== bin_source))[0]
gtc = wt_cosmosis['gammat'].data['VALUE'][ind_sel]
gta = wt_cosmosis['gammat'].data['ANG'][ind_sel]
# Resample the cosmosis gamma_t onto this notebook's theta grid.
gtc_interp = interpolate.interp1d(gta,gtc,fill_value='extrapolate')
# NOTE(review): c_by_theta2 is computed here but never used in this cell --
# confirm whether it was meant to enter the ratio below.
c_by_theta2 = -0.012/(theta_arcmin**2)
fig, ax_all = plt.subplots(1,1, figsize = (8,6))
ax = ax_all
ax.plot(theta_arcmin,gtc_interp(theta_arcmin)/(gtheta_th), color = 'r',lw=2, marker='',linestyle='-',label='With xi')
# ax.plot(theta_arcmin,gtc_interp(theta_arcmin)/(gtheta_th_pk), color = 'b',lw=2, marker='',linestyle='-', label='With P(k)')
# Y1 scale cut at 20 arcmin; shaded band is excluded from fits.
ax.axvline(20., lw=2, color='k')
ax.axvspan(theta_arcmin[0], 20, alpha=0.1, color='k')
ax.set_xlim(theta_arcmin[0],theta_arcmin[-1])
# ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(3,100)
# ax.set_ylim(0.2,2.4)
# ax.set_ylim(1.0,12.0)
ax.grid()
# ax.set_xlabel(r'$\theta$', size = 18)
ax.set_ylabel(r'$\gamma_t^{\rm cosmosis}(\theta)/\gamma_t^{\rm Theory}(\theta)$', size = 22)
ax.set_xlabel(r'$\theta$ (arcmin)', size = 22)
ax.legend(fontsize=17)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
plt.tight_layout()
# plt.savefig('xigm_ratio_bin'+ str(bin_lens) + '_' + str(bin_source) +'.png')
# -
# List the HDUs of the two-point file (notebook cell output).
wt_cosmosis.info()
# +
# output_data_gg = pk.load(open('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/wtheta_3dcorr_r_1.0_250.0_nr_20_zbin_3_jk_True_njkradec_180_njkz_1_dsg_1.pk', "rb"))
# Two-panel comparison: (left) 3D xi_gg theory vs data at the mean-redshift index;
# (right) the projected w(theta) with error bars from the file's covariance.
wt_cosmosis = fits.open('/home/shivam/Research/cosmosis/y3kp-bias-model/3d_stats/3d_to_2d/twopt_3d_to_2d_MICE_wcov.fits')
# wt_cosmosis = fits.open('twopt_3d_to_2d_MICE.fits')
# ind_sel = np.where( (wt_cosmosis['wtheta'].data['BIN1'] == 3) & (wt_cosmosis['wtheta'].data['BIN2']==3))[0]
ind_sel = np.where( (wt_cosmosis['wtheta'].data['BIN1'] == bin_lens) & (wt_cosmosis['wtheta'].data['BIN2']==bin_lens))[0]
# Slice the joint covariance down to the w(theta) part, then to the selected bin.
# NOTE(review): assumes rows/cols 400:500 of COVMAT are the wtheta block --
# confirm against the file's covariance ordering.
wt_cov = wt_cosmosis['COVMAT'].data[400:500,400:500]
wt_cov_bin = wt_cov[:,ind_sel][ind_sel,:]
wt_sig_bin = np.sqrt(np.diag(wt_cov_bin))  # 1-sigma error bars from the diagonal
fig, ax_all = plt.subplots(1,2, figsize = (14,6))
for j in range(2):
    ax = ax_all[j]
    if j==0:
        # Left panel: 3D galaxy-galaxy correlation function, theory vs data.
        ax.plot(nc.r_array,nc.xi_gg[zmean_ind,:], color = 'r', lw=2,marker='',linestyle='-',label=r'Theory')
        ax.plot(nc.r_array,xi_data[zmean_ind,:], color = 'b',lw=2, marker='',linestyle='-',label=r'Data')
        ax.axvline(8., lw=2, color='k',label='Y1 Scale Cut')
        ax.axvspan(nc.r_obs_new[0][0], 8, alpha=0.1, color='black')
        ax.set_yscale('log')
        ax.set_xscale('log')
        ax.set_xlim(nc.r_obs_new[0][0],nc.r_obs_new[0][-1])
        ax.set_ylim(1e-2,1e2)
        ax.grid()
        # ax.set_xlabel(r'$\theta$', size = 18)
        ax.set_ylabel(r'$\xi^{gg}(r)$', size = 22)
        ax.set_xlabel(r'$r$ (Mpc/h)', size = 22)
        ax.tick_params(axis='both', which='major', labelsize=15)
        ax.tick_params(axis='both', which='minor', labelsize=15)
    else:
        # Right panel: projected w(theta) from this notebook vs cosmosis.
        ax.plot(theta_arcmin,theta_arcmin*wtheta_th, color = 'r',lw=2, marker='',linestyle='-',label=r'Theory')
        ax.errorbar(theta_arcmin,theta_arcmin*wtheta_data, theta_arcmin*wt_sig_bin, color = 'b',lw=2, marker='',linestyle='-',label=r'Data 3D to 2D')
        ax.axvline(20., lw=2, color='k')
        # ax.errorbar(output_data_gg['r_gg'], output_data_gg['r_gg'] * output_data_gg['xi_gg_full'], output_data_gg['r_gg'] *output_data_gg['sig'], color = 'k',lw=2, marker='',linestyle='--',label=r'Data 2D')
        ax.plot(wt_cosmosis['wtheta'].data['ANG'][ind_sel],wt_cosmosis['wtheta'].data['ANG'][ind_sel]*wt_cosmosis['wtheta'].data['VALUE'][ind_sel], color = 'r',lw=2, marker='',linestyle='--',label=r'Theory Cosmosis' )
        ax.axvspan(theta_arcmin[0], 20, alpha=0.1, color='k')
        ax.set_xlim(theta_arcmin[0],theta_arcmin[-1])
        # ax.set_yscale('log')
        ax.set_xscale('log')
        ax.set_xlim(3,250)
        # ax.set_ylim(0.2,2.4)
        # ax.set_ylim(1.0,12.0)
        ax.grid()
        # ax.set_xlabel(r'$\theta$', size = 18)
        ax.set_ylabel(r'$\theta \ w(\theta)$', size = 22)
        ax.set_xlabel(r'$\theta$ (arcmin)', size = 22)
        ax.legend(fontsize=17,loc='lower left')
        ax.tick_params(axis='both', which='major', labelsize=15)
        ax.tick_params(axis='both', which='minor', labelsize=15)
plt.tight_layout()
# plt.savefig('xigg_3d_to_2d_comp_fixh.png')
# +
# # gt_des = np.loadtxt('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/gt_des_mice_y1_3_4.txt', delimiter=',')
# # theta_des, theta_gt_des = gt_des[:,0],gt_des[:,1]
# gt_mice_des = np.loadtxt('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/gt_mice_des_y1_3_4.txt', delimiter=',')
# theta_mice_des, theta_gt_mice_des = gt_des[:,0],gt_des[:,1]
# # Subtract the 1-halo term from the theory to match data
# c_by_theta2 = -0.03/(theta_arcmin**2)
# fig, ax_all = plt.subplots(1,2, figsize = (14,6))
# for j in range(2):
# ax = ax_all[j]
# if j==0:
# ax.plot(nc.r_obs_new[1],nc.xi_gm[0], color = 'r',lw=2, marker='',linestyle='-',label=r'Theory')
# ax.plot(nc.r_obs_new[1],nc.data_obs_new[20:40]*nc.xi_mm[0], lw=2,color = 'b', marker='',linestyle='-',label=r'Data')
# ax.axvline(12., lw=2, color='k',label='Y1 Scale Cut')
# ax.axvspan(nc.r_obs_new[0][0], 12, alpha=0.1, color='black')
# ax.set_yscale('log')
# ax.set_xscale('log')
# ax.set_xlim(nc.r_obs_new[0][0],nc.r_obs_new[0][-1])
# # ax.set_ylim(1.0,12.0)
# ax.grid()
# # ax.set_xlabel(r'$\theta$', size = 18)
# ax.set_ylabel(r'$\xi^{gm}(r)$', size = 22)
# ax.set_xlabel(r'$r$ (Mpc/h)', size = 22)
# ax.tick_params(axis='both', which='major', labelsize=15)
# ax.tick_params(axis='both', which='minor', labelsize=15)
# # ax.legend(fontsize=20)
# else:
# ax.plot(theta_arcmin,(theta_arcmin**0.7)*gtheta_th, color = 'r', lw=2,marker='',linestyle='-',label=r'Theory')
# # ax.plot(theta_arcmin,c_by_theta2, color = 'g', lw=2,marker='',linestyle='--',label=r'$c/\theta^2$')
# ax.plot(theta_arcmin,(theta_arcmin**0.7)*(gtheta_th + c_by_theta2), color = 'orange', lw=2,marker='',linestyle='-',label=r'Theory + ' + r'$c/\theta^2$')
# ax.plot(theta_arcmin,(theta_arcmin**0.7)*(gtheta_th_pz + c_by_theta2), color = 'orange', lw=2,marker='',linestyle='--',label=r'Theory pz + ' + r'$c/\theta^2$')
# ax.plot(theta_arcmin,(theta_arcmin**0.7)*gtheta_data, color = 'b', lw=2,marker='',linestyle='-',label=r'Data')
# ax.axvline(30., lw=2, color='k',label='Y1 Scale Cut')
# ax.plot(theta_mice_des, theta_gt_mice_des, color = 'k', lw=2,marker='',linestyle='--',label=r'MICE Y1')
# ax.axvspan(theta_arcmin[0], 30, alpha=0.1, color='k')
# ax.set_xlim(theta_arcmin[0],theta_arcmin[-1])
# # ax.set_yscale('log')
# ax.set_xscale('log')
# ax.set_ylim(0.0,0.0175)
# ax.grid()
# # ax.set_xlabel(r'$\theta$', size = 18)
# ax.set_ylabel(r'$\theta^{0.7} \ \gamma^{3,4}_t(\theta)$', size = 22)
# ax.set_xlabel(r'$\theta$ (arcmin)', size = 22)
# ax.legend(fontsize=17)
# ax.tick_params(axis='both', which='major', labelsize=15)
# ax.tick_params(axis='both', which='minor', labelsize=15)
# plt.tight_layout()
# plt.savefig('xigm_3d_to_2d_comp_cy.png')
# +
# gt_des = np.loadtxt('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/gt_des_mice_y1_3_4.txt', delimiter=',')
# theta_des, theta_gt_des = gt_des[:,0],gt_des[:,1]
# Two-panel comparison: (left) 3D xi_gm theory vs data at the mean-redshift index;
# (right) the projected gamma_t with error bars from the file's covariance.
wt_cosmosis = fits.open('/home/shivam/Research/cosmosis/y3kp-bias-model/3d_stats/3d_to_2d/twopt_3d_to_2d_MICE_wcov.fits')
# wt_cosmosis = fits.open('twopt_3d_to_2d_MICE.fits')
ind_sel = np.where( (wt_cosmosis['gammat'].data['BIN1'] == bin_lens) & (wt_cosmosis['gammat'].data['BIN2']== bin_source))[0]
# Slice the joint covariance down to the gamma_t part, then to the selected pair.
# NOTE(review): assumes rows/cols 0:400 of COVMAT are the gammat block -- confirm
# against the file's covariance ordering.
gt_cov = wt_cosmosis['COVMAT'].data[0:400,0:400]
gt_cov_bin = gt_cov[:,ind_sel][ind_sel,:]
gt_sig_bin = np.sqrt(np.diag(gt_cov_bin))  # 1-sigma error bars from the diagonal
# Subtract the 1-halo term from the theory to match data
const = 0.015
c_by_theta2 = const/(theta_arcmin**2)
fig, ax_all = plt.subplots(1,2, figsize = (14,6))
for j in range(2):
    ax = ax_all[j]
    if j==0:
        # Left panel: 3D galaxy-matter correlation function, theory vs data.
        ax.plot(nc.r_array,nc.xi_gm[zmean_ind,:], color = 'r', lw=2,marker='',linestyle='-',label=r'Theory')
        ax.plot(nc.r_array,xi_gm_data[zmean_ind,:], color = 'b',lw=2, marker='',linestyle='-',label=r'Data')
        ax.axvline(12., lw=2, color='k',label='Y1 Scale Cut')
        ax.axvspan(nc.r_obs_new[0][0], 12, alpha=0.1, color='black')
        ax.set_yscale('log')
        ax.set_xscale('log')
        ax.set_xlim(nc.r_obs_new[0][0],nc.r_obs_new[0][-1])
        ax.set_ylim(1e-2,1e2)
        ax.grid()
        # ax.set_xlabel(r'$\theta$', size = 18)
        ax.set_ylabel(r'$\xi^{gm}(r)$', size = 22)
        ax.set_xlabel(r'$r$ (Mpc/h)', size = 22)
        ax.tick_params(axis='both', which='major', labelsize=15)
        ax.tick_params(axis='both', which='minor', labelsize=15)
        # ax.legend(fontsize=20)
    else:
        # Right panel: projected gamma_t; both curves add the const/theta^2 term.
        # NOTE(review): the y-label hardcodes bin pair "2,4" while bin_lens /
        # bin_source are variables -- confirm the label matches the selection.
        # ax.plot(theta_arcmin,gtheta_th , color = 'r', lw=2,marker='',linestyle='-',label=r'Theory')
        ax.plot(theta_arcmin,(gtheta_th + c_by_theta2) , color = 'r', lw=2,marker='',linestyle='-',label=r'Theory')
        # ax.plot(theta_arcmin,c_by_theta2, color = 'g', lw=2,marker='',linestyle='--',label=r'$c/\theta^2$')
        # ax.plot(theta_arcmin,(theta_arcmin**0.7)*(gtheta_th + c_by_theta2), color = 'orange', lw=2,marker='',linestyle='-',label=r'Theory + ' + r'$c/\theta^2$')
        # ax.plot(theta_arcmin,(theta_arcmin**0.7)*(gtheta_th_pz + c_by_theta2), color = 'orange', lw=2,marker='',linestyle='--',label=r'Theory pz + ' + r'$c/\theta^2$')
        ax.errorbar(theta_arcmin,gtheta_data, gt_sig_bin, color = 'b', lw=2,marker='',linestyle='-',label=r'Data')
        ax.axvline(30., lw=2, color='k')
        ax.plot(wt_cosmosis['gammat'].data['ANG'][ind_sel],wt_cosmosis['gammat'].data['VALUE'][ind_sel] + ( const/wt_cosmosis['gammat'].data['ANG'][ind_sel]**2), color = 'r',lw=2, marker='',linestyle='--',label=r'Theory Cosmosis' )
        ax.axvspan(theta_arcmin[0], 30, alpha=0.1, color='k')
        ax.set_xlim(theta_arcmin[0],theta_arcmin[-1])
        ax.set_yscale('log')
        ax.set_xscale('log')
        # ax.set_ylim(0.0,0.006)
        ax.grid()
        # ax.set_xlabel(r'$\theta$', size = 18)
        ax.set_ylabel(r'$\gamma^{2,4}_t(\theta)$', size = 22)
        ax.set_xlabel(r'$\theta$ (arcmin)', size = 22)
        ax.legend(fontsize=17)
        ax.tick_params(axis='both', which='major', labelsize=15)
        ax.tick_params(axis='both', which='minor', labelsize=15)
plt.tight_layout()
# plt.savefig('xigm_3d_to_2d_comp_fixh.png')
# -
# +
# Load the galaxy/matter n(z) for the first tomographic bin from its pickle file.
nz_dir = '/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/nz_data/'
bins_array = np.arange(0,5) + 1
# The original one-element loop is equivalent to using the first bin directly.
bins = bins_array[0]
filename_nzlens = nz_dir + 'nz_g_m_' + '_zbin_' + str(bins) + '_dsg_' + str(1) + '_dsm_' + str(1) + '.pk'
nz_data = pk.load(open(filename_nzlens, 'rb'))
nz_g, nz_m, nz_z = nz_data['nz_g'], nz_data['nz_m'], nz_data['nz_z']
# -
# Notebook cell output: display the loaded galaxy n(z).
nz_g
# +
# Bin edges around each grid point, assuming a uniformly spaced n(z) grid.
# NOTE(review): edges are mid +/- the full grid step, not half a step -- confirm intended.
zmid = nz_z
dz_step = zmid[1] - zmid[0]
zlow = zmid - dz_step
zhigh = zmid + dz_step
# +
# Overwrite the lens n(z) columns of the two-point FITS file with the pickled
# n(z) measured in this notebook, log-interpolated onto the file's Z_MID grid.
with fits.open('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/twopt_mice_y1_specz_mod.fits', mode='update') as hdul:
    for bins in bins_array:
        filename_nzlens = nz_dir + 'nz_g_m_' + '_zbin_' + str(bins) + '_dsg_' + str(1) + '_dsm_' + str(1) + '.pk'
        nz_data = pk.load(open(filename_nzlens, 'rb'))
        nz_g, nz_m, nz_z = nz_data['nz_g'], nz_data['nz_m'], nz_data['nz_z']
        # Interpolate log n(z); the 1e-80 floor keeps log() finite where n(z) == 0.
        nz_interp = interpolate.interp1d(nz_z,np.log(nz_g + 1e-80),fill_value='extrapolate')
        zmid = hdul['nz_pos_zspec'].data['Z_MID']
        nz_zmid = np.exp(nz_interp(zmid))
        hdul['nz_pos_zspec'].data['BIN' + str(bins)] = nz_zmid
    # Write all modified bins back to disk once, after the loop.
    hdul.flush()
# +
# Sanity check: re-open the updated FITS file and overplot its n(z) columns (red)
# against the pickled n(z) they were written from (blue), for every bin.
twopt_new = fits.open('/Users/shivam/Dropbox/Research/bias_des/3d_to_2d/twopt_mice_y1_specz_mod.fits')
zmid = twopt_new['nz_pos_zspec'].data['Z_MID']
# NOTE(review): bin1 is read but never used in this cell.
bin1 = twopt_new['nz_pos_zspec'].data['BIN1']
fig, ax_all = plt.subplots(1,1, figsize = (8,6))
ax = ax_all
for bins in bins_array:
    filename_nzlens = nz_dir + 'nz_g_m_' + '_zbin_' + str(bins) + '_dsg_' + str(1) + '_dsm_' + str(1) + '.pk'
    nz_data = pk.load(open(filename_nzlens, 'rb'))
    nz_g, nz_m, nz_z = nz_data['nz_g'], nz_data['nz_m'], nz_data['nz_z']
    ax.plot(zmid,twopt_new['nz_pos_zspec'].data['BIN' + str(bins)], color = 'r',lw=2, marker='',linestyle='-')
    ax.plot(nz_z,nz_g, color = 'b',lw=2, marker='',linestyle='-')
ax.set_ylabel(r'n(z)', size = 22)
ax.set_xlabel(r'z', size = 22)
ax.set_xlim(0,1)
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
# NOTE(review): no artists have labels here, so legend() emits a warning.
ax.legend(fontsize=20)
plt.tight_layout()
# plt.savefig('nz_comp.png')
# +
# Quick look at 1/(dchi/dz) as a function of redshift.
fig, ax_all = plt.subplots(1,1, figsize = (8,6))
ax = ax_all
ax.plot(z_array, 1./dchi_dz_array, color='b', lw=2, marker='', linestyle='-')
ax.set_ylabel(r'n(z)', size=22)
ax.set_xlabel(r'z', size=22)
ax.set_xlim(0, 1)
for tick_kind in ('major', 'minor'):
    ax.tick_params(axis='both', which=tick_kind, labelsize=15)
ax.legend(fontsize=20)
plt.tight_layout()
# plt.savefig('nz_comp.png')
# -
|
3d_to_2d/3d_to_2d_new_fftlog.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Report
#
# Summarize your findings and motivate your choice of approach. A better motivation shows your understanding of the lab. Don't forget to include the result from part 1!
# **Name:** <NAME>\
# **Date:** 2021-12-12
#
# ## Introduction
# For this lab I was given the task to develop and evaluate a CNN to get a better understanding of the architecture.
# This included evaluation, tuning and conclusions of the final obtained model.
#
# ## Result
# ### Metrics
# #### Todo1
# **Question:** Does a high accuracy imply a good model? <br>
# **Answer:** No, not usually. When developing models it's important to be careful about assuming that high accuracy implies good performance; this implication does not hold in all situations. For example, a model that achieves a very high training accuracy does not tell us whether the model performs well in general, since high training accuracy could indicate an overfitted model. An overfitted model is a model that does not generalize well to unseen data. <br>
# Another case where we could achieve high accuracy is when the model simply predicts the majority class. <br><br>
# In conclusion, we should not rely on accuracy alone as a metric; there exist many other metrics that are more capable of describing performance, e.g. precision, recall, F1 score, etc.
#
# #### Todo2
# **Task:** Find an alternative metric which can show similar or better precision than accuracy. <br><br>
# For this task I used precision metric, which is calculated as following: <br><br>
# <center> $Precision = \frac{TP}{TP + FP}$ </center> <br>
# The precision for both the training and test-set can be seen in the table below:
# <ul>
# <li>$ Precision_{train} = 1.0 $</li>
# <li>$ Precision_{test} = 0.4476 $</li>
# </ul>
# From the obtained precision during training, it makes sense that the value is 1 since the model is correctly classifying all data points. So when dealing with training data, the model is precise and predicts correctly 100% of the time. For test data, the model predicts correctly 44% of the time.
#
# **Task: Comparison between metrics:** <br>
# Additional, I chose to compare the different metrics, which the data is fetched from tensorboard. The metrics I have chosen are **Accuracy, Precision, AUROC and F1 score** and the result can be seen in the table below: <br>
#
# | | Accuracy | Precision | AUROC | F1-score |
# | --- | --- | --- | --- | --- |
# | Train data | 0.95 | 0.95 | 0.74 | 0.95
# | Val data | 0.43 | 0.413 | 0.44 | 0.43
#
#
#
# ### Architecture
#
# #### Overfit
# The aim for this task was to overfit the model as much as possible. I did this by removing the dropout (since it works as regularization), removing the second convolutional layer and by increasing the number of epochs from 5 to 40. The result can be seen in the images below, where **accuracy**, **loss** and **F1-score** are shown for both training and validation. <br><br>
#
# <center><strong>Accuracy for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/Overfit/Accuracy_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/Overfit/Accuracy_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <center><strong>Loss for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/Overfit/Loss_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/Overfit/loss_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <center><strong>F1-score for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/Overfit/F1_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/Overfit/F1_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <p>From the obtained results, as seen in the images above, I can conclude that the minimal changes I made to the architecture, caused the model to overfit, and almost obtained 100% accuracy in training with an increasing validation loss. Further changes could be to expand the feedforward network by more layers, and see if there are any changes.</p>
#
# #### Modification
# For this task, I investigated the performance by increasing the complexity much and just a little modification. The intention behind this was to look for the underfitting pattern. Since the dataset is very small, and simple, it should not be required to have a complex model. A way too complex model could in this scenario cause underfitting. <br>
# For the model I ran the following architectures:
# <ul>
# <li>Kernel size = 11, padding = 5, conv layers = 5</li>
# <li>Kernel size = 3, padding = 1, conv layers = 3</li>
#
# </ul>
#
# For the first modification, the results in term of accuracy, f1-score and loss can be shown below. From these images, we can clearly see that as the complexity increases in the model, underfitting is caused. <br>
# <center><strong>Accuracy for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/ArchModification/Accuracy_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/ArchModification/Accuracy_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <center><strong>Loss for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/ArchModification/Loss_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/ArchModification/loss_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <center><strong>F1-score for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/ArchModification/F1_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/ArchModification/F1_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
#
# Second architecture can be shown in the images below: <br>
#
# <center><strong>Accuracy for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/ArchModification/Accuracy_Train2.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/ArchModification/Accuracy_Validation2.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <center><strong>Loss for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/ArchModification/Loss_Train2.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/ArchModification/loss_Validation2.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <center><strong>F1-score for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/ArchitectureTask/ArchModification/F1_Train2.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/ArchitectureTask/ArchModification/F1_Validation2.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
#
#
# ### Hyperparameter tuning
# This task was to hyperparameter tune the model. This included tuning of learning rate, dropout, channels in the convlayers, number of convlayers, hidden nodes in the feedforward network and regularization term. <br>
# My final model architecture is the following: <br>
#
# <ul>
# <li>epochs = 40</li>
# <li>number of conv layers = 3</li>
# <li>output channels: (32, 128, 150)</li>
# <li>Hidden nodes in fc1 = 150</li>
# <li>(kernel size, padding) = (3, 1)</li>
# <li>max pooling = 2x2 </li>
# <li>dropout = 0.8 </li>
# <li>learning rate = 0.002</li>
# <li>l2 regularization = 0.0003</li>
# </ul>
# The results of the accuracy and F1-score can be shown below: <br>
# <center><strong>Accuracy for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/Hypertune/Accuracy_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/Hypertune/Accuracy_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br>
#
# <center><strong>F1-score for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/Hypertune/F1_Train.svg" alt="Drawing" style="width: 100%;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/Hypertune/F1_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
# <br><br>
# <center><strong>Confusion matrix on test-set</strong></center>
# <table><tr>
# <td><img src="Images/Hypertune/confusionm.PNG" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# From the result I can conclude that, compared to the first task, my tuning improved the model to almost 88% accuracy in validation, and training accuracy reached approximately 98%. Though, in terms of validation, I think that the performance is still somewhat lacking. Due to the simplicity of the data, the model should be able to reach above 90%. Also, 40 epochs is not efficient; further investigation and tuning of the learning rate could lead to faster learning, so I would be able to decrease the number of epochs. However, the confusion matrix shows good results, and the model manages to classify 93.75% correctly, which I consider a decent model even though the dataset is very simple. <br>
# In order to obtain a better accuracy on validation-set, further tuning on kernel size and output channels in the convolutional layers could be done.
#
#
#
# ### Augmentation
# For this task, I tested three techniques: RandomHorizontalFlip with both 0.5 and 0.3 as probability, RandomVerticalFlip, and RandomRotation with an angle of 15 degrees. I tried each one separately, and a combination of all of them; the best result was obtained using RandomHorizontalFlip with 0.3 probability on its own. The images of the metrics are shown below: <br><br>
#
# <center><strong>Accuracy for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/Augmentation/Accuracy_Train.svg" alt="Drawing" style="width: 300px;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/Augmentation/Accuracy_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br><br>
#
# <center><strong>F1-score for train and validation-set</strong></center>
# <table><tr>
# <td><h2 style="margin-right:20%;">Train</h2><img src="Images/Augmentation/F1_Train.svg" alt="Drawing" style="width: 300px;"/></td>
# <td><h2 style="margin-right:10%;">Validation</h2><img src="Images/Augmentation/F1_Validation.svg" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
# <br><br>
#
# <center><strong>Confusion matrix on test-data</strong></center>
# <table><tr>
# <td><img src="Images/Augmentation/conf.png" alt="Drawing" style="width: 100%;"/> </td>
# </tr></table>
#
#
# ***Question 1:*** Did data augmentation improve the model?<br>
# ***Answer 1:*** By looking at the training and validation it did not impact that much. Validation got increased from 85% to almost 88%, which for me is not enough. Though, by looking at the confusion matrix, the test accuracy got improved from 93% to 97%. <br><br>
# ***Question: 2*** What do you think have the greatest impact on the performance, why? <br>
# ***Answer 2:*** My intuition is that the architecture of the convolutional layers in the model is the main part that impacts the performance, since this is the part operating directly on the images. Also, I tried to change the architecture of the feedforward network (hidden nodes) but it did not have much impact, whereas small modifications in the convolutional layers made a big difference in performance. The learning rate and regularization term are also of great importance. <br><br>
#
#
# ### Conclusion
# This lab gave insights into the operation of a convolution layer, and how a CNN works as a whole. The tuning demonstrated the minor changes that could be made and how the performance of the model changed, e.g. as the number of conv layers increases the model will underfit, and a way too simple model will probably overfit. The final task showed the impact of applying augmentation to the model; this did not cause the big changes I expected, which could be due to the simple dataset, or because more augmentation techniques should be included in the transformer. Also, for each image I use both ***Accuracy*** and ***F1-score***; F1-score helps measure model performance when working with imbalanced data. From all results, accuracy and F1-score generate the same values, therefore accuracy would be a good metric for this problem.
|
lab3_Classification/Report.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to the Spatially Enabled DataFrame
#
# The [`Spatially Enabled DataFrame`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.toc.html#spatialdataframe) (SEDF) creates a simple, intuitive object that can easily manipulate geometric and attribute data.
#
# <blockquote>
# New at version 1.5, the Spatially Enabled DataFrame is an evolution of the <code>SpatialDataFrame</code> object that you may be familiar with. While the <code>SDF</code> object is still available for use, the team has stopped active development of it and is promoting the use of this new Spatially Enabled DataFrame pattern. The SEDF provides you better memory management, ability to handle larger datasets and is the pattern that Pandas advocates as the path forward.</blockquote>
#
# The Spatially Enabled DataFrame inserts a custom namespace called `spatial` into the popular [Pandas](https://pandas.pydata.org/) [DataFrame](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe) structure to give it spatial abilities. This allows you to use intuitive, pandorable operations on both the attribute and spatial columns. Thus, the SEDF is based on data structures inherently suited to data analysis, with natural operations for the filtering and inspecting of subsets of values which are fundamental to statistical and geographic manipulations.
#
# The dataframe reads from many **sources**, including shapefiles, [Pandas](https://pandas.pydata.org/) [DataFrames](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe), feature classes, GeoJSON, and Feature Layers.
#
# This document outlines some fundamentals of using the `Spatially Enabled DataFrame` object for working with GIS data.
#
# * [Accessing GIS Data](#Accessing-GIS-data)
# * [Reading Web Layers](#Reading-Web-Layers)
# * [Reading Feature Layer Data](#Reading-Feature-Layer-Data)
# * [Examining Feature Layer Content](#Example:-Examining-Feature-Layer-content)
# * [Example: Feature Layer Query Results to a Spatial DataFrame](#Example:-Feature-Layer-Query-Results-to-a-Spatially-Enabled-DataFrame)
# * [Accessing local GIS Data](#Accessing-local-GIS-data)
# * [Example: Reading a Shapefile](#Example:-Reading-a-Shapefile)
# * [Saving Spatially Enabled DataFrames](#Saving-Spatially-Enabled-DataFrames)
# * [Export Options](#Export-Options)
# * [Exporting to a Feature Class](#Export-to-Feature-Class)
# * [Example: Export a whole dataset to a Shapefile](#Example:-Export-a-whole-dataset-to-a-shapefile:)
# * [Example: Export a subset to a Shapefile](#Example:-Export-dataset-with-a-subset-of-columns-and-top-5-records-to-a-shapefile:)
import pandas as pd
from arcgis.features import GeoAccessor, GeoSeriesAccessor
# ## Accessing GIS data
# GIS users need to work with both published layers on remote servers (web layers) and local data, but the ability to manipulate these datasets without permanently copying the data is lacking. The `Spatially Enabled DataFrame` solves this problem because it is an in-memory object that can read, write and manipulate geospatial data.
#
# The SEDF integrates with Esri's [`ArcPy` site-package](http://pro.arcgis.com/en/pro-app/arcpy/get-started/what-is-arcpy-.htm) as well as the open source [`pyshp`](https://github.com/GeospatialPython/pyshp/), [`shapely`](https://github.com/Toblerity/Shapely) and [`fiona`](https://github.com/Toblerity/Fiona) packages. This means the ArcGIS API for Python SEDF can use either of these geometry engines to provide you options for easily working with geospatial data regardless of your platform. The SEDF transforms data into the formats you desire so you can use Python functionality to analyze and visualize geographic information.
#
# Data can be read and scripted to automate workflows and just as easily visualized on maps in [`Jupyter notebooks`](../using-the-jupyter-notebook-environment/). The SEDF can export data as feature classes or publish them directly to servers for sharing according to your needs. Let's explore some of the different options available with the versatile `Spatial Enabled DataFrame` namespaces:
# ### Reading Web Layers
#
# [`Feature layers`](https://doc.arcgis.com/en/arcgis-online/share-maps/hosted-web-layers.htm) hosted on [**ArcGIS Online**](https://www.arcgis.com) or [**ArcGIS Enterprise**](http://enterprise.arcgis.com/en/) can be easily read into a Spatially Enabled DataFrame using the [`from_layer`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.toc.html?highlight=from_layer#arcgis.features.GeoAccessor.from_layer) method. Once you read it into a SEDF object, you can create reports, manipulate the data, or convert it to a form that is comfortable and makes sense for its intended purpose.
#
# **Example: Retrieving an ArcGIS Online [`item`](https://developers.arcgis.com/rest/users-groups-and-items/publish-item.htm) and using the [`layers`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.gis.toc.html#layer) property to inspect the first 5 records of the layer**
# +
from arcgis import GIS
gis = GIS()
item = gis.content.get("85d0ca4ea1ca4b9abf0c51b9bd34de2e")
flayer = item.layers[0]
# create a Spatially Enabled DataFrame object
sdf = pd.DataFrame.spatial.from_layer(flayer)
sdf.head()
# -
# When you inspect the `type` of the object, you get back a standard pandas `DataFrame` object. However, this object now has an additional `SHAPE` column that allows you to perform geometric operations. In other words, this `DataFrame` is now geo-aware.
type(sdf)
# Further, the `DataFrame` has a new `spatial` property that provides a list of geoprocessing operations that can be performed on the object. The rest of the guides in this section go into details of how to use these functionalities. So, sit tight.
# ### Reading Feature Layer Data
#
# As seen above, the SEDF can consume a `Feature Layer` served from either ArcGIS Online or ArcGIS Enterprise orgs. Let's take a step-by-step approach to break down the notebook cell above and then extract a subset of records from the feature layer.
#
# #### Example: Examining Feature Layer content
# Use the `from_layer` method on the SEDF to instantiate a data frame from an item's `layer` and inspect the first 5 records.
# Retrieve an item from ArcGIS Online from a known ID value
known_item = gis.content.get("85d0ca4ea1ca4b9abf0c51b9bd34de2e")
known_item
# +
# Obtain the first feature layer from the item
fl = known_item.layers[0]
# Use the `from_layer` static method in the 'spatial' namespace on the Pandas' DataFrame
sdf = pd.DataFrame.spatial.from_layer(fl)
# Return the first 5 records.
sdf.head()
# -
# > NOTE: See Pandas DataFrame [`head() method documentation`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html) for details.
# You can also use sql queries to return a subset of records by leveraging the ArcGIS API for Python's [`Feature Layer`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.toc.html#featurelayer) object itself. When you run a [`query()`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.toc.html#arcgis.features.FeatureLayer.query) on a `FeatureLayer`, you get back a `FeatureSet` object. Calling the `sdf` property of the `FeatureSet` returns a Spatially Enabled DataFrame object. We then use the data frame's [`head()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.core.groupby.GroupBy.head.html#pandas.core.groupby.GroupBy.head) method to return the first 5 records and a subset of columns from the DataFrame:
# #### Example: Feature Layer Query Results to a Spatially Enabled DataFrame
# We'll use the `AGE_45_54` column to query the dataframe and return a new `DataFrame` with a subset of records. We can use the built-in [`zip()`](https://docs.python.org/3/library/functions.html#zip) function to print the data frame attribute field names, and then use data frame syntax to view specific attribute fields in the output:
# +
# Filter feature layer records with a sql query.
# See https://developers.arcgis.com/rest/services-reference/query-feature-service-layer-.htm
df = fl.query(where="AGE_45_54 < 1500").sdf
# -
for a,b,c,d in zip(df.columns[::4], df.columns[1::4],df.columns[2::4], df.columns[3::4]):
print("{:<30}{:<30}{:<30}{:<}".format(a,b,c,d))
# Return a subset of columns on just the first 5 records
df[['NAME', 'AGE_45_54', 'POP2010']].head()
# ## Accessing local GIS data
#
# The SEDF can also access local geospatial data. Depending upon what Python modules you have installed, you'll have access to a wide range of functionality:
#
# * If the **`ArcPy`** module is installed, meaning you have installed [`ArcGIS Pro`](http://pro.arcgis.com/en/pro-app/) and have installed the ArcGIS API for Python in that same environment, the `DataFrame` then has methods to read a subset of the ArcGIS Desktop [supported geographic formats](http://desktop.arcgis.com/en/arcmap/10.3/manage-data/datatypes/about-geographic-data-formats.htm#ESRI_SECTION1_4835793C55C0439593A46FD5BC9E64B9), most notably:
# * [`feature classes`](http://desktop.arcgis.com/en/arcmap/latest/manage-data/feature-classes/a-quick-tour-of-feature-classes.htm)
# * [`shapefiles`](http://desktop.arcgis.com/en/arcmap/latest/manage-data/shapefiles/what-is-a-shapefile.htm),
# * [`ArcGIS Server Web Services`](https://enterprise.arcgis.com/en/server/latest/publish-services/windows/what-types-of-services-can-you-publish.htm) and [`ArcGIS Online Hosted Feature Layers`](https://doc.arcgis.com/en/arcgis-online/share-maps/publish-features.htm)
# * [`OGC Services`](http://www.opengeospatial.org/standards)
# * If the **ArcPy** module is not installed, the SEDF [`from_featureclass`](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.toc.html?arcgis.features.GeoAccessor.from_featureclass#arcgis.features.GeoAccessor.from_featureclass) method only supports consuming an Esri [`shapefile`](http://desktop.arcgis.com/en/arcmap/latest/manage-data/shapefiles/what-is-a-shapefile.htm)
# > Please note that you must install the `pyshp` package to read shapefiles in environments that don't have access to `ArcPy`.
#
# ### Example: Reading a Shapefile
# > You must authenticate to `ArcGIS Online` or `ArcGIS Enterprise` to use the `from_featureclass()` method to read a shapefile with a Python interpreter that does not have access to `ArcPy`.
#
# > `g2 = GIS("https://www.arcgis.com", "username", "password")`
g2 = GIS("https://python.playground.esri.com/portal", "arcgis_python", "amazing_arcgis_123")
sdf = pd.DataFrame.spatial.from_featureclass("path\to\your\data\census_example\cities.shp")
sdf.tail()
# ## Saving Spatially Enabled DataFrames
#
# The SEDF can export data to various data formats for use in other applications.
#
#
# ### Export Options
#
# - [Feature Layers](https://doc.arcgis.com/en/arcgis-online/share-maps/hosted-web-layers.htm)
# - [Feature Collections](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.toc.html#featurelayercollection)
# - [Feature Set](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.toc.html#featureset)
# - [GeoJSON](http://geojson.org/)
# - [Feature Class](http://desktop.arcgis.com/en/arcmap/latest/manage-data/feature-classes/a-quick-tour-of-feature-classes.htm)
# - [Pickle](https://pythontips.com/2013/08/02/what-is-pickle-in-python/)
# - [HDF](https://support.hdfgroup.org/HDF5/Tutor/HDF5Intro.pdf)
#
# ### Export to Feature Class
#
# The SEDF allows for the export of whole datasets or partial datasets.
#
# #### Example: Export a whole dataset to a shapefile:
sdf.spatial.to_featureclass(location=r"c:\output_examples\census.shp")
# > The ArcGIS API for Python installs on all `macOS` and `Linux` machines, as well as those `Windows` machines not using Python interpreters that have access to `ArcPy` will only be able to write out to shapefile format with the `to_featureclass` method. Writing to file geodatabases requires the `ArcPy` site-package.
# #### Example: Export dataset with a subset of columns and top 5 records to a shapefile:
for a,b,c,d in zip(sdf.columns[::4], sdf.columns[1::4], sdf.columns[2::4], sdf.columns[3::4]):
print("{:<30}{:<30}{:<30}{:<}".format(a,b,c,d))
columns = ['NAME', 'ST', 'CAPITAL', 'STFIPS', 'POP2000', 'POP2007', 'SHAPE']
sdf[columns].head().spatial.to_featureclass(location=r"/path/to/your/data/directory/sdf_head_output.shp")
|
guide/05-working-with-the-spatially-enabled-dataframe/introduction-to-the-spatially-enabled-dataframe.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hothienlac/CS433/blob/master/Random%20Forest%20Lab/Untitled7.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="PhksgKclPkXM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="1b0a39bb-53fd-462b-da58-1fe31275ba2b"
import numpy as np
import re
import nltk
from sklearn.datasets import load_files
nltk.download('stopwords')
import pickle
from nltk.corpus import stopwords
# + id="v9iV8kIPWkdF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8eb1b9a7-5fc7-45d4-b4b9-5b0355f34128"
# !unzip txt_sentoken.zip
# + id="1svNMmN8ZbJ-" colab_type="code" colab={}
path = '/content/txt_sentoken/'
movie_data = load_files(path)
X, y = movie_data.data, movie_data.target
# + id="tryP56-bcCOY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f1578144-f76c-4dc9-f5b9-2afb9ff64e97"
print(len(X))
# + id="wrivlJa1cVim" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="dd7d43e3-9e7a-43ae-9e82-475951102b62"
documents = []
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
stemmer = WordNetLemmatizer()
# Normalise every review into a cleaned, lemmatised string.
for sen in range(0, len(X)):
    # Replace every non-word character (punctuation etc.) with a space.
    document = re.sub(r'\W', ' ', str(X[sen]))
    # Drop single characters surrounded by whitespace (left over after
    # punctuation removal, e.g. "isn't" -> "isn t").
    document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)
    # NOTE(review): this pattern matches literal '^' characters followed by
    # a letter; it was presumably meant to be r'^[a-zA-Z]\s+' (strip a
    # single character at the start of the string) -- confirm intent.
    document = re.sub(r'\^+[a-zA-Z]\s+', ' ', document)
    # Collapse runs of whitespace into a single space.
    document = re.sub(r'\s+', ' ', document, flags=re.I)
    # Remove the leading "b " produced by str() on a bytes literal.
    document = re.sub(r'^b\s+', '', document)
    document = document.lower()
    document = document.split()
    # Reduce every token to its dictionary (lemma) form.
    document = [stemmer.lemmatize(word) for word in document]
    document = ' '.join(document)
    documents.append(document)
# + id="awyFBRgWdu9v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d12efbb7-6e55-442c-ceb9-5e8617d4b791"
print(len(documents))
# + id="QOZ174-EeVys" colab_type="code" colab={}
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7,
stop_words=stopwords.words('english'))
X = vectorizer.fit_transform(documents).toarray()
# + id="GOfjmHcCe2l3" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2)
# + id="W293WbRlfLTG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="19dd5db7-b80e-44a1-b130-6f20ff81ccb0"
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=1000)
classifier.fit(X_train, y_train)
# + id="TjzF0MU3fe5V" colab_type="code" colab={}
y_pred = classifier.predict(X_test)
# + id="1f_1rd9rfj__" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="b5d0a2ff-b449-4a4f-b298-7c8a4b8a3a1c"
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
# + id="QbaxLbxYf1cf" colab_type="code" colab={}
|
Random Forest Lab/Untitled7.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cbga py2
# language: python
# name: cbga_py2
# ---
import pandas as pd
import re
data = pd.read_excel('../csvs/assam/Grant_No_29_2018-19.xls', header=None)
data
starting_row = data.iloc[18]
starting_row
# Locate the start of the data region: `starting_row` is row 18 of the
# sheet, so the first `row.equals(starting_row)` hit is at index 18 and the
# data is assumed to begin 3 rows below it.
starting_index = None
for index, row in data.iterrows():
    if row.equals(starting_row) and starting_index is None:
        starting_index = index + 3
    # Within the data region, the "Head of Account" text is split across
    # spreadsheet columns 5-7; merge the non-null pieces back into column 5.
    # NOTE(review): rows are only merged when index > starting_index, so the
    # row AT starting_index itself is skipped -- confirm this is intended.
    if starting_index is not None and index > starting_index:
        combined_string = ' '.join([row[5] if pd.notnull(row[5]) else '', row[6] if pd.notnull(row[6]) else '', row[7] if pd.notnull(row[7]) else ''])
        data.iat[index, 5] = combined_string
data
data.drop([0,6,7], axis=1, inplace=True)
selected_data = data[(starting_index + 1):]
starting_index
cols = ['Actual 2016-17 Plan', 'Actual 2016-17 Non-Plan',
'Budget Estimates 2017-18', 'Revised Estimates 2017-18',
'Head of Account', 'Budget Estimate 2018-19']
selected_data.columns = cols
selected_data
# A row counts as "English" iff its Head of Account contains at least one
# Latin letter.  A single re.search for one letter is equivalent to the
# previous findall('[a-zA-Z]*') + strip scan, but short-circuits on the
# first letter instead of tokenising the whole string.
selected_data['is_english'] = selected_data['Head of Account'].apply(lambda x: re.search('[a-zA-Z]', x) is not None)
selected_data
|
state_budget/assam/exploration/Assam CSV Audit.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py3.6tf1.12]
# language: python
# name: conda-env-py3.6tf1.12-py
# ---
# %matplotlib inline
# +
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__)
# -
# # 波士顿房价数据集
# 此数据集可以直接在 TensorFlow 中访问。下载并随机化处理训练集:
# +
boston_housing = keras.datasets.boston_housing
(train_data, train_labels), (test_data, test_labels) = boston_housing.load_data()
# Shuffle the training set
order = np.argsort(np.random.random(train_labels.shape))
train_data = train_data[order]
train_labels = train_labels[order]
# -
# ## 样本和特征
# 此数据集比我们到目前为止使用的其他数据集小得多:它共有 506 个样本,拆分为 404 个训练样本和 102 个测试样本:
print("Training set: {}".format(train_data.shape)) # 404 examples, 13 features
print("Testing set: {}".format(test_data.shape)) # 102 examples, 13 features
# 该数据集包含 13 个不同的特征:
#
# 1. 人均犯罪率。
# 2. 占地面积超过 25000 平方英尺的住宅用地所占的比例。
# 3. 非零售商业用地所占的比例(英亩/城镇)。
# 4. 查尔斯河虚拟变量(如果大片土地都临近查尔斯河,则为 1;否则为 0)。
# 5. 一氧化氮浓度(以千万分之一为单位)。
# 6. 每栋住宅的平均房间数。
# 7. 1940 年以前建造的自住房所占比例。
# 8. 到 5 个波士顿就业中心的加权距离。
# 9. 辐射式高速公路的可达性系数。
# 10. 每 10000 美元的全额房产税率。
# 11. 生师比(按城镇统计)。
# 12. 1000 * (Bk - 0.63) ** 2,其中 Bk 是黑人所占的比例(按城镇统计)。
# 13. 较低经济阶层人口所占百分比。
# 以上每个输入数据特征都有不同的范围。一些特征用介于 0 到 1 之间的比例表示,另外一些特征的范围在 1 到 12 之间,还有一些特征的范围在 0 到 100 之间,等等。真实的数据往往都是这样,了解如何探索和清理此类数据是一项需要加以培养的重要技能。
# 要点:作为建模者兼开发者,需要考虑如何使用这些数据,以及模型预测可能会带来哪些潜在益处和危害。类似这样的模型可能会加深社会偏见,扩大社会差异。某个特征是否与您想要解决的问题相关,或者是否会引入偏见?要了解详情,请参阅机器学习公平性。
print(train_data[0]) # Display sample features, notice the different scales
# +
# 使用 Pandas 库在格式规范的表格中显示数据集的前几行:
import pandas as pd
column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT']
df = pd.DataFrame(train_data, columns=column_names)
df.head()
# -
# ## 标签
# 标签是房价(以千美元为单位)。(您可能会注意到 20 世纪 70 年代中期的房价。)
print(train_labels[0:10]) # Display first 10 entries
# ## 数据正则化处理
# 作用是消除不同量纲的影响
# 建议标准化使用不同比例和范围的特征。对于每个特征,用原值减去特征的均值,再除以标准偏差即可:
#
# +
# Test data is *not* used when calculating the mean and std
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
train_data = (train_data - mean) / std
test_data = (test_data - mean) / std
print(train_data[0]) # First training sample, normalized
# -
# 虽然在未进行特征标准化的情况下,模型可能会收敛,但这样做会增加训练难度,而且使生成的模型更加依赖于在输入中选择使用的单位。
# # 创建模型
# 我们来构建模型。在此教程中,我们将使用 Sequential 模型,该模型包含两个密集连接隐藏层,以及一个返回单个连续值的输出层。由于我们稍后要再创建一个模型,因此将模型构建步骤封装在函数 build_model 中。
# +
def build_model():
    """Construct and compile the regression network.

    Returns a compiled ``keras.Sequential`` with two 64-unit ReLU hidden
    layers and a single linear output, optimised with RMSProp on MSE loss
    while tracking mean absolute error.
    """
    net = keras.Sequential()
    net.add(keras.layers.Dense(64, activation=tf.nn.relu,
                               input_shape=(train_data.shape[1],)))
    net.add(keras.layers.Dense(64, activation=tf.nn.relu))
    net.add(keras.layers.Dense(1))
    net.compile(loss='mse',
                optimizer=tf.train.RMSPropOptimizer(0.001),
                metrics=['mae'])
    return net
model = build_model()
model.summary()
# -
# # 训练模型
# 对该模型训练 500 个周期,并将训练和验证准确率记录到 history 对象中。
# +
# Display training progress by printing a single dot for each completed epoch
# Compact progress indicator: one '.' per completed epoch, wrapping to a
# fresh line every 100 epochs so long runs stay readable.
class PrintDot(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        """Print one progress dot; start a new line every 100 epochs."""
        if epoch % 100 == 0: print('')
        print('.', end='')
EPOCHS = 500
# Store training stats
history = model.fit(train_data, train_labels, epochs=EPOCHS,
validation_split=0.2, verbose=0,
callbacks=[PrintDot()])
# -
# 使用存储在 history 对象中的统计数据可视化模型的训练进度。我们希望根据这些数据判断:对模型训练多长时间之后它会停止优化。
# +
import matplotlib.pyplot as plt
def plot_history(history):
    """Plot per-epoch mean absolute error for training and validation."""
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [1000$]')
    epochs = history.epoch
    stats = history.history
    plt.plot(epochs, np.array(stats['mean_absolute_error']),
             label='Train Loss')
    plt.plot(epochs, np.array(stats['val_mean_absolute_error']),
             label='Val loss')
    plt.legend()
    plt.ylim([0, 5])
plot_history(history)
# -
# 此图显示,在大约 200 个周期之后,模型几乎不再出现任何改进。我们更新一下 model.fit 方法,以便在验证分数不再提高时自动停止训练。我们将使用一个回调来测试每个周期的训练状况。如果模型在一定数量的周期之后没有出现任何改进,则自动停止训练。
# +
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
history = model.fit(train_data, train_labels, epochs=EPOCHS,
validation_split=0.2, verbose=0,
callbacks=[early_stop, PrintDot()])
plot_history(history)
# -
# 此图显示平均误差约为 2500 美元。这是一个出色的模型吗?如果某些标签只是 15000 美元,那么 2500 美元的误差也不算小。
#
# 现在看一下模型在测试集上的表现如何:
# +
[loss, mae] = model.evaluate(test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: ${:7.2f}".format(mae * 1000))
# -
# # 预测
# 最后,使用测试集中的数据预测某些房价:
# +
test_predictions = model.predict(test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [1000$]')
plt.ylabel('Predictions [1000$]')
plt.axis('equal')
plt.xlim(plt.xlim())
plt.ylim(plt.ylim())
_ = plt.plot([-100, 100], [-100, 100])
# -
error = test_predictions - test_labels
plt.hist(error, bins = 50)
plt.xlabel("Prediction Error [1000$]")
_ = plt.ylabel("Count")
# # 总结
# 此笔记本介绍了几个处理回归问题的技巧。
#
# * 均方误差 (MSE) 是用于回归问题的常见损失函数(与分类问题不同)。
# * 同样,用于回归问题的评估指标也与分类问题不同。常见回归指标是平均绝对误差 (MAE)。
# * 如果输入数据特征的值具有不同的范围,则应分别缩放每个特征。
# * 如果训练数据不多,则选择隐藏层较少的小型网络,以避免出现过拟合。
# * 早停法是防止出现过拟合的实用技术。
|
tensorflownotes/getstart_keras/regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Region & Domain
# Germany, European Union
# Sports & Athletics
#
# # Research Question
# How did wages affect the performance of football teams in the Bundesliga between 2016 and 2020?
# +
# %matplotlib notebook
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import ScalarFormatter
from matplotlib.colors import ListedColormap
import pandas as pd
import numpy as np
plt.style.use('seaborn')
# +
wages = pd.read_csv('/Users/riro/Documents/GitHub/umich_ds/plotting/data/bundesliga_wages.csv', skipfooter=2, engine='python')
def clean_wages(df):
    """Clean the raw Bundesliga wages table.

    Drops the spurious 'Unnamed: 5' column, indexes rows by team name,
    strips currency formatting ('$' signs and thousands separators) from
    every cell and casts all wage columns to float.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw table as read from the wages CSV, with a 'Team' column.

    Returns
    -------
    pandas.DataFrame
        Numeric annual-wage columns indexed by 'Team'.
    """
    df = df.drop('Unnamed: 5', axis=1).set_index('Team')
    # Raw string so '\$' is a literal regex escape rather than a
    # (deprecated) Python string escape sequence.
    df = df.replace(r'[\$,]', '', regex=True).astype(float)
    return df
wages = clean_wages(wages)
# -
def get_league_table(year):
    """Load the Bundesliga league table for *year*, indexed by squad name."""
    path = f'/Users/riro/Documents/GitHub/umich_ds/plotting/data/bundesliga_{year}.csv'
    table = pd.read_csv(path)
    table.set_index('Squad', inplace=True)
    return table
# +
def color(x):
    """Map a final league rank to a plot colour.

    1 -> gold (champion), 2-7 -> red (European places),
    16 and below -> black (relegation places), otherwise grey.
    """
    if x == 1:
        return 'gold'
    if 2 <= x <= 7:
        return 'red'
    if x >= 16:
        return 'black'
    return 'lightslategrey'
def wages_vs_points(year):
    """Return [points, wages, per-team colours, colormap] for one season."""
    season = str(year)
    table = get_league_table(year).join(wages[season])
    table['color'] = table['Rk'].apply(color)
    return [table['Pts'], table[season], table['color'],
            ListedColormap(table['color'])]
def wages_vs_goals(year):
    """Return [goals scored, wages, per-team colours, colormap] for one season."""
    season = str(year)
    table = get_league_table(year).join(wages[season])
    table['color'] = table['Rk'].apply(color)
    return [table['GF'], table[season], table['color'],
            ListedColormap(table['color'])]
def crowd_vs_points(year):
    """Return [attendance, points, per-team colours, colormap] for one season."""
    table = get_league_table(year)
    table['color'] = table['Rk'].apply(color)
    return [table['Attendance'], table['Pts'], table['color'],
            ListedColormap(table['color'])]
def gd_vs_points(year):
    """Return [goal difference, points, per-team colours, colormap] for one season."""
    table = get_league_table(year)
    table['color'] = table['Rk'].apply(color)
    return [table['GDiff'], table['Pts'], table['color'],
            ListedColormap(table['color'])]
# Four-panel scatter of Bundesliga performance, one point per team-season.
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10, 10))
alpha = 0.7
for i in range(2016, 2020):
    season = str(i)
    # Hoist: compute each panel's data once per season.  Previously every
    # helper was called four times per panel (x, y, colours, cmap) and each
    # call re-reads the season CSV from disk.
    wp = wages_vs_points(season)
    wg = wages_vs_goals(season)
    cp = crowd_vs_points(season)
    gp = gd_vs_points(season)
    ax1.scatter(wp[0], wp[1], alpha=alpha, c=wp[2], cmap=wp[3])
    # Bug fix: ax2's colormap previously came from wages_vs_points (a
    # copy-paste slip); use the wages_vs_goals colormap consistently.
    ax2.scatter(wg[0], wg[1], alpha=alpha, c=wg[2], cmap=wg[3])
    ax3.scatter(cp[0], cp[1], alpha=alpha, c=cp[2], cmap=cp[3])
    ax4.scatter(gp[0], gp[1], alpha=alpha, c=gp[2], cmap=gp[3])
ax1.set_xlabel('Points')
ax1.set_ylabel('Average Annual Spend ($ millions)')
ax2.set_xlabel('Goals Scored')
ax2.set_yticklabels([])
ax3.set_xlabel('Attendance')
ax3.set_ylabel('Points')
ax4.set_xlabel('Goal Difference')
ax4.set_yticklabels([])
plt.suptitle('Bundesliga Performance (2016-2020)')
# Manual legend: one coloured swatch per rank category (matches color()).
categories = ['Winner', 'European places', 'Relegation places']
colors = ['gold', 'red', 'black']
recs = [mpatches.Rectangle((0, 0), 1, 1, fc=c) for c in colors]
ax1.legend(recs, categories, loc=2)
plt.savefig('bundesliga_performance', dpi=300)
|
plotting/Assignment 4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Tensorflow
# language: python
# name: tensorflow
# ---
# +
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
#from tensorflow.python.framework import ops from cnn_utils import *
from math import pi
import itertools
import time
# %matplotlib inline
#with tf.Session() as sess:
# hello = tf.constant('Hello, TensorFlow!')
# print(sess.run(hello))
# +
np.random.seed(seed=123)
class Vertex:
    """A unit-length vertex parameterised by spherical angles (theta, phi).

    Vertex 0 is pinned at theta = phi = 0 and vertex 1 has phi pinned to 0;
    fixing these angles removes the rigid-rotation degrees of freedom of the
    whole configuration so the optimiser cannot waste steps rotating it.
    All remaining angles are trainable TF variables with random initial
    values (theta in [0, pi), phi in [0, 2*pi)).
    """
    def __init__(self, index):
        self.index = index
        if(index == 0):
            # Fully fixed reference vertex at the pole.
            self.theta = tf.constant([0.0], dtype=tf.float64, name = "theta" + str(index))
            self.phi = tf.constant([0.0], dtype=tf.float64, name = "phi" + str(index))
        else:
            self.theta = tf.get_variable("theta" + str(index), [1] , dtype=tf.float64,
                initializer=tf.constant_initializer(np.random.random_sample() * pi))
            if(index == 1):
                # Azimuth fixed for vertex 1 to pin the remaining rotation.
                self.phi = tf.constant([0.0], dtype=tf.float64, name = "phi" + str(index))
            else:
                self.phi = tf.get_variable("phi" + str(index), [1] , dtype=tf.float64,
                    initializer=tf.constant_initializer(np.random.random_sample() * 2 * pi))
        #print( index )
        #print(self.theta)
        #print(self.phi)
        # Cartesian coordinates on the unit sphere, derived from the angles.
        sin_theta = tf.sin(self.theta)
        self.x = tf.multiply(sin_theta, tf.cos(self.phi) , name='x' + str(self.index))
        self.y = tf.multiply(sin_theta, tf.sin(self.phi) , name='y' + str(self.index))
        self.z = tf.cos(self.theta, name='z' + str(self.index))
        # (3,) position tensor used by the pairwise-potential terms.
        self.xyz = tf.concat([self.x, self.y, self.z], axis=0)
def make_vertextes(n_vertexes):
    """Create one Vertex for each index 0 .. n_vertexes - 1."""
    print('make vertextes')
    return [Vertex(i) for i in range(n_vertexes)]
def make_pairs(vertexes):
    """Build the Coulomb-style potential: the sum of 1/distance over every
    unordered pair of vertices.

    Parameters
    ----------
    vertexes : list of Vertex

    Returns
    -------
    tf.Tensor
        Scalar potential to be minimised.
    """
    print('make pairs')
    # Renamed accumulator from `list`, which shadowed the builtin.
    terms = []
    for a, b in itertools.combinations(vertexes, 2):
        terms.append(1 / tf.norm(a.xyz - b.xyz))
    return tf.add_n(terms)
# +
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import ConvexHull
from sklearn import preprocessing
from pythreejs import *
import numpy as np
from IPython.display import display
def make_face_normals(verts,simplices):
    """Approximate an outward unit normal for each face.

    The normal of a face is taken as the l2-normalised sum of its vertex
    positions -- valid here because the vertices lie on a sphere centred at
    the origin.  Returns a list of (normal, face_index) tuples.
    """
    print(verts)
    print(simplices)
    face_normals = np.zeros((simplices.shape[0],3))
    for i in range(simplices.shape[0]):
        # `v` aliases row i, so the += below accumulates in place.
        v = face_normals[i]
        for vertex_index in simplices[i]:
            #print(vertex_index)
            v += verts[vertex_index]
    face_normals = preprocessing.normalize(face_normals, norm='l2')
    print(face_normals)
    tuples=[]
    for i in range(face_normals.shape[0]):
        tuples.append((face_normals[i],i))
    return tuples
def fix_simplex_normals(verts, simplices):
    """Reorient each triangle in *simplices*, in place, so its face normal
    points away from the origin.

    For each face the normal is computed from the two edges sharing the
    middle ("base") vertex; if it points toward the origin (negative dot
    product with a face vertex) the first and last indices are swapped to
    flip the winding order.

    Parameters
    ----------
    verts : numpy.ndarray, shape (n, 3)
        Vertex positions (assumed centred on the origin).
    simplices : numpy.ndarray, shape (m, 3)
        Triangle vertex indices; modified in place.

    Returns
    -------
    None
    """
    base, left, right = 1, 0, 2
    for face in simplices:
        v_a = verts[face[left]]
        v_b = verts[face[base]]
        v_c = verts[face[right]]
        # Face normal from the two edges that share the base vertex.
        normal = np.cross(v_c - v_b, v_a - v_b)
        # Negative projection onto a face vertex => normal points inward:
        # swap the outer indices to reverse the winding.
        if np.dot(v_a, normal) < 0:
            face[left], face[right] = face[right], face[left]
    return None
def make_vert_spheres(verts):
    """Return a small grey sphere mesh positioned at each vertex."""
    meshes = []
    for position in verts:
        mesh = Mesh(
            geometry=SphereGeometry(radius=0.03),
            material=LambertMaterial(color='#444444', transparent=False, opacity=0.9),
        )
        mesh.position = position.tolist()
        meshes.append(mesh)
    return meshes
def makeMeshes(verts, simplices):
    """Build a pythreejs Renderer showing the hull as a wireframe mesh with
    a small sphere at every vertex, lit and orbit-controllable."""
    #fix_simplex_normals(verts, simplices)
    # Ensure consistent outward winding before handing faces to three.js.
    fix_simplex_normals(verts, simplices)
    # https://github.com/jovyan/pythreejs
    cubeGeometry = PlainGeometry(vertices=verts, faces=simplices)
    # , faceNormals=make_face_normals(pos,simplices)
    #,transparent=True, opacity=0.7, side='DoubleSide'
    myobjectCube = Mesh(geometry=cubeGeometry, material = LambertMaterial(color='#888888'))
    myobjectCube.material.wireframe = True
    camera = PerspectiveCamera(position=[-0.5, 6, -0.5], fov=25, aspect=1.0,
        children=[DirectionalLight(color='#aaaaaa', position=[20, 20, 30], intensity=4)])
    spheres = make_vert_spheres(verts)
    sceneCube = Scene(children=[myobjectCube, AmbientLight(color='#aaaaaa', intensity=4)]+spheres)
    renderer = Renderer(camera=camera, background='black', background_opacity=1,
        scene = sceneCube, controls=[OrbitControls(controlling=camera)], width='800',height='800')
    return renderer
# +
import multiprocessing
# Vertex counts the author has tried (annotated +/- for good/bad results).
n_vertexes = 12 # 12 -24 28 +32 36 40 -42 46
# Step size shrinks ~1/n^2 so larger configurations stay numerically stable.
learning_rate = 6/(n_vertexes**2)
start_time = time.time()
tf.reset_default_graph()
vertextes = make_vertextes(n_vertexes)
# Total pairwise repulsion energy of the point set (the quantity to minimise).
potential = make_pairs(vertextes)
multiplier = 1.02
# Learning rate is fed at run time so it could be adapted between steps.
learning_rate_placeholder = tf.placeholder(tf.float32, [], name='learning_rate')
print('make optimizer')
optimizer = tf.train.GradientDescentOptimizer( learning_rate=learning_rate_placeholder ).minimize(potential)
#print([v.xyz for v in vertextes])
# (n_vertexes, 3) tensor of all coordinates, fetched after optimisation.
allPositions = tf.stack([v.xyz for v in vertextes])
# NOTE(review): NUM_THREADS is computed but the ConfigProto that would consume
# it is commented out below, so the session uses default threading.
NUM_THREADS = max(1, int( multiprocessing.cpu_count() / 2 ))
print('NUM_THREADS = ' + str(NUM_THREADS))
print('start session')
#config = tf.ConfigProto(intra_op_parallelism_threads=4, inter_op_parallelism_threads=4, \
#    allow_soft_placement=True, device_count = {'CPU': 4})
#config = tf.ConfigProto(intra_op_parallelism_threads=4, inter_op_parallelism_threads=4, \
#    allow_soft_placement=True, device_count = {'CPU': 1})
#session = tf.Session(config=config)
session = tf.Session()
with session as sess:
    sess.run(tf.global_variables_initializer())
    # Previous energy value, used to detect convergence; start impossibly high.
    last_p = 1e6
    last_was_equal = False
    for i in range(100000):
        # Every 10th step also fetch the energy for monitoring / early stop.
        if(i % 10 == 0):
            _ , p = sess.run([optimizer, potential], feed_dict={learning_rate_placeholder: learning_rate} )
            if(i % 200 == 0):
                print( str(i) + ' ' + "{0:.16f}, ".format(p)+ " ({0:.16e}), ".format(last_p-p), end='')
            #if(i % 50 == 0):
            #    print( str(i) + ' ' + "{0:.8f}".format(p) + ' learning_rate = ' +
            #        str( learning_rate) + ' multiplier = ' + str(multiplier) )
            #print( p - last_p )
            # Stop once the energy is flat on two consecutive checks.
            if abs(p - last_p) < 1e-11:
                if last_was_equal:
                    print('early stop')
                    break
                else:
                    last_was_equal = True
            else:
                last_was_equal = False
            #if( p < last_p ):
            #    learning_rate *= multiplier
            #else:
            #    learning_rate = max(1e-3, learning_rate/8)
            #    multiplier = 1 + (multiplier-1)/2
            last_p = p;
        else:
            # Plain optimisation step without fetching the energy.
            sess.run( optimizer, feed_dict={learning_rate_placeholder: learning_rate} )
    elapsed_time = time.time() - start_time
    print( str(elapsed_time) + ' sec')
    print( str(i) + ' epochs' )
    # Pull the final vertex positions out of the graph for later rendering.
    verts = sess.run(allPositions, feed_dict={learning_rate_placeholder: learning_rate})
    #print(verts)
# +
hull = ConvexHull(verts)
display(makeMeshes(verts, hull.simplices))
# -
# +
def add_ordered(edge_set, index1, index2):
    """Add the undirected edge (index1, index2) to *edge_set*.

    The pair is stored smaller-index-first so each undirected edge has a
    single canonical representation and duplicates collapse in the set.

    Note: the first parameter was renamed from ``set``, which shadowed the
    builtin; all call sites in this file pass it positionally.
    """
    if index1 < index2:
        edge_set.add((index1, index2))
    else:
        edge_set.add((index2, index1))
def get_edges(simplexes):
    """Return the unique undirected edges of a list of triangles.

    Each triangle contributes its three edges; an edge is stored as a
    (low, high) index tuple so edges shared between adjacent triangles are
    deduplicated by the set.
    """
    unique_edges = set()
    for tri in simplexes:
        for i, j in ((tri[0], tri[1]), (tri[1], tri[2]), (tri[0], tri[2])):
            unique_edges.add((i, j) if i < j else (j, i))
    return list(unique_edges)
class EdgeGroup:
    """A bucket of edges sharing (approximately) the same length.

    ``distance`` is the representative edge length of the group and
    ``count`` the number of edges assigned to it (a fresh group holds one).
    """
    def __init__(self, distance):
        self.distance = distance
        self.count = 1
    def __str__(self):
        return '{}:{}'.format(self.distance, self.count)
def make_surface_edge_groups(verts, simplexes, epsilon):
    """Histogram the surface edges of a triangulation by length.

    Edge lengths within *epsilon* of an existing group's representative
    length are merged into that group.  Returns a list of
    ``[length, count]`` pairs, one per distinct edge length.
    """
    groups = []
    for a, b in get_edges(simplexes):
        length = np.linalg.norm(verts[a] - verts[b])
        for group in groups:
            if abs(group[0] - length) < epsilon:
                group[1] += 1
                break
        else:
            # No group within tolerance: this length starts a new one.
            groups.append([length, 1])
    return list(groups)
# Edge lengths closer than epsilon are treated as equal when grouping.
epsilon = 1e-5
groups = make_surface_edge_groups(verts, hull.simplices, epsilon)
print(groups)
# +
import matplotlib.pyplot as plt
import numpy as np
plt.rcdefaults()
fig, ax = plt.subplots()
# Horizontal bar chart of the edge-length histogram: one bar per group,
# labelled by the group's edge length, with bar extent = edge count.
people = [group[0] for group in groups]
y_pos = np.arange(len(people))
performance = [group[1] for group in groups]
ax.barh(y_pos, performance, align='center',
        color='darkblue', ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.invert_yaxis()  # labels read top-to-bottom
ax.set_xlabel('Edge Count')
ax.set_title('Edge Length')
plt.show()
# -
# Todo:
# 1. fix face normals, https://github.com/mrdoob/three.js/blob/dev/src/core/Geometry.js computeFaceNormals
# cubeGeometry.faceNormals
#
# 2. edge stats bar chart
# 4. transparent faces
# 5. edge color groups
# 5. manual change learning rate on split run on session
# 5. adaptive learning rate, exponential average of last delta for each learning rate with N buckets randomly chosen each an increment more or less than best in the middle
# 5. save and load to file and use as init
# 5. graph
# 1. e f v
# 2. potential per ?
# 3. groups
# 4. periodic table of groups
#
#
# ## 12 Vertexes
# 
#
#
# ## 32 Vertexes
# 
#
#cubeGeometry.faceNormals.__class__
# Scratch cell: preview of the scientific-notation format string used in the
# training-progress printout above.
" {0:.16e}, ".format(1e-9)
|
n-geodesic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os.path as osp
import sys
import matplotlib.pyplot as plt
import numpy as np
import omegaconf
import torch
import torch.linalg as tln
sys.path.insert(0, "../src")
from data_utils import PolynomialDataset
from lighting_utils import LitLinearNet
from pnml_utils import PnmlAnalytical
from torch.utils.data import DataLoader
# -
def calc_net_effective_theta(net):
    """Collapse a purely-linear, bias-free network into one weight matrix.

    Multiplies the transposed weight matrices of all parameters in
    ``net.named_parameters()`` order, so that for feature matrix ``x`` the
    network output equals ``x @ theta``.

    Assumes every parameter is a 2-D weight (layers have no bias); a bias
    vector in the parameter list would break the matmul chain.  Returns
    ``None`` for a parameterless network.

    (Cleanup: removed the unused ``param_prev``/``name`` bookkeeping of the
    original loop.)
    """
    param_all = None
    for _, param in net.named_parameters():
        if param_all is None:
            # First layer: start the product with its transposed weight.
            param_all = param.T
        else:
            param_all = param_all @ param.T
    return param_all
# +
# Load paths
# Checkpoints from an ERM multirun sweep over network depth; each directory
# holds a hydra config plus the trained model checkpoint.
# NOTE: "experimnet" is a typo, but it matches the on-disk directory names.
paths = [
    "../output/multirun_train_erm_20210412_194206/experimnet_name=train_erm,intermediate_sizes=[]",
    "../output/multirun_train_erm_20210412_194206/experimnet_name=train_erm,intermediate_sizes=[6]",
    "../output/multirun_train_erm_20210412_194206/experimnet_name=train_erm,intermediate_sizes=[6,6]",
    "../output/multirun_train_erm_20210412_194206/experimnet_name=train_erm,intermediate_sizes=[6,6,6,6]",
    "../output/multirun_train_erm_20210412_194206/experimnet_name=train_erm,intermediate_sizes=[6,6,6,6,6,6]",
]
experimnet_dicts = []
for path in paths:
    model_path = osp.join(path, "model.ckpt")
    cfg = omegaconf.OmegaConf.load(osp.join(path, ".hydra/config.yaml"))
    experimnet_dicts.append({"cfg": cfg, "path": path, "model_path": model_path})
# -
thetas = []
for experimnet_dict in experimnet_dicts:
    cfg = experimnet_dict["cfg"]
    model_path = experimnet_dict["model_path"]
    # Testing set
    # Uniform grid of test inputs, expanded into polynomial features.
    x_test = np.arange(cfg.x_test_min, cfg.x_test_max, cfg.dx_test)
    testset = PolynomialDataset(x_test, np.empty(len(x_test)), cfg.model_degree)
    testloader = DataLoader(
        testset, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.num_workers
    )
    linear_net = LitLinearNet.load_from_checkpoint(model_path)
    layer_num = len([_ for l in linear_net.model.layers.named_children()])
    y_hat_list = []
    linear_net.eval()
    # Predict over the whole test grid without tracking gradients.
    with torch.no_grad():
        for x, _ in testloader:
            y_hats = linear_net.model(x)
            y_hat_list.append(y_hats)
    y_hats = torch.vstack(y_hat_list)
    print(linear_net.model)
    # Stash predictions and metadata for the comparison plot below.
    experimnet_dict["y_hats"] = y_hats
    experimnet_dict["testloader"] = testloader
    experimnet_dict["layer_num"] = layer_num
    # Effective single-matrix parameters of the (bias-free) deep linear net.
    theta = calc_net_effective_theta(linear_net)
    thetas.append({"num_layers": len(linear_net.model.layers), "params": theta})
# +
fig, axs = plt.subplots(1, 1, sharex=True, figsize=(8, 4))
ax = axs
# Vary alpha / linestyle / width per model so the overlaid curves stay readable.
alphas = np.linspace(0.2, 1, len(experimnet_dicts))
ls = ["-", "--", "-.", ":"]
lw = np.linspace(0.5, 3, len(experimnet_dicts))
for i, experimnet_dict in enumerate(experimnet_dicts):
    cfg = experimnet_dict["cfg"]
    y_hats = experimnet_dict["y_hats"]
    testloader = experimnet_dict["testloader"]
    layer_num = experimnet_dict["layer_num"]
    ax.plot(
        testloader.dataset.x,
        y_hats,
        label=f"{layer_num} layers",
        alpha=alphas[i],
        linestyle=ls[i % len(ls)],
        linewidth=lw[i],
    )
# NOTE(review): cfg here is whichever experiment the loop ended on; assumes the
# training points are identical across runs — confirm.
ax.plot(cfg.x_train, cfg.y_train, "r*", label="Training")
ax.set_ylabel("y")
ax.set_title("ERM models with SGD")
ax.grid()
ax.legend()
ax.set_ylim(-2, 2)
# +
# Training set
# Reuses the last loop iteration's experiment config to rebuild the
# polynomial training features.
cfg = experimnet_dict["cfg"]
model_path = experimnet_dict["model_path"]
# Design matrix of polynomial features for the training inputs.
trainset = PolynomialDataset(cfg.x_train, cfg.y_train, cfg.model_degree)
x_arr = torch.tensor([x for x, _ in trainset])
x_arr.shape
# -
# Report the infinity-norm of each depth's effective parameter matrix.
for theta_dict in thetas:
    print(
        "num_layers={}. norm={}".format(
            theta_dict["num_layers"], tln.norm(theta_dict["params"], ord=np.inf)
        )
    )
|
notebooks/minimum_rank.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] chapterId="Hy8l_RQWlbz" id="chapter_name"
# # ★Chapter9 Pandasの応用
# + [markdown] id="table"
# - **9.1 DataFrameの連結・結合の概観**
# - **9.1.1 連結・結合について**
# <br><br>
# - **9.2 DataFrameの連結**
# - **9.2.1 インデックス、カラムが一致しているDataFrame同士の連結**
# - **9.2.2 インデックス、カラムが一致していないDataFrame同士の連結**
# <br><br>
# - **9.3 DataFrameの結合**
# - **9.3.1 結合の種類**
# - **9.3.2 内部結合の基本**
# - **9.3.3 外部結合の基本**
# - **9.3.4 同名でない列をKeyにして結合する**
# - **9.3.5 インデックスをKeyにして結合する**
# <br><br>
# - **9.4 DataFrameを用いたデータ分析**
# - **9.4.1 一部の行を得る**
# - **9.4.2 計算処理を適用する**
# - **9.4.3 要約統計量を得る**
# - **9.4.4 DataFrameの行間または列間の差を求める**
# - **9.4.5 グループ化**
# <br><br>
# - **添削問題**
# - **総合添削問題**
# + [markdown] id="section_name" sectionId="B1Peu0XWgZM"
# ## ●9.1 DataFrameの連結・結合の概観
# + [markdown] courseId=4010 exerciseId="ryGBKh8sIgz" id="quiz_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.1.1 連結・結合について
# -
# この項にサンプルはありません
# + [markdown] id="section_name" sectionId="HJ_eORQblZG"
# ## ●9.2 DataFrameの連結
# + [markdown] courseId=4010 exerciseId="BJQBF3UsIgM" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.2.1 インデックス、カラムが一致しているDataFrame同士の連結
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
# 指定のインデックスとカラムを持つDataFrameを乱数によって作成する関数です
def make_random_df(index, columns, seed):
np.random.seed(seed)
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 101), len(index))
df.index = index
return df
#インデックス、カラムが一致しているDataFrameを作成します
columns = ["apple", "orange", "banana"]
df_data1 = make_random_df(range(1, 5), columns, 0)
df_data2 = make_random_df(range(1, 5), columns, 1)
# df_data1とdf_data2を縦方向に連結しdf1に代入してください
# df_data1とdf_data2を横方向に連結しdf2に代入してください
print(df1)
print(df2)
# -
# **リスト 9.1:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
# 指定のインデックスとカラムを持つDataFrameを乱数によって作成する関数です
def make_random_df(index, columns, seed):
np.random.seed(seed)
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 101), len(index))
df.index = index
return df
#インデックス、カラムが一致しているDataFrameを作成します
columns = ["apple", "orange", "banana"]
df_data1 = make_random_df(range(1, 5), columns, 0)
df_data2 = make_random_df(range(1, 5), columns, 1)
# df_data1とdf_data2を縦方向に連結しdf1に代入してください
df1 = pd.concat([df_data1, df_data2], axis=0)
# df_data1とdf_data2を横方向に連結しdf2に代入してください
df2 = pd.concat([df_data1, df_data2], axis=1)
print(df1)
print(df2)
# -
# **リスト 9.2:解答例**
# + [markdown] courseId=4010 exerciseId="ByVBF2LoLgz" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.2.2 インデックス、カラムが一致していないDataFrame同士の連結
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
# 指定のインデックスとカラムを持つDataFrameを乱数によって作成する関数です
def make_random_df(index, columns, seed):
np.random.seed(seed)
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 101), len(index))
df.index = index
return df
columns1 = ["apple", "orange", "banana"]
columns2 = ["orange", "kiwifruit", "banana"]
# インデックスが1,2,3,4, カラムがcolumns1のDataFrameを作成します
df_data1 = make_random_df(range(1, 5), columns1, 0)
# インデックスが1,3,5,7, カラムがcolumns2のDataFrameを作成します
df_data2 = make_random_df(np.arange(1, 8, 2), columns2, 1)
# df_data1とdf_data2を縦方向に連結しdf1に代入してください
# df_data1とdf_data2を横方向に連結しdf2に代入してください
print(df1)
print(df2)
# -
# **リスト 9.3:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
# 指定のインデックスとカラムを持つDataFrameを乱数によって作成する関数です
def make_random_df(index, columns, seed):
np.random.seed(seed)
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 101), len(index))
df.index = index
return df
columns1 = ["apple", "orange", "banana"]
columns2 = ["orange", "kiwifruit", "banana"]
# インデックスが1,2,3,4, カラムがcolumns1のDataFrameを作成します
df_data1 = make_random_df(range(1, 5), columns1, 0)
# インデックスが1,3,5,7, カラムがcolumns2のDataFrameを作成します
df_data2 = make_random_df(np.arange(1, 8, 2), columns2, 1)
# df_data1とdf_data2を縦方向に連結しdf1に代入してください
df1 = pd.concat([df_data1, df_data2], axis=0)
# df_data1とdf_data2を横方向に連結しdf2に代入してください
df2 = pd.concat([df_data1, df_data2], axis=1)
print(df1)
print(df2)
# -
# **リスト 9.4:解答例**
# + [markdown] courseId=4010 exerciseId="HySHFhIiIlz" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.2.3 連結する際のラベルの指定
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
# 指定のインデックスとカラムを持つDataFrameを乱数によって作成する関数です
def make_random_df(index, columns, seed):
np.random.seed(seed)
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 101), len(index))
df.index = index
return df
columns = ["apple", "orange", "banana"]
df_data1 = make_random_df(range(1, 5), columns, 0)
df_data2 = make_random_df(range(1, 5), columns, 1)
# df_data1とdf_data2を横方向に連結し、Keysに"X"、"Y"を指定してMultiIndexにしてdfに代入してください
# dfの"Y"ラベルの"banana"をY_bananaに代入してください
print(df)
print()
print(Y_banana)
# -
# **リスト 9.5:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
# 指定のインデックスとカラムを持つDataFrameを乱数によって作成する関数です
def make_random_df(index, columns, seed):
np.random.seed(seed)
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 101), len(index))
df.index = index
return df
columns = ["apple", "orange", "banana"]
df_data1 = make_random_df(range(1, 5), columns, 0)
df_data2 = make_random_df(range(1, 5), columns, 1)
# df_data1とdf_data2を横方向に連結し、Keysに"X"、"Y"を指定してMultiIndexにしてdfに代入してください
df = pd.concat([df_data1, df_data2], axis=1, keys=["X", "Y"])
# dfの"Y"ラベルの"banana"をY_bananaに代入してください
Y_banana = df["Y", "banana"]
print(df)
print()
print(Y_banana)
# -
# **リスト 9.6:解答例**
# + [markdown] id="section_name" sectionId="SytlORmbgbG"
# ## ●9.3 DataFrameの結合
# + [markdown] courseId=4010 exerciseId="SyUrtnIjIxz" id="quiz_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.3.1 結合の種類
# -
# この項にサンプルはありません
# + [markdown] courseId=4010 exerciseId="B1PHFhLiIgf" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.3.2 内部結合の基本
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
data1 = {"fruits": ["apple", "orange", "banana", "strawberry", "kiwifruit"],
"year": [2001, 2002, 2001, 2008, 2006],
"amount": [1, 4, 5, 6, 3]}
df1 = pd.DataFrame(data1)
data2 = {"fruits": ["apple", "orange", "banana", "strawberry", "mango"],
"year": [2001, 2002, 2001, 2008, 2007],
"price": [150, 120, 100, 250, 3000]}
df2 = pd.DataFrame(data2)
# df1, df2の中身を確認してください
print(df1)
print()
print(df2)
print()
# df1とdf2を"fruits"をKeyに内部結合して作成したDataFrameをdf3に代入してください
# 出力します
# 内部結合を行った時の挙動を確認しましょう
print(df3)
# -
# **リスト 9.7:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
data1 = {"fruits": ["apple", "orange", "banana", "strawberry", "kiwifruit"],
"year": [2001, 2002, 2001, 2008, 2006],
"amount": [1, 4, 5, 6, 3]}
df1 = pd.DataFrame(data1)
data2 = {"fruits": ["apple", "orange", "banana", "strawberry", "mango"],
"year": [2001, 2002, 2001, 2008, 2007],
"price": [150, 120, 100, 250, 3000]}
df2 = pd.DataFrame(data2)
# df1, df2の中身を確認してください
print(df1)
print()
print(df2)
print()
# df1とdf2を"fruits"をKeyに内部結合して作成したDataFrameをdf3に代入してください
df3 = pd.merge(df1, df2, on="fruits", how="inner")
# 出力します
# 内部結合を行った時の挙動を確認しましょう
print(df3)
# -
# **リスト 9.8:解答例**
# + [markdown] courseId=4010 exerciseId="H1dHtnLiLeG" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.3.3 外部結合の基本
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
data1 = {"fruits": ["apple", "orange", "banana", "strawberry", "kiwifruit"],
"year": [2001, 2002, 2001, 2008, 2006],
"amount": [1, 4, 5, 6, 3]}
df1 = pd.DataFrame(data1)
data2 = {"fruits": ["apple", "orange", "banana", "strawberry", "mango"],
"year": [2001, 2002, 2001, 2008, 2007],
"price": [150, 120, 100, 250, 3000]}
df2 = pd.DataFrame(data2)
# df1, df2の中身を確認してください
print(df1)
print()
print(df2)
print()
# df1とdf2を"fruits"をKeyに外部結合して作成したDataFrameをdf3に代入してください
# 出力します
# 外部結合を行った時の挙動を確認しましょう
print(df3)
# -
# **リスト 9.9:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
data1 = {"fruits": ["apple", "orange", "banana", "strawberry", "kiwifruit"],
"year": [2001, 2002, 2001, 2008, 2006],
"amount": [1, 4, 5, 6, 3]}
df1 = pd.DataFrame(data1)
data2 = {"fruits": ["apple", "orange", "banana", "strawberry", "mango"],
"year": [2001, 2002, 2001, 2008, 2007],
"price": [150, 120, 100, 250, 3000]}
df2 = pd.DataFrame(data2)
# df1, df2の中身を確認してください
print(df1)
print()
print(df2)
print()
# df1とdf2を"fruits"をKeyに外部結合して作成したDataFrameをdf3に代入してください
df3 = pd.merge(df1, df2, on="fruits", how="outer")
# 出力します
# 外部結合を行った時の挙動を確認しましょう
print(df3)
# -
# **リスト 9.10:解答例**
# + [markdown] courseId=4010 exerciseId="SyYBF2UiIlM" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### 〇9.3.4 同名でない列をKeyにして結合する
# -
# #### □問題
# +
import pandas as pd
# 注文情報です
order_df = pd.DataFrame([[1000, 2546, 103],
[1001, 4352, 101],
[1002, 342, 101]],
columns=["id", "item_id", "customer_id"])
# 顧客情報です
customer_df = pd.DataFrame([[101, "Tanaka"],
[102, "Suzuki"],
[103, "Kato"]],
columns=["id", "name"])
# order_dfを元に"id"をcustomer_dfに結合してorder_dfに代入してください
print(order_df)
# -
# **リスト 9.11:問題**
# #### □解答例
# + id="answer"
import pandas as pd
# 注文情報です
order_df = pd.DataFrame([[1000, 2546, 103],
[1001, 4352, 101],
[1002, 342, 101]],
columns=["id", "item_id", "customer_id"])
# 顧客情報です
customer_df = pd.DataFrame([[101, "Tanaka"],
[102, "Suzuki"],
[103, "Kato"]],
columns=["id", "name"])
# order_dfを元に"id"をcustomer_dfに結合してorder_dfに代入してください
order_df = pd.merge(order_df, customer_df, left_on="customer_id", right_on="id", how="inner")
print(order_df)
# -
# **リスト 9.12:解答例**
# + [markdown] courseId=4010 exerciseId="Hy5rYh8j8ez" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### 〇9.3.5 インデックスをKeyにして結合する
# -
# #### □問題
# + id="index"
import pandas as pd
# 注文情報です
order_df = pd.DataFrame([[1000, 2546, 103],
[1001, 4352, 101],
[1002, 342, 101]],
columns=["id", "item_id", "customer_id"])
# 顧客情報です
customer_df = pd.DataFrame([["Tanaka"],
["Suzuki"],
["Kato"]],
columns=["name"])
customer_df.index = [101, 102, 103]
# customer_dfを元に"name"をorder_dfに結合してorder_dfに代入してください
print(order_df)
# -
# **リスト 9.13:問題**
# #### □解答例
# + id="answer"
import pandas as pd
# 注文情報です
order_df = pd.DataFrame([[1000, 2546, 103],
[1001, 4352, 101],
[1002, 342, 101]],
columns=["id", "item_id", "customer_id"])
# 顧客情報です
customer_df = pd.DataFrame([["Tanaka"],
["Suzuki"],
["Kato"]],
columns=["name"])
customer_df.index = [101, 102, 103]
# customer_dfを元に"name"をorder_dfに結合してorder_dfに代入してください
order_df = pd.merge(order_df, customer_df, left_on="customer_id", right_index=True, how="inner")
print(order_df)
# -
# **リスト 9.14:解答例**
# + [markdown] id="section_name" sectionId="HycedRXbl-f"
# ## ●9.4 DataFrameを用いたデータ分析
# + [markdown] courseId=4010 exerciseId="SkiSt3UsUlM" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.4.1 一部の行を得る
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの冒頭3行を取得し、df_headに代入してください
# dfの末尾3行を取得し、df_tailに代入してください
# 出力します
print(df_head)
print(df_tail)
# -
# **リスト9.15:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの冒頭3行を取得し、df_headに代入してください
df_head = df.head(3)
# dfの末尾3行を取得し、df_tailに代入してください
df_tail = df.tail(3)
# 出力します
print(df_head)
print(df_tail)
# -
# **リスト 9.16:解答例**
# + [markdown] courseId=4010 exerciseId="rJnSFnIsLxz" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.4.2 計算処理を適用する
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
import math
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの各要素を2倍し、double_dfに代入してください
# dfの各要素を2乗し、square_dfに代入してください
# dfの各要素の平方根を計算し、sqrt_dfに代入してください
# 出力します
print(double_df)
print(square_df)
print(sqrt_df)
# -
# **リスト 9.17:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
import math
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの各要素を2倍し、double_dfに代入してください
double_df = df * 2 # double_df = df + dfもOK
# dfの各要素を2乗し、square_dfに代入してください
square_df = df * df #square_df = df**2 でもOK
# dfの各要素の平方根を計算し、sqrt_dfに代入してください
sqrt_df = np.sqrt(df)
# 出力します
print(double_df)
print(square_df)
print(sqrt_df)
# -
# **リスト 9.18:解答例**
# + [markdown] courseId=4010 exerciseId="HypSY3UsLgf" id="code_session_name" important=true isDL=false timeoutSecs=5
# ### 〇9.4.3 要約統計量を得る
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの要約統計量のうち、"mean", "max", "min"を取り出してdf_desに代入してください
print(df_des)
# -
# **リスト 9.19:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの要約統計量のうち、"mean", "max", "min"を取り出してdf_desに代入してください
df_des = df.describe().loc[["mean", "max", "min"]]
print(df_des)
# -
# **リスト 9.20:解答例**
# + [markdown] courseId=4010 exerciseId="BkCSFnUsUxf" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### 〇9.4.4 DataFrameの行間または列間の差を求める
# -
# #### □問題
# + id="index"
import numpy as np
import pandas as pd
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの各行について、2行後の行との差を計算したDataFrameをdf_diffに代入してください
# dfとdf_diffの中身を比較して処理内容を確認してください
print(df)
print(df_diff)
# -
# **リスト 9.21:問題**
# #### □解答例
# + id="answer"
import numpy as np
import pandas as pd
np.random.seed(0)
columns = ["apple", "orange", "banana", "strawberry", "kiwifruit"]
# DataFrameを生成し、列を追加します
df = pd.DataFrame()
for column in columns:
df[column] = np.random.choice(range(1, 11), 10)
df.index = range(1, 11)
# dfの各行について、2行後の行との差を計算したDataFrameをdf_diffに代入してください
df_diff = df.diff(-2, axis=0)
# dfとdf_diffの中身を比較して処理内容を確認してください
print(df)
print(df_diff)
# -
# **リスト 9.22:解答例**
# + [markdown] courseId=4010 exerciseId="S1JIF3Uj8xz" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### 〇9.4.5 グループ化
# -
# #### □問題
# + id="index"
import pandas as pd
# 一部の都道府県に関するDataFrameを作成します
prefecture_df = pd.DataFrame([["Tokyo", 2190, 13636, "Kanto"],
["Kanagawa", 2415, 9145, "Kanto"],
["Osaka", 1904, 8837, "Kinki"],
["Kyoto", 4610, 2605, "Kinki"],
["Aichi", 5172, 7505, "Chubu"]],
columns=["Prefecture", "Area",
"Population", "Region"])
# 出力します
print(prefecture_df)
# prefecture_dfを地域(Region)についてグループ化し、grouped_regionに代入してください
# prefecture_dfに出てきた地域ごとの、面積(Area)と人口(Population)の平均をmean_dfに代入してください
# 出力します
print(mean_df)
# -
# **リスト 9.23:問題**
# #### □解答例
# + id="answer"
import pandas as pd
# 一部の都道府県に関するDataFrameを作成します
prefecture_df = pd.DataFrame([["Tokyo", 2190, 13636, "Kanto"],
["Kanagawa", 2415, 9145, "Kanto"],
["Osaka", 1904, 8837, "Kinki"],
["Kyoto", 4610, 2605, "Kinki"],
["Aichi", 5172, 7505, "Chubu"]],
columns=["Prefecture", "Area",
"Population", "Region"])
# 出力します
print(prefecture_df)
# prefecture_dfを地域(Region)についてグループ化し、grouped_regionに代入してください
grouped_region = prefecture_df.groupby("Region")
# prefecture_dfに出てきた地域ごとの、面積(Area)と人口(Population)の平均をmean_dfに代入してください
mean_df = grouped_region.mean()
# 出力します
print(mean_df)
# -
# **リスト 9.24:解答例**
# + [markdown] id="chapter_exam"
# ## ●添削問題
# -
# #### □問題
# + id="index"
import pandas as pd
# それぞれのDataFrameの定義です
df1 = pd.DataFrame([["apple", "Fruit", 120],
["orange", "Fruit", 60],
["banana", "Fruit", 100],
["pumpkin", "Vegetable", 150],
["potato", "Vegetable", 80]],
columns=["Name", "Type", "Price"])
df2 = pd.DataFrame([["onion", "Vegetable", 60],
["carrot", "Vegetable", 50],
["beans", "Vegetable", 100],
["grape", "Fruit", 160],
["kiwifruit", "Fruit", 80]],
columns=["Name", "Type", "Price"])
# ここに解答を記述してください
# -
# **リスト 9.25:問題**
# #### □解答例
# + id="answer"
import pandas as pd
# それぞれのDataFrameの定義です
df1 = pd.DataFrame([["apple", "Fruit", 120],
["orange", "Fruit", 60],
["banana", "Fruit", 100],
["pumpkin", "Vegetable", 150],
["potato", "Vegetable", 80]],
columns=["Name", "Type", "Price"])
df2 = pd.DataFrame([["onion", "Vegetable", 60],
["carrot", "Vegetable", 50],
["beans", "Vegetable", 100],
["grape", "Fruit", 160],
["kiwifruit", "Fruit", 80]],
columns=["Name", "Type", "Price"])
# ここに解答を記述してください
# 結合します
df3 = pd.concat([df1, df2], axis=0)
# 果物のみを抽出し、Priceでソートします
df_fruit = df3.loc[df3["Type"] == "Fruit"]
df_fruit = df_fruit.sort_values(by="Price")
# 野菜のみを抽出し、Priceでソートします
df_veg = df3.loc[df3["Type"] == "Vegetable"]
df_veg = df_veg.sort_values(by="Price")
# それぞれの上3つの要素のPriceの合計金額を計算します
print(sum(df_fruit[:3]["Price"]) + sum(df_veg[:3]["Price"]))
# -
# **リスト 9.26:解答例**
# # 総合添削問題
# #### □問題
# +
import pandas as pd
index = ["taro", "mike", "kana", "jun", "sachi"]
columns = ["国語", "数学", "社会", "理科", "英語"]
data = [[30, 45, 12, 45, 87], [65, 47, 83, 17, 58], [64, 63, 86, 57, 46, ], [38, 47, 62, 91, 63], [65, 36, 85, 94, 36]]
df = pd.DataFrame(data, index=index, columns=columns)
# dfの新しい列"体育"にpe_columnのデータを追加してください
pe_column = pd.Series([56, 43, 73, 82, 62], index=["taro", "mike", "kana", "jun", "sachi"])
df
print(df)
print()
# 数学を昇順で並び替えてください
df1 =
print(df1)
print()
# df1の各要素に5点を足してください
df2 =
print(df2)
print()
# dfの要約統計量のうち、"mean", "max", "min"を出力してください
print()
# -
# **リスト9.27:問題**
# +
import pandas as pd
index = ["taro", "mike", "kana", "jun", "sachi"]
columns = ["国語", "数学", "社会", "理科", "英語"]
data = [[30, 45, 12, 45, 87], [65, 47, 83, 17, 58], [64, 63, 86, 57, 46, ], [38, 47, 62, 91, 63], [65, 36, 85, 94, 36]]
df = pd.DataFrame(data, index=index, columns=columns)
# dfの新しい列"体育"にpe_columnのデータを追加してください
pe_column = pd.Series([56, 43, 73, 82, 62], index=["taro", "mike", "kana", "jun", "sachi"])
df["体育"] = pe_column
print(df)
print()
# 数学を昇順で並び替えてください
df1 = df.sort_values(by="数学", ascending=True)
print(df1)
print()
# df1の各要素に5点を足してください
df2 = df1 + 5
print(df2)
print()
# dfの要約統計量のうち、"mean", "max", "min"を出力してください
print(df2.describe().loc[["mean", "max", "min"]])
# -
# **リスト9.28:解答例**
|
notebooks/ShinsouGakushu_sample/Chapter9_Sample.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: gliberal
# language: python
# name: gliberal
# ---
# # Annotation tool to correct predicted segmentation, directly on large overview images (>10000 px)
# +
import os
import numpy as np
from improc.io import parse_collection, DCAccessor
DCAccessor.register()
from holoviews import opts
from skimage.io import imread
from inter_view.dashboards import AnnotationDashBoard
import holoviews as hv
hv.extension('bokeh', width=100)
# -
# # config
# +
basedir = '../../data/2D'
img_subdir = 'TIF_OVR_MIP'
annot_subdir = 'TIF_OVR_MIP_SEG_CORRECTION'
data_pattern = '{platedir}/{layer}/{f1}_{f2}_{f3}_{row}{col:02d}_T{T:04d}F{field:03d}L{L:02d}A{A:02d}Z{zslice:02d}C{channel:02d}.{ext}'
index = ['layer', 'platedir','row', 'col', 'field', 'zslice', 'channel']
channel_config = {img_subdir:{'cmap':'gray'},
annot_subdir:{'cmap':'glasbey_hv_16bit', 'raster_aggregator':'first', 'intensity_bounds':(-2**15,2**15-1), 'slider_limits':(-2**15,2**15-1), 'bitdepth':'int16', 'opacity':0.5}}
def set_inactive_tool(plot, element):
    """Holoviews/bokeh plot hook: start with the inspect (hover) tool disabled."""
    plot.state.toolbar.active_inspect = None
opts.defaults(opts.Image('channel.{}'.format(img_subdir), frame_width=1500),
opts.Image('channel.{}'.format(annot_subdir), frame_width=1500, clipping_colors={'min': (0, 0, 0, 0)}, clim=(0,2**16-1), hooks=[set_inactive_tool], tools=['hover'], show_title=False),)
# -
# # parse files
# +
df = parse_collection(os.path.join(basedir, data_pattern), index)
df = df.dc[[img_subdir, annot_subdir]]
# alternatively to work on overview images directly:
df = df.groupby(['platedir','row', 'col', 'channel'],).filter(lambda x: annot_subdir in x.reset_index().layer.tolist())
df
# -
# # interactive dashboard
#
# To start drawing select the 'Freehand Drawing Tool' in the toolbar on the right of the image.
#
# - drawing label:
# - -1: un-annotated (does not contribute to the training loss)
# - 0: background
# - [1...] instance labels. last one in the list is always unused
# - on click
# - \-: does nothing
# - pick label (pipette): sets label at the clicked position as drawing label
# - fill label (pot filling): fill the label at the clicked label with the current drawing labels (discontinuous)
# - lock
# - background: prevent from drawing over the existing background
# - foreground: prevents from drawing over the existing labels
# - save: saves the current annotation. The current annotation is also automatically saved when loading a new image
# - discard changes: Cancels all changes made since the last save (i.e. not a granual ctrl+z!)
# +
def read_first_slice(path):
    '''Reads only the first slice
    (TIF_OVR contain a second tiles mask channel)

    Annotation images are widened to int16 because labels may be negative.
    '''
    image = imread(path, img_num=0)
    if annot_subdir not in path:
        return image
    # annotation need to support negative labels
    return image.astype(np.int16)
db = AnnotationDashBoard(df=df,
multi_select_levels=['layer'],
channel_config=channel_config,
composite_channels=[img_subdir],
overlay_channels=[annot_subdir],
annot_channel=annot_subdir,
loading_fun=read_first_slice)
db.panel().servable()
|
notebooks/applications/2D_annotator_TIF_OVR.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
import pdfplumber
import csv
DATE_STRING = '2020-04-14'
outfile = 'covid_conjugate_0414.csv'
outfileh = open(outfile, 'w')
writer = csv.writer(outfileh)
writer.writerow(['facility','county', 'first_reported', 'start_date', 'cases', 'deaths', 'report_date'])
file = "../or_weekly_reports/COVID-19-Weekly-Report-%s-FINAL.pdf" % DATE_STRING
pdf = pdfplumber.open(file)
# -
settings = {
"vertical_strategy": "lines",
"horizontal_strategy": "words",
"snap_tolerance": 3,
"join_tolerance": 3,
"edge_min_length": 3,
"min_words_vertical": 3,
"min_words_horizontal": 1,
"keep_blank_chars": False,
"text_tolerance": 3,
"text_x_tolerance": None,
"text_y_tolerance": None,
"intersection_tolerance": 3,
"intersection_x_tolerance": None,
"intersection_y_tolerance": None,
}
# +
pagenumber = 3
# +
# im = pdf.pages[pagenumber].to_image()
width = pdf.pages[pagenumber].width
height = pdf.pages[pagenumber].height
# Crop off the top 150 points (page header) before table detection.
box1 = (0, 150, width, height)
cropped_page = pdf.pages[pagenumber].crop(box1)
im = cropped_page.to_image()
im.debug_tablefinder()
# -
# NOTE(review): the `settings` dict defined above is not used here --
# extraction runs with pdfplumber's defaults. Confirm this is intended.
table_found = cropped_page.extract_table(table_settings={})
result_count = len(table_found)
# Skip the header row
for i in range(1,result_count):
    this_row = table_found[i]
    if not this_row[0]:
        # hack for misread rows with an empty first cell
        print("Empty-ish row, continuing")
        continue
    # Skip the summary row at the bottom of the table.
    if this_row[0]=='Total':
        continue
    print(len(this_row))
    # NOTE(review): assert is stripped under `python -O`; an explicit
    # check with a raised error would be safer for data validation.
    assert len(this_row)==5
    print(this_row)
    # Tag each row with the report date before writing it out.
    this_row.append(DATE_STRING)
    writer.writerow(this_row)
# +
## that's the end! In other reports there might be more.
# -
outfileh.close()
|
data/source/or_data/extract_conjugate/read_conjugate_0414.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img src="https://img.icons8.com/dusk/64/000000/artificial-intelligence.png" style="height:50px;display:inline"> EE 046202 - Technion - Unsupervised Learning & Data Analysis
# ---
#
# #### <NAME>
#
# ## Tutorial 07 - Deep Unsupervised Learning - PyTorch & Autoencoders
# ---
#
# ### <img src="https://img.icons8.com/bubbles/50/000000/checklist.png" style="height:50px;display:inline"> Agenda
# ---
# * [Introduction & Motivation](#-Introduction-&-Motivation)
# * Up & Running with PyTorch
# * [PyTorch Basics](#-PyTorch-Basics)
# * [Datasets: MNIST, Fashion-MNIST](#-Datasets-in-PyTorch---MNIST-&-Fashion-MNIST)
# * [Multi-Layer Perceptrons (MLPs)](#-Multi-Layer-Perceptron-(MLP)-in-PyTorch)
# * [Extra: Convolutional Layers](#-Multi-Layer-Perceptron-(MLP)-in-PyTorch)
# * [Autoencoders](#-Autoencoders)
# * [PyTorch Implementation]()
# * [Recommended Videos](#-Recommended-Videos)
# * [Credits](#-Credits)
# +
# imports for the tutorial
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
# pytorch imports
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, ConcatDataset
import torchvision
# scikit-learn imports
from sklearn.manifold import LocallyLinearEmbedding, Isomap, TSNE
from sklearn.decomposition import PCA, KernelPCA
# -
# ## <img src="https://img.icons8.com/color/48/000000/fire-element.png" style="height:50px;display:inline" /> Introduction & Motivation
# ---
# As you saw in the ML course, even though the idea of a neural network is a simple one, the implementation and calculations are not very sympathetic. Neural networks are a strong tool that might be useful for many sorts of tasks, and thus simple frameworks were established to help engineers focus on the task rather than the actual implementation of the neural networks, making backpropagation as easy as one line of code. There are several deep learning frameworks, where the two most popular ones are <a href="https://tensorflow.org">Tensorflow</a> (maintained by Google) and <a href="https://pytorch.org">PyTorch</a> (maintained by Facebook). TF and PyTorch have different workflows, though in recent versions they share some of the workflows.
#
# <img src="./assets/tut_xiv_pytorch_logo.png" style="height:100px" />
# PyTorch is "an open source deep learning platform that provides a seamless path from research prototyping to production deployment". In simple words, PyTorch is meant to make deep learning programming easier for Python programmers by adapting the Python workflow and "easy-to-code" language. Also, the PyTorch official website provides great tutorials to get you up and running in most deep learning fields (Computer Vision, Reinforcement Learning, Natural Language Processing).
#
# At its core, PyTorch provides two main features:
#
# * An n-dimensional Tensor, similar to numpy but can run on GPUs
# * Automatic differentiation for building and training neural networks
#
# The word "Tensor" might seem intimidating, but it is just an n-dimensional array or matrix. A PyTorch Tensor is conceptually identical to a numpy array: a Tensor is an n-dimensional array, and PyTorch provides many functions for operating on these Tensors. Behind the scenes, Tensors can keep track of a computational graph and gradients, but they’re also useful as a generic tool for scientific computing. Most of the functions and methods available in `numpy` are also available in PyTorch for tensors.
#
# #### Why not just use NumPy?
# Numpy is a great framework, but it cannot utilize GPUs to accelerate its numerical computations. For modern deep neural networks, GPUs often provide speedups of 50x or greater, so unfortunately numpy won’t be enough for modern deep learning.
# ### <img src="https://img.icons8.com/dusk/64/000000/diversity.png" style="height:50px;display:inline"> PyTorch Basics
# ---
# We will now introduce some of the basics. The main thing to keep in mind is that PyTorch was designed to have a similar API to NumPy's. That means that almost every NumPy function is in PyTorch, and most probably has the same name and same parameters.
# create a tensor initialized from a random uniform distribution
# (uniform_ with a trailing underscore is an in-place operation)
x = torch.Tensor(5, 3).uniform_(-1, 1)
print("x: \n", x)
print("x size:", x.size())
print("same as x shape:", x.shape)
print()
# basic math operations
y = torch.rand(5, 3)
print("y: \n", y)
print("x + y = \n", x + y)
print("x - y = \n", x - y)
print()
# indexing - same as NumPy
print("x[3, :] = ", x[3, :])
print()
# ones, zeros, reshaping - same as NumPy
print("torch.ones([2, 2]): \n", torch.ones([2,2]))
print("torch.zeros([1, 2]): \n", torch.zeros([1,2]))
print("y.reshape(-1, 1): \n", y.reshape(-1, 1))
# -1 just means 'don't care about the dimension this axis, make it happen'
# more efficiently, use `view` as it doesn't copy the variable (saves memory)
print("y.view(-1, 1): \n", y.view(-1, 1))
print()
# torch <-> numpy: torch.tensor() copies the NumPy data into a new tensor
z = torch.tensor(np.array([[1, 2], [3, 4]]))
print("z: \n", z)
print()
# torch <-> numpy, alternative version: from_numpy shares memory with `a`
a = np.array([[1, 2], [3, 4]])
z = torch.from_numpy(a)
print("z: \n", z)
print()
# tensor -> NumPy array (shares memory for CPU tensors)
p = x.numpy()
print("p = x.numpy(): \n", p)
print()
# tensor (matrix) multiplication; .t() is the transpose
xy = x @ y.t()
print("xy = x @ y.t(): \n", xy)
# or
xy = torch.matmul(x, y.t())
print("xy = torch.matmul(x, y.t()): \n", xy)
print()
# element-wise multiplication
xy = x * y
print("xy = x * y: \n", xy)
# or
xy = torch.mul(x, y)
print("xy = torch.mul(x, y): \n", xy)
# ### <img src="https://img.icons8.com/dusk/64/000000/processor.png" style="height:50px;display:inline"> Device - CPU or GPU?
# ---
# * The greatest advantage of using the deep learning frameworks in the ability to utilize the GPUs. In PyTorch, we can code it to automatically choose which device to use.
# * A good practice is to always define the device at the beginning of your code, and then just send the models and tensors to that device using `.to(device)`
# +
# check if there is a GPU available
print(torch.cuda.is_available())
# check what is the current available device
if torch.cuda.is_available():
    print("current device: ", torch.cuda.current_device())
# automatically choose device; this pattern makes the rest of the code
# device-agnostic (everything is moved with .to(device))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # use gpu 0 if it is available, o.w. use the cpu
print("device: ", device)
# create a random tensor and send it to the device
a = torch.randn([2, 2]).to(device)
print(a)
# -
# ### <img src="https://img.icons8.com/cute-clipart/64/000000/documents-folder.png" style="height:50px;display:inline"> Datasets in PyTorch - MNIST & Fashion-MNIST
# ---
# * The first thing we need to do before we build our networks is load the data, separate it to train and test sets (and sometimes also validation) and create batches of it to train using one of the Gradient Descent optimizers.
# * Fortunately, PyTorch provide a simple data structure to load the data (usually from files) called `Dataset` and another data structure, called `DataLoader` that creates batches out of the Dataset (and it even takes care of shuffling it if you wish).
# * We now introduce two very popular datasets:
#
# 1. **MNIST**- The MNIST database (1998) of handwritten digits (0-9, a total of 10 digits) has a training set of 60,000 examples, and a test set of 10,000 examples. Images are of size 28 x 28 pixels with one color channel. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image. It is a good database for people who want to try learning techniques and pattern recognition methods on real-world data while spending minimal efforts on preprocessing and formatting. MNIST is broadly used as a baseline to many ML papers, even today.
# 2. **Fashion-MNIST** - Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. Zalando intends Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits.
#
# Let's load the dataset (PyTorch provides an auto-download feature).
# +
batch_size = 128
# MNIST: downloaded on first run, images converted to [0, 1] tensors
# NOTE(review): train uses root './datasets/' and test './datasets' --
# same directory in practice, but worth unifying for clarity.
mnist_train_dataset = torchvision.datasets.MNIST(root='./datasets/',
                                                 train=True,
                                                 transform=torchvision.transforms.ToTensor(),
                                                 download=True)
mnist_test_dataset = torchvision.datasets.MNIST(root='./datasets',
                                                train=False,
                                                transform=torchvision.transforms.ToTensor())
# Data loader
mnist_train_loader = torch.utils.data.DataLoader(dataset=mnist_train_dataset,
                                                 batch_size=batch_size,
                                                 shuffle=True, drop_last=True)
# We use drop_last=True to avoid the case where the data / batch_size != int
mnist_test_loader = torch.utils.data.DataLoader(dataset=mnist_test_dataset,
                                                batch_size=batch_size,
                                                shuffle=False)
# let's plot some of the samples from the test set
examples = enumerate(mnist_test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print("shape: \n", example_data.shape)
fig = plt.figure()
for i in range(6):
    # 2x3 grid of the first six test digits with their labels
    ax = fig.add_subplot(2,3,i+1)
    ax.imshow(example_data[i][0], cmap='gray', interpolation='none')
    ax.set_title("Ground Truth: {}".format(example_targets[i]))
    ax.set_axis_off()
plt.tight_layout()
# +
# Fashion-MNIST: same layout as MNIST (28x28 grayscale, 10 classes)
fmnist_train_dataset = torchvision.datasets.FashionMNIST(root='./datasets/',
                                                         train=True,
                                                         transform=torchvision.transforms.ToTensor(),
                                                         download=True)
fmnist_test_dataset = torchvision.datasets.FashionMNIST(root='./datasets',
                                                        train=False,
                                                        transform=torchvision.transforms.ToTensor())
# Data loader (drop_last keeps the train batch size constant)
fmnist_train_loader = torch.utils.data.DataLoader(dataset=fmnist_train_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=True, drop_last=True)
fmnist_test_loader = torch.utils.data.DataLoader(dataset=fmnist_test_dataset,
                                                 batch_size=batch_size,
                                                 shuffle=False)
# let's plot some of the samples from the test set
examples = enumerate(fmnist_test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print("shape: \n", example_data.shape)
fig = plt.figure()
for i in range(6):
    # 2x3 grid of the first six test items with their class indices
    ax = fig.add_subplot(2,3,i+1)
    ax.imshow(example_data[i][0], cmap='gray', interpolation='none')
    ax.set_title("Ground Truth: {}".format(example_targets[i]))
    ax.set_axis_off()
plt.tight_layout()
# -
# ### <img src="https://img.icons8.com/color/96/000000/broadcasting.png" style="height:50px;display:inline"> Multi-Layer Perceptron (MLP) in PyTorch
# ---
# * An MLP (other names: Fully-Connected (FC) Network (FCN), Dense Network) is composed of one input layer, one or more hidden layers and a final output layer.
# * Every layer, except the output layer includes a bias neuron which is fully connected to the next layer.
# * When the number of hidden layers is larger than 2, the network is usually called a deep neural network (DNN).
#
# * The algorithm is composed of two main parts: **forward pass** and **backward pass**.
# * In the *forward pass*, for each training instance, the algorithm feeds it to the network and computes the output of every neuron in each consecutive layer (using the network for prediction is just doing a forward pass). Then, the output error (the difference between the desired output and the actual output from the network) is computed.
#
# * After the output error calculation, the network calculates how much each neuron in the last hidden layer contributed to the output error (using the chain rule).
# * It then proceeds to measure how much of these error contributions came from each neuron in the previous layers until reaching the input layer.
# * This is the *backward pass*: measuring the error gradient across all the connection weights in the network by propagating the error gradient backward in the network (this is the backpropagation process).
#
#
# * **In short**: for each training instance the backpropagation algorithm first makes a prediction (forward pass), measures the error, then goes in reverse to measure the error contribution from each connection (backward pass) and finally, using Gradient Descent, updates the weights in the direction that reduces the error.
# <img src="./assets/tut_xiv_mlp.jpg" style="height:200px">
# #### Implementing an MLP
# ---
# We will now see how easy it is to implement a neural network using PyTorch. The first thing to know is that every neural network (NN) inherits from the parent class `torch.nn.Module`, which establishes a shared workflow, that is, every (!) NN in PyTorch has the same properties in terms of functionality. This global design helps with building a common language. We will now meet PyTorch's basic blocks and see why it is so popular.
#
# We will implement the following network:
# * Input dimension: 28 * 28
# * Output dimension: 10 (as the number of classes)
# * Batch size: 128
# * Hidden Layers: 1
# * Hidden Units: 256
# * Optimizer: Adam (Learning Rate: 0.001)
# * Activation: ReLU
# +
# define a two-layer MLP
# method 1
class TwoLayerNet(torch.nn.Module):
    """A minimal two-layer fully-connected network.

    Architecture: Linear(D_in -> H) -> ReLU -> Linear(H -> D_out).

    Parameters:
        D_in  - dimensionality of the input
        H     - number of hidden units
        D_out - dimensionality of the output
    """
    def __init__(self, D_in, H, D_out):
        # Initializing the parent nn.Module is mandatory before any
        # sub-modules can be registered.
        super(TwoLayerNet, self).__init__()
        # Hidden layer, output layer and the non-linearity are kept as
        # named members so their weights are easy to inspect/manipulate.
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        """Run a forward pass.

        Parameters:
            x - input tensor of shape [BATCH_SIZE, D_in]
        Returns the network output of shape [BATCH_SIZE, D_out].
        """
        hidden = self.linear1(x)
        activated = self.relu(hidden)
        return self.linear2(activated)
# method 2
class TwoLayerNetPiped(torch.nn.Module):
    """Same two-layer MLP as TwoLayerNet, expressed as one nn.Sequential.

    Parameters:
        D_in  - dimensionality of the input
        H     - number of hidden units
        D_out - dimensionality of the output
    """
    def __init__(self, D_in, H, D_out):
        # Initializing the parent nn.Module is mandatory before any
        # sub-modules can be registered.
        super(TwoLayerNetPiped, self).__init__()
        # A single pipeline holds the whole computation:
        # Linear(D_in -> H) -> ReLU -> Linear(H -> D_out).
        self.pipe = torch.nn.Sequential(
            torch.nn.Linear(D_in, H),
            torch.nn.ReLU(),
            torch.nn.Linear(H, D_out))

    def forward(self, x):
        """Feed `x` (shape [BATCH_SIZE, D_in]) through the pipeline."""
        out = self.pipe(x)
        return out
# -
# ### When to use method 1 and when to use method 2?
# If you want easy access to the weights of the layers (to do some manipulating on them or analyze them) then it is better to use method 1, otherwise, use method 2.
# +
# hyper-parameters:
num_epochs = 10
learning_rate = 0.001
# Device configuration, as before
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# create model, send it to device
model = TwoLayerNetPiped(D_in=28*28, H=256, D_out=10).to(device)
# Loss and optimizer
# CrossEntropyLoss expects raw logits, so the model has no final softmax.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# -
# Train the model
model.train() # training mode
total_step = len(mnist_train_loader)
start_time = time.time()
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(mnist_train_loader):
        # each i is a batch of 128 samples; drop_last=True on the train
        # loader guarantees the fixed batch_size used in the view below
        images = images.to(device).view(batch_size, -1) # represent images as column vectors
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize - ALWAYS IN THIS ORDER!
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Time: {:.4f} secs'
                   .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(), time.time() - start_time))
# Test the model
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance), or use:
with torch.no_grad(): # "don't keep track of the gradients" -> faster, can also use .detach()
    correct = 0
    total = 0
    for images, labels in mnist_test_loader:
        # test loader has no drop_last, so size the view by the batch
        images = images.to(device).view(images.size(0), -1)
        labels = labels.to(device)
        outputs = model(images)
        # predicted class = argmax over the 10 logits
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# ### <img src="https://img.icons8.com/dusk/50/000000/xlarge-icons.png" style="height:50px;display:inline"> Extra - Convolutional Neural Networks
# ---
# Let's first recap some of the basics:
# ### 2D Convolution
# ---
# Convolution is moving a window or filter across the image being studied. This moving window applies to a certain neighborhood of nodes as shown below – here, the filter applied is (0.5 × the node value):
# <img src="./assets/tut_xiv_filter.jpg" style="height:200px" />
# In the convolutional part of the CNN, we can imagine this 2 x 2 moving filter sliding across all the available nodes / pixels in the input image. This operation can also be illustrated using standard neural network node diagrams:
# <img src="./assets/tut_xiv_layer.jpg" style="height:200px" />
# The first position of the moving filter connections is illustrated by the blue connections, and the second is shown with the green lines. The weights of each of these connections, as stated previously, is 0.5.
#
# Calculating a convolutional layer output size:
# We define the following:
# * $W_{in}$ - the width of the input
# * $F$ - filter size
# * $P$ - padding
# * $S$ -stride
# The output width:
# $$W_{out} = \frac{W_{in} - F + 2P}{S} + 1$$
# In our example, the output of the convolutional layers, just before the FC:
# $$W_{1, out} = \frac{28 - 5 + 2*2}{1} + 1 = 28 \rightarrow MaxPooling(2x2) \rightarrow 28 / 2 = 14$$
# $$W_{2, out} = \frac{14 - 5 + 2*2}{1} + 1 = 14 \rightarrow MaxPooling(2x2) \rightarrow 14 / 2 = 7$$
# So the input to the FC layer is $7x7=49$
#
# ### Feature mapping and multiple channels
# ---
# Since the weights of individual filters are held constant as they are applied over the input nodes, they can be trained to select certain features from the input data. In the case of images, it may learn to recognize common geometrical objects such as lines, edges and other shapes which make up objects. This is where the name feature mapping comes from. Because of this, any convolution layer needs multiple filters which are trained to detect different features.
#
# ### Pooling
# ---
# It is a sliding window type technique, but instead of applying weights, which can be trained, it applies a statistical function of some type over the contents of its window. The most common type of pooling is called max pooling, and it applies the max() function over the contents of the window.
# There are two main benefits to pooling in CNN's:
# 1. It reduces the number of parameters in your model by a process called down-sampling
# 2. It makes feature detection more robust to object orientation and scale changes
#
# Pooling generalizes over lower level, more complex information. Let’s imagine the case where we have convolutional filters that, during training, learn to detect the digit “9” in various orientations within the input images. In order for the Convolutional Neural Network to learn to classify the appearance of “9” in the image correctly, it needs to in some way “activate” whenever a “9” is found anywhere in the image, no matter what the size or orientation the digit is (except for when it looks like “6”, that is). Pooling can assist with this higher level, generalized feature selection. An example can be seen <a href="https://adventuresinmachinelearning.com/convolutional-neural-networks-tutorial-in-pytorch/">here</a>.
#
# ### Strides and down-sampling
# ---
# In the pooling diagram below, you will notice that the pooling window shifts to the right each time by 2 places. This is called a stride of 2, which should be considered both in the x and y direction. In other words, the stride is actually specified as [2, 2]. One important thing to notice is that, if during pooling the stride is greater than 1, then the output size will be reduced. As can be observed below, the 5 x 5 input is reduced to a 3 x 3 output. This is a good thing – it is called down-sampling, and it reduces the number of trainable parameters in the model.
#
# ### Padding
# ---
# In the pooling diagram below is that there is an extra column and row added to the 5 x 5 input – this makes the effective size of the pooling space equal to 6 x 6. This is to ensure that the 2 x 2 pooling window can operate correctly with a stride of [2, 2] and is called padding. These nodes are basically dummy nodes – because the values of these dummy nodes is 0, they are basically invisible to the max pooling operation. Padding will need to be considered when constructing our Convolutional Neural Network in PyTorch.
# <img src="./assets/tut_xiv_max_pool.jpg" style="height:200px" />
# * <a href="adventuresinmachinelearning.com">Image Source</a>
#
# ### The FC Layer
# ---
# The fully connected layer can be thought of as attaching a standard classifier onto the information-rich output of the network, to “interpret” the results and finally produce a classification result. In order to attach this fully connected layer to the network, the dimensions of the output of the Convolutional Neural Network need to be flattened.
#
#
# ### CNN Vs. Fully Connected
# ---
# Fully connected networks with a few layers can only do so much – to get close to state-of-the-art results in image classification it is necessary to go deeper. In other words, lots more layers are required in the network. However, by adding a lot of additional layers, we come across some problems. First, we can run into the vanishing gradient problem. However, this can be solved to an extent by using sensible activation functions, such as the ReLU family of activations. Another issue for deep fully connected networks is that the number of trainable parameters in the model (i.e. the weights) can grow rapidly. This means that the training slows down or becomes practically impossible, and also exposes the model to overfitting. CNNs try to solve this second problem by exploiting correlations between adjacent inputs in images.
# let's build the ConvNet
# TRICK: if you don't want to manually calculate the conv layers output size, you can use a dummy input
# to test the output dimension.
class ConvNet(nn.Module):
    """Two conv blocks (Conv -> BatchNorm -> ReLU -> MaxPool) followed by
    one fully-connected classification layer.

    Parameters:
        input_shape - [channels, height, width] of a single input image
        num_classes - size of the output layer (default 10)
    """
    def __init__(self, input_shape, num_classes=10):
        super(ConvNet, self).__init__()
        # Block 1: input channels -> 16 feature maps, spatial size halved
        # by the 2x2 max-pool.
        self.layer1 = nn.Sequential(
            nn.Conv2d(input_shape[0], 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # Block 2: 16 -> 32 feature maps, spatial size halved again.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # Rather than hand-computing 7*7*32, probe the conv stack with a
        # dummy tensor to discover the flattened feature size.
        self.fc = nn.Linear(self._get_conv_out(input_shape), num_classes)

    def forward(self, x):
        """Return class logits for a batch of images x."""
        features = self.layer2(self.layer1(x))
        flat = features.view(features.size(0), -1)
        return self.fc(flat)

    def _get_conv_out(self, shape):
        """Return the number of features the conv stack emits per image."""
        probe = self.layer2(self.layer1(torch.zeros(1, *shape)))
        return int(np.prod(probe.size()))
# +
# hyper-parameters:
num_epochs = 5
learning_rate = 0.001
input_shape = [1, 28, 28] # num_channels, height, width
# Device configuration, as before
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# create model
model = ConvNet(input_shape=input_shape, num_classes=10).to(device)
# Loss and optimizer
# CrossEntropyLoss expects raw logits, so ConvNet has no final softmax.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# -
# Train the model
# NOTE(review): a freshly constructed nn.Module is already in training
# mode, so model.train() is not strictly needed here -- but calling it
# explicitly (as in the MLP run above) would be clearer.
total_step = len(mnist_train_loader)
start_time = time.time()
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(mnist_train_loader):
        # each i is a batch of 128 samples, kept in image shape
        # (N, 1, 28, 28) for the conv layers -- no flattening here
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Time: {:.4f} secs'
                   .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(), time.time() - start_time))
# Test the model
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad(): # "don't keep track of the gradients" ,can also use .detach()
    correct = 0
    total = 0
    for images, labels in mnist_test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # predicted class = argmax over the 10 logits
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# ## <img src="https://img.icons8.com/color/96/000000/code.png" style="height:50px;display:inline"> Autoencoders
# ---
# * Most of the natural data is high-dimensional, such as images. Consider the MNIST (hand-written digits) dataset, where each image has $28x28=784$ pixels, which means it can be represented by a vector of length 784.
# * But do we really need 784 values to represent a digit? The answer is no. We believe that the data lies on a low-dimensional space which is enough to describe the observations. In the case of MNIST, we can choose to represent digits as one-hot vectors, which means we only need 10 dimensions. So we can **encode** high-dimensional observations in a low-dimensional space.
# * But how can we learn meaningful low-dimensional representations? The general idea is to reconstruct or, **decode**, the low-dimensional representation to the high-dimensional representation, and use the reconstruction error to find the best representations (using the gradients of the error). This is the core idea behind **autoencoders**.
# * **Autoencoders** - models which take data as input and discover some latent state representation of that data. The input data is converted into an encoding vector where each dimension represents some learned attribute about the data. The most important detail to grasp here is that our encoder network is outputting a single value for each encoding dimension. The decoder network then subsequently takes these values and attempts to recreate the original input. Autoencoders have **three parts**: an encoder, a decoder, and a 'loss' function that maps one to the other. For the simplest autoencoders - the sort that compress and then reconstruct the original inputs from the compressed representation - we can think of the 'loss' as describing the amount of information lost in the process of reconstruction.
# * Illustration: <img src="./assets/tut_xv_autoencoder.png" style="height:250px">
#
# * The basic architecture of an autoencoder:
# <img src="./assets/tut_xiv_autoencoder.png" style="height:250px">
# * <a href="https://towardsdatascience.com/applied-deep-learning-part-3-autoencoders-1c083af4d798">Image Source</a>
#
# Let's implement it in PyTorch using what we have learnt so far!
class AutoEncoder(nn.Module):
    """Fully-connected autoencoder.

    Encodes a flattened image into a `latent_dim`-sized code and decodes
    it back to `input_dim` values in [0, 1] (final Sigmoid).

    Parameters:
        input_dim  - flattened input size (default 28*28)
        hidden_dim - width of the hidden layers (default 256)
        latent_dim - size of the bottleneck code (default 10)
    """
    def __init__(self, input_dim=28*28, hidden_dim=256, latent_dim=10):
        super(AutoEncoder, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.latent_dim = latent_dim
        # Encoder: input -> hidden -> hidden -> latent code.
        self.encoder = nn.Sequential(
            nn.Linear(self.input_dim, self.hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hidden_dim, self.latent_dim))
        # Decoder mirrors the encoder; Sigmoid keeps pixels in [0, 1],
        # matching the BCE reconstruction loss used for training.
        self.decoder = nn.Sequential(
            nn.Linear(self.latent_dim, self.hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hidden_dim, self.input_dim),
            nn.Sigmoid())

    def forward(self, x):
        """Encode then decode `x`, returning the reconstruction."""
        return self.decoder(self.encoder(x))

    def get_latent_rep(self, x):
        """Return the latent code for `x` without decoding it."""
        return self.encoder(x)
# +
# hyper-parameters:
num_epochs = 5
learning_rate = 0.001
# Device configuration, as before
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# create model, send it to device
model = AutoEncoder(input_dim=28 * 28, hidden_dim=128, latent_dim=10).to(device)
# Loss and optimizer
# BCE pairs with the decoder's final Sigmoid (outputs in [0,1]).
criterion = nn.BCELoss() # binary cross entropy, as pixels are in [0,1]
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# -
# Train the model (unsupervised: the reconstruction target is the input)
total_step = len(fmnist_train_loader)
start_time = time.time()
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(fmnist_train_loader):
        # each i is a batch of 128 samples; labels are unused here
        images = images.to(device).view(batch_size, -1)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, images)
        # Backward and optimize - ALWAYS IN THIS ORDER!
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Time: {:.4f} secs'
                   .format(epoch + 1, num_epochs, i + 1, total_step, loss.item(), time.time() - start_time))
# let's see some of the reconstructions
model.eval() # evaluation mode (NOTE: eval() does not disable gradients; torch.no_grad() does)
examples = enumerate(fmnist_test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print("shape: \n", example_data.shape)
fig = plt.figure()
for i in range(3):
    # top row: ground-truth test images
    ax = fig.add_subplot(2,3,i+1)
    ax.imshow(example_data[i][0], cmap='gray', interpolation='none')
    ax.set_title("Ground Truth: {}".format(example_targets[i]))
    ax.set_axis_off()
    # bottom row: the autoencoder's reconstruction of the same image
    ax = fig.add_subplot(2,3,i+4)
    recon_img = model(example_data[i][0].view(1, -1).to(device)).data.cpu().numpy().reshape(28, 28)
    ax.imshow(recon_img, cmap='gray')
    ax.set_title("Reconstruction of: {}".format(example_targets[i]))
    ax.set_axis_off()
plt.tight_layout()
# +
# let's compare different dimensionality reduction methods
n_neighbors = 10
n_components = 2
n_points= 500
# Re-create the test loader with batch_size=n_points so a single batch
# supplies all the points used below.
fmnist_test_loader = torch.utils.data.DataLoader(dataset=fmnist_test_dataset,
                                                 batch_size=n_points,
                                                 shuffle=False)
X, labels = next(iter(fmnist_test_loader))
# Latent codes from the trained autoencoder (n_points x latent_dim).
latent_X = model.get_latent_rep(X.to(device).view(n_points, -1)).data.cpu().numpy()
labels = labels.data.cpu().numpy()
# +
# Project the 10-D latent codes to 2-D with five methods and compare
# their class separation side by side (each method is timed).
# NOTE(review): newer scikit-learn versions require keyword arguments
# for LocallyLinearEmbedding/Isomap (n_neighbors=..., n_components=...);
# the positional calls below may warn or fail -- confirm pinned version.
fig = plt.figure(figsize=(15,8))
# PCA
t0 = time.time()
x_pca = PCA(n_components).fit_transform(latent_X)
t1 = time.time()
print("PCA time: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 3, 1)
ax.scatter(x_pca[:, 0], x_pca[:, 1], c=labels, cmap=plt.cm.Spectral)
ax.set_title('PCA')
# KPCA
t0 = time.time()
x_kpca = KernelPCA(n_components, kernel='rbf').fit_transform(latent_X)
t1 = time.time()
print("KPCA time: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 3, 2)
ax.scatter(x_kpca[:, 0], x_kpca[:, 1], c=labels, cmap=plt.cm.Spectral)
ax.set_title('KernelPCA')
# LLE
t0 = time.time()
x_lle = LocallyLinearEmbedding(n_neighbors, n_components, eigen_solver='auto').fit_transform(latent_X)
t1 = time.time()
print("LLE time: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 3, 3)
ax.scatter(x_lle[:, 0], x_lle[:, 1], c=labels, cmap=plt.cm.Spectral)
ax.set_title('LLE')
# Isomap
t0 = time.time()
x_isomap = Isomap(n_neighbors, n_components).fit_transform(latent_X)
t1 = time.time()
print("Isomap time: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 3, 4)
ax.scatter(x_isomap[:, 0], x_isomap[:, 1], c=labels, cmap=plt.cm.Spectral)
ax.set_title('Isomap')
# t-SNE
t0 = time.time()
x_tsne = TSNE(n_components).fit_transform(latent_X)
t1 = time.time()
print("t-SNE time: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 3, 5)
scatter = ax.scatter(x_tsne[:, 0], x_tsne[:, 1], c=labels, cmap=plt.cm.Spectral)
ax.set_title('t-SNE')
# Shared colorbar mapping colors back to the 10 class indices.
bounds = np.linspace(0, 10, 11)
cb = plt.colorbar(scatter, spacing='proportional',ticks=bounds)
cb.set_label('Classes Colors')
plt.tight_layout()
# + [markdown] slideshow={"slide_type": "slide"}
# ### <img src="https://img.icons8.com/bubbles/50/000000/video-playlist.png" style="height:50px;display:inline"> Recommended Videos
# ---
# #### <img src="https://img.icons8.com/cute-clipart/64/000000/warning-shield.png" style="height:30px;display:inline"> Warning!
# * These videos do not replace the lectures and tutorials.
# * Please use these to get a better understanding of the material, and not as an alternative to the written material.
#
# #### Video By Subject
#
# * PyTorch - <a href="https://www.youtube.com/watch?v=_H3aw6wkCv0&t=2s"><NAME>: Deep Neural Networks with PyTorch | PyData Berlin 2018</a>
# -
# ## <img src="https://img.icons8.com/dusk/64/000000/prize.png" style="height:50px;display:inline"> Credits
# ---
# * Icons from <a href="https://icons8.com/">Icon8.com</a> - https://icons8.com
# * Datasets from <a href="https://www.kaggle.com/">Kaggle</a> - https://www.kaggle.com/
|
ee046202_tutorial_07_deep_learn_pytorch_ae.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **WARNING**: Do not commit this file to GitHub if it has a lot of images
#
# YouTube: https://www.youtube.com/playlist?list=PLhhyoLH6Ijfw0TpCTVTNk42NN08H6UvNq
#
# <NAME> put Pascal dataset on Kaggle: https://www.kaggle.com/dataset/734b7bcb7ef13a045cbdd007a3c19874c2586ed0b02b4afc86126e89d00af8d2
#
# VOC2007 original dataset: http://host.robots.ox.ac.uk/pascal/VOC/voc2007/
#
# Model makes S * S * B = 7 * 7 * 2 = 98 bounding box predictions
#
# Only one final prediction is made per cell (the one with the highest confidence (IOU) among the two bounding boxes)
# +
# # %load ../../jupyter_heading.py
# %config Completer.use_jedi = False
# %config InlineBackend.figure_format = "retina"
# %load_ext autoreload
# %autoreload 2
# Widen the notebook cells to use 95% of the browser window.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
from collections import Counter
import os
from os.path import join
import sys
from typing import List, Union
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import pandas as pd
from PIL import Image
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import torchvision.transforms.functional as FT
import torchvision.transforms.transforms as transforms
from tqdm import tqdm
# Make the local `bentorched` package (two directories up) importable.
sys.path.insert(0, "../../")
from bentorched.utils import print_torch_setup
from bentorched.vision.detection import intersection_over_union, non_max_suppression
# Print torch/CUDA environment info (helper from the local package).
print_torch_setup()
# -
# # IOU
# +
# Sanity-check intersection_over_union on hand-crafted box pairs.
# Boxes are [x1, y1, x2, y2] in "corners" format, one row per box.
boxes_pred = torch.Tensor([
    [0, 5, 10, 10],
    [0, 50, 100, 100],
])
# perfect overlap -> IoU should be 1.0 for both pairs
boxes_labels = torch.Tensor([
    [0, 5, 10, 10],
    [0, 50, 100, 100]
])
print(intersection_over_union(boxes_pred, boxes_labels, box_format="corners"))
# shift to the right -> partial overlap, IoU in (0, 1)
boxes_labels = torch.Tensor([
    [5, 5, 15, 10],
    [50, 50, 150, 100]
])
print(intersection_over_union(boxes_pred, boxes_labels, box_format="corners"))
# Completely contained in pred box -> IoU = area(label) / area(pred)
boxes_labels = torch.Tensor([
    [0, 2.5, 10, 10],
    [0, 25, 100, 100]
])
print(intersection_over_union(boxes_pred, boxes_labels, box_format="corners"))
# No overlap -> IoU should be 0
boxes_labels = torch.Tensor([
    [1000, 1000, 2000, 2000],
    [10000, 10000, 20000, 20000]
])
print(intersection_over_union(boxes_pred, boxes_labels, box_format="corners"))
# -
# # NMS
# Sanity-check non_max_suppression. Each candidate box is
# [class, confidence, <4 coordinates>].
# NOTE(review): the positional args appear to be (iou_threshold=0.9,
# threshold=0.6), matching the keyword usage later in this notebook — confirm
# against the non_max_suppression signature in bentorched.
bboxes = [
    [0, 0.9, 0, 10, 0, 20],
    [0, 0.6, 0, 10, 0, 20],
    [0, 0.7, 0, 0, 0, 20],
    [1, 0.9, 0, 10, 0, 20]
]
non_max_suppression(bboxes, 0.9, 0.6, box_format="corners")
# # mAP@x
# +
# mean Average Precision (mAP): mean of Average Precision for each class
# Average Precision: Area under precision-recall curve
# mAP@x: mAP for a IOU threshold of x
# Each box in the test set is a prediction
# -
def mean_average_precision(
    pred_boxes, true_boxes, iou_threshold=0.5, box_format="midpoint", num_classes=20
):
    """
    Calculate mean average precision (mAP) over all classes at one IoU threshold.

    Parameters:
        pred_boxes (list): list of lists containing all bboxes, each specified
            as [train_idx, class_prediction, prob_score, x1, y1, x2, y2]
        true_boxes (list): same format as pred_boxes but for ground truths
        iou_threshold (float): IoU above which a predicted bbox counts as correct
        box_format (str): "midpoint" or "corners" used to specify bboxes
        num_classes (int): number of classes

    Returns:
        float: mAP across all classes at the given IoU threshold.
            Returns 0.0 when no class has any ground-truth boxes
            (previously this raised ZeroDivisionError).
    """
    # AP for each class that has at least one ground-truth box.
    average_precisions = []
    # Used for numerical stability (avoid division by zero) later on.
    epsilon = 1e-6
    for c in range(num_classes):
        # Keep only predictions / ground truths belonging to class c.
        detections = [det for det in pred_boxes if det[1] == c]
        ground_truths = [gt for gt in true_boxes if gt[1] == c]
        # Count ground-truth bboxes per training example; e.g. if img 0 has
        # 3 boxes and img 1 has 5: amount_bboxes = {0: 3, 1: 5}
        amount_bboxes = Counter(gt[0] for gt in ground_truths)
        # Replace each count with a zero tensor that marks which ground-truth
        # boxes have already been matched to a detection:
        # amount_bboxes = {0: tensor([0,0,0]), 1: tensor([0,0,0,0,0])}
        for key, val in amount_bboxes.items():
            amount_bboxes[key] = torch.zeros(val)
        # Sort detections by confidence score (index 2), highest first.
        detections.sort(key=lambda x: x[2], reverse=True)
        TP = torch.zeros((len(detections)))
        FP = torch.zeros((len(detections)))
        total_true_bboxes = len(ground_truths)
        # If no ground truth exists for this class we can safely skip it.
        if total_true_bboxes == 0:
            continue
        for detection_idx, detection in enumerate(detections):
            # Only compare against ground truths from the same image.
            ground_truth_img = [
                bbox for bbox in ground_truths if bbox[0] == detection[0]
            ]
            best_iou = 0
            # Guard: stays -1 when ground_truth_img is empty, in which case
            # best_iou stays 0 and the detection is counted as FP below.
            best_gt_idx = -1
            for idx, gt in enumerate(ground_truth_img):
                iou = intersection_over_union(
                    torch.tensor(detection[3:]),
                    torch.tensor(gt[3:]),
                    box_format=box_format,
                )
                if iou > best_iou:
                    best_iou = iou
                    best_gt_idx = idx
            if best_iou > iou_threshold:
                # Each ground-truth box may only be matched once.
                if amount_bboxes[detection[0]][best_gt_idx] == 0:
                    # True positive: mark this ground-truth box as matched.
                    TP[detection_idx] = 1
                    amount_bboxes[detection[0]][best_gt_idx] = 1
                else:
                    # Duplicate detection of an already-matched box.
                    FP[detection_idx] = 1
            else:
                # IoU too low: the detection is a false positive.
                FP[detection_idx] = 1
        TP_cumsum = torch.cumsum(TP, dim=0)
        FP_cumsum = torch.cumsum(FP, dim=0)
        recalls = TP_cumsum / (total_true_bboxes + epsilon)
        precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon)
        # Prepend the (recall=0, precision=1) starting point of the PR curve.
        precisions = torch.cat((torch.tensor([1]), precisions))
        recalls = torch.cat((torch.tensor([0]), recalls))
        # torch.trapz(y, x): numerical integration -> area under the PR curve.
        average_precisions.append(torch.trapz(precisions, recalls))
    # No class had any ground truths -> mAP is defined as 0 here.
    if not average_precisions:
        return 0.0
    return sum(average_precisions) / len(average_precisions)
# # YOLO
# +
"""
Information about architecture config:
Tuple is structured by (kernel_size, filters, stride, padding)
"M" is simply maxpooling with stride 2x2 and kernel 2x2
List is structured by tuples and lastly int with number of repeats
"""
architecture_config = [
# (kernel_size, out_channels, stride, padding)
(7, 64, 2, 3), # Single CNN block
"M", # Maxpooling layer
(3, 192, 1, 1),
"M",
(1, 128, 1, 0),
(3, 256, 1, 1),
(1, 256, 1, 0),
(3, 512, 1, 1),
"M",
[(1, 256, 1, 0), (3, 512, 1, 1), 4], # 2 CNN blocks repeated 4 times
(1, 512, 1, 0),
(3, 1024, 1, 1),
"M",
[(1, 512, 1, 0), (3, 1024, 1, 1), 2],
(3, 1024, 1, 1),
(3, 1024, 2, 1),
(3, 1024, 1, 1),
(3, 1024, 1, 1),
]
class CNNBlock(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> LeakyReLU(0.1) building block.

    The convolution is created without a bias term because the following
    BatchNorm2d layer makes a bias redundant.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(CNNBlock, self).__init__()
        # Remaining keyword args (kernel_size, stride, padding, ...) are
        # forwarded straight to Conv2d.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.batchnorm = nn.BatchNorm2d(out_channels)
        self.leakyrelu = nn.LeakyReLU(0.1)

    def forward(self, x):
        out = self.conv(x)
        out = self.batchnorm(out)
        return self.leakyrelu(out)
class Yolov1(nn.Module):
    """YOLO v1 network: 'darknet' convolutional backbone + fully-connected head."""

    def __init__(self, in_channels=3, **kwargs):
        super(Yolov1, self).__init__()
        self.architecture = architecture_config
        self.in_channels = in_channels
        # The YOLO convolutional backbone is called "darknet".
        self.darknet = self._create_conv_layers(self.architecture)
        # Prediction head; kwargs: split_size, num_boxes, num_classes.
        self.fcs = self._create_fcs(**kwargs)

    def forward(self, x):
        features = self.darknet(x)
        return self.fcs(torch.flatten(features, start_dim=1))

    def _create_conv_layers(self, architecture):
        """Translate the architecture config into an nn.Sequential backbone."""
        layers = []
        channels = self.in_channels
        for entry in architecture:
            if isinstance(entry, tuple):
                # Single conv block: (kernel_size, out_channels, stride, padding)
                kernel, out_ch, stride, pad = entry
                layers.append(
                    CNNBlock(channels, out_ch, kernel_size=kernel, stride=stride, padding=pad)
                )
                channels = out_ch
            elif isinstance(entry, str):
                # "M": 2x2 max pooling with stride 2.
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            elif isinstance(entry, list):
                # [conv1, conv2, num_repeats]: a pair of conv blocks repeated.
                conv1, conv2, num_repeats = entry
                for _ in range(num_repeats):
                    layers.append(
                        CNNBlock(
                            channels,
                            conv1[1],
                            kernel_size=conv1[0],
                            stride=conv1[2],
                            padding=conv1[3],
                        )
                    )
                    layers.append(
                        CNNBlock(
                            conv1[1],  # in_channels = out_channels of conv1
                            conv2[1],
                            kernel_size=conv2[0],
                            stride=conv2[2],
                            padding=conv2[3],
                        )
                    )
                    channels = conv2[1]
        return nn.Sequential(*layers)

    def _create_fcs(self, split_size, num_boxes, num_classes):
        """Build the fully-connected head mapping features to S*S*(C+B*5) outputs."""
        S, B, C = split_size, num_boxes, num_classes
        # The original paper uses a 4096-unit hidden layer:
        #   nn.Linear(1024*S*S, 4096), nn.LeakyReLU(0.1), nn.Linear(4096, S*S*(B*5+C))
        # 496 is a smaller stand-in to keep memory usage manageable.
        return nn.Sequential(
            nn.Flatten(),
            nn.Linear(1024 * S * S, 496),
            nn.Dropout(0.0),
            nn.LeakyReLU(0.1),
            nn.Linear(496, S * S * (C + B * 5)),
        )
# +
def test(S=7, B=2, C=20):
    """Smoke-test Yolov1: forward a dummy batch and print the output shape."""
    model = Yolov1(split_size=S, num_boxes=B, num_classes=C)
    # (batch_size, channels, H, W)
    x = torch.randn((2, 3, 448, 448))
    print(model(x).shape)
test()
# Expected flattened output size per example: S*S*(C + B*5) = 7*7*30 = 1470
7*7*30
# -
class YoloLoss(nn.Module):
    """
    Calculate the loss for yolo (v1) model
    """
    def __init__(self, S=7, B=2, C=20):
        super(YoloLoss, self).__init__()
        # MSE without dividing by number of examples (just summing)
        self.mse = nn.MSELoss(reduction="sum")
        """
        S is split size of image (in paper 7),
        B is number of boxes (in paper 2),
        C is number of classes (in paper and VOC dataset is 20),
        """
        self.S = S
        self.B = B
        self.C = C
        # These are from Yolo paper
        # Weight for no object loss
        self.lambda_noobj = 0.5
        # Weight for bounding box coordinates
        self.lambda_coord = 5
    def forward(self, predictions, target):
        """
        Compute the total YOLOv1 loss for a batch.

        Args:
            predictions: raw network output, shape (BATCH_SIZE, S*S*(C + B*5)).
            target: label tensor broadcastable to (BATCH_SIZE, S, S, C + B*5);
                only one ground-truth box per cell is encoded (channels 20:25).

        Returns:
            Scalar tensor: weighted sum of coordinate, object, no-object and
            class losses, summed over the batch (not averaged).
        """
        # predictions are shaped (BATCH_SIZE, S*S(C+B*5) when inputted
        predictions = predictions.reshape(-1, self.S, self.S, self.C + self.B * 5)
        # Calculate IoU for all predicted bounding boxes with target bbox
        # TODO: Don't hardcode the number of classes [0-19]
        # Per-cell channel layout (C=20, B=2):
        # 20 = probability for box 1 in image segment
        # 21-24 = box 1 coordinates
        # 25 = probability for box 2 in image segment
        # 26-29 = box 2 coordinates
        iou_b1 = intersection_over_union(predictions[..., 21:25], target[..., 21:25])
        iou_b2 = intersection_over_union(predictions[..., 26:30], target[..., 21:25])
        ious = torch.cat([iou_b1.unsqueeze(0), iou_b2.unsqueeze(0)], dim=0)
        # Take the box with highest IoU out of the two prediction
        # Note that bestbox will be indices of 0, 1 for which bbox was best
        iou_maxes, bestbox = torch.max(ious, dim=0)
        exists_box = target[..., 20].unsqueeze(3) # in paper this is Iobj_i
        # ======================== #
        #   FOR BOX COORDINATES    #
        # ======================== #
        # Set boxes with no object in them to 0. We only take out one of the two
        # predictions, which is the one with highest Iou calculated previously.
        # bestbox is 0 or 1
        box_predictions = exists_box * (
            (
                bestbox * predictions[..., 26:30]
                + (1 - bestbox) * predictions[..., 21:25]
            )
        )
        box_targets = exists_box * target[..., 21:25]
        # Take sqrt of width, height of boxes (the paper regresses sqrt(w),
        # sqrt(h)); sign/abs keep sqrt defined for negative raw outputs.
        # NOTE(review): the 1e-6 is added *inside* abs(); the more common form
        # is torch.abs(...) + 1e-6 — numerically near-identical here, confirm.
        box_predictions[..., 2:4] = torch.sign(box_predictions[..., 2:4]) * torch.sqrt(
            torch.abs(box_predictions[..., 2:4] + 1e-6)
        )
        box_targets[..., 2:4] = torch.sqrt(box_targets[..., 2:4])
        # MSE over the (x, y, sqrt(w), sqrt(h)) coordinates of responsible boxes.
        box_loss = self.mse(
            torch.flatten(box_predictions, end_dim=-2),
            torch.flatten(box_targets, end_dim=-2),
        )
        # ==================== #
        #   FOR OBJECT LOSS    #
        # ==================== #
        # pred_box is the confidence score for the bbox with highest IoU
        pred_box = (
            bestbox * predictions[..., 25:26] + (1 - bestbox) * predictions[..., 20:21]
        )
        object_loss = self.mse(
            torch.flatten(exists_box * pred_box),
            torch.flatten(exists_box * target[..., 20:21]),
        )
        # ======================= #
        #   FOR NO OBJECT LOSS    #
        # ======================= #
        # Alternative (penalize only the more confident box) kept for reference:
        #max_no_obj = torch.max(predictions[..., 20:21], predictions[..., 25:26])
        #no_object_loss = self.mse(
        #    torch.flatten((1 - exists_box) * max_no_obj, start_dim=1),
        #    torch.flatten((1 - exists_box) * target[..., 20:21], start_dim=1),
        #)
        # Penalize confidence of BOTH predicted boxes in cells with no object.
        # Box with aspect ratio 1
        # pred = 20, target = 20
        no_object_loss = self.mse(
            torch.flatten((1 - exists_box) * predictions[..., 20:21], start_dim=1),
            torch.flatten((1 - exists_box) * target[..., 20:21], start_dim=1),
        )
        # Box with aspect ratio 2
        # pred = 25, target = 20
        no_object_loss += self.mse(
            torch.flatten((1 - exists_box) * predictions[..., 25:26], start_dim=1),
            torch.flatten((1 - exists_box) * target[..., 20:21], start_dim=1)
        )
        # ================== #
        #   FOR CLASS LOSS   #
        # ================== #
        # The paper uses MSE (not cross-entropy) for classification as well.
        class_loss = self.mse(
            torch.flatten(exists_box * predictions[..., :20], end_dim=-2,),
            torch.flatten(exists_box * target[..., :20], end_dim=-2,),
        )
        # Add all parts of loss together
        loss = (
            self.lambda_coord * box_loss  # first two rows in paper
            + object_loss  # third row in paper
            + self.lambda_noobj * no_object_loss  # fourth row
            + class_loss  # fifth row
        )
        return loss
# # Get data
# +
# class VOCDataset(Dataset):
# def __init__(self, csv_file, img_dir, label_dir, S=7, B=2, C=20, transform=None):
# self.annotations = pd.read_csv(csv_file)
# self.img_dir = img_dir
# self.label_dir = label_dir
# self.transform = transform
# self.S = S
# self.B = B
# self.C = C
# def __len__(self):
# return len(self.annotations)
# def __getitem__(self, index):
# img_path = join(self.img_dir, self.annotations.iloc[index, 0])
# label_path = join(self.label_dir, self.annotations.iloc[index, 1])
# image = Image.open(img_path)
# boxes = []
# with open(label_path) as f:
# for label in f.readlines():
# class_label, x, y, width, height = [
# float(x) if float(x) != int(float(x)) else int(x)
# for x in label.replace("\n", "").split()
# ]
# boxes.append([class_label, x, y, width, height])
# boxes = torch.tensor(boxes)
# if self.transform:
# image, boxes = self.transform(image, boxes)
# label_matrix = torch.zeros((self.S, self.S, self.C+5*self.B))
# for box in boxes:
# class_label, x, y, width, height = box.tolist()
# class_label = int(class_label)
# # x, y in range [0, 1]. Multiplying by S then truncating gives cell the coordinate is in
# # 1st index is y, 2nd index is x.
# i = int(self.S * y)
# j = int(self.S * x)
# # Coordinates relative to cell coordinates which have dimensions in range [0, 1]
# x_cell = self.S * x - j
# y_cell = self.S * y - i
# width_cell = width * self.S
# height_cell = height * self.S
# if label_matrix[i, j, 20] == 0:
# label_matrix[i, j, 20] = 1
# box_coordinates = torch.tensor([x_cell, y_cell, width_cell, height_cell])
# label_matrix[i, j, 21:25] = box_coordinates
# label_matrix[i, j, class_label] = 1
# return image, label_matrix
# -
class VOCDataset(torch.utils.data.Dataset):
    """
    Pascal VOC detection dataset producing (image, label_matrix) pairs.

    The CSV file is expected to have one row per sample: image filename in
    column 0, label filename in column 1. Each label file holds one box per
    line as "class x y width height" with midpoint coordinates relative to
    the whole image — TODO confirm against the Kaggle dataset layout.
    """
    def __init__(
        self, csv_file, img_dir, label_dir, S=7, B=2, C=20, transform=None,
    ):
        self.annotations = pd.read_csv(csv_file)
        self.img_dir = img_dir
        self.label_dir = label_dir
        self.transform = transform
        # S: grid split size, B: boxes per cell, C: number of classes.
        self.S = S
        self.B = B
        self.C = C
    def __len__(self):
        return len(self.annotations)
    def __getitem__(self, index):
        """Return (image, label_matrix) with label_matrix of shape (S, S, C + 5*B)."""
        label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
        boxes = []
        with open(label_path) as f:
            for label in f.readlines():
                # Parse "class x y w h"; keep whole numbers as int, else float.
                class_label, x, y, width, height = [
                    float(x) if float(x) != int(float(x)) else int(x)
                    for x in label.replace("\n", "").split()
                ]
                boxes.append([class_label, x, y, width, height])
        img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
        image = Image.open(img_path)
        boxes = torch.tensor(boxes)
        if self.transform:
            # image = self.transform(image)
            # The transform receives both image and boxes (see Compose below).
            image, boxes = self.transform(image, boxes)
        # Convert To Cells
        label_matrix = torch.zeros((self.S, self.S, self.C + 5 * self.B))
        for box in boxes:
            class_label, x, y, width, height = box.tolist()
            class_label = int(class_label)
            # i,j represents the cell row and cell column
            i, j = int(self.S * y), int(self.S * x)
            # Midpoint coordinates relative to cell (i, j), each in [0, 1).
            x_cell, y_cell = self.S * x - j, self.S * y - i
            """
            Calculating the width and height of cell of bounding box,
            relative to the cell is done by the following, with
            width as the example:
            width_pixels = (width*self.image_width)
            cell_pixels = (self.image_width)
            Then to find the width relative to the cell is simply:
            width_pixels/cell_pixels, simplification leads to the
            formulas below.
            """
            width_cell, height_cell = (
                width * self.S,
                height * self.S,
            )
            # If no object already found for specific cell i,j
            # Note: This means we restrict to ONE object
            # per cell!
            if label_matrix[i, j, 20] == 0:
                # Set that there exists an object
                label_matrix[i, j, 20] = 1
                # Box coordinates
                box_coordinates = torch.tensor(
                    [x_cell, y_cell, width_cell, height_cell]
                )
                label_matrix[i, j, 21:25] = box_coordinates
                # Set one hot encoding for class_label
                label_matrix[i, j, class_label] = 1
        return image, label_matrix
# # Other utilities
# +
def plot_image(image, boxes):
    """Plot bounding boxes (midpoint format, relative coordinates) on an image."""
    im = np.array(image)
    height, width, _ = im.shape
    # Create figure and axes, then render the image itself.
    fig, ax = plt.subplots(1)
    ax.imshow(im)
    # Each box is [class, confidence, x_mid, y_mid, w, h]; drop the first two
    # entries and draw a red rectangle for the remaining x, y, w, h.
    for box in boxes:
        box = box[2:]
        assert len(box) == 4, "Got more values than in x, y, w, h, in a box!"
        x_mid, y_mid, box_w, box_h = box
        # Convert midpoint + size (relative) to the upper-left corner (pixels).
        corner_x = (x_mid - box_w / 2) * width
        corner_y = (y_mid - box_h / 2) * height
        ax.add_patch(
            patches.Rectangle(
                (corner_x, corner_y),
                box_w * width,
                box_h * height,
                linewidth=1,
                edgecolor="r",
                facecolor="none",
            )
        )
    plt.show()
def get_bboxes(
    loader,
    model,
    iou_threshold,
    threshold,
    pred_format="cells",
    box_format="midpoint",
    device="cuda",
):
    """
    Run the model over a DataLoader and collect NMS-filtered predicted boxes
    and ground-truth boxes, each prefixed with a running example index.

    Returns:
        (all_pred_boxes, all_true_boxes): lists of
        [train_idx, class, confidence, x, y, w, h] entries, the input format
        expected by mean_average_precision.

    NOTE(review): pred_format is currently unused — confirm before removing.
    """
    all_pred_boxes = []
    all_true_boxes = []
    # make sure model is in eval before get bboxes
    model.eval()
    train_idx = 0
    for batch_idx, (x, labels) in enumerate(loader):
        x = x.to(device)
        labels = labels.to(device)
        # No gradients needed for evaluation.
        with torch.no_grad():
            predictions = model(x)
        batch_size = x.shape[0]
        true_bboxes = cellboxes_to_boxes(labels)
        bboxes = cellboxes_to_boxes(predictions)
        for idx in range(batch_size):
            nms_boxes = non_max_suppression(
                bboxes[idx],
                iou_threshold=iou_threshold,
                threshold=threshold,
                box_format=box_format,
            )
            #if batch_idx == 0 and idx == 0:
            #    plot_image(x[idx].permute(1,2,0).to("cpu"), nms_boxes)
            #    print(nms_boxes)
            for nms_box in nms_boxes:
                # Prefix with the running example index so boxes from
                # different images can be told apart downstream.
                all_pred_boxes.append([train_idx] + nms_box)
            for box in true_bboxes[idx]:
                # many will get converted to 0 pred
                if box[1] > threshold:
                    all_true_boxes.append([train_idx] + box)
            train_idx += 1
    # Restore training mode for the caller.
    model.train()
    return all_pred_boxes, all_true_boxes
def convert_cellboxes(predictions, S=7):
    """
    Converts bounding boxes output from Yolo with an image split size of S
    into entire-image ratios rather than relative-to-cell ratios.

    The last dimension is assumed to use the 20-class VOC head layout:
    [0:20] class scores, 20 box-1 confidence, [21:25] box-1 coords,
    25 box-2 confidence, [26:30] box-2 coords.

    Parameters:
        predictions: tensor of shape (batch_size, S*S*30).
        S (int): grid split size.

    Returns:
        Tensor of shape (batch_size, S, S, 6) whose last dimension is
        [predicted_class, best_confidence, x, y, w, h] per cell.
    """
    predictions = predictions.to("cpu")
    batch_size = predictions.shape[0]
    # Fix: the grid size was previously hard-coded to 7 here (and in
    # cell_indices below), silently ignoring the S parameter.
    predictions = predictions.reshape(batch_size, S, S, 30)
    bboxes1 = predictions[..., 21:25]
    bboxes2 = predictions[..., 26:30]
    # Stack both boxes' confidences and pick the more confident box per cell.
    scores = torch.cat(
        (predictions[..., 20].unsqueeze(0), predictions[..., 25].unsqueeze(0)), dim=0
    )
    best_box = scores.argmax(0).unsqueeze(-1)
    best_boxes = bboxes1 * (1 - best_box) + best_box * bboxes2
    # cell_indices[b, i, j, 0] == j; used to shift cell-relative coordinates
    # to image-relative ones.
    cell_indices = torch.arange(S).repeat(batch_size, S, 1).unsqueeze(-1)
    x = 1 / S * (best_boxes[..., :1] + cell_indices)
    # Permute swaps the two spatial axes so the same indices offset y by row.
    y = 1 / S * (best_boxes[..., 1:2] + cell_indices.permute(0, 2, 1, 3))
    w_y = 1 / S * best_boxes[..., 2:4]
    converted_bboxes = torch.cat((x, y, w_y), dim=-1)
    predicted_class = predictions[..., :20].argmax(-1).unsqueeze(-1)
    best_confidence = torch.max(predictions[..., 20], predictions[..., 25]).unsqueeze(
        -1
    )
    converted_preds = torch.cat(
        (predicted_class, best_confidence, converted_bboxes), dim=-1
    )
    return converted_preds
def cellboxes_to_boxes(out, S=7):
    """
    Flatten per-cell predictions into a per-example list of S*S boxes.

    Parameters:
        out: raw model output of shape (batch_size, S*S*30).
        S (int): grid split size.

    Returns:
        list (one entry per example) of S*S lists of
        [class, confidence, x, y, w, h] plain Python floats.
    """
    # Fix: forward S — previously convert_cellboxes was always called with its
    # default split size, so any S != 7 was broken.
    converted_pred = convert_cellboxes(out, S).reshape(out.shape[0], S * S, -1)
    converted_pred[..., 0] = converted_pred[..., 0].long()
    all_bboxes = []
    for ex_idx in range(out.shape[0]):
        bboxes = []
        for bbox_idx in range(S * S):
            bboxes.append([x.item() for x in converted_pred[ex_idx, bbox_idx, :]])
        all_bboxes.append(bboxes)
    return all_bboxes
def save_checkpoint(state, filename="my_checkpoint.pth.tar"):
print("=> Saving checkpoint")
torch.save(state, filename)
def load_checkpoint(checkpoint, model, optimizer):
    """Restore model and optimizer state in place from a checkpoint dict."""
    print("=> Loading checkpoint")
    # The checkpoint dict is expected to carry "state_dict" and "optimizer"
    # entries (see the matching save_checkpoint call sites).
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
# -
# # Train model
# Fix the RNG seed for reproducible weight init and shuffling.
seed = 123
torch.manual_seed(seed)
# Hyperparameters for the overfitting experiment.
LEARNING_RATE = 2.0e-5
DEVICE = "cuda"
BATCH_SIZE = 16
WEIGHT_DECAY = 0
EPOCHS = 100
NUM_WORKERS = 2
PIN_MEMORY = True
# Whether to resume from LOAD_MODEL_FILE (not wired up in this notebook).
LOAD_MODEL = False
LOAD_MODEL_FILE = "overfit.pth.tar"
IMG_DIR = "data/images"
LABEL_DIR = "data/labels"
# TODO: Use albumentations library to transform both image and bounding boxes
class Compose:
    """Apply a sequence of image transforms; bounding boxes pass through unchanged."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, bboxes):
        # Only the image is transformed; the boxes are returned as-is.
        for transform in self.transforms:
            img = transform(img)
        return img, bboxes
# +
# Resize to the 448x448 input YOLOv1 expects, then convert PIL image -> tensor.
transform = Compose([
    transforms.Resize((448, 448)),
    transforms.ToTensor(),
])
def train_fn(train_loader, model, optimizer, loss_fn):
    """
    Train 1 epoch.

    Shows a tqdm progress bar with the per-batch loss and prints the mean
    loss over the epoch when done. Updates `model` in place.
    """
    loop = tqdm(train_loader, leave=True)
    mean_loss = []
    for batch_idx, (x, y) in enumerate(loop):
        x, y = x.to(DEVICE), y.to(DEVICE)
        loss = loss_fn(model(x), y)
        mean_loss.append(loss.item())
        # Standard backprop step: clear grads, backprop, update weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Show the running loss on the progress bar.
        loop.set_postfix(loss=loss.item())
    print(f"Mean loss: {sum(mean_loss) / len(mean_loss)}")
# +
# Build the model, optimizer and loss for the overfitting experiment.
model = Yolov1(split_size=7, num_boxes=2, num_classes=20).to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
loss_fn = YoloLoss()
# 8examples.csv is a tiny subset used to verify the model can overfit.
train_dataset = VOCDataset("data/8examples.csv", transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
test_dataset = VOCDataset("data/test.csv", transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR)
train_loader = DataLoader(
    dataset=train_dataset,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    pin_memory=PIN_MEMORY,
    shuffle=True,
    drop_last=False  # last batch can sometimes have small number of samples (ruins gradient computation)
)
test_loader = DataLoader(
    dataset=test_dataset,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    pin_memory=PIN_MEMORY,
    shuffle=True,
    drop_last=False  # last batch can sometimes have small number of samples (ruins gradient computation)
)
# -
len(train_dataset), len(test_dataset)
# +
# The 20 Pascal VOC object classes; list index == class id used in the labels.
classes = [
    "Aeroplane",
    "Bicycle",
    "Bird",
    "Boat",
    "Bottle",
    "Bus",
    "Car",
    "Cat",
    "Chair",
    "Cow",
    "Diningtable",
    "Dog",
    "Horse",
    "Motorbike",
    "Person",
    "Pottedplant",
    "Sheep",
    "Sofa",
    "Train",
    "Tvmonitor",
]
# Show the (id, name) mapping.
[(i, c) for i, c in enumerate(classes)]
# +
# Pull one batch from the training loader and inspect its shapes.
x, y = next(iter(train_loader))
x.shape, y.shape
# -
# Forward the batch and reshape the flat output into the (S, S, 30) grid.
# NOTE(review): the hard-coded 8 assumes the 8-example dataset fills the
# batch — confirm it matches the actual batch size before reuse.
y_pred = model(x.to(DEVICE)).to("cpu")
y_pred = y_pred.reshape(8, 7, 7, 30)
y_pred
# +
# Walk the 7x7 grid of one example and print the predicted class for every
# cell with a non-zero class score.
#y_example = y[6]
y_example = y_pred[6]
for i in range(7):
    for j in range(7):
        # The first 20 channels of each cell are the class scores.
        y_classes = y_example[i, j, :20]
        #print(y_classes.argmax().item())
        # Replaces `any(y_classes) == 1`, which iterated the tensor
        # element-by-element in Python; the vectorized .any() has the same
        # non-zero truthiness semantics but runs in C.
        if y_classes.any():
            print(classes[y_classes.argmax().item()])
            print(i, j, y_example[i, j])
# -
y_pred.shape
for epoch in range(EPOCHS):
    # Visualize the current model's predictions on the first 8 images of
    # each batch before doing this epoch's weight updates.
    for x, y in train_loader:
        x = x.to(DEVICE)
        # Hoisted out of the idx loop: the forward pass is identical for
        # every idx, so computing it once avoids 7 redundant forward passes
        # (and 7 redundant BatchNorm running-stat updates from repeated
        # train-mode forwards).
        batch_boxes = cellboxes_to_boxes(model(x))
        for idx in range(8):
            bboxes = non_max_suppression(batch_boxes[idx], iou_threshold=0.5, threshold=0.4, box_format="midpoint")
            plot_image(x[idx].permute(1,2,0).to("cpu"), bboxes)
    # Evaluate mAP on the training set, then run one training epoch.
    pred_boxes, target_boxes = get_bboxes(
        train_loader, model, iou_threshold=0.5, threshold=0.4
    )
    mean_avg_prec = mean_average_precision(
        pred_boxes, target_boxes, iou_threshold=0.5, box_format="midpoint"
    )
    print(f"Train mAP: {mean_avg_prec}")
    train_fn(train_loader, model, optimizer, loss_fn)
|
examples/detection/yolo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# 
# ___
# # Chapter 10 - Web Scraping with Beautiful Soup
#
# ## Exploring NavigableString Objects
from bs4 import BeautifulSoup
# ### The BeautifulSoup object
# Name the parser explicitly: without it BeautifulSoup guesses the best
# available parser, emitting a GuessedAtParserWarning and potentially parsing
# differently on machines with different parsers installed.
soup = BeautifulSoup('<b body="description">Product description</b>', 'html.parser')
# ### NavigableString objects
#
# The first <b> tag in the document, as a bs4 Tag object.
tag= soup.b
type(tag)
tag.name
# .string is the tag's text content, a NavigableString (not a plain str).
tag.string
type(tag.string)
nav_string = tag.string
nav_string
# replace_with swaps the string out of the parse tree in place...
nav_string.replace_with('Null')
# ...so the tag's text is now 'Null'.
tag.string
# #### Working with NavigableString objects
# Sample HTML document used for the parsing examples below.
html_doc = '''
<html><head><title>Best Books</title></head>
<body>
<p class='title'><b>DATA SCIENCE FOR DUMMIES</b></p>
<p class='description'>Jobs in data science abound, but few people have the data science skills needed to fill these increasingly important roles in organizations. Data Science For Dummies is the pe
<br><br>
Edition 1 of this book:
<br>
<ul>
<li>Provides a background in data science fundamentals before moving on to working with relational databases and unstructured data and preparing your data for analysis</li>
<li>Details different data visualization techniques that can be used to showcase and summarize your data</li>
<li>Explains both supervised and unsupervised machine learning, including regression, model validation, and clustering techniques</li>
<li>Includes coverage of big data processing tools like MapReduce, Hadoop, Storm, and Spark</li>
</ul>
<br><br>
What to do next:
<br>
<a href='http://www.data-mania.com/blog/books-by-lillian-pierson/' class = 'preview' id='link 1'>See a preview of the book</a>,
<a href='http://www.data-mania.com/blog/data-science-for-dummies-answers-what-is-data-science/' class = 'preview' id='link 2'>get the free pdf download,</a> and then
<a href='http://bit.ly/Data-Science-For-Dummies' class = 'preview' id='link 3'>buy the book!</a>
</p>
<p class='description'>...</p>
'''
soup = BeautifulSoup(html_doc, 'html.parser')
# stripped_strings yields every text node with surrounding whitespace removed.
for string in soup.stripped_strings: print(repr(string))
# Navigate the parse tree: the <title> tag, its parent, and its text.
title_tag = soup.title
title_tag
title_tag.parent
title_tag.string
# The parent of a NavigableString is the tag that contains it.
title_tag.string.parent
|
Ch10/10_02/10_02.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <img style="float: center;" src="images/CI_horizontal.png" width="600">
# <center>
# <span style="font-size: 1.5em;">
# <a href='https://www.coleridgeinitiative.org'>Website</a>
# </span>
# </center>
#
# Ghani, Rayid, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# # Data Visualization in Python
# ---
# ## Table of Contents
# - [Introduction](#Introduction)
# - [Learning Objectives](#Learning-Objectives)
# - [Python Setup](#Python-Setup)
# - [Load the Data](#Load-the-Data)
# - [Our First Chart in `matplotlib`](#Our-First-Chart-in-matplotlib)
# - [A Note on Data Sourcing](#A-Note-on-Data-Sourcing)
# - [Layering in `matplotlib`](#Layering-in-matplotlib)
# - [Our First Chart in `seaborn`](#Our-First-Chart-in-seaborn)
# - [Choosing a Data Visualization package](#Choosing-a-Data-Visualization-Package)
# - [Combining `seaborn` and `matplotlib`](#Combining-seaborn-and-matplotlib)
# - [Visual Encodings](#Visual-Encodings)
# - [Using Hex Codes for Color](#Using-Hex-Codes-for-Color)
# - [Saving Charts as a Variable](#Saving-Charts-as-a-Variable)
# - [An Important Note on Graph Titles](#An-Important-Note-on-Graph-Titles)
# - [Exporting Completed Graphs](#Exporting-Completed-Graphs)
# - [Exercises & Practice](#Exercises-&-Practice)
# - [Additional Resources](#Additional-Resources)
# ## Introduction
# - Back to [Table of Contents](#Table-of-Contents)
#
# In this module, you will learn to quickly and flexibly make a wide series of visualizations for exploratory data analysis and communicating to your audience. This module contains a practical introduction to data visualization in Python and covers important rules that any data visualizer should follow.
#
# ### Learning Objectives
#
# * Learn critical rules about data visualization (using the correct graph types, correctly labeling all visual encodings, properly sourcing data).
#
# * Become familiar with a core base of data visualization tools in Python - specifically matplotlib and seaborn.
#
# * Start to develop the ability to conceptualize what visualizations are going to best reveal various types of patterns in your data.
#
# * Learn more about class data with exploratory analyses.
# ## Python Setup
# - Back to [Table of Contents](#Table-of-Contents)
# +
import pandas as pd
import matplotlib as mplib
import matplotlib.pyplot as plt # visualization package
import seaborn as sns
# database connections
from sqlalchemy import create_engine # to get data from database
from sqlalchemy import __version__ as sql_version
from sqlalchemy import inspect
# so images get plotted in the notebook
# %matplotlib inline
# -
# ## Load the Data
# - Back to [Table of Contents](#Table-of-Contents)
# set up sqlalchemy engine
engine = create_engine('postgresql://10.10.2.10/appliedda')
# Let's focus on the employer data. The employer EIN is a unique identification number for every organization. Grouping by EIN, let's take a look at the total number of employees and the total wages by company during three distinct quarters: the first quarter of 2006, the first quarter of 2010, and the first quarter of 2015.
# We can look at column names within the employers table:
# List the available columns of the employers table by querying the
# information_schema catalog (no data rows are scanned).
query = '''
SELECT *
FROM information_schema.columns
WHERE table_schema = 'il_des_kcmo' AND table_name = 'il_qcew_employers'
'''
# Returns one row per column (name, dtype, etc.) of il_qcew_employers.
pd.read_sql(query, engine)
# +
# For 3 distinct quarters, let's look at total number of employees and total wages per EIN
# Pull employer-level totals for Q1 of 2006, 2010 and 2015, keeping only
# records with a non-empty EIN, positive total wages, and positive
# employment in every month of the quarter. empl_month columns are stored
# as text, hence the ::int casts.
select_string = '''
SELECT ein
, year
, quarter
, SUM(empl_month1::int) AS total_empl1
, SUM(empl_month2::int) AS total_empl2
, SUM(empl_month3::int) AS total_empl3
, SUM(total_wages) AS total_wage
FROM il_des_kcmo.il_qcew_employers
WHERE year in (2006, 2010, 2015)
AND ein != ''
AND quarter = 1
AND total_wages > 0
AND empl_month1::int > 0
AND empl_month2::int > 0
AND empl_month3::int > 0
GROUP BY ein, year, quarter;
'''
# One row per (ein, year, quarter) combination.
ein_empl = pd.read_sql(select_string, engine)
print("Number of rows returned: " + str(len(ein_empl)))
# -
# How many employer records fall in each of the three sampled years?
pd.crosstab(index=ein_empl['year'], columns='count')
# Let's define the average employer monthly wage as the total wages over the quarter divided by the sum of the number of employees in every month of the quarter.
# Person-months of employment over the quarter (skipna=False keeps the
# same NaN propagation as column-wise addition would have).
ein_empl['total_empl'] = ein_empl[['total_empl1', 'total_empl2', 'total_empl3']].sum(axis=1, skipna=False)
ein_empl['avg_wage'] = ein_empl['total_wage'] / ein_empl['total_empl']
ein_empl.head()
# ## Our First Chart in `matplotlib`
# - Back to [Table of Contents](#Table-of-Contents)
#
# Below, we make our first chart in matplotlib. We'll come back to the choice of this particular library in a second, but for now just appreciate that the visualization is creating sensible scales, tick marks, and gridlines on its own.
# Make a simple histogram:
plt.hist(ein_empl['avg_wage'])
plt.show()
# The chart only shows us one bar. What is the distribution of our data?
ein_empl['avg_wage'].describe(percentiles = [.01, .1, .25, .5, .75, .9, .99])
# Since the distribution of average wages is very skewed to the right, let's limit our data to average wages under \$X a month.
# +
# set X: the monthly-wage cutoff (in dollars) used to trim the long right
# tail of the distribution. $10,000/month sits near the top of the
# distribution printed by describe() above -- adjust and re-run as needed.
# BUG FIX: this line was left as a bare `X =`, which is a SyntaxError and
# prevented the whole notebook from running.
X = 10000
## Average wages per company often have a very strong right skew:
max_empl = ein_empl['avg_wage'].max()
print("Maximum average company wage = " + str(max_empl))
## But most companies have an average wage of under $x per month:
(ein_empl['avg_wage'] < X).value_counts()
# +
## So let's just look at companies with average wages under $x per month
ein_empl_lim = ein_empl[(ein_empl['avg_wage'] <= X)]
# Make a simple histogram:
plt.hist(ein_empl_lim['avg_wage'])
plt.show()
# +
## We can change options within the hist function (e.g. number of bins, color, transparency:
plt.hist(ein_empl_lim['avg_wage'], bins=20, facecolor="purple", alpha=0.5)
## And we can affect the plot options too:
plt.xlabel('Average Monthly Wage')
plt.ylabel('Number of Employers')
plt.title('Most Employers Pay Under $X per Month')
## And add Data sourcing:
### xy are measured in percent of axes length, from bottom left of graph:
plt.annotate('Source: IL Department of Labor', xy=(0.7,-0.2), xycoords="axes fraction")
## We use plt.show() to display the graph once we are done setting options:
plt.show()
# -
# ### A Note on Data Sourcing
# - Back to [Table of Contents](#Table-of-Contents)
#
# Data sourcing is a critical aspect of any data visualization. Although here we are simply referencing the agencies that created the data, it is ideal to provide as direct of a path as possible for the viewer to find the data the graph is based on. When this is not possible (e.g. the data is sequestered), directing the viewer to documentation or methodology for the data is a good alternative. Regardless, providing clear sourcing for the underlying data is an **absolute requirement** of any respectable visualization, and further builds trust and enables reproducibility.
# ### Layering in `matplotlib`
# - Back to [Table of Contents](#Table-of-Contents)
#
# This functionality - where we can make consecutive changes to the same plot - also allows us to layer on multiple plots. By default, the first graph you create will be at the bottom, with ensuing graphs on top.
#
# Below, we see the 2015 histogram, in blue, is beneath the 2006 histogram, in orange. You might also notice that since 2006, the number of employers paying high average wages has increased, but the number of companies paying an average monthly wage of around $X has decreased.
plt.hist(ein_empl_lim[ein_empl_lim['year'] == 2015].avg_wage, facecolor="blue", alpha=0.6)
plt.hist(ein_empl_lim[ein_empl_lim['year'] == 2006].avg_wage, facecolor="orange", alpha=0.6)
plt.annotate('Source: IL Department of Labor', xy=(0.7,-0.2), xycoords="axes fraction")
plt.show()
# ## Our First Chart in `seaborn`
# - Back to [Table of Contents](#Table-of-Contents)
#
# Below, we quickly use pandas to create an aggregation of our job data - the total number of jobs by year. Then we pass the data to the barplot function in the `seaborn` function, which recall we imported as `sns` for short.
# +
## Calculate average wages by year:
overall_avg_wage = ein_empl.groupby('year')[['total_empl', 'total_wage']].sum().reset_index()
overall_avg_wage['average_wages'] = overall_avg_wage['total_wage']/overall_avg_wage['total_empl']
overall_avg_wage.columns = ['year', 'total_empl', 'total_wage', 'average_wages']
print(type(overall_avg_wage))
print("***********")
print(overall_avg_wage)
# -
## Barplot function
# Note we can reference column names (in quotes) in the specified data:
sns.barplot(x='year', y='average_wages', data=overall_avg_wage)
plt.show()
# You might notice that if you don't include plt.show(), Jupyter will still produce a chart. However this is not the case in other environments. So we will continue using plt.show() to more formally ask for Python to display the chart we have constructed, after adding all layers and setting all options.
# +
## Seaborn has a great series of charts for showing distributions across a categorical variable:
sns.factorplot(x='year', y='avg_wage', hue='year', data=ein_empl_lim, kind='box')
plt.show()
## Other options for the 'kind' argument include 'bar' and 'violin'
# -
# Already you might notice some differences between matplotlib and seaborn - at the very least seaborn allows us to more easily reference column names within a pandas dataframe, whereas matplotlib clearly has a plethora of options.
# ## Choosing a Data Visualization Package
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# There are many excellent data visualization modules available in Python, but for the tutorial we will stick to the tried and true combination of `matplotlib` and `seaborn`. You can read more about different options for data visualization in Python in the [Additional Resources](#Additional-Resources) section at the bottom of this notebook.
#
# `matplotlib` is very expressive, meaning it has functionality that can easily account for fine-tuned graph creation and adjustment. However, this also means that `matplotlib` is somewhat more complex to code.
#
# `seaborn` is a higher-level visualization module, which means it is much less expressive and flexible than matplotlib, but far more concise and easier to code.
#
# It may seem like we need to choose between these two approaches, but this is not the case! Since `seaborn` is itself written in `matplotlib` (you will sometimes see `seaborn` be called a `matplotlib` 'wrapper'), we can use `seaborn` for making graphs quickly and then `matplotlib` for specific adjustments. When you see `plt` referenced in the code below, we are using `matplotlib`'s pyplot submodule.
#
#
# `seaborn` also improves on `matplotlib` in important ways, such as the ability to more easily visualize regression model results, creating small multiples, enabling better color palettes, and improve default aesthetics. From [`seaborn`'s documentation](https://seaborn.pydata.org/introduction.html):
#
# > If matplotlib 'tries to make easy things easy and hard things possible', seaborn tries to make a well-defined set of hard things easy too.
# +
## Seaborn offers a powerful tool called FacetGrid for making small multiples of matplotlib graphs:
### Create an empty set of grids:
facet_histograms = sns.FacetGrid(ein_empl_lim, col='year', hue='year')
## 'map' a histogram to each grid:
facet_histograms = facet_histograms.map(plt.hist, 'avg_wage')
## Data Sourcing:
plt.annotate('Source: IL Department of Labor', xy=(0.6,-0.35), xycoords="axes fraction")
plt.show()
# -
## Alternatively, you can create and save several charts:
for i in set(ein_empl_lim["year"]):
tmp = ein_empl_lim[ein_empl_lim["year"] == i]
plt.hist(tmp["avg_wage"])
plt.xlabel('Average Monthly Wage')
plt.ylabel('Number of Employers')
plt.title(str(i))
plt.annotate('Source: IL Department of Labor', xy=(0.7,-0.2), xycoords="axes fraction")
filename = "output/graph_" + str(i) + ".pdf"
plt.savefig(filename)
plt.show()
# ### Combining `seaborn` and `matplotlib`
# - Back to [Table of Contents](#Table-of-Contents)
#
# Below, we use `seaborn` for setting an overall aesthetic style and then faceting (creating small multiples). We then use `matplotlib` to set very specific adjustments - things like adding the title, adjusting the locations of the plots, and sizing the graph space. This is a pretty prototypical use of the power of these two libraries together.
#
# More on [`seaborn`'s set_style function](https://seaborn.pydata.org/generated/seaborn.set_style.html).
# More on [`matplotlib`'s figure (fig) API](https://matplotlib.org/api/figure_api.html).
# +
# Seaborn's set_style function allows us to set many aesthetic parameters.
sns.set_style("whitegrid")
facet_histograms = sns.FacetGrid(ein_empl_lim, col='year', hue='year')
facet_histograms.map(plt.hist, 'avg_wage')
## We can still change options with matplotlib, using facet_histograms.fig
facet_histograms.fig.subplots_adjust(top=0.85)
facet_histograms.fig.suptitle("Employer Average Monthly Wages Improved since 2006", fontsize=14)
facet_histograms.fig.set_size_inches(10,5)
## Add a legend for hue (color):
facet_histograms = facet_histograms.add_legend()
## Data Sourcing:
plt.annotate('Source: IL LEHD', xy=(0.6,-0.35), xycoords="axes fraction")
plt.show()
# -
# ## Visual Encodings
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# We often start with charts that use 2-dimensional position (like a scatterplot) or that use height (like histograms and bar charts). This is because these visual encodings - the visible mark that represents the data - are particularly perceptually strong. This means that when humans view these visual encodings, they are more accurate in estimating the underlying numbers than encodings like size (think circle size in a bubble chart) or angle (e.g. pie chart).
#
# For more information on visual encodings and data visualization theory, see:
#
# * [Designing Data Visualizations, Chapter 4](http://www.safaribooksonline.com/library/view/designing-data-visualizations/9781449314774/ch04.html) by <NAME> and <NAME>
#
# * Now You See It - book by <NAME>
# +
select_string = "SELECT year, SUM(empl_month1::int + empl_month2::int + empl_month3::int) AS total_empl, SUM(total_wages) AS total_wages"
select_string += " FROM il_des_kcmo.il_qcew_employers"
select_string += " WHERE year::int > 2005"
select_string += " GROUP BY year"
yearly_avg_wages = pd.read_sql(select_string, engine)
# -
yearly_avg_wages['avg_wage'] = yearly_avg_wages['total_wages']/yearly_avg_wages['total_empl']
yearly_avg_wages = yearly_avg_wages.sort_values('year')
yearly_avg_wages
# +
## We can pass a single value to a the tsplot function to get a simple line chart:
sns.tsplot(data=yearly_avg_wages['avg_wage'], color="#179809")
## Data Sourcing:
plt.annotate('Source: IL Department of Labor', xy=(0.8,-0.20), xycoords="axes fraction")
plt.show()
# -
# ### Using Hex Codes for Color
# - Back to [Table of Contents](#Table-of-Contents)
#
# In the graph above, you can see I set the color of the graph with a pound sign `#` followed by a series of six numbers. This is a hexcode - which is short for hexadecimal code. A hexadecimal code lets you specify one of over 16 million colors using combinations of red, green, and blue. It first has two digits for red, then two digits for green, and lastly two digits for blue: `#RRGGBB`
#
# Further, these codes allow for you to specify sixteen integers (thus hexadecimal) for each digit, in this order:
#
# (0,1,2,3,4,5,6,7,8,9,A,B,C,D,E,F)
#
# Over time, it gets easier to read these codes. For instance, above, I used the hex code "#179809". Understanding how hex codes work, I can see that there is a relatively low number for red (17) and fairly high number for green (98) and another low number for blue (09). Thus it shouldn't be too surprising that a green color resulted in the graph.
#
# Tools like [Adobe Color](https://color.adobe.com) and this [Hex Calculator](https://www.w3schools.com/colors/colors_hexadecimal.asp) can help you get used to this system.
#
# Most modern browsers also support eight digit hex codes, in which the first two enable transparency, which is often called 'alpha' in data visualization: `#AARRGGBB`
# +
## We can add the time argument to set the x-axis correctly. And let's change the color, since we can:
sns.tsplot(data=yearly_avg_wages['avg_wage'], time=yearly_avg_wages['year'], color="#B088CD")
# Color Note: B088CD
## The highest values are red 'B0' and blue 'CD', so we can expect a mix of those
## Further this is high in all three colors, so it'll be light, not dark
## Data Sourcing:
plt.annotate('Source: IL Department of Labor', xy=(0.8,-0.20), xycoords="axes fraction")
plt.show()
# -
# ### Saving Charts as a Variable
# - Back to [Table of Contents](#Table-of-Contents)
#
# Although as you can see above, we can immediately print our plots on a page, it is generally better to save them as a variable. We can then alter the charts over several lines before finally displaying them with the `show()` function, which comes from the `matplotlib` `pyplot` module we loaded earlier.
# +
## Save the line chart as 'graph'
graph = sns.tsplot(data=yearly_avg_wages['avg_wage'], time=yearly_avg_wages['year'])
## To add data labels, we loop over each row and use graph.text()
for i, row, in yearly_avg_wages.iterrows():
graph.text(row["year"] + 0.05, row["avg_wage"] - 50, int(row["year"]))
## Now change x-axis and y-axis labels:
graph.set(xlabel="Year", ylabel="Average Annual Wage")
graph.set(title="Rising Annual Wages since the 2009 Financial Crisis")
plt.annotate('Source: IL Department of Labor', xy=(0.8,-0.20), xycoords="axes fraction")
## Then display the plot:
plt.show()
# -
# ### An Important Note on Graph Titles
# - Back to [Table of Contents](#Table-of-Contents)
#
# The title of a visualization occupies the most valuable real estate on the page. If nothing else, you can be reasonably sure a viewer will at least read the title and glance at your visualization. This is why you want to put thought into making a clear and effective title that acts as a **narrative** for your chart. Many novice visualizers default to an **explanatory** title, something like: "Average Wages Over Time (2006-2016)". This title is correct - it just isn't very useful. This is particularly true since any good graph will have explained what the visualization is through the axes and legends. Instead, use the title to reinforce and explain the core point of the visualization. It should answer the question "Why is this graph important?" and focus the viewer onto the most critical take-away.
# ## Exporting Completed Graphs
# - Back to [Table of Contents](#Table-of-Contents)
#
# When you are satisfied with your visualization, you may want to save a copy outside of your notebook. You can do this with `matplotlib`'s savefig function. You simply need to run:
#
# plt.savefig("fileName.fileExtension")
#
# The file extension is actually surprisingly important. Image formats like png and jpeg are actually **not ideal**. These file formats store your graph as a giant grid of pixels, which is space-efficient, but can't be edited later. Saving your visualizations instead as a PDF is strongly advised. PDFs are a type of vector image, which means all the components of the graph will be maintained.
#
# With PDFs, you can later open the image in a program like Adobe Illustrator and make changes like the size or typeface of your text, move your legends, or adjust the colors of your visual encodings. All of this would be impossible with a png or jpeg.
# +
## Save the line chart as 'graph'
graph = sns.tsplot(data=yearly_avg_wages['avg_wage'], time=yearly_avg_wages['year'])
## To add data labels, we loop over each row and use graph.text()
for i, row, in yearly_avg_wages.iterrows():
graph.text(row["year"] + 0.05, row["avg_wage"] - 50, int(row["year"]))
## Now change x-axis and y-axis labels:
graph.set(xlabel="Year", ylabel="Average Annual Wage")
graph.set(title="Rising Annual Wages since the 2009 Financial Crisis")
plt.annotate('Source: IL Department of Labor', xy=(0.8,-0.20), xycoords="axes fraction")
plt.savefig('output/wageplot.png')
plt.savefig('output/wageplot.pdf')
# -
# ## Exercises & Practice
# - Back to [Table of Contents](#Table-of-Contents)
# ### Exercise 1: Directed Scatterplot
# - Back to [Table of Contents](#Table-of-Contents)
#
# A directed scatterplot still uses one point for each year, but then uses the x-axis and the y-axis for variables. In order to maintain the ordinal relationship, a line is drawn between the years. To do this in seaborn, we actually use sns.FacetGrid, which allows us to overlay different plots together. Specifically, it lets us overlay a scatterplot (`plt.scatter`) and a line chart (`plt.plot`).
# +
# We can also look at a scatterplot of the number of people and averages wages in each year:
scatter = sns.lmplot(x='total_empl', y='avg_wage', data=yearly_avg_wages, fit_reg=False)
scatter.set(xlabel="Number of Employees", ylabel="Average Annual Wages", title="Number and Wages of IL Employees")
## Sourcing:
plt.annotate('Source: IL Department of Labor', xy=(0.8,-0.20), xycoords="axes fraction")
plt.show()
# +
cncted_scatter = sns.FacetGrid(data=yearly_avg_wages, size=7)
cncted_scatter.map(plt.scatter, 'total_empl', 'avg_wage', color="#A72313")
cncted_scatter.map(plt.plot, 'total_empl', 'avg_wage', color="#A72313")
cncted_scatter.set(title="Rising Wages of IL Employees", xlabel="Number of Employees", ylabel="Average Wages")
## Adding data labels:
for i, row, in yearly_avg_wages.iterrows():
plt.text(row["total_empl"], row["avg_wage"], int(row["year"]))
## Sourcing:
plt.annotate('Source: IL Department of Labor', xy=(0.8,-0.10), xycoords="axes fraction")
plt.show()
# -
# ### Exercise 2: Heatmap
# - Back to [Table of Contents](#Table-of-Contents)
#
# Below, we reconsider the count of jobs by industry that we calculated in the "Variables" notebook. We query the database and collect the sum of the jobs in every industry from the LODES data. We then format this data into a wide DataFrame using `pandas`. This grid is the format that `seaborn`'s heatmap function is expecting.
#
# If you would like to reproduce this type of graph, query one of the tables again and create dataframe in the correct format, then pass that along to seaborn's heatmap function. Use the code you learned above to add a title, better axis labels, and data sourcing.
#
# Note that the color map used here, `viridis`, is a scientifically derived color palette meant to be perceptually linear. The color maps `inferno`, `plasma` and `magma` also all meet this criteria.
#
# __More information:__
# * [seaborn heatmap documentation](http://seaborn.pydata.org/generated/seaborn.heatmap.html)
#
# * [matplotlib color map documentation](http://matplotlib.org/users/colormap.html)
query = '''
SELECT *
FROM public.lodes_workplace_area_characteristics
WHERE segment = 'S000' AND jobtype = 'JT01' AND state = 'il'
LIMIT 20;
'''
wac = pd.read_sql(query, engine)
# +
filter_col = [col for col in wac if col.startswith('cn')]
query = '''
SELECT
year'''
for col in filter_col:
query += '''
, sum({0:}) as {0:}'''.format(col)
query += '''
FROM public.lodes_workplace_area_characteristics
WHERE segment = 'S000' AND jobtype = 'JT01' AND state = 'il'
GROUP BY year
ORDER BY year
'''
wac_year_stats = pd.read_sql(query, engine, index_col='year')
# -
wac_year_stats['total_jobs'] = wac_year_stats.sum(axis=1)
for var in filter_col:
wac_year_stats[var] = (wac_year_stats[var]/wac_year_stats['total_jobs'])*100
del wac_year_stats['total_jobs']
wac_year_stats = wac_year_stats.T
## Create a heatmap, with annotations:
pd.options.display.float_format = '{:.2f}%'.format
fig, ax = plt.subplots(figsize = (20,12))
sns.heatmap(wac_year_stats, annot=True, fmt='.2f', cmap="viridis")
plt.show()
# ### Exercise 3: Joinplot
# - Back to [Table of Contents](#Table-of-Contents)
#
# Below, we pull two continuous variables from the Missouri Department of Labor, summed over each employer. See if you can pass this data to the sns.jointplot() function. Some of the arguments have been filled out for you, while others need completion.
pd.read_sql("SELECT * FROM il_des_kcmo.il_qcew_employers LIMIT 5;",engine)
# +
## Querying Total Wages and Jobs by Employer
select_string = "SELECT ein, sum(total_wage) as agg_wages, sum(empl_month1::int + empl_month2::int + empl_month3::int) as agg_jobs"
select_string += " FROM il_des_kcmo.il_qcew_employers"
select_string += " WHERE year = 2016"
select_string += " GROUP BY ein"
print(select_string)
## Run SQL query:
employers = pd.read_sql(select_string, engine)
print(len(employers))
# +
## Fill in the arguments (x, y, data) below to get the visualization to run.
# sns.jointplot(x=, y=, data=
# , color="#137B80", marginal_kws={"bins":30})
# plt.show()
# -
# ### Exercise 4: FacetGrid
# - Back to [Table of Contents](#Table-of-Contents)
#
# Let's see if we can use seaborn's FacetGrid to create small multiple scatterplots. First you need to query a database and get at least one categorical variable and at least two continuous variables (floats).
#
# Then try passing this data to the FacetGrid function from `seaborn` and the scatter function from `matplotlib`.
#
# [FacetGrid Documentation](http://seaborn.pydata.org/examples/many_facets.html)
# +
## Pseudo-code to get you started:
# grid = sns.FacetGrid(dataframe, col = "categorical_var", hue="categorical_var", col_wrap=2)
# grid.map(plt.scatter("x_var", "y_var"))
# +
## Enter your code for excercise 3 here:
# +
## Submit results by saving to shared folder (use code below):
# myname = !whoami
plt.savefig('/nfshome/{0}/Projects/ada_18_uchi/shared/Class_Submits/Data_Visualization/{0}_1.png'.format(myname[0]))
# -
# ### Exercise 5: Geographic Visualization
# Another important feature in data visualization is mapping out a metric according to geography. In the following graphic, we color blocks in Kansas City according to the number of jobs on the LEHD data. We will use the Python packages `geopandas` and `matplotlib`.
import geopandas as gpd
# +
# TO BE UPDATED
# query = """
# SELECT geoid10, geom_wgs, sum(c000) tot_jobs
# FROM b
# JOIN lodes_workplace_area_characteristics w
# ON w.w_geocode = b.geoid10
# WHERE w.segment = 'S000' AND w.jobtype = 'JT01' AND w.year = 2010
# group by geoid10, geom_wgs
# """
# gdf = gpd.read_postgis(query, engine, geom_col='geom_wgs', crs='+init=epsg:4326')
# -
fig, ax = plt.subplots(1, figsize = (10,15))
gdf.plot('tot_jobs', cmap = 'plasma', scheme = 'quantiles', legend = True, edgecolor = 'grey', ax = ax)
# ### Exercise 6: One more Visualization
# - Back to [Table of Contents](#Table-of-Contents)
#
# Test your mettle. Check out the seaborn [data visualization gallery](http://seaborn.pydata.org/examples) and see if you can implement an interesting visualization. Don't forget to submit your results by saving to the shared folder.
# +
## Your code here.
# # myname = !whoami
# export_file.to_csv(
# '/nfshome/{0}/Projects/ada_kcmo/shared/Class_Submits/Data_Visualization/{0}.csv'.format(myname[0])
# , index = False)
# -
# ---
# ## Additional Resources
#
# * [A Thorough Comparison of Python's DataViz Modules](https://dsaber.com/2016/10/02/a-dramatic-tour-through-pythons-data-visualization-landscape-including-ggplot-and-altair)
#
# * [Seaborn Documentation](http://seaborn.pydata.org)
#
# * [Matplotlib Documentation](https://matplotlib.org)
#
# * [Advanced Functionality in Seaborn](blog.insightdatalabs.com/advanced-functionality-in-seaborn)
#
# * Other Python Visualization Libraries:
# * [`Bokeh`](http://bokeh.pydata.org)
#
# * [`Altair`](https://altair-viz.github.io)
#
# * [`ggplot`](http://ggplot.yhathq.com)
#
# * [`Plotly`](https://plot.ly)
|
notebooks_business_vitality/week_1/day_2/3_1_Data_Visualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ищем мусор на побережье Камчатки по фоткам с дрона
#
# Мы обучили бейзлайн-модель для поиска и сегментации мусора. Она была обучена на смеси датасета, который предоставлен вам как обучающий и на наших дополнительных данных.
# Этот пример предназначен только для того, чтобы продемонстрировать как собрать данные для отправки, вы не обязаны использовать архитектуру/фреймворк/whatsoever. Более того, так как у вас нет наших весов и части функций, этот пример невозможно будет просто взять и завести на вашей машине, модель придется менять.
# ### Импорты, функции
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
import numpy as np
import os, json, cv2, random, yaml
from tqdm.auto import tqdm
from matplotlib import pyplot as plt
# %matplotlib inline
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.utils.visualizer import ColorMode
from detectron2.engine import default_setup
def setup(path_to_subconfig, load_from_checkpoint=None):
    """Build and return a detectron2 config.

    Starts from the model-zoo Mask R-CNN R50-FPN 3x config, resets the
    backbone keys to None before layering the local sub-config on top,
    optionally points MODEL.WEIGHTS at a model-zoo checkpoint, and runs
    detectron2's default setup (logging, env collection, etc.).
    """
    cfg = get_cfg()
    base_cfg_file = model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    cfg.merge_from_file(base_cfg_file)
    # Null out these backbone keys so the sub-config's values are merged
    # on a clean slate rather than over model-zoo defaults.
    for backbone_key in ("IN_CHANNELS", "NUM_CLASSES", "TYPE"):
        setattr(cfg.MODEL.BACKBONE, backbone_key, None)
    cfg.merge_from_file(path_to_subconfig)
    if load_from_checkpoint is not None:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(load_from_checkpoint)
    default_setup(cfg, None)
    return cfg
from skimage.io import imread
# +
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """Per-class IoU (scaled x100) between predicted and true label maps.

    Args:
        preds, labels: integer label maps (numpy arrays or torch tensors).
            When ``per_image`` is True, iterables of per-image maps.
        C: number of classes, or an explicit list of class ids to score.
        EMPTY: score assigned when a class appears in neither map.
        ignore: class id excluded from scoring (and from unions).
        per_image: score each image separately, then average per class.
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    per_image_scores = []
    for pred, label in zip(preds, labels):
        class_ids = C if isinstance(C, list) else range(C)
        scores = []
        for cls in class_ids:
            if cls == ignore:
                # The ignored label is sometimes among predicted classes.
                continue
            intersection = ((label == cls) & (pred == cls)).sum()
            union = ((label == cls) | ((pred == cls) & (label != ignore))).sum()
            scores.append(float(intersection) / float(union) if union else EMPTY)
        per_image_scores.append(scores)
    # Average across images per class (a no-op for a single image).
    averaged = [np.mean(cls_scores) for cls_scores in zip(*per_image_scores)]
    return 100 * np.array(averaged)
def get_iou(preds, labels, label_to_calculate=None):
    """Mean IoU (x100) computed from raw class logits.

    ``preds`` carries class scores on dim 1 (argmax'd to hard labels);
    ``labels`` is indexed as ``labels[:, 0]`` to drop its channel dim.
    With ``label_to_calculate`` set, only that class is scored; otherwise
    all classes except background (index 0) are averaged.
    """
    num_classes = preds.shape[1]
    hard_preds = torch.argmax(preds, 1)
    flat_labels = labels[:, 0]
    if label_to_calculate is None:
        # Ignoring the background label.
        return iou(hard_preds, flat_labels, num_classes)[1:].mean()
    return iou(hard_preds, flat_labels, [label_to_calculate]).mean()
# -
# ### Конфигурируем модель и параметры датасета
# + tags=[]
# Build the run config from the local sub-config on top of the COCO
# Mask R-CNN R50-FPN 3x baseline (swap in the commented line for R101).
cfg = setup('detectron_config.yaml', 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')
# cfg = setup('detectron_config.yaml', 'COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml')
# -
# Register the class names for the "waste" dataset so visualizations can
# label detections. NOTE(review): 'metall' spelling must match the
# training labels -- do not "correct" it here alone.
MetadataCatalog.get("waste").set(thing_classes=['metall', 'net', 'plastic', 'wood'])
waste_metadata = MetadataCatalog.get("waste")
# Критично важная функция для отправки своих результатов: Run Length Encoding масок.
# +
def rle(inarray):
    """Run-length encode an array (partial credit to R's rle function).

    Handles multiple dtypes, including non-numpy input such as lists of
    strings.

    Returns:
        tuple (runlengths, startpositions, values), or
        (None, None, None) for empty input.
    """
    arr = np.asarray(inarray)  # force numpy
    n = len(arr)
    if n == 0:
        return (None, None, None)
    changed = arr[1:] != arr[:-1]          # pairwise unequal (string safe)
    run_ends = np.append(np.where(changed), n - 1)  # must include last position
    run_lengths = np.diff(np.append(-1, run_ends))
    run_starts = np.cumsum(np.append(0, run_lengths))[:-1]
    return (run_lengths, run_starts, arr[run_ends])
def rlencode_mask(mask):
    """Encode a binary mask as a "start length start length ..." string.

    Only runs of truthy pixels (in flattened order) are emitted; a mask
    with no truthy pixels encodes as "0 0".
    """
    lengths, starts, values = rle(mask.flatten())
    starts_of_true = starts[values]
    lengths_of_true = lengths[values]
    pairs = ['{} {}'.format(s, l) for s, l in zip(starts_of_true, lengths_of_true)]
    return ' '.join(pairs) if pairs else '0 0'
# -
# ### Грузим веса, инициализируем инференс-класс
cfg.MODEL.WEIGHTS = "model_final.pth" # path to the model we just trained
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set a custom testing threshold
predictor = DefaultPredictor(cfg)
# + [markdown] tags=[]
# ### convert binary imgs to COCO format and train
# +
# import cv2
# im = cv2.imread('data/00_net.png', 0)
# +
# img_paths
# + tags=[]
# cfg.get("DEVICE")
# + tags=[]
# import json
# import numpy as np
# from pycocotools import mask
# from skimage import measure
# from glob import glob
# from detectron2.structures import BoxMode
# class_convert = {'metall':0, 'net':1, 'plastic':2, 'wood':3}
# img_paths = [img_path for img_path in glob('data/*') if 'JPG' in img_path]
# dataset = []
# count = 112
# for im_path in img_paths:
# im_num = im_path.split('/')[-1].split('_')[0]
# img_masks_paths = [img_path for img_path in glob('data/*') if 'png' in img_path and im_num in img_path]
# annotation_base = {
# "file_name": im_path,
# "image_id": count,
# "annotations": []\
# }
# for mask_path in img_masks_paths:
# class_name = mask_path.split('_')[-1].split('.')[0]
# im = cv2.imread(mask_path, 0)
# ground_truth_binary_mask = im
# fortran_ground_truth_binary_mask = np.asfortranarray(ground_truth_binary_mask)
# encoded_ground_truth = mask.encode(fortran_ground_truth_binary_mask)
# ground_truth_area = mask.area(encoded_ground_truth)
# ground_truth_bounding_box = mask.toBbox(encoded_ground_truth)
# print(ground_truth_bounding_box)
# break
# break
# contours = measure.find_contours(ground_truth_binary_mask, 0.5)
# orig_im_path = im_path.split('_')[0]+'_'+'image.JPG'
# annotation = {
# "segmentation": [],
# "area": ground_truth_area.tolist(),
# "iscrowd": 0,
# "bbox": ground_truth_bounding_box.tolist(),
# 'bbox_mode': BoxMode.XYXY_ABS,
# "category_id": class_convert[class_name],
# }
# for contour in contours:
# contour = np.flip(contour, axis=1)
# segmentation = contour.ravel().tolist()
# annotation["segmentation"].append(segmentation)
# annotation_base['annotations'].append(annotation)
# # dataset.append(json.dumps(annotation_base, indent=4))
# dataset.append(annotation_base)
# # with open('coco_format_masks/'+json_name, 'w') as f:
# # f.write(json.dumps(annotation, indent=4))
# # print()
# count += 1
# +
import json
def load_data(t="waste_train"):
    """Load a COCO-format annotation split written by image-to-coco-json-converter.

    Args:
        t: split name -- "waste_train" (alias "train") or
           "waste_test" (alias "test").

    Returns:
        dict: the parsed COCO annotation file for the requested split.

    Raises:
        ValueError: if ``t`` does not name a known split.
    """
    # BUG FIX: the old default ``t="train"`` matched no branch, so a bare
    # ``load_data()`` silently returned None -- as did any typo'd split
    # name. Accept short aliases and fail loudly on anything unknown.
    paths = {
        "waste_train": "image-to-coco-json-converter/output/train.json",
        "train": "image-to-coco-json-converter/output/train.json",
        "waste_test": "image-to-coco-json-converter/output/val.json",
        "test": "image-to-coco-json-converter/output/val.json",
    }
    try:
        path = paths[t]
    except KeyError:
        raise ValueError("unknown dataset split: {!r}".format(t))
    with open(path, 'r') as file:
        return json.load(file)
# + tags=[]
from detectron2.structures import BoxMode


def _to_detectron_records(coco):
    """Convert a COCO-format dict into a list of detectron2 dataset records.

    Mutates the per-image and per-annotation dicts in place, exactly as the
    original inline loops did: rewrites 'img' -> 'image' in file names,
    shifts COCO's 1-based category ids to detectron2's 0-based ids, and tags
    every annotation with an absolute XYXY box mode.
    """
    records = []
    for image_idx, record in enumerate(coco['images']):
        # The converter wrote '<n>_img.JPG'; the files on disk are '<n>_image.JPG'.
        record['file_name'] = record['file_name'].replace('img', 'image')
        matching = []
        for annot in coco['annotations']:
            if annot['image_id'] == image_idx:
                annot['category_id'] = annot['category_id'] - 1  # 1-based -> 0-based
                annot['bbox_mode'] = BoxMode.XYXY_ABS
                matching.append(annot)
        record['annotations'] = matching
        records.append(record)
    return records


# The two splits previously duplicated the same loop body verbatim.
train_dataset = _to_detectron_records(load_data('waste_train'))
val_dataset = _to_detectron_records(load_data('waste_test'))
# + tags=[]
import json
from detectron2.data import MetadataCatalog, DatasetCatalog
# Register both splits with detectron2 and attach the shared class names.
_THING_CLASSES = ['metall', 'net', 'plastic', 'wood']
DatasetCatalog.register('waste_train', lambda d='waste_train': train_dataset)
DatasetCatalog.register('waste_test', lambda d='waste_test': val_dataset)
for _split in ('waste_train', 'waste_test'):
    MetadataCatalog.get(_split).set(thing_classes=_THING_CLASSES)
# metadata = MetadataCatalog.get("waste_train")
# + jupyter={"outputs_hidden": true} tags=[]
# Display the detectron2 config (cfg is built in an earlier cell,
# outside this chunk -- presumably via get_cfg(); confirm upstream).
cfg
# + jupyter={"outputs_hidden": true} tags=[]
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.engine import DefaultTrainer
if __name__ == '__main__':
    # Re-set metadata on both splits (registration happened in the cell above).
    for d in ["waste_train", "waste_test"]:
        # DatasetCatalog.register(d, lambda d=d: load_data(d))
        MetadataCatalog.get(d).set(thing_classes=['metall', 'net', 'plastic', 'wood'])
        metadata = MetadataCatalog.get(d)
    # Train from scratch (resume=False discards any existing checkpoint state).
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
# -
# Persist the trained weights next to the notebook.
torch.save(trainer.model.state_dict(), "./mymodel.pth")
# ### Inspect, test and build the submission
# The three sections below do the following:
# 1. Simply visualizes the results.
# 2. Visualizes the predicted and ground-truth masks in two frames for debugging, and prints the per-class IoU score. (IoU is not equal to Dice, but the two are closely related.)
# 3. Builds the submission csv file.
#
# Only the third cell is critical for you; the others are kept here so the proposed baseline can be assessed visually.
data_folder = './data/'
# + tags=[]
# from glob import glob
# for i in sorted(glob(data_folder+'/*'))[:3]:
# im = cv2.imread(i)
# outputs = predictor(im)
# v = Visualizer(im[:, :, ::-1],
# metadata=waste_metadata,
# scale=0.5,
# instance_mode=ColorMode.IMAGE_BW
# )
# out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
# plt.figure(figsize=(16, 16))
# plt.xticks([])
# plt.yticks([])
# plt.imshow(out.get_image()[:, :, ::-1])
# plt.show()
# + tags=[]
# Visualize instance predictions for the first three images.
for img_idx in range(3):
    frame = cv2.imread(os.path.join(data_folder, f'{img_idx:02}_image.JPG'))
    result = predictor(frame)
    visualizer = Visualizer(
        frame[:, :, ::-1],
        metadata=waste_metadata,
        scale=0.5,
        instance_mode=ColorMode.IMAGE_BW,
    )
    drawn = visualizer.draw_instance_predictions(result["instances"].to("cpu"))
    plt.figure(figsize=(16, 16))
    plt.xticks([])
    plt.yticks([])
    plt.imshow(drawn.get_image()[:, :, ::-1])
    plt.show()
# -
# Debug view: compare the predicted mask against the ground-truth mask and
# print the per-class IoU for the first three images.
for i in tqdm(range(0, 3)):
    im = cv2.imread(os.path.join(data_folder, f'{i:02}_image.JPG'))
    outputs = predictor(im)
    # Collapse the per-instance boolean masks into a single label image:
    # 0 = background, predicted class index + 1 elsewhere.
    predicted_mask = np.zeros(im.shape[:-1], np.uint8)
    for c, m in zip(outputs['instances'].pred_classes.detach().cpu().numpy(),
                    outputs['instances'].pred_masks.detach().cpu().numpy()):
        predicted_mask[m] = c+1
    # Rebuild the ground-truth label image from the per-class mask PNGs.
    expected_mask = np.zeros(im.shape[:-1], np.uint8)
    for class_id, class_name in enumerate(waste_metadata.thing_classes, start=1):
        addr = os.path.join(data_folder, f'{i:02}_{class_name}.png')
        if os.path.exists(addr):
            # NOTE(review): `imread` here is presumably skimage.io.imread
            # (imported in an earlier cell) -- confirm; also this rebinds the
            # name `mask`, shadowing any pycocotools `mask` module in scope.
            img = imread(addr)
            mask = (img.sum(-1) > 0)
            expected_mask[mask] = class_id
    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16,10))
    ax1.imshow(predicted_mask)
    ax2.imshow(expected_mask)
    plt.show()
    # `iou` is defined elsewhere in the notebook; C lists the 4 class labels.
    print(iou(predicted_mask, expected_mask, C=[1,2,3,4]))
# +
# Build the submission: one RLE-encoded row per (class, image) pair.
csv_rows = ['Type_Id,Mask\n']
for img_idx in tqdm(range(32)):
    frame = cv2.imread(os.path.join(data_folder, f'{img_idx:02}_image.JPG'))
    label_map = None
    if frame is not None:
        result = predictor(frame)
        # 0 = background, class index + 1 where an instance was predicted.
        label_map = np.zeros(frame.shape[:-1], np.uint8)
        pred_classes = result['instances'].pred_classes.detach().cpu().numpy()
        pred_masks = result['instances'].pred_masks.detach().cpu().numpy()
        for cls, inst_mask in zip(pred_classes, pred_masks):
            label_map[inst_mask] = cls + 1
    for class_id, class_name in enumerate(waste_metadata.thing_classes, start=1):
        if label_map is not None:
            rle = rlencode_mask(label_map == class_id)
        else:
            rle = '0 0'  # image missing on disk: emit an empty mask
        csv_rows.append(f'{class_name}_{img_idx},{rle}\n')
with open('baseline_solution.csv', 'w') as f:
    f.writelines(csv_rows)
# -
|
model_training/train_detectron.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Import Module
import tensorflow as tf
# ### Load Data
# Fashion-MNIST: 60k/10k 28x28 grayscale images, labels 0-9.
fashion_mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train.shape, x_test.shape
# +
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): the first positional argument of set_printoptions is
# `precision`; 200 here was probably meant for `threshold` or
# `linewidth` -- confirm intent.
np.set_printoptions(200)
plt.imshow(x_train[10])
x_train[0].shape
# -
# ### Design Model
# +
# Two conv/pool stages followed by a small dense classifier head.
# input_shape=(28,28,1): single-channel images, one sample at a time.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(64, (3,3), input_shape=(28,28,1), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    # 10 output units, one per Fashion-MNIST class.
    tf.keras.layers.Dense(10, activation='softmax')
])
model.summary()
# -
# ### Reshape Data
# Add the trailing channel axis expected by Conv2D: (N, 28, 28) -> (N, 28, 28, 1).
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
# ### Compile and Fitting the model
# sparse_categorical_crossentropy: integer labels, no one-hot needed.
# NOTE(review): pixel values are left in [0, 255]; dividing by 255 usually
# speeds up convergence -- confirm whether this was intentional.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.get_weights()
model.layers
layer_outputs = [layer.output for layer in model.layers]
layer_outputs
model.input
import matplotlib.pyplot as plt
# 3 rows (one per sample image) x 4 columns (one per early layer).
f, axarr = plt.subplots(3,4)
FIRST_IMAGE=0
SECOND_IMAGE=7
THIRD_IMAGE=26
CONVOLUTION_NUMBER = 1
from tensorflow.keras import models
layer_outputs = [layer.output for layer in model.layers]
# Model that exposes every intermediate layer activation at once.
activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs)
# PERF: predict() already returns the activations of *all* layers, so the
# original code recomputed the same forward pass inside the column loop
# (12 predictions instead of 3). Run each image through the network once.
activations_first = activation_model.predict(x_test[FIRST_IMAGE].reshape(1, 28, 28, 1))
activations_second = activation_model.predict(x_test[SECOND_IMAGE].reshape(1, 28, 28, 1))
activations_third = activation_model.predict(x_test[THIRD_IMAGE].reshape(1, 28, 28, 1))
for x in range(0, 4):
    # Column x shows layer x; display one convolution channel per panel.
    for row, activations in enumerate((activations_first, activations_second, activations_third)):
        axarr[row, x].imshow(activations[x][0, :, :, CONVOLUTION_NUMBER], cmap='inferno')
        axarr[row, x].grid(False)
|
code-colab/Tensorflow - Enhancing Vision with CNN(Fashion MNIST).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_pytorch_latest_p36
# language: python
# name: conda_pytorch_latest_p36
# ---
# +
# %%time
import os
import boto3
import sagemaker
from sagemaker import get_execution_role
# Region and IAM execution role of the current SageMaker notebook instance.
region = boto3.Session().region_name
role = get_execution_role()
# -
# This creates a default S3 bucket where we will upload our model.
bucket = sagemaker.Session().default_bucket()
bucket_path = "https://s3-{}.amazonaws.com/{}".format(region, bucket)
print(role)
print(region)
print(bucket)
print(bucket_path)
# BUG FIX: PyTorchModel was referenced without ever being imported,
# so this cell raised NameError before deploying anything.
from sagemaker.pytorch import PyTorchModel
# Deploy one real-time endpoint per model archive (model1 .. model14).
for model_id in range(1, 15):
    print(model_id)
    model_uri = f's3://sagemaker-us-east-1-296246463737/model{model_id}.tar.gz'
    pytorch_model = PyTorchModel(
        model_data=model_uri,
        role=role,
        entry_point='inference.py',
        framework_version='1.8.0',
        py_version='py3',
        source_dir=model_uri,
    )
    # ml.t2.medium keeps cost low; each endpoint gets a predictable name.
    predictor = pytorch_model.deploy(
        instance_type='ml.t2.medium',
        initial_instance_count=1,
        endpoint_name=f'model{model_id}-essay-scoring-endpoint',
    )
# +
# predictor.delete_endpoint()
# -
|
ML/sagemaker/sagemaker_deploy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PaddlePaddle 2.0.0b0 (Python 3.5)
# language: python
# name: py35-paddle1.2.0
# ---
# 查看当前挂载的数据集目录, 该目录下的变更重启环境后会自动还原
# View dataset directory.
# This directory will be recovered automatically after resetting environment.
# !ls /home/aistudio/data
# 查看工作区文件, 该目录下的变更将会持久保存. 请及时清理不必要的文件, 避免加载过慢.
# View personal work directory.
# All changes under this directory will be kept even after reset.
# Please clean unnecessary files in time to speed up environment loading.
# !ls /home/aistudio/work
# 如果需要进行持久化安装, 需要使用持久化路径, 如下方代码示例:
# If a persistence installation is required,
# you need to use the persistence path as the following:
# !mkdir /home/aistudio/external-libraries
# !pip install beautifulsoup4 -t /home/aistudio/external-libraries
# 同时添加如下代码, 这样每次环境(kernel)启动的时候只要运行下方代码即可:
# Also add the following code,
# so that every time the environment (kernel) starts,
# just run the following code:
import sys
sys.path.append('/home/aistudio/external-libraries')
# 请点击[此处](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576)查看本环境基本用法. <br>
# Please click [here ](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576) for more detailed instructions.
# # 准备数据和训练模型
# - 安装PaddleDetection和其使用依赖
# - 解压比赛数据
# - 按照yolo框架需要格式处理数据并切分数据集
# +
# ! git clone https://gitee.com/paddlepaddle/PaddleDetection.git
# 安装其他依赖
# %cd PaddleDetection
# ! pip install paddledet==2.0.1 -i https://mirror.baidu.com/pypi/simple
# 验证是否安装成功
# ! python ppdet/modeling/tests/test_architectures.py
# +
# %cd /home/aistudio/
# ! unzip -qo data/data85130/常规赛:PALM眼底彩照中黄斑中央凹定位.zip
# ! mv 常规赛:PALM眼底彩照中黄斑中央凹定位 dataset
# ! mkdir ~/PaddleDetection/dataset/palm
# ! mkdir ~/PaddleDetection/dataset/palm/annotations
# ! mkdir ~/PaddleDetection/dataset/palm/images
# images
# ! cp dataset/Train/fundus_image/* PaddleDetection/dataset/palm/images
# +
# annotations
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import ElementTree
from xml.dom import minidom
from PIL import Image
def geneXml(imgName, x, y, width, height):
    """Write a VOC-style XML annotation with a 40x40 box around the fovea.

    The box is centred on (x, y); class name is always "1" (single-class
    dataset). The file is written to
    PaddleDetection/dataset/palm/annotations/<stem>.xml.
    """
    stem = imgName.split('.')[0]
    root = Element('annotation')
    # frame
    SubElement(root, 'frame').text = stem
    # object with its class name and bounding box
    objects = SubElement(root, 'object')
    SubElement(objects, 'name').text = "1"
    bndbox = SubElement(objects, 'bndbox')
    cx, cy = int(x), int(y)
    for tag, value in (('xmin', cx - 20), ('ymin', cy - 20),
                       ('xmax', cx + 20), ('ymax', cy + 20)):
        SubElement(bndbox, tag).text = str(value)
    # image size
    size = SubElement(root, 'size')
    SubElement(size, 'width').text = str(width)
    SubElement(size, 'height').text = str(height)
    # indent in place, then serialize
    prettyXml(root, '\t', '\n')
    ElementTree(root).write(f'PaddleDetection/dataset/palm/annotations/{stem}.xml', encoding='utf-8')
def prettyXml(element, indent='\t', newline='\n', level = 0):
    """Recursively insert whitespace so the XML tree pretty-prints.

    Mutates .text/.tail in place: children are placed on their own lines,
    indented one level deeper than their parent. Leaf elements are left
    untouched.
    """
    if len(element) == 0:
        return  # leaf: nothing to indent
    pad = newline + indent * (level + 1)
    # Open the element on a fresh, deeper-indented line; keep any
    # existing non-whitespace text, stripped and re-padded.
    if element.text is None or element.text.isspace():
        element.text = pad
    else:
        element.text = pad + element.text.strip() + pad
    children = list(element)
    # Siblings share the child indent; the last child's tail dedents
    # back to the parent's level to close it on its own line.
    for child in children[:-1]:
        child.tail = pad
    children[-1].tail = newline + indent * level
    for child in children:
        prettyXml(child, indent, newline, level = level + 1)
from tqdm import tqdm
import pandas as pd
# Fovea centre coordinates per training image, one row per image.
data = pd.read_excel('dataset/Train/Fovea_Location_train.xlsx')
for i in tqdm(data.index):
    imgName, x, y = data.loc[i, ['imgName', 'Fovea_X', 'Fovea_Y']]
    # Image dimensions are needed for the <size> element of the VOC XML.
    width, height = Image.open(f'dataset/Train/fundus_image/{imgName}').size
    geneXml(imgName, x, y, width, height)
# -
import os, random
from tqdm import tqdm
def splitPic():
    """Shuffle the image list and write 80/20 train/val index files.

    Each output line pairs an image with its annotation:
    ``./images/<stem>.jpg ./annotations/<stem>.xml``.
    """
    file_list = os.listdir('PaddleDetection/dataset/palm/images/')
    random.shuffle(file_list)
    n = len(file_list)
    split = int(n * 0.8)
    train_list = file_list[:split]
    val_list = file_list[split:]
    print(n, len(train_list), len(val_list))
    # `with` guarantees the handles are flushed and closed even on error
    # (the original left both files open if an exception occurred).
    with open('PaddleDetection/dataset/palm/train.txt', 'w') as train_txt:
        for file in tqdm(train_list, desc='Train'):
            file_name = file.split('.')[0]
            train_txt.write(f'./images/{file_name}.jpg ./annotations/{file_name}.xml\n')
    with open('PaddleDetection/dataset/palm/valid.txt', 'w') as val_txt:
        for file in tqdm(val_list, desc='Val'):
            file_name = file.split('.')[0]
            val_txt.write(f'./images/{file_name}.jpg ./annotations/{file_name}.xml\n')
splitPic()
# Single-class dataset: the label list contains only class "1".
with open('/home/aistudio/PaddleDetection/dataset/palm/label_list.txt', 'w') as f:
    f.write('1')
# # 训练数据并提交测试数据结果
# - 修改对应配置
# - 训练数据
# - 预测测试数据
# - 按照提交形式处理预测数据
# - 提交结果
#
# **模型构建思路及调优过程**
# - 修改的配置主要是线程数(加快训练速度)和对应的数据集还有训练轮数(只设置了一个较低值50,因为训练时间有限)
# - 在预测数据的时候调整了draw_threshold的值,这样就可以避免很多错误的预测
# - 其实最后预测出来的数据很少,分数也很低,还有的思路就是把图片调白然后训练,因为现在的点实在是不好认出来
# ! cp PaddleDetection/configs/yolov3/yolov3_mobilenet_v1_roadsign.yml PaddleDetection/configs/yolov3/yolov3_palm_voc.yml
# ! cp PaddleDetection/configs/yolov3/_base_/yolov3_reader.yml PaddleDetection/configs/yolov3/_base_/yolov3_palm_reader.yml
# ! cp PaddleDetection/configs/yolov3/_base_/yolov3_mobilenet_v1.yml PaddleDetection/configs/yolov3/_base_/yolov3_palm_mobilenet.yml
# ! cp PaddleDetection/configs/datasets/roadsign_voc.yml PaddleDetection/configs/datasets/palm_voc.yml
# **下面是修改的配置文件**
# ```
# ### yolov3_palm_voc.yml ###
# _BASE_: [
# '../datasets/palm_voc.yml',
# '../runtime.yml',
# '_base_/yolov3_palm_mobilenet.yml',
# '_base_/yolov3_palm_reader.yml',
# ]
# # pretrain_weights: https://paddledet.bj.bcebos.com/models/yolov3_mobilenet_v1_270e_coco.pdparams
# # weights: output/yolov3_mobilenet_v1_roadsign/model_final
# pretrain_weights: ~/PaddleDetection/output/yolov3_palm_voc/model_final.pdparams
#
# YOLOv3Loss:
# ignore_thresh: 0.7
# label_smooth: true
#
# snapshot_epoch: 2
# epoch: 50
#
# LearningRate:
# base_lr: 0.001
# schedulers:
# - !PiecewiseDecay
# gamma: 0.1
# milestones: [32, 36]
# - !LinearWarmup
# start_factor: 0.3333333333333333
# steps: 100
#
# OptimizerBuilder:
# optimizer:
# momentum: 0.9
# type: Momentum
# regularizer:
# factor: 0.0005
# type: L2
#
#
# ### yolov3_palm_reader.yml ###
# worker_num: 3
# TrainReader:
# inputs_def:
# num_max_boxes: 1
# sample_transforms:
# - Decode: {}
# - Mixup: {alpha: 1.5, beta: 1.5}
# - RandomDistort: {}
# - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}
# - RandomCrop: {}
# - RandomFlip: {}
# batch_transforms:
# - BatchRandomResize: {target_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608], random_size: True, random_interp: True, keep_ratio: False}
# - NormalizeBox: {}
# - PadBox: {num_max_boxes: 1}
# - BboxXYXY2XYWH: {}
# - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
# - Permute: {}
# - Gt2YoloTarget: {anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]], anchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], downsample_ratios: [32, 16, 8]}
# batch_size: 24
# shuffle: true
# drop_last: true
# mixup_epoch: 250
# use_shared_memory: true
#
# EvalReader:
# inputs_def:
# num_max_boxes: 1
# sample_transforms:
# - Decode: {}
# - Resize: {target_size: [608, 608], keep_ratio: False, interp: 2}
# - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
# - Permute: {}
# batch_size: 1
# drop_empty: false
#
# TestReader:
# inputs_def:
# image_shape: [3, 608, 608]
# sample_transforms:
# - Decode: {}
# - Resize: {target_size: [608, 608], keep_ratio: False, interp: 2}
# - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
# - Permute: {}
# batch_size: 1
#
#
# ### yolov3_palm_mobilenet.yml ####
# architecture: YOLOv3
# pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV1_pretrained.pdparams
# norm_type: sync_bn
#
# YOLOv3:
# backbone: MobileNet
# neck: YOLOv3FPN
# yolo_head: YOLOv3Head
# post_process: BBoxPostProcess
#
# MobileNet:
# scale: 1
# feature_maps: [4, 6, 13]
# with_extra_blocks: false
# extra_block_filters: []
#
# # use default config
# # YOLOv3FPN:
#
# YOLOv3Head:
# anchors: [[10, 13], [16, 30], [33, 23],
# [30, 61], [62, 45], [59, 119],
# [116, 90], [156, 198], [373, 326]]
# anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
# loss: YOLOv3Loss
#
# YOLOv3Loss:
# ignore_thresh: 0.7
# downsample: [32, 16, 8]
# label_smooth: false
#
# BBoxPostProcess:
# decode:
# name: YOLOBox
# conf_thresh: 0.005
# downsample_ratio: 32
# clip_bbox: true
# nms:
# name: MultiClassNMS
# keep_top_k: 100
# score_threshold: 0.01
# nms_threshold: 0.45
# nms_top_k: 1000
#
#
#
# ### palm_voc.yml ###
# metric: VOC
# map_type: integral
# num_classes: 1
#
# TrainDataset:
# # !VOCDataSet
# dataset_dir: dataset/palm
# anno_path: train.txt
# label_list: label_list.txt
# data_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']
#
# EvalDataset:
# # !VOCDataSet
# dataset_dir: dataset/palm
# anno_path: valid.txt
# label_list: label_list.txt
# data_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']
#
# TestDataset:
# # !ImageFolder
# anno_path: dataset/palm/label_list.txt
# ```
# %cd /home/aistudio/PaddleDetection/
# ! python tools/train.py -c configs/yolov3/yolov3_palm_voc.yml --eval --use_vdl=True --vdl_log_dir="./output"
# %cd /home/aistudio/PaddleDetection/
# ! python tools/infer.py -c configs/yolov3/yolov3_palm_voc.yml -o weights=output/yolov3_palm_voc/best_model.pdparams --infer_dir=/home/aistudio/dataset/PALM-Testing400-Images --save_txt=True --output_dir=output/yolov3_palm_voc/pred/ --draw_threshold=0.5
from PIL import Image
import os
# Directory with the infer.py outputs: per-image .txt predictions plus .jpg renders.
PRED_DIR = '/home/aistudio/PaddleDetection/output/yolov3_palm_voc/pred/'
with open('/home/aistudio/dataset/submit.csv', 'w') as submit:
    submit.write("FileName,Fovea_X,Fovea_Y\n")
    for f in os.listdir(PRED_DIR):
        img_name = f.split('.')[0]
        if f.split('.')[1] == 'jpg':
            continue  # only the .txt prediction files are parsed
        size = os.path.getsize(f'{PRED_DIR}{f}')
        if size > 0:
            # Use the first (highest-confidence) detection.
            with open(f'{PRED_DIR}{f}') as pred:
                line = pred.readline()
            _, _, xmin, ymin, xmax, ymax = line.strip().split(' ')
            # BUG FIX: x previously averaged xmin with *ymax*; the box
            # centre is ((xmin+xmax)/2, (ymin+ymax)/2).
            x = (float(xmin) + float(xmax)) / 2
            y = (float(ymin) + float(ymax)) / 2
        else:
            # No detection for this image: fall back to the image centre.
            img = Image.open(f'{PRED_DIR}{img_name}.jpg')
            width, height = img.size
            x = width / 2
            y = height / 2
        submit.write(f"{img_name}.jpg,{x},{y}\n")
|
PALM-05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Create an artificial neural network for the hmda data
# Import packages
import numpy as np
import pandas as pd
import shap
import subprocess
import sys
import keras
from timeit import default_timer as timer
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model, Sequential
from keras.layers import Dense
# +
# Set seed and output directory
seed = 12345
np.random.seed(seed)
# NOTE(review): my_init is defined but not used in this chunk -- presumably
# intended as a kernel initializer for the Dense layers; confirm.
my_init = keras.initializers.RandomUniform(seed=seed)
out_dir = "ann_output5/"
# Toggle for writing the results CSV at the end of the notebook.
output_files = False
# -
def ann_model(n_features=None):
    """Build and compile a small feed-forward binary classifier.

    Architecture: input -> Dense(20, relu) -> Dense(20, relu)
    -> Dense(1, sigmoid), compiled with binary cross-entropy and Adam.

    Parameters
    ----------
    n_features : int, optional
        Width of the input layer. Defaults to the module-level `features`
        global, preserving the original zero-argument call signature.

    Returns
    -------
    keras.Model
        The compiled, untrained model.
    """
    if n_features is None:
        n_features = features  # fall back to the notebook-level global
    # `main_input` avoids shadowing the builtin `input`; input_dim kwargs
    # were dropped -- the functional API takes the shape from Input().
    main_input = Input(shape=(n_features,), name='main_input')
    hidden = Dense(20, activation='relu')(main_input)
    hidden = Dense(20, activation='relu')(hidden)
    output = Dense(1, activation='sigmoid')(hidden)
    model = Model(inputs=main_input, outputs=output)
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
# +
# Load the dataset
# Pre-transformed HMDA splits produced by the XNN notebook.
xnn_data_dir = '~/article-information-2019/data/xnn_output/'
DATA=pd.read_csv(xnn_data_dir + 'train_transformed.csv')
TEST=pd.read_csv(xnn_data_dir + 'test_transformed.csv')
# +
# Select features and split into target and feature sets
# 10 model inputs: loan/borrower attributes, mostly standardized (_std suffix).
selected_vars = ['term_360', 'conforming']
selected_vars += ['debt_to_income_ratio_missing','loan_amount_std', 'loan_to_value_ratio_std']
selected_vars += ['no_intro_rate_period_std', 'intro_rate_period_std']
selected_vars += ['property_value_std', 'income_std', 'debt_to_income_ratio_std']
target_var = "high_priced"
# +
# Split the data into feature and target values
X=DATA[selected_vars].values
Y=DATA[target_var].values
TEST_X = TEST[selected_vars].values
TEST_Y = TEST[target_var].values
# Input width used by ann_model(); keyed dict matches the 'main_input' layer name.
features = X.shape[1]
inputs = {'main_input': X}
# -
#Fit model
model = ann_model()
model.fit(inputs, Y, epochs=5000, batch_size=1024, validation_split=0, verbose=1)
# Find the predictions and Shapley values on the test set.
# Background sample approximates the expectation in DeepSHAP.
bg_samples = 1000
background = DATA[selected_vars].iloc[np.random.choice(DATA[selected_vars].shape[0], bg_samples, replace=False)]
explainer = shap.DeepExplainer(model, background)
shap_values = explainer.shap_values(TEST_X)
preds = model.predict(TEST_X)
# Layout: [probability, per-feature Shapley scores, placeholder column];
# the second copy of preds only reserves the final column, which is
# overwritten with the explainer's expected value on the next line.
preds = np.concatenate((preds, shap_values[0], preds), axis=1)
preds[:, -1] = explainer.expected_value
# +
# Add the Shapley values and predictions to the dataset.
# Appended columns from `preds`: [probability, one Shapley score per
# selected feature, intercept/expected value].
TEST = pd.DataFrame(pd.concat([TEST, pd.DataFrame(preds)], axis=1))
Feature_names = selected_vars.copy()
# Build the rename map programmatically instead of the hand-written
# 12-entry literal: works for any number of features, not just 10.
rename_map = {0: "probability"}
rename_map.update({idx + 1: f"{name}_Shapley_score"
                   for idx, name in enumerate(Feature_names)})
rename_map[len(Feature_names) + 1] = "Intercept_Shapley_score"
TEST = TEST.rename(columns=rename_map)
# -
# Save results
# Guarded by the `output_files` flag defined near the top of the notebook.
if output_files:
    TEST.to_csv(out_dir + "hmda_ann_results_with_Shapley.csv" , index=False)
|
notebooks/hmda_ann.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Below is a table schema for a P2P messaging application.
# The table contains send/receive message data for the application's users, and has the following schema:
# time_stamp (epoch seconds), sender_id (# id of the message sender), receiver_id (# id of the message receiver)
# Question: What fraction of active users communicated with at least 2 unique people on 2019-02-26 GMT?
import io
import pandas as pd
import datetime
# mock up data: expect this to return 123456 and 7890ab
s = """ts,sender,receiver
1540071583,'123456','7890ab'
1551156983,'7890ab','123456'
1551156988,'123456','abcdef'
1551156998,'123456','abcdef'
1551157098,'123456','7890ab'
1551157983,'7890ab','123456'
1551157999,'7890ab','abcdef'
1551157999,'abcdef','123456'
"""
# BUG FIX: pd.compat.StringIO was removed from pandas' public API;
# io.StringIO is the supported way to wrap a string for read_csv.
df = pd.read_csv(io.StringIO(s))
df['ts'] = pd.to_datetime(df['ts'], unit='s')
# Keep messages on/after 2019-02-26 GMT (epoch values are UTC).
df = df[df['ts']>=pd.Timestamp(2019, 2, 26)]
# Per sender, count distinct receivers; keep those who messaged >= 2 people.
# NOTE(review): this counts outbound contacts only -- "communicated with"
# may also include receiving; confirm the intended definition.
agg = df.groupby('sender').agg({'receiver':'nunique'}).reset_index()
active_senders = agg[agg['receiver']>=2]['sender']
active_senders
# -
|
interviewq_exercises/q004_pandas_p2p_msg_senders.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Use Tensorflow to recognize hand-written digits with `ibm-watson-machine-learning`
#
# This notebook facilitates Tensorflow and Watson Machine Learning service. It contains steps and code to work with [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in PyPI repository. It also introduces commands for getting model and training data, persisting model, deploying model and scoring it.
#
# Some familiarity with Python is helpful. This notebook uses Python 3.7.
# ## Learning goals
#
# The learning goals of this notebook are:
#
# - Download an externally trained Tensorflow model with dataset.
# - Persist an external model in Watson Machine Learning repository.
# - Deploy model for online scoring using client library.
# - Score sample records using client library.
#
#
# ## Contents
#
# This notebook contains the following parts:
#
# 1. [Setup](#setup)
# 2. [Download externally created Tensorflow model and data](#download)
# 3. [Persist externally created Tensorflow model](#persistence)
# 4. [Deploy and score](#scoring)
# 5. [Clean up](#cleanup)
# 6. [Summary and next steps](#summary)
# <a id="setup"></a>
# ## 1. Set up the environment
#
# Before you use the sample code in this notebook, you must perform the following setup tasks:
#
# - Contact your Cloud Pak for Data administrator and ask for your account credentials
# ### Connection to WML
#
# Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `password`.
# Placeholders to be filled in by the user before running the notebook.
username = 'PASTE YOUR USERNAME HERE'
password = '<PASSWORD>'
url = 'PASTE THE PLATFORM URL HERE'
# Credentials for IBM Cloud Pak for Data 3.5 ('openshift' instance id is
# the fixed value for CP4D on-prem deployments).
wml_credentials = {
    "username": username,
    "password": password,
    "url": url,
    "instance_id": 'openshift',
    "version": '3.5'
}
# ### Install and import the `ibm-watson-machine-learning` package
# **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>.
# !pip install -U ibm-watson-machine-learning
# +
from ibm_watson_machine_learning import APIClient
client = APIClient(wml_credentials)
# -
# ### Working with spaces
#
# First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one.
#
# - Click New Deployment Space
# - Create an empty space
# - Go to space `Settings` tab
# - Copy `space_id` and paste it below
#
# **Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Space%20management.ipynb).
#
# **Action**: Assign space ID below
space_id = 'PASTE YOUR SPACE ID HERE'
# You can use `list` method to print all existing spaces.
client.spaces.list(limit=10)
# To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using.
client.set.default_space(space_id)
# + [markdown] pycharm={"name": "#%% md\n"}
# <a id="download"></a>
# ## 2. Download externally created Tensorflow model and data
# In this section, you will download externally created Tensorflow models and data used for training it.
# + pycharm={"is_executing": false, "name": "#%%\n"}
import os
import wget
# Local cache for the model archive and the MNIST dataset.
data_dir = 'MNIST_DATA'
# makedirs(..., exist_ok=True) is idempotent, unlike the original
# isdir()+mkdir() pair, so re-running either download cell is always safe.
os.makedirs(data_dir, exist_ok=True)
model_path = os.path.join(data_dir, 'mnist-tf-hpo-saved-model.tar.gz')
if not os.path.isfile(model_path):
    wget.download("https://github.com/IBM/watson-machine-learning-samples/raw/master/cpd3.5/models/tensorflow/mnist/mnist-tf-hpo-saved-model.tar.gz", out=data_dir)
# +
# Raw MNIST arrays used later to build scoring payloads.
filename = os.path.join(data_dir, 'mnist.npz')
if not os.path.isfile(filename):
    wget.download('https://s3.amazonaws.com/img-datasets/mnist.npz', out=data_dir)
# + pycharm={"is_executing": false, "name": "#%%\n"}
import numpy as np
dataset = np.load(filename)
x_test = dataset['x_test']
# -
# <a id="persistence"></a>
# ## 3. Persist externally created Tensorflow model
# In this section, you will learn how to store your model in Watson Machine Learning repository by using the IBM Watson Machine Learning SDK.
# ### 3.1: Publish model
# #### Publish model in Watson Machine Learning repository on Cloud.
# Define model name, autor name and email.
# + pycharm={"is_executing": false, "name": "#%%\n"}
sofware_spec_uid = client.software_specifications.get_id_by_name("tensorflow_2.1-py3.7")
# + pycharm={"is_executing": false, "name": "#%%\n"}
metadata = {
client.repository.ModelMetaNames.NAME: 'External Tensorflow model',
client.repository.ModelMetaNames.TYPE: 'tensorflow_2.1',
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
}
published_model = client.repository.store_model(
model=model_path,
meta_props=metadata)
# -
# ### 3.2: Get model details
# + pycharm={"is_executing": false, "name": "#%%\n"}
import json
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
# -
# ### 3.3 Get all models
# + pycharm={"is_executing": false, "name": "#%%\n"}
models_details = client.repository.list_models()
# -
# <a id="scoring"></a>
# ## 4. Deploy and score
# In this section you will learn how to create online scoring and to score a new data record by using the IBM Watson Machine Learning SDK.
# ### 4.1: Create model deployment
# #### Create online deployment for published model
# + pycharm={"is_executing": false, "name": "#%%\n"}
metadata = {
client.deployments.ConfigurationMetaNames.NAME: "Deployment of external Tensorflow model",
client.deployments.ConfigurationMetaNames.ONLINE: {}
}
created_deployment = client.deployments.create(published_model_uid, meta_props=metadata)
# -
# **Note**: Here we use deployment url saved in published_model object. In next section, we show how to retrive deployment url from Watson Machine Learning instance.
deployment_uid = client.deployments.get_uid(created_deployment)
# Now you can print an online scoring endpoint.
# + pycharm={"is_executing": false, "name": "#%%\n"}
scoring_endpoint = client.deployments.get_scoring_href(created_deployment)
print(scoring_endpoint)
# -
# You can also list existing deployments.
client.deployments.list()
# ### 4.2: Get deployment details
# + pycharm={"is_executing": false, "name": "#%%\n"}
client.deployments.get_details(deployment_uid)
# -
# ### 4.3: Score
# You can use below method to do test scoring request against deployed model.
# Let's first visualize two samples from dataset, we'll use for scoring.
# %matplotlib inline
import matplotlib.pyplot as plt
# Show the two test digits that will be sent for scoring below.
for plot_idx, sample in enumerate(x_test[:2], start=1):
    plt.subplot(2, 2, plot_idx)
    plt.axis('off')
    plt.imshow(sample, cmap=plt.cm.gray_r, interpolation='nearest')
# Prepare scoring payload with records to score.
score_0 = x_test[0].flatten().tolist()
score_1 = x_test[1].flatten().tolist()
# + pycharm={"is_executing": false, "name": "#%%\n"}
scoring_payload = {"input_data": [{"values": [score_0, score_1]}]}
# -
# Use ``client.deployments.score()`` method to run scoring.
# + pycharm={"is_executing": false, "name": "#%%\n"}
predictions = client.deployments.score(deployment_uid, scoring_payload)
# -
# Let's print the result of predictions.
# + pycharm={"is_executing": false, "name": "#%%\n"}
import json
print(json.dumps(predictions, indent=2))
# -
# As you can see, predicted values are the same one as displayed above from test dataset.
# <a id="cleanup"></a>
# ## 5. Clean up
# If you want to clean up all created assets:
# - experiments
# - trainings
# - pipelines
# - model definitions
# - models
# - functions
# - deployments
#
# please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).
# <a id="summary"></a>
# ## 6. Summary and next steps
# You successfully completed this notebook! You learned how to use Pytorch machine learning library as well as Watson Machine Learning for model creation and deployment.
#
# Check out our [Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html) for more samples, tutorials, documentation, how-tos, and blog posts.
# ### Authors
#
# **<NAME>**, Software Engineer
# Copyright © 2020 IBM. This notebook and its source code are released under the terms of the MIT License.
|
cpd3.5/notebooks/python_sdk/deployments/tensorflow/Use Tensorflow to recognize hand-written digits.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # COVID-19 growth analysis
#
# (c) 2020, [<NAME>](https://twitter.com/twiecki)<br>
# Adapted for South Africa by [<NAME>](https://twitter.com/AltadeWaal)
#
# This notebook gets up-to-date data from the [Coronavirus COVID-19 (2019-nCoV) Data Repository for South Africa [Hosted by DSFSI group at University of Pretoria]](https://github.com/dsfsi/covid19za) and recreates the (pay-walled) plot in the [Financial Times]( https://www.ft.com/content/a26fbf7e-48f8-11ea-aeb3-955839e06441).
# +
# %matplotlib inline
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import seaborn as sns
import requests
import io
sns.set_context('talk')
plt.style.use('seaborn-whitegrid')
# -
# ## Load data
# +
def load_timeseries(name):
    """Load a COVID-19 time-series CSV and index it by parsed date.

    Parameters
    ----------
    name : str or file-like
        Path or buffer accepted by ``pandas.read_csv``.

    Returns
    -------
    pandas.DataFrame
        The table indexed by its ``date`` column, parsed as
        day-first datetimes.
    """
    frame = pd.read_csv(name)
    print(frame.head())  # quick sanity check of the raw data
    frame = frame.set_index('date')
    frame.index = pd.to_datetime(frame.index, dayfirst=True)
    return frame
# Confirmed-case records for South Africa, indexed by date.
df = load_timeseries('../data/covid19za_timeline_confirmed.csv')
# -
df.head()
def plot_confirmed(provinces, min_cases=100, ls='-'):
    """Plot the cumulative confirmed-case curve for each given province.

    Uses the module-level ``df`` of confirmed cases.  ``min_cases`` is
    accepted for API compatibility but is currently unused.
    """
    for prov in provinces:
        per_day = (df.loc[df.province == prov]
                   .groupby(['date'])
                   .agg({'country': ['count']}))
        per_day.columns = ['new cases']
        per_day['cummulative'] = per_day['new cases'].cumsum()
        per_day.reset_index()['cummulative'].plot(label=prov, ls=ls)
        print('\n' + prov + ":")
        print(per_day)
# +
# One distinct hue per series (palette has 8 colours).
sns.set_palette(sns.hls_palette(8, l=.45, s=.8)) # 8 countries max
fig, ax = plt.subplots(figsize=(12, 8))
provinces = ['GP', 'WC', 'KZN','LP', 'MP', 'EC','FS', 'NC', 'NW']
plot_confirmed(provinces, min_cases=1, ls='-')
# Reference growth line and log scale (currently disabled):
#x = np.linspace(0, plt.xlim()[1])
#plt.plot(x,x+(1.33), ls='--', color='k', label='33% daily growth')
#plt.yscale('log');
plt.title('Growth of COVID-19 across provinces in South Africa' + '\n' +'Data up to {}'.format(df.index.max().strftime('%B %d, %Y')))
plt.xlabel('Days from first confirmed case')
plt.ylabel('Confirmed cases')
# Plain (non-scientific) tick labels on the y axis.
ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set_xticks(range(0, int(plt.xlim()[1]) + 1))
handles, labels = ax.get_legend_handles_labels()
# Legend placed outside the axes, to the right.
lgd = ax.legend(handles, labels, loc='upper center', bbox_to_anchor=(1.15, 1))
#sns.despine()
# Attribution footnote below the axes.
text = plt.text(0.0015, -0.2, 'Based on Coronavirus COVID-19 (2019-nCoV) Data Repository for South Africa \nHosted by DSFSI group at University of Pretoria\nDr. <NAME>',
                transform=ax.transAxes, fontsize=14)
plt.savefig("../visualisation/per_province_growth_first_report.png",
            bbox_inches='tight',
            transparent=True,
            pad_inches=0, dpi=200)
# -
|
notebooks/covid19_growth_for_sa.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: lapalma-earthquakes
# language: python
# name: lapalma-earthquakes
# ---
# + [markdown] iooxa
# ## Visualization of La Palma Earthquake Data
#
# Using the subset of IGN data specific to La Palma. This subset was prepared using `screening.ipynb` and saved to `lapalma.csv`.
# + iooxa
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_theme(style="whitegrid")
# + iooxa
# La Palma earthquake subset prepared by screening.ipynb.
df = pd.read_csv('./lapalma.csv')
df.head()
# + iooxa={"id": {"block": "TpHbIg1ifrpGJVOvPIyW", "project": "1Bk7uPlcMuaTyKEshESj", "version": 1}, "outputId": {"block": "HNZJx0ZNjCJx2fJQg49O", "project": "1Bk7uPlcMuaTyKEshESj", "version": 1}}
df.describe()
# + iooxa={"id": {"block": "C2Vtka9NXOA1HTeqNA1J", "project": "1Bk7uPlcMuaTyKEshESj", "version": 1}, "outputId": null}
# Categorise events by depth: shallower than 20 km = crustal, else mantle.
df['Depth'] = 'Crustal (<20km)'
df.loc[df['Depth(km)'] >= 20, 'Depth'] = 'Mantle (>20km)'
# + iooxa tags=[]
|
notebooks/interactive.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
datadf = pd.read_csv('data/ranked_number_of_patches.csv')
datadf
# len(datadf['distractorc
# +
# NOTE(review): this assigns a plain list of unique split tokens back as a
# column; pandas raises ValueError unless the list length equals
# len(datadf) — verify this is the intended behaviour.
datadf['distractorcatlist'] = datadf['distractorcatlist'].str.split(', ', expand=True).stack().unique().tolist()
datadf['distractorcatlist']
# datadf['distractorcatlistlent'] = datadf['distractorcatlist'].str.replace('[','', regex=False).replace(']','', regex=False).replace('\'','', regex=False).replace(', ',',', regex=False).str.split(",")
# datadf['distractorcatlistlent'].reset_index()['distractorcatlistlent'].unique()
# # print( type(datadf['distractorcatlistlent'].astype(str).unique() ))
# datadf['distractorcatlistlent'].apply(sorted).transform(tuple).unique()
# datadf['distractorcatlistlent'] = datadf['distractorcatlist'].unique()
# print(datadf['distractorcatlistlent'].unique())
# datadf.distractorcatlistlent.unique()
# datadf['distractorcatlistlenLENGTH'] = datadf['distractorcatlistlent'].str.len()
# datadf['distractorcatlistlenLENGTH']
# datadf['distractorcatlistlenLENGTH'] = datadf['distractorcatlistlent'].str.len()
# datadf['distractorcatlistlent']
# datadf.to_csv('data/test---.csv')
# datadf['distractorcatlistlent'] = datadf['distractorcatlistlent']
# Tarray = datadf['distractorcatlist'].apply(np.array)
# list1 = Tarray.tolist()
# joined_string = ",".join(list1)
# datadf['test'] = joined_string
# datadf['test']
# # # Using for loop
# # for i in list1:
# # print(i)
# # datadf['distractorcatlist'] = np.unique(datadf['distractorcatlist'].apply(np.array))
# # len(datadf['distractorcatlist'].apply(np.array))
# -
|
notebooks/Untitled8.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: VPython
# language: python
# name: vpython
# ---
# # The perihelion motion of Mercury - Base solution
# This Notebook contains a template for the simulation of Mercury's orbit.
#
# It sets up the graphics display and defines basic parameters like the position and
# velocity of Mercury. It also provides a basic structure for the program to get started
# quickly.
#
# The run parameters are given just before the while loop at the end. They can be
# changed freely but the values given have proven to yield good results.
# ## ToDo list
# The tasks which needs to be done are:
# 1. Complete the function `evolve_mercury`
# 2. Create the graphical objects
# 3. Finalize the loop which updates the position of mercury
# ## Importing VPython
from vpython import *
# ## Defining parameters and functions
# The following parameter values are computed using https://nssdc.gsfc.nasa.gov/planetary/factsheet
# All quantities are in scaled units: R0 = length unit, T0 = time unit.
rM0 = 4.60          # Initial radius of Mercury orbit, in units of R0
vM0 = 5.10e-1       # Initial orbital speed of Mercury, in units of R0/T0
c_a = 9.90e-1       # Base acceleration of Mercury, in units of R0**3/T0**2
rS = 2.95e-7        # Schwarzschild radius of Sun, in units of R0
rL2 = 8.19e-7       # Specific angular momentum, in units of R0**2
# Because we want to visualize the orbit of Mercury, we need to work with vectors. The initial position and velocity vectors of mercury are thus given by
vec_rM0 = vector(0, rM0, 0) # Initial position vector of Mercury
vec_vM0 = vector(vM0, 0, 0) # Initial velocity vector of Mercury
# Finally, before we start the simulation, we have to specify how long it should run, how big the time steps are and which parameters we want to use for the forces.
dt = 2. * vM0 / c_a / 20    # Time step
alpha = 1.e6                # Strength of 1/r**3 term
beta = 0.0                  # Strength of 1/r**4 term
time = 0                    # Current simulation time
max_time = 1000*dt          # Maximum simulation time
# ## Task 1: Update Mercury
# Calculate new position and velocity of Mercury here in the following function.
# You must specify:
#
# `vec_rM_new = ??`
#
# `vec_vM_new = ??`
#
def evolve_mercury(vec_rM_old, vec_vM_old, alpha, beta):
    """
    Advance Mercury in time by one step of length dt.
    Arguments:
    - vec_rM_old: old position vector of Mercury
    - vec_vM_old: old velocity vector of Mercury
    - alpha: strength of 1/r**3 term in force
    - beta: strength of 1/r**4 term in force
    Returns:
    - vec_rM_new: new position vector of Mercury
    - vec_vM_new: new velocity vector of Mercury
    """
    ### TODO
    # - Calculate new position and velocity of Mercury here.
    # vec_rM_new = ??
    # vec_vM_new = ??
    # NOTE(review): this is a student template (Task 1). Until the TODO is
    # completed, vec_rM_new / vec_vM_new are undefined and calling this
    # function raises NameError — intentional, not a bug to fix here.
    return vec_rM_new, vec_vM_new
# # Task 2 and 3: Visualization
# 2. Next, you have to create the graphical objects, specify their initial positions and add a visible trajectory to Mercury.
# 3. Once this is done, complete the loop by updating Mercury and drawing the trajectory.
# +
# Specify how the output should look like
scene = canvas()                 # Create a new scene: this displays the scene below this cell
scene.userzoom = False           # No zoom allowed (for smooth scrolling in notebook)
scene.width = 1024               # Width of visualization in pixel
scene.height = 1024              # Height of visualization in pixel
scene.background = color.white   # Background color ...
scene.center = vector(0, -2, 0)  # ... and shifted center
# Define graphical objects; M = Mercury, S = Sun ...
S = sphere(pos=vector(0, 0, 0), radius=1.5, color=color.yellow)
### TODO
# - Define graphical Mercury objects that you want to display.
# - Give Mercury initial positions and velocities.
# - Enable drawing of trajectory of Mercury by using the 'curve' object
# run the simulation for a given time and draw trajectory
while time < max_time:
    # set the frame rate: shows four earth days at once
    rate(100)
    ### TODO
    # - Append position to trajectory.
    # - Update position and velocity of Mercury (see function evolve_mercury).
|
ipynb-scripts/template.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Model, load_model
from keras.layers import *
from keras import backend as K
from keras import optimizers, callbacks, regularizers
import numpy as np
import pandas as pd
import cv2, h5py
BATCH_SIZE = 128          # images per gradient step
INPUT_SIZE = (299,299)    # Xception's expected input resolution (HxW)
print("Batch size:", BATCH_SIZE)
# +
# Three training pipelines, all reading 299x299 RGB images:
#  - train_generator:        light augmentation (rescale + horizontal flip)
#  - train_aug_generator:    moderate geometric augmentation, same folder
#  - train_maxaug_generator: same augmentation over a pre-augmented folder
# BUGFIX: the flow_from_directory calls referenced undefined names
# (train_a_datagen / train_b_datagen) and raised NameError; each generator
# now flows from the datagen defined directly above it.
train_datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)
train_generator = train_datagen.flow_from_directory('data2/train',
                                                    target_size=INPUT_SIZE,
                                                    batch_size=BATCH_SIZE)
train_aug_datagen = ImageDataGenerator(rotation_range=3,
                                       width_shift_range=0.1,
                                       height_shift_range=0.1,
                                       rescale=1./255,
                                       shear_range=0.1,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest')
train_aug_generator = train_aug_datagen.flow_from_directory('data2/train',
                                                            target_size=INPUT_SIZE,
                                                            batch_size=BATCH_SIZE,
                                                            class_mode='categorical')
train_maxaug_datagen = ImageDataGenerator(rotation_range=3,
                                          width_shift_range=0.1,
                                          height_shift_range=0.1,
                                          rescale=1./255,
                                          shear_range=0.1,
                                          zoom_range=0.2,
                                          horizontal_flip=True,
                                          fill_mode='nearest')
train_maxaug_generator = train_maxaug_datagen.flow_from_directory('train_aug',
                                                                  target_size=INPUT_SIZE,
                                                                  batch_size=BATCH_SIZE,
                                                                  class_mode='categorical')
# -
# Validation pipeline: rescaling only, no augmentation.
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(
    'data2/validation',
    target_size=INPUT_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical')
# **Model**
# +
from keras.applications.xception import Xception
input_tensor = Input(shape=(INPUT_SIZE[0], INPUT_SIZE[1], 3)) # input image
print("Building base model for Xception...")
# ImageNet-pretrained Xception backbone without its classification head.
xception_base = Xception(input_tensor=input_tensor, weights='imagenet', include_top=False)
features_xception = GlobalAveragePooling2D()(xception_base.output)
print("Done!")
# +
# Classification head: one hidden layer, then softmax over the 128 classes.
x = Dense(1024, activation='relu', kernel_regularizer=regularizers.l2(0.00))(features_xception)
#x = Dropout(0.3)(x)
#x = BatchNormalization()(x)
predictions = Dense(128, activation='softmax')(x)
model = Model(inputs=input_tensor, outputs=predictions)
# +
def set_trainable(boolean):
    """Freeze the first 46 Xception layers; set the rest to *boolean*."""
    global xception_base, inceptionresnet_base, inception_base
    for idx, layer in enumerate(xception_base.layers):
        layer.trainable = False if idx < 46 else boolean
set_trainable(False) # default
# -
# **Training**
# TensorBoard logging of gradients and the graph.
tensorboard = callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=16,
                                    write_grads=True, write_graph=True)
# Save the best model (by validation loss) after each epoch.
checkpoints = callbacks.ModelCheckpoint("inceptionresnet-{val_loss:.3f}-{val_acc:.3f}.h5",
                                        monitor='val_loss', verbose=1, save_best_only=True,
                                        save_weights_only=False, mode='auto', period=0)
# Shrink the learning rate when validation loss plateaus.
reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=6, verbose=1,
                                                mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
adadelta = optimizers.Adadelta(lr=2.0, rho=0.95, epsilon=None, decay=0)
sgd = optimizers.SGD(lr=0.1, momentum=0.6, decay=0.2, nesterov=True)
# First training phase uses Adadelta.
model.compile(loss='categorical_crossentropy',
              optimizer=adadelta,
              metrics=['acc'])
model.summary()
# !nvidia-settings -a [gpu:0]/GPUFanControlState=1
# !nvidia-settings -a [fan:0]/GPUTargetFanSpeed=90
# !rm -R logs
print("Training Progress:")
# BUGFIX: the original referenced undefined generators (train_a_generator /
# train_b_generator) and raised NameError; use train_generator /
# train_aug_generator defined above.
model_log = model.fit_generator(train_generator, validation_data=validation_generator,
                                epochs=1, workers=5, use_multiprocessing=True,
                                callbacks=[checkpoints])
model_log = model.fit_generator(train_aug_generator, validation_data=validation_generator,
                                epochs=1, workers=5, use_multiprocessing=True,
                                callbacks=[checkpoints])
# Second training phase: switch the optimizer to SGD.
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['acc'])
print("Training Progress:")
model_log = model.fit_generator(train_aug_generator, validation_data=validation_generator,
                                epochs=2, workers=5, use_multiprocessing=True,
                                callbacks=[checkpoints])
model_log = model.fit_generator(train_generator, validation_data=validation_generator,
                                epochs=2, workers=5, use_multiprocessing=True,
                                callbacks=[checkpoints])
# !nvidia-settings -a [gpu:0]/GPUFanControlState=0
# **Fine-tuning**
# +
BATCH_SIZE = 44  # smaller batches for fine-tuning (more trainable layers -> more memory)
# BUGFIX: the flow_from_directory calls referenced undefined names
# (train_a_datagen / train_b_datagen) and raised NameError; each generator
# now flows from the datagen defined directly above it.
train_datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)
train_generator = train_datagen.flow_from_directory('data2/train',
                                                    target_size=INPUT_SIZE,
                                                    batch_size=BATCH_SIZE)
train_aug_datagen = ImageDataGenerator(rotation_range=3,
                                       width_shift_range=0.1,
                                       height_shift_range=0.1,
                                       rescale=1./255,
                                       shear_range=0.1,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest')
train_aug_generator = train_aug_datagen.flow_from_directory('data2/train',
                                                            target_size=INPUT_SIZE,
                                                            batch_size=BATCH_SIZE,
                                                            class_mode='categorical')
train_maxaug_datagen = ImageDataGenerator(rotation_range=3,
                                          width_shift_range=0.1,
                                          height_shift_range=0.1,
                                          rescale=1./255,
                                          shear_range=0.1,
                                          zoom_range=0.2,
                                          horizontal_flip=True,
                                          fill_mode='nearest')
train_maxaug_generator = train_maxaug_datagen.flow_from_directory('train_aug',
                                                                  target_size=INPUT_SIZE,
                                                                  batch_size=BATCH_SIZE,
                                                                  class_mode='categorical')
# Validation pipeline: rescaling only, rebuilt for the new batch size.
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(
    'data2/validation',
    target_size=INPUT_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical')
# -
# !nvidia-settings -a [gpu:0]/GPUFanControlState=1
# !nvidia-settings -a [fan:0]/GPUTargetFanSpeed=90
sgd_low = optimizers.SGD(lr=0.1, momentum=0.3, decay=0.3, nesterov=False)
# BUGFIX: the options list referenced undefined train_a_generator /
# train_b_generator; use the generators defined above.
# Each option = [unfreeze upper backbone?, data generator, optimizer].
options = [[True, train_generator, sgd_low], [True, train_aug_generator, sgd_low], [True, train_generator, sgd_low]]
for option in options:
    set_trainable(option[0])
    model.compile(optimizer=option[2], loss='categorical_crossentropy', metrics=['acc'])
    print("Training Progress for", option, ":")
    model_log = model.fit_generator(option[1], validation_data=validation_generator,
                                    epochs=4, workers=5, use_multiprocessing=True,
                                    callbacks=[checkpoints])
# !nvidia-settings -a [gpu:0]/GPUFanControlState=0
# **Evaluation**
#
# TODO: Fix class mapping
# BUGFIX: this evaluation cell was "disabled" with interleaved triple-quoted
# strings; a closing quote was immediately followed by code on the same line,
# which made the whole cell a SyntaxError. Preserved as comments until the
# class mapping is fixed (see TODO above).
# from keras.models import load_model
# from sklearn.metrics import classification_report, confusion_matrix
# import matplotlib.pyplot as plt
# import numpy as np
# %config InlineBackend.figure_format = 'retina'
# import itertools, pickle
# from glob import glob
# class_names = glob("train_aug/*") # Reads all the folders in which images are present
# class_names = sorted(class_names) # Sorting them
# fixed_classes = []
# for class_name in class_names:
#     fixed_classes.append(class_name[10:])
# name_id_map = dict(zip(range(len(class_names)), fixed_classes))
# og_classes = [str(x) for x in range(1,129)]
# validation_datagen = ImageDataGenerator(rescale=1./255)
# validation_generator = validation_datagen.flow_from_directory(
#     'data2/validation', shuffle=False,
#     target_size=(299, 299),
#     batch_size=batch_size,
#     class_mode='categorical')
# Y_pred = model.predict_generator(validation_generator, 6322 // batch_size+1)
# y_pred = np.argmax(Y_pred, axis=1)
# corr_preds = []
# for pred in y_pred:
#     corr_preds.append(int(name_id_map[pred]))
# print('Classification Report')
# print(classification_report(validation_generator.classes, y_pred, target_names=og_classes))
|
furniture/model-xception.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 圧縮と解凍をサクッとやる
#
import zlib
import gzip
import bz2
# +
# Compress a short text sample and compare the three stdlib codecs.
s = b'witch which has which witches wrist watch'
print("圧縮前: " + str(len(s)))
t = zlib.compress(s)
# %time print("zlib 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = gzip.compress(s)
# %time print("gzip 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = bz2.compress(s)
# %time print("bz2 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
# +
# Compress a long text document (a novel) the same way.
s = open('wagahai.txt', 'rb').read()
print("圧縮前: " + str(len(s)))
t = zlib.compress(s)
# %time print("zlib 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = gzip.compress(s)
# %time print("gzip 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = bz2.compress(s)
# %time print("bz2 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
# +
# Compress image file data (show the image, then compress its raw bytes).
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open('lena_std.png')
plt.imshow(img)
plt.show()
s = open("lena_std.png", "rb").read()
print("圧縮前: " + str(len(s)))
t = zlib.compress(s)
# %time print("zlib 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = gzip.compress(s)
# %time print("gzip 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = bz2.compress(s)
# %time print("bz2 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
# +
# Compress audio data; first play the WAV file chunk by chunk via PyAudio.
import wave
import pyaudio
CHUNK = 1024  # frames per read/write
wf = wave.open('J.S.Bach-G-air.wav')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)
data = wf.readframes(CHUNK)
while len(data) > 0:
    stream.write(data)
    data = wf.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
# Then compress the file's raw bytes.
s = open("J.S.Bach-G-air.wav", "rb").read()
print("圧縮前: " + str(len(s)))
t = zlib.compress(s)
# %time print("zlib 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = gzip.compress(s)
# %time print("gzip 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
t = bz2.compress(s)
# %time print("bz2 圧縮後: " + str(len(t)) + ", 圧縮率: " + str(round((len(s) - len(t))/len(s)*100)) + "%")
# +
# BUGFIX: this cell contained a dangling attribute access ("wf."), which is a
# SyntaxError. Close the wave file opened in the audio cell above to release
# the file handle (the most plausible intent of the unfinished line).
wf.close()
# -
|
20180209_zip/compress.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Adding the Aima-Python folder to the folder
import sys
import random
from random import choice
sys.path.insert(1, 'D:/Masters/Knowledge Representation/workplace/aima-python')
# -
from agents import *
from search import *
# ##### Creation of things in the environment
# +
class LifeJacket(Thing):
    """Grabbable item: gives the player a life jacket and removes all EnergyDown things."""
    pass
class PowerUp(Thing):
    """Increases the agent's performance by +1."""
    pass
class PowerDown(Thing):
    """Decreases the agent's performance by -1."""
    pass
class EnergyUp(Thing):
    """Increases the agent's performance by +4."""
    pass
class EnergyDown(Thing):
    """Decreases the agent's performance by -4."""
    pass
class WinningRow(Thing):
    """Marks the target (winning) row; triggers the last-level move."""
    pass
class Barrier(Thing):
    """Blocks the player's way, forcing it to find an alternative route."""
    pass
class PitFall(Thing):
    """Kills the player on contact."""
    pass
# -
# #### Defining the Program method
# The program method works on the percepts and return the appropriate action to be used by execute_action
def program(percepts):
    '''Returns an action based on its percepts'''
    # percepts: list of Things at the agent's location (plus markers such as
    # 'goalsearch' or Bump). The first matching percept decides the action;
    # if nothing decisive is seen, a random move is chosen.
    #print("Dheeraj" , percepts) # Contains list of things that are curently faced by agent in environment for tabledriven
    for values_percepted in percepts:
        if values_percepted == 'goalsearch':
            return 'searchproblem'
        if isinstance(values_percepted, PowerUp):
            return 'powerup'
        elif isinstance(values_percepted, PowerDown):
            return 'powerdown'
        elif isinstance(values_percepted, EnergyUp):
            return 'energyup'
        elif isinstance(values_percepted, EnergyDown):
            return 'energydown'
        elif isinstance(values_percepted, PitFall):
            return 'pitfall'
        elif isinstance(values_percepted, LifeJacket):
            return 'grab'
        if isinstance(values_percepted, Barrier):
            return 'barrier'
        if isinstance(values_percepted, WinningRow):
            return 'lastlevel'
        if isinstance(values_percepted, Bump):
            # then check if you are at an edge and have to turn
            turn = False
            choice = random.choice((1,2));
        else:
            choice = random.choice((1,2,3,4,5,6,7)) # 1-right, 2-left, others-forward
    # NOTE(review): indentation reconstructed — the random-move dispatch below
    # is assumed to run after the percept scan, as in the AIMA agent examples;
    # confirm against the original notebook.
    if choice == 1:
        return 'turnright'
    elif choice == 2:
        return 'turnleft'
    else:
        return 'moveforward'
# #### This creates the 2d Environment which contains the percepts, execute_action and the is_done method
class RaceFieldEnvironment(GraphicEnvironment):
def percept(self, agent):
'''return a list of things that are in our agent's location'''
#print(self.things_near(agent.location,2))
things = self.list_things_at(agent.location)
loc = copy.deepcopy(agent.location) # find out the target location
#Check if agent is about to bump into a wall
if agent.direction.direction == Direction.R:
loc[0] += 1
elif agent.direction.direction == Direction.L:
loc[0] -= 1
elif agent.direction.direction == Direction.D:
loc[1] += 1
elif agent.direction.direction == Direction.U:
loc[1] -= 1
if agent.goalsearch == True:
goalsearch = ['goalsearch']
return goalsearch
if agent.location[1] == 0 :
things.append(WinningRow()) # Adding the thing to know if the target row is reached
elif not self.is_inbounds(loc):
things.append(Bump())
return things
def execute_action(self, agent, action):
'''changes the state of the environment based on what the agent does.'''
if action == 'turnright':
print('{} decided to {} at location: {}'.format(str(agent)[1:-1], action, agent.location))
agent.turn(Direction.R)
elif action == 'turnleft':
print('{} decided to {} at location: {}'.format(str(agent)[1:-1], action, agent.location))
agent.turn(Direction.L)
elif action == 'moveforward':
if isinstance(agent,PlayerA):
print('{} decided to move {}wards at location: {}'.format(str(agent)[1:-1], agent.direction.direction, agent.location))
agent.moveforward()
elif isinstance(agent,PlayerB):
"""
The Agent will see all the things near to him in the environment.
If there is an Energy Down in the next near cell(not Adjacent), it will change the path
"""
near_things = self.things_near(agent.location,2)
if 5 < agent.lives <= agent.max_lives and any(isinstance(element[0],PitFall) and element[1] == 3 for element in near_things):
"""
The Agent will see all the things near to him in the environment.
If there is an Pitfall in the next near cell(not Adjacent), it will Jump the pitfall
"""
for element in near_things:
if isinstance(element[0],PitFall):
if element[1] == 3: # This will check if only the near cells(not adjacent) have energy drop
current_location = agent.location
forward_check = [current_location[0],current_location[1]-1]
backward_check = [current_location[0],current_location[1]+1]
left_check = [current_location[0]-1,current_location[1]]
right_check = [current_location[0]+1,current_location[1]]
# The following conditions check in which cell the energy down is and change direction accordingly
if any(isinstance(element,PitFall) for element in self.list_things_at(forward_check)):
print('Found Pitfall ahead, {} decided to Jump at location: {}'.format(str(agent)[1:-1] , agent.location))
agent.location = forward_check
agent.direction.direction = Direction.U
agent.lives = 5
elif any(isinstance(element,PitFall) for element in self.list_things_at(left_check)):
print('Found Pitfall Leftwards, {} decided to Jump at location: {}'.format(str(agent)[1:-1] , agent.location))
agent.location = left_check
agent.direction.direction = Direction.L
agent.lives = 5
elif any(isinstance(element,PitFall) for element in self.list_things_at(right_check)):
print('Found Pitfall Rightwards, {} decided to Jump at location: {}'.format(str(agent)[1:-1] , agent.location))
agent.location = right_check
agent.direction.direction = Direction.R
agent.lives = 5
break #In case the energy down is found exit the loop
elif agent.lives < agent.max_lives and any(isinstance(element[0],EnergyDown) and element[1] == 3 for element in near_things):
# This will check if the near element has energy down
# if any(isinstance(element[0],EnergyDown) and element[1] == 3 for element in near_things) :
for element in near_things:
if isinstance(element[0],EnergyDown):
if element[1] == 3: # This will check if only the near cells(not adjacent) have energy drop
current_location = agent.location
forward_check = [current_location[0],current_location[1]-1]
backward_check = [current_location[0],current_location[1]+1]
left_check = [current_location[0]-1,current_location[1]]
right_check = [current_location[0]+1,current_location[1]]
# The following conditions check in which cell the energy down is and change direction accordingly
if any(isinstance(element,EnergyDown) for element in self.list_things_at(forward_check)):
agent.direction.direction = Direction.R
point = 'ahead'
#print('Found Energy Drop ahead, {} decided to move {}wards at location: {}'.format(str(agent)[1:-1] ,agent.direction.direction, agent.location))
elif any(isinstance(element,EnergyDown) for element in self.list_things_at(backward_check)):
agent.direction.direction = Direction.U
point = 'behind'
#print('Found Energy Drop behind, {} decided to move {}wards at location: {}'.format(str(agent)[1:-1] ,agent.direction.direction, agent.location))
elif any(isinstance(element,EnergyDown) for element in self.list_things_at(left_check)):
agent.direction.direction = Direction.U
point = 'Left'
#print('Found Energy Drop on Left Side, {} decided to move {}wards at location: {}'.format(str(agent)[1:-1] ,agent.direction.direction, agent.location))
elif any(isinstance(element,EnergyDown) for element in self.list_things_at(right_check)):
agent.direction.direction = Direction.U
point = 'Right'
#print('Found Energy Drop on Right Side, {} decided to move {}wards at location: {}'.format(str(agent)[1:-1] ,agent.direction.direction, agent.location))
print('Found Energy Drop {}, {} decided to move {}wards at location: {}'.format(point,str(agent)[1:-1] ,agent.direction.direction, agent.location))
break #In case the energy down is found exit the loop
else:
# In case No nearby cell has enegy down, continue normally
# In case No nearby cell has a pitfall, continue normally
print('{} decided to move {}wards at location: {}'.format(str(agent)[1:-1], agent.direction.direction, agent.location))
agent.moveforward()
elif action == 'lastlevel' :
agent.direction.direction = Direction.L
print('{} decided to move {}wards at location: {}'.format(str(agent)[1:-1] ,agent.direction.direction, agent.location))
agent.moveforward()
elif action == "powerup":
items = self.list_things_at(agent.location, tclass=PowerUp)
if len(items) != 0:
if agent.powerup(items[0]):
if isinstance(agent,PlayerA):
agent.lives +=1
print('{} received a power up by having {} at location: {}'
.format(str(agent)[1:-1], str(items[0])[1:-1], agent.location))
self.delete_thing(items[0])
elif isinstance(agent,PlayerB):
if agent.lives == agent.max_lives:
print('{} ignores a power up at location: {} as its Power is Full'
.format(str(agent)[1:-1], agent.location))
agent.direction.direction = Direction.U
print('{} decided to move {}wards at location: {}'.format(str(agent)[1:-1], agent.direction.direction, agent.location))
agent.moveforward()
else:
agent.lives +=1
print('{} received a power up by having {} at location: {}'
.format(str(agent)[1:-1], str(items[0])[1:-1], agent.location))
self.delete_thing(items[0])
elif action == "powerdown":
items = self.list_things_at(agent.location, tclass=PowerDown)
if len(items) != 0:
if agent.powerdown(items[0]):
print('{} received a power down by having {} at location: {}'
.format(str(agent)[1:-1], str(items[0])[1:-1], agent.location))
self.delete_thing(items[0])
elif action == "energyup":
items = self.list_things_at(agent.location, tclass=EnergyUp)
if len(items) != 0:
if agent.energyup(items[0]):
if isinstance(agent,PlayerA):
agent.lives += 4
print('{} energized by having {} at location: {}'
.format(str(agent)[1:-1], str(items[0])[1:-1], agent.location))
self.delete_thing(items[0])
elif isinstance(agent,PlayerB):
if agent.lives == agent.max_lives:
print('{} ignores a energy up at location: {} as its Power is Full'
.format(str(agent)[1:-1], agent.location))
agent.direction.direction = Direction.U
print('{} decided to move {}wards at location: {}'.format(str(agent)[1:-1], agent.direction.direction, agent.location))
agent.moveforward()
else:
agent.lives += 4
if agent.lives > agent.max_lives:
agent.lives = 10 # This is the maximum allowed power of the player
print('{} energized by having {} at location: {}'
.format(str(agent)[1:-1], str(items[0])[1:-1], agent.location))
self.delete_thing(items[0])
elif action == "energydown":
items = self.list_things_at(agent.location, tclass=EnergyDown)
if len(items) != 0:
if agent.energydown(items[0]):
print('{} drained by having {} at location: {}'
.format(str(agent)[1:-1], str(items[0])[1:-1], agent.location))
self.delete_thing(items[0])
elif action == 'grab':
things = [thing for thing in self.list_things_at(agent.location)
if agent.can_grab(thing)]
if len(things):
print('{} grabbed {} at location: {}'
.format(str(agent)[1:-1],things[0].__class__.__name__, agent.location))
print('Removing all Energy Down Points')
#print("Grabbing", things[0].__class__.__name__)
if len(things):
agent.holding.append(things[0])
self.delete_thing(things[0])
for element in self.things:
# Updating the list of things
self.things[:] = (element for element in self.things if not isinstance(element,EnergyDown))
elif action == "barrier":
items = self.list_things_at(agent.location, tclass=Barrier)
if len(items) !=0:
if agent.barrier(items[0]):
agent.direction.direction = Direction.L
print('{} encountered barrier and decided to move {}wards at location: {}'.format(str(agent)[1:-1] ,agent.direction.direction, agent.location))
agent.moveforward()
elif action == "pitfall":
items = self.list_things_at(agent.location, tclass=PitFall)
if len(items) != 0:
if agent.pitfall(items[0]):
print('{} died by falling into {} at location: {}'
.format(str(agent)[1:-1], str(items[0])[1:-1], agent.location))
self.delete_thing(items[0])
agent.alive = False
elif action == "searchproblem":
agent.display_performance = True
agent.location = agent.goalpath[0]
print('{} decided to move at location: {}'.format(str(agent)[1:-1], agent.location))
agent.steps +=1
agent.goalpath.pop(0)
    def is_done(self):
        """Return True when the game is over.

        The game ends when the single player reaches the goal cell [0, 0],
        runs out of lives, or when no agent is alive any more.  Also prints
        the player's current lives at most once per performed action.
        """
        # Report lives only once per action; the flag is re-set by the agent's actions.
        if self.agents[0].display_performance:
            print(f'Current Lives with {str(self.agents[0])[1:-1]} : {self.agents[0].lives}')
            self.agents[0].display_performance = False
        if self.agents[0].location == [0,0]: # This is for the Player A if he reaches the Goal
            print(f'{str(self.agents[0])[1:-1]} won the game in {self.agents[0].steps} steps')
            self.agents[0].alive = False
            return True
        elif self.agents[0].lives <= 0:
            print(f'{str(self.agents[0])[1:-1]} died before reaching Goal with 0 lives')
            print('GAME OVER')
            self.agents[0].alive = False
            return True
        # Otherwise the game is done only when every agent is dead.
        dead_agents = not any(agent.is_alive() for agent in self.agents)
        #return dead_agents or no_edibles
        return dead_agents
# #### Setting up the environment with agents and things
def agent_run(agent_name,steps,player = None, seed = False):
    """Set up a 6x6 RaceFieldEnvironment, place `player` and the game items,
    run the game for at most `steps` steps, and report the outcome.

    agent_name: key used for the player's display colour in the environment.
    steps: maximum number of environment steps to run.
    player: the agent instance added to the field.
    seed: if truthy, used directly as the start location (an [x, y] list) —
        despite the name it is NOT an RNG seed; otherwise a random cell on
        the bottom row is chosen.
    """
    racefield = RaceFieldEnvironment(6,6, color={agent_name: (25, 32, 33),
                                    'PowerUp': (157, 200, 0), 'PowerDown': (200, 103, 0),
                                    'EnergyUp': (77, 200, 0), 'EnergyDown': (200, 0, 0),
                                    'PitFall':(214, 30, 107), 'Barrier': (240, 222, 29),
                                    'LifeJacket':(66, 135, 245)
                                    })
    # Creating the objects for the things
    powerup = PowerUp()
    powerdown = PowerDown()
    energyup = EnergyUp()
    energydown = EnergyDown()
    energydown_1 = EnergyDown()
    energydown_2 = EnergyDown()
    barrier = Barrier()
    lifejacket = LifeJacket()
    pitfall = PitFall()
    # Choosing a random start location
    if seed:
        start_location = seed
    else:
        start_location = random.choice([[1,5],[2,5],[3,5],[4,5],[5,5]])
    racefield.add_thing(player, start_location) # Adding player A on the centre of last row
    # TraceAgent(playerA)
    # Fixed item layout: each thing is placed on its own [x, y] cell.
    _dict_things = {powerup:[0,5],powerdown: [2,1],energyup: [4,2],
                    energydown: [0,3],energydown_1: [4,4],energydown_2: [2,2],
                    barrier:[5,2], lifejacket:[2,4], pitfall:[1,1]
                    }
    # Adding the things on location
    for thing,loc in _dict_things.items():
        racefield.add_thing(thing,loc)
    #print(racefield.get_world()) # Shows the current world
    print(f'{player.__class__.__name__} started at {start_location}, facing up. See if he Wins')
    racefield.run(steps)
    # Post-game report: grabbed items, then whether the step budget was exhausted.
    if not player.holding:
        print(f'The {player.__class__.__name__} did not grab anything in the Game')
    else:
        print(f'{player.__class__.__name__} grabbed the following in Game:')
        for grabs in player.holding:
            print(str(grabs)[1:-1])
    if player.is_alive():
        print('===================================================\n')
        print(f'{player.__class__.__name__} Exhausted all the steps before reaching goal(max. allowed steps= {steps})\n')
        print(f' Goal: [0,0], current location {player.location} \n')
        print('===================================================')
# ## Agent 1 Simple Reflex Agent
# This agent will know what is the immediate environment and will take the action accordingly
#
# 1. If the agent sees that there is an energy down in any adjacent cell, it will avoid going there
# 2. If the agent's current lives are 10 (the maximum), the agent will avoid going to a Power Up as it cannot hold more power
# 3. If an agent has a Life Jacket in any adjacent cell, it will grab it.
# 4. If the agent sees a pitfall, it will jump over it.
# #### Creating the Agent (Player A)
# Agent :Player A is a Simple Reflex Agent
# 1. It percepts upon the items present in the environment
# 2. It percept the Wall and takes appropriate change in direction
# 3. It percepts the Winning Row and take the appropriate direction of moving towards goal
# 4. It percepts the available Power Ups and increase it Life COunt
# 5. It percepts the encounter of the Power Down and decreases its Life Count
# 6. It percepts the encounter of the Energy Up and increases its Life Count
# 7. It percepts the encounter of the Energy Down and decreases its Life Count
# 8. It percepts the Pitfall and dies
# +
class PlayerA(Agent):
    """This agent takes action based solely on the percept. <SIMPLE REFLEX AGENT>

    The environment calls the methods below when the agent's program picks
    the corresponding action; most of them only classify the encountered
    thing, while the life bookkeeping is partly done by the environment.
    """
    def __init__(self, program=None, goalpath = None, goalsearch=False):
        # `goalpath`: precomputed list of [x, y] cells to follow when
        # `goalsearch` is True (used by the search-driven runs below).
        Agent.__init__(self,program)
        self.goalsearch = goalsearch
        self.goalpath = goalpath
    # Class-level defaults shared by all instances.
    # NOTE(review): `location` and `direction` are mutable class attributes;
    # this looks safe only because a single PlayerA exists per game — TODO confirm.
    location = [0,3]
    direction = Direction("up")
    steps = 0
    max_lives = 10
    lives = max_lives # lives with player on game start
    display_performance = True  # when True, the environment prints current lives once
    def can_grab(self, thing):
        """Explorer can only grab Life Jacket"""
        return thing.__class__ == LifeJacket
    def moveforward(self, success=True):
        '''moveforward possible only if success (i.e. valid destination location)'''
        self.display_performance = True
        if not success:
            return
        # Each successful move shifts the (x, y) location by one cell and costs a step.
        if self.direction.direction == Direction.R:
            self.location[0] += 1
            self.steps +=1
        elif self.direction.direction == Direction.L:
            self.location[0] -= 1
            self.steps +=1
        elif self.direction.direction == Direction.D:
            self.location[1] += 1
            self.steps +=1
        elif self.direction.direction == Direction.U:
            self.location[1] -= 1
            self.steps +=1
    def turn(self, d):
        # Rotate the heading by `d`; Direction implements + for turning.
        self.display_performance = True
        self.direction = self.direction + d
    def powerup(self, thing):
        '''returns True upon success or False otherwise'''
        # The +1 life for a power-up is applied by the environment, not here.
        self.display_performance = True
        if isinstance(thing, PowerUp):
            #self.lives +=1
            return True
        return False
    def powerdown(self, thing):
        ''' returns True upon success or False otherwise'''
        self.display_performance = True
        if isinstance(thing, PowerDown):
            self.lives -=1
            return True
        return False
    def energyup(self, thing):
        ''' returns True upon success or False otherwise'''
        # The +4 lives for an energy-up is applied by the environment, not here.
        self.display_performance = True
        if isinstance(thing, EnergyUp):
            #self.lives += 4
            return True
        return False
    def energydown(self, thing):
        ''' returns True upon success or False otherwise'''
        self.display_performance = True
        if isinstance(thing, EnergyDown):
            self.lives -= 4
            return True
        return False
    def pitfall(self,thing):
        # True if `thing` is a PitFall; the environment then kills the agent.
        if isinstance(thing,PitFall):
            return True
        return False
    def barrier(self,thing):
        # True if `thing` is a Barrier; the environment then turns the agent.
        if isinstance(thing,Barrier):
            return True
        return False
# -
agent_run('PlayerA',100,PlayerA(program,goalsearch=False))  # play one game with the simple reflex agent (at most 100 steps)
# ## Agent 2 Model Based Reflex Agent
# This agent will know what is the immediate environment and will take the action accordingly
#
# 1. If the agent see's that there is a energy down in any adjacent cell, it will avoid going there
# 2. If agent's Current Live is 10(Maximum), then agent will avoid going to Power Up as it cannot hold more power
# 3. If a agent has Life Jacket in any adjacent cell, it will grab it.
# 4. If the agent see a pitfall, it will Jump over it.
# #### Creating the Agent (Player B)
#
# Agent :Player B is a Model Based Reflex Agent
# 1. It percepts upon the items present in the environment <b>Done</b>
#
# 2. It percept the Wall and takes appropriate change in direction <b>Done</b>
#
# 3. It percepts the Winning Row and take the appropriate direction of moving towards goal <b>Done</b>
#
# 4. It percepts the available Power Ups and increase it Life Count <b>Done</b>
# <br>a. If agent's Current Live is 10(Maximum), then agent will avoid going to Power Up as it cannot hold more power - <b>Done</b>
#
# 5. It percepts the encounter of the Power Down and decreases its Life Count <b>Done</b>
#
# 6. It percepts the encounter of the Energy Up and increases its Life Count <b>Done</b>
# <br>a. If agent's Current Live is 10(Maximum), then agent will avoid going to Energy Up as it cannot hold more power <b>Done</b>
# <br>b. If after taking the Energy Up if the energy goes > 10 , it should keep it maximum to 10 <b>Done</b>
#
# 7. It percepts the encounter of the Energy Down and decreases its Life Count
# <br>a. If the agent live is < max lives, then its see's that there is a energy down in any near(non adjacent) cell, it will avoid going there -<b>Done</b>
#
# 8. It percepts the Pitfall and dies <b>Done</b>
# <br>a. If the agent see a pitfall, it will Jump over it if the lives are between 5 and 10. This operation has a cost,which reduces the agent lives to 4
# +
class PlayerB(Agent):
    """This agent takes action based on the percept and the near environment. <MODEL BASED REFLEX AGENT>

    Same action interface as PlayerA; the model-based behaviour (e.g. the
    max-lives cap on energy-ups) is implemented in the environment's
    execute_action, which checks isinstance(agent, PlayerB).
    """
    # Class-level defaults shared by all instances.
    # NOTE(review): `location` and `direction` are mutable class attributes;
    # this looks safe only because a single PlayerB exists per game — TODO confirm.
    location = [0,3]
    direction = Direction("up")
    steps = 0
    max_lives = 10
    lives = max_lives # lives with player on game start
    display_performance = True  # when True, the environment prints current lives once
    def can_grab(self, thing):
        """Explorer can only grab Life Jacket"""
        return thing.__class__ == LifeJacket
    def moveforward(self, success=True):
        '''moveforward possible only if success (i.e. valid destination location)'''
        self.display_performance = True
        if not success:
            return
        # Each successful move shifts the (x, y) location by one cell and costs a step.
        if self.direction.direction == Direction.R:
            self.location[0] += 1
            self.steps +=1
        elif self.direction.direction == Direction.L:
            self.location[0] -= 1
            self.steps +=1
        elif self.direction.direction == Direction.D:
            self.location[1] += 1
            self.steps +=1
        elif self.direction.direction == Direction.U:
            self.location[1] -= 1
            self.steps +=1
    def turn(self, d):
        # Rotate the heading by `d`; Direction implements + for turning.
        self.display_performance = True
        self.direction = self.direction + d
    def powerup(self, thing):
        '''returns True upon success or False otherwise'''
        # The +1 life for a power-up is applied by the environment, not here.
        self.display_performance = True
        if isinstance(thing, PowerUp):
            #self.lives +=1
            return True
        return False
    def powerdown(self, thing):
        ''' returns True upon success or False otherwise'''
        self.display_performance = True
        if isinstance(thing, PowerDown):
            self.lives -=1
            return True
        return False
    def energyup(self, thing):
        ''' returns True upon success or False otherwise'''
        # The +4 lives (capped at max_lives) is applied by the environment, not here.
        self.display_performance = True
        if isinstance(thing, EnergyUp):
            #self.lives += 4
            return True
        return False
    def energydown(self, thing):
        ''' returns True upon success or False otherwise'''
        self.display_performance = True
        if isinstance(thing, EnergyDown):
            self.lives -= 4
            return True
        return False
    def pitfall(self,thing):
        # True if `thing` is a PitFall; the environment then kills the agent.
        if isinstance(thing,PitFall):
            return True
        return False
    def barrier(self,thing):
        # True if `thing` is a Barrier; the environment then turns the agent.
        if isinstance(thing,Barrier):
            return True
        return False
# -
agent_run('PlayerB',100,PlayerB(program))  # play one game with the model-based reflex agent (at most 100 steps)
# ## SEARCH
# #### Defining the Problem Class which will be the base class for the Problem Statement
import math , sys
from collections import deque
class Problem(object):
    """Abstract base class for a formal search problem.

    A concrete domain subclasses this and overrides `actions` and `result`
    (and usually `action_cost` and `h`).  By default every action costs 1
    and the heuristic is 0.  Instances are created with `initial` and
    `goal` states (or an overridden `is_goal`) plus any extra keyword
    arguments the subclass needs.
    """
    def __init__(self, initial=None, goal=None, **kwds):
        # Stash everything on the instance so subclasses can add fields freely.
        self.__dict__.update(initial=initial, goal=goal, **kwds)
    def actions(self, state):
        """Return the actions available in `state`; must be overridden."""
        raise NotImplementedError
    def result(self, state, action):
        """Return the state reached from `state` via `action`; must be overridden."""
        raise NotImplementedError
    def is_goal(self, state):
        """A state is a goal iff it equals the stored goal state."""
        return state == self.goal
    def action_cost(self, s, a, s1):
        """Uniform cost model: every step costs 1."""
        return 1
    def h(self, node):
        """Default heuristic: 0 (uninformative)."""
        return 0
    def __str__(self):
        return f'{type(self).__name__}({self.initial!r}, {self.goal!r})'
# #### Defining the Node Class ( A node in the search tree)
# +
class Node:
    """A node in a search tree.

    Wraps a problem `state` together with the `parent` node it was expanded
    from, the `action` that produced it, and the accumulated `path_cost`.
    Two distinct nodes may carry the same state when that state is
    reachable along two different paths.

    Note: truthiness of a Node goes through `__len__` (its depth), so a
    root node (depth 0) is falsy; `path()` therefore excludes the root.
    """
    def __init__(self, state, parent=None, action=None, path_cost=0):
        self.__dict__.update(state=state, parent=parent, action=action, path_cost=path_cost)
    def __repr__(self):
        return f'<{self.state}>'
    def __len__(self):
        # Depth of the node: number of edges back to the root.
        return 0 if self.parent is None else 1 + len(self.parent)
    def __lt__(self, other):
        # Order nodes by accumulated cost (useful for priority queues).
        return self.path_cost < other.path_cost
    def path(self):
        """Return the list of nodes from just below the root down to this node."""
        chain = []
        node = self
        while node:  # stops at the root, which is falsy via __len__
            chain.append(node)
            node = node.parent
        chain.reverse()
        return chain
    def solution(self):
        """Return the sequence of actions along `path()`."""
        return [node.action for node in self.path()]
failure = Node('failure', path_cost=math.inf)  # sentinel: no solution was found
cutoff = Node('cutoff', path_cost=math.inf)    # sentinel: iterative deepening hit its depth limit
# -
def expand(problem, node):
    """Yield the child nodes obtained by applying every legal action to `node`."""
    state = node.state
    for action in problem.actions(state):
        next_state = problem.result(state, action)
        step_cost = problem.action_cost(state, action, next_state)
        yield Node(next_state, node, action, node.path_cost + step_cost)
# +
FIFOQueue = deque  # appendleft() + pop() give first-in-first-out order for BFS
LIFOQueue = list  # append() + pop() give last-in-first-out order for DFS
# -
# ### The Search Technique : Breadth First Search
def breadth_first_search(problem):
    "Search shallowest nodes in the search tree first."
    # Early exit: the initial state may already satisfy the goal test.
    node = Node(problem.initial)
    if problem.is_goal(problem.initial):
        return node
    frontier = FIFOQueue([node])  # deque used FIFO: appendleft + pop
    reached = {problem.initial}  # states already generated, to avoid revisits
    while frontier:
        node = frontier.pop()  # oldest node (right end of the deque)
        for child in expand(problem, node):
            s = child.state
            if problem.is_goal(s):
                return child  # goal test on generation
            if s not in reached:
                reached.add(s)
                frontier.appendleft(child)
    return failure  # frontier exhausted: no path exists
# ### The Search Technique : Depth Limited Search
# +
def is_cycle(node, k=30):
    """Does this node form a cycle of length k or less?

    Walks up to `k` ancestors of `node` looking for a repeat of its state.
    """
    ancestor, remaining = node.parent, k
    while ancestor is not None and remaining > 0:
        if ancestor.state == node.state:
            return True
        ancestor, remaining = ancestor.parent, remaining - 1
    return False
def depth_limited_search(problem, limit=10):
    "Search deepest nodes in the search tree first."
    # LIFO stack gives depth-first order; nodes at depth >= limit are not expanded.
    frontier = LIFOQueue([Node(problem.initial)])
    result = failure
    while frontier:
        node = frontier.pop()
        if problem.is_goal(node.state):
            return node
        elif len(node) >= limit:
            # At the depth limit: remember the search was cut off, since a
            # larger limit might still find a solution.
            result = cutoff
        elif not is_cycle(node):
            for child in expand(problem, node):
                frontier.append(child)
    return result
# -
# ### The Search Technique : Iterative Deepening Search
def iterative_deepening_search(problem):
    """Run depth-limited search with limits 1, 2, 3, ... and return the
    first outcome that is not the `cutoff` sentinel."""
    for depth_limit in range(1, sys.maxsize):
        outcome = depth_limited_search(problem, depth_limit)
        if outcome != cutoff:
            return outcome
# ### Defining the Problem Statement class
# +
class GoalSearch(Problem):
    """Finding a path on a 2D grid with obstacles. Obstacles are (x, y) cells."""
    def __init__(self, initial=(5,5), goal=(0,0), obstacles=(), **kwds):
        # The initial and goal cells are never treated as obstacles.
        Problem.__init__(self, initial=initial, goal=goal,obstacles=set(obstacles) - {initial, goal}, **kwds)
    # Candidate moves: up, left, right, down (no diagonals).
    directions = [(0, -1),(-1, 0), (+1, 0),(0, +1)]
    def action_cost(self, s, action, s1):
        # Step cost is the Euclidean distance between the two cells.
        return straight_line_distance(s, s1)
    def h(self, node):
        # Heuristic: straight-line distance from the node's cell to the goal.
        return straight_line_distance(node.state, self.goal)
    def result(self, state, action):
        "Both states and actions are represented by (x, y) pairs."
        # Attempting to move into an obstacle leaves the state unchanged.
        return action if action not in self.obstacles else state
    def actions(self, state):
        """You can move one cell in any of `directions` to a non-obstacle cell."""
        x, y = state
        return {(x + dx, y + dy) for (dx, dy) in self.directions} - self.obstacles
def straight_line_distance(A, B):
    """Return the Euclidean distance between points `A` and `B`.

    Components are paired with zip, so extra trailing components of the
    longer point are ignored.
    """
    squared = 0
    for a, b in zip(A, B):
        squared += abs(a - b) ** 2
    return squared ** 0.5
# -
# Pick a seed from a fixed pool, then choose a random start cell on the bottom row.
seed_randomizer = [34,5,656,578,98,35,52,3,5,67,78,797,99,898,895586,63626,26,986,9,869,68,96]
x = random.choice(seed_randomizer)  # NOTE(review): the seed itself is picked with the unseeded RNG, so runs still vary
random.seed(x)
start_location = random.choice([[1,5],[2,5],[3,5],[4,5],[5,5]])
start_location_search = tuple(start_location)  # search states are hashable (x, y) tuples
searchproblem = GoalSearch(initial = start_location_search,obstacles={(4,4),(0,3),(2,2),(5,2),(2,1),(1,1)})
# ##### Goal Test with Breadth First Search
# +
# Solve the grid problem with BFS once, print the route, then replay it in the game.
# (The original ran breadth_first_search twice and misspelled "Shortest".)
goalpath_elements = breadth_first_search(searchproblem).solution()
print('Breadth First Search \n Shortest Route :', goalpath_elements)
goalpath = [list(element) for element in goalpath_elements]  # agent locations are mutable [x, y] lists
agent_run('PlayerA',100,PlayerA(program,goalpath, goalsearch=True),start_location)
# -
# ##### Goal Test with Depth Limited Search
# +
# Solve with depth-limited search once, print the path, then replay it in the game.
# (The original ran depth_limited_search twice.)
goalpath_elements = depth_limited_search(searchproblem).solution()
print('Depth Limited Search \n Shortest Path = ', goalpath_elements)
goalpath = [list(element) for element in goalpath_elements]  # agent locations are mutable [x, y] lists
agent_run('PlayerA',100,PlayerA(program,goalpath, goalsearch=True),start_location)
# -
# ##### Goal Test with Iterative Deepening Search
# +
# Solve with iterative deepening search once, print the path, then replay it in the game.
# (The original ran iterative_deepening_search twice.)
goalpath_elements = iterative_deepening_search(searchproblem).solution()
print('Iterative Deepening Search \n Shortest Path = ', goalpath_elements)
goalpath = [list(element) for element in goalpath_elements]  # agent locations are mutable [x, y] lists
agent_run('PlayerA',100,PlayerA(program,goalpath, goalsearch=True),start_location)
# -
|
Assignments/Assignment 1/Assignment 1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### New to Plotly?
# Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/).
# <br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online).
# <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
# #### Version Check
# Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
import plotly
plotly.__version__  # show the installed plotly version
# #### LaTeX Typesetting
# +
import plotly.plotly as py  # NOTE(review): `plotly.plotly` is the legacy online mode (moved to the chart-studio package in plotly 4+)
import plotly.graph_objs as go
# Two scatter traces whose legend names are LaTeX expressions (rendered by MathJax).
trace1 = go.Scatter(
    x=[1, 2, 3, 4],
    y=[1, 4, 9, 16],
    name=r'$\alpha_{1c} = 352 \pm 11 \text{ km s}^{-1}$'
)
trace2 = go.Scatter(
    x=[1, 2, 3, 4],
    y=[0.5, 2, 4.5, 8],
    name=r'$\beta_{1c} = 25 \pm 11 \text{ km s}^{-1}$'
)
data = [trace1, trace2]
# Axis titles may also contain LaTeX.
layout = go.Layout(
    xaxis=dict(
        title=r'$\sqrt{(n_\text{c}(t|{T_\text{early}}))}$'
    ),
    yaxis=dict(
        title=r'$d, r \text{ (solar radius)}$'
    )
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='latex')  # renders via the online plotting service
# +
from IPython.display import display, HTML
# Inject the documentation site's fonts and stylesheet into the notebook.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
# Publish this notebook as a page of the plotly documentation site.
publisher.publish(
    'latex-typesetting.ipynb', 'python/LaTeX/', 'LaTeX',
    'How to add LaTeX to python graphs.',
    title = 'Python LaTeX | Examples | Plotly',
    has_thumbnail='true', thumbnail='thumbnail/latex.jpg',
    language='python',
    display_as='style_opt', order=3, ipynb='~notebook_demo/268')
# -
|
_posts/python-v3/advanced/latex/latex-typesetting.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rKkRpKUF3Th6"
# # Stein's Paradox
# + [markdown] id="NXvBQPn72YxC"
# [Stein's paradox](https://en.wikipedia.org/wiki/Stein%27s_example)
#
# We will compare the risk of [James–Stein estimator](https://en.wikipedia.org/wiki/James%E2%80%93Stein_estimator) to a naive estimator on a simulated high-dimensional dataset.
# + id="c1I5Jpj1U4o5"
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
# + [markdown] id="tsa2Euoehvvk"
# We have a single data point $x$ drawn from a $d$-dimensional multivariate normal distribution with mean vector $\theta$ and covariance matrix $I_d$ (the $d\times d$ identity matrix).
#
# $X \sim \mathcal{N}(\theta, I_d)$
#
# We want to obtain an estimate $\hat{\theta}$ of $\theta$ by using only $x$.
#
# We will compare different estimators $\hat{\theta}(x)$ using their "risk", which is basically their mean squared error across trials.
# + [markdown] id="uGFe5U1kjTkw"
# The simplest estimator is $\hat{\theta}(x) = x$. We will call this the naive estimator.
# + id="vmkCowyUU7sW"
def est_naive(x):
    """Identity estimator: use the observation itself as the estimate of theta."""
    return x
# + [markdown] id="k0XbMOBMjZgg"
# Stein's paradox suggests that we can come up with an alternative estimator that has lower risk: $\hat{\theta}(x) = \left(1 - \frac{d - 2}{||x||_2^2}\right) \times x$. We can think of this as shrinking our estimate $\hat{\theta}$ towards zero, tuning the strength of the shrinkage we apply by estimating something directly from our single data point (namely, its Euclidean norm).
# + id="phu0s0cyhuUX"
def est_stein(x):
    """James-Stein estimator: shrink each row of `x` toward the origin.

    `x` has shape (m, d); each row is scaled by 1 - (d - 2) / ||row||^2.
    """
    d = x.shape[1]
    squared_norms = np.linalg.norm(x, axis=1) ** 2
    shrink = 1 - (d - 2) / squared_norms
    return shrink[:, None] * x
# + [markdown] id="9k_0iiEAj9_O"
# We define a function to estimate the risk of an estimator at a particular true value of $\theta$ by averaging the mean squared error of the estimator over $m$ trials.
# + id="cxDaxHCfXBFK"
def mean_risk(est, theta, m=int(1e6), seed=None):
    """Estimate the risk (expected squared error) of estimator `est` at `theta`.

    Draws `m` samples from N(theta, I), applies `est` to the (m, d) sample
    matrix, and returns the mean squared Euclidean error against `theta`.

    `seed` (optional, backward-compatible addition) makes the simulation
    reproducible; the default None keeps the original non-deterministic
    behaviour.
    """
    samples = stats.multivariate_normal(theta, 1).rvs(m, random_state=seed)
    ests = est(samples)
    sq_errors = np.linalg.norm(ests - theta, axis=1) ** 2
    return np.mean(sq_errors)
# + [markdown] id="sG2ipIRHkgo3"
# We now evaluate the mean risk for various choices of $\theta$. For simplicity, we just try a sequence of $\theta$'s whose components are all equal and take integer values between 0 and 10 inclusive.
# + id="4H4SF0ljXK2T"
# Evaluate both estimators at theta = (t, ..., t) for t = 0..10 in d = 10 dimensions.
d = 10
naive_risk = [mean_risk(est_naive, [t] * d) for t in range(11)]
stein_risk = [mean_risk(est_stein, [t] * d) for t in range(11)]
# + [markdown] id="HTxa3-Nlk5OL"
# We can then plot the mean risk.
# + id="e6hr1oKrhJaK" colab={"base_uri": "https://localhost:8080/", "height": 283} executionInfo={"status": "ok", "timestamp": 1634920544613, "user_tz": -330, "elapsed": 884, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="3aab121c-4310-45c6-b752-98f97c820821"
# Risk curves: naive estimator in red, James-Stein in blue.
plt.plot(np.arange(len(naive_risk)), naive_risk, label='naive', color='r')
plt.plot(np.arange(len(stein_risk)), stein_risk, label='Stein', color='b')
plt.xlabel(r'$\theta_i$')  # the common value of every component of theta
plt.ylabel('risk')
plt.legend()
plt.show()
# + [markdown] id="wn6i53r_mLQ3"
# We can see that when the true $\theta$ is close to the zero vector, the Stein estimator has a much lower risk than the naive estimator. This is what we expect to happen if we think of the Stein estimator as performing some kind of Bayesian shrinkage towards a prior distribution over $\theta$ which happens to match the true $\theta$ reasonably well. We could imagine that some property like this might also hold for 1 or 2 dimensions. What is perhaps more surprising is that the Stein estimator has lower risk than the naive estimator even when the true $\theta$ is far from the zero vector (the Stein estimator appears to asymptotically approach the risk of the naive estimator from below as the distance between the true $\theta$ and the zero vector goes to infinity). This suggests that even when the choice of a Bayesian prior is arbitrarily "wrong" in the sense that it is centered very far from the true value of the parameter, it is still better to apply the shrinkage (as long as we are in high-dimensional space, $d \geq 3$).
|
_notebooks/2022-01-23-stein.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:dbv1v1_0]
# language: python
# name: conda-env-dbv1v1_0-py
# ---
# <font color=gray>ADS Sample Notebook.
#
# Copyright (c) 2020 Oracle, Inc. All rights reserved. Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
# </font>
#
# ***
# # Working with SQL MAGIC
# <p style="margin-left:10%; margin-right:10%;">by the <font color=teal> Oracle Cloud Infrastructure Data Science Service Team </font></p>
#
# ***
#
# ## Overview:
#
# This notebook demonstrates how to use SQL Magic to work with a database. Magic commands are a set of functions which are not valid Python code but can be run and executed in Jupyter Notebooks. There are two types of magic commands, line magics and cell magics. Line magics start with `%` and operate on a single line of input. Cell magics start with `%%`, and they work on multiple lines in a call.
#
# IPython SQL magic extension allows you to directly write SQL queries in Jupyter notebook cells.
#
# ## Prerequisites:
#
# - Experience with the topics: Novice
# - Professional experience: None
#
# ---
#
# ## Objectives:
#
# - <a href='#setup'>Setting Up `ipython-sql`</a>
# - <a href='#sql_DML'>Data Manipulation Language Commands</a>
# - <a href='#sql_DQL'>Data Query Language Commands</a>
# - <a href='#sql_var'>Variable Bindings</a>
# - <a href='#sql_viz'>Data Visualizations</a>
# - <a href='#reference'>References</a>
# ***
#
#
#
import pandas as pd
# <a id='setup'></a>
# # Setting Up `ipython-sql`
#
# `ipython-sql` uses a number of ipython magic commands to interact directly with the database. The following sections will cover the following magic commands
# * `%config SqlMagic`
# * `load_ext sql`
# * `%sql`
# * `%%sql`
#
# In the following cell, the `ipython-sql` package is loaded with the magic command `%load_ext sql`. Note, it is not loaded with an `import` statement. `ipython-sql` supports a variety of databases. The command `%sql sqlite://` makes a connection to an in-memory SQLite database. This database is now bound to the notebook and future `%sql` command will be performed on this database.
# %load_ext sql
# %sql sqlite://
# You can configure the database using the `%config SqlMagic` magic command. This prints the current configuration information, descriptions of the options, current values, and what values can be set.
# %config SqlMagic
# The `%config SQLMagic` command also allows options to be set. These would be in the form of `%config SQLMagic.<option>=<value>` where `<option>` is the name of the option that is to be set and `<value>` is the value that is to be set. The `%config SQLMagic` command lists the options and valid values.
#
# The command `%config SQLMagic.<option>` will return the current value of the option.
# %config SqlMagic.autocommit
# %config SqlMagic.autocommit=False
# %config SqlMagic.autocommit
# <a id='sql_DML'></a>
# # Data Manipulation Language Commands
#
# A data manipulation language (DML) command can be issued with the `%%sql` command once a database is bound to the `ipython-sql` module. The DML statements in the next cell create a table called `writer` and populates it with three authors.
# + language="sql"
# DROP TABLE IF EXISTS author;
# CREATE TABLE author (given_name, family_name, year_of_death);
# INSERT INTO author VALUES ('William', 'Shakespeare', 1616);
# INSERT INTO author VALUES ('Bertold', 'Brecht', 1956);
# INSERT INTO author VALUES ('Virginia', 'Woolf', 1941);
# -
# The `--persist <variable>` option can be used to copy a data set into a new table. The name of the table will be the same as the name of the variable. In the following cells, a Pandas DataFrame will be created. Then several `ipython-sql` commands will be issued. The first one drops the animals table if it exists, because persisting raises an error when the target table already exists. Then the `--persist` option will be used to copy the DataFrame into the database as a new table. The final command will query all the records in the newly created animals table.
# Example table: build a small DataFrame, persist it into SQLite, and query it back.
animals = pd.DataFrame({'num_legs': [2, 4, 8, 0],
                        'num_wings': [2, 0, 0, 0],
                        'num_specimen_seen': [10, 2, 1, 8]},
                       index=['falcon', 'dog', 'spider', 'fish'])
# %sql DROP TABLE IF EXISTS 'animals'
# %sql --persist animals
# %sql SELECT * FROM animals
# <a id='sql_DQL'></a>
# # Data Query Language Commands
#
# A data query language (DQL) command can be used to obtain records from the database.
# If your query is short, you can use one line of code:
# %sql SELECT * FROM author WHERE year_of_death >=1950;
# The previous cell printed the results of the query into the notebook. It is also possible to capture the results into a Python object. If the query can fit on a single line then the `<variable> = %sql <DQL>` command can be used. This will store the results in the specified variable. In the following cell, this approach is used to obtain authors that died before 1950.
# %config SqlMagic.autopandas=False
# old_author = %sql SELECT * FROM author WHERE year_of_death < 1950;
old_author
# For longer SQL commands use
# ```
# # %%sql <variable> <<
# <DQL>
# ```
# The result is stored in the `<variable>` variable.
# %config SqlMagic.autopandas=False
# + magic_args="author << " language="sql"
# SELECT given_name, family_name, year_of_death
# FROM author;
# -
author
# In the preceding cell, `author` is an object of class `sql.run.ResultSet`. It can be converted to a Pandas DataFrame using the `DataFrame()` method.
df = author.DataFrame()  # convert the sql.run.ResultSet to a Pandas DataFrame
type(df)
# To have `ipython-sql` return record sets in a Pandas DataFrame, set the `autopandas` option to `True`.
# %config SqlMagic.autopandas=True
# author = %sql SELECT given_name, family_name, year_of_death FROM author
type(author)
# <a id='sql_var'></a>
# # Variable Bindings
#
# Python variables can be bound to the SQL commands with the `:<variable>`, `'{variable}'` or `$variable` syntax. In the next cell, the variable `name` is set to William. The command is issued to return any results where the `first_name` is equal to the value of `name`.
# Python variables bound into the SQL statements below via :name, '{name}', and $name.
first_name = 'William'
last_name = 'Shakespeare'
death_century = 1600
# + language="sql"
# SELECT *
# FROM author
# WHERE
# given_name LIKE :first_name
# AND family_name LIKE '{last_name}'
# AND year_of_death >= CAST('$death_century' AS INT)
# -
# <a id='sql_viz'></a>
# # Data Visualization
#
# Record sets that are of the class `sql.run.ResultSet` have the methods `.plot()`, `.pie()`, and `.bar()`. These are convenient for making quick plots.
# Quick-look charts produced directly from the captured result set.
old_author.bar()
old_author.plot()
old_author.pie()
# <a id="reference"></a>
# # References
#
# * [Oracle ADS Library documentation](https://docs.cloud.oracle.com/en-us/iaas/tools/ads-sdk/latest/index.html)
# * [ipython-sql](https://pypi.org/project/ipython-sql/)
# * [SQLite Tutorial](https://www.sqlitetutorial.net/)
#
|
conda_environment_notebooks/oracle_database/sqlmagic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Autism in Children under the age of 36 months in comparison to Adults with Asd.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from random import randint
import pandas as pd
import scipy.stats as stats
# `pandas.tools.plotting` was removed in pandas 0.25; the module is now `pandas.plotting`.
from pandas.plotting import scatter_matrix
import pandas as DataFrame  # NOTE(review): aliasing pandas as `DataFrame` is misleading; kept only for compatibility
# # Variables for my dataset
# ### Age in months
# + active=""
# Autism_data = {
#     'Age_mons':['3', '6', '9', '12', '18', '24', '36'],
#
# }
# df1 = pd.DataFrame(Autism_data)
# df1
# -
# ### Gender
# +
Autism_data = {
    'Gender':['m', 'f'],
}
df = pd.DataFrame(Autism_data)
df
# -
# ### Jaundice
# +
Autism_data = {
    'Jaundice':['yes', 'no'],
}
df = pd.DataFrame(Autism_data)
df
# -
# ### ASD Traits
# +
ASD_traits = {
    'Family members with ASD traits':['yes', 'no'],
}
# fixed: this cell previously built the frame from `Autism_data` (the Jaundice dict)
# instead of the `ASD_traits` dict defined just above
df = pd.DataFrame(ASD_traits)
df
# -
# ### Age of mother
Autism_1 = {
    "Age of mother": ["20", "25", "30", "35", "40"]
}
df = pd.DataFrame(Autism_1)
df
# ### Ethnicity
Autism_1 = {
    "Ethnicity": ["Irish", "Polish", "Romanian", "Latvian", "Italian"]
}
df = pd.DataFrame(Autism_1)
df
# ### Qchat 10 score
#
Autism_1 = {
    "QCHAT 10 Score": ["2", "4", "6", "8", "10"]
}
df = pd.DataFrame(Autism_1)
df
# ### A1 - A10 represent binary digits (1s and 0s) as answers to the QCHAT-10 questionnaire; further details in the README.
# ### Data frame for all variables
# +
# create a DataFrame using a dictionary of lists:
# each dictionary key becomes a column name;
# all the lists need to be the same length, and these become the rows.
new_dataframe = pd.DataFrame(
    {
        "Qchat_10score": [2,4,6,8,10],
        "Age_months": [6,12,18,24,36],
        "Jaundice": [True, False, True, False, True],
        "Family_members with Autism": [True, False, True, False, True],
        "Age_of_Mother": [20, 25, 30, 35, 40],
        "ASD_traits": [True, False, True, False, True],
        "Gender": ["m", "f", "m", "f" ,"m"],
        "Ethnicity": ["Irish", "Polish","Romanian", "Latvian", "Italian"],
        "Who completed the test": ["Family_member", "Health_care_professional", "Family_member", "Health_care_professional", "Family_member"],
        "A1": ["0", "1", "0", "1", "0"],
        "A2": ["0", "1", "0", "1", "0"],
        "A3": ["0", "1", "0", "1", "0"],
        "A4": ["0", "1", "0", "1", "0"],
        "A5": ["0", "1", "0", "1", "0"],
        "A6": ["0", "1", "0", "1", "0"],
        "A7": ["0", "1", "0", "1", "0"],
        "A8": ["0", "1", "0", "1", "0"],
        "A9": ["0", "1", "0", "1", "0"],
        "A10": ["0", "1", "0", "1", "0"]
    }
)
new_dataframe
# -
# -
# read in the Toddler Autism dataset csv file (exported from an Excel worksheet on my computer)
# with the variables I have set out above included
df1 = pd.read_csv("Toddler Autism dataset July 2018.csv")
# show data
df1
# where my file is on my computer
# NOTE(review): `pwd` is an IPython shell builtin (notebook only); in a plain Python script it raises NameError
pwd
# information on the data set: total columns/entries and types of data in the df1 data set
df1.info()
# checking the type of columns we have in the dataset; for example, A1 - A10 are integers (1s and 0s)
df1.dtypes
# +
# importing pandas module (already imported above; harmless re-import)
import pandas as pd
# reading csv file
df1 = pd.read_csv("Toddler Autism dataset July 2018.csv",)
# -
# showing the top five rows of data
df1.head()
# shows we have 1054 entries and 19 columns
df1.info()
# I want to drop the column in the data set called "Who completed the test".
# fixed: DataFrame.drop is not in-place by default; the original discarded the result
df1 = df1.drop(['Who completed the test'], axis=1)
# I now want to rename the column named Family_mem_with_ASD to "Sibling diagnosed with autism".
df1 = df1.rename(columns={"Family_mem_with_ASD": "Sibling diagnosed with autism"})
# show data
df1
# I now want to rename the column named Ethnicity to Nationality
df1 = df1.rename(columns={"Ethnicity": "Nationality"})
df1
# type is a string
type("data")
# show first 100 rows
df1.head(100)
# shows the last 100 rows
df1.tail(100)
# names of columns in the dataset
# fixed: removed a stray trailing line-continuation backslash that joined this
# statement with the following comment line (a SyntaxError outside the notebook)
df1.columns
# description of the dataset including count, mean, std, min, max etc.
df1.describe()
# View all Nationality names
# fixed: the renamed CSV frame is df1; `df` held an unrelated demo frame with no Nationality column
df1["Nationality"].unique()
# columns in data set
df1.columns
# importing random package
import random
# gives us a floating point value between 6 and 36 months of age; the result is on average ~21 months
value = random.uniform(6, 36)
print (value)
# gives us an *integer* value between 6 and 36 months of age inclusive, which means
# including both 6 and 36.
value = random.randint(6, 36)
# value = random integer including 6 and 36 and in between.
print (value)
# ten random numbers in [0, 1).
x = np.random.rand(10)
x
# shuffles the values of x and returns a shuffled *copy* (x itself is unchanged).
np.random.permutation(x)
# plot a histogram for the results of the np.random permutation analysis using x
plt.hist(x)
# show plot
plt.show()
# when given an integer n, permutation treats it as the array arange(n)
np.random.permutation(10)
# plot a histogram of the above array
plt.hist(x)
# show plot
plt.show()
# +
# creates an array of the given shape
x = np.random.rand(10)
x
# -
# Run a random permutation test.
np.random.permutation(x)
# plot a histogram
plt.hist(x)
# show plot
plt.show()
# +
data = np.random.normal(0, 0.6, 36)
# age in months of toddlers with autism
df1 = pd.DataFrame(data, columns=['Age_mons'])
df1.head()
# +
# FINDING MAX AND MIN in the age-in-months column; results show minimum 12 months old
# NOTE(review): at this point `df` is the "QCHAT 10 Score" demo frame and has no 'Age_Mons'
# column, so this raises a KeyError; the CSV frame (with the real Age_Mons column) was
# probably intended here - confirm and repoint before relying on these numbers.
p=df['Age_Mons'].max()
q=df['Age_Mons'].min()
print(q)
# +
# FINDING MAX AND MIN in the age column; results show maximum is 36 months old
# NOTE(review): same wrong-frame issue as above.
p=df['Age_Mons'].max()
q=df['Age_Mons'].min()
print(p)
# +
# importing packages we will require to do some exploratory analysis on this data set and also show some plots etc..
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# import pandas as pd.
import pandas as pd
# 1000 samples from a normal distribution with mean 0 and standard deviation 0.1
data = np.random.normal(0, 0.1, 1000)
df1 = pd.DataFrame(data, columns=['x'])
# first 10 lines in data frame
df1.head(10)
# -
# this is random uniform: 36 samples drawn uniformly from [0, 12)
x = np.random.uniform(0,12,36)
# plot a histogram
plt.hist(x)
# show plot
plt.show()
# plot a histogram (same data again)
plt.hist(x)
# show plot
plt.show()
# read in csv file called Autism_Data as df1; na_values='?' marks '?' entries as NaN (it does not drop them)
df1 = pd.read_csv('Autism_Data.arff',na_values='?')
# read in file called Toddler Autism dataset as df2; '?' entries become NaN
df2 = pd.read_csv('Toddler Autism dataset July 2018.csv',na_values='?')
# just showing the first five lines of data
df1.head()
# I now want to rename the column named Ethnicity to Nationality
df2 = df2.rename(columns={"Ethnicity": "Nationality"})
df2
# information on df1 data set shows it has 704 entries and 21 columns
df1.info()
# showing top five rows of df2 data set
df2.head()
# information on df2 data set shows 19 columns and 1054 entries
df2.info()
# We now want to see the percent of adults and toddlers with autism. This is showing us that 27% of adults have ASD
# and 69% of toddlers have ASD, but we know that this is not a true reflection of the actual % that have ASD, as we are only
# using qualitative properties for ASD in our test sample.
# set style to white grid
sns.set_style('whitegrid')
# data1 = subset of data frame 1 with a positive Class/ASD screening
data1= df1[df1['Class/ASD']=='YES']
# data2 = subset of data frame 2 with positive Class/ASD Traits (note the trailing space in the column name)
data2= df2[df2['Class/ASD Traits ']=='Yes']
# print out the percentage of adults who have this Class/ASD diagnosis
print("Adults: ",len(data1)/len(df1) * 100)
# print out the percentage of toddlers who have this Class/ASD Traits diagnosis
print("Toddlers:",len(data2)/len(df2) * 100)
# We now want to see the visual difference between those born with jaundice, split by gender, and whether
# there is anything significant in our results
# figure is a subplot and figure size is 20, 6.
fig, ax = plt.subplots(1,2,figsize=(20,6))
# 'jundice' (sic) appears to be the column name as spelled in the adult dataset - TODO confirm
sns.countplot(x='jundice',data=data1,hue='gender',ax=ax[0])
#set title on the 0 axes to show ASD positive
ax[0].set_title('Adults diagnosed with ASD and born with jaundice based on gender')
ax[0].set_xlabel('Jaundice when born')
sns.countplot(x='Jaundice',data=data2,hue='Sex',ax=ax[1])
ax[1].set_title('Toddlers diagnosed with ASD and born with jaundice based on gender')
ax[1].set_xlabel('Jaundice when born')
# Research would tell us that autism is much higher in boys than in girls - around 5-6 times higher - and this is reflected in our research of toddlers, but in our adult research this is not the case.
# Research is also now telling us that toddlers born with jaundice have a greater chance of having ASD, but we can see above that this is not supported by our research here.
#
# We now want to see a visual representation of the age distribution for both toddlers and adults.
fig, ax = plt.subplots(1,2,figsize=(20,6))
# bins=45 divides the range of values into 45 intervals; blue is used as it socially represents autism
# NOTE(review): sns.distplot is deprecated in recent seaborn releases in favour of histplot/displot
sns.distplot(data1['age'],kde=False,bins=45,color='blue',ax=ax[0])
# set x label to show age of adults in years
ax[0].set_xlabel('Age of adults in years')
# set the title to show distribution of ASD diagnosis regarding their age
ax[0].set_title('Distribution of ASD diagnosis regarding their Age')
# set plot to show data2 age in months
# bins=30 divides the range of values into 30 intervals
sns.distplot(data2['Age_Mons'],kde=False,bins=30,color='blue',ax=ax[1])
# set x label to show age of toddlers in months
ax[1].set_xlabel('Age of Toddlers in months')
# set title to show distribution of ASD diagnosis regarding their age
ax[1].set_title('Distribution of ASD diagnosis regarding their Age')
# Research shows that adults who have autism have, as they get older, found techniques, therapies and interventions to help them decrease the severity of their ASD symptoms; this is reflected here also in our research.
# Researching the same in toddlers shows that from birth (in our case 6 months old) up to 36 months old severity tends to increase, with most presenting by the age of 3. While this is not always the case, it is predominantly due to late diagnosis in healthcare systems and poor education for families in the area of ASD, which could change the outcome of the toddler research greatly. It also suggests that with a diagnosis and the right interventions an adult could be decreasing their symptoms, unlike the toddlers, whose symptoms increase in severity.
# Details on each nationality background, showing white europeans at the top at 233 in comparison to turkish
# at the bottom at 6 and others at 1.
print(df1['ethnicity'].value_counts())
# fixed: df2's Ethnicity column was renamed to Nationality earlier (L20423-equivalent cell),
# so the old name raised a KeyError
df2['Nationality'].value_counts()
# From the above printout of df1 we can see 233 adults who are white european in comparison to 6 turkish have an ASD diagnosis, and in df2 the toddlers who are white european come in at 334 in comparison to italians at 26. This is a huge gap and a nationality divide in ASD diagnosis. Is this due to the population tested or some other error? Further analysis required.
# Now I want to show the family-diagnosis connection to their ethnic background and visualise this on a chart
fig, ax = plt.subplots(1,2,figsize=(20,6))
# x = autism, data = data1 (adults), hue = ethnicity, palette = rainbow
sns.countplot(x='autism',data=data1,hue='ethnicity',palette='rainbow',ax=ax[0])
# the title for ax[0] is Family relatives who are adults with ASD diagnosis and their Ethnicity
ax[0].set_title('Family relatives who are adults with ASD diagnosis and their Ethnicity')
# set the x label to show adults who have relatives with ASD
ax[0].set_xlabel('Adults who have relatives with ASD')
# fixed: data2 was sliced from df2 *after* the Ethnicity -> Nationality rename,
# so hue must use the renamed column
sns.countplot(x='Family_mem_with_ASD',data=data2,hue='Nationality',palette='rainbow',ax=ax[1])
# ax[1] title shows family relatives who are toddlers with ASD diagnosis and their ethnicity
ax[1].set_title('Family relatives who are Toddlers with ASD diagnosis and their Ethnicity')
# setting x label to show toddlers who have relatives with ASD
ax[1].set_xlabel('Toddlers who have relatives with ASD')
# plot tight layout style
plt.tight_layout()
# This particular chart really shows us how predisposed we are as white europeans to be diagnosed with ASD in comparison to other nationalities; this is also the case in many other research documents, so we are definitely seeing some kind of link, but nothing so far to show why this is the case. It certainly warrants further investigation into this particular area.
# # End
|
Autism 1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2D map fitting
#
# ## Prerequisites:
# - To understand how general modelling and fitting work in gammapy, please refer to the [analysis_3d tutorial](../3D/analysis_3d.ipynb)
#
# ## Context:
# We often want to determine the position and morphology of an object. To do so, we don't necessarily have to resort to full 3D fitting but can perform simple image fitting, in particular in an energy range where the PSF does not vary strongly, or if we want to explore a possible energy dependence of the morphology.
#
#
# ## Objective:
# To localize a source and/or constrain its morphology.
#
# ## Proposed approach:
#
# The first step here, as in most analysis with DL3 data, is to create reduced datasets. For this, we will use the `Analysis` class to create a single set of stacked maps with a single bin in energy (thus, an *image* which behaves as a *cube*). This, we will then model with a spatial model of our choice, while keeping the spectral model fixed to an integrated power law.
# ## Setup
# As usual, we'll start with some general imports...
# +
# %matplotlib inline
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time
import logging
# module-level logger, following the gammapy tutorial convention
log = logging.getLogger(__name__)
# -
# Now let's import gammapy specific classes and functions
from gammapy.analysis import Analysis, AnalysisConfig
# ## Creating the config file
# Now, we create a config file for our analysis. You may load this from disk if you have a pre-defined config file.
#
# Here, we use 3 simulated CTA runs of the galactic center.
config = AnalysisConfig()
# Selecting the observations
config.observations.datastore = "$GAMMAPY_DATA/cta-1dc/index/gps/"
config.observations.obs_ids = [110380, 111140, 111159]
# Technically, gammapy implements 2D analysis as a special case of 3D analysis (with only one bin in energy). So, we must specify the type of analysis as *3D*, and define the geometry of the analysis.
# +
config.datasets.type = "3d"
config.datasets.geom.wcs.skydir = {
    "lon": "0 deg",
    "lat": "0 deg",
    "frame": "galactic",
}  # The WCS geometry - centered on the galactic center
config.datasets.geom.wcs.width = {"width": "8 deg", "height": "6 deg"}
config.datasets.geom.wcs.binsize = "0.02 deg"
# The FoV radius to use for cutouts
config.datasets.geom.selection.offset_max = 2.5 * u.deg
config.datasets.safe_mask.methods = ["offset-max"]
config.datasets.safe_mask.parameters = {"offset_max": 2.5 * u.deg}
config.datasets.background.method = "fov_background"
config.fit.fit_range = {"min": "0.1 TeV", "max": "30.0 TeV"}
# We now fix the energy axis for the counts map - (the reconstructed energy binning)
config.datasets.geom.axes.energy.min = "0.1 TeV"
config.datasets.geom.axes.energy.max = "10 TeV"
config.datasets.geom.axes.energy.nbins = 1
# coarser spatial binning for the IRF maps
config.datasets.geom.wcs.binsize_irf = 0.2 * u.deg
# -
print(config)
# ## Getting the reduced dataset
# We now use the config file and create a single `MapDataset` containing `counts`, `background`, `exposure`, `psf` and `edisp` maps.
# %%time
analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()
print(analysis.datasets["stacked"])
# The counts and background maps have only one bin in reconstructed energy. The exposure and IRF maps are in true energy, and hence have a different binning based upon the binning of the IRFs. We need not bother about them presently.
analysis.datasets["stacked"].counts
analysis.datasets["stacked"].background
analysis.datasets["stacked"].exposure
# We can have a quick look at these maps in the following way:
# + nbsphinx-thumbnail={"tooltip": "Source modelling and fitting in stacked observations using the high level interface."}
analysis.datasets["stacked"].counts.reduce_over_axes().plot(vmax=5)
# -
# ## Modelling
#
# Now, we define a model to be fitted to the dataset. **The important thing to note here is the dummy spectral model - an integrated powerlaw with only free normalisation**. Here, we use its YAML definition to load it:
model_config = """
components:
- name: GC-1
type: SkyModel
spatial:
type: PointSpatialModel
frame: galactic
parameters:
- name: lon_0
value: 0.02
unit: deg
- name: lat_0
value: 0.01
unit: deg
spectral:
type: PowerLaw2SpectralModel
parameters:
- name: amplitude
value: 1.0e-12
unit: cm-2 s-1
- name: index
value: 2.0
unit: ''
frozen: true
- name: emin
value: 0.1
unit: TeV
frozen: true
- name: emax
value: 10.0
unit: TeV
frozen: true
"""
analysis.set_models(model_config)
# We will freeze the parameters of the background
analysis.datasets["stacked"].background_model.parameters["tilt"].frozen = True
# To run the fit
analysis.run_fit()
# To see the best fit values along with the errors
analysis.fit_result.parameters.to_table()
|
docs/tutorials/analysis/2D/modeling_2D.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# -
# %matplotlib inline
STATS_DIR = "/hg191/corpora/legaldata/data/stats/"
# per-opinion semantic feature values
SEM_FEATS_FILE = os.path.join (STATS_DIR, "ops.temp.semfeat")
# per-opinion citation in-degrees
INDEG_FILE = os.path.join (STATS_DIR, "ops.ind")
ind = pd.read_csv (INDEG_FILE, sep=",", header=None, names=["opid", "indeg"])
semfeat = pd.read_csv (SEM_FEATS_FILE, sep=",", header=None, names=["opid", "semfeat"])
# NOTE(review): this per-row scan of `ind` is O(n*m); semfeat["opid"].map(ind.set_index("opid")["indeg"])
# or a merge would be much faster - confirm opids are unique in ind before switching
indegs = pd.Series([ind[ind["opid"] == opid]["indeg"].values[0] for opid in semfeat.opid.values])
semfeat["indeg"] = indegs
def labelPercentile (series):
    """Label each value of `series` by its percentile bucket.

    Returns a list (aligned with `series`) of "<=50", ">50" (50th-90th
    percentile) or ">90" labels.
    """
    labels = list ()
    p50 = np.percentile (series, q=50)
    p90 = np.percentile (series, q=90)
    # fixed: removed the unused p75, and replaced the final `elif value > p90`
    # with `else` - the old elif silently skipped NaN values (all comparisons
    # with NaN are False), producing a label list shorter than the series
    for value in series:
        if value <= p50:
            labels.append ("<=50")
        elif value <= p90:
            labels.append (">50")
        else:
            labels.append (">90")
    return labels
# attach percentile-bucket labels ("<=50", ">50", ">90") to each opinion
semfeat["percentile"] = pd.Series (labelPercentile(semfeat["semfeat"].values))
# keep only opinions cited at least once; .copy() is the fix here:
# the slice is a view, and assigning log(indeg) below raised SettingWithCopyWarning
df = semfeat[semfeat["indeg"] > 0].copy()
df["log(indeg)"] = np.log(df["indeg"])
ax = sns.boxplot(x="percentile", y="log(indeg)", data=df, order=["<=50", ">50", ">90"])
# median of log(indeg) for the >50 bucket, computed by hand
vals = df[df["percentile"] == ">50"]["log(indeg)"].values
np.sort(vals)[int(len(vals)/2)]
# bucket sizes
print(len(df[df["percentile"] == ">50"]))
print(len(df[df["percentile"] == ">90"]))
# per-bucket medians of log(indeg)
print (df[df["percentile"] == "<=50"]["log(indeg)"].median())
print (df[df["percentile"] == ">50"]["log(indeg)"].median())
print (df[df["percentile"] == ">90"]["log(indeg)"].median())
# fixed throughout this cell: labelPercentile produces "<=50"/">50"/">90", but these
# statements filtered on "<=P50"/">P50"/">P90" (matching no rows), and referenced a
# "logindeg" column that never existed - the log column is df["log(indeg)"], defined
# on the indeg > 0 subset.
#print (df[df["percentile"] == ">90"]["log(indeg)"].mean())
print (df[df["percentile"] == "<=50"]["log(indeg)"].mean())
print (df[df["percentile"] == ">50"]["log(indeg)"].mean())
print (df[df["percentile"] == ">90"]["log(indeg)"].mean())
print (semfeat[semfeat["percentile"] == "<=50"]["indeg"].median())
print (semfeat[semfeat["percentile"] == ">50"]["indeg"].median())
print (semfeat[semfeat["percentile"] == ">90"]["indeg"].median())
print (semfeat[semfeat["percentile"] == "<=50"]["indeg"].median())
print (semfeat[semfeat["percentile"] == ">50"]["indeg"].median())
print (semfeat[semfeat["percentile"] == ">90"]["indeg"].median())
# semantic-feature value at the 90th percentile
np.percentile(semfeat["semfeat"].values, q=90)
# fixed: the original line was missing the `semfeat` frame before the boolean mask
semfeat[semfeat["percentile"] == ">90"]["indeg"].mean()
semfeat[semfeat["percentile"] == ">90"].tail(500)
# ten largest in-degrees
sorted(semfeat["indeg"], reverse=True)[0:10]
semfeat[semfeat["indeg"].isin(sorted(semfeat["indeg"], reverse=True)[0:10])]
# fixed: chained indexing (.loc[48004,][...] = 1) assigned into a temporary copy
# and had no effect; a single .loc call assigns in place
semfeat.loc[48004, "semfeat"] = 1
semfeat[semfeat["indeg"].isin(sorted(semfeat["indeg"], reverse=True)[0:10])]
# share of opinions with at least one citation, per percentile bucket
print(np.mean((semfeat[semfeat["percentile"] == "<=50"]["indeg"] > 0).values))
print(np.mean((semfeat[semfeat["percentile"] == ">50"]["indeg"] > 0).values))
print(np.mean((semfeat[semfeat["percentile"] == ">90"]["indeg"] > 0).values))
print (len(semfeat[(semfeat["percentile"] == "<=50") & (semfeat["indeg"] > 0)]))
print (len(semfeat[(semfeat["percentile"] == ">50") & (semfeat["indeg"] > 0)]))
print (len(semfeat[(semfeat["percentile"] == ">90") & (semfeat["indeg"] > 0)]))
print (semfeat[(semfeat["percentile"] == "<=50") & (semfeat["indeg"] > 0)]["indeg"].mean())
print (semfeat[(semfeat["percentile"] == ">50") & (semfeat["indeg"] > 0)]["indeg"].mean())
print (semfeat[(semfeat["percentile"] == ">90") & (semfeat["indeg"] > 0)]["indeg"].mean())
# df is exactly the indeg > 0 subset, so these match the intended filtered log means
print (df[df["percentile"] == "<=50"]["log(indeg)"].mean())
print (df[df["percentile"] == ">50"]["log(indeg)"].mean())
print (df[df["percentile"] == ">90"]["log(indeg)"].mean())
ax = sns.violinplot(x="percentile", y="log(indeg)", data=df, order=["<=50", ">50", ">90"])
semfeat[semfeat["indeg"] == 1]
|
notebooks/CL.plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gordicaleksa/get-started-with-JAX/blob/main/Tutorial_3_JAX_Neural_Network_from_Scratch_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="XZuyP-M3KPUR"
# # MLP training on MNIST
# + id="8-SzJ0NTKRP1"
import numpy as np
import jax.numpy as jnp
from jax.scipy.special import logsumexp
import jax
from jax import jit, vmap, pmap, grad, value_and_grad
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
# + colab={"base_uri": "https://localhost:8080/"} id="G4NrxSVjKt8f" outputId="6bb8bef6-3098-4fd5-8ffe-62f4b0b1aa79"
seed = 0  # fixed PRNG seed so weight initialisation is reproducible
mnist_img_size = (28, 28)  # MNIST digits are 28x28
def init_MLP(layer_widths, parent_key, scale=0.01):
    """Build MLP parameters as a list of [weight, bias] pairs.

    layer_widths: sizes of all layers, input first (e.g. [784, 512, 256, 10]).
    parent_key:   JAX PRNG key, split so each layer gets independent randomness.
    scale:        multiplier applied to the standard-normal draws.
    """
    layer_keys = jax.random.split(parent_key, num=len(layer_widths) - 1)
    params = []
    for fan_in, fan_out, layer_key in zip(layer_widths[:-1], layer_widths[1:], layer_keys):
        w_key, b_key = jax.random.split(layer_key)
        weights = scale * jax.random.normal(w_key, shape=(fan_out, fan_in))
        biases = scale * jax.random.normal(b_key, shape=(fan_out,))
        params.append([weights, biases])
    return params
# test
key = jax.random.PRNGKey(seed)
MLP_params = init_MLP([784, 512, 256, 10], key)
# print the pytree of parameter shapes: [(512, 784), (512,)], [(256, 512), (256,)], [(10, 256), (10,)]
print(jax.tree_map(lambda x: x.shape, MLP_params))
# + colab={"base_uri": "https://localhost:8080/"} id="U_z7eLxINv9x" outputId="e9909f9f-6778-4977-91f1-f5b14dd9ecd4"
def MLP_predict(params, x):
    """Forward pass for one flattened image; returns log-probabilities over classes."""
    *hidden_layers, (w_out, b_out) = params
    act = x
    for w, b in hidden_layers:
        act = jax.nn.relu(jnp.dot(w, act) + b)
    logits = jnp.dot(w_out, act) + b_out
    # log-softmax: log(exp(o_i) / sum_j exp(o_j)) = o_i - logsumexp(o)
    return logits - logsumexp(logits)
# tests
# test single example
dummy_img_flat = np.random.randn(np.prod(mnist_img_size))
print(dummy_img_flat.shape)
prediction = MLP_predict(MLP_params, dummy_img_flat)
print(prediction.shape)
# test batched function
# vmap maps MLP_predict over the batch axis of the images; params are shared (in_axes=None)
batched_MLP_predict = vmap(MLP_predict, in_axes=(None, 0))
dummy_imgs_flat = np.random.randn(16, np.prod(mnist_img_size))
print(dummy_imgs_flat.shape)
predictions = batched_MLP_predict(MLP_params, dummy_imgs_flat)
print(predictions.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="5pPM1dZ4QyYe" outputId="3317666b-e167-46b7-8cf4-b8592adc065a"
def custom_transform(x):
    """Convert a PIL image (or array-like) into a flat float32 numpy vector."""
    return np.array(x, dtype=np.float32).reshape(-1)
def custom_collate_fn(batch):
    """Collate a list of (image, label) pairs into (stacked image array, label array)."""
    images, labels = zip(*batch)
    return np.stack(images), np.array(labels)
batch_size = 128
# MNIST train/test datasets; custom_transform flattens each image to a float32 vector
train_dataset = MNIST(root='train_mnist', train=True, download=True, transform=custom_transform)
test_dataset = MNIST(root='test_mnist', train=False, download=True, transform=custom_transform)
# custom_collate_fn yields numpy batches (instead of torch tensors) for JAX
train_loader = DataLoader(train_dataset, batch_size, shuffle=True, collate_fn=custom_collate_fn, drop_last=True)
test_loader = DataLoader(test_dataset, batch_size, shuffle=False, collate_fn=custom_collate_fn, drop_last=True)
# test
batch_data = next(iter(train_loader))
imgs = batch_data[0]
lbls = batch_data[1]
print(imgs.shape, imgs[0].dtype, lbls.shape, lbls[0].dtype)
# optimization - loading the whole dataset into memory
# NOTE(review): .data/.targets bypass custom_transform, so these arrays keep the raw
# dataset dtype (uint8) rather than float32; pixel values are unchanged - confirm this
# is intended for the accuracy evaluation
train_images = jnp.array(train_dataset.data).reshape(len(train_dataset), -1)
train_lbls = jnp.array(train_dataset.targets)
test_images = jnp.array(test_dataset.data).reshape(len(test_dataset), -1)
test_lbls = jnp.array(test_dataset.targets)
# + id="YQEYcSNzVeim"
num_epochs = 5
def loss_fn(params, imgs, gt_lbls):
    """Cross-entropy loss: mean of -(log-probabilities * one-hot labels).

    NOTE(review): jnp.mean averages over *all* entries of the product array
    (batch x classes), not just the batch - this scales the usual cross
    entropy by 1/num_classes, which only affects the effective learning rate.
    """
    predictions = batched_MLP_predict(params, imgs)
    return -jnp.mean(predictions * gt_lbls)
def accuracy(params, dataset_imgs, dataset_lbls):
    """Fraction of examples whose argmax prediction matches the integer label."""
    pred_classes = jnp.argmax(batched_MLP_predict(params, dataset_imgs), axis=1)
    return jnp.mean(dataset_lbls == pred_classes)
@jit
def update(params, imgs, gt_lbls, lr=0.01):
    """One SGD step; returns (loss, updated params).

    params:  pytree (list of [w, b] pairs) of model parameters
    imgs:    batch of flattened images
    gt_lbls: one-hot ground-truth labels
    lr:      learning rate
    """
    loss, grads = value_and_grad(loss_fn)(params, imgs, gt_lbls)
    # fixed: jax.tree_multimap was deprecated and then removed from JAX;
    # jax.tree_map (already used elsewhere in this notebook) maps over multiple trees
    return loss, jax.tree_map(lambda p, g: p - lr*g, params, grads)
# Create a MLP
MLP_params = init_MLP([np.prod(mnist_img_size), 512, 256, len(MNIST.classes)], key)
# standard training loop: one-hot encode the labels, take an SGD step per batch
for epoch in range(num_epochs):
    for cnt, (imgs, lbls) in enumerate(train_loader):
        gt_labels = jax.nn.one_hot(lbls, len(MNIST.classes))
        loss, MLP_params = update(MLP_params, imgs, gt_labels)
        # log the loss every 50 batches
        if cnt % 50 == 0:
            print(loss)
    print(f'Epoch {epoch}, train acc = {accuracy(MLP_params, train_images, train_lbls)} test acc = {accuracy(MLP_params, test_images, test_lbls)}')
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="YmdBRBvU1wuA" outputId="efcfa75e-d0bb-4f16-9fb2-e85e82a53bcf"
imgs, lbls = next(iter(test_loader))
img = imgs[0].reshape(mnist_img_size)
gt_lbl = lbls[0]
print(img.shape)
import matplotlib.pyplot as plt
pred = jnp.argmax(MLP_predict(MLP_params, np.ravel(img)))
print('pred', pred)
print('gt', gt_lbl)
plt.imshow(img); plt.show()
# + [markdown] id="TwgI3fZbKRqM"
# # Visualizations
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="jddJj8zo4D1e" outputId="fb157d1c-4fbe-45a5-c84d-6abe38355a5e"
w = MLP_params[0][0]
print(w.shape)
w_single = w[500, :].reshape(mnist_img_size)
print(w_single.shape)
plt.imshow(w_single); plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 484} id="AZxm7G3j4iOS" outputId="521c3ad2-147d-4076-eea0-6537f32dafa0"
# todo: visualize embeddings using t-SNE
from sklearn.manifold import TSNE
def fetch_activations(params, x):
    """Forward pass through the hidden layers only; returns the last hidden activation."""
    act = x
    for w, b in params[:-1]:
        act = jax.nn.relu(jnp.dot(w, act) + b)
    return act
batched_fetch_activations = vmap(fetch_activations, in_axes=(None, 0))
imgs, lbls = next(iter(test_loader))
# last-hidden-layer activations for one test batch
batch_activations = batched_fetch_activations(MLP_params, imgs)
print(batch_activations.shape)  # (batch, 256) - width of the last hidden layer
t_sne_embeddings = TSNE(n_components=2, perplexity=30,).fit_transform(batch_activations)
# fixed: the colour map only covered classes 0-6 (copied from a 7-class Cora example),
# so the loop below raised KeyError for MNIST digits 7-9; three colours were added
cora_label_to_color_map = {0: "red", 1: "blue", 2: "green", 3: "orange", 4: "yellow", 5: "pink", 6: "gray", 7: "purple", 8: "brown", 9: "cyan"}
for class_id in range(10):
    plt.scatter(t_sne_embeddings[lbls == class_id, 0], t_sne_embeddings[lbls == class_id, 1], s=20, color=cora_label_to_color_map[class_id])
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="MHL27HumNgwf" outputId="d44b1e9c-33d6-4dd3-cf05-b3a1e19f0194"
# todo: dead neurons
def fetch_activations2(params, x):
    """Forward pass through the hidden layers, collecting every hidden activation."""
    activations = []
    act = x
    for w, b in params[:-1]:
        act = jax.nn.relu(jnp.dot(w, act) + b)
        activations.append(act)
    return activations
batched_fetch_activations2 = vmap(fetch_activations2, in_axes=(None, 0))
imgs, lbls = next(iter(test_loader))
# freshly initialised (untrained) network, used to inspect dead ReLU units
MLP_params2 = init_MLP([np.prod(mnist_img_size), 512, 256, len(MNIST.classes)], key)
batch_activations = batched_fetch_activations2(MLP_params2, imgs)
print(batch_activations[1].shape)  # (128, 256) - second hidden layer activations
# a neuron counts as "dead" for this batch if its activation is exactly 0 for every example
dead_neurons = [np.ones(act.shape[1:]) for act in batch_activations]
for layer_id, activations in enumerate(batch_activations):
    dead_neurons[layer_id] = np.logical_and(dead_neurons[layer_id], (activations == 0).all(axis=0))
# number of dead neurons per hidden layer
for layers in dead_neurons:
    print(np.sum(layers))
# + [markdown] id="jMmOX-VSKTjQ"
# # Parallelization
# + id="1aCkdHuhKUqV"
|
Tutorial_3_JAX_Neural_Network_from_Scratch_Colab.ipynb
|