code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter Notebook Playground
# Sanity-check cell: print a literal and a variable.
print("hello world!")
my_variable = 101
print(my_variable)
# # Text Classification Tutorial
# Credit to: https://stackabuse.com/text-classification-with-python-and-scikit-learn/
import numpy as np
import re
import nltk
from sklearn.datasets import load_files
import pickle
from nltk.corpus import stopwords
# Load the raw review texts (X) and their polarity labels (y) from disk.
movie_data = load_files(r"review_polarity/txt_sentoken")
X, y = movie_data.data, movie_data.target
# ## Preprocessing The Data
# +
documents = []
from nltk.stem import WordNetLemmatizer

stemmer = WordNetLemmatizer()

# Clean every raw review and collect the lemmatized text in `documents`.
for sen in range(0, len(X)):
    # Remove all the special characters
    document = re.sub(r'\W', ' ', str(X[sen]))

    # remove all single characters
    document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)

    # Remove single characters from the start
    # (bug fix: the pattern was r'\^[a-zA-Z]\s+', which matches a literal
    # caret character; the '^' must be unescaped to anchor at the start)
    document = re.sub(r'^[a-zA-Z]\s+', ' ', document)

    # Substituting multiple spaces with single space
    document = re.sub(r'\s+', ' ', document, flags=re.I)

    # Removing prefixed 'b'
    document = re.sub(r'^b\s+', '', document)

    # Converting to Lowercase
    document = document.lower()

    # Lemmatization
    document = document.split()
    document = [stemmer.lemmatize(word) for word in document]
    document = ' '.join(document)

    documents.append(document)
# -
# ## Feature Engineering
from sklearn.feature_extraction.text import CountVectorizer

# Bag-of-words counts: keep the 1500 most frequent terms, drop very rare
# (min_df) and very common (max_df) words plus English stop words.
vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))
X1 = vectorizer.fit_transform(documents).toarray()

# CountVectorizer makes very unique word a feature for the ml model. Parameters:
#
# - We take a max of 1500 features (the most common, highest occuring 1500 unique words)
# - min_df is number of documents they appear in at minimum
# - max_df is the maximum percentage of documents containing this word
# - Finally, stop_words removes any very commons words in the english language

from sklearn.feature_extraction.text import TfidfTransformer

# Re-weight the raw counts into TF-IDF weights.
tfidfconverter = TfidfTransformer()
X2 = tfidfconverter.fit_transform(X1).toarray()

# TfidfTransformer turns the count array earlier into an array of weights for each unique word based on the document:
# - TF-IDF weight is a weight often used in information retrieval and text mining
# - This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus
# - The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus
# - Variations of the tf-idf weighting scheme are often used by search engines as a central tool in scoring and ranking a document's relevance given a user query

# ## Splitting The Data (Train-Test Split)

from sklearn.model_selection import train_test_split

# Hold out 20% for testing; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X2, y, test_size=0.2, random_state=0)

# ## Training The Model

from sklearn.ensemble import RandomForestClassifier

# 1000 trees, fixed seed for reproducible results.
classifier = RandomForestClassifier(n_estimators=1000, random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# Random Forest example:
#
# <img align="left" src=https://miro.medium.com/max/900/1*EFBVZvHEIoMdYHjvAZg8Zg.gif width="450" />
# <img align="left" src=https://static.javatpoint.com/tutorial/machine-learning/images/random-forest-algorithm2.png width="450" />
#
# Pros of random forest:
# - Great predictive performance for binary classification
# - They provide a reliable feature importance estimate
# - They offer efficient estimates of the test error without incurring the cost of repeated model training associated with cross-validation
# - Handles thousands of input variables without variable deletion
#
# Cons of random forest:
# - An ensemble model is inherently less interpretable than an individual decision tree
# - Training a large number of deep trees can have high computational costs (but can be parallelized) and use a lot of memory
# - Predictions are slower, which may create challenges for applications
#
# More considerations:
# - https://github.com/TayariAmine/ML_cheat_sheet/wiki/Random-forest-Pros-and-Cons
# - https://www.oreilly.com/library/view/hands-on-machine-learning/9781789346411/e17de38e-421e-4577-afc3-efdd4e02a468.xhtml
# ## Evaluating The Model
# +
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

# Summarize performance on the held-out test set.
print("CONFUSION MATRIX: \n{}".format(confusion_matrix(y_test,y_pred)))
print("\nACCURACY: {}".format(accuracy_score(y_test, y_pred)))
# -

# <img src=https://glassboxmedicine.files.wordpress.com/2019/02/confusion-matrix.png width="500">

print(classification_report(y_test,y_pred))

# ## Saving The Model

# Persist the trained classifier with pickle, then reload it.
with open('text_classifier', 'wb') as picklefile:
    pickle.dump(classifier,picklefile)

with open('text_classifier', 'rb') as training_model:
    model = pickle.load(training_model)

# +
# The reloaded model should reproduce the earlier predictions exactly.
y_pred2 = model.predict(X_test)

print(confusion_matrix(y_test, y_pred2))
print(classification_report(y_test, y_pred2))
print(accuracy_score(y_test, y_pred2))
# -
# ## Try It Yourself!
# +
import numpy as np
import re
import nltk
from sklearn.datasets import load_files
import pickle
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfTransformer
# Load the raw reviews and labels again for the standalone example below.
movie_data = load_files(r"review_polarity/txt_sentoken")
X, y = movie_data.data, movie_data.target
# Preprocessing functions
def preprocessing(input_X):
    """Clean, lemmatize and vectorize raw review texts.

    Parameters
    ----------
    input_X : sequence of str or bytes
        Raw review documents.

    Returns
    -------
    tuple (numpy.ndarray, list of str)
        TF-IDF feature matrix and the cleaned text of each document.
    """
    stemmer = WordNetLemmatizer()
    documents = []
    for sen in range(0, len(input_X)):
        # Remove all the special characters
        document = re.sub(r'\W', ' ', str(input_X[sen]))
        # remove all single characters
        document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)
        # Remove single characters from the start
        # (bug fix: r'\^[a-zA-Z]\s+' matched a literal '^' character; the
        # anchor must be unescaped to match at the start of the string)
        document = re.sub(r'^[a-zA-Z]\s+', ' ', document)
        # Substituting multiple spaces with single space
        document = re.sub(r'\s+', ' ', document, flags=re.I)
        # Removing prefixed 'b'
        document = re.sub(r'^b\s+', '', document)
        # Converting to Lowercase
        document = document.lower()
        # Lemmatization
        document = document.split()
        document = [stemmer.lemmatize(word) for word in document]
        document = ' '.join(document)
        documents.append(document)
    # NOTE: the vectorizers are fit on the input itself, so feature indices
    # only line up with the trained model when the training corpus is
    # included (the caller extends new_X_test with X for this reason).
    vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))
    X1 = vectorizer.fit_transform(documents).toarray()
    tfidfconverter = TfidfTransformer()
    X2 = tfidfconverter.fit_transform(X1).toarray()
    return X2, documents
# Load the previously pickled classifier from disk.
with open('text_classifier', 'rb') as training_model:
    model = pickle.load(training_model)
# -
# Change the sentences in the 'new_X_test' array below!
# +
# Two hand-written reviews followed by the full corpus — presumably so the
# freshly fitted vocabulary matches the training one; verify against
# preprocessing(), which re-fits the vectorizers on its input.
new_X_test = ['I loved this movie so much!', 'This movie was bad and had terrible actors']
count = len(new_X_test)
new_X_test.extend(X)
new_X2, new_documents = preprocessing(new_X_test)
# -

# Predict only the hand-written reviews (the first `count` entries).
for i in range(count):
    review = new_documents[i]
    prediction = model.predict([new_X2[i]])
    print("review: {}, pred: {}".format(review, prediction))
| NLP_Workshop_May2020/HandsOnWorkshop_May2020.ipynb |
/ ---
/ jupyter:
/ jupytext:
/ text_representation:
/ extension: .q
/ format_name: light
/ format_version: '1.5'
/ jupytext_version: 1.14.4
/ kernelspec:
/ display_name: SQL
/ language: sql
/ name: SQL
/ ---
/ # VIEW
/ ## Server: Microsoft SQL Server
/ ## Database: disney
/ ## Author: <NAME>
/ ## YouTube: https://youtu.be/_tocAYU6jl0
drop view if exists vw_g
drop view if exists vw_debut
drop view if exists vw_movie_director
-- Fix: this previously dropped "vw_mw2000", but the view created below is
-- named vw_m2000, so the stale view was never removed on re-runs.
drop view if exists vw_m2000

SELECT * FROM movie_gross where mpaa_rating = 'G'

create view vw_g as
SELECT * FROM movie_gross where mpaa_rating = 'G'

select * from vw_g where year(release_date) > 2000

create or alter view vw_debut as
select movie_title, release_date,
datename(WEEKDAY, release_date) weekday,
month(release_date) month,
year(release_date) year
from movie_gross

ALTER view [dbo].[vw_debut] as
select movie_title, release_date,
datename(WEEKDAY, release_date) weekday,
month(release_date) month,
year(release_date) year
from movie_gross

select * from vw_debut

select weekday, count(weekday) from vw_debut
group by weekday
order by count(weekday) desc

drop view vw_g

select * from director

select top 10 * from movie_gross

create view vw_movie_director as
select m.movie_title, release_date, genre, d.director
from movie_gross m left join director d on m.movie_title = d.movie_title

select * from vw_movie_director
where genre='Musical'

create view vw_m2000 as
select * from vw_debut where year > 2000

select * from vw_m2000 where weekday ='Monday'
| src/disney_views.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Importación de Librerías
# +
import pyarrow
import fastparquet
import pandas as pd
import numpy as np
import missingno as msno
import matplotlib.pyplot as plt
import seaborn as sns
import hashlib as hl
import re
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import SimpleImputer
from sklearn.impute import IterativeImputer
from sklearn.decomposition import PCA
from sklearn.preprocessing import KBinsDiscretizer
from sklearn import (base, decomposition, feature_extraction, impute,
neighbors, preprocessing)
from sklearn.preprocessing import MinMaxScaler
from unidecode import unidecode
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
# -
# ### Lectura del Dataset
# Raw parquet load; keep an untouched copy for reference while cleaning.
df_orig = pd.read_parquet("cupones_ds_test.parquet")
df = df_orig.copy()
df

df['monto_compra_movimiento'].describe().round(2)

df_orig.shape

df.info()

# ## 1. Curación

# ### Eliminar filas duplicadas

# Drop exact duplicate rows.
df = df[~df.duplicated()]

df['monto_compra_movimiento'].describe().round(2)
# ### Ajuste por Inflación
# Monthly accumulated-inflation table (CSV hosted on GitHub).
df_infl = pd.read_csv('https://raw.githubusercontent.com/JIBarrionuevoGaltier/DiploDatos_2021_Mentoria_Grupo_2/main/notebooks_tp2/Indices%20de%20inflaci%C3%B3n.csv',
                      parse_dates=['mes'])
df_infl

# + tags=[]
# Procedimiento para ajustar la variable monto a la inflacion en el periodo muestreado
# Build YYYYMM keys on both frames and inner-join the accumulated inflation.
df['anio_mes_cupon'] = pd.DatetimeIndex(df['fecha_cupon_movimiento']).year * 100 + pd.DatetimeIndex(df['fecha_cupon_movimiento']).month
df_infl['anio_mes_infl'] = pd.DatetimeIndex(df_infl['mes']).year * 100 + pd.DatetimeIndex(df_infl['mes']).month
df = df.merge(df_infl[['anio_mes_infl', 'inflacion_acumulada']]
              ,left_on='anio_mes_cupon'
              ,right_on='anio_mes_infl'
              ,how='inner').drop(columns=['anio_mes_infl'])
# Deflate non-dollar amounts (moneda_movimiento != 1) by accumulated inflation.
df.loc[df.moneda_movimiento != 1,'monto_ajustado'] = \
    df[df.moneda_movimiento != 1]['monto_compra_movimiento'] / (1 + df['inflacion_acumulada'])
# -

# Visualizamos montos ajustados en pesos luego del ajuste
df.loc[df['moneda_movimiento'] != 1, ['monto_compra_movimiento','monto_ajustado']].describe().round(2)
# ### Ajuste por Tipo de Cambio
# + tags=[]
# Obtención de la cotizacion del dolar para los meses en nuestro dataset
df_tdc = pd.read_csv('https://raw.githubusercontent.com/JIBarrionuevoGaltier/DiploDatos_2021_Mentoria_Grupo_2/main/notebooks_tp2/Tipos%20de%20cambio.csv',
                     parse_dates=['fecha_cotizacion'], dayfirst=True, delimiter=';')
df_tdc = df_tdc.sort_values(by=['fecha_cotizacion']).reset_index(level=None).drop(columns=['index'])
df_tdc['anio_mes'] = (df_tdc.fecha_cotizacion.dt.year * 100) + df_tdc.fecha_cotizacion.dt.month
df_tdc.head(10)

# +
# Procedimiento para calcular la fecha de cotizacion
# Pick one quotation per month: the one on the 10th when available,
# otherwise the first quotation dated after the 10th.
df_tdc['day'] = df_tdc.fecha_cotizacion.dt.day
meses_cdiez = df_tdc[df_tdc['day'].isin([10])]['anio_mes'].unique()
meses_sdiez = df_tdc[(~df_tdc['day'].isin([10])) & (~df_tdc['anio_mes'].isin(meses_cdiez))]['anio_mes'].unique()
ind_fechas = []
for i in meses_cdiez:
    ind_fechas.append(min((df_tdc[(df_tdc['anio_mes'] == i) & (df_tdc['day'] == 10)]['fecha_cotizacion'].index)))
for i in meses_sdiez:
    ind_fechas.append(min((df_tdc[(df_tdc['anio_mes'] == i) & (df_tdc['day'] > 10)]['fecha_cotizacion'].index)))
# -

# Cotizacion del dia y el dia de vencimiento
df_tdc_cot = df_tdc.iloc[ind_fechas,:].sort_values(by=['fecha_cotizacion']).reset_index(level=None).drop(columns=['index'])
df_tdc_cot
# +
# Procesamos la variable anio_mes_cupon considerando la fecha de corte
# Ensure the coupon date is a real datetime before the row-wise shift below.
df.fecha_cupon_movimiento = pd.to_datetime(df.fecha_cupon_movimiento)
def func_var(row):
    """Return the billing period (YYYYMM int) for a row.

    Coupons dated after the 24th of the month are shifted into the
    following month. Generalized: the December rollover now advances to
    January of the following year for ANY year (the original hard-coded
    202101, which is the same result for 2020 but wrong for other years).
    """
    if row.fecha_cupon_movimiento.day > 24:
        if row.fecha_cupon_movimiento.month == 12:
            # December -> January of the next year.
            return (row['anio_mes_cupon'] // 100 + 1) * 100 + 1
        else:
            return row['anio_mes_cupon'] + 1
    else:
        return row['anio_mes_cupon']
# Apply the cut-off shift only to dollar rows (moneda_movimiento == 1).
df.loc[df.moneda_movimiento == 1,'anio_mes_cupon'] = df[df.moneda_movimiento == 1].apply(func_var, axis=1)
# -
# Mostramos un ejemplo donde se puede visualizar que anio_mes_cupon cambia para antes y despues de la fecha de corte
# +
# Mergeamos con df original
df = df.merge(df_tdc_cot[['anio_mes', 'venta']]
              ,left_on='anio_mes_cupon'
              ,right_on='anio_mes'
              ,how='inner').drop(columns=['anio_mes'])

# Aplicamos el tipo de cambio
# Dollar amounts are converted to pesos with the monthly selling rate.
df.loc[df.moneda_movimiento == 1,'monto_ajustado'] = \
    df[df.moneda_movimiento == 1]['monto_compra_movimiento'] * df['venta']

# Se descartan las columnas usadas para los calculos
df = df.drop(columns=['inflacion_acumulada','venta'])

# Redondeamos
df['monto_ajustado'] = df['monto_ajustado'].round(2)
# -

# Visualizamos montos ajustados en dolares luego de aplicar el tipo de cambio
df.loc[df['moneda_movimiento'] == 1, ['monto_compra_movimiento','monto_ajustado']].describe().round(2)

# ------

df['moneda_movimiento'].value_counts()
# ### 1.1 Asegurar IDs únicos
# +
# Reagrupar los comercios que tienen mas de un nombre y concatenar todos sus nombres
df.nombre_comercio_histo = df.nombre_comercio_histo.str.strip()
df_unicos = (
    df[["id_comercio_movimiento", "nombre_comercio_histo"]].drop_duplicates()\
    .groupby("id_comercio_movimiento").agg({"nombre_comercio_histo": 'sum'})
).reset_index()
df_unicos = df_unicos.rename(columns={'nombre_comercio_histo':'nombre_comercio_concat'})
# -

# Reagrupamos los comercios que tienen mas de un nombre y concatenamos todos sus nombres en una nueva columna **nombre_comercio_concat**

# +
# Unimos con dataframe original agregando la columna nueva con relacion 1:1 con el id
df = df.merge(df_unicos.drop_duplicates(subset=['id_comercio_movimiento'], keep='first'), on='id_comercio_movimiento', how='inner')

# Relacion 1:1 entre id_comercio_movimiento y la columna nueva con los nombres concatenados
df[['id_comercio_movimiento', 'nombre_comercio_concat']].drop_duplicates() \
    .groupby(['id_comercio_movimiento']).count() # Deberia dar 3833 que es la cantidad de ids unicos
# -

# ### 1.2. Asegurar el tipo de dato de codigo postal

# Cast postal codes to str (via int to drop any decimal part); NaN rows stay NaN.
df.domicilio_codigo_postal = df[df['domicilio_codigo_postal'].isna()!=True]['domicilio_codigo_postal'].apply(int).apply(str)
df.domicilio_codigo_postal = df.domicilio_codigo_postal.str.strip()
# + [markdown] tags=[]
# ### 1.4. Reemplazar caracteres especiales

# + tags=[]
# Columnas que contienen strings
columnas_str = [
    'nombre_comercio_histo',
    'producto_naranja_movimiento',
    'nombre_local_histo',
    'estado_civil_descripcion',
    'sexo_descripcion',
    'pais',
    'provincia',
    'ciudad',
    'domicilio_barrio',
    'cargo_descripcion_histo',
    'nivel_estudio_descripcion_histo',
    'rel_vivienda_descripcion_histo',
    'nombre_comercio_concat'
]

# + tags=[]
# Expresion regular que incluye solo caracteres de a-z mayúscula o minúscula y 0-9
regex = r'[^a-zA-Z0-9 ]'
# -

# Transliterate accented characters to plain ASCII (nulls are left untouched).
for col in columnas_str:
    df[col] = df[df[col].notnull()][col].apply(unidecode)

# + tags=[]
# Strip any remaining non-alphanumeric characters.
# Fix: regex=True must be explicit — since pandas 2.0 str.replace defaults to
# literal matching, which would silently leave the pattern unapplied.
for col in columnas_str:
    df.loc[:, col] = df[df[col].notnull()][col].str.replace(regex, '', regex=True)
# -
# ### 1.5. Asegurar formato de fecha
# +
# Columnas con fecha
col_fechas = [
    'fecha_vto_cupon_movimiento',
    'fecha_presentacion_movimiento',
    'fecha_cupon_movimiento',
    'fecha_carga_sistema_movimiento',
    'fecha_nacimiento',
    'fecha_extraccion_movimiento',
    'fecha_de_ingreso_histo'
]
# -

# Parse every date column to datetime64.
df[col_fechas] = df[col_fechas].apply(pd.to_datetime)

df["fecha_cupon_movimiento"].max()
# + [markdown] tags=[]
# ### 1.6. Agrupar valores de cargo_descripcion_histo
# -
# Exclude GERENTE and JUEZ rows before building the sector grouping below
# (their list assignments are kept commented out further down).
df = df[~(df['cargo_descripcion_histo'] == 'GERENTE')].copy()
df = df[~(df['cargo_descripcion_histo'] == 'JUEZ')].copy()

# +
# Job titles grouped into broader sectors.
sec_salud = ['ENFERMERO','MEDICO']
sec_edu = ['DOCENTE','PROFESTECNICO']
sec_seg = ['EMPSEGPUBLICA','PORTEROCONSERJ','VIGILADOR']
sec_fin = ['PRESGARANTIA','PRESTCREDITO']
sec_Emp_Com = ['ADMINISTRATIVO','CAJERO','EMPDE COMERCIO','MAESTRANZA','JEFE','ENCARGADO','VENDEDORPROMOT', 'COCINERO','MOZO']
sec_ope = ['CHOFER','INDEPENDIENTE','OPERARIO','PEONEMBARCADOS','PROPIETARIO']
sec_no_ope = ['JUBILADO']
sec_sd = ['SIN DATOS']
#sec_pers_jer = ['GERENTE']
#sec_jud = ['JUEZ']

# Tag each row with its sector; a later match overwrites an earlier one.
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_salud)),'cargo_sector_desc_hist'] = 'Sector_Salud'
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_edu)),'cargo_sector_desc_hist'] = 'Sector_Educativo'
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_seg)),'cargo_sector_desc_hist'] = 'Sector_Seguridad'
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_fin)),'cargo_sector_desc_hist'] = 'Sector_Financiero'
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_Emp_Com)),'cargo_sector_desc_hist'] = 'Sector_Empleado_Comercio'
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_ope)),'cargo_sector_desc_hist'] = 'Sector_Operativo'
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_no_ope)),'cargo_sector_desc_hist'] = 'Sector_No_Operativo'
#df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_pers_jer)),'cargo_sector_desc_hist'] = 'Sector_Personal_Jerarquico'
#df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_jud)),'cargo_sector_desc_hist'] = 'Sector_Judicial'
df.loc[df['cargo_descripcion_histo'].str.contains('|'.join(sec_sd)),'cargo_sector_desc_hist'] = 'Sector_Sin_Datos'
# -

df[df['cargo_sector_desc_hist'].isna()]

df = df.reset_index().drop(columns=['index'])
# -----
# ## 2 Data Enrichment
# ### 2.1. Cálculo de la edad de la persona
# Age in whole years at the latest coupon date in the data.
# NOTE(review): np.timedelta64(1, 'Y') uses an average-length year and is
# deprecated in recent NumPy — confirm the precision is acceptable.
df['edad_cliente'] = ((df['fecha_cupon_movimiento'].max() - df['fecha_nacimiento'])/np.timedelta64(1, 'Y')).round(0)
df.edad_cliente = df.edad_cliente.astype('Int64', errors='ignore')

# ### 2.2 Cálculo de la antigüedad de la persona

# Customer tenure, in (average-length) months, at each transaction.
df['antig_cliente']= ((df['fecha_cupon_movimiento'] - df['fecha_de_ingreso_histo'])/np.timedelta64(1, 'M'))
df['antig_cliente']= df.antig_cliente.apply(np.round).apply(int)

df[['fecha_cupon_movimiento', 'fecha_de_ingreso_histo', 'antig_cliente']]
# ### 2.3 Reagrupamiento de **nombre_comercio_histo**
# + tags=[]
df.id_comercio_movimiento.astype(str)

# + tags=[]
# The line above has no effect on its own; the cast is made persistent here.
df.id_comercio_movimiento = df.id_comercio_movimiento.astype(str)

# + tags=[]
df['id_comercio_movimiento'] = df['id_comercio_movimiento'].str.zfill(9) # Se agrega el 0 a la izquierda
df['id_comercio_movimiento'].str.len().value_counts()
# + tags=[]
# Categorización: bucket merchants by the leading digit of the padded id.
def func(row):
    """Return the first character of the row's zero-padded merchant id."""
    merchant_id = row.id_comercio_movimiento
    return merchant_id[:1]
# One category per leading digit of the merchant id.
df['comercio_cat'] = df.apply(func, axis=1)

# + tags=[]
# Visualizar la columna de comercios recategorizada, segun cuantos id_comercios y cuantas transacciones se abarcan.
df_comercios = df['comercio_cat'].value_counts().to_frame().reset_index().rename(
    {'index':'comercio_cat',
     'comercio_cat':'frecuencia_ts'}, axis=1)
df_com_freq = pd.DataFrame(df.groupby(['comercio_cat'])['id_comercio_movimiento'].nunique().sort_values(ascending=True)) \
    .reset_index().rename({
        'id_comercio_movimiento':'frecuencia_idcom'
    }, axis=1)
df_com_freq = df_com_freq.merge(df_comercios, on='comercio_cat', how='inner')
df_com_freq

with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
    print(df_com_freq.sort_values(by='frecuencia_idcom'))
# -
# ### 3.1 Análisis e imputación de Datos Nulos
# + [markdown] tags=[]
# Se puede observar cierta sistematicidad en la pérdida de valores relacionados a datos personales de los clientes. A continuación procedemos a analizar cada columna con valores faltantes.
# -
# #### Fecha carga sistema movimiento
# + tags=[]
# Drop the system-load timestamp column.
df.drop('fecha_carga_sistema_movimiento', axis=1, inplace=True)
# -

# #### Tipo_prestamo_movimiento, Nombre_local_histo y Fecha_extraccion_movimiento

df.drop(columns = ['tipo_prestamo_movimiento','nombre_local_histo','fecha_extraccion_movimiento'], inplace=True)

# #### Datos Faltantes Personales de los clientes

df[df['sexo_descripcion'] == '']

columna = ['sexo_descripcion']
# Impute both empty strings and NaN with the 'Sin Datos' placeholder.
df['sexo_descripcion'] = df['sexo_descripcion'].replace('', 'Sin Datos')
df['sexo_descripcion'] = df['sexo_descripcion'].replace(np.nan, 'Sin Datos')
#df['sexo_descripcion']= df.sexo_descripcion.fillna(value=np.nan)
#const_imputer = SimpleImputer(missing_values= np.nan, strategy='constant',fill_value="Sin Datos")
#df.loc[ : , columna] = pd.DataFrame(const_imputer.fit_transform(df.loc[:][columna]) , columns = columna)
#df['sexo_descripcion'] = df['sexo_descripcion'].astype(str)

df['sexo_descripcion'].value_counts(dropna=False)
# ##### Datos faltantes Nivel de Estudio
df.nivel_estudio_descripcion_histo.value_counts()

columna = ['nivel_estudio_descripcion_histo']
# Rows where the education level is missing.
df_faltante_estud = pd.DataFrame(df[df['nivel_estudio_descripcion_histo'].isna()])
df_faltante_estud[columna].isna().sum()

dnis_faltantes = df_faltante_estud.dni_titular_movimiento.values
# Check whether any of those customers have the value filled in other rows.
df[(df['dni_titular_movimiento'].isin(dnis_faltantes)) & (~df['nivel_estudio_descripcion_histo'].isnull())]

# Observamos que la pérdida de datos de nivel de estudio también es sistemática, no pudiendose recuperar este dato, procedemos a imputar dichas filas con Sin datos.

columna = ['nivel_estudio_descripcion_histo']
#df['sexo_descripcion']= df.sexo_descripcion.fillna(value=np.nan)
const_imputer = SimpleImputer(missing_values= np.nan, strategy='constant',fill_value="Sin Datos")
df.loc[ : , columna] = pd.DataFrame(const_imputer.fit_transform(df.loc[:][columna]) , columns = columna)

df.nivel_estudio_descripcion_histo.value_counts()
# ##### Datos faltantes Vivienda
df.rel_vivienda_descripcion_histo.value_counts()

columna = ['rel_vivienda_descripcion_histo']
df_faltante_vivi = pd.DataFrame(df[df['rel_vivienda_descripcion_histo'].isna()])
df_faltante_vivi[columna].isna().sum()

dnis_faltantes = df_faltante_vivi.dni_titular_movimiento.values
# Same systematic-missingness check as for the education level above.
df[(df['dni_titular_movimiento'].isin(dnis_faltantes)) & (~df['rel_vivienda_descripcion_histo'].isnull())]

columna = ['rel_vivienda_descripcion_histo']
#df["nivel_estudio_descripcion_histo"]= df.nivel_estudio_descripcion_histo.fillna(value=np.nan)
const_imputer = SimpleImputer(missing_values= np.nan, strategy='constant',fill_value="Sin Datos")
df.loc[ : , columna] = pd.DataFrame(const_imputer.fit_transform(df.loc[:][columna]) , columns = columna)

df.rel_vivienda_descripcion_histo.value_counts()
# ##### Datos faltantes Edad
# Procedemos a imputar usando IterativeImputer (en su operación por defecto BayesianRigde) la variable edad_cliente. Para imputar usamos las columnas **edad_cliente** y **antig_cliente**.
df.shape

df[df['edad_cliente'].isna()]

# +
#df['edad_cliente'] = df['edad_cliente'].replace(np.nan, 0)

# +
#df[df['edad_cliente'].isna()]

# +
#, missing_values=0
# Model-based imputation of edad_cliente from antig_cliente
# (IterativeImputer defaults to BayesianRidge).
imp = IterativeImputer(max_iter=10, random_state=0)
imp_columns = ['edad_cliente', 'antig_cliente']
x_train = df[imp_columns]
imp.fit(x_train)
x_test = df[imp_columns]
x_test = np.round(imp.transform(x_test))
x_test = pd.DataFrame(x_test, columns=imp_columns)
x_test
# -

# NOTE(review): this positional assignment assumes df still has a clean
# RangeIndex (it was reset above) — otherwise values would misalign.
df['edad_cliente'] = x_test['edad_cliente']

df['edad_cliente'].describe().round(2)

df[df['edad_cliente'].isna()]

# La columna fecha_nacimiento se deja sin imputar debido a que su unica finalidad era obtener la edad del cliente. Por lo tanto, no se tiene en cuenta.

df.drop('fecha_nacimiento', inplace=True, axis=1)
# #### Datos Faltantes Geograficos de los clientes
# ##### Sistematicidad en la pérdida
col_geo = ['domicilio_codigo_postal', 'pais', 'provincia', 'ciudad', 'domicilio_barrio']
df[df['domicilio_codigo_postal'].isna()][col_geo].isna().sum()

# Tenemos un total de 24 provincias, el resto son registros NaN.

# '0' is not a valid postal code; treat it as missing.
df['domicilio_codigo_postal'] = df['domicilio_codigo_postal'].replace('0',np.nan)
df[df['domicilio_codigo_postal']=='0']

df['ciudad'] = df['ciudad'].str.strip()

print('Cantidad de filas con 400 en el CP:',len(df[df['ciudad'].isin(['CAMPO HERRERA'])]))
df[df['ciudad'].isin(['CAMPO HERRERA'])][['pais','provincia','ciudad','domicilio_codigo_postal']].drop_duplicates()

# Manual fix: assign postal code 4105 to every CAMPO HERRERA row.
df.loc[df['ciudad'].isin(['CAMPO HERRERA']),'domicilio_codigo_postal'] = '4105'
print('Cantidad de filas con 400 en el CP:',len(df[df['ciudad'].isin(['CAMPO HERRERA'])]))
df[df['ciudad'].isin(['CAMPO HERRERA'])][['pais','provincia','ciudad','domicilio_codigo_postal']].drop_duplicates()

df[df['pais'].isnull()][['pais','provincia','ciudad','domicilio_codigo_postal','domicilio_barrio']].isna().sum()

# Postal codes that appear with a null city somewhere in the data.
cp_ciu_nan = df[(df['domicilio_codigo_postal'].isna()!=True) & (df['ciudad'].isna()==True)]['domicilio_codigo_postal'].unique()
df[df['domicilio_codigo_postal'].isin(cp_ciu_nan)][['domicilio_codigo_postal','ciudad']]\
    .drop_duplicates().sort_values(by=['domicilio_codigo_postal'])

df.loc[:,'ciudad'] = df['ciudad'].str.upper()

# CP -> city lookup built from rows where both values are present.
df_dp_cd = df[(df['domicilio_codigo_postal'].isin(cp_ciu_nan)) & (df['ciudad'].isna()==False)]\
    [['domicilio_codigo_postal','ciudad']]\
    .drop_duplicates()\
    .sort_values(by=['domicilio_codigo_postal'])
df_dp_cd = df_dp_cd.rename(columns={'ciudad': 'ciudad_0', 'domicilio_codigo_postal': 'cp'})
df_dp_cd
# Realizamos merge sobre el dataframe creado, e imputamos los datos de ciudad.

df = df.merge(df_dp_cd.drop_duplicates(subset=['cp'], keep='first'),
              how='left',
              left_on = 'domicilio_codigo_postal',
              right_on = 'cp')\
    .drop(columns= ['cp'])
df.head()

# +
# Sustituimos los valores nulos de la columna ciudad por las ciudades (en mayus) de la columna
# ciudad_0 creada en la anterior unión.
df.loc[df['ciudad'].isnull(), 'ciudad'] = df['ciudad_0'].str.upper()

df[(df['domicilio_codigo_postal'].isin(cp_ciu_nan)) & (df['ciudad'].isna()==True)]\
    [['domicilio_codigo_postal','ciudad','ciudad_0']].drop_duplicates().sort_values(by=['domicilio_codigo_postal'])
# -

# Helper column is no longer needed once the nulls are filled.
df = df.drop(columns=['ciudad_0'])
# Procedemos a imputar **provincia** de la misma manera que **ciudad**.

cp_prov_nan = df[(df['domicilio_codigo_postal'].isna()!=True) & (df['provincia'].isna()==True)]['domicilio_codigo_postal'].unique()
df[df['domicilio_codigo_postal'].isin(cp_prov_nan)][['domicilio_codigo_postal','provincia']]\
    .drop_duplicates().sort_values(by=['domicilio_codigo_postal'])

df.loc[:,'provincia'] = df['provincia'].str.upper()

# NOTE(review): this filters on cp_ciu_nan (the city list) rather than the
# cp_prov_nan computed just above — confirm whether that is intentional.
df_dp_pv = df[(df['domicilio_codigo_postal'].isin(cp_ciu_nan)) & (df['provincia'].isna()==False)]\
    [['domicilio_codigo_postal','provincia']]\
    .drop_duplicates()\
    .sort_values(by=['domicilio_codigo_postal'])
df_dp_pv = df_dp_pv.rename(columns={'provincia': 'provincia_0', 'domicilio_codigo_postal': 'cp'})
df_dp_pv

df = df.merge(df_dp_pv.drop_duplicates(subset=['cp'], keep='first'),
              how='left',
              left_on = 'domicilio_codigo_postal',
              right_on = 'cp')\
    .drop(columns= ['cp'])
df.head()

# +
# Sustituimos los valores nulos de la columna provincia (provincia_0) por las provincias (en mayus) de la columna
# provincia creada en la anterior unión.
df.loc[df['provincia'].isnull(), 'provincia'] = df['provincia_0'].str.upper()

df[(df['domicilio_codigo_postal'].isin(cp_ciu_nan)) & (df['provincia'].isna()==True)]\
    [['domicilio_codigo_postal','provincia','provincia_0']].drop_duplicates().sort_values(by=['domicilio_codigo_postal'])
# -

df = df.drop(columns=['provincia_0'])

df[col_geo].isna().sum()
# ##### Imputación con dataset externo
# External master table: Argentine localities with their postal codes.
df_cp = pd.read_csv('https://raw.githubusercontent.com/JIBarrionuevoGaltier/localidades_AR/master/localidades_cp_maestro.csv')
df_cp.head()

# +
# Realizamos una curación de datos sobre este dataset.
# Normalize cp to string, same as the main frame's postal-code column.
df_cp.cp = df_cp[df_cp['cp'].isna()!=True]['cp'].apply(int).apply(str)
df_cp.head()
# -

# Observamos si todos los Códigos Postales de nuestro DataFrame original se encuentran en los objetos del merge.

df_cp = df_cp[~df_cp['cp'].isnull()]
df_cp.isna().sum()

# Rename to avoid a column clash with the main frame's 'provincia'.
df_cp = df_cp.rename(columns={'provincia': 'provincia_0'})
df_cp.head()

df.shape
# + [markdown] id="agH_T_lUbOsq"
# Procedemos a hacer el merge de los datasets, a través de la columna cp (Codigo Postal)

# + colab={"base_uri": "https://localhost:8080/", "height": 490} id="0yJnz2cG7UCJ" outputId="302064dd-4446-4130-c0f5-4a8c45de09ad"
# Unimos por cp, eliminamos duplicados, conservamos solo cps del dataframe original
df = df.merge(df_cp[['provincia_0','localidad','cp']].drop_duplicates(subset=['cp'], keep='first'),
              how='left',
              left_on = 'domicilio_codigo_postal',
              right_on = 'cp')\
    .drop(columns= ['cp'])
df.head()
# -

df.shape

# Fill remaining missing provincia/ciudad from the external master table.
df.loc[df['provincia'].isnull(), 'provincia'] = df['provincia_0'].str.upper()
df.loc[df['ciudad'].isnull(), 'ciudad'] = df['localidad'].str.upper()
df = df.drop(columns=['provincia_0','localidad'])

df[col_geo].isna().sum()

df.shape

# Drop pais and domicilio_barrio entirely.
df.drop(columns=['pais','domicilio_barrio'], inplace=True)

columna = ['domicilio_codigo_postal', 'provincia', 'ciudad']
#df['sexo_descripcion']= df.sexo_descripcion.fillna(value=np.nan)
# Whatever is still missing gets the 'Sin Datos' placeholder.
const_imputer = SimpleImputer(missing_values= np.nan, strategy='constant',fill_value="Sin Datos")
df.loc[ : , columna] = pd.DataFrame(const_imputer.fit_transform(df.loc[:][columna]) , columns = columna)

df[['domicilio_codigo_postal', 'provincia', 'ciudad']].isna().sum()

df.shape
# + [markdown] tags=[]
# #### Nueva eliminación de filas duplicadas
# -
# Duplicates may have been reintroduced by the merges above; drop them again.
df = df[~df.duplicated()]
# + [markdown] tags=[]
# #### Variables categoricas (Reagrupación)
# -
df.shape

# + [markdown] tags=[]
# ##### Estado Civil

# + tags=[]
df.estado_civil_descripcion.value_counts()
# + [markdown] tags=[]
# Observamos que existe una alta frecuencia en las categorías Solteros y Casados, perdiendo representativadad en el resto de los estados civiles, por lo cual decidimos agrupar a la mismas en una misma categoría.
# + tags=[]
# Collapse the long tail of marital statuses into 'Otros'.
soltero = ['Solteroa']
casado = ['Casadoa']
otros = ['Divorciadoa','Viudoa','Concubinoa','Separacion de hecho','Novioa']
sin_datos = ['Sin Datos']
df.loc[df['estado_civil_descripcion'].str.contains('|'.join(soltero)),'estado_civil_cat'] = 'Soltero'
df.loc[df['estado_civil_descripcion'].str.contains('|'.join(casado)),'estado_civil_cat'] = 'Casado'
df.loc[df['estado_civil_descripcion'].str.contains('|'.join(otros)),'estado_civil_cat'] = 'Otros'
df.loc[df['estado_civil_descripcion'].str.contains('|'.join(sin_datos)),'estado_civil_cat'] = 'Sin_datos'

# + tags=[]
df.estado_civil_cat.value_counts()
# -

# Viendo las frecuencias por provincias, vamos a recategorizar sobre una nueva columna, las provincias por regiones.

df[df['provincia'] == '']
# +
# Province -> region lookup (five regions plus a no-data bucket).
dic_region = {'REGION_NOROESTE': ['JUJUY','SALTA','TUCUMAN','CATAMARCA','SGO DEL ESTERO'],
              'REGION_NORDESTE': ['CHACO','FORMOSA','CORRIENTES','MISIONES'],
              'REGION_PAMPEANA': ['CORDOBA','BUENOS AIRES','CAPITAL FEDERAL','ENTRE RIOS','LA PAMPA','SANTA FE'],
              'REGION_CUYO': ['SAN JUAN','SAN LUIS','LA RIOJA','MENDOZA'],
              'REGION_PATAGONIA': ['SANTA CRUZ','TIERRA DEL FUEGO','RIO NEGRO','NEUQUEN','CHUBUT'],
              'SIN_DATOS': ['Sin Datos', '']}
df['region']= df['provincia']
for i in dic_region:
    df['region'] = df['region'].replace(dic_region[i], i)
df.head()
# -

df['region'].value_counts(dropna=False)

df[df['region'].isna()]
# + tags=[]
# Transactions (frecuencia_pr) and distinct customers per product.
df_prod = df['producto_naranja_movimiento'].value_counts().to_frame().reset_index().rename(
    {'index':'producto_naranja_movimiento',
     'producto_naranja_movimiento':'frecuencia_pr'}, axis=1)
df_prod_dni = pd.DataFrame(df.groupby(['producto_naranja_movimiento'])['dni_titular_movimiento'].nunique() \
    .sort_values(ascending=True)) \
    .reset_index()
df_prod_dni = df_prod_dni.merge(df_prod, on='producto_naranja_movimiento', how='inner')
df_prod_dni
# -
# Observamos que la distribución que resulta de esta transformacion no se parece a una normal. Por lo tanto, concluimos que se pierde mucha información y las ganancias obtenidas son bajas. Decidimos entonces, conservar la distribución original de esta variable.
# + [markdown] tags=[]
# ### 3.4.1 Seleccion de columnas a utilizar
# + tags=[]
# Keep only the modelling columns. Fix: .copy() makes df_final an
# independent frame, so the in-place replace() calls applied to it later
# do not operate on a slice view (SettingWithCopy warning / silently lost
# updates under pandas copy-on-write).
df_final = df[['dni_titular_movimiento', 'fecha_cupon_movimiento', 'moneda_movimiento', 'producto_naranja_movimiento', 'tipo_producto_tarjeta_movimiento',
               'anio_mes_cupon', 'sexo_descripcion', 'monto_ajustado',
               'cargo_sector_desc_hist', 'edad_cliente', 'antig_cliente', 'comercio_cat',
               'estado_civil_cat', 'region']].copy()
# + tags=[]
df_final.shape

# + tags=[]
df_final
# -

df_final['moneda_movimiento'].value_counts()

df_final.isna().sum()

df_final.shape
# ## Consideraciones en el tratamiento de las variables
# ### Codificación
# Column roles consumed by the ColumnTransformer further down.
ordinal_ft = 'dni_titular_movimiento'  # identifier, dropped from the features
target = 'monto_ajustado'              # target, passed through unchanged
#date_num = 'anio_mes_cupon'
num_features = ['anio_mes_cupon', 'edad_cliente', 'antig_cliente', 'fecha_cupon_movimiento']
trans_ft = ['producto_naranja_movimiento', 'tipo_producto_tarjeta_movimiento', 'moneda_movimiento', 'comercio_cat']
client_ft = ['sexo_descripcion', 'cargo_sector_desc_hist', 'estado_civil_cat', 'region']
# +
# Cambiamos los valores para moneda y
dic_tipo_prod = {42: 'American_express', 44: 'American_express_gold', 0: 'Clasica', 32: 'Master_internacional',
31: 'Master_nacional', 3: 'Naranja', 4: 'Naranja_virtual', 24: 'Naranja_visa_mini_int', 23: 'Naranja_visa_mini_nac',
12: 'Naranja_visa_internac.', 11: 'Naranja_visa_nacional', -1: 'Sin_Datos', 22: 'Visa_internacional', 21: 'Visa_nacional'}
dic_monedas = {0: 'Pesos',1: 'Dolares',3: 'Zeta'}
df_final.tipo_producto_tarjeta_movimiento = df_final.tipo_producto_tarjeta_movimiento.replace(dic_tipo_prod)
df_final.moneda_movimiento = df_final.moneda_movimiento.replace(dic_monedas)
# -
df_final.moneda_movimiento.value_counts()
# + tags=[]
# Sort chronologically so the later time-based grouping is stable.
df = df_final.sort_values(by=['fecha_cupon_movimiento'], ascending=True).copy()
# Convert the timestamp to a scaled float so it can pass through the numeric branch.
df['fecha_cupon_movimiento'] = df['fecha_cupon_movimiento'].values.astype(float)/10**11
# Currency label to string so the one-hot encoder treats it as categorical.
df['moneda_movimiento'] = df['moneda_movimiento'].astype(str)
# One-hot encode the categorical features; pass numeric features and the target
# through unchanged, and drop the client id (re-attached below).
cat_transformer = OneHotEncoder(handle_unknown='ignore')
encoder = ColumnTransformer(
    transformers=[
        ('dni', 'drop', [ordinal_ft]),
        ('num', 'passthrough', num_features),
        # ('date_num', discretizer, [date_num]),
        ('trans', cat_transformer, trans_ft),
        ('client', cat_transformer, client_ft),
        ('target', 'passthrough', [target])])
df_enc = encoder.fit_transform(df)
# Bug fix: ColumnTransformer.get_feature_names() was deprecated in scikit-learn 1.0
# and removed in 1.2. Prefer get_feature_names_out() and fall back for old versions.
try:
    cols = encoder.get_feature_names_out()
except AttributeError:
    cols = encoder.get_feature_names()
# + tags=[]
df['moneda_movimiento'].value_counts()
# + tags=[]
# Re-attach the client id column (dropped by the encoder) next to the encoded matrix.
cols_o = np.hstack([[ordinal_ft], cols])
df_stack = np.hstack([df[[ordinal_ft]], df_enc])
df_encode = pd.DataFrame(df_stack, columns=cols_o)
# -
# -
# ### Agrupación por Mes
# + tags=[]
# Rename the raw client columns so they carry the "client__" prefix used by the encoder.
df_encode.rename({'edad_cliente':'client__edad',
                  'antig_cliente':'client__antig'}, axis=1, inplace=True)
cols = df_encode.columns
# Aggregation function per column: client attributes keep their max (constant per
# client within a month), transaction one-hot columns are summed into monthly counts.
aggr = {}
aggr.update(dict.fromkeys([x for x in cols if 'client__' in x], 'max')) # includes age and tenure
aggr.update(dict.fromkeys([x for x in cols if 'trans__' in x], 'sum'))
aggr.update({target:'sum'})
# + tags=[]
num_features = ['anio_mes_cupon', 'client__edad', 'client__antig', 'fecha_cupon_movimiento']
# Convert the transaction one-hot columns (object dtype after np.hstack) to numeric
# so they can be summed by the aggregation above.
df_encode[num_features + [x for x in cols if 'trans' in x]] = \
df_encode[num_features + [x for x in cols if 'trans' in x]].apply(pd.to_numeric)
# Group by client and month.
group = ['dni_titular_movimiento', 'anio_mes_cupon']
df_mes = df_encode.groupby(group).agg(aggr).reset_index() # age and tenure
# + tags=[]
df_mes.shape
# + tags=[]
df_mes
# -
# ### Guardado
# We use the 'parquet' format to save the dataset: lighter and faster than csv.
df_mes.to_parquet('df_supervisado_agrupado_test.parquet')
df_mes.columns
df_mes.info()
# FIN
| notebooks/grupo2/Preprocesado_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Analysis with Python and Pandas Tutorial
# # Part 3 - Merging and Joining data
#
# This notebook is partially based on:
#
# https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html
# ## Tutorial Objectives
#
# In this tutorial, you will learn:
#
# * How to perform merge operations on dataframes, similar to SQL INNER JOINs
# * How to perform merge operations with inner, left, right, and (full) outer algorithms
# * How to identify rows with NaN value(s)
# import the Pandas library
import pandas as pd
# +
# First, let's define a few small employee-related data sets to merge.
empl_df1 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
                         'hire_date': [2004, 2008, 2012, 2014]})

empl_df2 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
                         'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})

sal_df = pd.DataFrame({'name': ['Bob', 'Jake', 'Lisa', 'Sue'],
                       'salary': [70000, 80000, 120000, 90000]})

bonus_df = pd.DataFrame({'name': ['Sue', 'Timmy', 'Lisa', 'Bob'],
                         'bonus': [500, 550, 1000, 300]})

boss_df = pd.DataFrame({'group': ['Accounting', 'Engineering', 'HR'],
                        'supervisor': ['Carly', 'Guido', 'Steve']})

skills_df = pd.DataFrame({'group': ['Accounting', 'Accounting', 'Engineering',
                                    'Engineering', 'HR', 'HR'],
                          'skills': ['math', 'spreadsheets', 'coding', 'linux',
                                     'spreadsheets', 'organization']})
# -
# Review the data frames
empl_df1
empl_df2
# Merge the data frames into a single frame
# Pandas finds the common column automatically
# (here both frames share the 'employee' column)
df = pd.merge(empl_df1, empl_df2)
df
# review the bosses dataframe
boss_df
# Merge employees with their group (many to one)
# The common column is "group", which we can explicitly specify
df = pd.merge(df, boss_df, on='group')
df
# review skills data
skills_df
# merge employee/group data with skills data (many to many)
# column to merge on is optional, but specify it anyway
pd.merge(empl_df2, skills_df, on='group')
# review salary data
sal_df
# Merge employee data with salary
# You'll have to identify the columns to merge on yourself
# ('employee' on the left vs 'name' on the right)
df = pd.merge(df, sal_df, left_on='employee', right_on='name')
df
# drop the duplicated column "name"
df.drop(columns='name', inplace=True)
df
# review end-of-year bonus data
bonus_df
# merge employee data with bonus data (disjoint sets)
# the default merge method is "inner join"
pd.merge(df, bonus_df, left_on='employee', right_on='name')
# left join to include every row on the left
pd.merge(df, bonus_df, left_on='employee', right_on='name', how='left')
# right join to include every row on the right
pd.merge(df, bonus_df, left_on='employee', right_on='name', how='right')
# outer join to include every row
all_df = pd.merge(df, bonus_df, left_on='employee', right_on='name', how='outer')
all_df
all_df.columns
# find rows with NaN
# (boolean Series: True for any row with at least one missing value)
na_rows = all_df.isna().any(axis='columns')
# output those rows
all_df[na_rows]
# ## Exercise
# Go ahead and load some Vietnam weather data and do basic cleanups of the columns. The primary objective of the exercise is to merge the two datasets so that temperature and humidity are combined.
#
# The dataset is available at:
#
# https://1drv.ms/u/s!AgtH78k0_cuvglx5ww3BMV9GpIm1
#
# Discuss your solutions with the person next to you!
| 03 Merging and Joining data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#https://www.dataquest.io/blog/tutorial-time-series-analysis-with-pandas/
import pandas as pd
# Parse a single timestamp string (12-hour clock with am/pm suffix).
pd.to_datetime('2018-01-15 3:45pm')
# Ambiguous day/month order defaults to month-first: July 8th, 1952.
pd.to_datetime('7/8/1952')
# dayfirst=True flips the interpretation: August 7th, 1952.
pd.to_datetime('7/8/1952', dayfirst=True)
# A list of heterogeneous date strings becomes a DatetimeIndex.
pd.to_datetime(['2018-01-05', '7/8/1952', 'Oct 10, 1995'])
# An explicit format string is unambiguous (and faster) for two-digit years.
pd.to_datetime(['2/25/10', '8/6/17', '12/15/12'], format='%m/%d/%y')
import pandas as pd
import os
# NOTE(review): hard-coded Windows path — adjust to your local data directory.
os.chdir('C:\\Users\\Admin\\Desktop\\')
opsd_daily = pd.read_csv('opsd_germany_daily.csv')
opsd_daily.shape
opsd_daily
opsd_daily.head(3)
opsd_daily.tail(3)
opsd_daily.dtypes
# Convert the Date column to datetime64 and use it as the index.
opsd_daily['Date']=pd.to_datetime(opsd_daily['Date'])
opsd_daily = opsd_daily.set_index('Date')
opsd_daily.head(3)
opsd_daily.index
#One step process
opsd_daily = pd.read_csv('opsd_germany_daily.csv', index_col=0, parse_dates=True)
opsd_daily
# Add columns with year, month, and weekday name
opsd_daily['Year'] = opsd_daily.index.year
opsd_daily['Month'] = opsd_daily.index.month
# Bug fix: DatetimeIndex.weekday_name was removed in pandas 1.0; day_name() is the
# supported replacement (available since pandas 0.23).
opsd_daily['Weekday Name'] = opsd_daily.index.day_name()
# Display a random sampling of 5 rows
opsd_daily.sample(5, random_state=0)
# Label-based selection: single day, date range, and whole month.
opsd_daily.loc['2017-08-10']
opsd_daily.loc['2014-01-20':'2014-01-22']
opsd_daily.loc['2012-02']
import matplotlib.pyplot as plt
# Display figures inline in Jupyter notebook
# NOTE(review): assumes the '%matplotlib inline' magic already ran in the notebook.
opsd_daily['Consumption'].plot(linewidth=0.5);
# Point-style subplots for the three main series (no connecting lines).
cols_plot = ['Consumption', 'Solar', 'Wind']
axes = opsd_daily[cols_plot].plot(marker='.', alpha=0.5, linestyle='None', figsize=(11, 9), subplots=True)
for ax in axes:
    ax.set_ylabel('Daily Totals (GWh)')
# Zoom into the 2016-2017 consumption series.
ax = opsd_daily.loc['2016':'2017', 'Consumption'].plot()
ax.set_ylabel('Daily Consumption (GWh)');
# Zoom further into Jan-Feb 2017 with point markers.
ax = opsd_daily.loc['2017-01':'2017-02', 'Consumption'].plot(marker='o', linestyle='-')
ax.set_ylabel('Daily Consumption (GWh)');
import matplotlib.dates as mdates
fig, ax = plt.subplots()
ax.plot(opsd_daily.loc['2017-01':'2017-02', 'Consumption'], marker='o', linestyle='-')
ax.set_ylabel('Daily Consumption (GWh)')
ax.set_title('Jan-Feb 2017 Electricity Consumption')
# Set x-axis major ticks to weekly interval, on Mondays
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=mdates.MONDAY))
# Format x-tick labels as 3-letter month name and day number
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'));
import seaborn as sns
# Weekly seasonality: distribution of consumption by day of week.
sns.boxplot(data=opsd_daily, x='Weekday Name', y='Consumption');
# Specify the data columns we want to include (i.e. exclude Year, Month, Weekday Name)
data_columns = ['Consumption', 'Wind', 'Solar', 'Wind+Solar']
# Resample to weekly frequency, aggregating with mean
opsd_weekly_mean = opsd_daily[data_columns].resample('W').mean()
opsd_weekly_mean.head(3)
# Start and end of the date range to extract
start, end = '2016-01', '2017-06'
# Plot daily and weekly resampled time series together
fig, ax = plt.subplots()
ax.plot(opsd_daily.loc[start:end, 'Solar'],
marker='.', linestyle='-', linewidth=0.5, label='Daily')
ax.plot(opsd_weekly_mean.loc[start:end, 'Solar'],
marker='o', markersize=8, linestyle='-', label='Weekly Mean Resample')
ax.set_ylabel('Solar Production (GWh)')
ax.legend();
| Basics/MLBasics/TimeSeries-Plots.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C#
// language: csharp
// name: csharp
// ---
// 
// ## Welcome to The QuantConnect Research Page
// #### Refer to this page for documentation https://www.quantconnect.com/docs#Introduction-to-Jupyter
// #### Contribute to this template file https://github.com/QuantConnect/Lean/blob/master/Jupyter/BasicCSharpQuantBookTemplate.ipynb
// ## QuantBook Basics
//
// ### Start QuantBook
// - Load "QuantConnect.csx" with all the basic imports
// - Create a QuantBook instance
// +
#load "QuantConnect.csx"
// Create a QuantBook research instance.
var qb = new QuantBook();
// Selecting asset data
var spy = qb.AddEquity("SPY");   // US equity
var eur = qb.AddForex("EURUSD"); // FX pair
var btc = qb.AddCrypto("BTCUSD"); // crypto pair
var fxv = qb.AddData<FxcmVolume>("EURUSD_Vol", Resolution.Hour); // custom volume data feed
// -
// ### Historical Data Requests
//
// We can use the QuantConnect API to make Historical Data Requests. The data will be presented as multi-index pandas.DataFrame where the first index is the Symbol.
//
// For more information, please follow the [link](https://www.quantconnect.com/docs#Historical-Data-Historical-Data-Requests).
// Gets historical data from the subscribed assets, the last 360 datapoints with daily resolution
var h1 = qb.History(qb.Securities.Keys, 360, Resolution.Daily);
// Print the distinct symbols present in the history result.
// Bug fix: the statement was missing its terminating semicolon.
Console.WriteLine(string.Join(",", h1.SelectMany(slice => slice.Keys).Distinct()));
| Jupyter/BasicCSharpQuantBookTemplate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Gekoppelte Netze berechnen
# -
# Sie haben bereits gelernt, wie man Netze in pandapower und pandapipes aufsetzen und berechnen kann. Wesentliches Merkmal der Simulationsumgebung ist zudem die Möglichkeit, Netze miteinander zu koppeln, um analysieren zu können, wie sich der Zustand des einen auf den Zustand des anderen auswirkt. Um eine solche Berechnung durchzuführen, sind die folgenden Dinge vonnöten:
#
# - Die zwei zu verbindenden Netze
# - Das Hinzufügen von Kopplungselementen zwischen den Netzen. Dabei kann es sich bspw. um eine P2G-Anlage handeln. Kopplungselemente werden als Controller modelliert, da diese auch zur Regelung bestimmter Größen eingesetzt werden können. Controller existieren aber nicht nur zur Kopplung der Netze untereinander. Wir werden auch Fälle kennenlernen, in denen diese nur in einem Netz definiert sind.
# - Üblicherweise werden gekoppelte Netze immer über einen längeren Zeitraum beobachtet. Deswegen sehen wir uns auch an, wie man eine Zeitreihensimulation durchführen kann.
#
# In diesem Tutorial wird eine P2G-Anlage und eine G2P-Einheit genutzt, um ein Strom- mit einem Gasnetz zu verbinden. Eingabewerte für diese Anlagen werden zu Beginn der Simulation festgelegt. Während der Simulation werden Ausgabegrößen anhand von Effizienzfaktoren berechnet.
#
# Die Kopplung der Netze untereinander erfolgt zunächst in drei Schritten. Eine Zeitreihenbetrachtung ist zunächst nicht enthalten, wird aufbauend auf der Kopplung aber hinzugefügt:
#
# 1. Erzeugen eines Containers zur Aufnahme der zu koppelnden Netze
# 2. Hinzufügen der Controller für die Kopplungselemente
# 3. Durchführen der Berechnung
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Erzeugen eines "Multi-Nets"
#
# Im Gegensatz zu den bereits erstellten Netzwerken, machen wir uns diesmal nicht die Arbeit, Netze in der Konsole zu generieren. Stattdessen laden wir bereits vorhandene Netze einfach und definieren das Fluid.
#
# + pycharm={"name": "#%%\n"}
# Load an example power net from pandapower and a meshed gas net from pandapipes.
from pandapower import networks as e_nw
net_power = e_nw.example_simple()
import pandapipes as ppipes
from pandapipes import networks as g_nw
net_gas = g_nw.gas_meshed_square()
# some adjustments:
net_gas.junction.pn_bar = net_gas.ext_grid.p_bar = 30  # raise nominal/grid pressure to 30 bar
net_gas.pipe.diameter_m = 0.4  # widen all pipes to 0.4 m
# set fluid:
ppipes.create_fluid_from_lib(net_gas, 'hydrogen', overwrite=True)  # replace default fluid with hydrogen
# + [markdown] pycharm={"name": "#%% md\n"}
#
# Anschließend wird der "Multinet"-Container erstellt. Er nimmt die zu verbindenden Netzwerke im Rahmen einer gekoppelten Anaylse auf. Jedes Netz muss einen eigenen Namen zugewiesen bekommen. Standardnamen sind "power" und "gas", aber es kann jeder beliebige Name gewählt werden. Die Zahl der Netze ist nicht begrenzt.
# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.create_multinet import create_empty_multinet, add_net_to_multinet
# Container holding both networks for the coupled analysis; each net gets a unique name.
multinet = create_empty_multinet('tutorial_multinet')
add_net_to_multinet(multinet, net_power, 'power')
add_net_to_multinet(multinet, net_gas, 'gas')
# + [markdown] pycharm={"name": "#%% md\n"}
# Die einzelnen Netzwerke können über den Variablennamen oder den Multinet-Container angesprochen werden:
# + pycharm={"name": "#%%\n"}
print(multinet.nets['power'])
print(multinet.nets['gas'])
# + pycharm={"name": "#%%\n"}
print(net_power)
print(net_gas)
# + pycharm={"name": "#%%\n"}
# Both handles refer to the very same objects (no copies are made).
print(net_power is multinet.nets['power'])
print(net_gas is multinet.nets['gas'])
# + [markdown] pycharm={"name": "#%% md\n"}
# Folglich werden Änderungen in den jeweils vom "Multinet" losgelösten Netzen auf die im "Multinet" übertragen.
# -
# ## Kopplungspunkte hinzufügen
#
# Jetzt werden Elemente für die P2G und G2P-Controller hinzugefügt. Jeder Controller ist mit mindestens einem Element eines Netzwerks verbunden, von welchem er Elemente entnimmt oder dorthin überträgt. Im Falle von Kopplungspunkten für Multienergienetze gibt es zwei Verbindungen: Eine Verbindung zu einem Element des Gasnetzes und eine Verbindung zu einem Element des Stromnetzes.
#
# Im Folgenden werden zunächst die Elemente erzeugt, mit denen die Controller verbunden werden:
# + pycharm={"name": "#%%\n"}
import pandapower as ppower
import pandapipes as ppipes
# Coupling endpoints: P2G appears as an electrical load plus a gas source; G2P as a
# gas sink plus a static generator. The zero-valued outputs (source mdot, sgen p_mw)
# are filled in by the coupling controllers during the simulation.
p2g_id_el = ppower.create_load(net_power, bus=3, p_mw=2, name="power to gas consumption")
p2g_id_gas = ppipes.create_source(net_gas, junction=1, mdot_kg_per_s=0, name="power to gas feed in")
g2p_id_gas = ppipes.create_sink(net_gas, junction=1, mdot_kg_per_s=0.1, name="gas to power consumption")
g2p_id_el = ppower.create_sgen(net_power, bus=5, p_mw=0, name="fuel cell feed in")
# -
# Jetzt werden die eigentlichen Controller erzeugt und initialisiert. Die Netzelemente, die mit den Controllern verbunden sind, werden als Parameter übergeben. Der Controller agiert damit als Kopplungspunkt zwischen den Netzen.
#
# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.control.controller.multinet_control import P2GControlMultiEnergy, \
    G2PControlMultiEnergy
# Coupling controllers: each reads its input element in one net and writes the
# converted output (input scaled by the efficiency factor) into the other net.
p2g_ctrl = P2GControlMultiEnergy(multinet, p2g_id_el, p2g_id_gas, efficiency=0.7,
                                 name_power_net="power", name_gas_net="gas")
g2p_ctrl = G2PControlMultiEnergy(multinet, g2p_id_el, g2p_id_gas, efficiency=0.65,
                                 name_power_net="power", name_gas_net="gas")
# + [markdown] pycharm={"name": "#%% md\n"}
# Intern arbeiten die Controller mit einem importierten Brennwert. Dieser stammt aus den Fluideigenschaften des Netzes
#
# *pandapipes/properties/[fluid_name]/higher_heating_value.txt*)
#
# Controller können auf vielfältige Weise eingesetzt werden. Dieses Tutorial kann nicht alle Aspekte abdecken. Weitere Infos finden Sie aber unter: https://pandapower.readthedocs.io/en/latest/control/control_loop.html
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Simulation
#
# Jetzt, wo die Netze und die Controller erstellt worden sind, kann die Berechnung gestartet werden. Es ist bekannt, dass die Berechnung von pandapower und pandapipes-Netzen mit den Kommandos `runpp` bzw. `pipeflow` gestartet wird. Werden gekoppelte Netze berechnet, so wird stattdessen der Befehl `run_control` eingesetzt, der intern die Berechnung der Teilnetze startet, aber auch dafür sorgt, dass die Controller aufgerufen werden.
# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.control.run_control_multinet import run_control
# run_control solves both sub-networks and invokes the coupling controllers.
run_control(multinet)
# + [markdown] pycharm={"name": "#%% md\n"}
# Nach der Berechnung wurden die Ausgabewerte aktualisiert und entsprechen der Eingangsleistung multipliziert mit dem Effizienzfaktor.
#
# + pycharm={"name": "#%%\n"}
# Converted outputs: gas feed-in of the P2G unit and electrical feed-in of the G2P unit.
print(net_gas.source.loc[p2g_id_gas, 'mdot_kg_per_s'])
print(net_power.sgen.loc[g2p_id_el, 'p_mw'])
# + [markdown] pycharm={"name": "#%% md\n"}
# Zusammengefasst:
# + pycharm={"name": "#%%\n"}
# Self-contained recap of all the steps above in one cell.
import pandapipes as ppipes
import pandapower as ppower
from pandapipes import networks as g_nw
from pandapower import networks as e_nw
from pandapipes.multinet.create_multinet import create_empty_multinet, add_net_to_multinet
from pandapipes.multinet.control.controller.multinet_control import P2GControlMultiEnergy, G2PControlMultiEnergy
from pandapipes.multinet.control.run_control_multinet import run_control
# get networks:
net_power = e_nw.example_simple()
net_gas = g_nw.gas_meshed_square()
# some adjustments:
net_gas.junction.pn_bar = net_gas.ext_grid.p_bar = 30
net_gas.pipe.diameter_m = 0.4
net_gas.controller.rename(columns={'controller': 'object'}, inplace=True) # due to new version
# set fluid:
fluid = {'name':'hydrogen', 'cal_value':38.4}
ppipes.create_fluid_from_lib(net_gas, fluid['name'], overwrite=True)
# create multinet and add networks:
multinet = create_empty_multinet('tutorial_multinet')
add_net_to_multinet(multinet, net_power, 'power')
add_net_to_multinet(multinet, net_gas, 'gas')
# create elements corresponding to conversion units:
p2g_id_el = ppower.create_load(net_power, bus=3, p_mw=2, name="power to gas consumption")
p2g_id_gas = ppipes.create_source(net_gas, junction=1, mdot_kg_per_s=0, name="power to gas feed in")
g2p_id_gas = ppipes.create_sink(net_gas, junction=1, mdot_kg_per_s=0.1, name="gas to power consumption")
g2p_id_el = ppower.create_sgen(net_power, bus=5, p_mw=0, name="fuel cell feed in")
# create coupling controllers:
p2g_ctrl = P2GControlMultiEnergy(multinet, p2g_id_el, p2g_id_gas, efficiency=0.7,
                                 name_power_net="power", name_gas_net="gas")
g2p_ctrl = G2PControlMultiEnergy(multinet, g2p_id_el, g2p_id_gas, efficiency=0.65,
                                 name_power_net="power", name_gas_net="gas")
# run simulation:
run_control(multinet)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Durchführung einer zeitabhängigen Simulation
#
# In der Regel möchte man die Zustände des Systems für den Fall ermitteln, dass Eingabedaten mit der Zeit variieren. Dies kann dann der Fall sein, wenn z. B. Lasten ein zeitlich nicht konstantes Profil aufweisen. Die Controller, die wir im vorigen Abschnitt eingeführt haben, bilden selbst kein zeitabhängiges Verhalten ab. Sie können aber mit einem sogenannten ConstController kombiniert werden, welcher Zeitreihen einlesen und in jedem Zeitschritt einen anderen Wert zur Verfügung stellen kann. Es gibt Funktionen, welche die kombinierten Controller direkt erzeugen können. Die Namen dieser Funktionen sind `coupled_p2g_const_control` und `coupled_g2p_const_control`.
#
# + [markdown] pycharm={"name": "#%% md\n"}
# Das Beispiel des letzten Abschnitts wird jetzt um eine zeitabhängige Simulation erweitert. Der folgende Block richtet die Netze wieder ein. Noch fehlen allerdings die Controller.
# + pycharm={"name": "#%%\n"}
# prepare just like before
net_power = e_nw.example_simple()
net_gas = g_nw.gas_meshed_square()
net_gas.junction.pn_bar = net_gas.ext_grid.p_bar = 30
net_gas.pipe.diameter_m = 0.4
net_gas.controller.rename(columns={'controller': 'object'}, inplace=True) # due to new version
fluid = {'name':'hydrogen', 'cal_value':38.4}
ppipes.create_fluid_from_lib(net_gas, fluid['name'], overwrite=True)
multinet = create_empty_multinet('tutorial_multinet')
# NOTE: the nets are registered under different names here ('power_net'/'gas_net')
# than in the previous example ('power'/'gas') — controllers below must match.
add_net_to_multinet(multinet, net_power, 'power_net')
add_net_to_multinet(multinet, net_gas, 'gas_net')
# Conversion-unit endpoints, same as before.
p2g_id_el = ppower.create_load(net_power, bus=3, p_mw=2, name="power to gas consumption")
p2g_id_gas = ppipes.create_source(net_gas, junction=1, mdot_kg_per_s=0, name="power to gas feed in")
g2p_id_gas = ppipes.create_sink(net_gas, junction=1, mdot_kg_per_s=0.1, name="gas to power consumption")
g2p_id_el = ppower.create_sgen(net_power, bus=5, p_mw=0, name="fuel cell feed in")
# + [markdown] pycharm={"name": "#%% md\n"}
# Der folgende Block erstellt eine Funktion, die Zufallsdaten für die Zeitreihen erzeugt. Insgesamt werden 10 Zeitschritte berechnet, wie am Parameter der Funktion zu erkennen ist. Der mit Zufallszahlen gefüllte pandas DataFrame wird am Ende der Funktion als Attribut eines Objekts der DFData-Klasse gespeichert. Diese wird von pandapower definiert und dient dem einfacheren Zugriff auf die im Frame gespeicherten Daten. Alle Controller können mit dieser Datenstruktur umgehen.
# + pycharm={"name": "#%%\n"}
from pandas import DataFrame
from numpy.random import random
from pandapower.timeseries import DFData

def create_data_source(n_timesteps=10):
    """Build random demand profiles for the two conversion units.

    Returns the raw DataFrame of profiles and the same data wrapped in a
    pandapower DFData object, which the controllers read one step at a time.
    """
    demand = DataFrame()
    # P2G consumption in [1, 3) MW, G2P gas consumption in [0, 0.1) kg/s.
    demand['power to gas consumption'] = random(n_timesteps) * 2 + 1
    demand['gas to power consumption'] = random(n_timesteps) * 0.1
    return demand, DFData(demand)

profiles, ds = create_data_source(10)
# + [markdown] pycharm={"name": "#%% md\n"}
# Im Rahmen von zeitabhängigen Simulationen fallen größere Ergebnismengen an. Für jeden Zeitschritt kann der gesamte Zustand des Netzes gespeichert und anschließend ausgewertet werden. Für die Ergebnisse zeitabhängiger Simulationen wird eine weitere Datenstruktur bereitgestellt: Der OutputWriter. Auch bei diesem handelt es sich um eine Klasse.
#
# Die folgende Funktion legt für jedes Teilnetz einen eigenen OutputWriter an und speichert diese in einem Python-dictionary. Für jedes Netz wird eine Liste auszugebender Größen, die `log_variables`, erstellt. Es können Spalten verschiedener Ergebnistabellen kombiniert werden. Die erstellten Listen werden anschließend im OutputWriter gespeichert.
# + pycharm={"name": "#%%\n"}
from os.path import join, dirname
from pandapower.timeseries import OutputWriter
def create_output_writers(multinet, time_steps=None):
    """Create one OutputWriter per sub-network of *multinet*.

    Power nets log bus/line/load results, gas nets log sink/source/pipe/junction
    results; each writer dumps CSV files under timeseries/results/<power|gas>.

    :param multinet: multinet container whose ``nets`` dict is inspected
    :param time_steps: time steps forwarded to each OutputWriter
    :return: dict mapping net name -> OutputWriter
    :raises AttributeError: for nets that are neither pandapower nor pandapipes nets
    """
    nets = multinet["nets"]
    ows = dict()
    for key_net in nets.keys():
        ows[key_net] = {}  # NOTE(review): placeholder, immediately overwritten below
        if isinstance(nets[key_net], ppower.pandapowerNet):
            # Electrical result columns to log every time step.
            log_variables = [('res_bus', 'vm_pu'),
                             ('res_line', 'loading_percent'),
                             ('res_line', 'i_ka'),
                             ('res_bus', 'p_mw'),
                             ('res_bus', 'q_mvar'),
                             ('res_load', 'p_mw'),
                             ('res_load', 'q_mvar')]
            # NOTE(review): dirname('__file__') passes the literal string, which
            # yields '' — the output path is relative to the CWD; confirm intended.
            ow = OutputWriter(nets[key_net], time_steps=time_steps,
                              log_variables=log_variables,
                              output_path=join(dirname('__file__'),'timeseries', 'results', 'power'),
                              output_file_type=".csv")
            ows[key_net] = ow
        elif isinstance(nets[key_net], ppipes.pandapipesNet):
            # Hydraulic result columns to log every time step.
            log_variables = [('res_sink', 'mdot_kg_per_s'),
                             ('res_source', 'mdot_kg_per_s'),
                             ('res_ext_grid', 'mdot_kg_per_s'),
                             ('res_pipe', 'v_mean_m_per_s'),
                             ('res_junction', 'p_bar'),
                             ('res_junction', 't_k')]
            ow = OutputWriter(nets[key_net], time_steps=time_steps,
                              log_variables=log_variables,
                              output_path=join(dirname('__file__'), 'timeseries', 'results', 'gas'),
                              output_file_type=".csv")
            ows[key_net] = ow
        else:
            raise AttributeError("Could not create an output writer for nets of kind " + str(key_net))
    return ows
ows = create_output_writers(multinet, 10)
# + [markdown] pycharm={"name": "#%% md\n"}
# Jetzt werden die bereits erwähnten Controller hinzugefügt. Es ist zu beachten, dass die data_source, welche die Zeitreihen beschreibt, als Parameter mit übergeben wird. So weiß der jeweilige Controller, woher er die Eingangsdaten des aktuellen Zeitschritts nehmen soll.
# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.control.controller.multinet_control import coupled_p2g_const_control, \
    coupled_g2p_const_control
# Combined controllers: a ConstController reads the named profile column from the
# data source each time step, and the coupling controller converts the value into
# the other network, scaled by the given efficiency.
coupled_p2g_const_control(multinet, p2g_id_el, p2g_id_gas,
                          name_power_net="power_net", name_gas_net="gas_net",
                          profile_name='power to gas consumption', data_source=ds,
                          p2g_efficiency=0.7)
coupled_g2p_const_control(multinet, g2p_id_el, g2p_id_gas,
                          name_power_net="power_net", name_gas_net="gas_net",
                          element_type_power="sgen",
                          profile_name='gas to power consumption', data_source=ds,
                          g2p_efficiency=0.65)
# + [markdown] pycharm={"name": "#%% md\n"}
# Die ConstControllers werden in den Teilnetzen gespeichert. Die Kopplungscontroller, welche die Verbindung zwischen den Netzen herstellen, befinden sich dagegen im multinet.
#
# + pycharm={"name": "#%%\n"}
# The coupling controllers live in the multinet; the ConstControllers in the subnets.
print(multinet.controller)
print(net_power.controller)
print(net_gas.controller)
# + [markdown] pycharm={"name": "#%% md\n"}
# Die Simulation wird mit dem Befehl `run_timeseries` gestartet. Zu beachten sind die Parameter der `run_timeseries`-Funktion. Sowohl die Zeitschrittweite, als auch die erstellte OutputWriter-Struktur wird der Funktion mit übergeben. Nach der Simulation kann auf die OutputWriter zugegriffen werden, um die gewünschten Größen zu extrahieren. Übrigens: Innerhalb der `run_timeseries`-Funktion ruft pandapipes wieder die bereits bekannte `run_control`-Funktion auf. Im Wesentlichen wird nur eine Schleife um letztere Funktion gelegt, um die Berechnung für die angegebene Zahl von Zeitschritten zu wiederholen.
# + pycharm={"name": "#%%\n"}
from pandapipes.multinet.timeseries.run_time_series_multinet import run_timeseries
# Solve 10 coupled time steps; internally run_timeseries loops run_control per step.
run_timeseries(multinet, time_steps=range(10), output_writers=ows)
| tutorials/multienergienetze.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Object Oriented Programming - Decorators and Multiple Inheritance
# ## Decorators
# In mathematics higher order functions are ones which take function/s as arguments and return a function as a result. Such capabilites are implemented in Python by using decorators.
#
# Of course, you can implement a similar functionality using a combination of `def` and `lambda`. However, it is generally unsafe practice to use `lambda` methods. In fact, the creator of the Python language, <NAME>, suggested its removal, but the whole community of programmers was too used to it, protested against its removal, and hence it remained.
#
# A decorator is a function which takes as input another function and extends its behaviour/capability without making any explicit changes to it.
#
# First of all you need to understand the idea of `first class objects`.
#
# A first class object is an language entity that can be treated as a native variable. That means it can be created, destroyed, passed as an argument to a function, printed as you wish, etc.
# +
def func2(func1):
    """Return the argument plus one (despite the name, the argument is a number here)."""
    return func1 + 1


def func3(func2, arg):
    """Call the callable received as ``func2`` with ``arg``.

    Demonstrates that functions are first-class objects: here ``func2`` is being
    passed in as a parameter and shadows the module-level ``func2``.
    """
    return func2(arg)


print(func2(2))
print(func3(func2, 3))
# +
def user_defined_decorator(function1):
    """Wrap *function1* so that a message is printed before and after it runs."""
    def wrapper():
        print("This statement is being printed before the passed function is called.")
        function1()
        print("This statement is being printed after the passed function is called.")
    return wrapper

# Bug fix: the original line read '@us', an unfinished decorator reference that
# raises NameError at definition time. Apply the decorator by its full name.
@user_defined_decorator
def task():
    print("Lite")

# task is already wrapped by the decorator, so a plain call runs the wrapper
# (the original manually invoked user_defined_decorator(task)() instead).
task()
# -
# ### Common decorators
# `@staticmethod` acts as a wrapper and informs the interpreter that the method is one which does not depend on the class or the object. It is just a method which is logical to include in the class body.
#
# `@classmethod` acts as a wrapper and informs the interpreter that the method is one which depends on the class. This can be clearly understood cause the first argument is interpreted as the class type. It is a method that is commonly shared by all objects of the class type.
# +
class BITSian():
    """Demo class contrasting @staticmethod with a manually applied classmethod."""

    def __init__(self, name, bitsian=True):
        self.name = name          # person's name
        self.bitsian = bitsian    # whether the person is a BITSian

    @staticmethod
    def is_object():
        # Depends on neither the class nor the instance.
        return True

    def is_human(cls):
        # Intentionally NOT decorated with @classmethod — it is converted
        # explicitly with classmethod(...) below to show what the decorator does.
        print(cls)
        return True

    def get_name(self):
        print(self)
        return self.name

    def is_bitsian(self):
        return str(self.name + " is a BITSian : " + str(self.bitsian))

p = BITSian("<NAME>")
print(p.get_name())
print(p.is_bitsian())
# If @staticmethod wasn't there, then this would result in an error cause arguments don't match.
print(p.is_object())
# If @classmethod wasn't there, then this "p" would be interpreted as "object" type and not "class" type.
# NOTE(review): calling a classmethod object directly (rather than via a class
# attribute) is not supported on every Python version — confirm on the target runtime.
print(classmethod(p.is_human)())
# +
class BITSian():
    """Demo of @property getter/setter pairs wrapping a backing attribute."""

    def __init__(self, name, bitsian=True):
        self.k = name             # backing attribute for the 'name' property
        self.bitsian = bitsian

    @staticmethod
    def is_object():
        return True

    @classmethod
    def is_human(cls):
        return True

    @property
    def name(self):
        # Getter: runs on every read of obj.name.
        print("TEST getter")
        return self.k

    @name.setter
    def name(self, name):
        # Setter: runs on every assignment to obj.name.
        print("TEST setter")
        self.k = name

    def is_bitsian(self):
        # Reading self.name here goes through the property getter too.
        return str(self.name + " is a BITSian : " + str(self.bitsian))

    def __str__(self):
        return "BITSian : " + self.name

p = BITSian("Keerthana")
print(p)          # __str__ triggers the getter
p.name ="<NAME>"  # triggers the setter
print(p)
# -
# `@slow, @XFAIL`, etc are decorators used in unit testing(i.e. pytest). They will make sense only when unit testing is taught.
# ## Inheriting from Multiple Classes
# +
class A():
    """First parent class; provides its own save()."""
    def save(self):
        print("Save in A")


class B():
    """Second parent class; also provides a save() of its own."""
    def save(self):
        print("Save in B")


class C(A,B):
    """Inherits from both A and B; the MRO (C, A, B) means A.save wins."""
    def __init__(self):
        pass


# Instantiate one of each to compare which save() gets resolved.
a = A()
c = C()
b = B()
# -
# ## The Diamond Problem
#
# Consider a situation where there is one parentclass A and then two more subclasses B and C. Then consider a further subclass D inheriting from B and C both. If there be a method defined in A which is inherited in B and C and then overidden, which one will D use ?
# +
class A:
    def test(self):
        print("Test of A called")


class B(A):
    def test(self):
        print("Test of B called")


class C(A):
    def test(self):
        print("Test of C called")


class D(C, B):
    """Diamond: D inherits from C and B, which both override A.test()."""
    pass


# The Method Resolution Order decides which test() D uses: D -> C -> B -> A -> object.
print(D.mro())
# Bug fix: 'd' was commented out in the original but still used two lines below,
# raising NameError. Create the instance before using it.
d = D()
d.test()  # resolves to C.test via the MRO
D.mro()[2].test(d)  # explicitly calls B.test — a terrible thing to write in development level code!! Think about re-implementation.
# -
# If you write properly structured code then you should never run into the diamond paradox. If there are workarounds allowing you to override the Method Resolution Order and access the superclass B method then don't do it. Not advisable at all.
#
# Instead think about how to restructure your code.
| Week 5/Lecture_11_Decorators_Multiple_Inheritance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: _python
# language: python
# name: _python
# ---
# +
import numpy as np
import pandas as pd
from loaders.cognicity_loader import CognicityLoader
import chennai_config
config = chennai_config.config
loader = CognicityLoader(config)
df = loader.get_flood_depth()
# Union of all report keys; pkeys with no reported depth are filled with 0.
a = loader.config["flood_pkeys"]
b = loader.config["no_flood_pkeys"]
all_keys = a.union(b)
diff = pd.Index(all_keys).difference(df.index)
blah = pd.DataFrame(data=np.array([0]*diff.size), index=diff, columns=["flood_depth"]).rename_axis(index="pkey")
# FIX: DataFrame.append was removed in pandas 2.0 — pd.concat is the supported equivalent.
blah = pd.concat([blah, df])
blah.index
# +
import pandas as pd
# Known flood window (pre-quoted so the strings can be pasted straight into SQL).
start_known_flood = "'2017-11-01 00:00:35.630000-04:00'"
end_known_flood = "'2017-11-07 00:00:35.630000-04:00'"
# NOTE(review): `defaultConfig` is not defined anywhere in this notebook (earlier
# cells build `config`), and the query contains no %(start_date)s / %(end_date)s
# placeholders, so `params` is unused — confirm intent before reusing this cell.
chennai_all_data = pd.read_sql_query('''
SELECT pkey, created_at, text, disaster_type, report_data, tags FROM riskmap.all_reports
''', params={"start_date": start_known_flood, "end_date": end_known_flood}, con=defaultConfig["database_engine"], index_col="pkey")
pd.options.display.max_rows = None
pd.options.display.max_colwidth = 1000
#from IPython.display import display
# chennai_all_data
# -
set(chennai_all_data.index)
import numpy as np
# Column vector of ten zeros with only the first entry set to one.
res = np.zeros((10, 1))
res[0, 0] = 1
res
def get_data_bin_by_minute(start_date, end_date, interval="'900 minute'"):
    """ Gets data from sql database between start_date and end_date
    Args:
        start_date (str): the start date and time as a ISO8601 string
        end_date (str): the end date and time as an ISO8601 string
        interval (str): a postgresql interval string
    Returns:
        Pandas dataframe, with the index being a date and the 'count' column
        saying how many flood reports were received on that interval
        Zero values are included for intervals that do not have any reports

    Note:
        Relies on a module-level `engine` database connection which is not
        created in this notebook — TODO confirm it exists before calling.
    """
    date_trunc_to = "minute"
    # generate_series builds every interval start in the window ("empty_hours");
    # LEFT JOIN against the per-interval report counts so empty intervals keep a
    # zero via COALESCE. Reports whose text matches "test" (any case) are dropped.
    num_reports_with_zeros = pd.read_sql_query('''
SELECT date, COALESCE(count, NULL, 0) as count FROM
(SELECT date_trunc(%(date_trunc_to)s, offs) as date FROM
generate_series(
%(start_date)s::timestamptz,
%(end_date)s::timestamptz,
%(interval)s::interval
) as offs ORDER BY date ASC) empty_hours
LEFT JOIN
(select date_trunc(%(date_trunc_to)s, created_at), count(pkey)
from riskmap.all_reports
WHERE text NOT SIMILAR To '%%(T|t)(E|e)(S|s)(T|t)%%'
GROUP BY date_trunc(%(date_trunc_to)s, created_at)
) no_test
ON date = date_trunc
''', params={"start_date":start_date, "end_date":end_date, "interval":interval, "date_trunc_to":date_trunc_to}, con=engine, index_col="date", parse_dates={"date":{"utc":True}})
    return num_reports_with_zeros
# Bin report counts over the known flood window (default 900-minute interval).
df = get_data_bin_by_minute(start_known_flood, end_known_flood)
df
# Single-row stand-in report, indexed by pkey.
mock = pd.DataFrame({"text": ["sentence test"]}, index=pd.Index([125], name="pkey"))
mock.iloc[0]["text"]
import torch
# A Python list can mix a 0-d tensor with plain ints.
t = torch.tensor(4)
l = [t, 32, 3]
np.array(l)  # numpy coerces the tensor alongside the ints
l.pop(0)
l.append(534)
l
l[-1] - l[0]
| Chennai_scratch_pad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:eu-west-1:470317259841:image/datascience-1.0
# ---
# Now we use a CNN on the Mnist dataset
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
# Functional-API CNN: three conv/pool blocks, then a dense softmax classifier.
inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(filters=32, kernel_size=3, activation="relu")(inputs)
x = layers.MaxPooling2D(pool_size=2)(x)
x = layers.Conv2D(filters=64, kernel_size=3, activation="relu")(x)
x = layers.MaxPooling2D(pool_size=2)(x)
x = layers.Conv2D(filters=128, kernel_size=3, activation="relu")(x)
x = layers.Flatten()(x)
outputs = layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Add a channel axis and scale pixel values to [0, 1].
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype("float32") / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype("float32") / 255
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer="rmsprop",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_images, train_labels, epochs=5, batch_size=64)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f"Test accuracy: {test_acc:.3f}")
| A_cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Demonstration of System Identification using a Convolutional Layer
#
# This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Data-Driven Signal Processing. Please direct questions and suggestions to [<EMAIL>](mailto:<EMAIL>).
# System identification for a linear-time invariant (LTI) system $y[k] = \mathcal{H} \{ x[k] \} = x[k] \ast h[k]$ refers to estimating the impulse response (or transfer function) of the system from known input $x[k]$ and output $y[k]$ signals. In this example, a convolutional layer with one channel serves as a model for an LTI system. The parameters of the model are estimated from example input and output signals.
# +
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Input, Conv1D
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
# -
# ### Generate Examples
#
# A set of examples is computed by convolving a normal distributed random signal with the impulse response of the system.
# +
# Number of examples and samples per example.
N = 1000
F = 256
# White-noise input signals, one per row.
X = np.random.normal(size=(N,F))
# True impulse response of the system (length 16).
h = [1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 0]
# Output examples: convolve every row with h; 'valid' keeps only full overlaps.
Y = np.array([np.convolve(row, h, mode='valid') for row in X])
# -
# ### Definition and Training of the Model
#
# A model with one one-dimensional convolutional layer is defined. The number of channels is chosen as $C=1$, the kernel size is chosen accordingly to the length of the true impulse response of the system and a bias is excluded from the model. The mean-squared error (MSE) is used as loss function.
# +
# set up the model
inputs = Input(shape=(X.shape[1],1))
z = Conv1D(1, 16, use_bias=False)(inputs)
model = Model(inputs, z)
# compile the model
model.compile(loss="mse", optimizer="adam")
model.summary()
# -
# Now the model is trained for 100 epochs
# (history keeps the per-epoch loss for the plot below)
history = model.fit(X, Y, epochs=100, batch_size=32)
# ### Evaluation of the Model
#
# The trained model is evaluated by inspecting the evolution of the loss over the epochs.
# Loss curve over the training epochs.
plt.figure()
plt.plot(history.history['loss'])
plt.ylabel('loss')
plt.xlabel('epoch')
plt.grid()
# Now the estimated impulse response $\hat{h}[k]$ is derived from the model by extracting the coefficients from the convolutional layer and reversing their order. The latter is necessary since convolutional layers essentially perform a correlation. A comparison of the true impulse response (line plot) with the estimated coefficients reveals that the impulse response is estimated accurately.
# +
# Conv1D computes a correlation, so reverse the learned kernel to obtain h-hat.
hp = model.layers[1].weights[0].numpy()[::-1]
plt.stem(np.squeeze(hp), label=r'estimated $\hat{h}[k]$')
plt.plot(h, label=r'true $h[k]$', color='C1')
plt.xlabel(r'$k$')
plt.legend()
plt.grid()
# -
# **Copyright**
#
# This notebook is provided as [Open Educational Resource](https://de.wikipedia.org/wiki/Open_Educational_Resources).
# The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/)
# , the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: <NAME>, Data driven audio signal processing - Lecture supplementals.
| convolutional_neural_networks/system_identification_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import json
from io import BytesIO
from PIL import Image
# +
# modified from https://github.com/longquanwu/huabei
# Ski-resort photo walls (list endpoint)
__api_snow_pack_list = 'https://api.fenxuekeji.com/api/pw/photo_walls'
# Photo wall of a resort for a given day
__api_snow_pack_photo_walls_by_day = 'https://api.fenxuekeji.com/api/pw/photo_walls/%s/daily'
# Recommended photo walls of a resort (unused)
# __api_snow_pack_recommend_photo_walls = 'https://api.fenxuekeji.com/api/pw/photo_walls/%s/recommend_list'
# Daily list of a resort (unused)
# __api_snow_pack_day_list = 'https://api.fenxuekeji.com/api/pw/photo_walls/%s/dailies'
page = 1
# -
# Resort list (queried around a lat/lng point)
# params1 = {'able_type': 'ski_ranch', 'lat': 39.97696126302083, 'lng': 116.4195960828993, 'page': page}
params1 = {'able_type': 'ski_ranch', 'lat': 40.96534722000000, 'lng': 115.3931194000000, 'page': page}
data1 = requests.get(__api_snow_pack_list, params1)
ski_ranch_list = data1.json()['data']['photo_walls']
# Confirm which resort came back first
ski_ranch_list[0]['name']
# Save its ID for the per-day query
uuid = ski_ranch_list[0]['uuid']
# +
# Target date (YYYY-MM-DD)
date_string = '2019-03-31'
# Query parameters; 'apn' selects a time-of-day bucket
# (a1 = before 10:00, n1 = 10-12, n2 = 12-14, p1 = after 14:00)
time_type = {
    'a1': '10点前',
    'n1': '10~12点',
    'n2': '12~14点',
    'p1': '14点后'
}
params2 = {'apn': 'n1', 'datestring': date_string, 'device': 'pc', 'page': page}
data2 = requests.get(__api_snow_pack_photo_walls_by_day %uuid, params2)
meta_data = data2.json()['data']
photos = meta_data['photos']
# -
# Fetch the first photo at the 1000-px resolution and display it inline.
img_url = photos[0]['image']['x1000']
image = Image.open(BytesIO(requests.get(img_url).content))
image
print (img_url)
| from_fenxuekeji/01.Test_API_get_img.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 朴素贝叶斯
import numpy as np
import pandas as pd
from scipy import stats
# ## 正确地读取数据
#
# 注意原始数据文件的格式,对其进行正确地处理后读入两个 DataFrame:`adult_data_df` 是训练集, `adult_test_df` 是测试集。DataFrame 中名为“50K”的列为标签(即分类)。
#
# 读取数据的方法与上个实验(决策树算法)完全相同。
# +
col_names = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', '50K']
# sep=', ' because the raw files put a space after every comma; the test file's
# first row is a banner line, hence skiprows=[0].
adult_data_df = pd.read_csv('dataset/adult.data', index_col=False, header=None, names=col_names, sep=', ', engine='python')
adult_test_df = pd.read_csv('dataset/adult.test', skiprows=[0], index_col=False, header=None, names=col_names, sep=', ', engine='python')
adult_test_df['50K'] = adult_test_df['50K'].map(lambda x: x[:-1]) # strip the trailing period from the test labels
# -
# ## 补充缺失值
#
# 通过对数据的基本观察得知,缺失值所在的列均为离散属性,因此只需要对离散缺失值进行补全即可,本例数据集上无需考虑连续型数据的补全。我采用的方法是使用该列出现次数最多的值(即众数)代替缺失值。
#
# 补充缺失值的方法与上个实验(决策树算法)完全相同。
# +
# Fill missing values: every affected column is categorical, so replace each
# '?' with the column's mode (most frequent value). The original duplicated
# this loop for both frames; it is factored into one helper here.
def _fill_missing_with_mode(df, title):
    """Replace '?' entries in every column of *df* with that column's mode, in place."""
    print(title)
    mode_df = df.mode()  # per-column modes
    for col in df:
        if '?' in df[col].tolist():
            missing_count = df[col].value_counts()['?']  # number of missing entries
            df[col] = df[col].replace('?', mode_df[col][0])
            print('{}: {} missing values are replaced with "{}"'.format(col, missing_count, mode_df[col][0]))
_fill_missing_with_mode(adult_data_df, '[adult.data]')
print('-------------------------------')
_fill_missing_with_mode(adult_test_df, '[adult.test]')
# -
# ## 预测和测试
#
# 对于测试集中的每个样本,使用朴素贝叶斯方法进行预测,然后与标签比对,并统计准确率。
# +
# Continuous attributes; everything else is treated as categorical.
continuous_attrs = {'age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week'}
# Probability estimation
def probability(df, attr, value):
    """Return P(attr = value) estimated from *df*.

    Params:
        df: dataset (pandas DataFrame).
        attr: attribute (column) name.  (Docstring previously said ``attr_``.)
        value: attribute value.
    Return:
        For a categorical attribute, the fraction of rows equal to *value*;
        for a continuous attribute, the Gaussian probability density at
        *value*, with mean and variance estimated from the column.
    """
    attr_series = df[attr]
    if attr in continuous_attrs:  # continuous: fit a normal distribution
        mean = attr_series.mean()  # sample mean
        var = attr_series.var()  # sample variance
        return stats.norm.pdf(value, loc=mean, scale=np.sqrt(var))  # Gaussian pdf
    else:  # categorical: empirical relative frequency
        # Vectorized count instead of materializing a Python list (O(n) copy).
        return (attr_series == value).sum() / len(df)
# -
def predict(sample):
    """Classify one sample with naive Bayes.

    Params:
        sample: the sample to classify (a pandas Series; its '50K' label
            column is skipped when multiplying likelihoods).
    Returns:
        The predicted class label.
    """
    class_list = ['<=50K', '>50K']  # all classes
    max_prob = 0
    max_class = ''
    # iterate over every possible class (only two here)
    for class_ in class_list:
        class_df = adult_data_df[adult_data_df['50K']==class_]  # training rows of this class
        # BUG FIX: the prior must belong to the *current* class; the original
        # hard-coded '<=50K' so both classes used the same prior.
        prob = adult_data_df['50K'].value_counts().get(class_, 0) / len(adult_data_df)
        for attr in sample.index:
            if attr == '50K':  # the label column is not a feature
                continue
            prob *= probability(class_df, attr, sample[attr])  # multiply in each likelihood
        if prob >= max_prob:
            max_prob = prob
            max_class = class_
    return max_class  # the most probable class
# Score the classifier against every test sample.
correct_count = sum(
    1 for _, sample in adult_test_df.iterrows()
    if predict(sample) == sample['50K']
)
print('准确率:{:.3%}'.format(correct_count / len(adult_test_df)))
| E12_NB/src/NaiveBayes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# __Method__
#
# Soccer is played by 11 players on each side, therefore, the best method for capturing the pattern of teams from optical tracking data seems to use players’ individual attributes. Given the data for every player, we initialize the players to some canonical order and the order remains fixed throughout the match. For $N = 22$ players on the pitch, the data representation at a frame t can be set as $X_t = [x1, y1, x2, y2, x3, y3, ..., x22, y22]^T$. Directly using players’ individual jersey numbers for the analysis seems to be a good approach. However, it lacks important contextual concepts such as when a substitution occurs, players receive medical treatment outside of the pitch, a player is sent away, or when analyzing different teams (i.e., different jersey numbers or identities). To deal with all these problems and players’ positional interchanges, a role based analysis is conducted to make dynamic representation possible while initializing the players into the order that is based on their roles. This approach allows each player to be assigned to different roles, only role per frame while changing their positions throughout the match. After grouping players to particular roles, we use players’ $(x, y)$ coordinates and speed attributes to generate features, such as average coordinates and the fastest player attributes in the group. Soccer has its own well-established game strategies such as the tendency of teams to keep their formation structure when opposition team owns the ball. To carry out a role assignment, a cumulative average position of a team is calculated at the frames when the rival team has the ball. A cumulative average position is calculated for the window of the previous 15 minutes and updated at each minute. For each frame, player’s average $(x, y)$ positions is scaled based on the dispersion of the players’ distribution.
#
# __How we assign role to each player?__
# 1. Define which player's in the game.
# 2. Generate players' average positions based on their position in last 15 minutes.
# 3. Each minute update their average position.
# 4. Scale average positions to the range $\left [\delta^{1} + \frac{1}{N} \sum_{i}^{N} \left ( p_i-\bar{p} \right )^{2}, \delta^{2} - \frac{1}{N} \sum_{i}^{N} \left ( p_i-\bar{p} \right )^{2} \right ]$ based on their standard deviation of averge position data $\eqref{eq:pythagoras}$.
# 5. Separate the pitch into different role-based segments.
# 6. Assign role to player based on given pitch segments.
#
# $$
# \begin{equation}
# p^{'}_{k} = \delta_{2}- \frac{\left( \left( \delta_{2}-\frac{1}{N} \sum_{i}^{N} \left ( p_i-\bar{p} \right )^{2} \right) - \left(\delta_{1}+\frac{1}{N} \sum_{i}^{N} \left ( p_i-\bar{p} \right )^{2} \right)\right)\left(\alpha-p_{k}\right) }{\alpha - \beta}
# \label{eq:pythagoras}
# \tag{1}
# \end{equation}
# $$ where $\delta^{1}$ are $\delta^{2}$ respectively start and end points of a segment for a given axis, $\alpha = \underset{1\leq i \leq N }{\max p_i}$, and $\beta = \underset{1\leq i \leq N }{\min p_i}$.
# 7. Calculate the features for each player
#
#
#
# <img src="../assets/scaling_exmp.png" width="400"/>
#
# ***note:*** *The image above depicts the role assignment of players after scaling their average positions over the span of the last 15 minutes.*
#
# Detailed info on features and their calculations can be found on classes.FeatureBuilder. Target data is derived from a player who owns the ball. On raw data, we have no information on ball location or its states. We are only able to determine the ball location if a specific player who has the ball at the moment. Then we set player's location as a ball location. Therefore, we have no information on a significant amount of data frames when the ball passes from one player to another and when game stops. We only going to use data frames when some player has the ball.
# **Raw data-dict structure for a single frame** (structure of per_sec_data_id.json):
#
# {<br/>
#   **'distance'**: 3.1426183970482264, <br/>
#   **'half'**: 1, <br/>
#   **'hasballJerseyNumber'**: 6, <br/>
#   **'hasballTeamId'**: 101, <br/>
#   **'id'**: 280999, <br/>
#   **'jerseyNumber'**: 6, <br/>
#   **'matchId'**: 60568, <br/>
#   **'minute'**: 19, <br/>
#   **'second'**: 19, <br/>
#   **'speed'**: 3.139151535835957, <br/>
#   **'teamId'**: 101, <br/>
#   **'xpos'**: 34.366599999999686,<br/>
#   **'ypos'**: 23.508333333333262
# <br/>
# }
# +
import json
import pickle
import pandas as pd
import numpy as np
import csv
import math
from scipy.ndimage.interpolation import shift
import operator
import scipy as sc
# our common models and utility functions
from classes.DataModel import *
from classes.PlayersRoleSet import *
from classes.FeatureBuilder import *
from classes.Utils import load_json_content
pd.options.display.max_columns = None  # show every column when displaying frames
pd.set_option('compute.use_bottleneck', True)  # accelerate reductions where available
pd.set_option('compute.use_numexpr', True)
# -
# ### Use different segment division and roles assignment
# +
# first we should decide what kind of segment division and role assignment we're going to employ.
# Then based on role group, we will calculate all features combinations in next steps.
# BELOW IS AN EXAMPLE OF HOW TO OVERRIDE FUNCTIONS OF PLAYERSROLESET: YOU CAN TRY YOUR OWN STUFF
# Here you define segment coordinates and override find_segments functions below to seperate them.
# new_role_segment_coords = {'LB':[-1, 40, -1, 19], # Left Back
# 'CB':[-1, 35, 19, 49], # Center Back
# 'RB':[-1, 40, 49, 69], # Right Back
# 'LW':[ 40, 106, -1, 19], # Left Winger
# 'CM':[ 35, 75, 19, 49], # Center Midfielder
# 'RW':[ 40, 106, 49, 69], # Right Winger
# 'CF':[ 75, 106, 19, 49]} # Center Forward/Attacking Middle
# we need to override our find_segments function in order to do the 2nd role assignment
# class PlayersRoleSet_New(PlayersRoleSet):
# def find_segments(self, x, y):
# for role, segment in self.role_segment_coords.items():
# if (x>=segment[0] and x<=segment[1] and y>=segment[2] and y<=segment[3]):
# return role
# return None
# playersRoleSet_New = PlayersRoleSet_New()
# playersRoleSet.set_role_segment_coords(new_role_segment_coords)
# -
# INIT PlayersRoleSet class (uses its default pitch segments / role definitions)
playersRoleSet = PlayersRoleSet()
# +
# Running this you can see the list of all feature combinations we're going to use
featureBuilder = FeatureBuilder(list(playersRoleSet.get_role_segment_coords().keys()))
feature_list = featureBuilder.get_feature_labels()
feature_list[:10] # print the first 10 features
# -
# #### More detailed info on the features list
#
# group features (home&away) | Seperate Team features (home&away) | Teams features (all together) | Refree features
# --- | --- | --- | ---
# avrg x,y | $dbscan^{5*}$ avrg x,y | avrg x,y | x,y
# avrg speed | dbscan avrg speed | avrg speed | speed
# avrg speed direction on x,y | inner distance to dbscan | inner distance to avrg pos | direction on x,y
# avrg pos | gk x,y | dbscan avrg x,y |
# $inner distance^{1*}$ to avrg pos | gk speed | dbscan avrg speed |
# $slow^{2*}$ players' avrg x,y | gk direction on x,y | inner distance to dbscan pos |
# slow players' avrg x,y | min x,y | |
# $hir^{3*}$ players' avrg x,y | min (x,y)'s speed | |
# hir players' avrg x,y | min speed | |
# $sprint^{4*}$ players' avrg x,y | min speed's x,y | |
# sprint players' avrg x,y | max x,y | |
# | max (x,y)' speed | |
# | max speed | |
# | max speed's x,y | |
#
# $^{1*}$Inner distance shows the sum of distance of players' from the average position of the traget cluster.
#
# $^{*2, *3, *4}$Analyzing the strategic context of soccer, three speed groups were selected and their attributes were calculated. These groups are Slow, Hir (high-intensity run) and Sprint, where
# $$
# \left\{\begin{matrix}
# \vec{V}_{slow}\leq1.5\frac{m}{s},\\
# 1.5\frac{m}{s}<\vec{V}_{hir}\leq 3\frac{m}{s},\\
# 3\frac{m}{s}<\vec{V}_{sprint}.
# \end{matrix}\right.
# $$
#
# Each speed category of any group shows different characters in their distance to the ball's coordinates.
#
#
# The average, max, min (x, y) coordinates of players were used to recognize
# the moments when same team players were approaching each
# other, and eventually approaching to the ball as well, to gain
# more control of the ball. However, the average position for
# any role group did not give the desired values when some
# players in that group were nested and the remaining players
# were relatively remote.
#
# For this reason, clusters were found by
# applying density-based clustering (DBSCAN)$^{*5}$ method on
# all role groups. For both axes, we set the minimum number of
# samples in a neighborhood required to form a cluster to k (we set this number to 7 meters when calculating teams-altogether features, to 4 when calculating separate team features ) and
# the maximum distance between two samples in a neighborhood
# to 20 meters. Consequently, the average (x, y) coordinates of
# each role group were calculated for the given clusters.
# ### Calculate features
# +
# CONSTANTS
match_start_minute={1:0, 2:45} # indicate starting minutes of matches for each half
error_list = [60706, 60795, 60798, 60845, 116001326] # match IDs with data errors, skipped below
# MATCH INFO LABELS (metadata prepended to every emitted feature row)
match_info = {'match_id':None, 'half':None, 'minute':None, 'second':None, 'game_state':None, 'x':None, 'y':None}
# -
# GET GENERAL INFO ABOUT MATCHES (2017-2018 season match list)
matches = load_json_content('../data/general/matches_2017_2018_v1.json')
# +
# FEATURE EXTRACTION
# For every match: load its raw data, re-assign player roles each minute, and
# emit one feature row per second (only seconds where some player has the ball).
for match_data in matches:
    match_id = int(match_data['id'])
    if match_id not in error_list:
        match_info['match_id'] = match_id # set match id
        # get roster data
        match_squad_info = load_json_content('../data/match_' + str(match_id) + '/roster_data_' + str(match_id) + '.json')
        # get player data
        match_palyer_info = load_json_content('../data/match_' + str(match_id) + '/players_data_' + str(match_id) + '.json')
        # get per second data
        data_persec = load_json_content('../data/match_' + str(match_id) + '/per_sec_data_' + str(match_id) + '.json')
        # INIT CLASSES
        # INIT DataModel class
        dataModel = DataModel(minutes_step=15)
        # init players info
        dataModel.init_players_info(match_data, match_squad_info, match_palyer_info)
        # INIT FeatureBuilder class
        featureBuilder = FeatureBuilder(list(playersRoleSet.get_role_segment_coords().keys())) # get role list and init feature builder
        del match_squad_info, match_palyer_info
        # list to store features
        features_set = []
        # reset ball position info: x-pos, y-pos, game state to define if game stopped or not
        ball_position = {'x':0, 'y':0, 'game_state':False}
        # track the current half and second
        match_start_half = 0
        prev_second = 0
        # ITERATE THROUGH EACH SECOND
        for data_t in data_persec:
            # AT THE BEGINNING OF EACH HALF GET PLAYER INFO FOR SELECTED TIMEWINDOW FOR ROLE ASSIGNING, THEN START ITERATION
            if data_t['half']!=match_start_half:
                dataModel.reset_positional_info()
                match_start_half=data_t['half']
                dataModel.calculate_fist_time_step_data(data_persec=data_persec, match_half=match_start_half, threshold_min=5)
                #---------- assign role -------
                # get scaled values
                scale_values=playersRoleSet.get_scaled_values(dataModel=dataModel, posCounIndex=-2)
                # iterate through each player
                for team, _ in dataModel.player_data.items():
                    for ID, playerdata in dataModel.player_data[team].items():
                        # assign a role to the player when the accumulated position counts are nonzero
                        dataModel.player_data[team][ID]['role'] = playersRoleSet.set_role(playerdata=playerdata, posCounIndex=-2, scale_values=scale_values, team=team)
                #---------- assign role -------
                prev_minute = match_start_minute[match_start_half]
            # EACH MINUTE ASSIGN ROLE TO PLAYERS
            if prev_minute!=data_t['minute']:
                prev_minute=data_t['minute']
                #---------- assign role -------
                # get scaled values
                scale_values=playersRoleSet.get_scaled_values(dataModel=dataModel, posCounIndex=-1)
                # iterate through each player
                for team, _ in dataModel.player_data.items():
                    for ID, playerdata in dataModel.player_data[team].items():
                        # assign a role to the player when the accumulated position counts are nonzero
                        dataModel.player_data[team][ID]['role'] = playersRoleSet.set_role(playerdata=playerdata, posCounIndex=-1, scale_values=scale_values, team=team)
                #---------- assign role -------
                # shift time-step data to the right after role assignment
                dataModel.shift_tms_data_right()
            # EACH SECOND CALCULATE FEATURES HERE
            # after getting all players' data for a second, calculate all features at the end of that second
            if prev_second!=data_t['second']:
                prev_second=data_t['second']
                # make sure we have more than six players on the pitch for each team at the time
                if len(featureBuilder.role_data['home']['Team']['all_x'])>6 and len(featureBuilder.role_data['away']['Team']['all_x'])>6:
                    # get match info and Y (the previous second's metadata and ball position)
                    match_info['half'] = data_t['half']
                    match_info['minute'] = data_t['minute']-1 if (data_t['second']==0) else data_t['minute']
                    match_info['second'] = 59 if (data_t['second']==0) else data_t['second']-1
                    match_info['game_state'] = 1 if ball_position['game_state']==True else 0
                    match_info['x'] = -1 if (ball_position['game_state']==False and ball_position['x']==0) else ball_position['x']
                    match_info['y'] = -1 if (ball_position['game_state']==False and ball_position['y']==0) else ball_position['y']
                    # get features and match info then add to main list
                    features_set.append(list(match_info.values()) + featureBuilder.calculate_features())
                # at the end, reset the per-second accumulator arrays
                featureBuilder.reset_referee_data()
                featureBuilder.reset_goalkeeper_data()
                featureBuilder.reset_role_data()
                # reset ball position info
                ball_position = {'x':0, 'y':0, 'game_state':False}
            if (data_t['xpos']>-1 and data_t['xpos']<106) and (data_t['ypos']>-1 and data_t['ypos']<69):
                # for role assignment get players data when the rival team has the ball
                dataModel.add_player_activity(data_t['teamId'], data_t['hasballTeamId'], data_t['jerseyNumber'], data_t['xpos'], data_t['ypos'])
                # round positions and speed to 2 decimal places
                xpos = round(data_t['xpos'], 2)
                ypos = round(data_t['ypos'], 2)
                speed = round(data_t['speed'], 2)
                # this player currently owns the ball, so use his position as the ball position
                if (data_t['hasballTeamId']==data_t['teamId'] and data_t['hasballJerseyNumber']==data_t['jerseyNumber']
                        and data_t['teamId']!=0 and data_t['jerseyNumber']!=0):
                    ball_position['x'] = xpos
                    ball_position['y'] = ypos
                    ball_position['game_state'] = True
                if data_t['teamId']!=dataModel.referee_id:
                    team = dataModel.team_id_dict[data_t['teamId']]
                    role = dataModel.player_data[team][data_t['jerseyNumber']]['role']
                    if (role != None and np.sum(dataModel.player_data[team][data_t['jerseyNumber']]['rolePosCount'])!=0):
                        if role!='GK':
                            # define the speed category
                            speed_prefix='sprint' if speed>3 else 'slow' if speed<1.5 else 'hir'
                            for tmp_role in [role, 'Team']:
                                for tmp_speed in [speed_prefix, 'all']:
                                    # add player values
                                    featureBuilder.role_data[team][tmp_role][tmp_speed+'_x'].append(xpos)
                                    featureBuilder.role_data[team][tmp_role][tmp_speed+'_y'].append(ypos)
                                    featureBuilder.role_data[team][tmp_role][tmp_speed+'_speed'].append(speed)
                        else:
                            # add goalkeeper values
                            featureBuilder.goalkeeper_data[team]['x'].append(xpos)
                            featureBuilder.goalkeeper_data[team]['y'].append(ypos)
                            featureBuilder.goalkeeper_data[team]['speed'].append(speed)
                else:
                    # add referee values
                    featureBuilder.referee_data['x'].append(xpos)
                    featureBuilder.referee_data['y'].append(ypos)
                    featureBuilder.referee_data['speed'].append(speed)
        # SAVE FEATURES SET
        with open('../data/general/feature-set/match_{}.csv'.format(match_info['match_id']),'w') as file:
            writer = csv.writer(file)
            writer.writerow(list(match_info.keys())+featureBuilder.get_feature_labels())
            writer.writerows(features_set)
            file.close()  # redundant: the with-block already closes the file
        del dataModel
        del featureBuilder
        del data_persec
| src/feature_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: IPython (Python 3)
# language: python
# name: python3
# ---
# +
import glob
import matplotlib as mpl
import re
import pandas as pd
# -
import sys
print(sys.version)
# %matplotlib inline
# !ls ../../unused_reads/
module_dir = "../../unused_reads/"
# Import .py files from a different path:
# first add that dir to sys.path so `analysis` and `unused_reads` resolve.
sys.path.append(module_dir)
import analysis
import unused_reads as ur
# + active=""
# import glob
# import re
#
# import matplotlib.pyplot as plt
# import matplotlib as mpl
#
# import pandas as pd
# import seaborn as sns
#
#
# +
# pd.set_option('display.width', 1000)
# -
pd.options.display.max_colwidth = 1000  # show long read names / subject titles in full
PLOT_DIR = '../../unused_reads/plots/'
ur.create_dir(PLOT_DIR)
# Get the loaded dataframes.
df_dict = analysis.run_analysis(make_plots=False)
df_dict.keys()
unspecific = df_dict['unspecific']
unmapped = df_dict['unmapped']
# ## Unmapped reads:
unmapped.head(2)
unmapped[['sample', 'downsample granularity']].drop_duplicates()
# analysis.plot_length_dist(analysis.unmapped, 'unmapped', analysis.PLOT_DIR)
# analysis.plot_pident_dist(analysis.unmapped, 'unmapped', analysis.PLOT_DIR)
# ## Multiply Mapped Reads
# ## Look at summary data
# NOTE(review): min_pident/min_length presumably filter weak alignments
# (percent identity / alignment length) — confirm in analysis.summarise.
unmapped_summary = analysis.summarise(df=unmapped, min_pident=90, min_length=140,
                                      downsample_granularity=10000)
unmapped_summary.head()
unmapped_summary.to_csv("160324_explore_unmapped.csv")
unmapped_simplified = unmapped_summary[['stitle', 'qseqid', 'sample']].drop_duplicates()
unmapped_simplified.head()
# +
### Look at just one sample
# -
# Restrict to one sample for a closer look.
unmapped_simp_74 = unmapped_simplified[unmapped_simplified['sample']=='74_LOW10']
test_df = unmapped[unmapped['sample']=='74_LOW10']
print(test_df.shape[0])
too_many = analysis.reads_appearing_more_than_once(test_df)
too_many
# + active=""
# analysis.save_summary(unmapped[unmapped['sample']=='74_LOW10'], '74_LOW10', module_dir + 'results/')
# -
# ### Apply save_summary for each sample.
# + active=""
# for desc, df in unmapped.groupby(['sample', 'downsample granularity']):
# print(desc)
# analysis.save_summary(df,
# '{}_{}'.format(desc[0], desc[1]),
# module_dir + 'unmapped-final' + '/results_summary/' + '/downsample_{}/'.format(desc[1]))
# -
# Write per-sample result summaries for both read categories.
analysis.summarise_results_across_samples(unmapped, module_dir + 'unmapped-final' )
analysis.summarise_results_across_samples(unspecific, module_dir + 'multiply_mapped-final' )
| ipython_notebooks/160321_unused_read_investigation/unused_reads.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: openmc-env
# language: python
# name: openmc-env
# ---
# # Fuel depletion
#
# Please indicate your name below, since you will need to submit this notebook completed latest the day after the datalab.
#
# Don't forget to save your progress during the datalab to avoid any loss due to crashes.
name=''
# During this datalab we will investigate how the fuel composition changes during the reactor operation. For this we will use openmc's `deplete` module. OpenMC solves the transport problem with a Monte Carlo approach to obtain reaction rates, and then the Batemen-equations are solved with these reaction rates. Nevertheless, the coupling between the depletion and the transport calculation is not a trivial task, there are various coupling schemes to achieve that. Within this datalab we do not look into the depth of various coupling techniques, and we will just use the most default settings.
#
# In order to achieve an accurate depletion study one needs to have as many time steps when the depletion equations are solved as possible, and due to this, burnup calculations are time consuming, especially when the reaction rates are based on Monte Carlo transport solvers. During this datalab we will focus on the phenomena and on qualitative results, therefore we will not care about accuracy. That said, we will use only a few particles per batch, and very long depletion steps. We will also neglect downtime periods (when the reactor is not operating).
#
# The lab is going to have two experiments.
#
# 1. We will use our good old PWR pincell model to investigate how the neutron spectrum, the k-eigenvalue and the nuclide concentrations change over time
# 2. We will build a graphite moderated, natural uranium fuelled reactor's pincell model to estimate the Plutonium production. This is already part of your last set of home assignments, so don't feel stressed to finish it today.
#
# Let's get started.
#
# ## Experiment 1
#
# First, we will define the same PWR pincell model as in the previous datalabs. The only newly added feature here is that we set the `volume` attribute for the object `uo2`. This is necessary for burnup calculations. Since we have an axially infinite pin, we only gave the cross sectional area of the pin.
#
# You can execute the code below. And you can already execute the next code block, because it takes some time to run. During that time you can do the reading.
# +
import openmc
import math
import os
import numpy as np
import matplotlib.pyplot as plt
# --- Materials -------------------------------------------------------------
# UO2 fuel at 1200 K; nuclide fractions below are the add_nuclide defaults
# (atom fractions), giving 4% U-235 in the uranium.
uo2 = openmc.Material(1, "uo2", temperature=1200)
# Add nuclides to uo2
uo2.add_nuclide('U235', 0.04)
uo2.add_nuclide('U238', 0.96)
uo2.add_nuclide('O16', 2.0)
uo2.set_density('g/cm3', 10.5)
# Depletable materials must have a volume. The pin is axially infinite, so
# the cross-sectional area of the fuel (pi * r^2) is used.
uo2.volume = math.pi * 0.41 ** 2
zirconium = openmc.Material(2, "zirconium", temperature=900)
zirconium.add_element('Zr', 1.0)
zirconium.set_density('g/cm3', 6.6)
water = openmc.Material(3, "h2o")
water.add_nuclide('H1', 2.0)
water.add_nuclide('O16', 1.0)
water.set_density('g/cm3', 0.75)
# Thermal scattering law for hydrogen bound in water.
water.add_s_alpha_beta('c_H_in_H2O')
mats = openmc.Materials([uo2, zirconium, water])
mats.export_to_xml()
# --- Geometry: concentric cylinders (fuel / gap / clad) in a square cell ---
fuel_or = openmc.ZCylinder(r=0.41)
clad_ir = openmc.ZCylinder(r=0.42)
clad_or = openmc.ZCylinder(r=0.45)
fuel_region = -fuel_or
gap_region = +fuel_or & -clad_ir
clad_region = +clad_ir & -clad_or
fuel = openmc.Cell(1, 'fuel')
fuel.fill = uo2
fuel.region = fuel_region
# The gap cell has no fill, i.e. it is treated as void.
gap = openmc.Cell(2, 'air gap')
gap.region = gap_region
clad = openmc.Cell(3, 'clad')
clad.fill = zirconium
clad.region = clad_region
pitch = 1.26
# Reflective boundaries make the single pin represent an infinite lattice.
left = openmc.XPlane(x0=-pitch/2, boundary_type='reflective')
right = openmc.XPlane(x0=pitch/2, boundary_type='reflective')
bottom = openmc.YPlane(y0=-pitch/2, boundary_type='reflective')
top = openmc.YPlane(y0=pitch/2, boundary_type='reflective')
water_region = +left & -right & +bottom & -top & +clad_or
moderator = openmc.Cell(4, 'moderator')
moderator.fill = water
moderator.region = water_region
root = openmc.Universe(cells=(fuel, gap, clad, moderator))
geom = openmc.Geometry()
geom.root_universe = root
geom.export_to_xml()
# --- Tally: flux spectrum in the fuel on a log-spaced energy grid ----------
cell_filter = openmc.CellFilter(fuel)
# 500 bin edges from 1e-2 eV to 1e7 eV -> 499 energy groups.
energybins=np.logspace(-2,7,500)
energy_filter = openmc.EnergyFilter(energybins)
t = openmc.Tally(1)
t.filters = [cell_filter,energy_filter]
t.scores = ['flux']
tallies = openmc.Tallies([t])
tallies.export_to_xml()
# --- Settings: point source at the pin centre, few particles for speed -----
point = openmc.stats.Point((0, 0, 0))
src = openmc.Source(space=point)
settings = openmc.Settings()
settings.source = src
settings.batches = 100
settings.inactive = 10
settings.particles = 500
settings.export_to_xml()
# -
# Now, instead of just running the code as before, we will set up the depletion calculation. For further details you can visit the [documentation](https://docs.openmc.org/en/stable/usersguide/depletion.html). We will need to do several things:
#
# 1. define an `Operator()` object, which will be our transport operator. Here we link the geometry and the settings objects.
# 2. within the operator we provide the path to the depletion chain we would like to use. OpenMC provides some [pre-generated chains](https://openmc.org/depletion-chains/). This is used for the depletion solver to set up the Bateman equations.
# 3. we define a power or a power density (below we defined the power density in W/gHeavyMetal)
# 4. we define the depletion steps. This we can give in time or in burnup units, below 8 steps are given each are 5 MWd/kgU.
# 5. Then we define an integrator (the scheme how the transport and the depletion is coupled). OpenMC provides other, more accurate, and more demanding schemes, which you can find in the documentation.
# 6. Finally we call the `integrate()` method of the integrator. This will run the simulation.
#
# By default openMC is going to deplete every material which contains fissionable isotopes.
#
# (**Note**, in case you have difficulties with running the problem -which might happen with the virtual machine and limited computational resources-, then you can find the output files in the '/results' folder, you only need to modify the path in the code blocks performing the post-processing to use these files).
# +
import openmc.deplete
import os
# Transport operator: couples the OpenMC model (geometry + settings) with the
# depletion solver. The chain file defines the nuclides and reactions used to
# set up the Bateman equations.
operator = openmc.deplete.Operator(geom, settings, '/home/rfp/Depletion_chains/chain_casl_pwr.xml')
powdens = 27.39726 #W/gHM; this power density corresponds to 10MWd/kgHM burnup over one year
# Eight depletion steps of 5 MWd/kg each, i.e. 40 MWd/kg total burnup.
burnup_steps = np.array([5,5,5,5,5,5,5,5])
# Predictor scheme: the simplest (and least accurate) transport/depletion
# coupling; reaction rates are held constant over each step.
integrator = openmc.deplete.PredictorIntegrator(operator, timesteps=burnup_steps,
                                                power_density=powdens,timestep_units='MWd/kg')
# Runs the coupled transport + depletion calculation (slow).
integrator.integrate()
# -
# If you list the contents of the folder, you can see that several h5 output files were created. Some are called 'openmc_simulation_nN.h5' (where N is the index of the depletion step, 0 stands for zero burnup). These files contain the tally results. We had only one tally this time with id 1.
#
# Let's plot the group flux per source particle for the 0th step (fresh fuel) and for the 8th step (40 MWd/kgU). Do you notice any change? If yes, what do you think what is the reason for that?
import matplotlib.pyplot as plt
plt.figure()
# Compare the fresh-fuel spectrum (step 0) with the end-of-life spectrum
# (step 8, i.e. 40 MWd/kgU burnup).
for i in [0,8]:
    #print('openmc_simulation_n%d.h5'%i)
    # Statepoint file of depletion step i; tally id 1 holds the flux spectrum.
    sp = openmc.StatePoint('openmc_simulation_n%d.h5'%i)
    tallydf=sp.tallies[1].get_pandas_dataframe()
    # Plot the group flux at the mid-point energy of each bin.
    plt.loglog((tallydf['energy low [eV]']+tallydf['energy high [eV]'])/2,tallydf['mean'],label=str(i))
plt.xlabel('Energy (eV)')
plt.ylabel('Group flux per particle')
plt.xlim(1e-2,1e7)
#plt.ylim(1e-3,1)
plt.legend()
plt.show()
# Change this line to your conclusion!
# There is also an other new file named 'depletion_results.h5'. This file contains the nuclide inventory and the k-eigenvalues during the burnup (and also some other quantities, which you can list by hitting tab on `results.`).
#
# We will load the results into the variable `results`, and use the `get_eigenvalue()` method to read in the k-eigenvalue. The change of the k-infinity value with burnup is often referred to as reactivity swing.
#
# Of course in practice the decrease of reactivity due to depletion is compensated for (eg. by decreasing the boron content of the coolant during the reactor cycle).
# +
results = openmc.deplete.ResultsList.from_hdf5("./depletion_results.h5")
# get_eigenvalue() returns the step times (in seconds, per the conversion
# below) and an array with [k, standard deviation] per step.
time, k = results.get_eigenvalue()
time /= (24 * 60 * 60) # convert back to days from seconds
# Reactivity swing: k-infinity vs. time with its Monte Carlo uncertainty.
plt.errorbar(time, k[:, 0], yerr=k[:, 1])
plt.xlabel('Time [d]')
plt.ylabel('$k_{eff}$')
plt.show()
# -
# Now with the `get_atoms()` we will load in the atom concentration of nuclides. The `"1"` refers to material with ID 1 (the fuel). We will set the unit to atom/barn-cm (which can be converted to atom/cm3 by multiplying the value with $10^{24}$; one can also convert to g/cm3 with some mind work).
#
# (Note that the variable name `_time` begins with an underscore, this is to highlight, that we already have a `time` array with converted time units which we intend to use, therefore we will store the time information provided by `get_atoms()` in a dummy array not intended to be used later)
#
# Go ahead, and do the same for couple of other nuclides, for example Pu239, or Cs137. What do you think, why is Cs137 a good indicator of burnup?
#
# +
# U-235 concentration in material "1" (the fuel) as a function of burnup time.
_time, u5 = results.get_atoms("1", "U235",nuc_units='atom/b-cm') #we call it _time, because we already have a time variable in the correct day units which we intend to use
plt.figure()
plt.plot(time, u5, label="U235")
plt.xlabel("Time [d]")
plt.ylabel("Atom concentration (atom/b-cm)")
plt.show()
# -
# Change this line to your conclusion!
# # Experiment 2
#
# Note, that this is home assignment HA3-3, thus you will not need to submit this part the day after the datalab, also you should modify HA3-3.ipynb instead.
#
# A hypothetical dictatorship is desperately trying to build a nuclear weapon. Several years ago they have managed to acquire the design of a graphite moderated, CO2 cooled, natural uranium fuelled reactor. Your country's intelligence agency asked you to estimate the amount of plutonium the reactor produced in the previous years. You will need to make a pincell model of the reactor, and perform a depletion calculation.
#
# The following information is available to you:
#
# The fuel pin is made of UAl (density is 18.167 g/cm3) with the following atomic percentage:
# - U235 0.00707444999
# - U238 0.98792555
# - Al27 0.005
#
# The cladding is made of a mixture of Magnesium and Aluminium (density is 1.738 g/cm3), with the following weight fractions:
# - Mg 0.99
# - Al27 0.01
# (Note that you can add Mg as an element, and you can set weight fractions instead of atomic with `add_element('Mg',0.99,percent_type='wo')`)
#
# The fuel is cooled with CO2, however you know that due to its low density you can neglect it for now.
#
# The moderator is made of graphite (density is 1.7 g/cm3). For this you can `add_nuclide('C0',1)` and set the thermal scattering laws with `add_s_alpha_beta('c_Graphite')`.
#
# The geometry is the following:
# - fuel outer radius is 1.45 cm
# - clad outer radius is 1.5 cm (there is no gap between the clad and the fuel)
# - the coolant channel outer radius is 3.25 cm
# - outside the coolant we have the graphite moderator
# - the pitch of the pincell is 20.0 cm.
#
# The active core height is 52 cm, and there are 8120 such pincells in the reactor.
#
# You know from your intel, that the reactor was running for 1785 continuous days (which you can simulate with eight 223.125-day-long steps) with a power density of 0.336 W/gHM.
#
# 1. Create the openMC model for the pincell (you do not need to include any tallies)
# 2. Perform a depletion calculation
# 3. Look at the change of the k-eigenvalue over time, just because you are a curious reactor physicist
# 4. Estimate the total mass of Pu-239 at the end of the 1785 days operation (in kg)
# 5. Calculate the Plutonium-vector (the weight% of Pu-238, Pu-239, Pu-240, Pu-241, Pu-242 in the total amount of plutonium), to see whether the material would qualify as weapon grade or reactor grade plutonium (for this read up on weapon and reactor grade plutonium!).
#
# (Note, in case you have difficulties with running the problem -which might happen with the virtual machine and limited computational resources-, then you can find the output files in the '/results' folder. Nevertheless, you still need to implement the geometry, run a simple k-eigenvalue calculation and create a plot of the geometry to show that your implementation is working. But then you can use the available output for processing).
| Datalabs/Datalab11/11-Depletion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Importing necessary libraries for covid-19 data analysis
import pandas as pd
import numpy as np
import os
#Loading raw dataset of covid-19
# NOTE(review): absolute Windows path — this cell only runs on the author's
# machine; consider a relative path or a URL for portability.
data=pd.read_csv(r'C:\Users\slowjerm.jerm1\Downloads\Documents\owd-covid-data.csv')
# Bare expression: the notebook displays the whole DataFrame.
data
# -
#Checking for missing values
data.isnull().any()
#true in the above means those columns have null values ,likewise false means no null values
data.head()
#dropping null values
# NOTE(review): dropna() returns a NEW DataFrame; without assignment,
# `data` itself still contains the null rows.
data.dropna()
# +
#drop a column based on name
#Delete or drop column in pandas by column name using drop() function
# drop() also returns a copy; `data` still has 'total_cases' afterwards.
data.drop('total_cases',axis=1)
# -
# drop a column based on column index
#We have successfully dropped the column at index 3 (in the returned copy)
data.drop(data.columns[3],axis=1)
# +
# delete a column base on column name
# del mutates `data` in place — unlike the drop() calls above.
del data['continent']
data
# We have successfully deleted continent
# -
''' drop multiple column based on name'''
# drop() returns a new DataFrame with both columns removed; `data` itself
# is left unchanged. (A stray no-op literal `2` left between these lines
# has been removed.)
data.drop(['total_deaths', 'date'], axis = 1)
# +
# drop multiple columns based on column index
# Columns at positions 1 and 4 are removed in the returned copy.
data.drop(data.columns[[1,4]], axis = 1)
# +
'''Drop multiple columns between two column index in pandas
Let’s see an example of how to drop multiple columns between two index using iloc() function'''
''' Remove columns between two column using index - using iloc() '''
# data.iloc[:, 1:3] selects columns 1 and 2; drop() removes them in the copy.
data.drop(data.iloc[:, 1:3], axis = 1)
# +
#In the above example column with index 1 (2nd column) and Index 2 (3rd column) is dropped.
# +
''' Remove columns between two column using column name - using loc() '''
# .columns of the label slice 'date':'iso_code' yields the names to drop.
data.drop(data.loc[:, 'date':'iso_code'].columns, axis = 1)
# +
''' drop column name starts with a character '''
# ~ negates the boolean mask: keep columns NOT starting with 'A'.
data.loc[:,~ data.columns.str.startswith('A')]
# +
''' drop column name ends with a character'''
data.loc[:,~data.columns.str.endswith('e')]
# +
''' drop column name contains ---- drop column name like% in'''
# Case-insensitive substring match; matching columns are excluded.
data.loc[:,~data.columns.str.contains('sc',case =False)]
# +
''' drop column name using regular expression '''
# filter(regex=...) selects matching column names; those are dropped.
data[data.columns.drop(list(data.filter(regex="(Sc)+?.+")))]
# -
| dropAnddelete.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="6f776c0a7a1b5f5dff182fe32e0da17a2936220c"
# # Exercise 01 - Syntax, Variables and Numbers
# + [markdown] _uuid="313472bf1e8e8650066c4d2b480c4983c7170c36"
# Welcome to your first set of Python coding problems!
#
# **Notebooks** are composed of blocks (called "cells") of text and code. Each of these is editable, though you'll mainly be editing the code cells to answer some questions.
#
# To get started, try running the code cell below (by pressing the `►| Run` button, or clicking on the cell and pressing `ctrl+Enter`/`shift+Enter` on your keyboard).
# + _uuid="1aca02cda819d5263cd81f4bbcfa2aaf8dc138d2"
# First sanity-check cell: running it should print both lines below.
print("You've successfully run some Python code")
print("Congratulations!")
# -
print("Hello Python World)
# + [markdown] _uuid="9b97ef75e9acabc1f847f6d0f06ab647a44fb3e0"
# Try adding another line of code in the cell above and re-running it.
#
# Now let's get a little fancier: Add a new code cell by clicking on an existing code cell, hitting the `escape` key *(turn to command mode)*, and then hitting the `a` or `b` key.
# - The `a` key will add a cell above the current cell.
# - The `b` adds a cell below.
#
# Great! Now you know how to use Notebooks.
# + [markdown] _uuid="4cea9061fa5c20161f2c8c46dbd1a184512317e8"
# ## 0. Creating a Variable
#
# **What is your favorite color? **
#
# To complete this question, create a variable called `color` in the cell below with an appropriate `string` value.
# + _uuid="ecfae4ce4c4494d4e1073da6f536f9295775ae77"
# Create a variable called color with an appropriate value on the line below
# (Remember, strings in Python must be enclosed in 'single' or "double" quotes)
# NOTE(review): input() blocks waiting for keyboard input, so this cell is
# interactive and will hang in a non-interactive run.
color = input('What is your favorite color?')
# str() is redundant here — input() already returns a string.
print("It's "+ str(color))
# + [markdown] _uuid="ebf3dd970265223ba1c5ae0c09f57a57451c8635"
# <hr/>
#
# ## 1. Simple Arithmetic Operation
#
# Complete the code below. In case it's helpful, here is the table of available arithmetic operations:
#
#
#
# | Operator | Name | Description |
# |--------------|----------------|--------------------------------------------------------|
# | ``a + b`` | Addition | Sum of ``a`` and ``b`` |
# | ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |
# | ``a * b`` | Multiplication | Product of ``a`` and ``b`` |
# | ``a / b`` | True division | Quotient of ``a`` and ``b`` |
# | ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |
# | ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |
# | ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |
# | ``-a`` | Negation | The negative of ``a`` |
#
# <span style="display:none"></span>
#
# + _uuid="1aeec7bf0a7813aa00f8bc5587a3205dcc631f04"
# Rough value of pi, as given in the exercise prompt.
pi = 3.14159
diameter = 3
# The radius is half the diameter (true division yields a float).
radius = diameter / 2
# Area of a circle: pi times the radius squared.
area = pi * radius ** 2
print('Area = ' + str(area))
# -
# **Results**:
# - Area = 7.0685775
# + [markdown] _uuid="cbeff112768642501fb0c3620228fefd18fd2757"
# ## 2. Variable Reassignment
#
# Add code to the following cell to swap variables `a` and `b` (so that `a` refers to the object previously referred to by `b` and vice versa).
# + _uuid="8aba488a5eff621ab69d89eab68d89aeecccd9e5"
# a and b are lists — yet another kind of Python object, like int or float;
# they are covered in depth a few lessons from now.
a = [1, 2, 3]
b = [3, 2, 1]
######################################################################
# Swap the objects that a and b refer to.
print('a: ' + str(a))
print('b: ' + str(b))
# Tuple unpacking swaps the two references without a temporary variable.
a, b = b, a
print("After swapping a and b")
print('a: ' + str(a))
print('b: ' + str(b))
# + [markdown] _uuid="afe12df4d86734e4ed08d112a18a0b71fd323d30"
# ## 3. Order of Operations
#
#
# a) Add parentheses to the following expression so that it evaluates to 1.
#
# *Hint*: Following its default "**PEMDAS**"-like rules for order of operations, Python will first divide 3 by 2, then subtract the result from 5. You need to add parentheses to force it to perform the subtraction first.
# + _uuid="1e3cb6016456b82066cd39b0fd71257c4d127d07"
(5 - 3) // 2
# + [markdown] _uuid="ab6535061e07d0b8bfcdbb9f7883a691078bab22"
# <small>Questions, like this one, marked a spicy pepper are a bit harder. Don't feel bad if you can't get these.</small>
#
# b) <span title="A bit spicy" style="color: darkgreen ">🌶️</span> Add parentheses to the following expression so that it evaluates to **0**.
# + _uuid="e1dd7d9dd544f6e29f27379f8077170f0aa31a7c"
8 - 3 * 2 - (1 + 1)
# + [markdown] _uuid="5ad71f1a4cdcaa3d18f5401503010076088ed1aa"
# ## 4. Your Turn
# Alice, Bob and Carol have agreed to pool their Halloween candies and split it evenly among themselves.
# For the sake of their friendship, any candies left over will be smashed. For example, if they collectively
# bring home 91 candies, they'll take 30 each and smash 1.
#
# Write an arithmetic expression below to calculate how many candies they must smash for a given haul.
#
# > *Hint*: You'll probably want to use the modulo operator, `%`, to obtain the remainder of division.
# + _uuid="01ed33d051d37c136ca53d7203fc350c4c1229db"
# Variables representing the number of candies collected by Alice, Bob, and Carol
alice_candies = 121
bob_candies = 77
carol_candies = 109
# Candies left over after an even three-way split: the remainder (modulo
# operator %) of dividing the total haul by the three friends.
# (The original template's dead placeholder `to_smash = -1` was removed —
# it was immediately overwritten.)
to_smash = (alice_candies + bob_candies + carol_candies) % 3
print('The number of candies they have to smash: '+str(to_smash))
# + [markdown] _uuid="0e80e6d2e40d572729fc91291f25fde2e7e9d4d5"
# # Keep Going 💪
| assignment_3/NguyenThaoNguyen_Assigment3_Ex01_Syntax_Variable_Numbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.interactiveshell import InteractiveShell
# Echo every expression result in a cell, not only the last one.
InteractiveShell.ast_node_interactivity = "all"
# %matplotlib inline
# +
# Example of Naive Bayes implemented from Scratch in Python
# Different features in the "pima-indians-diabetes.data.csv"
# -
# - Pregnancies: Number of times pregnant
# - Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test
# - BloodPressure: Diastolic blood pressure (mm Hg)
# - SkinThickness: Triceps skin fold thickness (mm)
# - Insulin: 2-Hour serum insulin (mu U/ml)
# - BMI: Body mass index (weight in kg/(height in m)^2)
# - DiabetesPedigreeFunction: Diabetes pedigree function
# - Age: Age (years)
# - Outcome: Class variable (0 or 1)
import csv
import math
import random
# +
# Load data from file
def loadCsv(filename):
    """Read a numeric CSV file and return its rows as lists of floats.

    Parameters
    ----------
    filename : str
        Path to a CSV file in which every field is numeric.

    Returns
    -------
    list[list[float]]
        One inner list per CSV row.
    """
    # Use a context manager so the file handle is always closed
    # (the original open() left the handle dangling).
    with open(filename, 'r') as handle:
        return [[float(value) for value in row] for row in csv.reader(handle)]
# Parse the whole dataset into memory once; later cells reuse `d`.
d = loadCsv("data/pima-indians-diabetes.data.csv")
# -
# Peek at the first parsed row (all fields converted to float).
d[0]
# random.randrange(100)  # returns a random integer between 0 and 99
#
# random.random()  # returns a random float in the half-open interval [0.0, 1.0)
#
# l = ['f','gh','he','w','r','rt']
# l.pop(2) #pop() return and delete the list data at that particular index
# l
# l.pop(3)
# l
# Randomly partition a dataset into a training and a test portion.
def splitDataset(dataset, splitRatio):
    """Return [trainSet, testSet] with len(trainSet) == int(len(dataset) * splitRatio).

    Training rows are drawn without replacement at random positions; the
    rows left over form the test set. The input list is not modified.
    """
    targetSize = int(len(dataset) * splitRatio)
    remaining = list(dataset)  # shallow copy so `dataset` is untouched
    trainSet = []
    while len(trainSet) < targetSize:
        pick = random.randrange(len(remaining))
        trainSet.append(remaining.pop(pick))
    return [trainSet, remaining]
# Group rows by their class label (the last element of each row).
def separateByClass(dataset):
    """Return {class_value: [rows with that class]}, preserving row order."""
    grouped = {}
    for row in dataset:
        grouped.setdefault(row[-1], []).append(row)
    return grouped
# Arithmetic mean of a numeric sequence.
def mean(numbers):
    """Return the average of `numbers` as a float."""
    total = sum(numbers)
    return total / float(len(numbers))
# Sample standard deviation of a numeric sequence (n - 1 in the denominator).
def stdev(numbers):
    """Return the sample standard deviation of `numbers`."""
    # Average of the sequence (inlined equivalent of the mean() helper).
    avg = sum(numbers) / float(len(numbers))
    variance = sum((x - avg) ** 2 for x in numbers) / float(len(numbers) - 1)
    return math.sqrt(variance)
# zip(*d) transposes rows into columns; in Python 3 it is a lazy iterator,
# so the bare expression below only displays a zip object.
zip(*d) #it will return columns/features
# Prints one 'a' per column, confirming the transposed iteration works.
for attribute in zip(*d):
    print('a')
# Column-wise (mean, stdev) summary of a dataset, class column excluded.
def summarize(dataset):
    """Return [(mean, stdev), ...] for every attribute column except the last (class) column."""
    stats = [(mean(column), stdev(column)) for column in zip(*dataset)]
    # The final column holds the class label, so it is not summarized.
    return stats[:-1]
# Per-class attribute summaries — this dict is the Gaussian NB "model".
def summarizeByClass(dataset):
    """Return {class_value: summarize(rows_of_that_class)}."""
    return {
        label: summarize(rows)
        for label, rows in separateByClass(dataset).items()
    }
# Fit on the full dataset and display the per-class summaries.
print("These all are the mean and stdev pair of all the attribute in each class label")
dataset = loadCsv('data/pima-indians-diabetes.data.csv')
summarizeByClass(dataset) #returns the (mean, stdev) pair of every attribute for each class label
# Gaussian probability density of value x for a given mean and stdev.
def calculateProbability(x, mean, stdev):
    """Return the normal pdf N(mean, stdev) evaluated at x."""
    exponent = math.exp(-((x - mean) ** 2) / (2.0 * stdev ** 2))
    return exponent / (math.sqrt(2 * math.pi) * stdev)
# Likelihood P(x | class) for every class, assuming attribute independence
# (the "naive" in naive Bayes).
def calculateClassProbabilities(summaries, inputVector):
    """Return {class_value: product of per-attribute Gaussian likelihoods}."""
    probabilities = {}
    for label, classSummaries in summaries.items():
        likelihood = 1
        for index, (mu, sigma) in enumerate(classSummaries):
            likelihood *= calculateProbability(inputVector[index], mu, sigma)
        probabilities[label] = likelihood
    return probabilities
# Pick the class with the highest likelihood for one input row.
def predict(summaries, inputVector):
    """Return the class label with the largest likelihood (first wins on ties)."""
    probabilities = calculateClassProbabilities(summaries, inputVector)
    best = None
    for label, probability in probabilities.items():
        if best is None or probability > best[1]:
            best = (label, probability)
    return best[0] if best is not None else None
# Predict a class label for every row of a test set.
def getPredictions(summaries, testSet):
    """Return a list with one predicted label per row in `testSet`."""
    return [predict(summaries, row) for row in testSet]
# Percentage of rows whose class label was predicted correctly.
def getAccuracy(testSet, predictions):
    """Return classification accuracy in percent; a row's class is its last element."""
    hits = 0
    for row, predicted in zip(testSet, predictions):
        if row[-1] == predicted:
            hits += 1
    return (hits / float(len(testSet))) * 100.0
def main():
    """Train and evaluate the Gaussian naive Bayes classifier end to end."""
    filename = 'data/pima-indians-diabetes.data.csv'
    splitRatio = 0.67  # 67% of rows for training, the rest for testing
    dataset = loadCsv(filename)
    # Random split, so results vary from run to run.
    trainingSet, testSet = splitDataset(dataset, splitRatio)
    print('Split',len(dataset),' rows into train=',len(trainingSet),' and test=', len(testSet),' rows')
    # prepare model
    summaries = summarizeByClass(trainingSet)
    # test model
    predictions = getPredictions(summaries, testSet)
    accuracy = getAccuracy(testSet, predictions)
    print('Accuracy',accuracy)
main()
| Gaussian Naive Bayes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="QHMtnYN4JbzR" outputId="53ff1f3f-889b-4077-d180-c71fdbfff49e"
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, BatchNormalization
import os
import warnings
# Silence library deprecation chatter in the notebook output.
warnings.filterwarnings(action='ignore')
# + colab={} colab_type="code" id="xN76jTnaJku7"
# Training hyperparameters.
batch_size = 16
num_classes = 10  # CIFAR-10 has ten classes
epochs = 25
# + colab={"base_uri": "https://localhost:8080/", "height": 105} colab_type="code" id="BuwjP0yeJlyu" outputId="261cf588-746a-437c-c46a-d8e3cea60572"
# Download/load the CIFAR-10 image dataset and report the array shapes.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# + colab={} colab_type="code" id="ICtLv_uZJoXe"
# One-hot encode the integer class labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# + colab={} colab_type="code" id="-rfn1qzqJ5dW"
# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# + colab={"base_uri": "https://localhost:8080/", "height": 498} colab_type="code" id="qMF9TgYlJvFq" outputId="fee3bcc3-9310-4860-9232-a0a3e40cca1f"
# Small CNN: stacked 3x3 convolutions with widening channel counts,
# batch normalization, and global average pooling instead of dense layers.
n_kernels = 32
model = Sequential()
model.add(Conv2D(n_kernels, (3,3), activation='relu', kernel_initializer='he_normal', padding='valid', input_shape=(32, 32, 3)))
model.add(Conv2D(n_kernels*2, (3, 3), activation='relu', kernel_initializer='he_normal', padding='valid'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Conv2D(n_kernels*2, (3,3), activation='relu', kernel_initializer='he_normal', padding='valid'))
model.add(Conv2D(n_kernels*4, (3, 3), activation='relu', kernel_initializer='he_normal', padding='valid'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Conv2D(n_kernels*8, (3,3), activation='relu', kernel_initializer='he_normal', padding='valid'))
model.add(BatchNormalization())
# Global average pooling collapses each feature map to a single value.
model.add(GlobalAveragePooling2D())
# 10-way softmax output, one unit per CIFAR-10 class.
model.add(Dense(10, activation='softmax', kernel_initializer='glorot_uniform'))
# + colab={"base_uri": "https://localhost:8080/", "height": 585} colab_type="code" id="tTTlF8aFH4gR" outputId="2018e4ba-c46c-47f2-9b39-ffabf5b04179"
model.summary()
# + colab={} colab_type="code" id="NT5sENY6KNrG"
# NOTE(review): lowercase `rmsprop` exists only in older standalone Keras;
# newer versions spell it `RMSprop` — confirm the installed version.
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# + colab={"base_uri": "https://localhost:8080/", "height": 107} colab_type="code" id="Zd1E5FY4JqQG" outputId="2c6df58c-5ee1-4d15-e0fb-62fc2e69081f"
# Categorical cross-entropy matches the one-hot labels and softmax output.
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/", "height": 923} colab_type="code" id="lkA0Pd6vJ_IO" outputId="4d51de91-364e-40f8-8bf8-c8dd6128948c"
# Train, using the test set as validation data each epoch.
trained_model = model.fit(x_train, y_train,
                          batch_size=batch_size,
                          epochs=epochs,
                          validation_data=(x_test, y_test),
                          shuffle=True)
# + colab={} colab_type="code" id="KAnM6dDEKPMu"
epochs_range = range(25)
# NOTE(review): the history keys 'accuracy'/'val_accuracy' depend on the
# Keras/TF version (older versions use 'acc'/'val_acc') — confirm.
validation_accuracy = trained_model.history['val_accuracy']
training_accuracy = trained_model.history['accuracy']
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" id="AUEiUJm3KUbR" outputId="38dd342f-67e2-4203-bd53-d65892eaebd8"
import matplotlib.pyplot as plt
# Training vs. validation accuracy per epoch.
plt.plot(epochs_range, training_accuracy, 'b+', label='training accuracy')
plt.plot(epochs_range, validation_accuracy, 'bo', label='validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Validation accuracy')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="aU9keG2bKY9j" outputId="02b4cfb8-045b-4beb-c032-9503e96b558a"
# Final loss/accuracy on the held-out test set.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# + colab={} colab_type="code" id="iTMyetH0fIRD"
| AI 이노베이션 스퀘어 시각지능 과정/202004/20200420/Cifar10_Challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sentiment Analysis
# <div class="alert alert-info">
#
# This tutorial is available as an IPython notebook at [Malaya/example/sentiment](https://github.com/huseinzol05/Malaya/tree/master/example/sentiment).
#
# </div>
# <div class="alert alert-info">
#
# This module was trained on both standard and local (including social media) language structures, so it is safe to use for both.
#
# </div>
# %%time
import malaya
# ### Models accuracy
#
# We use `sklearn.metrics.classification_report` for accuracy reporting, check at https://malaya.readthedocs.io/en/latest/models-accuracy.html#sentiment-analysis
# ### labels supported
#
# Default labels for sentiment module.
# Display the sentiment labels this module predicts.
malaya.sentiment.label
# ### Example texts
#
# Copy pasted from random tweets.
# Sample inputs: raw tweets in mixed Malay/English, kept verbatim.
string1 = 'Sis, students from overseas were brought back because they are not in their countries which is if something happens to them, its not the other countries’ responsibility. Student dalam malaysia ni dah dlm tggjawab kerajaan. Mana part yg tak faham?'
string2 = 'Harap kerajaan tak bukak serentak. Slowly release week by week. Focus on economy related industries dulu'
string3 = 'Idk if aku salah baca ke apa. Bayaran rm350 utk golongan umur 21 ke bawah shj ? Anyone? If 21 ke atas ok lah. If umur 21 ke bawah? Are you serious? Siapa yg lebih byk komitmen? Aku hrp aku salah baca. Aku tk jumpa artikel tu'
string4 = 'Jabatan Penjara Malaysia diperuntukkan RM20 juta laksana program pembangunan Insan kepada banduan. Majikan yang menggaji bekas banduan, bekas penagih dadah diberi potongan cukai tambahan sehingga 2025.'
string5 = 'Dua Hari <NAME>, Murai Batu Ceriwis Siap Meraikan Even Bekasi Bersatu!'
string6 = '@MasidiM Moga kerajaan sabah, tidak ikut pkp macam kerajaan pusat. Makin lama pkp, makin ramai hilang pekerjaan. Ti https://t.co/nSIABkkEDS'
string7 = 'Hopefully esok boleh ambil gambar dengan'
# ### Load multinomial model
#
# ```python
# def multinomial(**kwargs):
# """
# Load multinomial sentiment model.
#
# Returns
# -------
# result : malaya.model.ml.Bayes class
# """
# ```
# Load the (lightweight) multinomial naive-Bayes sentiment model.
model = malaya.sentiment.multinomial()
# #### Predict batch of strings
#
# ```python
# def predict(self, strings: List[str]):
# """
# classify list of strings.
#
# Parameters
# ----------
# strings: List[str]
#
# Returns
# -------
# result: List[str]
# """
# ```
# Classify all sample strings in one batch; returns one label per string.
model.predict([string1, string2, string3, string4, string5, string6, string7])
# #### Predict batch of strings with probability
#
# ```python
# def predict_proba(self, strings: List[str]):
# """
# classify list of strings and return probability.
#
# Parameters
# ----------
# strings: List[str]
#
# Returns
# -------
# result: List[dict[str, float]]
# """
# ```
# Same batch, but return the per-label probability dict for each string.
model.predict_proba([string1, string2, string3, string4, string5, string6, string7])
# ### List available Transformer models
# List the downloadable transformer architectures for sentiment analysis.
malaya.sentiment.available_transformer()
# ### Load Transformer model
#
# ```python
# def transformer(model: str = 'bert', quantized: bool = False, **kwargs):
# """
# Load Transformer sentiment model.
#
# Parameters
# ----------
# model : str, optional (default='bert')
# Model architecture supported. Allowed values:
#
# * ``'bert'`` - Google BERT BASE parameters.
# * ``'tiny-bert'`` - Google BERT TINY parameters.
# * ``'albert'`` - Google ALBERT BASE parameters.
# * ``'tiny-albert'`` - Google ALBERT TINY parameters.
# * ``'xlnet'`` - Google XLNET BASE parameters.
# * ``'alxlnet'`` - Malaya ALXLNET BASE parameters.
# * ``'fastformer'`` - FastFormer BASE parameters.
# * ``'tiny-fastformer'`` - FastFormer TINY parameters.
#
# quantized : bool, optional (default=False)
# if True, will load 8-bit quantized model.
# Quantized model not necessary faster, totally depends on the machine.
#
# Returns
# -------
# result: model
# List of model classes:
#
# * if `bert` in model, will return `malaya.model.bert.MulticlassBERT`.
# * if `xlnet` in model, will return `malaya.model.xlnet.MulticlassXLNET`.
# * if `fastformer` in model, will return `malaya.model.fastformer.MulticlassFastFormer`.
# """
# ```
# Load the full-precision XLNET sentiment model (downloads weights on first use).
model = malaya.sentiment.transformer(model = 'xlnet')
# ### Load Quantized model
#
# To load 8-bit quantized model, simply pass `quantized = True`, default is `False`.
#
# We can expect slightly accuracy drop from quantized model, and not necessary faster than normal 32-bit float model, totally depends on machine.
# Load the 8-bit quantized variant for comparison with the float model above.
quantized_model = malaya.sentiment.transformer(model = 'xlnet', quantized = True)
# #### Predict batch of strings
#
# ```python
# def predict(self, strings: List[str]):
# """
# classify list of strings.
#
# Parameters
# ----------
# strings: List[str]
#
# Returns
# -------
# result: List[str]
# """
# ```
# +
# %%time
# Timed batch prediction with the float model.
model.predict([string1, string2, string3, string4, string5, string6, string7])
# +
# %%time
# Timed batch prediction with the quantized model, for comparison.
quantized_model.predict([string1, string2, string3, string4, string5, string6, string7])
# -
# #### Predict batch of strings with probability
#
# ```python
# def predict_proba(self, strings: List[str]):
# """
# classify list of strings and return probability.
#
# Parameters
# ----------
# strings: List[str]
#
# Returns
# -------
# result: List[dict[str, float]]
# """
# ```
# +
# %%time
# Timed probability prediction with the float model.
model.predict_proba([string1, string2, string3, string4, string5, string6, string7])
# +
# %%time
# Timed probability prediction with the quantized model.
quantized_model.predict_proba([string1, string2, string3, string4, string5, string6, string7])
# -
# #### Open sentiment visualization dashboard
#
# Default when you call `predict_words` it will open a browser with visualization dashboard, you can disable by `visualization=False`.
#
# ```python
# def predict_words(
# self,
# string: str,
# method: str = 'last',
# bins_size: float = 0.05,
# visualization: bool = True,
# ):
# """
# classify words.
#
# Parameters
# ----------
# string : str
# method : str, optional (default='last')
# Attention layer supported. Allowed values:
#
# * ``'last'`` - attention from last layer.
# * ``'first'`` - attention from first layer.
# * ``'mean'`` - average attentions from all layers.
# bins_size: float, optional (default=0.05)
# default bins size for word distribution histogram.
# visualization: bool, optional (default=True)
# If True, it will open the visualization dashboard.
#
# Returns
# -------
# dictionary: results
# """
# ```
# Word-level attribution; opens the visualization dashboard by default.
quantized_model.predict_words(string4, bins_size = 0.01)
# ### Vectorize
#
# Let say you want to visualize sentence / word level in lower dimension, you can use `model.vectorize`,
#
# ```python
# def vectorize(self, strings: List[str], method: str = 'first'):
# """
# vectorize list of strings.
#
# Parameters
# ----------
# strings: List[str]
# method : str, optional (default='first')
# Vectorization layer supported. Allowed values:
#
# * ``'last'`` - vector from last sequence.
# * ``'first'`` - vector from first sequence.
# * ``'mean'`` - average vectors from all sequences.
# * ``'word'`` - average vectors based on tokens.
#
# Returns
# -------
# result: np.array
# """
# ```
# #### Sentence level
# Sentence-level vectors: one vector per input string (method='first').
r = quantized_model.vectorize([string1, string2, string3, string4], method = 'first')
# +
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
# Reduce the sentence vectors to 2-D for plotting.
tsne = TSNE().fit_transform(r)
tsne.shape
# -
# Scatter the four TSNE-reduced sentence vectors and annotate each point
# with its source string.
plt.figure(figsize = (7, 7))
plt.scatter(tsne[:, 0], tsne[:, 1])
labels = [string1, string2, string3, string4]
for label, x, y in zip(
    labels, tsne[:, 0], tsne[:, 1]
):
    # labels are plain strings here, so the list branch never fires;
    # kept as written in the original notebook.
    label = (
        '%s, %.3f' % (label[0], label[1])
        if isinstance(label, list)
        else label
    )
    plt.annotate(
        label,
        xy = (x, y),
        xytext = (0, 0),
        textcoords = 'offset points',
    )
# #### Word level
# Word-level vectors: `r` appears to hold, per input string, (token, vector)
# pairs — `x` collects tokens, `y` their vectors. TODO confirm against
# malaya's vectorize(method='word') return format.
r = quantized_model.vectorize([string1, string2, string3, string4], method = 'word')
x, y = [], []
for row in r:
    x.extend([i[0] for i in row])
    y.extend([i[1] for i in row])
# Reduce every word vector to 2-D.
tsne = TSNE().fit_transform(y)
tsne.shape
plt.figure(figsize = (7, 7))
plt.scatter(tsne[:, 0], tsne[:, 1])
labels = x
# NOTE: this loop rebinds x and y as scatter coordinates.
for label, x, y in zip(
    labels, tsne[:, 0], tsne[:, 1]
):
    label = (
        '%s, %.3f' % (label[0], label[1])
        if isinstance(label, list)
        else label
    )
    plt.annotate(
        label,
        xy = (x, y),
        xytext = (0, 0),
        textcoords = 'offset points',
    )
# Pretty good, the model able to know cluster top left as positive sentiment, bottom right as negative sentiment.
# ### Stacking models
#
# More information, you can read at [https://malaya.readthedocs.io/en/latest/Stack.html](https://malaya.readthedocs.io/en/latest/Stack.html)
# Ensemble the multinomial, ALXLNET and XLNET models via stacking.
multinomial = malaya.sentiment.multinomial()
alxlnet = malaya.sentiment.transformer(model = 'alxlnet')
malaya.stack.predict_stack([multinomial, alxlnet, model],
[string1, string2, string3, string4, string5, string6, string7])
| docs/load-sentiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Imports
import os
import time
import shutil
import nbformat
import numpy as np
# # Conversion function
def convert_notebook(repo,subfolder,filetype,k,filename):
    """Rewrite one notebook for the course website.

    Renames the title cell to '<filetype> <k>: <old title>', numbers all
    h1/h2 headers, inserts GitHub/Binder links and a table of contents,
    and writes the result under web/<subfolder>/.
    """
    filename_in = subfolder + '/' + filename
    folder_out = 'web/' + subfolder + '/'
    filename_out = folder_out + filename
    print(filename_in)
    # a. load
    nb = nbformat.read(filename_in,as_version=nbformat.NO_CONVERT)
    # b. turn-off autonumbering
    nb['metadata']['toc-autonumbering'] = False
    # c. construct toc
    i = 0          # running h1 counter
    first = True   # the first header cell is the notebook title
    toc = ''
    for cell in nb['cells']:
        if cell['cell_type'] == 'markdown':
            # i. identify type of header
            h1 = True if cell['source'].startswith('# ') else False
            h2 = True if cell['source'].startswith('## ') else False
            if not (h1 or h2): continue
            # ii. skip first (title)
            if first:
                old_title = cell['source'].split('# ')[1].split('\n')[0]
                cell['source'] = f'# {filetype} {k}: ' + old_title
                first = False
                continue
            # iii. find title
            if h1:
                title = cell['source'].split('# ')[1].split('\n')[0]
                i += 1
                j = 0
            elif h2:
                title = cell['source'].split('## ')[1].split('\n')[+0]
                j += 1
            else:
                # unreachable: the guard above already ensures h1 or h2
                continue
            title_safe = title.replace(' ','-')
            # iii. target prefix (anchor for toc links; h1 only)
            if h1:
                prefix = f'<a id="{title_safe}"></a>\n\n'
            else:
                prefix = ''
            # iv. add new title
            if h1:
                new_title = cell['source'].replace('#',f'# {i}.')
            elif h2:
                new_title = cell['source'].replace('##',f'## {i}.{j}')
            cell['source'] = prefix + new_title
            # v. extend toc
            if h1:
                toc += f'{i}. [{title}](#{title_safe})\n'
    # d. insert toc
    toc_cell = nbformat.v4.new_markdown_cell(toc)
    nb['cells'].insert(1,toc_cell)
    # GitHub download + Binder launch badge, inserted before the toc.
    links = '[Download on GitHub](https://github.com/NumEconCopenhagen/' + repo + ')\n\n'
    links += '[<img src="https://mybinder.org/badge_logo.svg">]'
    links += f'(https://mybinder.org/v2/gh/NumEconCopenhagen/'
    links += repo + '/master?urlpath=lab/tree/' + subfolder + '/' + filename + ')'
    links_cell = nbformat.v4.new_markdown_cell(links)
    nb['cells'].insert(1,links_cell)
    # e. save
    nbformat.write(nb,filename_out)
# # Lectures
# +
# a. basic settings
repo = 'lectures-2022'
web_folder = 'web/'
# b. clear: wipe any previous build output, then recreate it world-writable.
# The sleep gives the OS/editors time to release open handles first.
if os.path.isdir(web_folder):
    time.sleep(5)
    shutil.rmtree(web_folder)
    os.mkdir(web_folder)
else:
    os.mkdir(web_folder)
os.chmod(web_folder, 0o777)
# c. list of lectures
lectures = ['01/Introduction.ipynb',
            '02/Primitives.ipynb',
            '03/Optimize_print_and_plot.ipynb',
            '04/Random_numbers_and_simulation.ipynb',
            '05/Workflow_and_debugging.ipynb',
            '06/Examples_and_overview.ipynb',
            '07/Load_save_and_structure_data.ipynb',
            '08/Basic_data_analysis.ipynb',
            '09/Searching_and_sorting.ipynb',
            '10/Solving_equations.ipynb',
            '11/Numerical_optimization.ipynb',
            '12/Canonical_economic_models.ipynb',
            '13/ABMs_SMD.ipynb',
            '14/The_need_for_speed.ipynb',
            'A/Other_programming_languages.ipynb']
# d. create structure: one output folder per lecture subfolder
for lecture in lectures:
    subfolder = lecture.split('/')[0]
    filename = lecture.split('/')[1]
    folder_out = 'web/' + subfolder + '/'
    if os.path.isdir(folder_out):
        os.rmdir(folder_out)
    #if not os.path.isdir('/web/'):
    # os.mkdir('/web/')
    os.mkdir(folder_out)
# e. create notebooks: numeric subfolders are lectures, others appendices
for lecture in lectures:
    subfolder = lecture.split('/')[0]
    filename = lecture.split('/')[1]
    if subfolder.isnumeric():
        convert_notebook(repo,subfolder,'Lecture',subfolder,filename)
    else:
        convert_notebook(repo,subfolder,'Appendix Lecture',subfolder,filename)
| UpdateWeb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Sample Input
#
# 9
# 1 2 3 4 5 6 7 8 9
# 9
# 10 1 2 3 11 21 55 6 8
# #### Sample Output
#
# 13
# #### Explanation
#
# The students who have at least one subscription are those in the union of
# the two sets of roll numbers. Roll numbers appearing in both sets are
# counted only once. Hence, the total is 13 students.
# +
# Read the two subscriber rosters from stdin and report how many distinct
# students subscribe to at least one newspaper (set union).
number_of_english_subscriber = int(input())
english_roll_numbers = set(map(int, input().split()))
number_of_french_subscriber = int(input())
french_roll_numbers = set(map(int, input().split()))
print(len(english_roll_numbers.union(french_roll_numbers)))
| Hacker_rank/Easy/180926_Set .union() Operation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import csv
import re
from num2words import num2words
from collections import OrderedDict
# Turn CSV into a pandas DataFrame
raw_data_table = pd.read_csv('./spreadsheets/group_detail.csv', sep=',')
#raw_data_table[:10]
# -
# Dictionary of FBbt IDs and short names (not labels) for new and existing terms to be used
ID_table = pd.read_csv('./spreadsheets/ID_name.csv', sep=',')
# Map short name (Keys) -> FBbt ID (Values) for the template-building loop below.
lookup = dict(zip(ID_table.Keys,ID_table.Values))
#lookup
# Map two-letter tract codes to their anatomical position names.
position_dict = {
    "AV": "anterior ventral",
    "PV": "posterior ventral",
    "AD": "anterior dorsal",
    "PD": "posterior dorsal",
}
# +
# One-row ROBOT template header: each key is an output column, each value the
# ROBOT template specification string for that column.
template_seed = OrderedDict([
    ('ID', 'ID'),
    ('CLASS_TYPE', 'CLASS_TYPE'),
    ('RDF_Type', 'TYPE'),
    # label, definition and definition xref
    ('Name', 'A rdfs:label'),
    ('Definition', 'A IAO:0000115'),
    ('Xref_def', '>A oboInOwl:hasDbXref'),
    ('created_by', 'A oboInOwl:created_by'),
    ('creation_date', 'A oboInOwl:creation_date'),
    # short-name synonym and its xref
    ('Synonym', 'A oboInOwl:hasExactSynonym'),
    ('syn_ref', '>A oboInOwl:hasDbXref'),
    # columns for the tract superclass
    ('PNT', 'SC %'),
    ('LH', "SC 'innervates' some %"),
    ('Brain', "SC 'part of' some %"),
])
# Seed DataFrame holding the specification row; data rows are appended later.
template = pd.DataFrame.from_records([template_seed])
template
# -
def groupname_splitter(shortname):
    """
    Split a group name into its two-letter code and numeric suffix.

    Raises ValueError unless the name is exactly two uppercase letters
    followed by one or more digits.
    """
    name = str(shortname)
    if re.fullmatch("[A-Z][A-Z][0-9]+", name) is None:
        raise ValueError("Invalid group name - must be 2 letters, followed by numbers.")
    return [name[:2], name[2:]]
def definition_generator(shortname, dictionary):
    """Generate the textual definition for a primary neurite tract term."""
    code, number = groupname_splitter(shortname)
    brain_area = dictionary[code]
    ordinal = num2words(int(number), to='ordinal')
    perspective = brain_area.split()[0]
    return (
        f"Primary neurite tract of the {brain_area} ({code}) adult brain in the "
        f"{ordinal} clockwise position (from {perspective} perspective), from "
        f"ventrolateral to dorsal to ventromedial, of {code} tracts that enter "
        f"the lateral horn (Frechter et al., 2019)."
    )
def label_maker(shortname, dictionary):
    """Build the rdfs:label for a tract from its short group name."""
    code, number = groupname_splitter(shortname)
    return f"adult {dictionary[code]} primary neurite tract {number}"
# +
count = 0 # first row
# Build one template row per group in the source table.
# NOTE(review): `count` and `i` advance together and are interchangeable only
# when raw_data_table has a default RangeIndex — confirm before refactoring.
for i in raw_data_table.index:
    r = raw_data_table.short[count]
    row_od = OrderedDict([]) #new template row as an empty ordered dictionary
    for c in template.columns: #make columns and blank data for new template row
        row_od.update([(c , "")])
    #these are the same in each row
    row_od["CLASS_TYPE"] = "subclass"
    row_od["RDF_Type"] = "owl:Class"
    row_od["Xref_def"] = "FlyBase:FBrf0242628"
    row_od["syn_ref"] = "FlyBase:FBrf0242628"
    row_od["created_by"] = "http://orcid.org/0000-0002-1373-1705"
    row_od["creation_date"] = "2020-04-29T12:00:00Z"
    row_od["PNT"] = lookup["PNT"]
    row_od["LH"] = lookup["adLH"]
    row_od["Brain"] = lookup["adBrain"]
    #easy to generate data
    row_od["ID"] = lookup[r]
    row_od["Synonym"] = "adult " + r + " primary neurite tract"
    row_od["Name"] = label_maker(r,position_dict)
    #STUFF FOR DEFINITION
    definition = definition_generator(r,position_dict)
    # Append a neuron-type sentence for predominantly-local vs -output groups.
    if raw_data_table.main_type[i] == "LHLN":
        definition += (" Lateral horn neurons that enter the neuropil via this "
                       "tract are predominantly local neurons (Frechter et al., 2019).")
    elif raw_data_table.main_type[i] == "LHON":
        definition += (" Lateral horn neurons that enter the neuropil via this "
                       "tract are predominantly output neurons (Frechter et al., 2019).")
    row_od["Definition"] = definition
    #make new row into a DataFrame and add it to template
    new_row = pd.DataFrame.from_records([row_od])
    template = pd.concat([template, new_row], ignore_index=True, sort=False)
    count +=1
template
# -
# Write the finished ROBOT template as a tab-separated file.
template.to_csv("./template.tsv", sep = "\t", header=True, index=False)
| src/patterns/robot_template_projects/LH_neurons/tract_template_gen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # initialize
from __future__ import nested_scopes  # no-op on modern Python (default since 2.2)
# Widen the notebook and switch the editor to a fixed-width font.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
display(HTML('<style>.CodeMirror{font-family: "Courier New";font-size: 12pt;}</style>'))
import pyspark.sql.functions as F
import json
import builtins
from itertools import chain
import seaborn as sns
# +
from pyspark.sql.types import (StructType, StructField, DateType,
TimestampType, StringType, LongType, IntegerType, DoubleType,FloatType)
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import StringIndexer, VectorAssembler
from pyspark.sql.window import Window
# +
import math
from functools import reduce
import re
import collections
from pyspark.ml import Pipeline
import pandas
import numpy
import time
from pandasql import sqldf
# Pandas display defaults for wide Spark query results.
pandas.options.display.max_rows=50
pandas.options.display.max_columns=200
pandas.options.display.float_format = '{:,}'.format
# -
from ipywidgets import IntProgress,Layout
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
# +
# HDFS client used to fetch Spark event logs (cluster-internal namenode).
import pyhdfs
fs = pyhdfs.HdfsClient(hosts='10.0.2.125:50070', user_name='yuzhou')
# -
# # base class
# + code_folding=[]
class SparkLog_Analysis:
    """Placeholder for per-job Spark-log analysis; not implemented yet."""
    def __init__(self, appid,jobids,clients):
        pass
# + code_folding=[]
class Analysis:
    """Base class for analyses that render Chrome trace-viewer JSON.

    Subclasses override load_data() to populate self.df; this base class
    only emits the process-name metadata event and the output scaffolding.
    """
    def __init__(self,file):
        self.file=file          # path of the log file to analyse
        self.starttime=0        # timestamp origin for trace events
        self.df=None            # populated lazily by load_data()
    def load_data(self):
        """Populate self.df; no-op in the base class."""
        pass
    def generate_trace_view_list(self,id=0, **kwargs):
        """Return a list of trace-viewer event JSON strings.

        id: trace-viewer process id; kwargs['node'] names the process lane.
        """
        # BUGFIX: was `self.df==None`. When df is a Spark DataFrame, `==`
        # builds a Column (not a bool) and the truth test fails; identity
        # comparison against None is the correct check.
        if self.df is None:
            self.load_data()
        trace_events=[]
        node=kwargs.get('node',"node")
        trace_events.append(json.dumps({"name": "process_name","ph": "M","pid":id,"tid":0,"args":{"name":" "+node}}))
        return trace_events
    def generate_trace_view(self, trace_output, **kwargs):
        """Write all trace events to <trace_output>.json and link the viewer."""
        traces=[]
        traces.extend(self.generate_trace_view_list(0,**kwargs))
        # Assemble the trace-viewer document by joining the event strings.
        output='''
{
"traceEvents": [
''' + ",\n".join(traces) + '''
],
"displayTimeUnit": "ns"
}'''
        with open(trace_output+'.json', 'w') as outfile:
            outfile.write(output)
        display(HTML("<a href=http://xxx:1088/tracing_examples/trace_viewer.html#/tracing/test_data/"+trace_output+".json>http://xxx:1088/tracing_examples/trace_viewer.html#/tracing/test_data/"+trace_output+".json</a>"))
# -
# # app log analysis
# + code_folding=[186]
from pyspark.sql.functions import udf
# Spark SQL UDF: given the JSON-serialized `children` array of a plan node,
# return 1 only when no node in the tree is still marked "isFinalPlan=false"
# (i.e. the adaptive plan is final everywhere), else 0.
@udf("long")
def isfinish_udf(s):
    import json
    s=json.loads(s)
    def isfinish(root):
        # A node counts as unfinished if still non-final or if it has no
        # children entry to recurse into.
        if "isFinalPlan=false" in root['simpleString'] or root['children'] is None:
            return 0
        for c in root["children"]:
            if isfinish(c)==0:
                return 0
        return 1
    if len(s)>0:
        return isfinish(s[0])
    else:
        # Empty children array -> treat as not finished.
        return 0
# Grouped-map pandas UDF: for one task's metric rows, spread the task's
# wall-clock span [Launch Time, Finish Time] across its metrics in
# proportion to each metric's `Update` value, emitting one
# (taskid, start, dur, name) interval per metric that is >1ms after scaling.
@pandas_udf("taskid long, start long, dur long, name string", PandasUDFType.GROUPED_MAP)
def time_breakdown(pdf):
    ltime=pdf['Launch Time'][0]+2
    pdf['start']=0
    pdf['dur']=0
    outpdf=[]
    # Scale summed metric time onto the task duration; cap at 1 so the
    # breakdown never exceeds the real task span.
    ratio=(pdf["Finish Time"][0]-pdf["Launch Time"][0])/pdf["Update"].sum()
    ratio=1 if ratio>1 else ratio
    for idx,l in pdf.iterrows():
        if(l["Update"]*ratio>1):
            outpdf.append([l["Task ID"],ltime,int(l["Update"]*ratio),l["mname"]])
            ltime=ltime+int(l["Update"]*ratio)
    if len(outpdf)>0:
        return pandas.DataFrame(outpdf)
    else:
        # Preserve the declared output schema even when no rows survive.
        return pandas.DataFrame({'taskid': pandas.Series([], dtype='long'),
                                 'start': pandas.Series([], dtype='long'),
                                 'dur': pandas.Series([], dtype='long'),
                                 'name': pandas.Series([], dtype='str'),
                                 })
class App_Log_Analysis(Analysis):
    """Analysis over a Spark application event log (JSON-lines file).

    load_data() parses task/job/query events into Spark DataFrames and
    derives per-query metrics, app configuration and the critical task path.
    NOTE(review): relies on module-level `spark` session and, for 103-query
    runs, a `tpcds_query_map` defined elsewhere in the notebook.
    """
    def __init__(self, file, jobids):
        Analysis.__init__(self,file)
        # Optional job-ID filter, normalised to a list of strings.
        self.jobids=[] if jobids is None else [str(l) for l in jobids]
        self.df=None
        self.pids=[]
    def load_data(self):
        print("load data ", self.file)
        jobids=self.jobids
        df=spark.read.json(self.file)
        # Application id (fallback placeholder for logs without one).
        if 'App ID' in df.columns:
            self.appid=df.where("`App ID` is not null").collect()[0]["App ID"]
        else:
            self.appid="Application-00000000"
        # Driver-side accumulator updates, keyed by SQL execution id.
        if df.where("Event='org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates'").count()>0:
            self.dfacc=df.where("Event='org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates'").select(F.col("executionId").alias("queryid"),F.explode("accumUpdates"))
        else:
            self.dfacc = None
        # Physical plans; keep only final adaptive plans (or non-AQE plans).
        if "sparkPlanInfo" in df.columns:
            self.queryplans=df.where("(Event='org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart' or Event='org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate') \
            and (sparkPlanInfo.nodeName!='AdaptiveSparkPlan' or sparkPlanInfo.simpleString='AdaptiveSparkPlan isFinalPlan=true') ").select(F.col("executionId").alias("queryid"),'physicalPlanDescription',"sparkPlanInfo.*")
        else:
            self.queryplans=None
        seen = set()
        if self.queryplans is not None:
            # Keep only plans whose entire tree is final.
            self.queryplans=self.queryplans.where(isfinish_udf(F.to_json("children"))==1)
            self.allmetrics=[]
            if self.queryplans.count() > 0:
                metrics=self.queryplans.collect()
                # Recursively collect (accumulatorId, metricType, name, nodeName).
                def get_metric(root):
                    for l in root["metrics"]:
                        if l['accumulatorId'] not in seen:
                            seen.add(l['accumulatorId'])
                            self.allmetrics.append([l['accumulatorId'],l["metricType"],l['name'],root["nodeName"]])
                    if root['children'] is not None:
                        for c in root["children"]:
                            get_metric(c)
                for c in metrics:
                    get_metric(c)
                amsdf=spark.createDataFrame(self.allmetrics)
                amsdf=amsdf.withColumnRenamed("_1","ID").withColumnRenamed("_2","type").withColumnRenamed("_3","Name").withColumnRenamed("_4","nodeName")
                if self.dfacc is not None:
                    self.dfacc=self.dfacc.select("queryid",(F.col("col")[0]).alias("ID"),(F.col("col")[1]).alias("Update")).join(amsdf,on=["ID"])
        if self.queryplans is not None:
            # Timing metrics used for the per-operator time breakdown.
            # NOTE(review): `not in("totaltime_collectbatch")` is a substring
            # test on a plain string, not tuple membership — confirm intent.
            self.metricscollect=[l for l in self.allmetrics if l[1] in ['nsTiming','timing'] and (l[2].startswith("totaltime_") or l[2].startswith("scan time") or l[2].startswith("shuffle write time")) and l[2] not in("totaltime_collectbatch") ]
        #config=df.where("event='SparkListenerJobStart' and Properties.`spark.executor.cores` is not null").select("Properties.*").limit(1).collect()
        config=df.select("`Spark Properties`.*").where("`spark.app.id` is not null").limit(1).collect()
        configdic=config[0].asDict()
        # Key Spark configuration values (default to 1 when absent).
        self.parallelism=int(configdic['spark.sql.shuffle.partitions']) if 'spark.sql.shuffle.partitions' in configdic else 1
        self.executor_cores=int(configdic['spark.executor.cores']) if 'spark.executor.cores' in configdic else 1
        self.executor_instances=int(configdic['spark.executor.instances']) if 'spark.executor.instances' in configdic else 1
        self.taskcpus= int(configdic['spark.task.cpus'])if 'spark.task.cpus' in configdic else 1
        self.batchsize= int(configdic['spark.sql.execution.arrow.maxRecordsPerBatch'])if 'spark.sql.execution.arrow.maxRecordsPerBatch' in configdic else 1
        self.realexecutors = df.where(~F.isnull(F.col("Executor ID"))).select("Executor ID").distinct().count()
        # Job start/end events joined on Job ID.
        if "spark.sql.execution.id" in df.where("Event='SparkListenerJobStart'").select("Properties.*").columns:
            df_jobstart=df.where("Event='SparkListenerJobStart'").select("Job ID","Submission Time",F.col("Properties.`spark.sql.execution.id`").alias("queryid"),"Stage IDs")
        else:
            df_jobstart=df.where("Event='SparkListenerJobStart'").select("Job ID","Submission Time",F.lit(0).alias("queryid"),"Stage IDs")
        df_jobend=df.where("Event='SparkListenerJobEnd'").select("`Job ID`","Completion Time")
        df_job=df_jobstart.join(df_jobend,"Job ID")
        df_job=df_job.withColumnRenamed("Submission Time","job_start_time")
        df_job=df_job.withColumnRenamed("Completion Time","job_stop_time")
        self.df_job=df_job
        jobstage=df_job.select("*",F.explode("Stage IDs").alias("Stage ID"))
        task=df.where("(Event='SparkListenerTaskEnd' or Event='SparkListenerTaskStart') ").select("Event","Stage ID","task info.*","task metrics.*")
        self.failed_stages = [str(l['Stage ID']) for l in task.where("Failed='true'").select("Stage ID").distinct().collect()]
        # Flatten per-task metrics and attach job/stage info.
        taskjob=task.\
            select("Host","`Event`","`Launch Time`","`Executor ID`","`Task ID`","`Finish Time`",
                   "`Stage ID`","`Input Metrics`.`Bytes Read`","`Disk Bytes Spilled`","`Memory Bytes Spilled`","`Shuffle Read Metrics`.`Local Bytes Read`","`Shuffle Read Metrics`.`Remote Bytes Read`",
                   "`Shuffle Write Metrics`.`Shuffle Bytes Written`","`Executor Deserialize Time`","`Shuffle Read Metrics`.`Fetch Wait Time`","`Executor Run Time`","`Shuffle Write Metrics`.`Shuffle Write Time`",
                   "`Result Serialization Time`","`Getting Result Time`","`JVM GC Time`","`Executor CPU Time`","Accumulables","Peak Execution Memory",
                   F.when(task['Finish Time']==0,task['Launch Time']).otherwise(task['Finish Time']).alias('eventtime')
                   ).join(jobstage,"Stage ID")
        self.df=taskjob
        if len(jobids)>0:
            self.df=self.df.where('`Job ID` in ({:s})'.format(','.join(jobids)))
        # Map raw execution ids onto sequential "real" query numbers.
        queryids=self.df.select(F.col("queryid").astype(IntegerType())).distinct().where("queryid is not null").orderBy("queryid").toPandas()
        self.query_num=len(queryids)
        if self.query_num>0:
            queryidx=queryids.reset_index()
            queryidx['index']=queryidx['index']+1
            #tpcds query
            if self.query_num==103:
                queryidx['index']=queryidx['index'].map(tpcds_query_map)
            qidx=spark.createDataFrame(queryidx)
            qidx=qidx.withColumnRenamed("index","real_queryid")
            self.df=self.df.join(qidx,on="queryid",how="left")
            if self.dfacc is not None:
                self.dfacc=self.dfacc.join(qidx,on="queryid",how='left')
            if self.queryplans:
                self.queryplans=self.queryplans.join(qidx,"queryid",how="right")
        self.df=self.df.fillna(0)
        self.df=self.df.withColumn('Executor ID',F.when(F.col("Executor ID")=="driver",1).otherwise(F.col("Executor ID")))
        self.df.cache()
        ##############################
        # Critical path: starting from the latest-finishing task, repeatedly
        # chain to the latest task whose finish overlaps the current launch.
        dfx=self.df.where("Event='SparkListenerTaskEnd'").select("Stage ID","Launch Time","Finish Time","Task ID")
        dfxpds=dfx.toPandas()
        dfxpds.columns=[l.replace(" ","_") for l in dfxpds.columns]
        dfxpds_ods=sqldf('''select * from dfxpds order by finish_time desc''')
        criticaltasks=[]
        idx=0
        prefinish=0
        launchtime=dfxpds_ods["Launch_Time"][0]
        criticaltasks.append([dfxpds_ods["Task_ID"][0],launchtime,dfxpds_ods["Finish_Time"][0]])
        total_row=len(dfxpds_ods)
        while True:
            # Advance to the next task finishing before the current launch
            # (2ms slack); the inner while's else fires when none remains.
            while idx<total_row:
                if dfxpds_ods["Finish_Time"][idx]-2<launchtime:
                    break
                idx=idx+1
            else:
                break
            cur_finish=dfxpds_ods["Finish_Time"][idx]
            cur_finish=launchtime-1 if cur_finish>=launchtime else cur_finish
            launchtime=dfxpds_ods["Launch_Time"][idx]
            criticaltasks.append([dfxpds_ods["Task_ID"][idx],launchtime,cur_finish])
        self.criticaltasks=criticaltasks
def get_physical_plan(appals,**kwargs):
    """Render the (final) physical plan of one or all queries as HTML tables.

    appals: a loaded App_Log_Analysis instance.
    kwargs: queryid (render one query; default all), shownops (operator name
    filter). Table names and columns are anonymised (t_0, proj_1, ...).
    Returns the collected plan rows.
    """
    # BUGFIX: html.escape is used below but the `html` module is not imported
    # anywhere at file level; import it locally to avoid a NameError.
    import html
    if appals.df is None:
        appals.load_data()
    queryid=kwargs.get('queryid',None)
    shownops=kwargs.get("shownops",['ArrowRowToColumnarExec','ColumnarToRow','RowToArrowColumnar','ArrowColumnarToRow','Filter','HashAggregate','Project','SortAggregate','SortMergeJoin','window'])
    def get_fields(colss):
        # Split a bracketed column list on top-level commas only.
        lvls=0
        colns=[]
        ks=""
        for c in colss:
            if c=="," and lvls==0:
                colns.append(ks)
                ks=""
                continue
            if c==" " and ks=="":
                continue
            if c=="(":
                lvls+=1
            if c==")":
                lvls-=1
            ks+=c
        if ks!="":
            colns.append(ks)
        return colns
    def get_column_names(s, opname, resultname, prefix, columns, funcs):
        # Harvest "... AS name" aliases from an operator's result list and
        # register anonymised names for them.
        p=re.search(r" "+opname+" ",s[0])
        if p:
            for v in s[1].split("\n"):
                if v.startswith(resultname):
                    cols=re.search("\[([^0-9].+)\]",v)
                    if cols:
                        colss=cols.group(1)
                        colns=get_fields(colss)
                        if opname+str(len(columns)) not in funcs:
                            funcs[opname+str(len(columns))]=[]
                        funcs[opname+str(len(columns))].extend(colns)
                        for c in colns:
                            if " AS " in c:
                                colname=re.search(r" AS (.+)",c).group(1)
                                if colname not in columns:
                                    columns[colname]=prefix+str(len(columns))
    plans=appals.queryplans.select('real_queryid','physicalPlanDescription').collect() if queryid is None else appals.queryplans.where(f"real_queryid='{queryid}'").select("physicalPlanDescription").collect()
    for pr in range(0,len(plans)):
        plan=plans[pr]['physicalPlanDescription']
        nodes={}
        lines=plan.split("\n")
        # Collect node headers "(n) Operator" from the final (or only) plan.
        for idx in range(0,len(lines)):
            l=lines[idx]
            if l=='+- == Final Plan ==':
                while l!='+- == Initial Plan ==':
                    idx+=1
                    l=lines[idx]
                    if not l.endswith(")"):
                        break
                    idv=re.search("\(\d+\)$",l).group(0)
                    nodes[idv]=[l]
            if l=="== Physical Plan ==":
                while not lines[idx+1].startswith("("):
                    idx+=1
                    l=lines[idx]
                    if not l.endswith(")"):
                        break
                    idv=re.search("\(\d+\)$",l).group(0)
                    nodes[idv]=[l]
            # Attach the detailed description block to its node header.
            if l.startswith("("):
                idv=re.search("^\(\d+\)",l).group(0)
                if idv in nodes:
                    desc=""
                    while l!="":
                        desc+=l+"\n"
                        idx+=1
                        l=lines[idx]
                    nodes[idv].append(desc)
        tables={}
        columns={}
        functions={}
        # Anonymise scanned table names and ReadSchema columns.
        for s in nodes.values():
            p=re.search(r"Scan arrow default\.([^ ]+)",s[0])
            if p:
                tn=p.group(1)
                if not tn in tables:
                    tables[tn]="t_"+str(len(tables))
                s[0]=s[0].replace(tn,tables[tn])
                s[1]=s[1].replace(tn,tables[tn])
            colsv=[]
            schema=[]
            for v in s[1].split("\n"):
                if v.startswith("ReadSchema"):
                    cols=re.search("<(.*)>",v)
                    if cols:
                        colss=cols.group(1).split(",")
                        for c in colss:
                            cts=c.split(":")
                            ct=cts[0]
                            if not ct in columns:
                                if len(cts)==2:
                                    cts[1]=cts[1]
                                    columns[ct]=cts[1]+"_"+str(len(columns))
                                else:
                                    columns[ct]="c_"+str(len(columns))
            get_column_names(s, "Project", "Output", "proj_", columns, functions)
            get_column_names(s, "HashAggregate", "Results", "shagg_", columns, functions)
            get_column_names(s, "SortAggregate", "Results", "stagg_", columns, functions)
        # Escape descriptions and colour-code anonymised column names.
        for s in nodes.values():
            s[1]=html.escape(s[1])
            for c,v in columns.items():
                if v.startswith("array") or v.startswith("map") or v.startswith("struct"):
                    s[1]=s[1].replace(c,'<span style="color:red;background-color:yellow">'+html.escape(v)+"</span>")
                else:
                    s[1]=s[1].replace(c,"<font color=#33cc33>"+html.escape(v)+"</font>")
        htmls=['''<table style="table-layout:fixed;max-width: 100%;">''']
        qid=pr+1 if queryid is None else queryid
        htmls.append(f"<tr><td colspan=2>{qid}</td></tr>")
        for l in nodes.values():
            # Skip operators that are not in the shownops filter.
            if shownops is not None:
                for k in shownops:
                    if " "+k+" " in l[0]:
                        break
                else:
                    continue
            htmls.append("<tr>")
            htmls.append('<td width=33%><div align="left" style="font-family:Courier New;overflow-wrap: anywhere">')
            htmls.append(l[0].replace(" ","_")
                         .replace("ColumnarToRow","<font color=blue>ColumnarToRow</font>")
                         .replace("RowToArrowColumnar","<font color=blue>RowToArrowColumnar</font>")
                         .replace("ArrowColumnarToRow","<font color=blue>ArrowColumnarToRow</font>")
                         .replace("ArrowRowToColumnar","<font color=blue>ArrowRowToColumnar</font>")
                         )
            htmls.append("</div></td>")
            htmls.append('<td width=66%><div align="left" style="font-family:Courier New;overflow-wrap: anywhere">')
            ls=l[1].split("\n")
            lsx=[]
            for t in ls:
                cols=re.search("\[([^0-9].+)\]",t)
                if cols:
                    colss=cols.group(1)
                    colns=get_fields(colss)
                    t=re.sub("\[([^0-9].+)\]","",t)
                    t+="["+'<span style="background-color:#ededed;">;</span>'.join(colns)+"]"
                if ":" in t:
                    lsx.append(re.sub(r'^([^:]+:)',r'<font color=blue>\1</font>',t))
                else:
                    lsx.append(t)
            htmls.append("<br>".join(lsx))
            htmls.append("</div></td>")
            htmls.append("</tr>")
        htmls.append("</table>")
        display(HTML("\n".join(htmls)))
        # Second table: the expression lists gathered per operator.
        for k, v in functions.items():
            functions[k]=[l for l in v if "(" in l]
        for f in functions.values():
            for idx in range(0,len(f)):
                for c,v in columns.items():
                    if v.startswith("array") or v.startswith("map") or v.startswith("struct"):
                        f[idx]=f[idx].replace(c,'<span style="color:red;background-color:yellow">'+html.escape(v)+"</span>")
                    else:
                        f[idx]=f[idx].replace(c,"<font color=#33cc33>"+html.escape(v)+"</font>")
        funchtml="<table>"
        for k,v in functions.items():
            if shownops is not None:
                for ks in shownops:
                    if " "+ks+" " in k:
                        break
                else:
                    continue
            funchtml+="<tr><td width=10%>"+k+'</td><td width=90%><table stype="width:100%;table-layout:fixed">'
            for f in v:
                funchtml+='<tr><td width=100% ><div align="left" style="font-family:Courier New">'+f+"</div></td></tr>"
            funchtml+="</table></td></tr>"
        funchtml+="</table>"
        display(HTML(funchtml))
    return plans
def get_physical_allnodes(appals,**kwargs):
    """Parse each query's physicalPlanDescription text into a dict of plan nodes.

    Returns {plan_index: {"(<node id>)": [header_line, detail_text], ...}}.
    Only nodes listed between the '+- == Final Plan ==' and
    '+- == Initial Plan ==' markers (the AQE final plan) are collected.
    """
    if appals.df is None:
        appals.load_data()
    queryid=None
    # queryid is always None here, so the first branch (all queries) is taken.
    plans=appals.queryplans.select('real_queryid','physicalPlanDescription').collect() if queryid is None else appals.queryplans.where(f"real_queryid='{queryid}'").select("physicalPlanDescription").collect()
    allnodes={}
    for pr in range(0,len(plans)):
        plan=plans[pr]['physicalPlanDescription']
        allnodes[pr]={}
        nodes=allnodes[pr]
        if plan is None:
            continue
        lines=plan.split("\n")
        for idx in range(0,len(lines)):
            l=lines[idx]
            if l=='+- == Final Plan ==':
                # Register every node id "(<n>)" listed in the final-plan
                # section.  NOTE: mutating idx inside this while loop does NOT
                # advance the outer range() iterator, so the same lines are
                # revisited by the outer loop afterwards (harmless here).
                while l!='+- == Initial Plan ==':
                    idx+=1
                    l=lines[idx]
                    if not l.endswith(")"):
                        break
                    # assumes a trailing "(<digits>)" when the line ends with
                    # ")" -- a non-match would raise AttributeError here.
                    idv=re.search("\(\d+\)$",l).group(0)
                    nodes[idv]=[l]
            if l.startswith("("):
                # Detail block "(<id>) NodeName ..." runs up to the next
                # blank line; attach it to the registered node.
                idv=re.search("^\(\d+\)",l).group(0)
                if idv in nodes:
                    desc=""
                    while l!="":
                        desc+=l+"\n"
                        idx+=1
                        l=lines[idx]
                    nodes[idv].append(desc)
    return allnodes
def get_basic_state(appals):
    """Display an HTML summary card for one application run.

    Shows a link to the (hard-coded) sr525:18080 history server, the key
    Spark sizing parameters, failed stages, and the column sums of
    appals.get_query_time() (runtime, spill, shuffle, GC, ... per query).
    """
    if appals.df is None:
        appals.load_data()
    display(HTML("<a href=http://sr525:18080/history/"+appals.appid+">http://sr525:18080/history/"+appals.appid+"</a>"))
    # Highlight in red when fewer executors actually registered than requested.
    errorcolor="#000000" if appals.executor_instances == appals.realexecutors else "#c0392b"
    qtime=appals.get_query_time(plot=False)
    sums=qtime.sum()
    if len(appals.failed_stages)>0:
        # One "query: X|stage: Y" line per distinct failed (query, stage) pair.
        failure="<br>".join(["query: " + str(l["real_queryid"])+"|stage: " + str(l["Stage ID"]) for l in appals.df.where("`Stage ID` in ("+",".join(appals.failed_stages)+")").select("real_queryid","Stage ID").distinct().collect()])
    else:
        failure=""
    display(HTML(f'''
<table border="1" cellpadding="1" cellspacing="1" style="width:500px">
<tbody>
<tr>
<td style="width:135px">appid</td>
<td style="width:351px"><span style="color:#000000"><strong>{appals.appid}</strong></span></td>
</tr>
<tr>
<td style="width:135px">executor.instances</td>
<td style="width:351px"><span style="color:#000000"><strong>{appals.executor_instances}</strong></span></td>
</tr>
<tr>
<td style="width:135px">executor.cores</td>
<td style="width:351px"><span style="color:#000000"><strong>{appals.executor_cores}</strong></span></td>
</tr>
<tr>
<td style="width:135px">shuffle.partitions</td>
<td style="width:351px"><span style="color:#000000"><strong>{appals.parallelism}</strong></span></td>
</tr>
<tr>
<td style="width:135px">batch size</td>
<td style="width:351px"><span style="color:#000000"><strong>{appals.batchsize}</strong></span></td>
</tr>
<tr>
<td style="width:135px">real executors</td>
<td style="width:351px"><span style="color:{errorcolor}"><strong>{appals.realexecutors}</strong></span></td>
</tr>
<tr>
<td style="width:135px">Failed Tasks</td>
<td style="width:351px"><span style="color:{errorcolor}"><strong>{failure}</strong></span></td>
</tr>
<tr>
<td style="width:135px">runtime</td>
<td style="width:351px"><strong>{round(sums['runtime'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">disk spilled</td>
<td style="width:351px"><strong>{round(sums['disk spilled'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">memspilled</td>
<td style="width:351px"><strong>{round(sums['memspilled'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">local_read</td>
<td style="width:351px"><strong>{round(sums['local_read'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">remote_read</td>
<td style="width:351px"><strong>{round(sums['remote_read'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">shuffle_write</td>
<td style="width:351px"><strong>{round(sums['shuffle_write'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">task run time</td>
<td style="width:351px"><strong>{round(sums['run_time'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">ser_time</td>
<td style="width:351px"><strong>{round(sums['ser_time'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">f_wait_time</td>
<td style="width:351px"><strong>{round(sums['f_wait_time'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">gc_time</td>
<td style="width:351px"><strong>{round(sums['gc_time'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">input read</td>
<td style="width:351px"><strong>{round(sums['input read'],2)}</strong></td>
</tr>
<tr>
<td style="width:135px">acc_task_time</td>
<td style="width:351px"><strong>{round(sums['acc_task_time'],2)}</strong></td>
</tr>
</tbody>
</table>
'''))
def generate_trace_view_list_exec(self,id=0,**kwargs):
    """Build Chrome trace-event JSON strings for every task, with a per-task
    time breakdown (deserialize / read / executor / write / serialize / ...).

    Each executor becomes a trace 'process' (pid = executor_id*100 + a
    per-host offset); concurrently running tasks on an executor are assigned
    to free 'thread' slots tracked in coretrack.  Returns a list of JSON
    strings, one per trace event.
    """
    Analysis.generate_trace_view_list(self,**kwargs)
    showcpu=kwargs.get('showcpu',False)
    shownodes=kwargs.get("shownodes",None)
    showdf=self.df.where(F.col("Host").isin(shownodes)) if shownodes else self.df
    events=showdf.toPandas()
    coretrack={}          # pid -> {slot: [task_id, launch_time]}; [-1,-1] marks a free slot
    trace_events=[]
    starttime=self.starttime
    taskend=[]
    trace={"traceEvents":[]}
    exec_hosts={}         # host name -> pid base offset (100000 apart)
    hostsdf=showdf.select("Host").distinct().orderBy("Host")
    hostid=100000
    ended_event=[]        # task ids whose End arrived before their Start row
    for i,l in hostsdf.toPandas().iterrows():
        exec_hosts[l['Host']]=hostid
        hostid=hostid+100000
    for idx,l in events.iterrows():
        if l['Event']=='SparkListenerTaskStart':
            hostid=exec_hosts[l['Host']]
            tsk=l['Task ID']
            pid=int(l['Executor ID'])*100+hostid
            self.pids.append(pid)
            stime=l['Launch Time']
            #the task's starttime and finishtime is the same, ignore it.
            if tsk in ended_event:
                continue
            if not pid in coretrack:
                tids={}
                # First task on this executor: emit its process-name metadata.
                trace_events.append({
                    "name": "process_name",
                    "ph": "M",
                    "pid":pid,
                    "tid":0,
                    "args":{"name":"{:s}.{:s}".format(l['Host'],l['Executor ID'])}
                })
            else:
                tids=coretrack[pid]
            # Reuse the first free slot, else open a new one.
            for t in tids.keys():
                if tids[t][0]==-1:
                    tids[t]=[tsk,stime]
                    break
            else:
                t=len(tids)
                tids[t]=[tsk,stime]
            #print("task {:d} tid is {:s}.{:d}".format(tsk,pid,t))
            coretrack[pid]=tids
        if l['Event']=='SparkListenerTaskEnd':
            sevt={}
            eevt={}
            hostid=exec_hosts[l['Host']]
            pid=int(l['Executor ID'])*100+hostid
            tsk=l['Task ID']
            fintime=l['Finish Time']
            tids=coretrack[pid]
            # Free the slot owned by this task; if none is found the End came
            # before the Start -> remember it so the Start is skipped.
            for t in tids.keys():
                if tids[t][0]==tsk:
                    tids[t]=[-1,-1]
                    break
            else:
                ended_event.append(tsk)
                continue
            # If another task launched within 2ms before this finish, clamp the
            # finish and move that task onto this slot (keeps slots contiguous).
            for ps in reversed([key for key in tids.keys()]) :
                if tids[ps][1]-fintime<0 and tids[ps][1]-fintime>=-2:
                    fintime=tids[ps][1]
                    tids[t]=tids[ps]
                    tids[ps]=[-1,-1]
                    break
            if starttime==0:
                starttime=l['Launch Time']
                print(f'applog start time: {starttime}')
            sstime=l['Launch Time']-starttime
            # Whole-task duration event.  NOTE(review): this variant reads
            # l['job id'] (lowercase) while generate_trace_view_list reads
            # l['Job ID'] -- presumably the two input frames differ; confirm.
            trace_events.append({
                'tid':pid+int(t),
                'ts':sstime,
                'dur':fintime-l['Launch Time'],
                'pid':pid,
                "ph":'X',
                'name':"stg{:d}".format(l['Stage ID']),
                'args':{"job id": l['job id'],
                        "stage id": l['Stage ID'],
                        "tskid":tsk,
                        "input":builtins.round(l["Bytes Read"]/1024/1024,2),
                        "spill":builtins.round(l["Memory Bytes Spilled"]/1024/1024,2),
                        "Shuffle Read Metrics": "",
                        "|---Local Read": builtins.round(l["Local Bytes Read"]/1024/1024,2),
                        "|---Remote Read":builtins.round(l["Remote Bytes Read"]/1024/1024,2),
                        "Shuffle Write Metrics": "",
                        "|---Write":builtins.round(l['Shuffle Bytes Written']/1024/1024,2)
                       }
            })
            # Phase breakdown; shuffle-write time is reported in ns -> ms.
            des_time=l['Executor Deserialize Time']
            read_time=l['Fetch Wait Time']
            exec_time=l['Executor Run Time']
            write_time=math.floor(l['Shuffle Write Time']/1000000)
            ser_time=l['Result Serialization Time']
            getrst_time=l['Getting Result Time']
            durtime=fintime-sstime-starttime;
            times=[0,des_time,read_time,exec_time,write_time,ser_time,getrst_time]
            time_names=['sched delay','deserialize time','read time','executor time','write time','serialize time','result time']
            evttime=reduce((lambda x, y: x + y),times)
            # Scale phases down to fit the task duration, or attribute the
            # unexplained remainder to scheduler delay (times[0]).
            if evttime>durtime:
                times=[math.floor(l*1.0*durtime/evttime) for l in times]
            else:
                times[0]=durtime-evttime
            esstime=sstime
            for idx in range(0,len(times)):
                if times[idx]>0:
                    trace_events.append({
                        'tid':pid+int(t),
                        'ts':esstime,
                        'dur':times[idx],
                        'pid':pid,
                        'ph':'X',
                        'name':time_names[idx]})
                    # Overlay GC time on the executor-time phase (idx 3).
                    if idx==3:
                        trace_events.append({
                            'tid':pid+int(t),
                            'ts':esstime,
                            'dur':l['JVM GC Time'],
                            'pid':pid,
                            'ph':'X',
                            'name':'GC Time'})
                        if showcpu:
                            # Counter event pair: cpu% during the phase, then 0.
                            trace_events.append({
                                'tid':pid+int(t),
                                'ts':esstime,
                                'pid':pid,
                                'ph':'C',
                                'name':'cpu% {:d}'.format(pid+int(t)),
                                'args':{'value':l['Executor CPU Time']/1000000.0/times[idx]}})
                            trace_events.append({
                                'tid':pid+int(t),
                                'ts':esstime+times[idx],
                                'pid':pid,
                                'ph':'C',
                                'name':'cpu% {:d}'.format(pid+int(t)),
                                'args':{'value':0}})
                    esstime=esstime+times[idx]
    self.starttime=starttime
    return [json.dumps(l) for l in trace_events]
def generate_trace_view_list(self,id=0,**kwargs):
    """Build Chrome trace-event JSON strings for all tasks, plus (optionally)
    per-metric breakdown events derived from accumulator updates, per-query
    instant markers, and the critical-path track.

    Same pid/tid slot-allocation scheme as generate_trace_view_list_exec;
    additionally records self.tskmap (task id -> {pid, tid}) for the metric
    join below.  Returns a list of JSON strings.
    """
    Analysis.generate_trace_view_list(self,**kwargs)
    showcpu=kwargs.get('showcpu',False)
    shownodes=kwargs.get("shownodes",None)
    showdf=self.df.where(F.col("Host").isin(shownodes)) if shownodes else self.df
    showdf=showdf.orderBy(["eventtime", "Finish Time"], ascending=[1, 0])
    events=showdf.drop("Accumulables").toPandas()
    coretrack={}          # pid -> {slot: [task_id, launch_time]}; [-1,-1] = free
    trace_events=[]
    starttime=self.starttime
    taskend=[]
    trace={"traceEvents":[]}
    exec_hosts={}         # host -> pid base offset
    hostsdf=showdf.select("Host").distinct().orderBy("Host")
    hostid=100000
    ended_event=[]        # tasks whose End preceded their Start row
    for i,l in hostsdf.toPandas().iterrows():
        exec_hosts[l['Host']]=hostid
        hostid=hostid+100000
    tskmap={}
    for idx,l in events.iterrows():
        if l['Event']=='SparkListenerTaskStart':
            hostid=exec_hosts[l['Host']]
            tsk=l['Task ID']
            pid=int(l['Executor ID'])*100+hostid
            self.pids.append(pid)
            stime=l['Launch Time']
            #the task's starttime and finishtime is the same, ignore it.
            if tsk in ended_event:
                continue
            if not pid in coretrack:
                tids={}
                # First task on this executor: emit process-name metadata.
                trace_events.append({
                    "name": "process_name",
                    "ph": "M",
                    "pid":pid,
                    "tid":0,
                    "args":{"name":"{:s}.{:s}".format(l['Host'],l['Executor ID'])}
                })
            else:
                tids=coretrack[pid]
            # Reuse the first free slot, else open a new one.
            for t in tids.keys():
                if tids[t][0]==-1:
                    tids[t]=[tsk,stime]
                    break
            else:
                t=len(tids)
                tids[t]=[tsk,stime]
            #print("task {:d} tid is {:s}.{:d}".format(tsk,pid,t))
            coretrack[pid]=tids
        if l['Event']=='SparkListenerTaskEnd':
            sevt={}
            eevt={}
            hostid=exec_hosts[l['Host']]
            pid=int(l['Executor ID'])*100+hostid
            tsk=l['Task ID']
            fintime=l['Finish Time']
            tids=coretrack[pid]
            # Free this task's slot; End-before-Start tasks are skipped.
            for t in tids.keys():
                if tids[t][0]==tsk:
                    tids[t]=[-1,-1]
                    break
            else:
                ended_event.append(tsk)
                continue
            # Clamp finish to a task launched <=2ms before it and compact slots.
            for ps in reversed([key for key in tids.keys()]) :
                if tids[ps][1]-fintime<0 and tids[ps][1]-fintime>=-2:
                    fintime=tids[ps][1]
                    tids[t]=tids[ps]
                    tids[ps]=[-1,-1]
                    break
            if starttime==0:
                starttime=l['Launch Time']
                print(f'applog start time: {starttime}')
            sstime=l['Launch Time']-starttime
            trace_events.append({
                'tid':pid+int(t),
                'ts':sstime,
                'dur':fintime-l['Launch Time'],
                'pid':pid,
                "ph":'X',
                'name':"stg{:d}".format(l['Stage ID']),
                'args':{"job id": l['Job ID'],
                        "stage id": l['Stage ID'],
                        "tskid":tsk,
                        "input":builtins.round(l["Bytes Read"]/1024/1024,2),
                        "spill":builtins.round(l["Memory Bytes Spilled"]/1024/1024,2),
                        "Shuffle Read Metrics": "",
                        "|---Local Read": builtins.round(l["Local Bytes Read"]/1024/1024,2),
                        "|---Remote Read":builtins.round(l["Remote Bytes Read"]/1024/1024,2),
                        "Shuffle Write Metrics": "",
                        "|---Write":builtins.round(l['Shuffle Bytes Written']/1024/1024,2)
                       }
            })
            tskmap[tsk]={'pid':pid,'tid':pid+int(t)}
    self.starttime=starttime
    self.tskmap=tskmap
    output=[json.dumps(l) for l in trace_events]
    df=self.df
    if showcpu and len(self.metricscollect)>0:
        # Explode accumulator updates into one row per (task, metric id),
        # keep only the collected metric ids, and attach their names/units.
        metricscollect=self.metricscollect
        metrics_explode=df.where("Event='SparkListenerTaskEnd'").withColumn("metrics",F.explode("Accumulables"))
        m1092=metrics_explode.select(F.col("Executor ID"),F.col("`Stage ID`"),"`Task ID`",F.col("`Finish Time`"),F.col("`Launch Time`"),(F.col("`Finish Time`")-F.col("`Launch Time`")).alias("elapsedtime"),"metrics.*").where(F.col("ID").isin([l[0] for l in metricscollect]))
        metric_name_df = spark.createDataFrame(metricscollect)
        metric_name_df=metric_name_df.withColumnRenamed("_1","ID")
        metric_name_df=metric_name_df.withColumnRenamed("_2","unit")
        metric_name_df=metric_name_df.withColumnRenamed("_3","mname")
        met_df=m1092.join(metric_name_df,on="ID")
        # nsTiming metrics are nanoseconds -> convert to ms; others pass through.
        met_df=met_df.withColumn("Update",F.when(F.col("unit")=='nsTiming',F.col("Update")/1000000).otherwise(F.col("Update")+0))
        met_df=met_df.where("Update>1")
        # Scale each task's metric durations so their sum fits the task's
        # elapsed time, then lay them end-to-end (cum_sum) from Launch Time.
        metdfx=met_df.groupBy("Task ID","elapsedtime").agg(F.sum("Update").alias("totalCnt"))
        taskratio=metdfx.withColumn("ratio",F.when(F.col("totalCnt")<F.col("elapsedtime"),1).otherwise(F.col("elapsedtime")/F.col("totalCnt"))).select("Task ID","ratio")
        met_df=met_df.join(taskratio,on="Task ID")
        met_df=met_df.withColumn("Update",F.col("Update")*F.col("ratio"))
        w = (Window.partitionBy('Task ID').orderBy(F.desc("Update")).rangeBetween(Window.unboundedPreceding, 0))
        met_df=met_df.withColumn('cum_sum', F.sum('Update').over(w))
        met_df=met_df.withColumn("starttime",F.col("Launch Time")+F.col("cum_sum")-F.col("Update"))
        # Map each task back to the pid/tid assigned in the loop above.
        tskmapdf = spark.createDataFrame(pandas.DataFrame(self.tskmap).T.reset_index())
        met_df=met_df.join(tskmapdf,on=[met_df["Task ID"]==tskmapdf["index"]])
        rstdf=met_df.select(
            F.col("tid"),
            F.round(F.col("starttime")-self.starttime,0).alias("ts"),
            F.round(F.col("Update"),0).alias("dur"),
            F.col("pid"),
            F.lit("X").alias("ph"),
            F.col("mname").alias("name")
        ).where(F.col("ts").isNotNull()).orderBy('ts')
        output.extend(rstdf.toJSON().collect())
    # One instant marker ("i" phase) at each query's first task finish.
    qtime=df.where("Event='SparkListenerTaskEnd'").groupBy("real_queryid").agg(F.min("Finish Time").alias("time"))
    output.extend(qtime.select(
        F.lit("i").alias("ph"),
        (F.col("time")-starttime).alias('ts'),
        F.lit(0).alias("pid"),
        F.lit(0).alias("tid"),
        F.lit("p").alias("s")
    ).toJSON().collect())
    self.starttime=starttime
    if kwargs.get("show_criticalshow_time_metric_path",True):
        output.extend(self.generate_critical_patch_traceview(hostid-1))
    return output
def generate_critical_patch_traceview(self,pid):
    """Emit trace events for the critical-path track (one synthetic process).

    Produces, on the given pid / tid 38: one 'X' event per critical task, one
    per query span, and a per-metric time breakdown of each critical task
    (metric durations scaled to fit the task's elapsed time and laid
    end-to-end, largest first).  Returns a list of JSON strings.
    """
    if self.df is None:
        self.load_data()
    traces=[]
    df=self.df.where("Event='SparkListenerTaskEnd' and real_queryid is not null")
    # self.criticaltasks rows: (task_id, launch, finish) -- join back to the
    # full task-end rows to recover stage/executor/host.
    criticaltasks=self.criticaltasks
    cripds=pandas.DataFrame(criticaltasks)
    cripds.columns=['task_id',"launch","finish"]
    cridf=spark.createDataFrame(cripds)
    df_ctsk=df.join(cridf,on=[F.col("task_id")==F.col("Task ID")],how="inner")
    # Per-critical-task span (+1/-1 ms trims so breakdown events fit inside).
    traces.extend(df_ctsk.select(F.lit(38).alias("tid"),
                      (F.col("launch")-F.lit(self.starttime)+1).alias("ts"),
                      (F.col("finish")-F.col("launch")-1).alias("dur"),
                      F.lit(pid).alias("pid"),
                      F.lit("X").alias("ph"),
                      F.concat(F.lit("stg"),F.col("Stage ID")).alias("name"),
                      F.struct(
                          F.col("Task ID").alias('taskid'),
                          F.col("Executor ID").astype(IntegerType()).alias('exec_id'),
                          F.col("Host").alias("host"),
                      ).alias("args")
                     ).toJSON().collect())
    # Per-query span, from first launch to last finish.
    traces.extend(df.groupBy("real_queryid").agg(F.max("Finish Time").alias("finish"),F.min("Launch Time").alias("launch")).select(
        F.lit(38).alias("tid"),
        (F.col("launch")-F.lit(self.starttime)).alias("ts"),
        (F.col("finish")-F.col("launch")).alias("dur"),
        F.lit(pid).alias("pid"),
        F.lit("X").alias("ph"),
        F.concat(F.lit("qry"),F.col("real_queryid")).alias("name")).toJSON().collect())
    # Metric breakdown of the critical tasks only.
    metricscollect=self.metricscollect
    metrics_explode=df_ctsk.where("Event='SparkListenerTaskEnd'").withColumn("metrics",F.explode("Accumulables"))
    m1092=metrics_explode.select(F.col("Executor ID"),F.col("`Stage ID`"),"`Task ID`",F.col("`Finish Time`"),F.col("`Launch Time`"),(F.col("`Finish Time`")-F.col("`Launch Time`")).alias("elapsedtime"),"metrics.*").where(F.col("ID").isin([l[0] for l in metricscollect]))
    metric_name_df = spark.createDataFrame(metricscollect)
    metric_name_df=metric_name_df.withColumnRenamed("_1","ID")
    metric_name_df=metric_name_df.withColumnRenamed("_2","unit")
    metric_name_df=metric_name_df.withColumnRenamed("_3","mname")
    metric_name_df=metric_name_df.withColumnRenamed("_4","node")
    metric_name_df=metric_name_df.where("mname <> 'totaltime_collectbatch'")
    met_df=m1092.join(metric_name_df,on="ID")
    # nsTiming metrics are nanoseconds -> ms; others pass through unchanged.
    met_df=met_df.withColumn("Update",F.when(F.col("unit")=='nsTiming',F.col("Update")/1000000).otherwise(F.col("Update")+0))
    #pandas UDF doesn't work. hang
    #tmbk=met_df.groupBy('Task ID').apply(time_breakdown)
    w=Window.partitionBy('Task ID')
    met_df1=met_df.withColumn("sum_update",F.sum("Update").over(w))
    # ratio <= 1 shrinks metric durations to fit within the task duration.
    met_df2=met_df1.withColumn("ratio",(F.col("Finish Time")-F.col("Launch Time")-2)/F.col("sum_update"))
    met_df3=met_df2.withColumn("ratio",F.when(F.col("ratio")>1,1).otherwise(F.col("ratio")))
    met_df4=met_df3.withColumn("update_ratio",F.floor(F.col("ratio")*F.col("Update")))
    met_df5=met_df4.where(F.col("update_ratio")>2)
    # Running sum (largest metric first) positions each metric sequentially.
    w = (Window.partitionBy('Task ID').orderBy(F.desc("update_ratio")).rowsBetween(Window.unboundedPreceding, Window.currentRow))
    met_df6=met_df5.withColumn('ltime_dur', F.sum('update_ratio').over(w))
    met_df8=met_df6.withColumn("ltime",F.col("ltime_dur")+F.col("Launch Time")-F.col("update_ratio"))
    tmbk=met_df8.withColumn("taskid",F.col("Task ID")).withColumn("start",F.col("ltime")+F.lit(1)).withColumn("dur",F.col("update_ratio")-F.lit(1)).withColumn("name",F.col("mname"))
    traces.extend(tmbk.select(
        F.lit(38).alias("tid"),
        (F.col("start")-F.lit(self.starttime)).alias("ts"),
        (F.col("dur")).alias("dur"),
        F.lit(pid).alias("pid"),
        F.lit("X").alias("ph"),
        F.col("name").alias("name")).toJSON().collect())
    # Name the synthetic process so the viewer labels the track.
    traces.append(json.dumps({
        "name": "process_name",
        "ph": "M",
        "pid":pid,
        "tid":0,
        "args":{"name":"critical path"}
    }))
    return traces
def show_Stage_histogram(apps,stageid,bincount):
    """Plot distribution views for one stage's tasks: histograms of elapsed
    time and input size, per-host scatter + violin plots, and (disabled)
    a 2-means clustering of the tasks.

    Note: ``bincount`` is accepted but the histograms below hard-code 15 bins.
    """
    if apps.df is None:
        apps.load_data()
    # Per-task input size from the 'input size in bytes' / 'size of files
    # read' accumulators of this stage.
    inputsize = apps.df.where("`Stage ID`={:d}".format(stageid)).select("Stage ID","Executor ID", "Task ID", F.explode("Accumulables")) \
           .select("Stage ID","Executor ID", "Task ID","col.*") \
           .where("Name='input size in bytes' or Name='size of files read'") \
           .groupBy("Task ID") \
           .agg((F.sum("Update")).alias("input read"))
    # Task-end rows with elapsed time (s) and total input (MB: accumulator
    # input + bytes read + local/remote shuffle read).
    stage37=apps.df.where("`Stage ID`={:d} and event='SparkListenerTaskEnd'".format(stageid) )\
                .join(inputsize,on=["Task ID"],how="left")\
                .fillna(0) \
                .select(F.col('Host'),
                        F.round((F.col('Finish Time')/1000-F.col('Launch Time')/1000),2).alias('elapsedtime'),
                        F.round((F.col('`input read`')+F.col('`Bytes Read`')+F.col('`Local Bytes Read`')+F.col('`Remote Bytes Read`'))/1024/1024,2).alias('input'))
    stage37=stage37.cache()
    hist_elapsedtime=stage37.select('elapsedtime').rdd.flatMap(lambda x: x).histogram(15)
    hist_input=stage37.select('input').rdd.flatMap(lambda x: x).histogram(15)
    # Two histogram bar charts side by side.
    fig, axs = plt.subplots(figsize=(30, 5),nrows=1, ncols=2)
    ax=axs[0]
    binSides, binCounts = hist_elapsedtime
    binSides=[builtins.round(l,2) for l in binSides]
    N = len(binCounts)
    ind = numpy.arange(N)
    width = 0.5
    rects1 = ax.bar(ind+0.5, binCounts, width, color='b')
    ax.set_ylabel('Frequencies')
    ax.set_title('stage{:d} elapsed time breakdown'.format(stageid))
    ax.set_xticks(numpy.arange(N+1))
    ax.set_xticklabels(binSides)
    ax=axs[1]
    binSides, binCounts = hist_input
    binSides=[builtins.round(l,2) for l in binSides]
    N = len(binCounts)
    ind = numpy.arange(N)
    width = 0.5
    rects1 = ax.bar(ind+0.5, binCounts, width, color='b')
    ax.set_ylabel('Frequencies')
    ax.set_title('stage{:d} input data breakdown'.format(stageid))
    ax.set_xticks(numpy.arange(N+1))
    ax.set_xticklabels(binSides)
    # Scatter (input vs elapsed, one series per host) + per-host violins.
    out=stage37
    outpds=out.toPandas()
    fig, axs = plt.subplots(nrows=1, ncols=3, sharey=False,figsize=(30,8),gridspec_kw = {'width_ratios':[1, 1, 1]})
    plt.subplots_adjust(wspace=0.01)
    groups= outpds.groupby('Host')
    for name, group in groups:
        axs[0].plot(group.input, group.elapsedtime, marker='o', linestyle='', ms=5, label=name)
    axs[0].set_xlabel('input size (MB)')
    axs[0].set_ylabel('elapsed time (s)')
    axs[0].legend()
    axs[0].get_shared_y_axes().join(axs[0], axs[1])
    sns.violinplot(y='elapsedtime', x='Host', data=outpds,palette=['g'],ax=axs[1])
    sns.violinplot(y='input', x='Host', data=outpds,palette=['g'],ax=axs[2])
    #ax.xaxis.set_major_formatter(mtick.FormatStrFormatter(''))
    #ax.yaxis.set_major_formatter(mtick.FormatStrFormatter(''))
    # Disabled experiment: KMeans(k=2) clustering of (input, elapsedtime).
    if False:
        out=stage37
        vecAssembler = VectorAssembler(inputCols=["input",'elapsedtime'], outputCol="features").setHandleInvalid("skip")
        new_df = vecAssembler.transform(out)
        kmeans = KMeans(k=2, seed=1)  # 2 clusters here
        model = kmeans.fit(new_df.select('features'))
        transformed = model.transform(new_df)
        outpds=transformed.select('Host','elapsedtime','input','prediction').toPandas()
        fig, axs = plt.subplots(nrows=1, ncols=2, sharey=False,figsize=(30,8),gridspec_kw = {'width_ratios':[1, 1]})
        plt.subplots_adjust(wspace=0.01)
        groups= outpds.groupby('prediction')
        for name, group in groups:
            axs[0].plot(group.input, group.elapsedtime, marker='o', linestyle='', ms=5, label=name)
        axs[0].legend()
        bars=transformed.where('prediction=1').groupBy("Host").count().toPandas()
        axs[1].bar(bars['Host'], bars['count'], 0.4, color='coral')
        axs[1].set_title('cluster=1')
    plt.show()
def show_Stages_hist(apps,**kwargs):
    """Plot the cumulative task-time share per stage, then draw detailed
    histograms (apps.show_Stage_histogram) for the hottest stages that
    together cover ``threshold`` of the total task time.

    Keyword args:
        bincount:  bin count forwarded to show_Stage_histogram (default 15).
        threshold: cumulative-share cutoff in [0,1] (default 0.9).
        queryid:   optional query id (int or list) to restrict the analysis.
    """
    if apps.df is None:
        apps.load_data()
    bincount = kwargs.get("bincount", 15)
    threshold = kwargs.get("threshold", 0.9)
    query = kwargs.get("queryid", None)
    # Normalize a scalar query id into a one-element list.
    if query and type(query) is int:
        query = [query]
    df = apps.df if not query else apps.df.where(F.col("real_queryid").isin(query))
    ended = df.where("event='SparkListenerTaskEnd'")
    elapsed = F.sum(F.col('Finish Time') - F.col('Launch Time')).alias('total_time')
    totaltime = ended.agg(elapsed).collect()[0]['total_time']
    stage_time = ended.groupBy('`Stage ID`').agg(elapsed) \
        .orderBy('total_time', ascending=False).toPandas()
    stage_time['acc_total'] = stage_time['total_time'].cumsum() / totaltime
    stage_time = stage_time.reset_index()
    # Cumulative-share curve, stages ordered hottest first.
    fig, ax = plt.subplots(figsize=(30, 5))
    ax.plot(stage_time['index'], stage_time['acc_total'], 'b.-')
    ax.set_xticks(stage_time['index'])
    ax.set_xticklabels(stage_time['Stage ID'])
    ax.set_xlabel('stage')
    ax.grid(which='major', axis='x')
    plt.show()
    # Take stages until the cumulative share first exceeds the threshold;
    # the stage that crosses it is still included.
    shownstage = []
    for pos in stage_time.index:
        shownstage.append(stage_time['Stage ID'][pos])
        if stage_time['acc_total'][pos] > threshold:
            break
    for stageid in shownstage:
        apps.show_Stage_histogram(stageid, bincount)
def get_hottest_stages(apps,**kwargs):
    """Rank stages by normalized total task time and return the ranking table.

    Per-stage task time is summed and divided by the effective slot count
    (capped at executor_instances*executor_cores/taskcpus) to approximate
    wall-clock contribution.  Optionally plots the stages that cumulatively
    account for up to ``threshold`` of the total, and displays the full
    table color-coded by query id.

    Keyword args:
        bincount:  accepted for API symmetry with show_Stages_hist; unused.
        threshold: cumulative-share cutoff for the bar plot (default 0.9).
        plot:      draw the bar chart and styled table (default True).
        queryid:   restrict to one query id (int) or a list of ids.

    Returns: pandas DataFrame ordered by descending total_time.
    """
    if apps.df is None:
        apps.load_data()
    bincount=kwargs.get("bincount",15)
    threshold=kwargs.get("threshold",0.9)
    plot=kwargs.get("plot",True)
    query=kwargs.get("queryid",None)
    # Normalize a scalar query id into a one-element list.
    if query and type(query)==int:
        query = [query,]
    df=apps.df.where(F.col("real_queryid").isin(query)) if query else apps.df.where("queryid is not NULL")
    stage_time=df.where("event='SparkListenerTaskEnd'" ).groupBy('`Stage ID`','Job ID','real_queryid').agg(
        F.sum(F.col('Finish Time')-F.col('Launch Time')).alias('total_time'),
        F.stddev(F.col('Finish Time')/1000-F.col('Launch Time')/1000).alias('stdev_time'),
        F.count("*").alias("cnt"),
        F.first('queryid').astype(IntegerType()).alias('queryid')
    )\
    .select('`Stage ID`','Job ID','real_queryid','queryid',
            (F.col("total_time")/1000/(F.when(F.col("cnt")>F.lit(apps.executor_instances*apps.executor_cores/apps.taskcpus),F.lit(apps.executor_instances*apps.executor_cores/apps.taskcpus)).otherwise(F.col("cnt")))).alias("total_time"),
            F.col("stdev_time")
    ).orderBy('total_time', ascending=False).toPandas()
    totaltime=stage_time['total_time'].sum()
    stage_time['acc_total'] = stage_time['total_time'].cumsum()/totaltime
    stage_time['total'] = stage_time['total_time']/totaltime
    stage_time=stage_time.reset_index()
    # BUGFIX: .copy() so the 'stg' column below is written to an independent
    # frame instead of a view of stage_time (chained assignment on a .loc
    # slice triggers SettingWithCopyWarning and is unreliable under
    # pandas copy-on-write).
    shownstage=stage_time.loc[stage_time['acc_total'] <=threshold].copy()
    shownstage['stg']=shownstage['real_queryid'].astype(str)+'_'+shownstage['Job ID'].astype(str)+'_'+shownstage['Stage ID'].astype(str)
    if plot:
        shownstage.plot.bar(x="stg",y="total",figsize=(30,8))
    # Color each row of the styled table by its query id.
    norm = matplotlib.colors.Normalize(vmin=0, vmax=max(stage_time.queryid))
    cmap = matplotlib.cm.get_cmap('brg')
    def setbkcolor(x):
        # One style string per column -- assumes the frame has 9 columns.
        rgba=cmap(norm(x['queryid']))
        return ['background-color:rgba({:d},{:d},{:d},1); color:white'.format(int(rgba[0]*255),int(rgba[1]*255),int(rgba[2]*255))]*9
    if plot:
        display(stage_time.style.apply(setbkcolor,axis=1).format({"total_time":lambda x: '{:,.2f}'.format(x),"acc_total":lambda x: '{:,.2%}'.format(x),"total":lambda x: '{:,.2%}'.format(x)}))
    return stage_time
def scatter_elapsetime_input(apps,stageid):
    """Scatter-plot task elapsed time (s) vs input size (MB) for one stage."""
    if apps.df is None:
        apps.load_data()
    # Completed tasks of the requested stage only.
    tasks = apps.df.where("`Stage ID`={:d} and event='SparkListenerTaskEnd'".format(stageid))
    elapsed_col = F.round((F.col('Finish Time')/1000-F.col('Launch Time')/1000), 2).alias('elapsedtime')
    # Input = file bytes read + local and remote shuffle bytes, in MB.
    input_col = F.round((F.col('`Bytes Read`')+F.col('`Local Bytes Read`')+F.col('`Remote Bytes Read`'))/1024/1024, 2).alias('input')
    pdf = tasks.select(elapsed_col, input_col).toPandas()
    pdf.plot.scatter('input', 'elapsedtime', figsize=(30, 5))
def get_critical_path_stages(self):
    """Return the critical-path tasks that ran longer than 10s, as a pandas
    DataFrame ordered by descending elapsed time (seconds), with file-read
    and shuffle-read sizes in MB (rounded).
    """
    df=self.df.where("Event='SparkListenerTaskEnd'")
    # self.criticaltasks rows: (task_id, launch, finish); round-trip through
    # pandas -> Spark so they can be joined against the event rows.
    criticaltasks=self.criticaltasks
    cripds=pandas.DataFrame(criticaltasks)
    cripds.columns=['task_id',"launch","finish"]
    cridf=spark.createDataFrame(cripds)
    df_ctsk=df.join(cridf,on=[F.col("task_id")==F.col("Task ID")],how="inner")
    # Elapsed time in seconds (event times are in ms).
    df_ctsk=df_ctsk.withColumn("elapsed",(F.col("Finish Time")-F.col("Launch Time"))/1000)
    return df_ctsk.where("elapsed>10").orderBy(F.desc("elapsed")).select("real_queryid",F.round("elapsed",2).alias("elapsed"),"Host","executor ID","Stage ID","Task ID",F.round(F.col("Bytes Read")/1000000,0).alias("file read"),F.round((F.col("Local Bytes Read")+F.col("Remote Bytes Read"))/1000000,0).alias("shuffle read")).toPandas()
def show_time_metric(self,**kwargs):
    """Break task time down into the collected accumulator metrics.

    Builds a per-executor table of summed metric durations (ms -> s), adds
    synthetic 'idle' and 'not_counted' rows, optionally plots a stacked bar
    per executor and an aggregated, percentage-labelled summary bar.

    Keyword args:
        shownodes:    restrict to these hosts.
        queryid:      restrict to one query id (int) or a list.
        plot:         draw the charts (default True).
        taskids:      restrict to these task ids (disables per-executor view
                      and normalization by executor/core counts).
        showexecutor: draw the per-executor stacked bar (default True,
                      forced False when taskids is given).

    Returns: single-row pandas DataFrame, columns '<pct>%_<metric>'.
    """
    if self.df is None:
        self.load_data()
    shownodes=kwargs.get("shownodes",None)
    query=kwargs.get("queryid",None)
    plot=kwargs.get("plot",True)
    taskids=kwargs.get("taskids",None)
    if query and type(query)==int:
        query = [query,]
    showexecutor=kwargs.get("showexecutor",True) if not taskids else False
    queryid = query[0] if query else 0
    df=self.df.where(F.col("Host").isin(shownodes)) if shownodes else self.df
    df=df.where(F.col("real_queryid").isin(query)) if query else df.where("queryid is not NULL")
    df=df.where(F.col("Task ID").isin(taskids)) if taskids else df
    # With an explicit task subset there is no meaningful slot count.
    exec_cores=1 if taskids else self.executor_cores
    execs=1 if taskids else self.executor_instances
    # Explode accumulator updates; keep only the collected metric ids and
    # attach their (unit, name) from self.metricscollect.
    metricscollect=self.metricscollect
    metrics_explode=df.where("Event='SparkListenerTaskEnd'").withColumn("metrics",F.explode("Accumulables"))
    m1092=metrics_explode.select(F.col("Executor ID"),F.col("`Stage ID`"),"`Task ID`",F.col("`Finish Time`"),F.col("`Launch Time`"),(F.col("`Finish Time`")-F.col("`Launch Time`")).alias("elapsedtime"),"metrics.*").where(F.col("ID").isin([l[0] for l in metricscollect]))
    metric_name_df = spark.createDataFrame(metricscollect)
    metric_name_df=metric_name_df.withColumnRenamed("_1","ID")
    metric_name_df=metric_name_df.withColumnRenamed("_2","unit")
    metric_name_df=metric_name_df.withColumnRenamed("_3","mname")
    metric_name_df=metric_name_df.withColumnRenamed("_4","node")
    runtime=metrics_explode.agg(F.round(F.max("Finish Time")/1000-F.min("Launch Time")/1000,2).alias("runtime")).collect()[0]["runtime"]
    met_df=m1092.join(metric_name_df,on="ID")
    # nsTiming metrics are nanoseconds -> ms; others pass through unchanged.
    met_df=met_df.withColumn("Update",F.when(F.col("unit")=='nsTiming',F.col("Update")/1000000).otherwise(F.col("Update")+0))
    outpdf=met_df.groupBy("`Executor ID`","mname").sum("Update").orderBy("Executor ID").toPandas()
    met_time_cnt=df.where("Event='SparkListenerTaskEnd'")
    exectime=met_time_cnt.groupBy("Executor ID").agg((F.max("Finish Time")-F.min("Launch Time")).alias("totaltime"),F.sum(F.col("`Finish Time`")-F.col("`Launch Time`")).alias("tasktime"))
    totaltime_query=met_time_cnt.groupBy("real_queryid").agg((F.max("Finish Time")-F.min("Launch Time")).alias("totaltime")).agg(F.sum("totaltime").alias("totaltime")).collect()
    totaltime_query=totaltime_query[0]["totaltime"]
    pdf=exectime.toPandas()
    exeids=set(outpdf['Executor ID'])
    outpdfs=[outpdf[outpdf["Executor ID"]==l] for l in exeids]
    tasktime=pdf.set_index("Executor ID").to_dict()['tasktime']
    def comb(l,r):
        # Merge one executor's metric sums (r) into the accumulated frame (l),
        # adding 'idle' (slot time not running tasks) and 'not_counted'
        # (task time not explained by any metric), both clamped at 0.
        execid=list(r['Executor ID'])[0]
        lp=r[['mname','sum(Update)']]
        lp.columns=["mname","val_"+execid]
        idle=totaltime_query*exec_cores-tasktime[execid]
        nocount=tasktime[execid]-sum(lp["val_"+execid])
        if idle<0:
            idle=0
        if nocount<0:
            nocount=0
        # BUGFIX: DataFrame.append was removed in pandas 2.0; build the two
        # synthetic rows and concat them instead (same rows, same order).
        extra=pandas.DataFrame([{"mname":"idle","val_"+execid:idle},
                                {"mname":"not_counted","val_"+execid:nocount}])
        lp=pandas.concat([lp,extra],ignore_index=True)
        if l is not None:
            return pandas.merge(lp, l,on=["mname"],how='outer')
        else:
            return lp
    rstpdf=None
    for l in outpdfs[0:]:
        rstpdf=comb(rstpdf,l)
    # ms -> s, normalized per core.
    for l in [l for l in rstpdf.columns if l!="mname"]:
        rstpdf[l]=rstpdf[l]/1000/exec_cores
    rstpdf=rstpdf.sort_values(by="val_"+list(exeids)[0],axis=0,ascending=False)
    if showexecutor and plot:
        rstpdf.set_index("mname").T.plot.bar(stacked=True,figsize=(30,8))
    # Aggregate across executors into a single summary column.
    pdf_sum=pandas.DataFrame(rstpdf.set_index("mname").T.sum())
    totaltime=totaltime_query/1000
    pdf_sum[0]=pdf_sum[0]/(execs)
    # BUGFIX: use .loc instead of chained assignment (pdf_sum[0]["idle"]=...),
    # which is unreliable under pandas copy-on-write; the 'idle' row always
    # exists because comb() adds it.
    pdf_sum.loc["idle",0]=(totaltime_query-sum(tasktime.values())/execs/exec_cores)/1000
    pdf_sum=pdf_sum.sort_values(by=0,axis=0,ascending=False)
    pdf_sum=pdf_sum.T
    # Label each column with its share of the total query time.
    pdf_sum.columns=["{:>2.0f}%_{:s}".format(pdf_sum[l][0]/totaltime*100,l) for l in pdf_sum.columns]
    matplotlib.rcParams['font.sans-serif'] = "monospace"
    matplotlib.rcParams['font.family'] = "monospace"
    import matplotlib.font_manager as font_manager
    if plot:
        ax=pdf_sum.plot.bar(stacked=True,figsize=(30,8))
        font = font_manager.FontProperties(family='monospace',
                                           style='normal', size=14)
        ax.legend(prop=font,loc=4)
        plt.title("{:s} q{:d} executors={:d} cores_per_executor={:d} parallelism={:d} sumtime={:.0f} runtime={:.0f}".format(self.file.split("/")[2],queryid,self.executor_instances,self.executor_cores,self.parallelism,totaltime,runtime),fontdict={'fontsize':24})
    return pdf_sum
def show_critical_path_time_breakdown(self,**kwargs):
    """Run show_time_metric restricted to the critical-path tasks.

    self.criticaltasks rows start with the task id as a numpy scalar;
    .item() unwraps each to a plain Python int.
    """
    if self.df is None:
        self.load_data()
    critical_ids = [row[0].item() for row in self.criticaltasks]
    return self.show_time_metric(taskids=critical_ids)
def get_spark_config(self):
    """Return every Spark property of this app as a transposed pandas frame.

    Side effects: caches self.appid from the first event carrying an App ID,
    and widens pandas display options so the full table prints.
    """
    raw = spark.read.json(self.file)
    self.appid = raw.where("`App ID` is not null").collect()[0]["App ID"]
    # Let pandas render the complete (wide and long) property table.
    for option, value in (('display.max_rows', None),
                          ('display.max_columns', None),
                          ('display.max_colwidth', 100000)):
        pandas.set_option(option, value)
    props = raw.select("Properties.*").where("`spark.app.id` is not null").limit(1)
    return props.toPandas().T
def get_app_name(self):
    """Display the spark.app.name property as a large red HTML heading."""
    cfg = self.get_spark_config()
    app_name = cfg.loc[cfg.index == 'spark.app.name'][0][0]
    display(HTML("<font size=5 color=red>" + app_name + "</font>"))
def get_query_time(self,**kwargs):
    """Aggregate per-query runtime, spill, shuffle, GC and input statistics.

    Returns a pandas DataFrame indexed by real_queryid (one row per query),
    and optionally displays it with inline bar styling for the time columns
    and a green gradient on input/shuffle sizes.

    Keyword args:
        queryid:   restrict to one query id (int) or a list of ids.
        showtable: if False, return the frame without any display (default True).
        plot:      display the styled table (default True).
    """
    if self.df is None:
        self.load_data()
    queryid=kwargs.get("queryid",None)
    showtable=kwargs.get("showtable",True)
    plot=kwargs.get("plot",True)
    # Normalize a scalar query id into a one-element list.
    if queryid and type(queryid)==int:
        queryid = [queryid,]
    df=self.df.where(F.col("real_queryid").isin(queryid)) if queryid else self.df.where("queryid is not NULL")
    # Sorted list of stage ids belonging to each query.
    stages=df.select("real_queryid","Stage ID").distinct().orderBy("Stage ID").groupBy("real_queryid").agg(F.collect_list("Stage ID").alias("stages")).orderBy("real_queryid")
    # Accumulated task time normalized to the cluster's task-slot count (s).
    runtimeacc=df.where("Event='SparkListenerTaskEnd'") \
       .groupBy("real_queryid") \
       .agg(F.round(F.sum(F.col("Finish Time")-F.col("Launch Time"))/1000/self.executor_instances/self.executor_cores*self.taskcpus,2).alias("acc_task_time"))
    # Input size (GB) from the input-size accumulators.
    inputsize = df.select("real_queryid","Stage ID","Executor ID", "Task ID", F.explode("Accumulables")) \
       .select("real_queryid","Stage ID","Executor ID", "Task ID","col.*") \
       .where("Name='input size in bytes' or Name='size of files read'") \
       .groupBy("real_queryid") \
       .agg(F.round(F.sum("Update")/1024/1024/1024,2).alias("input read")).orderBy("real_queryid")
    if self.dfacc is not None:
        # Fall back to the v1 accumulator table where the first source is null.
        inputsizev1 = self.dfacc.where("Name='size of files read'").groupBy("real_queryid").agg(F.round(F.sum("Update")/1024/1024/1024,2).alias("input read v1")).orderBy("real_queryid")
        inputsize=inputsize.join(inputsizev1,on="real_queryid",how="outer")
        inputsize=inputsize.withColumn("input read",F.coalesce(F.col("input read"),F.col("input read v1"))).drop("input read v1")
    # Output rows in billions.
    outputrows = df.select("real_queryid","Stage ID","Stage ID",F.explode("Accumulables"))\
       .select("real_queryid","Stage ID","Stage ID","col.*")\
       .where("Name='number of output rows'")\
       .groupBy("real_queryid")\
       .agg(F.round(F.sum("Update")/1000000000,2).alias("output rows"))
    stages=runtimeacc.join(stages,on="real_queryid",how="left")
    stages=inputsize.join(stages,on="real_queryid",how="left")
    stages=stages.join(outputrows,on='real_queryid',how="left")
    # Main per-query aggregation: times in s, sizes in GB, normalized by
    # self.parallelism where noted.
    out=df.groupBy("real_queryid").agg(
        F.round(F.max("job_stop_time")/1000-F.min("job_start_time")/1000,2).alias("runtime"),
        F.round(F.sum("Disk Bytes Spilled")/1024/1024/1024,2).alias("disk spilled"),
        F.round(F.sum("Memory Bytes Spilled")/1024/1024/1024,2).alias("memspilled"),
        F.round(F.sum("Local Bytes Read")/1024/1024/1024,2).alias("local_read"),
        F.round(F.sum("Remote Bytes Read")/1024/1024/1024,2).alias("remote_read"),
        F.round(F.sum("Shuffle Bytes Written")/1024/1024/1024,2).alias("shuffle_write"),
        F.round(F.sum("Executor Deserialize Time")/1000/self.parallelism,2).alias("deser_time"),
        F.round(F.sum("Executor Run Time")/1000/self.parallelism,2).alias("run_time"),
        F.round(F.sum("Result Serialization Time")/1000/self.parallelism,2).alias("ser_time"),
        F.round(F.sum("Fetch Wait Time")/1000/self.parallelism,2).alias("f_wait_time"),
        F.round(F.sum("JVM GC Time")/1000/self.parallelism,2).alias("gc_time"),
        F.round(F.max("Peak Execution Memory")/1000000000*self.executor_instances*self.executor_cores,2).alias("peak_mem"),
        F.max("queryid").alias("queryid")
    ).join(stages,"real_queryid",how="left").orderBy("real_queryid").toPandas().set_index("real_queryid")
    out["executors"]=self.executor_instances
    out["core/exec"]=self.executor_cores
    out["task.cpus"]=self.taskcpus
    out['parallelism']=self.parallelism
    if not showtable:
        return out
    def highlight_greater(x):
        # Inline horizontal bars sized by each column's share of runtime.
        m1 = x['acc_task_time'] / x['runtime'] * 100
        m2 = x['run_time'] / x['runtime'] * 100
        m3 = x['f_wait_time'] / x['runtime'] * 100
        df1 = pandas.DataFrame('', index=x.index, columns=x.columns)
        df1['acc_task_time'] = m1.apply(lambda x: 'background-image: linear-gradient(to right,#5fba7d {:f}%,white {:f}%)'.format(x,x))
        df1['run_time'] = m2.apply(lambda x: 'background-image: linear-gradient(to right,#5fba7d {:f}%,white {:f}%)'.format(x,x))
        df1['f_wait_time'] = m3.apply(lambda x: 'background-image: linear-gradient(to right,#d65f5f {:f}%,white {:f}%)'.format(x,x))
        return df1
    cm = sns.light_palette("green", as_cmap=True)
    if plot:
        display(out.style.apply(highlight_greater, axis=None).background_gradient(cmap=cm,subset=['input read', 'shuffle_write']))
    return out
def get_query_time_metric(self):
    """Show the time metric, one query at a time, for every distinct queryid.

    Loads the event-log data on first use, then calls show_time_metric once
    per queryid with the executor breakdown suppressed.
    """
    if self.df is None:
        self.load_data()
    distinct_rows = self.df.select("queryid").distinct().collect()
    for row in distinct_rows:
        self.show_time_metric(query=[row["queryid"],], showexecutor=False)
def getOperatorCount(self):
    """Count how many times each physical-plan operator appears per query.

    Walks every collected query plan tree and tallies occurrences of each
    nodeName, keyed by real_queryid.

    Returns:
        pandas.DataFrame: index = operator names (sorted), columns =
        real_queryid, values = occurrence counts (0 when absent).
    """
    if self.df is None:
        self.load_data()
    # BUG FIX: removed dead `df = spark.read.json(self.file)` — it re-read
    # the entire event log and the result was never used.
    queryids = self.df.select(F.col("queryid").astype(LongType()), F.col("real_queryid")).distinct().orderBy("real_queryid")
    list_queryid = [l.real_queryid for l in queryids.collect()]
    qps = {}
    def count_nodes(execid, node):
        # One counter row per operator name; every query gets a zero slot so
        # the resulting DataFrame has no NaNs.
        if node["nodeName"] not in qps:
            qps[node["nodeName"]] = {l: 0 for l in list_queryid}
        qps[node["nodeName"]][execid] = qps[node["nodeName"]][execid] + 1
        if node["children"] is not None:
            for child in node["children"]:
                count_nodes(execid, child)
    for plan in self.queryplans.collect():
        count_nodes(plan['real_queryid'], plan)
    return pandas.DataFrame(qps).T.sort_index(axis=0)
def get_query_plan(self,**kwargs):
    """Render the physical query plan(s) as an HTML operator table.

    Keyword Args:
        queryid (int | str | list): restrict output to these real_queryid
            values; the stages shown are derived from them.
        stageid (int | list): restrict output to these stage ids; the owning
            queryid is then looked up from the first matching stage.
        outputstage (list): when given, a per-operator summary dict is
            appended to it as a side channel for callers.
        show_plan_only (bool): emit the reduced table (no metric columns).
        show_simple_string (bool): append each node's simpleString to its row.
        plot (bool): display() the HTML table (default True).

    Returns:
        str: the assembled HTML ("" when the total stage time is zero).

    NOTE(review): if neither queryid nor stageid is supplied, shown_stageid
    is never assigned and print_plan will raise NameError — confirm callers
    always pass one of the two.
    """
    if self.df is None:
        self.load_data()
    queryid=kwargs.get("queryid",None)
    stageid=kwargs.get("stageid",None)
    outputstage=kwargs.get("outputstage",None)
    show_plan_only=kwargs.get("show_plan_only",False)
    show_simple_string=kwargs.get("show_simple_string",False)
    plot=kwargs.get("plot",True)
    # One background color per stage, cycling through matplotlib's tab20.
    colors=["#{:02x}{:02x}{:02x}".format(int(l[0]*255),int(l[1]*255),int(l[2]*255)) for l in matplotlib.cm.get_cmap('tab20').colors]
    if queryid is not None:
        if type(queryid)==int or type(queryid)==str:
            queryid = [queryid,]
        shown_stageid = [l["Stage ID"] for l in self.df.where(F.col("real_queryid").isin(queryid)).select("Stage ID").distinct().collect()]
    if stageid is not None:
        if type(stageid)==int:
            shown_stageid = [stageid,]
        elif type(stageid)==list:
            shown_stageid = stageid
        queryid = [l["real_queryid"] for l in self.df.where(F.col("`Stage ID`").isin(shown_stageid)).select("real_queryid").limit(1).collect()]
    queryplans=[]
    queryplans = self.queryplans.where(F.col("real_queryid").isin(queryid)).orderBy("real_queryid").collect() if queryid else self.queryplans.orderBy("real_queryid").collect()
    # Per-accumulator totals over task-end events:
    # accumulator ID -> (stage id, summed value, stdev across tasks).
    dfmetric=self.df.where("Event='SparkListenerTaskEnd'").select("queryid","real_queryid","Stage ID","Job ID",F.explode("Accumulables").alias("metric")).select("*","metric.*").select("Stage ID","ID","Update").groupBy("ID","Stage ID").agg(F.round(F.sum("Update"),1).alias("value"),F.round(F.stddev("Update"),1).alias("stdev")).collect()
    accid2stageid={l.ID:(l["Stage ID"],l["value"],l["stdev"]) for l in dfmetric}
    # Per-stage elapsed time normalized to the cluster's parallel task slots.
    stagetime=self.df.where((F.col("real_queryid").isin(queryid))).where(F.col("Event")=='SparkListenerTaskEnd').groupBy("Stage ID").agg(
        F.round(F.sum(F.col("Finish Time")-F.col("Launch Time"))/1000/self.executor_instances/self.executor_cores*self.taskcpus,1).alias("elapsed time"),
        F.round(F.stddev(F.col("Finish Time")-F.col("Launch Time"))/1000,1).alias("time stdev"),
        F.count(F.col("Task ID")).alias("partitions")
    ).orderBy(F.desc("elapsed time")).collect()
    apptotaltime=reduce(lambda x,y: x+y['elapsed time'], stagetime,0)
    if apptotaltime==0:
        display(HTML("<font size=4 color=red>Error, totaltime is 0 </font>"))
        apptotaltime=1
        return ""
    stagemap={l["Stage ID"]:l["elapsed time"] for l in stagetime}
    stage_time_stdev_map={l["Stage ID"]:l["time stdev"] for l in stagetime}
    stagepartmap={l["Stage ID"]:l["partitions"] for l in stagetime}
    # "Key" stages: the longest stages, in descending time order, that
    # together cover >90% of the total elapsed time.
    keystage=[]
    keystagetime=[]
    subtotal=0
    for s in stagetime:
        subtotal=subtotal+s['elapsed time']
        keystage.append(s['Stage ID'])
        keystagetime.append(s['elapsed time'])
        if subtotal/apptotaltime>0.9:
            break
    # Red/green hex component pair scaled by each key stage's share of the
    # slowest stage — used below to tint the operator row's font color.
    keystagetime=["{:02x}{:02x}".format(int(255*l/keystagetime[0]),255-int(255*l/keystagetime[0])) for l in keystagetime if keystagetime[0]>0]
    keystagemap=dict(zip(keystage,keystagetime))
    outstr=[]
    def print_plan(real_queryid,level,node,parent_stageid):
        # Emit one table row for this plan node, then recurse into children.
        # A node's stage is taken from its first accumulator when that
        # accumulator is known; otherwise it inherits the parent's stage id.
        stageid = accid2stageid[int(node["metrics"][0]["accumulatorId"])][0] if node["metrics"] is not None and len(node["metrics"])>0 and node["metrics"][0]["accumulatorId"] in accid2stageid else parent_stageid
        if stageid in shown_stageid:
            fontcolor=f"color:#{keystagemap[stageid]}00;font-weight:bold" if stageid in keystagemap else "color:#000000"
            stagetime=0 if stageid not in stagemap else stagemap[stageid]
            stageParts=0 if stageid not in stagepartmap else stagepartmap[stageid]
            input_rowcntstr=""
            output_rowcntstr=""
            timestr=""
            timename=""
            input_columnarbatch=""
            output_columnarbatch=""
            output_row_batch=""
            other_metric_name_str=""
            other_metric_str=""
            outputrows=0
            outputbatches=0
            if node["metrics"] is not None:
                # Classify each metric: timing metrics, row/batch counters,
                # and everything else ("other"), each rendered differently.
                for m in node["metrics"]:
                    if m["accumulatorId"] not in accid2stageid:
                        continue
                    value=accid2stageid[m["accumulatorId"]][1]
                    stdev_value=accid2stageid[m["accumulatorId"]][2]
                    if m["metricType"] in ['nsTiming','timing']:
                        # 'timing' is milliseconds, 'nsTiming' nanoseconds.
                        totaltime=value/1000 if m["metricType"] == 'timing' else value/1000000000
                        stdev_value=stdev_value/1000 if m["metricType"] == 'timing' else stdev_value/1000000000
                        timename=timename+m["name"]+"<br>"
                        timeratio= 0 if stagetime==0 else totaltime/self.executor_instances/self.executor_cores*self.taskcpus/stagetime*100
                        timeratio_query = totaltime/self.executor_instances/self.executor_cores*self.taskcpus/apptotaltime*100
                        if timeratio > 10 or timeratio_query>10:
                            # Highlight metrics consuming >10% of stage or query time.
                            timestr=timestr+"<font style='background-color:#ffff42'>{:.2f}s ({:.1f}%, {:.1f}%, {:.2f})</font><br>".format(totaltime,timeratio, totaltime/self.executor_instances/self.executor_cores*self.taskcpus/apptotaltime*100,stdev_value)
                        else:
                            timestr=timestr+"{:.2f}s ({:.1f}%, {:.1f}%, {:.2f})<br>".format(totaltime,timeratio, totaltime/self.executor_instances/self.executor_cores*self.taskcpus/apptotaltime*100,stdev_value)
                    elif m["name"]=="number of output rows":
                        output_rowcntstr="{:,.1f}".format(value/1000/1000)+" M"
                        outputrows=value
                    elif m["name"] in ["number of output columnar batches","number of output batches","output_batches"]:
                        output_columnarbatch="{:,d}".format(int(value))
                        outputbatches=value
                    elif m["name"]=="number of input rows":
                        input_rowcntstr="{:,.1f}".format(value/1000/1000)+" M"
                    elif m["name"] in ["number of input batches","number of Input batches","input_batches"]:
                        input_columnarbatch="{:,d}".format(int(value))
                    else:
                        other_metric_name_str=other_metric_name_str+m["name"]+"<br>"
                        if value>1000000000:
                            other_metric_str=other_metric_str+"{:,.1f} G (stdev: {:,.1f})<br>".format(value/1000000000,stdev_value/1000000000)
                        elif value>1000000:
                            other_metric_str=other_metric_str+"{:,.1f} M (stdev: {:,.1f})<br>".format(value/1000000,stdev_value/1000000)
                        elif value>1000:
                            other_metric_str=other_metric_str+"{:,.1f} K (stdev: {:,.1f})<br>".format(value/1000,stdev_value/1000)
                        else:
                            other_metric_str=other_metric_str+"{:,d} (stdev: {:,.1f})<br>".format(int(value),stdev_value)
            if outputrows>0 and outputbatches>0:
                output_row_batch="{:,d}".format(int(outputrows/outputbatches))
            # NOTE(review): fontcolor/stagetime are recomputed here, with the
            # membership test against keystage instead of keystagemap.
            fontcolor=f"color:#{keystagemap[stageid]}00;font-weight:bold" if stageid in keystage else "color:#000000"
            stagetime=0 if stageid not in stagemap else stagemap[stageid]
            stage_time_stdev=0 if stageid not in stage_time_stdev_map else stage_time_stdev_map[stageid]
            nodenamestr=node["nodeName"]
            if nodenamestr is None:
                nodenamestr=""
            # Row/columnar transition operators are highlighted in green.
            if nodenamestr in ['ColumnarToRow','RowToArrowColumnar','ArrowColumnarToRow','ArrowRowToColumnarExec']:
                nodename='<span style="color: green; background-color: #ffff42">'+nodenamestr+'</span>'
            else:
                nodename=nodenamestr
            if outputstage is not None:
                outputstage.append({"queryid":real_queryid,"stageid":stageid,"stagetime":stagetime,"stageParts":stageParts,"nodename":nodenamestr,"output_rowcnt":outputrows,"nodename_level":" ".join(["|_" for l in range(0,level)]) + " " + nodenamestr})
            if not show_plan_only:
                nodestr= " ".join(["|_" for l in range(0,level)]) + " " + nodename
                if show_simple_string :
                    simstr=node['simpleString']
                    nodestr = nodestr + "<br>\n" + simstr
                outstr.append(f"<tr><td style='{fontcolor}'>{stageid}</td>"+
                    f"<td style='{fontcolor}'> {stagetime}({stage_time_stdev}) </td>"+
                    f"<td style='{fontcolor}'> {stageParts} </td>"+
                    f"<td style='text-align:left; background-color:{colors[stageid % 20]}'>" + nodestr + f"</td>"+
                    f"<td style='{fontcolor}'> {input_rowcntstr} </td>"+
                    f"<td style='{fontcolor}'> {input_columnarbatch} </td>"+
                    f"<td style='{fontcolor}'> {output_rowcntstr} </td>"+
                    f"<td style='{fontcolor}'> {output_columnarbatch} </td>"+
                    f"<td style='{fontcolor}'> {output_row_batch} </td>"+
                    f"<td style='{fontcolor}'> {timename} </td>"+
                    f"<td style='{fontcolor}'>{timestr}</td>"+
                    f"<td style='{fontcolor}'> {other_metric_name_str} </td>"+
                    f"<td style='{fontcolor}'>{other_metric_str}</td></tr>")
            else:
                outstr.append(f"<tr><td style='{fontcolor}'>{stageid}</td>"+
                    f"<td style='{fontcolor}'> {stagetime} </td>"+
                    f"<td style='{fontcolor}'> {stageParts} </td>"+
                    f"<td style='text-align:left; background-color:{colors[stageid % 20]}'>" + " ".join(["|_" for l in range(0,level)]) + " " + nodename + f"</td>"+
                    f"<td style='{fontcolor}'> {output_rowcntstr} </td></tr>")
        if node["children"] is not None:
            for c in node["children"]:
                print_plan(real_queryid, level+1,c,stageid)
    for c in queryplans:
        outstr.append("<font color=red size=4>"+str(c['real_queryid'])+"</font><table>")
        if not show_plan_only:
            outstr.append('''<tr>
<td>stage id</td>
<td>stage time</td>
<td>partions</td>
<td>operator</td>
<td>input rows</td>
<td>input batches</td>
<td>output rows</td>
<td>output batches</td>
<td>output rows/batch</td>
<td width=150>time metric name</td>
<td width=200>time(%stage,%total,stdev)</td>
<td width=150>other metric name</td>
<td width=130>value</td>
</tr>''')
        else:
            outstr.append('''<tr>
<td>stage id</td>
<td>stage time</td>
<td>partions</td>
<td>operator</td>
<td>output rows</td>
</tr>''')
        print_plan(c['real_queryid'],0,c,0)
        outstr.append("</table>")
    if plot:
        display(HTML(" ".join(outstr)))
    return " ".join(outstr)
def get_metric_output_rowcnt(self, **kwargs):
    """Per-stage 'number of output rows' totals; see get_metric_rowcnt."""
    metric_name = "number of output rows"
    return self.get_metric_rowcnt(metric_name, **kwargs)
def get_metric_input_rowcnt(self, **kwargs):
    """Per-stage 'number of input rows' totals; see get_metric_rowcnt."""
    metric_name = "number of input rows"
    return self.get_metric_rowcnt(metric_name, **kwargs)
def get_metric_rowcnt(self,rowname, **kwargs):
    """Aggregate a named per-operator counter metric across the event log.

    Args:
        rowname (str | list): metric name(s) to match, e.g.
            'number of output rows'.

    Keyword Args:
        queryid (int | list): restrict to these real_queryid values; when
            given, a per-stage pandas DataFrame is returned.
        stageid (int | list): normalized to a list but otherwise unused here
            — NOTE(review): confirm whether stage filtering was intended.

    Returns:
        pandas.DataFrame or None: per-stage rows (queryid given) or an
        operator-by-query pivot of totals (in millions); None when the
        metric is absent from every plan.
    """
    if self.df is None:
        self.load_data()
    queryid=kwargs.get("queryid",None)
    stageid=kwargs.get("stageid",None)
    if queryid and type(queryid)==int:
        queryid = [queryid,]
    if stageid and type(stageid)==int:
        stageid = [stageid,]
    queryplans = self.queryplans.where(F.col("real_queryid").isin(queryid)).orderBy("real_queryid").collect() if queryid else self.queryplans.orderBy("real_queryid").collect()
    qps=[]
    rownames=rowname if type(rowname)==list else [rowname,]
    def get_child(execid,node):
        # Collect [nodeName, real_queryid, accumulatorId] for the first
        # matching metric of every plan node, depth-first.
        if node['metrics'] is not None:
            outputrows=[x for x in node["metrics"] if "name" in x and x["name"] in rownames]
            if len(outputrows)>0:
                qps.append([node["nodeName"],execid,outputrows[0]['accumulatorId']])
        if node["children"] is not None:
            for c in node["children"]:
                get_child(execid,c)
    for c in queryplans:
        get_child(c['real_queryid'],c)
    if len(qps)==0:
        print("Metric ",rowname," is not found. ")
        return None
    # Per-stage elapsed time normalized to total parallel task slots.
    stagetime=self.df.where("Event='SparkListenerTaskEnd'").groupBy("Stage ID").agg(F.round(F.sum(F.col("Finish Time")-F.col("Launch Time"))/1000/self.executor_instances/self.executor_cores*self.taskcpus,2).alias("stage time"))
    dfmetric=self.df.where("Event='SparkListenerTaskEnd'").select("queryid","real_queryid","Stage ID","Job ID",F.explode("Accumulables").alias("metric")).select("*","metric.*").drop("metric")
    # Join task accumulator updates against the collected accumulator ids.
    numrowmetric=spark.createDataFrame(qps)
    numrowmetric=numrowmetric.withColumnRenamed("_1","metric").withColumnRenamed("_2","real_queryid").withColumnRenamed("_3","metricid")
    dfmetric_rowcnt=dfmetric.join(numrowmetric.drop("real_queryid"),on=[F.col("metricid")==F.col("ID")],how="right")
    # total_row is in millions of rows.
    stagemetric=dfmetric_rowcnt.groupBy("queryid","real_queryid","Job ID","Stage ID","metricid").agg(F.round(F.sum("Update")/1000000,2).alias("total_row"),F.max("metric").alias("nodename")).join(stagetime,"Stage ID")
    if queryid:
        return stagemetric.where(F.col("real_queryid").isin(queryid)).orderBy("Stage ID").toPandas()
    else:
        # Pivot: one row per operator, one column per query.
        noderow=stagemetric.groupBy("real_queryid","nodename").agg(F.round(F.sum("total_row"),2).alias("total_row")).orderBy("nodename").collect()
        out={}
        qids=set([r.real_queryid for r in noderow])
        for r in noderow:
            if r.nodename not in out:
                out[r.nodename]={c:0 for c in qids}
            out[r.nodename][r.real_queryid]=r.total_row
        return pandas.DataFrame(out).T
def get_query_info(self,queryid):
    """Display a full per-query report: time stats, stage stats, plan,
    stage histograms, time metrics, and operator row counts.

    Output goes to the notebook via display(); nothing is returned.
    """
    display(HTML("<font color=red size=7 face='Courier New'><b> time stat info </b></font>",))
    tmp=self.get_query_time(queryid=queryid)
    display(HTML("<font color=red size=7 face='Courier New'><b> stage stat info </b></font>",))
    display(self.get_stage_stat(queryid=queryid))
    display(HTML("<font color=red size=7 face='Courier New'><b> query plan </b></font>",))
    self.get_query_plan(queryid=queryid)
    display(HTML("<font color=red size=7 face='Courier New'><b> stage hist info </b></font>",))
    self.show_Stages_hist(queryid=queryid)
    display(HTML("<font color=red size=7 face='Courier New'><b> time info </b></font>",))
    display(self.show_time_metric(queryid=queryid))
    display(HTML("<font color=red size=7 face='Courier New'><b> operator and rowcount </b></font>",))
    display(self.get_metric_input_rowcnt(queryid=queryid))
    display(self.get_metric_output_rowcnt(queryid=queryid))
def get_app_info(self,**kwargs):
    """Display an application-level report: history-server link, query
    times, operator counts, and operator input/output row counts.

    kwargs are forwarded to get_query_time / get_metric_*_rowcnt /
    show_time_metric (e.g. queryid=...). Output goes to the notebook via
    display(); nothing is returned.
    """
    if self.df is None:
        self.load_data()
    def _show_heatmap(pdf):
        # Shared renderer for the three metric tables below; replaces three
        # copy-pasted style.apply stanzas. Skips None (metric not found).
        if pdf is not None:
            display(pdf.style.apply(background_gradient,
                                    cmap='OrRd',
                                    m=pdf.min().min(),
                                    M=pdf.max().max(),
                                    low=0,
                                    high=1))
    display(HTML(f"<font color=red size=7 face='Courier New'><b> {self.appid} </b></font>",))
    display(HTML(f"<a href=http://sr525:18080/history/{self.appid}>http://sr525:18080/history/{self.appid}</a>"))
    display(HTML("<font color=red size=7 face='Courier New'><b> query time </b></font>",))
    tmp=self.get_query_time(**kwargs)
    display(HTML("<font color=red size=7 face='Courier New'><b> operator count </b></font>",))
    _show_heatmap(self.getOperatorCount())
    display(HTML("<font color=red size=7 face='Courier New'><b> operator input row count </b></font>",))
    _show_heatmap(self.get_metric_input_rowcnt(**kwargs))
    display(HTML("<font color=red size=7 face='Courier New'><b> operator output row count </b></font>",))
    _show_heatmap(self.get_metric_output_rowcnt(**kwargs))
    self.show_time_metric(**kwargs)
def get_stage_stat(self,**kwargs):
    """Per-stage resource summary for one or more queries.

    Keyword Args:
        queryid (int | list): restrict to these real_queryid values.

    Returns:
        pandas.DataFrame: one row per (Job ID, Stage ID) with elapsed time
        (normalized to total parallel task slots), spill/shuffle volumes in
        GB, and the various task-time components in seconds, joined with the
        per-stage input-read size.
    """
    if self.df is None:
        self.load_data()
    queryid=kwargs.get("queryid",None)
    if queryid and type(queryid)==int:
        queryid = [queryid,]
    df=self.df.where(F.col("real_queryid").isin(queryid)).where(F.col("Event")=='SparkListenerTaskEnd')
    # Input size comes from accumulators; two metric names are accepted to
    # cover different scan implementations.
    inputsize = df.select("real_queryid","Stage ID","Executor ID", "Task ID", F.explode("Accumulables")) \
        .select("real_queryid","Stage ID","Executor ID", "Task ID","col.*") \
        .where("Name='input size in bytes' or Name='size of files read'") \
        .groupBy("Stage ID") \
        .agg(F.round(F.sum("Update")/1024/1024/1024,2).alias("input read"))
    return df.groupBy("Job ID","Stage ID").agg(
        F.round(F.sum(F.col("Finish Time")-F.col("Launch Time"))/1000/self.executor_instances/self.executor_cores*self.taskcpus,1).alias("elapsed time"),
        F.round(F.sum(F.col("Disk Bytes Spilled"))/1024/1024/1024,1).alias("disk spilled"),
        F.round(F.sum(F.col("Memory Bytes Spilled"))/1024/1024/1024,1).alias("mem spilled"),
        F.round(F.sum(F.col("Local Bytes Read"))/1024/1024/1024,1).alias("local read"),
        F.round(F.sum(F.col("Remote Bytes Read"))/1024/1024/1024,1).alias("remote read"),
        F.round(F.sum(F.col("Shuffle Bytes Written"))/1024/1024/1024,1).alias("shuffle write"),
        F.round(F.sum(F.col("Executor Deserialize Time"))/1000,1).alias("deseri time"),
        F.round(F.sum(F.col("Fetch Wait Time"))/1000,1).alias("fetch wait time"),
        F.round(F.sum(F.col("Shuffle Write Time"))/1000000000,1).alias("shuffle write time"),
        F.round(F.sum(F.col("Result Serialization Time"))/1000,1).alias("seri time"),
        F.round(F.sum(F.col("Getting Result Time"))/1000,1).alias("get result time"),
        F.round(F.sum(F.col("JVM GC Time"))/1000,1).alias("gc time"),
        F.round(F.sum(F.col("Executor CPU Time"))/1000000000,1).alias("exe cpu time")
    ).join(inputsize,on=["Stage ID"],how="left").orderBy("Stage ID").toPandas()
def get_metrics_by_node(self,node_name):
    """Collect every metric of every plan node named *node_name* and pivot
    them into one wide Spark DataFrame.

    Each occurrence of the node across all plans gets a sequential nodeID.
    For every metric name the result has sum (or mean for names starting
    with 'avg'), mean and stddev columns, joined with per-stage partition
    counts and elapsed time.

    Returns:
        pyspark DataFrame keyed by (nodeID, Stage ID, real_queryid).
    """
    if self.df is None:
        self.load_data()
    metrics=self.queryplans.collect()
    coalesce=[]
    metricsid=[0]
    def get_metric(root):
        # Depth-first walk; metricsid is a 1-element list used as a mutable
        # counter shared with the enclosing scope.
        if root['nodeName']==node_name:
            metricsid[0]=metricsid[0]+1
            for l in root["metrics"]:
                coalesce.append([l['accumulatorId'],l["metricType"],l['name'],root["nodeName"],metricsid[0]])
        if root["children"] is not None:
            for c in root["children"]:
                get_metric(c)
    for c in metrics:
        get_metric(c)
    df=self.df.select("queryid","real_queryid",'Stage ID','Task ID','Job ID',F.explode("Accumulables"))
    df=df.select("*","col.*")
    metricdf=spark.createDataFrame(coalesce)
    metricdf=metricdf.withColumnRenamed("_1","ID").withColumnRenamed("_2","Unit").withColumnRenamed("_3","metricName").withColumnRenamed("_4","nodeName").withColumnRenamed("_5","nodeID")
    df=df.join(metricdf,on=["ID"],how="right")
    shufflemetric=set(l[2] for l in coalesce)
    # One aggregated frame per metric name; 'avg*' metrics are averaged,
    # everything else is summed.
    metricdfs=[df.where(F.col("Name")==l).groupBy("real_queryid","nodeID","Stage ID").agg(F.stddev("Update").alias(l+"_stddev"),F.mean("Update").alias(l+"_mean"),F.mean("Update").alias(l) if l.startswith("avg") else F.sum("Update").alias(l)) for l in shufflemetric]
    stagetimedf=self.df.where("Event='SparkListenerTaskEnd'").groupBy("Stage ID").agg(F.count("*").alias("partnum"),F.round(F.sum(F.col("Finish Time")-F.col("Launch Time"))/1000,2).alias("ElapsedTime"))
    nodemetric=reduce(lambda x,y: x.join(y, on=['nodeID',"Stage ID","real_queryid"],how="full"),metricdfs)
    return nodemetric.join(stagetimedf,on="Stage ID")
def get_coalesce_batch_row_cnt(self,**kwargs):
    """Plot rows-per-batch before/after each CoalesceBatches operator.

    Filters out tiny operators (<=4000 rows). Note: mutates the global
    pandas float display format as a side effect.

    Returns:
        pandas.DataFrame: the per-operator frame used for the plot.
    """
    stagesum=self.get_metrics_by_node("CoalesceBatches")
    pandas.options.display.float_format = '{:,}'.format
    stagesum=stagesum.withColumnRenamed("number of output rows","rows")
    coalescedf = stagesum.orderBy("real_queryid",'Stage ID').where("rows>4000").toPandas()
    coalescedf["row/input_batch"] = coalescedf["rows"]/coalescedf["input_batches"]
    coalescedf["row/out_batch"] = coalescedf["rows"]/coalescedf["output_batches"]
    coalescedf['stage']=coalescedf["real_queryid"].astype(str)+"_"+coalescedf['Stage ID'].astype(str)
    ax=coalescedf.plot(y=["row/input_batch","row/out_batch"],figsize=(30,8),style="-*")
    coalescedf.plot(ax=ax,y=['rows'],secondary_y=['rows'],style="k_")
    self.print_real_queryid(ax,coalescedf)
    return coalescedf
def print_real_queryid(self,ax,dataset):
    """Annotate a plot axis with query boundaries.

    Draws a vertical green line wherever dataset['real_queryid'] changes and
    writes the query id centered under each segment. Hides the x ticks.

    NOTE(review): the label for the final query segment is never drawn
    (the text is only emitted when a *following* boundary is found) —
    confirm whether that is intentional.
    """
    ax.axes.get_xaxis().set_ticks([])
    ymin, ymax = ax.get_ybound()
    real_queryid=list(dataset['real_queryid'])
    s=real_queryid[0]
    lastx=0
    for idx,v in enumerate(real_queryid):
        if v!=s:
            # Boundary sits halfway between the previous and current sample.
            xmin = xmax = idx-1+0.5
            l = mlines.Line2D([xmin,xmax], [ymin,ymax],color="green")
            ax.add_line(l)
            ax.text(lastx+(xmin-lastx)/2-0.25,ymin-(ymax-ymin)/20,f"{s}",size=20)
            s=v
            lastx=xmin
def get_shuffle_stat(self,**kwargs):
    """Plot an extensive set of shuffle statistics for ColumnarExchange nodes.

    Keyword Args:
        queryid (int): restrict to one real_queryid.
        shuffle_size (int): read but currently unused —
            NOTE(review): confirm whether the hard-coded 1000000 filter
            below was meant to use this value.

    Returns:
        tuple: (shuffle_pdf, dfx) — the per-exchange pandas frame and the
        per-stage elapsed-time frame used for the last plot.
    """
    if self.df is None:
        self.load_data()
    shufflesize=kwargs.get("shuffle_size",1000000)
    queryid=kwargs.get("queryid",None)
    exchangedf=self.get_metrics_by_node("ColumnarExchange")
    exchangedf.cache()
    exchangedf.count()
    # Map side carries totaltime_split; reducer side does not — that is how
    # the two halves of each exchange are told apart before re-joining.
    mapdf=exchangedf.where("totaltime_split is not null").select("nodeID",F.col("Stage ID").alias("map_stageid"),"real_queryid",F.floor(F.col("totaltime_split")/F.col("totaltime_split_mean")).alias("map_partnum"),"totaltime_compress","totaltime_computepid","totaltime_split","shuffle write time",'shuffle records written','data size','shuffle bytes written','shuffle bytes written_mean','shuffle bytes written_stddev','shuffle bytes spilled','number of input rows')
    reducerdf=exchangedf.where("totaltime_split is null").select("nodeID",F.col("Stage ID").alias("reducer_stageid"),"real_queryid",'local blocks read','local bytes read',F.floor(F.col("records read")/F.col("records read_mean")).alias("reducer_partnum"),(F.col('avg read batch num rows')/10).alias("avg read batch num rows"),'remote bytes read','records read','remote blocks read',(F.col("number of output rows")/F.col("records read")).alias("avg rows per split recordbatch"))
    shuffledf=mapdf.join(reducerdf,on=["nodeID","real_queryid"],how="full")
    if queryid is not None:
        shuffledf=shuffledf.where(F.col("real_queryid")==queryid)
    shuffle_pdf=shuffledf.where("`shuffle bytes written`>1000000").orderBy("real_queryid","map_stageid","nodeID").toPandas()
    # Normalize byte counters to GB (written/data size) and MB (mean/stddev).
    shuffle_pdf["shuffle bytes written"]=shuffle_pdf["shuffle bytes written"]/1000000000
    shuffle_pdf["data size"]=shuffle_pdf["data size"]/1000000000
    shuffle_pdf["shuffle bytes written_mean"]=shuffle_pdf["shuffle bytes written_mean"]/1000000
    shuffle_pdf["shuffle bytes written_stddev"]=shuffle_pdf["shuffle bytes written_stddev"]/1000000
    ax=shuffle_pdf.plot(y=["avg read batch num rows",'avg rows per split recordbatch'],figsize=(30,8),style="b-*",title="average batch size after split")
    self.print_real_queryid(ax,shuffle_pdf)
    shuffle_pdf["split_ratio"]=shuffle_pdf["records read"]/shuffle_pdf['shuffle records written']
    ax=shuffle_pdf.plot(y=["split_ratio","records read"],secondary_y=["records read"],figsize=(30,8),style="-*",title="Split Ratio")
    self.print_real_queryid(ax,shuffle_pdf)
    shuffle_pdf["compress_ratio"]=shuffle_pdf["data size"]/shuffle_pdf['shuffle bytes written']
    ax=shuffle_pdf.plot(y=["shuffle bytes written","compress_ratio"],secondary_y=["compress_ratio"],figsize=(30,8),style="-*",title="compress ratio")
    self.print_real_queryid(ax,shuffle_pdf)
    shufflewritepdf=shuffle_pdf
    ax=shufflewritepdf.plot.bar(y=["shuffle write time","totaltime_compress","totaltime_split","totaltime_computepid"],stacked=True,figsize=(30,8),title="split time + shuffle write time vs. shuffle bytes written")
    ax=shufflewritepdf.plot(ax=ax,y=["shuffle bytes written"],secondary_y=["shuffle bytes written"],style="-*")
    self.print_real_queryid(ax,shufflewritepdf)
    # Re-walk the plans to recover each exchange's output schema from its
    # simpleString, so rows/split-time can be correlated with data types.
    metrics=self.queryplans.collect()
    coalesce=[]
    metricsid=[0]
    def get_metric(root):
        if root['nodeName']=="ColumnarExchange":
            metricsid[0]=metricsid[0]+1
            for l in root["metrics"]:
                coalesce.append([l['accumulatorId'],l["metricType"],l['name'],root["nodeName"],metricsid[0],root["simpleString"]])
        if root["children"] is not None:
            for c in root["children"]:
                get_metric(c)
    for c in metrics:
        get_metric(c)
    tps={}
    for r in coalesce:
        rx=re.search(r"\[OUTPUT\] List\((.*)\)",r[5])
        if rx:
            if r[4] not in tps:
                tps[r[4]]={}
            fds=rx.group(1).split(", ")
            for f in fds:
                if not re.search(r":(.+Type)",f):
                    print(fds)
                else:
                    # Count output fields per Spark data type for this node.
                    tp=re.search(r":(.+Type)",f).group(1)
                    if tp not in tps[r[4]]:
                        tps[r[4]][tp]=1
                    else:
                        tps[r[4]][tp]+=1
    if len(tps)>0:
        typedf=pandas.DataFrame(tps).T.reset_index()
        typedf=typedf.fillna(0)
        shuffle_pdf=pandas.merge(shuffle_pdf,typedf,left_on="nodeID",right_on="index")
        shufflewritepdf=shuffle_pdf
        ax=shufflewritepdf.plot.bar(y=["number of input rows"],stacked=True,figsize=(30,8),title="rows vs. shuffle data type")
        ax=shufflewritepdf.plot(ax=ax,y=list(typedf.columns[1:]),secondary_y=list(typedf.columns[1:]),style="-o")
        self.print_real_queryid(ax,shufflewritepdf)
        ax=shufflewritepdf.plot.bar(y=["totaltime_split"],stacked=True,figsize=(30,8),title="split time vs. shuffle data type")
        ax=shufflewritepdf.plot(ax=ax,y=list(typedf.columns[1:]),secondary_y=list(typedf.columns[1:]),style="-o")
        self.print_real_queryid(ax,shufflewritepdf)
    shufflewritepdf.plot(x="shuffle bytes written",y=["shuffle write time","totaltime_split"],figsize=(30,8),style="*")
    shufflewritepdf["avg shuffle batch size after split"]=shufflewritepdf["shuffle bytes written"]*1000000/shufflewritepdf['records read']
    shufflewritepdf["avg batch size after split"]=shufflewritepdf["data size"]*1000000/shufflewritepdf['records read']
    ax=shufflewritepdf.plot(y=["avg shuffle batch size after split","avg batch size after split","shuffle bytes written"],secondary_y=["shuffle bytes written"],figsize=(30,8),style="-*",title="avg batch KB after split")
    self.print_real_queryid(ax,shufflewritepdf)
    shufflewritepdf["avg batch# per splitted partition"]=shufflewritepdf['records read']/(shufflewritepdf['local blocks read']+shufflewritepdf['remote blocks read'])
    ax=shufflewritepdf.plot(y=["avg batch# per splitted partition",'records read'],secondary_y=['records read'],figsize=(30,8),style="-*",title="avg batch# per splitted partition")
    self.print_real_queryid(ax,shufflewritepdf)
    fig, ax = plt.subplots(figsize=(30,8))
    ax.set_title('shuffle wite bytes with stddev')
    ax.errorbar(x=shuffle_pdf.index,y=shuffle_pdf['shuffle bytes written_mean'], yerr=shuffle_pdf['shuffle bytes written_stddev'], linestyle='None', marker='o')
    self.print_real_queryid(ax,shuffle_pdf)
    shuffle_pdf['record batch per mapper per reducer']=shuffle_pdf['records read']/(shuffle_pdf["map_partnum"]*shuffle_pdf['reducer_partnum'])
    ax=shuffle_pdf.plot(y=["record batch per mapper per reducer"],figsize=(30,8),style="b-*",title="record batch per mapper per reducer")
    self.print_real_queryid(ax,shuffle_pdf)
    dfx=self.df.where("Event='SparkListenerTaskEnd'").select("real_queryid","Stage ID",(F.col("Finish Time")/1000-F.col("Launch Time")/1000).alias("elapsed")).groupBy('real_queryid',"Stage ID").agg(F.mean("elapsed").alias("elapsed"),F.stddev("elapsed").alias("stddev")).orderBy("real_queryid","Stage ID").toPandas()
    if queryid is not None:
        # NOTE(review): dfx is a pandas DataFrame at this point, but F.col()
        # builds a Spark Column — this filter looks like it should be
        # dfx[dfx['real_queryid']==queryid]; confirm.
        dfx=dfx.where(F.col("real_queryid")==queryid)
    fig, ax = plt.subplots(figsize=(30,8))
    ax.set_title('stage time')
    ax.errorbar(x=dfx.index,y=dfx['elapsed'], yerr=dfx['stddev'], linestyle='None', marker='o')
    self.print_real_queryid(ax,dfx)
    return (shuffle_pdf,dfx)
def get_stages_w_odd_partitions(appals,**kwargs):
    """List stages whose task count is not a multiple of the app's total
    parallel task slots (executors * cores / task.cpus), slowest first.

    Returns:
        pandas.DataFrame with Stage ID, real_queryid, elapsed time, partitions.
    """
    if appals.df is None:
        appals.load_data()
    slots = appals.executor_cores * appals.executor_instances / appals.taskcpus
    task_end = appals.df.where("Event='SparkListenerTaskEnd'")
    per_stage = task_end.groupBy("Stage ID", "real_queryid").agg(
        (F.sum(F.col('Finish Time') - F.col('Launch Time')) / 1000).alias("elapsed time"),
        F.count('*').alias('partitions'))
    odd = per_stage.where(F.col("partitions") % slots != 0)
    return odd.orderBy(F.desc("elapsed time")).toPandas()
def get_scaned_column_v1(appals):
    """For TPC-H queries 1..22, list the columns read by every 'Scan arrow'
    node, parsed from the node's ReadSchema metadata.

    Returns:
        list of [queryid, "col1,col2,..."] entries, one per scan node.
    """
    def collect_scans(node, acc):
        # Depth-first walk gathering matching scan nodes into acc.
        if node['nodeName'].startswith("Scan arrow"):
            acc.append(node)
        for child in node['children']:
            collect_scans(child, acc)
    alltable = []
    for qid in range(1, 23):
        plans = appals.queryplans.where("real_queryid=" + str(qid)).collect()
        scans = []
        collect_scans(plans[0], scans)
        for node in scans:
            # ReadSchema looks like struct<name:type,...>; keep field names.
            schema_body = re.split(r'[<>]', node['metadata']['ReadSchema'])[1]
            columns = ",".join([field.split(":")[0] for field in schema_body.split(",")])
            alltable.append([qid, columns])
    return alltable
def get_scaned_column_v2(appals):
    """For TPC-H queries 1..22, list the columns read by every
    'ColumnarBatchScan' node, parsed from the node's simpleString.

    Returns:
        list of [queryid, "col1,col2,..."] entries, one per scan node.
    """
    def collect_scans(node, acc):
        # Depth-first walk gathering matching scan nodes into acc.
        if node['nodeName'].startswith("ColumnarBatchScan"):
            acc.append(node)
        for child in node['children']:
            collect_scans(child, acc)
    alltable = []
    for qid in range(1, 23):
        plans = appals.queryplans.where("real_queryid=" + str(qid)).collect()
        scans = []
        collect_scans(plans[0], scans)
        for node in scans:
            # simpleString carries [name#id,...]; strip the #id suffixes.
            bracket_body = re.split(r"[\[\]]", node['simpleString'])[1]
            columns = ",".join([field.split("#")[0] for field in bracket_body.split(",")])
            alltable.append([qid, columns])
    return alltable
def compare_query(appals,queryid,appbaseals):
    """Compare one query between two application logs, stage by stage.

    Shows the critical-path breakdown for *appals*, a side-by-side stage
    time table with the per-stage gap, and (for the stages covering 90% of
    *appals*' time) the output row counts and query plans of both apps.

    Args:
        appals: the app under test (labeled "Gazelle" in the output).
        queryid: real_queryid to compare.
        appbaseals: the baseline app (labeled "Photon" in the output).
    """
    print(f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~Query{queryid}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    # BUG FIX: was hard-coded to queryid=22; use the requested query.
    appals.show_critical_path_time_breakdown(queryid=queryid)
    s1=appals.get_stage_stat(queryid=queryid)
    s2=appbaseals.get_stage_stat(queryid=queryid)
    # .copy() so renaming columns does not touch s1/s2 (avoids
    # SettingWithCopy issues; s1/s2 are displayed unmodified below).
    ls=s1[['Stage ID','elapsed time']].copy()
    ls.columns=['l sid','l time']
    rs=s2[['Stage ID','elapsed time']].copy()
    rs.columns=['r sid','r time']
    js=ls.join(rs)
    js['gap']=js['r time'] - js['l time']
    js['gap']=js['gap'].round(2)
    display(js)
    display(s1)
    display(s2)
    # Stages are matched positionally between the two runs.
    stagesmap={}
    for x in range(0,min(len(s1),len(s2))):
        stagesmap[s1['Stage ID'][x]]=s2['Stage ID'][x]
    totaltime=sum(s1['elapsed time'])
    acctime=0
    s1time=s1.sort_values("elapsed time",ascending=False,ignore_index=True)
    ldfx=appals.get_metric_output_rowcnt(queryid=queryid)
    rdfx=appbaseals.get_metric_output_rowcnt(queryid=queryid)
    for x in range(0,len(s1time)):
        sid1=int(s1time['Stage ID'][x])
        sid2=int(stagesmap[sid1])
        print(f"============================================================")
        display(ldfx[ldfx['Stage ID']==sid1])
        # BUG FIX: the baseline frame must be masked by its own stage ids —
        # was rdfx[ldfx['Stage ID']==sid2], i.e. the wrong frame's mask.
        display(rdfx[rdfx['Stage ID']==sid2])
        print(f" Gazelle Query {queryid} Stage {sid1}")
        xf=appals.get_query_plan(stageid=sid1,show_simple_string=True)
        print(f" Photon Query {queryid} Stage {sid2}")
        xf=appbaseals.get_query_plan(stageid=sid2,show_simple_string=True)
        acctime+=s1time['elapsed time'][x]
        # Stop once the slowest stages cover 90% of the query's time.
        if acctime/totaltime>=0.9:
            break
# -
# Spark configuration keys excluded from comp_spark_conf(): these vary per
# application run (ids, ports, timestamps, host-specific paths) and would
# otherwise always show up as spurious differences.
# BUG FIX: a missing comma used to concatenate two 'spark.driver.appUIAddress'
# literals into the bogus key 'spark.driver.appUIAddressspark.driver.appUIAddress',
# so the real key was never matched; the triplicated entry is also deduped.
notlist=['resource.executor.cores',
 'spark.app.id',
 'spark.app.initial.file.urls',
 'spark.app.name',
 'spark.app.startTime',
 'spark.driver.port',
 'spark.job.description',
 'spark.jobGroup.id',
 'spark.org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter.param.PROXY_HOSTS',
 'spark.org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter.param.PROXY_URI_BASES',
 'spark.rdd.scope',
 'spark.sql.execution.id',
 '__fetch_continuous_blocks_in_batch_enabled',
 'spark.driver.appUIAddress',
 'spark.driver.host',
 'spark.driver.extraClassPath',
 'spark.eventLog.dir',
 'spark.executorEnv.CC',
 'spark.executorEnv.LD_LIBRARY_PATH',
 'spark.executorEnv.LD_PRELOAD',
 'spark.executorEnv.LIBARROW_DIR',
 'spark.files',
 'spark.history.fs.logDirectory',
 'spark.sql.warehouse.dir',
 'spark.yarn.appMasterEnv.LD_PRELOAD',
 'spark.yarn.dist.files'
]
def comp_spark_conf(app0,app1):
    """Return the spark-config rows that differ (case-insensitively) between
    two applications, excluding the run-specific keys in `notlist`.

    Column suffixes are the last 8 characters of each app id.
    """
    left_tag = app0.appid[-8:]
    right_tag = app1.appid[-8:]
    pdfc = app0.get_spark_config().join(app1.get_spark_config(),
                                        lsuffix=left_tag, rsuffix=right_tag)
    left_col = "0" + left_tag
    right_col = "0" + right_tag
    # Compare lowercase so case-only differences are not reported.
    pdfc[left_col] = pdfc[left_col].str.lower()
    pdfc[right_col] = pdfc[right_col].str.lower()
    pdfc['comp'] = (pdfc[left_col] == pdfc[right_col])
    return pdfc.loc[(pdfc['comp'] == False) & (~pdfc.index.isin(notlist))]
# # MISC
def show_rst(pdrst):
    """Render a result DataFrame as a styled HTML table in the notebook.

    The 'app_id' column must hold objects with an .appid attribute; it is
    replaced by two link columns (spark history server and trace viewer).
    Cells in the 'elapsed time' column are tinted red. Nothing is returned.
    """
    html='''<style type="text/css">
.tg {border-collapse:collapse;border-spacing:0;border-color:#aabcfe;}
.tg td{font-family:Courier New;font-size:18px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#aabcfe;color:#669;background-color:#e8edff;}
.tg th{font-family:Courier New;font-size:18px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#aabcfe;color:#039;background-color:#b9c9fe;}
.tg .tg-phtq{background-color:#D2E4FC;border-color:inherit;text-align:left;vertical-align:top}
.tg .tg-c3ow{border-color:inherit;text-align:center;vertical-align:top}
.tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:top}
.tg .tg-phtq_v{background-color:#D2E4FC;border-color:inherit;text-align:left;vertical-align:top; color:#FF0000}
.tg .tg-0pky_v{border-color:inherit;text-align:left;vertical-align:top; color:#FF0000}
</style>
<table class="tg">
<tr>
'''
    cols=pdrst.columns
    html=html+''.join(['<th class="tg-c3ow">{:s}</th>'.format(l) for l in cols if l!='app_id'])
    html=html+'<th class="tg-c3ow">spark log</th><th class="tg-c3ow">trace_view</th></tr>'
    for idx, r in pdrst.iterrows():
        html=html+"<tr>"
        html=html+"".join(['<td class="{:s}">{:s}</td>'.format('tg-phtq' if l!='elapsed time' else 'tg-phtq_v', str(r[l])) for l in cols if l!='app_id'])
        html=html+'''<td class="tg-phtq"><a href="http://10.1.2.107:18080/history/{:s}/jobs"> {:s}</a></td><td class="tg-phtq"><a href=http://sr525:1088/tracing_examples/trace_viewer.html#/tracing/test_data/{:s}.json> {:s}</a></td>'''.format(r['app_id'].appid,r['app_id'].appid,r['app_id'].appid,r['app_id'].appid)
        html=html+"</tr>"
    html=html+"</table>"
    display(HTML(html))
def reduce_metric(pdrst, slave_id, metric, core, agg_func):
    """Expand per-app reduced metrics into one column per aggregation.

    For every row, asks the row's app_id object to reduce *metric* on
    *slave_id*/*core* with the given aggregation functions, then copies
    each aggregated value into its own aliased column.  The temporary
    'rst' column is dropped before returning the frame.
    """
    pdrst['rst'] = pdrst.apply(
        lambda row: row['app_id'].get_reduce_metric(slave_id, metric, core, agg_func),
        axis=1)
    for func in agg_func:
        alias = get_alias_name(metric, func)
        pdrst[alias] = pdrst.apply(
            lambda row: row['rst'].iloc[0][get_alias_name(metric, func)],
            axis=1)
    return pdrst.drop(columns=['rst'])
def clean_data(rsta):
    """Despike the 'emon_mem_bw' column of *rsta* in place.

    A row whose bandwidth exceeds 40000 keeps its value only if the next
    few rows also stay above 40000 (i.e. it belongs to a sustained
    plateau, not an isolated spike); otherwise it is zeroed.  Rows at or
    below 40000 are overwritten with False (stored as 0.0 in a float
    column), preserving the original behavior.

    Assumes rsta has a default RangeIndex (0..len-1), since rows are
    addressed with .loc[i, ...].
    """
    # Bug fix: the loop bound was an undefined global `r`; derive the
    # row count from the frame itself so the function is self-contained.
    n = len(rsta)
    for i in range(0, n):
        m = rsta.loc[i, 'emon_mem_bw']
        if m > 40000:
            # Look ahead up to 4 rows; a dip below the threshold means
            # row i is a transient spike, so zero it out.
            for j in range(i, i + 5):
                if j >= n:
                    break
                if rsta.loc[j, 'emon_mem_bw'] < 40000:
                    m = 0
                    break
            rsta.loc[i, 'emon_mem_bw'] = m
        else:
            rsta.loc[i, 'emon_mem_bw'] = False
def background_gradient(s, m, M, cmap='PuBu', low=0, high=0):
    """Return per-cell 'background-color: #rrggbb' CSS for Series *s*.

    Intended for pandas Styler.apply: values are normalized into
    [m - rng*low, M + rng*high] and mapped through colormap *cmap*.
    """
    # `colormaps` requires matplotlib >= 3.5; it replaces cm.get_cmap,
    # which was removed in matplotlib 3.9, and drops the implicit
    # dependency on a global `plt`.
    from matplotlib import colors, colormaps
    rng = M - m
    norm = colors.Normalize(m - (rng * low),
                            M + (rng * high))
    normed = norm(s.values)
    c = [colors.rgb2hex(x) for x in colormaps[cmap](normed)]
    return ['background-color: {:s}'.format(color) for color in c]
# # TPCDS query map
# + code_folding=[]
# TPC-DS has 99 numbered queries, four of which (14, 23, 24, 39) come in
# two variants (a/b), giving 103 runnable queries in total.  Build the
# same {sequence number -> query name} mapping the original hard-coded
# table produced: 1 -> 'q01', ..., 14 -> 'q14a', 15 -> 'q14b', ...,
# 103 -> 'q99'.  Generating it avoids a 100-line tab-separated literal.
tpcds_query_map = {}
_seq = 1
for _qnum in range(1, 100):
    # Queries with two variants contribute two consecutive entries.
    _suffixes = ('a', 'b') if _qnum in (14, 23, 24, 39) else ('',)
    for _sfx in _suffixes:
        tpcds_query_map[_seq] = 'q{:02d}{:s}'.format(_qnum, _sfx)
        _seq += 1
# -
| tools/sparklog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The current pageview api will only go back to 2015 which makes a lot of the work I've done recently not matter so much.
#
# July 1, 2015
#
# Earliest date the Wikimedia pageview API has data for (July 1, 2015).
ultimate_start_date = '20150701'
# +
from mwviews.api import PageviewsClient
# The pageview API asks for a contact address in the user-agent string.
p = PageviewsClient(user_agent = '<EMAIL>')
# -
import pandas as pd
# Daily pageviews by human users (agent='user') for two sample articles,
# from the earliest supported date onward; returns {date: {article: count}}.
test = p.article_views('en.wikipedia', ['Cryptocurrency', 'Vagina'],agent='user', start=ultimate_start_date)
pd.DataFrame.from_dict(test, orient='index')
# Seems like it's trivial to query this kind of data. So it'll be a breeze to program this out.
| exploratory_notebooks/20180620_Importing_page_view_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tqdm
from scipy import sparse
# # Generate some indices
# Even the sparse matrices won't fit in memory. So we will have to loop through them when making predictions or sampling random items.
# +
#count number of items:
# Build a CSR-style offset index: indptr[i] is the row offset where chunk i
# starts, so chunk i covers rows indptr[i]:indptr[i+1] of the full
# (virtual) fingerprint matrix.
indptr = [0]
for chunkID in range(10):
    scores = np.load(f'../processed_data/AmpC_all{chunkID}.npy')
    indptr.append(indptr[-1] + scores.shape[0])
# -
# All docking scores concatenated into one in-memory array (the scores fit
# in RAM even though the sparse fingerprint matrices do not).
scores = np.concatenate([np.load(f'../processed_data/AmpC_all{i}.npy') for i in range(10)])
# # functions to handle the slabs
#
# For training, these loop through the chunks and extract the indices that have been selected either at random or suggested by the surrogate model.
#
# For predicting, these loop through the chunks and perform the `predict_proba` method on each chunk (after removing the training indices), outputting a concatenated numpy array of predicted values.
# +
def extractFPs(chunkID, indptr, isTrain):
    """Load sparse fingerprint chunk *chunkID* and keep only the rows
    selected by the global boolean mask *isTrain* (sliced to this chunk
    via the *indptr* offsets)."""
    chunk = sparse.load_npz(f'../processed_data/AmpC_all{chunkID}.npz')
    lo, hi = indptr[chunkID], indptr[chunkID + 1]
    return chunk[isTrain[lo:hi]]
def buildTrain(indptr, isTrain, verbose=0):
    """Stack the selected rows of all 10 chunks into one sparse matrix."""
    if verbose:
        print('building training matrix')
    selected = [extractFPs(cid, indptr, isTrain) for cid in range(10)]
    return sparse.vstack(selected)
def chunkPredictProba(model, indptr, isTrain, verbose=0):
    """Predict P(hit) for every row NOT in the training set, chunk by
    chunk, and return the concatenated probabilities (positive class)."""
    if verbose:
        print('predicting probabilities')
    out = []
    for cid in range(10):
        held_out = extractFPs(cid, indptr, ~isTrain)
        out.append(model.predict_proba(held_out)[:, 1])
    return np.concatenate(out)
def chunkPredict(model, indptr, isTrain, verbose=0):
    """Predict regression scores for every row NOT in the training set.

    Scores are negated so that the best (lowest) docking scores sort to
    the top, matching the ordering convention of chunkPredictProba.
    """
    if verbose:
        # Fixed message: this was 'predicting probabilities', a
        # copy-paste from chunkPredictProba, but this path predicts values.
        print('predicting values')
    preds = []
    for chunkID in range(10):
        fps = extractFPs(chunkID, indptr, ~isTrain)
        pred = -1*model.predict(fps) #best scoring will now be on top (like the proba)
        preds.append(pred)
    return np.concatenate(preds)
# -
# # Train an RF regressor and Logistic Regression models
# +
from sklearn.linear_model import LogisticRegression
# max_iter is raised well above sklearn's default (100) so the solver
# converges on this data; C=1 is the default L2 regularization strength.
model = LogisticRegression(max_iter=10000, C=1)
# -
# # How long to find the 50k - 200k top 0.3% docking scores from one iteration of Logistic regression?
# +
# Training-set sizes to sweep: 5k, 10k, then 20k doubling up to 2.56M.
# NOTE(review): `10000*2<<i` parses as (10000*2) << i, i.e. 20000 * 2**i --
# presumably intentional, but worth confirming.
trainingSetSizes = [5000, 10_000] + [10000*2<<i for i in range(0,8)]
# Size of the true top 0.3% (reference value; not used below).
num_actual = scores.shape[0] * 0.003
desiredNumLigands = [50_000, 100_000, 150_000, 200_000,]
# +
#this is the _actual_ observed cutoff at 0.3th percentile.
test_cutoff = np.percentile(scores, 0.3)
#mask identifying the top hits.
topK = scores<test_cutoff
#df = pd.DataFrame(columns=['Algorithm', 'Training size', 'Fraction', 'N hits wanted', 'N hits explored'])
df = pd.DataFrame(columns=['Algorithm', 'Training size', 'N hits wanted', 'N hits explored'])
count=0
for i in range(3):  # three independent repeats of the whole sweep
    #for percent in np.array([0.1, 0.25, 0.5, 0.75, 0.9]):
    for numWanted in desiredNumLigands:
        # Fresh random permutation of all molecule indices per setting.
        idx = np.arange(scores.shape[0])
        np.random.shuffle(idx)
        for size in trainingSetSizes:
            #numWanted = int(percent * scores.shape[0] * 0.003)
            #print('numWanted:', numWanted, 'percent:', percent)
            #split indices into train and test:
            train = idx[:size].copy()
            test = idx[size:].copy()
            train.sort()
            test.sort()
            #generate a 'is a training instance' mask.
            isTrain = np.zeros(scores.shape[0]).astype(bool)
            isTrain[train]=True
            #topK molecules already found in the training set:
            numFound = topK[train].sum()
            numRequired = numWanted - numFound
            #fit model: binary target is "in the top 0.3% of the training scores"
            cutoff = np.percentile(scores[isTrain],0.3)
            model.fit(buildTrain(indptr, isTrain, 1), scores[isTrain]<cutoff)
            #predict (slowest step):
            proba = chunkPredictProba(model, indptr, isTrain, 1)
            #rank the probabilities
            proba_sorted = (-proba).argsort()
            #sorted the unseen instances by probability (highest prob first):
            test = test[proba_sorted]
            # Number of top-ranked unseen molecules that must be examined to
            # collect the remaining hits: argmax returns the first position
            # where the cumulative hit count exceeds numRequired.
            numSampled = np.argmax(np.cumsum(topK[test])>numRequired)
            #df.loc[count] = ['morgan_feat', size, percent, numWanted, numSampled+size]
            df.loc[count] = ['morgan_feat', size,numWanted, numSampled+size]
            count+=1
            print(count, size, numWanted, numSampled+size)
df.to_csv('../processed_data/AmpC_single_'+str(0.3)+'.csv')
| code/AmpC_all_single.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Grants? What grants?
# The recently concluded GR12 seemed like a great success judging by the numbers of contributors, crowdfunded amounts, number of grants and so on. However, those numbers are a topic of another analysis. This one instead focuses on the content of the grants - what were the topics that attracted most funds?
#
# We would try to extract simple keywords (or phrases) from the grants' descriptions and then try to ruminate on what they mean.
#
# But first - how to find the top grants? We'd like to consider `crowdfund_amount_contributions_usd` as the most important property of a grant - it speaks of the contributors' dedication.
# + pycharm={"name": "#%%\n"}
import os
import openai
import pandas as pd
# + pycharm={"name": "#%%\n"}
GR12_DATA = 'data/Grants Results History Round over Round + Grant over Grant - GR1-GR12.csv'
df = pd.read_csv(GR12_DATA)
# + pycharm={"name": "#%%\n"}
# convert from string to float
# The CSV stores amounts like "$1,234.56"; strip the currency symbol and
# thousands separators before casting.
df['crowdfund_amount_contributions_usd'] = df['crowdfund_amount_contributions_usd']\
    .astype(str)\
    .apply(lambda x: x.strip('$').replace(',', ''))\
    .astype(float)
# + pycharm={"name": "#%%\n"}
# Top three GR12 grants, ranked by crowdfunded USD amount.
gr12_top3 = df[df['round_number'] == 12] \
    .sort_values(by='crowdfund_amount_contributions_usd', ascending=False) \
    .head(n=3)
gr12_top3[['grant_title', 'grant_id', 'url', 'crowdfund_amount_contributions_usd', 'num_unique_contributors']]
# -
# Next we can manually get the text description of each grant from its webpage. It has to be done manually as there is currently no API provided to properly fetch a grant's description (e.g. by id), and we would like to avoid crawling the pages. The top 3 grants should be displayed with their id and urls so it would be easy to confirm that the following texts were copied from the grant web pages.
# + pycharm={"name": "#%%\n"}
text_4352 = """Vision
ZigZag wants to be a revolutionary project in the ZK Rollup space and aims at the end-game scaling solutions for Ethereum. We want to be the first, we want to be the best. We want to push out great products with high quality as fast as possible. We do not limit ourselves. Our vision is not to launch a mediocre DEX. Instead, we are aiming to bring the usability of centralised exchanges to a DEX that previously was not possible. With ZK Rollups, it is. Having sufficient liquidity and orderbook depth is a key factor that holds back a lot of DEXs from succeeding, which is what our main aim is to get right.
Introduction
ZigZag is a decentralised orderbook exchange that utilizes ZK-Rollup tech by allowing traders to perform spot trades with minimal slippage and thick orderbooks. The problem that every AMM-based DEX has on other Layer 2 Rollups is having miniscule liquidity. For simple swaps the impact on price movement is not significant. However, if one is attempting to trade with size relative to over $500k, to get the best quotes it is necessary to bridge back to mainnet Ethereum and pay its fees for aggregated liquidity that is available there. ZK Rollups can solve this by offering negligible fees on transactions, allowing for any market inefficiency to be taken advantage of in an instant by market makers, which was previously not possible on a DEX. We are aiming to acquire sufficient liquidity to the extent that users will not have to pay Ethereum gas fees on mainnet in order to get quoted for a similar price.
We launched as the first and still the only DEX on zkSync 1.x. We will also launch as the first DEX on StarkNet, which you can currently try out on our testnet (limited functionality). Due to our first mover advantage we are capturing a lot of attention. Our volume has been breaking records every week. Last week we had a total volume of $52M with a record breaking day of $13.2M in volume on the last day of the week.
Team background
Our team derives from crypto natives who are all experienced traders. Having personal experience in using decentralized exchanges allows us to recognize what is required for a DEX to succeed. The founder of ZigZag is a leading developer in Solidity, but we dare to say he is one of the, if not the best, Cairo dev out there. Our founder coded most of our DEX on zkSync and is the reason we exist and captured the first mover advantage we have. Another experienced dev joined our team recently and focuses on StarkNet and coding a non-custodial liquidity pool for market makers. With these guys we have a confirmed advantage and head start over any other competition building on StarkNet. Our team is joined by a small group of experienced devs who are very committed to helping us expand ZigZag even more. We are also in contact with most of the big and small players that are building next to us and are looking forward to collaborating with anyone for the good of Ethereum’s scalability.
With our combined skills, trading knowledge and maybe most importantly full-time commitment, we have the ability to create a leading DEX on zkSync and StarkNet from the development side, as well as ensuring that the incentives for traders are sufficient for them to not require an alternative.
Achievements and future plans
Currently, we have our exchange live on zkSync 1.0. We’re using the native zkSync atomic swap feature to match orders. The gas fee is paid to the relayer and included in the atomic swap. We also made a bridge UI that taps into zkSync’s smart contract to bridge between Ethereum and zkSync. We have a StarkNet testnet up, but right now with limited functionality. However, this will soon change as Starkware is moving quickly. Be sure to check out our announcements. Furthermore, our governance proposal for Frax passed (https://gov.frax.finance/t/fip-36-frax-x-zigzag-partnership/272): Frax will provide us for a total of $20M in liquidity. The first millions have arrived. This liquidity will be used to keep our DEX liquid on zkSync for now and once StarkNet fully launches we will move the majority of our liquidity there since it will have more functionality. We also have a MIM proposal up (https://forum.abracadabra.money/t/proposal-bring-mim-onto-starknet-through-zigzag-exchange/1065) to kickstart our DEX on StarkNet, since StarkNet will have more functionality than zkSync 1.0 currently has.
Limit orders and margin trading will be possible on StarkNet and zkSync 2.0. On StarkNet we are also building a non-custodial liquidity pool that market makers can tap into to use and market make on our DEX. We can turn this into a dAMM. As said before, we made a bridge UI for zkSync, but we are working on a fast withdrawal mechanism. We’re also working on a bridge UI for StarkNet and will provide fast withdrawals there too. Another future plan is adding more bridges to our website. One that has the most priority would be zkSync <-> StarkNet. We are thinking further into the future about implementing NFT related features on zkSync: viewing and sending NFTs on our website. We would later turn this into the first NFT Marketplace.
We’re eagerly awaiting zkSync 2.0, which will be zkEVM. This will give us way more possibilities on our DEX. We might even build on other Layer 2 ZK Rollups if we have the developer capacity for it. Once Loopring, ZKSwap and Polygon Hermez are zkEVM, we could start building there.
Reasoning for grant
As the first DEX on zkSync we've been very active in the zkSync ecosystem, but also with the community and catering to them. One of the few live use cases on zkSync right now, besides our DEX, is donating to Gitcoin grants. This can be done in a cheap way by either by bridging funds from Ethereum Layer 1 -> zkSync Layer 2 (https://trade.zigzag.exchange/bridge) or by using a fiat ramp like Ramp Network (https://ramp.network/) to zkSync. These Gitcoin grants created demand for DAI, so a while back we added DAI pairs to our exchange (ETH/DAI, WBTC/DAI, DAI/USDC, DAI/USDT). A lot of people started using ZigZag to grab DAI for Gitcoin grants and started requesting us to create our own grant. They wanted to donate to us! We decided to open a Twitter poll (https://twitter.com/ZigZagExchange/status/1469983150180909057) and after 1 day more than 1300 people voted "Yes", telling us that they would want to donate to us. This gave us great confidence in creating our own grant.
Use of funds
As seen in our tweet, we are self funding right now. This Gitcoin grant will give us the ability to spend more funding on development. We would love to scale up our development team and with the support of our community it seems like we will be able to do this. Development can mean anything ranging from frontend and backend development to GFX/UI/UX design.
Developer? Contact us!
We would love to hear your feedback on our product, so please use our exchange, read our code in Github and join our community. If you are interested in our project and think that you could help us out or even contribute to building on ZK Rollups, don't hesitate and contact us here: https://info.zigzag.exchange/#contact. With our grant donations we will be able to take onboard more developers and maybe that person will be you! Who knows we’ll even see you on our team in the future!"""
# + pycharm={"name": "#%%\n"}
text_2323 = """Dark Forest is an MMO strategy game built with zkSNARKs on Ethereum and xDAI. Players explore an infinite, procedurally-generated universe, conquering planets and growing a space empire. More info: zkSNARKs for blockchain gaming
Over 2,500+ crypto enthusiasts have participated in the first four whitelisted Dark Forest rounds, spending over one trillion gas on xDAI chain and the Ropsten testnet. This playerbase represents about 15% of our current waitlist, and we're excited to continue development so that we can open up the game to more players.
>>> What have we been up to?
After the previous Gitcoin grant round concluded, we launched DF v0.6 Round 4, aka The Lightning Round. We’ve also seen an abundance of life come from within the community in the form of the Dark Forest Community Art Contest, MarrowDAO’s recent Dark Forest Community Round, and d_fdao's recently announced Dark Forest Community New Year's Round!
We are actively seeking a front end web developer. (Sign up for the Dark Forest Jobs newsletter if you’d like to be notified of new openings!)
We’ve got one more round of v0.6 left, which will likely launch in Q1 2022. After that, we plan to embark on the next stage of our development cycle: v0.7. In addition to releasing rounds ourselves, we are enabling individual players and DAOs to deploy their own rounds, adding a unique spin to our core game.
Throughout 2022, we hope to facilitate the launch of many community rounds, so that players can play the game without having to wait for our months-long release process. Your contributions, in addition to fostering the growth of Dark Forest, will help make that happen.
We’ve learned a ton from shipping this game for our enthusiastic audience, and are eternally grateful for the opportunity to build such an outlandish piece of software.
Join the Dark Forest Discord if you'd like to get involved.
Image
Image
Decentralized Digital Worlds: We want to build a massively-multiplayer persistent and economic universe, interoperable with the rest of the Ethereum metaverse. We believe that zkSNARKs will unlock the first generation of truly compelling decentralized games, and that decentralized games will pave the way for the community-owned and community-designed digital worlds of the future.
Community-driven: Beyond working on the Dark Forest game itself, our team also works closely with Project Sophon, a group of players working on third-party tools. Our vision is for the Dark Forest gameplay experience to be built and freely modified by the community. For a taste of this, see the Dark Forest Community Plugins Homepage.
ETH/ZK Education: We're also spending time on community and education initiatives aimed at bringing both Ethereum and zero-knowledge application development to more students and developers, including educational programs, starter repos and shared infrastructure, and more (to be announced on our blog in the coming weeks!)
Our work so far has been supported by a handful of one-off developer grants from organizations like xDAI, the Mozilla Builders program, and an in-game tip jar. Additionally, we are now funded by a new research foundation: 0xPARC. You can read more about it here.
We're so thankful to be a part of this ecosystem and we're excited to keep building for you all! :)"""
# + pycharm={"name": "#%%\n"}
text_1668 = """Based in Washington, D.C., Coin Center is the leading non-profit research and advocacy center focused on the public policy issues facing cryptocurrency and decentralized computing technologies like Bitcoin and Ethereum.
Our mission is to build a better understanding of these technologies and to promote a regulatory climate that preserves the freedom to innovate using permissionless blockchain technologies.
We do this by producing and publishing policy research from respected academics and experts, educating policymakers and the media about blockchain technology, and by engaging in advocacy for sound public policy."""
# + [markdown] pycharm={"name": "#%% md\n"}
# Now that we have the texts we can get the top keywords or key phrases. For that, we chose a simple call to OpenAI's GPT3 service which is a top NLP model and should be good at keyword detection. But first - merge the data.
# + pycharm={"name": "#%%\n"}
# Attach the manually copied grant descriptions, in the same order as the
# top-3 rows above (grant ids 4352, 2323, 1668).
gr12_top3['body'] = [text_4352, text_2323, text_1668]
# Prepare the texts for GPT3 - we need to clean them from new lines
gr12_top3['body'] = gr12_top3['body'].apply(lambda text: text.replace('\n', ' '))
print(gr12_top3['body'])
# -
# The next section is non-deterministic as GPT3 is itself non-deterministic - it often returns different results, sometimes empty ones. It may need to be rerun once or twice. To ensure proper work one needs to fiddle with the OpenAI completion parameters. For our current purposes it's good enough as a few different runs usually return similar results - we can be confident in GPT3's predictions.
# + pycharm={"name": "#%%\n"}
# Read the API key from the environment rather than hard-coding a secret.
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_keywords(text: str):
    """Ask GPT-3 (davinci) to extract keywords from *text*.

    Returns the raw OpenAI Completion response; the caller parses the
    comma-separated keyword list out of choices[0].text.  Results are
    non-deterministic (temperature=0.3) and occasionally empty.
    """
    prompt = f"Text: {text}\n\nKeywords:"
    # print(prompt)
    return openai.Completion.create(
        engine="davinci",
        prompt=prompt,
        temperature=0.3,
        max_tokens=120,
        top_p=1.0,
        frequency_penalty=0.8,  # discourage repeating the same keyword
        presence_penalty=0.0,
        stop=["\n"]  # the keyword list is a single line
    )
# Extract keywords for each grant body and attach them as a list column.
grant_keywords = []
for text in gr12_top3['body']:
    response = get_keywords(text)
    # print(response)
    # The completion is one comma-separated line of keywords.
    keywords = response['choices'][0]['text'].split(',')
    keywords = [k.strip() for k in keywords]
    grant_keywords.append(keywords)
    # print(keywords)
gr12_top3['keywords'] = grant_keywords
# + [markdown] pycharm={"name": "#%% md\n"}
# # Words
# Now, let's explore the keywords of the top 3 grants.
# + pycharm={"name": "#%%\n"}
# One summary line per grant: title, extracted keywords, crowdfunded USD.
for row in gr12_top3.itertuples():
    result = f'{row.grant_title} - {", ".join(row.keywords)} - ${row.crowdfund_amount_contributions_usd}.'
    print(result)
# -
# ----
# # Bonus - trends
# Let's check the previous round's most supported grants and see how they differ from GR12.
# + pycharm={"name": "#%%\n"}
# Top three GR11 grants, ranked the same way as the GR12 ones above.
gr11_top3 = df[df['round_number'] == 11]\
    .sort_values(by='crowdfund_amount_contributions_usd', ascending=False)\
    .head(n=3)
gr11_top3[['grant_title', 'grant_id', 'url', 'crowdfund_amount_contributions_usd', 'num_unique_contributors']]
# + pycharm={"name": "#%%\n"}
text_2323 = """Dark Forest is an MMO strategy game built with zkSNARKs on Ethereum and xDAI. Players explore an infinite, procedurally-generated universe, conquering planets and growing a space empire. More info: zkSNARKs for blockchain gaming
Over 2,500+ crypto enthusiasts have participated in the first four whitelisted Dark Forest rounds, spending over one trillion gas on xDAI chain and the Ropsten testnet. This playerbase represents about 15% of our current waitlist, and we're excited to continue development so that we can open up the game to more players.
>>> What have we been up to?
After the previous Gitcoin grant round concluded, we launched DF v0.6 Round 4, aka The Lightning Round. We’ve also seen an abundance of life come from within the community in the form of the Dark Forest Community Art Contest, MarrowDAO’s recent Dark Forest Community Round, and d_fdao's recently announced Dark Forest Community New Year's Round!
We are actively seeking a front end web developer. (Sign up for the Dark Forest Jobs newsletter if you’d like to be notified of new openings!)
We’ve got one more round of v0.6 left, which will likely launch in Q1 2022. After that, we plan to embark on the next stage of our development cycle: v0.7. In addition to releasing rounds ourselves, we are enabling individual players and DAOs to deploy their own rounds, adding a unique spin to our core game.
Throughout 2022, we hope to facilitate the launch of many community rounds, so that players can play the game without having to wait for our months-long release process. Your contributions, in addition to fostering the growth of Dark Forest, will help make that happen.
We’ve learned a ton from shipping this game for our enthusiastic audience, and are eternally grateful for the opportunity to build such an outlandish piece of software.
Join the Dark Forest Discord if you'd like to get involved."""
text_468 = """ETHPlanet is a ethereum community with a spirit of open source, sharing, and mutual growth. We are committed to serving the ethereum ecosystem and its developers, communities, projects, and related platforms. We organize online and offline activities, including global conferences, forums, hackathons, etc., and are constantly exploring new ways to support and grow together with Ethereum."""
text_373 = """About Nym
The Nym network is a decentralized mixnet to support anonymity and unlinkability at the network layer (layer 0) across any blockchain and application. Unlike Tor and Katzenpost/Meson, Nym uses blockchains in order not to have a central directory and uses incentives to decentralize the mix network itself. Due to cover traffic and timing delays based on the Loopix design, Nym provides superior privacy than both VPNs and Tor, and can be integrated to communicate with any blockchain to enable communication without censorship or fear. Nym fills in the missing pieces of the cryptographic revolution necessary to end mass surveillance.
Learn more from our litepaper.
Team Nym
The Nym team is comprised by established coders and researchers in privacy-enhancing technologies.
<NAME>, CEO is a renowned computer scientist and a leader in the movement against mass surveillance who worked under web inventor <NAME> at the World Wide Web Consortium, lead NEXTLEAP and PANORAMIX, amongst other accomplishments.
<NAME>, CSO (Chief Science Officer) with 20 years working on privacy-enhancing technologies, she is one of Europe’s preeminent researchers in the field recognized for her contribution to the foundations of metadata protection from surveillance, traffic analysis, tracking, localization and behavioral profiling.
<NAME>, CTO has founded, built and sold several companies from Chainspace, Blockmainia, and the Vega protocol. He is a co-author of the blockchain-sharding protocol Chainspace and the leaderless, PBFT-variant consensus protocol Blockmania, the prototype of which turned out to be likely the world’s fastest distributed ledger (400,000 TPS with 2 secs finality).
<NAME>, Lead Developer has a Master of Engineering in Computer Science with First Class Honours from University College London. During his thesis work he studied the Coconut signature scheme under the supervision of George Danezis and then implemented it.
<NAME>, Head of Research has masters degree with specialization in Analysis of Algorithms from the Faculty of Fundamental Problems of Technology at Wroclaw University of Technology and is a designer of the Loopix mixnet, with her PhD advisor and original Nym and Chainspace co-founder <NAME>.
More on our team here.
Status
We just launched our Rust code for the Nym mixnet at the end of December 2019. For the rest of Q1 2020, we are fine tuning our validator development in order to prepare for beta release that will enable use of Nym with Bitcoin and Ethereum.
https://github.com/nymtech/nym
Resources
Github
Documentation
Contact
For questions and comments, you can reach us on our official channels.
Twitter
Telegram
"""
# + pycharm={"name": "#%%\n"}
# Sanity check: the hard-coded text order must match the grant ids of the
# GR11 top-3 rows before attaching the bodies.
assert all(grant_id == text_id
           for grant_id, text_id in zip(gr11_top3['grant_id'].to_numpy(),
                                        [2323, 468, 373]))
gr11_top3['body'] = [text_2323, text_468, text_373]
# Prepare the texts for GPT3 - we need to clean them from new lines
gr11_top3['body'] = gr11_top3['body'].apply(lambda text: text.replace('\n', ' '))
print(gr11_top3['body'])
# + pycharm={"name": "#%%\n"}
# Same keyword-extraction pass as for GR12.
grant_keywords = []
for text in gr11_top3['body']:
    response = get_keywords(text)
    keywords = response['choices'][0]['text'].split(',')
    keywords = [k.strip() for k in keywords]
    grant_keywords.append(keywords)
gr11_top3['keywords'] = grant_keywords
# Print the GR11 summary lines for comparison with GR12 above.
for row in gr11_top3.itertuples():
    result = f'{row.grant_title} - {", ".join(row.keywords)} - ${row.crowdfund_amount_contributions_usd}.'
    print(result)
# + [markdown] pycharm={"name": "#%% md\n"}
# ----
# # Conclusion
# Can we say something about the topics of the most crowdfunded grants? We can conclude that:
#
# * blockchain games (GameFi, etc.) remain a top interest. Particularly - Dark Forest is among top 3 in two consecutive rounds
# * rollups, L2, etc. became more popular
# * privacy and anonymity gave way to policy and research
# * the top 3 GR12 grants have attracted 2.5x more crowdfunded amounts - from ~160k to ~425k
# * the number of unique contributors (total for top 3) has almost doubled - from ~14k to ~27k
#
#
# Once the grant texts are available through an API this approach may be extended to all historical rounds to see how the trends are shifting. One interesting question is what would be the most popular topics of GR13...
| gr12_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Lecture01 Introduction
#
# ## 1.1 Mathematical Optimization Problem
#
# (mathematical) optimization problem
#
# minimize $f_0(x)$
#
# subject to $f_i(x) \leq b_i, \quad i = 1, \dots, m$
#
# **Example**
#
# 1. Portfolio optimization(资产组合优化)
# * variables(优化变量):不同资产投资额
# * constraints(约束):总预算,每项资产最低最高投资额,最小回报
# * objective(目标):降低总风险(如回报方差、可能的损失等等)
#
# 2. Device sizing in electronic circuits
# * variables: device widths and lengths
# * constraints: manufacturing limits, timing requirements, maximum area
# * objective: power consumption
#
# 3. Data fitting
# * variables: model param
# * constraints: prior information, param limits
# * objective: measure of misfit or prediction
# ## 1.2 Least-squares and linear programming
#
# ### 1.2.1 Least-squares problems 最小二乘问题
#
# minimize $f_0(x) = ||Ax - b||_2^2 = \sum^k_{i=1}(a_i^Tx - b_i)^2$
#
# 此处$A \in R^{k \times n}$,而$a_i^T$表示A中的行,向量$x \in R^n$是优化变量。
#
# #### 解最小二乘问题
#
# 最小二乘问题可以被归约为解一组线性方程(正规方程)。令梯度为零,
#
# $$\nabla f_0(x) = 2A^T(Ax - b) = 0$$
#
# $$(A^TA)x = A^Tb$$
#
# 由此解析解为$x = (A^TA)^{-1}A^Tb$
#
| optimization/cvx101/01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Rdgk-2oaizy9"
import pandas as pd
import pathlib
from pathlib import Path
# + colab={"base_uri": "https://localhost:8080/"} id="gIZvpKvujKzc" outputId="248ee6b4-9c75-4bda-a040-cd9c6a8ed985"
#print the files in the current directory
base = Path(".")
print(list(base.iterdir()))
#locate the files in the current directory that end with ".xlsx"
# NOTE(review): assumes at least one .xlsx exists in the working directory;
# the [0] indexing raises IndexError otherwise.
excel_file_path = list(base.glob("*.xlsx"))[0]
# + id="iSS9oa2ti7yX"
# Read "Sheet2" of the first workbook found; no column is used as the index.
df = pd.read_excel(str(excel_file_path),"Sheet2",index_col=None)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="etWTdbN-v9Bs" outputId="ec6006c4-7603-4cba-9dab-76bc2fe32fa0"
df
# + colab={"base_uri": "https://localhost:8080/"} id="TMoMQO6Au3A5" outputId="518100fe-5c28-418a-aaad-a290bcbac6af"
df.columns
#we want the columns of the dataframe to be "Country" and "Salary", and not "Australia" and 24526.15
# + colab={"base_uri": "https://localhost:8080/"} id="jUy5NmFVvZ0f" outputId="d22345ea-eb22-490a-f8a2-bbb5e7f1498e"
#rename the columns
# The spreadsheet had no header row, so the first data row ("Australia",
# 24526.15) was read in as the column labels; give the columns real names...
df_new = df.rename(columns={'Australia': 'Country',24526.15:"Salary"})
print(df_new)
# + id="V1Dn-sBNvu5A"
# ...and re-insert that first row as data.  DataFrame.append was removed in
# pandas 2.0; pd.concat is the supported replacement.
df = pd.concat([df_new, pd.DataFrame([{"Country":"Australia","Salary":24526.15}])],
               ignore_index=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Tkyrd_jrvkbs" outputId="4a381885-3d2d-4172-d8a8-cc4a1026cf1d"
df
# + colab={"base_uri": "https://localhost:8080/"} id="ZnMb3zxjvNry" outputId="6ef79749-6696-44fc-c312-3cc1cd34c569"
#sort the values in the dataframe based on salary in descending order
df = df.sort_values("Salary",ascending=False)
print(df)
# + colab={"base_uri": "https://localhost:8080/"} id="oz1jtoWmwY4L" outputId="84ec0ce0-22f6-4c4d-aebf-32693cca929c"
#the indexes are kinda messed up, so reset them
df = df.reset_index()
print(df)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="V0ffLYO2wcKc" outputId="b1724c64-7655-482d-94b0-5dfaa556b49e"
#as you can see above, there's a new index column (holding the original indexes) which we will get rid of
# (reset_index(drop=True) would avoid creating it in the first place)
df = df.drop("index",axis=1)
df
#perfect!!!
# + id="Attog7vmkEA9"
#save the sorted data as a new excel file in the working directory
df.to_excel("sorted.xlsx")
| ExcelSorter64.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [learning-python3.ipynb]: https://gist.githubusercontent.com/kenjyco/69eeb503125035f21a9d/raw/learning-python3.ipynb
#
# Right-click -> "save link as" [https://gist.githubusercontent.com/kenjyco/69eeb503125035f21a9d/raw/learning-python3.ipynb][learning-python3.ipynb] to get most up-to-date version of this notebook file.
#
# ## Quick note about Jupyter cells
#
# When you are editing a cell in Jupyter notebook, you need to re-run the cell by pressing **`<Shift> + <Enter>`**. This will allow changes you made to be available to other cells.
#
# Use **`<Enter>`** to make new lines inside a cell you are editing.
#
# #### Code cells
#
# Re-running will execute any statements you have written. To edit an existing code cell, click on it.
#
# #### Markdown cells
#
# Re-running will render the markdown text. To edit an existing markdown cell, double-click on it.
#
# <hr>
# ## Common Jupyter operations
#
# Near the top of the https://try.jupyter.org page, Jupyter provides a row of menu options (`File`, `Edit`, `View`, `Insert`, ...) and a row of tool bar icons (disk, plus sign, scissors, 2 files, clipboard and file, up arrow, ...).
#
# #### Inserting and removing cells
#
# - Use the "plus sign" icon to insert a cell below the currently selected cell
# - Use "Insert" -> "Insert Cell Above" from the menu to insert above
#
# #### Clear the output of all cells
#
# - Use "Kernel" -> "Restart" from the menu to restart the kernel
# - click on "clear all outputs & restart" to have all the output cleared
#
# #### Save your notebook file locally
#
# - Clear the output of all cells
# - Use "File" -> "Download as" -> "IPython Notebook (.ipynb)" to download a notebook file representing your https://try.jupyter.org session
#
# #### Load your notebook file in try.jupyter.org
#
# 1. Visit https://try.jupyter.org
# 2. Click the "Upload" button near the upper right corner
# 3. Navigate your filesystem to find your `*.ipynb` file and click "open"
# 4. Click the new "upload" button that appears next to your file name
# 5. Click on your uploaded notebook file
#
# <hr>
# ## References
#
# - https://try.jupyter.org
# - https://docs.python.org/3/tutorial/index.html
# - https://docs.python.org/3/tutorial/introduction.html
# - https://daringfireball.net/projects/markdown/syntax
#
# <hr>
# ## Python objects, basic types, and variables
#
# Everything in Python is an **object** and every object in Python has a **type**. Some of the basic types include:
#
# - **`int`** (integer; a whole number with no decimal place)
# - `10`
# - `-3`
# - **`float`** (float; a number that has a decimal place)
# - `7.41`
# - `-0.006`
# - **`str`** (string; a sequence of characters enclosed in single quotes, double quotes, or triple quotes)
# - `'this is a string using single quotes'`
# - `"this is a string using double quotes"`
# - `'''this is a triple quoted string using single quotes'''`
# - `"""this is a triple quoted string using double quotes"""`
# - **`bool`** (boolean; a binary value that is either true or false)
# - `True`
# - `False`
# - **`NoneType`** (a special type representing the absence of a value)
# - `None`
#
# In Python, a **variable** is a name you specify in your code that maps to a particular **object**, object **instance**, or value.
#
# By defining variables, we can refer to things by names that make sense to us. Names for variables can only contain letters, underscores (`_`), or numbers (no spaces, dashes, or other characters). Variable names must start with a letter or underscore.
#
# <hr>
# ## Basic operators
#
# In Python, there are different types of **operators** (special symbols) that operate on different values. Some of the basic operators include:
#
# - arithmetic operators
# - **`+`** (addition)
# - **`-`** (subtraction)
# - **`*`** (multiplication)
# - **`/`** (division)
# - __`**`__ (exponent)
# - assignment operators
# - **`=`** (assign a value)
# - **`+=`** (add and re-assign; increment)
# - **`-=`** (subtract and re-assign; decrement)
# - **`*=`** (multiply and re-assign)
# - comparison operators (return either `True` or `False`)
# - **`==`** (equal to)
# - **`!=`** (not equal to)
# - **`<`** (less than)
# - **`<=`** (less than or equal to)
# - **`>`** (greater than)
# - **`>=`** (greater than or equal to)
#
# When multiple operators are used in a single expression, **operator precedence** determines which parts of the expression are evaluated in which order. Operators with higher precedence are evaluated first (like PEMDAS in math). Operators with the same precedence are evaluated from left to right.
#
# - `()` parentheses, for grouping
# - `**` exponent
# - `*`, `/` multiplication and division
# - `+`, `-` addition and subtraction
# - `==`, `!=`, `<`, `<=`, `>`, `>=` comparisons
#
# > See https://docs.python.org/3/reference/expressions.html#operator-precedence
# Assigning some numbers to different variables
num1 = 10
num2 = -3
num3 = 7.41
num4 = -.6
num5 = 7
num6 = 3
num7 = 11.11
# Addition
print(num1 + num2)
print(num5 + num6)
# Subtraction
num2 - num3
# Multiplication
num3 * num4
# Division
num10 = num4/num5
print(num10)
print(num4 // num5)
# Exponent
num5 ** num6
# Increment existing variable
num7 += 4
num7
# Decrement existing variable
num6 -= 2
num6
# Multiply & re-assign
num3 *= 5
num3
# Assign the value of an expression to a variable
num8 = num1 + num2 * num3
num8
# Are these two expressions equal to each other?
num1 + num2 == num5
# Are these two expressions not equal to each other?
num3 != num4
# Is the first expression less than the second expression?
num5 < num6
# Is this expression True?
5 > 3 > 1
# Is this expression True?
5 > 3 < 4 == 3 + 1
# Assign some strings to different variables
simple_string1 = 'an example'
simple_string2 = "oranges "
# Addition
simple_string1 + ' of using the + operator'
# Notice that the string was not modified
simple_string1
# Multiplication
simple_string2 * 4
# This string wasn't modified either
simple_string2
# Are these two expressions equal to each other?
simple_string1 == simple_string2
# Are these two expressions equal to each other?
simple_string1 == 'an example'
# Add and re-assign
#simple_string5 = simple_string1 + 'gigetto'
simple_string1 += ' that re-assigned the original string'
simple_string1
#simple_string1 -= 'string'
# Multiply and re-assign
simple_string2 *= 3
simple_string2
# +
# Note: Subtraction, division, and decrement operators do not apply to strings.
# -
# ## Basic containers
#
# > Note: **mutable** objects can be modified after creation and **immutable** objects cannot.
#
# Containers are objects that can be used to group other objects together. The basic container types include:
#
# - **`str`** (string: immutable; indexed by integers; items are stored in the order they were added)
# - **`list`** (list: mutable; indexed by integers; items are stored in the order they were added)
# - `[3, 5, 6, 3, 'dog', 'cat', False]`
# - **`tuple`** (tuple: immutable; indexed by integers; items are stored in the order they were added)
# - `(3, 5, 6, 3, 'dog', 'cat', False)`
# - **`set`** (set: mutable; not indexed at all; items are NOT stored in the order they were added; can only contain immutable objects; does NOT contain duplicate objects)
# - `{3, 5, 6, 3, 'dog', 'cat', False}`
# - **`dict`** (dictionary: mutable; key-value pairs are indexed by immutable keys; items are NOT stored in the order they were added)
# - `{'name': 'Jane', 'age': 23, 'fav_foods': ['pizza', 'fruit', 'fish']}`
#
# When defining lists, tuples, or sets, use commas (,) to separate the individual items. When defining dicts, use a colon (:) to separate keys from values and commas (,) to separate the key-value pairs.
#
# Strings, lists, and tuples are all **sequence types** that can use the `+`, `*`, `+=`, and `*=` operators.
# Assign some containers to different variables
list1 = [3, 5, 6, 3, 'dog', 'cat', False]
tuple1 = (3, 5, 6, 3, 'dog', 'cat', False)
set1 = {3, 5, 6, 3, 'dog', 'cat', False}
dict1 = {'name': 'Jane', 'age': 23, 'fav_foods': ['pizza', 'fruit', 'fish']}
# Items in the list object are stored in the order they were added
list1
# Items in the tuple object are stored in the order they were added
tuple1
# Items in the set object are not stored in the order they were added
# Also, notice that the value 3 only appears once in this set object
set1
# Items in the dict object are not stored in the order they were added
dict1
# Add and re-assign
list1 += [5, 'grapes']
print(list1)
list1.append('gigetto')
list1
# Add and re-assign
tuple1 += (5, 'grapes')
tuple1
# Multiply
[1, 2, 3, 4] * 2
# Multiply
(1, 2, 3, 4) * 3
# ## Accessing data in containers
#
# For strings, lists, tuples, and dicts, we can use **subscript notation** (square brackets) to access data at an index.
#
# - strings, lists, and tuples are indexed by integers, **starting at 0** for first item
# - these sequence types also support accessing a range of items, known as **slicing**
# - use **negative indexing** to start at the back of the sequence
# - dicts are indexed by their keys
#
# > Note: sets are not indexed, so we cannot use subscript notation to access data elements.
# Access the first item in a sequence
list1[0]
# Access the last item in a sequence
tuple1[-1]
# Access a range of items in a sequence
simple_string1[3:8]
# Access a range of items in a sequence
print(tuple1)
tuple1[:-3]
# Access a range of items in a sequence
print(list1)
list1[4:]
numeri = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(numeri)
print(numeri[3:7])
print(numeri[7:9])
print(numeri[9:])
# Access an item in a dictionary
dict1['name']
# # Esercizio
#
# Costruire un dictionary che contiene un dictionary di stringhe. Accedere ad una stringa del dictionary più interno
# +
# Access an element of a sequence in a dictionary
print(dict1)
print(dict1['fav_foods'][2])
# TODO: Esperimento interessante con le dictionary
dict2 = {'name': 'Jane', 'age': 23, 'fav_foods': ['pizza', 'fruit', 'fish'],
'address': {'street': 'Geary Street 53rd', 'city': "San Francisco"}}
print(dict2['address']['city'])
# -
# ## Python built-in functions and callables
#
# A **function** is a Python object that you can "call" to **perform an action** or compute and **return another object**. You call a function by placing parentheses to the right of the function name. Some functions allow you to pass **arguments** inside the parentheses (separating multiple arguments with a comma). Internal to the function, these arguments are treated like variables.
#
# Python has several useful built-in functions to help you work with different objects and/or your environment. Here is a small sample of them:
#
# - **`type(obj)`** to determine the type of an object
# - **`len(container)`** to determine how many items are in a container
# - **`callable(obj)`** to determine if an object is callable
# - **`sorted(container)`** to return a new list from a container, with the items sorted
# - **`sum(container)`** to compute the sum of a container of numbers
# - **`min(container)`** to determine the smallest item in a container
# - **`max(container)`** to determine the largest item in a container
# - **`abs(number)`** to determine the absolute value of a number
# - **`repr(obj)`** to return a string representation of an object
#
# > Complete list of built-in functions: https://docs.python.org/3/library/functions.html
#
# There are also different ways of defining your own functions and callable objects that we will explore later.
# Use the type() function to determine the type of an object
type(simple_string1)
# Use the len() function to determine how many items are in a container
len(dict1)
# Use the len() function to determine how many items are in a container
len(simple_string2)
# Use the callable() function to determine if an object is callable
callable(len)
# Use the callable() function to determine if an object is callable
callable(dict1)
# Use the sorted() function to return a new list from a container, with the items sorted
sorted([10, 1, 3.6, 7, 5, 2, -3])
# Use the sorted() function to return a new list from a container, with the items sorted
# - notice that capitalized strings come first
sorted(['dogs', 'cats', 'zebras', 'Chicago', 'California', 'ants', 'mice'])
# Use the sum() function to compute the sum of a container of numbers
sum([10, 1, 3.6, 7, 5, 2, -3])
# Use the min() function to determine the smallest item in a container
min([10, 1, 3.6, 7, 5, 2, -3])
# Use the min() function to determine the smallest item in a container
min(['g', 'z', 'a', 'y'])
# Use the max() function to determine the largest item in a container
max([10, 1, 3.6, 7, 5, 2, -3])
# Use the max() function to determine the largest item in a container
max('gibberish')
# Use the abs() function to determine the absolute value of a number
abs(10)
# Use the abs() function to determine the absolute value of a number
abs(-12)
# Use the repr() function to return a string representation of an object
repr(set1)
# ## Python object attributes (methods and properties)
#
# Different types of objects in Python have different **attributes** that can be referred to by name (similar to a variable). To access an attribute of an object, use a dot (`.`) after the object, then specify the attribute (i.e. `obj.attribute`)
#
# When an attribute of an object is a callable, that attribute is called a **method**. It is the same as a function, only this function is bound to a particular object.
#
# When an attribute of an object is not a callable, that attribute is called a **property**. It is just a piece of data about the object, that is itself another object.
#
# The built-in `dir()` function can be used to return a list of an object's attributes.
#
# <hr>
# ## Some methods on string objects
#
# - **`.capitalize()`** to return a capitalized version of the string (only first char uppercase)
# - **`.upper()`** to return an uppercase version of the string (all chars uppercase)
# - **`.lower()`** to return an lowercase version of the string (all chars lowercase)
# - **`.count(substring)`** to return the number of occurrences of the substring in the string
# - **`.startswith(substring)`** to determine if the string starts with the substring
# - **`.endswith(substring)`** to determine if the string ends with the substring
# - **`.replace(old, new)`** to return a copy of the string with occurrences of the "old" replaced by "new"
# Assign a string to a variable
a_string = 'tHis is a sTriNg'
# Return a capitalized version of the string
a_string2 = a_string.capitalize()
print(a_string2)
# Return an uppercase version of the string
a_string.upper()
# Return a lowercase version of the string
a_string.lower()
# Notice that the methods called have not actually modified the string
# (strings are immutable; every method above returns a NEW string object)
a_string
# Count number of occurrences of a substring in the string
a_string.count('i')
# Count number of occurrences of a substring in the string after a certain position
a_string.count('i', 7)
# Count number of occurrences of a substring in the string
a_string.count('is')
# Does the string start with 'this'?
a_string.startswith('this')
# Does the lowercase string start with 'this'?
a_string.lower().startswith('this')
# Does the string end with 'Ng'?
a_string.endswith('Ng')
# Return a version of the string with a substring replaced with something else
a_string.replace('is', 'XYZ')
# Return a version of the string with a substring replaced with something else
a_string.replace('i', '!')
# Return a version of the string with the first 2 occurrences of a substring replaced with something else
a_string.replace('i', '!', 2)
# ## Some methods on list objects
#
# - **`.append(item)`** to add a single item to the list
# - **`.extend([item1, item2, ...])`** to add multiple items to the list
# - **`.remove(item)`** to remove a single item from the list
# - **`.pop()`** to remove and return the item at the end of the list
# - **`.pop(index)`** to remove and return an item at an index
#
# Lists are Mutable.
set2 = { 3, 5, 4, 3, 'ciao'}
list3 = [ 'gino', 'pino', 'tino']
list3.append('thimoty')
list3.remove('pino')
list3
elemento_poppato = list3.pop()
print(elemento_poppato)
list3
# ## Some methods on set objects
#
# - **`.add(item)`** to add a single item to the set
# - **`.update([item1, item2, ...])`** to add multiple items to the set
# - **`.update(set2, set3, ...)`** to add items from all provided sets to the set
# - **`.remove(item)`** to remove a single item from the set
# - **`.pop()`** to remove and return a random item from the set
# - **`.difference(set2)`** to return items in the set that are not in another set
# - **`.intersection(set2)`** to return items in both sets
# - **`.union(set2)`** to return items that are in either set
# - **`.symmetric_difference(set2)`** to return items that are only in one set (not both)
# - **`.issuperset(set2)`** does the set contain everything in the other set?
# - **`.issubset(set2)`** is the set contained in the other set?
#
# Sets are mutable as well.
set2.add('hello')
print(set2)
# ## Some methods on dict objects
#
# - **`.update([(key1, val1), (key2, val2), ...])`** to add multiple key-value pairs to the dict
# - **`.update(dict2)`** to add all keys and values from another dict to the dict
# - **`.pop(key)`** to remove key and return its value from the dict (error if key not found)
# - **`.pop(key, default_val)`** to remove key and return its value from the dict (or return default_val if key not found)
# - **`.get(key)`** to return the value at a specified key in the dict (or None if key not found)
# - **`.get(key, default_val)`** to return the value at a specified key in the dict (or default_val if key not found)
# - **`.keys()`** to return a list of keys in the dict
# - **`.values()`** to return a list of values in the dict
# - **`.items()`** to return a list of key-value pairs (tuples) in the dict
dict2.items()
len(dict2)
# ## Positional arguments and keyword arguments to callables
#
# You can call a function/method in a number of different ways:
#
# - `func()`: Call `func` with no arguments
# - `func(arg)`: Call `func` with one positional argument
# - `func(arg1, arg2)`: Call `func` with two positional arguments
# - `func(arg1, arg2, ..., argn)`: Call `func` with many positional arguments
# - `func(kwarg=value)`: Call `func` with one keyword argument
# - `func(kwarg1=value1, kwarg2=value2)`: Call `func` with two keyword arguments
# - `func(kwarg1=value1, kwarg2=value2, ..., kwargn=valuen)`: Call `func` with many keyword arguments
# - `func(arg1, arg2, kwarg1=value1, kwarg2=value2)`: Call `func` with positonal arguments and keyword arguments
# - `obj.method()`: Same for `func`.. and every other `func` example
#
# When using **positional arguments**, you must provide them in the order that the function defined them (the function's **signature**).
#
# When using **keyword arguments**, you can provide the arguments you want, in any order you want, as long as you specify each argument's name.
#
# When using positional and keyword arguments, positional arguments must come first.
# +
def fib(n):  # write Fibonacci series up to n
    """Print a Fibonacci series up to n.

    Prints every Fibonacci number strictly less than n, separated by
    spaces and followed by a newline. Returns None.
    """
    a, b = 0, 1
    while a < n:
        print(a, end=' ')
        a, b = b, a+b
    print()
# Now call the function we just defined:
fib(2000)
# NOTE: `n` is a parameter local to fib and does not exist at module level,
# so the original `print(n)` here raised a NameError; it has been removed.
# -
# The actual parameters (arguments) to a function call are introduced in the local symbol table of the called function when it is called; thus, arguments are passed using call by value (where the value is always an object reference, not the value of the object). When a function calls another function, a new local symbol table is created for that call.
#
# A function definition introduces the function name in the current symbol table. The value of the function name has a type that is recognized by the interpreter as a user-defined function. This value can be assigned to another name which can then also be used as a function.
# +
# A function is a first-class object: evaluating the bare name yields the
# function object, which can be bound to another name and called through it
fib
f = fib
f(100)
# +
# Returning a result
def fib2(n): # return Fibonacci series up to n
    """Return a list containing the Fibonacci series up to n."""
    series = []
    current, nxt = 0, 1
    while current < n:
        series.append(current)
        current, nxt = nxt, current + nxt
    return series
f100 = fib2(100) # call it
f100 # write the result
# +
# Default argument values
def ask_ok(prompt, retries=4, reminder='Please try again!'):
    """Prompt the user for a yes/no answer.

    Returns True for an affirmative reply and False for a negative one.
    Each invalid reply prints `reminder` and consumes one retry; once
    `retries` is exhausted a ValueError is raised.
    """
    yes_replies = ('y', 'ye', 'yes')
    no_replies = ('n', 'no', 'nop', 'nope')
    while True:
        reply = input(prompt)
        if reply in yes_replies:
            return True
        if reply in no_replies:
            return False
        retries -= 1
        if retries < 0:
            raise ValueError('invalid user response')
        print(reminder)
# Call the function
ask_ok("are you ok?", reminder='Prova ancora')
# -
def parrot(voltage, state='a stiff', action='voom', type='Norwegian Blue'):
    """Demonstrate positional vs. keyword arguments (Monty Python sketch)."""
    print(f"-- This parrot wouldn't {action} if you put {voltage} volts through it.")
    print(f"-- Lovely plumage, the {type}")
    print(f"-- It's {state} !")
parrot(1000)                                          # 1 positional argument
parrot(voltage=1000)                                  # 1 keyword argument
parrot(voltage=1000000, action='VOOOOOM')             # 2 keyword arguments
parrot(action='VOOOOOM', voltage=1000000)             # 2 keyword arguments
parrot('a million', 'bereft of life', 'jump')         # 3 positional arguments
parrot('a thousand', state='pushing up the daisies')  # 1 positional, 1 keyword
# ## Lambda expressions##
#
# Small anonymous functions can be created with the lambda keyword. This function returns the sum of its two arguments: lambda a, b: a+b. Lambda functions can be used wherever function objects are required. They are syntactically restricted to a single expression
# +
def make_incrementor(n):
    """Return a function that adds the captured offset `n` to its argument."""
    def _increment(x):
        return x + n
    return _increment
f = make_incrementor(42)
print(callable(f))
print(f(0))
print(f(1))
print(f(32))
# +
def generalMultiplier(factor):
    """Return a function that multiplies its argument by the captured `factor`."""
    def _multiply(x):
        return x * factor
    return _multiply
raddoppiatore = generalMultiplier(2)
print(raddoppiatore(4))
print(raddoppiatore([1, 2, 3]))
# +
def salutatore(formaDiSaluto):
    """Return a greeter that prefixes a first and last name with the greeting."""
    def _saluta(nome, cognome):
        return formaDiSaluto + ", " + nome + " " + cognome
    return _saluta
salutatoreFormale = salutatore('Buongiorno')
salutatoreInglese = salutatore('Hello')
salutatoreSimpatico = salutatore('Uellà')
print(salutatoreFormale('Thimoty', 'Barbieri'))
print(salutatoreFormale('Eleonora', 'Cernuschi'))
print(salutatoreSimpatico('Carla', 'Maggi'))
# -
# ## Salutatore
#
# Trovare un modo per generare funzioni che salutano in modo formale, in modo simpatico, in inglese. Queste funzioni ricevono tutte come parametro il nome e il cognome della persona.
# Usarle per salutare l'utente.
# Sorting with a key function: order the pairs alphabetically by their word
pairs = [(1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')]
pairs.sort(key=lambda pair: pair[1])
print(pairs)
# Sorting by the reciprocal of the number puts the pairs in descending numeric order
pairs.sort(key=lambda pair: 1/pair[0])
pairs
# ## Formatting strings and using placeholders
# +
print('{0} and {1}'.format('spam', 'eggs'))
print('{1} and {0}'.format('spam', 'eggs'))
# -
print('This {food} is {adjective}.'.format(
food='spam', adjective='absolutely horrible'))
# See Python's Format String Syntax
for x in range(1, 11):
print('{0:2d} {1:3d} {2:4d}'.format(x, x*x, x*x*x))
# ## Python "for loops"
#
# It is easy to **iterate** over a collection of items using a **for loop**. The strings, lists, tuples, sets, and dictionaries we defined are all **iterable** containers.
#
# The for loop will go through the specified container, one item at a time, and provide a temporary variable for the current item. You can use this temporary variable like a normal variable.
# Measure some strings:
words = ['cat', 'window', 'defenestrate']
for w in words:
print(w, len(w))
# Iterate while changing the inspected array
for w in words[:]: # Loop over a slice copy of the entire list.
if len(w) > 6:
words.insert(0, w)
words
# To iterate over the indices of a sequence, you can combine range() and len() as follows. More elaborate techniques (such as enumerate() are described in Looping Techniques)
a = ['Mary', 'had', 'a', 'little', 'lamb']
for i in range(len(a)):
print(i, a[i])
# Using enumerate
for i, v in enumerate(['tic', 'tac', 'toe']):
print(i, v)
# Looping two or more sequences in parallel with zip()
questions = ['name', 'quest', 'favorite color']
answers = ['lancelot', 'the holy grail', 'blue']
for q, a in zip(questions, answers):
print('What is your {0}? It is {1}.'.format(q, a))
# ## Python "if statements" and "while loops"
#
# Conditional expressions can be used with these two **conditional statements**.
#
# The **if statement** allows you to test a condition and perform some actions if the condition evaluates to `True`. You can also provide `elif` and/or `else` clauses to an if statement to take alternative actions if the condition evaluates to `False`.
#
# The **while loop** will keep looping until its conditional expression evaluates to `False`.
#
# > Note: It is possible to "loop forever" when using a while loop with a conditional expression that never evaluates to `False`.
# >
# > Note: Since the **for loop** will iterate over a container of items until there are no more, there is no need to specify a "stop looping" condition.
## Fibonacci
a, b = 0, 1
while a < 10:
print(a)
a, b = b, a+b
# if, elif, else block
x = 42
if x < 0:
x = 0
print('Negative changed to zero')
elif x == 0:
print('Zero')
elif x == 1:
print('Single')
else:
print('More')
# ## List, set, and dict comprehensions
# +
squares = []
for x in range(10):
squares.append(x**2)
squares
# -
# list comprehension
squares = [x**2 for x in range(10)]
squares
# Lambda equivalent
squares = list(map(lambda x: x**2, range(10)))
# Obtaining a 2D cartesian space for two vectors
[(x, y) for x in [1,2,3] for y in [3,1,4] if x != y]
# dict comprehensions
{x: x**2 for x in (2, 4, 6)}
# set comprehensions
a = {x for x in 'abracadabra' if x not in 'abc'}
a
# ## Creating objects from arguments or other objects
#
# The basic types and containers we have used so far all provide **type constructors**:
#
# - `int()`
# - `float()`
# - `str()`
# - `list()`
# - `tuple()`
# - `set()`
# - `dict()`
#
# Up to this point, we have been defining objects of these built-in types using some syntactic shortcuts, since they are so common.
#
# Sometimes, you will have an object of one type that you need to convert to another type. Use the **type constructor** for the type of object you want to have, and pass in the object you currently have.
j = int(23)
j
myset = set(['hello', 1, 3, 3, 6])
myset
# ## Importing modules
#
# A module is a file containing Python definitions and statements. The file name is the module name with the suffix .py appended. Within a module, the module’s name (as a string) is available as the value of the global variable __name__.
# +
# Create a file fipo.py with fib and fib2 functions
# import fibo
#fibo.fib(1000)
#fibo.fib2(100)
#fibo.__name__
# See https://docs.python.org/3/library/ Python Standard Library
import statistics as stat
print(stat.mean([1, 2, 3, 4, 4]))
import os
entries = os.listdir('./')
print(entries)
# -
# ## Exceptions
#
# Even if a statement or expression is syntactically correct, it may cause an error when an attempt is made to execute it. Errors detected during execution are called exceptions and are not unconditionally fatal
while True:
try:
x = int(input("Please enter a number: "))
break
except ValueError:
print("Oops! That was no valid number. Try again...")
# +
import sys
# Read an integer from the first line of a file, handling each failure
# mode with its own handler.
try:
    f = open('myfile.txt')
    s = f.readline()
    i = int(s.strip())
# FileNotFoundError and other I/O failures are subclasses of OSError
except OSError as err:
    print("OS error: {0}".format(err))
# raised by int() when the line is not a valid integer
except ValueError:
    print("Could not convert data to an integer.")
# a bare except catches everything else; tolerable here only because it
# logs and immediately re-raises
except:
    print("Unexpected error:", sys.exc_info()[0])
    raise
# NOTE(review): the file handle `f` is never closed on the success path;
# a `with open(...)` block would be safer - confirm before changing the
# tutorial text.
# -
# ## Classes: Creating your own objects
#
# Classes provide a means of bundling data and functionality together. Creating a new class creates a new type of object, allowing new instances of that type to be made. Each class instance can have attributes attached to it for maintaining its state. Class instances can also have methods (defined by its class) for modifying its state.
# ## Defining functions and methods
# +
class MyClass:
    """A simple example class"""

    # class attribute, shared by every instance
    i = 12345

    def f(self):
        """Return a fixed greeting string."""
        return 'hello world'


mc = MyClass()
mc.f()
# -
# ## Creating an initializer method for your classes
# +
class Complex:
    """Minimal complex-number record with real part `r` and imaginary part `i`."""

    def __init__(self, realpart, imagpart):
        # store the two components as plain instance attributes
        self.r = realpart
        self.i = imagpart


x = Complex(3.0, -4.5)
x.r, x.i
# +
class Dog:
    """Toy class contrasting class variables with instance variables."""

    kind = 'canine'  # class variable shared by all instances

    def __init__(self, name):
        self.name = name  # instance variable unique to each instance


d = Dog('Fido')
e = Dog('Buddy')
print(d.kind)  # shared by all dogs
print(e.kind)  # shared by all dogs
print(d.name)  # unique to d
print(e.name)  # unique to e
# -
# ## Inheritance
# +
# Define a new class called `Thing` that is derived from the base Python object
class Thing(object):
    """Minimal subclass of `object` carrying a single class attribute."""
    my_property = 'I am a "Thing"'


# Define a new class called `DictThing` that is derived from the `dict` type
class DictThing(dict):
    """A dict subclass that also carries a custom class attribute."""
    my_property = 'I am a "DictThing"'
# -
print(Thing)
print(type(Thing))
print(DictThing)
print(type(DictThing))
print(issubclass(DictThing, dict))
print(issubclass(DictThing, object))
# Create "instances" of our new classes
t = Thing()
d = DictThing()
print(t)
print(type(t))
print(d)
print(type(d))
# Interact with a DictThing instance just as you would a normal dictionary
d['name'] = 'Sally'
print(d)
d.update({
'age': 13,
'fav_foods': ['pizza', 'sushi', 'pad thai', 'waffles'],
'fav_color': 'green',
})
print(d)
print(d.my_property)
# ## Context managers and the "with statement"
| learning-python3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.4
# language: julia
# name: julia-0.6
# ---
# Build a "volatility by decade" table for the baseline experiments and
# decompose changes in volatility into trade-barrier, diversification and
# specialization components (written against Julia 0.6-era APIs).
include("../output.jl")
using ImpvolOutput, FileIO
# load the model results and shared parameters for the baseline scenario
results = load("../experiments/baseline/actual/results.jld2")["results"]
parameters = load("../experiments/baseline/common_parameters.jld2")["parameters"]
# sum over the 3rd dimension (Julia 0.6 reduction syntax) to aggregate real GDP
real_GDP_model = sum(ImpvolOutput.make_series(results, :real_GDP), 3)
# NOTE(review): period 28 falls in both the 1990s range (19:28) and the 2000s
# range (28:36) - confirm whether the 2000s should start at 29.
rows = [("1970s", 1:8), ("1980s", 9:18), ("1990s", 19:28), ("2000s", 28:36)]
columns = ["actual", "kappa1972", "nosectoral", "nosectoral_kappa1972"]
# Mean real-GDP volatility per decade for one counterfactual scenario `column`;
# uses the shared `parameters` loaded above.
function get_volatility(rows, column)
    results = load("../experiments/baseline/$(column)/results.jld2")["results"]
    real_GDP_model = sum(ImpvolOutput.make_series(results, :real_GDP), 3)
    return [mean(ImpvolOutput.calculate_volatilities(real_GDP_model, parameters, true, row[2])) for row in rows]
end
using DataFrames
# one row per decade, one column per scenario
volatility = DataFrame()
volatility[:decades] = [row[1] for row in rows]
for column in columns
    volatility[Symbol(column)] = get_volatility(rows, column)
end
volatility
# `stats` is an alias for `volatility`; the assignments below mutate both
stats = volatility
# percentage decomposition, all expressed relative to the kappa1972 baseline
stats[:trade_barriers] = 100 * (stats[:actual] - stats[:kappa1972]) ./ stats[:kappa1972]
# NOTE(review): diversification is normalized by kappa1972 rather than by
# nosectoral_kappa1972 - confirm the intended denominator.
stats[:diversification] = 100 * (stats[:nosectoral] - stats[:nosectoral_kappa1972]) ./ stats[:kappa1972]
stats[:specialization] = 100 * (stats[:actual] - stats[:kappa1972] - stats[:nosectoral] + stats[:nosectoral_kappa1972]) ./ stats[:kappa1972]
# drop the raw scenario columns, keeping only the decades and the decomposition
for col in columns
    delete!(volatility, Symbol(col))
end
using CSV
CSV.write("../volatility_by_decade.csv", volatility)
| notebooks/Volatility by decade.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import inspect
import pandas as pd
import great_expectations as ge
# Every expectation method exposed by the Dataset class.
# NOTE(review): substring match — any attribute merely *containing* "expect_"
# is included; `exp.startswith("expect_")` may express the intent more exactly.
expectation_list = [exp for exp in ge.dataset.Dataset.__dict__.keys() if "expect_" in exp]
# expectation_list
# +
## Manually copied from https://github.com/great-expectations/great_expectations/blob/develop/tests/test_utils.py
# Expectations NOT yet implemented for the SQLAlchemy backend.
# Commented-out entries ARE implemented and kept here to mirror the upstream list.
sql_missing_expectations = [
    # "expect_column_to_exist",
    # "expect_table_row_count_to_be_between",
    # "expect_table_row_count_to_equal",
    # "expect_table_columns_to_match_ordered_list",
    # "expect_column_values_to_be_unique",
    # "expect_column_values_to_not_be_null",
    # "expect_column_values_to_be_null",
    "expect_column_values_to_be_of_type",
    "expect_column_values_to_be_in_type_list",
    # "expect_column_values_to_be_in_set",
    # "expect_column_values_to_not_be_in_set",
    # "expect_column_distinct_values_to_equal_set",
    # "expect_column_distinct_values_to_contain_set",
    # "expect_column_values_to_be_between",
    "expect_column_values_to_be_increasing",
    "expect_column_values_to_be_decreasing",
    # "expect_column_value_lengths_to_be_between",
    # "expect_column_value_lengths_to_equal",
    "expect_column_values_to_match_regex",
    "expect_column_values_to_not_match_regex",
    "expect_column_values_to_match_regex_list",
    "expect_column_values_to_not_match_regex_list",
    "expect_column_values_to_match_strftime_format",
    "expect_column_values_to_be_dateutil_parseable",
    "expect_column_values_to_be_json_parseable",
    "expect_column_values_to_match_json_schema",
    #"expect_column_mean_to_be_between",
    #"expect_column_median_to_be_between",
    "expect_column_stdev_to_be_between",
    #"expect_column_unique_value_count_to_be_between",
    #"expect_column_proportion_of_unique_values_to_be_between",
    "expect_column_most_common_value_to_be_in_set",
    # "expect_column_sum_to_be_between",
    # "expect_column_min_to_be_between",
    # "expect_column_max_to_be_between",
    # "expect_column_chisquare_test_p_value_to_be_greater_than",
    "expect_column_bootstrapped_ks_test_p_value_to_be_greater_than",
    # "expect_column_kl_divergence_to_be_less_than",
    "expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than",
    "expect_column_pair_values_to_be_equal",
    "expect_column_pair_values_A_to_be_greater_than_B",
    "expect_column_pair_values_to_be_in_set",
    "expect_multicolumn_values_to_be_unique"
]
# Expectations NOT yet implemented for the Spark backend (same convention).
spark_missing_expectations = [
    # "expect_column_to_exist",
    # "expect_table_row_count_to_be_between",
    # "expect_table_row_count_to_equal",
    # "expect_table_columns_to_match_ordered_list",
    # "expect_column_values_to_be_unique",
    # "expect_column_values_to_not_be_null",
    # "expect_column_values_to_be_null",
    "expect_column_values_to_be_of_type",
    "expect_column_values_to_be_in_type_list",
    # "expect_column_values_to_be_in_set",
    # "expect_column_values_to_not_be_in_set",
    # "expect_column_distinct_values_to_equal_set",
    # "expect_column_distinct_values_to_contain_set",
    "expect_column_values_to_be_between",
    "expect_column_values_to_be_increasing",
    "expect_column_values_to_be_decreasing",
    "expect_column_value_lengths_to_be_between",
    # "expect_column_value_lengths_to_equal",
    # "expect_column_values_to_match_regex",
    # "expect_column_values_to_not_match_regex",
    "expect_column_values_to_match_regex_list",
    "expect_column_values_to_not_match_regex_list",
    "expect_column_values_to_match_strftime_format",
    "expect_column_values_to_be_dateutil_parseable",
    "expect_column_values_to_be_json_parseable",
    "expect_column_values_to_match_json_schema",
    # "expect_column_mean_to_be_between",
    "expect_column_median_to_be_between",
    # "expect_column_stdev_to_be_between",
    # "expect_column_unique_value_count_to_be_between",
    # "expect_column_proportion_of_unique_values_to_be_between",
    # "expect_column_most_common_value_to_be_in_set",
    # "expect_column_sum_to_be_between",
    # "expect_column_min_to_be_between",
    # "expect_column_max_to_be_between",
    # "expect_column_chisquare_test_p_value_to_be_greater_than",
    "expect_column_bootstrapped_ks_test_p_value_to_be_greater_than",
    # "expect_column_kl_divergence_to_be_less_than",
    "expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than",
    "expect_column_pair_values_to_be_equal",
    "expect_column_pair_values_A_to_be_greater_than_B",
    "expect_column_pair_values_to_be_in_set",
    "expect_multicolumn_values_to_be_unique"
]
# -
# Assemble a presence table (one row per expectation, one boolean column per
# backend) and render it as an RST grid table.
divider = "+-----------------------------------------------------------------------------+----------+----------+----------+"
is_present_dict = {
    "expectations": expectation_list,
    "pandas": [True] * len(expectation_list),
    "sql": [exp not in sql_missing_expectations for exp in expectation_list],
    "spark": [exp not in spark_missing_expectations for exp in expectation_list],
}
df = pd.DataFrame.from_dict(is_present_dict)
# Build every line of the table, then join once — avoids repeated string +=.
table_lines = ["", divider, "|**Expectations** |**Pandas**|**SQL** |**Spark** |", divider]
for record in df.to_dict("records"):
    cells = (
        ("`" + record["expectations"] + "`").ljust(77),
        str(record["pandas"]).ljust(8),
        str(record["sql"]).ljust(8),
        str(record["spark"]).ljust(8),
    )
    table_lines.append("|%s| %s | %s | %s |" % cells)
    table_lines.append(divider)
output = "\n".join(table_lines)
print(output)
| scratch/generate_rst_table_for_missing_expectations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tf3]
# language: python
# name: conda-env-tf3-py
# ---
# +
import wget
import tarfile
from os import listdir, getcwd
from os.path import isfile, join
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
import gensim
import keras
import re
from keras.models import Sequential, load_model
# NOTE(review): `embeddings` is the keras.layers *module*; the layer class is
# `Embedding` — confirm which one later cells actually use.
from keras.layers import Dense, Activation, LSTM, embeddings
# Widen pandas console output so long review strings are easier to inspect.
pd.set_option("display.width", 150)
# -
# First, let's go get the data. The IMDB reviews are hosted on Stanford's servers. This little block does a couple things:
# 1. Pull the tarball down from Stanford's site. I'm on Windows, so use the wget package. On a Linux machine, you can use subprocess.call() to run wget from the shell.
# 2. Unzip the tarball, and place it in the current directory (or you can specify one).
# Fetch the IMDB reviews tarball and unpack it into the current directory.
f = wget.download('https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz') # can use out= to place it somewhere
# Context manager guarantees the archive handle is closed even if extraction
# fails partway (the original left the file open on error).
# NOTE: extractall() trusts member paths inside the archive; fine for this
# known dataset, but do not use it on untrusted tarballs.
with tarfile.open(f, "r:gz") as tar:
    tar.extractall() # path= can change directory
# ..* a
# ..* b
# Next, we'll parse the unzipped structure. Like many ML datasets, the positive/negative sentiment and train/test are split out by nested directories. For exploration, we're going to put it all in a pandas DF.
#
# The folder structure goes like this:
#
# ```
# aclImdb/
# │
# └───train/
# │ └─── pos/
# │ └─── neg/
# └───test/
# │ └─── pos/
# │ └─── neg/
# ```
#
# Within there are files for the review URLs, the vocabulary of unique words, and an extra folder of uncategorized reviews.
#
# Each file is named file_<id>_<rating>.txt, so we're going to initialize a dataframe, and go through each folder to append the reviews. From the folders, we get:
# 1. train/test split
# 2. response (positive or negative sentiment)
# 3. from the filename we get the actual rating and
# 4. from the file we get the review text
#
# *reword this*
# +
# Walk aclImdb/{train,test}/{pos,neg} and collect one record per review file.
# Collecting dicts in a list and building the DataFrame once avoids the
# quadratic cost (and deprecation) of calling DataFrame.append inside a loop.
records = []
for split in ['train', 'test']:
    for sent in ['pos', 'neg']:
        loc = join('.', 'aclImdb', split, sent) # Change the '.' with the path, if aclImdb is in a different folder
        for file in [f for f in listdir(loc) if isfile(join(loc, f)) and f.split('.')[-1]=='txt']:
            with open(join(loc,file),'r', encoding="utf-8") as f:
                text = f.read()
            # Filenames look like <id>_<rating>.txt: the rating is the last
            # underscore-separated token before the extension.
            records.append({'split': split,
                            'rating': file.split('_')[-1].split('.')[0],
                            'sentiment': sent,
                            'review': text})
df = pd.DataFrame(records, columns=['split', 'rating', 'sentiment', 'review'])
# -
df.to_pickle('df_saved')
df.head()
# A few times throughout, we're going to need to flatten the review column into a blob of text. To ease this, the below helper function will be employed. It employs two sets of ```" ".join()```s with an argument-less ```str.split()``` in the middle. The first combines the ```series.tolist()``` output, then the split breaks by spaces, tabs and linebreaks, and the outer join brings it all into a blob.
def flatten(ser):
    """Collapse a Series of strings into one space-separated text blob.

    The inner join concatenates the rows; splitting on any whitespace and
    re-joining normalises tabs, newlines and runs of spaces to single spaces.
    """
    if isinstance(ser, pd.Series):
        blob = " ".join(ser.tolist())
        return " ".join(blob.split())
    raise ValueError("Please pass a series")
# Using a counter, let's look at how many unique words there are
# +
from collections import Counter
# Raw token frequencies over the whole corpus (cnt is reused in later cells).
cnt = Counter(flatten(df.review).split())
print("There are {} unique tokens. The top are: \n\n{}".format(len(cnt),cnt.most_common(40)))
# -
# Dealing with natural text is, frankly, a pain.
#
# As you can already see here, things like capitalization (this vs This) are going to be an issue. But so will punctuation, since reviewers don't use it consistently:
# Inconsistent punctuation splits what is logically one word across spellings.
print("The word can't appears {} times, and cant {} times.".format(cnt["can't"],cnt["cant"]))
# To train our word vectors, we're going to want to clean this up a bit.
#
# ---
# ## Cleaning and Tokenizing
#
# To help reduce the volume of our text, we're going to employ regular expressions to stripe out the non-alphanumeric characters. So can't -> cant will reduce our text down. After making that function, we'll tokenize the sentences and words, then run the regex to remove crud.
#
# This isn't a perfect method and, frankly, there *may* be times where you want to capture all unique tokens entered by users. However, like all things in statistics, it's hard to estimate the effect of sparse parameters (and text is sparse enough as it is!).
#
# First things first, did you notice ('/><br') above? This is because the text contains ```<br />``` HTML breaks. We're going to just outright remove them with some regular expressions (regex) to save a lot of headache later.
# Strip the "<br />" HTML line breaks embedded in the raw reviews.
# NOTE(review): the pattern '<.*?br.*?>' matches any tag-like span containing
# "br" (e.g. "<i>brave</i>"), not only <br /> tags — a stricter r'<br\s*/?>'
# may be what was intended; confirm before tightening.
df["review"] = df["review"].apply(lambda x: re.sub('<.*?br.*?>', '', x))
# With that done, let's set up our text cleaning function, stripping out the non-alphanumeric characters and making everything lower cased.
# +
# Pattern for "crud": anything that is not whitespace or a word character,
# plus underscores (which \w would otherwise keep).
_NON_WORD = r'([^\s\w]|_)+'

def simple_clean(text):
    """Lower-case *text* and strip punctuation/underscores, keeping word chars and whitespace."""
    return re.sub(_NON_WORD, '', text.lower())

simple_clean("While it's a little like doing surgery with a hacksaw, this sort of cleaning-method is quick and effective")
# -
# Just how much might this reduce our text volume?
# Vocabulary size after cleaning, for comparison with the raw count above.
print("Reduced to {} unique tokens".format(len(Counter(flatten(df.review.apply(simple_clean)).split()))))
# 52% reduction in tokens just from that!
#
# This will help. But before we actually do the regex on our reviews, we're going to tokenize the sentences. This will be relevant when we start embedding with word2vec. NLTK includes a sentence tokenizer from the punkt collection.
#
# Sentence tokenization isn't extremely straightforward--we can split on punctuation marks (.?!) but would need some regex to handle honorifics (Dr., Mr., D.M.D) and then random abbreviations would wreak havoc too (approx., lt., loc.).
#
# Instead, we can use a sentence tokenizer that's already been trained on English. The one NLTK includes, punkt, uses an unsupervised method to learn sentences.
#
# *If you've never used nltk's collections before, you'll need to download them by calling ```nltk.download()``` in a python shell. If you're unable to download the collections, don't worry. We can just split the text on (.?!) and it will be good enough.*
# +
# Sentences crafted to stress punkt: honorifics and abbreviations with periods.
txt = """This is the first sentence.
Lt. Dan told me that this was the second.
Tokenizing sentences is an approx. art, but Dr. Somebody D.M.D says otherwise."""
print(sent_tokenize(txt))
# -
# As you can see, it's not perfect. "approx." seems like it wasn't learned, but everything else tokenized correctly. When we send our text to word2vec, we're going to have it train on context windows around words. However, we want these windows to only include words from that sentence, since the next sentence could be totally unrelated.
#
# This puts us in a dilemma: Do we use the tokenizer, and know that some sentences will get fragmented, or just split on (.?!) knowing some context will leak between sentences?
#
# We're going to do the latter, but I've used the former before with tremendous success, so it's not a bad option.
# +
# Flatten all reviews into one blob, split into sentences, then clean each.
# NOTE: the comprehension variable shadows the demo `txt` defined above.
clean_reviews = [simple_clean(txt) for txt in sent_tokenize(flatten(df.review))]
print(clean_reviews[0:10])
# -
| lstm_embedding_post.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''playground-MkuFeJM_'': pipenv)'
# name: python3
# ---
# +
from enum import Enum
# Minimal Enum examples: members are singletons exposing .name and .value.
class Color(Enum):
    red = 1
    green = 2
    blue = 3
class Weekday(Enum):
    MONDAY = 1
    TUESDAY = 2
    WEDNESDAY = 3
# -
# Iterating an Enum yields its members in definition order.
for col in Color:
    print(col.name)
# Attribute access returns the member; .value / .name expose its parts.
print(Color.red)
print(Color.red.value)
print(Color.red.name)
# Members are instances of the enum class; the class itself is a metaclass type.
print(type(Color.red))
print(type(Color))
# Lookup by name (subscript) ...
print(Color['red'])
print(Color['red'].value)
print(Color['red'].name)
# ... and lookup by value (call) return the same members.
print(Color(2))
print(Color(2).value)
print(Color(2).name)
# Members of different enums never compare equal; compare raw values instead.
Color.red.value == Weekday.MONDAY.value
# +
from enum import Enum

class Api(Enum):
    """External APIs between whose notations we translate."""
    IB = 1
    TV = 2

class TwoDimensionEnum(Enum):
    """Member-less Enum base whose member values map each Api to that API's spelling.

    Subclasses define members valued as {Api: str} dicts, giving two-way
    translation between the canonical (Barbucket) member and per-API strings.
    """
    def encode(self, to_api: Api) -> Enum:
        """Encode from Barbucket notation to specific api notation"""
        # Membership test directly on the mapping; `.keys()` was redundant.
        if to_api in self.value:
            return self.value[to_api]
        else:
            raise AttributeError(f"Attribute '{to_api}' not found in '{type(self)}'.")

    @classmethod
    def decode(cls, name: str, from_api: Api) -> Enum:
        """Decode from specific api notation to Barbucket notation"""
        # Linear scan over members; fine for these tiny enums.
        for element in cls:
            if element.value[from_api] == name:
                return element
        raise AttributeError(f"Attribute '{name}' not found in '{cls}'.")

class Exchange(TwoDimensionEnum):
    NASDAQ = {Api.IB: 'ISLAND', Api.TV: 'Nasdaq'}
    ARCA = {Api.IB: 'NYSE_ARCA', Api.TV: 'Nyse Arca'}

    # override
    @classmethod
    def decode(cls, name: str, from_api: Api) -> Enum:
        """Decode from specific api notation to Barbucket notation"""
        # TV reports ARCA under an alias; canonicalise it before the generic
        # lookup. BUG FIX: this previously assigned the anonymization
        # placeholder '<NAME>', which matches no member value and made the
        # alias decode always raise AttributeError.
        if (from_api == Api.TV) and (name == 'Nyse Arca Something'):
            name = 'Nyse Arca'
        return super().decode(name=name, from_api=from_api)

class ContractType(TwoDimensionEnum):
    COMMON_STOCK = {Api.IB: 'common', Api.TV: 'Common Stock'}
    ETF = {Api.IB: 'etf', Api.TV: 'Etf'}
# Encoding via the helper and via direct value access yield the same string.
my_type = ContractType.COMMON_STOCK
print(my_type.encode(to_api=Api.IB))
print(my_type.value[Api.IB])
# Decode a TV-side alias back to the canonical member.
my_ex = Exchange.decode(
    name='Nyse Arca Something',
    from_api=Api.TV)
print(my_ex)
# +
from enum import Enum
# Methods defined on a member-less Enum base are inherited by concrete enums;
# only member-less Enum classes may be subclassed.
class Dates(Enum):
    @classmethod
    def some_method(cls):
        print("something")
class Weekday(Dates):
    MONDAY = 1
    TUESDAY = 2
    WEDNESDAY = 3
# The inherited classmethod is callable on the subclass.
Weekday.some_method()
Weekday.MONDAY
# -
| software_dev/enums_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GarreauArthur/rgbspace/blob/master/model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="cY0mDDIIHF_B" colab_type="text"
# # Import
# + id="lqhaUYcvFIj4" colab_type="code" colab={}
import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from __future__ import absolute_import, division, print_function, unicode_literals
# Install TensorFlow
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
# + [markdown] id="wd0BhRilHWQd" colab_type="text"
# # The data
#
# first download the data
# + id="ts0kLQiDHPFv" colab_type="code" outputId="ce7e9b64-2f85-41b9-97ca-31baee44eebb" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Start from a fresh clone of the dataset repo (Colab shell magics).
# !rm -rf rgbspace
# !git clone https://github.com/GarreauArthur/rgbspace.git
# Labelled RGB samples: RGB component columns plus an integer `color` label.
data = pd.read_csv("./rgbspace/data.csv")
data.head()
# + [markdown] id="wrpNvaoNHpk3" colab_type="text"
# The data is made of 4 column, one for each color RGB, and one giving the label of the color:
#
# * 0 is red
# * 1 is yellow
# * 2 is green
# * 3 is cyan
# * 4 is blue
# * 5 is magenta
#
# Let's extract the data, and normalize the inputs
# + id="J-McVhK8Hggr" colab_type="code" outputId="58fd678c-5130-4289-8b52-0f0aa0f1954a" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Split off the integer label column; the RGB feature columns end at 'green'.
color = data.loc[:, 'color']
rgb = data.loc[:, :'green']
# Centre roughly on zero: (x-128)/255 maps 0..255 to approximately [-0.5, 0.5].
normalized_rgb = (rgb-128)/255
normalized_rgb.head()
# + [markdown] id="c9U4xKpDP6Pm" colab_type="text"
# Let's try to visualize the data
# + id="t3V4EtLOP9Yf" colab_type="code" outputId="dbece475-871a-4b71-c12a-91bf468e7f22" colab={"base_uri": "https://localhost:8080/", "height": 612}
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, aspect='equal')
# Draw the first 11 samples as coloured swatches, annotated with their label.
for i in range(0,11):
    pixel = rgb.loc[i, :].values/255  # matplotlib wants RGB in [0, 1]
    rec = patches.Rectangle((0, -i*10), 10, 10, color=pixel)
    ax.add_patch(rec)
    ax.text(15, -i*10 + 5, color.loc[i])
plt.axis((0,50,-100,10))
# + id="I0SkELTYTJL6" colab_type="code" outputId="9f901285-ffd9-4e20-8c5d-337bac7c6978" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Spot-check one sample's [0, 1]-scaled RGB values.
print(rgb.loc[1, :].values/255)
# + [markdown] id="67sSSZe7ITW1" colab_type="text"
# Let's define the size of our data sets
# + id="78A7pA_yIMYZ" colab_type="code" outputId="e8d7948b-d4e2-4e75-9c65-4bdbb43f19c3" colab={"base_uri": "https://localhost:8080/", "height": 34}
total_number_of_examples = rgb['red'].count()
# 80/10/10 train/dev/test split sizes.
m_train_int = int(total_number_of_examples*0.8)
m_dev_int = int(total_number_of_examples*0.1)
m_test_int = int(total_number_of_examples*0.1)
# NOTE(review): these float copies are used as .loc labels in the next cell;
# label-slicing an integer index with floats is rejected by newer pandas —
# prefer the *_int values there.
m_train = np.float64(m_train_int)
m_dev = np.float64(m_dev_int)
m_test = np.float64(m_test_int)
print(m_train, m_test, m_dev)
# + [markdown] id="43wr44lOIcaN" colab_type="text"
# Now, we prepare the datasets
# + id="G-mzgqOXIX-F" colab_type="code" outputId="784a0974-e1b3-4e24-bf5b-93e42ec74526" colab={"base_uri": "https://localhost:8080/", "height": 34}
# split the dataset in 3 (80/10/10). Use the *integer* sizes as .loc labels:
# the index is a RangeIndex of ints and float labels (np.float64) are rejected
# by newer pandas. .loc slicing is end-inclusive, hence the -1.
x_train = normalized_rgb.loc[:(m_train_int - 1), 'red':'green'].values
x_dev = normalized_rgb.loc[m_train_int:(m_train_int + m_dev_int - 1), 'red':'green'].values
x_test = normalized_rgb.loc[(m_train_int + m_dev_int):, 'red':'green'].values
# transform the color (output) into a one hot vector.
# np.eye(nb_classes)[Y] already has shape (n_examples, nb_classes), so the
# previous hard-coded reshape(10000, 6) was redundant and broke for any other
# dataset size; this now works for any number of examples.
nb_classes = 6
Y = color.astype('int64').values.reshape(-1)
one_hot = np.eye(nb_classes)[Y]
y_train = one_hot[:m_train_int, :]
y_dev = one_hot[m_train_int:(m_train_int + m_dev_int), :]
y_test = one_hot[(m_train_int + m_dev_int):, :]
print(y_train.shape)
# + [markdown] id="KzLd6LjwI0iA" colab_type="text"
# Let's build a tensorflow 2 model
# + id="SdNZ1qipIite" colab_type="code" colab={}
# Small dense classifier: 3 RGB inputs -> 5 -> 7 -> 6 softmax classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(5, activation='relu'),
    tf.keras.layers.Dense(7, activation='relu'),
    tf.keras.layers.Dense(6, activation='softmax'),
])
# Categorical cross-entropy matches the one-hot label encoding built above.
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
# + [markdown] id="s8J_YWSlJ3X7" colab_type="text"
# Train
#
#
# + id="dizXVN6vJ1p9" colab_type="code" outputId="221f7f9f-e98f-4714-a489-a00ba5a26880" colab={"base_uri": "https://localhost:8080/", "height": 731}
model.fit(x_train, y_train, epochs=20, batch_size=32)
# + id="p_OyhLjLLDsr" colab_type="code" outputId="3494e236-229c-4d08-88c3-92e59c5c5d03" colab={"base_uri": "https://localhost:8080/", "height": 51}
model.evaluate(x_test, y_test)
# + id="B_dFHxNhNDB_" colab_type="code" outputId="392c6bc4-1bdf-4eb9-b106-0433229d459e" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Sanity predictions: red, green, blue and a yellow-ish colour.
# BUG FIX: the model was trained on (rgb - 128) / 255 inputs, so raw 0-255
# values must be normalised the same way before prediction.
data_pred = (np.array([[255,1,1], [1, 255, 1], [1, 1, 255], [237, 235, 112]]) - 128) / 255
print(model.predict(data_pred))
# + id="Do6t5Fq0Na8g" colab_type="code" colab={}
| model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Populate remote grid nodes with labeled tensors </h1>
# In this notebook, we will populate our grid nodes with labeled data so that it will be used later by people interested in train models.
#
# **NOTE:** At the time of running this notebook, we were running the grid components in background mode.
#
# Components:
# - Grid Gateway(http://localhost:8080)
# - Grid Node Bob (http://localhost:3000)
# - Grid Node Alice (http://localhost:3001)
# - Grid Node Bill (http://localhost:3002)
#
# This notebook was made based on <a href="https://github.com/OpenMined/PySyft/blob/dev/examples/tutorials/Part%2010%20-%20Federated%20Learning%20with%20Secure%20Aggregation.ipynb">Part 10: Federated Learning with Encrypted Gradient Aggregation</a> tutorial
# <h2>Import dependencies</h2>
import grid as gr
import syft as sy
import torch
import pickle
import time
# <h2>Setup config</h2>
# Init hook, connect with grid nodes, etc...
# +
hook = sy.TorchHook(torch)  # patch torch tensors with PySyft's remote ops
# Connect directly to grid nodes
nodes = [("http://localhost:3000/", "Bob"),
         ("http://localhost:3001/", "Alice"),
         ("http://localhost:3002/", "Bill") ]
compute_nodes = []
# Open one websocket client per node and connect immediately.
for i, node in enumerate(nodes):
    compute_nodes.append( gr.WebsocketGridClient(hook, node[0], id=node[1]) )
    compute_nodes[i].connect()
# -
# <h2>Load dataset</h2>
# Load and prepare the dataset
# +
# Load Data: pickled ((X, y), (X_test, y_test)) numpy arrays.
with open('../dataset/boston_housing.pickle','rb') as f:
    ((X, y), (X_test, y_test)) = pickle.load(f)
X = torch.from_numpy(X).float()
y = torch.from_numpy(y).float()
# preprocessing: standardise each feature to zero mean / unit deviation
mean = X.mean(0, keepdim=True)
dev = X.std(0, keepdim=True)
mean[:, 3] = 0. # the feature at column 3 is binary,
dev[:, 3] = 1. # so I'd rather not standardize it
X = (X - mean) / dev
# -
# <h2> Split dataset </h2>
# We will split our dataset to send to nodes
# Split into one equal chunk per node (any remainder forms an extra chunk).
datasets = torch.split(X, int(len(X) / len(compute_nodes)), dim=0 ) #tuple of chunks (dataset / number of nodes)
labels = torch.split(y, int(len(X) / len(compute_nodes)), dim=0 ) #tuple of chunks (labels / number of nodes)
# <h2>Tagging tensors</h2>
# +
# Tag and describe each chunk so remote workers can discover it by search.
tag_x = []
tag_y = []
for i in range(len(compute_nodes)):
    tag_x.append(datasets[i].tag("#X", "#boston", "#housing").describe("The input datapoints to the boston housing dataset."))
    tag_y.append(labels[i].tag("#Y", "#boston", "#housing").describe("Boston Housing labels"))
# -
# <h2>Sending our tensors to grid nodes</h2>
# +
# NOTE: For some reason, there is strange behavior when trying to send within a loop.
# Ex : tag_x[i].send(compute_nodes[i])
# When resolved, this should be updated.
# .send() moves each tagged tensor to a node and returns a pointer to it.
shared_x1 = tag_x[0].send(compute_nodes[0]) # First chunk of dataset to Bob
shared_x2 = tag_x[1].send(compute_nodes[1]) # Second chunk of dataset to Alice
shared_x3 = tag_x[2].send(compute_nodes[2]) # Third chunk of dataset to Bill
shared_y1 = tag_y[0].send(compute_nodes[0]) # First chunk of labels to Bob
shared_y2 = tag_y[1].send(compute_nodes[1]) # Second chunk of labels to Alice
shared_y3 = tag_y[2].send(compute_nodes[2]) # Third chunk of labels to Bill
# -
print("X tensor pointers: ", shared_x1, shared_x2, shared_x3)
print("Y tensor pointers: ", shared_y1, shared_y2, shared_y3)
# <h2>Disconnect nodes</h2>
# Close every websocket connection opened above.
for i in range(len(compute_nodes)):
    compute_nodes[i].disconnect()
| examples/experimental/Fed.Learning [ Part-1 ] - Populate a Grid Network ( Dataset ).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Expectation Maximisation: estimating bias from coin flips
#
# This example is based on the article *[What is the expectation maximization
# algorithm?](https://www.nature.com/nbt/journal/v26/n8/pdf/nbt1406.pdf)* by Do and Batzoglou.
# ## Setup
#
# Suppose we have two coins, A and B. Suppose also that we have several sequences of coin flips, each generated by flipping either coin A or coin B some number of times:
# +
# Observed data: five flip sequences; `labels` records which coin produced each.
# N.B. each coin label in `labels` corresponds to the sequence of flips at the same index in `flip_seqs`
labels = ['B', 'A', 'A', 'B', 'A']
flip_seqs = ['HTHHHTTHHHHTHHHTTHHHHHTHHHHH',
             'HHTHHHHHTTTTTTTHHTT',
             'HTHHHHTTHHTTTTTTTHTTTTHHTTT',
             'HTHTTHHTTHHHHHTHHHHHHHTHHHHHTHHHHHHTTHHHHH',
             'THHHTHHHTHTTTTTTTTTT']
# -
# ## Probability of heads
#
# Call the probability of a coin landing heads $\theta$. This parameter may be $0.5$ (a fair coin), or it could be higher (we should see more heads), or lower (we should see fewer heads).
#
# We can estimate this parameter for each coin separately from our data using the formula
#
# $$\theta = \frac{H}{H+T}$$
#
# where $H$ is the total number of heads and $T$ is the total number of tails seen across the flip sequences for each coin. Here's a Python function to do exactly this:
def estimate_theta(H, T):
    """
    Maximum-likelihood estimate of P(heads): the observed
    fraction of heads among H heads and T tails.
    """
    total_flips = H + T
    return H / total_flips
# Very simple. To use it, we just need to count the number of heads and tails across the sequences for each of the labels A and B.
# +
from collections import Counter, defaultdict
# Accumulate one Counter of 'H'/'T' totals per coin label.
count_dict = defaultdict(Counter)
for label, seq in zip(labels, flip_seqs):
    count_dict[label] += Counter(seq)
print('Coin A: there were {H} heads and {T} tails across the flip sequences'.format(**count_dict['A']))
print('Coin B: there were {H} heads and {T} tails across the flip sequences'.format(**count_dict['B']))
# -
# So we estimate $\theta_A$ to be:
estimate_theta(**count_dict['A'])
# And we estimate $\theta_B$ to be:
estimate_theta(**count_dict['B'])
# ## Likelihood functions
#
# The values above are the **maximum likelihood** estimates for $\theta_A$ and $\theta_B$. They are the values that are most likely to produce the counts of heads and tails we observed for each coin. They maximise the **likelihood function** for A and for B (no other input value to the function would yield a greater output).
#
# What is a likelihood function? It tells you, given some data and a parameter, how well the parameter fits the data. In other words, how likely the parameter is to be "correct".
#
# What is the likelihood function in this coin flip example?
#
# Recall that the [binomial formula](https://en.wikipedia.org/wiki/Binomial_distribution#Probability_mass_function) computes the probability that we have $H$ successes in $H+T$ independent trials, given that the proability of success is $\theta$:
#
# $$p(H,T \mid \theta)=\left({H+T \atop H}\right) \theta^H (1-\theta)^T$$
#
# By [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem) we can also use this formula to compute the probability of $\theta$ given $H$ heads and $T$ tails, i.e. $p(\theta \mid H, T) = p(H,T \mid \theta) \cdot k$.
#
# We're making the assumption here that $k$ is constant because it is reasonable to treat $p(\theta)$ as constant (prior to the experiment we think that one possible value for $\theta$ is just as probable as another).
#
# Unlike a probability function, a likelihood function does not need to produce a value between 0 and 1. All we're interested in here is making it as large as possible for our choice of $\theta$ which means we can just ignore constant multipliers like $k$ and $\left({H+T \atop H}\right)$.
#
# So the likelihood of $\theta$, given we saw $H$ heads and $T$ tails, can just be expressed as
#
# $$L(\theta \mid H, T)=\theta^H (1-\theta)^{T}$$
#
# Here's a Python function to compute the likelihood of $\theta$ given $H$ and $T$:
def likelihood_of_theta(theta, H, T):
    """
    Likelihood (up to a constant factor) of heads-probability
    `theta`, given that we observed H heads and T tails.
    """
    heads_part = theta ** H
    tails_part = (1 - theta) ** T
    return heads_part * tails_part
# If we plot $L(\theta \mid H, T)$ for all values of $\theta$ between 0 and 1, we can see that $\frac{H}{H+T}$ does indeed find the value of $\theta$ that produces the maximum likelihood (the dotted vertical line in the plot below):
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Likelihood curve for coin A over a fine grid of theta values in [0, 1].
counts = count_dict['A']
H = counts['H']
T = counts['T']
theta = np.linspace(0, 1, 300)
theta_likelihood = likelihood_of_theta(theta, H, T)
# compute maximum likelihood estimate
m = estimate_theta(H, T)
m_likelihood = likelihood_of_theta(m, H, T)
# plot the curve, with a dashed vertical line at the ML estimate
plt.rcParams['figure.figsize'] = (15, 5)
plt.plot(theta, theta_likelihood, color='b');
plt.vlines(m, 0, m_likelihood, 'b', '--');
plt.title(r'Likelihood of $\theta$ given {} heads and {} tails'.format(H, T), fontsize=17);
plt.ylabel(r'$L(\theta \mid H,T)$', fontsize=15);
plt.xlabel(r'$\theta$', fontsize=15);
# -
# *N.B. the curve may not be drawn completely accurately if values for $H$ or $T$ are large, given the limited precision of floating-point values.*
# If you want to read more about switching from $p(\theta \mid H, T)$ to $p(H,T \mid \theta)$ (and the connection with the Beta distribution which I did not mention above) you may find the short blog post [Bayesian inference for a binomial proportion](https://stephens999.github.io/fiveMinuteStats/bayes_beta_binomial.html) by Matthew Stephens helpful.
#
# Note: to derive the formula $\frac{H}{H+T}$ analytically, differentiate $L$ with respect to $\theta$ and find the stationary points in terms of $H$ and $T$.
# ## Hidden labels
# Now suppose that we've lost the labels for the coin flips.
#
# In other words, we know that we have two coins and that they have been flipped to produce these sequences of flips:
# The sequences survive, but which coin produced each one is now unknown.
print(*flip_seqs, sep='\n')
# ...but the label of each coin is now a *hidden variable*.
#
# The function `estimate_theta()` is unable to compute our maximum likelihood estimates for $\theta_A$ and $\theta_B$ because as we do not know the true counts of heads and tails for each coin.
# ## Expectation Maximisation
#
# All is not lost. We can use **Expectation Maximisation** (EM) to estimate the parameter values that are most likely to produce the data we have observed.
#
# This method is not guaranteed to produce the exact same values we'd derive if we *could* see the labels, but it may well get us very close.
#
# There are five steps:
#
# 1. Start with initial estimates of $\theta_A$ and $\theta_B$ (these can be anything from 0 to 1).
# 2. Check how likely each value estimate is to produce each sequence of coin flips (using the likelihood function $L$).
# 3. Use this likelihood to produce a weighting for indicating the probability that each sequence was generated by $\theta_A$ or $\theta_B$. Adjust the counts of heads and tails by multiplying them by the weighting (the **Expectation** step).
#
# 4. Add up the total number of weighted counts for heads and tails across all sequences (call these counts $H'$ and $T'$) for both parameter estimates. Produce new estimates for $\theta_A$ and $\theta_B$ using the maximum likelihood formula $\frac{H'}{H' + T'}$ (the **Maximisation** step).
#
# 5. Repeat steps 2-4 until each parameter estimate has converged, or a set number of iterations has been reached.
# The total weight for each sequence (red weight plus blue weight) should be normalised to 1.
#
# The Python function to re-weight the counts of heads and tails looks like this (e.g. if $H$ is 13 and the weight is 0.75, the expected count is then 9.75):
def expected_counts(counter, weight):
    """
    Scale every count in `counter` by `weight` and return the result
    as a new Counter (the input counter is left untouched).

    N.B. the scaled counts are generally not integers once the
    weighting is applied.
    """
    weighted = {outcome: count * weight for outcome, count in counter.items()}
    return Counter(weighted)
# Now we can implement the code for EM, run it, and see what estimates for $\theta_A$ and $\theta_B$ the algorithm finishes with:
# +
# count the number of heads and tails in each sequence of coin flips
counts_for_seqs = [Counter(seq) for seq in flip_seqs]
# 1. Make initial estimates for each parameter
theta_A = 0.51
theta_B = 0.001
ITER = 10 # number of iterations to run EM for
# NOTE(review): range(0, ITER+1) performs ITER+1 update passes; the print at
# the top of the loop shows the estimates *before* each update, so index 0
# displays the initial guesses.
for index in range(0, ITER+1):
    print('{}\ttheta_A: {:.5f}\ttheta_B: {:.5f}'.format(index, theta_A, theta_B))
    ## Expectation step
    ## ----------------
    # 2. How likely are the current estimates of theta to produce H heads and T tails for each sequence?
    # (likelihood_of_theta and estimate_theta are defined earlier in the notebook)
    l_A = [likelihood_of_theta(theta_A, **counts) for counts in counts_for_seqs]
    l_B = [likelihood_of_theta(theta_B, **counts) for counts in counts_for_seqs]
    # 3. Normalise these likelihoods so that they sum to 1, call them 'weights'
    weight_A = [a / (a+b) for a, b in zip(l_A, l_B)]
    weight_B = [b / (a+b) for a, b in zip(l_A, l_B)]
    # expected counts of heads/tails for sequences of coin flips given weights
    exp_counts_A = [expected_counts(counts, w) for counts, w in zip(counts_for_seqs, weight_A)]
    exp_counts_B = [expected_counts(counts, w) for counts, w in zip(counts_for_seqs, weight_B)]
    ## Maximisation step
    ## -----------------
    # 4. Find total number of heads/tails across the sequences of coin flips
    # (sum() with a Counter() start value merges the per-sequence counters)
    total_A = sum(exp_counts_A, Counter())
    total_B = sum(exp_counts_B, Counter())
    # compute new parameter estimates for theta
    theta_A = estimate_theta(**total_A)
    theta_B = estimate_theta(**total_B)
# -
# We see that the guesses for each parameter converge quite quickly (using a precision of 5 decimal places). Compare these guesses with the maximum likelihood estimates made when we *did* know the label for each sequence of coin flips:
# +
from IPython.display import Markdown
# Maximum likelihood estimates computed from the *labelled* counts
# (count_dict is built earlier in the notebook, keyed by coin label).
actual_A = estimate_theta(**count_dict['A'])
actual_B = estimate_theta(**count_dict['B'])
# Raw-string Markdown table template; placeholders are filled via str.format below.
md = r"""
| | Actual | EM estimate |
| ---------- |:---------------| :------------ |
| $\theta_A$ | {actual_A:.5f} | {em_A:.5f} |
| $\theta_B$ | {actual_B:.5f} | {em_B:.5f} |
"""
Markdown(
    md.format(
        actual_A=actual_A,
        actual_B=actual_B,
        em_A=theta_A,
        em_B=theta_B,
    )
)
# -
# You might notice that the estimates for the coins appear to have been switched around. This can depend on the initial guesses for the parameters. EM has no opinion about which coin has which parameter here, just the most likely values of the parameters.
# ## The weight intuition
#
# Now we have seen the algorithm in action, it is reasonable to ask *why* it works. In particular, we might ask why the parameter estimates improve (get closer to the actual maximum likelihood estimate) with each iteration and eventually converge.
#
# In EM, every piece of data (be it a sequence of coin flips, or something else) carries some amount of **weight** during each iteration. That weight represents how much the data fits one current parameter estimate compared to another (the sum of the weights on each piece of data is always 1).
#
# The Expectation step lets us scale how strongly each data point should contribute to the new maximum likelihood estimate (which is computed in the Maximisation step). For example in our case, if we look at a sequence of flips $S_1$ and see its weight for $\theta_A$ is 0.9, then $S_1$ is going to influence the next estimate for $\theta_A$ quite a lot. Conversely, the effect of $S_1$ on $\theta_B$ will be weak since its weight here is just 0.1.
#
# This means that when we sum up the weighted counts across all $S_i$ for a particular value of $\theta_A$, the sequences that are more heavily weighted towards $\theta_A$ will have the effect of pulling the next estimate closer to the maximum likelihood estimate for $\theta_A$.
#
# Then, on the next iteration, we will find that the likelihood that those same sequences were generated by the estimate for $\theta_A$ is even greater, subsequently leading to an *even greater* weight for each of those sequences. Since the total weight can never be greater than 1, we will see a convergence to 1 for sequences that the algorithm decides have been generated by $\theta_A$, and to 0 for those sequences which have not:
# Final per-sequence weights from the last EM iteration; values near 1 (or 0)
# show which coin the algorithm attributes each sequence to.
weight_A
weight_B
# In turn, this will mean that our estimates for $\theta_A$ and $\theta_B$ converge.
| em-notebook-1.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
library(tidyverse)
library(skimr)
# The six continents below all used the same copy-pasted chart recipe.
# Factor the duplication into two helpers so the recipe exists in one place.

# Read one continent's CSV and flag whether each yearly anomaly is >= 0.
# The pos column drives the blue/red bar fill in the plot helper below.
load_continent <- function(csv_path) {
  continent <- read_csv(file = csv_path)
  continent$pos <- continent$Value >= 0
  continent
}

# Build the standard surface-temperature anomaly bar chart for one continent.
make_climate_plot <- function(continent, plot_title) {
  continent %>%
    filter(Year >= 1950) %>%
    ggplot(aes(
      x = Year,
      y = Value,
      fill = pos)) +
    labs(title = plot_title) +
    scale_x_continuous(breaks = seq(1950, 2020, 10)) +
    scale_y_continuous(breaks = seq(-1, 1.8, 0.2)) +
    geom_bar(stat = "identity", position = "identity", colour = "black", size = 0.05) +
    xlab("Year") + ylab("Surface Temperature ( ºC )") +
    theme_light() +
    theme(plot.title = element_text(hjust = 0.5)) +
    # NOTE(review): guide = FALSE is deprecated in newer ggplot2 (guide = "none"
    # is the modern spelling); kept as-is to match the originally installed version.
    scale_fill_manual(values = c("#CCEEFF", "#FFDDDD"), guide = FALSE)
}

Africa <- load_continent("Africa.csv")
Africa_climate_plot <- make_climate_plot(Africa, "Africa")
Africa_climate_plot

North_America <- load_continent("North America.csv")
North_America_climate_plot <- make_climate_plot(North_America, "North America")
North_America_climate_plot

South_America <- load_continent("South America.csv")
South_America_climate_plot <- make_climate_plot(South_America, "South America")
South_America_climate_plot

Europe <- load_continent("Europe.csv")
Europe_climate_plot <- make_climate_plot(Europe, "Europe")
Europe_climate_plot

Asia <- load_continent("Asia.csv")
Asia_climate_plot <- make_climate_plot(Asia, "Asia")
Asia_climate_plot

Oceania <- load_continent("Oceania.csv")
Oceania_climate_plot <- make_climate_plot(Oceania, "Oceania")
Oceania_climate_plot
# Combine the six continent charts into a single 2 x 3 figure.
library(ggpubr)
climate_figure <- ggarrange(Africa_climate_plot, Asia_climate_plot,
                            Europe_climate_plot, South_America_climate_plot,
                            North_America_climate_plot, Oceania_climate_plot, ncol = 2, nrow = 3)
climate_figure
# Save the combined figure as a 12 x 9 inch PDF.
ggsave(climate_figure,filename = "climate_figure plot.pdf",width = 12,height = 9)
| chart/climate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Science with Python: Poisson Distribution #6361
# A <b>distribution</b> provides a parameterised mathematical function that we can use to calculate the probability for any observation from the sample space.
# The <b>Poisson distribution</b> is a discrete probability distribution for the counts of events that occur randomly in a given interval of time (or space). In other words, we can say that a Poisson distribution measures how many times an event is likely to occur within “t” period of time. Poisson distribution is a limiting process of the <b>binomial distribution</b>.
#
# Let X = The number of events in a given interval.
# Then, if the average number of events per interval is λ
# In a given interval the probability of observing x events is given by
#
# <img src = "https://anomaly.io/wp-content/uploads/2015/06/poisson-formula.png" width="200"/>
# ## Poisson Distribution: plotting
# +
# Let us import the libraries
from scipy.stats import poisson
import numpy as np
import matplotlib.pyplot as plt

# The Poisson distribution is discrete: scipy's poisson.pmf returns 0 for any
# non-integer k, so the previous step of 0.5 made every other sample zero and
# produced a saw-toothed plot. Evaluate at integer k only.
x = np.arange(0, 150)
# Poisson Distribution data for y-axis
# We will use scipy’s poisson.pmf method to get the poisson probability mass function(pmf) plot in python .
# loc=20 shifts the whole distribution 20 units to the right.
y = poisson.pmf(x, mu=50, loc=20)
# Plotting the graph
plt.plot(x, y)
plt.title("My Poisson Distribution")
plt.show()
# -
# ## Poisson Distribution : PMF with Different λ
# +
# Importing the required modules
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats import poisson

# Evaluate the Poisson pmf at k = 0..19 for four different rate parameters.
rates = (2, 4, 6, 8)
x = np.arange(0, 20)
pmf_2, pmf_4, pmf_6, pmf_8 = (poisson.pmf(x, mu=rate) for rate in rates)

# Collect everything in one DataFrame, one probability column per λ.
df = pd.DataFrame({'x': x, 'probability(λ=2)': pmf_2, 'probability(λ=4)': pmf_4,
                   'probability(λ=6)': pmf_6, 'probability(λ=8)': pmf_8,
                   })

# Visualization: overlay the four pmf curves on one set of axes.
plt.figure(figsize=(20,10))
for rate in rates:
    plt.plot(df['x'], df['probability(λ={})'.format(rate)])
plt.ylabel('Probability')
plt.xlabel('Intervals')
plt.show()
# -
# ## Poisson Distribution : A random example
# Let's just say I am observing cars in a national highway.
#
# Looking at the data of the last few hours, I observe that on average, there are four cars detected in an interval of one minute. So the value 4 could be a good candidate for the parameter of the distribution λ. My goal is to know the probability that a specific number of cars will be seen in the next minute.
#
# Let’s implement the Poisson distribution function with the help of the formula we learnt above:
def poisson_distribution(k, lambd):
    """
    Probability of observing exactly `k` events in an interval when events
    occur independently at an average rate of `lambd` per interval.

    Implements the Poisson pmf  P(X = k) = λ^k e^{-λ} / k!.

    :param k: non-negative integer number of events.
    :param lambd: average event rate λ for the interval.
    :return: the probability as a float.
    """
    # Use the stdlib factorial: the np.math alias used previously was
    # deprecated and removed in modern NumPy releases.
    from math import factorial
    return (lambd ** k * np.exp(-lambd)) / factorial(k)
# +
# For instance, the probability of observing 5 cars in the next minute would be:
# (evaluates to roughly 0.156)
poisson_distribution(k=5, lambd=4)
# +
# Alternative method: scipy's built-in pmf gives the same value.
from scipy.stats import poisson
poisson.pmf(5, 4)
# +
#Let’s plot the distribution for various values of k:
lambd = 4
k_axis = np.arange(0, 20)
# poisson.pmf is vectorised, so the whole pmf can be computed in one call
# instead of filling a pre-allocated array element by element.
distribution = poisson.pmf(k_axis, lambd)
plt.bar(k_axis, distribution)
plt.title("Poisson distribution for λ=4")
plt.xlabel("k_values")
plt.ylabel("Probability")
# -
# ## Poisson Distribution : Another example
#
# Let's just say I am counting errors of a skilled typist.
#
# Looking at the data of the last few days, I observe that on average, there are two errors detected in an interval of one hour. So the value 2 could be a good candidate for the parameter of the distribution λ. My goal is to know the probability that 5 errors will be seen in the next hour.
# Probability of exactly 5 typing errors in the next hour when λ = 2:
from scipy.stats import poisson
poisson.pmf(5, 2)
# +
#Let’s plot the distribution for various values of k:
lambd = 2
k_axis = np.arange(0, 15)
# Compute the whole pmf in one vectorised call rather than element by element.
distribution = poisson.pmf(k_axis, lambd)
plt.bar(k_axis, distribution)
plt.title("Poisson distribution for λ=2")
plt.xlabel("k_values")
plt.ylabel("Probability")
# -
# There are many applications of the Poisson distribution in many fields. In Data Science and Machine Learning, the Poisson Distribution is used in various probabilistic models. If the random variable represents a count, we can use the Poisson Distribution to model it. In real-world applications, these models are used to predict or simulate complex systems.
| Datascience_With_Python/Statistics/Tutorials/Poisson Distribution/poisson_distribution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Momentum Bar Charts:
#
# ### PH211
#
# ##### Why?
#
# Momentum bar charts are analogous to energy bar charts as a tool for tracking terms in our conservation laws. In some ways momentum bar charts are a little less complex since all of the bars represent the same calculation $\bar{p} = m\bar{v}$ although there is the issue of tracking components in whatever coordinate system we are using. This notebook is a modification of the energy bar chart notebook
#
#
# ### Libraries
#
# There are a number of different widget libraries. In the end the ipywidgets was most adaptable to my purposes. I suspect this would change if I were seeking to build this tool as a webpage. References that I used in sorting this all out are given in my [InteractiveStudy notebook](https://github.com/smithrockmaker/ENGR212/blob/main/InteractiveStudy.ipynb). At the moment (2/21) this is miserably documented but the references contained therein are much better if they are still live.
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython.display import display
from ipywidgets import interact, interactive, fixed, interact_manual, Layout
# ### Setting Up the Bar Graph
#
# This is where the decisions about how many bars and how they will be labelled are made. In the end I opted to create an enormous text string to label the bars, which became the barLabels variable. The locate and locateShift lists articulate x values (locations) for each of the bars. This involves skipping values to leave space for the vertical dividers that help this all make sense to me conceptually.
#
# +
# set up locations for bars and other objects
# start with how many objects; keep the possibility of 2D in mind
numObjects = 2
xBars0 = numObjects   # number of initial-momentum bars (one per object)
xBarsf = numObjects   # number of final-momentum bars (one per object)
# formatting - 3 dividers, x0; xf; netx
numDividers = 3
# total number of bars that are interactive. Gaps and other spacing issues handled at end of cell
# the trailing +1 reserves the slot for the net-momentum bar
Nbase = xBars0 + xBarsf + numDividers + 1
locate = np.arange(Nbase)
# shifted locations for labels
locateShift = locate - 0.4
# the x locations for the groups
# Having them in separate lists allows me to choose different colors for each section
# of the bar graph more easily (without creating a color list that I need to edit)
x0Loc = locate[0:xBars0]
xfLoc = locate[xBars0 + 1:xBars0 + xBarsf + 1]
vlineLoc = [xBars0, (xBars0 + xBarsf + 1)]
netLoc = locate[Nbase - 2]
# check alignments -- I had a lot of trouble making sure that everything lined up
# appropriately. These are diagnostic print statements to be sure I'm visualizing
# the bar and divider locations correctly.
print("x0 Bars:",x0Loc)
#print("y0 Bars:",y0Loc)
print("xf Bars:",xfLoc)
#print("yf Bars:",yfLoc)
print("Net Bars:",netLoc)
print("locate:",locate)
# Structure bar width - this is a proportional value apparently
# it scales with plot figure size.
width = 0.4
# bar labels
labelx10 = 'p10x' # object 1 initial x-momentum
labelx20 = 'p20x' # object 2 initial x-momentum
labely10 = 'p10y' # object 1 initial y-momentum (unused in this x-only chart)
# BUG FIX: this previously read 'p10y', duplicating object 1's label;
# object 2's initial y-momentum label is 'p20y'.
labely20 = 'p20y'
labelx1f = 'p1fx' # object 1 final x-momentum
labelx2f = 'p2fx' # object 2 final x-momentum
labely1f = 'p1fy' # object 1 final y-momentum (unused in this x-only chart)
labely2f = 'p2fy' # object 2 final y-momentum (unused in this x-only chart)
labelNetX = 'netX' # net x-momentum bar
labelNetY = 'netY' # net y-momentum bar (unused in this x-only chart)
vertBar = ''
lSpace = ' '
lScale = 7
# assemble labels for each section. Spacing is most easily adjusted using the lScale variable above
initialLabels = labelx10 + (lScale)*lSpace + labelx20 + (lScale)*lSpace
finalLabels = labelx1f + lScale*lSpace + labelx2f + (lScale)*lSpace
netLabels = labelNetX
vertLabel = vertBar
# put it all together for labels
barLabels = initialLabels + 2*lScale*lSpace + finalLabels + 2*lScale*lSpace + netLabels + 2*lScale*lSpace
# check the label string if needed.
print("barlabels:", barLabels)
# -
# ### Energy Bar Graph Function
#
# This may not be the only or best way to do this but eventually it seemed easiest given my experience or lack of it. I tested everything using fixed values for the bars (you can see this in early version of this notebook). Because I decided I wanted to update the values of each bar on the plot I also needed to generate a dynamic text string that depended on the bar values passed to the plotting function. barValues represents this aspect of the plot.
#
# The plot scales vertically relatively smoothly. It will **NOT** scale horizontally since the text strings probably won't follow the bars properly. I can imagine how to sort that out but it's not important enough to take that time at this point. Very basic intro to bar plots is linked below.
#
# [pyplot.bar documentation](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.bar.html)[
#
def momentumBar(p10x, p20x, p1fx, p2fx):
    """
    Draw the two-object momentum bar chart.

    :param p10x: initial x-momentum of object 1
    :param p20x: initial x-momentum of object 2
    :param p1fx: final x-momentum of object 1
    :param p2fx: final x-momentum of object 2

    Relies on the module-level layout variables defined in the setup cell
    above (x0Loc, xfLoc, netLoc, vlineLoc, width, Nbase, barLabels).
    """
    # create array of bar heights (energy)
    x0Heights = [p10x, p20x]
    xfHeights = [p1fx, p2fx]
    # NOTE(review): "net" is initial total minus final total, i.e. the momentum
    # discrepancy; it is zero when momentum is conserved.
    netxMomentum = (p10x + p20x) - (p1fx + p2fx)
    netHeights = [netxMomentum]
    # truncate current bar values and create value array to display current value under each bar
    # for creating text string for labels
    sLabel = ' '
    sScale = 17
    sOffset = 10
    # initial values for object 1 and 2
    p10xVal = str(np.trunc(p10x))
    p20xVal = str(np.trunc(p20x))
    initialValues =p10xVal + (sScale)*sLabel + p20xVal + (sScale)*sLabel
    # final values for object 1 and 2
    p1fxVal = str(np.trunc(p1fx))
    p2fxVal = str(np.trunc(p2fx))
    finalValues =p1fxVal + (sScale)*sLabel + p2fxVal + (sScale)*sLabel
    # net value
    netValue = str(np.trunc(netxMomentum))
    # current value string
    barValues = initialValues + (sScale+sOffset)*sLabel + finalValues + (sScale+sOffset)*sLabel + netValue
    # determine plot max/min
    initMax = np.max(x0Heights)
    finalMax = np.max(xfHeights)
    # include 10 as a lower limit on the top of plot
    collectMax = [initMax,finalMax, 10]
    globalMax = 1.1*np.max(collectMax)
    initMin = np.min(x0Heights)
    finalMin = np.min(xfHeights)
    collectMin = [initMin,finalMin, -5.]
    globalMin = 1.1*np.min(collectMin)
    # choose y limits from min/max
    # (symmetric limits: use whichever of |min| and max is larger)
    if np.abs(globalMin) < globalMax:
        yLim = globalMax
    else:
        yLim = np.abs(globalMin)
    # create the plot
    fig1, ax1 = plt.subplots()
    # bar graph sections
    ax1.bar(x0Loc,
            x0Heights,
            width,
            color = 'red',
            label= 'initial momentum',
            alpha = 0.4)
    ax1.bar(xfLoc,
            xfHeights,
            width,
            color = 'blue',
            label= 'final momentum',
            alpha = 0.4)
    ax1.bar(netLoc,
            netHeights,
            width,
            color = 'green',
            label= 'net momentum',
            alpha = 0.4)
    # dividing lines
    ax1.vlines(vlineLoc, -.95*yLim, .95*yLim, linestyles= 'dashed', color = 'navy')
    # limits of plot
    plt.xlim(-1, Nbase)
    plt.ylim(-yLim, yLim)
    # turn on plot grid
    ax1.grid()
    # labeling stuff
    #ax1.tick_params(axis="x",direction="in", pad=-200)
    #plt.xticks(locateShift, barLabels, fontsize = 12)
    plt.text(-.5, -.1*yLim, barLabels)
    plt.text(-.5, -.2*yLim, barValues)
    #ax1.tick_params(axis="x",direction="in", pad=-170)
    #plt.xticks(locate, barLabels, fontsize = 12)
    # axis labels
    # currently forcing plt.legend to put legend top right for consistency
    plt.xlabel('momentum by object', fontsize = 20)
    plt.ylabel('momentum', fontsize = 20)
    plt.title('Momentum Bar Chart', fontsize = 20)
    plt.legend(loc = 1)
    # Set the size of my plot for better visibility
    fig1.set_size_inches(12, 6)
    #fig.savefig("myplot.png")
    plt.show()
# ### Setting up widgets and interactivity
#
# Once the active function is defined then we define the interactive widgets which are mostly sliders for visual connection to the bar graph. In hindsight I might have done well to make the sliders vertical so they move in the same direction as the bars but hey .... got to save something for a rainy day.
#
# The cap# variables are strings for labeling the different sections of the slider array. Hbox and VBox are used to lay out the panel. Last two lines pull the trigger and set up the interactivity.
# +
# Set up widgetsm - captions
cap1 = widgets.Label(value=' Initial Momentum (x)')
cap2 = widgets.Label(value=' Final Momentum (x)')
cap3 = widgets.Label(value=' Net Momentum (x)')
cap4 = widgets.Label(value='Object 1:')
cap5 = widgets.Label(value='Object 2:')
# initial momentum (x) sliders
p10x=widgets.FloatText(min=-100, max=100, value=.0, description = 'Initial px',continuous_update=False,
layout=Layout(width='60%'))
p20x=widgets.FloatText(min=-100, max=100, value=.0, description = 'Initial px',continuous_update=False,
layout=Layout(width='60%'))
# initial momentum (x) sliders
p1fx=widgets.FloatText(min=-100, max=100, value=.0, description = 'Final px',continuous_update=False,
layout=Layout(width='60%'))
p2fx=widgets.FloatText(min=-100, max=100, value=.0, description = 'Final px',continuous_update=False,
layout=Layout(width='60%'))
# An HBox lays out its children horizontally, VBox lays them out vertically
col1 = widgets.VBox([cap1, cap4, p10x, cap5, p20x])
col2 = widgets.VBox([cap2, cap4, p1fx, cap5, p2fx])
col3 = widgets.VBox([cap3])
panel = widgets.HBox([col1, col2, col3])
out = widgets.interactive_output(momentumBar, {'p10x': p10x, 'p20x': p20x,
'p1fx': p1fx, 'p2fx': p2fx})
display(out, panel)
# -
| MomentumBarGraph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''base'': conda)'
# language: python
# name: python38364bitbasecondae7cd72b7144542bdae788b1dbf27e222
# ---
# +
#convert
# -
# # babilim.model.layers.pooling
#
# > Pooling operations.
#export
from babilim.core.annotations import RunOnlyOnce
from babilim.core.module_native import ModuleNative
#export
class MaxPooling1D(ModuleNative):
    def __init__(self, pool_size=2, stride=None):
        """
        A 1D max pooling layer.

        Computes the max over regions of size N with stride S.
        This divides the feature map size by S.

        :param pool_size: Size of the region over which is pooled.
        :param stride: How far the top left corner of the pooling window moves per step. If None it defaults to pool_size, resulting in zero overlap between pooled regions.
        """
        super().__init__()
        self.pool_size = pool_size
        self.stride = stride
        if self.stride is None:
            self.stride = self.pool_size

    @RunOnlyOnce
    def _build_pytorch(self, features):
        # The pytorch path uses the functional API, so there is nothing to build.
        pass

    def _call_pytorch(self, features):
        from torch.nn.functional import max_pool1d as _MaxPooling1D
        return _MaxPooling1D(features, self.pool_size, stride=self.stride)

    @RunOnlyOnce
    def _build_tf(self, features):
        # BUG FIX: the import previously aliased the layer to its own name
        # ("as MaxPooling1D") while the next line referenced _MaxPooling1D,
        # which raised a NameError on the tensorflow code path.
        from tensorflow.keras.layers import MaxPooling1D as _MaxPooling1D
        self.pool = _MaxPooling1D(pool_size=self.pool_size, strides=self.stride)

    def _call_tf(self, features):
        return self.pool(features)
# + tags=[]
# Smoke test: with the default pool_size of 2, the trailing (length) axis
# should halve from 16 to 8 on the pytorch backend.
from babilim.core.tensor import Tensor
import numpy as np
max_pool_1d = MaxPooling1D()
tensor = Tensor(data=np.zeros((10,8,16)), trainable=False)
print(tensor.shape)
result = max_pool_1d(tensor)
print(result.shape)
# -
#export
class MaxPooling2D(ModuleNative):
    def __init__(self, pool_size=(2, 2), stride=None):
        """
        A NxN max pooling layer.

        Computes the max of a NxN region with stride S.
        This divides the feature map size by S.

        :param pool_size: Size of the region over which is pooled.
        :param stride: How far the top left corner of the pooling window moves per step. If None it defaults to pool_size, resulting in zero overlap between pooled regions.
        """
        super().__init__()
        self.pool_size = pool_size
        self.stride = stride
        if self.stride is None:
            self.stride = self.pool_size

    @RunOnlyOnce
    def _build_pytorch(self, features):
        # The pytorch path uses the functional API, so there is nothing to build.
        pass

    def _call_pytorch(self, features):
        from torch.nn.functional import max_pool2d as _MaxPooling2D
        return _MaxPooling2D(features, self.pool_size, stride=self.stride)

    @RunOnlyOnce
    def _build_tf(self, features):
        # BUG FIX: the import previously aliased the layer to its own name
        # ("as MaxPooling2D") while the next line referenced _MaxPooling2D,
        # which raised a NameError on the tensorflow code path.
        from tensorflow.keras.layers import MaxPooling2D as _MaxPooling2D
        self.pool = _MaxPooling2D(pool_size=self.pool_size, strides=self.stride)

    def _call_tf(self, features):
        return self.pool(features)
# + tags=[]
# Smoke test: with the default (2, 2) pool, the trailing two axes should
# halve (16 -> 8, 32 -> 16) on the pytorch backend.
from babilim.core.tensor import Tensor
import numpy as np
max_pool_2d = MaxPooling2D()
tensor = Tensor(data=np.zeros((10,8,16,32)), trainable=False)
print(tensor.shape)
result = max_pool_2d(tensor)
print(result.shape)
# -
#export
class GlobalAveragePooling1D(ModuleNative):
    def __init__(self):
        """
        A global average pooling layer.

        Averages over the full length dimension of a (B, N, C) tensor so that
        the result has shape (B, C).
        """
        super().__init__()

    @RunOnlyOnce
    def _build_pytorch(self, features):
        from babilim.model.layers.flatten import Flatten
        self.flatten = Flatten()

    def _call_pytorch(self, features):
        from torch.nn.functional import avg_pool1d as _AveragePooling1D
        # Pool over the entire remaining extent, then drop the singleton axis.
        pooled = _AveragePooling1D(features, features.size()[2:])
        return self.flatten(pooled)

    @RunOnlyOnce
    def _build_tf(self, features):
        from tensorflow.keras.layers import GlobalAveragePooling1D as _GlobalAveragePooling1D
        self.pool = _GlobalAveragePooling1D()

    def _call_tf(self, features):
        return self.pool(features)
# + tags=[]
# Smoke test: global average pooling collapses the trailing axis, so
# (10, 8, 5) is expected to become (10, 8) on the pytorch backend.
from babilim.core.tensor import Tensor
import numpy as np
global_avg_pool_1d = GlobalAveragePooling1D()
tensor = Tensor(data=np.zeros((10,8,5)), trainable=False)
print(tensor.shape)
result = global_avg_pool_1d(tensor)
print(result.shape)
# -
#export
class GlobalAveragePooling2D(ModuleNative):
    def __init__(self):
        """
        A global average pooling layer.

        Averages over the full W and H dimensions so that the result has
        shape (B, C).
        """
        super().__init__()

    @RunOnlyOnce
    def _build_pytorch(self, features):
        from babilim.model.layers.flatten import Flatten
        self.flatten = Flatten()

    def _call_pytorch(self, features):
        from torch.nn.functional import avg_pool2d as _AveragePooling2D
        # Pool over the entire remaining extent, then drop the singleton axes.
        pooled = _AveragePooling2D(features, features.size()[2:])
        return self.flatten(pooled)

    @RunOnlyOnce
    def _build_tf(self, features):
        from tensorflow.keras.layers import GlobalAveragePooling2D as _GlobalAveragePooling2D
        self.pool = _GlobalAveragePooling2D()

    def _call_tf(self, features):
        return self.pool(features)
# + tags=[]
# Smoke test: global average pooling collapses the trailing two axes, so
# (10, 8, 5, 3) is expected to become (10, 8) on the pytorch backend.
from babilim.core.tensor import Tensor
import numpy as np
global_avg_pool_2d = GlobalAveragePooling2D()
tensor = Tensor(data=np.zeros((10,8,5,3)), trainable=False)
print(tensor.shape)
result = global_avg_pool_2d(tensor)
print(result.shape)
| babilim/model/layers/pooling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Analysis
#
# **Sample Scenario**
#
# We would like to be able to use attributes of customers to estimate their spending score.
# In doing so, we can target those customers that are likely to be most profitable.
#
# Our target variable is spending_score.
# Currently the only customer data we have available to use in this project is age, annual_income and gender.
# It is possible we may not have enough information to build a valuable model. If not, maybe we could do some unsupervised learning, and find clusters of similar customers using all of the variables (including spending_score) and that could help us with a starting point for our targeted marketing.
# ## Wrangle Data
# +
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Wrangling
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Statistical Tests
import scipy.stats as stats
# Visualizing
import matplotlib.pyplot as plt
from matplotlib import cm
import seaborn as sns
from sklearn.model_selection import learning_curve
import wrangle
# -
# Acquire the mall-customer data via the project's wrangle module.
# NOTE(review): presumably the scaler is fit on train only — confirm in wrangle.py.
min_max_scaler, train, validate, test = wrangle.wrangle_mall_df()
# Explore on the training split only, to avoid peeking at validate/test.
df = train
df.head()
df.describe().T
# NB. We'll explore unscaled, but scale before modeling.
# ## Answer Questions
# ### What is the distribution of each of the variables?
# +
# One histogram per numeric column; zip stops at the shorter sequence,
# so only the first three numeric columns get an axis.
fig, axs = plt.subplots(1, 3, figsize=(15, 7))
for ax, col in zip(axs, df.select_dtypes('number')):
    df[col].plot.hist(ax=ax, title=col, ec='black')
# -
# Horizontal bar chart of the gender split (is_male: 1 = male, 0 = female).
df.is_male.value_counts().plot.barh()
plt.title('Distribution of Males (1) to Females (0)')
plt.ylabel('is_male')
plt.xlabel('Count')
plt.show()
# **Takeaways:**
#
# - spending score is ~ normal
# - age + annual income have a long tail on the right -- i.e. they are *right skewed*
# - more female observations than male
# ### Does spending score differ across gender?
#
# 1. Viz gender against spending score
# 1. Stats test to confirm
# Violin plot: full distribution of spending score for each gender.
sns.violinplot(data=df, y='spending_score', x='is_male')
plt.title('Violin distribution of gender and spending score')
plt.show()
# Box plot: medians and quartiles of spending score by gender.
sns.boxplot(data=df, y='spending_score', x='is_male')
plt.title("Is there a difference in spending score for male vs. female customers?")
plt.show()
# Bar plot: mean spending score by gender (with seaborn's default error bars).
sns.barplot(data=df, y='spending_score', x='is_male')
plt.title("Is there a difference in spending score between males and females?")
plt.show()
# **Takeaways:**
#
# - Seems like there's not much difference in spending score.
# T-test:
#
# - one-tailed or two-tailed? 2 tailed b/c we are looking for *any* difference in means
# - one-sample or two-sample? 2 sample b/c we're looking at the average spending score of 2 separate samples
#
# **Levene's Test**: test for equal variance
#
# - $H_0$: there is no difference in spending score variance between the two samples
# - $H_a$: there is a difference in spending score variance between the two samples
# Levene's test: compares spending-score variance between the two gender groups.
stats.levene(
    df.query('is_male == 1').spending_score,
    df.query('is_male == 0').spending_score,
)
# A high pvalue (0.14) means we **fail to reject** the null hypothesis.
# Equal variances were not rejected above, so the pooled (equal_var=True) t-test is appropriate.
stats.ttest_ind(
    df.query('is_male == 1').spending_score,
    df.query('is_male == 0').spending_score,
    equal_var=True,
)
# We conclude there is no significant difference in spending score between Males and Females (p = 0.755).
#
# What does `.query()` do?
# .query evaluates the string expression against the frame's columns...
df.query('is_male == 1').head()
# ...and is equivalent to the boolean-mask indexing below.
df[df.is_male == 1].head()
# **Conclusion:**
# ### Is there a relationship between spending score and annual income?
#
# 1. Viz annual_income by spending_score
# 2. Spearman's test if we want to confirm correlation (pearson's assumes normally distributed vars)
# Scatter the two key numeric features; look for linear trends or clusters.
df.plot.scatter(
    x="annual_income",
    y="spending_score",
    title='Is there a relationship between annual income and spending score?',
    figsize=(8, 6),
)
plt.show()
# **Conclusion:**
#
# - not a linear relationship
# - looks like an "X"
# - looks like there might be clusters, the middle is very dense, the corners not so much
df.head()
# Is there a relationship between age and spending score?
#
# 1. Viz age by spending_score.
# 1. Create age bins and compare
df.plot.scatter(y='spending_score', x='age', title='Is there a relationship between age and spending score?', figsize=(13, 8))
plt.show()
# **Takeaways:**
#
# - spending score trails off for older individuals
# - younger folks seem to have higher spending scores
# - after age ~ 40, max(spending score) decreases
# #### How does `pd.cut()` work?
#
# Suppose that we had an array:
# Toy Series of the integers 1..10 to demonstrate binning with pd.cut.
x = pd.Series(range(1, 11))
x
# We can pass that array to `pd.cut()` and define the bins with a list of values:
# (bins are half-open on the left: (0, 7], (7, 9], (9, 11])
pd.cut(x, bins=[0, 7, 9, 11])
# When working with DataFrames, we can create a bin column quite easily using `pd.cut()`
df['age_bin'] = pd.cut(df.age, [0, 40, 80])
df.head()
# Let's take a look at the distribution of those bins:
df.age_bin.value_counts().plot.barh(title='Distribution of age_bin')
# Looks like there were not any observations with an age above 80:
df.age.describe()
# Let's create a simple boxplot to visualize how spending score compares between these age bins:
sns.boxplot(data=df, y='spending_score', x='age_bin')
plt.show()
# +
#H0: Variances are equal
#Ha: Variances are not equal
# Levene's test for equal spending-score variance across the two age groups.
stats.levene(
    df[df.age <= 40].spending_score,
    df[df.age > 40].spending_score,
)
# -
# **Conclusion:**
#
# Our p-value is less than our alpha, so we reject the null hypothesis that the variance in spending score is the same for folks less than or equal to 40 and above 40.
#
# (i.e. the variance is not equal)
# Welch's t-test (equal_var=False) since Levene rejected equal variances above.
stats.ttest_ind(
    df[df.age <= 40].spending_score,
    df[df.age > 40].spending_score,
    equal_var=False,
)
# **Conclusion:**
#
# We reject the null hypothesis that the average spending score for folks less than or equal to 40 is the same as the average spending score for folks over 40.
df.head()
# Note that when we used `pd.cut()` we created a category object. This dtype follows its own set of rules.
df.dtypes
# The values shown are not just strings. If we try to match using a string value, its not going to find it:
df[df.age_bin == '(0, 40]']
# Instead we have to use `pd.Interval()`:
df[df.age_bin == pd.Interval(0, 40)]
# But if we still have access to the original unbinned data, then its probably just more straightforward to use that:
df[df.age <= 40]
# ### If we control for age, does spending score differ across annual income?
#
# 1. Viz annual income by spending score for each age bin
# 2. Analyze each age bin separately
sns.relplot(data=df, y='spending_score', x='annual_income', col='age_bin')
# **Takeaways:**
#
# - 0 through 40 group still has an x-shape
# - 40+ crowd is just the bottom half of the x
sns.relplot(
data=df,
y='spending_score',
x='annual_income',
col=pd.cut(df.age, bins=[0, 30, 40, 80]),
)
plt.suptitle("Do the different decades account for the upper vs lower extremes?")
plt.tight_layout()
# **Takeaways:**
#
# - 30-40 *almost* looks linear
# - 0-30 *almost* looks like a negative linear relationship
# - 40-80 looks quadratic
df.head()
# ### Does gender explain the high income-low spending score and/or low income-low spending score in the under 40 group?
sns.scatterplot(
x='annual_income',
y='spending_score',
data=df[df.age <= 40],
hue='is_male',
)
plt.title("Does gender acccount for upper vs lower\nin the younger age group?")
# Aside: scatterplot vs relplot
#
# - scatter plot works with axes
# - relplot works with figures
#
# ---
#
# - seaborn is built on top of matplotlib
# - every matplotlib axis lives within a figure
# - a figure can have 1 or more axes in it (2+ is when we have subplots)
df.age_bin.value_counts()
sns.relplot(
x='annual_income',
y='spending_score',
data=df,
hue='is_male',
col='age_bin',
)
print("Does gender acccount for upper vs lower in the younger age group?\n")
plt.show()
# **Takeaways:**
#
# - gender isn't terribly informative in this context
df.head()
# ### If we control for annual income, does spending score differ across age?
#
# Because of the shape of annual income with spending score, I will create 3 bins of income: \[0, 40), \[40, 70), \[70, 140\].
ax = df.annual_income.plot.hist()
ax.axvline(40, color='black')
ax.axvline(70, color='black')
ax = df.annual_income.plot.hist()
ax.axvline(40, color='black')
ax.axvline(80, color='black')
df.head()
# +
df['income_bin'] = pd.cut(df.annual_income, [0, 50, 80, 140])
plt.figure(figsize=(13, 7))
sns.scatterplot(
x='age',
y='spending_score',
data=df,
hue='income_bin',
)
plt.title("How does age compare to spending score\nwithin each income bin?")
# -
# Middle-income customers have consistent and modest score, regardless of age
# +
df['income_bin'] = pd.cut(df.annual_income, [0, 40, 70, 140])
plt.figure(figsize=(13, 7))
sns.scatterplot(
x='age',
y='spending_score',
data=df,
hue='income_bin',
)
plt.title("How does age compare to spending score\nwithin each income bin?")
# -
# **Takeaways:**
# ## Summary
# - annual income and spending score are good candidates for clustering
# - older folks past some cutoff (40+) tend not to have high values for spending score
# - there's a good number of younger folks (30-) with low incomes and high spending scores
# - gender didn't really seem to have an impact
| clustering_explore_lesson.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="zAYzZXMyCYQx"
# Install pyspark
# ! pip install --ignore-installed pyspark
# Install Spark NLP
# ! pip install --ignore-installed spark-nlp
# + colab={"base_uri": "https://localhost:8080/"} id="mxJniPtV_gqj" outputId="069698a8-614a-457e-a1b1-dbfd7dc43319"
import sparknlp
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.sql import SparkSession
print("Spark NLP version", sparknlp.version())
# + [markdown] id="qceFcfEhr9r5"
# To use Merge Entities parameter we need to set allowSparkContext parameter to true
# + colab={"base_uri": "https://localhost:8080/"} id="luNlbsk1AJqP" outputId="133d06b8-b5a9-4075-d140-d3e9e926f9de"
from pyspark.sql.types import StringType

# Bug fix: no SparkSession was ever created in this notebook, so `spark` was
# undefined here. sparknlp.start() boots (or returns) a session configured for
# Spark NLP.  NOTE(review): the markdown above mentions allowSparkContext —
# confirm whether extra session config is needed for Merge Entities.
spark = sparknlp.start()

text = ['<NAME> is a nice lad and lives in New York']
data_set = spark.createDataFrame(text, StringType()).toDF("text")
data_set.show(truncate=False)
# + [markdown] id="HSvNig972xXC"
# # Graph Extraction
# + [markdown] id="QkW7uQ4_cqAQ"
# Graph Extraction will use pretrained POS, Dependency Parser and Typed Dependency Parser annotators when the pipeline does not have those defined
# + colab={"base_uri": "https://localhost:8080/"} id="VVFs6NDBlWsN" outputId="34c81b25-024d-4ef2-cf10-1764665143a4"
document_assembler = DocumentAssembler().setInputCol("text").setOutputCol("document")
tokenizer = Tokenizer().setInputCols(["document"]).setOutputCol("token")
word_embeddings = WordEmbeddingsModel.pretrained() \
.setInputCols(["document", "token"]) \
.setOutputCol("embeddings")
ner_tagger = NerDLModel.pretrained() \
.setInputCols(["document", "token", "embeddings"]) \
.setOutputCol("ner")
# + [markdown] id="HEJRu8qXg3SI"
# To instruct Graph Extraction to automatically use the pretrained POS, Dependency and Typed Dependency Parser annotators, we need to set the MergeEntities parameter to True. This parameter merges neighboring tagged entities into one, e.g. <NAME> will be considered a single token before it is sent to the Dependency Parser annotators.
# -
# In this sentence, we can extract paths for the following pair of tokens-ENTITIES:
# * lad-PER, will output the path between *lad* and <NAME>
# * lad-LOC, will output the path between *lad* and New York
#
# Any other pair of token,ENTITY will output an empty path since there is no path between them. You can visualize the dependency tree for this sentence using [sparknlp display package](https://github.com/JohnSnowLabs/spark-nlp-display)
# + id="XxqysCFDg1aP"
graph_extraction = GraphExtraction() \
.setInputCols(["document", "token", "ner"]) \
.setOutputCol("graph") \
.setRelationshipTypes(["lad-PER", "lad-LOC"]) \
.setMergeEntities(True)
# + [markdown] id="fEpjj9shlKMP"
# Under the hood it uses default pretrained annotators, but we can set any pretrained model with the parameters *setPosModel*, *setDependencyParserModel* or *setTypedDependencyParserModel*
# + [markdown] id="0Dms9keFa7K0"
# Unlike [this notebook](https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/feature/graph-extraction-tutorial/jupyter/annotation/english/graph-extraction/graph_extraction.ipynb), the pipeline below just has graph extraction + NER + tokenizer annotators
# + id="LRpKY22pAqlL"
graph_pipeline = Pipeline().setStages([document_assembler, tokenizer,
word_embeddings, ner_tagger,
graph_extraction])
# + [markdown] id="lJV6x-Nqw442"
# The result dataset has a *graph* column with the paths between lad,PER and lad-LOC
# + colab={"base_uri": "https://localhost:8080/"} id="Kh78KBe-63Dn" outputId="17dbc439-dae7-412f-9f52-ac61ece64025"
graph_data_set = graph_pipeline.fit(data_set).transform(data_set)
graph_data_set.select("graph").show(truncate=False)
| jupyter/annotation/english/graph-extraction/graph_extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pyspark.sql import SparkSession
from pyspark.sql import functions as sf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import HashingTF, IDF, StopWordsRemover,RegexTokenizer
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler, IndexToString, StandardScaler
import shutil
import os
from pyspark.ml.stat import Correlation
import pandas as pd
import seaborn as sns
# +
#spark.stop()
# -
#open Spark Session
spark = SparkSession.builder.appName('prices_houses').master("local[*]").getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
#spark.sparkContext.addPyFile("sparkxgb.zip")
#read source I remove last 10 entries for predictions
data=spark.read.csv("train.csv", inferSchema=True,sep=',',header=True)
datatest=spark.read.csv("test.csv", schema=data.schema,sep=',',header=True)
len(data.columns),len(datatest.columns)
columnListS = [item[0] for item in data.dtypes if item[1].startswith('string')]
columnListI = [item[0] for item in data.dtypes if item[1].startswith('int')]
columnListS_T = [item[0] for item in datatest.dtypes if item[1].startswith('string')]
columnListI_T = [item[0] for item in datatest.dtypes if item[1].startswith('int')]
# +
#datatest=datatest.fillna("NO_Value",subset=columnListS_T)
#datatest=datatest.fillna(0,subset=columnListI_T)
#data=data.fillna("NO_Value",subset=columnListS)
# -
features=columnListI[1:34]
featurest=columnListI_T[1:34]
# +
lr_data = data.select(sf.col("SalePrice").alias("label"), *columnListI).dropna()
columns = lr_data.columns
# -
lr_datat = datatest.select(sf.col("SalePrice").alias("label"), *columnListI_T)
columnst = lr_datat.columns
lr_datat=lr_datat.fillna(0)
lr_datat.show(n=1, truncate=False)
resumen2=lr_data.toPandas()
resumen2.describe().transpose()
corr=resumen2.corr()
# +
cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)
def magnify():
    """Return pandas Styler table styles that enlarge cells on hover.

    Small fonts by default; hovering a header or a cell bumps the font
    size so the dense correlation matrix can be inspected interactively.
    """
    styles = [
        {"selector": "th", "props": [("font-size", "7pt")]},
        {"selector": "td", "props": [("padding", "0em 0em")]},
        {"selector": "th:hover", "props": [("font-size", "12pt")]},
        {
            "selector": "tr:hover td:hover",
            "props": [("max-width", "200px"), ("font-size", "12pt")],
        },
    ]
    return styles
# -
corr.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '10pt'})\
.set_caption("Hover to magify")\
.set_precision(2)\
.set_table_styles(magnify())
features_extra=['LotArea','OverallQual','YearBuilt','YearRemodAdd','BsmtFinSF1','TotalBsmtSF','1stFlrSF',\
'2ndFlrSF','GrLivArea','BsmtFullBath','FullBath','HalfBath','TotRmsAbvGrd','Fireplaces','GarageCars','GarageArea']
# +
vector = VectorAssembler(inputCols=columns, outputCol="features")
scaler = StandardScaler(withMean = True, withStd = True,inputCol="features", outputCol="scaled_features")
stages = [vector, scaler]
pipe = Pipeline(stages=stages)
# we'll be using this data frame
data_out = pipe.fit(lr_data).transform(lr_data).select('id','scaled_features','label')
# +
#lr_datat TEST DATA
vector2 = VectorAssembler(inputCols=columnst, outputCol="features")
scaler2 = StandardScaler(withMean = True, withStd = True,inputCol="features", outputCol="scaled_features")
stages = [vector2, scaler2]
pipe2 = Pipeline(stages=stages)
# we'll be using this data frame
data_out_t = pipe2.fit(lr_datat).transform(lr_datat).select('id','scaled_features','label')
# -
data_out=data_out.withColumnRenamed('scaled_features','features')
data_out_t=data_out_t.withColumnRenamed('scaled_features','features')
train_data,test_data = data_out.randomSplit([0.7,0.3])
data_out.show()
from pyspark.ml.regression import LinearRegression, LinearRegressionModel
from pyspark.ml.evaluation import RegressionEvaluator
lr = LinearRegression()
lrModel = lr.fit(train_data)
# Print the coefficients and intercept for linear regression
print("Coefficients: {} Intercept: {}".format(lrModel.coefficients,lrModel.intercept))
test_results = lrModel.evaluate(test_data)
print("RMSE: {}".format(test_results.rootMeanSquaredError))
print("MSE: {}".format(test_results.meanSquaredError))
print("R2: {}".format(test_results.r2))
test_kagle=lrModel.evaluate(data_out_t)
from pyspark.ml.regression import GBTRegressor
from pyspark.ml.evaluation import RegressionEvaluator
gbt = GBTRegressor()
model = gbt.fit(train_data)
predictions = model.transform(test_data)
# Select (prediction, true label) and compute test quality.
# Bug fix: the evaluator uses metricName="r2", but the original code named the
# result `rmse` and printed "Root Mean Squared Error" — a misleading label.
evaluator = RegressionEvaluator(
    labelCol="label", predictionCol="prediction", metricName="r2")
r2 = evaluator.evaluate(predictions)
print("R2 on test data = %g" % r2)
predictions2 = model.transform(data_out_t)
output=predictions2.select('id','prediction')
output.write.csv('output2.csv')
from pyspark.ml.regression import RandomForestRegressor
from pyspark.ml.evaluation import RegressionEvaluator
rf = RandomForestRegressor()
modelrf = rf.fit(train_data)
predictionsrf = modelrf.transform(test_data)
# Select (prediction, true label) and compute test quality.
# Bug fix: the evaluator uses metricName="r2", but the original code named the
# result `rmse` and printed "Root Mean Squared Error" — a misleading label.
evaluator = RegressionEvaluator(
    labelCol="label", predictionCol="prediction", metricName="r2")
r2 = evaluator.evaluate(predictionsrf)
print("R2 on test data = %g" % r2)
predictions3 = modelrf.transform(data_out_t)
output=predictions3.select('id','prediction')
output.write.csv('output3.csv')
| prices_houses/Prices Houses Spark V2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <div id="toc_1" data-value="Inital code"></div>
# # Scatterplot Animation using Matplotlib, day 2
# We will animate a scatterplot in today's tutorial.
# Lets get right into it.
# ### Import all the necessary modules.
# +
import numpy as np
np.random.seed(2021)
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors
plt.style.use('ggplot')
import matplotlib.animation as animation # for the animation
from IPython.display import HTML # To display the animation in jupter notebook
# -
# ### Prepare some random data
# The majority of data you will animate is either **Time Series** or data generated through some iterative process.
# So one axis/ dimension of your data should contain the time / iterative feature.
# **Lineplot example:**
# <p><center>Price of an item (You can represent the time as integer)</center></p>
#
# | Time(X)| Price(Y)|
# | --- | --- |
# | 2021-10-28 | 12.5 |
# | 2021-10-29 | 15 |
# | 2021-10-30 | 17.8 |
#
#
# **Scatterplot example:**
# <p><center>Coordinates of a car at each discrete step</center></p>
#
# | Step| X | Y |
# | --- | --- | --- |
# | 0 | 0 | 0 |
# | 1 | 1 | 0 |
# | 2 | 1 | 1 |
#
# +
n = 30
X = np.random.rand(n) * 10
Y = np.random.rand(n) * 10
print(' X Y')
np.vstack((X, Y)).T[:5]
# -
fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(X, Y, c='#b3de72', edgecolor='k', s=100)
plt.show()
# Our Data at the $0th$ / first step / time instance looks like this.
#
#
# Lets generate the data for the rest of the time instances.
# We will change the coordinates of the points by a small margin at every step
# i.e add or subtract a small number for every time instance.
# +
# Random-walk the point cloud: store one (X, Y) snapshot per time step.
data = []
# our total no of time steps
time_range = 200
# running axis bounds across all frames, so the animation view can stay fixed
xmax, xmin, ymax, ymin = 0, 0, 0, 0
for t in range(time_range):
    # append X and Y to data for each time_range instance
    data.append((X, Y))
    # change X and Y values by a small margin
    # (Gaussian step, mean 0, std 0.5 — an independent 2-D random walk per point)
    X = X + np.random.normal(0, 0.5, size=X.shape)
    Y = Y + np.random.normal(0, 0.5, size=Y.shape)
    # to set the ax limits
    xmax, xmin = max(xmax, X.max()), min(xmin, X.min())
    ymax, ymin = max(ymax, Y.max()), min(ymin, Y.min())
# -
# Lets plot few instances of our **data**
fig, axes = plt.subplots(2, 2, figsize=(14, 8))
for ax, (X, Y) in zip(axes.reshape(-1), data[:4]):
ax.scatter(X, Y, c='#b3de72', edgecolor='k', s=100)
plt.show()
# As you can see, the coordinates change by only a small amount between consecutive frames.
# ### Without Blitting
# Now that we are done with the boring stuff lets **animate**.
#
# Without Blitting is basically method 1 from our last tutorial, here we redraw the plot for every instance / frame.
def update(frame):
    """Redraw the whole scatterplot from scratch for animation frame `frame`.

    Non-blitting approach: wipe the axes and re-plot every time.
    """
    ax.clear()
    xs, ys = data[frame]
    ax.scatter(xs, ys, c='#b3de72', edgecolor='k', s=100)
    # keep the view fixed across frames so the points appear to move
    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
# +
fig, ax = plt.subplots(figsize=(10, 5))
ani = animation.FuncAnimation(fig, # the canvas we will use for the animation
update, # the update function
frames=time_range, # total no of frames
interval=75, # the delay between each frame
blit=False # So we don't have to return
)
plt.close()
# -
HTML(ani.to_html5_video())
# TADA !!, that's pretty much our Without blitting example
# ### With Blitting
# Now lets go through our method 2. In this process we will create the plot once and only update
# all those attributes that are changing in every frame.
#
# Lets try something new as well ! We will change the point colors based on their coordinates.
# Something like this
# ```
# 20|
# |Red Green
# 5|
# |Orange Lime
# -10|_______________
# -15 0 20
# ```
# +
# Function to calculate the color based on the coordinates
colors = [''] * n
def get_colors(X, Y, colors):
    """Assign a quadrant-based color to each point, writing into `colors`.

    The plane is split at x = 0 and y = 5:
      x < 0,  y < 5  -> orange '#fdbc6c'
      x >= 0, y < 5  -> lime   '#b3de72'
      x < 0,  y >= 5 -> red    '#e54d35'
      x >= 0, y >= 5 -> green  '#3faa5a'

    Mutates `colors` in place and returns it.
    """
    for i, (x, y) in enumerate(zip(X, Y)):
        if x < 0 and y < 5:
            colors[i] = '#fdbc6c'
        elif x >= 0 and y < 5:
            colors[i] = '#b3de72'
        elif x < 0 and y >= 5:
            colors[i] = '#e54d35'
        else:
            # Bug fix: the original condition `x > 0 and y > 5` skipped points
            # on the boundary (x == 0 or y == 5), leaving a stale/empty entry.
            colors[i] = '#3faa5a'
    return colors
get_colors(data[0][0], data[0][1], colors)[:5]
# -
# Lets check the initial plot
# +
fig, ax = plt.subplots(figsize=(10, 5))
# The plot function returns a PathCollection object
# Because we are going to modify the same plot
# The updates will be done only on X, Y and color
# edgecolor and size will remain constant
scatter = ax.scatter(data[0][0], data[0][1],
c=get_colors(data[0][0], data[0][1], colors),
edgecolors='k',
s=100)
print('PathCollection object:', scatter)
plt.show()
# -
# **To update X and Y**, we will use the function
# > set_offsets()
#
# **To update the colors**
# > set_facecolors()
#
def update(frame):
    """Blitting update: mutate the existing PathCollection instead of redrawing.

    Returns the changed artists as a sequence, as required by blit=True.
    """
    xs, ys = data[frame]
    # positions are replaced wholesale via set_offsets, which expects (N, 2)
    scatter.set_offsets(np.column_stack((xs, ys)))
    # recolor each point according to the quadrant it has moved into
    scatter.set_facecolors(get_colors(xs, ys, colors))
    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
    return (scatter,)
ani = animation.FuncAnimation(fig, # the canvas we will use for the animation
update, # the update function
frames=time_range, # total no of frames
interval=75, # the delay between each frame
blit=True, # blit is set true because we return some objects
)
HTML(ani.to_html5_video())
# ### Summary
# Creating animations with blitting can be considerably faster; unfortunately, it is quite a hassle to update
# all the properties of the scatterplot, such as markers, edgecolors, etc. The matplotlib documentation is not
# very friendly in this case, either.
| articles/.ipynb_checkpoints/Scatterplot Animation, day 2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def dance():
    """Print a fixed message; used below to show functions are first-class objects."""
    message = "I am a disco dancer"
    print(message)
dance()
dance
nacho = dance
nacho()
def dagabaz():
    """Print a fixed message; rebound to the name `nacho` later in the demo."""
    line = "dil jude"
    print(line)
nacho = dagabaz
dance()
nacho()
out = nacho()
print(out)
def fact(n):
    """Return n! computed iteratively (returns 1 for n <= 0, like the original)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
fact(4)
def second(one, two):
    """Print both arguments; demonstrates positional vs keyword argument passing."""
    args = (one, two)
    print(*args)
second(10, 20)
second(two=30, one=20)
print("hi", "bye", sep="--")
second(10, two=30, one=20)
second(one=10, 30)
def third(one, two="more happy", three="happy"):
    """Print all three arguments; demonstrates default parameter values."""
    values = (one, two, three)
    print(*values)
third(1)
# +
# print?
# -
def square(x):
    """Return x squared."""
    return x ** 2
square(4)
list(map(print, [1, 2, 3, 4, 5]))
# +
# square?
# -
def to_dict(op, ite):
    """Return a dict mapping each item of `ite` to op(item).

    Bug fix: the original built `map(op, ite)` and then `zip(ite, vals)`,
    iterating `ite` twice — correct for sequences like range/list, but
    silently wrong (interleaved consumption) for single-pass iterators.
    A dict comprehension walks the iterable exactly once.
    """
    return {item: op(item) for item in ite}
to_dict(square, range(1, 8))
list(map(lambda chacha : chacha * 4, [1, 2, 3, 4, 5]))
s = lambda a, b : a+b
s(4, 7)
list(map(lambda x : list(range(x)), [1, 2, 3, 4, 5]))
some = lambda x : list(range(x))
some(2)
line = "1 3 5 6"
out = list(map(int, line.split(" ")))
out
# +
# map?
# -
li = [4, 5, 22, 66, 11]
list(filter(lambda x : x%2 == 0, li))
def unpack(one, *some, sec="hi"):
    """Demonstrate *args packing and a keyword-only parameter with a default."""
    # `some` collects any extra positional arguments into a tuple
    print(some, sec)
    print(one)
unpack(4, 5, 7, "bye")
def go(one, two):
    """Print both arguments; used to demo tuple unpacking at the call site."""
    pair = (one, two)
    print(*pair)
tup = (3, 6)
go(*tup)
| session-2/func.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prerequisites for the GPU Demos
# To run the GPU-demo tutorials, you must fulfill the following prerequisites:
#
# - Your environment must have one or more [NVIDIA](https://www.nvidia.com/en-us/) graphics processing units (GPUs).
# - To use the Horovod demos, ensure that
# - The platform's Horovod service is deployed on your environment.<br>
# > **Note:** The Horovod service isn't part of the default v2.3 platform deployment.<br/>
# > Contact [Iguazio Customer Success](mailto:<EMAIL>) to deploy the service.
# - Your Jupyter Notebook service uses the **Jupyter Deep Learning + GPU** flavor (configured from the custom service parameters).
# - To use the RAPIDS demos, ensure that
# - Your environment has one or more GPUs with the [NVIDIA Pascal](https://www.nvidia.com/en-us/geforce/products/10series/architecture/) architecture or better and [compute capability](https://developer.nvidia.com/cuda-gpus) 6.0+.
# - Your Jupyter Notebook service uses the **Jupyter Deep Learning with Rapids** flavor (configured from the custom service parameters).<br/>
# > **Note:** In v2.3 of the platform, you must contact [Iguazio Customer Success](mailto:<EMAIL>) before attempting to deploy the service with this flavor.
| demos/gpu/gpu-prerequisites.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div>
# <a href="http://bokeh.pydata.org/"><img src="../images/bokeh-header.png"></a>
# </div>
#
# # Bokeh 5-minute Overview
#
# Bokeh is a Python interactive visualization library that targets modern web browsers for presentation. Its goal is to provide elegant, concise construction of novel graphics in the style of D3.js, and to extend this capability with high-performance interactivity over very large or streaming datasets. Bokeh can help anyone who would like to quickly and easily create interactive plots, dashboards, and data applications.
# ## Simple Example
#
# Here is a simple first example. First we'll import the [`figure`](https://bokeh.pydata.org/en/latest/docs/reference/plotting.html#bokeh.plotting.figure.figure) function from [`bokeh.plotting`](https://bokeh.pydata.org/en/latest/docs/user_guide/plotting.html), which will let us create all sorts of interesting plots easily. We also import the `show` and `output_notebook` functions from `bokeh.io` — these let us display our results inline in the notebook.
from bokeh.plotting import figure
from bokeh.io import output_notebook, show
# Next, we'll tell Bokeh to display its plots directly into the notebook.
# This will cause all of the Javascript and data to be embedded directly
# into the HTML of the notebook itself.
# (Bokeh can output straight to HTML files, or use a server, which we'll
# look at later.)
output_notebook()
# Next, we'll import NumPy and create some simple data.
from numpy import cos, linspace
x = linspace(-6, 6, 100)
y = cos(x)
# Now we'll call Bokeh's `figure` function to create a plot `p`. Then we call the `circle()` method of the plot to render a red circle at each of the points in x and y.
#
# We can immediately interact with the plot:
#
# * click-drag will pan the plot around.
# * mousewheel will zoom in and out (after enabling in the toolbar)
#
# The toolbar below is the default one that is available for all plots. It can be configured further via the `tools` keyword argument.
p = figure(width=500, height=500)
p.circle(x, y, size=7, color="firebrick", alpha=0.5)
show(p)
# # Bar Plot Example
#
#
# Bokeh's core display model relies on *composing graphical primitives* which are bound to data series. This is similar in spirit to Protovis and D3, and different than most other Python plotting libraries.
#
# A slightly more sophisticated example demonstrates this idea.
#
# Bokeh ships with a small set of interesting "sample data" in the `bokeh.sampledata` package. We'll load up some historical automobile mileage data, which is returned as a Pandas `DataFrame`.
# +
from bokeh.sampledata.autompg import autompg
grouped = autompg.groupby("yr")
mpg = grouped.mpg
avg, std = mpg.mean(), mpg.std()
years = list(grouped.groups)
american = autompg[autompg["origin"]==1]
japanese = autompg[autompg["origin"]==3]
# -
# For each year, we want to plot the distribution of MPG within that year.
# +
p = figure(title="MPG by Year (Japan and US)")
p.vbar(x=years, bottom=avg-std, top=avg+std, width=0.8,
fill_alpha=0.2, line_color=None, legend="MPG 1 stddev")
p.circle(x=japanese["yr"], y=japanese["mpg"], size=10, alpha=0.5,
color="red", legend="Japanese")
p.triangle(x=american["yr"], y=american["mpg"], size=10, alpha=0.3,
color="blue", legend="American")
p.legend.location = "top_left"
show(p)
# -
# **This kind of approach can be used to generate other kinds of interesting plots. See many more examples in the [Bokeh Documentation Gallery](https://bokeh.pydata.org/en/latest/docs/gallery.html).**
# ## Linked Brushing
#
# To link plots together at a data level, we can explicitly wrap the data in a `ColumnDataSource`. This allows us to reference columns by name.
#
# We can use a "select" tool to select points on one plot, and the linked points on the other plots will highlight.
# +
from bokeh.models import ColumnDataSource
from bokeh.layouts import gridplot
source = ColumnDataSource(autompg)
options = dict(plot_width=300, plot_height=300,
tools="pan,wheel_zoom,box_zoom,box_select,lasso_select")
p1 = figure(title="MPG by Year", **options)
p1.circle("yr", "mpg", color="blue", source=source)
p2 = figure(title="HP vs. Displacement", **options)
p2.circle("hp", "displ", color="green", source=source)
p3 = figure(title="MPG vs. Displacement", **options)
p3.circle("mpg", "displ", size="cyl", line_color="red", fill_color=None, source=source)
p = gridplot([[ p1, p2, p3]], toolbar_location="right")
show(p)
# -
# You can read more about the `ColumnDataSource` and other Bokeh data structures in [Providing Data for Plots and Tables](https://bokeh.pydata.org/en/latest/docs/user_guide/data.html)
# ## Standalone HTML
#
# In addition to working well with the Notebook, Bokeh can also save plots out into their own HTML files. Here is the bar plot example from above, but saving into its own standalone file.
#
# Now when we call `show()`, a new browser tab is also opened with the plot. If we just wanted to save the file, we would use `save()` instead.
# +
from bokeh.plotting import output_file
output_file("barplot.html")
p = figure(title="MPG by Year (Japan and US)")
p.vbar(x=years, bottom=avg-std, top=avg+std, width=0.8,
fill_alpha=0.2, line_color=None, legend="MPG 1 stddev")
p.circle(x=japanese["yr"], y=japanese["mpg"], size=10, alpha=0.3,
color="red", legend="Japanese")
p.triangle(x=american["yr"], y=american["mpg"], size=10, alpha=0.3,
color="blue", legend="American")
p.legend.location = "top_left"
show(p)
# -
# ## Bokeh Applications
#
# Bokeh also has a server component that can be used to build interactive web applications that easily connect the powerful constellation of PyData tools to sophisticated Bokeh visualizations. The Bokeh server can be used to:
#
# * respond to UI and tool events generated in a browser with computations or queries using the full power of python
# * automatically push server-side updates to the UI (i.e. widgets or plots in a browser)
# * use periodic, timeout, and asynchronous callbacks to drive streaming updates
#
# The cell below shows a simple deployed Bokeh application from https://demo.bokehplots.com embedded in an IFrame. Scrub the sliders or change the title to see the plot update.
from IPython.display import IFrame
IFrame('https://demo.bokehplots.com/apps/sliders/', width=900, height=410)
# Click on any of the thumbnails below to launch other live Bokeh applications.
#
# <center>
# <a href="https://demo.bokehplots.com/apps/crossfilter">
# <img
# width="30%" height="30%" style="display: inline ; padding: 10px;"
# src="https://bokeh.pydata.org/static/crossfilter_t.png"
# >
# </a>
#
# <a href="https://demo.bokehplots.com/apps/movies">
# <img
# width="30%" height="30%" style="display: inline ; padding: 10px;"
# src="https://bokeh.pydata.org/static/movies_t.png"
# >
# </a>
#
# <a href="https://demo.bokehplots.com/apps/gapminder">
# <img
# width="30%" height="30%" style="display: inline ; padding: 10px;"
# src="http://bokeh.pydata.org/static/gapminder_t.png"
# >
# </a>
# </center>
#
# Find more details and information about developing and deploying Bokeh server applications in the User's Guide chapter [Running a Bokeh Server](https://bokeh.pydata.org/en/latest/docs/user_guide/server.html).
# ## BokehJS
#
# At its core, Bokeh consists of a Javascript library, [BokehJS](https://github.com/bokeh/bokeh/tree/master/bokehjs), and a Python binding which provides classes and objects that ultimately generate a JSON representation of the plot structure.
#
# You can read more about design and usage in the [Developing with JavaScript](https://bokeh.pydata.org/en/latest/docs/user_guide/bokehjs.html) section of the Bokeh User's Guide.
# ## More Information
#
# Find more details and information at the resources listed below:
#
# *Documentation:* https://bokeh.pydata.org/en/latest
#
# *GitHub:* https://github.com/bokeh/bokeh
#
# *Mailing list:* [<EMAIL>](mailto:<EMAIL>)
#
# *Gitter Chat:* https://gitter.im/bokeh/bokeh
#
# Be sure to follow us on Twitter [@bokehplots](http://twitter.com/BokehPlots>) and on [Youtube](https://www.youtube.com/c/Bokehplots)!
#
# <img src="../images/bokeh-transparent.png" width="64px" height="64px">
| bokeh/quickstart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TensorFlow Transfer Learning
#
# This notebook shows how to use pre-trained models from [TensorFlowHub](https://www.tensorflow.org/hub). Sometimes, there is not enough data, computational resources, or time to train a model from scratch to solve a particular problem. We'll use a pre-trained model to classify flowers with better accuracy than a new model for use in a mobile application.
#
# ## Learning Objectives
# 1. Know how to apply image augmentation
# 2. Know how to download and use a TensorFlow Hub module as a layer in Keras.
# +
import os
import pathlib
from PIL import Image
import IPython.display as display
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import (
Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax)
import tensorflow_hub as hub
# -
# ## Exploring the data
#
# As usual, let's take a look at the data before we start building our model. We'll be using a creative-commons licensed flower photo dataset of 3670 images falling into 5 categories: 'daisy', 'roses', 'dandelion', 'sunflowers', and 'tulips'.
#
# The below [tf.keras.utils.get_file](https://www.tensorflow.org/api_docs/python/tf/keras/utils/get_file) command downloads a dataset to the local Keras cache. To see the files through a terminal, copy the output of the cell below.
# +
data_dir = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
# Print data path
print("cd", data_dir)
# -
# We can use python's built in [pathlib](https://docs.python.org/3/library/pathlib.html) tool to get a sense of this unstructured data.
# +
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.jpg')))
print("There are", image_count, "images.")
CLASS_NAMES = np.array(
[item.name for item in data_dir.glob('*') if item.name != "LICENSE.txt"])
print("These are the available classes:", CLASS_NAMES)
# -
# Let's display the images so we can see what our model will be trying to learn.
# +
roses = list(data_dir.glob('roses/*'))
for image_path in roses[:3]:
display.display(Image.open(str(image_path)))
# -
# ## Building the dataset
#
# Keras has some convenient methods to read in image data. For instance [tf.keras.preprocessing.image.ImageDataGenerator](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator) is great for small local datasets. A tutorial on how to use it can be found [here](https://www.tensorflow.org/tutorials/load_data/images), but what if we have so many images, it doesn't fit on a local machine? We can use [tf.data.datasets](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) to build a generator based on files in a Google Cloud Storage Bucket.
#
# We have already prepared these images to be stored on the cloud in `gs://cloud-ml-data/img/flower_photos/`. The images are randomly split into a training set with 90% of the data and an evaluation set with the remaining 10%, listed in CSV files:
#
# Training set: [train_set.csv](https://storage.cloud.google.com/cloud-ml-data/img/flower_photos/train_set.csv)
# Evaluation set: [eval_set.csv](https://storage.cloud.google.com/cloud-ml-data/img/flower_photos/eval_set.csv)
#
# Explore the format and contents of the train.csv by running:
# !gsutil cat gs://cloud-ml-data/img/flower_photos/train_set.csv | head -5 > /tmp/input.csv
# !cat /tmp/input.csv
# !gsutil cat gs://cloud-ml-data/img/flower_photos/train_set.csv | sed 's/,/ /g' | awk '{print $2}' | sort | uniq > /tmp/labels.txt
# !cat /tmp/labels.txt
# Let's figure out how to read one of these images from the cloud. TensorFlow's [tf.io.read_file](https://www.tensorflow.org/api_docs/python/tf/io/read_file) can help us read the file contents, but the result will be a [Base64 image string](https://en.wikipedia.org/wiki/Base64). Hmm... not very readable for humans or Tensorflow.
#
# Thankfully, TensorFlow's [tf.image.decode_jpeg](https://www.tensorflow.org/api_docs/python/tf/io/decode_jpeg) function can decode this string into an integer array, and [tf.image.convert_image_dtype](https://www.tensorflow.org/api_docs/python/tf/image/convert_image_dtype) can cast it into a 0 - 1 range float. Finally, we'll use [tf.image.resize](https://www.tensorflow.org/api_docs/python/tf/image/resize) to force image dimensions to be consistent for our neural network.
#
# We'll wrap these into a function as we'll be calling these repeatedly. While we're at it, let's also define our constants for our neural network.
# +
# Input geometry for the network: 224x224 RGB, matching the 224px TF Hub
# MobileNet module used later in this notebook.
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
BATCH_SIZE = 32
# 10 is a magic number tuned for local training of this dataset.
SHUFFLE_BUFFER = 10 * BATCH_SIZE
# Let tf.data choose the prefetch buffer size dynamically at runtime.
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Number of images in the evaluation split; floor-divide so validation
# runs only over whole batches.
VALIDATION_IMAGES = 370
VALIDATION_STEPS = VALIDATION_IMAGES // BATCH_SIZE
# -
def decode_img(img, reshape_dims):
    """Decode a JPEG byte string into a float32 image tensor.

    Args:
        img: scalar string tensor holding the compressed JPEG bytes.
        reshape_dims: target spatial dimensions for the decoded image.

    Returns:
        A float32 tensor with values in [0, 1], resized to `reshape_dims`.
    """
    # Decompress into a uint8 tensor with the expected color channels.
    pixels = tf.image.decode_jpeg(img, channels=IMG_CHANNELS)
    # Rescale integer pixel values into floats in the [0, 1] range.
    pixels = tf.image.convert_image_dtype(pixels, tf.float32)
    # Force consistent spatial dimensions for the neural network.
    return tf.image.resize(pixels, reshape_dims)
# Is it working? Let's see!
#
# **TODO 1.a:** Run the `decode_img` function and plot it to see a happy looking daisy.
# +
img = tf.io.read_file(
"gs://cloud-ml-data/img/flower_photos/daisy/754296579_30a9ae018c_n.jpg")
# Uncomment to see the image string.
#print(img)
img = decode_img(img, [IMG_WIDTH, IMG_HEIGHT])
plt.imshow((img.numpy()));
# -
# One flower down, 3669 more of them to go. Rather than load all the photos in directly, we'll use the file paths given to us in the csv and load the images when we batch. [tf.io.decode_csv](https://www.tensorflow.org/api_docs/python/tf/io/decode_csv) reads in csv rows (or each line in a csv file), while [tf.math.equal](https://www.tensorflow.org/api_docs/python/tf/math/equal) will help us format our label such that it's a boolean array with a truth value corresponding to the class in `CLASS_NAMES`, much like the labels for the MNIST Lab.
def decode_csv(csv_row):
    """Parse one '<gcs_path>,<label>' CSV line into (image_bytes, label).

    The label is a boolean vector the length of CLASS_NAMES, True at the
    position of the matching class (one-hot, like the MNIST lab labels).
    """
    # record_defaults supplies fallback values AND fixes both column
    # dtypes as strings for tf.io.decode_csv.
    record_defaults = ["path", "flower"]
    filename, label_string = tf.io.decode_csv(csv_row, record_defaults)
    # Read the raw JPEG bytes; decoding happens later in decode_img.
    image_bytes = tf.io.read_file(filename=filename)
    label = tf.math.equal(CLASS_NAMES, label_string)
    return image_bytes, label
# Next, we'll transform the images to give our network more variety to train on. There are a number of [image manipulation functions](https://www.tensorflow.org/api_docs/python/tf/image). We'll cover just a few:
#
# * [tf.image.random_crop](https://www.tensorflow.org/api_docs/python/tf/image/random_crop) - Randomly deletes the top/bottom rows and left/right columns down to the dimensions specified.
# * [tf.image.random_flip_left_right](https://www.tensorflow.org/api_docs/python/tf/image/random_flip_left_right) - Randomly flips the image horizontally
# * [tf.image.random_brightness](https://www.tensorflow.org/api_docs/python/tf/image/random_brightness) - Randomly adjusts how dark or light the image is.
# * [tf.image.random_contrast](https://www.tensorflow.org/api_docs/python/tf/image/random_contrast) - Randomly adjusts image contrast.
#
# **TODO 1.b:** Add the missing parameters from the random augment functions.
# +
# Augmentation ranges for the tf.image.random_* calls below.
MAX_DELTA = 63.0 / 255.0  # Change brightness by at most ~24.7% of full scale.
CONTRAST_LOWER = 0.2
CONTRAST_UPPER = 1.8


def read_and_preprocess(image_bytes, label, random_augment=False):
    """Decode image bytes and optionally apply random augmentation.

    Args:
        image_bytes: JPEG-encoded image as a string tensor.
        label: passed through unchanged (one-hot boolean class vector).
        random_augment: when True, apply random crop / horizontal flip /
            brightness / contrast to give the network more variety.

    Returns:
        (image, label) with image float32 of shape [IMG_HEIGHT, IMG_WIDTH,
        IMG_CHANNELS].
    """
    if random_augment:
        # Decode slightly larger than final size so the random crop can
        # shift the framing of the subject.
        img = decode_img(image_bytes, [IMG_HEIGHT + 10, IMG_WIDTH + 10])
        img = tf.image.random_crop(img, [IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_brightness(img, MAX_DELTA)
        img = tf.image.random_contrast(img, CONTRAST_LOWER, CONTRAST_UPPER)
    else:
        # tf.image.resize expects [height, width]; the original passed
        # [IMG_WIDTH, IMG_HEIGHT], harmless only because both are 224 —
        # fixed here for consistency with the augmented branch.
        img = decode_img(image_bytes, [IMG_HEIGHT, IMG_WIDTH])
    return img, label


def read_and_preprocess_with_augment(image_bytes, label):
    """Dataset.map-friendly wrapper with augmentation always enabled."""
    return read_and_preprocess(image_bytes, label, random_augment=True)
# -
# Finally, we'll make a function to craft our full dataset using [tf.data.dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset). The [tf.data.TextLineDataset](https://www.tensorflow.org/api_docs/python/tf/data/TextLineDataset) will read in each line in our train/eval csv files to our `decode_csv` function.
#
# [.cache](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#cache) is key here. It will store the dataset in memory
def load_dataset(csv_of_filenames, batch_size, training=True):
    """Build a batched tf.data pipeline from a CSV of (path, label) rows.

    Args:
        csv_of_filenames: path/GCS URI of the CSV listing images + labels.
        batch_size: number of images per batch.
        training: if True, augment, shuffle, and repeat forever; if False,
            read each image exactly once with no augmentation.

    Returns:
        A prefetching tf.data.Dataset yielding (image_batch, label_batch).
    """
    # cache() keeps the decoded CSV rows (raw bytes + labels) in memory so
    # files are fetched from GCS only once across epochs.
    dataset = tf.data.TextLineDataset(filenames=csv_of_filenames) \
        .map(decode_csv).cache()
    if training:
        dataset = dataset \
            .map(read_and_preprocess_with_augment) \
            .shuffle(SHUFFLE_BUFFER) \
            .repeat(count=None) # Repeat indefinitely.
    else:
        dataset = dataset \
            .map(read_and_preprocess) \
            .repeat(count=1) # Each photo used once.
    # Prefetch prepares the next set of batches while current batch is in use.
    return dataset.batch(batch_size=batch_size).prefetch(buffer_size=AUTOTUNE)
# We'll test it out with our training set. A batch size of one will allow us to easily look at each augmented image.
train_path = "gs://cloud-ml-data/img/flower_photos/train_set.csv"
train_data = load_dataset(train_path, 1)
itr = iter(train_data)
# **TODO 1.c:** Run the below cell repeatedly to see the results of different batches. The images have been un-normalized for human eyes. Can you tell what type of flowers they are? Is it fair for the AI to learn on?
image_batch, label_batch = next(itr)
img = image_batch[0]
plt.imshow(img)
print(label_batch[0])
# ## MobileNetV2
#
# These flower photos are much larger than the handwriting recognition images in MNIST. They have about 10 times as many pixels per axis **and** there are three color channels, making the information here over 200 times larger!
#
# How do our current techniques stand up? Copy your best model architecture over from the <a href="2_mnist_model.ipynb">MNIST models lab</a> and see how well it does after training for 5 epochs of 50 steps.
#
# **TODO 2.a** Copy over the most accurate model from 2_mnist_model.ipynb or build a new CNN Keras model.
# +
# Evaluation split CSV (train_path was defined in an earlier cell).
eval_path = "gs://cloud-ml-data/img/flower_photos/eval_set.csv"
nclasses = len(CLASS_NAMES)
# Hyperparameters carried over from the MNIST CNN baseline.
hidden_layer_1_neurons = 400
hidden_layer_2_neurons = 100
dropout_rate = 0.25
num_filters_1 = 64
kernel_size_1 = 3
pooling_size_1 = 2
num_filters_2 = 32
kernel_size_2 = 3
pooling_size_2 = 2
# Two conv/pool stages followed by a small fully-connected head.
layers = [
    Conv2D(num_filters_1, kernel_size=kernel_size_1,
           activation='relu',
           input_shape=(IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS)),
    MaxPooling2D(pooling_size_1),
    Conv2D(num_filters_2, kernel_size=kernel_size_2,
           activation='relu'),
    MaxPooling2D(pooling_size_2),
    Flatten(),
    Dense(hidden_layer_1_neurons, activation='relu'),
    Dense(hidden_layer_2_neurons, activation='relu'),
    Dropout(dropout_rate),
    Dense(nclasses),
    Softmax()
]
old_model = Sequential(layers)
# categorical_crossentropy matches the one-hot labels built by decode_csv.
old_model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'])
train_ds = load_dataset(train_path, BATCH_SIZE)
eval_ds = load_dataset(eval_path, BATCH_SIZE, training=False)
# -
# fit_generator is deprecated in TF2: Model.fit accepts tf.data datasets
# directly (and is what the transfer-learning model later in this notebook
# already uses).
old_model.fit(
    train_ds,
    epochs=5,
    steps_per_epoch=5,
    validation_data=eval_ds,
    validation_steps=VALIDATION_STEPS
)
# If your model is like mine, it learns a little bit, slightly better than random, but *ugh*, it's too slow! With a batch size of 32, 5 epochs of 5 steps is only getting through about a quarter of our images. Not to mention, this is a much larger problem than MNIST, so wouldn't we need a larger model? But how big do we need to make it?
#
# Enter Transfer Learning. Why not take advantage of someone else's hard work? We can take the layers of a model that's been trained on a similar problem to ours and splice it into our own model.
#
# [Tensorflow Hub](https://tfhub.dev/s?module-type=image-augmentation,image-classification,image-others,image-style-transfer,image-rnn-agent) is a database of models, many of which can be used for Transfer Learning. We'll use a model called [MobileNet](https://tfhub.dev/google/imagenet/mobilenet_v2_035_224/feature_vector/4) which is an architecture optimized for image classification on mobile devices, which can be done with [TensorFlow Lite](https://github.com/tensorflow/hub/blob/master/examples/colab/tf2_image_retraining.ipynb). Let's compare how a model trained on [ImageNet](http://www.image-net.org/) data compares to one built from scratch.
#
# The `tensorflow_hub` python package has a function to include a Hub model as a [layer in Keras](https://www.tensorflow.org/hub/api_docs/python/hub/KerasLayer). We'll set the weights of this model as un-trainable. Even though this is a compressed version of full scale image classification models, it still has over four hundred thousand parameters! Training all these would not only add to our computation, but it is also prone to over-fitting. We'll add some L2 regularization and Dropout to prevent that from happening to our trainable weights.
#
# **TODO 2.b**: Add a Hub Keras Layer at the top of the model using the handle provided.
# +
# MobileNet V2, width multiplier 1.0, 224x224 input, exposed as a feature
# vector (the original ImageNet classification head is stripped off).
module_selection = "mobilenet_v2_100_224"
module_handle = "https://tfhub.dev/google/imagenet/{}/feature_vector/4" \
    .format(module_selection)
transfer_model = tf.keras.Sequential([
    # Frozen pre-trained feature extractor from TF Hub.
    hub.KerasLayer(module_handle, trainable=False),
    tf.keras.layers.Dropout(rate=0.2),
    # Trainable classification head: one unit per flower class, with L2
    # regularization to curb over-fitting of the new weights.
    tf.keras.layers.Dense(
        nclasses,
        activation='softmax',
        kernel_regularizer=tf.keras.regularizers.l2(0.0001))
])
# Build with an explicit input shape so summary() can report parameter counts.
transfer_model.build((None,)+(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
transfer_model.summary()
# -
# Even though we're only adding one more `Dense` layer in order to get the probabilities for each of the 5 flower types, we end up with over six thousand parameters to train ourselves. Wow!
#
# Moment of truth. Let's compile this new model and see how it compares to our MNIST architecture.
# +
transfer_model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
train_ds = load_dataset(train_path, BATCH_SIZE)
eval_ds = load_dataset(eval_path, BATCH_SIZE, training=False)
# -
transfer_model.fit(
train_ds,
epochs=5,
steps_per_epoch=5,
validation_data=eval_ds,
validation_steps=VALIDATION_STEPS
)
# Alright, looking better!
#
# Still, there's clear room to improve. Data bottlenecks are especially prevalent with image data due to the size of the image files. There's much to consider such as the computation of augmenting images and the bandwidth to transfer images between machines.
#
# Think life is too short, and there has to be a better way? In the next lab, we'll blast away these problems by developing a cloud strategy to train with TPUs!
#
# ## Bonus Exercise
#
# Keras has a [local way](https://keras.io/models/sequential/) to do distributed training, but we'll be using a different technique in the next lab. Want to give the local way a try? Check out this excellent [blog post](https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly) to get started. Or want to go full-blown Keras? It also has a number of [pre-trained models](https://keras.io/applications/) ready to use.
# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| courses/machine_learning/deepdive2/image_classification/solutions/3_tf_hub_transfer_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from bert_serving.client import BertClient
import datetime
bc = BertClient()
m = '2'
def trigger(m):
    """Read the sentences for cutoff *m* from ./why_merged_<m>_set.tsv.

    Args:
        m: cutoff identifier (string) interpolated into the file name.

    Returns:
        List of lines with their trailing newline removed.
    """
    file_dir = './why_merged_' + m + '_set.tsv'
    sentences = []
    with open(file_dir, 'r') as f:
        for line in f:
            # rstrip('\n') instead of the original line[:-1]: slicing off
            # the last character silently truncated the final line when
            # the file had no trailing newline.
            sentences.append(line.rstrip('\n'))
    return sentences
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
node_feat_vec_H0 = bc.encode(trigger(m))
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# -
node_feat_vec_H0.tofile('./node_feat_vec_H0_cutoff_' + m + '.txt')
print(node_feat_vec_H0.shape)
# +
m = '3'
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
node_feat_vec_H0 = bc.encode(trigger(m))
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
node_feat_vec_H0.tofile('./node_feat_vec_H0_cutoff_' + m + '.txt')
print(node_feat_vec_H0.shape)
# -
| node_feat_vec_H0_c2_gen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <NAME> - UPV/EHU 2016
# +
##################################################################################################################
### Misc utilities:
##################################################################################################################
#-------------------------------------------------------------------
def scale_image(scale, *args):
    """
    Convert between image scales (pixels / arcsec / Mpc) at a redshift.

    If scale == 'mpc', *args: img_z, img_pix, img_mpc
    If scale == 'arcsec', *args: img_z, img_pix, img_arcsec

    Returns (sdss_scale, img_arcsec, img_arcmin, img_mpc), where
    sdss_scale is arcseconds per pixel.
    """
    arcsecs_in_rad = 360.*3600./(2.*np.pi)
    if scale == 'mpc':
        img_z, img_pix, img_mpc = args
        # Angular diameter distance (Mpc) in the Planck13 cosmology.
        ad_dist = Planck13.angular_diameter_distance(img_z).value
        img_rads = img_mpc/ad_dist
        img_arcsec = img_rads*arcsecs_in_rad
    elif scale == 'arcsec':
        img_z, img_pix, img_arcsec = args
        img_rads = img_arcsec/arcsecs_in_rad
        ad_dist = Planck13.angular_diameter_distance(img_z).value
        img_mpc = img_rads*ad_dist
    # -1 appears to be an upstream sentinel for "redshift unknown" —
    # TODO confirm against callers.
    if img_z == -1.:
        print '*** ERROR: wrong redshift equal to -1.'
    img_arcmin = img_arcsec/60.
    sdss_scale = img_arcsec/img_pix
    return sdss_scale, img_arcsec, img_arcmin, img_mpc
#-------------------------------------------------------------------
def distances_image(N_pix):
    """Return an (N_pix, N_pix) array of Euclidean distances to the image centre."""
    rows, cols = np.ogrid[0:N_pix, 0:N_pix]
    d_row = 1.*rows - N_pix/2.
    d_col = 1.*cols - N_pix/2.
    return np.sqrt(d_row**2 + d_col**2)
##################################################################################################################
### Image manipulation utilities:
##################################################################################################################
#-------------------------------------------------------------------
def crop_image(image_data, img_mpc):
    """Crop a rectangular image to a centred square along its long axis.

    Returns (cropped_image, img_mpc_size), where img_mpc_size is the
    physical size rescaled for the cropped axis.
    """
    x_pix, y_pix = image_data.shape
    # NOTE(review): under Python 2 these are integer divisions, so this
    # ratio check only fires when one side is at least twice the other —
    # confirm whether a float ratio was intended.
    if (x_pix/y_pix > 1.1) or (y_pix/x_pix > 1.1):
        print '***WARNING: image ratio > 1.1'
    # NOTE(review): np.sort is ascending, so `long_pix` actually receives
    # the SMALLER side and the ratio below is long/short (> 1) — confirm
    # the intended scaling is not short/long.
    long_pix, short_pix = np.sort([x_pix, y_pix])
    img_mpc_size = img_mpc*(short_pix/long_pix)
    diff = np.abs(x_pix - y_pix)
    if x_pix > y_pix:
        # Trim evenly from both ends (one extra row at the start on odd diffs).
        image_data = image_data[diff/2 + diff%2 : x_pix - diff/2,]
    elif y_pix > x_pix:
        image_data = image_data[:,diff/2 + diff%2 : y_pix - diff/2]
    return image_data, img_mpc_size
#-------------------------------------------------------------------
def zoom_image_data(image_data, final_size, img_mpc):
    """Resample an image to (final_size, final_size), cropping to square first.

    Returns (zoomed_image, img_mpc_size); img_mpc_size is adjusted when a
    non-square input had to be cropped.
    """
    x_pix, y_pix = image_data.shape
    if x_pix != y_pix:
        print '***WARNING: original fits dimensions: %i x %i' % (x_pix, y_pix)
        image_data, img_mpc_size = crop_image(image_data, img_mpc)
        x_pix, y_pix = image_data.shape
        print 'Fixed: final fits dimensions: %i x %i' % (x_pix, y_pix)
    else:
        img_mpc_size = img_mpc
    total_flux = image_data.sum()
    #print 'Total flux: ', total_flux
    # Spline interpolation per axis; total flux is NOT renormalized
    # afterwards (that line is commented out below).
    image_data_zoom = ndimage.interpolation.zoom(image_data,(1.*final_size/x_pix, 1.*final_size/y_pix))
    zoom_flux = image_data_zoom.sum()
    #image_data_zoom = image_data_zoom*1.*total_flux/zoom_flux
    x_pix, y_pix = image_data_zoom.shape
    #print 'Zoomed fits dimensions: %i x %i' % (x_pix, y_pix)
    if (x_pix != final_size) | (y_pix != final_size):
        print '***WARNING! Final fits dimensions: %i x %i' % (x_pix, y_pix)
    #final_flux = zoom_image.sum()
    #print 'Zoomed flux: ', final_flux
    return image_data_zoom, img_mpc_size
#-------------------------------------------------------------------
def check_for_nan(data, stop=False):
    """Detect NaNs in `data`; abort the whole program when `stop` is True.

    NOTE(review): returns True when NaNs are found with stop=False, but
    implicitly returns None when no NaN is present — callers should treat
    the result as truthy/falsy only. Also `&` is used here where logical
    `and` was presumably intended (works because both sides are scalars).
    """
    if np.isnan(data).any() & stop:
        sys.exit("***ERROR: NaN found")
    elif np.isnan(data).any() & (not stop):
        print '***WARNING: NaN found'
        return True
#-------------------------------------------------------------------
def convolve_image(image_data, size_kernel=5.):
    """Smooth an image by convolving it with a Gaussian kernel.

    http://astropy.readthedocs.io/en/latest/convolution/index.html
    """
    # Other kernels tried and left commented out in the original:
    # AiryDisk2DKernel, MexicanHat2DKernel, Tophat2DKernel.
    smoothing_kernel = Gaussian2DKernel(size_kernel)
    # 'extend' pads with edge values so borders are not darkened.
    return convolve(image_data, smoothing_kernel, boundary='extend')
#-------------------------------------------------------------------
def apply_aperture_mask(image_data, aperture=1., inverse=False):
    """Zero pixels outside (or, with inverse=True, inside) a central ellipse.

    The ellipse spans `aperture` times the image half-axes. The array is
    modified in place and also returned.
    """
    n_rows, n_cols = image_data.shape
    row_idx, col_idx = np.ogrid[0:n_rows, 0:n_cols]
    radius_sq = (row_idx - n_rows / 2) ** 2 + (col_idx - n_cols / 2) ** 2
    outside = radius_sq > n_rows * n_cols / 4 * aperture ** 2
    if inverse:
        image_data[~outside] = 0.
    else:
        image_data[outside] = 0.
    return image_data
#-------------------------------------------------------------------
def fft_filtering(image_data, filter_aperture=0.5, inverse=False):
    """Low-pass (or, with inverse=True, high-pass) filter an image in Fourier space."""
    # Forward 2-D FFT of the image.
    spectrum = fftpack.fft2(image_data)
    # Shift quadrants so low spatial frequencies sit at the centre.
    centred = fftpack.fftshift(spectrum)
    # Zero out frequencies outside (or inside) the chosen aperture.
    filtered = apply_aperture_mask(centred, filter_aperture, inverse)
    # Back-transform; the magnitude discards residual imaginary parts.
    return np.abs(fftpack.ifft2(filtered))
##################################################################################################################
### Filtering utilities:
##################################################################################################################
#-------------------------------------------------------------------
def create_antenna(img_pix, fwhm, display=False):
    """Create a Gaussian antenna (beam), its FFT and its radial power profile.

    Args:
        img_pix: image side length in pixels.
        fwhm: full width at half maximum of the Gaussian, in pixels.
        display: if True, plot the antenna, its FFT and its power profile.

    Returns:
        (antenna, fft_antenna, power_antenna)
    """
    distances = distances_image(img_pix)
    ## Antenna in real space
    # FWHM -> Gaussian sigma: sigma = FWHM / (2*sqrt(2*ln 2)).
    sigma = (fwhm/2.0)/(np.sqrt(2.0*np.log(2.0)))
    antenna = np.exp(-distances**2/(2.0*(sigma**2)))
    # NOTE(review): `normalization` is computed but the normalization line
    # below is commented out, so the antenna peak stays at 1.
    normalization = np.sum(antenna)
    #antenna = antenna/normalization
    ## FFT antenna
    fft_antenna = fftpack.fft2(antenna)
    fft_antenna = fft_antenna/(np.abs(fft_antenna[0,0]))
    ## Power of antenna
    # Radially average |FFT|^2 in integer-distance annuli.
    ff = np.array(distances, dtype=np.int)
    nf = np.int(np.sqrt(2.0*((img_pix/2.0)**2)))
    power_antenna = np.arange(0., nf, 1.)
    for i in range(nf):
        p_mask = ff==i
        power_antenna[i] = np.mean(np.abs(fft_antenna[p_mask])**2.)
    if display:
        print '\nAntenna:'
        print 'fwhm : %i pixels' % fwhm
        print 'Npix_side : %i pixels' % img_pix
        fig = plt.figure(figsize=(10,4))
        ax1 = fig.add_subplot(131)
        ax1.imshow(antenna)
        ax1.set_title('antenna')
        ax2 = fig.add_subplot(132)
        ax2.imshow(np.abs(fft_antenna))
        ax2.set_title('FFT antenna')
        ax3 = fig.add_subplot(133)
        ax3.plot(range(nf), power_antenna)
        #ax3.set_xscale('log')
        ax3.set_yscale('log')
        ax3.set_title('antenna power')
        fig.tight_layout()
        plt.show()
        plt.close(fig)
    return antenna, fft_antenna, power_antenna
#-------------------------------------------------------------------
def create_MHW(img_pix, MHW_scale, display=False):
    """Create a Mexican Hat Wavelet (MHW), its FFT and its radial power profile.

    Args:
        img_pix: image side length in pixels.
        MHW_scale: wavelet scale in pixels (divides the distance grid).
        display: if True, plot the wavelet, its FFT and its power profile.

    Returns:
        (MHW, fft_MHW, power_MHW)
    """
    distances = distances_image(img_pix)/MHW_scale
    ## MHW in real space
    # 2-D Mexican hat profile: (2 - r^2) * exp(-r^2 / 2).
    MHW = (2.0 - distances**2)*np.exp((-distances**2)/2.0)
    # Unlike create_antenna, the normalization IS applied here.
    normalization = np.sum(MHW)
    MHW = MHW/normalization
    ## FFT MHW
    fft_MHW = fftpack.fft2(MHW)
    fft_MHW = fftpack.fftshift(fft_MHW)
    #fft_MHW = fft_MHW/(np.abs(fft_MHW[0,0]))
    ## Power of MHW
    # Radially average |FFT|^2 in integer-distance annuli.
    ff = np.array(distances, dtype=np.int)
    nf = np.int(np.sqrt(2.0*((img_pix/2.0)**2)))
    power_MHW = np.arange(0., nf, 1.)
    for i in range(nf):
        p_mask = ff==i
        power_MHW[i] = np.mean(np.abs(fft_MHW[p_mask])**2.)
    if display:
        print '\nMHW:'
        print 'scale : %i pixels' % MHW_scale
        print 'Npix_side : %i pixels' % img_pix
        fig = plt.figure(figsize=(10,4))
        ax1 = fig.add_subplot(131)
        ax1.imshow(MHW)
        ax1.set_title('MHW')
        ax2 = fig.add_subplot(132)
        ax2.imshow(np.abs(fft_MHW))
        ax2.set_title('FFT MHW')
        ax3 = fig.add_subplot(133)
        ax3.plot(range(nf), power_MHW)
        #ax3.set_xscale('log')
        ax3.set_yscale('log')
        ax3.set_title('MHW power')
        fig.tight_layout()
        plt.show()
        plt.close(fig)
    return MHW, fft_MHW, power_MHW
##################################################################################################################
### Image normalization utilities:
##################################################################################################################
#-------------------------------------------------------------------
def norm_image_absolute(image_data, norm_down=1., norm_up=np.inf):
    """Shift the image so its minimum equals `norm_down`, then clip at `norm_up`."""
    shifted = image_data - image_data.min() + norm_down
    shifted[shifted > norm_up] = norm_up
    return shifted
##################################################################################################################
### Visualization utilities:
##################################################################################################################
#-------------------------------------------------------------------
def plot_image_data(image_data, lognorm=True):
    """Render a 2-D array as a borderless grayscale figure, one pixel per dot."""
    my_dpi = 100
    n_x, n_y = image_data.shape
    # Figure sized so the array maps 1:1 onto screen pixels at this DPI.
    fig = plt.figure(figsize=(1.*n_x/my_dpi, 1.*n_y/my_dpi), dpi=my_dpi)
    fig.add_axes([0., 0., 1., 1.])
    plt.axis('off')
    if lognorm:
        plt.imshow(image_data, cmap='gray', norm=LogNorm())
    else:
        plt.imshow(image_data, cmap='gray')
    return fig
#-------------------------------------------------------------------
def plot_spectrum_data(image_data):
    """Plot the log10 power spectrum of an image, low frequencies centred."""
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_axes([0., 0., 1., 1.])
    # FFT, then shift so the zero-frequency component sits at the centre.
    centred_spectrum = fftpack.fftshift(fftpack.fft2(image_data))
    ax.imshow(np.log10(np.abs(centred_spectrum)**2))
    return fig
#-------------------------------------------------------------------
def plot_histogram(image_data, lognorm=False, norm_min=1., norm_max=20.):
    """Plot a density histogram of pixel intensities (optionally of their log).

    NOTE(review): `hist` here is presumably astropy.visualization's hist
    (it accepts bins='freedman'), and `normed=` is the legacy Matplotlib
    density argument — confirm against the notebook's imports/versions.
    """
    figsizex, figsizey = 5, 4
    fig = plt.figure(figsize=(figsizex, figsizey))
    ax = fig.add_subplot(111)
    # Automatic bin-width rule; 'scott' was tried and left commented out.
    #bin_type = 'scott'
    bin_type = 'freedman'
    if not lognorm:
        hist(image_data, bins=bin_type, histtype='stepfilled', alpha=0.2, normed=True)
    elif lognorm:
        hist(np.log(image_data), bins=bin_type, histtype='stepfilled', alpha=0.2, normed=True)
    ax.set_xlabel('DI')
    ax.set_ylabel('P(DI)')
    plt.xlim([norm_min - 0.5, norm_max + 0.5])
    plt.title('Light histogram')
    plt.tight_layout()
    return fig
| Python_scripts/Utilities_images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multiset
#
# ### _aka_ bag, mset
#
# ### _pl._ multizbiór, wielozbiór
# **Plan**
#
# 1. Definition of multiset
#
# 2. Implementation of multiset
#
# 3. Use cases of multiset
# # Definition
# It's "a modification of the concept of a set that, unlike a set, **allows for multiple instances for each of its elements**". [[source](https://en.wikipedia.org/wiki/Multiset)]
# But what does _instance of an element_ mean? "Instance" is not a programming term here. It means -- more or less -- occurrence, which... also is not very helpful. So let's say that two instances/occurrences of an element mean that both are **equal to each other**.
# +
# two occurrences of `1` are... `1` and `1`, because:
assert 1 == 1
# ;D
# -
# To put it simply: **multiset is a set that can keep multiple equal elements**.
# So this is a multiset: `⟨1, 1, 2⟩`.
#
# Also, multisets written this way: `⟨1, 1, 2⟩`, `⟨1, 2, 1⟩`, `⟨2, 1, 1⟩`, are identical.
# # Implementation
# What about the implementation?
#
# In terms of Python's collections, multiset is similar to:
#
# - `set` (as keeping insertion order is not required)
#
# - `list`/`tuple` (as both keep multiple "equal" elements)
# ## Multiset as `list` / `tuple`
# First option: `list` (or `tuple`). It keeps insertion order, but it is not a requirement for multiset not to keep it.
# So `[1, 1, 2, 2, 3]` and `[3, 1, 1, 2, 2]` would be both identical multisets.
#
# **Problems**
#
# 1. There is no out-of-the-box way to check for identity (`[1, 2, 1] == [1, 1, 2]` will return `False`).
#
# 2. We don't have any (optimized) set operations out-of-the-box.
#
# 3. We cannot benefit from the set's great feature: constant time (`O(1)`) membership checking.
#
# So generally implementing multiset using `list` or `tuple` sux.
# ## Multiset as `dict`
# Let's try a different approach: a dict with key of multiset element and value of list of all equal elements for a key.
#
# So multiset of `[42, 42, 51, 51, 60]` would be:
{
42: [42, 42],
51: [51, 51],
60: [60],
}
# But why bother building a list of repeated identical elements if we can keep only a count of them.
#
# In this implementation multiset of `[41, 41, 52, 52, 60]` would be:
{
41: 2,
52: 2,
60: 1,
}
# We would increment the count on adding new element to multiset and decrement it on removing.
# ## Multiset as `Counter`
# It turns out that we already have this kind of datatype in Python: `Counter`.
from collections import Counter
# +
my_fruit = ['apple', 'banana', 'pear', 'apple', 'apple']
my_fruit_counter = Counter(my_fruit)
my_fruit_counter
# -
my_fruit_counter['banana'] += 4
my_fruit_counter
'pear' in my_fruit_counter
# ### Constant time (`O(1)`) membership checking
large_counter = Counter(range(10**6 + 1))
number = 10**6
# %timeit number in large_counter
# In fact `Counter` inherits from `dict`, so it's not surprising ;)
# compared to list
large_list = list(range(10**6 + 1))
number = 10**6
# %timeit number in large_list
# ### Counter operations
apple_apple_pear_banana = Counter('apple apple pear banana'.split())
apple_apple_pear_banana
pear_pear_orange = Counter('pear pear orange'.split())
pear_pear_orange
# #### Equalily
apple_apple_pear_banana == pear_pear_orange
apple_apple_pear_banana == Counter('pear banana apple apple'.split())
# #### Add
#
# Add counts from two counters.
apple_apple_pear_banana + pear_pear_orange
# #### Subtract
#
# Subtract count, but keep only results with positive counts.
apple_apple_pear_banana - pear_pear_orange
pear_pear_orange - apple_apple_pear_banana
# #### Union
#
# Union is the maximum of value in either of the input counters.
apple_apple_pear_banana | pear_pear_orange
# #### Intersection
#
# Intersection is the minimum of corresponding counts.
apple_apple_pear_banana & pear_pear_orange
# #### Update
#
# Like `dict.update()` but add counts instead of replacing them.
#
c = Counter('Monty')
c
c.update('Python')
c
# #### Enumerating all elements
list((apple_apple_pear_banana.elements()))
# #### Most common elements
c = Counter('Do you use static type hints in your code?')
c.most_common()
c.most_common(3)
# ### Counter pros and cons
# #### Pros
#
# - blazingly fast membership checking (vs list/tuple)
#
# - equality checking (vs list/tuple)
#
# - additional operations: `+`, `-`, `&`, `|` (vs dict)
#
# #### Cons
#
# - Counter is a dict, so we cannot store there elements that are not hashable (vs list/tuple).
#
# - It's useless when we want to store all occurrences of equal elements (vs dict).
# So what if we want to store all occurrences of equal elements?
class Person:
    """A person identified by (id_, name); nationality is informational only.

    Equality and hashing deliberately ignore `nationality`, so two records
    for the same person with different nationalities compare equal and
    collapse into a single Counter/dict key (as the notebook demonstrates).
    """

    def __init__(self, id_, name, nationality):
        self.id_ = id_
        self.name = name
        self.nationality = nationality

    def __hash__(self):
        # Must stay consistent with __eq__: hash only the identity fields.
        return hash((self.id_, self.name))

    def __eq__(self, other):
        if isinstance(other, Person):
            return self.id_ == other.id_ and self.name == other.name
        # NotImplemented (rather than False) lets Python try the reflected
        # comparison on `other` — the conventional __eq__ protocol.
        return NotImplemented

    def __repr__(self):
        return f'Person({self.id_}, {self.name}, {self.nationality})'
p1 = Person(id_=1, name='Bob', nationality='US')
p2 = Person(id_=1, name='Bob', nationality='UK')
p3 = Person(id_=2, name='Kasia', nationality='PL')
p4 = Person(id_=3, name='Taras', nationality='UA')
Counter([p1, p2, p3, p4])
# ## Multiset with many occurrences of equal elements
#
# Wrapper on `defaultdict`.
# +
from collections import defaultdict
class Multiset:
    """A bag that keeps every occurrence of equal elements, bucketed by hash."""

    def __init__(self, items=None):
        self.__data = defaultdict(list)
        for item in (items or []):
            self.__data[hash(item)].append(item)

    def __repr__(self):
        return f'Multiset({dict(self.__data)})'

    def __contains__(self, other):
        # Check bucket contents rather than just key presence: a bucket may
        # hold only colliding elements (or, before this fix, be left empty
        # by remove()), and list.__contains__ does the real equality test.
        return other in self.__data.get(hash(other), [])

    def __eq__(self, other):
        if not isinstance(other, Multiset):
            # Previously comparing with a non-Multiset raised AttributeError.
            return NotImplemented
        return self.__data == other.__data

    def update(self, items):
        for item in items:
            self.__data[hash(item)].append(item)

    def remove(self, item):
        bucket = self.__data[hash(item)]
        bucket.remove(item)
        # Drop empty buckets: the original left `{hash: []}` behind, making
        # `in` and `==` report phantom entries after the last occurrence of
        # an element was removed.
        if not bucket:
            del self.__data[hash(item)]
# -
Multiset('abracadabra')
10 in Multiset([5, 7, 10])
m1 = Multiset([10, 21, 21, 32])
m2 = Multiset([10, 21, 21, 32])
m1 == m2
print(p1)
print(p2)
print(p3)
print(p4)
p1 == p2
people = Multiset([p1])
people
people.update([p2])
people
people.update([p3])
people
people.remove(p1)
people
people.remove(p2)
people
# ### Pros and cons
#
# #### Pros
#
# - Fast membership checking.
#
# - Equality checking.
#
# - We can store multiple occurrences of equal items
#
# #### Cons
#
# - No set operations included -- we need to add them ourselves.
#
# - This one is a dict too -- we cannot store there elements that are not hashable (vs list/tuple).
# There is a multiset library: https://github.com/wheerd/multiset, but I did not test it (7 stars...).
# ## Multidict real-life use cases
# ### Counter
# #### Word count
latin_text = """
Respondeo dicendum quod Deum esse quinque viis probari potest. Prima autem et manifestior via est, quae sumitur ex parte motus. Certum est enim, et sensu constat, aliqua moveri in hoc mundo. Omne autem quod movetur, ab alio movetur. Nihil enim movetur, nisi secundum quod est in potentia ad illud ad quod movetur, movet autem aliquid secundum quod est actu. Movere enim nihil aliud est quam educere aliquid de potentia in actum, de potentia autem non potest aliquid reduci in actum, nisi per aliquod ens in actu, sicut calidum in actu, ut ignis, facit lignum, quod est calidum in potentia, esse actu calidum, et per hoc movet et alterat ipsum. Non autem est possibile ut idem sit simul in actu et potentia secundum idem, sed solum secundum diversa, quod enim est calidum in actu, non potest simul esse calidum in potentia, sed est simul frigidum in potentia. Impossibile est ergo quod, secundum idem et eodem modo, aliquid sit movens et motum, vel quod moveat seipsum. Omne ergo quod movetur, oportet ab alio moveri. Si ergo id a quo movetur, moveatur, oportet et ipsum ab alio moveri et illud ab alio. Hic autem non est procedere in infinitum, quia sic non esset aliquod primum movens; et per consequens nec aliquod aliud movens, quia moventia secunda non movent nisi per hoc quod sunt mota a primo movente, sicut baculus non movet nisi per hoc quod est motus a manu. Ergo necesse est devenire ad aliquod primum movens, quod a nullo movetur, et hoc omnes intelligunt Deum. Secunda via est ex ratione causae efficientis. Invenimus enim in istis sensibilibus esse ordinem causarum efficientium, nec tamen invenitur, nec est possibile, quod aliquid sit causa efficiens sui ipsius; quia sic esset prius seipso, quod est impossibile. Non autem est possibile quod in causis efficientibus procedatur in infinitum. 
Quia in omnibus causis efficientibus ordinatis, primum est causa medii, et medium est causa ultimi, sive media sint plura sive unum tantum, remota autem causa, removetur effectus, ergo, si non fuerit primum in causis efficientibus, non erit ultimum nec medium. Sed si procedatur in infinitum in causis efficientibus, non erit prima causa efficiens, et sic non erit nec effectus ultimus, nec causae efficientes mediae, quod patet esse falsum. Ergo est necesse ponere aliquam causam efficientem primam, quam omnes Deum nominant. Tertia via est sumpta ex possibili et necessario, quae talis est. Invenimus enim in rebus quaedam quae sunt possibilia esse et non esse, cum quaedam inveniantur generari et corrumpi, et per consequens possibilia esse et non esse. Impossibile est autem omnia quae sunt, talia esse, quia quod possibile est non esse, quandoque non est. Si igitur omnia sunt possibilia non esse, aliquando nihil fuit in rebus. Sed si hoc est verum, etiam nunc nihil esset, quia quod non est, non incipit esse nisi per aliquid quod est; si igitur nihil fuit ens, impossibile fuit quod aliquid inciperet esse, et sic modo nihil esset, quod patet esse falsum. Non ergo omnia entia sunt possibilia, sed oportet aliquid esse necessarium in rebus. Omne autem necessarium vel habet causam suae necessitatis aliunde, vel non habet. Non est autem possibile quod procedatur in infinitum in necessariis quae habent causam suae necessitatis, sicut nec in causis efficientibus, ut probatum est. Ergo necesse est ponere aliquid quod sit per se necessarium, non habens causam necessitatis aliunde, sed quod est causa necessitatis aliis, quod omnes dicunt Deum. Quarta via sumitur ex gradibus qui in rebus inveniuntur. Invenitur enim in rebus aliquid magis et minus bonum, et verum, et nobile, et sic de aliis huiusmodi. Sed magis et minus dicuntur de diversis secundum quod appropinquant diversimode ad aliquid quod maxime est, sicut magis calidum est, quod magis appropinquat maxime calido. 
Est igitur aliquid quod est verissimum, et optimum, et nobilissimum, et per consequens maxime ens, nam quae sunt maxime vera, sunt maxime entia, ut dicitur II Metaphys. Quod autem dicitur maxime tale in aliquo genere, est causa omnium quae sunt illius generis, sicut ignis, qui est maxime calidus, est causa omnium calidorum, ut in eodem libro dicitur. Ergo est aliquid quod omnibus entibus est causa esse, et bonitatis, et cuiuslibet perfectionis, et hoc dicimus Deum. Quinta via sumitur ex gubernatione rerum. Videmus enim quod aliqua quae cognitione carent, scilicet corpora naturalia, operantur propter finem, quod apparet ex hoc quod semper aut frequentius eodem modo operantur, ut consequantur id quod est optimum; unde patet quod non a casu, sed ex intentione perveniunt ad finem. Ea autem quae non habent cognitionem, non tendunt in finem nisi directa ab aliquo cognoscente et intelligente, sicut sagitta a sagittante. Ergo est aliquid intelligens, a quo omnes res naturales ordinantur ad finem, et hoc dicimus Deum.
""".split()
c = Counter(latin_text)
c.most_common(15)
# #### Shopping cart
Counter('butter chocolate chocolate milk milk chocolate butter'.split())
# +
# merging carts can be useful
Counter('butter milk milk chocolate butter'.split()) + Counter('chocolate chocolate chocolate'.split())
# -
# http://www.java2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm ???
# ### Multidict with copies
# #### When we want to store multiple hashable elements with a mutable part.
...
| multiset_lightning_talk.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nivapy3 as nivapy
import pandas as pd
# # Update ICPW "trends" stations for Norway
#
# Norwegian ICPW sites are automatically updated as data are approved by the lab. However, for the "trends" work, we created a separate set of 83 Norwegian sites, which must be updated manually. This notebook exports the relevant data to CSV.
eng = nivapy.da.connect()
# Period of interest
st_dt = '2017-01-01'
end_dt = '2019-12-31'
# +
# Read station details for "trends" sites
stn_df = pd.read_excel(r"../../../all_icpw_sites_may_2020.xlsx", sheet_name="all_icpw_stns")
nor_df = stn_df.query("country == 'Norway'")
# Get the original station codes by removing "Tr18_NO_"
code_list = list(nor_df['station_code'].str[8:])
print(len(nor_df))
nor_df.head()
# -
# Get all RESA stations
resa_df = nivapy.da.select_resa_stations(eng)
resa_df = resa_df.query("station_code in @code_list")
print(len(resa_df))
resa_df.head()
# Get all available parameters
par_df = nivapy.da.select_resa_station_parameters(resa_df, st_dt, end_dt, eng)
par_df.head()
# Get chemistry data
wc_df, dup_df = nivapy.da.select_resa_water_chemistry(resa_df, par_df, st_dt, end_dt, eng, drop_dups=True, lod_flags=True)
wc_df.head()
# +
# Filter data
# Just surface samples
wc_df = wc_df.query("(depth1 < 1) and (depth2 < 1)")
# Ignore mixed samples
wc_df = wc_df.query("depth1 == depth2")
# If still duplicates, choose shallowest
wc_df.sort_values(['depth1'], inplace=True)
wc_df.drop_duplicates(['station_code', 'sample_date'], keep='first', inplace=True)
# -
# Tidy
wc_df['code'] = "Tr18_NO_" + wc_df['station_code']
wc_df.drop(['station_id', 'station_code', 'station_name', 'depth1', 'depth2'], axis='columns', inplace=True)
wc_df.to_csv('out.csv')
| extract_norway_icpw_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Detail about Legendre Polynomial Smoothing
# ========
#
# For the curious who have good mathematics, basic information is supplied here, but this is not needed for the other sections. It is a variation on the previous linear regression where we use the same linear functional form
# $$f(x) ~=~ \sum_{i=0}^{I} a_i L_i(x)$$
# where $L_i(x)$ is the $i$-th Legendre polynomial (which is an $i$-order polynomial).
# Legendre polynomials are used instead of simple polynomials since they're a better basis set: simple powers have extreme values, and interact badly.
#
# Moreover, to fit $f(x)$ we minimise the augmented squared error
# $$\frac{1}{\sigma^2}\sum_{i=1}^N (y_i-f(x_i))^2 + \lambda {\large\int}_x (f''(x))^2 \mbox{d}x$$
# where the second term involves the second derivative of $f()$ and induces *smoothness* in the resulting polynomial because it represents the mean square curvature.
# Bayesian theory is used to set the smoothing hyperparameter $\lambda$. The second term says, "don't let the function $f()$ change too quickly." The smoothing integral evaluates to be a sparse quadratic form in the parameters $(a_1,...,a_I)$.
#
# Discussion
# ------
#
# Now is this a good general purpose fitting routine in 2-D? Certainly not always. In fact, it's not even clear that such a thing as a "general purpose routine" exists. Consider the following scenarios:
# * you're modelling exchange rate data at 5-minute intervals which can have wild changes;
# * you're modelling a fractal function, which means no matter what scale you fit, it seems somewhat the same,
# * you're modelling an industrial process known to undergo "phase changes" at different inputs, so occasional stark changes are expected.
#
# Moreover, the celebrated ["no free lunch theorem"](https://en.wikipedia.org/wiki/No_free_lunch_theorem) (NFLT) says, roughly,
# > if an algorithm performs well on a certain class of problems then it necessarily pays for that with degraded performance on the set of all remaining problems
#
# Clearly, our "smoothing" is not intrinsically useful in the contexts above, and indeed there must be other contexts where it cannot do as well too, by the NFLT. You may see in some examples that it does seem to make things a bit too smooth, for instance trying to smooth out peaks.
#
# Initialise
# ---------
#
# First we reinitialise things again.
# +
# put the pieces together, sin(x) + noise + basic regression
import sys
import os
import numpy
sys.path.append(os.getcwd())
import regressiondemo as rd
# %matplotlib inline
import matplotlib.pyplot as pl
rd.setSigma(0.2)
# don't make points more than 100 as demo is O(points^3)
points = 30
x = rd.makeX(points)
# xts and yts store the "true" function for the purposes of plotting
# these have to be high frequency to make the resultant plot look
# like a smooth curve
xts = rd.makeX(200,uniform=True)
# -
# The Basis Functions
# ----------
#
# For those who want a glimpse of the gory details, read on.
#
# We plot below a few basis functions. The first plot is the Legendre polynomials, which are rather like sin curves but modified so they fit into a finite range of [-1,1]. The final plot is the actual polynomial bases used by the regression routine. Note the higher order bases have been scaled to be so small that they look flat, but they are actually very curvy, as in the second plot. The effect of scaling is that in the final polynomials have no sharp deviations of curvature left.
# +
# CHOOSE: degree of Legendre poly
legdegree = 20
legpoly = rd.LegPoly(legdegree)
legpoly.setX(xts)
vanders = legpoly.vander
legpoly.setX(x)
vander = legpoly.vander
plist = [1,2,5,10,legdegree-1]
# we will plot the first few Legendre polys
for i in plist:
pl.plot(xts,vanders[:,i-1],label='Legendre_'+str(i))
pl.legend(bbox_to_anchor=(1.4, 1.1))
pl.suptitle('Legendre Polynomials of different order')
pl.show()
Vu, Vs, Vv = numpy.linalg.svd(legpoly.smooth)
# we will plot the first few Smoothed Legendre polys before scaling
for i in plist:
pl.plot(xts,numpy.dot(vanders,Vv[:,legdegree-i+1]),label='UnscSmthLgdre_'+str(i))
pl.legend(bbox_to_anchor=(1.6, 1.1))
pl.suptitle('Smoothed Legendre Polynomials before scaling')
pl.show()
# now first few Smoothed Legendre polys with scaling
for i in plist:
pl.plot(xts,numpy.dot(vanders,Vv[:,legdegree-i+1])/numpy.sqrt(Vs[legdegree-i+1]),label='SmoothLgdre_'+str(i))
pl.legend(bbox_to_anchor=(1.5, 1.1))
pl.suptitle('Smoothed Legendre Polynomials')
pl.show()
# now plot random functions
for i in range(10):
uu = numpy.random.normal(0,1,legdegree+1)
# uu[legdegree] = 0
pl.plot(xts,numpy.dot(vanders,numpy.dot(Vv,uu/numpy.sqrt(Vs))))
pl.suptitle('Random Smoothed Legendre Polys (centered, mean slope 0)')
pl.show()
# -
| RegressionActivity/X.RegressionLegendreDetail.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Major version: the feature setup
# Minor version: model hyper-tuning
VERSION = 'v1.1'
major_VERSION = VERSION.split('.')[0]+'.0'
# # Model Details
# ## Features:
# - One hot encoded **day of week** and **month** (not year)
# - Weather feature (OHE):
# - Icons (cloudy, partial cloudy, ...)
# - Precipitates Type (None, Snow, Sleet, Rain)
# - Station info:
# - community area (OHE)
# - capacity
# - long, lat
#
# ## Target
# - Log scale/normal scale
#
# # Work Flow
# ## Training Preprocessing
# - Merge station community area (Join tables)
# - Drop id after merging
# - Add weather info (temp_high/low, and OHE ICONs and Precipitates Types)
# - Convert to numpy matrix
#
# ## Pipeline
# - OHE on date time (Remember column indices)
# - Scaling for `year, lon_ave, lat_ave, dp_max, temp_high, temp_low` (`MinMaxScaler`)
# - Regressor()
#
# ## Test Preprocessing
# - Start with Pandas template (station_id, lon_ave, lat_ave, dp_max, OHE community area)
# - Add weather info (temp_high/low, and OHE ICONs and Precipitates Types)
# - Convert to numpy matrix
#
# ## Post prediction
# - Rescale if trained on log
# - Hard cap negative (activation function)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import pickle
# ## Load data
INFO_verbose = False
# %%time
st_template = pd.read_pickle('../data/model_v1.0_template.pk')
if INFO_verbose:
st_template.info()
# +
# %%time
## load preprocessed data
if os.path.exists('../data/divvy_data_model_'+major_VERSION+'.pk'):
print("Loading from previous pickle file.")
data = pd.read_pickle('../data/divvy_data_model_'+major_VERSION+'.pk')
else:
print("Create data set for this model... ")
data_lst = []
for year in [2013, 2014, 2015, 2016, 2017, 2018]:
dt_tmp = pd.read_feather('../data/Final_Divvy_data_'+str(year)+'.feather')
data_lst.append(dt_tmp)
data = pd.concat(data_lst, ignore_index=True)
data.to_pickle('../data/divvy_data_model_'+major_VERSION+'.pk')
print("Data saved to pickle file")
# -
if INFO_verbose:
data.info()
# ## Get target value
target_in = data.total_in
target_out = data.total_out
target_in_log = np.log(target_in+1)
# ## Prepare features
# Prescreening for useful features
feats = pd.merge(data[['station_id', 'month', 'dayofweek', 'year',
'icon_clear-day', 'icon_cloudy', 'icon_partly-cloudy-day',
'icon_rain', 'icon_sleet', 'icon_snow',
'precipType_None', 'precipType_rain',
'precipType_sleet', 'precipType_snow',
'temperatureHigh', 'temperatureLow'
]], st_template,
left_on='station_id', right_on='id').drop(['station_id', 'id'], axis=1)
# ## Reordering dataframe
# Reordering
cols = []
cols.extend(feats.columns[15:])
cols.extend(feats.columns[2:15])
cols.extend(feats.columns[:2])
feats_ro = feats[cols]
if INFO_verbose:
feats_ro.info()
feats_ro = feats_ro.fillna(0)
# ## Get/Check indices for numerical columns
num_col_in = [0, 1, 2, 50, 61, 62]
feats_ro.iloc[:, num_col_in].info()
# ## One hot encoding date
feats_fnl = pd.get_dummies(feats_ro, columns=['month', 'dayofweek'])
# ## Save model details into txt
# +
import io
buffer = io.StringIO()
feats_fnl.info(buf=buffer)
s = buffer.getvalue()
with open("../model_features_details/features_"+major_VERSION+'.txt', "w", encoding="utf-8") as f:
f.write(s)
# -
# ## Convert to numpy matrix
features_ = feats_fnl.to_numpy()
features_.shape
# ## Building pipelines
# +
import sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
# -
sklearn.__version__
# +
# %%time
model = Pipeline([
('mms', MinMaxScaler(feature_range=(0, 1))),
('rf', RandomForestRegressor(n_estimators=10, min_samples_split=5))
])
# Train Test split
X_train, X_test, y_train, y_test = train_test_split(
features_,
target_out,
test_size=0.2,
random_state=42)
# -
# %%time
model.fit(X_train, y_train)
# +
# Save model, features and targets
Model_name = 'random_forest_'
with open('../model_data/'+Model_name+'model_'+VERSION+'.pk', 'wb') as p:
pickle.dump(model, p)
with open('../model_data/'+Model_name+'testfeature_'+VERSION+'.pk', 'wb') as ptf:
pickle.dump(X_test, ptf)
with open('../model_data/'+Model_name+'testarget_'+VERSION+'.pk', 'wb') as ptg:
pickle.dump(y_test, ptg)
# -
model.score(X_test, y_test)
# First check
y_pred = model.predict(X_test)
plt.plot(y_test, y_pred, 'o', alpha=0.1)
# Deeper Look
plt.plot(y_test, y_pred, 'o', alpha=0.01)
plt.xlim(0, 300)
plt.ylim(0, 300)
# ## Grid Search
# +
# %%time
# Train Test split
X_train, X_test, y_train, y_test = train_test_split(
features_,
target_out,
test_size=0.2,
random_state=42)
print("Train/Test splitted...")
model_gs = GridSearchCV(
model,
{'rf__n_estimators': [20, 50], 'rf__max_depth':[5, 10, 20]},
cv=5,
n_jobs=4
)
model_gs.fit(X_train, y_train)
print("Best params: ", model_gs.best_params_)
# -
y_pred = model_gs.predict(X_test)
plt.plot(y_test, y_pred, 'o', alpha=0.1)
# +
# Save model, features and targets
Model_name = 'random_forest_'
with open('../model_data/'+Model_name+'model_'+VERSION+'.pk', 'wb') as p:
    pickle.dump(model_gs, p)
with open('../model_data/'+Model_name+'testfeature_'+VERSION+'.pk', 'wb') as ptf:
    pickle.dump(X_test, ptf)
# Fixed filename typo ('testraget_' -> 'testarget_') so the reload cell
# that reads 'testarget_' actually gets the targets saved here.
with open('../model_data/'+Model_name+'testarget_'+VERSION+'.pk', 'wb') as ptg:
    pickle.dump(y_test, ptg)
# -
# ## Reload model
# +
# %%time
# Load previous data
Model_name = 'random_forest_'
with open('../model_data/'+Model_name+'model_'+VERSION+'.pk', 'rb') as pp:
loaded_model = pickle.load(pp)
with open('../model_data/'+Model_name+'testfeature_'+VERSION+'.pk', 'rb') as ptfl:
loaded_test_feature = pickle.load(ptfl)
with open('../model_data/'+Model_name+'testarget_'+VERSION+'.pk', 'rb') as ptgl:
loaded_test_target = pickle.load(ptgl)
# -
loaded_model.score(loaded_test_feature, loaded_test_target)
| machine_learning_models/ML_v1.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Library
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Machine Learning Models
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from lightgbm import LGBMClassifier
# Resampling
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
# Preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
# Feature selection
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
# Tuning
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from skopt import BayesSearchCV
from skopt.space import Real, Categorical, Integer
# Evaluation
from sklearn.metrics import f1_score, accuracy_score,confusion_matrix,classification_report
# Deep Learning Model
from keras.models import Sequential
from keras.layers import Dense,Dropout
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasClassifier
plt.style.use('seaborn')
# -
# ## 1. Read Data
data = pd.read_csv('OnlineNewsPopularity.csv')
data.columns=data.columns.str.replace(' ','')
bins = [0, 1400, 10000, np.inf]
data['category'] = pd.cut(data['shares'], bins,
labels = ['Unpopular','Popular','Extremly_popular'])
data.head()
# ## 2. Modelling
# <a href=#p1>2.1 Baseline</a>
#
# <a href=#p2>2.2 Feature Selection</a>
#
# <a href=#p3>2.3 Feature Selection + UnderSampling</a>
#
# <a href=#p4>2.4 Feature Selection + OverSampling</a>
#
# <a href=#p5>2.5 Feature Selection + Model Tuning</a>
# #### Split train and test set
X = data.drop(['url','timedelta','category','shares'],axis=1)
y = data['category']
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.2, random_state=2021)
# #### Scaling data
numerical_features = ['n_tokens_title', 'n_tokens_content',
'n_unique_tokens', 'n_non_stop_words', 'n_non_stop_unique_tokens',
'num_hrefs', 'num_self_hrefs', 'num_imgs', 'num_videos',
'average_token_length', 'num_keywords','kw_min_min', 'kw_max_min', 'kw_avg_min',
'kw_min_max', 'kw_max_max', 'kw_avg_max', 'kw_min_avg', 'kw_max_avg',
'kw_avg_avg', 'self_reference_min_shares', 'self_reference_max_shares',
'self_reference_avg_sharess','LDA_00',
'LDA_01', 'LDA_02', 'LDA_03', 'LDA_04', 'global_subjectivity',
'global_sentiment_polarity', 'global_rate_positive_words',
'global_rate_negative_words', 'rate_positive_words',
'rate_negative_words', 'avg_positive_polarity', 'min_positive_polarity',
'max_positive_polarity', 'avg_negative_polarity',
'min_negative_polarity', 'max_negative_polarity', 'title_subjectivity',
'title_sentiment_polarity', 'abs_title_subjectivity',
'abs_title_sentiment_polarity']
# +
X_train_norm = X_train.copy()
X_test_norm = X_test.copy()
scaler = StandardScaler()
scaler.fit(X_train[numerical_features])
X_train_norm[numerical_features] = scaler.transform(X_train[numerical_features])
X_test_norm[numerical_features] = scaler.transform(X_test_norm[numerical_features])
# -
# <a name='p1' /></a>
# ## 2.1 Baseline
# #### NB & XGBoost
def model_fit(X_train_df, y_train_df, X_test_df, y_test_df=None):
    """Fit baseline classifiers and report accuracy / macro-F1 on the test set.

    Parameters
    ----------
    X_train_df, y_train_df : training features / labels.
    X_test_df : test features to predict on.
    y_test_df : test labels; defaults to the notebook-global ``y_test`` so
        existing calls keep working (the original read the global directly).

    Returns
    -------
    (score_map, f1_map) : dicts keyed by model name with each model's
        accuracy and macro-averaged F1 (previously computed but discarded).
    """
    if y_test_df is None:
        y_test_df = y_test  # backward-compatible fallback to the global split
    models = {
        "GaussianNB": GaussianNB(),
        "XGBooostClassifier": XGBClassifier(),
    }
    prediction = {}
    score_map = {}
    f1_map = {}
    for model_name, model in models.items():
        model.fit(X_train_df, y_train_df)
        prediction[model_name] = model.predict(X_test_df)
        score = accuracy_score(y_test_df, prediction[model_name])
        f1 = f1_score(y_test_df, prediction[model_name], average='macro')
        score_map[model_name] = score
        f1_map[model_name] = f1
        print("{}{}{}".format(model_name, " accuracy: ", score))
        print("{}{}{}".format(model_name, " f1 score: ", f1))
    return score_map, f1_map
model_fit(X_train_norm,y_train,X_test_norm)
# #### Logistic
logistic = LogisticRegression(random_state=2021,class_weight='balanced',solver='liblinear')
logistic_result = logistic.fit(X_train_norm, y_train)
y_pred = logistic_result.predict(X_test_norm)
print(classification_report(y_test, y_pred))
print("accuracy:", logistic_result.score(X_test_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# #### Random Forest
# +
rf= RandomForestClassifier(random_state=0,class_weight='balanced')
rf_result = rf.fit(X_train_norm, y_train)
y_pred1 = rf_result.predict(X_test_norm)
print("accuracy:",rf_result.score(X_test_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred1, average='macro'))
# -
# #### AdaBoost
ada_model = AdaBoostClassifier(random_state=2021)
ada_result = ada_model.fit(X_train_norm, y_train)
y_pred = ada_result.predict(X_test_norm)
print("accuracy:", ada_result.score(X_test_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# <a name='p2' /></a>
# ## 2.2 Feature Selection
# #### Use multiple models to select key features by importance
# +
models = {
"Logistic": LogisticRegression(solver='liblinear'),
"RandomForestClassifier":RandomForestClassifier(),
"SupportVectorClassifier":SVC(kernel='linear'),
#"GaussianNB": GaussianNB(),
"XGBooostClassifier": XGBClassifier(),
"AdaBoostClassifier":AdaBoostClassifier(),
"LightGBMClassifier": LGBMClassifier()
}
feature_selection = {}
feature_names = np.array(X_train_norm.columns)
for model_name in models:
model = models[model_name]
selector = SelectFromModel(estimator=model,threshold=-1, max_features=25).fit(X_train_norm, y_train)
feature_selection[model_name] = feature_names[selector.get_support()]
print("{}{}{}".format(model_name, ": ", 'done'))
# -
# #### select features using model votes >=3
# +
feature_count = dict(zip(feature_names,np.zeros(len(feature_names))))
for i in feature_names:
for j in feature_selection:
if i in feature_selection[j]:
feature_count[i]+=1
feature_count = pd.Series(feature_count).sort_values(ascending=False)
# select by feature count
features_selected = feature_count[feature_count>=3].index.tolist()
features_selected
# -
features_selected=['LDA_00',
'n_unique_tokens',
'kw_avg_avg',
'kw_max_avg',
'n_non_stop_unique_tokens',
'kw_avg_min',
'self_reference_min_shares',
'global_subjectivity',
'data_channel_is_tech',
'kw_avg_max',
'kw_min_avg',
'LDA_02',
'data_channel_is_socmed',
'average_token_length',
'is_weekend',
'kw_min_min',
'LDA_01',
'self_reference_avg_sharess',
'LDA_03',
'data_channel_is_entertainment',
'n_non_stop_words',
'kw_max_min',
'num_imgs',
'data_channel_is_lifestyle',
'data_channel_is_world',
'data_channel_is_bus',
'num_hrefs',
'weekday_is_saturday']
# #### Scaling data
numerical_features_selected = [i for i in numerical_features if i in features_selected]
X_train_selected = X_train[features_selected]
X_test_selected = X_test[features_selected]
# +
X_train_selected_norm = X_train_selected.copy()
X_test_selected_norm = X_test_selected.copy()
scaler = StandardScaler()
scaler.fit(X_train[numerical_features_selected])
X_train_selected_norm[numerical_features_selected] = scaler.transform(X_train_selected_norm[numerical_features_selected])
X_test_selected_norm[numerical_features_selected] = scaler.transform(X_test_selected_norm[numerical_features_selected])
# -
# #### NB & XGBoost
model_fit(X_train_selected_norm,y_train,X_test_selected_norm)
# #### Logistic
logistic = LogisticRegression(random_state=2021,class_weight='balanced',solver='liblinear')
logistic_result_new = logistic.fit(X_train_selected_norm, y_train)
y_pred = logistic_result_new.predict(X_test_selected_norm)
print("Accuracy:",logistic_result_new.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# #### Random Forest
rf= RandomForestClassifier(random_state=0,class_weight='balanced')
rf_result_new = rf.fit(X_train_selected_norm, y_train)
y_pred = rf_result_new.predict(X_test_selected_norm)
print("Accuracy:",rf_result_new.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# #### AdaBoost
ada_model = AdaBoostClassifier(random_state=2021)
# Fixed: the original fit on X_test_selected_norm (test-set leakage, and the
# reported scores were meaningless); fit on the training split instead.
ada_result = ada_model.fit(X_train_selected_norm, y_train)
y_pred = ada_result.predict(X_test_selected_norm)
print("accuracy:", ada_result.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# <a name='p3' /></a>
# ## 2.3 Feature Selection + UnderSampling
rus = RandomUnderSampler()
X_train_selected_norm_RUS, y_train_RUS = rus.fit_resample(X_train_selected_norm, y_train)
# #### NB & XGBoost
model_fit(X_train_selected_norm_RUS,y_train_RUS,X_test_selected_norm)
# #### Logistic
logistic = LogisticRegression(random_state=2021,class_weight='balanced',solver='liblinear')
logistic_result_new = logistic.fit(X_train_selected_norm_RUS, y_train_RUS)
y_pred_new = logistic_result_new.predict(X_test_selected_norm)
print("Accuracy:",logistic_result_new.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred_new, average='macro'))
# #### Random Forest
rf= RandomForestClassifier(random_state=2021,class_weight='balanced')
rf_result_new = rf.fit(X_train_selected_norm_RUS, y_train_RUS)
y_pred_new = rf_result_new.predict(X_test_selected_norm)
print("Accuracy:",rf_result_new.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred_new, average='macro'))
# #### AdaBoost
ada_model = AdaBoostClassifier(random_state=2021)
ada_result = ada_model.fit(X_train_selected_norm_RUS, y_train_RUS)
y_pred = ada_result.predict(X_test_selected_norm)
print("accuracy:", ada_result.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# <a name='p4' /></a>
# ## 2.4 Feature Selection + OverSampling
smote = SMOTE()
# Fixed: the original called rus.fit_resample (the RandomUnderSampler), so
# this "OverSampling" section actually under-sampled; use the SMOTE object.
X_train_selected_norm_smote, y_train_smote = smote.fit_resample(X_train_selected_norm, y_train)
# #### NB & XGBoost
model_fit(X_train_selected_norm_smote,y_train_smote,X_test_selected_norm)
# #### Logistic
logistic = LogisticRegression(random_state=2021,class_weight='balanced',solver='liblinear')
logistic_result_new = logistic.fit(X_train_selected_norm_smote, y_train_smote)
y_pred_new = logistic_result_new.predict(X_test_selected_norm)
print("Accuracy:",logistic_result_new.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred_new, average='macro'))
# #### Random Forest
rf = RandomForestClassifier(random_state=2021, class_weight='balanced')
rf_result_new = rf.fit(X_train_selected_norm_smote, y_train_smote)
# Fixed: predict(...) had an unbalanced parenthesis (SyntaxError), and the
# F1 line below reused a stale `y_pred` from an earlier cell.
y_pred_new = rf_result_new.predict(X_test_selected_norm)
print("Accuracy:",rf_result_new.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred_new, average='macro'))
# #### AdaBoost
ada_model = AdaBoostClassifier(random_state=2021)
ada_result = ada_model.fit(X_train_selected_norm_smote, y_train_smote)
y_pred = ada_result.predict(X_test_selected_norm)
print("accuracy:", ada_result.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# <a name='p5' /></a>
# ## 2.5 Feature Selection + Model Tuning
# ### Bayes Tuning
# +
nb_classifier = GaussianNB()
params_NB = {'var_smoothing': np.logspace(0,-9, num=100)}
gs_NB = GridSearchCV(estimator=nb_classifier,
param_grid=params_NB,
cv=5,
n_jobs = -1,
scoring='accuracy')
gs_NB.fit(X_train_selected_norm, y_train)
gs_NB.best_params_
# -
prediction = gs_NB.predict(X_test_selected_norm)
score = accuracy_score(y_test, prediction)
f1 = f1_score(y_test, prediction, average='macro')
print("{}{}{}".format('nb_classifier', " accuracy: ", score))
print("{}{}{}".format('nb_classifier', " f1 score: ", f1))
# ### XGBoost Tuning
# +
xgb_classifier = XGBClassifier()
params_xgb = {
'learning_rate': [0.01, 0.1],
'max_depth': [2,5,7],
'n_estimators': [50, 100, 200]
}
gs_xgb = GridSearchCV(estimator=xgb_classifier,
param_grid=params_xgb,
cv=5,
n_jobs = -1,
verbose = 1,
scoring='accuracy')
gs_xgb.fit(X_train_selected_norm, y_train)
gs_xgb.best_params_
# -
prediction = gs_xgb.predict(X_test_selected_norm)
score = accuracy_score(y_test, prediction)
f1 = f1_score(y_test, prediction, average='macro')
print("{}{}{}".format('xgb_classifier', " accuracy: ", score))
print("{}{}{}".format('xgb_classifier', " f1 score: ", f1))
# ### Logistic Tuning
def clf_grid_cv(clf, search_spaces):
    """Grid-search `clf` inside a StandardScaler pipeline and report its scores.

    Fits on the global training split, prints the best CV score, the test-set
    score and the winning parameters, then returns the fitted GridSearchCV
    (refit on the full training data).
    """
    pipeline = Pipeline([('scl', StandardScaler()),
                         ('clf', clf)])
    searcher = GridSearchCV(pipeline,
                            search_spaces,
                            cv=3,
                            n_jobs=-1,
                            refit=True)
    searcher.fit(X_train_selected_norm, y_train)
    print("val. score: %s" % searcher.best_score_)
    print("test score: %s" % searcher.score(X_test_selected_norm, y_test))
    print("best params: %s" % str(searcher.best_params_))
    return searcher
# Parameter names carry the 'clf__' prefix so they target the pipeline step
# named 'clf' inside clf_grid_cv.
search_spaces_logi = {
    'clf__penalty' : ['l1', 'l2'],
    'clf__C' : [100, 10, 1.0, 0.1, 0.01]
}
clf_result = clf_grid_cv(LogisticRegression(random_state=2021,solver='liblinear'),search_spaces_logi)
# Refit a standalone logistic regression with the best parameters found above
logistic = LogisticRegression(random_state=2021,solver='liblinear',penalty= 'l2',C=10)
logistic.fit(X_train_selected_norm, y_train)
y_pred_new = logistic.predict(X_test_selected_norm)
print("Accuracy",logistic.score(X_test_selected_norm, y_test))
# NOTE(review): arguments are (y_pred, y_true) — F1 is symmetric under the swap,
# so the value is unchanged, but sklearn's conventional order is (y_true, y_pred).
print("F1-score:", f1_score(y_pred_new,y_test,average='macro'))
# ### Random Forest Tuning
search_spaces_rf = {
    'clf__max_depth': [10,20,50,80,100],
    'clf__min_samples_leaf': [3, 4, 5],
    'clf__n_estimators': [100, 200, 300, 1000]
}
rf_result = clf_grid_cv(RandomForestClassifier(random_state=2021),search_spaces_rf)
# Refit a standalone random forest with the best parameters found above
rf = RandomForestClassifier(random_state=2021,max_depth= 50, min_samples_leaf= 5, n_estimators= 1000)
rf.fit(X_train_selected_norm, y_train)
y_pred_new = rf.predict(X_test_selected_norm)
print("accuracy",rf.score(X_test_selected_norm, y_test))
print("F1-score:", f1_score(y_pred_new,y_test,average='macro'))
# ### AdaBoost Tuning
# +
# NOTE: removed an unused `clf = GridSearchCV(ada, search_spaces_ada, ...)` that was
# built here but never fitted — fitting it would have raised anyway, because the
# 'clf__' parameter prefixes only exist inside the pipeline built by clf_grid_cv.
search_spaces_ada = {
    'clf__n_estimators':[10,50,100,500,1000],
    'clf__learning_rate':[0.01,0.1,1,10,100],
}
# -
clf_result = clf_grid_cv(AdaBoostClassifier(random_state=2021),search_spaces_ada)
clf_result.best_params_
# Refit AdaBoost with the best hyperparameters reported above
ada_model = AdaBoostClassifier(random_state=2021, learning_rate=0.1, n_estimators=500)
ada_result = ada_model.fit(X_train_selected_norm, y_train)
y_pred = ada_result.predict(X_test_selected_norm)
print("accuracy:", ada_result.score(X_test_selected_norm,y_test))
print("F1-score:", f1_score(y_test, y_pred, average='macro'))
# ## Insights
# Refit the tuned random forest to use for the interpretability analyses below
rf = RandomForestClassifier(random_state=2021,max_depth= 50, min_samples_leaf= 5, n_estimators= 1000)
rf.fit(X_train_selected_norm, y_train)
y_pred_new = rf.predict(X_test_selected_norm)
print("accuracy",rf.score(X_test_selected_norm, y_test))
# NOTE(review): arguments are (y_pred, y_true); F1 is symmetric under the swap,
# but the conventional sklearn order is (y_true, y_pred).
print("F1-score:", f1_score(y_pred_new,y_test,average='macro'))
# ### Permutation Importance
# +
import eli5
from eli5.sklearn import PermutationImportance
# Permutation importance: shuffle each feature on the test set and measure the score drop
perm = PermutationImportance(rf).fit(X_test_selected_norm, y_test)
eli5.show_weights(perm, feature_names = X_test_selected_norm.columns.tolist())
# -
# ### Partial Dependence Plot
# +
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
# Partial dependence of the prediction on 'is_weekend', averaging over the test set
pdp_goals = pdp.pdp_isolate(model=rf, dataset=X_test_selected_norm, model_features=X_test_selected_norm.columns.tolist(), feature='is_weekend')
# plot it
pdp.pdp_plot(pdp_goals, 'is_weekend')
plt.show()
# +
# Same partial-dependence analysis for 'kw_avg_avg'
pdp_goals = pdp.pdp_isolate(model=rf, dataset=X_test_selected_norm, model_features=X_test_selected_norm.columns.tolist(), feature='kw_avg_avg')
# plot it
pdp.pdp_plot(pdp_goals, 'kw_avg_avg')
plt.show()
# +
# Same partial-dependence analysis for 'data_channel_is_entertainment'
pdp_goals = pdp.pdp_isolate(model=rf, dataset=X_test_selected_norm, model_features=X_test_selected_norm.columns.tolist(), feature='data_channel_is_entertainment')
# plot it
pdp.pdp_plot(pdp_goals, 'data_channel_is_entertainment')
plt.show()
| Project7-Predict-Online-News-Popularity/2-ML-Modelling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 69} id="eYTYJD4nA-O1" outputId="f8c9ef25-96e6-420b-fa57-7a77b2134bc6"
import tensorflow as tf
# + id="tpkKSQXgA_hc"
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# MNIST pixel values are in the 0-255 range; dividing by 255 rescales them to
# [0, 1], the input range in which neural networks train best.
x_train, x_test = x_train / 255.0, x_test / 255.0
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="RaNOxLG-DaC7" outputId="4f3fa65f-6fec-439f-cd71-bbeb30b9291c"
x_train.shape
# + [markdown] id="VYpbFvjIGujw"
# ## 1. Keras API ( Sequential vs Functional)
# - Tensorflow에서 keras api를 활용하는 방법이 크게 두가지.
# - Sequential은 layer의 구조가 순차적으로 있을때 사용이 편리.
# - Functional API를 활용한다면 복잡한 구조를 설계할 수 있음.
# - 예를 들어서 input, output을 여러개 관리 하거나, layer 사이사이에 연결을 해줄 때.
# + id="gZtdG_i5C3rK"
model = tf.keras.models.Sequential([
tf.keras.layers.Reshape((28, 28, 1)),
tf.keras.layers.Conv2D(16, 3, activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="VSny6Bk8DTh9" outputId="ee857029-09a4-4bc5-a2ab-64b752cf469a"
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
# + id="QMqJEvvdF2l5"
from tensorflow import keras
from tensorflow.keras import layers
# + colab={"base_uri": "https://localhost:8080/", "height": 366} id="Gha3xecYGmhh" outputId="58e5d54d-2d53-413a-8d7c-7e30e9028f80"
inputs = keras.Input(shape=(28, 28))
x = layers.Reshape((28, 28, 1))(inputs)
x = layers.Conv2D(16, 3, activation='relu')(x)
x = layers.Flatten()(x)
x = layers.Dense(128)(x)
x = layers.Dense(10, activation='softmax')(x)
outputs = x
model = keras.Model(inputs, outputs)
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="KJvvsHUbGpP2" outputId="7fa3077f-e8a1-4b57-a3f3-25694b6422ca"
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
# + [markdown] id="PF12IhZBH9Rj"
# # 2. Pooling의 사용
# + colab={"base_uri": "https://localhost:8080/", "height": 401} id="Xx0Cd7guDXHb" outputId="93c6c818-d890-42a6-dbb6-f3c2c87545bb"
inputs = keras.Input(shape=(28, 28))
x = layers.Reshape((28, 28, 1))(inputs)
x = layers.Conv2D(16, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
x = layers.Flatten()(x)
x = layers.Dense(128)(x)
x = layers.Dense(10, activation='softmax')(x)
outputs = x
model = keras.Model(inputs, outputs)
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="V9vtyx0ZF-XC" outputId="35ba6730-6b15-4930-9df9-5e8ce8f11eee"
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
# + [markdown] id="nKnMsu2MJAhP"
# # 3. 조금더 깊은 layer
# + colab={"base_uri": "https://localhost:8080/", "height": 470} id="cYUoM0Q2GVGC" outputId="c26200ff-2b2e-4bda-8a89-4131b3e8bb4c"
inputs = keras.Input(shape=(28, 28))
x = layers.Reshape((28, 28, 1))(inputs)
x = layers.Conv2D(16, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
x = layers.Flatten()(x)
x = layers.Dense(128)(x)
x = layers.Dense(10, activation='softmax')(x)
outputs = x
model = keras.Model(inputs, outputs)
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="ghrrE3O9I3Bb" outputId="4b06f7d3-2e3b-4297-df25-3f6a4fc944b2"
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
# + [markdown] id="i46Ut-e0JGlh"
# 결과
# Pooling layer는 이미지의 크기를 작게 만듦으로써 정보의 손실을 가져온다고 볼수도 있다. 하지만 Flatten layer가 있는 모델의 구조상 pooling layer를 통해서 전체 모델의 parameter 양은 되려 줄어든다.
# + [markdown] id="HZUgd0iKx6By"
# # 4. Padding의 유무
# + colab={"base_uri": "https://localhost:8080/", "height": 470} id="_ejUj_hrx7qh" outputId="40580492-86d7-4415-c955-e43f7d68382a"
inputs = keras.Input(shape=(28, 28))
x = layers.Reshape((28, 28, 1))(inputs)
x = layers.Conv2D(16, 3, activation='relu',padding="same")(x)
x = layers.MaxPooling2D(2)(x)
x = layers.Conv2D(32, 3, activation='relu',padding="same")(x)
x = layers.MaxPooling2D(2)(x)
x = layers.Flatten()(x)
x = layers.Dense(128)(x)
x = layers.Dense(10, activation='softmax')(x)
outputs = x
model = keras.Model(inputs, outputs)
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="fmPyITpxx-ge" outputId="30c03229-5a28-430c-c6f5-e42c82475d48"
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
# + id="VsAZP6G6yDhq"
| AI_Class/025/CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <h1> Create TensorFlow wide-and-deep model </h1>
#
# This notebook illustrates:
# <ol>
# <li> Creating a model using the high-level Estimator API
# </ol>
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'  # GCS bucket holding training data / model output
PROJECT = 'cloud-training-demos'    # GCP project id
REGION = 'us-central1'              # region used when creating the bucket below
import os
# Export the settings so the bash cells below can read them as ${BUCKET} etc.
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
# + language="bash"
# if ! gsutil ls | grep -q gs://${BUCKET}/; then
# gsutil mb -l ${REGION} gs://${BUCKET}
# fi
# -
# %bash
# ls *.csv
# <h2> Create TensorFlow model using TensorFlow's Estimator API </h2>
# <p>
# First, write an input_fn to read the data.
import shutil
import numpy as np
import tensorflow as tf
# +
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Per-column defaults; these also fix each column's parsed dtype for tf.decode_csv
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
TRAIN_STEPS = 1000

def read_dataset(prefix, pattern, batch_size=512):
    """Return an Estimator input_fn reading CSV shards matching ./{prefix}*{pattern}*.

    Training data repeats indefinitely and is shuffled; evaluation data is read
    for exactly one epoch. Uses the legacy TF1 queue-based input pipeline.
    """
    # use prefix to create filename
    filename = './{}*{}*'.format(prefix, pattern)
    if prefix == 'train':
        mode = tf.estimator.ModeKeys.TRAIN
        num_epochs = None # indefinitely
    else:
        mode = tf.estimator.ModeKeys.EVAL
        num_epochs = 1 # end-of-input after this

    # the actual input function passed to TensorFlow
    def _input_fn():
        # could be a path to one file or a file pattern.
        input_file_names = tf.train.match_filenames_once(filename)
        filename_queue = tf.train.string_input_producer(
            input_file_names, shuffle=True, num_epochs=num_epochs)

        # read CSV: read_up_to pulls up to batch_size raw lines per call
        reader = tf.TextLineReader()
        _, value = reader.read_up_to(filename_queue, num_records=batch_size)
        if mode == tf.estimator.ModeKeys.TRAIN:
            # extra cross-batch shuffling for training only
            value = tf.train.shuffle_batch([value], batch_size, capacity=10*batch_size,
                                           min_after_dequeue=batch_size, enqueue_many=True,
                                           allow_smaller_final_batch=False)
        value_column = tf.expand_dims(value, -1)
        columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
        features = dict(zip(CSV_COLUMNS, columns))
        features.pop(KEY_COLUMN)  # the key only identifies rows; it is not a model input
        label = features.pop(LABEL_COLUMN)
        return features, label

    return _input_fn
# -
# Next, define the feature columns
def get_wide_deep():
    """Build the feature columns for a wide-and-deep model.

    Returns a (wide, deep) pair: sparse/bucketized columns for the linear
    ("wide") part and continuous/embedded columns for the DNN ("deep") part.
    """
    # Raw column definitions
    is_male = tf.feature_column.categorical_column_with_vocabulary_list(
        'is_male', ['True', 'False', 'Unknown'])
    mother_age = tf.feature_column.numeric_column('mother_age')
    plurality = tf.feature_column.categorical_column_with_vocabulary_list(
        'plurality',
        ['Single(1)', 'Twins(2)', 'Triplets(3)',
         'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)'])
    gestation_weeks = tf.feature_column.numeric_column('gestation_weeks')

    # Discretize the continuous columns into one-year / one-week buckets
    age_buckets = tf.feature_column.bucketized_column(
        mother_age, boundaries=np.arange(15,45,1).tolist())
    gestation_buckets = tf.feature_column.bucketized_column(
        gestation_weeks, boundaries=np.arange(17,47,1).tolist())

    # Sparse columns feed the wide (linear) part of the model
    wide = [is_male, plurality, age_buckets, gestation_buckets]

    # Cross all wide columns, then embed the cross into 3 dimensions
    crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
    embed = tf.feature_column.embedding_column(crossed, 3)

    # Continuous columns (plus the embedded cross) feed the deep (DNN) part
    deep = [mother_age, gestation_weeks, embed]
    return wide, deep
# To predict with the TensorFlow model, we also need a serving input function. We will want all the inputs from our user.
def serving_input_fn():
    """Serving-time input function: one placeholder per raw user-supplied feature."""
    feature_placeholders = {
        'is_male': tf.placeholder(tf.string, [None]),
        'mother_age': tf.placeholder(tf.float32, [None]),
        'plurality': tf.placeholder(tf.string, [None]),
        'gestation_weeks': tf.placeholder(tf.float32, [None])
    }
    # Add a trailing dimension so downstream code sees rank-2 tensors
    features = {}
    for name, placeholder in feature_placeholders.items():
        features[name] = tf.expand_dims(placeholder, -1)
    return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Finally, train!
# +
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.learn.python.learn import learn_runner

PATTERN = "csv"  # substring used by read_dataset to match the CSV shard filenames

def train_and_evaluate(output_dir):
    """Train the wide-and-deep regressor and export the final serving model."""
    wide, deep = get_wide_deep()
    estimator = tf.estimator.DNNLinearCombinedRegressor(
        model_dir=output_dir,
        linear_feature_columns=wide,
        dnn_feature_columns=deep,
        dnn_hidden_units=[64, 32])  # two hidden layers for the deep part
    train_spec=tf.estimator.TrainSpec(
        input_fn=read_dataset('train', PATTERN),
        max_steps=TRAIN_STEPS)
    # Export the final model with the serving input function defined above
    exporter = tf.estimator.FinalExporter('exporter',serving_input_fn)
    eval_spec=tf.estimator.EvalSpec(
        input_fn=read_dataset('eval', PATTERN),
        steps=None,  # evaluate on the full eval set
        exporters=exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

shutil.rmtree('babyweight_trained', ignore_errors=True) # start fresh each time
train_and_evaluate('babyweight_trained')
# -
# When I ran it, the final lines of the output (above) were:
# <pre>
# INFO:tensorflow:Saving dict for global step 1000: average_loss = 1.176739, global_step = 1000, loss = 598.1424
# ...
# INFO:tensorflow:SavedModel written to: babyweight_trained/export/exporter/temp-1516668894/saved_model.pb
# [2018-01-23 00:54:55,246] {tf_logging.py:82} INFO - SavedModel written to: babyweight_trained/export/exporter/temp-1516668894/saved_model.pb
# </pre>
# The Servo directory contains the final model and the final RMSE (the average_loss) is 1.1767
# <h2> Monitor and experiment with training </h2>
from google.datalab.ml import TensorBoard
# Launch TensorBoard pointed at the training output directory
TensorBoard().start('./babyweight_trained')
# In TensorBoard, look at the learned embeddings. Are they getting clustered? How about the weights for the hidden layers? What if you run this longer? What happens if you change the batchsize?
# NOTE(review): 5539 is a hard-coded pid from a previous run — replace it with the
# pid reported by start() above before running this cell.
TensorBoard.stop(5539)
# Copyright 2017-2018 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| courses/machine_learning/deepdive/07_structured/3_tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
"""
Define a function square_root that receives a number as a param and returns the square root of that number. Determine the square root of 6.25.
"""
# + tags=[]
def square_root(number):
    """Return the square root of *number*."""
    # fixed: `number**1/2` parsed as (number**1)/2 due to operator precedence,
    # which returned half the number instead of its square root
    return number ** 0.5

square_root(6.25)
| return square root.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.12 ('apple_tensorflow')
# language: python
# name: python3
# ---
import os
import shutil
# +
"""
If your folders look like
- NIH_FOLDER
    - No Finding
    - Finding
    - etc.
- Chexpert_FOLDER
    - No Finding
    - Finding
    - etc.
"""
PATH_TO_NIH_FOLDERS = ""       # root of the per-class NIH image folders
PATH_TO_CHEXPERT_FOLDERS = ""  # root of the per-class CheXpert image folders
PATH_TO_FINAL_FOLDERS = ""     # destination root for the merged dataset
if not os.path.exists(PATH_TO_FINAL_FOLDERS):
    # NOTE(review): os.mkdir fails if the parent directory is missing; the cells
    # below use os.makedirs for the same purpose — confirm which is intended.
    os.mkdir(PATH_TO_FINAL_FOLDERS)
# -
#recursively merge two folders including subfolders
def mergefolders(root_src_dir, root_dst_dir):
    """Recursively copy the tree under root_src_dir into root_dst_dir.

    Missing destination directories are created; files already present at the
    destination are overwritten. The source tree is left untouched.
    """
    for current_dir, _subdirs, filenames in os.walk(root_src_dir):
        # Map this source directory to its mirror under the destination root
        target_dir = current_dir.replace(root_src_dir, root_dst_dir, 1)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        for name in filenames:
            source_path = os.path.join(current_dir, name)
            target_path = os.path.join(target_dir, name)
            # Remove any pre-existing file so the copy always wins
            if os.path.exists(target_path):
                os.remove(target_path)
            shutil.copy(source_path, target_dir)
# +
# Merge No Finding and No finding
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "No Finding")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "No Finding"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'No Finding'), os.path.join(PATH_TO_FINAL_FOLDERS, 'No Finding'))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'No Finding'), os.path.join(PATH_TO_FINAL_FOLDERS, 'No Finding'))
# +
# Merge NIH 'Cardiomegaly' with CheXpert 'Enlarged Cardiomediastinum' into one class
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Cardiomegaly")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Cardiomegaly"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Cardiomegaly'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Cardiomegaly'))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Enlarged Cardiomediastinum'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Cardiomegaly'))
# +
# Merge Edema, Pneumothorax, Atelectasis — these share the same name in both datasets
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Edema")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Edema"))
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Pneumothorax")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Pneumothorax"))
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Atelectasis")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Atelectasis"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Edema'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Edema'))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Edema'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Edema'))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Pneumothorax'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Pneumothorax'))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Pneumothorax'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Pneumothorax'))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Atelectasis'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Atelectasis'))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Atelectasis'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Atelectasis'))
# +
# Merge Consolidation and Pneumonia into Pneumonia
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Pneumonia")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Pneumonia"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Consolidation'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Pneumonia'))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Consolidation'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Pneumonia'))
# NOTE(review): 'Pnemonia' looks like a misspelling of 'Pneumonia' — confirm the
# actual source folder names on disk before relying on these two calls.
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Pnemonia'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Pneumonia'))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Pnemonia'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Pneumonia'))
# +
# Under Chexpert Create a Lung Opacity/Lesion combine both Lung Opacity and Lung Lesion
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Lung LesionAndOpacity")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Lung LesionAndOpacity"))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Lung Opacity'), os.path.join(PATH_TO_FINAL_FOLDERS, "Lung LesionAndOpacity"))
# fixed: os.rename onto an existing directory raises OSError and would move (not
# merge) the source; use mergefolders like every other category here
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Lung Lesion'), os.path.join(PATH_TO_FINAL_FOLDERS, "Lung LesionAndOpacity"))
# Then take duplicates for each category of Mass, Nodule, Fibrosis, Pleural Thickening, Infiltration and merge with the above category
# fixed: the Pleural_Thickening loop listed 'Pleural_Thickening' but copied from
# 'Pleural Thickening' (with a space), which would raise FileNotFoundError; the
# underscore form matches the listdir call and the folder name used further below
for category in ('Mass', 'Nodule', 'Fibrosis', 'Pleural_Thickening', 'Infiltration'):
    for file in os.listdir(os.path.join(PATH_TO_NIH_FOLDERS, category)):
        shutil.copy(os.path.join(PATH_TO_NIH_FOLDERS, category, file), os.path.join(PATH_TO_FINAL_FOLDERS, "Lung LesionAndOpacity"))
# +
# Merge Mass and Nodule originals into one larger category
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Mass and Nodule")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Mass and Nodule"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Mass'), os.path.join(PATH_TO_FINAL_FOLDERS, "Mass and Nodule"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Nodule'), os.path.join(PATH_TO_FINAL_FOLDERS, "Mass and Nodule"))
# +
# Merge Fibrosis, Pleural_Thickening, Infiltration into final folder (NIH-only classes)
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Fibrosis")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Fibrosis"))
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Pleural_Thickening")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Pleural_Thickening"))
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Infiltration")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Infiltration"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Fibrosis'), os.path.join(PATH_TO_FINAL_FOLDERS, "Fibrosis"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Pleural_Thickening'), os.path.join(PATH_TO_FINAL_FOLDERS, "Pleural_Thickening"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Infiltration'), os.path.join(PATH_TO_FINAL_FOLDERS, "Infiltration"))
# +
# merge effusion and pleural effusion
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Effusion")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Effusion"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Effusion'), os.path.join(PATH_TO_FINAL_FOLDERS, "Effusion"))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Pleural Effusion'), os.path.join(PATH_TO_FINAL_FOLDERS, "Effusion"))
# +
# merge Fracture from Chexpert, Hernia from NIH, and Pleural Other from Chexpert into final folders
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Fracture")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Fracture"))
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Hernia")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Hernia"))
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Pleural Other")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Pleural Other"))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Fracture'), os.path.join(PATH_TO_FINAL_FOLDERS, "Fracture"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Hernia'), os.path.join(PATH_TO_FINAL_FOLDERS, "Hernia"))
mergefolders(os.path.join(PATH_TO_CHEXPERT_FOLDERS, 'Pleural Other'), os.path.join(PATH_TO_FINAL_FOLDERS, "Pleural Other"))
# +
# merge NIH Emphysema into final folder (NIH-only class)
if not os.path.exists(os.path.join(PATH_TO_FINAL_FOLDERS, "Emphysema")):
    os.makedirs(os.path.join(PATH_TO_FINAL_FOLDERS, "Emphysema"))
mergefolders(os.path.join(PATH_TO_NIH_FOLDERS, 'Emphysema'), os.path.join(PATH_TO_FINAL_FOLDERS, 'Emphysema'))
| datasetsforeverything/MergeCategorical.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: base
# language: python
# name: base
# ---
# # Градиентный спуск
#
# ## Градиент функции:
#
# Градиент - вектор частных производных
#
#
# $$\nabla f(x) = \begin{pmatrix} \frac {\partial f}{\partial x_1} & \dots & \frac {\partial f} {\partial x_d} \end{pmatrix}$$
#
# Считаем производную функции в отдельности для каждой переменной $x_1, x_2, \dots, x_d$ (частные производные), составляем вектор из частных производных - градиент (обобщение производной для функции многих переменных).
#
# <img src='images/gradient.png'>
#
# Фиксируем точку $x_0$. В какую сторону функция быстрее всего растёт? Функция растёт в направлении градиента. Если градиент взять с минусом, то он покажет в сторону наискорейшего убывания функции.
# Если градиент равен нулю, то это экстремум.
#
# #### Необходимое условие экстремума:
#
# Если точка $x_0$ - экстремум и в ней существует производная, то $$\nabla f(x_0)=0 $$
# Можно посчитать градиент функции, приравнять нулю, решить систему уравнений. Ответов может быть много.
#
# - Если функция строго выпуклая, то экстремум один.
#
# - MSE линейной регрессии - выпуклая функция
#
#
# Градиентные методы - общий подход для обучения моделей (не всегда возможно решить задачу аналитически)
#
# Антиградиент указывает в сторону наискорейшего убывния функции.
#
# Это можно использовать для поиска минимума функции (минимизация функционала ошибки, например, MSE).
#
#
# ### Алгоритм
# 1. Стартуем из случайной точки
# 2. Сдвигаемся по антиградиенту
# 3. Повторяем, пока не окажемся в точке минимума
#
#
# ### Пример
# Один признак
#
# Модель:
# $$a(x)=w_1 x + w_0$$
#
# Два параметра: $w_0$ и $w_1$
#
# Функционал ошибки MSE:
# $$\displaystyle {Q(w_0, w_1) = \frac{1}{l} \sum _{i=1}^{l}{(w_1 x_i + w_0 - y_i)^2}}$$
#
# <img src='images/MSE.png'>
# $$\displaystyle {Q(w_0, w_1) = \frac{1}{l} \sum _{i=1}^{l}{(w_1 x_i + w_0 - y_i)^2}}$$
# Считаем частные производные по каждому из весов:
# $$\frac {\partial Q}{\partial w_1} = \frac {2}{l} \sum _{i=1} ^{l} x_i (w_1 x_i + w_0 -y_i)$$
#
# $$\frac {\partial Q}{\partial w_0} = \frac {2}{l} \sum _{i=1} ^{l} (w_1 x_i + w_0 -y_i)$$
#
# Градиент (вектор с двумя компонентами):
#
# $$\nabla Q(w) = (\frac {2}{l} \sum _{i=1} ^{l} x_i (w_1 x_i + w_0 -y_i), \frac {2}{l} \sum _{i=1} ^{l} (w_1 x_i + w_0 -y_i))$$
# ## Алгоритм градиентного спуска Full GD
# ### Начальное приближение
# Сначала нужно как-то инициализировать веса ($w^0$, 0 - номер итерации). Можно сгенерировать веса из стандартного нормального распределения.
#
# ### Шаг алгоритма (повторять до сходимости)
# $$w^t = w^{t-1} - \eta \nabla Q(w^{t-1})$$
# - $t$ - номер итерации
# - $w^t$ - новая точка
# - $\eta$ - размер шага
# - $\nabla Q(w^{t-1})$ - градиент в предыдущей точке
#
# Остановить процесс, если:
# - вектор весов почти не меняется $||w^t - w^{t-1}|| < \epsilon$
# или
# - если норма градиента близка к нулю $||Q(w^{t})|| < \epsilon$
# __Если признаков много:__
# $$\displaystyle {Q(w) = \frac{1}{l} \sum _{i=1}^{l}{(<w, x> - y_i)^2}}$$
#
# $$\frac {\partial Q}{\partial w_1} = \frac {2}{l} \sum _{i=1} ^{l} x_{i1} (<w, x> -y_i)$$
# $$\frac {\partial Q}{\partial w_d} = \frac {2}{l} \sum _{i=1} ^{l} x_{id} (<w, x> -y_i)$$
#
# $$\nabla Q(w) = \frac {2}{l} X^T (Xw -y)$$
# ### Проблема локальных минимумов
# - Локальный минимум - точка, в некоторой окрестности которой нет более маленьких значений
# - Глобальный минимум - самая низкая точка функции
# - Если функция выпуклая (например MSE), то у неё один глобальный=локальный минимум
# - Другие функции потерь могут не быть выпуклыми
#
# <img src='images/local_min.png'>
#
# - Цель - найти глобальный минимум (там меньше ошибка модели), но это не всегда возможно
# - Если стартуем из неудачной точки, то находим только локальный минимум (и застреваем там)
#
# <img src='images/local_min2.png'>
#
# - Поэтому градиентный спуск находит __локальный минимум__
# - Можно использовать мулитистарт (запуск градиентного спуска из разных начальных точек)
# <img src='images/local_min3.png'>
# ### Длина шага
# $$w^t = w^{t-1} - \eta \nabla Q(w^{t-1})$$
# $\eta$ позволяет контролировать скорость обучения
# - Если сделать длину шага недостаточно маленькой, градиентный спуск может разойтись
# - Длина шага - параметр, который нужно подбирать
#
# Линия уровня - кривая, вдоль которой функция принимает одно и то же значение. Вид сверху на параболоид
#
# Вектор градиента перепендикулярен линии уровня
#
# #### Переменная длина шага
# $$w^t = w^{t-1} - \eta _t \nabla Q(w^{t-1})$$
# Длину шага можно менять в зависимости от шага
#
# Например, $$\eta _t = \frac{1}{t}$$. Чем больше итераций сделано, тем меньше шаг
#
# Или $$\eta _t = \lambda(\frac{s}{s+t})^p$$ Дополнительные параметры
#
# #### Масштабирование признаков
# - Алгоритм может разойтись, если признаки имеют разные масштабы (линии уровня вытянутые), эллипс, а не круг
#
# $$x_i ^j = \frac{x_i ^j - \mu _j}{\sigma_j}$$
# # Алгоритм
# Обновление весов:
#
# $$w^{(t+1)} = w^{(t)} - \alpha_t \nabla Q(w^{(t)}),$$
#
# $j$-ая компонента градиента:
#
# $$\frac{\partial Q(w)}{w_j} = \frac{2}{l} \sum_{i=1}^{l}x_{ij}(\langle w, x_i \rangle - y_i).$$
import matplotlib.pyplot as plt
import numpy as np
from sklearn.base import BaseEstimator
class LinearRegression(BaseEstimator):
    """Linear regression fitted with full-batch gradient descent on the MSE loss."""

    def __init__(self, epsilon=1e-4, max_steps=1000, w0=None, alpha=1e-2):
        self.epsilon = epsilon      # stopping threshold on the weight-update norm
        self.max_steps = max_steps  # maximum number of gradient steps
        self.w0 = w0                # initial weight vector (zeros if None)
        self.alpha = alpha          # learning rate (step size)
        self.w = None
        # NOTE: the history list is shared across repeated fit() calls — a refit
        # appends to the previous trajectory rather than starting fresh
        self.w_history = []

    def fit(self, X, y):
        """Run gradient descent on (X, y); records the weight trajectory in w_history.

        Stops after max_steps iterations or once the update norm drops below epsilon.
        Returns self (sklearn convention).
        """
        l, d = X.shape
        if self.w0 is None:
            self.w0 = np.zeros(d)

        self.w = self.w0

        for step in range(self.max_steps):
            self.w_history.append(self.w)

            w_new = self.w - self.alpha * self.calc_gradient(X, y)

            if np.linalg.norm(w_new - self.w) < self.epsilon:
                break

            self.w = w_new

        return self

    def predict(self, X):
        """Return predictions X @ w for each row of X."""
        if self.w is None:
            raise Exception('Not trained yet')
        # vectorized: replaces the original per-row dot-product loop
        return X.dot(self.w)

    def calc_gradient(self, X, y):
        """Return the MSE gradient (2/l) * X^T (Xw - y) at the current weights."""
        l, d = X.shape
        # vectorized: replaces the original O(l*d) Python double loop
        return (2 / l) * X.T.dot(X.dot(self.w) - y)
# ### Визуализация траектории градиентного спуска
def plot_gd(X, y, w_history):
    """Plot MSE level sets for (X, y) and the gradient-descent trajectory.

    X: np.array (l, 2) - design matrix with exactly two features
    y: np.array (l)    - targets
    w_history: sequence of np.array (2,) - weights recorded at each GD step

    NOTE(review): relies on the module-level ``w_true`` for the red star
    marking the optimum — consider passing it as a parameter.
    """
    # compute level set: MSE evaluated on a grid of candidate weight vectors
    A, B = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
    levels = np.empty_like(A)
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            w_tmp = np.array([A[i, j], B[i, j]])
            levels[i, j] = np.mean(np.power(np.dot(X, w_tmp) - y, 2))
    plt.figure(figsize=(13, 9))
    plt.title('GD trajectory')
    plt.xlabel(r'$w_1$')
    plt.ylabel(r'$w_2$')
    plt.xlim((-2.1, 2.1))
    plt.ylim((-2.1, 2.1))
    # visualize the level set
    CS = plt.contour(A, B, levels, levels=np.logspace(0, 2, num=10), cmap=plt.cm.rainbow_r)
    CB = plt.colorbar(CS, shrink=0.8, extend='both')
    # visualize trajectory
    # BUG FIX: use the w_history argument instead of the global ``lr.w_history``.
    # Previously the parameter was silently ignored, so the function always
    # plotted the trajectory of whatever ``lr`` existed in the calling scope.
    w_list = np.array(w_history)
    plt.scatter(w_true[0], w_true[1], c='r', marker='*')
    plt.scatter(w_list[:, 0], w_list[:, 1])
    plt.plot(w_list[:, 0], w_list[:, 1])
    plt.show()
# +
# Generate a synthetic 2-feature regression problem for visualizing GD.
n_features = 2
n_objects = 300
num_steps = 100  # NOTE(review): defined but not used anywhere below
np.random.seed(1)
w_true = np.random.normal(0, 0.1, size=(n_features, ))  # ground-truth weights
w_0 = np.random.uniform(-2, 2, (n_features))  # common starting point for all runs
X = np.random.uniform(-5, 5, (n_objects, n_features))
y = np.dot(X, w_true) + np.random.normal(0, 1, (n_objects))  # targets with Gaussian noise
# +
# Fit with the default learning rate and visualize the descent trajectory.
lr = LinearRegression(w0=w_0)
lr.fit(X, y)
plot_gd(X, y, lr.w_history)
# -
# ### Влияние размера шага на сходимость
# $\alpha = 0.0001$
# Very small step: convergence is slow, the trajectory creeps toward the optimum.
lr = LinearRegression(w0=w_0, alpha=0.0001)
lr.fit(X, y)
plot_gd(X, y, lr.w_history)
# $\alpha = 0.01$
# Moderate step: converges in a reasonable number of iterations.
lr = LinearRegression(w0=w_0, alpha=0.01)
lr.fit(X, y)
plot_gd(X, y, lr.w_history)
# $\alpha = 0.1$
# Large step: updates may overshoot and oscillate around the minimum.
lr = LinearRegression(w0=w_0, alpha=0.1)
lr.fit(X, y)
plot_gd(X, y, lr.w_history)
# ### Масштабирование признаков
# Функция вытянута по одной из координат (признаков)
#
# При каждом шаге вдоль антиградиента мы шагаем перпендикулярно линиям уровня
# +
X_new = X.copy()
# Stretch the second feature so the loss level sets become elongated ellipses,
# demonstrating why unscaled features slow down gradient descent.
X_new[:,1] = X_new[:,1] * 5
lr = LinearRegression(w0=w_0, alpha=0.01)
lr.fit(X_new, y)
plot_gd(X_new, y, lr.w_history)
# -
# ## Стохастический градиентный спуск SGD
# Для вычисления обычного градиента, для каждой частной производной происходит суммирование по всей обучающей выборке. Она может быть очень большой. Это нужно делать для каждого шага (на каждой итерации).
# Для этого требуется слишком много вычислений.
#
# Будем шагать не по среднему по всем градиентам, а по градиенту одного объекта (одного слагаемого из функционала ошибки).
#
# ## Алгоритм стохастического градиентного спуска
#
# 1. Начальное приближение $w^0$
# 2. Шаг алгоритма
# Повторять, каждый раз выбирая случайных объект $i_t$
# $$w^t = w^{t-1} - \eta \nabla L(y_{i_{t}}, a(x_{i_{t}}))$$
# Берём значение функции потерь на объекте $i_t$
#
# 3. Остановить процесс, если вектор весов почти не меняется $||w^t - w^{t-1}|| < \epsilon$
#
# Траектория стохастического градиентного спуска будет более ломаной, и в конце будет сложно попасть в минимум. Здесь очень важна уменьшающаяся длина шага
#
# Для сходимости SGD требуется гораздо больше итераций, чем для Full GD, но каждый шаг очень быстрый
# ### Mini-batch SGD
# То же самое, что и SGD, но шаг делается по нескольким случайным объектам (batch, пакет)
#
# SGD можно применять на больших выборках, не помещающихся в RAM
# ## Другие модификации GD
# ### Проблемы градиентного спуска:
# 1. Сложные линии уровня (если линии уровня имеют форму эллипса, первые шаги будут выглядеть как осцилляции (колебания), а не движение в направлении к минимуму). Будет траектория зигзага и много лишних итераций. Чем меньше линии уровня похожи на окружности, тем сложнее градиентному спуску двигаться. Это может произойти из-за немасштабированных признаков или сложной функции потерь.
# 2. Разная скорость сходимости по разным параметрам
#
# ### Градиентный спуск с инерцией (momentum)
# $$h_t = \alpha h_{t-1} + \eta _t \nabla Q (w^{t-1})$$
#
# $$w^t = w^{t-1} - h_t$$
#
# - $h_t$ - инерция, усредненное направление движения, обновляется на каждом шаге
# - $\alpha$ - параметр затухания (обычно 0.9), гиперпараметр
# - $\eta_t$ - длина шага
# - _Как будто шарик, который катится в сторону минимума, очень тяжёлый_
# - Копим в $h_t$ среднее значение градиента со всех прошлых шагов. Экспоненциально-затухающее среднее. Берём все прошлые значения, умножаем на $\alpha$ и прибавляем новый градиент. Чем раньше был какой-то градиент, тем меньше он будет иметь вклад в $h_t$.
# - Метод очень популярен в глубоком обучении в задачах машинного зрения
# Без инерции
#
# <img src='images/gd_momentum.png'>
#
# С инерцией
#
# <img src='images/gd_momentum2.png'>
#
# Осцилляции быстро убывают.
# ### Nesterov momentum
# $$h_t = \alpha h_{t-1} + \eta _t \nabla Q (w^{t-1} - \alpha h_{t-1})$$
#
# $$w^t = w^{t-1} - h_t$$
#
# - $w^{t-1} - \alpha h_{t-1}$ - неплохая оценка того, куда мы попадём на следующем шаге
# - шагаем в направлении $\alpha h_{t-1}$, считаем градиент в этой точке и "дошагиваем" по этому антиградиенту дальше
# #### Проблема с разреженными данными
# Например, в модели есть категориальные признаки и использовалось one-hot кодирование. По редким бинарным признакам (редкая категория) шаги могут быть очень маленькими, так как объект с редкой категорией может встретиться уже ближе к концу работы алгоритма, когда шаги очень маленькие. Веса будут настраиваться хуже.
#
# #### Проблема с разным масштабом
# Какой-то признак может меняться в диапазоне от 0 до 1, а какой-то от 0 до 1 млн. Тогда нужно шагать по каждому параметру с разной скоростью. Быстрее обновлять веса при признаке единичного масштаба и медленнее при признаке миллионного масштаба.
# ### AdaGrad
#
# $$G^ t _j = G^ {t-1} _j + (\nabla Q(w^{t-1}))^2 _j$$
#
# $$w^t _j = w^{t-1} _j - \frac{\eta _t}{\sqrt{G^t _j + \epsilon} } (\nabla Q (w^{t-1}))_j$$
#
# - В $G_j$ накапливаются квадраты градиентов по $j$ признаку, то есть частных производных по нему. Насколько сильно уже нашагали по j параметру
# - Длина шага нормируется на знаменатель. Если по этому признаку много нашагали, то знаменатель большой.
# - По каждому параметру будет своя скорость
# - Недостаток алгоритма $G_j$ может только расти на каждой итерации t
# ### RMSProp
# $$G^ t _j = \alpha G^ {t-1} _j + (1-\alpha)(\nabla Q(w^{t-1}))^2 _j$$
#
# $$w^t _j = w^{t-1} _j - \frac{\eta _t}{\sqrt{G^t _j + \epsilon} } g_{tj}$$
# - Скорость зависит только от недавних шагов
# - $\alpha$ обычно 0.9
# ### Adam
# Совмещает идею инерции и своей длины шага по каждому признаку
#
# // страшная формула
# В итоге momentum позволяет избавиться от осцилляций, а методы типа AdaGrad позволяют грамотнее задавать темп движения по каждому параметру.
# # Алгоритм
# Обновление весов:
#
# $$w^{(t+1)} = w^{(t)} - \alpha_t \nabla Q(w^{(t)}),$$
#
# $j$-ая компонента градиента:
#
# $$\frac{\partial Q(w)}{w_j} = \frac{2}{l} \sum_{i=1}^{l}x_{ij}(\langle w, x_i \rangle - y_i).$$
# # Домашнее задание
# ## Задание 1
# Реализуйте класс ```LinearRegressionSGD``` c обучением и применением линейной регрессии, построенной с помощью стохастического градиентного спуска, с заданным интерфейсом.
#
# Обратите внимание на следующие моменты:
# - Схожий класс использовался в лекции
# - Выбирайте **10** случайных сэмплов (равномерно) каждый раз.
# - Используйте параметры по умолчанию (epsilon=1e-6, max_steps=10000, w0=None, alpha=1e-8)
# - Выход из цикла осуществляется по сравнению 2-нормы разницы весов с epsilon, а функция потерь - MSE.
#
#
# Визуализируйте траекторию градиентного спуска (как в лекции)
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
np.random.seed(0)
# +
from sklearn.base import BaseEstimator
class LinearRegressionSGD(BaseEstimator):
    """Homework template: linear regression trained with stochastic gradient descent.

    Method bodies are intentionally left unimplemented for the exercise.
    """

    def __init__(self, epsilon=1e-6, max_steps=10000, w0=None, alpha=1e-8):
        """
        epsilon: threshold on the norm of the weight update (stopping criterion)
        max_steps: maximum number of gradient-descent steps
        w0: np.array (d,) - initial weights
        alpha: learning rate
        """
        self.epsilon = epsilon
        self.max_steps = max_steps
        self.w0 = w0
        self.alpha = alpha
        self.w = None        # learned weights, to be set by fit()
        self.w_history = []  # weight trajectory for visualization

    def fit(self, X, y):
        """
        X: np.array (l, d)
        y: np.array (l)
        ---
        output: self
        """
        ## On every gradient step the weights can be appended to w_history (for later plotting)
        ## For each step, sample 10 objects uniformly at random
        # TODO: implement SGD here (homework); currently a stub that learns nothing.
        return self

    def predict(self, X):
        """
        X: np.array (l, d)
        ---
        output: np.array (l)
        """
        # TODO: implement (homework); y_pred is undefined, so calling this raises NameError.
        return y_pred

    def calc_gradient(self, X, y):
        """
        X: np.array (l, d)
        y: np.array (l)
        ---
        output: np.array (d)
        """
        # TODO: implement (homework); gradient is undefined, so calling this raises NameError.
        return gradient
# -
# ## Задание 2
# Предсказать стоимость домов
#
# Сравнить алгоритмы линейной регресии с полным градиентным спуском и стохастическим градиентным спуском по MSE на тестовой выборке и времени работы алгоритма
#
# +
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in 1.2
# (ethical concerns about the dataset); this import fails on modern sklearn.
data = load_boston()
X = pd.DataFrame(data.data, columns=data.feature_names)
y = data.target
# Hold out 30% of rows as a test set; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(np.array(X), y, test_size=0.3, random_state=10)
# -
| 05_gradient_descent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# Expose four GPUs to CUDA; the `--gpu 1` flags in the %run commands below
# presumably select a device by this re-mapped index — confirm against the script.
os.environ['CUDA_VISIBLE_DEVICES']='0,1,2,3'
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 0. --b_test 0.3 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_0_bt_0_3 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 0. --b_test 0.4 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_0_bt_0_4 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 0. --b_test 0.5 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_0_bt_0_5 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 1. --b_test 0.3 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_1_bt_0_3 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 1. --b_test 0.4 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_1_bt_0_4 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 1. --b_test 0.5 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_1_bt_0_5 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 2. --b_test 0.3 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_2_bt_0_3 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 2. --b_test 0.4 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_2_bt_0_4 --gpu 1
#
# %run -p ../latent_ode_tinfocnf.py --sup False --noise_std 0.3 --a 0. --b 0.3 --noise_std_test 0.3 --a_test 2. --b_test 0.5 --adjoint False --visualize True --niters 2000 --lr 0.01 --save vis --savedir ./results_nstd_0_3_a_0_b_0_3_nstdt_0_3_at_2_bt_0_5 --gpu 1
#
| examples/.ipynb_checkpoints/main_latent_ode_baseline_changenoise-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# ## This notebook describes my solution for the Kaggle Titanic Challenge
#
# First, start with the necessary imports
# +
# Import division from future
from __future__ import division
# Basic analysis imports
import pandas as pd
from pandas import Series,DataFrame
from pandas.tools import plotting
import numpy as np
# Scikitlearn imports
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.metrics import roc_curve, auc
# Standard visualization analysis imports
import matplotlib.pyplot as plt
import seaborn as sb
sb.set_style('whitegrid')
# %matplotlib inline
# -
# Read in and combine training and test datasets so that all feature engineering is done to both equally
# Read in the raw Kaggle Titanic data.
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
# Extract and remove target data from training set
target = train.Survived
train.drop('Survived', axis=1, inplace=True)
# Combine training and testing data so feature engineering is applied to both
# identically. pd.concat replaces DataFrame.append, which was deprecated in
# pandas 1.4 and removed in 2.0 (append was just a wrapper around concat).
combined_df = pd.concat([train, test])
combined_df.reset_index(inplace=True)
combined_df.drop('index', axis=1, inplace=True)
# Take a quick look
combined_df.info()
# There are many missing values in the Age and Cabin columns as well as one in the Embarked column. To ameliorate this, when processing the raw dataset to form meaningful features for machine learning I fill in the missing values with the corresponding medians as below. Also, I form dummy variables for all categorical features.
# +
def process_features():
    """Clean the combined Titanic frame and engineer model features.

    Fills missing Age/Fare/Embarked/Cabin values, builds dummy variables for
    categorical columns, and returns the re-split (train_df, test_df).
    Operates on the module-level ``combined_df`` via ``global``.

    NOTE(review): this notebook targets Python 2 (see the print statements
    further down), which matters for the filter() call inside clean_ticket.
    """
    # Load combined dataframe
    global combined_df
    '''
    Each name starts with a title of some kind. I'll create a dummy variable for each separate title.
    Since some titles are very similar to others, I'll create a reduced list to map to.
    '''
    # NOTE(review): title_list is computed but never used afterwards.
    title_list = list(np.unique([name.split(', ')[1].split('.')[0] for name in combined_df.Name]))
    title_map = {
        'Capt': 'Military',
        'Col': 'Military',
        'Major': 'Military',
        'Don': 'Royalty',
        'Dona': 'Royalty',
        'Jonkheer': 'Royalty',
        'the Countess': 'Royalty',
        'Sir': 'Royalty',
        'Lady': 'Royalty',
        'Master': 'Royalty',
        'Dr': 'Academic',
        'Rev': 'Academic',
        'Mlle': 'Miss',
        'Miss': 'Miss',
        'Mme': 'Mrs',
        'Mrs': 'Mrs',
        'Ms': 'Mrs',
        'Mr': 'Mr'
    }
    # Create a Title column
    combined_df['Title'] = combined_df.Name.map(lambda x: x.split(', ')[1].split('.')[0])
    # Map to reduced list of titles
    combined_df.Title = combined_df.Title.map(title_map)
    # Create dummy columns for titles. ***Keep the dummy title for the time being, we'll drop in a second***
    title_dummies = pd.get_dummies(combined_df.Title, prefix='Title')
    combined_df = pd.concat([combined_df, title_dummies], axis=1)
    # Drop Name column
    combined_df.drop('Name', axis=1, inplace=True)
    '''
    I fill in the missing age values based on the median value organized by Sex, Class, and Title.
    '''
    # Fill in missing age values
    age_groups = combined_df.groupby(['Sex', 'Pclass', 'Title'])['Age'].median()
    # Define a function to map to correct median age based on sex, class, and title
    def process_age(row):
        if np.isnan(row.Age):
            age = age_groups[row.Sex, row.Pclass, row.Title]
        else:
            age = row.Age
        return age
    combined_df.Age = combined_df.apply(process_age, axis=1)
    '''
    I fill in the missing fare values based on the median value organized by Sex, Class, and Title.
    '''
    # Fill in missing fare values
    fare_groups = combined_df.groupby(['Sex', 'Pclass', 'Title'])['Fare'].median()
    def process_fare(row):
        if np.isnan(row.Fare):
            fare = fare_groups[row.Sex, row.Pclass, row.Title]
        else:
            fare = row.Fare
        return fare
    combined_df.Fare = combined_df.apply(process_fare, axis=1)
    '''
    Since the vast majority of people embarked from Southampton, I'll just assume all missing values are 'S'
    '''
    combined_df.Embarked.fillna('S', inplace=True)
    embarked_dummies = pd.get_dummies(combined_df.Embarked, prefix='Embarked')
    combined_df = pd.concat([combined_df, embarked_dummies], axis=1)
    combined_df.drop('Embarked', axis=1, inplace=True)
    '''
    I define dummy variables based on which deck they were on (which is the first letter of their cabin number).
    Also, if the cabin number is missing, then I fill it with U for unknown.
    '''
    # Fill missing cabin values with 'U' for unknown
    combined_df.Cabin.fillna('U', inplace=True)
    # Extract first letter of cabin values to act as the Deck the passenger was assigned to
    combined_df['Deck'] = combined_df.Cabin.map(lambda x: x[0])
    # Use dummy encoding
    deck_dummies = pd.get_dummies(combined_df.Deck, prefix='Deck')
    combined_df = pd.concat([combined_df, deck_dummies], axis=1)
    combined_df.drop('Cabin', axis=1, inplace=True)
    '''
    Process Sex feature
    '''
    combined_df.Sex = combined_df.Sex.map({'male':1, 'female':0})
    '''
    Create dummy features for Class
    '''
    class_dummies = pd.get_dummies(combined_df.Pclass, prefix='Pclass')
    combined_df = pd.concat([combined_df, class_dummies], axis=1)
    '''
    The prefix for the ticket number can be another identifying feature so we'll create a set of dummy variables
    for it.
    '''
    def clean_ticket(ticket):
        # Strip separators, drop purely numeric tokens, keep the first prefix token.
        ticket = ticket.replace('/', '').replace('.', '').split()
        ticket = [x.strip() for x in ticket]
        # NOTE(review): under Python 2, filter() returns a list; under Python 3 it
        # returns an iterator and len(ticket) below would raise TypeError.
        ticket = filter(lambda x: not x.isdigit(), ticket)
        if len(ticket) > 0:
            return ticket[0]
        else:
            return 'XXX'
    combined_df['Ticket'] = combined_df.Ticket.map(clean_ticket)
    ticket_dummies = pd.get_dummies(combined_df.Ticket, prefix='Ticket')
    combined_df = pd.concat([combined_df, ticket_dummies], axis=1)
    combined_df.drop('Ticket', axis=1, inplace=True)
    '''
    We can also create a family feature which can be translated into 3 separate dummy variables for single,
    small, and large family.
    '''
    combined_df['Family'] = combined_df.SibSp + combined_df.Parch
    # NOTE(review): Family is 0 for a passenger travelling alone, and family_map
    # sends 0 to 'Large'; most solutions use SibSp + Parch + 1 — confirm intent.
    def family_map(size):
        if size == 1:
            return 'Single'
        elif (size > 1) and (size <= 4):
            return 'Small'
        else:
            return 'Large'
    combined_df.Family = combined_df.Family.map(family_map)
    family_dummies = pd.get_dummies(combined_df.Family, prefix='Family')
    combined_df = pd.concat([combined_df, family_dummies], axis=1)
    # Break up the dataset again into a training and testing set
    train_df = combined_df.loc[ 0:(train.shape[0]-1) ]
    test_df = combined_df.loc[ train.shape[0]:(train.shape[0] + test.shape[0]) ]
    return train_df, test_df

# Process all features
train_df, test_df = process_features()
# -
# We can now make some visualizations of the most important features to see how they affect survival
# +
# First create a dataframe of some features for visualization and concatenate the survival results
cols = ['Pclass', 'Age', 'Fare', 'Title', 'Deck', 'Family', 'Sex']
vis_df = DataFrame(train_df, columns=cols)
vis_df['Survived'] = target
# Map numeric codes back to readable labels for plotting.
sex_map = {0:'Female', 1:'Male'}
survival_map = {0:'Perished', 1:'Survived'}
class_map = {1:'1st', 2:'2nd', 3:'3rd'}
vis_df.Sex = vis_df.Sex.map(sex_map)
vis_df.Survived = vis_df.Survived.map(survival_map)
vis_df.Pclass = vis_df.Pclass.map(class_map)
# Create visualizations
# NOTE(review): sb.factorplot was renamed to catplot in seaborn 0.9; this code
# targets an older seaborn.
sb.countplot(x='Sex', hue='Survived', data=vis_df)
plt.title('Female passengers had better chances')
plt.show()
sb.countplot(x='Pclass', hue='Survived', data=vis_df, order=['1st', '2nd', '3rd'])
plt.title('Class and survival played out exactly as expected')
plt.show()
sb.factorplot(x='Survived', y='Fare', hue='Pclass', data=vis_df, hue_order=['1st', '2nd', '3rd'])
plt.title('At least in 1st class, higher fare also increased survival chances')
plt.show()
sb.factorplot(x='Survived', y='Age', hue='Sex', data=vis_df)
plt.title('Age had a large but opposite effect depending on gender')
plt.show()
sb.countplot(x='Family', hue='Survived', data=vis_df, order=['Single', 'Small', 'Large'])
plt.title('A large family seems to have not helped')
plt.show()
sb.countplot(x='Title', hue='Survived', data=vis_df)
plt.title('Aside from gender, royalty seemed to help. Unless most royals were female?')
plt.show()
sb.countplot(x='Title', hue='Sex', data=vis_df)
plt.title('It seems most royals were male - so being royal helped regardless of gender')
plt.show()
# -
# Now I take the original combined dataframe and drop columns that aren't needed anymore and normalize features
# +
# Drop columns for which dummy columns exist from the combined dataframe
combined_df = combined_df.drop(['Title', 'Deck', 'Family', 'Pclass'], axis=1)
# Rescale all features to exist within the unit interval
# NOTE(review): dividing by the column max only maps into [0, 1] when values
# are non-negative — which holds for these features after processing.
feature_list = list(combined_df.columns)
feature_list.remove('PassengerId')
combined_df[ feature_list ] = combined_df[ feature_list ].apply(lambda x: x/x.max(), axis=0)
# Split the combined dataframe into a train and test set again
train_df = combined_df.loc[ 0:(train.shape[0]-1) ]
test_df = combined_df.loc[ train.shape[0]:(train.shape[0] + test.shape[0]) ]
# -
# Here, I perform feature selection which helps to reduce redundancy in the data, speeds up training, and reduces overfitting.
# +
# Perform feature selection using a tree-based estimator. The threshold for selection is by default set to the mean
# of the importances.
clf = ExtraTreesClassifier(n_estimators=200)
_ = clf.fit(train_df, target)
# SelectFromModel (prefit) keeps only the columns whose importance exceeds the threshold.
model = SelectFromModel(clf, prefit=True)
selected_train = model.transform(train_df)
selected_test = model.transform(test_df)
# Create a dataframe to map features to their importances
features = DataFrame()
features['feature'] = train_df.columns
features['importance'] = clf.feature_importances_
# Print out important features
features[ features.importance > np.mean(features.importance) ].sort_values(by='importance', ascending=False)
# -
# Now I perform a grid search with a 5-fold cross validation to tune the hyper parameters of a Random Forest classifier.
# +
rand_forest = RandomForestClassifier()
# Define a parameter grid to search over
param_grid = {"n_estimators": range(250,360,10), "max_depth": range(6,17,2),
              'criterion':['gini','entropy']}
# Perform 5-fold stratified cross validation
# NOTE(review): sklearn.cross_validation / grid_search and the Python 2 print
# statements below date this notebook; modern sklearn uses model_selection.
cross_validation = StratifiedKFold(target, n_folds=5)
# Perform grid search
clf = GridSearchCV(rand_forest, param_grid=param_grid,
                   cv=cross_validation, n_jobs=10,
                   verbose=10)
_ = clf.fit(selected_train, target)
print 'Best score: %.2f' % clf.best_score_
print 'Best parameters: {}'.format(clf.best_params_)
# -
# Finally, I fill a dataframe with my survival predictions and export it in csv format for submission. This result achieved an accuracy of 0.79904 on the public leaderboard for the competition.
# Build the submission frame with integer survival predictions and export to CSV.
result = clf.predict(selected_test).astype(int)
result_df = DataFrame(columns=['PassengerId', 'Survived'])
result_df.PassengerId = test_df.PassengerId
result_df.Survived = result
result_df.to_csv('../data/titanic_submission.csv', index=False)
| analysis/Titanic_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["meta", "draft"]
# # List of optimization methods
# + [markdown] tags=["hide"]
# **TODO**
# * https://en.wikipedia.org/wiki/Mathematical_optimization#Computational_optimization_techniques
#
# Matlab doc:
# * https://www.mathworks.com/examples/global-optimization/mw/globaloptim-ex14506760-optimization-of-stochastic-objective-function
# * https://fr.mathworks.com/products/global-optimization/code-examples.html
# * https://fr.mathworks.com/help/gads/examples/optimization-of-stochastic-objective-function.html
#
# Pour chaque algorithme, faire un tableau résumant ses propriétés (local/global, pour les pb continu/discrets, pour les pb differentiable ou non, pour les pb multistage ou non, stochastic/deterministe, etc.), convergences connues, conseils, l'article de ref (+ auteurs et date), ...
#
# Rediriger pour chaque algo, vers mes implémentations pyai, l'implémentation de ref., ...
# -
# ## Local optimization
# + [markdown] tags=["hide"]
# **TODO**
# * https://docs.scipy.org/doc/scipy/reference/optimize.html#local-optimization
# -
# * Gradient algorithm
# * Conjugate gradient algorithm
# * Newton method
# * Quasi-Newton method
# * [Wikipedia](https://en.wikipedia.org/wiki/Quasi-Newton_methods)
# * Broyden–Fletcher–Goldfarb–Shanno (BFGS)
# * [Wikipedia](https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93Goldfarb%E2%80%93Shanno_algorithm)
# * L-BFGS
# * [Wikipedia](https://en.wikipedia.org/wiki/L-BFGS)
# * L-BFGS-B
# * Powells' method
# * [Wikipedia](https://en.wikipedia.org/wiki/Powell%27s_method)
# * NEWUOA
# * [Wikipedia](https://en.wikipedia.org/wiki/NEWUOA)
# * Nelder-Mead
# * [Wikipedia](https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method)
# * Relaxed methods
# * Pattern search
# * [Wikipedia](https://en.wikipedia.org/wiki/Pattern_search)
# ## Global optimization
# + [markdown] tags=["hide"]
# **TODO**
# * [Evolutionary algorithms](https://en.wikipedia.org/wiki/Evolutionary_algorithm)
# * [Algorithme_Evolutionnistes](https://fr.wikipedia.org/wiki/Algorithme_%C3%A9volutionniste)
# * http://www.scholarpedia.org/article/Metaheuristic_Optimization
# * http://www.scholarpedia.org/article/Metaheuristics
# -
# * EDA
# * [Dedicated notebook](http://www.jdhp.org/docs/notebook/ai_optimization_eda_en.html)
# * Basin-Hopping
# * [Main web site](http://www-wales.ch.cam.ac.uk/)
# * [Scipy's implementation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.basinhopping.html#scipy.optimize.basinhopping)
# * Simulated Annealing
# * [Scipy's implementation](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.anneal.html)
# * [Scholarpedia](http://www.scholarpedia.org/article/Metaheuristic_Optimization#Simulated_Annealing)
# * Tabu search
# * [Wikipedia](https://en.wikipedia.org/wiki/Tabu_search)
# * [Scholarpedia](http://www.scholarpedia.org/article/Metaheuristic_Optimization#Tabu_Search)
# * Particle swarm
# * [Wikipedia](https://en.wikipedia.org/wiki/Particle_swarm_optimization)
# * [Scholarpedia](http://www.scholarpedia.org/article/Particle_swarm_optimization)
# * Ant colony optimization algorithms
# * [Wikipedia](https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms)
# * [Scholarpedia](http://www.scholarpedia.org/article/Ant_colony_optimization)
# * Bee algorithms
# * [Wikipedia](https://en.wikipedia.org/wiki/Bees_algorithm)
# * [Scholarpedia](http://www.scholarpedia.org/article/Artificial_bee_colony_algorithm)
#
# [Evolutionary algorithms](https://en.wikipedia.org/wiki/Evolutionary_algorithm):
# * CMA-ES
# * [Dedicated notebook](http://www.jdhp.org/docs/notebook/ai_optimization_cmaes_en.html)
# * SAES
# * [Dedicated notebook](http://www.jdhp.org/docs/notebook/ai_optimization_saes_en.html)
# * Differential Evolution
# * [Dedicated notebook](http://www.jdhp.org/docs/notebook/ai_optimization_differential_evolution_en.html)
| nb_sci_ai/ai_optimization_algorithms_en.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy, scipy, matplotlib.pyplot as plt, sklearn, urllib, stanford_mir, IPython.display
# %matplotlib inline
plt.rcParams['figure.figsize'] = (14, 5)
# [← Back to Index](index.html)
# # Principal Component Analysis
# Download a file:
filename = '125_bounce.wav'
url = 'http://audio.musicinformationretrieval.com/'
# NOTE(review): urllib.urlretrieve is the Python 2 API; in Python 3 this is
# urllib.request.urlretrieve.
urllib.urlretrieve(url + filename, filename=filename)
# Load a file:
# NOTE(review): librosa is used here and below but is never imported in this notebook.
x, fs = librosa.load(filename)
# Listen to the signal:
IPython.display.Audio(x, rate=fs)
# Compute some features:
X = librosa.feature.mfcc(x, sr=fs)
print(X.shape)
# Scale the features to have zero mean and unit variance:
X = sklearn.preprocessing.scale(X)
X.mean()
# Create a PCA model object.
model = sklearn.decomposition.PCA(n_components=2, whiten=True)
# Apply PCA to the scaled features:
# Transpose so rows are observations (frames) and columns are MFCC features.
model.fit(X.T)
Y = model.transform(X.T)
print(Y.shape)
# Let's see how many principal components were returned:
model.components_.shape
# Plot the two top principal components for each data point:
plt.scatter(Y[:,0], Y[:,1])
# [← Back to Index](index.html)
| pca.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Vector Space Models
# Representation text units (characters, phonemes, words, phrases, sentences, paragraphs, and documents) with vector of numbers.
# ## Basic Vectorization Approaches
# One-Hot Encoding, cons:
# 1. The size of one-hot vector is directly proportional to size of the vocabulary, and most real-world corpora have large vocabularies. This results in a sparse representation.
# 2. This representation does not give a fixed-length representation for text, i.e., if a text has 10 words, you get longer representation for it as compared to a text with 5 words.
# 3. It treats words as atomic units and has no notion of (dis)similarity between words. Semantically, very poor at capturing the meaning of the word in relation to other words.
# 4. Cannot handle ouf of vocabulary (OOV)
# +
# One-Hot Encoding
documents = ["Dog bites man.", "Man bites dog.", "Dog eats meat.", "Man eats food."]
# Lowercase and strip the trailing period from every document.
processed_docs = [doc.lower().replace(".","") for doc in documents]
print(f'Processed docs: {processed_docs}')
# build vocabulary
# Assign each distinct word a 1-based id in order of first appearance.
vocab = {}
count = 0
for doc in processed_docs:
    for word in doc.split():
        if word not in vocab:
            count += 1
            vocab[word] = count
print(f'Vocabulary: {vocab}')
# onehot vector
def get_onehot_vector(somestring):
    """Encode each token of ``somestring`` as a one-hot row over the global vocab.

    Returns a list of lists: one row of length len(vocab) per token.
    Out-of-vocabulary tokens yield an all-zero row. Vocab ids are 1-based,
    hence the -1 when indexing into the row.
    """
    vector_size = len(vocab)
    rows = []
    for token in somestring.split():
        row = [0] * vector_size
        token_id = vocab.get(token)
        if token_id is not None:
            row[token_id - 1] = 1
        rows.append(row)
    return rows
print(f'Docs 1 preprocessed: {processed_docs[0]}')
print(f'Docs 1 one hot: {get_onehot_vector(processed_docs[0])}')
# Out-of-vocabulary words ("and", "are", "good") produce all-zero rows.
print(f'One hot random text: {get_onehot_vector("man and dog are good")}')
# -
# Bag of Words,
#
# Advantages:
# 1. Simple to understand and implement
# 2. Captures the semantic similarity of documents. Because documents having the same words will have their vector representations closer to each other in Euclidean space as compared to documents with completely different words.
# 3. A fixed-length encoding for any sentence of arbitrary length
#
# Disadvantages:
# 1. The size of the vector increases with the size of the vocabulary. Sparsity problem. One way to control it is by limiting the vocabulary.
# 2. It does not capture the similarity between different words that mean the same thing.
# 3. It does not have any way to handle out of vocabulary words.
# 4. Word order information is lost
# +
# bag of words
from sklearn.feature_extraction.text import CountVectorizer
#look at the documents list
print("Our corpus: ", processed_docs)
count_vect = CountVectorizer()
#Build a BOW representation for the corpus
bow_rep = count_vect.fit_transform(processed_docs)
#Look at the vocabulary mapping
print("Our vocabulary: ", count_vect.vocabulary_)
#see the BOW rep for first 2 documents
print("BoW representation for 'dog bites man': ", bow_rep[0].toarray())
print("BoW representation for 'man bites dog: ",bow_rep[1].toarray())
#Get the representation using this vocabulary, for a new text
# Out-of-vocabulary words ("and", "are", "friends") are simply dropped.
temp = count_vect.transform(["dog and dog are friends"])
print("Bow representation for 'dog and dog are friends':", temp.toarray())
# Researchers have shown that such a representation without considering frequency is useful for sentiment analysis
# BoW with binary vectors
# binary=True records only word presence/absence (0/1) instead of counts.
count_vect = CountVectorizer(binary=True)
bow_rep_bin = count_vect.fit_transform(processed_docs)
temp = count_vect.transform(["dog and dog are friends"])
print("\nBoW with binary vectors:")
print("Bow representation for 'dog and dog are friends':", temp.toarray())
# -
# Bag of N-Grams
#
# Prons and cons:
# 1. It captures some context and word-order information in the form of n-grams
# 2. The resulting vector space is able to capture some semantic similarity.
# 3. As n increases, dimensionality (and therefore sparsity) only increases rapidly.
# 4. It still provides no way to address the OOV problem
# +
from sklearn.feature_extraction.text import CountVectorizer
#Ngram vectorization example with count vectorizer and uni, bi, trigrams
# ngram_range=(1,3) extracts unigrams, bigrams, and trigrams as features.
count_vect = CountVectorizer(ngram_range=(1,3))
#Build a BOW representation for the corpus
bow_rep = count_vect.fit_transform(processed_docs)
#Look at the vocabulary mapping
print("Our vocabulary: ", count_vect.vocabulary_)
#see the BOW rep for first 2 documents
print("BoW representation for 'dog bites man': ", bow_rep[0].toarray())
print("BoW representation for 'man bites dog: ",bow_rep[1].toarray())
#Get the representation using this vocabulary, for a new text
temp = count_vect.transform(["dog and dog are friends"])
print("Bow representation for 'dog and dog are friends':", temp.toarray())
# -
# TF-IDF
#
# It aims to quantify the importance of a given word relative to other words in the document and in the corpus.
# +
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
bow_rep_tfidf = tfidf.fit_transform(processed_docs)
#IDF for all words in the vocabulary
print("IDF for all words in the vocabulary",tfidf.idf_)
print("-"*10)
#All words in the vocabulary.
print("All words in the vocabulary",tfidf.get_feature_names())
print("-"*10)
#TFIDF representation for all documents in our corpus
print("TFIDF representation for all documents in our corpus\n",bow_rep_tfidf.toarray())
print("-"*10)
temp = tfidf.transform(["dog and man are friends"])
print("Tfidf representation for 'dog and man are friends':\n", temp.toarray())
# -
# Three fundamental drawbacks of basic vectorization approaches:
# 1. Discrete representations hamper their ability to capture relationships between words.
# 2. The feature vectors are sparse, high-dimensional representations. The high dimensionality makes them computationally inefficient.
# 3. Cannot handle OOV words.
# ## Distributed Representations
#
# Some key terms:
# - Distributional similarity, the meaning of the word can be understood from the context (connotation)
# - Distributional hypothesis, this hypothesizes that words that occur in similar context have similar meanings.
# - Distributional representation, representation schemes that are obtained based on distribution of words from the context in which the words appear. (one-hot, bag of words, bag of n-grams, TF-IDF)
# - Distributed representation, is based on the distributional hypothesis.
# - Embedding, is a mapping between vector space from distributional representation to vector space from distributed representation.
# - Vector semantics, NLP methods that aim to learn the word representations based on distributional properties of words in a large corpus.
# +
# word embedding
# RUNNING IN GOOGLE COLAB
# Word Embedding: https://colab.research.google.com/drive/1YPvwkUNPk3N7VXMsTi1VNoyEl13HXyMa?usp=sharing
# Training embedding gensim: https://colab.research.google.com/drive/11SL71Xf72CnFLNShbuuMiPY-LgIg4c3j?usp=sharing
# import warnings #This module ignores the various types of warnings generated
# warnings.filterwarnings("ignore")
# import os #This module provides a way of using operating system dependent functionality
# import psutil #This module helps in retrieving information on running processes and system resource utilization
# process = psutil.Process(os.getpid())
# from psutil import virtual_memory
# mem = virtual_memory()
# import time #This module is used to calculate the time
# +
# from gensim.models import Word2Vec, KeyedVectors
# pretrainedpath = 'temp/GoogleNews-vectors-negative300.bin.gz'
# #Load W2V model. This will take some time, but it is a one time effort!
# pre = process.memory_info().rss
# print("Memory used in GB before Loading the Model: %0.2f"%float(pre/(10**9))) #Check memory usage before loading the model
# print('-'*10)
# start_time = time.time() #Start the timer
# ttl = mem.total #Toal memory available
# w2v_model = KeyedVectors.load_word2vec_format(pretrainedpath, binary=True) #load the model
# print("%0.2f seconds taken to load"%float(time.time() - start_time)) #Calculate the total time elapsed since starting the timer
# print('-'*10)
# print('Finished loading Word2Vec')
# print('-'*10)
# post = process.memory_info().rss
# print("Memory used in GB after Loading the Model: {:.2f}".format(float(post/(10**9)))) #Calculate the memory used after loading the model
# print('-'*10)
# print("Percentage increase in memory usage: {:.2f}% ".format(float((post/pre)*100))) #Percentage increase in memory after loading the model
# print('-'*10)
# print("Numver of words in vocablulary: ",len(w2v_model.vocab)) #Number of words in the vocabulary.
# -
# spacy
import spacy
nlp = spacy.load("en_core_web_sm")
# +
print("Document After Pre-Processing:",processed_docs)
# Iterate over each document and initiate an nlp instance.
for doc in processed_docs:
doc_nlp = nlp(doc) #creating a spacy "Doc" object which is a container for accessing linguistic annotations.
print("-"*30)
print("Average Vector of '{}'\n".format(doc),doc_nlp.vector)#this gives the average vector of each document
for token in doc_nlp:
print()
print(token.text,token.vector)#this gives the text of each word in the doc and their respective vectors.
# -
# Ways to handle OOV problem for word embeddings:
# 1. Create vectors that are initialized randomly, where each component between -0.25 to +0.25.
# 2. Subword, morphological properties (prefixes, suffixes, word endings, etc), or by using character representations.
# ## Distributed Representations Beyond Words and Characters
#
# Word2vec learned representations for words, and we aggregated them to form text representations. fastText learned representations for character n-grams, which were aggregated to form word representations and then text representations.
#
# Both approaches do not take the context of words into account. For example, the sentences "dog bites man" and "man bites dog", both receive the same representation.
#
# Another approach, Doc2vec, allows us to directly learn the representations for texts of arbitrary lengths (phrases, sentences, paragraphs, and documents) by taking the context of words in the text into account.
#
# Doc2vec learns a "paragraph vector" that learns a representation for the full text.
import warnings
warnings.filterwarnings('ignore')
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
from pprint import pprint
import nltk
nltk.download('punkt')
# +
data = ["dog bites man",
"man bites dog",
"dog eats meat",
"man eats food"]
tagged_data = [TaggedDocument(words=word_tokenize(word.lower()), tags=[str(i)]) for i, word in enumerate(data)]
tagged_data
# -
#dbow
model_dbow = Doc2Vec(tagged_data,vector_size=20, min_count=1, epochs=2,dm=0)
print(model_dbow.infer_vector(['man','eats','food']))#feature vector of man eats food
model_dbow.wv.most_similar("man",topn=5)#top 5 most simlar words.
model_dbow.wv.n_similarity(["eats"],["man"])
# +
#dm
model_dm = Doc2Vec(tagged_data, min_count=1, vector_size=20, epochs=2,dm=1)
print("Inference Vector of man eats food\n ",model_dm.infer_vector(['man','eats','food']))
print("Most similar words to man in our corpus\n",model_dm.wv.most_similar("man",topn=5))
print("Similarity between man and dog: ",model_dm.wv.n_similarity(["dog"],["man"]))
# +
# OOV
# model_dm.wv.n_similarity(['covid'],['man'])
# -
# ## Universal Text Representations
#
# Words can mean different things in different context. For example
# - “I went to a **bank** to withdraw money” and
# - “I sat by the river **bank** and pondered about text representations”
#
# Contextual word representations address this issue. They use "language modeling", which is the task of predicting the next likely word in a sequence of words.
# - Transformers, BERT, ELMo, etc
#
# Important aspects to keep in mind while using them in our project:
# 1. All text representations are inherently biased based on what they saw in training data.
# - Example: An embedding model trained heavily on technology data is likely to identify Apple as being closer to Microsoft/Facebook than to an orange.
# 2. Pre-trained embeddings are generally large-sized files (several GBs), which may pose problems in certain deployment scenarios.
# 3. Modeling language for a real-world application is more than capturing the information via word and sentence embeddings.
# - Example: the task of sarcasm detection requires nuances that are not yet captured well by embedding techniques.
# 4. A practitioner needs to exercise caution and consider practical issues such as return on investment from the effort, business needs, and infrastructural constraints before trying to use them in production-grade applications
# # Visualizing Embeddings
# +
# Google Colab
# Visualizing Embeddings using TSNE:
# https://colab.research.google.com/drive/1HJ60cZe2DZdorHVHcMOHDWSqLZQuRURD?usp=sharing
# Visualizing Embeddings using Tensorboard
# https://colab.research.google.com/drive/1s2GsIztRNuSMoBHAaN1vf9ChjGCwvdZK?usp=sharing
# -
# # Handcrafted Feature Representation
#
# In many cases, we do have some domain-specific knowledge about the given NLP problem, which we would like to incorporate into the model we're building.
#
# Clearly, custom feature engineering is much more difficult to formulate compared to other feature engineering schemes we’ve seen so far. It’s for this reason that vectorization approaches are more accessible to get started with, especially when we don’t have enough understanding of the domain
| 03_Text Representation/Text Representation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><i>This notebook was put together by [<NAME>](http://www.vanderplas.com). Source and license info is on [GitHub](https://github.com/jakevdp/sklearn_tutorial/).</i></small>
# # Supervised Learning In-Depth: Support Vector Machines
# Previously we introduced supervised machine learning.
# There are many supervised learning algorithms available; here we'll go into brief detail one of the most powerful and interesting methods: **Support Vector Machines (SVMs)**.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
plt.style.use('seaborn')
# -
# ## Motivating Support Vector Machines
# Support Vector Machines (SVMs) are a powerful supervised learning algorithm used for **classification** or for **regression**. SVMs are a **discriminative** classifier: that is, they draw a boundary between clusters of data.
#
# Let's show a quick example of support vector classification. First we need to create a dataset:
# NOTE: `sklearn.datasets.samples_generator` was deprecated in scikit-learn
# 0.22 and removed in 0.24; `make_blobs` has always been importable from
# `sklearn.datasets` directly, so this path works on old and new versions.
from sklearn.datasets import make_blobs

# Two well-separated Gaussian blobs; a fixed seed keeps the figure reproducible.
X, y = make_blobs(n_samples=50, centers=2,
                  random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring');
# A discriminative classifier attempts to draw a line between the two sets of data. Immediately we see a problem: such a line is ill-posed! For example, we could come up with several possibilities which perfectly discriminate between the classes in this example:
# +
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]:
plt.plot(xfit, m * xfit + b, '-k')
plt.xlim(-1, 3.5);
# -
# These are three *very* different separators which perfectly discriminate between these samples. Depending on which you choose, a new data point will be classified almost entirely differently!
#
# How can we improve on this?
# ### Support Vector Machines: Maximizing the *Margin*
#
# Support vector machines are one way to address this.
# What support vector machines do is to not only draw a line, but consider a *region* about the line of some given width. Here's an example of what it might look like:
# +
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
yfit = m * xfit + b
plt.plot(xfit, yfit, '-k')
plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5);
# -
# Notice here that if we want to maximize this width, the middle fit is clearly the best.
# This is the intuition of **support vector machines**, which optimize a linear discriminant model in conjunction with a **margin** representing the perpendicular distance between the datasets.
# #### Fitting a Support Vector Machine
#
# Now we'll fit a Support Vector Machine Classifier to these points. While the mathematical details of the likelihood model are interesting, we'll let you read about those elsewhere. Instead, we'll just treat the scikit-learn algorithm as a black box which accomplishes the above task.
from sklearn.svm import SVC # "Support Vector Classifier"
clf = SVC(kernel='linear')
clf.fit(X, y)
# To better visualize what's happening here, let's create a quick convenience function that will plot SVM decision boundaries for us:
def plot_svc_decision_function(clf, ax=None):
    """Plot the decision function for a 2D SVC.

    Draws the decision boundary (level 0) and the two margin lines
    (levels -1 and +1) of a fitted binary SVC over the current axis limits.

    Parameters
    ----------
    clf : fitted sklearn.svm.SVC
        A trained two-class support vector classifier.
    ax : matplotlib axes, optional
        Axes to draw on; defaults to the current axes.
    """
    if ax is None:
        ax = plt.gca()
    x = np.linspace(plt.xlim()[0], plt.xlim()[1], 30)
    y = np.linspace(plt.ylim()[0], plt.ylim()[1], 30)
    Y, X = np.meshgrid(y, x)
    # Evaluate the decision function on the whole 30x30 grid in a single
    # vectorized call. The original per-point loop assigned the (1,)-shaped
    # array returned by decision_function to a scalar slot, which relies on
    # behavior deprecated in NumPy and is ~900x more calls than needed.
    P = clf.decision_function(np.c_[X.ravel(), Y.ravel()]).reshape(X.shape)
    # plot the margins
    ax.contour(X, Y, P, colors='k',
               levels=[-1, 0, 1], alpha=0.5,
               linestyles=['--', '-', '--'])
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
# Notice that the dashed lines touch a couple of the points: these points are the pivotal pieces of this fit, and are known as the *support vectors* (giving the algorithm its name).
# In scikit-learn, these are stored in the ``support_vectors_`` attribute of the classifier:
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
# Let's use IPython's ``interact`` functionality to explore how the distribution of points affects the support vectors and the discriminative fit.
# (This is only available in IPython 2.0+, and will not work in a static view)
# +
from ipywidgets import interact
def plot_svm(N=10):
    """Fit a linear SVC on the first N blob points and plot the result.

    Used with ipywidgets' ``interact`` to show how the support vectors
    (and hence the decision boundary) change as more data is revealed.
    Relies on module-level ``make_blobs``, ``SVC``, ``plt`` and
    ``plot_svc_decision_function``.
    """
    data, labels = make_blobs(n_samples=200, centers=2,
                              random_state=0, cluster_std=0.60)
    data, labels = data[:N], labels[:N]
    clf = SVC(kernel='linear').fit(data, labels)
    plt.scatter(data[:, 0], data[:, 1], c=labels, s=50, cmap='spring')
    plt.xlim(-1, 4)
    plt.ylim(-1, 6)
    plot_svc_decision_function(clf, plt.gca())
    sv = clf.support_vectors_
    plt.scatter(sv[:, 0], sv[:, 1], s=200, facecolors='none')
interact(plot_svm, N=[10, 200], kernel='linear');
# -
# Notice the unique thing about SVM is that only the support vectors matter: that is, if you moved any of the other points without letting them cross the decision boundaries, they would have no effect on the classification results!
# #### Going further: Kernel Methods
#
# Where SVM gets incredibly exciting is when it is used in conjunction with *kernels*.
# To motivate the need for kernels, let's look at some data which is not linearly separable:
# +
# NOTE: `sklearn.datasets.samples_generator` was removed in scikit-learn 0.24;
# import `make_circles` from `sklearn.datasets` directly (works on old
# versions as well).
from sklearn.datasets import make_circles

# Concentric circles: not linearly separable, so the linear kernel must fail.
X, y = make_circles(100, factor=.1, noise=.1)
clf = SVC(kernel='linear').fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
# -
# Clearly, no linear discrimination will ever separate these data.
# One way we can adjust this is to apply a **kernel**, which is some functional transformation of the input data.
#
# For example, one simple model we could use is a **radial basis function**
r = np.exp(-(X[:, 0] ** 2 + X[:, 1] ** 2))
# If we plot this along with our data, we can see the effect of it:
# +
from mpl_toolkits import mplot3d
def plot_3D(elev=30, azim=30):
    """Show the RBF-lifted data in 3D from a chosen viewpoint.

    Plots (x, y, r) where ``r`` is the radial-basis feature computed in the
    previous cell, with the viewing angle given by ``elev``/``azim``.
    Relies on module-level ``X``, ``y``, ``r`` and ``plt``.
    """
    axes3d = plt.subplot(projection='3d')
    axes3d.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='spring')
    axes3d.view_init(elev=elev, azim=azim)
    axes3d.set_xlabel('x')
    axes3d.set_ylabel('y')
    axes3d.set_zlabel('r')
interact(plot_3D, elev=(-90, 90), azip=(-180, 180));
# -
# We can see that with this additional dimension, the data becomes trivially linearly separable!
# This is a relatively simple kernel; SVM has a more sophisticated version of this kernel built-in to the process. This is accomplished by using ``kernel='rbf'``, short for *radial basis function*:
# +
clf = SVC(kernel='rbf')
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
# -
# Here there are effectively $N$ basis functions: one centered at each point! Through a clever mathematical trick, this computation proceeds very efficiently using the "Kernel Trick", without actually constructing the matrix of kernel evaluations.
#
# We'll leave SVMs for the time being and take a look at another classification algorithm: Random Forests.
| notebook/01-sklearn/03.1-Classification-SVMs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from easy21 import *
import numpy as np
# - Apply Monte-Carlo control to Easy21.
# - [x] Initialise the value function to zero.
# - [x] Use a time-varying scalar step-size of αt = 1/N(st,at)
# - [x] and an ε-greedy exploration strategy with εt = N0/(N0 + N(st)),
# - [x] where N0 = 100 is a constant,
# - [x] N(s) is the number of times that state s has been visited,
# - [x] and N(s,a) is the number of times that action a has been selected from state s.
# - [x] Feel free to choose an alternative value for N0, if it helps producing better results.
# - [x] Plot the optimal value function V ∗ (s) = maxa Q∗ (s, a) using similar axes to the following figure taken from Sutton and Barto’s Blackjack example.
# +
class MC_Agent:
    """Every-visit Monte-Carlo control agent for Easy21 (Python 2 code).

    Behaviour policy is epsilon-greedy with a visit-decaying epsilon
    eps_t = N0 / (N0 + N(s_t)); Q is updated with a time-varying step size
    alpha_t = 1 / N(s_t, a_t), as specified in the Easy21 assignment.
    Depends on `Environment` and `Actions` from the `easy21` module.
    """
    def __init__(self, environment, n0):
        # N0 constant of the epsilon schedule.
        self.n0 = float(n0)
        self.env = environment
        # N(s) is the number of times that state s has been visited
        # N(s,a) is the number of times that action a has been selected from state s.
        self.N = np.zeros((self.env.dealer_values_count,
                           self.env.player_values_count,
                           self.env.actions_count))
        # Action-value estimates Q(s, a).
        self.Q = np.zeros((self.env.dealer_values_count,
                           self.env.player_values_count,
                           self.env.actions_count))
        # self.E = np.zeros((self.env.dealer_values_count,
        #                    self.env.player_values_count,
        #                    self.env.actions_count))
        # Initialise the value function to zero.
        self.V = np.zeros((self.env.dealer_values_count, self.env.player_values_count))
        # Running win count and total number of training episodes,
        # used to report the win percentage in train().
        self.count_wins = 0
        self.iterations = 0
    # def get_action(self, s):
    #     a = Actions.hit
    #     return a
    # get optimal action, with epsilon exploration (epsilon dependent on number of visits to the state)
    # ε-greedy exploration strategy with εt = N0/(N0 + N(st)),
    def get_action(self, state):
        # Dealer/player values are 1-indexed; shift to 0-based array indices.
        dealer_idx = state.dealer-1
        player_idx = state.player-1
        # N(s): total visits to this state, summed over both actions.
        n_visits = sum(self.N[dealer_idx, player_idx, :])
        # epsilon = N0/(N0 + N(st)
        curr_epsilon = self.n0 / (self.n0 + n_visits)
        # epsilon greedy policy
        if random.random() < curr_epsilon:
            # Explore: hit or stick, uniformly at random.
            r_action = Actions.hit if random.random()<0.5 else Actions.stick
            # if (dealer_idx == 0 and player_idx == 0):
            #     print ("epsilon:%s, random:%s " % (curr_epsilon, r_action))
            return r_action
        else:
            # Exploit: greedy action w.r.t. the current Q estimates.
            action = Actions.to_action(np.argmax(self.Q[dealer_idx, player_idx, :]))
            # if (dealer_idx == 0 and player_idx == 0):
            #     print ("epsilon:%s Qvals:%s Q:%s" % (curr_epsilon, self.Q[dealer_idx, player_idx, :], action))
            return action
    def train(self, iterations):
        """Run `iterations` episodes of MC control, then refresh V from Q."""
        # Loop episodes
        for episode in xrange(iterations):
            episode_pairs = []
            # get initial state for current episode
            s = self.env.get_start_state()
            # Execute until game ends
            while not s.term:
                # get action with epsilon greedy policy
                a = self.get_action(s)
                # store action state pairs
                episode_pairs.append((s, a))
                # update visits
                # N(s) is the number of times that state s has been visited
                # N(s,a) is the number of times that action a has been selected from state s.
                self.N[s.dealer-1, s.player-1, Actions.as_int(a)] += 1
                # execute action
                s,r = self.env.step(s, a)
                #if episode%10000==0: print "Episode: %d, Reward: %d" %(episode, my_state.rew)
            self.count_wins = self.count_wins+1 if r==1 else self.count_wins
            # Update Action value function accordingly.
            # Every-visit MC: Easy21 has no intermediate rewards, so the
            # terminal reward r is the return for every visited (s, a) pair.
            for curr_s, curr_a in episode_pairs:
                # print s.dealer, s.player, s.r, a
                dealer_idx = curr_s.dealer-1
                player_idx = curr_s.player-1
                action_idx = Actions.as_int(curr_a)
                # Use a time-varying scalar step-size of αt = 1/N(st,at)
                # step = 1.0 / sum(self.N[dealer_idx, player_idx, :])
                step = 1.0 / self.N[dealer_idx, player_idx, action_idx]
                error = r - self.Q[dealer_idx, player_idx, action_idx]
                self.Q[dealer_idx, player_idx, action_idx] += step * error
        self.iterations += iterations
        # Report running win percentage (Python 2 print statement).
        print float(self.count_wins)/self.iterations*100
        # Derive value function: V(s) = max_a Q(s, a).
        for d in xrange(self.env.dealer_values_count):
            for p in xrange(self.env.player_values_count):
                self.V[d,p] = max(self.Q[d, p, :])
    def plot_frame(self, ax):
        """Plot V as a 3D surface on `ax` and return the surface handle."""
        def get_stat_val(x, y):
            # Indexing V with the integer meshgrids yields the Z surface.
            return self.V[x, y]
        X = np.arange(0, self.env.dealer_values_count, 1)
        Y = np.arange(0, self.env.player_values_count, 1)
        X, Y = np.meshgrid(X, Y)
        Z = get_stat_val(X, Y)
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
        return surf
# TODO
# add missing values
# make train so it can be run for multiple sets of iterations
# (and figure out when to compute the value function, and make sure it's done from the start of the file)
# -
# where N0 = 100 is a constant,
N0 = 100
N0 = 100
agent = MC_Agent(Environment(), N0)
for i in xrange (10):
agent.train(50000)
# +
# Feel free to choose an alternative value for N0, if it helps producing better results.
# +
# N0 = 300
# agent = MC_Agent(Environment(), N0)
# for i in xrange (10):
# agent.train(50000)
# +
# N0 = 30
# agent = MC_Agent(Environment(), N0)
# for i in xrange (10):
# agent.train(50000)
# -
# ## Plot
def animate(frame):
    """FuncAnimation callback: train the agent further and redraw V(s).

    Relies on module-level `agent`, `ax` and `fig`. (Python 2 syntax.)
    """
    i = agent.iterations
    # Train in chunks that grow with total episodes so early frames are
    # cheap and later frames show larger improvements; clamp to [1, 2**16].
    step_size = i
    step_size = max(1, step_size)
    step_size = min(step_size, 2 ** 16)
    agent.train(step_size)
    ax.clear()
    surf = agent.plot_frame(ax)
    plt.title('MC score:%s frame:%s step_size:%s ' % (float(agent.count_wins)/agent.iterations*100, frame, step_size) )
    # plt.draw()
    fig.canvas.draw()
    print "done ", frame, step_size, i
    return surf
# +
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# %matplotlib inline
N0 = 100
agent = MC_Agent(Environment(), N0)
fig = plt.figure("N100")
ax = fig.add_subplot(111, projection='3d')
ani = animation.FuncAnimation(fig, animate, 32, repeat=False)
# note: requires gif writer; swap with plt.show()
ani.save('MC_Agent.gif', writer='imagemagick', fps=3)
# plt.show()
# -
from IPython.display import Image
Image(url="MC_Agent.gif")
# +
# Feel free to choose an alternative value for N0, if it helps producing better results.
# +
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# %matplotlib inline
N0 = 1000000
agent = MC_Agent(Environment(), N0)
fig = plt.figure("N1000000")
ax = fig.add_subplot(111, projection='3d')
ani = animation.FuncAnimation(fig, animate, 50, repeat=False)
# note: requires gif writer; swap with plt.show()
ani.save('MC_Agent_N1000000.gif', writer='imagemagick', fps=3)
# plt.show()
# -
from IPython.display import Image
Image(url="MC_Agent_N1000000.gif")
# +
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# %matplotlib inline
N0 = 1
agent = MC_Agent(Environment(), N0)
fig = plt.figure("N1")
ax = fig.add_subplot(111, projection='3d')
ani = animation.FuncAnimation(fig, animate, 50, repeat=False)
# note: requires gif writer; swap with plt.show()
ani.save('MC_Agent_N1.gif', writer='imagemagick', fps=3)
# plt.show()
# -
from IPython.display import Image
Image(url="MC_Agent_N1.gif")
# +
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# %matplotlib inline
N0 = 1000
agent = MC_Agent(Environment(), N0)
fig = plt.figure("Nbest")
ax = fig.add_subplot(111, projection='3d')
ani = animation.FuncAnimation(fig, animate, 50, repeat=False)
# note: requires gif writer; swap with plt.show()
ani.save('MC_Agent_Nbest.gif', writer='imagemagick', fps=3)
# plt.show()
# -
from IPython.display import Image
Image(url="MC_Agent_Nbest.gif")
# Actions.as_int(value)
agent.V[9, 1]
stick_v = np.zeros((agent.env.dealer_values_count, agent.env.player_values_count))
hit_v = np.zeros((agent.env.dealer_values_count, agent.env.player_values_count))
actions = np.zeros((agent.env.dealer_values_count, agent.env.player_values_count))
for d in xrange(agent.env.dealer_values_count):
for p in xrange(agent.env.player_values_count):
action = Actions.to_action(np.argmax(agent.Q[d, p, :]))
value = agent.V[d, p]
if (action == Actions.stick):
stick_v[d,p] = value
hit_v[d,p] = 0
actions[d,p] = -1
else:
hit_v[d,p] = value
stick_v[d,p] = 0
actions[d,p] = 1
# +
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# %matplotlib inline
# fig = plt.figure("N100")
# ax = fig.add_subplot()
# bx = fig.add_subplot()
fig, ax = plt.subplots()
fig2, bx = plt.subplots()
fig3, cx = plt.subplots()
heatmap3 = ax.pcolor(actions, cmap=plt.cm.hot, alpha=0.8)
heatmap = bx.pcolor(stick_v, cmap=plt.cm.afmhot, alpha=0.8)
heatmap2 = cx.pcolor(hit_v, cmap=plt.cm.afmhot, alpha=0.8)
plt.show()
# -
| Joe #2 Monte-Carlo Control in Easy21/Joe #2 Monte-Carlo Control in Easy21.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# https://plot.ly/python/anova/
import plotly as py
import plotly.graph_objs as go
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
import scipy
import statsmodels
import statsmodels.api as sm
from statsmodels.formula.api import ols
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
py.offline.init_notebook_mode(connected=True)
# -
# Fetch the Moore dataset from the R "carData" package
moore_dataset = sm.datasets.get_rdataset("Moore", "carData", cache=True)
print(moore_dataset.__doc__)
moore_df = moore_dataset.data
moore_df.rename(columns={"partner.status":"partner_status"}, inplace=True)
moore_df.head(n=10)
| ML/learn/StatsModels/anova-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Control-Z Gate Sequence
# ## Introduction
#
# In this tutorial we show how to prepare the pulse sequence that generates a *Controlled - Z* gate. We will prepare our state with atoms in any of the "digital" states that we shall call $|g\rangle$ and $|h \rangle$ ( for "ground" and "hyperfine", respectively). Then we will use the *Rydberg blockade* effect to create the logic gate. The levels that each atom can take are the following:
#
# <center><img src="attachment:three_states.png" width="120"/></center>
# We will be using *NumPy* and *Matplotlib* for calculations and plots. Many additional details about the CZ gate construction can be found in [1111.6083v2](https://arxiv.org/abs/1111.6083)
import numpy as np
import matplotlib.pyplot as plt
import qutip
from itertools import product
# We import the following Classes from Pulser:
from pulser import Pulse, Sequence, Register
from pulser.devices import Chadoq2
from pulser_simulation import Simulation
from pulser.waveforms import BlackmanWaveform, ConstantWaveform
# ## 1. Loading the Register on a Device
# Defining an atom register can simply be done by choosing one of the predetermined shapes included in the `Register`class. We can also construct a dictionary with specific labels for each atom. The atoms must lie inside the *Rydberg blockade radius* $R_b$, which we will characterize by
#
# $$\hbar \Omega^{\text{Max}}_{\text{Rabi}} \sim U_{ij} = \frac{C_6}{R_{b}^6},$$
#
# where the coefficient $C_6$ determines the strength of the interaction ($C_6/\hbar \approx 5008$ GHz.$\mu m^6$). We can obtain the corresponding Rydberg blockade radius from a given $\Omega_{\text{Rabi}}^{\text{max}}$ using the `rydberg_blockade_radius()` method from `Chadoq2`. For the pulses in this tutorial, $\Omega^{\text{Max}}_{\text{Rabi}}$ is below $2\pi \times 10$ Mhz so:
# +
Rabi = np.linspace(1, 10, 10)
R_blockade = [
Chadoq2.rydberg_blockade_radius(2.0 * np.pi * rabi) for rabi in Rabi
]
plt.figure()
plt.plot(Rabi, R_blockade, "--o")
plt.xlabel(r"$\Omega/(2\pi)$ [MHz]", fontsize=14)
plt.ylabel(r"$R_b$ [$\mu\.m$]", fontsize=14)
plt.show()
# -
# Thus, we place our atoms at relative distances below $5$ µm, therefore ensuring we are inside the Rydberg blockade volume.
# Atom Register and Device
q_dict = {
"control": np.array([-2, 0.0]),
"target": np.array([2, 0.0]),
}
reg = Register(q_dict)
reg.draw()
# ## 2. State Preparation
# The first part of our sequence will correspond to preparing the different states on which the CZ gate will act. For this, we define the following `Pulse` instances that correspond to $\pi$ and $2\pi$ pulses (notice that the area can be easily fixed using the predefined `BlackmanWaveform`):
# Let us construct a function that takes the label string (or "id") of a state and turns it into a ket state. This ket can be in any of the "digital" (ground-hyperfine levels), "ground-rydberg" or "all" levels. We also include a three-atom system case, which will be useful in the CCZ gate in the last section.
def build_state_from_id(s_id, basis_name):
    """Turn a state label such as 'gh' into the corresponding qutip ket.

    Each character of `s_id` names one atom's level; the available levels
    depend on `basis_name` ("digital": g/h, "ground-rydberg": r/g,
    "all": r/g/h). Two- and three-atom registers are supported.

    Raises ValueError for an id of the wrong length, an unknown basis, or
    characters outside the chosen basis.
    """
    if len(s_id) not in {2, 3}:
        raise ValueError("Not a valid state ID string")
    level_labels = {"digital": "gh", "ground-rydberg": "rg", "all": "rgh"}
    if basis_name not in level_labels:
        raise ValueError("Not a valid basis")
    levels = level_labels[basis_name]
    valid_ids = {"".join(combo) for combo in product(levels, repeat=len(s_id))}
    if s_id not in valid_ids:
        raise ValueError("Not a valid state id for the given basis.")
    single_kets = {}
    for level_idx, label in enumerate(levels):
        single_kets[label] = qutip.basis(len(levels), level_idx)
    # The register orders atoms as 'C1'+'T'+'C2' while the id string is
    # 'C1'+'C2'+'T', so three-atom ids need the last two atoms swapped.
    order = s_id if len(s_id) == 2 else s_id[0] + s_id[2] + s_id[1]
    return qutip.tensor([single_kets[ch] for ch in order])
# We try this out:
build_state_from_id("hg", "digital")
# Let's now write the state preparation sequence. We will also create the prepared state to be able to calculate its overlap during the simulation. First, let us define a π-pulse along the Y axis that will excite the atoms to the hyperfine state if requested:
duration = 300
pi_Y = Pulse.ConstantDetuning(
BlackmanWaveform(duration, np.pi), 0.0, -np.pi / 2
)
pi_Y.draw()
# The sequence preparation itself acts with the Raman channel if the desired initial state has atoms in the hyperfine level. We have also expanded it for the case of a CCZ in order to use it below:
def preparation_sequence(state_id, reg):
    """Build (as global `seq`) the pulse sequence that prepares |state_id>.

    `state_id` is a string over {'g', 'h'} with one character per qubit in
    `reg` ('g' = ground, 'h' = hyperfine). Every atom requested in 'h'
    receives a pi-pulse along Y (`pi_Y`) on a shared local Raman channel.
    Returns the target ket (via `build_state_from_id`) so simulations can
    compute overlaps against it.
    """
    global seq
    if not set(state_id) <= {"g", "h"} or len(state_id) != len(reg.qubits):
        raise ValueError("Not a valid state ID")
    # Map position in state_id -> qubit name in the register
    # (2 qubits: control/target; 3 qubits: control1/control2/target).
    if len(reg.qubits) == 2:
        seq_dict = {"1": "target", "0": "control"}
    elif len(reg.qubits) == 3:
        seq_dict = {"2": "target", "1": "control2", "0": "control1"}
    seq = Sequence(reg, Chadoq2)
    if set(state_id) == {"g"}:
        # All atoms already start in |g>, so no pulses are needed.
        basis = "ground-rydberg"
        print(
            f"Warning: {state_id} state does not require a preparation sequence."
        )
    else:
        basis = "all"
        for k in range(len(reg.qubits)):
            if state_id[k] == "h":
                if "raman" not in seq.declared_channels:
                    # First 'h' atom: declare the local Raman channel on it.
                    seq.declare_channel(
                        "raman", "raman_local", seq_dict[str(k)]
                    )
                else:
                    # Channel already declared: retarget it to this atom.
                    seq.target(seq_dict[str(k)], "raman")
                seq.add(pi_Y, "raman")
    prep_state = build_state_from_id(
        state_id, basis
    )  # Raises error if not a valid `state_id` for the register
    return prep_state
# Let's test this sequence. Notice that the state "gg" (both atoms in the ground state) is automatically fed to the Register so a pulse sequence is not needed to prepare it.
# Define sequence and Set channels
prep_state = preparation_sequence("hh", reg)
seq.draw(draw_phase_area=True)
# ## 3. Constructing the Gate Sequence
# We apply the common $\pi-2\pi-\pi$ sequence for the CZ gate
pi_pulse = Pulse.ConstantDetuning(BlackmanWaveform(duration, np.pi), 0.0, 0)
twopi_pulse = Pulse.ConstantDetuning(
BlackmanWaveform(duration, 2 * np.pi), 0.0, 0
)
def CZ_sequence(initial_id):
    """Build the full CZ-gate sequence on the global `seq`.

    Prepares the initial state given by `initial_id`, then appends the
    pi - 2pi - pi Rydberg pulse train (control / target / control).

    Returns:
        A (prepared state, preparation duration in ns) tuple.
    """
    # State preparation (rebuilds the global `seq`).
    prep_state = preparation_sequence(initial_id, reg)
    # End of the preparation stage: latest final time over every channel
    # declared so far (0 when no preparation pulse was needed).
    end_times = (seq._last(ch).tf for ch in seq.declared_channels)
    prep_time = max(end_times, default=0)
    # Local Rydberg channel, starting on the control qubit.
    seq.declare_channel("ryd", "rydberg_local", "control")
    # pi on control -- waits for the state preparation to finish first.
    seq.add(pi_pulse, "ryd", protocol="wait-for-all")
    # 2*pi on the target qubit.
    seq.target("target", "ryd")
    seq.add(twopi_pulse, "ryd")
    # Closing pi back on the control qubit.
    seq.target("control", "ryd")
    seq.add(pi_pulse, "ryd")
    return prep_state, prep_time
# Demo: build the CZ sequence for the |gh> input and inspect it.
prep_state, prep_time = CZ_sequence(
    "gh"
)  # constructs seq, prep_state and prep_time
seq.draw(draw_phase_area=True)
print(f"Prepared state: {prep_state}")
print(f"Preparation time: {prep_time}ns")
# ## 4. Simulating the CZ sequence
# For each two-qubit basis state, run the full sequence and record the
# overlap of the final state with the prepared (initial) state.
CZ = {}
for state_id in {"gg", "hg", "gh", "hh"}:
    # Get CZ sequence
    prep_state, prep_time = CZ_sequence(
        state_id
    )  # constructs seq, prep_state and prep_time
    # Construct Simulation instance
    simul = Simulation(seq)
    res = simul.run()
    # Overlap with the initial state at every time step.
    data = [st.overlap(prep_state) for st in res.states]
    final_st = res.states[-1]
    CZ[state_id] = final_st.overlap(prep_state)
    plt.figure()
    plt.plot(np.real(data))
    plt.xlabel(r"Time [ns]")
    plt.ylabel(rf"$ \langle\,{state_id} |\, \psi(t)\rangle$")
    # Shade the state-preparation stage of the sequence.
    plt.axvspan(0, prep_time, alpha=0.06, color="royalblue")
    plt.title(rf"Action of gate on state $|${state_id}$\rangle$")
CZ
# ## 5. CCZ Gate
# The same principle can be applied for composite gates. As an application, let us construct the *CCZ* gate, which determines the phase depending on the level of *two* control atoms. We begin by reconstructing the Register:
# Atom Register and Device
# Three atoms in an approximately equilateral triangle of side ~4
# (coordinates presumably in um, per the device's convention -- verify).
q_dict = {
    "control1": np.array([-2.0, 0.0]),
    "target": np.array([0.0, 2 * np.sqrt(3.001)]),
    "control2": np.array([2.0, 0.0]),
}
reg = Register(q_dict)
reg.draw()
# Prepare |hhh> to check the 3-qubit preparation sequence.
preparation_sequence("hhh", reg)
seq.draw(draw_phase_area=True)
def CCZ_sequence(initial_id):
    """Build the CCZ-gate sequence on the global `seq` for a 3-atom register.

    Prepares the initial state given by `initial_id`, then applies pi
    pulses on both controls, 2*pi on the target, and closing pi pulses on
    the controls, so only the doubly-controlled state picks up the phase.

    Returns:
        A (prepared state, preparation duration in ns) tuple.
    """
    # State preparation (rebuilds the global `seq`).
    prep_state = preparation_sequence(initial_id, reg)
    # Latest final time across the declared channels marks the end of the
    # preparation stage (0 when nothing was added).
    prep_time = max(
        (seq._last(ch).tf for ch in seq.declared_channels), default=0
    )
    # Local Rydberg channel, initially targeting the first control atom.
    seq.declare_channel("ryd", "rydberg_local", "control1")
    # Opening pi on control1 -- waits for state preparation to finish.
    seq.add(pi_pulse, "ryd", protocol="wait-for-all")
    # Remaining train: pi(c2) - 2*pi(target) - pi(c2) - pi(c1).
    for qubit, pulse in (
        ("control2", pi_pulse),
        ("target", twopi_pulse),
        ("control2", pi_pulse),
        ("control1", pi_pulse),
    ):
        seq.target(qubit, "ryd")
        seq.add(pulse, "ryd")
    return prep_state, prep_time
# Build and inspect the CCZ sequence for the |hhh> input.
CCZ_sequence("hhh")
seq.draw(draw_phase_area=True)
# Simulate the gate on all eight 3-qubit basis states.
CCZ = {}
for state_id in {"".join(x) for x in product("gh", repeat=3)}:
    # Get CCZ sequence
    prep_state, prep_time = CCZ_sequence(state_id)
    # Construct Simulation instance
    simul = Simulation(seq)
    res = simul.run()
    # Overlap with the initial state at every time step.
    data = [st.overlap(prep_state) for st in res.states]
    final_st = res.states[-1]
    CCZ[state_id] = final_st.overlap(prep_state)
    plt.figure()
    plt.plot(np.real(data))
    plt.xlabel(r"Time [ns]")
    plt.ylabel(rf"$ \langle\,{state_id} | \psi(t)\rangle$")
    # Shade the state-preparation stage of the sequence.
    plt.axvspan(0, prep_time, alpha=0.06, color="royalblue")
    plt.title(rf"Action of gate on state $|${state_id}$\rangle$")
CCZ
# Our results are as expected: only the $|hhh\rangle$ state (which corresponds to a $111$ digital state) gets its phase flipped in sign
| tutorials/applications/Control-Z Gate Sequence.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# # Power Spectral Density of White Noise
#
# In this notebook, we will consider the relation between the power, variance, autocorrelation function and power spectral density of a filtered noise process.
#
# Typically, a random process has infinite energy but finite and nonzero power. Thus, a random process can be classified as a power signal. There is a strong relation between the autocorrelation function $R_X(\tau)$ of a **wide sense stationary** process $X(t)$ and its power spectral density $S_X(f)$: the power spectral density $S_X(f)$ is the Fourier transform of the autocorrelation function $R_X(\tau)$.
# $$
# R_X(\tau) \underset{\mathcal{F}^{-1}}{\stackrel{\mathcal{F}}{\rightleftharpoons}} S_X(f)
# $$
#
# where $\mathcal{F}$ is the Fourier transform a signal $f(t)$ defined as
#
# $$
# F(f) = \mathcal{F}[f(t)] = \int_{-\infty}^{\infty} f(t) e^{-j 2 \pi f t}dt
# $$
#
# and $\mathcal{F}^{-1}$ is the inverse Fourier transform defined as
#
# $$
# f(t) = \mathcal{F}^{-1}[F(f)] = \int_{-\infty}^{\infty} F(f) e^{j 2 \pi f t }df
# $$
#
# Once the power spectral density $S_X(f)$ of a process $X(t)$ is defined, the average power $\mathcal{P}_X$ of the random process can be found as,
#
# $$
# \mathcal{P}_X = \int_{-\infty}^{\infty} S_X(f) df
# $$
#
# Since the variance $\sigma_X^2$ is the average power of a random process, we can write
# $$
# R_X(0) = \int_{-\infty}^{\infty} S_X(f) e^{j 2 \pi f 0} df = \mathcal{P}_X = \sigma_X^2
# $$
#
# A process $X(t)$ is called white if its power spectral density $S_X(f)$ is constant for all frequency bands. Thus, the power of a white random process is infinite, which implies a true white random process cannot be generated. However, a process whose power spectral density is constant over a wide range of frequencies can be considered white. Thus, we can regard a process as white if its power spectral density is constant over a wide frequency band $[0, B]$, where $B$ is the bandwidth of the process.
#
# Consider the power spectral density $S_Y(f)$ of a random process $Y(t)$
# $$
# S_Y(f) =
# \begin{cases}
# N_0 / 2 & |f| \leq B \\
# 0 & otherwise
# \end{cases}
# $$
# The power of $Y(t)$ can be found as
# $$P_Y = N_0 B$$
# The process $Y(t)$ is a contiuous time process and need to be sampled to be numerically simulated. Let us define a random process
# $$n[k] = Y(k T_s)$$
# where $T_s$ is the sampling period. We need $n[k]$ to be an uncorrelated discrete time random process, in other words, we need the autocorrelation function $R_n[k]$ to be an impulse-like function. To calculate the autocorrelation function $R_n[k]$ of the discrete time process $n[k]$, we first calculate the autocorrelation function $R_Y(\tau)$ of the continuous time random process $Y(t)$. We have
# $$
# R_Y(\tau) = \mathcal{F}^{-1}[S_Y(f)] = N_0 B sinc(2 B \tau)
# $$
# where
# $$sinc(t) = \dfrac{sin(\pi t)}{\pi t}$$
# Since $n[k] = Y(k T_s)$ we have,
# $$R_n[k] = R_Y(k Ts) = N_0 B sinc(2 B k T_s), \; k = 1, 2, \ldots$$
# If sampling is performed such that
# $$T_s = \dfrac{m}{2B}, \; m = 1, 2, \ldots$$
# we have
# $$R_n[k] = 0, \; k = 1, 2, \ldots$$
# Thus the noise samples $n[k]$ are uncorrelated from each other and the continuous time noise process $Y(t)$ can be recovered from its samples $n[k]$.
#
# After this theoretical background, we are ready for the simulation of a continuous time noise process whose power spectral density is constant and equals $N_0 / 2$ for a frequency range of $[-B, B]$, where $B$ is the bandwidth of the process. From the above discussion, we found that
# $$B = \dfrac{fs}{2}$$
# and
# $$P_n = N_0 B$$
# +
# Load packages
using FFTW
using Plots
using DSP
using Statistics
# Construct noise signal
N0 = 50. # Single sideband power spectral density [power/Hz]
fs = 100 # Sampling frequency [Hz]
ts = 1 / fs # Sampling time [s]
ln = 2^10 # Number of noise samples
tn = (0 : ln - 1) * ts # Time vector [s]
# Scaling sqrt(N0/2 * fs) gives sample variance N0 * fs / 2, i.e. the
# power N0 * B of the process with bandwidth B = fs / 2.
n = sqrt(N0 / 2 * fs) * randn(ln) # Sampled noise
# Plot the noise
plot(tn, n)
xlabel!("Time [seconds]")
ylabel!("n(t)")
# -
# Note the variance $\sigma_n^2$ of the sampled noise is $N_0 \dfrac{fs}{2} = 2500$ as explained above. We can check the variance of the noise samples
# Sample variance of the noise; should be close to N0 * fs / 2
var(n)
# which implies we are quite good. Note that, actually, we plotted the continuous time random process $Y(t)$ from its samples $n[k]$. We can calculate the autocorrelation function $R_Y(\tau)$ of $Y(t)$. We found that $R_Y(\tau) = N_0 B sinc(2 B \tau)$
# +
# Compute autocorrelation function.
# `xcorr` returns raw correlation sums; dividing by (ln - 1) turns them
# into the autocorrelation estimate R_n[k] (see normalization note below).
Rn = xcorr(n, n) / (ln - 1)
τ = (-ln + 1 : ln - 1) * ts # Lag axis [s]
lr = length(Rn) # Number of lags (2*ln - 1)
# Plots
plot(τ, Rn)
xlabel!("τ [seconds]")
ylabel!("Rn(τ)")
# -
# Note that to calculate $R_Y(\tau)$ we used the `xcorr` function, which calculates the autocorrelation function $R_n[k]$ of the sampled noise $n[k]$. However, we want to calculate the autocorrelation function $R_Y(\tau)$. Considering that the Gaussian process is ergodic, we have
# $$
# R_Y(\tau) = E[n(t)n(t + \tau)] = \dfrac{1}{T_d} \int_{0}^{T_d} n(t)n(t + \tau) dt \approx \dfrac{1}{T_d} \sum_{l = 1}^{L - 1} n[l]n[l + k] T_s = \dfrac{1}{L - 1} \sum_{l = 1}^{L - 1} n[l]n[l + k]
# $$
# where $E[X]$ denotes the expected value of the random process $X$ and $T_d$ is the duration of the random process $Y(t)$. Hence, we normalized the result of the `xcorr` function by the term `ln - 1`, where `ln` is the number of samples $n[k]$ taken from the continuous time process $Y(t)$.
#
# We have defined the power spectral density $S_Y(f)$ of random process $Y(t)$ to be the Fourier transform of its autocorrelation function $R_Y(\tau)$. From the properties of the autocorrelation function $R_Y(\tau)$, it can be shown that power spectral density $S_Y(f)$ is a real function of $f$. `fft` always returns complex valued signals. Thus, to get a real $S_n(f)$, we have to take the magnitude of output of the `fft` function.
# +
# Fourier transform of the autocorrelation function; the factor ts turns
# the DFT sum into an approximation of the continuous-time Fourier integral.
xrf = fft(Rn) * ts
psdr = abs.(xrf) # S_n(f) is real, so take the magnitude of the complex FFT output
ffr = (0 : 1 / (lr - 1) : 1) * fs # Frequency axis [Hz]
Δfr = fs / (lr - 1) # Frequency resolution [Hz]
# Riemann-sum approximation of the total power over [0, fs)
Pfr = sum(psdr[1 : end - 1]) * Δfr
@show Pfr
# Plot
plot(ffr, psdr)
xlabel!("Frequency [Hz]")
ylabel!("Sn(f)")
# -
# Note that the noise average power calculated using $S_n(f)$ is quite close to the sample variance $\sigma_n^2 = 2512.9183244388337$ of the sampled noise (whose theoretical value is $N_0 f_s / 2 = 2500$).
# Another method for the calculation of the power spectral density uses a sample waveoform of the process directly. The power spectal density $S_X(f)$ of a continous time random process $X(t)$ is defined as
# $$
# S_x(f) = \lim_{T \mapsto \infty} \dfrac{|X(f)|^2}{T}
# $$
# Thus, $S_n(f)$ can be computed numerically as
# $$
# S_n(f) \approx \dfrac{|X(f)|^2}{T} = \dfrac{|X(f)|^2}{(l - 1) T_s}
# $$
# and the total average power $\mathcal{P}_n$ can be calculated as
# $$
# \mathcal{P}_n = \int_{0}^{fs} S_n(f) df = \sum_{n = 1}^{l - 1} \dfrac{|X(f)|^2}{(l - 1) T_s} \Delta f
# $$
# where $\Delta f = \dfrac{f_s}{l - 1}$ is the frequency resolution of the `fft` operation.
# +
# Compute power spectral density directly from the sample waveform
# (periodogram): S_n(f) ~ |X(f)|^2 / T with T = (ln - 1) * ts.
xfn = fft(n) * ts
psdn = abs.(xfn).^2 / (ln - 1) / ts
ffn = (0 : 1 / (ln - 1) : 1) * fs # Frequency axis [Hz]
Δf = fs / (ln - 1) # Frequency resolution [Hz]
# Riemann-sum approximation of the total power over [0, fs)
Pfn = sum(psdn[1 : end - 1]) * Δf
@show Pfn
# Plots
plot(ffn, psdn)
xlabel!("Frequency [Hz]")
ylabel!("Sn(f)")
# -
# Notice that the power computed usign the noise sample waveform directly is quite close the theoretical value $\sigma_n^2 = 2512.9183244388337$.
#
# As a last step we compute the power in the time domain using the noise samples diretly.
# Time-domain check: average power = mean of the squared noise samples.
Pt = sum(n[1 : ln - 1].^2) / (ln - 1)
# Again the power calculated in the time domain is close to its theoretical value $\sigma_n^2 = 2512.9183244388337$
| notebooks/power_spectral_density_of_white_noise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Sentiment Analysis in Keras
# In this notebook we want to train a neural network in Keras in order to predict the sentiment of a movie review, i.e. whether it is positive or negative. For the purpose, the imdb movie review dataset available in Keras is used. This problem has been widely addressed and is one of the example use cases of Keras, useful to experiment and learn.
# - We rely on the examples provided by the Coursera course https://www.coursera.org/learn/ai/lecture/hQYsN/recurrent-neural-networks, https://machinelearningmastery.com/sequence-classification-lstm-recurrent-neural-networks-python-keras/ and http://www.samyzaf.com/ML/imdb/imdb.html
# - Embeddings are used to represent words and provide them as input to the neural network: https://en.wikipedia.org/wiki/Embedding, https://en.wikipedia.org/wiki/Word_embedding, https://keras.io/layers/embeddings/
# - Convolutional neural nets were shown leading good results in spite of small network structure: http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/, https://github.com/alexander-rakhlin/CNN-for-Sentence-Classification-in-Keras/blob/master/docs/1408.5882v2.pdf, http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
# - A more complete overview at http://cs224d.stanford.edu/syllabus.html
# +
from keras.datasets import imdb
# select max_features most common items from vocabulary
max_features = 20000
top_words = max_features  # alias used by imdb.load_data below
max_review_length = 500  # maxlen filters out longer reviews at load time
# Reviews come pre-encoded as integer word indices; start_char, oov_char and
# index_from reserve indices 1, 2 and 3+ respectively (keras defaults).
(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
                                                      num_words=top_words,
                                                      skip_top=0,
                                                      maxlen=max_review_length,
                                                      seed=113,
                                                      start_char=1,
                                                      oov_char=2,
                                                      index_from=3)
# -
# Sanity-check the split shapes (Python 2 print statements, per the kernel).
print x_train.shape
print y_train.shape
print x_test.shape
print y_test.shape
# Since the reviews might have different lenght and we have a fixed input for our network to train, we use padding to create sub-sequences of fixed length, i.e. if the raw sequence is longer than the set size it will truncate it, while if shorter than the set size the padding will add zeros to fill the space.
# +
from keras.preprocessing import sequence
# Pad/truncate every review to exactly max_review_length tokens so the
# network receives fixed-size input.
x_train = sequence.pad_sequences(x_train, maxlen=max_review_length)
print x_train.shape
x_test = sequence.pad_sequences(x_test, maxlen=max_review_length)
print x_test.shape
# -
# For the model we use:
# 1. An input Embedding layer using 128-length vectors to represent each word.
# 2. An LSTM layer with 128 memory units
# 3. A Dense output layer with a single neuron, whose sigmoid activation will define the belonging class for the review
# +
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense
#from keras.layers.embeddings import Embedding
embedding_vector_length = 128  # NOTE(review): defined but the literal 128 is passed below
model = Sequential()
# input layer: maps each of max_features word ids to a 128-dim dense vector
model.add(Embedding(max_features,128
                    #input_dim=top_words,
                    #output_dim=embedding_vector_length,
                    #input_length=max_review_length
                    ))
# hidden layer: LSTM with 128 memory units (dropout options kept commented
# for experimentation)
model.add(LSTM(units=128
               #units=128
               #dropout=0.2,
               #recurrent_dropout=0.2
               ))
# output layer: single sigmoid unit -> probability of positive sentiment
model.add(Dense(1,
                activation='sigmoid'))
# Binary cross-entropy for the two-class problem; SGD optimizer chosen here
# (Adam kept as a commented alternative).
model.compile(loss='binary_crossentropy',
              #optimizer='adam',
              optimizer='sgd',
              metrics=['accuracy'])
print(model.summary())
# -
# Set the seed and train the model:
# +
# fix random seed for reproducibility
# NOTE(review): the seed is set after the model was already built above, so
# the weight initialization is not covered by it -- move this earlier for
# full reproducibility.
import numpy
numpy.random.seed(7)
# define the batch size, ie. the size of the set (or number of observations) used between weight updates
# this is a tradeoff as bigger batch sizes allow for learning bigger dependencies across data samples
# although brings in greater computational complexity
# viceversa smaller batch sizes are lighter to handle and might lead to better weight updates for non-sequential data
batch_size = 32
model.fit(x_train,
          y_train,
          validation_data=(x_test, y_test),
          epochs=1,
          batch_size=batch_size)
# -
# Evaluate the model:
# Evaluate on both splits; evaluate returns (loss, metric) in compile order.
loss, accuracy = model.evaluate(x_train, y_train, batch_size=batch_size)
print("Training: accuracy = %f ; loss = %f" % (accuracy, loss))
loss, accuracy = model.evaluate(x_test, y_test, batch_size=batch_size)
print("Testing: accuracy = %f ; loss = %f" % (accuracy, loss))
# Save the model:
model.save("imdb.h5")  # HDF5 file with architecture, weights and optimizer state
# !ls
# which can be reloaded as:
from keras.models import load_model
model = load_model("imdb.h5")
| imdb_sentiment/imdb_sa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''cvision'': conda)'
# name: python3
# ---
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# # Abrindo datasets
# List the pickled feature datasets present in the working directory.
files = list(filter(lambda x: x.endswith('pkl'), os.listdir()))
features = ['sobel', 'glcm', 'lbp']  # feature columns used from each dataset
datasets = {}
# +
for file in files:
    # Key each dataset by its file name without the extension.
    datasets[file.split('.')[0]] = pd.read_pickle(file)
for dataset in datasets.keys():
    # Sort by label so rows align across datasets when stacked below.
    datasets[dataset] = datasets[dataset].sort_values('label')
# -
# # Merging the features
# Stack every (dataset, feature) column side by side; class labels are taken
# from the 'gaussian' dataset (rows were aligned by the sort above).
X, y = np.column_stack([datasets[x][y].tolist() for x in datasets.keys() for y in features]), datasets['gaussian']['class']
print(len(X), len(X[0]), X.shape, y.shape)
print(len(datasets['gaussian']['lbp'][0].tolist()),len(datasets['gaussian']['glcm'][0].tolist()),len(datasets['gaussian']['sobel'][0].tolist()))
# Expected flattened length: (lbp + glcm + sobel sizes) x number of datasets.
(17+100+22500)*7
# ### StratifiedKFold para dividir os conjuntos de treino / teste - https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html
# StratifiedKFold keeps the class proportions in every fold.
skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=42)
# # SVM
# Train and evaluate an SVM on each fold, trying three kernels.
for i, (train_index, test_index) in enumerate(skf.split(X, y)):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    print(i)  # fold index
    for kernel in ['linear', 'rbf', 'poly']:
        svm = SVC(random_state=42, kernel=kernel)
        svm.fit(X_train, y_train)
        y_hat = svm.predict(X_test)
        print(kernel)
        print(classification_report(y_test, y_hat))
        print()
# # MLP
# Same cross-validation protocol with an MLP, sweeping the hidden-layer
# activation function.
for i, (train_index, test_index) in enumerate(skf.split(X, y)):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    print(i)  # fold index
    for activation in ['identity', 'logistic', 'tanh', 'relu']:
        mlp = MLPClassifier(random_state=42, max_iter=200,early_stopping=True, activation=activation)
        mlp.fit(X_train, y_train)
        y_hat = mlp.predict(X_test)
        print(activation)
        print(classification_report(y_test, y_hat))
        print()
| Roteiros/classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Certification Project by <NAME>
#
# ## Part 1 - Data Exploration, Feature Selection
# In this module we use Spark in conjunction with some popular Python libraries
# to explore data and select features we will use in the next module which is model training,
# scoring and evaluation.
# ## Setup
# Import some useful packages and modules:
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# +
# Create a SparkSession:
spark = SparkSession.builder.master("local").appName("creditcard_explore01").getOrCreate()
# Copy credit card data from CDSW project to HDFS
# #!hdfs dfs -mkdir creditcardfraud
# #!hdfs dfs -put data/creditcard.csv creditcardfraud
# Load the creditcard data from HDFS:
# inferSchema=True makes Spark scan the data to guess column types --
# convenient, but slower than the explicit schema defined in the next cell.
df = spark.read.csv("creditcardfraud/", sep=",", header=True, inferSchema=True)
# Show first 5 lines to see if the delimited lines have been read properly
df.show(5)
# +
# And Print schema
df.printSchema()
# Define a new schema explicitly so column types are exact rather than inferred.
from pyspark.sql.types import *
# V1-V28 are anonymised numeric features (presumably PCA components, per the
# usual credit-card fraud dataset -- verify against its documentation).
schema = StructType([
    StructField("Time", DoubleType()),
    StructField("V1", DoubleType()),
    StructField("V2", DoubleType()),
    StructField("V3", DoubleType()),
    StructField("V4", DoubleType()),
    StructField("V5", DoubleType()),
    StructField("V6", DoubleType()),
    StructField("V7", DoubleType()),
    StructField("V8", DoubleType()),
    StructField("V9", DoubleType()),
    StructField("V10", DoubleType()),
    StructField("V11", DoubleType()),
    StructField("V12", DoubleType()),
    StructField("V13", DoubleType()),
    StructField("V14", DoubleType()),
    StructField("V15", DoubleType()),
    StructField("V16", DoubleType()),
    StructField("V17", DoubleType()),
    StructField("V18", DoubleType()),
    StructField("V19", DoubleType()),
    StructField("V20", DoubleType()),
    StructField("V21", DoubleType()),
    StructField("V22", DoubleType()),
    StructField("V23", DoubleType()),
    StructField("V24", DoubleType()),
    StructField("V25", DoubleType()),
    StructField("V26", DoubleType()),
    StructField("V27", DoubleType()),
    StructField("V28", DoubleType()),
    StructField("Amount", DoubleType()),
    StructField("Class", IntegerType())  # label column (Class == 1 flagged as fraud below)
])
# Re-read with the explicit schema (avoids a second inference pass).
df = spark \
    .read \
    .format("csv") \
    .option("sep", ",") \
    .option("header", True) \
    .schema(schema) \
    .load("creditcardfraud/creditcard.csv")
df.describe("Time","Amount","Class").show()
# -
# Run some basic checks on the data - any NULL values?
# If the counts below match the original describe() output, nothing was dropped.
df_nonull = df.dropna()
df_nonull.describe("Time","Amount","Class").show()
# +
# Add a new Category Column "Fraud"
df2 = df.withColumn("Fraud", df.Class == 1)  # boolean flag derived from Class
# Describe the new DataFrame
df2.select("Time", "V1", "V2", "Amount", "Class", "Fraud").show(5)
#df2.describe("Time", "V1", "V2", "Amount", "Class").show()
# Load into Panda Dataframe to visualize summary better.
# NOTE(review): toPandas() collects the full dataset to the driver -- fine
# here, but watch driver memory on larger data.
pdf = df2.toPandas()
pdf.describe()
# +
# Time Column - View distribution
# Plot Time with normal, and plot Time with fraud
# sns.distplot(pdf["Time"], kde=False)
# sns.distplot(pdf["Time"][pdf.Class == 0], kde=False)
# sns.distplot(pdf["Time"][pdf.Class == 1], kde=False)
# Filter "Normal" DataFrame where Class == 0
# and filter "Fraudulent" DataFrame where Class == 1
pdf_normal = pdf[pdf.Class == 0]
# pdf_normal.count()
# Plot distribution of Normal transactions
# NOTE(review): seaborn renamed `size` to `height` in 0.9 and later removed
# `size`; update this call if running on a modern seaborn -- verify the
# installed version.
sns.jointplot(x="Time", y="Amount", data=pdf_normal, size=12, kind="reg")
# -
| 07_jupyter_explore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python(scRFE1)
# language: python
# name: scrfe1
# ---
# # scRFE giniPlots code
# ## Setup
# +
import scanpy as sc
import pandas as pd
sc.settings.verbosity = 3
# +
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# show figures in the notebook
# %matplotlib inline
# sc.settings.set_figure_params(dpi=300) # save umaps as non-vectorial hi-res images
# # plt.rcParams['figure.figsize']=(10,10) # make figures square if not using the setting above
# show figures in the notebook
# %matplotlib inline
# -
import sys
import os
# ## Make plots for facsTissAge5000
# Tissues present in the FACS dataset.
facsTissueList = ['SCAT','Large_Intestine','Diaphragm','Marrow','Mammary_Gland','Pancreas',
                  'Lung', 'Bladder', 'MAT','Tongue','Spleen','Skin','Limb_Muscle','Trachea',
                  'GAT','Thymus','Liver','Kidney','Heart','Brain_Myeloid','Brain_Non-Myeloid',
                  'Aorta','BAT']
facsTissAgetfsResults = {}
# Load the per-tissue scRFE results (CSVs named <tissue>_facs_tf_age_5000.csv).
for t in facsTissueList:
    facsTissAgetfsResults[t] = pd.read_csv(t+'_facs_tf_age_5000.csv')
facsTissAgetfsResults
def scatterplotResults (scRFEres, tissueList, label, seq, nEst):
    """Scatter-plot the top-20 genes vs. their gini importance per tissue.

    For every tissue in `tissueList`, plots gene name against gini score
    from that tissue's scRFE results table, prints the tissue name as a
    progress indicator, and saves each figure as
    <tissue>_<label>_<seq>_<nEst>.png in the current directory.
    """
    plotRes = {}
    for tissue in tissueList:
        plt.figure()
        print(tissue)
        results = scRFEres[tissue]
        top_gini = results[label + '_gini'][0:20]
        top_genes = results[label][0:20]
        ax = sns.scatterplot(x=top_gini, y=top_genes)
        # Highest-ranked gene at the top of the y axis.
        ax.invert_yaxis()
        ax.set(xlabel=tissue + '_' + label + '_gini', ylabel = tissue + '-' + label + '_gene')
        title = tissue + '_' + label + '_' + seq + '_' + nEst
        ax.set_title(title)
        plt.savefig(title + '.png')
        plotRes[tissue] = ax
# cd /Users/madelinepark/desktop/scRFEfigs/facsTissAgetfPlot
# Generate and save the 3-month plots for every FACS tissue loaded above.
scatterplotResults(scRFEres = facsTissAgetfsResults, tissueList = facsTissueList,
                   label='3m', seq='facs', nEst='5000')
#before running, check what labels your csv has (ex: some tissues don't have certain ages)
# ## Make plots for dropletTissAge5000
# https://seaborn.pydata.org/generated/seaborn.scatterplot.html
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
# tips = sns.load_dataset("tips")
# ax = sns.scatterplot(x="total_bill", y="tip", data=tips)
# NOTE(review): `Marrow` is not defined anywhere in this notebook -- this
# cell presumably relied on a DataFrame loaded in an earlier session; verify
# before running.
ax = sns.scatterplot(x=Marrow['18m_gini'][0:20], y=Marrow['18m'][0:20] )
ax.invert_yaxis()  # highest-ranked gene at the top
ax.set(xlabel="Marrow_18m_gini", ylabel = "Marrow_18m_gene")
ax.set_title('Marrow_18m_Droplet_5000')
# +
# cd /users/madelinepark/Desktop
# -
# dict with name of tissue and corresponding results csv
# (commented entries presumably lack results files or the needed age labels
# -- verify before re-enabling them)
dropletTissAgetfsResults = {
    'Marrow': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Marrow_droplet_tf_age_5000.csv'),
    'Lung': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Lung_droplet_tf_age_5000.csv'),
    'Bladder': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Bladder_droplet_tf_age_5000.csv'),
    'Tongue': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Tongue_droplet_tf_age_5000.csv'),
    'Trachea': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Trachea_droplet_tf_age_5000.csv'),
    # 'Large_Intestine': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Large_Intestine_droplet_tf_age_5000.csv'),
    'Mammary_Gland': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Mammary_Gland_droplet_tf_age_5000.csv'),
    # 'Fat': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Fat_droplet_tf_age_5000.csv'),
    # 'Pancreas': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Pancreas_droplet_tf_age_5000.csv'),
    # 'Skin': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Skin_droplet_tf_age_5000.csv'),
    'Liver': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Liver_droplet_tf_age_5000.csv'),
    'Kidney': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Kidney_droplet_tf_age_5000.csv'),
    'Heart_and_Aorta': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Heart_and_Aorta_droplet_tf_age_5000.csv'),
    'Limb_Muscle': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Limb_Muscle_droplet_tf_age_5000.csv'),
    'Spleen': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Spleen_droplet_tf_age_5000.csv'),
    'Thymus': pd.read_csv('/Users/madelinepark/Downloads/dropletTissAgetfsResults/Thymus_droplet_tf_age_5000.csv')
}
# cd /Users/madelinepark/Desktop/scRFEfigs/dropletTissAgetfPlot/
# NOTE(review): duplicate of the `scatterplotResults` defined earlier in
# this notebook; consider keeping a single definition.
def scatterplotResults (scRFEres, tissueList, label, seq, nEst):
    """Scatter-plot the top-20 genes vs. their gini importance per tissue.

    Saves each figure as <tissue>_<label>_<seq>_<nEst>.png in the cwd.
    """
    plotRes = {}
    for t in tissueList:
        plt.figure()
        print(t)  # progress indicator
        plotRes[t] = sns.scatterplot(x=scRFEres[t][label+'_gini'][0:20], y=scRFEres[t][label][0:20])
        plotRes[t].invert_yaxis()  # highest-ranked gene at the top
        plotRes[t].set(xlabel=t+'_'+label+'_gini', ylabel = t+'-'+label+'_gene')
        plotRes[t].set_title(t+'_'+label+'_'+seq+'_'+nEst)
        plt.savefig(t+'_'+label+'_'+seq+'_'+nEst+'.png')
# +
# Restricted to the tissues whose results were loaded above (full list kept
# commented for reference).
# dropletTissueList = ['Marrow','Lung','Bladder','Tongue','Trachea','Large_Intestine','Mammary_Gland',
#                      'Fat','Pancreas','Skin','Liver','Kidney','Heart_and_Aorta','Limb_Muscle',
#                      'Spleen','Thymus']
dropletTissueList = ['Marrow','Lung','Bladder','Tongue','Trachea','Mammary_Gland',
                     'Liver','Kidney','Heart_and_Aorta','Limb_Muscle',
                     'Spleen','Thymus']
# -
# Generate and save the 3-month plots for every droplet tissue.
scatterplotResults(scRFEres = dropletTissAgetfsResults, tissueList = dropletTissueList,
                   label='3m', seq='droplet', nEst='5000')
#before running, check what labels your csv has (ex: some tissues don't have certain ages)
| scripts/understandingResults/giniPlotsv1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Conduct a force controlled analysis of a uniaxial material
#
# Author: <NAME>
#
# Note: The force controlled uniaxial driver has been implemented as a function in the Object-oriented OpenSEES library,
# o3seespy (`o3seespy.tools.run_uniaxial_force_driver()`) and displacement
# controlled (`o3seespy.tools.run_uniaxial_disp_driver()`)
#
# ### Load inline plotting and auto-reload commands
# + pycharm={"name": "#%%\n", "is_executing": false}
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
# ## Load the libraries needed for the analysis
# + pycharm={"name": "#%%\n", "is_executing": false}
import matplotlib.pyplot as plt
import engformat as ef
import numpy as np
import o3seespy as o3
import o3seespy.__about__
print('o3seespy version: ', o3seespy.__about__.__version__)
# -
# ## Define properties for SteelMPF material and forces
# + pycharm={"name": "#%%\n", "is_executing": false}
# Material and loading parameters
fy = 1450  # N (yield force)
k = 5e4  # N/m (initial stiffness)
el_y = fy / k  # yield displacement [m]
b = 0.001  # post-yield stiffness ratio (hardening)
forces = np.array([1200, -10, 1400, 300, 800])  # N - force targets, applied in order
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Run a uniaxial driver
# + pycharm={"name": "#%%\n", "is_executing": false}
# control parameters
d_step = 0.0001  # displacement increment per analysis step [m]
max_steps=10000  # cap on analysis steps per force target
handle='silent'  # behaviour when a target is missed: 'silent', 'warn' or raise
# 1D model: two coincident nodes joined by a zero-length element whose
# uniaxial SteelMPF material carries the whole response.
osi = o3.OpenSeesInstance(ndm=1)
left_node = o3.node.Node(osi, 0, 0)
right_node = o3.node.Node(osi, 0, 0)
o3.Fix1DOF(osi, left_node, o3.cc.FIXED)  # reaction end
o3.Fix1DOF(osi, right_node, o3.cc.FREE)  # loaded end
mat_obj = o3.uniaxial_material.SteelMPF(osi, fy, fy, k, b, b,
                                        r0=10, c_r1=0.99, c_r2=0.15)
ele = o3.element.ZeroLength(osi, [left_node, right_node], mats=[mat_obj], dirs=[o3.cc.DOF2D_X], r_flag=1)
# Static analysis set-up: displacement control on the free node.
o3.constraints.Plain(osi)
o3.numberer.RCM(osi)
o3.system.BandGeneral(osi)
o3.test_check.NormDispIncr(osi, 0.002, 10, p_flag=0)
o3.algorithm.Newton(osi)
o3.integrator.DisplacementControl(osi, right_node, o3.cc.X, 0.0001)
o3.analysis.Static(osi)
ts_po = o3.time_series.Linear(osi, factor=1)
o3.pattern.Plain(osi, ts_po)
o3.Load(osi, right_node, [1.0])  # unit reference load scaled by the integrator
react = 0
disp = [0]
reacts = [react]
# Sign of each load excursion relative to the previous target decides the
# direction the displacement is stepped in.
diffs = np.diff(forces, prepend=0)
orys = np.where(diffs >= 0, 1, -1)
for i in range(len(forces)):
    ory = orys[i]
    # Re-issue the integrator with the step sign for this excursion.
    o3.integrator.DisplacementControl(osi, right_node, o3.cc.X, -d_step * ory)
    for j in range(max_steps):
        # March until the reaction passes the target (in the signed sense).
        if react * ory < forces[i] * ory:
            o3.analyze(osi, 1)
        else:
            break
        o3.gen_reactions(osi)
        react = o3.get_ele_response(osi, ele, 'force')[0]
        reacts.append(react)
        end_disp = -o3.get_node_disp(osi, right_node, dof=o3.cc.X)
        disp.append(end_disp)
        if j == max_steps - 1:
            # Ran out of steps before reaching the target force.
            if handle == 'silent':
                break
            if handle == 'warn':
                print(f'Target not met: force={react:.4g}, target: {forces[i]:.4g}')
            else:
                raise ValueError()
disp = np.array(disp)
reacts = np.array(reacts)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Plot results
# + pycharm={"name": "#%%\n", "is_executing": false}
# Force-displacement response of the element.
bf, sps = plt.subplots()
sps.plot(disp, reacts, label=f'K={k/1e3}kN/m', c='b')
sps.plot(disp[-1], reacts[-1], 'o', c='r', label='END')  # mark the final state
sps.legend()
ef.xy(sps, y_axis=1, x_axis=1, x_grid=True, y_grid=True)
sps.set_xlabel('Displacement [m]')
sps.set_ylabel('Force [N]')
plt.show()
# + pycharm={"name": "#%%\n"}
| examples/example_uniaxial_driver.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Snakemake
#
# ```
# conda install -c bioconda snakemake
# conda install graphviz
# ```
# +
SAMPLES = ['ctl1', 'ctl2']  # sample ids; expects ctl1.fastq / ctl2.fastq inputs
# Default target: requesting merged.txt drives the whole workflow.
rule all:
    input:
        'merged.txt'
# Count 'A' characters per line of each FASTQ file, skipping lines that
# start with '@'.
rule acounts:
    input:
        file='{sample}.fastq'
    output:
        '{sample}_counts.txt'
    run:
        with open(input.file, 'r') as f:
            nc = [str(l.count('A')) for l in f if not l[0]=='@']
        data = ', '.join(nc)+'\n'
        with open(output[0], 'w') as f: f.write(data)
# Concatenate the per-sample count files into the final merged output.
rule merge:
    input:
        counts=expand('{sample}_counts.txt',sample=SAMPLES)
    output:
        'merged.txt'
    shell:
        """
        for f in {input.counts}
        do
            cat $f >> {output}
        done
        """
# -
snakemake --dag merged.txt | dot -Tsvg > dag.svg
snakemake --name mylittleworkflow.txt
# ## Nextflow
# +
# #!/usr/bin/env nextflow
// Upper bound for the random numbers; overridable on the CLI with --range.
params.range = 100
/*
 * A trivial Perl script producing a list of number pairs; its stdout is
 * captured into the `randNums` channel.
 */
process perlTask {
    output:
    stdout randNums
    shell:
    '''
    #!/usr/bin/env perl
    use strict;
    use warnings;
    my $count;
    my $range = !{params.range};
    for ($count = 0; $count < 10; $count++) {
        print rand($range) . ', ' . rand($range) . "\n";
    }
    '''
}
/*
 * A Python script task which parses the output of the previous script and
 * prints the column averages. NOTE(review): the embedded script uses
 * Python 2 print syntax -- requires a python2 interpreter.
 */
process pyTask {
    echo true
    input:
    stdin randNums
    '''
    #!/usr/bin/env python
    import sys
    x = 0
    y = 0
    lines = 0
    for line in sys.stdin:
        items = line.strip().split(",")
        x = x+ float(items[0])
        y = y+ float(items[1])
        lines = lines+1
    print "avg: %s - %s" % ( x/lines, y/lines )
    '''
}
# -
# # Luigi
#
# [https://github.com/spotify/luigi](https://github.com/spotify/luigi)
# Task:
#
# - Design a lightweight pipeline module for your architecture (PC, cluster or cloud)!
| day3/workflows.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="LtKm9EcR-o4K"
# # Diplodatos Kaggle Competition
# ---
# Grupo 26
#
# - <NAME>
# - <NAME>
# - <NAME>
# - <NAME>
# ---
#
# We present this piece of code to create the baseline for the competition, and as an example of how to deal with this kind of problem. The main goals are that you:
#
# 1. Learn
# 1. Try different models and see which one fits the best the given data
# 1. Get a higher score than the given one in the current baseline example
# 1. Try to get the highest score in the class :)
#
#
#
# Data fields
#
# * `TripType` - a categorical id representing the type of shopping trip the customer made. This is the ground truth that you are predicting. TripType_999 is an "other" category.
#
# * `VisitNumber` - an id corresponding to a single trip by a single customer
#
# * `Weekday` - the weekday of the trip
# * `Upc` - the UPC number of the product purchased
# * `ScanCount` - the number of the given item that was purchased. A negative value indicates a product return.
#
# * `DepartmentDescription` - a high-level description of the item's department
#
# * `FinelineNumber` - a more refined category for each of the products, created by Walmart
# + id="hm656JQCqVlC" outputId="edac2194-af05-411e-9ec1-81ae91467d10" colab={"base_uri": "https://localhost:8080/", "height": 69}
# !pip install xgboost
# + id="uSAF9Cee-o4L" outputId="fe3f957b-0e56-40f7-a5bd-2d3543ef3b31" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Import the required packages
import os
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier as DT
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from scipy.stats import uniform, truncnorm, randint
# + id="fTsGaMvQmLBv"
# variables.
file_path_train = 'https://raw.githubusercontent.com/DiploDatos/AprendizajeSupervisado/master/practico/data/train.csv'
file_path_test = 'https://raw.githubusercontent.com/DiploDatos/AprendizajeSupervisado/master/practico/data/test.csv'
dtype={
#'TripType': np.uint8, # unsigned number
#'VisitNumber': np.uint32,
#'Weekday': str,
'Upc': str,
#'ScanCount': np.int32,
#'DepartmentDescription': str,
#'FinelineNumber': str # long
}
# + [markdown] id="qPwzQuB_-o4N"
# Read the *original* dataset...
# + id="rHGWvNN9-o4O" outputId="650d8f86-40b4-4a10-c4eb-86fc139737e0" colab={"base_uri": "https://localhost:8080/", "height": 155}
original_df = pd.read_csv(file_path_train, dtype=dtype)
original_df.dtypes
# + [markdown] id="a9p3lDUfzUk3"
# Looking into the columns values...
# + [markdown] id="CkIEC_d8-o4T"
# **TripType** is the column that we should predict. That column is not present
# in the test set.
#
# The min value in the `ScanCount` column is `-10`, but a negative value indicates a
# product return. It is possible to make a new column indicating whether a value
# is negative or not.
# + id="RCLxfaaI-o4U" outputId="4f2ca28c-7022-4d62-f40b-b4ef8d35de1c" colab={"base_uri": "https://localhost:8080/", "height": 377}
original_df.describe(include='all')
# + id="ZlJInshpzUk9" outputId="28497fab-a654-4b11-d1d8-a51144ef1118" colab={"base_uri": "https://localhost:8080/", "height": 34}
original_df.Weekday.nunique(dropna=False)
# + id="nGhzx_cN-o4b" outputId="7004c54f-d725-45f0-87a4-3d3e543fda37" colab={"base_uri": "https://localhost:8080/", "height": 34}
original_df.DepartmentDescription.nunique(dropna=False)
# + id="SrW4yf2vzUlD" outputId="65de0e28-b095-41f1-93ea-7e5e685e5e9d" colab={"base_uri": "https://localhost:8080/", "height": 34}
original_df.FinelineNumber.nunique(dropna=False)
# + id="_v0KTA3LzUlF" outputId="123d48e1-2962-4851-ff4b-320635e90537" colab={"base_uri": "https://localhost:8080/", "height": 34}
original_df.Upc.nunique(dropna=False)
# + [markdown] id="mmXTzsN3c8oT"
# ## 1 Pre-processing
# ---
# + [markdown] id="f_S77OnK-o4g"
# ### 1.1 `NaN` values
#
# There are `nan`s in the column, let us find them...
# + id="yqTfAluFb6Qz" outputId="40babf20-59cf-4296-b783-54a03e874208" colab={"base_uri": "https://localhost:8080/", "height": 155}
original_df.isna().sum()
# + id="s1QWClt4-o4g" outputId="ccc79871-787d-4912-bb33-dcfca3222e75" colab={"base_uri": "https://localhost:8080/", "height": 406}
original_df[original_df.DepartmentDescription.isna()]
# + [markdown] hideCode=true id="b5O8hX4h-o4i"
# When the description is `NaN`, then the Upc and FinelineNumber are both NaN?
# + id="wdikGH86-o4j" outputId="19442c24-3cb1-49a8-a479-40e0d3ce2e64" colab={"base_uri": "https://localhost:8080/", "height": 34}
(
original_df.DepartmentDescription.isna().sum(),
(original_df.DepartmentDescription.isna() &
original_df.Upc.isna() &
original_df.FinelineNumber.isna()).sum())
# + id="ykJ6mW09-o4l" outputId="52561e50-d7dc-4a5c-ebdf-52afb24e565b" colab={"base_uri": "https://localhost:8080/", "height": 406}
original_df[original_df.Upc.isna()]
# + [markdown] id="hkxghXc-bw_v"
# If `Upc` is `NaN`, is `FinelineNumber` also `NaN`?
# + id="5U3SxO8U-o4n" outputId="35b7fa43-2ae2-4edc-8eee-2a65b579a329" colab={"base_uri": "https://localhost:8080/", "height": 34}
(original_df.Upc.isna().sum(),
original_df.FinelineNumber.isna().sum(),
(original_df.FinelineNumber.isna() & original_df.Upc.isna()).sum())
# + [markdown] id="TEuX-r_3-o4n"
# But it may be the case that both `Upc` and `FineLineNumber` are `NaN` but not the `DepartmentDescription` ...
#
#
#
# + id="OeSCgx9pEqu2" outputId="723512ee-4d4d-4b8a-8013-b5ac1f32e81d" colab={"base_uri": "https://localhost:8080/", "height": 69}
fil = (original_df.FinelineNumber.isna() & original_df.Upc.isna())
original_df[fil]['DepartmentDescription'].value_counts(dropna=False)
# + [markdown] id="zeMMoLD8dggY"
# In contrast with the previous cases, here `Upc` and `FineLineNumber` are `NaN`
# while `DepartmentDescription` still has a value --> `PHARMACY RX`.
# + id="hFkHycMWzUlW" outputId="ee305dc1-de80-4bdb-c785-ec71365ba906" colab={"base_uri": "https://localhost:8080/", "height": 530}
print(original_df[original_df.Upc.isna()].TripType.nunique())
plt.figure(figsize=(16,8))
sns.countplot(
original_df[original_df.Upc.isna()].TripType, color='dodgerblue')
plt.title('Cantidad de UPC NaN por TripType')
plt.xlabel('TripType')
# plt.ylabel('Cant. de Mediciones')
plt.show()
# + [markdown] id="9rqPKb2g-o4p"
# So, `Upc` and `FinelineNumber` are both `NaN` at the same
# time.
# + [markdown] id="gRJUObm2fmI9"
# ### 1.2 Analysis
#
# Our last step in this analysis is to see how balanced is the data...
# + id="ByLMMzbL-o4s" outputId="2ca426ab-485f-4aba-9e1e-c4ecb45112bb" colab={"base_uri": "https://localhost:8080/", "height": 547}
print(original_df[['TripType']].nunique())
plt.figure(figsize=(16,8))
sns.countplot(
original_df.TripType, color='dodgerblue')
plt.title('Cantidad de entradas por TripType')
plt.xlabel('TripType')
# plt.ylabel('Cant. de Mediciones')
plt.show()
# + id="sdpS1vBTzUle" outputId="2eccaba8-da5e-4f03-caeb-aa9c5874ef26" colab={"base_uri": "https://localhost:8080/", "height": 513}
plt.figure(figsize=(16,8))
sns.countplot(
original_df[original_df.ScanCount < 0].TripType, color='dodgerblue')
plt.title('')
plt.xlabel('TripType')
plt.title('Cantidad de devoluciones por TripType')
# plt.ylabel('Cant. de Mediciones')
plt.show()
# + id="DRqWKtvxzUlg"
del original_df
# + [markdown] id="5HtUZY6dgm55"
# ## 2 Data Cleaning.
# ---
#
# + id="deFObYgzNa0g"
clean_df = pd.read_csv(file_path_train, dtype=dtype)
# + [markdown] id="wVcKUXDszUll"
# ### 2.1 Get Labels `TripType`
# + id="4eZwEbGezUlm" outputId="c1e60b9e-891d-4e2f-ad89-0ce4583bce28" colab={"base_uri": "https://localhost:8080/", "height": 224}
# One label per visit: per the data description, TripType is constant within a
# VisitNumber, so taking the first row of each (VisitNumber, Weekday) group
# yields the per-visit target vector, aligned with the grouped features built later.
y = clean_df.groupby(
    ['VisitNumber', 'Weekday'], as_index=False).first().TripType
y
# + [markdown] id="sKUqdzjKzUlp"
# ### 2.2 Concat Test and Train Dataframes
# + id="AsLaRPdezUlp"
test_df = pd.read_csv(file_path_test, dtype=dtype)
# + id="XJhQ3la2zUlr" outputId="98afd555-c045-406b-cea5-9ce73cbdfbae" colab={"base_uri": "https://localhost:8080/", "height": 406}
clean_df = clean_df.drop(['TripType'], axis=1)
clean_df
# + [markdown] id="tlkyVUtK2TYx"
# Set a flag for Training Set and Test Set.
# + id="WM2uGZMOzUlu"
clean_df['is_train_set'] = 1
test_df['is_train_set'] = 0
# + id="9Tllha95zUlv"
clean_df = pd.concat([clean_df, test_df])
# + [markdown] id="_X_FEYZw2fMA"
# Delete old test dataframe
# + id="FaTRy22GzUlx"
del test_df
# + [markdown] id="Bg_-8V3LzUlz"
# ### 2.3 `nan` values ?
# + id="XVoZ2_YQzUl0" outputId="5fa92b71-e607-4ddd-b444-07a313e5f39c" colab={"base_uri": "https://localhost:8080/", "height": 155}
clean_df.isna().sum()
# + [markdown] id="9rEnfyELzUl2"
# ### 2.4 return column
#
# New `return` and `ScanCount` columns from `ScanCount`
#
# `return`
#
# * `1` a return
# * `0` no return
# + id="JxyRu4Sa24LF"
def repay_column(df: pd.DataFrame) -> pd.DataFrame:
    """Add a ``returns`` column derived from ``ScanCount``.

    For rows where ``ScanCount`` is negative (a product return), ``returns``
    holds the absolute number of returned items; otherwise it is ``0``.

    Mutates *df* in place and returns it, matching the original API.
    """
    # Vectorized equivalent of the former row-wise apply(axis=1): negate the
    # counts and clip at zero, so -3 -> 3 and any non-negative count -> 0.
    # Also handles an empty frame cleanly, unlike apply(axis=1).
    df['returns'] = (-df['ScanCount']).clip(lower=0)
    return df
# + id="Qo9jtTay29I1" outputId="c63e731f-16b6-425c-9309-76316d790b9b" colab={"base_uri": "https://localhost:8080/", "height": 406}
clean_df = repay_column(clean_df)
clean_df[['ScanCount', 'returns']]
# + [markdown] id="05rCbgHEzUl3"
# #### Positive ScanCount column
#
# Positive `ScanCount`
# + id="mvwQjAVM3QsE" outputId="754992cc-c102-4046-a7bf-69056cb567ee" colab={"base_uri": "https://localhost:8080/", "height": 406}
clean_df.loc[clean_df.ScanCount < 0, 'ScanCount'] = 0
clean_df
# + [markdown] id="6iQEVSHwzUl5"
# ### 2.5 `UPC` columns
#
# In its standard version (UPC-A), the bar code consists of a five digit
# manufacturer number and a five digit product number. In addition there is a
# 1 digit number system identifier at the start of the code. The number
# system digit denotes the use of one of ten number systems defined by UPC:
#
# * `0, 1 , 6, 7 and 8` are for regular UPC codes.
# * `2` is for random weight items, e.g. meat, marked in-store.
# * `3` is for National Drug Code and National Health Related Items.
# * `4` is for in-store marking of non-food items.
# * `5 and 9` are for coupon use.
#
# <p style="text-align: center;">
# <img src=http://www.computalabel.com/Images/UPCdiag.png width=75%>
# </p>
#
#
#
# The UPC symbol also has a `check digit` which is the last digit of the
# code and is calculated according to the algorithm used for EAN.
#
# First step: add leading `0`s to some values in the Upc column to complete 11 digits
#
# if values is `nan` = `'nan'` (str)
# + id="i23DJ2NE0iiS"
def clean_upc(df):
    """Normalize the ``Upc`` column in place and return the frame.

    The last two characters of each code are dropped, missing/non-string
    entries become the sentinel ``'-1'``, and shorter codes are left-padded
    with zeros up to the 11 digits of a full UPC-A number.
    """
    def normalize(code):
        # Non-strings (NaN survives the .str slice as NaN) and the explicit
        # sentinel both map to '-1'.
        if not isinstance(code, str) or code == '-1':
            return '-1'
        # rjust pads with leading zeros only when len(code) < 11; longer
        # codes pass through unchanged.
        return code.rjust(11, '0')
    df.Upc = df.Upc.str[:-2].apply(normalize)
    return df
# + id="dQl2g6CLzUl5" outputId="4b4b4228-c068-40a3-c6b2-751d47b77691" colab={"base_uri": "https://localhost:8080/", "height": 406}
clean_df = clean_upc(clean_df)
clean_df[['Upc']]
# + [markdown] id="CS7zdQrkzUl7"
# #### numSysChar Column
#
# * `0, 1 , 6, 7 and 8` are for regular UPC codes.
# * `2` is for random weight items, e.g. meat, marked in-store.
# * `3` is for National Drug Code and National Health Related Items.
# * `4` is for in-store marking of non-food items.
# * `5 and 9` are for coupon use.
# + id="Atm2ldLj30Jq"
def upc_columns(df):
    """Derive UPC sub-fields from the normalized ``Upc`` string column.

    Adds, in place:
      * ``numSysChar`` -- the leading number-system digit of the UPC,
      * ``manNum``     -- the five-digit manufacturer number (chars 1..5).
    Rows whose Upc is the missing sentinel ``'-1'`` get ``'-1'`` in both.

    Returns *df* to allow chaining.  (itemNum / checkDig extraction was
    intentionally left out of the feature set, as in the original notebook.)
    """
    # Vectorized replacement for the former per-row apply(axis=1): slice the
    # string column once, then restore the '-1' sentinel on missing rows.
    missing = df.Upc == '-1'
    df['numSysChar'] = df.Upc.str[0].where(~missing, '-1')
    df['manNum'] = df.Upc.str[1:6].where(~missing, '-1')
    return df
# + id="XqTYHRcs4YzH" outputId="d65f13e3-4543-4eb8-a6a4-19db435badd3" colab={"base_uri": "https://localhost:8080/", "height": 406}
clean_df = upc_columns(clean_df)
clean_df[['Upc', 'numSysChar', 'manNum']]
# + id="7OY5tIJB4o18"
# Collapse the UPC number-system digits into coarser categories (see the
# markdown above): 0,1,6,7,8 are regular product codes; 5 and 9 are coupons.
# Digits 2, 3 and 4 are deliberately left as their own categories.
# NOTE: 'cupon' is kept as spelled because it later becomes a dummy-column name.
fil = ['0','1','6','7', '8']
clean_df.loc[clean_df.numSysChar.isin(fil), 'numSysChar'] = 'regular'
fil = ['5','9']
clean_df.loc[clean_df.numSysChar.isin(fil), 'numSysChar'] = 'cupon'
# + id="IfOiQX5Q4soe" outputId="1ee45964-3d77-4591-a341-620693765245" colab={"base_uri": "https://localhost:8080/", "height": 138}
clean_df.numSysChar.value_counts()
# + [markdown] id="7Nh58A0bzUl8"
# ### 2.6 drop columns
# + id="TJyN8Q4MzUl9"
# Cast the engineered string columns to numeric so the later groupby().sum()
# and the models can consume them; the '-1' sentinel simply becomes -1.0 / -1.
clean_df.Upc = clean_df.Upc.astype('float')
clean_df.FinelineNumber = clean_df.FinelineNumber.astype('float')
clean_df.manNum = clean_df.manNum.astype('int')
# + id="neRKrcs2zUl-" outputId="e3359e93-ada3-4aff-af93-5c8e03532cfc" colab={"base_uri": "https://localhost:8080/", "height": 443}
clean_df
# + [markdown] id="rhJdM5YDzUmA"
# ### 2.7 Dummies, groupby columns
#
# Now, we create the dummy columns
# + id="qGBQ9f8TzUmB" outputId="415b4c3c-ac24-4415-99d3-7c6d811fd8c9" colab={"base_uri": "https://localhost:8080/", "height": 443}
clean_df = pd.get_dummies(
clean_df,
columns=['DepartmentDescription'],
dummy_na=True)
clean_df
# + [markdown] id="U_45Tnor57gM"
# #### `numSysChar` Dummies
# + id="W_Fpqhzf5nJa" outputId="2479ad06-0435-42cd-9eca-37d98d6f5b1c" colab={"base_uri": "https://localhost:8080/", "height": 443}
clean_df = pd.get_dummies(
clean_df,
columns=['numSysChar'],
dummy_na=False)
clean_df
# + [markdown] id="7FE7llzm-o44"
# #### group by VisitNumber and Weekday
# Now, we group by the VisitNumber and Weekday (they should be the same),
# and add all values for ScanCount, and the one-hot encoding
# of `DepartmentDescription`
# + id="aKQXdpBQzUmD"
clean_df = clean_df.groupby(['VisitNumber', 'Weekday'], as_index=False).sum()
# + id="iWp9w6IyzUmF" outputId="3a319329-604b-487a-8e9e-4cc09266cf19" colab={"base_uri": "https://localhost:8080/", "height": 443}
clean_df
# + id="1sQlxj55zUmH"
clean_df = pd.get_dummies(clean_df, columns=["Weekday"], dummy_na=False)
# + [markdown] id="1oZBpeY100Fv"
# ### 2.8 Split into (train, valid) and test
# + id="z_63zl92zUmK"
df_test = clean_df[clean_df.is_train_set == 0]
clean_df = clean_df[clean_df.is_train_set != 0]
# + id="R8Zh_lHmzUmM"
clean_df = clean_df.drop(["is_train_set"], axis=1)
df_test = df_test.drop(["is_train_set"], axis=1)
# + [markdown] id="ePReaomezUmO"
# ## 3 Models Train and Test
# ---
#
# Load the data...
# + [markdown] id="pBK7etouzUmO"
# ### Create the model and evaluate it
#
# split training dataset into train and "validation"
# (we won't be using validation set in this example, because of the cross-validation;
#
# but it could be useful for you depending on your approach)
# + id="1gs41buAzUmP"
#state = np.random.RandomState(43)
# Hold out 20% of the training visits for validation; fixed seed so the
# split is reproducible across runs.
X_train, X_valid, y_train, y_valid = train_test_split(
    clean_df, y,
    test_size=0.2,
    random_state=42)
# + id="DzBeFoMDzUmS" outputId="afb4f6a6-5d40-4753-8dff-541148a5b354" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(X_train.shape, y_train.shape)
# + id="QLe7m1VHzhsA"
# results dataframe is used to store the computed results
results = pd.DataFrame(columns=('clf', 'best_acc'))
# + [markdown] id="MM7rMaH45T3d"
# ### 3.1 Decision Tree
# + id="WwL0YQjtzUmf"
# Hyper-parameter grid for the decision tree (DT == DecisionTreeClassifier).
tree_param = {
    'criterion':('gini', 'entropy'),
    'min_samples_leaf':(1, 2, 5),
    'min_samples_split':(2, 3, 5, 10, 50, 100)}
tree = DT(random_state=42)
# 3-fold grid search over the grid above, model selection by accuracy.
tree_clf = GridSearchCV(tree, tree_param, cv=3, scoring='accuracy', n_jobs=4)
tree_clf.fit(X_train, y_train)
# Keep the refitted best estimator for evaluation in the next cell.
best_tree_clf = tree_clf.best_estimator_
# + id="cRHII8FszUmh" outputId="99e553ea-bc76-4a5c-df7c-7cd8099545c4" colab={"base_uri": "https://localhost:8080/", "height": 275}
print(f'Best Decision Tree Test accuracy: = {best_tree_clf.score(X_valid, y_valid)}')
print('Best Decision Tree accuracy: ', tree_clf.best_score_)
print(best_tree_clf)
results = results.append(
{'clf': best_tree_clf, 'best_acc': tree_clf.best_score_},
ignore_index=True
)
print('The best classifier so far is: ')
print(results.loc[results['best_acc'].idxmax()]['clf'])
# + [markdown] id="del_OQH55Mc8"
# ### 3.2 `RandomForestClassifier`
# + id="nmb1Db3zzUmc" outputId="4eb92b93-e44f-4496-f9ff-ed32150450d0" colab={"base_uri": "https://localhost:8080/", "height": 72}
# Stratified 3-fold CV so each fold keeps the (imbalanced) TripType proportions.
kfold = StratifiedKFold(n_splits=3, random_state=42, shuffle=True)
rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
# Random-forest hyper-parameter grid; class_weight='balanced' compensates for
# the skewed class distribution seen in the exploratory plots above.
model_params = {
    'min_samples_split': [2, 3, 4, 6],
    'class_weight': ['balanced'],
    'max_depth': [64, 96, 108, 128],
    'bootstrap': [False],
}
rfor_clf = GridSearchCV(
    rf_model, model_params, cv=kfold, scoring='accuracy', n_jobs=4
)
rfor_clf.fit(X_train, y_train)
# BUG FIX: this previously re-read tree_clf.best_estimator_ (the decision
# tree), so the "Random Forest" metrics printed next were actually the
# decision tree's.  Use the random-forest search's winner instead.
best_tree_clf = rfor_clf.best_estimator_
# + id="VL6fttJwzUme" outputId="f4579923-461d-4e0a-9985-a06ca2db5734" colab={"base_uri": "https://localhost:8080/", "height": 275}
print(f'Best Random Forest Test accuracy = {best_tree_clf.score(X_valid, y_valid)}')
print('Best Random Forest accuracy: ', rfor_clf.best_score_)
print(best_tree_clf)
results = results.append(
{'clf': best_tree_clf, 'best_acc': rfor_clf.best_score_},
ignore_index=True
)
print('The best classifier so far is: ')
print(results.loc[results['best_acc'].idxmax()]['clf'])
# + [markdown] id="DjHvVJLEzUmU"
# ### 3.3 `GradientBoostingClassifier`
# + id="G9f0rYblzUmU"
# Stratified 3-fold splitter.  NOTE(review): built here but the grid search
# below passes cv=3 (plain KFold) instead of this kfold — confirm which was
# intended.
kfold = StratifiedKFold(n_splits=3, random_state=42, shuffle=True)
# Single-point "grid": effectively cross-validates one GBM configuration.
parameters = {
    'learning_rate': [0.1],
    'loss': ['deviance'],
    'min_samples_split': [2],
    'max_depth': [3]
}
clf1 = GradientBoostingClassifier(random_state=42, n_estimators=100)
boost_clf1 = GridSearchCV(clf1, parameters, cv=3, scoring='accuracy', n_jobs=4)
boost_clf1.fit(X_train, y_train)
# Name reuse: best_tree_clf now holds the best gradient-boosting model.
best_tree_clf = boost_clf1.best_estimator_
# + id="cB60B9dizUmW" outputId="2bc5fc75-41c3-4a4d-998c-3eac6fc16625" colab={"base_uri": "https://localhost:8080/", "height": 344}
print(f'Best Gradient Boosting Test accuracy = {best_tree_clf.score(X_valid, y_valid)}')
print('Best Gradient Boosting accuracy: ', boost_clf1.best_score_)
print(best_tree_clf)
results = results.append(
{'clf': best_tree_clf, 'best_acc': boost_clf1.best_score_},
ignore_index=True
)
print('The best classifier so far is: ')
print(results.loc[results['best_acc'].idxmax()]['clf'])
# + [markdown] id="-LLn1PbKztbj"
# #### 3.4 `XGradientBoostingClassifier` (XGBClassifier)
# + id="wRZoWW0iy7_B"
# Stratified 3-fold CV for the XGBoost search.
kfold = StratifiedKFold(n_splits=3, random_state=42, shuffle=True)
xgbc = XGBClassifier(random_state=42)
# 'eta' is XGBoost's learning-rate parameter; with a single value this grid
# search effectively just cross-validates one configuration.
parameters = {
    'eta': [0.1],
}
xgbc_clf = GridSearchCV(
    xgbc, parameters, cv=kfold, scoring='balanced_accuracy', n_jobs=4)
xgbc_clf.fit(X_train, y_train)
# Name reuse: best_tree_clf now holds the best XGBoost model.
best_tree_clf = xgbc_clf.best_estimator_
# + id="ZpQEreNAy9U9" outputId="b3988c72-7163-4851-bb3f-2b177c28ee8c" colab={"base_uri": "https://localhost:8080/", "height": 138}
best_tree_clf
# + id="cU2-oiXDy_9W" outputId="6a753739-5b19-4740-ea39-26b05e1bddf8" colab={"base_uri": "https://localhost:8080/", "height": 34}
y_pred = best_tree_clf.predict(X_valid)
predictions = [value for value in y_pred]
accuracy = accuracy_score(y_valid, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# print('Best GradientBoostingClassifier: ', best_tree_clf.best_score_)
# + id="UxNBnycpzUmj" outputId="982b01bd-bf0e-4f1c-b4ea-8e71cfd1b860" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# sns.set_context(context='talk', font_scale=0.5)
# Row-normalized confusion matrix of the best model on the validation set.
fig = plt.figure(figsize=(25,25))
# FIX: subplot takes an int code; the former string '111' is rejected by
# modern matplotlib.
ax = plt.subplot(111)
plot_confusion_matrix(
    best_tree_clf, X_valid, y_valid,
    cmap=plt.cm.Blues,
    normalize='true',
    ax=ax
)
# FIX: label corrected — the model evaluated here is the XGBClassifier
# grid-search winner, not an SGDClassifier.
plt.title('Confusion Matrix XGBClassifier best model')
plt.show()
# + [markdown] id="9gR_VeFQzUmm"
# ## 4 Results write back
# ---
#
# the best model is:
# for XGBClassifier Accuracy: `70.05%`
#
# and
#
#
#
# ```python
# XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
# colsample_bynode=1, colsample_bytree=1, eta=0.1, gamma=0,
# gpu_id=-1, importance_type='gain', interaction_constraints='',
# learning_rate=0.100000001, max_delta_step=0, max_depth=6,
# min_child_weight=1, missing=nan, monotone_constraints='()',
# n_estimators=100, n_jobs=0, num_parallel_tree=1,
# objective='multi:softprob', random_state=42, reg_alpha=0,
# reg_lambda=1, scale_pos_weight=None, subsample=1,
# tree_method='exact', validate_parameters=1, verbosity=None)
# ```
# + id="WUpWHqCrzUmn" outputId="8e8663e6-9faf-4f43-eacd-3e128a29326b"
# Esto hace un ranking de la importancia de la variable para el modelo
# lo saque de aca https://www.kaggle.com/zlatankr/titanic-random-forest-82-78/data
pd.concat((
pd.DataFrame(X_train.columns, columns = ['variable']),
pd.DataFrame(
best_tree_clf.feature_importances_, columns = ['importance'])),
axis=1
).sort_values(by='importance', ascending=False)[:20]
# + id="0ZBP5qM2zUmo"
# yy = results.clf.iloc[0].predict(XX)
# BUG FIX: `boost_clf3` was never defined anywhere in the notebook
# (NameError).  The fitted XGBoost grid search is `xgbc_clf`, which the
# summary above identifies as the best model, so predict with its winner.
best_tree_clf = xgbc_clf.best_estimator_
yy = best_tree_clf.predict(df_test)
# + id="2Oo_z1b6zUmq" outputId="ef7bcd83-8d3f-4f59-d912-dfbcc24c1620"
submission = pd.DataFrame(
list(zip(df_test.VisitNumber, yy)),
columns=["VisitNumber", "TripType"])
submission
# + id="mA0rmfm7zUmr"
# submission.to_csv("../data/submission.csv", header=True, index=False)
# + [markdown] id="c_i_TW2TzUmt"
# ---
# ## End
| supervised/notebooks/baseline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
# *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by <NAME>; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
#
# *No changes were made to the contents of this notebook from the original.*
# <!--NAVIGATION-->
# < [Understanding Data Types in Python](02.01-Understanding-Data-Types.ipynb) | [Contents](Index.ipynb) | [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) >
# # The Basics of NumPy Arrays
# Data manipulation in Python is nearly synonymous with NumPy array manipulation: even newer tools like Pandas ([Chapter 3](03.00-Introduction-to-Pandas.ipynb)) are built around the NumPy array.
# This section will present several examples of using NumPy array manipulation to access data and subarrays, and to split, reshape, and join the arrays.
# While the types of operations shown here may seem a bit dry and pedantic, they comprise the building blocks of many other examples used throughout the book.
# Get to know them well!
#
# We'll cover a few categories of basic array manipulations here:
#
# - *Attributes of arrays*: Determining the size, shape, memory consumption, and data types of arrays
# - *Indexing of arrays*: Getting and setting the value of individual array elements
# - *Slicing of arrays*: Getting and setting smaller subarrays within a larger array
# - *Reshaping of arrays*: Changing the shape of a given array
# - *Joining and splitting of arrays*: Combining multiple arrays into one, and splitting one array into many
# ## NumPy Array Attributes
# First let's discuss some useful array attributes.
# We'll start by defining three random arrays, a one-dimensional, two-dimensional, and three-dimensional array.
# We'll use NumPy's random number generator, which we will *seed* with a set value in order to ensure that the same random arrays are generated each time this code is run:
# +
import numpy as np
np.random.seed(0) # seed for reproducibility
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
# -
# Each array has attributes ``ndim`` (the number of dimensions), ``shape`` (the size of each dimension), and ``size`` (the total size of the array):
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
# Another useful attribute is the ``dtype``, the data type of the array (which we discussed previously in [Understanding Data Types in Python](02.01-Understanding-Data-Types.ipynb)):
print("dtype:", x3.dtype)
# Other attributes include ``itemsize``, which lists the size (in bytes) of each array element, and ``nbytes``, which lists the total size (in bytes) of the array:
print("itemsize:", x3.itemsize, "bytes")
print("nbytes:", x3.nbytes, "bytes")
# In general, we expect that ``nbytes`` is equal to ``itemsize`` times ``size``.
# ## Array Indexing: Accessing Single Elements
# If you are familiar with Python's standard list indexing, indexing in NumPy will feel quite familiar.
# In a one-dimensional array, the $i^{th}$ value (counting from zero) can be accessed by specifying the desired index in square brackets, just as with Python lists:
x1
x1[0]
x1[4]
# To index from the end of the array, you can use negative indices:
x1[-1]
x1[-2]
# In a multi-dimensional array, items can be accessed using a comma-separated tuple of indices:
x2
x2[0, 0]
x2[2, 0]
x2[2, -1]
# Values can also be modified using any of the above index notation:
x2[0, 0] = 12
x2
# Keep in mind that, unlike Python lists, NumPy arrays have a fixed type.
# This means, for example, that if you attempt to insert a floating-point value to an integer array, the value will be silently truncated. Don't be caught unaware by this behavior!
x1[0] = 3.14159 # this will be truncated!
x1
# ## Array Slicing: Accessing Subarrays
# Just as we can use square brackets to access individual array elements, we can also use them to access subarrays with the *slice* notation, marked by the colon (``:``) character.
# The NumPy slicing syntax follows that of the standard Python list; to access a slice of an array ``x``, use this:
# ``` python
# x[start:stop:step]
# ```
# If any of these are unspecified, they default to the values ``start=0``, ``stop=``*``size of dimension``*, ``step=1``.
# We'll take a look at accessing sub-arrays in one dimension and in multiple dimensions.
# ### One-dimensional subarrays
x = np.arange(10)
x
x[:5] # first five elements
x[5:] # elements after index 5
x[4:7] # middle sub-array
x[::2] # every other element
x[1::2] # every other element, starting at index 1
# A potentially confusing case is when the ``step`` value is negative.
# In this case, the defaults for ``start`` and ``stop`` are swapped.
# This becomes a convenient way to reverse an array:
x[::-1] # all elements, reversed
x[5::-2] # reversed every other from index 5
# ### Multi-dimensional subarrays
#
# Multi-dimensional slices work in the same way, with multiple slices separated by commas.
# For example:
x2
x2[:2, :3] # two rows, three columns
x2[:3, ::2] # all rows, every other column
# Finally, subarray dimensions can even be reversed together:
x2[::-1, ::-1]
# #### Accessing array rows and columns
#
# One commonly needed routine is accessing of single rows or columns of an array.
# This can be done by combining indexing and slicing, using an empty slice marked by a single colon (``:``):
print(x2[:, 0]) # first column of x2
print(x2[0, :]) # first row of x2
# In the case of row access, the empty slice can be omitted for a more compact syntax:
print(x2[0]) # equivalent to x2[0, :]
# ### Subarrays as no-copy views
#
# One important–and extremely useful–thing to know about array slices is that they return *views* rather than *copies* of the array data.
# This is one area in which NumPy array slicing differs from Python list slicing: in lists, slices will be copies.
# Consider our two-dimensional array from before:
print(x2)
# Let's extract a $2 \times 2$ subarray from this:
x2_sub = x2[:2, :2]
print(x2_sub)
# Now if we modify this subarray, we'll see that the original array is changed! Observe:
x2_sub[0, 0] = 99
print(x2_sub)
print(x2)
# This default behavior is actually quite useful: it means that when we work with large datasets, we can access and process pieces of these datasets without the need to copy the underlying data buffer.
# ### Creating copies of arrays
#
# Despite the nice features of array views, it is sometimes useful to instead explicitly copy the data within an array or a subarray. This can be most easily done with the ``copy()`` method:
x2_sub_copy = x2[:2, :2].copy()
print(x2_sub_copy)
# If we now modify this subarray, the original array is not touched:
x2_sub_copy[0, 0] = 42
print(x2_sub_copy)
print(x2)
# ## Reshaping of Arrays
#
# Another useful type of operation is reshaping of arrays.
# The most flexible way of doing this is with the ``reshape`` method.
# For example, if you want to put the numbers 1 through 9 in a $3 \times 3$ grid, you can do the following:
grid = np.arange(1, 10).reshape((3, 3))
print(grid)
# Note that for this to work, the size of the initial array must match the size of the reshaped array.
# Where possible, the ``reshape`` method will use a no-copy view of the initial array, but with non-contiguous memory buffers this is not always the case.
#
# Another common reshaping pattern is the conversion of a one-dimensional array into a two-dimensional row or column matrix.
# This can be done with the ``reshape`` method, or more easily done by making use of the ``newaxis`` keyword within a slice operation:
# +
x = np.array([1, 2, 3])
# row vector via reshape
x.reshape((1, 3))
# -
# row vector via newaxis
x[np.newaxis, :]
# column vector via reshape
x.reshape((3, 1))
# column vector via newaxis
x[:, np.newaxis]
# We will see this type of transformation often throughout the remainder of the book.
# ## Array Concatenation and Splitting
#
# All of the preceding routines worked on single arrays. It's also possible to combine multiple arrays into one, and to conversely split a single array into multiple arrays. We'll take a look at those operations here.
# ### Concatenation of arrays
#
# Concatenation, or joining of two arrays in NumPy, is primarily accomplished using the routines ``np.concatenate``, ``np.vstack``, and ``np.hstack``.
# ``np.concatenate`` takes a tuple or list of arrays as its first argument, as we can see here:
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
np.concatenate([x, y])
# You can also concatenate more than two arrays at once:
z = [99, 99, 99]
print(np.concatenate([x, y, z]))
# It can also be used for two-dimensional arrays:
grid = np.array([[1, 2, 3],
[4, 5, 6]])
# concatenate along the first axis
np.concatenate([grid, grid])
# concatenate along the second axis (zero-indexed)
np.concatenate([grid, grid], axis=1)
# For working with arrays of mixed dimensions, it can be clearer to use the ``np.vstack`` (vertical stack) and ``np.hstack`` (horizontal stack) functions:
# +
x = np.array([1, 2, 3])
grid = np.array([[9, 8, 7],
[6, 5, 4]])
# vertically stack the arrays
np.vstack([x, grid])
# -
# horizontally stack the arrays
y = np.array([[99],
[99]])
np.hstack([grid, y])
# Similary, ``np.dstack`` will stack arrays along the third axis.
# ### Splitting of arrays
#
# The opposite of concatenation is splitting, which is implemented by the functions ``np.split``, ``np.hsplit``, and ``np.vsplit``. For each of these, we can pass a list of indices giving the split points:
x = [1, 2, 3, 99, 99, 3, 2, 1]
x1, x2, x3 = np.split(x, [3, 5])
print(x1, x2, x3)
# Notice that *N* split-points, leads to *N + 1* subarrays.
# The related functions ``np.hsplit`` and ``np.vsplit`` are similar:
grid = np.arange(16).reshape((4, 4))
grid
upper, lower = np.vsplit(grid, [2])
print(upper)
print(lower)
left, right = np.hsplit(grid, [2])
print(left)
print(right)
# Similarly, ``np.dsplit`` will split arrays along the third axis.
# <!--NAVIGATION-->
# < [Understanding Data Types in Python](02.01-Understanding-Data-Types.ipynb) | [Contents](Index.ipynb) | [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) >
| numpy/02.02-The-Basics-Of-NumPy-Arrays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Synthetic Seismogram Widget
# ## Using the Notebook
# This is the <a href="https://jupyter.org/">Jupyter Notebook</a>, an interactive coding and computation environment. For this lab, you do not have to write any code, you will only be running it.
#
# To use the notebook:
# - "Shift + Enter" runs the code within the cell (so does the forward arrow button near the top of the document)
# - You can alter variables and re-run cells
# - If you want to start with a clean slate, restart the Kernel either by going to the top, clicking on Kernel: Restart, or by "esc + 00" (if you do this, you will need to re-run Step 0 before running any other cells in the notebook)
#
# Instructions as to how to set up Python and the iPython notebook on your personal computer are attached in the appendix of the lab
# ## Step 0: Import Necessary Packages
# Import the necessary packages
# %matplotlib inline
from SimPEG.utils import download
from geoscilabs.seismic.syntheticSeismogram import InteractLogs, InteractDtoT, InteractWconvR, InteractSeismogram
from geoscilabs.seismic.NMOwidget import ViewWiggle, InteractClean, InteractNosiy, NMOstackthree
# from geoscilabs.seismic.drawGaussianSeismogram import *
# # 1 Normal Incidence Seismogram
# ## Backgrounds: Normal Incidence Seismogram
# We begin by constructing a reflectivity series from the physical property model. The physical properties characterizing the model are:
# - density ($\rho$) and
# - seismic velocity ($v$)
#
# The acoustic impedance of each layer is:
# $$
# Z_i = \rho_i v_i
# $$
#
# From acoustic impedance, the down-going reflection coefficient for each interface is given by
# $$
# r_{i,i+1} = \frac{Z_{i+1}-Z_i}{Z_{i+1}+Z_i}
# $$
#
# and the transmission coefficient is
# $$
# t_{i,i+1} = \frac{2Z_{i}}{Z_{i+1}+Z_i}
# $$
#
# <img src="https://github.com/geoscixyz/geosci-labs/blob/master/images/seismic/rt1interface.png?raw=true" style="width: 40%; height: 40%"></img>
# #### Figure 1. Normal incidence for two layers model.
# The true reflectivity accounts for both reflection and transmission. For the reflection off of the first layer, these are equivalent. For the reflection off of the second interface, the reflection coefficient $r_{2,3}$ is multiplied by $t_{1,2}t_{2,1}$ to get the true reflectivity. In the below plot, this effect of the transmission coefficients can be included or not using the toggle "usingT."
#
# <img src="https://github.com/geoscixyz/geosci-labs/blob/master/images/seismic/rt2interface.png?raw=true" style="width: 40%; height: 40%"></img>
# #### Figure 2. Normal incidence for three layers model.
# ### Setup for constructing a synthetic seismogram
# We will consider a model consisting of two horizontal layers over a half-space. Each layer has density $\rho_i$ and seismic velocity $v_i$. In this notebook, we will start from the physical property model and walk through how to construct a synthetic, normal incidence seismogram.
#
# <img src="https://github.com/geoscixyz/geosci-labs/blob/master/images/seismic/geoModel.png?raw=true" style="width: 50%; height: 50%"></img>
# #### Figure 3. Three layer model
# ## 1.1 Reflection coefficient and Reflectivity
# ### From Physical Properties to a Reflectivity Series
# **Parameters of the below widget are:**
#
# - *d2*: depth of the interface between layer 1 and 2
# - *d3*: depth of the interface between layer 2 and 3
# - *rho1*: density of the layer 1 ($kg/m^3$)
# - *rho2*: density of the layer 2 ($kg/m^3$)
# - *rho3*: density of the layer 3 ($kg/m^3$)
# - *v1*: velocity of the layer 1 ($m/s$)
# - *v2*: velocity of the layer 2 ($m/s$)
# - *v3*: velocity of the layer 3 ($m/s$)
# - *usingT*: switch for considering transmission coefficient for reflectivity
LogVal = InteractLogs(v1=1500,v2=1500,v3=1500)
LogVal
# ## 1.2 Depth to Time conversion
# Now we have the reflectivity series as a function of depth. With seismic, we measure a signal as a function of time. So we must apply a conversion from depth to time. We do this by computing the time it takes for a signal to reach a given depth and return to the surface.
InteractDtoT(LogVal)
# ## 1.3 Constructing the ideal seismogram
# ### Wavelet $\otimes$ Reflectivity = Seismogram
# Now that we have the reflectivity series in time, the next step is to choose the input pulse and construct our seismogram. For the following examples, we will use a <a href="http://subsurfwiki.org/wiki/Ricker_wavelet">Ricker Wavelet</a> with peak frequency $f$.
#
# A seismogram is the convolution of the wavelet and reflectivity series. Here, you can adjust the peak frequency (wavF) of the wavelet and its amplitude (wavA).
#
# The geologic model used is:
# <img src="https://github.com/geoscixyz/geosci-labs/blob/master/images/seismic/Step3FixedMod.png?raw=true" style="width: 50%; height: 50%"></img>
InteractWconvR()
# ## 1.4 Vertical Resolution
# When referring to vertical resolution, the question to ask is: "Can the two arrivals (one from the top, and one from the bottom of the layer) be distinguished?"
#
# Adjust the layer thickness for the middle layer (by adjusting d2 and/or d3) and the frequency of the input pulse to investigate vertical resolution. You can also add noise to the trace.
#
# The geologic model is:
# <img src="https://github.com/geoscixyz/geosci-labs/blob/master/images/seismic/geoModel.png?raw=true" style="width: 50%; height: 50%"></img>
#
# You can adjust all of the parameters. Have fun!
#
# GPG section: http://gpg.geosci.xyz/content/seismic/seismic_reflection_interpretation.html
# **Parameters of the below widget are:**
#
# - d2: depth of the interface between layer 1 and 2
# - d3: depth of the interface between layer 2 and 3
# - rho1: density of the layer 1 ($kg/m^3$)
# - rho2: density of the layer 2 ($kg/m^3$)
# - rho3: density of the layer 3 ($kg/m^3$)
# - v1: velocity of the layer 1 ($m/s$)
# - v2: velocity of the layer 2 ($m/s$)
# - v3: velocity of the layer 3 ($m/s$)
# - wavef: peak frequency of the Ricker wavelet
# - waveA: amplitude of Ricker wavelet
# - AddNoise: switch for adding noise
# - usingT: switch for considering transmission coefficient for reflectivity
InteractSeismogram()
# # Constructing a Normal Incidence Seismic Trace from CMP gather data
# ## 2.1 Sketch the problem
# No app for this section.
# ## 2.2 From CMP gather to a Seismic Trace
# ### Background: Fit A Hyperbola to the Data
# - Each reflection event in a CMP gather has a travel time that corresponds to a hyperbola: $$ t(x) = \sqrt{\frac{x^2}{v^2_{stacking}} + t_0^2} \\$$ where $x$ is offset between source and receiver, $v_{stacking}$ is stacking velocity, and $t_0$ is the intercept time: $$ t_0 = \sqrt{\frac{4d^2}{v^2_{stacking}}} \\$$ where $d$ is the thickness of the first layer.
#
# - For each reflection event hyperbola, perform a velocity analysis to find $v_{stacking}$. This is done by first choosing $t_o$. Then choose a trial value of velocity. <img src="http://www.eos.ubc.ca/courses/eosc350/content/methods/meth_10d/assets/kearey_fig4_21.gif"></img>
#
# - Calculate the Normal Moveout Correction: Using the hyperbolia corresponding to $v_{stacking}$, compute the normal moveout for each trace and then adjust the reflection time by the amount $\triangle T$: $$ \triangle T = t_0-t(x) \\ $$ <img src="http://www.eos.ubc.ca/courses/eosc350/content/methods/meth_10d/assets/ch1_fig8.gif"></img>
#
# Estimate $t_0$, and a plausible $v_{stack}$ by altering t0 and v using the widget below. This hyperbola will be drawn as a red hyperbola on the middle panel. On the right panel we apply a stack with the velocity that you fit, and provide the stacked trace.
#
# GPG section: (http://gpg.geosci.xyz/content/seismic/seismic_reflection_stacking.html#)
# ### Two common-mid-point (CMP) gathers: Clean and Noisy
# We have two CMP gathers generated from different geologic models. One data set is clean and the other is contaminated with noise. The seismic data were adapted from SeismicLab (http://seismic-lab.physics.ualberta.ca/).
#
# In this section, we will walk through how to construct a normal incidence seismogram from these data sets.
#
# We will do this in the following steps:
# - Plot the data
# - Perform the NMO correction and stack for the clean data
# - Perform the NMO correction and stack for the noisy data
# ### Step 1: Plot the data
# As you can see from the clean CMP gather, there is only one reflector, meaning there is a single interface separating two geologic units visible in these data.
# (Note: The direct and any refracted arrivals have been removed).
#
# It is difficult to distinguish any reflectors in the noisy data. However, there is a single reflector in these data, and we will perform normal moveout (NMO) and stacking operations to construct a normal-incidence seismogram where this reflector is visible.
# +
# Define path to required data files
synDataFilePath = 'https://github.com/geoscixyz/geosci-labs/raw/master/assets/seismic/syndata1.npy'
obsDataFilePath = 'https://github.com/geoscixyz/geosci-labs/raw/master/assets/seismic/obsdata1.npy'
timeFilePath= 'https://github.com/geoscixyz/geosci-labs/raw/master/assets/seismic/time1.npy'
# Download the data
synData = download(synDataFilePath,overwrite=True,verbose=False)
obsData = download(obsDataFilePath,overwrite=True,verbose=False)
timeData = download(timeFilePath,overwrite=True,verbose=False)
# Plot the data
ViewWiggle(synData, obsData)
# -
# ### Step 2: Perform the NMO correction and stack for the clean data
# **Parameters of the below widget to fit observed reflection event are:**
#
# - *t0*: intercept time of the hyperbola
# - *v*: velocity of the hyperbola
# Fit hyperbola to clean data
clean = InteractClean(synData,timeData)
clean
# ### Step 3: Applying NMO correction to the Noisy Data
# Compared to the previous data set, this one is quite noisy. There is a reflector in the data, and your goal is to construct a stacked trace where this reflection is visible.
#
# Estimate $t_0$, and a plausible $v_{stack}$ by altering t0 and v using below widget. This hyperbola will be drawn as red hyperbola on the middle panel. On the right panel we apply stack with the velocity that you fit, and provice stacked trace.
noisy = InteractNosiy(obsData, timeData)
noisy
# In the previous step, you chose an intercept time (t0) and a stacking velocity (v). Running the cell below will generate three stacked traces:
# - Left: using t0 and v-200 m/s that we fit from Step 3
# - Middle: using t0 and v that we fit from Step 3
# - Right: using t0 and v+200 m/s that we fit from Step 3
NMOstackthree(obsData, noisy.kwargs["t0"], noisy.kwargs["v"]-200., noisy.kwargs["v"], noisy.kwargs["v"]+200., timeData)
# ### 2.3 Finding the best hyperbola: Semblance analysis
# No app for this section
| notebooks/seismic/Seis_Reflection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="t_gnhLD0HvhU" toc-hr-collapsed=true toc-nb-collapsed=true
# ## Summary
#
# In this notebook, we test the accuracy of a network trained to reconstruct protein sequences on the *test dataset*, comprised of sequences of Gene3D domains excluded from the training dataset. We also evaluate the accuracy achieved by the network at predicting the ΔΔG of mutations, a task that this tangential to the objective that the network was trained to optimize.
#
# ----
# + [markdown] colab_type="text" id="eoBMUoW2Hvhp" toc-hr-collapsed=true toc-nb-collapsed=true
# ## Imports
# + colab={} colab_type="code" id="TbKxMUZWHvhq"
import tempfile
import time
import warnings
from collections import deque
from contextlib import contextmanager
from pathlib import Path
import numpy as np
import pandas as pd
import tqdm
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import proteinsolver
import proteinsolver.datasets
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from kmbio import PDB
from matplotlib.ticker import FormatStrFormatter
from scipy import stats
from tensorflow.python.summary.summary_iterator import summary_iterator
from torch_geometric.data import DataLoader
# +
# %matplotlib inline
try:
inline_rc
except NameError:
inline_rc = mpl.rcParams.copy()
mpl.rcParams.update({"font.size": 12})
# -
# ## Parameters
UNIQUE_ID = "191f05de" # No attention
# UNIQUE_ID = "0007604c" # 5-layer graph-conv with attention, batch_size=1
# UNIQUE_ID = "91fc9ab9" # 4-layer graph-conv with attention, batch_size=4
BEST_STATE_FILES = {
#
"191f05de": "protein_train/191f05de/e53-s1952148-d93703104.state"
}
DATA_ROOT = Path(tempfile.gettempdir())
DATA_ROOT = Path(tempfile.gettempdir()).joinpath("ml_data")
DATA_ROOT.mkdir(exist_ok=True)
DATA_ROOT
NOTEBOOK_NAME = "protein_analysis"
NOTEBOOK_PATH = Path(NOTEBOOK_NAME)
NOTEBOOK_PATH.mkdir(exist_ok=True)
NOTEBOOK_PATH
DATAPKG_DATA_DIR = Path(f"~/datapkg_data_dir").expanduser().resolve()
DATAPKG_DATA_DIR
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
proteinsolver.settings.data_url = DATAPKG_DATA_DIR.as_posix()
proteinsolver.settings.data_url
# + [markdown] colab_type="text" id="dsEY3dtLHvhy" toc-hr-collapsed=true toc-nb-collapsed=true
# ## Load data
# -
datasets = {}
DATAPKG_DATA_DIR.joinpath(
"deep-protein-gen",
"processed",
"validation_data",
"part-00000-4f535e50-cdf4-4275-b6b3-a3038f24a1a9-c000.snappy.parquet",
).is_file()
datasets["protein_valid"] = proteinsolver.datasets.ProteinInMemoryDataset(
root=DATA_ROOT / "protein_valid",
subset="valid",
data_url=DATAPKG_DATA_DIR.joinpath(
"deep-protein-gen",
"processed",
"validation_data",
"part-00000-4f535e50-cdf4-4275-b6b3-a3038f24a1a9-c000.snappy.parquet",
).as_posix(),
)
datasets["protein_test"] = proteinsolver.datasets.ProteinInMemoryDataset(
root=DATA_ROOT / "protein_test",
subset="test",
data_url=DATAPKG_DATA_DIR.joinpath(
"deep-protein-gen",
"processed",
"validation_data",
"part-00000-4f535e50-cdf4-4275-b6b3-a3038f24a1a9-c000.snappy.parquet",
).as_posix(),
)
# ### Protherm
# +
file = DATAPKG_DATA_DIR.joinpath("deep-protein-gen/datasets-test/protherm/protherm.parquet").as_posix()
extra_columns = ["qseq", "residue_idx_1_corrected", "residue_idx_2_corrected", "distances"]
extra_column_renames = {
"qseq": "sequence",
"residue_idx_1_corrected": "row_index",
"residue_idx_2_corrected": "col_index",
}
protherm_dataset = proteinsolver.datasets.ProteinInMemoryDataset(
root=DATA_ROOT / "protherm",
subset="",
data_url=file,
extra_columns=extra_columns,
extra_column_renames=extra_column_renames,
)
protherm_df = pq.read_table(file).to_pandas()
# -
protherm_dataset[0]
protherm_df.head(2)
# ## Load model
# %run protein_train/{UNIQUE_ID}/model.py
batch_size = 1
num_features = 20
adj_input_size = 2
hidden_size = 128
frac_present = 0.5
frac_present_valid = frac_present
info_size= 1024
state_file = BEST_STATE_FILES[UNIQUE_ID]
state_file
net = Net(
x_input_size=num_features + 1, adj_input_size=adj_input_size, hidden_size=hidden_size, output_size=num_features
)
net.load_state_dict(torch.load(state_file, map_location=device))
net.eval()
net = net.to(device)
# ## Training plots
# !ls protein_train/runs/
# +
# Parse the TensorBoard event files for this run into tidy DataFrames
# (one row per logged step, one column per scalar tag).
dfs = []
for tb_file in Path(f"protein_train/runs/{UNIQUE_ID}.xxx/").glob("*"):
    data = []
    for e in summary_iterator(tb_file.as_posix()):
        for v in e.summary.value:
            row = (e.step, v.tag, v.simple_value)
            data.append(row)
            if v.tag == "accuracy_mv":
                # Record wall-clock time once per validation step so duplicate
                # datapoints can later be resolved by recency (see next cell).
                row = (e.step, "wall_time", e.wall_time)
                data.append(row)
    df = (
        pd.DataFrame(data, columns=["datapoint", "tag", "simple_value"])
        .pivot(index="datapoint", columns="tag", values="simple_value")
        .reset_index()
    )
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented replacement and behaves identically here.
    df["datapoint"] = df["datapoint"].astype(int)
    dfs.append(df)
# -
df = pd.concat(dfs, sort=False)
print(len(df))
df = df.sort_values("wall_time", ascending=False).drop_duplicates(subset=["datapoint"], keep="first").sort_values("wall_time", ascending=True)
print(len(df))
assert len(df) == len(df["datapoint"].drop_duplicates())
# +
cmap = cm.get_cmap("tab10")
x = df["datapoint"].values
accuracy_m_mean, accuracy_m_std = df["accuracy_m"].ewm(com=16, min_periods=0).agg(['mean', 'std']).values.T
accuracy_mv_mean, accuracy_mv_std = df["accuracy_mv"].ewm(com=16, min_periods=0).agg(['mean', 'std']).values.T
# accuracy_m_std = np.nan_to_num(accuracy_m_std, 0)
# accuracy_mv_std = np.nan_to_num(accuracy_mv_std, 0)
fg, ax = plt.subplots(figsize=(5, 4))
ax.plot(x, accuracy_m_mean, label="training", color=cmap(0))
ax.fill_between(x, accuracy_m_mean - accuracy_m_std * 2, accuracy_m_mean + accuracy_m_std * 2, alpha=0.25, color=cmap(0))
ax.plot(x, accuracy_mv_mean, label="validation", color=cmap(1))
ax.fill_between(x, accuracy_mv_mean - accuracy_mv_std * 2, accuracy_mv_mean + accuracy_mv_std * 2, alpha=0.25, color=cmap(1))
# ax.hlines(0.7, 0, df["datapoint"].max(), linestyle='--')
ax.legend(loc="lower right")
ax.set_xlabel("Number of training data points")
ax.set_ylabel("Accuracy on missing residues")
ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# ax.set_ylim(0, 0.82)
# fg.savefig(NOTEBOOK_PATH / f"{UNIQUE_ID}-training-curve.svg", bbox_inches="tight")
# -
# ## Test set sequence identity
# ### Oneshot
# One-shot reconstruction accuracy on the test set: mask *every* residue,
# run the network once, and take the per-position argmax.
results = []
for i, data in enumerate(
    tqdm.tqdm_notebook(
        DataLoader(datasets["protein_test"], shuffle=False, num_workers=1, batch_size=1, drop_last=False)
    )
):
    data = data.to(device)
    data.y = data.x
    x_in = torch.ones_like(data.y) * 20  # 20 == "unknown residue" token
    is_missing = torch.ones(data.y.size(0), dtype=torch.bool)  # everything is masked
    # Inference only — wrap in no_grad (as the other evaluation cells in this
    # notebook do) so no autograd graph is built for each test protein.
    with torch.no_grad():
        output = net(x_in, data.edge_index, data.edge_attr)
    output = torch.softmax(output, dim=1)
    _, predicted = output.max(dim=1)
    num_correct = float((predicted[is_missing] == data.y[is_missing]).sum())
    num_total = float(is_missing.sum())
    results.append({"fraction_correct": num_correct / num_total, "num_correct": num_correct, "num_total": num_total})
oneshot_results_df = pd.DataFrame(results)
# Overall accuracy = micro-average over all residues of all test proteins.
fraction_correct_oneshot = (oneshot_results_df["num_correct"] / oneshot_results_df["num_total"]).mean()
fraction_correct_oneshot
# ### Incremental
@torch.no_grad()
def design_protein(net, x, edge_index, edge_attr):
    """Greedily fill in masked residues (token 20) of `x` in place.

    At each step the network scores every position; the still-masked position
    with the highest-confidence prediction is assigned its argmax amino acid.
    Repeats until no masked positions remain, then returns the completed `x`.

    The original implementation recursed once per assigned residue, which can
    exceed Python's default recursion limit (~1000 frames) on long sequences;
    this version performs the identical greedy procedure iteratively.
    """
    while True:
        mask = x == 20  # positions still to be designed
        if not mask.any():
            return x
        probs = torch.softmax(net(x, edge_index, edge_attr), dim=1)
        best_prob, best_aa = probs.max(dim=1)
        best_prob[~mask] = -1  # never re-assign already-fixed residues
        _, pos = best_prob.max(dim=0)
        x[pos] = best_aa[pos]
# Incremental (greedy, one-residue-at-a-time) reconstruction at several
# levels of pre-revealed sequence: 0%, 50%, and 80% of residues kept.
test_sequence_identity = {}
for frac_present in [0.0, 0.5, 0.8]:
    results = []
    for i, data in enumerate(
        tqdm.tqdm_notebook(
            DataLoader(datasets["protein_test"], shuffle=False, num_workers=1, batch_size=1, drop_last=False)
        )
    ):
        data.to(device)
        if frac_present == 0:
            # Everything masked: no residues are revealed to the network.
            is_present = torch.zeros(data.x.size(0), dtype=torch.bool, device=data.x.device)
            x_in = torch.ones_like(data.x) * 20
        else:
            # Reveal each residue independently with probability frac_present.
            is_present = torch.rand(data.x.size(0), device=data.x.device) < frac_present
            x_in = torch.where(is_present, data.x, torch.tensor(20, device=data.x.device))
        x_pred = design_protein(net, x_in, data.edge_index, data.edge_attr)
        # Identity over the whole chain vs. identity over masked positions only.
        identity_all = float((x_pred == data.x).sum()) / data.x.size(0)
        identity_missing = float((x_pred[~is_present] == data.x[~is_present]).sum()) / (~is_present).sum().item()
        result = {
            "identity_all": identity_all,
            "identity_missing": identity_missing,
            "num_correct": (x_pred[~is_present] == data.x[~is_present]).sum().item(),
            "num_total": (~is_present).sum().item(),
        }
        results.append(result)
    test_sequence_identity[frac_present] = pd.DataFrame(results)
# +
# Distribution of sequence identities for the incremental designs, one curve
# per fraction of pre-revealed residues.
xlim = -0.025, 0.625
fg, ax = plt.subplots(figsize=(5, 4))
for frac, df in sorted(test_sequence_identity.items()):
    data = df["identity_missing"]
    data = np.clip(data, *xlim)
    # NOTE(review): the original called `sns.kdeplot(...)` here, but seaborn
    # is never imported in this notebook, so the cell raised a NameError.
    # Fall back to the matplotlib step histogram that was left commented out,
    # normalized (density=True) to match the y-axis label below.
    ax.hist(data, bins=30, range=xlim, density=True, histtype="step",
            label=f"{frac:.0%} residues present", linewidth=1.5)
ax.set_ylabel("Number of sequences (normalized)")
ax.set_xlabel("Sequence identity to reference")
ax.set_xlim(*xlim)
# ax.set_ylim(0, 4.2)
ax.legend(loc="upper left", framealpha=0.3)
fg.tight_layout()
fg.savefig(NOTEBOOK_PATH.joinpath(f"{UNIQUE_ID}-test-incremental-kde.svg"), bbox_inches="tight")
# +
fg, ax = plt.subplots(figsize=(5, 4))
xlim = -0.025, 0.625
data = oneshot_results_df["fraction_correct"]
data = np.clip(data, *xlim)
ax.hist(
data,
bins=100,
range=xlim,
alpha=0.9,
label=f"oneshot (accuracy: {fraction_correct_oneshot:.2%})",
histtype="stepfilled",
linewidth=2,
color=cm.get_cmap("Set1")(1),
)
# sns.kdeplot(data, gridsize=200, label="oneshot")
data = test_sequence_identity[0.0]["identity_missing"]
data = np.clip(data, *xlim)
ax.hist(
data,
bins=100,
range=xlim,
alpha=0.6,
label=f"incremental (accuracy: {fraction_correct_incremental:.2%})",
histtype="stepfilled",
linewidth=2,
color=cm.get_cmap("Set1")(0),
)
# sns.kdeplot(data, gridsize=100, label="incremental")
ax.set_ylabel("Number of sequences")
ax.set_xlabel("Sequence identity to reference")
ax.set_xlim(xlim)
# ax.set_ylim(0, 165)
ax.legend(framealpha=0.5)
fg.tight_layout()
fg.savefig(NOTEBOOK_PATH.joinpath(f"{UNIQUE_ID}-test-oneshot-incremental.svg"), bbox_inches="tight")
# -
# ## Protherm validation
# +
# ProTherm ΔΔG evaluation: for every single-point mutation, score the
# wild-type, mutant, and "blanked" (masked-position) sequences with the
# network under several output transforms (raw, softmax, log-softmax;
# single-pass, masked scan, incremental), so the alternative scores can be
# compared as ΔΔG predictors in later cells.
results = []
for i, data in enumerate(tqdm.tqdm_notebook(protherm_dataset, total=len(protherm_dataset))):
    data = data.to(device)
    row = protherm_df.loc[data.Index.item()]
    with torch.no_grad():
        output = net(data.x, data.edge_index, data.edge_attr)
    # Mutation strings look like "A123G": wild-type AA, 1-based position, mutant AA.
    wt_idx = torch.tensor(proteinsolver.utils.seq_to_tensor(row.mutation[0].encode()), dtype=torch.long, device=device)
    mut_idx = torch.tensor(
        proteinsolver.utils.seq_to_tensor(row.mutation[-1].encode()), dtype=torch.long, device=device
    )
    pos = int(row.mutation[1:-1])
    x_mut = data.x.clone()
    assert x_mut[pos - 1] == wt_idx  # sanity check: sequence matches the record
    x_mut[pos - 1] = mut_idx
    with torch.no_grad():
        output_mut = net(x_mut, data.edge_index, data.edge_attr)
    x_blank = data.x.clone()
    assert x_blank[pos - 1] == wt_idx
    x_blank[pos - 1] = 20  # mask out the mutated position
    with torch.no_grad():
        output_blank = net(x_blank, data.edge_index, data.edge_attr)
    # logproba.scan — score each residue with that single position masked.
    scan_wt = proteinsolver.utils.scan_with_mask(net, data.x, data.edge_index, data.edge_attr)
    scan_sm_wt = proteinsolver.utils.scan_with_mask(
        net, data.x, data.edge_index, data.edge_attr, output_transform="proba"
    )
    scan_sm_log_wt = proteinsolver.utils.scan_with_mask(
        net, data.x, data.edge_index, data.edge_attr, output_transform="logproba"
    )
    scan_mut = proteinsolver.utils.scan_with_mask(net, x_mut, data.edge_index, data.edge_attr)
    scan_sm_mut = proteinsolver.utils.scan_with_mask(
        net, x_mut, data.edge_index, data.edge_attr, output_transform="proba"
    )
    scan_sm_log_mut = proteinsolver.utils.scan_with_mask(
        net, x_mut, data.edge_index, data.edge_attr, output_transform="logproba"
    )
    # logproba.incremental — per-node outputs from the incremental procedure.
    incremental_wt = proteinsolver.utils.get_node_outputs(net, data.x, data.edge_index, data.edge_attr)
    incremental_sm_wt = proteinsolver.utils.get_node_outputs(
        net, data.x, data.edge_index, data.edge_attr, output_transform="proba"
    )
    incremental_sm_log_wt = proteinsolver.utils.get_node_outputs(
        net, data.x, data.edge_index, data.edge_attr, output_transform="logproba"
    )
    incremental_mut = proteinsolver.utils.get_node_outputs(net, x_mut, data.edge_index, data.edge_attr)
    incremental_sm_mut = proteinsolver.utils.get_node_outputs(
        net, x_mut, data.edge_index, data.edge_attr, output_transform="proba"
    )
    incremental_sm_log_mut = proteinsolver.utils.get_node_outputs(
        net, x_mut, data.edge_index, data.edge_attr, output_transform="logproba"
    )
    # Collect every score at the mutated position (or averaged over the chain
    # for the scan/incremental variants), plus the experimental ΔΔG.
    result = {
        "wt_wt": output[pos - 1, wt_idx].item(),
        "wt_mut": output[pos - 1, mut_idx].item(),
        "wt_sm_wt": torch.softmax(output, dim=1)[pos - 1, wt_idx].item(),
        "wt_sm_mut": torch.softmax(output, dim=1)[pos - 1, mut_idx].item(),
        "wt_sm_log_wt": torch.softmax(output, dim=1).log()[pos - 1, wt_idx].item(),
        "wt_sm_log_mut": torch.softmax(output, dim=1).log()[pos - 1, mut_idx].item(),
        #
        "mut_wt": output_mut[pos - 1, wt_idx].item(),
        "mut_mut": output_mut[pos - 1, mut_idx].item(),
        "mut_sm_wt": torch.softmax(output_mut, dim=1)[pos - 1, wt_idx].item(),
        "mut_sm_mut": torch.softmax(output_mut, dim=1)[pos - 1, mut_idx].item(),
        "mut_sm_log_wt": torch.softmax(output_mut, dim=1).log()[pos - 1, wt_idx].item(),
        "mut_sm_log_mut": torch.softmax(output_mut, dim=1).log()[pos - 1, mut_idx].item(),
        #
        "blank_wt": output_blank[pos - 1, wt_idx].item(),
        "blank_mut": output_blank[pos - 1, mut_idx].item(),
        "blank_sm_wt": torch.softmax(output_blank, dim=1)[pos - 1, wt_idx].item(),
        "blank_sm_mut": torch.softmax(output_blank, dim=1)[pos - 1, mut_idx].item(),
        "blank_sm_log_wt": torch.softmax(output_blank, dim=1).log()[pos - 1, wt_idx].item(),
        "blank_sm_log_mut": torch.softmax(output_blank, dim=1).log()[pos - 1, mut_idx].item(),
        #
        "scan_wt": scan_wt.mean().item(),
        "scan_mut": scan_mut.mean().item(),
        "scan_sm_wt": scan_sm_wt.mean().item(),
        "scan_sm_mut": scan_sm_mut.mean().item(),
        "scan_sm_log_wt": scan_sm_log_wt.mean().item(),
        "scan_sm_log_mut": scan_sm_log_mut.mean().item(),
        #
        "incremental_wt": incremental_wt.mean().item(),
        "incremental_mut": incremental_mut.mean().item(),
        "incremental_sm_wt": incremental_sm_wt.mean().item(),
        "incremental_sm_mut": incremental_sm_mut.mean().item(),
        "incremental_sm_log_wt": incremental_sm_log_wt.mean().item(),
        "incremental_sm_log_mut": incremental_sm_log_mut.mean().item(),
        #
        "ddg_exp": row.ddg_exp,
    }
    results.append(result)
results_df = pd.DataFrame(results)
# -
len(protherm_df)
# +
# torch.save(results_df, NOTEBOOK_PATH.joinpath("protherm_results.torch"))
# -
results_df = torch.load(NOTEBOOK_PATH.joinpath("protherm_results.torch"))
protherm_df.head()
assert len(results_df) == len(protherm_df)
assert (results_df.index == protherm_df.index).all()
protherm_wresults_df = pd.concat([protherm_df, results_df.rename(columns={"ddg_exp": "ddg_exp_2"})], axis=1)
assert (protherm_wresults_df["ddg_exp"] == protherm_wresults_df["ddg_exp_2"]).all()
del protherm_wresults_df["ddg_exp_2"]
protherm_wresults_df.head()
# +
# torch.save(protherm_wresults_df, NOTEBOOK_PATH.joinpath("protherm_wresults.torch"))
# -
# NOTE(review): `results_df_bak` is not defined anywhere in this notebook —
# this cell presumably relied on a backup DataFrame created in an earlier
# interactive session, and will raise NameError as written; verify before use.
results_df["scan_sm_log_wt"] = results_df_bak["scan_sm_log_wt"]
results_df["scan_sm_log_mut"] = results_df_bak["scan_sm_log_mut"]
# +
results_df["blank_change"] = results_df["blank_mut"] - results_df["blank_wt"]
results_df["blank_sm_change"] = results_df["blank_sm_mut"] - results_df["blank_sm_wt"]
results_df["wt_mut_change"] = results_df["mut_mut"] - results_df["wt_wt"]
results_df["wt_mut_sm_change"] = results_df["mut_sm_mut"] - results_df["wt_sm_wt"]
for prefix in ["scan", "scan_sm", "scan_sm_log", "incremental", "incremental_sm", "incremental_sm_log"]:
results_df[f"{prefix}_change"] = results_df[f"{prefix}_mut"] - results_df[f"{prefix}_wt"]
# +
column = "scan_sm_log_change"
ddg_change = results_df[column].values
ddg_exp = results_df["ddg_exp"].values
textstr = """\
Pearson R: {:.3f} (p={:.0e})
Spearman R: {:.3f} (p={:.0e})
""".format(
*stats.pearsonr(-ddg_change, ddg_exp), *stats.spearmanr(-ddg_change, ddg_exp)
).strip()
props = dict(facecolor="white", alpha=0.3)
with plt.rc_context(rc={"font.size": 12}):
fg, ax = plt.subplots(figsize=(6, 4.5))
ax.plot(-ddg_change, ddg_exp, "r.", alpha=0.5)
ax.set_xlabel("Difference in network outputs")
ax.set_ylabel("ΔΔG (kcal / mol)")
# ax.set_xlim(-8, 16)
ax.set_ylim(-8, 16)
ax.text(0.04, 0.95, textstr, transform=ax.transAxes, fontsize="medium", verticalalignment="top", bbox=props)
fg.tight_layout()
# fg.savefig(NOTEBOOK_PATH.joinpath(f"{UNIQUE_ID}-protherm-all.svg"), bbox_inches="tight")
# -
# ## Timing
def design_sequences(structure, num_sequences):
    """Design `num_sequences` sequences for the first chain of `structure`.

    Builds the graph representation of the chain once, masks every residue
    (token 20), then repeatedly runs the network's sequence-design routine.
    Results are discarded — this helper exists only for the timing cell
    below.  Relies on the module-level `net` and `device`.
    """
    first_chain = next(structure.chains).id
    raw = proteinsolver.utils.extract_seq_and_adj(structure, first_chain)
    graph = proteinsolver.datasets.protein.transform_edge_attr(
        proteinsolver.datasets.protein.row_to_data(raw)
    )
    graph.x = torch.ones_like(graph.x) * 20  # fully masked sequence
    graph = graph.to(device)
    for _ in range(num_sequences):
        proteinsolver.utils.design_sequence(net, graph)
structure_path = Path(proteinsolver.__path__[0]).joinpath("data", "inputs").resolve(strict=True)
for structure_id in ["5VLI", "1N5U", "4Z8J", "4UNU", "4BEU"]:
structure_files = list(structure_path.glob(f"{structure_id.lower()}*.pdb"))
assert len(structure_files) == 1, structure_files
structure_file = structure_files[0]
structure = PDB.load(structure_file)
for num_designs in [1, 1000]:
print(structure_file.name, num_designs)
# %time design_sequences(structure, num_designs)
print()
# ## Perplexities
def calculate_perplexity(net, x, edge_index, edge_attr, x_in=None):
    """Return per-residue probabilities of the true sequence `x` under a
    greedy uncovering order.

    Starting from a fully masked input (token 20) unless `x_in` is given,
    repeatedly scores all positions, records the probability the network
    assigns to the *true* residue at the still-masked position it is most
    confident about, then reveals that residue and continues.  Probabilities
    are returned in the order positions were revealed.

    Rewritten iteratively: the original recursed once per residue and could
    hit Python's default recursion limit (~1000) on long sequences.
    """
    if x_in is None:
        x_in = torch.ones_like(x) * 20
    probas_out = []
    while True:
        mask = x_in == 20
        if not mask.any().item():
            return probas_out
        output = net(x_in, edge_index, edge_attr)
        output = torch.softmax(output, dim=1)
        # Probability assigned to the *true* residue at each position.
        probas = output.gather(1, x.view(-1, 1))
        probas[~mask] = -1  # exclude positions already revealed
        max_proba, max_residue = probas.max(dim=0)
        x_in[max_residue] = x[max_residue]
        probas_out.append(max_proba.item())
proba_lists = []
for i, data in enumerate(
tqdm.tqdm_notebook(
DataLoader(datasets["protein_test"], shuffle=False, num_workers=1, batch_size=1, drop_last=False)
)
):
data = data.to(device)
with torch.no_grad():
proba_lst = calculate_perplexity(net, data.x, data.edge_index, data.edge_attr)
assert len(proba_lst) == data.x.size(0)
proba_lists.append(proba_lst)
# Perplexity per protein: 2 ** (-mean log2 probability of the true residues).
perplexities = [2 ** -(np.log2(proba_lst).mean()) for proba_lst in proba_lists]
print(np.mean(perplexities))
fg, ax = plt.subplots(figsize=(5, 4))
ax.hist(np.clip(perplexities, 3, 22), bins=30, range=(3, 22))
ax.set_xlabel("Perplexity score")
ax.set_ylabel("Number of sequences")
fg.savefig(NOTEBOOK_PATH.joinpath(f"{UNIQUE_ID}-perplexity.svg"), bbox_inches="tight")
raise Exception("Done!")
# +
# Alternative ΔΔG score: change in the summed per-residue log-probability of
# the whole sequence (wild-type vs. mutant), computed with a masked scan.
results = []
for i, data in enumerate(tqdm.tqdm_notebook(protherm_dataset, total=len(protherm_dataset))):
    row = protherm_df.loc[data.Index.item()]
    data = data.to(device)
    with torch.no_grad():
        log_prob_wt = proteinsolver.utils.get_node_proba(net, data.x, data.edge_index, data.edge_attr, 20)
    # Mutation strings look like "A123G": wild-type AA, 1-based position, mutant AA.
    wt_idx = torch.tensor(proteinsolver.utils.seq_to_tensor(row.mutation[0].encode()), dtype=torch.long, device=device)
    mut_idx = torch.tensor(proteinsolver.utils.seq_to_tensor(row.mutation[-1].encode()), dtype=torch.long, device=device)
    pos = int(row.mutation[1:-1])
    x_mut = data.x.clone()
    assert x_mut[pos - 1] == wt_idx  # sequence must match the ProTherm record
    x_mut[pos - 1] = mut_idx
    with torch.no_grad():
        log_prob_mut = proteinsolver.utils.get_node_proba(net, x_mut, data.edge_index, data.edge_attr, 20)
    results.append(log_prob_mut.sum().item() - log_prob_wt.sum().item())
results_df["sum_log_prob_change"] = results
# +
column = "sum_log_prob_change"
ddg_change = results_df[column].values
ddg_exp = results_df["ddg_exp"].values
textstr = """\
Pearson R: {:.3f} (p={:.0e})
Spearman R: {:.3f} (p={:.0e})
""".format(
*stats.pearsonr(-ddg_change, ddg_exp), *stats.spearmanr(-ddg_change, ddg_exp)
).strip()
props = dict(facecolor="white", alpha=0.3)
with plt.rc_context(rc={"font.size": 12}):
fg, ax = plt.subplots(figsize=(6, 4.5))
ax.plot(-ddg_change, ddg_exp, "r.", alpha=0.5)
ax.set_xlabel("Difference in network outputs")
ax.set_ylabel("ΔΔG (kcal / mol)")
ax.set_xlim(-8, 16)
ax.set_ylim(-8, 16)
ax.text(0.04, 0.95, textstr, transform=ax.transAxes, fontsize="medium", verticalalignment="top", bbox=props)
fg.tight_layout()
# fg.savefig(NOTEBOOK_PATH.joinpath("protherm-all.svg"))
# +
# table = pa.Table.from_pandas(results_df)
# pq.write_table(table, NOTEBOOK_PATH.joinpath("results.parquet"))
# -
| notebooks/06_protein_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pwd
import sys
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
sys.executable
# # 【Python】小池百合子当確に必要な票数を計算する【matplotlib】
# - https://qiita.com/tetsuro731/items/1457c61d53e99ad19dcb?utm_source=Qiita%E3%83%8B%E3%83%A5%E3%83%BC%E3%82%B9&utm_campaign=6922a8b5e9-Qiita_newsletter_419_07_15_2020&utm_medium=email&utm_term=0_e44feaa081-6922a8b5e9-34468725
#
# <br>
#
# - 統計学の検定という手法を使ったやり方だと「95%の確率で小池百合子の得票率は59~61%の間にあるだろう」というような言い方をします。
# - この時に59-61の区間のことを信頼区間と言います。
# - 今回はこれをPythonで実装してみました。
# - 以下の方法で信頼区間○○%の場合、N票開票した結果の得票率がr%だった場合、信頼区間の上限と下限が求まります。
def getR(r, N):
    """Return the (lower, upper) confidence bounds for a vote share.

    Uses the normal approximation to the binomial: the standard error of an
    observed share `r` over `N` counted ballots is sqrt(r * (1 - r) / N).

    Parameters
    ----------
    r : observed vote share from the ballots counted so far (0..1).
    N : number of ballots counted (the sample size).

    Common two-sided z critical values (standard normal):
        k = 1.96  -> 95%   confidence
        k = 2.58  -> 99%   confidence
        k = 3.29  -> 99.9% confidence
    (The original comments labelled these 1σ/2σ/3σ, which is incorrect —
    e.g. 1.96 corresponds to roughly ±2σ, not ±1σ.)
    """
    import math

    k = 3.29  # 99.9% confidence
    # Hoisted: the margin is the same for both bounds, so compute it once.
    margin = k * math.sqrt(r * (1 - r) / N)
    return r - margin, r + margin
# #### 得票率の平均は開票数によらず小池百合子0.6, 宇都宮健児0.15で固定(本来は時々刻々と変化するが)
# - 信頼区間99.9%とかなり厳しめにしたにも関わらず、わずか100票開票した時点でKenjiの上限よりYurikoの下限のほうが上にありますね。
# - 横軸の値が増えるにしたがって精度が上がるためこの区間も収束していきますが、わずかな票数でYurikoが当選確実だということがわかります。
#
# <br>
#
# #### 小池百合子の得票率を下げると二人の信頼区間が重なっている。これは1000票開票した結果Yurikoの票数がKenjiより多かったとしても、まだ統計的に十分とは言えないことを示しています。
#
# <br>
#
# #### 今回の都知事選ではかなり早い段階で小池百合子当選確実の報が出たが、1位と2位の得票率の差を考えれば、必要な開票数がかなり少ないことがわかる
# +
import numpy as np
import matplotlib.pyplot as plt
def plot_vote_confidence_interval(yuriko_rate, kenji_rate):
    """Plot each candidate's (fixed) mean vote share against the number of
    counted ballots, with the 3-sigma (99.9%) confidence band from getR()."""
    # Ballot counts: 100, 200, ..., 900.
    counts = list(range(100, 1000, 100))
    # Lower/upper confidence bounds for each candidate at each count.
    yuriko_lower = np.array([getR(yuriko_rate, n)[0] for n in counts])
    yuriko_upper = np.array([getR(yuriko_rate, n)[1] for n in counts])
    kenji_lower = np.array([getR(kenji_rate, n)[0] for n in counts])
    kenji_upper = np.array([getR(kenji_rate, n)[1] for n in counts])
    # The band midpoint is the candidate's mean vote share.
    plt.plot(counts, (yuriko_lower + yuriko_upper) / 2,
             color='blue', marker='o',
             markersize=5, label='Yuriko')
    plt.fill_between(counts,
                     yuriko_upper,
                     yuriko_lower,
                     alpha=0.15, color='blue')
    plt.plot(counts, (kenji_lower + kenji_upper) / 2,
             color='green', linestyle='--',
             marker='s', markersize=5,
             label='Kenji')
    plt.fill_between(counts,
                     kenji_upper,
                     kenji_lower,
                     alpha=0.15, color='green')
    plt.grid()
    plt.xlabel('Number of votes')
    plt.ylabel('Rates')
    plt.legend(loc='upper right')
    plt.ylim([0., 1.0])
    plt.tight_layout()
    plt.show()
    plt.clf()  # release the figure's memory
    plt.close()
if __name__ == '__main__':
    # Approximate vote shares for each candidate.
    yuriko_rate = 0.6
    kenji_rate = 0.15
    plot_vote_confidence_interval(yuriko_rate, kenji_rate)
    # A closer race for comparison.
    yuriko_rate = 0.4
    kenji_rate = 0.3
    plot_vote_confidence_interval(yuriko_rate, kenji_rate)
# -
# ## 得票率乱数で変えていく場合(得票率の確率は固定してる)
# - コインで表が出る確率は50%なので、0-1の乱数を10回発生させて、0.5以上が何回出たかを数えれば良い。
# - 今回も真の得票率(Yuriko = 0.6, Kenji = 0.15)がわかっていれば以下の関数で計算可能
def getMeanRate(R, N, seed=7):
    """
    Simulate counting N ballots for a candidate with true vote share R.

    R    : the candidate's true vote share
    N    : number of ballots counted
    seed : RNG seed (fixed default so repeated calls are reproducible)
    Returns the simulated observed vote share (votes for the candidate / N).
    """
    import random
    random.seed(seed)
    # Each ballot is a Bernoulli(R) draw: random() < R means a vote for the candidate.
    votes = sum(1 for _ in range(N) if random.random() < R)
    return votes / N
# +
import numpy as np
import matplotlib.pyplot as plt
def plot_vote_confidence_interval(yuriko_rate, kenji_rate):
    """Plot each candidate's *simulated* vote share (random ballot draws via
    getMeanRate) against the number of counted ballots, with the 3-sigma
    (99.9%) confidence band from getR()."""
    # Ballot counts: 100, 200, ..., 900.
    counts = list(range(100, 1000, 100))
    yuriko_lower = np.array([getR(yuriko_rate, n)[0] for n in counts])
    yuriko_upper = np.array([getR(yuriko_rate, n)[1] for n in counts])
    yuriko_mean = np.array([getMeanRate(yuriko_rate, n) for n in counts])
    kenji_lower = np.array([getR(kenji_rate, n)[0] for n in counts])
    kenji_upper = np.array([getR(kenji_rate, n)[1] for n in counts])
    kenji_mean = np.array([getMeanRate(kenji_rate, n) for n in counts])
    # Plot the simulated shares as the line, with the analytic band behind.
    plt.plot(counts, yuriko_mean,
             color='blue', marker='o',
             markersize=5, label='Yuriko')
    plt.fill_between(counts,
                     yuriko_upper,
                     yuriko_lower,
                     alpha=0.15, color='blue')
    plt.plot(counts, kenji_mean,
             color='green', linestyle='--',
             marker='s', markersize=5,
             label='Kenji')
    plt.fill_between(counts,
                     kenji_upper,
                     kenji_lower,
                     alpha=0.15, color='green')
    plt.grid()
    plt.xlabel('Number of votes')
    plt.ylabel('Rates')
    plt.legend(loc='upper right')
    plt.ylim([0., 1.0])
    plt.tight_layout()
    plt.show()
    plt.clf()  # release the figure's memory
    plt.close()
if __name__ == '__main__':
    # Approximate vote shares for each candidate.
    yuriko_rate = 0.6
    kenji_rate = 0.15
    plot_vote_confidence_interval(yuriko_rate, kenji_rate)
    # A closer race for comparison.
    yuriko_rate = 0.4
    kenji_rate = 0.3
    plot_vote_confidence_interval(yuriko_rate, kenji_rate)
# -
| vote_confidence_interval/vote_confidence_interval.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="hZzLBRl0iNfE" colab_type="text"
# # Hands-on: movie recommender system
# ## Collaborative filtering (matrix factorization)
#
# You are an online retailer/travel agent/movie review website, and you would like to help the visitors of your website to explore more of your products/destinations/movies. You got data which either describe the different products/destinations/films, or past transactions/trips/views (or preferences) of your visitors (or both!). You decide to leverage that data to provide relevant and meaningful recommendations.
#
# This notebook implements a simple collaborative system using factorization of the user-item matrix.
# + id="J8u8ZXvkhkfY" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# + id="CPVCIslerd5e" colab_type="code" colab={}
ratings="https://github.com/couturierc/tutorials/raw/master/recommender_system/data/ratings.csv"
movies="https://github.com/couturierc/tutorials/raw/master/recommender_system/data/movies.csv"
# If data stored locally
# ratings="./data/ratings.csv"
# movies="./data/movies.csv"
df_ratings = pd.read_csv(ratings, sep=',')
df_ratings.columns = ['userId', 'itemId', 'rating', 'timestamp']
df_movies = pd.read_csv(movies, sep=',')
df_movies.columns = ['itemId', 'title', 'genres']
# + id="IvyAYay5rzcS" colab_type="code" colab={}
df_movies.head()
# + id="R4K08KX3sYhr" colab_type="code" colab={}
df_ratings.head()
# + [markdown] id="2hUKyFxYdsT5" colab_type="text"
# ## Quick exploration
#
# Hints: use df.describe(), df.column_name.hist(), scatterplot matrix (sns.pairplot(df[column_range])), correlation matrix (sns.heatmap(df.corr()) ), check duplicates, ...
# + id="LVqBtDNmJ5vL" colab_type="code" colab={}
# Start your exploration -- use as many cells as you need !
# + [markdown] id="MffuKcE5s8fQ" colab_type="text"
# ## Obtain the user-item matrice by pivoting df_ratings
# + id="qOt3GI3zs2Ts" colab_type="code" colab={}
##### FILL HERE (1 line) ######
df_user_item = NULL # Use df.pivot, rows ~ userId's, columns ~ itemId's
################################
# Sort index/rows (userId's) and columns (itemId's)
df_user_item.sort_index(axis=0, inplace=True)
df_user_item.sort_index(axis=1, inplace=True)
# + [markdown] id="90Q7L3SQtc1t" colab_type="text"
# This matrix has **many** missing values:
# + id="P6tkf_s3tgsL" colab_type="code" colab={}
df_user_item.head()
# + id="J0EfDXLIRWaG" colab_type="code" colab={}
df_user_item.describe()
# + [markdown] id="HXanXrqI4xJ4" colab_type="text"
# For instance, rating for userId=1 for movies with itemId 1 to 10:
# + id="QLI0gnwT4obE" colab_type="code" colab={}
df_user_item.loc[1][:10]
# + id="3SM4RU3njy2K" colab_type="code" colab={}
# df_user_item.loc[1].dropna().sort_values(ascending=False)
# + [markdown] id="-dtJPkm1knNC" colab_type="text"
# Save the movie ids for user 1 for later:
# + id="C05fKcNrkmYv" colab_type="code" colab={}
item_rated_user_1 = df_user_item.loc[1].dropna().index
item_rated_user_1
# + [markdown] id="oR-pEwd5thyy" colab_type="text"
# We want to find the matrix of rank $k$ which is closest to the original matrix.
#
#
# + [markdown] id="gAUU_b5ma5bA" colab_type="text"
# ## What not to do: Fill with 0's or mean values, then Singular Value Decomposition (SVD)
# + [markdown] id="5ixiAfGIH6VU" colab_type="text"
# (Adapted from https://github.com/beckernick/matrix_factorization_recommenders/blob/master/matrix_factorization_recommender.ipynb)
#
# Singular Value Decomposition decomposes a matrix $R$ into the best lower rank (i.e. smaller/simpler) approximation of the original matrix $R$. Mathematically, it decomposes R into a two unitary matrices and a diagonal matrix:
#
# $$\begin{equation}
# R = U\Sigma V^{T}
# \end{equation}$$
#
# where:
# - R is users's ratings matrix,
# - $U$ is the user "features" matrix, it represents how much users "like" each feature,
# - $\Sigma$ is the diagonal matrix of singular values (essentially weights),
# - $V^{T}$ is the movie "features" matrix, it represents how relevant each feature is to each movie,
#
# with $U$ and $V^{T}$ orthogonal.
# + id="MMVe_feVQQK_" colab_type="code" colab={}
df_user_item = df_user_item.fillna(0)
df_user_item.head()
# + id="Pz16Rlw4tlom" colab_type="code" colab={}
R = df_user_item.values
# + id="_R9inUPkH1Hm" colab_type="code" colab={}
R
# + [markdown] id="gypFSYCYHg63" colab_type="text"
# Apply SVD to R (e.g. using NumPy or SciPy)
# + id="XGSFlWxLHYVE" colab_type="code" colab={}
from scipy.sparse.linalg import svds
U, sigma, Vt = svds(R, k = 50)
# + [markdown] id="slRJZ23uIVLt" colab_type="text"
# What do $U$, $\Sigma$, $V^T$ look like?
# + id="jfifORX6IIga" colab_type="code" colab={}
U
# + id="nXkKnGWcISzH" colab_type="code" colab={}
sigma
# + id="v0H56AlQIUTM" colab_type="code" colab={}
Vt
# + [markdown] id="baQzWyVHKQVN" colab_type="text"
# Get recommendations:
# + id="CyzbchyIKnkW" colab_type="code" colab={}
# First make sigma a diagonal matrix:
sigma = np.diag(sigma)
# + id="uouELHsfKtOU" colab_type="code" colab={}
R_after_svd = np.dot(np.dot(U, sigma), Vt)
R_after_svd
# + id="mFID_6eWKskb" colab_type="code" colab={}
# + [markdown] id="z6NRarPjJ0DI" colab_type="text"
# Drawbacks of this approach:
# - the missing values (here filled with 0's) are feedback that the user did not give; we cannot consider them negative/null ratings.
# - the dense matrix is huge, applying SVD is not scalable.
# + [markdown] id="Keb06kCFbIPl" colab_type="text"
# ## Approximate SVD with stochastic gradient descend (SGD)
#
#
# This time, we do **not** fill missing values.
#
# We inject $\Sigma$ into U and V, and try to find P and q such that $\widehat{R} = P Q^{T}$ is close to $R$ **for the item-user pairs already rated**.
#
# + [markdown] id="tkr8jfzbVS_R" colab_type="text"
# A first function to simplify the entries (userId/itemId): we map the set of original ids to contiguous integer indices.
# + id="F_HgEkPAQSTG" colab_type="code" colab={}
def encode_ids(data):
    '''Re-index userId and itemId to contiguous integers.

    Takes a rating dataframe and returns:
    - a copy of the dataframe with ids in range(nb unique id) for users and items
    - 2 mapping dictionaries (encoded id -> original id) for users and items
    '''
    data_encoded = data.copy()

    # Unique users in order of first appearance; the row position is the encoded id.
    unique_users = pd.DataFrame(data_encoded.userId.unique(), columns=['userId'])
    dict_users = unique_users.to_dict()
    encode_user = {original: code for code, original in dict_users['userId'].items()}

    unique_items = pd.DataFrame(data_encoded.itemId.unique(), columns=['itemId'])
    dict_items = unique_items.to_dict()
    encode_item = {original: code for code, original in dict_items['itemId'].items()}

    data_encoded.userId = data_encoded.userId.map(encode_user)
    data_encoded.itemId = data_encoded.itemId.map(encode_item)

    return data_encoded, dict_users, dict_items
# + [markdown] id="Yt6SYVvAX3Di" colab_type="text"
# Here is the procedure we would like to implement in the function SGD():
#
# 1. itinialize P and Q to random values
#
# 2. for $n_{epochs}$ passes on the data:
#
# * for all known ratings $r_{ui}$
# * compute the error between the predicted rating $p_u \cdot q_i$ and the known ratings $r_{ui}$:
# $$ err = r_{ui} - p_u \cdot q_i $$
# * update $p_u$ and $q_i$ with the following rule:
# $$ p_u \leftarrow p_u + \alpha \cdot err \cdot q_i $$
# $$ q_i \leftarrow q_i + \alpha \cdot err \cdot p_u$$
#
#
#
#
#
#
# + id="iA0tyBHJ5xyI" colab_type="code" colab={}
# Adapted from http://nicolas-hug.com/blog/matrix_facto_4
# Adapted from http://nicolas-hug.com/blog/matrix_facto_4
def SGD(data,            # dataframe containing 1 user|item|rating per row
        n_factors = 10,  # number of latent factors
        alpha = .01,     # learning rate (the original comment wrongly said "number of factors")
        n_epochs = 3,    # number of iterations of the SGD procedure
        ):
    '''Learn the factor matrices P and Q (ie all the vectors p_u and q_i) with SGD.

    Returns (p, q): arrays of shape (n_users, n_factors) and (n_items, n_factors).
    '''

    # Encoding userId's and itemId's in data
    data, dict_users, dict_items = encode_ids(data)

    n_users = data.userId.nunique()  # number of unique users
    n_items = data.itemId.nunique()  # number of unique items

    # Randomly initialize the user and item factors.
    p = np.random.normal(0, .1, (n_users, n_factors))
    q = np.random.normal(0, .1, (n_items, n_factors))

    # Optimization procedure
    for epoch in range(n_epochs):
        print('epoch: ', epoch)
        # Loop over the rows in data
        for index in range(data.shape[0]):
            row = data.iloc[[index]]
            u = int(row.userId)      # current userId = position in the p vector (thanks to the encoding)
            i = int(row.itemId)      # current itemId = position in the q vector
            r_ui = float(row.rating) # rating associated to the couple (user u , item i)

            # Error between the predicted rating (p_u . q_i) and the known rating r_ui
            err = r_ui - np.dot(p[u], q[i])

            # Update vectors p_u and q_i, using the pre-update p_u for the q_i step
            p_u_old = p[u].copy()
            p[u] += alpha * err * q[i]
            q[i] += alpha * err * p_u_old

    return p, q
def estimate(u, i, p, q):
    '''Estimate the rating of user u for item i as the scalar product of the
    1-D factor vectors p[u] and q[i].'''
    return np.dot(p[u], q[i])
# + id="_MYUUm18-id6" colab_type="code" colab={}
p, q = SGD(df_ratings)
# + [markdown] id="qJd80gNgNuUR" colab_type="text"
# ## Get the estimate for all user-item pairs:
# + [markdown] id="hj4Pc-FjPJK6" colab_type="text"
# Get the user-item matrix filled with predicted ratings:
# + id="YRCg3k2IPMSc" colab_type="code" colab={}
df_user_item_filled = pd.DataFrame(np.dot(p, q.transpose()))
df_user_item_filled.head()
# + [markdown] id="LLHPMdpyN96R" colab_type="text"
# However, it is using the encode ids ; we need to retrieve the association of encoded ids to original ids, and apply it:
# + id="cuft25TRN4CY" colab_type="code" colab={}
df_ratings_encoded, dict_users, dict_items = encode_ids(df_ratings)
# + id="mCidjCrUl2tx" colab_type="code" colab={}
df_user_item_filled.rename(columns=(dict_items['itemId']), inplace=True)
df_user_item_filled.rename(index=(dict_users['userId']), inplace=True)
# Sort index/rows (userId's) and columns (itemId's)
df_user_item_filled.sort_index(axis=0, inplace=True)
df_user_item_filled.sort_index(axis=1, inplace=True)
df_user_item_filled.head()
# + [markdown] id="AVXIqXAdOPzX" colab_type="text"
# Originally available ratings for user 1:
# + id="iyka6nXcOPo4" colab_type="code" colab={}
df_user_item.loc[1][:10]
# + [markdown] id="Pphixa2wOPeh" colab_type="text"
# Estimated ratings after the approximate SVD:
# + id="YDczh7x5Q6in" colab_type="code" colab={}
df_user_item_filled.loc[1][:10]
# + [markdown] id="Uk8zB0HCmLvk" colab_type="text"
# ## Give recommendation to a user
#
# For instance 10 recommended movies for user 1
# + id="G8zxuZ2VmaIs" colab_type="code" colab={}
recommendations = list((df_user_item_filled.loc[10]).sort_values(ascending=False)[:10].index)
recommendations
# + id="5U7R7lyTuOy_" colab_type="code" colab={}
df_movies[df_movies.itemId.isin(recommendations)]
# + [markdown] id="3fhXmfLeuDZo" colab_type="text"
# vs the ones that were rated initially:
# + id="4ooeCcRnuI8y" colab_type="code" colab={}
already_rated = list((df_user_item.loc[10]).sort_values(ascending=False)[:10].index)
already_rated
# + id="0SM3mJYwyF1g" colab_type="code" colab={}
df_movies[df_movies.itemId.isin(already_rated)]
# + [markdown] id="qKarQdgbm4tw" colab_type="text"
# This is all the movies in descending order of predicted rating. Let's remove the ones that were already rated.
# + [markdown] id="hkvVcbTALIji" colab_type="text"
#
#
#
# ---
#
#
#
# To put this into production, you'd first separate data into a training and validation set and optimize the number of latent factors (n_factors) by minimizing the Root Mean Square Error.
# It is easier to use a framework that allows to do this, do cross-validation, grid search, etc.
# + [markdown] id="nMdbrNdLldG9" colab_type="text"
# # Gradient Descent SVD using Surprise
# + id="4VdMT5PnbIn9" colab_type="code" colab={}
# !pip install surprise
# #!pip install scikit-surprise # if the first line does not work
# + id="Ed0lnuff4NOw" colab_type="code" colab={}
# from surprise import Reader, Dataset, SVD, evaluate
# Following Surprise documentation examples
# https://surprise.readthedocs.io/en/stable/getting_started.html
# NOTE: `evaluate` was removed from Surprise (>= 1.1); this cell already uses
# its replacement, surprise.model_selection.cross_validate, so importing
# `evaluate` would crash on current Surprise versions.
from surprise import Reader, Dataset, SVD, NormalPredictor
from surprise.model_selection import cross_validate

from collections import defaultdict

# As we're loading a custom dataset, we need to define a reader.
reader = Reader(rating_scale=(0.5, 5))

# The columns must correspond to user id, item id and ratings (in that order).
data = Dataset.load_from_df(df_ratings[['userId', 'itemId', 'rating']], reader)

# We'll use the famous SVD algorithm.
algo = SVD()

# Run 5-fold cross-validation and print results
cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)
# + [markdown] id="YyciPjWI4Q94" colab_type="text"
# #### Tune algorithm parameters with GridSearchCV
#
#
# + id="tG3nlrAKzLZg" colab_type="code" colab={}
from surprise.model_selection import GridSearchCV
param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005],
'reg_all': [0.4, 0.6]}
gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
gs.fit(data)
# best RMSE score
print(gs.best_score['rmse'])
# combination of parameters that gave the best RMSE score
print(gs.best_params['rmse'])
# + id="LnfvwVPvzUsw" colab_type="code" colab={}
# We can now use the algorithm that yields the best rmse:
algo = gs.best_estimator['rmse']
trainset = data.build_full_trainset()
algo.fit(trainset)
# + id="JVAeYFgTzppL" colab_type="code" colab={}
algo.predict(621,1)
# + id="li7UhY6fz1oG" colab_type="code" colab={}
df_data = data.df
df_data = df_data.join(df_movies,how="left", on='itemId',rsuffix='_', lsuffix='')
df_data[df_data['userId']==1].sort_values(by = 'rating',ascending=False)[:10]
# + id="CRm97oJVz8wG" colab_type="code" colab={}
# From Surprise documentation: https://surprise.readthedocs.io/en/stable/FAQ.html
def get_top_n(predictions, n=10):
    '''Return the top-N recommendation for each user from a set of predictions.

    Args:
        predictions(list of Prediction objects): The list of predictions, as
            returned by the test method of an algorithm.
        n(int): The number of recommendation to output for each user. Default
            is 10.

    Returns:
        A dict where keys are user (raw) ids and values are lists of tuples:
        [(raw item id, rating estimation), ...] of size n.
    '''
    # Bucket (item, estimated rating) pairs per user.
    by_user = defaultdict(list)
    for uid, iid, true_r, est, _ in predictions:
        by_user[uid].append((iid, est))

    # Keep only the n pairs with the highest estimated rating for each user.
    for uid in by_user:
        ranked = sorted(by_user[uid], key=lambda pair: pair[1], reverse=True)
        by_user[uid] = ranked[:n]

    return by_user
# + id="poADsLk634aR" colab_type="code" colab={}
# Predict ratings for all pairs (u, i) that are NOT in the training set.
testset = trainset.build_anti_testset()
predictions = algo.test(testset)
# + id="zn3AViRh19eR" colab_type="code" colab={}
top_n = get_top_n(predictions, n=10)
# + id="igRXlPxr4gCH" colab_type="code" colab={}
top_n.items()
# + id="U2ElCZzT4EC1" colab_type="code" colab={}
# Print the recommended items for all user 1
for uid, user_ratings in top_n.items():
print(uid, [iid for (iid, _) in user_ratings])
if uid == 1:
break
# + id="3OVCCW1C4ziF" colab_type="code" colab={}
df_movies[df_movies.itemId.isin([318, 750, 1204, 858, 904, 48516, 1221, 912, 1276, 4973])]
# + id="uNVZSfS35PSo" colab_type="code" colab={}
| notebooks/recommender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is a detailed EDA of the data, shown in the second video of "Exploratory data analysis" lecture (week 2).
#
#
# **PLEASE NOTE**: the dataset cannot be published, so this notebook is read-only.
# ## Load data
# In this competition hosted by *solutions.se*, the task was to predict the advertisement cost for a particular ad.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
data_path = './data'
train = pd.read_csv('%s/train.csv.gz' % data_path, parse_dates=['Date'])
test = pd.read_csv('%s/test.csv.gz' % data_path, parse_dates=['Date'])
# -
# Let's look at the data (notice that the table is transposed, so we can see all feature names).
train.head().T
# We see a lot of features with not obvious names. If you search for the *CampaignId*, *AdGroupName*, *AdNetworkType2* using any web search engine, you will find this dataset was exported from Google AdWords. So what is the required domain knowledge here? The knowledge of how web advertisement and Google AdWords work! After you have learned it, the features will make sense to you and you can proceed.
#
# For the sake of the story I will briefly describe Google AdWords system now. Basically every time a user queries a search engine, Google AdWords decides what ad will be shown along with the actual search results. On the other side of AdWords, the advertisers manage the ads -- they can set a multiple keywords, that a user should query in order to their ad to be shown. If the keywords are set properly and are relevant to the ad, then the ad will be shown to relevant users and the ad will get clicked. Advertisers pay to Google for some type of events, happened with their ad: for example for a click event, i.e. the user saw this ad and clicked it. AdWords uses complex algorithms to decide which ad to show to a particular user with a particular search query. The advertisers can only indirectly influence AdWords decesion process by changing keywords and several other parameters. So at a high level, the task is to predict what will be the costs for the advertiser (how much he will pay to Google, column *Cost*) when the parameters (e.g. keywords) are changed.
#
# The ads are grouped in groups, there are features *AdGroupId* *AdGroupName* describing them. A campaign corresponds to some specific parameters that an advertiser sets. Similarly, there are ID and name features *CampaignId*, *CampaignName*. And finally there is some information about keywords: *KeywordId* and *KeywordText*. Slot is $1$ when ad is shown on top of the page, and $2$ when on the side. Device is a categorical variable and can be either "tablet", "mobile" or "pc". And finally the *Date* is just the date, for which clicks were aggregated.
test.head().T
# Notice there is diffrent number of columns in test and train -- our target is *Cost* column, but it is closly related to several other features, e.g. *Clicks*, *Conversions*. All of the related columns were deleted from the test set to avoid data leakages.
# # Let's analyze
# Are we ready to modeling? Not yet. Take a look at this statistic:
# Converted to print() functions: the kernelspec declares Python 3, where the
# original Python 2 print statements are a SyntaxError.
print('Train min/max date: %s / %s' % (train.Date.min().date(), train.Date.max().date()))
print('Test min/max date: %s / %s' % ( test.Date.min().date(), test.Date.max().date()))
print('')
print('Number of days in train: %d' % ((train.Date.max() - train.Date.min()).days + 1))
print('Number of days in test: %d' % (( test.Date.max() - test.Date.min()).days + 1))
print('')
print('Train shape: %d rows' % train.shape[0])
print('Test shape: %d rows' % test.shape[0])
# Train period is more than 10 times larger than the test period, but train set has fewer rows, how could that happen?
#
# At this point I suggest you to stop and think yourself, what could be a reason, why this did happen. Unfortunately we cannot share the data for this competition, but the information from above should be enough to get a right idea.
#
# Alternatively, you can go along for the explanation, if you want.
# # Investigation
# Let's take a look how many rows with each date we have in train and test.
test.Date.value_counts()
# print only first 10
train.Date.value_counts().head(10)
# Interesting, for the test set we have the same number of rows for every date, while in train set the number of rows is different for each day. It looks like that for each day in the test set a loop through some kind of IDs had been run. But what about train set? So far we don't know, but let's find the test IDs first.
# ### Test
# So now we know, that there is $639360$ different IDs. It should be easy to find the columns, that form ID, because if the ID is ['col1', 'col2'], then to compute the number of combinations we should just multiply the number of unique elements in each.
test_nunique = test.nunique()
test_nunique
# +
import itertools
# This function looks for a combination of elements
# with product of 639360
def find_prod(data):
    """Print the column names whose unique-value counts multiply to exactly
    639360 (the presumed test-set ID cardinality); print 'Nothing found'
    otherwise.  Converted to print() functions for the Python 3 kernel —
    the original Python 2 print statements do not parse under Python 3.
    """
    # combinations of not more than 4 features (range(1, 5) -> sizes 1..4)
    for n in range(1, 5):
        # iterate through all combinations
        for c in itertools.combinations(range(len(data)), n):
            if data[list(c)].prod() == 639360:
                print(test_nunique.index[c])
                return
    print('Nothing found')

find_prod(test_nunique.values)
# -
# Hmm, nothing found! The problem is that some features are tied, and the number of their combinations does not equal to product of individual unique number of elements. For example it does not make sense to create all possible combinations of *DestinationUrl* and *AdGroupId* as *DestinationUrl* belong to exactly one *AdGroupId*.
test.groupby('DestinationUrl').AdGroupId.nunique()
# So, now let's try to find the ID differently. Let's try to find a list of columns, such that there are exactly $639360$ unique combinations of their values **in the test set** (not overall). So, we want to find `columns`, such that:
test[columns].drop_duplicates().shape[0] == 639360
# We could do it with a similar loop.
# +
import itertools
def find_ncombinations(data):
    """Print the first combination of columns with exactly 639360 unique
    value-combinations in the test set; print 'Nothing found' otherwise.
    Converted to print() functions for the Python 3 kernel — the original
    Python 2 print statements do not parse under Python 3.
    """
    # combinations of not more than 4 features (range(1, 5) -> sizes 1..4)
    for n in range(1, 5):
        for c in itertools.combinations(range(data.shape[1]), n):
            print(c)
            columns = test.columns[list(c)]
            if test[columns].drop_duplicates().shape[0] == 639360:
                print(columns)
                return
    print('Nothing found')

find_ncombinations(test)
# -
# But it will take forever to compute. So it is easier to find the combination manually.
# So after some time of trials and errors I figured out, that the four features *KeywordId, AdGroupId, Device, Slot* form the index. The number of unique rows is exactly *639360* as we wanted to find.
columns = ['KeywordId', 'AdGroupId', 'Device', 'Slot']
test[columns].drop_duplicates().shape
# Looks reasonable. For each *AdGroupId* there is a **distinct set** of possible *KeywordId's*, but *Device* and *Slot* variants are the same for each ad. And the target is to predict what will be the daily cost for using different *KeywordId's*, *Device* type, *Slot* type to advertise ads from *AdGroups*.
# ### Train
# To this end, we found how test set was constructed, but what about the train set? Let us plot something, probably we will find it out.
import seaborn as sns
sns.set(palette='pastel')
sns.set(font_scale=2)
# from absolute dates to relative
train['date_diff'] = (train.Date - train.Date.min()).dt.days
# +
# group by the index, that we've found
g= train.groupby(['KeywordId', 'AdGroupId', 'Device', 'Slot'])
# and for each index show average relative date versus
# the number of rows with that index
plt.figure(figsize=(12,12))
plt.scatter(g.date_diff.mean(),g.size(),edgecolor = 'none',alpha = 0.2, s=20, c='b')
plt.xlabel('Group mean relative date')
plt.ylabel('Group size')
plt.title('Train');
# -
# Looks interesting, isn't it? That is something we need to explain! How the same plot looks for the test set?
# from absolute dates to relative
test['date_diff'] = (test.Date - test.Date.min()).dt.days
# +
# group by the index, that we've found
g= test.groupby(['KeywordId', 'AdGroupId', 'Device', 'Slot'])
# and for each index show average relative date versus
# the number of rows with that index
plt.figure(figsize=(12,12))
plt.scatter(g.date_diff.mean(),g.size(),edgecolor = 'none',alpha = 0.2, s=20, c='b')
plt.xlabel('Group mean relative date')
plt.ylabel('Group size')
plt.ylim(-2, 30)
plt.title('Test');
# -
# Just a dot!
#
# Now let's think, what we actually plotted? We grouped the data by the ID that we've found previously and we plotted average *Date* in the group versus the size of each group. We found that ID is an aggregation index -- so for each date the *Cost* is aggreagated for each possible index. So group size shows for how many days we have *Const* information for each ID and mean relative date shows some information about these days.
#
# For test set it is expectable that both average date and the size of the groups are the same for each group: the size of each group is $14$ (as we have $14$ test days) and mean date is $6.5$, because for each group (index) we have $14$ different days, and $\frac{0 + 1 + \dots + 13}{14} = 6.5$.
#
# And now we can explain everything for the train set. Look at the top of the triangle: for those points (groups) we have *Cost* information for all the days in the train period, while on the sides we see groups, for which we have very few rows.
#
# But why for some groups we have smaller number of rows, than number of days? Let's look at the *Impressions* column.
train.Impressions.value_counts()
# We never have $0$ value in *Imressions* column. But in reality, of course, some ads with some combination of keyword, slot, device were never shown. So this looks like a nice explanation for the data: in the train set we **only** have information about ads (IDs, groups) which were shown at least once. And for the test set, we, of course, want to predict *Cost* **for every** possible ID.
# What it means for competitors, is that if one would just fit a model on the train set as is, the predictions for the test set will be biased by a lot. The predictions will be much higher than they should be, as we are only given a specific subset of rows as `train.csv` file.
# So, before modeling we should first extend the trainset and inject rows with `0` impressions. Such change will make train set very similar to the test set and the models will generalize nicely.
| mastering-kaggle-competitions/EDA_video2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import pandas_datareader.data as web
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import datapane as dp
import datetime as dt
import quandl
from wallstreet import Stock, Call
quandl.ApiConfig.api_key = "<KEY>"
plotly.offline.init_notebook_mode(connected=True)
import plotly.io as pio
pio.templates.default = "plotly_dark"
start = dt.datetime(2020, 1, 1)
end = dt.datetime.now()
days = (end - start).days
# +
def get_data(code, start, end, trans=''):
    """Fetch the Quandl dataset `code` between `start` and `end`; `trans` is
    forwarded as Quandl's `transform` option (e.g. 'normalize')."""
    return quandl.get(code, start_date=start, end_date=end, transform=trans)
df = pd.DataFrame()
df['RDSA'] = get_data('EURONEXT/RDSA', start, end, trans='normalize').Open
df['Total'] = get_data('EURONEXT/FP', start, end, trans='normalize').Open
df['bbl'] = get_data('FRED/DCOILBRENTEU', start, end)
s = Stock('XOM')
XOM = s.historical(days_back=days, frequency='d')
XOM = XOM.set_index('Date').Open
df['XOM'] = 100*(XOM/XOM[0])
s = Stock('CVX')
CVX = s.historical(days_back=days, frequency='d')
CVX = CVX.set_index('Date').Open
df['CVX'] = 100*(CVX/CVX[0])
s = Stock('BP')
BP = s.historical(days_back=days, frequency='d')
BP = BP.set_index('Date').Open
df['BP'] = 100*(BP/BP[0])
# -
df.head()
# +
fig0 = make_subplots(specs=[[{"secondary_y": True}]])
for company in df.columns:
fig0.add_trace(go.Scatter(
x=df.index,
y=df[company],
mode='lines',
name=company,
), secondary_y=False)
fig0.add_trace(go.Scatter(x=df.index, y=df.bbl,
mode='lines+markers',
name='Brent'),
secondary_y=True
)
fig0.update_layout(
yaxis2 = {'showgrid': False},
title={
'text': "Stock Price",
'x':0.5,
'xanchor': 'center'})
fig0.show()
# +
df['RDSA 10-day MA'] = df['RDSA'].rolling(window=10).mean()
df['RDSA 20-day MA'] = df['RDSA'].rolling(window=20).mean()
trace0 = go.Scatter(x=df.index, y=df['RDSA'], name='10-day MA')
trace1 = go.Scatter(x=df.index, y=df['RDSA 10-day MA'], name='10-day MA', line={'dash': 'dash'})
trace2 = go.Scatter(x=df.index, y=df['RDSA 20-day MA'], name='20-day MA', line={'dash': 'dashdot'})
fig1 = go.Figure([trace0, trace1, trace2])
fig1.show()
# +
# Authenticate with your API token
dp.login(token="<PASSWORD>")
dp.Report(
dp.Group(
dp.Plot(fig0),
dp.Plot(fig1),
columns=2,
rows=1
),
).publish(name='stock_report', open=True)
# -
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="hp0CPnHvUE6Z" outputId="606b5136-6873-438b-a1d3-ecd5792d1d56"
import spacy
# Load the small English pipeline (requires `python -m spacy download en_core_web_sm`).
nlp = spacy.load('en_core_web_sm')
def lemma(var):
    """Print text, POS tag, lemma hash and lemma string for each token in *var*."""
    row = '{0.text:{1}} {0.pos_:{2}} {0.lemma:<{3}} {0.lemma_}'
    for tok in var:
        print(row.format(tok, 12, 6, 22))
# Run the pipeline on a sample sentence and print each token's lemma info.
inp_text = nlp(u"My computer has a good GPU power even then I use Azure cloud to train Deep Learning models.")
lemma(inp_text)
# + id="b-i-vtceUmH1"
| Advanced/spacy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Case 2. Diabetic Retinopathy Detection
#
#
# * Authors: <NAME>, <NAME>
# * Last edited: 2019-03-03
# * Organization: [Metropolia University of Applied Sciences](https://www.metropolia.fi/)
# ## What is this?
#
# This is an assignment for *Cognitive Systems for Health Technology Applications* course.
#
# The goal is to use convolutional neural networks to create a classifier for diabetic retinopathy images.
# We use the dataset from [Diabetic Retinopathy Detection](https://www.kaggle.com/c/diabetic-retinopathy-detection)
# 2015 [Kaggle](https://www.kaggle.com) competition.
# It contains tens of thousands of fundus photography images with and without signs of diabetic retinopathy
# (the leading cause of blindness in the working-age population of the developed world,
# it is estimated to affect over 93 million people
# <sup><a href="https://www.kaggle.com/c/diabetic-retinopathy-detection">[1]</a></sup>).
#
# Lauri and I were working on this assignment together, we shared ideas, insights and were giving
# each other suggestions and helped with troubleshooting.
# ## Introduction
#
# The dataset turned out to be a very challenging one. We struggled to find a model architecture which would train reasonably fast and would achieve good performance in terms of accuracy and receiver operating characteristic (ROC) curve. We were trying to follow advice and guidelines of various sources, mainly the following:
#
# - https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/5.2-using-convnets-with-small-datasets.ipynb
# - http://blog.kaggle.com/2015/09/09/diabetic-retinopathy-winners-interview-1st-place-ben-graham/
# - https://www.kaggle.com/c/diabetic-retinopathy-detection/discussion/15801
# - http://blog.kaggle.com/2015/08/14/diabetic-retinopathy-winners-interview-4th-place-julian-daniel/
# - https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5961805/
# - course material
#
# However, due to limited resources and large hyperparameter space we were unable to explore as many preprocessing and model pipelines as we intended. Most of our models performed close to random guessing and/or would always predict no symptoms.
#
# For the sake of better organization and clarity, we will only link chosen preprocessing steps and models instead of embedding them in this notebook.
# ## Examples
# ### Preprocessing
#
# In [this](https://github.com/rozni/uni-ml/blob/master/Cognitive_Systems_for_Health_Technology_Applications/Case_2/Image_Preprocessing.ipynb) notebook you can find how we preprocessed the data before feeding them into the network.
# ### Models
#
# Following are the examples of our models:
# - [Cats & Dogs](https://www.kaggle.com/late347/returnable-convnet-case2)
# - [DeepGray](https://github.com/rozni/uni-ml/blob/master/Cognitive_Systems_for_Health_Technology_Applications/Case_2/Model-DeepGray.ipynb)
# - [Incepted](https://github.com/rozni/uni-ml/blob/master/Cognitive_Systems_for_Health_Technology_Applications/Case_2/Model-Incepted.ipynb)
#
# Other than those, we have tried dozens of different model architectures in combination with various preprocessing methods.
# ## Conclusion
#
# For a tough dataset one needs tough preparation.
# There was a lot to learn about diabetic retinopathy, fundus photography,
# we have also learnt new ways to process images and look at the data.
# We improved our skills in many Python and ML libraries
# and gained valuable insights into deep learning.
# (And I understood I should finally invest into more RAM and an SSD... :-) )
#
# Thanks to Google for [Colab](https://colab.research.google.com) and [Kaggle](https://www.kaggle.com/), their environments enabled us to try much more than we would otherwise be able to.
# ## Bonus
#
# ### When preprocessing goes wrong
#
# 
| Cognitive_Systems_for_Health_Technology_Applications/Case_2/Report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [py35]
# language: python
# name: Python [py35]
# ---
# +
import numpy as np
import sys
if "../" not in sys.path:
sys.path.append("../")
from envs.gridworld import GridworldEnv
from policy_iteration import policy_improvement
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# -
# Build the 4x4 gridworld and compute the optimal policy/value function.
env = GridworldEnv()
policy, v = policy_improvement(env)
# +
# NOTE(review): this repeats the policy_improvement call from the previous
# cell; harmless (deterministic) but redundant.
policy, v = policy_improvement(env)
print("Policy Probability Distribution:")
print(policy)
print("")
print("Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):")
print(np.reshape(np.argmax(policy, axis=1), env.shape))
print("")
print("Value Function:")
print(v)
print("")
print("Reshaped Grid Value Function:")
print(v.reshape(env.shape))
print("")
# -
# Test the value function
# Expected optimal values for the 4x4 gridworld with -1 reward per step.
expected_v = np.array([ 0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0])
np.testing.assert_array_almost_equal(v, expected_v, decimal=2)
| reinforcement/policy_iteration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
#
# * Table 1: 3-way ANOVA
# * Table2: 2-way anova, alberta data
# * Table S1: Genome list, accessions, habitat, clade A/B, other metadata.
# * Table S3: Presence/Absence of pan-genome and targeted genes by genome
# * Table S5: Pagel data
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scikit_posthocs as sp
# +
# Long-form feature table (one row per genome x feature) and genome metadata.
long = pd.read_table('genomes_vs_features_longform.csv', sep=',')
meta = pd.read_table('genome_labels.csv', sep=',')
def rename_clade(string):
    """Turn 'Clade X' labels into 'Type X'; pass non-string values (e.g. NaN) through."""
    is_text = type(string) == str
    return string.replace('Clade', 'Type') if is_text else string
# Derive presentation labels: 'Type A/B' from the clade, and a sampling
# location such as "Clinical Ab" (habitat + abbreviated country/province).
long['Type'] = long['Clade'].map(rename_clade)
loc_abbr_map = {'United Kingdom': 'UK',
                'Canada/Alberta': 'Ab'}
long['Sampling Location'] = long.apply(lambda row: ' '.join([row['Habitat'], loc_abbr_map[row['Country/Province']]]), axis=1)
# -
# Per-genome totals: number of features present per source class.
counts = long[['Isolate', 'Country/Province', 'Habitat', 'Type', 'source', 'Presence']]\
    .groupby(['Isolate', 'Country/Province', 'Habitat', 'Type', 'source',])\
    .sum().reset_index()
counts
long.head()
# ## Table X: Unassigned. Feature presences/percentages by sampling location
# For each feature class, write a per-feature summary CSV: total presence and
# percentage over all genomes, plus presence/percentage broken down by Type
# and by Sampling Location.
for feat in long['source'].unique():
    data = long[long['source']==feat]
    sort_order = ['Presence', 'Percentage']  # final column order of the CSV
    frames = []
    total = long[long['source']==feat].groupby('Feature').sum()
    # 1273 presumably equals the number of genomes in the study -- TODO confirm against `meta`
    total = total.join(total.div(1273).rename({'Presence': "Percentage"}, axis=1))
    total.sort_values(by='Presence', ascending=False, inplace=True)
    for col in ['Type', 'Sampling Location']:
        p = data.groupby([col, 'Feature']).agg({'Presence': "sum"})  # feature hits per group
        c = data.groupby([col]).agg({'Isolate': "nunique"})          # group sizes
        c.rename({'Isolate': "Presence"}, axis=1, inplace=True)
        perc = p.div(c, level=col).reset_index().pivot(index='Feature', columns=col, values='Presence')
        presence = p.reset_index().pivot(index='Feature', columns=col, values='Presence')
        perc.columns.name = ''
        presence.columns.name =''
        presence.rename({column: '{} Number Present'.format(column) for column in presence.columns}, axis=1, inplace=True)
        perc.rename({column: '{} Percent Present'.format(column) for column in perc.columns}, axis=1, inplace=True)
        vals = sorted(data[col].dropna().unique())
        for val in vals:
            sort_order.append('{} Number Present'.format(val))
            sort_order.append('{} Percent Present'.format(val))
        frames.append(presence.join(perc))
    total.join(frames)[sort_order].to_csv('tables/feature_presence_percentages/{}_summary.csv'.format(feat), sep=',')
counts.head()
# ## Table 1: 3-way ANOVA
# Keep only the habitats sampled in both geographies.
# BUG FIX: added .copy() -- renaming in place on a slice of `counts` triggers
# pandas' SettingWithCopyWarning and can silently fail to stick.
intersect = counts[counts['Habitat'].isin(['Agricultural', 'Wastewater Mun.', 'Clinical'])].copy()
intersect.rename({'Country/Province': 'Geography'}, axis=1, inplace=True)
intersect.head()
# NOTE(review): out-of-order notebook cell -- `model` and `source` are only
# defined in the loop further down; running top-to-bottom raises NameError here.
model.summary()
t = intersect[intersect['source']==source]
factor_groups = t.groupby(['Type', 'Habitat', 'Geography'])
# Full-factorial 3-way OLS model (Type x Habitat x Geography) with all interactions.
model = smf.ols('Presence ~ C(Type) + C(Habitat) + C(Geography) + C(Type):C(Habitat) +C(Type):C(Geography) + C(Habitat):C(Geography) + C(Type):C(Geography):C(Habitat)', data=intersect[intersect['source']==source]).fit()
d = sm.stats.anova_lm(model, typ=3)  # Type-III ANOVA table
d
import matplotlib.pyplot as plt
plt.figure(figsize=(6,6))
# NOTE(review): the 3-key groupby yields 3-tuples, so `i, j = values` would
# fail to unpack -- this scatter cell looks stale.
for values, group in factor_groups:
    i, j = values
    plt.scatter(group['Type'], group['Presence'])
# Run the 3-way ANOVA separately for each feature class and collect the
# Type-III p-values into one table (one `<source>_p` column per class).
results = []
for source in intersect['source'].unique():
    model = smf.ols('Presence ~ C(Type) + C(Habitat) + C(Geography) + C(Type):C(Habitat) +C(Type):C(Geography) + C(Habitat):C(Geography) + C(Type):C(Geography):C(Habitat)', data=intersect[intersect['source']==source]).fit()
    d = sm.stats.anova_lm(model, typ=3)
    d = d[['PR(>F)']]  # keep only the p-value column
    d.rename({'PR(>F)': "{}_p".format(source)}, axis=1, inplace=True)
    results.append(d)
all_r = results[0]
for d in results[1:]:
    all_r = all_r.join(d)
# Drop the Intercept/NaN rows and order the feature-class columns.
all_r.dropna().iloc[1:].dropna()[['AMR_p', 'Metal_p', 'VF_p', 'Plasmid_p', 'Genomic Island_p', 'Phage_p']]
all_r.dropna().iloc[1:].dropna()[['AMR_p', 'Metal_p', 'VF_p', 'Plasmid_p', 'Genomic Island_p', 'Phage_p']].to_csv('tables/Table1__3Way_ANOVA.csv', sep=',')
all_r = all_r.dropna().iloc[1:].dropna()[['AMR_p', 'Metal_p', 'VF_p', 'Plasmid_p', 'Genomic Island_p', 'Phage_p']]
# ## Table 2: 2-Way ANOVA alberta data
# +
# Table 2: same procedure restricted to the Alberta genomes, with a 2-way
# model (Habitat x Type) per feature class.
counts.rename({'Country/Province': "Geography"}, axis=1, inplace=True)
results = []
for source in counts['source'].unique():
    model2 = smf.ols('Presence ~ C(Habitat) + C(Type) + C(Habitat):C(Type)', data=counts[(counts['source']==source) & (counts['Geography']=='Canada/Alberta')]).fit()
    d = sm.stats.anova_lm(model2, typ=3)
    d = d[['PR(>F)']]  # keep only the p-value column
    d.rename({'PR(>F)': "{}_p".format(source)}, axis=1, inplace=True)
    results.append(d)
all_r2way = results[0]
for d in results[1:]:
    all_r2way = all_r2way.join(d)
all_r2way.dropna().iloc[1:].dropna()[['AMR_p', 'Metal_p', 'VF_p', 'Plasmid_p', 'Genomic Island_p', 'Phage_p']]
# -
all_r2way = all_r2way.dropna().iloc[1:].dropna()[['AMR_p', 'Metal_p', 'VF_p', 'Plasmid_p', 'Genomic Island_p', 'Phage_p']]
all_r2way.to_csv('tables/Table2__2Way_ANOVA.csv', sep=',')
# ## Table X (unassigned): Bonferroni Corrected t-test post-hocs
# My assumed procedure:
# 1. Do the ANOVA
# 2. For cells in the ANOVA that have significant differences, we can do a t-test post-hoc.
#
# Since the 2-way anova didn't have any insig. cells that were sig in three-way, just use the three-way.
#
#
from statsmodels.stats.multicomp import pairwise_tukeyhsd
# +
# Model terms tested in the 3-way ANOVA, from main effects up to the full
# three-way interaction.
factors = [['Type'],
           ['Habitat'],
           ['Geography'],
           ['Type', 'Habitat'],
           ['Type', 'Geography'],
           ['Habitat', 'Geography'],
           ['Type', 'Geography', 'Habitat']]
def query(factor):
    """Build the patsy-style term label, e.g. ['A', 'B'] -> 'C(A):C(B)'."""
    terms = []
    for name in factor:
        terms.append('C({})'.format(name))
    return ':'.join(terms)
tables3way = {}
# First, for 3-way: run Tukey's HSD post-hoc for every (source, factor) cell
# whose 3-way ANOVA p-value was significant (<= 0.05).
for source in ['AMR', 'Metal', 'VF', 'Plasmid', 'Genomic Island', 'Phage']:
    data = intersect[intersect['source']==source]
    for factor in factors:
        if all_r.loc[query(factor), '{}_p'.format(source)] <= 0.05:
            print("POST-HOC FOR {0}, {1}".format(source, '_'.join(factor)))
            # Group label = the combined factor levels, e.g. "Type A X Clinical".
            groups = []
            for i, row in data.iterrows():
                groups.append(' X '.join(row[factor].values))
            # NOTE(review): assigning into `data` (a slice of `intersect`)
            # triggers pandas' SettingWithCopyWarning.
            data['groups'] = groups
            tu = sp.posthoc_tukey(data[['groups', 'Presence']], group_col='groups', val_col='Presence')
            """
            df = pd.DataFrame(tu.summary())
            header = df.iloc[0]
            header = [str(h) for h in header]
            df = df[1:]
            df.columns = header
            """
            tables3way['{0}_{1}'.format(source, '_'.join(factor))] = tu
# -
# Write each 3-way post-hoc table to its own CSV.
# BUG FIX: `tables` was never defined (NameError); the 3-way post-hoc results
# are stored in `tables3way`.
for key, table in tables3way.items():
    table.to_csv('tables/posthoc_tests/{}_3way_post.csv'.format(key), sep=',')
all_r2way  # display the 2-way p-value table
# +
# Repeat the post-hoc procedure for the 2-way (Alberta-only) ANOVA.
factors = [['Type'],
           ['Habitat'],
           ['Habitat','Type'],]
tables2way = {}
# repeat for 2-way
for source in ['AMR', 'Metal', 'VF', 'Plasmid', 'Genomic Island', 'Phage']:
    data=counts[(counts['source']==source) & (counts['Geography']=='Canada/Alberta')]
    for factor in factors:
        if all_r2way.loc[query(factor), '{}_p'.format(source)] <= 0.05:
            print("POST-HOC FOR {0}, {1}".format(source, '_'.join(factor)))
            groups = []
            for i, row in data.iterrows():
                groups.append(' X '.join(row[factor].values))
            # NOTE(review): SettingWithCopyWarning here too (`data` is a slice).
            data['groups'] = groups
            tu = sp.posthoc_tukey(data[['groups', 'Presence']], group_col='groups', val_col='Presence')
            """
            df = pd.DataFrame(tu.summary())
            header = df.iloc[0]
            header = [str(h) for h in header]
            df = df[1:]
            df.columns = header
            """
            tables2way['{0}_{1}'.format(source, '_'.join(factor))] = tu
# -
# Write each 2-way post-hoc table to its own CSV.
# BUG FIX: `tables` was never defined (NameError); the 2-way post-hoc results
# are stored in `tables2way`.
for key, table in tables2way.items():
    table.to_csv('tables/posthoc_tests/{}_2way_post.csv'.format(key), sep=',')
tables2way.keys()
# +
# Collect all post-hoc tables into one Excel workbook, one sheet per feature
# class, with cells colour-coded by p-value (green <= 0.05, yellow <= 0.15,
# red otherwise).
writer = pd.ExcelWriter('tables/posthoc_spreadsheet.xlsx',engine='xlsxwriter')
workbook = writer.book
significant = workbook.add_format({'bg_color': '#93c47d'})  # green
medium = workbook.add_format({'bg_color': '#ffd966'})       # yellow
bad = workbook.add_format({'bg_color': "#e06666"})          # red
for source in ['AMR', 'Metal', 'VF', 'Plasmid', 'Genomic Island', 'Phage']:
    offset = 0
    worksheet = workbook.add_worksheet(source)
    writer.sheets[source] = worksheet
    for key in tables3way.keys():
        if key.startswith(source):
            # BUG FIX: corrected the "ANOVOA" typo in the sheet captions.
            worksheet.write_string(offset,0, '{0} {1} {2}'.format(source, key.replace('_',' X '), "3-way ANOVA posthoc, Tukey's HSD"))
            offset += 1
            tables3way[key].to_excel(writer, sheet_name=source, startrow=offset, startcol=0)
            offset += tables3way[key].shape[0] + 4  # blank rows between tables
    for key in tables2way.keys():
        if key.startswith(source):
            worksheet.write_string(offset,0, '{0} {1} {2}'.format(source, key.replace('_', ' X '), "2-way ANOVA posthoc, Tukey's HSD"))
            offset += 1
            tables2way[key].to_excel(writer, sheet_name=source, startrow=offset, startcol=0)
            offset += tables2way[key].shape[0] + 4
    worksheet.conditional_format('A1:Z1000', {'type': 'cell',
                                              'criteria': '<=',
                                              'value': 0.05,
                                              'format': significant})
    worksheet.conditional_format('A1:Z1000', {'type': 'cell',
                                              'criteria': 'between',
                                              'minimum': 0.05,
                                              'maximum': 0.15,
                                              'format': medium})
    worksheet.conditional_format('A1:Z1000', {'type': 'cell',
                                              'criteria': '>',
                                              'value': 0.15,
                                              'format':bad})
writer.save()
# -
# ## Table S3 Presence/absence of everything
def assign_gene(percent):
    """Classify a gene by the fraction of genomes carrying it.

    >= 0.99 -> Core, >= 0.95 -> Soft Core, >= 0.15 -> Shell, otherwise Cloud.
    """
    bands = ((0.99, "Core"), (0.95, "Soft Core"), (0.15, "Shell"))
    for cutoff, label in bands:
        if percent >= cutoff:
            return label
    return "Cloud"
counts['source'].unique()
# Assemble the targeted-feature core/soft-core/shell/cloud assignment table.
frames =[]
for feat in ['AMR', 'Metal', 'VF', 'Plasmid', 'Genomic Island', 'Phage']:
    # BUG FIX: every iteration previously re-read AMR_summary.csv; read the
    # summary file that matches the current feature class instead.
    table = pd.read_table('tables/feature_presence_percentages/{}_summary.csv'.format(feat), sep=',')[['Feature', 'Presence', 'Percentage']]
    table['source'] = feat
    frames.append(table)
type_counts = pd.concat(frames)
type_counts['gene_type'] = type_counts['Percentage'].map(assign_gene)
type_counts.set_index('Feature').to_csv('tables/TableX__target_feature_core_assignment.csv', sep=',')
# +
# get the roary gene presence absence table to count genes.
gpa = pd.read_table('gene_presence_absence_roary.csv', sep = ',', low_memory=False)
genomes = gpa.columns[14:] #14th column is start of genomes
# Convert to presence absence, remove reference and outgroup
roary = gpa.set_index('Gene')[genomes].T.notnull().astype(int).loc[meta['Isolate']]
roary.columns.name = ''
roary.index.name = 'Isolate'
# convert P/A to long form (one row per genome x gene)
roary_long = pd.melt(roary.reset_index(), id_vars=['Isolate'], var_name='Feature', value_name='Presence')
# count genomes carrying each gene
roary_t = roary_long.groupby('Feature').sum()
# percentage of genomes (1273 presumed genome count -- TODO confirm against `meta`)
roary_t = roary_t.join(roary_t.div(1273).rename({'Presence': 'Percentage'}, axis=1))
roary_t.sort_values(by='Presence', ascending=False, inplace=True)
# Annotate with core/soft-core/shell/cloud labels
roary_t['source'] = 'Roary'
roary_t['gene_type'] = roary_t['Percentage'].map(assign_gene)
roary_t.to_csv('tables/TableX__roary_feature_core_assignment.csv', sep=',')
# -
# Table S3: join the targeted-feature and pan-genome P/A matrices.
pa = pd.read_table('all_features_presence_absence.tsv', sep='\t', index_col=0)
all_pa = pa.join(roary, rsuffix='_roary')
all_pa.to_csv('tables/TableS3__all_presence_absence.csv', sep=',')
# ## Table S5 (?): Pagel vs labels
# +
# Reload the long-form feature table and rebuild the location/geography labels
# (mirrors the preprocessing at the top of the notebook).
fe = pd.read_table('genomes_vs_features_longform.csv', sep=',')
loc_abbr_map = {'United Kingdom': 'UK',
                'Canada/Alberta': 'Ab'}
fe['Sampling Location'] = fe.apply(lambda row: ' '.join([row['Habitat'], loc_abbr_map[row['Country/Province']]]), axis=1)
fe['Geography']= [loc_abbr_map[c] for c in fe['Country/Province']]
fe.head()
# -
def rename_clade(string):
    """Map clade labels to 'Type' labels.

    'CladeB' (no space) is special-cased to 'Type B'; other strings get a
    plain 'Clade' -> 'Type' substitution; non-strings (NaN) pass through.
    """
    if type(string) != str:
        return string
    return 'Type B' if string == 'CladeB' else string.replace('Clade', 'Type')
fe['Type'] = fe['Clade'].map(rename_clade)
# +
# Build a (Metadata Type, Metadata) legend so output rows can later be sorted
# by metadata category.
recs = []
for col in ['Habitat', 'Geography', 'Type']:
    for value in fe[col].unique():
        recs.append({"Metadata Type": col,
                     "Metadata": value})
legend = pd.DataFrame.from_records(recs)
legend
# +
# Harmonize the label spellings used in the Pagel result files with the ones
# used elsewhere in this notebook.
l1 = ['Agriculture',
      'Clinical',
      'Natural Water',
      'Wastewater Agr.',
      'Wastewater Municipal']
l2 = ['Agricultural',
      'Clinical',
      'Natural Water',
      'Wastewater Agr.',
      'Wastewater Mun.']
rename = {k:v for k, v in zip(l1,l2)}
rename['Clade A'] = 'Type A'
rename['Clade B'] = 'Type B'
rename['CladeB'] = 'Type B'
# Also fold in the country/province abbreviations.
rename = {**rename, ** loc_abbr_map, "Alberta": "Ab"}
def fix_pagel(val):
    """Normalize a Pagel metadata label via the module-level `rename` map;
    labels without an entry come back unchanged."""
    return rename.get(val, val)
# +
# Pagel's test results (p-values and likelihood ratios) per feature/metadata pair.
p_val = pd.read_table('pagel_vs_metadata_pvalue.csv', sep=',')
lr = pd.read_table('pagel_vs_metadata_LR.csv', sep=',')
feats = fe[['Feature', 'source']].drop_duplicates()
# -
df = pd.read_table('../../Indizio/data/pagel_features_p.csv', sep=',', index_col=0)
v = df.values
np.nanmin(v[np.nonzero(v)])  # smallest non-zero p-value in the matrix
p_val['Unnamed: 0'] = p_val['Unnamed: 0'].map(fix_pagel)
lr['Unnamed: 0'] = lr['Unnamed: 0'].map(fix_pagel)
results = {}
# For each feature class, merge the Pagel p-values and likelihood ratios with
# per-metadata presence counts/proportions and write one CSV per class.
for feat in feats['source'].unique():
    f = feats[feats['source']==feat]
    f_p = p_val[['Unnamed: 0'] + list(f[f['Feature'].isin(p_val.columns)]['Feature'])]
    f_lr = lr[['Unnamed: 0'] + list(f[f['Feature'].isin(lr.columns)]['Feature'])]
    # Long form: one row per (Feature, Metadata) with its Pagel p-value.
    f_pg = f_p.melt(id_vars=['Unnamed: 0'])
    f_pg.rename({'Unnamed: 0': 'Metadata', 'variable': 'Feature', 'value': 'p value'}, axis=1, inplace=True)
    f_pg.set_index(['Feature', 'Metadata'], inplace=True)
    # Same reshaping for the likelihood ratios.
    f_lr = lr[['Unnamed: 0'] + list(f[f['Feature'].isin(lr.columns)]['Feature'])]
    f_lrg = f_lr.melt(id_vars=['Unnamed: 0'])
    f_lrg.rename({'Unnamed: 0': 'Metadata', 'variable': 'Feature', 'value': 'LR'}, axis=1, inplace=True)
    f_lrg.set_index(['Feature', 'Metadata'], inplace=True)
    f_pg.join(f_lrg).to_csv('tables/pagel_vs_habitat/{}_vs_habitat_pagel.csv'.format(feat), sep=',')
    f_pg_lr = f_pg.join(f_lrg)
    data = fe[fe['source']==feat]
    perc_frames = []
    presence_frames = []
    total = data.groupby('Feature').sum()
    total = total.join(total.div(1273).rename({'Presence': "Percentage"}, axis=1))
    total.sort_values(by='Presence', ascending=False, inplace=True)
    # Presence counts and proportions per metadata value (Type/Habitat/Geography).
    for col in ['Type', 'Habitat', 'Geography']:
        p = data.groupby([col, 'Feature']).agg({'Presence': 'sum'})
        c = data.groupby([col]).agg({'Isolate': 'nunique'})
        c.rename({'Isolate': 'Presence'}, axis=1, inplace=True)
        perc = p.div(c, level=col).reset_index().pivot(index='Feature', columns=col, values='Presence')
        presence = p.reset_index().pivot(index='Feature', columns=col, values='Presence')
        perc.columns.name = ''
        presence.columns.name =''
        perc_frames.append(perc.reset_index().melt(id_vars=['Feature']).rename({'': 'Metadata', 'value': 'Proportion Present'},axis=1).set_index(['Feature', 'Metadata']))
        presence_frames.append(presence.reset_index().melt(id_vars=['Feature']).rename({'': 'Metadata', 'value': 'n Genomes Present'},axis=1).set_index(['Feature', 'Metadata']))
    # Merge Pagel stats with the counts, sorted by the legend's category order.
    final = f_pg_lr.reset_index().join(legend.set_index('Metadata'), on ='Metadata')\
        .sort_values(by=['Feature', 'Metadata Type', 'Metadata'])\
        .drop('Metadata Type',axis=1)\
        .set_index(['Feature', 'Metadata'])\
        .join(pd.concat(perc_frames)).join(pd.concat(presence_frames))
    final.to_csv('tables/pagel_vs_habitat/{}_pagel_with_counts.csv'.format(feat), sep=',')
    results[feat] = final
| Tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Hopfield-Network" data-toc-modified-id="Hopfield-Network-1"><span class="toc-item-num">1 </span>Hopfield Network</a></span><ul class="toc-item"><li><span><a href="#Importing-Required-Libraries" data-toc-modified-id="Importing-Required-Libraries-1.1"><span class="toc-item-num">1.1 </span>Importing Required Libraries</a></span></li><li><span><a href="#Download/Import/Preprocess-MNIST-Dataset" data-toc-modified-id="Download/Import/Preprocess-MNIST-Dataset-1.2"><span class="toc-item-num">1.2 </span>Download/Import/Preprocess MNIST Dataset</a></span></li><li><span><a href="#Hopfield-Network" data-toc-modified-id="Hopfield-Network-1.3"><span class="toc-item-num">1.3 </span>Hopfield Network</a></span></li><li><span><a href="#Experiments-with-MNIST-Data" data-toc-modified-id="Experiments-with-MNIST-Data-1.4"><span class="toc-item-num">1.4 </span>Experiments with MNIST Data</a></span></li><li><span><a href="#Experiments-with-Small-Patterns" data-toc-modified-id="Experiments-with-Small-Patterns-1.5"><span class="toc-item-num">1.5 </span>Experiments with Small Patterns</a></span></li></ul></li></ul></div>
# + [markdown] cell_id="00000-a5b4c44c-50ef-4f35-a40a-1ef1a9cdc3f6" deepnote_cell_type="markdown" tags=[]
# # Hopfield Network
#
# <NAME>
# + [markdown] cell_id="00001-6d3d899c-a61f-413f-ac38-7d2fdc7e3214" deepnote_cell_type="markdown" tags=[]
# ## Importing Required Libraries
# + cell_id="00001-a3974940-436c-499b-9e65-df934d0a3f8c" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=457 execution_start=1612046334540 source_hash="4db0a5aa" tags=[]
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import RandomState
# + [markdown] cell_id="00003-c0532161-8a5e-4b7e-b787-96b34fc41470" deepnote_cell_type="markdown" tags=[]
# ## Download/Import/Preprocess MNIST Dataset
#
# Mnist Util Code: https://github.com/hsjeong5/MNIST-for-Numpy (modified)
# + cell_id="00001-01473cda-0f8c-45dc-aab5-1daf926494b7" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1769 execution_start=1612046335007 source_hash="309732db" tags=[]
import numpy as np
from urllib import request
import gzip
import pickle
# (key in the saved dict, archive filename) for the four MNIST files.
filename = [
    ["training_images","train-images-idx3-ubyte.gz"],
    ["test_images","t10k-images-idx3-ubyte.gz"],
    ["training_labels","train-labels-idx1-ubyte.gz"],
    ["test_labels","t10k-labels-idx1-ubyte.gz"]
]
def download_mnist():
    """Fetch the four MNIST archives listed in `filename` into the CWD."""
    base_url = "http://yann.lecun.com/exdb/mnist/"
    for _, archive in filename:
        print("Downloading " + archive + "...")
        request.urlretrieve(base_url + archive, archive)
    print("Download complete.")
def save_mnist():
    """Decode the downloaded IDX archives and cache them as mnist.pkl."""
    mnist = {}
    # Image files: 16-byte header, then flat 28x28 uint8 images.
    for key, archive in filename[:2]:
        with gzip.open(archive, 'rb') as fh:
            raw = np.frombuffer(fh.read(), np.uint8, offset=16)
        mnist[key] = raw.reshape(-1, 28 * 28)
    # Label files: 8-byte header, then one uint8 label per sample.
    for key, archive in filename[-2:]:
        with gzip.open(archive, 'rb') as fh:
            mnist[key] = np.frombuffer(fh.read(), np.uint8, offset=8)
    with open("mnist.pkl", 'wb') as fh:
        pickle.dump(mnist, fh)
    print("Save complete.")
def init_mnist():
    """Download the MNIST archives and cache them as mnist.pkl (one-time setup)."""
    download_mnist()
    save_mnist()
def load_mnist():
    """Load the cached MNIST dict and return (x_train, t_train, x_test, t_test)."""
    with open("mnist.pkl", 'rb') as fh:
        cache = pickle.load(fh)
    keys = ("training_images", "training_labels", "test_images", "test_labels")
    return tuple(cache[k] for k in keys)
# NOTE(review): init_mnist() re-downloads MNIST on every run; guard it if the
# archives/pickle already exist.
init_mnist()
x_train, t_train, x_test, t_test = load_mnist()
# Reshape the flat 784-vectors back into 28x28 images.
x_train = x_train.reshape(-1, 28, 28)
x_test = x_test.reshape(-1, 28, 28)
idx = 0  # <== SET AN INDEX
img = x_train[idx,:]  # First image in the training set.
plt.imshow(img,cmap='gray')
plt.show()  # Show the image
# + cell_id="00003-3096800e-267d-413c-b309-bf8ce36188ad" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1545 execution_start=1612046336784 source_hash="8e579a5" tags=[]
np.random.seed(42)  # <== Changing this value would generate different selections
# Pre-process the train set: binarize to {-1, +1} Hopfield states.
x_train = x_train / np.max(x_train)  # normalization
x_train[x_train>=0.5] = 1  # apply threshold for binarization
x_train[x_train<0.5] = -1  # apply threshold for binarization
# Shuffle the train set
shuffle_mask = np.arange( len(t_train) )
np.random.shuffle(shuffle_mask)
x_train = x_train[shuffle_mask]
t_train = t_train[shuffle_mask]
# Select 10 different numbers (one per digit class) from the train set
uniq, uniq_idx = np.unique(t_train, return_index=True)
numbers = x_train[uniq_idx]
f, axs = plt.subplots(2, 5)
for i, ax in enumerate(axs.flatten()):
    ax.imshow(numbers[i], cmap="gray")
    ax.set_title("Value = "+ str(i))
f.tight_layout()
# + cell_id="00006-664a2fe3-bc48-4f2e-85f0-e27b3706f07a" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=1009 execution_start=1612046338355 source_hash="121c1e89" tags=[]
# Add noise to samples: flip `noise` randomly chosen pixels of each digit
# (positions may repeat, so fewer than `noise` distinct pixels can change).
noise = 100
noisy_numbers = numbers.copy()
for number in noisy_numbers:
    # (removed an unused `mask = np.ones((28, 28))` local -- it was never read
    # and np.ones consumes no RNG state, so behavior is unchanged)
    for i in range(noise):
        r, c = np.random.randint(0, 28, size=2)
        number[r, c] *= -1
f, axs = plt.subplots(2, 5)
for i, ax in enumerate(axs.flatten()):
    ax.imshow(noisy_numbers[i], cmap="gray")
    ax.set_title("Value = " + str(i))
f.tight_layout()
# + [markdown] cell_id="00007-1837795f-0641-4076-a1a5-57ba580f9c60" deepnote_cell_type="markdown" tags=[]
# ## Hopfield Network
# + [markdown] cell_id="00008-84b24d7e-8a91-43ba-a506-818edcc0f19c" deepnote_cell_type="markdown" tags=[]
# **Note:** Threshold parameter can be set as an integer, a float or a vector.
# + cell_id="00007-5b47082f-1366-4357-8b4c-334c25a43ca4" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=2 execution_start=1612047513776 output_cleared=false source_hash="ea0abb08" tags=[]
class HopfieldNetwork:
    """Binary (+1/-1) Hopfield network: Hebbian training plus one pass of
    asynchronous, random-order state updates."""

    def __init__(self, k, threshold=0):
        # k: number of units; threshold: scalar, float or per-unit vector.
        self.k = k
        self.threshold = threshold

    def calculate_energy(self, x):
        """Energy of state x: -1/2 * sum(x^T x * W) plus the threshold term."""
        interaction = np.matmul(x.T, x) * self.w
        return -0.5 * np.sum(interaction) + np.sum(x * self.threshold)

    def train(self, data):
        """Hebbian learning: sum of zero-diagonal outer products of the patterns."""
        self.w = np.zeros((self.k, self.k))
        for pattern in data:
            self.w = self.w + self.calculate_w_per_sample(pattern)

    def activation(self, z):
        """Sign activation relative to the threshold."""
        return 1 if z > self.threshold else -1

    def predict(self, x):  # asynchronous update
        """Run one epoch of random-order unit updates from state x.

        Returns the final state as a (1, k) array; prints the state (small
        nets) or the energy (large nets) after every single unit update.
        """
        state = x.copy().reshape(1, -1)
        max_epoch = 1
        flips = 0  # counter of units whose value changed
        for _ in range(max_epoch):
            if flips > self.k:
                break
            order = np.arange(self.k)
            np.random.shuffle(order)  # visit units in random order
            for unit in order:
                drive = state @ self.w[unit]
                new_value = self.activation(drive)
                if new_value != state[0, unit]:
                    flips += 1
                state[0, unit] = new_value
                if self.k < 101:
                    print("State:", state)
                else:
                    print("Energy:", self.calculate_energy(state))
        return state

    def calculate_w_per_sample(self, x):
        """Outer product of one pattern with itself, with a zeroed diagonal."""
        row = x.reshape(1, -1)
        n = row.shape[1]
        return np.matmul(row.T, row) * (1 - np.eye(n))
# Sanity check on a tiny 7-unit network with three stored patterns.
data = np.array([ [1,1,1,1,-1,-1,-1], [-1,1,-1,1,-1,1,-1], [-1,-1,1,1,-1,1,-1] ])
# NOTE(review): predict() actually prints the state after every unit update,
# not only when it changes.
print("Printing only changed states...")
hn = HopfieldNetwork(7)
hn.train(data)
final_state = hn.predict(np.array([-1,-1,-1,1,-1,-1,-1]))
print("final state:", final_state)
# + [markdown] cell_id="00009-9d934528-c0e7-48d3-875b-c4c374f74b2f" deepnote_cell_type="markdown" tags=[]
# ## Experiments with MNIST Data
# + cell_id="00010-87760e96-75f1-466d-b8b3-4b04b3750919" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=474 execution_start=1612049108368 source_hash="ccff1ec5" tags=[]
# Train a 784-unit network on two MNIST digit patterns and show them.
hn = HopfieldNetwork(28*28)
hn.train(numbers[[0,1]])
f, axs = plt.subplots(1, 2)
axs[0].imshow(numbers[0], cmap="gray")
axs[0].set_title("Train Pattern 0")
axs[1].imshow(numbers[1], cmap="gray")
axs[1].set_title("Train Pattern 1")
f.tight_layout()
# + cell_id="00008-47278e11-fe99-42b1-8fe3-0fcf378b5403" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=590 execution_start=1612048898636 source_hash="84e6879d" tags=[]
# Recall digit 0 from its noisy version and compare input vs output.
f, axs = plt.subplots(1, 2)
out = hn.predict(noisy_numbers[0]).reshape(28, 28)
axs[0].imshow(noisy_numbers[0], cmap="gray")
axs[0].set_title("Input")
axs[1].imshow(out, cmap="gray")
axs[1].set_title("Output")
f.tight_layout()
# + cell_id="00011-1c96d415-3b59-4111-8749-360a1a53a15a" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=541 execution_start=1612048922003 source_hash="be4e06fb" tags=[]
# Same recall experiment for the noisy digit 1.
f, axs = plt.subplots(1, 2)
out = hn.predict(noisy_numbers[1]).reshape(28, 28)
axs[0].imshow(noisy_numbers[1], cmap="gray")
axs[0].set_title("Input")
axs[1].imshow(out, cmap="gray")
axs[1].set_title("Output")
f.tight_layout()
# + [markdown] cell_id="00011-c3ab81cb-eac7-4a8b-b7de-ece4b165dd13" deepnote_cell_type="markdown" tags=[]
# **Note:** Unfortunately I was unable to train the model with more than 2 images.
# + [markdown] cell_id="00010-1424e317-31a0-477b-922e-0e0eb8730233" deepnote_cell_type="markdown" tags=[]
# ## Experiments with Small Patterns
# + cell_id="00007-f49d7b61-21b1-4817-99c4-f27b31cff7cd" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=326 execution_start=1612048229668 source_hash="4dc600f3" tags=[]
# Store three 3x3 patterns (T, plus, X) in a 9-unit network and display them.
sign_T = np.array([[1,1,1],[-1,1,-1],[-1,1,-1]])
sign_plus = np.array([[-1,1,-1], [1,1,1], [-1,1,-1]])
sign_x = np.array([[1,-1,1],[-1,1,-1],[1,-1,1]])
data = np.array([sign_T ,sign_plus, sign_x]).reshape(-1,9)
hn = HopfieldNetwork(9)
hn.train(data)
f, axs = plt.subplots(1, 3)
axs[0].imshow(sign_T, cmap="gray")
axs[0].set_title("data[0]")
axs[1].imshow(sign_plus, cmap="gray")
axs[1].set_title("data[1]")
axs[2].imshow(sign_x, cmap="gray")
axs[2].set_title("data[2]")
f.tight_layout()
# + cell_id="00011-7d210ecf-39e1-4583-a76e-296abea71126" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=285 execution_start=1612048316435 source_hash="7d74e0a4" tags=[]
# TEST-1: recall from a corrupted X-like pattern.
test_data = np.array([[1,-1,1],[-1,-1,-1],[-1,-1,1]])
final_state = hn.predict(test_data)
print("final state:", final_state)
f, axs = plt.subplots(1, 2)
axs[0].imshow(test_data, cmap="gray")
axs[0].set_title("Input")
axs[1].imshow(final_state.reshape(3,3), cmap="gray")
axs[1].set_title("Output")
f.tight_layout()
# + cell_id="00012-5516aba3-583a-4450-a40e-2168d28c840c" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=440 execution_start=1612048371988 source_hash="b95a37e" tags=[]
# TEST-2: recall from the all-ones state.
test_data = np.array([[1,1,1],[1,1,1],[1,1,1]])
final_state = hn.predict(test_data)
print("final state:", final_state)
f, axs = plt.subplots(1, 2)
axs[0].imshow(test_data, cmap="gray")
axs[0].set_title("Input")
axs[1].imshow(final_state.reshape(3,3), cmap="gray")
axs[1].set_title("Output")
f.tight_layout()
# + cell_id="00013-9508e5dd-c6ea-4203-9ba8-a42bc018a78f" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=203 execution_start=1612048433811 source_hash="722e9fff" tags=[]
# TEST-3 (label fixed; the original comment said TEST-2 twice).
test_data = np.array([[1,1,1],[-1,1,1],[-1,-1,-1]])
final_state = hn.predict(test_data)
print("final state:", final_state)
f, axs = plt.subplots(1, 2)
axs[0].imshow(test_data, cmap="gray")
axs[0].set_title("Input")
axs[1].imshow(final_state.reshape(3,3), cmap="gray")
axs[1].set_title("Output")
f.tight_layout()
# + [markdown] cell_id="00017-19737917-7db3-45f4-91e6-1c6fb614b4e0" deepnote_cell_type="markdown" tags=[]
# **Note:** This model usually works OK but with some inputs (e.g. all -1) can output unrelated patterns.
| HopfieldNetworks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: visualization-curriculum-gF8wUgMm
# language: python
# name: visualization-curriculum-gf8wugmm
# ---
# + [markdown] papermill={"duration": 0.024379, "end_time": "2020-04-10T00:12:35.639436", "exception": false, "start_time": "2020-04-10T00:12:35.615057", "status": "completed"} tags=[]
# # COVID-19 Comparative Analysis
# > A Comparison of COVID-19 with SARS, MERS, EBOLA and H1N1
# - author: <NAME>
# - comments: true
# - permalink: /comparitive-analysis/
# - toc: true
# - image: images/copied_from_nb/covid-compare-1-1.png
#
# + [markdown] papermill={"duration": 0.021077, "end_time": "2020-04-10T00:12:35.681268", "exception": false, "start_time": "2020-04-10T00:12:35.660191", "status": "completed"} tags=[]
# These visualizations were made by [<NAME>](https://twitter.com/imdevskp), from [this kaggle kernel](https://www.kaggle.com/imdevskp/covid-19-analysis-viz-prediction-comparisons).
# + papermill={"duration": 0.026542, "end_time": "2020-04-10T00:12:35.728445", "exception": false, "start_time": "2020-04-10T00:12:35.701903", "status": "completed"} tags=[]
#hide
# # ! pip install folium plotly plotnine psutil
# + papermill={"duration": 0.026827, "end_time": "2020-04-10T00:12:35.775855", "exception": false, "start_time": "2020-04-10T00:12:35.749028", "status": "completed"} tags=[]
#hide
# # ! npm install -g electron@6.1.4 orca
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 4.977915, "end_time": "2020-04-10T00:12:40.773770", "exception": false, "start_time": "2020-04-10T00:12:35.795855", "status": "completed"} tags=[]
#hide
# storing and analysis
import numpy as np
import pandas as pd
# visualization
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
from plotnine import *
import plotly.express as px
import folium
from IPython.display import Javascript
from IPython.core.display import display, HTML
# color pallette
cdr = ['#393e46', '#ff2e63', '#30e3ca'] # grey - red - blue
idr = ['#f8b400', '#ff2e63', '#30e3ca'] # yellow - red - blue
s = '#f0134d'
h = '#12cad6'
e = '#4a47a3'
m = '#42e6a4'
c = '#333333'
shemc = [s, h, e, m, c]
sec = [s, e, c]
# + [markdown] papermill={"duration": 0.021755, "end_time": "2020-04-10T00:12:40.818926", "exception": false, "start_time": "2020-04-10T00:12:40.797171", "status": "completed"} tags=[]
#
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 0.524088, "end_time": "2020-04-10T00:12:41.364217", "exception": false, "start_time": "2020-04-10T00:12:40.840129", "status": "completed"} tags=[]
#hide
# COVID-19
# --------
# Load the JHU-derived covid_19 dataset: daily cumulative counts per country.
covid_19 = pd.read_csv('https://raw.githubusercontent.com/imdevskp/covid_19_jhu_data_web_scrap_and_cleaning/master/new/complete_data_new_format.csv',
                       parse_dates=['Date'])
# selecting important columns only
covid_19 = covid_19[['Date', 'Country/Region', 'Confirmed', 'Deaths']]
# replacing Mainland china with just China
covid_19['Country/Region'] = covid_19['Country/Region'].replace('Mainland China', 'China')
# renaming columns
covid_19.columns = ['Date', 'Country', 'Cases', 'Deaths']
# group by date and country
# (select columns with a list: the old tuple-style ``groupby(...)['Cases', 'Deaths']``
# indexing was deprecated and removed in pandas 2.0)
covid_19 = covid_19.groupby(['Date', 'Country'])[['Cases', 'Deaths']]
covid_19 = covid_19.sum().reset_index()
# latest snapshot: rows for the most recent date in the file
c_lat = covid_19[covid_19['Date'] == max(covid_19['Date'])].reset_index()
# latest grouped by country
c_lat_grp = c_lat.groupby('Country')[['Cases', 'Deaths']].sum().reset_index()
# nth day since the first record in the dataset
covid_19['nth_day'] = (covid_19['Date'] - min(covid_19['Date'])).dt.days
# day by day: worldwide totals per date
c_dbd = covid_19.groupby('Date')[['Cases', 'Deaths']].sum().reset_index()
# nth day
c_dbd['nth_day'] = covid_19.groupby('Date')['nth_day'].max().values
# no. of countries reporting at least one case per date
temp = covid_19[covid_19['Cases']>0]
c_dbd['n_countries'] = temp.groupby('Date')['Country'].apply(len).values
# daily deltas of the cumulative series
c_dbd['new_cases'] = c_dbd['Cases'].diff()
c_dbd['new_deaths'] = c_dbd['Deaths'].diff()
c_dbd['epidemic'] = 'COVID-19'
covid_19.head()
# + papermill={"duration": 0.308989, "end_time": "2020-04-10T00:12:41.694203", "exception": false, "start_time": "2020-04-10T00:12:41.385214", "status": "completed"} tags=[]
#hide
# EBOLA
# ------
# Load the 2014-2016 West Africa ebola dataset: cumulative counts per country.
ebola_14 = pd.read_csv("https://raw.githubusercontent.com/imdevskp/ebola_outbreak_dataset/master/ebola_2014_2016_clean.csv",
                       parse_dates=['Date'])
# ebola_14 = ebola_14[ebola_14['Date']!=max(ebola_14['Date'])]
# selecting important columns only
ebola_14 = ebola_14[['Date', 'Country', 'No. of confirmed, probable and suspected cases',
                     'No. of confirmed, probable and suspected deaths']]
# renaming columns
ebola_14.columns = ['Date', 'Country', 'Cases', 'Deaths']
ebola_14.head()
# group by date and country
# (list-based column selection: the old tuple form after groupby was
# deprecated and removed in pandas 2.0)
ebola_14 = ebola_14.groupby(['Date', 'Country'])[['Cases', 'Deaths']]
ebola_14 = ebola_14.sum().reset_index()
# filling missing values
ebola_14['Cases'] = ebola_14['Cases'].fillna(0)
ebola_14['Deaths'] = ebola_14['Deaths'].fillna(0)
# converting datatypes
ebola_14['Cases'] = ebola_14['Cases'].astype('int')
ebola_14['Deaths'] = ebola_14['Deaths'].astype('int')
# latest snapshot
e_lat = ebola_14[ebola_14['Date'] == max(ebola_14['Date'])].reset_index()
# latest grouped by country
e_lat_grp = e_lat.groupby('Country')[['Cases', 'Deaths']].sum().reset_index()
# nth day since the outbreak's first record
ebola_14['nth_day'] = (ebola_14['Date'] - min(ebola_14['Date'])).dt.days
# day by day worldwide totals
e_dbd = ebola_14.groupby('Date')[['Cases', 'Deaths']].sum().reset_index()
# nth day
e_dbd['nth_day'] = ebola_14.groupby('Date')['nth_day'].max().values
# no. of countries with at least one case per date
temp = ebola_14[ebola_14['Cases']>0]
e_dbd['n_countries'] = temp.groupby('Date')['Country'].apply(len).values
e_dbd['new_cases'] = e_dbd['Cases'].diff()
e_dbd['new_deaths'] = e_dbd['Deaths'].diff()
e_dbd['epidemic'] = 'EBOLA'
ebola_14.head()
# + papermill={"duration": 0.28874, "end_time": "2020-04-10T00:12:42.006472", "exception": false, "start_time": "2020-04-10T00:12:41.717732", "status": "completed"} tags=[]
#hide
# SARS
# ----
# Load the 2003 SARS dataset: cumulative counts per country.
sars_03 = pd.read_csv("https://raw.githubusercontent.com/imdevskp/sars-2003-outbreak-data-with-web-scrapping-munging-and-cleaning-code/master/sars_2003_complete_dataset_clean.csv",
                      parse_dates=['Date'])
# selecting important columns only
sars_03 = sars_03[['Date', 'Country', 'Cumulative number of case(s)',
                   'Number of deaths', 'Number recovered']]
# renaming columns
sars_03.columns = ['Date', 'Country', 'Cases', 'Deaths', 'Recovered']
# group by date and country
# (list-based column selection: the old tuple form after groupby was
# deprecated and removed in pandas 2.0)
sars_03 = sars_03.groupby(['Date', 'Country'])[['Cases', 'Deaths', 'Recovered']]
sars_03 = sars_03.sum().reset_index()
# latest snapshot
s_lat = sars_03[sars_03['Date'] == max(sars_03['Date'])].reset_index()
# latest grouped by country
s_lat_grp = s_lat.groupby('Country')[['Cases', 'Deaths', 'Recovered']].sum().reset_index()
# nth day since the first record
sars_03['nth_day'] = (sars_03['Date'] - min(sars_03['Date'])).dt.days
# day by day worldwide totals
s_dbd = sars_03.groupby('Date')[['Cases', 'Deaths', 'Recovered']].sum().reset_index()
# nth day
s_dbd['nth_day'] = sars_03.groupby('Date')['nth_day'].max().values
# no. of countries with at least one case per date
temp = sars_03[sars_03['Cases']>0]
s_dbd['n_countries'] = temp.groupby('Date')['Country'].apply(len).values
s_dbd['new_cases'] = s_dbd['Cases'].diff()
s_dbd['new_deaths'] = s_dbd['Deaths'].diff()
s_dbd['epidemic'] = 'SARS'
s_dbd.head()
# + papermill={"duration": 0.397929, "end_time": "2020-04-10T00:12:42.430548", "exception": false, "start_time": "2020-04-10T00:12:42.032619", "status": "completed"} tags=[]
#hide
# MERS
# ----
# Country totals and weekly case counts, pre-aggregated/cleaned upstream.
mers_cntry = pd.read_csv("https://raw.githubusercontent.com/imdevskp/mers_dataset_collection_cleaning/master/country_count_latest.csv")
mers_weekly = pd.read_csv("https://raw.githubusercontent.com/imdevskp/mers_dataset_collection_cleaning/master/weekly_clean.csv")
# cleaning: build a human-readable "year - week" label
mers_weekly['Year-Week'] = mers_weekly['Year'].astype(str) + ' - ' + mers_weekly['Week'].astype(str)
# Parse "<week><year>-1" as ISO week / ISO year / weekday (Monday of that week).
# NOTE(review): '%V'/'%G'/'%u' are ISO-week strptime directives; confirm the
# installed pandas version accepts them in to_datetime (support has varied).
mers_weekly['Date'] = pd.to_datetime(mers_weekly['Week'].astype(str) +
                                     mers_weekly['Year'].astype(str).add('-1'),format='%V%G-%u')
mers_weekly.head()
# + papermill={"duration": 0.035268, "end_time": "2020-04-10T00:12:42.488018", "exception": false, "start_time": "2020-04-10T00:12:42.452750", "status": "completed"} tags=[]
#hide
mers_cntry.head()
# + papermill={"duration": 0.034859, "end_time": "2020-04-10T00:12:42.545042", "exception": false, "start_time": "2020-04-10T00:12:42.510183", "status": "completed"} tags=[]
#hide
mers_weekly.head()
# + [markdown] papermill={"duration": 0.022725, "end_time": "2020-04-10T00:12:42.589722", "exception": false, "start_time": "2020-04-10T00:12:42.566997", "status": "completed"} tags=[]
# # Reported Countries
# + papermill={"duration": 13.666587, "end_time": "2020-04-10T00:12:56.279073", "exception": false, "start_time": "2020-04-10T00:12:42.612486", "status": "completed"} tags=[]
#hide
# One choropleth per epidemic, colored by total confirmed cases.
# Each figure is written to a PNG and embedded by the markdown cell below.
fig = px.choropleth(c_lat_grp, locations="Country", locationmode='country names',
                    color="Cases", hover_name="Country",
                    color_continuous_scale="Emrld", title='COVID-19')
fig.update(layout_coloraxis_showscale=False)
fig.write_image('covid-compare-1-1.png')
#-----------------------------------------------------------------------------------------
fig = px.choropleth(e_lat_grp, locations="Country", locationmode='country names',
                    color="Cases", hover_name="Country",
                    color_continuous_scale="Emrld", title='EBOLA 2014')
fig.update(layout_coloraxis_showscale=False)
fig.write_image('covid-compare-1-2.png')
#-----------------------------------------------------------------------------------------
fig = px.choropleth(s_lat_grp, locations="Country", locationmode='country names',
                    color="Cases", hover_name="Country",
                    color_continuous_scale="Emrld", title='SARS 2003')
fig.update(layout_coloraxis_showscale=False)
fig.write_image('covid-compare-1-3.png')
#-----------------------------------------------------------------------------------------
fig = px.choropleth(mers_cntry, locations="Country", locationmode='country names',
                    color="Confirmed", hover_name="Country",
                    color_continuous_scale='Emrld', title='MERS')
fig.update(layout_coloraxis_showscale=False)
fig.write_image('covid-compare-1-4.png')
# + [markdown] papermill={"duration": 1.108208, "end_time": "2020-04-10T00:12:57.411709", "exception": false, "start_time": "2020-04-10T00:12:56.303501", "status": "completed"} tags=[]
# 
# 
# 
# 
# + [markdown] papermill={"duration": 0.023727, "end_time": "2020-04-10T00:12:57.462650", "exception": false, "start_time": "2020-04-10T00:12:57.438923", "status": "completed"} tags=[]
# # Deaths
# + papermill={"duration": 1.197844, "end_time": "2020-04-10T00:12:58.684293", "exception": false, "start_time": "2020-04-10T00:12:57.486449", "status": "completed"} tags=[]
#hide
# Death-count choropleths; countries with zero deaths are filtered out so
# the continuous color scale is not dominated by zeros.
fig = px.choropleth(c_lat_grp[c_lat_grp['Deaths']>0], locations="Country", locationmode='country names',
                    color="Deaths", hover_name="Country",
                    color_continuous_scale="Sunsetdark", title='COVID-19')
fig.update(layout_coloraxis_showscale=False)
fig.write_image('covid-compare-2-1.png')
#-----------------------------------------------------------------------------------------
fig = px.choropleth(e_lat_grp[e_lat_grp['Deaths']>0], locations="Country", locationmode='country names',
                    color="Deaths", hover_name="Country",
                    color_continuous_scale="Sunsetdark", title='EBOLA 2014')
fig.update(layout_coloraxis_showscale=False)
fig.write_image('covid-compare-2-2.png')
#-----------------------------------------------------------------------------------------
fig = px.choropleth(s_lat_grp[s_lat_grp['Deaths']>0], locations="Country", locationmode='country names',
                    color="Deaths", hover_name="Country",
                    color_continuous_scale="Sunsetdark", title='SARS 2003')
fig.update(layout_coloraxis_showscale=False)
fig.write_image('covid-compare-2-3.png')
# + [markdown] papermill={"duration": 0.025445, "end_time": "2020-04-10T00:12:58.732984", "exception": false, "start_time": "2020-04-10T00:12:58.707539", "status": "completed"} tags=[]
# 
# 
# 
# + [markdown] papermill={"duration": 0.022466, "end_time": "2020-04-10T00:12:58.779930", "exception": false, "start_time": "2020-04-10T00:12:58.757464", "status": "completed"} tags=[]
# # Proportion
# + papermill={"duration": 2.017974, "end_time": "2020-04-10T00:13:00.819945", "exception": false, "start_time": "2020-04-10T00:12:58.801971", "status": "completed"} tags=[]
#hide
# Treemaps: each epidemic's cases partitioned by country
# (rectangle area proportional to case count).
fig = px.treemap(c_lat_grp.sort_values(by='Cases', ascending=False).reset_index(drop=True),
                 path=["Country"], values="Cases", title='COVID-19',
                 color_discrete_sequence = px.colors.qualitative.Dark2)
fig.write_image('covid-compare-3-1.png')
fig = px.treemap(e_lat_grp.sort_values(by='Cases', ascending=False).reset_index(drop=True),
                 path=["Country"], values="Cases", title='EBOLA',
                 color_discrete_sequence = px.colors.qualitative.Dark2)
fig.write_image('covid-compare-3-2.png')
fig = px.treemap(s_lat_grp.sort_values(by='Cases', ascending=False).reset_index(drop=True),
                 path=["Country"], values="Cases", title='SARS',
                 color_discrete_sequence = px.colors.qualitative.Dark2)
fig.write_image('covid-compare-3-3.png')
fig = px.treemap(mers_cntry,
                 path=["Country"], values="Confirmed", title='MERS',
                 color_discrete_sequence = px.colors.qualitative.Dark2)
fig.write_image('covid-compare-3-4.png')
# + [markdown] papermill={"duration": 0.024239, "end_time": "2020-04-10T00:13:00.868589", "exception": false, "start_time": "2020-04-10T00:13:00.844350", "status": "completed"} tags=[]
# 
# 
# 
# 
# + [markdown] papermill={"duration": 0.024031, "end_time": "2020-04-10T00:13:00.915541", "exception": false, "start_time": "2020-04-10T00:13:00.891510", "status": "completed"} tags=[]
# # Comparison
# + papermill={"duration": 0.038871, "end_time": "2020-04-10T00:13:00.977729", "exception": false, "start_time": "2020-04-10T00:13:00.938858", "status": "completed"} tags=[]
#hide
# Headline totals per epidemic: overall cases, overall deaths, and the
# number of distinct countries that reported at least one case.
c_cases = c_lat_grp['Cases'].sum()
c_deaths = c_lat_grp['Deaths'].sum()
c_no_countries = c_lat_grp['Country'].nunique()
s_cases = s_lat_grp['Cases'].sum()
s_deaths = s_lat_grp['Deaths'].sum()
s_no_countries = s_lat_grp['Country'].nunique()
e_cases = e_lat_grp['Cases'].sum()
e_deaths = e_lat_grp['Deaths'].sum()
e_no_countries = e_lat_grp['Country'].nunique()
# + papermill={"duration": 0.044139, "end_time": "2020-04-10T00:13:01.045456", "exception": false, "start_time": "2020-04-10T00:13:01.001317", "status": "completed"} tags=[]
#hide
# Summary table: one row per epidemic with its headline statistics.
# MERS and H1N1 figures are hard-coded constants from the source kernel.
epidemics = pd.DataFrame({
    'epidemic' : ['COVID-19', 'SARS', 'EBOLA', 'MERS', 'H1N1'],
    'start_year' : [2019, 2003, 2014, 2012, 2009],
    'end_year' : [2020, 2004, 2016, 2017, 2010],
    'confirmed' : [c_cases, s_cases, e_cases, 2494, 6724149],
    'deaths' : [c_deaths, s_deaths, e_deaths, 858, 19654],
    'no_of_countries' : [c_no_countries, s_no_countries, e_no_countries, 27, 178]
})
# Case-fatality rate in percent, rounded to two decimals.
epidemics['mortality'] = ((epidemics['deaths'] / epidemics['confirmed']) * 100).round(2)
# Order rows chronologically by the year each outbreak ended.
epidemics = epidemics.sort_values('end_year').reset_index(drop=True)
epidemics.head()
# + papermill={"duration": 0.658378, "end_time": "2020-04-10T00:13:01.729937", "exception": false, "start_time": "2020-04-10T00:13:01.071559", "status": "completed"} tags=[]
#hide
# Horizontal bar chart: total confirmed cases per epidemic.
# The color list is ordered to keep each epidemic's color stable across charts.
fig = px.bar(epidemics.sort_values('confirmed',ascending=False),
             x="confirmed", y="epidemic", color='epidemic',
             text='confirmed', orientation='h', title='No. of Cases',
             range_x=[0,7500000],
             color_discrete_sequence = [h, c, e, s, m])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.write_image('covid-compare-4-1.png')
# + papermill={"duration": 0.978044, "end_time": "2020-04-10T00:13:02.731753", "exception": false, "start_time": "2020-04-10T00:13:01.753709", "status": "completed"} tags=[]
#hide
# Horizontal bar chart: total deaths per epidemic.
fig = px.bar(epidemics.sort_values('deaths',ascending=False),
             x="deaths", y="epidemic", color='epidemic',
             text='deaths', orientation='h', title='No. of Deaths',
             range_x=[0,25000],
             color_discrete_sequence = [h, e, c, m, s])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.write_image('covid-compare-4-2.png')
# + papermill={"duration": 0.572672, "end_time": "2020-04-10T00:13:03.330225", "exception": false, "start_time": "2020-04-10T00:13:02.757553", "status": "completed"} tags=[]
#hide
# Horizontal bar chart: case-fatality rate (percent) per epidemic.
fig = px.bar(epidemics.sort_values('mortality',ascending=False),
             x="mortality", y="epidemic", color='epidemic',
             text='mortality', orientation='h', title='Mortality rate',
             range_x=[0,100],
             color_discrete_sequence = [e, m, s, c, h])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.write_image('covid-compare-4-3.png')
# + papermill={"duration": 0.419515, "end_time": "2020-04-10T00:13:03.774315", "exception": false, "start_time": "2020-04-10T00:13:03.354800", "status": "completed"} tags=[]
#hide
# Horizontal bar chart: number of affected countries per epidemic.
fig = px.bar(epidemics.sort_values('no_of_countries', ascending=False),
             x="no_of_countries", y="epidemic", color='epidemic',
             text='no_of_countries', orientation='h', title='No. of Countries',
             range_x=[0,200],
             color_discrete_sequence = [h, c, s, m, e])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.write_image('covid-compare-4-4.png')
# + papermill={"duration": 0.952076, "end_time": "2020-04-10T00:13:04.750590", "exception": false, "start_time": "2020-04-10T00:13:03.798514", "status": "completed"} tags=[]
#hide
# Concatenate the per-epidemic day-by-day frames and plot them on a shared
# calendar axis.
# NOTE(review): y plots the cumulative 'Cases'/'Deaths' columns although the
# titles say "new"; 'new_cases'/'new_deaths' exist if daily deltas were intended.
temp = pd.concat([s_dbd, e_dbd, c_dbd], axis=0, sort=True)
fig = px.line(temp, x="Date", y="Cases", color='epidemic',
              title='No. of new cases',
              color_discrete_sequence = sec)
fig.update_layout(xaxis_rangeslider_visible=True)
fig.write_image('covid-compare-4-5.png')
fig = px.line(temp, x="Date", y="Deaths", color='epidemic',
              title='No. of new deaths',
              color_discrete_sequence = sec)
fig.update_layout(xaxis_rangeslider_visible=True)
fig.write_image('covid-compare-4-6.png')
# + [markdown] papermill={"duration": 0.024082, "end_time": "2020-04-10T00:13:04.799782", "exception": false, "start_time": "2020-04-10T00:13:04.775700", "status": "completed"} tags=[]
# 
# 
# 
# 
# 
# 
# + [markdown] papermill={"duration": 0.023225, "end_time": "2020-04-10T00:13:04.846519", "exception": false, "start_time": "2020-04-10T00:13:04.823294", "status": "completed"} tags=[]
# # In the first N days
# + papermill={"duration": 1.312476, "end_time": "2020-04-10T00:13:06.183597", "exception": false, "start_time": "2020-04-10T00:13:04.871121", "status": "completed"} tags=[]
#hide
# Align the epidemics on days-since-first-record to compare growth speed.
fig = px.line(temp, x="nth_day", y="Cases", color='epidemic',
              title='Cases', color_discrete_sequence = sec)
fig.update_layout(xaxis_rangeslider_visible=True)
fig.write_image('covid-compare-5-1.png')
fig = px.line(temp, x="nth_day", y="Deaths", color='epidemic',
              title='Deaths', color_discrete_sequence = sec)
fig.update_layout(xaxis_rangeslider_visible=True)
fig.write_image('covid-compare-5-2.png')
fig = px.line(temp, x="nth_day", y="n_countries", color='epidemic',
              title='No. of Countries', color_discrete_sequence = sec)
fig.update_layout(xaxis_rangeslider_visible=True)
fig.write_image('covid-compare-5-3.png')
# + papermill={"duration": 0.92598, "end_time": "2020-04-10T00:13:07.136153", "exception": false, "start_time": "2020-04-10T00:13:06.210173", "status": "completed"} tags=[]
#hide
# Timeline bubbles: one marker per epidemic at its start year. Marker size
# scales sub-linearly (** 0.3 / ** 0.5) so the largest outbreak does not
# swamp the others; the y values are a constant so markers sit on one line.
fig = px.scatter(epidemics, x='start_year', y = [1 for i in range(len(epidemics))],
                 size=epidemics['confirmed']**0.3, color='epidemic', title='Confirmed Cases',
                 color_discrete_sequence = shemc, hover_name='epidemic', height=400,
                 text=epidemics['epidemic']+'<br> Cases : '+epidemics['confirmed'].apply(str))
fig.update_traces(textposition='bottom center')
fig.update_yaxes(showticklabels=False)
fig.update_layout(showlegend=False)
fig.write_image('covid-compare-5-4.png')
fig = px.scatter(epidemics, x='start_year', y = [1 for i in range(len(epidemics))],
                 size=epidemics['deaths']**0.5, color='epidemic', title='Deaths',
                 color_discrete_sequence = shemc, hover_name='epidemic', height=400,
                 text=epidemics['epidemic']+'<br> Deaths : '+epidemics['deaths'].apply(str))
fig.update_traces(textposition='bottom center')
fig.update_yaxes(showticklabels=False)
fig.update_layout(showlegend=False)
fig.write_image('covid-compare-5-5.png')
# + [markdown] papermill={"duration": 0.024096, "end_time": "2020-04-10T00:13:07.185974", "exception": false, "start_time": "2020-04-10T00:13:07.161878", "status": "completed"} tags=[]
# 
# 
# 
# 
# 
# + papermill={"duration": 0.042112, "end_time": "2020-04-10T00:13:07.253109", "exception": false, "start_time": "2020-04-10T00:13:07.210997", "status": "completed"} tags=[]
#hide
c_lat_grp.head()
# + papermill={"duration": 0.312141, "end_time": "2020-04-10T00:13:07.589443", "exception": false, "start_time": "2020-04-10T00:13:07.277302", "status": "completed"} tags=[]
#hide_input
# Worldwide cumulative COVID cases over time vs. flat reference lines at each
# past epidemic's final case total. h1n1 is computed but not plotted here,
# presumably because its 6.7M total would flatten the other curves -- confirm.
temp = covid_19.groupby('Date')['Cases'].sum().reset_index()
covid = temp['Cases']
sars = [8096 for i in range(len(temp))]
ebola = [28646 for i in range(len(temp))]
mers = [2494 for i in range(len(temp))]
h1n1 = [6724149 for i in range(len(temp))]
# NOTE(review): the 'seaborn-whitegrid' style name was renamed to
# 'seaborn-v0_8-whitegrid' in matplotlib >= 3.6; confirm the pinned version.
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(20, 8))
ax = plt.plot(temp['Date'], covid, label='COVID-19 (2019-2020)', c='#555555', alpha=0.8)
ax = plt.plot(temp['Date'], sars, label='SARS (2003-2004)', c='#E71D36', ls='--', alpha=0.8)
ax = plt.plot(temp['Date'], ebola, label='EBOLA (2014-2016)', c='#FF9F1C', ls='--', alpha=0.8)
ax = plt.plot(temp['Date'], mers, label='MERS', c='#2EC4B6', ls='--', alpha=0.8)
plt.title('Number of Cases')
plt.legend()
plt.show()
# + papermill={"duration": 0.312161, "end_time": "2020-04-10T00:13:07.928849", "exception": false, "start_time": "2020-04-10T00:13:07.616688", "status": "completed"} tags=[]
#hide_input
# Same comparison for cumulative deaths; H1N1's smaller death total makes it
# plottable on this chart, so it is included here.
temp = covid_19.groupby('Date')['Deaths'].sum().reset_index()
covid = temp['Deaths']
sars = [774 for i in range(len(temp))]
ebola = [11323 for i in range(len(temp))]
mers = [858 for i in range(len(temp))]
h1n1 = [19654 for i in range(len(temp))]
plt.figure(figsize=(20, 8))
ax = plt.plot(temp['Date'], covid, label='COVID-19 (2019-2020)', c='#555555', alpha=0.8)
ax = plt.plot(temp['Date'], sars, label='SARS (2003-2004)', c='#E71D36', ls='--', alpha=0.8)
ax = plt.plot(temp['Date'], ebola, label='EBOLA (2014-2016)', c='#FF9F1C', ls='--', alpha=0.8)
ax = plt.plot(temp['Date'], mers, label='MERS', c='#2EC4B6', ls='--', alpha=0.8)
ax = plt.plot(temp['Date'], h1n1, label='H1N1', c='#2345BA', ls='--', alpha=0.8)
plt.title('Number of Deaths')
plt.legend()
plt.show()
# + [markdown] papermill={"duration": 0.028275, "end_time": "2020-04-10T00:13:07.995501", "exception": false, "start_time": "2020-04-10T00:13:07.967226", "status": "completed"} tags=[]
# # Related Work
# + [markdown] papermill={"duration": 0.027172, "end_time": "2020-04-10T00:13:08.049800", "exception": false, "start_time": "2020-04-10T00:13:08.022628", "status": "completed"} tags=[]
# 1. [https://www.kaggle.com/imdevskp/covid-19-analysis-viz-prediction-comparisons](https://www.kaggle.com/imdevskp/covid-19-analysis-viz-prediction-comparisons)
# 2. [https://www.kaggle.com/imdevskp/western-africa-ebola-outbreak-analysis](https://www.kaggle.com/imdevskp/western-africa-ebola-outbreak-analysis)
# 3. [https://www.kaggle.com/imdevskp/sars-2003-outbreak-analysis](https://www.kaggle.com/imdevskp/sars-2003-outbreak-analysis)
| _notebooks/2020-03-13-COVID19-Comparitive-Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Javascript library in a notebook: cytoscape.js
#
# Tries [cytoscape.js](http://js.cytoscape.org/) in a notebook. The example is taken from the gallery and more specifically the following one [cyjsSimpleDemoWithMagic.ipynb](https://github.com/paul-shannon/cyjs-jupyter/blob/master/self-contained-notebooks/javascriptOnly/cyjsSimpleDemoWithMagic.ipynb). A kind of tutorial is available at [Getting started with Cytoscape.js](http://blog.js.cytoscape.org/2016/05/24/getting-started/).
from jyquickhelper import RenderJS
css = None
libs = [dict(path='http://www.xavierdupre.fr/js/cytoscape/cytoscape.min.js', name='cytoscape')]
script = """
var cy = cytoscape({
container: document.getElementById('__ID__'),
elements: {
nodes: [
{data: {id: 'a', name: 'Node A', type: 'big' }},
{data: {id: 'b', name: 'Node B', type: 'little'}},
],
edges: [
{data: {source: 'a', target: 'b'}},
{data: {source: 'b', target: 'a'}}
]
},
style: [
{selector: 'node',
style: {
'text-valign': 'center',
'text-halign': 'center',
'border-color': 'red',
'background-color': 'white',
'border-width': 1,
'label': 'data(name)',
'height': 100, // defaults
'width': 100
}},
{selector: "node[type='big']",
style: {
'height': 150,
'width': 150,
'shape': 'roundrectangle'
}},
{selector: "node[type='little']",
style: {
'height': 50,
'width': 80
}},
{selector: 'edge',
style: {
'width': '1px',
'line-color': 'blue',
'target-arrow-shape': 'triangle',
'target-arrow-color': 'black',
'curve-style': 'bezier'
}}
],
ready: function(){
console.log("small cyjs network ready");
} // ready
}); // cytoscape
"""
# Render the cytoscape.js script inside the notebook; height/width bound the canvas.
jr = RenderJS(script, css=css, libs=libs, height='200px', width='500px')
jr
# Show the generated HTML wrapper for inspection.
print(jr._repr_html_())
| _doc/notebooks/nb_cytoscape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas
import matplotlib.pyplot as plt
import time
import numpy
pandas.set_option('display.max_rows', 500)
pandas.set_option('display.max_columns', 500)
df = pandas.read_csv('../Feb_8_AHA_Files/SHHS CSVs all/ECGV0007_fullresults.csv')
# remove leading and trailing spaces from column names
df.columns = df.columns.str.strip()
# +
# https://stackoverflow.com/questions/33788913/pythonic-efficient-way-to-strip-whitespace-from-every-pandas-data-frame-cell-tha
# Strip whitespace from every string-typed cell (numeric columns untouched).
df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
# -
## Fill NaN into blanks
# NOTE(review): an empty pattern with regex=True can match inside any string;
# confirm this only blanks truly-empty cells (r'^\s*$' is the safer pattern).
df.replace("", numpy.nan, regex=True, inplace=True)
## Make all numeric strings into floats
# NOTE(review): errors='ignore' is deprecated in pandas >= 2.2; a per-column
# try/except around pandas.to_numeric would be the forward-compatible form.
df = df.apply(pandas.to_numeric, errors='ignore')
df.shape
df.head().T
df.dtypes
df.info()
df.describe().T
df.describe(include='object').T
# + active=""
# df.describe(include='category').T
# -
# # which columns contain 1 or more NaN entries?
# Count missing values per column, then show only columns that have any.
nan_per_col=df.isna().sum(axis=0)
nan_per_col[nan_per_col>0]
# # histogram of every numeric column
start_time = time.time()
for this_col in df.columns:
    # skip the numerous beat-level RR/QT interval columns
    if ('RR_' not in this_col) and ('QT' not in this_col):
        print(this_col)
        df[this_col].hist()
        _=plt.show()
print('elapsed:',time.time()-start_time,'seconds')
# # scatter plot
# One value-vs-row-index scatter per column, to eyeball outliers/anomalies.
start_time = time.time()
for this_col in df.columns:
    print(this_col)
    _=plt.scatter(df[this_col].index,df[this_col])
    _=plt.show()
print('elapsed:',time.time()-start_time,'seconds')
# this_col is whatever column the loop ended on (the last column)
df[this_col].nunique()
| find_anomalies_in_CSV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import pandana as pdna
import quilt3
import os
import pandana as pdna
from geosnap import Community
# %matplotlib inline
# +
#pip install -U numpy
# -
# The networks can take awhile to download and process from OSM, but we have pre-built networks for every metro in the country stored in our quilt bucket
# Download the pre-built pandana street network for MSA 40140 from the quilt
# bucket only if it is not already cached locally.
if not os.path.exists("../data/40140.h5"):
    p = quilt3.Package.browse('osm/metro_networks_8k', 's3://spatial-ucr')
    p['40140.h5'].fetch("../data/")
# Block-level LODES employment data for the same metro area.
rd = Community.from_lodes(msa_fips='40140', years=[2017])
gdf = rd.gdf
gdf.columns
gdf.dropna(subset=['total_employees']).plot(column='total_employees', scheme='quantiles', k=6, cmap='YlOrBr')
net = pdna.Network.from_hdf5("../data/40140.h5")
# change this number up to 5000 (upper limit)
net.precompute(3000)
#net.precompute(4000)
# Here we're doing a KNN to get the intersection node nearest to each block centroid
gdf.plot()
gdf["node_ids"] = net.get_node_ids(gdf.centroid.x,
                                   gdf.centroid.y)
gdf
# Then, create a new variable on the network (total employees) located on the nodes we just identified, with values equal to total_employees
net.set(gdf.node_ids, variable=gdf["total_employees"], name="total_employees")
# Now calculate shortest distances over the network and sum the jobs reachable within the aggregation horizon (3500 below -- presumably meters, confirm network units). This returns a series for every node; using it we can move up or down levels of the hierarchy by taking the nearest intersection node to any polygon
# +
access = net.aggregate(3500, type="sum", name="total_employees")
#try 3.5km, 3km
# -
access
access.name ='emp'
# Attach the node-level accessibility score to each block via its nearest node.
gdf = gdf.merge(access, left_on='node_ids', right_index=True)
gdf.plot(column='emp', scheme='quantiles', k=6)
# First 11 characters of the block geoid = the parent tract FIPS code.
gdf['id']= gdf.geoid.str[:11]
gdf.id
# Aggregate blocks up to tracts: sums numeric columns, unions geometries.
gdf = gdf.dissolve('id', aggfunc='sum')
# +
#tracts = Community.from_census(msa_fips='40140', years=[2010])
# -
# Work at tract level from here on (gdf was just dissolved to tracts).
tgdf = gdf
tgdf.plot()
# now we'll grab the nearest intersection node for each tract and plot *tract*-level access
tgdf["node_ids"] = net.get_node_ids(tgdf.centroid.x, tgdf.centroid.y)
# NOTE(review): tgdf already carries an 'emp' column from the earlier merge, so
# this second merge produces suffixed 'emp_x'/'emp_y'; later cells use 'emp_y'.
tgdf=tgdf.merge(access, left_on='node_ids', right_index=True)
tgdf.plot('emp_y', scheme="quantiles", k=5)
# The idea then would be to identify employment centers at some density cutoff (e.g. everything in yellow), then drop out anything that doesnt meet the total employment threshold
#
# e.g. do something like
#
# - select all tracts where access>= `density_threshold`
# - dissolve tract boundaries to give you contiguous employment center polys
# - select all emp centers where total employment >= `total_threshold`
# +
# Keep only tracts whose job accessibility clears the density threshold;
# these are the candidate employment centers.
centers = tgdf[tgdf.emp_y >=10000]
#change this number to tune the density threshold
# -
# Candidate employment centers selected by the accessibility threshold alone.
centers.plot()
# geopandas dissolve needs an explicit grouping column, so label each clump of
# contiguous tracts via connected components of a Queen-contiguity graph.
from libpysal.weights import Queen
w = Queen.from_dataframe(centers)
w.component_labels
centers['labels'] = w.component_labels
# Merge each contiguous clump into one polygon, summing its attributes.
centers=centers.dissolve(aggfunc='sum', by='labels')
centers.plot('emp_y', scheme='quantiles', k=8)
centers.emp_y
centers.to_file("../data/rvcenter_4km1k.json", driver="GeoJSON")
tgdf.columns
| notebooks/center_smoother-rv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Voting Strengths
#
# Chapter 10 of [Real World Algorithms](https://mitpress.mit.edu/books/real-world-algorithms).
#
# ---
#
# > <NAME><br />
# > Athens University of Economics and Business
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Schulze Method
#
# To start with the Schulze method, we need a way to input ballots.
#
# We assume that the ballots are saved in a file, one ballot per line. In each line, that is, ballot, the candidates are listed in decreasing preference.
#
# We'll use the file [ballots.csv](ballots.csv) as an example. The file is in [Comma-separated Values (CSV)](https://en.wikipedia.org/wiki/Comma-separated_values) format.
#
# So, the first line is:
# ```
# D,B,A,C
# ```
# which means that the first preference of the voter is candidate D, then B, then A, then C.
# + [markdown] slideshow={"slide_type": "slide"}
# Although seemingly simple, CSV is a treacherous format.
#
# There are many more details than one would think at first sight; for example, what happens if a field in the line contains a comma, could we have different delimiters, etc.
#
# For that reason, you should always use a ready-made library for handling CSV files.
#
# Our ballots file is simple, but there is no reason not to use [Python's CSV library](https://docs.python.org/3/library/csv.html) anyway.
#
# We'll get all the ballots and we'll put them into a list.
# + slideshow={"slide_type": "fragment"}
import csv
import pprint
# Read every ballot (one CSV line each, candidates in decreasing preference)
# into a list of candidate lists.
with open('ballots.csv') as ballots_file:
    ballots = [row for row in csv.reader(ballots_file)]
pprint.pprint(ballots, width=30)
# + [markdown] slideshow={"slide_type": "slide"}
# The first step in the Schulze method is to calculate the pairwise preferences of the voters regarding the candidates.
#
# That is an array $P$, such that element $P[c_j, c_k]$ shows how many voters prefer candidate $c_j$ to candidate $c_k$.
#
# As our candidates are given by characters, we'll assign a number, starting from zero, to each of the candidates, so that we'll be able to use integer-based indices.
# + slideshow={"slide_type": "slide"}
from collections import defaultdict
candidates = {
'A': 0,
'B': 1,
'C': 2,
'D': 3
}
def calc_pairwise_prefs(ballots, candidates):
    """Tally the voters' pairwise preferences.

    Returns a matrix p such that p[i][j] counts the voters who rank the
    candidate with index i above the candidate with index j.
    """
    n = len(candidates)
    prefs = [[0] * n for _ in range(n)]
    for ballot in ballots:
        # Every candidate on a ballot is preferred over all that follow.
        for pos, winner in enumerate(ballot):
            row = candidates[winner]
            for loser in ballot[pos + 1:]:
                prefs[row][candidates[loser]] += 1
    return prefs
# Tally the pairwise preferences for the example ballots and display them.
p = calc_pairwise_prefs(ballots, candidates)
pprint.pprint(p, width=20)
# + [markdown] slideshow={"slide_type": "slide"}
# The second step in the Schulze method is to create an election graph.
#
# This will be represented by an adjacency matrix.
#
# If for two candidates $c_i$ and $c_j$ the number $P[c_i, c_j]$ of voters that prefer $c_i$ over $c_j$ is greater than the number of voters $P[c_j, c_i]$ that prefer $c_j$ over $c_i$, we add the link $c_i \rightarrow c_j$ and we assign the number $P[c_i, c_j] - P[c_j, c_i]$ as the weight of the link $c_i \rightarrow c_j$.
#
# We'll assign the value $-1$ for all other pairs (or $-\infty$, but as $-1$ is not a valid weight, it will also do).
# + slideshow={"slide_type": "slide"}
def create_election_graph(p):
    """Build the election graph as an adjacency matrix.

    There is a link i -> j with weight p[i][j] - p[j][i] whenever more
    voters prefer i over j than the reverse; all other entries are -1.
    """
    n = len(p)
    graph = [[-1] * n for _ in range(n)]
    for row in range(n):
        for col in range(n):
            margin = p[row][col] - p[col][row]
            if margin > 0:
                graph[row][col] = margin
    return graph
# + [markdown] slideshow={"slide_type": "slide"}
# We can then see the adjacency matrix for our election example:
# + slideshow={"slide_type": "fragment"}
# Build and display the election graph for the example preferences.
g = create_election_graph(p)
pprint.pprint(g, width=20)
# + [markdown] slideshow={"slide_type": "slide"}
# With the adjacency matrix available, we can implement the calculation of the strongest paths.
#
# The function `calc_strongest_paths(p, candidates)` will take as input the adjacency matrix and the candidates and will return:
# * `s`, a matrix of size $n \times n$ such that `s[i][j]` is the strongest path between nodes `i` and `j`.
# * `pred`, a matrix of size $n \times n$ such that `pred[i][j]` is the predecessor of node `i` in the strongest path to node `j`.
#
# The algorithm finds the strongest paths iteratively, by allowing to use one additional node as intermediate node in the paths in each iteration.
# + slideshow={"slide_type": "slide"}
def calc_strongest_paths(p):
    """Compute the strongest paths between all pairs of candidates.

    Returns (s, pred) where s[i][j] is the strength of the strongest
    path from i to j and pred[i][j] is the predecessor of j on that
    path (-1 where no path exists).  This is a variant of Floyd-Warshall:
    round k allows node k as an additional intermediate node.
    """
    n = len(p)
    s = [[-1] * n for _ in range(n)]
    pred = [[-1] * n for _ in range(n)]
    # Direct links: the strength is the winning margin p[i][j] - p[j][i].
    for i in range(n):
        for j in range(n):
            margin = p[i][j] - p[j][i]
            if margin > 0:
                s[i][j] = margin
                pred[i][j] = i
    # Relaxation: a path i -> j may be strengthened by detouring through
    # k; the strength of a path is the strength of its weakest link.
    for k in range(n):
        for i in range(n):
            if i == k:
                continue
            for j in range(n):
                if j == i:
                    continue
                via_k = min(s[i][k], s[k][j])
                if via_k > s[i][j]:
                    s[i][j] = via_k
                    pred[i][j] = pred[k][j]
    return (s, pred)
# + [markdown] slideshow={"slide_type": "slide"}
# We now apply `calc_strongest_paths(p)` to our example:
# + slideshow={"slide_type": "fragment"}
# Compute and display the strongest paths and their predecessor matrix.
s, pred = calc_strongest_paths(p)
print('strongest paths')
pprint.pprint(s, width=30)
print('predecessors')
pprint.pprint(pred, width=30)
# + [markdown] slideshow={"slide_type": "slide"}
# The final step in the Schulze algorithm is finding, for each candidate the candidates that are less popular.
#
# That is a matter of comparing `s[i][j]` and `s[j][i]`.
#
# We implement the logic in `calc_results(s)`.
# + slideshow={"slide_type": "fragment"}
def calc_results(s):
    """Return, for each candidate i, the list of candidates beaten by i.

    Candidate i beats j when the strongest path i -> j is stronger than
    the strongest path j -> i.
    """
    n = len(s)
    return [
        [j for j in range(n) if j != i and s[i][j] > s[j][i]]
        for i in range(n)
    ]
# + [markdown] slideshow={"slide_type": "slide"}
# Finally, we can find the winner of the election:
# + slideshow={"slide_type": "fragment"}
# For each candidate, list the candidates they beat; the candidate that
# beats everyone else wins the election.
wins = calc_results(s)
print(wins)
# + [markdown] slideshow={"slide_type": "fragment"}
# * Candidate `A` wins over `C`.
# * Candidate `B` wins over `A`, `C`.
# * Candidate `D` wins over `A`, `B`, `C`.
# * Candidate `D` wins the election.
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Schulze Method: An Alternative
#
# We can implement the Schulze method with an alternative implementation, in which instead of an adjacency matrix we use a dictionary to represent the preferences.
#
# The logic is entirely the same.
# + [markdown] slideshow={"slide_type": "slide"}
# We implement `calc_pairwise_prefs(ballots)` to return a dictionary `p` such that `p[(c_i, c_j)]` shows how many voters prefer candidate `c_i` to candidate `c_j`.
#
# The keys to the dictionary are the tuples `(c_i, c_j)`.
#
# Note that we do not need to work with indices instead of the actual voters.
#
# We use a `defaultdict(int)`, so the dictionary will return 0 if `(c_i, c_j)` is not a key.
#
# Essentially this is like initializing the preferences matrix to zero.
# + slideshow={"slide_type": "slide"}
from collections import defaultdict

def calc_pairwise_prefs(ballots):
    """Return a dict p with p[(c_i, c_j)] = voters preferring c_i over c_j.

    Missing pairs read as 0 through the defaultdict, which is the
    equivalent of zero-initializing a preferences matrix.
    """
    prefs = defaultdict(int)
    for ballot in ballots:
        for pos, winner in enumerate(ballot):
            for loser in ballot[pos + 1:]:
                prefs[(winner, loser)] += 1
    return prefs
# Recompute the preferences with the dictionary-based implementation.
p = calc_pairwise_prefs(ballots)
pprint.pprint(p)
# + [markdown] slideshow={"slide_type": "slide"}
# The printout of the preferences dictionary is less elegant than the printout of the preferences matrix that we had before.
#
# We can fix that by writing a short helper function that will output our dictionaries in matrix format.
# + slideshow={"slide_type": "slide"}
# Recompute the preference dictionary for the matrix-style printout.
p = calc_pairwise_prefs(ballots)
import itertools
# Fixed display order of the candidates for matrix printouts.
candidates = ['A', 'B', 'C', 'D']
def print_matrix(candidates, matrix, col_width=5):
print(' ', end="")
num_candidates = len(candidates)
for candidate in candidates:
print(f'{candidate:^{col_width}}', end="")
i = 0
for c1, c2 in itertools.product(candidates, repeat=2):
if i % num_candidates == 0:
print()
print(f'{candidates[i // num_candidates]:<2}', end="")
print(f'{matrix[(c1, c2)]:^{col_width}}', end="")
i += 1
print()
# Render the pairwise-preference dictionary as a matrix.
print_matrix(candidates, p, 5)
# + [markdown] slideshow={"slide_type": "slide"}
# We then create the election graph.
#
# We use again a dictionary to store the graph. The keys of the dictionary are node tuples and the values are differences in preferences.
#
# Note that not all tuples are actually stored in the dictionary. We store explicitly only the tuples with a positive difference in preferences.
#
# We use a `defaultdict(lambda:-1)`, which will return -1 for any other (non-existing) key, so for all other couples.
# + slideshow={"slide_type": "slide"}
def create_election_graph(p):
    """Build the election graph as a sparse dict keyed by (c_i, c_j).

    A key (c_i, c_j) maps to the winning margin
    p[(c_i, c_j)] - p[(c_j, c_i)] when that margin is positive; every
    absent pair reads as -1 through the defaultdict.
    """
    g = defaultdict(lambda: -1)
    # Iterate over a snapshot: p is typically a defaultdict, and the
    # reverse-pair lookup below can insert a missing key, which would
    # raise "dictionary changed size during iteration" otherwise.
    for (c_i, c_j), pref in list(p.items()):
        margin = pref - p[(c_j, c_i)]
        if margin > 0:
            g[(c_i, c_j)] = margin
    return g
# + [markdown] slideshow={"slide_type": "slide"}
# In this way we save space.
#
# We can still use `print_matrix(candidates, g, 5)` to print the dictionary in matrix format.
#
# Only those entries that are not equal to -1 are actually stored in the dictionary.
# + slideshow={"slide_type": "slide"}
# Build the sparse election graph and display it in matrix form.
g = create_election_graph(p)
print_matrix(candidates, g, 5)
# + [markdown] slideshow={"slide_type": "slide"}
# We'll use again `defaultdict`s to implement `calc_strongest_paths(p, candidates)`.
#
# We need to pass `candidates` to the function as we no longer use numerical indices, but the actual candidates.
# + slideshow={"slide_type": "slide"}
def calc_strongest_paths(p, candidates):
    """Compute the strongest paths between all pairs of candidates.

    Returns (s, pred): s[(c_i, c_j)] is the strength of the strongest
    path from c_i to c_j (-1 if there is none) and pred[(c_i, c_j)] is
    the predecessor of c_j on that path.
    """
    s = defaultdict(lambda: -1)
    pred = defaultdict(lambda: -1)
    # Direct links: the strength is the winning margin between the two
    # candidates.  Iterate over a snapshot of p's items because the
    # reverse-pair lookup may insert missing keys into a defaultdict,
    # which would raise "dictionary changed size during iteration".
    for (c_i, c_j), pref in list(p.items()):
        margin = pref - p[(c_j, c_i)]
        if margin > 0:
            s[(c_i, c_j)] = margin
            pred[(c_i, c_j)] = c_i
    # Floyd-Warshall-style relaxation: allow each candidate c_k in turn
    # as an intermediate node; a path's strength is its weakest link.
    # This can happen at most as many times as there are candidates.
    for c_k in candidates:
        for c_i in candidates:
            if c_i == c_k:
                continue
            for c_j in candidates:
                if c_j == c_i:
                    continue
                via_k = min(s[(c_i, c_k)], s[(c_k, c_j)])
                if via_k > s[(c_i, c_j)]:
                    s[(c_i, c_j)] = via_k
                    pred[(c_i, c_j)] = pred[(c_k, c_j)]
    return (s, pred)
# + [markdown] slideshow={"slide_type": "slide"}
# We now apply `calc_strongest_paths(p, candidates)` to our example:
# + slideshow={"slide_type": "fragment"}
# Strongest paths with the dictionary-based implementation.
s, pred = calc_strongest_paths(p, candidates)
print('strongest paths')
print_matrix(candidates, s, 5)
print('predecessors')
print_matrix(candidates, pred, 5)
# + [markdown] slideshow={"slide_type": "slide"}
# Finally, we calculate the results.
#
# We do as before, but we return a dictionary instead.
#
# The keys are the candidates.
#
# The value of a key is a list containing the candidates that lose to the particular candidate indicated by the key.
# + slideshow={"slide_type": "fragment"}
def calc_results(s):
    """Return a dict mapping each candidate to the candidates they beat.

    Candidate c_i beats c_j when the strongest path c_i -> c_j is
    stronger than the strongest path c_j -> c_i.
    """
    wins = defaultdict(list)
    # Iterate over a snapshot: s is typically a defaultdict, and the
    # reverse-pair lookup below can insert a missing key, which would
    # raise "dictionary changed size during iteration" otherwise.
    for (c_i, c_j), strength in list(s.items()):
        if strength > s[(c_j, c_i)]:
            wins[c_i].append(c_j)
    return wins
# + [markdown] slideshow={"slide_type": "slide"}
# So, here are the results again:
# + slideshow={"slide_type": "fragment"}
# Final results: each candidate mapped to the candidates they beat.
wins = calc_results(s)
pprint.pprint(wins)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Floyd-Warshall All Pairs Shortest Paths
#
# The strongest paths is a variation of the Floyd-Warshall all pairs shortest paths algorithm.
#
# As with the strongest paths, it finds shortest paths by using more and more nodes as intermediaries.
# + slideshow={"slide_type": "slide"}
import sys
MAX_INT = sys.maxsize
def floyd_warshall(w):
n = len(w)
# Initialize distances matrix.
dist = [ [ MAX_INT for j in range(n) ] for i in range(n) ]
# Initialize predecessors matrix.
pred = [ [ -1 for j in range(n) ] for i in range(n) ]
# Initially the length of the path from i to j is simply
# the weight between w[i][j], if it exists, and then
# i is the predecessor of j.
for i in range(n):
for j in range(n):
if w[i][j] != 0:
dist[i][j] = w[i][j]
pred[i][j] = i
# For each k, i, j, such that the path from i to j
# can be shortened by taking the detour from i to k
# and k to j adjust the path and the predecessor.
# This can happen at most n times.
for k in range(n):
for i in range(n):
if i != k:
for j in range(n):
if j != i:
if (dist[i][k] != MAX_INT and
dist[k][j] != MAX_INT and
dist[i][j] > dist[i][k] + dist[k][j]):
dist[i][j] = dist[i][k] + dist[k][j]
pred[i][j] = pred[k][j]
return (dist, pred)
# + [markdown] slideshow={"slide_type": "slide"}
# We'll use the algorithm on the familiar [traffic_grid_graph.txt](traffic_grid_graph.txt) graph.
#
# <img width="400" src="traffic_grid_graph.png"/>
# + [markdown] slideshow={"slide_type": "slide"}
# Here is the function that reads the graph:
# + slideshow={"slide_type": "fragment"}
def read_graph(filename, directed=False):
    """Read a weighted graph from a whitespace-separated edge-list file.

    Each valid line holds "node1 node2 weight" as integers; lines with a
    different number of fields are skipped.  Returns an adjacency-list
    dict mapping each node to a list of (neighbour, weight) tuples.
    Undirected graphs also get the reverse edge for every line.
    """
    graph = {}
    with open(filename) as input_file:
        for line in input_file:
            parts = line.split()
            if len(parts) != 3:
                continue  # not a valid line, ignore
            n1, n2, w = (int(part) for part in parts)
            graph.setdefault(n1, []).append((n2, w))
            graph.setdefault(n2, [])
            if not directed:
                graph[n2].append((n1, w))
    return graph
# + [markdown] slideshow={"slide_type": "slide"}
# We go ahead and read it:
# + slideshow={"slide_type": "fragment"}
# Read the example traffic-grid graph (undirected) and display it.
g = read_graph('traffic_grid_graph.txt')
pprint.pprint(g)
# + [markdown] slideshow={"slide_type": "slide"}
# Our implementation of the Floyd-Warshall algorithms requires an adjacency matrix as input.
#
# So, we'll use a function that converts the graph from an adjacency list representation to an adjacency matrix one.
# + slideshow={"slide_type": "slide"}
def adjlist_to_matrix(g):
    """Convert adjacency-list dict g to an adjacency matrix.

    Missing edges are MAX_INT and each node's distance to itself is 0.
    Assumes the node keys are exactly 0..len(g)-1, since they are used
    directly as matrix indices.
    """
    n = len(g)
    m = [[MAX_INT] * n for _ in range(n)]
    for u in g:
        m[u][u] = 0
        for v, w in g[u]:
            m[u][v] = w
    return m
# + [markdown] slideshow={"slide_type": "slide"}
# We do the conversion, and then we run the Floyd-Warshall algorithm.
# + slideshow={"slide_type": "fragment"}
# Convert to a matrix, run Floyd-Warshall, and print each node's
# predecessor and distance rows.
m = adjlist_to_matrix(g)
dist, pred = floyd_warshall(m)
for s in sorted(g.keys()):
    print('starting node:', s)
    print(pred[s])
    print(dist[s])
# + [markdown] slideshow={"slide_type": "slide"}
# You may have noticed that the distance of a node to itself has been set to `MAX_INT`.
#
# If that bothers us and we would like to fix it to zero, that's easy to do:
# + slideshow={"slide_type": "fragment"}
# Zero out each node's distance to itself (left at MAX_INT above).
for i in range(len(dist)):
    dist[i][i] = 0
for s in sorted(g.keys()):
    print('starting node:', s)
    print(pred[s])
    print(dist[s])
| content/notebooks/chapter_10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Shivani-pawar26/Machine_learning/blob/main/Lab_3_linear_regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="JJM8ymxD96uR"
class MyLinearRegression:
    """Simple univariate linear regression trained by batch gradient descent.

    Model: y_hat = weight * x + bias.  ``train`` runs ``iterations``
    gradient-descent steps and records the cost after every step in
    ``cost_trend``.
    """

    def __init__(self, weight=1, bias=2, learning_rate=0.1,
                 iterations=80):
        self.weight = weight
        self.bias = bias
        self.learning_rate = learning_rate
        self.iterations = iterations
        self.cost_trend = []  # cost after each training iteration
        self.cost = 0         # most recent cost value

    def predict(self, x):
        """Return the model's prediction for every sample in x."""
        return [self.weight * sample + self.bias for sample in x]

    def cost_function(self, x, y):
        """Half the mean squared error of the current model over (x, y)."""
        count = len(x)
        total_error = sum(
            (y[i] - (self.weight * x[i] + self.bias)) ** 2
            for i in range(count)
        )
        return float(total_error) / (2 * count)

    def update_weights(self, x, y):
        """Perform one batch gradient-descent step on weight and bias."""
        count = len(x)
        weight_deriv = 0
        bias_deriv = 0
        for i in range(count):
            error = y[i] - (self.weight * x[i] + self.bias)
            # Partial derivatives of the squared error:
            # d/dw = -2x(y - (wx + b)),  d/db = -2(y - (wx + b))
            weight_deriv += -2 * x[i] * error
            bias_deriv += -2 * error
        # Step against the gradient (the derivatives point uphill).
        self.weight -= (weight_deriv / count) * self.learning_rate
        self.bias -= (bias_deriv / count) * self.learning_rate

    def train(self, x, y):
        """Run gradient descent, logging weight/bias/cost every iteration."""
        for i in range(self.iterations):
            self.update_weights(x, y)
            # Record the cost after this step.
            self.cost = self.cost_function(x, y)
            self.cost_trend.append(self.cost)
            print("Iteration: {}\t Weight: {}\t Bias: {}\t Cost: {}".format(i, self.weight, self.bias, self.cost))
# + id="XGMUmPfj96ua" outputId="17409dc9-2d52-4dd4-a02b-9452f8e92795" colab={"base_uri": "https://localhost:8080/", "height": 833}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Initialise the raw data: hours studied vs. exam scores for 25 students.
data = {'Hours':[2.5,5.1,3.2,8.5,3.5,1.5,9.2,5.5,8.3,2.7,7.7,5.9,4.5,3.3,1.1,8.9,2.5,1.9,6.1,7.4,2.7,4.8,3.8,6.9,7.8],
        'Scores':[21,47,27,75,30,20,88,60,81,25,85,62,41,42,17,95,30,24,67,69,30,54,35,76,86]}
# Create DataFrame
studentscores = pd.DataFrame(data)
# Display the DataFrame (notebook cell output).
studentscores
# + id="Avh401TI11sH" outputId="7222e3a3-0c2c-473f-f25a-7d3352121a63" colab={"base_uri": "https://localhost:8080/", "height": 279}
# Scatter plot of the raw hours-vs-scores data.
x=[2.5,5.1,3.2,8.5,3.5,1.5,9.2,5.5,8.3,2.7,7.7,5.9,4.5,3.3,1.1,8.9,2.5,1.9,6.1,7.4,2.7,4.8,3.8,6.9,7.8]
y=[21,47,27,75,30,20,88,60,81,25,85,62,41,42,17,95,30,24,67,69,30,54,35,76,86]
plt.scatter(x,y,s=10)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
# + id="_LSp2jKt96uj" outputId="a0d1510f-ee15-47ab-cad8-06772a5d7999" colab={"base_uri": "https://localhost:8080/"}
#from my_linear_regression import MyLinearRegression
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
# Importing the dataset
# Features: every column except the last; target: the last column.
X = studentscores.iloc[:, :-1].values
y = studentscores.iloc[:, -1].values
X,y
# + id="PvfKE_WT96un" outputId="eab5a8e9-d04d-4fde-aff9-d1ef91f631fd" colab={"base_uri": "https://localhost:8080/"}
# Hold out 25% of the samples for testing (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/4, random_state=0)
# Fitting Simple Linear Regression to the Training set
regressor = MyLinearRegression()
regressor.train(X_train, y_train)
print('Weight: ' + str(regressor.weight) + ' Bias: ' + str(regressor.bias))
# Predicting the Test set results
y_pred = regressor.predict(X_test)
# + id="Qw0Vfxf996uw" outputId="e40a2dc4-feec-46de-bbf5-124a31c379c4" colab={"base_uri": "https://localhost:8080/", "height": 290}
# Plot the cost after each of the 80 training iterations.
x=[int(x) for x in range(80)]
y=regressor.cost_trend
plt.plot(x,y)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
# + id="Z6VkaIc6KE2Q" outputId="03428975-d6a7-48e9-8775-5b5757f3bba7" colab={"base_uri": "https://localhost:8080/", "height": 290}
# Overlay the fitted regression line on the scatter plot of the data.
w = regressor.weight
b = regressor.bias
x=[2.5,5.1,3.2,8.5,3.5,1.5,9.2,5.5,8.3,2.7,7.7,5.9,4.5,3.3,1.1,8.9,2.5,1.9,6.1,7.4,2.7,4.8,3.8,6.9,7.8]
y=[21,47,27,75,30,20,88,60,81,25,85,62,41,42,17,95,30,24,67,69,30,54,35,76,86]
plt.scatter(x,y)
axes = plt.gca()
# Draw the line y = b + w*x across the current x-axis limits.
x_vals = np.array(axes.get_xlim())
y_vals = b + w * x_vals
plt.plot(x_vals, y_vals)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
| Lab_3_linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="Rj2Ycby3W9om"
#
# <a href="https://colab.research.google.com/github/aviadr1/learn-advanced-python/blob/master/content/14_pandas/edm_us_adult_census_income/questions.ipynb" target="_blank">
# <img src="https://colab.research.google.com/assets/colab-badge.svg"
# title="Open this file in Google Colab" alt="Colab"/>
# </a>
#
# + [markdown] colab_type="text" id="YsMAnwdxYFJq"
# # get the data
# run the following two cells below to get the data for this exercise,
# then followup by reading the questions and writing your own code to answer them.
# + colab={} colab_type="code" id="LhWeSUM8XMMq"
# !pip install requests
# + colab={} colab_type="code" id="RUsHrWEqW-6c"
import requests
# Download the 1994 "adult" census dataset and cache it locally.
url = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
request = requests.get(url)
# Fail fast on HTTP errors instead of silently saving an error page.
request.raise_for_status()
with open('adult.csv', 'w') as f:
    f.write(request.text)
### now the data is available in the file adult.csv.
### read the questions below
# import pandas as pd
# pd.read_csv('adult.csv')
# + [markdown] colab_type="text" id="i2sOaeqiW9op"
# # income for adults from the 1994 census
# This dataset was extracted by <NAME> from the 1994 Census database.
# source: http://mlr.cs.umass.edu/ml/datasets/Adult
#
# Listing of attributes:
#
# * age: continuous.
# * workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
# * fnlwgt: continuous.
# * education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# * education-num: continuous.
# * marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
# * occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
# * relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# * race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
# * sex: Female, Male.
# * capital-gain: continuous.
# * capital-loss: continuous.
# * hours-per-week: continuous.
# * native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
# * income: >50K, <=50K.
#
# + [markdown] colab_type="text" id="2P4hHgdtW9oq"
# ## 1. load the data
# 1. extract the column names from the description and read the csv while supplying the columns names
# - rename columns with a hyphen `-` to use underscores `_` insead. example: `capital-gain --> capital_gain`
# - look at the head()
# 2. look at info, dtype, check for nan values
# 3. what are the value counts of the categorical variables: workclass, education, marital_status, occupation, relationship, race, sex, native_country, income?
# - do you notice the extra space ' ' at the beginning of each value?
# - remove the extra space
# 4. turn 'sex' and 'income' into 0/1 fields
# - replace the categorical 'sex' column with a numerical 'female' column with value 1 for females and 0 for males
# - replace the categorical 'income' column with a numerical 'over50k' column with value 1 for '>50k' and 0 for '<50K'
# 5. use `.describe()` function to get descriptive statistics for most columns
# - make sure that 'sex' and 'over50k' are now numerical fields
# + [markdown] colab_type="text" id="nVpu3sCkW9or"
# ## 2. explore capital gains / losses
# ### capital_gain
# 1. plot the histogram for capital gains
# - verbally describe what you see
# 2. for people who have `capital_gain > 0`
# - plot the histogram for capital gains
# 3. how many people have capital gains over 25000?
# - use `value_counts()` to look at all the values of capital_gain over 25000.
# - what's weird about the data?
# 4. could the people who had capital_gain==25124 be related?
# 5. does capital_gain over 50k mean income is over 50k?
#
# ### capital_loss
# 1. plot the histogram of capital_loss
# 2. for people who have `capital_loss > 0`
# - plot the histogram for capital_loss
# 3. how many people had both `capital_gain>0` and `capital_loss>0` ?
# 4. who can afford to lose money on capital investments?
# - what percent of people overall had over 50K income?
# - what percent of people with 0 capital_loss? with capital_loss>0?
#
# ### combining and binning
# 1. create a new `capital_change` column that equals `capital_gain - capital_loss`
# 2. use the `qcut` function to quantize/bin/cut `capital_change` into a new columns called `capital_change_bin` with 10 bins of equal proportions.
# 1. do not bin `capital_change==0` values as there are too many of them
# 2. to simplify using this column later, use the left side of the interval created as the label
# 3. label rows with `capital_change==0` as having `capital_change_bin=0`
# 4. make sure you have no null values for `capital_change_bin`
# 3. how many people have a non-zero capital_change?
# - lets call this 'has_capital_change'
# - plot 'has_capital_change' over 'over50k'
# - what do you learn from this diagram
# 4. plot `capital_change` by `over50k`
# - what do you learn from this diagram
# 4. plot `over50k` by `capital_change_bin`
# - what can you learn from this diagram?
#
# + [markdown] colab_type="text" id="ubfcz6GUW9ot"
# ## education
# 1. what is the mean education_num by education?
# - sort the education categories by the mean_values. does it make sense
# - check out other descriptive statistics to see if anything falls out of place
# - turn education into a categorical ordered type
# - plot education VS education_num
# - what have we learned?
# 1. plot the distribution for `education`
# 2. plot over50k by education
# - what can we learn?
# 3. plot hours_per_week by education
# 1. what can we learn from this plot?
# 2. now use the hue="over50k" of seaborn to see hours_per_week by education/over50k.
# - learn anything else?
# 4. plot education_num by occupation
# - sort by mean education_num
# 4. plot education_num by workclass
# - sort by mean education_num
# 5. create a crosstab or a pivot_table of education VS occupation.
# - normalize it by the education rows
# (each row X shows the conditional probability of having occupation Y by education level X)
# - create a heatmap that shows which occpupations are most likely for each education level
# - verbally describe what you've learned
# 6. create a crosstab or a pivot_table of education VS workclass.
# - normalize it by the education rows
# (each row X shows the conditional probability of having workclass Y by education level X)
# - create a heatmap that shows which workclass is most likely for each education level
# - verbally describe what you've learned
# - re-run this analysis without the private sector
# 7. plot "race" vs "education_num
# 8. plot "relationship" vs "education_num
# + [markdown] colab_type="text" id="oqjlIaC6W9ou"
# ## occupation / workclass
# 1. how many levels of occupation?
# 2. how many levels of worklass?
# 3. how many combinations? potential? actual?
# 4. plot `over50k` by `occupation`
# - sort by mean `over50k`
# - compare this to `over50k` by `education`. which variable more strongly predicts income?
# - compare this to `education_num` by `occupation`. are the highest paying jobs correlated with highest earning education?
# 5. plot `over50k` by `workclass`
# 6. look at combinations of occupation / workclass
# 1. what are the top combinations in terms of earning over50k (mean)? how many people in that category?
# 2. how many of these combinations have more than 100 people?
# 3. show a heatmap of the mean over50k of occupation-vs-worklass for combinations with more than 100 people.
# center the heatmap at the populations mean over50k for increased effect.
# what conclusions can you draw?
# 7. create a numerical encoding for occupation / workclass pairs
# - create a new column called "occ_class" that combines the string of the occupation and workclass
# - use the library [category_encoders](http://contrib.scikit-learn.org/categorical-encoding/), here's an [intro](https://towardsdatascience.com/smarter-ways-to-encode-categorical-data-for-machine-learning-part-1-of-3-6dca2f71b159) how to do it
# - use the weight of evidence encoder `ce.woe.WOEEncoder` here's an [article](https://towardsdatascience.com/all-about-categorical-variable-encoding-305f3361fd02#targetText=Weight%20of%20Evidence%20Encoding,-Weight%20of%20Evidence&targetText=Weight%20of%20evidence%20(WOE)%20is%20a%20measure%20of%20how%20much,P(Bads)%20%3D%201.) explaining it
# - add the encoded occ_class as a new column called `occ_class_woe` to your dataframe
#
# + [markdown] colab_type="text" id="M4iJoZbqW9ow"
# ## correlations
# 1. which features are most important, which correlate?
# - compute the correction matrix of features with themselves
# 2. draw a clustermap or heatmap of this correlation
# - center the cluster at 0
# - annotate the plot with the correlation values
# 3. look at the strongest correlations and draw some conclusions.
#
# + [markdown] colab_type="text" id="ryvJarvTW9ox"
# ## TODO:
# 1. look at `relationship` and `marriage_status`. how meaningful are they? should we encode them?
# 2. look at `native_country`. how does immigration effect other variables? should we build further categories based on continent or on 1st/2nd/3rd world countries? should we add an `is_immigrant` boolean column?
# 3. we've done the analysis treating each row of the data as a person, when **in fact** each row represents a large group of people, with the variable `fnlwgt` counting how many people are in the group. redo some of the analysis with weighted averages
# 4. look further at age. should we cut this continous variable into age groups like 18-25, 25-40 etc ?
# - combine age/relationship to see if relationship effects can be explained away by age
# 4. `education_num` seems to be a label encoding of `education`. I think some degrees are not properly captured with that encoding, like `assoc-voc`. it would be interesting to to recode it with woe against `over50k` and see if anything changes.
# 5. data quality questions:
# - why are women under-represented in this data
# - why are there no hispanic/latin category in race?
# 6. compare to other interesting analysis:
# - Predicting Earning Potential using the Adult Dataset https://rpubs.com/H_Zhu/235617
# - related notebook on kaggle https://www.kaggle.com/uciml/adult-census-income
#
# + colab={} colab_type="code" id="y_U5vQTFW9oy"
# -
#
# ```{toctree}
# :hidden:
# :titlesonly:
#
#
# solution
# ```
#
| content/_build/jupyter_execute/14_pandas/edm_us_adult_census_income/questions.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# # Functionality to add
# - Databunch
# - Learner
# - Fit one cycle
# - Show batch
using Metalhead
using FileIO
using Images
using Serialization
using Random
using Flux
using Flux.Data: DataLoader
using Flux.Optimise: Optimiser, WeightDecay
using Flux: onehotbatch, onecold, logitcrossentropy
using Statistics, Random
import ProgressMeter
using Parameters: @with_kw
import MLDatasets
import DrWatson: savename, struct2dict
import BSON
using CUDAapi
using Plots
import GR
using Images
using CuArrays
using ImageView
gr()
# # ImageList -> Playground
# ## From folder
# +
# Dataset root, its category subfolders, and the target square image size.
path = "/media/subhaditya/DATA/COSMO/Datasets/catDog/"
categories = readdir(path)
imageSize = 64
# -
# Time listing all files of the "cat" category.
@time test_folder = readdir(joinpath(path,"cat"))
# +
# Pre-allocate a WxHx3xN image tensor sized by the cat-folder listing.
images =zeros((imageSize, imageSize, 3, size(test_folder, 1)));
file_name = joinpath(path, "cat", "9998.jpg")
# Load one sample image, resize it, and view it channels-first.
img = channelview(imresize(load(file_name), (imageSize, imageSize)))
# -
# Check the channels-last shape, then time storing it into slot 1.
size(permutedims(img,(2,3,1)))
@time images[:,:,:,1] = permutedims(img,(2,3,1))
# +
# NOTE: Julia's worker-thread count is fixed at process startup (set the
# JULIA_NUM_THREADS environment variable or pass `-t`/`--threads`).
# The previous `Threads.nthreads() = 8` only redefined the *query*
# function (type piracy) without creating any threads, so it was removed.

# List the full paths of every file in category `cat` (a subfolder of the
# global `path`), paired with a same-length vector of the category label.
function add_path(cat::String)
    temp_dir = readdir(joinpath(path, cat))
    return [joinpath(path, cat, x) for x in temp_dir], fill(cat, size(temp_dir, 1))
end
# +
# Flatten the per-category (paths, labels) pairs into two parallel vectors.
# NOTE(review): add_path is called twice per category, so each directory is
# listed twice; calling it once and destructuring would be cheaper.
total_files = collect(Iterators.flatten([add_path(x)[1] for x in categories]));
total_categories = collect(Iterators.flatten([add_path(x)[2] for x in categories]));
# +
# Load every image (in parallel across threads) into the 4-D tensor.
images = zeros((imageSize, imageSize, 3, size(total_files,1)));
@time Threads.@threads for idx in 1:size(total_files,1)
    img = channelview(imresize(load(total_files[idx]), (imageSize, imageSize)))
    images[:,:,:,idx] = permutedims(img,(2,3,1))
end
# +
# Sanity check: one label per loaded image.
size(images,4) == size(total_categories,1)
# -
# NOTE(review): `X` is not defined at this point in the notebook (it is
# produced later by fromFolder); this line errors when run top-to-bottom.
mb_idxs = Base.Iterators.partition(1:length(X), 64)
# ##
# # ImageList -> Main
#export
using FileIO
using Images
using Serialization
using Random
using CUDAapi
using Plots
import GR
using Images
using CuArrays
using ImageView
using Statistics, Random
import ProgressMeter
using Distributions
using Zygote
gr()
# ## fromFolder
#export
# Collect the full paths of every file in category `cat` under the global
# `path`, paired with an equally long vector repeating the category label.
function add_path(cat::String)
    files = readdir(joinpath(path, cat))
    return [joinpath(path, cat, f) for f in files], fill(cat, length(files))
end
# ## Class Distribution
#export
"""
    classDistribution(y)

Plot the class distribution of the label vector `y` as a bar chart and
return `(counts, maximum(counts))`, where `counts[i]` is the number of
occurrences of the i-th unique label.
"""
function classDistribution(y)
    labels = unique(y)
    cnts = [sum(y .== i) for i in labels]
    display(plot(cnts, seriestype = [:bar]))
    return cnts, maximum(cnts)
end
#export
# Dataset root folder (cat/dog image classification).
path = "/media/subhaditya/DATA/COSMO/Datasets/catDog/"
#export
# NOTE: Julia's worker-thread count is fixed at process startup (set the
# JULIA_NUM_THREADS environment variable or pass `-t`/`--threads`).
# The previous `Threads.nthreads() = length(Sys.cpu_info())` only
# redefined the *query* function (type piracy) without creating threads,
# so it has been removed.
# Scratch cell left over from experimentation.
collect(1:10)
# +
#export
"""
Function to create an array of images and labels -> when the directory structure is as follows
- main
    - category1
        - file1...
    -category2
        - file1...
...
"""
function fromFolder(path::String,imageSize=64::Int64)
    @info path, imageSize
    categories = readdir(path)
    # Gather all file paths and their category labels.
    total_files = collect(Iterators.flatten([add_path(x)[1] for x in categories]));
    total_categories = collect(Iterators.flatten([add_path(x)[2] for x in categories]));
    distrib,max_dis = classDistribution(total_categories)
    # First index at which each unique label appears.
    indices_repeat = indexin(unique(total_categories), total_categories)
    # oversample
    total_add = max_dis.-distrib # get the differences to oversample
    oversample = false;
    if sum(total_add)>100
        @info "Oversampling"
        # NOTE(review): `size(n, 1)` of a scalar is 1, so this allocates a
        # tensor with a 4th dimension of 1 rather than the intended
        # max_dis * n_categories capacity — verify before relying on it.
        images = zeros((imageSize, imageSize, 3, size(max_dis*length(unique(total_categories)),1)));
        oversample= true;
        # NOTE(review): `y` is a global that may not exist when this branch
        # runs (it is usually *created* from this function's result).
        oversample_index = length(y)- sum(total_add)# keep a track of indices from the back
    else
        @info "No need to oversample"
        images = zeros((imageSize, imageSize, 3, size(total_categories,1)));
        oversample= false;
    end
    # NOTE(review): the oversampling branch below mutates total_categories
    # and reads oversample_index from inside a multithreaded loop — this
    # looks race-prone; confirm the threading is safe before reuse.
    Threads.@threads for idx in collect(1:size(total_files,1))
        img = channelview(imresize(load(total_files[idx]), (imageSize, imageSize)))
        img = convert(Array{Float64},img)
        images[:,:,:,idx] = permutedims(img,(2,3,1))
        # @info oversample
        if oversample==true
            if idx in indices_repeat
                labelrep = findfirst(x->x==idx,indices_repeat) # index in the repeated list
                to_repeat = total_add[labelrep] # no of times to repeat
                total_categories = vcat(total_categories, fill(total_categories[indices_repeat[labelrep]],to_repeat))
                Threads.@threads for idx2 in collect(oversample_index:to_repeat)
                    images[:,:,:,idx2] = images[:,:,:,indices_repeat[labelrep] ]
                end
            end
        end
    end
    @info "Done loading images"
    return images, total_categories
end
# -
#export
# Load the full dataset once; X is H x W x C x N, y the label vector.
X,y = fromFolder(path,64);
size(X),size(y)
# ## Splitting
# Hold out 30% of the samples as a test split (indices shuffled first).
at = 0.7
n = length(y)
idx = shuffle(1:n)
train_idx = view(idx, 1:floor(Int, at*n));
test_idx = view(idx, (floor(Int, at*n)+1):n);
ytrain,ytest = y[train_idx,:], y[test_idx,:]
Xtrain,Xtest = X[:,:,:,train_idx], X[:,:,:,test_idx]
@info length(ytrain),length(ytest)
@info length(Xtrain),length(Xtest)
#export
"""
    splitter(pct_split=0.7)

Split the notebook-global `X` (samples on the 4th dim) and `y` into shuffled
train/test partitions; returns `(Xtrain, ytrain, Xtest, ytest)`.
"""
function splitter(pct_split::AbstractFloat=0.7)
    # FIX: the original default `pct_split=0.7::Float16` type-asserts the
    # Float64 literal 0.7 against Float16, so calling `splitter()` with no
    # argument threw a TypeError. The in-body docstring (a no-op statement)
    # was also moved to a proper docstring above.
    n = length(y)
    idx = shuffle(1:n)
    train_idx = view(idx, 1:floor(Int, pct_split*n));
    test_idx = view(idx, (floor(Int, pct_split*n)+1):n);
    ytrain,ytest = y[train_idx,:], y[test_idx,:]
    Xtrain,Xtest = X[:,:,:,train_idx], X[:,:,:,test_idx]
    return Xtrain, ytrain, Xtest, ytest
end
#export
Xtrain, ytrain, Xtest, ytest = splitter(.8);
# ## Linear
using Zygote
# Toy linear layer trained by hand with Zygote gradients.
W = rand(2,5)
b = rand(2)
Dense(x) = W*x.+b
# Squared-error loss of the linear layer against target y.
function loss(x,y)
    ŷ = Dense(x)
    sum((y-ŷ).^2)
end
x,y = rand(5),rand(2)
# loss(x,y)
α = 0.1
x,y = rand(100),rand(2)
W = rand(2,100)
b = rand(2)
W = zeros(2,100)
for a in collect(1:50)
    gs = gradient(() -> loss(x, y), Params([W, b]))
    W̄ = gs[W]
    # FIX: gradient-descent steps *subtract* the scaled gradient. The original
    # `W .= α.*W̄` replaced W with the scaled gradient each iteration, so the
    # loss never decreased. (b is intentionally left un-updated, as before.)
    W .-= α.*W̄
    ŷ = W*x.+b
    @info sum((y-ŷ).^2)
end
# # Initialization
# - Zero Initialization: set all weights to 0
# - Normal Initialization: set all weights to random small numbers
# - Lecun Initialization: normalize variance
# - Xavier Intialization (glorot init)
# - Kaiming Initialization (he init)
# ## Lecun
# It draws samples from a truncated normal distribution centered on 0 with stddev <- sqrt(1 / fan_in) where fan_in is the number of input units in the weight tensor..
using Distributions
#export
# LeCun initialization: zero-mean normal with std sqrt(1 / fan_in), where
# fan_in is the number of input units of the weight tensor.
function lecun_normal(fan_in)
    Distributions.Normal(0, sqrt(1/fan_in))
end
W = rand(lecun_normal(2), 2, 100)
b = rand(lecun_normal(2), 2)
# ## Xavier Normal
# It draws samples from a truncated normal distribution centered on 0 with stddev = sqrt(2 / (fan_in + fan_out)) where fan_in is the number of input units in the weight tensor and fan_out is the number of output units in the weight tensor.
#export
# Glorot/Xavier normal initialization: zero-mean normal with
# std = sqrt(2 / (fan_in + fan_out)).
function xavier_normal(fan_in, fan_out)
    Distributions.Normal(0, sqrt(2/(fan_in+fan_out)))
end
W = rand(xavier_normal(2,100), 2, 100)
b = rand(xavier_normal(2,2), 2)
# # Xavier Uniform
# It draws samples from a uniform distribution within -limit, limit where limit is sqrt(6 / (fan_in + fan_out)) where fan_in is the number of input units in the weight tensor and fan_out is the number of output units in the weight tensor.
#export
# Glorot/Xavier uniform initialization: U(-limit, limit) with
# limit = sqrt(6 / (fan_in + fan_out)).
function xavier_uniform(fan_in, fan_out)
    bound = sqrt(6/(fan_in+fan_out))
    Distributions.Uniform(-bound, bound)
end
W = rand(xavier_uniform(2,100), 2, 100)
b = rand(xavier_uniform(2,2), 2)
# ## He Normal
# It draws samples from a truncated normal distribution centered on 0 with stddev = sqrt(2 / fan_in) where fan_in is the number of input units in the weight tensor.
#export
# He/Kaiming normal initialization: zero-mean normal with std = sqrt(2 / fan_in).
function he_normal(fan_in)
    Distributions.Normal(0, sqrt(2/fan_in))
end
W = rand(he_normal(2), 2, 100)
b = rand(he_normal(2), 2)
# # He Uniform
# It draws samples from a uniform distribution within -limit, limit where limit is sqrt(6 / fan_in) where fan_in is the number of input units in the weight tensor.
#export
# He/Kaiming uniform initialization: U(-limit, limit) with limit = sqrt(6 / fan_in).
function he_uniform(fan_in)
    bound = sqrt(6/fan_in)
    Distributions.Uniform(-bound, bound)
end
W = rand(he_uniform(2), 2, 100)
b = rand(he_uniform(2), 2)
# # Batching
# ## One hot
ytrain
labels = unique(ytrain);
# Map each distinct label to a 1-based integer id.
encodedlabels = Dict(labels .=> collect(1:length(labels)))
# NOTE(review): `map(coll, dict)` passes the collection as the *function* and
# will throw; presumably `map(x -> encodedlabels[x], ytrain)` was intended.
map(ytrain, encodedlabels)
"""
    onecold(y_enc)

Integer-encode a label vector: returns `(encoded, mapping)` where `mapping`
is a Dict from label to 1-based integer id (in order of first appearance) and
`encoded` applies that mapping elementwise to `y_enc`.
"""
function onecold(y_enc)
    labels = unique(y_enc)
    encodedlabels = Dict(labels .=> collect(1:length(labels)))
    # FIX: the original `map(labels, encodedlabels)` treats the label vector
    # as a function and throws a MethodError; encode each element via the Dict.
    return map(v -> encodedlabels[v], y_enc), encodedlabels
end
onecold(ytrain)
| .ipynb_checkpoints/DataLoader.jl-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="58FMWCpsIop7"
import os
import re
import json
import random
import codecs
from template_config import *
from nltk import word_tokenize
from collections import defaultdict
# + id="GISW_jYeIop-"
# Cap on the number of columns allowed in one synthetic multi-table DB
# (used by create_dbs below).
MAX_COL_NUM = 20
# Comparison operators sampled for WHERE-style slots.
OPS = ["=", ">", "<", ">=", "<=", "!=", "LIKE"]
# Input file locations: NL/SQL templates, Spider data, slot paraphrases, WikiSQL tables.
nlsql_templates_file = "data/nlsql_templates_context.txt"
spider_data_file = '/home/t-tyu/projects/NL2CodeOverData/data/spider'
sql_components_file = "data/sql_components.json"
wikisql_tables_file = "/home/t-tyu/projects/NL2CodeOverData/data/tabq_datasets/SQLNet/data/train_tok.tables.jsonl"
wikisql_tables_file_dev = "/home/t-tyu/projects/NL2CodeOverData/data/tabq_datasets/SQLNet/data/dev_tok.tables.jsonl"
# + id="mH9AdwDaIop_"
# read NL-SQL templates
# File format: blank-line-separated blocks. Each block has a
# "SQL Pattern:" line (pattern, optionally "|||"-separated constraints),
# a "count:" line, and one or more "question:" lines (question text,
# optionally "|||"-separated constraints).
templates = []
with open(nlsql_templates_file) as fp:
    lines = fp.readlines()
template_one = {}
for line in lines:
    if "\n" == line:
        # blank line terminates the current template block
        templates.append(template_one)
    elif "SQL Pattern:" in line:
        template_one = {}
        sps = line.strip().replace("SQL Pattern: ", "").split("|||")
        template_one["questions"] = []
        if len(sps) == 1:
            template_one["SQL pattern"] = sps[0]
            template_one["SQL constraints"] = []
        elif len(sps) == 2:
            template_one["SQL pattern"] = sps[0]
            # constraints are "|"-separated; drop bare-space entries
            template_one["SQL constraints"] = [x.strip() for x in sps[1].split("|") if x != " "]
        else:
            print("\n======Error warning!!!!")
    elif "count: " in line:
        # how often this SQL pattern occurred in the source corpus
        sql_count = int(line.strip().replace("count: ", ""))
        template_one["count"] = sql_count
    elif "question: " in line:
        sps = line.strip().replace("question: ", "").split("|||")
        question = sps[0]
        if len(sps) == 2:
            q_constraints = [x.strip() for x in sps[1].split("|") if x != " "]
        else:
            q_constraints = []
        template_one["questions"].append((question, q_constraints))
# + id="kQGlK9FBIoqA" outputId="9ef53000-b80b-4bdc-e854-7690c7897210"
# Collect and display every distinct constraint string used by any template
# or question (sanity check of the parsed template file).
all_constraints = []
for tmp in templates:
    all_constraints.extend(tmp['SQL constraints'])
    for q in tmp['questions']:
        all_constraints.extend(q[1])
print(list(set(all_constraints)))
# + id="q-LZzx_fIoqB"
# read SQL component file
# Used below as sql_components["OP"|"AGG"|"SC"|...][key] -> list of NL
# paraphrases to sample from.
with open(sql_components_file) as json_file:
    sql_components = json.load(json_file)
# + id="SxlIZ4cqIoqB"
def read_tables(table_path):
    """Load a WikiSQL-style .jsonl file: one JSON table object per line."""
    print ("Loading data from %s" % (table_path))
    with open(table_path) as inf:
        table_data = [json.loads(raw.strip()) for raw in inf]
    print("table number in wikisql original table.json file: {}".format(len(table_data)))
    return table_data
# + id="yyIGC6L9IoqC" outputId="05a81359-e21c-418f-8832-5fc140a5ea1f"
# read WikiSQL tables
wikisql_tables_train = read_tables(wikisql_tables_file)
# wikisql_tables_dev = read_tables(wikisql_tables_file_dev)
# # + wikisql_tables_dev
wikisql_tables = wikisql_tables_train
# + id="H42Np5JkIoqD"
# remove replicated tables
# Keep only tables that have a page_title and whose header (column list) has
# not been seen yet. `headers` holds lists, so membership is O(n) per table —
# acceptable for one-off preprocessing.
unique_wikisql_tables = []
headers = []
for wt in wikisql_tables:
    if wt["header"] not in headers and "page_title" in wt.keys():
        headers.append(wt["header"])
        unique_wikisql_tables.append(wt)
# + id="C7WehD73IoqD" outputId="a1b7eb53-66a3-4491-eb35-157c4d6f4443"
len(unique_wikisql_tables)
# + id="o5Tu3ATzIoqE"
# # helper code
# unique_wikisql_tables[1000].keys()
# types_all = []
# for tab in unique_wikisql_tables:
# types_all.extend(tab["types"])
# types_all = list(set(types_all))
# print(types_all)
# + id="YCRkdKWEIoqE"
def hasNumbers(inputString):
    """Return True if the string contains at least one decimal digit."""
    for ch in inputString:
        if ch.isdigit():
            return True
    return False
def check_name(inpStr):
    """A token qualifies as a name part: longer than 1 char, no '-', no digits."""
    if len(inpStr) <= 1:
        return False
    if "-" in inpStr:
        return False
    return not hasNumbers(inpStr)
def gen_name(title, must_have=False):
    """Derive a short table name from a title.

    Keeps the last one or two "clean" tokens (per check_name); if none
    qualify and must_have is set, falls back to the first raw token.
    """
    title_tokens = word_tokenize(title)
    qualify_words = [tok for tok in title_tokens if check_name(tok)]
    # 40% of the time take the last two qualifying words, else just the last
    keep = 2 if random.random() < 0.4 else 1
    name = " ".join(qualify_words[-keep:])
    if name != "":
        return name
    return title_tokens[0] if must_have else name
# Normalize the deduplicated WikiSQL tables into the internal table format:
# {'name', 'columns_original', 'columns', 'values', 'column_types'}.
tables_clean = []
for table in unique_wikisql_tables:
    headers = [hd.lower().replace("*", "") for hd in table["header"]]
    sec_title = table["section_title"]
    page_title = table["page_title"]
    caption = table["caption"]
    types = table["types"]
    # derive a table name from section title, then caption, then page title
    table_name = "table"
    if sec_title != "":
        table_name = gen_name(sec_title)
    # print("table_name: ", table_name)
    if table_name == "" and caption != "":
        table_name = gen_name(caption)
    if table_name == "" and page_title != "":
        table_name = gen_name(page_title, True)
    if table_name == "table":
        # no usable name could be derived; drop this table
        continue
    # only keep values of the first 3 rows
    rows = [row for row in table["rows"][:3]]
    #if no id in the table columns, add new column "id" or name with p=0.3
    # and add row ent and type for new added column id
    if random.random() < 0.7:
        index_col = "id"
        if random.random() < 0.3:
            index_col = "name"
        if index_col not in headers:
            headers = [index_col] + headers
            val_add = 1
            if index_col == "name":
                val_add = "value"
            rows = [[val_add] + row for row in rows]
            types = ["text"] + types
    # add * for each table for join table prediction
    headers = ["*"] + headers
    rows = [["all"] + row for row in rows]
    types = ["text"] + types
    # reformat values: column-major, lower-cased strings
    values = [[] for _ in range(len(headers))]
    for row in rows:
        for i, val in enumerate(row):
            values[i].append(str(val).lower())
    table_name = table_name.lower()
    headers[0] = table_name + " " + "*"
    tabn_str = "_".join(table_name.split(" "))
    # prefix each header with the table name; tag real-typed columns
    headers_type = [tabn_str +" "+ hd + " real" if ty == "real" else tabn_str +" "+ hd for hd, ty in zip(headers, types)]
    # print(len(headers), len(types), len(rows[0]))
    assert len(headers) == len(types) == len(rows[0])
    # print(table_name)
    data = {'name': table_name,
            'columns_original': headers,
            'columns': headers_type,
            'values': values,
            'column_types': types}
    tables_clean.append(data)
# + id="exPJozTWIoqF"
def create_dbs(tables):
    """Randomly group standalone tables into small synthetic multi-table DBs.

    NOTE(review): shuffles `tables` in place (mutates the caller's list).
    A group is kept only if it has >1 table and no duplicated
    "column.table" names; the trailing partial group is dropped.
    """
    random.shuffle(tables)
    dbs = []
    cur_cols = []   # qualified column names accumulated for the current group
    db_one = []     # tables in the current group
    ahd_cols = []   # look-ahead column count including the next table
    for i, tab in enumerate(tables):
        # group size is randomly 1 or 2 tables, bounded by MAX_COL_NUM columns
        if len(db_one) <= random.choice([0,1]) and len(ahd_cols) < MAX_COL_NUM:
            db_one.append(tab)
            cur_cols.extend([col+"."+tab["name"] for col in tab["columns"]])
            if i+1 < len(tables):
                ahd_cols = cur_cols + [col+"."+tables[i+1]["name"] for col in tables[i+1]["columns"]]
            else:
                break
        else:
            # close the current group if its qualified column names are unique
            if len(cur_cols) == len(list(set(cur_cols))) and len(db_one) > 1:
                dbs.append(db_one)
            db_one = []
            cur_cols = []
            ahd_cols = []
    return dbs
# + id="VJ6BCNB9IoqG"
wikisql_dbs = create_dbs(tables_clean)
# + id="huqSggXpIoqG"
# for db in wikisql_dbs:
# tab_names = []
# col_count = 0
# for tab in db:
# tab_names.append(tab["name"])
# col_count += len(tab["columns"])
# print("----------")
# print("table names: ", tab_names)
# print("column num: ", col_count)
# print("table num: ", len(tab_names))
# + id="ik35uCIiIoqG" outputId="f70a9b45-4a73-4370-fc37-937268233d49"
len(wikisql_dbs)
# + id="YQrIqtakIoqG"
def gen_dbs_spider(datapath, tablepath, db_list):
    """Read Spider DB schemas + sample rows and convert them to the internal
    table format, restricted to db ids in `db_list`.

    Returns a list of DBs (each a list of table dicts) having >1 usable table.
    """
    with open(datapath) as f:
        db_data_raw = json.load(f)
    # db_id -> {lower-cased table name -> first 3 sample rows}
    db_data = {}
    for dd in db_data_raw:
        db_data[dd["db_id"]] = {}
        values = dd["data"]
        for tbn, vals in values.items():
            db_data[dd["db_id"]][tbn.lower()] = vals[:3]
    # NOTE(review): file handle from open() is never closed; prefer `with`.
    dbs = json.load(open(tablepath))
    print("dbs num: ", len(dbs))
    spider_dbs = {}
    for db in dbs:
        db_id = db['db_id']
        if db_id not in db_list:
            continue
        # print("\nprocessing db: ", db_id)
        spider_dbs[db_id] = []
        # skip formula for now
        if db_id == "formula_1":
            continue
        if db_id in db_data.keys():
            db_values = db_data[db_id]
        else:
            print("---------------------skipping db: ", db_id)
            continue
        #get table column names info
        column_types = db['column_types']
        table_names_original = [cn.lower() for cn in db['table_names_original']]
        table_names = [cn.lower() for cn in db['table_names']]
        column_names_original = [[i, x.lower()] for i, x in db['column_names_original']]
        primary_keys = db["primary_keys"]
        # foreign-key column indices that are not themselves primary keys
        foreign_keys = []
        for ks in db["foreign_keys"]:
            if ks[0] not in primary_keys:
                foreign_keys.append(ks[0])
            if ks[1] not in primary_keys:
                foreign_keys.append(ks[1])
        # decorate column names: primary keys get " id", foreign keys " refer"
        column_names = []
        for idx, ix in enumerate(db['column_names']):
            i, x = ix[0], ix[1].lower()
            if idx in primary_keys and "id" not in x:
                x = x + " id"
            elif idx in foreign_keys:
                x = x + " refer"
            column_names.append([i, x])
        # key columns are always treated as text
        column_types = ["text" if i in primary_keys or i in foreign_keys else ct for i, ct in enumerate(column_types)]
        info_ziped = list(zip(column_names, column_names_original, column_types))
        for i, tabn in enumerate(table_names):
            table = {}
            table["name"] = tabn
            table["columns_original"] = []
            table["columns"] = []
            table['column_types'] = []
            tabng = table_names_original[i]
            for coln_, colng_, colty in info_ziped:
                cid, coln = coln_
                _, colng = colng_
                if colty != "text":
                    colty = "real"
                if cid == i:
                    # column belongs to this table
                    table["columns_original"].append(colng)
                    table["columns"].append(coln)
                    table["column_types"].append(colty)
            # add * for each table for join table prediction
            table["columns_original"] = [tabng + " *"] + table["columns_original"]
            table["columns"] = [tabn + " *"] + table["columns"]
            table["column_types"] = ["text"] + table["column_types"]
            if tabng in db_values.keys():
                rows = db_values[tabng]
                try:
                    # column-major sample values, lower-cased
                    col_values = [[] for _ in range(len(table["columns"])-1)]
                    for row in rows:
                        for r, val in enumerate(row):
                            col_values[r].append(str(val).lower())
                    table["values"] = [len(col_values[0]) * ["all"]] + col_values
                # NOTE(review): bare `except:` swallows everything (including
                # KeyboardInterrupt); narrow to IndexError/KeyError if possible.
                except:
                    print("--------skipping table: ", tabng)
                    continue
            else:
                print("--------skipping table: ", tabng)
                continue
            table["columns"] = [hd + " real" if ty == "real" else hd for hd, ty in zip(table["columns"], table["column_types"])]
            tabn_str = "_".join(tabn.split(" "))
            table["columns"] = [tabn_str + " " + hd for hd in table["columns"]]
            assert len(table["columns"]) == len(table["columns_original"]) == len(table["column_types"]) == len(table["values"])
            spider_dbs[db_id].append(table)
    # keep only DBs with at least two usable tables
    spider_dbs = [db for did, db in spider_dbs.items() if len(db) > 1]
    return spider_dbs
# + id="eTDpwnC6IoqH" outputId="f834acc6-e8a8-4971-ee69-720d0b21adb2"
#read and reformat spider dbs
# Spider file locations and train/dev DB id lists (one db_id per line).
datapath = "/home/t-tyu/projects/NL2CodeOverData/data/spider/db_data.json"
tablepath = spider_data_file + "/tables.json"
train_db_ids_file = '/home/t-tyu/projects/NL2CodeOverData/data/spider/train_db_ids.txt'
dev_db_ids_file = '/home/t-tyu/projects/NL2CodeOverData/data/spider/dev_db_ids.txt'
train_database = []
with open(train_db_ids_file) as f:
    for line in f:
        train_database.append(line.strip())
dev_database = []
with open(dev_db_ids_file) as f:
    for line in f:
        dev_database.append(line.strip())
spider_train_dbs = gen_dbs_spider(datapath, tablepath, train_database)
spider_dev_dbs = gen_dbs_spider(datapath, tablepath, dev_database)
# + id="M5VQqr_cIoqI" outputId="d9bb7f1d-076d-4dc8-93e8-5914255575e5"
# Report how many synthetic DBs each source contributed.
print("wikisql db num: {}\nspider train db num: {}\nspider dev db num: {}".format(
    len(wikisql_dbs),
    len(spider_train_dbs),
    len(spider_dev_dbs)))
# + id="ZTvG4x4WIoqI"
with open("data/qsep_label_map.json", "r") as f:
qsep_label_map = json.load(f)
# + id="0yM5jBPdIoqI"
# Accumulator for concatenated label strings (grouped by key).
concat_label_map = defaultdict(list)
# + id="jMkOGtTjIoqI"
# Token classes used by get_labels when walking a SQL pattern.
STRUCT_KEYWORDS = ["WHERE", "GROUP_BY", "HAVING", "ORDER_BY", "SELECT"]
EXTRA_OPS = ["NOT_IN", "IN", "BETWEEN", "="]
COUNT = "COUNT"
OTHER_KEYWORDS = ["LIMIT"] #AGG, OP, DASC, OR, =
NEST_KEYWORDS = ["EXCEPT", "UNION", "INTERSECT"]
def get_labels(sql_pattern):
    """Walk a SQL template pattern token-by-token and assign each COLUMN slot
    (and '*') a label string describing where it appears (nesting keyword +
    clause + operator/agg context).

    Returns (column_labels: {slot -> label string}, skip: bool) where skip is
    set when an unexpected pattern shape was encountered.
    """
    # multi-word keywords are fused into single tokens before splitting
    sql_tokens = sql_pattern.replace("GROUP BY", "GROUP_BY").replace("ORDER BY", "ORDER_BY").replace("NOT IN", "NOT_IN").split(" ")
    columns = {}
    cur_nest = ""      # current EXCEPT/UNION/INTERSECT (or OP_SEL) nesting context
    cur_struct = ""    # current clause keyword (SELECT/WHERE/...)
    cur_len = len(sql_tokens)
    select_count = 0
    skip = False
    for i, tok in enumerate(sql_tokens):
        if tok in NEST_KEYWORDS:
            if cur_nest == "" or cur_nest == "OP_SEL":
                cur_nest = tok
            else:
                cur_nest = cur_nest + " " + tok
        elif tok in STRUCT_KEYWORDS:
            cur_struct = tok
            if tok == "SELECT":
                select_count += 1
                # a second SELECT without a set-op keyword means a nested sub-select
                if select_count > 1 and cur_nest == "":
                    cur_nest = "OP_SEL"
        elif "COLUMN" in tok or "*" == tok:
            if tok not in columns.keys():
                columns[tok] = []
            # SELECT {COLUMN0}
            # SELECT {COLUMN0} , {COLUMN1}
            # SELECT {AGG0} ( {COLUMN0} )
            # SELECT {COLUMN0} {FROM} WHERE {COLUMN1} {OP} ( SELECT {AGG0} ( {COLUMN1} ) {FROM} ) AND {COLUMN2} {OP0} {VALUE0}
            if cur_struct == "SELECT":
                if "," == sql_tokens[i-1] or "SELECT" == sql_tokens[i-1]:
                    columns[tok].append(cur_nest + " " + cur_struct)
                elif "(" == sql_tokens[i-1]:
                    # aggregated column: record the AGG token two back
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2])
                else:
                    print("\nWarning: unexcepted SELECT format")
                    skip = True
                    print(sql_pattern)
            # WHERE {COLUMN} {OP}
            # WHERE {COLUMN2} {OP0}
            # WHERE OR {COLUMN2} {OP0}
            # WHERE {COLUMN2} BETWEEN
            elif cur_struct == "WHERE":
                assert "OP" in sql_tokens[i+1] or sql_tokens[i+1] in EXTRA_OPS
                last_tok = sql_tokens[i-1]
                if "OR" == last_tok or (i+3 < cur_len and "OR" == sql_tokens[i+3]):
                    columns[tok].append(cur_nest + " " + cur_struct + " OR " + sql_tokens[i+1])
                elif "WHERE" == last_tok or "AND" == last_tok:
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i+1])
                else:
                    print("\nWarning: unexcepted WHERE format")
                    skip = True
            # GROUP BY {COLUMN0} , {COLUMN0}
            elif cur_struct == "GROUP_BY":
                columns[tok].append(cur_nest + " " + cur_struct)
            # HAVING COUNT ( * ) {OP0}
            # HAVING {AGG0} ( {COLUMN2} ) {OP0}
            elif cur_struct == "HAVING":
                last_tok = sql_tokens[i-1]
                if last_tok != "(" and not ("AGG" in sql_tokens[i-2] or COUNT == sql_tokens[i-2]):
                    print("\nWarning: unexcepted HAVING format")
                    skip = True
                columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2] + " " + sql_tokens[i+2])
            # ORDER BY COUNT ( * ) {DASC} LIMIT
            # ORDER BY COUNT ( * ) {DASC}
            # ORDER BY {COLUMN1} {DASC} LIMIT
            # ORDER BY {COLUMN1} LIMIT
            # ORDER BY {COLUMN1} , {COLUMN1} {DASC} LIMIT
            # ORDER BY {COLUMN1} {DASC} if no DASC then is ASC
            elif cur_struct == "ORDER_BY":
                last_tok = sql_tokens[i-1]
                if last_tok == "(":
                    dasc_tok = "{DASC}"
                    limit_tok = ""
                    if sql_tokens[i+2] != "{DASC}":
                        dasc_tok = "ASC"
                        if sql_tokens[i+2] == "LIMIT":
                            limit_tok = "LIMIT"
                    elif i+3 < cur_len and sql_tokens[i+3] == "LIMIT":
                        limit_tok = "LIMIT"
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2] + " " + dasc_tok + " " + limit_tok)
                elif last_tok == "ORDER_BY" or last_tok == ",":
                    dasc_tok = "ASC"
                    limit_tok = ""
                    # small dirty pass
                    if i+1 < cur_len and sql_tokens[i+1] == "{DASC}":
                        dasc_tok = "{DASC}"
                        if i+2 < cur_len and sql_tokens[i+2] == "LIMIT":
                            limit_tok = "LIMIT"
                    elif i+1 < cur_len and sql_tokens[i+1] == "LIMIT":
                        limit_tok = "LIMIT"
                    columns[tok].append(cur_nest + " " + cur_struct + " " + dasc_tok + " " + limit_tok)
            else:
                # COLUMN token seen under an unrecognized clause context
                print("\n------------Warning: unexcepted COLUMN label format")
                skip = True
    # join each slot's accumulated context strings into one label
    column_labels = {}
    for col, labels in columns.items():
        label_str = " ".join([l.strip() for l in labels])
        column_labels[col] = label_str
    return column_labels, skip
# + id="i4xGSNwaIoqJ"
def get_sql_slots(sql_pattern):
    """Extract the fillable slots of a SQL template pattern.

    Returns (slots, columns, ops, values, aggs, dasc):
      slots   - unique {...} tokens (excluding {FROM})
      columns - {COLUMN slot -> list of condition tags ("number", VALUE slot, ...)}
      ops     - {OP slot -> [column slot, optional VALUE slot]}
      values  - {VALUE slot -> "number" | "integer"} for BETWEEN/LIMIT values
      aggs    - {AGG slot -> column slot it aggregates}
      dasc    - True if the pattern contains a {DASC} direction slot
    """
    sql_tokens = sql_pattern.split(" ")
    columns = {}
    ops = {}
    values = {}
    aggs = {}
    dasc = False
    slots = []
    val_pros = []  # VALUE slots already claimed by an OP slot
    for i, tok in enumerate(sql_tokens):
        if "{" in tok and "}" in tok and "FROM" not in tok:
            if tok not in slots:
                slots.append(tok)
            if "AGG" in tok:
                # expect "{AGGi} ( {COLUMNj} )": aggregated column must be numeric
                if i + 2 < len(sql_tokens) and "(" == sql_tokens[i+1]:
                    if "COLUMN" in sql_tokens[i+2]:
                        if sql_tokens[i+2] not in columns.keys():
                            columns[sql_tokens[i+2]] = ["number"]
                        else:
                            columns[sql_tokens[i+2]].append("number")
                        aggs[tok] = sql_tokens[i+2]
                else:
                    print("\nTemplate Error: AGG format is wrong!!!")
                    print(sql_pattern)
            elif "COLUMN" in tok:
                if tok not in columns.keys():
                    columns[tok] = []
            elif "OP" in tok:
                # operator follows either a plain column or an agg-closing ")"
                if i - 1 >= 0 and "COLUMN" in sql_tokens[i-1]:
                    ops[tok] = [sql_tokens[i-1]]
                    if i + 1 < len(sql_tokens) and "VALUE" in sql_tokens[i+1]:
                        ops[tok].append(sql_tokens[i+1])
                        val_pros.append(sql_tokens[i+1])
                elif i - 2 >= 0 and ")" == sql_tokens[i-1] and ("COLUMN" in sql_tokens[i-2] or "*" == sql_tokens[i-2]):
                    ops[tok] = [sql_tokens[i-2]]
                    if i + 1 < len(sql_tokens) and "VALUE" in sql_tokens[i+1]:
                        ops[tok].append(sql_tokens[i+1])
                        val_pros.append(sql_tokens[i+1])
                else:
                    print("\nTemplate Error: OP format is wrong!!!")
                    print(sql_pattern)
            elif "VALUE" in tok and tok not in val_pros:
                """
                OP} {VALUE0}
                LIMIT {VALUE0}
                {COLUMN1} BETWEEN {VALUE0} AND {VALUE1}
                HAVING COUNT ( * ) {OP1} {VALUE1}
                = {VALUE1}
                """
                if i - 2 >= 0 and ("BETWEEN" == sql_tokens[i-1] or "AND" == sql_tokens[i-1]):
                    values[tok] = "number"
                    if "BETWEEN" == sql_tokens[i-1]:
                        columns[sql_tokens[i-2]].append("number")
                elif i - 1 >= 0 and "LIMIT" == sql_tokens[i-1]:
                    values[tok] = "integer"
                elif i - 1 >= 0 and "=" == sql_tokens[i-1]:
                    assert "COLUMN" in sql_tokens[i-2]
                    # equality value: tie it to its column for sampling later
                    columns[sql_tokens[i-2]].append(tok)
                else:
                    print("\nTemplate Error: VALUE format is wrong!!!")
                    print(sql_pattern)
            elif "DASC" in tok:
                dasc = True
    return (list(set(slots)), columns, ops, values, aggs, dasc)
def get_q_slots(question):
    """Collect the unique slot-like tokens of a question template.

    A token qualifies if it contains "TABLE", "SC", or both "{" and "}"
    (after stripping '?', '!', '.').
    """
    slot_like = set()
    for raw in question.strip().split(" "):
        tok = raw.replace("?", "").replace("!", "").replace(".", "")
        if "TABLE" in tok or "SC" in tok or ("{" in tok and "}" in tok):
            slot_like.add(tok)
    return list(slot_like)
def process_constraints(constraints, columns, slots):
    """Apply template constraint strings to slot values and column conditions.

    Returns (slot_values, columns): slot_values pins OP/AGG slots to fixed
    values; columns gains extra condition tags ("id", "T0", "T1").
    Unknown constraint strings are silently ignored, as before.
    """
    # constraint -> fixed slot assignments (asserted to exist in `slots`)
    fixed_slots = {
        "P0==": {"{OP0}": "="},
        "P1==": {"{OP1}": "="},
        "P0=P1==": {"{OP0}": "=", "{OP1}": "="},
        "P0=P1=P2==": {"{OP0}": "=", "{OP1}": "=", "{OP2}": "="},
        "P0=>": {"{OP0}": ">"},
        "P0=<": {"{OP0}": "<"},
        "{AGG0}=MIN": {"{AGG0}": "MIN"},
        "{AGG0}=MAX": {"{AGG0}": "MAX"},
    }
    # constraint -> (column slot, condition tag to append)
    column_tags = {
        "C0-id": ("{COLUMN0}", "id"),
        "C1-id": ("{COLUMN1}", "id"),
        "C2-id": ("{COLUMN2}", "id"),
        "C3-T1": ("{COLUMN3}", "T1"),
    }
    slot_values = {}
    for constraint in constraints:
        if constraint in fixed_slots:
            assignments = fixed_slots[constraint]
            assert all(slot in slots for slot in assignments)
            slot_values.update(assignments)
        elif constraint in column_tags:
            col_slot, tag = column_tags[constraint]
            assert col_slot in slots and col_slot in columns.keys()
            columns[col_slot].append(tag)
        elif constraint == "T0-T1-JOIN" or constraint == "T0-T1-NO-JOIN":
            columns["{COLUMN0}"].append("T0")
            if "{COLUMN1}" in columns.keys():
                columns["{COLUMN1}"].append("T1")
    return (slot_values, columns)
# helper function
def gen_col_info(col_str, columns, columns_inf):
    """Pick a concrete column (and sample values) for the column slot `col_str`.

    columns[col_str] holds condition tags: "id" prefers an id/name-like column,
    "number" prefers a real-typed column, and "VALUE..." tags request sampled
    cell values. columns_inf is a list of (name, type, values) tuples.
    Returns (column_name, value_val) where value_val is None or a list of
    (VALUE slot, sampled value) pairs.
    """
    col_conds = columns[col_str]
    value_slot = [cc for cc in col_conds if "VALUE" in cc]
    col = ""
    value_val = None
    if "id" in col_conds:
        has_id = False
        for c, t, v in columns_inf:
            # FIX: the original tested `"id" in col or "name" in col`, but
            # `col` is still "" here, so the search never matched and the
            # first column was always used; the candidate name is `c`.
            if "id" in c or "name" in c:
                has_id = True
                col, ctype, values = c, t, v
                break
        if not has_id:
            col, ctype, values = columns_inf[0]
    elif "number" in col_conds:
        # keeps the LAST real-typed column (no break), as before
        for colinfo in columns_inf[1:]:
            if colinfo[1] == "real":
                col, ctype, values = colinfo
    if col == "":
        # no constraint matched: pick a random non-* column
        col, ctype, values = random.choice(columns_inf[1:])
    if len(value_slot) > 0:
        assert len(value_slot) < 3
        if len(values) == 0:
            values = ["value"]
            print("\nWarning: column values are empty!")
        if len(value_slot) == 1:
            value_val = [(value_slot[0], random.choice(values))]
        else:
            # two VALUE slots: use two distinct cell values when available
            if len(values) > 2:
                value_val = [(value_slot[0], values[0]), (value_slot[1], values[1])]
            else:
                value_val = [(value_slot[0], values[0]), (value_slot[1], "another value")]
    return (col, value_val)
def replace_dict(inp, dicts):
    """Return `inp` with every key of `dicts` replaced by str(its value)."""
    out = inp
    for needle, replacement in dicts.items():
        out = out.replace(needle, str(replacement))
    return out
def populate_one(db, question, sql_pattern, constraints):
    """Instantiate one (SQL, question) pair from a template against DB `db`.

    Fills every slot of `sql_pattern`/`question` with concrete columns,
    operators, aggregates and values sampled from `db`, honoring the
    constraint strings below. NOTE(review): shuffles `db` in place.

    Known constraints:
    'P0=P1==', 'P0=P1=P2==', 'P0==', 'P1==', 'P0=>', 'P0=<', '{AGG0}=MAX', '{AGG0}=MIN'
    'T0-T1-JOIN', 'T0-T1-NO-JOIN',
    'C0-id',, 'C2-id', , 'C1-id', 'C3-T1'

    Returns (sql_gen, question_gen, column_labels, q_slot_values,
    slot_values, sql_pattern, columns_all).
    """
    slots, columns, ops, vals, aggs, dasc = get_sql_slots(sql_pattern)
    slot_values, columns = process_constraints(constraints, columns, slots)
    q_slots = get_q_slots(question)
    q_slot_values = {}
    # 1 process ops - update columns and values constraints
    for op, colv in ops.items():
        if colv[0] == "*":
            # COUNT(*)-style comparison: numeric operator + small integer value
            if op not in slot_values.keys():
                op_val = random.choice([">", "<", ">=", "<=", "="])
                slot_values[op] = op_val
            if len(colv) == 2:
                slot_values[colv[1]] = random.randint(1, 10)
        else:
            if colv[0] not in columns.keys():
                print("\n-----colv[0] not in columns.keys(): ")
                print(columns.keys())
                print(ops)
            assert colv[0] in columns.keys()
            if op not in slot_values.keys():
                # bias towards equality; otherwise any operator from OPS
                if random.random() < 0.4:
                    op_val = "="
                else:
                    op_val = random.choice(OPS)
                slot_values[op] = op_val
                if op_val in [">", "<", ">=", "<="]:
                    columns[colv[0]].append("number")
            if len(colv) == 2:
                columns[colv[0]].append(colv[1])
    # 2 process columns
    random.shuffle(db)
    table_0, table_1 = None, None
    table_label_0 = ""
    table_label_1 = ""
    use_table_1 = False
    if "{COLUMN0}" in columns.keys() or "{TABLE0}" in q_slots:
        table_label_0 = "SELECT"
    if len(db) >= 2:
        table_0, table_1 = db[:2]
        if "{TABLE1}" in q_slots:
            table_label_1 = "SELECT"
            if "{TABLE0}" in q_slots:
                # p<0.5 from T0, T1 AND to SELECT T1 *
                # otherwise all from T0 AND to SELECT T1 *
                if random.random() < 0.5:
                    use_table_1 = True
            else:
                # p<0.7 all from T0
                # AND to SELECT T1 *
                if random.random() < 0.7:
                    use_table_1 = True
                    if "{COLUMN1}" in columns.keys():
                        table_label_1 = "SELECT"
        else:
            # p<0.5 from T0, T1 AND to SELECT T1 *
            # otherwise all from T0, NOT to SELECT T1 *
            if random.random() < 0.5:
                use_table_1 = True
                if "{COLUMN1}" in columns.keys():
                    table_label_1 = "SELECT"
    else:
        print("\nWarning: db has only one table!")
        # NOTE(review): this assert always fails in this branch, so the
        # fallback assignment below is unreachable.
        assert len(db) > 1
        table_0, table_1 = db[0], db[0]
    T0 = table_0["name"]
    T1 = table_1["name"]
    # (name, type, values) per column, dropping the leading "*" column
    columns_inf_0 = list(zip(table_0["columns"], table_0["column_types"], table_0["values"]))[1:]
    if use_table_1:
        columns_inf_1 = list(zip(table_1["columns"], table_1["column_types"], table_1["values"]))[1:]
    if "{COLUMN0}" in columns.keys():
        col_0, value_0 = gen_col_info("{COLUMN0}", columns, columns_inf_0)
        slot_values["{COLUMN0}"] = col_0
        if value_0 is not None:
            for k, v in value_0:
                slot_values[k] = v
    if use_table_1:
        columns_input = columns_inf_1
        columns_all = columns_inf_0 + columns_inf_1
    else:
        columns_input = columns_inf_0
        columns_all = columns_inf_0
    if "{COLUMN1}" in columns.keys():
        col_1, value_1 = gen_col_info("{COLUMN1}", columns, columns_input)
        slot_values["{COLUMN1}"] = col_1
        if value_1 is not None:
            for k, v in value_1:
                slot_values[k] = v
    if "{COLUMN2}" in columns.keys():
        col_2, value_2 = gen_col_info("{COLUMN2}", columns, columns_input)
        slot_values["{COLUMN2}"] = col_2
        if value_2 is not None:
            for k, v in value_2:
                slot_values[k] = v
    if "{COLUMN3}" in columns.keys():
        col_3, value_3 = gen_col_info("{COLUMN3}", columns, columns_input)
        slot_values["{COLUMN3}"] = col_3
        if value_3 is not None:
            for k, v in value_3:
                slot_values[k] = v
    # 3 aggs
    for agg in aggs.keys():
        if agg not in slot_values.keys():
            slot_values[agg] = random.choice(["MAX", "MIN", "SUM", "AVG"])
    # 4 values
    NUM = 1
    for val, cond in vals.items():
        assert val not in slot_values.keys()
        if cond == "integer":
            # LIMIT value: 1 half the time, else 2..10
            if random.random() < 0.5:
                slot_values[val] = 1
            else:
                NUM = random.randint(2, 10)
                slot_values[val] = NUM
        else:
            slot_values[val] = random.randint(0, 100)
    # 5 dasc - true
    if dasc == True:
        slot_values["{DASC}"] = random.choice(["ASC", "DESC"])
    # 6 check if all sql slot values are done
    if len(slots) != len(slot_values):
        print("\nlen(slots) != len(slot_values)")
        print("sql_pattern: ", sql_pattern)
        print("slots: ", slots)
        print("slot_values: ", slot_values.keys())
    assert len(slots) == len(slot_values)
    # 7 for the questions slots:
    for qs in q_slots:
        if qs == "{TABLE0}":
            q_slot_values["{TABLE0}"] = T0
        elif qs == "{TABLE1}":
            q_slot_values["{TABLE1}"] = T1
        elif "SC" in qs:
            # sort-direction paraphrases keyed by the sampled ASC/DESC value
            sc = slot_values["{DASC}"]
            if "SC" == qs:
                q_slot_values[qs] = random.choice(sql_components["SC"][sc])
            elif "SC_COL_LIMIT" == qs:
                if NUM > 1:
                    sc = sc + "_NUM"
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_LIMIT"][sc]).replace("[NUM]", str(NUM))
                else:
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_LIMIT"][sc])
            elif "SC_COL_COUNT_LIMIT" in qs:
                sc_type = qs.replace("SC_COL_COUNT_LIMIT", "")
                if NUM > 1:
                    sc = sc + "_NUM" + sc_type
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_COUNT_LIMIT"][sc]).replace("[NUM]", str(NUM))
                else:
                    sc = sc + sc_type
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_COUNT_LIMIT"][sc])
            else:
                if "-" not in qs:
                    print("qs wrong", qs)
                assert "-" in qs
                if "C1" in qs:
                    sc_col = slot_values["{COLUMN1}"]
                elif "C2" in qs:
                    sc_col = slot_values["{COLUMN2}"]
                q_slot_values[qs] = random.choice(sql_components["SC_COL"][sc]).replace("[COL]", sc_col)
        else:
            if qs not in slot_values.keys():
                print("qs not in sv: ", qs)
                print("sql_pattern: ", sql_pattern)
                print("slot_values: ", slot_values)
            assert qs in slot_values.keys()
            if "OP" in qs:
                q_slot_values[qs] = random.choice(sql_components["OP"][slot_values[qs]])
            elif "AGG" in qs:
                q_slot_values[qs] = random.choice(sql_components["AGG"][slot_values[qs]])
            elif "COLUMN" in qs:
                # drop the table-name prefix token; keep up to 5 name tokens
                q_slot_values[qs] = " ".join(slot_values[qs].split(" ")[1:6])
            elif "VALUE" in qs:
                q_slot_values[qs] = " ".join(str(slot_values[qs]).split(" ")[:5])
            else:
                print("\nWarning: some q slot type not considered!")
                print(qs)
    # 8 check if all question slots are processed
    assert len(q_slots) == len(q_slot_values)
    # 9 generate final SQL-question pair
    question_gen = replace_dict(question, q_slot_values)
    # 10 generate column labels
    slot_values_new = {}
    for sl, vl in slot_values.items():
        if "COLUMN" in sl:
            # join multi-word column names so the SQL stays one token per column
            slot_values_new[sl] = "_=_".join(vl.split(" "))
        else:
            slot_values_new[sl] = vl
    column_labels, skip = get_labels(sql_pattern)
    column_lables_real = {}
    for col, label in column_labels.items():
        if col != "*":
            col = slot_values[col]
        for slot, value in slot_values.items():
            label = label.replace(slot, str(value))
        column_lables_real[col] = label
    # also add labels for table column *
    if table_label_0 != "":
        column_lables_real[table_0["columns"][0]] = table_label_0
    if table_label_1 != "":
        column_lables_real[table_1["columns"][0]] = table_label_1
    sql_gen = replace_dict(sql_pattern.replace(" {FROM}", ""), slot_values_new)
    return (sql_gen, question_gen, column_lables_real, q_slot_values, slot_values, sql_pattern, columns_all)
# let's start data augmentation!
def augment_db(db, templates, sql_components, aug_limit):
    """Generate `aug_limit` synthetic examples for one DB.

    Each example is (question, sql, column_labels, q_slot_values,
    slot_values, sql_pattern, columns_all, [qsep_label]).
    """
    augment_pairs = []
    for _ in range(aug_limit):
        template = random.choice(templates)
        sql_constraints = template['SQL constraints']
        sql_pattern = template["SQL pattern"]
        question, q_constraints = random.choice(template["questions"])
        constraints = list(set(sql_constraints + q_constraints))
        qsep_label = qsep_label_map[sql_pattern]
        sql_gen, question_gen, column_lables, q_slot_values, slot_values, template, columns_all = populate_one(db, question, sql_pattern, constraints)
        augment_pairs.append((question_gen, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all, [qsep_label]))
    return augment_pairs
def augment_all_dbs(dbs, templates, sql_components, aug_limit):
    """Run augment_db over every DB.

    Returns (augment_data, schema_dbs), both keyed by a flattened schema
    string: column names joined by " </s> ", then " |-| ", then one sample
    value per column joined by " </s> ".
    """
    augment_data = {}
    schema_dbs = {}
    for db in dbs:
        flat_cols = ["*"]
        sample_vals = [""]
        for tab in db:
            flat_cols.extend(tab["columns"])
            sample_vals.extend([v[0] if len(v) > 0 else "" for v in tab["values"]])
        schema_str = " </s> ".join(flat_cols) + " |-| " + " </s> ".join([str(v) for v in sample_vals])
        augment_data[schema_str] = augment_db(db, templates, sql_components, aug_limit)
        schema_dbs[schema_str] = db
    return augment_data, schema_dbs
# + id="VqKnRZNCIoqK"
def count_aug(augment_data):
    """Print and return the total number of augmented examples.

    Parameters
    ----------
    augment_data : dict
        Maps a schema string to a list of example tuples.

    Returns
    -------
    int
        Total example count across all schemas. The count is still printed,
        preserving the original notebook behavior; returning it additionally
        lets callers use the value programmatically.
    """
    count = sum(len(examples) for examples in augment_data.values())
    print(count)
    return count
# + id="i4o-obgvIoqK"
# Augment each corpus: 5 examples per WikiSQL DB, 150 per Spider DB.
augment_data_wikisql, schema_dbs_wikisql = augment_all_dbs(wikisql_dbs, templates, sql_components, 5)
# + id="w1rclHJsIoqK" outputId="bc2e4eca-77c7-4878-d730-0a57414816a1"
count_aug(augment_data_wikisql)
# + id="LRlrauNAIoqK" outputId="551f9992-7c4a-4b43-8d39-b015987d66dd"
augment_data_spider_train, schema_dbs_spider_train = augment_all_dbs(spider_train_dbs, templates, sql_components, 150)
# + id="PB6o1B1iIoqK" outputId="57fb2564-d817-45a0-8623-ed07f6b151f5"
count_aug(augment_data_spider_train)
# + id="OMwh1kYIIoqL"
augment_data_spider_dev, schema_dbs_spider_dev = augment_all_dbs(spider_dev_dbs, templates, sql_components, 150)
# + id="5qzKxadQIoqL" outputId="1cdf9be9-77eb-4e07-a3e3-7607e91dd78c"
count_aug(augment_data_spider_dev)
# + id="WAgmCQJ1IoqL"
# Merged schema-string -> db lookup covering all three corpora.
schema_dbs_all = {**schema_dbs_wikisql, **schema_dbs_spider_train, **schema_dbs_spider_dev}
# + id="APAQo0y4IoqL"
# Only the train + WikiSQL mix is used below; the other combinations are
# kept commented out for reference.
# augment_data_with_dev_wikisql = {**augment_data_spider_dev, **augment_data_spider_train, **augment_data_wikisql}
augment_data_no_dev_wikisql = {**augment_data_spider_train, **augment_data_wikisql}
# augment_data_with_dev_no_wikisql = {**augment_data_spider_dev, **augment_data_spider_train}
# augment_data_no_dev_no_wikisql = augment_data_spider_train
# + id="XfHPuoTeIoqL" outputId="5b12b5f6-064d-4f64-ad76-7a7095ba47cc"
count_aug(augment_data_no_dev_wikisql)
# + [markdown] id="illp4uCOIoqL"
# ### start to generate multi-turn examples
# + id="oUE7iw7dIoqM"
# read context template file
context_templates_file = "data/context_templates.json"
with open(context_templates_file) as json_file:
    context_templates = json.load(json_file)
# SQL vocabulary used when editing a previous turn's query.
SQL_OPS = ('INTERSECT', 'UNION', 'EXCEPT')
AGG_OPS = ["MAX", "MIN", "SUM", "AVG"]
OPS = [">", "<", ">=", "<=", "=", "!="]
# Undoes sqlparse.format(..., reindent=True): maps its newline/indent runs
# back to single spaces so each clause stays on one line.
SQLPARSE_MAP = {"\n ": " ", "\n ": " ", "\n ": " ", "\n ": " ", "\n ": " ", "\n ": " ", "\nhaving": " having", "\nlimit": " limit"}
import sqlparse
# Separator inserted between the follow-up question and the previous turn's
# question. NOTE(review): "<PASSWORD>" looks like a credential-scrubber
# artifact — the original token was probably something like " <prev> ";
# confirm against the model's tokenizer/vocabulary before relying on it.
prev_token = " <PASSWORD>> "
# + id="tooDekxHIoqM"
def col_select(col_conds, columns_inf):
    """Pick a column from `columns_inf` satisfying the conditions in `col_conds`.

    Parameters
    ----------
    col_conds : list of str
        "id" prefers columns whose name contains "id" or "name";
        "number" prefers numeric ("real") columns; any entry containing
        "VALUE" requests that the chosen column's sample value be returned
        for that slot (at most two such entries).
    columns_inf : list of (column_name, column_type, sample_value) triples.

    Returns
    -------
    (col, value_val)
        `col` is the chosen column name; `value_val` is None or a list of
        (value_slot, sample_value) pairs.
    """
    value_slots = [cc for cc in col_conds if "VALUE" in cc]
    col = ""
    value = None
    value_val = None
    if "id" in col_conds:
        has_id = False
        for c, t, v in columns_inf:
            # BUG FIX: the original tested `"id" in col` / `"name" in col`,
            # but `col` is always "" at this point, so the preference never
            # matched (and `values` vs `value` would have raised NameError if
            # it had). Test the candidate name `c` instead.
            if "id" in c or "name" in c:
                has_id = True
                col, ctype, value = c, t, v
                break
        if not has_id:
            # No id/name-like column: fall back to the first column.
            col, ctype, value = columns_inf[0]
    elif "number" in col_conds:
        # Prefer numeric columns; like the original, the LAST "real" column
        # wins (no break).
        for colinfo in columns_inf:
            if colinfo[1] == "real":
                col, ctype, value = colinfo
    if len(columns_inf) == 0:
        print("\n---------------------------------------- columns_inf: ", columns_inf)
    if col == "":
        # No constraint matched: choose any column at random.
        col, ctype, value = random.choice(columns_inf)
    if len(value_slots) > 0:
        assert len(value_slots) < 3
        if len(value_slots) == 1:
            value_val = [(value_slots[0], value)]
        else:
            value_val = [(value_slots[0], value), (value_slots[1], value)]
    return (col, value_val)
def replace_words(s, words):
    """Apply every old -> new replacement in `words` to `s`, in dict order."""
    result = s
    for old, new in words.items():
        result = result.replace(old, new)
    return result
def edit_sql(sql_pattern, context_label, slot_values_prev, columns_all_prev, context_template):
    """Derive a follow-up (SQL pattern, question) from a previous turn's SQL.

    Applies the edit named by `context_label` (e.g. "where insert",
    "select replace column") to the previous turn's `sql_pattern`, reusing
    `slot_values_prev` and sampling new columns/values from
    `columns_all_prev`. Follow-up slots use the x0 suffix convention
    ({COLUMN10}, {OP10}, ...) to avoid colliding with the previous turn's
    slots.

    Returns (sql_pattern_new, slot_values, context_q, satisfy). `satisfy` is
    False when the previous SQL does not meet the edit's preconditions; the
    other return values are then meaningless and callers must retry.
    """
    sql = sql_pattern.lower().replace("{from}", "", 1).strip()
    sql_clauses = {"select": "", "where": "", "group_by": "", "order_by": ""}
    # Reindent with sqlparse so each clause lands on its own line, then undo
    # the extra newlines/indentation via SQLPARSE_MAP (having/limit are folded
    # back into their parent clause line).
    sql = sqlparse.format(sql, reindent=True)
    parsed = [x for x in replace_words(sql, SQLPARSE_MAP).split("\n")]
    slot_values = slot_values_prev.copy()
    # Bucket each parsed line into its clause slot by its first token.
    for p in parsed:
        p_toks = p.split(" ")
        if p_toks[0] == "select":
            sql_clauses["select"] = p
        elif p_toks[0] == "where":
            sql_clauses["where"] = p
        elif p_toks[0] == "group":
            sql_clauses["group_by"] = p
        elif p_toks[0] == "order":
            sql_clauses["order_by"] = p
        else:
            raise Exception("unexcepted sql clause: ", p)
    context_question, context_constraints = random.choice(context_template["questions"])
    context_q_slots = get_q_slots(context_question)
    # Rename this turn's slots {COLUMN1}->{COLUMN10} etc. so they cannot
    # collide with slots already bound by the previous turn.
    context_q_slots = [x.replace("1", "10").replace("2", "20").replace("3", "30") for x in context_q_slots]
    q_slot_values = {}
    sql_pattern_new = ""
    context_q = ""
    satisfy = True
    # --- SELECT-clause edits ---
    if context_label == "select replace column":
        if sql_clauses["group_by"] != "":
            satisfy = False
        else:
            sql_clauses["select"] = "select " + " , ".join(context_q_slots)
            col_num = len(context_q_slots)
            for i, qs in enumerate(context_q_slots):
                col, _ = col_select([], columns_all_prev)
                # if col_num - i <= len(columns_all_prev) and len(columns_all_prev) > 1:
                #     columns_all_prev = [x for x in columns_all_prev if x[0] != col]
                q_slot_values[qs] = " ".join(col.split(" ")[:5])
                slot_values[qs] = col
    elif context_label == "select insert column":
        sql_clauses["select"] = sql_clauses["select"] + " , " + " , ".join(context_q_slots)
        col_num = len(context_q_slots)
        for i, qs in enumerate(context_q_slots):
            col, _ = col_select([], columns_all_prev)
            # if col_num - i <= len(columns_all_prev) and len(columns_all_prev) > 1:
            #     columns_all_prev = [x for x in columns_all_prev if x[0] != col]
            q_slot_values[qs] = " ".join(col.split(" ")[:5])
            slot_values[qs] = col
    elif context_label == "select replace agg":
        if 'agg' not in sql_clauses["select"] or sql_clauses["select"].count("agg") > 1:
            satisfy = False
        else:
            assert sql_clauses["select"].count("agg") == 1
            for s, v in slot_values.items():
                if "AGG" in s:
                    agg_kw_prev, agg_prev = s, v
                    break
            agg_cur_list = [x for x in AGG_OPS if x != agg_prev]
            agg_kw_cur = context_q_slots[0]
            sql_clauses["select"] = sql_clauses["select"].replace(agg_kw_prev.lower(), agg_kw_cur)
            agg_cur = random.choice(agg_cur_list)
            q_slot_values[agg_kw_cur] = random.choice(sql_components["AGG"][agg_cur])
            slot_values[agg_kw_cur] = agg_cur
            if "COLUMN0" in context_question:
                q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
    elif context_label == "select delete column":
        if "agg" in sql_clauses["select"] or sql_clauses["select"].count("column") <= 1 or (sql_clauses["select"].count("column") <= 2 and len(context_q_slots) == 2):
            satisfy = False
        else:
            assert sql_clauses["select"].count("column") > 1
            sql_clauses["select"] = "select " + " , ".join(context_q_slots)
            for qs in context_q_slots:
                if "{COLUMN10}" == qs:
                    # Map {COLUMN10} onto whichever surviving previous-turn
                    # column slot exists, checked from highest to lowest.
                    if "{COLUMN12}" in slot_values.keys():
                        slot_values["{COLUMN10}"] = slot_values["{COLUMN12}"]
                    elif "{COLUMN11}" in slot_values.keys():
                        slot_values["{COLUMN10}"] = slot_values["{COLUMN11}"]
                    elif "{COLUMN1}" in slot_values.keys():
                        slot_values["{COLUMN10}"] = slot_values["{COLUMN1}"]
                    q_slot_values[qs] = slot_values["{COLUMN10}"]
                else:
                    q_slot_values[qs] = slot_values[qs]
    elif context_label == "select delete agg": # need to check more examples
        if "count" not in sql_clauses["select"] or ("*" in sql_clauses["select"] and "column" in sql_clauses["select"]) or (sql_clauses["select"].count("column") == 0 and len(context_q_slots) == 0) or (sql_clauses["select"].count("column0") == 0 and "COLUMN0" in context_question):
            satisfy = False
        else:
            if len(context_q_slots) == 0:
                sql_clauses["select"] = "select {COLUMN0}"
            elif "{COLUMN0}" in context_q_slots:
                q_slot_values[context_q_slots[0]] = slot_values["{COLUMN0}"]
            else:
                col, _ = col_select([], columns_all_prev)
                q_slot_values[context_q_slots[0]] = col
                slot_values[context_q_slots[0]] = col
                sql_clauses["select"] = "select " + context_q_slots[0]
    elif context_label == "select insert agg":
        if "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or sql_clauses["select"].count("column") > 1:
            satisfy = False
        else:
            if "{AGG0}" in context_q_slots:
                # COUNT is weighted 3x relative to the other aggregates.
                slot_values["{AGG0}"] = random.choices(["MAX", "MIN", "SUM", "AVG", "COUNT"], weights=(1, 1, 1, 1, 3), k=1)[0]
                q_slot_values["{AGG0}"] = random.choice(sql_components["AGG"][slot_values["{AGG0}"]])
            else:
                slot_values["{AGG0}"] = "COUNT"
            if sql_clauses["select"].count("column") == 0:
                sql_clauses["select"] = "select {AGG0} (*)"
                q_slot_values["{COLUMN0}"] = ""
            elif sql_clauses["select"].count("column") == 1:
                sql_clauses["select"] = "select {AGG0} ({COLUMN0})"
                q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            else:
                raise Exception("unexcepted select clause: ", sql_clauses["select"])
    elif context_label == "select insert agg and column":
        if "agg" not in sql_clauses["select"] and sql_clauses["group_by"] == "":
            satisfy = False
        else:
            sql_clauses["select"] = sql_clauses["select"] + " , " + "{agg10} ({column10})"
            for i, qs in enumerate(context_q_slots):
                if "AGG" in qs:
                    slot_values[qs] = random.choice(AGG_OPS)
                    q_slot_values[qs] = random.choice(sql_components["AGG"][slot_values[qs]])
                elif "COLUMN" in qs:
                    col, _ = col_select(["number"], columns_all_prev)
                    q_slot_values[qs] = " ".join(col.split(" ")[:5])
                    slot_values[qs] = col
            if "agg" in sql_clauses["select"] and "COLUMN" not in context_question:
                slot_values["{COLUMN10}"] = slot_values["{COLUMN0}"]
            if "agg" not in sql_clauses["select"] and "COLUMN" not in context_question:
                satisfy = False
    elif context_label == "select replace agg and column":
        if sql_clauses["group_by"] == "" and sql_clauses["where"] == "":
            satisfy = False
        else:
            sql_clauses["select"] = "select {agg10} ({column10})"
            for i, qs in enumerate(context_q_slots):
                if "AGG" in qs:
                    slot_values[qs] = random.choice(AGG_OPS)
                    q_slot_values[qs] = random.choice(sql_components["AGG"][slot_values[qs]])
                elif "COLUMN" in qs:
                    col, _ = col_select(["number"], columns_all_prev)
                    q_slot_values[qs] = " ".join(col.split(" ")[:5])
                    slot_values[qs] = col
            if len(context_q_slots) == 0:
                slot_values["{AGG10}"] = "COUNT"
                slot_values["{COLUMN10}"] = "*"
            elif len(context_q_slots) == 4:
                sql_clauses["select"] = "select {agg10} ({column10}) , {agg20} ({column20})"
            if sql_clauses["group_by"] != "":
                # Keep the grouped column visible in the select list.
                gb_col = sql_clauses["group_by"].split(" ")[2].upper()
                assert "COLUMN" in gb_col
                sql_clauses["select"] = sql_clauses["select"] + " , " + gb_col
    # --- WHERE-clause edits ---
    elif context_label == "where insert":
        if "agg" in sql_clauses["select"] or "count" in sql_clauses["select"]:
            satisfy = False
        else:
            if sql_clauses["where"] != "":
                sql_clauses["where"] = sql_clauses["where"] + " AND " + "{COLUMN10} {OP10} {VALUE10}"
            else:
                sql_clauses["where"] = "WHERE {COLUMN10} {OP10} {VALUE10}"
            if "{OP" in context_question:
                op_val = random.choice([">", "<", ">=", "<=", "="])
            else:
                op_val = "="
            slot_values["{OP10}"] = op_val
            q_slot_values["{OP10}"] = random.choice(sql_components["OP"][op_val])
            if op_val != "=":
                # Inequality comparisons only make sense on numeric columns.
                col, value = col_select(["number", "VALUE10"], columns_all_prev)
            else:
                col, value = col_select(["VALUE10"], columns_all_prev)
            q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
            slot_values["{COLUMN10}"] = col
            q_slot_values["{VALUE10}"] = " ".join(str(value[0][1]).split(" ")[:5])
            slot_values["{VALUE10}"] = value[0][1]
            if "COLUMN0" in context_question:
                q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
    elif context_label == "where replace":
        if sql_clauses["where"].count("column") != 1:
            satisfy = False
        else:
            sql_clauses["where"] = "WHERE {COLUMN10} {OP10} {VALUE10}"
            if "{OP" in context_question:
                op_val = random.choice([">", "<", ">=", "<=", "="])
            else:
                op_val = "="
            slot_values["{OP10}"] = op_val
            q_slot_values["{OP10}"] = random.choice(sql_components["OP"][op_val])
            if op_val != "=":
                col, value = col_select(["number", "VALUE10"], columns_all_prev)
            else:
                col, value = col_select(["VALUE10"], columns_all_prev)
            q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
            slot_values["{COLUMN10}"] = col
            q_slot_values["{VALUE10}"] = " ".join(str(value[0][1]).split(" ")[:5])
            slot_values["{VALUE10}"] = value[0][1]
            if "COLUMN0" in context_question:
                q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
    elif context_label == "where replace value":
        if sql_clauses["where"].count("column") != 1 or sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "":
            satisfy = False
        else:
            wh_toks = sql_clauses["where"].split(" ")
            for tok in wh_toks:
                if "column" in tok:
                    wh_col = tok
                elif "value" in tok:
                    wh_val = tok
            sql_clauses["where"] = sql_clauses["where"].replace(wh_val, "{VALUE10}")
            q_slot_values["{COLUMN0}"] = slot_values[wh_col.upper()]
            q_slot_values["{VALUE10}"] = " ".join(str(slot_values[wh_val.upper()]).split(" ")[:2]) + " " + q_slot_values["{COLUMN0}"].split(" ")[0] #just to add noisy to fake value
            slot_values["{VALUE10}"] = q_slot_values["{VALUE10}"]
    elif context_label == "where replace operation":
        if sql_clauses["where"].count("column") != 1 or sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "":
            satisfy = False
        else:
            wh_toks = sql_clauses["where"].split(" ")
            for tok in wh_toks:
                if "column" in tok:
                    wh_col = tok
                elif "value" in tok:
                    wh_val = tok
                elif "op" in tok:
                    wh_op = tok
            sql_clauses["where"] = sql_clauses["where"].replace(wh_op, "{OP10}")
            q_slot_values["{VALUE0}"] = " ".join(str(slot_values[wh_val.upper()]).split(" ")[:4])
            op_prev = slot_values[wh_op.upper()]
            # Pick a different operator than the previous turn used.
            op_cur_list = [x for x in OPS if x != op_prev]
            op_val = random.choice(op_cur_list)
            slot_values["{OP10}"] = op_val
            q_slot_values["{OP10}"] = random.choice(sql_components["OP"][op_val])
    # --- ORDER BY edits ---
    elif context_label == "order_by insert":
        if "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "":
            satisfy = False
        else:
            sql_clauses["order_by"] = "order by {column10} {dasc}"
            sc = random.choice(["ASC", "DESC"])
            slot_values["{DASC}"] = sc
            col, _ = col_select([], columns_all_prev)
            slot_values["{COLUMN10}"] = col
            q_slot_values["SC_COL"] = random.choice(sql_components["SC_COL"][sc]).replace("[COL]", " ".join(col.split(" ")[:5]))
    elif context_label == "order_by insert limit":
        if "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or sql_clauses["order_by"] == "" or "limit" in sql_clauses["order_by"]:
            satisfy = False
        else:
            if "{dasc}" in sql_clauses["order_by"]:
                sc = slot_values["{DASC}"]
            else:
                sc = "ASC"
            sql_clauses["order_by"] += " limit {value10}"
            # limit 1 is sampled three times as often as 2/3/5.
            limit_val = random.choice([1,1,1,2,3,5])
            slot_values["{VALUE10}"] = limit_val
            if limit_val == 1:
                q_slot_values["SC_COL_LIMIT"] = random.choice(sql_components["SC_COL_LIMIT"][sc])
            else:
                q_slot_values["SC_COL_LIMIT"] = random.choice(sql_components["SC_COL_LIMIT"][sc+"_NUM"]).replace("[NUM]", str(limit_val))
    elif context_label == "order_by insert limit | select delete agg and column":
        if "count" not in sql_clauses["select"] or sql_clauses["group_by"] == "" or sql_clauses["order_by"] != "":
            satisfy = False
        else:
            sql_clauses["order_by"] = "order by count (*) {dasc} limit 1"
            sel_cols = [x for x in sql_clauses["select"].split(" ") if "column" in x]
            sql_clauses["select"] = "select " + " , ".join(sel_cols)
            sc = random.choice(["ASC", "DESC"])
            slot_values["{DASC}"] = sc
            q_slot_values["SC_COL_LIMIT"] = random.choice(sql_components["SC_COL_LIMIT"][sc])
            if "{COLUMN0}" in slot_values.keys():
                q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
    elif context_label == "order_by insert limit | select replace column":
        if sql_clauses["order_by"] != "" or sql_clauses["where"] != "":
            satisfy = False
        else:
            col, _ = col_select([], columns_all_prev)
            q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
            slot_values["{COLUMN10}"] = col
            sql_clauses["select"] = "select {column10}"
            sc = random.choice(["ASC", "DESC"])
            slot_values["{DASC}"] = sc
            q_slot_values["SC_COL_LIMIT"] = random.choice(sql_components["SC_COL_LIMIT"][sc])
            if "{COLUMN2}" in context_question:
                col2, _ = col_select(["number"], columns_all_prev)
                q_slot_values["{COLUMN20}"] = " ".join(col2.split(" ")[:5])
                slot_values["{COLUMN20}"] = col2
                sql_clauses["order_by"] = "order by {column20} {dasc} limit 1"
            else:
                sql_clauses["order_by"] = "order by count (*) {dasc} limit 1"
    elif context_label == "order_by replace sc":
        if "agg" in sql_clauses["select"] or "limit" not in sql_clauses["order_by"]:
            satisfy = False
        else:
            if "{dasc}" in sql_clauses["order_by"]:
                sc_prev = slot_values["{DASC}"]
            else:
                sc_prev = "ASC"
            # Flip the previous sort direction.
            sc = "DESC" if sc_prev == "ASC" else "ASC"
            slot_values["{DASC}"] = sc
            q_slot_values["SC_COL_LIMIT"] = random.choice(sql_components["SC_COL_LIMIT"][sc])
            if "{COLUMN0}" in slot_values.keys():
                q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
    elif context_label == "no change":
        # Paraphrase-only follow-up: SQL stays the same, only the question
        # wording changes.
        if "having" not in sql_clauses["group_by"] and "value" not in sql_clauses["order_by"]:
            satisfy = False
        else:
            q_slot_values["{OP0}"] = "top"
            q_slot_values["[NUM]"] = random.choice([3,5,10])
            if "having" in sql_clauses["group_by"]:
                gb_toks = sql_clauses["group_by"].split(" ")
                for tok in gb_toks:
                    if "op" in tok:
                        gb_op = tok.upper()
                q_slot_values["{OP0}"] = random.choice(sql_components["OP"][slot_values[gb_op]])
    # --- GROUP BY edits ---
    elif context_label == "group_by insert | select insert agg and column":
        if sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "" or sql_clauses["where"].count("column") > 1 or "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or "column0" not in sql_clauses["select"]:
            satisfy = False
        else:
            q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            sql_clauses["group_by"] = "group by {column0}"
            if "AGG" not in context_question:
                sql_clauses["select"] = sql_clauses["select"] + " , count (*)"
            else:
                sql_clauses["select"] = sql_clauses["select"] + " , {agg10} ({column10})"
                agg_cur = random.choice(AGG_OPS)
                slot_values["{AGG10}"] = agg_cur
                q_slot_values["{AGG10}"] = random.choice(sql_components["AGG"][agg_cur])
                col, _ = col_select(["number"], columns_all_prev)
                q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
                slot_values["{COLUMN10}"] = col
    elif context_label == "group_by insert | select replace agg and column":
        if sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "" or sql_clauses["where"] != "" or "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or "column0" not in sql_clauses["select"]:
            satisfy = False
        else:
            q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            sql_clauses["group_by"] = "group by {column0}"
            if "AGG" not in context_question:
                sql_clauses["select"] = "select {column0} , count (*)"
            else:
                if "COLUMN1" in context_question:
                    col, _ = col_select([], columns_all_prev)
                    q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
                    slot_values["{COLUMN10}"] = col
                    sql_clauses["select"] = "select {column10} , {agg10} ({column20})"
                    sql_clauses["group_by"] = "group by {column10}"
                else:
                    sql_clauses["select"] = "select {column0} , {agg10} ({column20})"
                agg_cur = random.choice(AGG_OPS)
                slot_values["{AGG10}"] = agg_cur
                q_slot_values["{AGG10}"] = random.choice(sql_components["AGG"][agg_cur])
                col, _ = col_select(["number"], columns_all_prev)
                q_slot_values["{COLUMN20}"] = " ".join(col.split(" ")[:5])
                slot_values["{COLUMN20}"] = col
    elif context_label == "group_by insert | order_by insert":
        if sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "" or sql_clauses["where"] != "" or "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or "column0" not in sql_clauses["select"] or sql_clauses["select"].count("column") > 2:
            satisfy = False
        else:
            q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            sql_clauses["group_by"] = "group by {column0}"
            sql_clauses["order_by"] = "order by count (*) {dasc}"
            if "COLUMN1" in context_question:
                col, _ = col_select([], columns_all_prev)
                q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
                slot_values["{COLUMN10}"] = col
                sql_clauses["group_by"] = "group by {column10}"
            sc = random.choice(["ASC", "DESC"])
            slot_values["{DASC}"] = sc
            q_slot_values["SC"] = random.choice(sql_components["SC"][sc])
    elif context_label == "group_by insert having":
        if "having" in sql_clauses["group_by"] or sql_clauses["order_by"] != "" or sql_clauses["where"] != "" or "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or "column0" not in sql_clauses["select"] or sql_clauses["select"].count("column") > 2:
            satisfy = False
        else:
            q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            if sql_clauses["group_by"] == "":
                sql_clauses["group_by"] = "group by {column0} having count (*) {op0} {value0}"
            else:
                sql_clauses["group_by"] += " having count (*) {op0} {value0}"
            op_val = random.choice(OPS)
            slot_values["{OP0}"] = op_val
            q_slot_values["{OP0}"] = random.choice(sql_components["OP"][op_val])
            value = random.choice([1, 3, 5, 10])
            slot_values["{VALUE0}"] = value
            q_slot_values["{VALUE0}"] = str(value)
    elif context_label == "group_by insert having | select delete agg and column":
        if "having" in sql_clauses["group_by"] or sql_clauses["group_by"] == "" or sql_clauses["order_by"] != "" or sql_clauses["where"] != "" or ("agg" not in sql_clauses["select"] and "count" not in sql_clauses["select"]) or "column0" not in sql_clauses["select"]:
            satisfy = False
        else:
            q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            # Extract the agg(...) expression tokens so they can be moved
            # from the select list into the HAVING clause.
            sel_toks = sql_clauses["select"].split(" ")
            agg_col_toks = []
            for tok in sel_toks:
                if "select" not in tok and "," not in tok:
                    if "({column" in tok or "column" not in tok:
                        agg_col_toks.append(tok)
                        if ")" in tok:
                            break
            agg_col = " ".join(agg_col_toks)
            sql_clauses["select"] = " ".join([x for x in sel_toks if x not in agg_col_toks])
            sql_clauses["group_by"] += " having " + agg_col + " {op0} {value0}"
            op_val = random.choice(OPS)
            slot_values["{OP0}"] = op_val
            q_slot_values["{OP0}"] = random.choice(sql_components["OP"][op_val])
            value = random.choice([1, 3, 5, 10])
            slot_values["{VALUE0}"] = value
            q_slot_values["{VALUE0}"] = str(value)
    elif context_label == "group_by replace | select replace column":
        if sql_clauses["group_by"] == "" or sql_clauses["where"] != "" or "column0" not in sql_clauses["select"] or sql_clauses["select"].count("column") > 2:
            satisfy = False
        else:
            col, _ = col_select([], columns_all_prev)
            q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
            slot_values["{COLUMN10}"] = col
            if "{column0}" in sql_clauses["group_by"]:
                sql_clauses["select"] = sql_clauses["select"].replace("{column0}", "{column10}")
                sql_clauses["group_by"] = sql_clauses["group_by"].replace("{column0}", "{column10}")
            elif "{column1}" in sql_clauses["group_by"]:
                sql_clauses["select"] = sql_clauses["select"].replace("{column1}", "{column10}")
                sql_clauses["group_by"] = sql_clauses["group_by"].replace("{column1}", "{column10}")
            else:
                satisfy = False
    # --- set-operation / nested-SQL edits ---
    elif context_label == "insert SQL":
        if sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "" or sql_clauses["where"].count("value") != 1 or "select" in sql_clauses["where"] or "agg" in sql_clauses["select"] or "count" in sql_clauses["select"] or "column0" not in sql_clauses["select"] or sql_clauses["select"].count("column") > 2:
            satisfy = False
        else:
            wh_toks = sql_clauses["where"].split(" ")
            for tok in wh_toks:
                if "column" in tok:
                    wh_col = tok
                elif "value" in tok:
                    wh_val = tok
                elif "op" in tok:
                    wh_op = tok
            q_slot_values["{COLUMN0}"] = slot_values[wh_col.upper()]
            q_slot_values["{VALUE0}"] = slot_values[wh_val.upper()]
            value = random.choice([1, 3, 5, 10])
            slot_values["{VALUE10}"] = value
            q_slot_values["{VALUE10}"] = str(value)
            op_val = random.choice(OPS)
            slot_values["{OP10}"] = op_val
            q_slot_values["{OP10}"] = random.choice(sql_components["OP"][op_val])
            if "{OP1" in context_question:
                sql_where = " where {column0} {op10} {value10}"
            elif "{VALUE1" in context_question:
                sql_where = " where {column0} {op0} {value10}"
            else:
                sql_where = " where {column0} {op0} {value0}"
            if "COLUMN1" in context_question:
                col, _ = col_select([], columns_all_prev)
                q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
                slot_values["{COLUMN10}"] = col
                sql_clauses["select"] = "select {column10}"
            # The INTERSECT/EXCEPT branch is stored in the group_by slot so it
            # is emitted after the WHERE clause when clauses are rejoined.
            if context_constraints[0] == "intersect":
                sql_clauses["group_by"] = "intersect " + sql_clauses["select"] + sql_where
            else:
                sql_clauses["group_by"] = "except " + sql_clauses["select"] + sql_where
            sql_clauses["where"] = ""
    elif context_label == "where insert SQL":
        if sql_clauses["group_by"] != "" or sql_clauses["order_by"] != "" or sql_clauses["where"].count("column") > 1 or "select" in sql_clauses["where"] or "count" in sql_clauses["select"] or "column0" not in sql_clauses["select"] or sql_clauses["select"].count("column") > 1:
            satisfy = False
        elif "agg" in sql_clauses["select"] and context_constraints[0] == "op":
            col, _ = col_select([], columns_all_prev)
            q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
            slot_values["{COLUMN10}"] = col
            sql_clauses["select"] = "select {column10}"
            q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            op_val = random.choice(OPS)
            slot_values["{OP10}"] = op_val
            q_slot_values["{OP10}"] = random.choice(sql_components["OP"][op_val])
            # Nest the entire previous SQL pattern as a scalar subquery.
            sql_clauses["where"] = "where {column0} {op10} (" + sql_pattern.lower() + ")"
            sql_clauses["where"] = sql_clauses["where"].replace("( ", "(").replace(" )", ")")
        elif "agg" not in sql_clauses["select"] and context_constraints[0] != "op":
            q_slot_values["{COLUMN0}"] = slot_values["{COLUMN0}"]
            if "{TABLE0}" in slot_values.keys():
                q_slot_values["{TABLE0}"] = slot_values["{TABLE0}"]
            else:
                q_slot_values["{TABLE0}"] = ""
            if "COLUMN1" in context_question:
                col, _ = col_select([], columns_all_prev)
                q_slot_values["{COLUMN10}"] = " ".join(col.split(" ")[:5])
                slot_values["{COLUMN10}"] = col
                sql_clauses["select"] = "select {column10}"
            if context_constraints[0] == "not in":
                sql_clauses["where"] = "where {column0} not in (" + sql_pattern.lower() + ")"
            else:
                sql_clauses["where"] = "where {column0} in (" + sql_pattern.lower() + ")"
            sql_clauses["where"] = sql_clauses["where"].replace("( ", "(").replace(" )", ")")
        else:
            satisfy = False
    else:
        print("\n--------------------Unexcepted context template: ", context_label)
        satisfy = False
    if satisfy:
        # print("parsed prev sql: ", parsed)
        # print("slot_values: ", slot_values)
        # print("q_slot_values: ", q_slot_values)
        # Rejoin the edited clauses into a pattern, re-inserting the {FROM}
        # placeholder after the select clause.
        sql_str_list = [v for k, v in sql_clauses.items() if v != ""]
        sql_str_list.insert(1, "{from}")
        sql_pattern_new = " ".join(sql_str_list).upper().replace("(", "( ").replace(")", " )")
        # 9 generate final SQL-question pair
        # Map the collision-free x0 slots back to the template's x1/x2/x3
        # names before filling the question text.
        q_slot_values = {k.replace("10", "1").replace("20", "2").replace("30", "3"): v for k, v in q_slot_values.items()}
        context_q = replace_dict(context_question, q_slot_values)
    return sql_pattern_new, slot_values, context_q, satisfy
def add_augment_context(augment_data, context_templates, schema_dbs):
    """Extend single-turn examples into two-turn dialogues.

    For each example, with probability 0.8 a follow-up question is produced
    by applying a sampled context template via `edit_sql` (retried up to 3
    times); otherwise a fresh, unrelated question is generated so the data
    also contains topic switches. Returns a new dict keyed by the same
    schema strings.

    NOTE(review): the topic-switch branch reads the module-level `templates`
    and both branches read `qsep_label_map`, `populate_one` and `prev_token`
    globals rather than parameters — confirm they are defined before calling.
    """
    #question_gen, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all
    data_new = {}
    skip_count = 0
    count = 0
    augment_iso = augment_data.copy()
    for schema_str, exs in augment_iso.items():
        count += 1
        if count % 10000 == 0:
            print("processed: ", count)
        data_new[schema_str] = []
        for ex in exs:
            # Unpack the previous turn's example tuple.
            sql_pattern = ex[5]
            columns_all_prev = ex[6].copy()
            question_prev = ex[0]
            sql_prev = ex[1]
            col_labels_prev = ex[2].copy()
            q_slot_values_prev = ex[3]
            slot_values_prev = ex[4].copy()
            context_label_list = ex[7].copy()
            if random.random() <= 0.8:
                try_num = 0
                # Set-operation SQL and empty column pools cannot be edited.
                if "INTERSECT" in sql_pattern or "UNION" in sql_pattern or "EXCEPT" in sql_pattern or len(columns_all_prev) < 1:
                    continue
                # Retry up to 3 context templates until preconditions hold.
                while try_num < 3:
                    context_template = random.choice(context_templates)
                    context_label = context_template['label']
                    prereqs = context_template["prereqs"]
                    edited_sql_pattern, slot_values, context_q, satisfy = edit_sql(sql_pattern, context_label, slot_values_prev, columns_all_prev, context_template)
                    try_num += 1
                    if satisfy:
                        break
                if not satisfy:
                    continue
                # Follow-up question is prefixed to the previous question via
                # the separator token.
                context_q = context_q + prev_token + question_prev
                # print("question: ", context_q)
                # print("previous sql pattern: ", sql_pattern)
                # print("edited_sql_pattern: ", edited_sql_pattern)
                # 10 generate column labels
                # Columns in SQL use "_=_" in place of spaces.
                slot_values_new = {}
                for sl, vl in slot_values.items():
                    if "COLUMN" in sl:
                        slot_values_new[sl] = "_=_".join(vl.split(" "))
                    else:
                        slot_values_new[sl] = vl
                column_labels, skip = get_labels(edited_sql_pattern)
                if skip:
                    continue
                column_lables_real = {}
                for col, label in column_labels.items():
                    if col != "*":
                        # Debug dump for slots that were never bound.
                        if col not in slot_values.keys():
                            print("slot_values_prev: ", slot_values_prev)
                            print("q_slot_values_prev: ", q_slot_values_prev)
                            print("sql_pattern: ", sql_pattern)
                            print("context_label: ", context_label)
                            print("edited_sql_pattern: ", edited_sql_pattern)
                            print("slot_values: ", slot_values)
                            print("column_labels: ", column_labels)
                        col = slot_values[col]
                        for slot, value in slot_values.items():
                            label = label.replace(slot, str(value))
                    column_lables_real[col] = label
                edited_sql = replace_dict(edited_sql_pattern.replace(" {FROM}", ""), slot_values_new)
                #(question_gen, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all)
                # print("edited_sql: ", edited_sql)
                # print("column_lables_real: ", column_lables_real)
                # print("")
                context_label_int = qsep_label_map[context_label]
                context_label_list.insert(0, context_label_int)
                data_new[schema_str].append((context_q, edited_sql, column_lables_real, None, slot_values, edited_sql_pattern, columns_all_prev, context_label_list))
            else:
                # Topic switch: sample a fresh, unrelated question for the
                # same database.
                db = schema_dbs[schema_str]
                template = random.choice(templates)
                sql_constraints = template['SQL constraints']
                sql_pattern = template["SQL pattern"]
                question, q_constraints = random.choice(template["questions"])
                constraints = list(set(sql_constraints + q_constraints))
                sql_gen, question_gen, column_lables, q_slot_values, slot_values, template, columns_all = populate_one(db, question, sql_pattern, constraints)
                context_q = question_gen + prev_token + question_prev
                # Label 0 — presumably the "no context edit" class; confirm
                # against qsep_label_map.
                context_label_list.insert(0, 0)
                data_new[schema_str].append((context_q, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all, context_label_list))
    return data_new
# + id="OtxxLzBrIoqO" outputId="32acbd01-5607-4cc6-933f-7bb1c7511caa"
# Build the second dialogue turn on top of the single-turn data.
augment_second_spider_wikisql = add_augment_context(augment_data_no_dev_wikisql, context_templates, schema_dbs_all)
# + id="gOd2X4Q1IoqO"
# Bump the previous turn's x0 slot ids (e.g. {COLUMN10} -> {COLUMN11}) so the
# next turn's freshly generated x0 slots cannot collide with them.
slot_update_dict = {"10": "11", "20": "21", "30": "31"}
def add_augment_context_second(augment_second_data, context_templates, schema_dbs):
    """Extend two-turn dialogues into three-turn dialogues.

    Same flow as `add_augment_context` (0.8 probability of an `edit_sql`
    follow-up, otherwise a fresh question), except the previous turn's slot
    ids are first renamed via the module-level `slot_update_dict` so the new
    turn's slots cannot collide with them.

    NOTE(review): like `add_augment_context`, this reads the globals
    `slot_update_dict`, `templates`, `qsep_label_map`, `populate_one` and
    `prev_token` — confirm they are bound before calling.
    """
    #question_gen, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all, [context_label_int]
    data_new = {}
    skip_count = 0
    count = 0
    augment_second_iso = augment_second_data.copy()
    for schema_str, exs in augment_second_iso.items():
        count += 1
        if count % 10000 == 0:
            print("processed: ", count)
        data_new[schema_str] = []
        for ex in exs:
            # Rename slot ids in both the pattern and the slot-value keys.
            sql_pattern = replace_dict(ex[5], slot_update_dict)
            columns_all_prev = ex[6].copy()
            question_prev = ex[0]
            sql_prev = ex[1]
            col_labels_prev = ex[2].copy()
            q_slot_values_prev = ex[3]
            slot_values_prev = {replace_dict(k, slot_update_dict) : v for k, v in ex[4].items()}.copy()
            context_label_list = ex[7].copy()
            if random.random() <= 0.8:
                try_num = 0
                # Set-operation SQL and empty column pools cannot be edited.
                if "INTERSECT" in sql_pattern or "UNION" in sql_pattern or "EXCEPT" in sql_pattern or len(columns_all_prev) < 1:
                    continue
                # Retry up to 3 context templates until preconditions hold.
                while try_num < 3:
                    context_template = random.choice(context_templates)
                    context_label = context_template['label']
                    prereqs = context_template["prereqs"]
                    edited_sql_pattern, slot_values, context_q, satisfy = edit_sql(sql_pattern, context_label, slot_values_prev, columns_all_prev, context_template)
                    try_num += 1
                    if satisfy:
                        break
                if not satisfy:
                    continue
                context_q = context_q + prev_token + question_prev
                # print("question: ", context_q)
                # print("previous sql pattern: ", sql_pattern)
                # print("edited_sql_pattern: ", edited_sql_pattern)
                # 10 generate column labels
                # Columns in SQL use "_=_" in place of spaces.
                slot_values_new = {}
                for sl, vl in slot_values.items():
                    if "COLUMN" in sl:
                        slot_values_new[sl] = "_=_".join(vl.split(" "))
                    else:
                        slot_values_new[sl] = vl
                column_labels, skip = get_labels(edited_sql_pattern)
                if skip:
                    continue
                column_lables_real = {}
                for col, label in column_labels.items():
                    if col != "*":
                        # Debug dump for slots that were never bound.
                        if col not in slot_values.keys():
                            print("slot_values_prev: ", slot_values_prev)
                            print("q_slot_values_prev: ", q_slot_values_prev)
                            print("sql_pattern: ", sql_pattern)
                            print("context_label: ", context_label)
                            print("edited_sql_pattern: ", edited_sql_pattern)
                            print("slot_values: ", slot_values)
                            print("column_labels: ", column_labels)
                        col = slot_values[col]
                        for slot, value in slot_values.items():
                            label = label.replace(slot, str(value))
                    column_lables_real[col] = label
                edited_sql = replace_dict(edited_sql_pattern.replace(" {FROM}", ""), slot_values_new)
                #(question_gen, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all)
                # print("edited_sql: ", edited_sql)
                # print("column_lables_real: ", column_lables_real)
                # print("")
                context_label_int = qsep_label_map[context_label]
                context_label_list.insert(0, context_label_int)
                data_new[schema_str].append((context_q, edited_sql, column_lables_real, None, slot_values, edited_sql_pattern, columns_all_prev, context_label_list))
            else:
                # Topic switch: sample a fresh, unrelated question for the
                # same database.
                db = schema_dbs[schema_str]
                template = random.choice(templates)
                sql_constraints = template['SQL constraints']
                sql_pattern = template["SQL pattern"]
                question, q_constraints = random.choice(template["questions"])
                constraints = list(set(sql_constraints + q_constraints))
                sql_gen, question_gen, column_lables, q_slot_values, slot_values, template, columns_all = populate_one(db, question, sql_pattern, constraints)
                context_q = question_gen + prev_token + question_prev
                # Label 0 — presumably the "no context edit" class; confirm
                # against qsep_label_map.
                context_label_list.insert(0, 0)
                data_new[schema_str].append((context_q, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all, context_label_list))
    return data_new
# + id="KMz_A9nrIoqP" outputId="fca5d98c-0f75-42b6-d04a-49a8195e4b36"
# Third conversational turn: extend the two-turn data produced earlier
# (add_augment_context_second is defined above in this notebook).
augment_third_spider_wikisql = add_augment_context_second(augment_second_spider_wikisql, context_templates, schema_dbs_all)
# + id="-ywFWzzoIoqP" outputId="ca9f22da-4382-416d-e7d3-6da089522906"
count_aug(augment_third_spider_wikisql)
# + id="PwlNtTnFIoqP"
# Remaps slot-id suffixes before the next turn edits the SQL pattern —
# presumably so the new turn's slots ("12", "22", "32") cannot collide
# with the previous turn's ("10", "20", "30"); TODO confirm against the
# slot naming convention used by the templates.
slot_update_dict = {"10": "12", "20": "22", "30": "32"}
def add_augment_context_third(augment_third_data, context_templates, schema_dbs):
    """Add a fourth conversational turn on top of three-turn examples.

    For every example (keyed by schema string) one of two paths is taken:
    with probability 0.8 a context template is applied to the previous
    turn's SQL pattern via ``edit_sql``; otherwise an independent
    question/SQL pair is generated from a fresh template via
    ``populate_one``.  Either way the new question is concatenated with
    the previous question using ``prev_token`` as separator, and the new
    turn's context label is prepended to ``context_label_list``.

    Returns a dict mapping each schema string to a list of 8-tuples
    shaped like the comment below.
    """
    #question_gen, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all, [context_label_int]
    data_new = {}
    skip_count = 0
    count = 0
    augment_third_iso = augment_third_data.copy()
    for schema_str, exs in augment_third_iso.items():
        count += 1
        if count % 10000 == 0:
            print("processed: ", count)
        data_new[schema_str] = []
        for ex in exs:
            # Remap slot ids in the previous pattern and slot values so they
            # do not clash with the slots introduced by this turn's edit.
            sql_pattern = replace_dict(ex[5], slot_update_dict)
            columns_all_prev = ex[6].copy()
            question_prev = ex[0]
            sql_prev = ex[1]
            col_labels_prev = ex[2].copy()
            q_slot_values_prev = ex[3]
            slot_values_prev = {replace_dict(k, slot_update_dict) : v for k, v in ex[4].items()}.copy()
            context_label_list = ex[7].copy()
            if random.random() <= 0.8:
                try_num = 0
                # Set-operation SQL and examples with no remaining columns are
                # dropped instead of edited.
                if "INTERSECT" in sql_pattern or "UNION" in sql_pattern or "EXCEPT" in sql_pattern or len(columns_all_prev) < 1:
                    continue
                # Try up to 3 randomly chosen context templates until one can
                # be applied (edit_sql reports this via `satisfy`).
                while try_num < 3:
                    context_template = random.choice(context_templates)
                    context_label = context_template['label']
                    prereqs = context_template["prereqs"]
                    edited_sql_pattern, slot_values, context_q, satisfy = edit_sql(sql_pattern, context_label, slot_values_prev, columns_all_prev, context_template)
                    try_num += 1
                    if satisfy:
                        break
                if not satisfy:
                    continue
                # New turn's question comes first; prev_token separates turns.
                context_q = context_q + prev_token + question_prev
                # print("question: ", context_q)
                # print("previous sql pattern: ", sql_pattern)
                # print("edited_sql_pattern: ", edited_sql_pattern)
                # 10 generate column labels
                # Column slot values may contain spaces; joining with "_=_"
                # keeps each column a single token for substitution.
                slot_values_new = {}
                for sl, vl in slot_values.items():
                    if "COLUMN" in sl:
                        slot_values_new[sl] = "_=_".join(vl.split(" "))
                    else:
                        slot_values_new[sl] = vl
                column_labels, skip = get_labels(edited_sql_pattern)
                if skip:
                    continue
                # Resolve slot placeholders in each column label to real values.
                column_lables_real = {}
                for col, label in column_labels.items():
                    if col != "*":
                        # Debug dump: a label references a slot that was never
                        # filled. NOTE(review): when `col` is truly missing, the
                        # lookup below raises KeyError right after these prints.
                        if col not in slot_values.keys():
                            print("slot_values_prev: ", slot_values_prev)
                            print("q_slot_values_prev: ", q_slot_values_prev)
                            print("sql_pattern: ", sql_pattern)
                            print("context_label: ", context_label)
                            print("edited_sql_pattern: ", edited_sql_pattern)
                            print("slot_values: ", slot_values)
                            print("column_labels: ", column_labels)
                        col = slot_values[col]
                    for slot, value in slot_values.items():
                        label = label.replace(slot, str(value))
                    column_lables_real[col] = label
                edited_sql = replace_dict(edited_sql_pattern.replace(" {FROM}", ""), slot_values_new)
                #(question_gen, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all)
                # print("edited_sql: ", edited_sql)
                # print("column_lables_real: ", column_lables_real)
                # print("")
                context_label_int = qsep_label_map[context_label]
                # Newest turn's label goes to the front of the label history.
                context_label_list.insert(0, context_label_int)
                data_new[schema_str].append((context_q, edited_sql, column_lables_real, None, slot_values, edited_sql_pattern, columns_all_prev, context_label_list))
            else:
                # Independent turn: generate a fresh question/SQL pair from a
                # random template; context label 0 presumably marks
                # "no dependence on the previous turn".
                db = schema_dbs[schema_str]
                template = random.choice(templates)
                sql_constraints = template['SQL constraints']
                sql_pattern = template["SQL pattern"]
                question, q_constraints = random.choice(template["questions"])
                constraints = list(set(sql_constraints + q_constraints))
                sql_gen, question_gen, column_lables, q_slot_values, slot_values, template, columns_all = populate_one(db, question, sql_pattern, constraints)
                context_q = question_gen + prev_token + question_prev
                context_label_list.insert(0, 0)
                data_new[schema_str].append((context_q, sql_gen, column_lables, q_slot_values, slot_values, template, columns_all, context_label_list))
    return data_new
# + id="NaoAx6e-IoqP" outputId="4dafc1bd-52f0-45bc-d3a6-24bbc3745d36"
# Fourth conversational turn, built on top of the third-turn data.
augment_fourth_spider_wikisql = add_augment_context_third(augment_third_spider_wikisql, context_templates, schema_dbs_all)
# + id="POBWmiASIoqQ"
### process label prints for each column
def get_label_map(data):
    """Map each column-label string to its 1-based frequency rank.

    Counts every label string appearing in the examples' column-label
    dicts (each example is a (question, sql, col_labels) triple), then
    assigns rank 1 to the most frequent label, 2 to the next, and so on.
    Ties keep first-seen order (stable sort).
    """
    frequencies = defaultdict(int)
    for example_list in data.values():
        for question, sql, col_labels in example_list:
            for label_text in col_labels.values():
                frequencies[label_text] += 1
    ordered = sorted(frequencies.items(), key=lambda item: item[1], reverse=True)
    return {label_text: rank for rank, (label_text, _) in enumerate(ordered, start=1)}
def map_labels(data, label_map, is_dev=False):
    """Replace string column labels with integer ids from *label_map*.

    Each example is expected to be an 8-tuple whose index 2 holds a
    {column: label-string} dict.  Examples containing any label not in
    *label_map* are dropped; the total number of unmapped labels seen is
    printed at the end.  Returns a new dict of schema -> 7-tuples where
    index 3 now holds the {column: label-int} dict (index 6 of the input,
    columns_all, is not carried over).
    """
    remapped = {}
    unmapped_total = 0
    processed = 0
    for schema_str, examples in data.copy().items():
        processed += 1
        if processed % 100000 == 0:
            print("processed: ", processed)
        kept = []
        for example in examples:
            str_labels = example[2]
            int_labels = {}
            misses = 0
            for column, text_label in str_labels.items():
                if text_label in label_map:
                    int_labels[column] = label_map[text_label]
                else:
                    misses += 1
            unmapped_total += misses
            if misses == 0:
                kept.append((example[0], example[1], example[2], int_labels,
                             example[4], example[5], example[7]))
        remapped[schema_str] = kept
    print("skip_count: ", unmapped_total)
    return remapped
# + id="0CCnsGRnIoqQ" outputId="4fa438c2-47fa-49e5-98c4-c78aebd7bb01"
# Convert string column labels to integer ids for every augmentation
# round; examples with unmapped labels are dropped by map_labels.
augment_first_spider_wikisql = map_labels(augment_data_no_dev_wikisql, label_map)
augment_second_spider_wikisql = map_labels(augment_second_spider_wikisql, label_map)
augment_third_spider_wikisql = map_labels(augment_third_spider_wikisql, label_map)
augment_fourth_spider_wikisql = map_labels(augment_fourth_spider_wikisql, label_map)
# + id="cWgGjjBpIoqQ" outputId="97deb5a1-1fdb-47d3-9b29-42ba61195244"
# Merge all four rounds into a single schema -> examples mapping and
# print the total number of examples.
augment_context_all_spider_wikisql = defaultdict(list)
for augment_one in [augment_first_spider_wikisql, augment_second_spider_wikisql, augment_third_spider_wikisql, augment_fourth_spider_wikisql]:
    for schema, examples in augment_one.items():
        augment_context_all_spider_wikisql[schema].extend(examples)
two_count = 0
for schema, examples in augment_context_all_spider_wikisql.items():
    for ex in examples:
        two_count += 1
print(two_count)
# + id="hH0T78sxIoqQ"
# Examples whose question+columns string exceeds this many tokens are dropped.
MAX_TOKEN_LEN = 200
def write_final_file(augment_data):
    """Serialize merged examples into a list of JSON-ready dicts.

    The schema string is expected as "<cols> |-| <values>" with columns
    and values each separated by " </s> ".  Every column gets its integer
    label (0 when unlabeled); column lists are capped at ~40 entries but
    labeled columns are always kept.  Examples are skipped when the
    tokenized question+columns string exceeds MAX_TOKEN_LEN (uses the
    `tokenizer` defined earlier in the notebook).

    NOTE(review): skip_count is never incremented in this version, so the
    "skiped line" summary always prints 0.
    """
    data_json = []
    skip_count = 0
    line_count = 0
    dup_count = 0
    pro_count = 0
    for schema_str, exs in augment_data.items():
        for ex in exs:
            line_count += 1
            if line_count % 100000 == 0:
                print("processed: ", line_count)
            question, sql, label_strs, label_ints, sql_slot_values, sql_pattern, context_label_list = ex
            col_str, val_str = schema_str.split(" |-| ")
            colns = col_str.split(" </s> ")
            values = val_str.split(" </s> ")
            assert len(colns) == len(values)
            cols = []
            label_num = len(label_ints)
            label_count = 0
            for idx, coln in enumerate(colns):
                col = {}
                col["name"] = coln
                col["value"] = values[idx]
                if coln != "*":
                    # Drop the leading token of the column name — presumably a
                    # type/table prefix; TODO confirm against schema_str format.
                    col["name"] = " ".join(coln.split(" ")[1:])
                col["label_int"] = 0
                if coln in label_ints.keys():
                    col["label_int"] = label_ints[coln]
                    label_count += 1
                cols.append(col)
            # label_count can exceed label_num when the same column name
            # appears more than once in the schema.
            assert label_count >= label_num
            if label_count > label_num:
                dup_count += 1
                # print("\nWARNING: deplicated columns!")
                # print("label_ints: ", label_ints)
                # print("colns: ", colns)
            col_list = []
            label_list = []
            value_list = []
            col_count = 0
            for i, col in enumerate(cols):
                # Cap unlabeled columns once the list is >40 long; labeled
                # columns are always kept regardless of position.
                if col_count > 40 and col["label_int"] == 0:
                    continue
                col_list.append(col["name"])
                value_list.append(col["value"])
                col_count += 1
                label_list.append(int(col["label_int"]))
            assert len(col_list) == len(value_list)
            # One context label per turn: turns are separated by prev_token.
            assert question.count(prev_token) + 1 == len(context_label_list)
            label_str = " ".join([str(k) for k in label_list])
            q_col_str = "<s> " + question.lower() + " </s> " + " </s> ".join(col_list).strip() + " </s> "
            example_str = q_col_str + " ||| " + label_str + " ||| " + " ".join([str(x) for x in context_label_list])
            tokens = tokenizer.tokenize(q_col_str)
            if len(tokens) > MAX_TOKEN_LEN:
                continue
            data_json.append({"question": question.lower(),
                              "columns": col_list,
                              "rows": [value_list],
                              "column_labels": label_list,
                              "example_str": example_str,
                              "context_labels": context_label_list
                             })
            pro_count += 1
    print("total line: ", line_count)
    print("skiped line: ", skip_count)
    print("dup line: ", dup_count)
    print("pro line: ", pro_count)
    return data_json
# + id="8jDiEYNeIoqQ" outputId="4800c622-1000-4783-9674-472856bf4a58"
# Build the final JSON-ready example list from all merged rounds.
data_json = write_final_file(augment_context_all_spider_wikisql)
# + id="fNFK7Um4IoqR"
# Persist the augmented dataset for downstream training.
with open('data/augment_spider_wikisql_context.json', 'w') as outfile:
    json.dump(data_json, outfile)
# + id="1TFqpYpmIoqR"
import codecs
def write_to_file(sql_data, output_file):
    """Write each example's preformatted ``example_str`` to *output_file*.

    One example per line; any newlines inside the string are removed so
    the downstream line-oriented reader sees exactly one record per line.

    Args:
        sql_data: list of dicts, each containing an "example_str" key.
        output_file: path of the UTF-8 text file to (over)write.

    Returns:
        The number of lines written (== len(sql_data)).
    """
    num_sql = len(sql_data)
    # Progress print roughly every 10% of the data.  Guard the modulus:
    # int(num_sql * 0.1) is 0 for fewer than 10 examples, which made the
    # old code crash with ZeroDivisionError.
    check_point = max(1, int(num_sql * 0.1))
    valid_count = 0
    # Context manager guarantees the file is closed even on error.
    with open(output_file, "w", encoding="utf-8") as table_file:
        for tn, sql_one in enumerate(sql_data):
            if tn % check_point == 0:
                print("processed: ", str(round(tn / num_sql, 2)))
            valid_count += 1
            # Flatten the example onto a single output line.
            table_file.write(sql_one['example_str'].strip().replace("\n", ""))
            table_file.write("\n")
    return valid_count
# + id="upZ51zbNIoqR" outputId="13a64262-85e0-45be-e736-630baa01d439"
# Write the line-oriented training file alongside the JSON dump.
write_to_file(data_json, "data/augment_spider_wikisql_context.txt")
# + id="ivuiJ9p3IoqR"
# + id="LOKWnfjgIoqR"
# Debug cell: print a sample of fourth-turn examples for manual inspection.
# NOTE(review): the 100-example cap is only checked once per schema, so a
# single large schema can still print all of its examples.  Also, after
# the map_labels pass above, examples are 7-tuples and ex[7] would raise
# IndexError — this cell presumably ran before that remapping; verify
# execution order before rerunning the notebook top-to-bottom.
two_count = 0
for schema, examples in augment_fourth_spider_wikisql.items():
    if two_count > 100:
        break
    for ex in examples:
        two_count += 1
        sql_pattern = ex[5]
        columns_all_prev = ex[6]
        question_prev = ex[0]
        sql_prev = ex[1]
        col_labels_prev = ex[2]
        q_slot_values_prev = ex[3]
        slot_values_prev = ex[4]
        context_label_list = ex[7]
        print("\nsql_pattern: ", sql_pattern)
        print("question: ", question_prev)
        print("sql: ", sql_prev)
        print("column labels: ", col_labels_prev)
        print("slot values: ", slot_values_prev)
        print("context_label_list: ", context_label_list)
# + id="BQ7TcdrfIoqR"
| data_synthesis/augment_spider_wikisql_context.ipynb |