# code stringlengths 38 801k | repo_path stringlengths 6 263 |
# |---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.073147, "end_time": "2022-03-16T23:56:35.085602", "exception": false, "start_time": "2022-03-16T23:56:35.012455", "status": "completed"} tags=[]
# # High Value Customers Identification (Insiders) #
# **By: <NAME>**
# + [markdown] papermill={"duration": 0.037078, "end_time": "2022-03-16T23:56:35.159179", "exception": false, "start_time": "2022-03-16T23:56:35.122101", "status": "completed"} tags=[]
# # IMPORTS
# + papermill={"duration": 3.743119, "end_time": "2022-03-16T23:56:38.939224", "exception": false, "start_time": "2022-03-16T23:56:35.196105", "status": "completed"} tags=[]
import re
import os
import inflection
import sqlite3
import numpy as np
import pandas as pd
import seaborn as sns
import umap.umap_ as umap
from matplotlib import pyplot as plt
from sklearn import metrics as m
from sklearn import preprocessing as pp
from sklearn import decomposition as dd
from sklearn import ensemble as en
from sklearn import manifold as mn
from sklearn import mixture as mx
from sklearn import cluster as c
from scipy.cluster import hierarchy as hc
from plotly import express as px
from sqlalchemy import create_engine
# + [markdown] papermill={"duration": 0.037111, "end_time": "2022-03-16T23:56:39.014591", "exception": false, "start_time": "2022-03-16T23:56:38.977480", "status": "completed"} tags=[]
# ## Loading Data
# + papermill={"duration": 0.470102, "end_time": "2022-03-16T23:56:39.522367", "exception": false, "start_time": "2022-03-16T23:56:39.052265", "status": "completed"} tags=[]
# Read the raw e-commerce extract (Latin-1 encoded Kaggle export).
df_ecomm_raw = pd.read_csv(
    '/home/marxcerqueira/repos/Kaggle-HighValue-Custormers-Identification/data/raw/Ecommerce.csv',
    encoding='iso-8859-1',
    low_memory=False,
)
# The export ships with a spurious trailing column; discard it.
df_ecomm_raw = df_ecomm_raw.drop(columns=['Unnamed: 8'])
# + [markdown]
# # DATA DESCRIPTION
# +
# Work on a copy so the raw load stays untouched.
df0 = df_ecomm_raw.copy()
# + [markdown]
# ## Rename Columns
# +
# Convert the original CamelCase headers to snake_case.
cols_old = ['InvoiceNo', 'StockCode', 'Description', 'Quantity',
            'InvoiceDate', 'UnitPrice', 'CustomerID', 'Country']
df0.columns = [inflection.underscore(name) for name in cols_old]
# + [markdown] papermill={"duration": 0.03831, "end_time": "2022-03-16T23:56:40.008372", "exception": false, "start_time": "2022-03-16T23:56:39.970062", "status": "completed"} tags=[]
# ## Data Dimension
# + papermill={"duration": 0.047854, "end_time": "2022-03-16T23:56:40.096607", "exception": false, "start_time": "2022-03-16T23:56:40.048753", "status": "completed"} tags=[]
# Dataset dimensions.
print(f'Number of rows: {df0.shape[0]}.')
print(f'Number of cols: {df0.shape[1]}.')
# + [markdown]
# ## Data Types
# +
df0.info()
# + [markdown]
# ## Check NA Values
# +
# Per-column missing-value count and percentage.
na_count = df0.isnull().sum()
total_count = df0.isnull().count()
missing_percentage = round(na_count / total_count * 100, 2)
missing_df = pd.DataFrame({'missing value count': na_count, 'percentage': missing_percentage})
missing_df
# +
# Bar chart of missing percentages, annotated with the value on each bar.
barchart = missing_df.plot.bar(y='percentage')
for idx, pct in enumerate(missing_percentage):
    barchart.text(idx, pct, str(pct) + '%')
# + [markdown] papermill={"duration": 0.040011, "end_time": "2022-03-16T23:56:40.924169", "exception": false, "start_time": "2022-03-16T23:56:40.884158", "status": "completed"} tags=[]
# ## Fillout NA
# + papermill={"duration": 0.078533, "end_time": "2022-03-16T23:56:41.043249", "exception": false, "start_time": "2022-03-16T23:56:40.964716", "status": "completed"} tags=[]
# Split rows by whether customer_id is known; rows without an id (~25% of
# the data) are kept, not dropped, so their purchase behaviour is preserved.
df_missing = df0.loc[df0['customer_id'].isna(), :]
df_not_missing = df0.loc[~df0['customer_id'].isna(), :]
# +
# Fill-out-NA strategy: give each orphan invoice a synthetic customer_id,
# numbered sequentially from 19000 (assumed above the real id range —
# TODO confirm against the data).
df_backup = pd.DataFrame( df_missing['invoice_no'].drop_duplicates().copy() )
df_backup['customer_id'] = np.arange( 19000, 19000+len( df_backup ), 1)
# Merge original with the reference frame; the shared 'customer_id' column
# splits into customer_id_x (original) / customer_id_y (synthetic).
df0 = pd.merge( df0, df_backup, on='invoice_no', how='left' )
# Coalesce: keep the original id when present, else the synthetic one.
df0['customer_id'] = df0['customer_id_x'].combine_first( df0['customer_id_y'] )
# Drop the merge leftovers.
df0 = df0.drop( columns=['customer_id_x', 'customer_id_y'], axis=1 )
# +
df0.isna().sum()
# + [markdown]
# ## Change Types
# +
# Transform invoice_date from string to datetime.
df0['invoice_date'] = pd.to_datetime(df0['invoice_date'])
# +
# Every customer_id is filled after the coalesce above, so the cast is safe.
df0['customer_id'] = df0['customer_id'].astype('int64')
# +
df0.dtypes
# + [markdown] papermill={"duration": 0.042409, "end_time": "2022-03-16T23:56:41.981034", "exception": false, "start_time": "2022-03-16T23:56:41.938625", "status": "completed"} tags=[]
# ## Descriptive Statistics
# + papermill={"duration": 0.114197, "end_time": "2022-03-16T23:56:42.137561", "exception": false, "start_time": "2022-03-16T23:56:42.023364", "status": "completed"} tags=[]
df0.describe().T
# +
df0.describe(include = object).T
# +
# Split columns by dtype for separate numeric / categorical profiling.
num_attributes = df0.select_dtypes(include = np.number)
cat_attributes = df0.select_dtypes(exclude = [np.number, np.datetime64])
# + [markdown]
# ### Numerical Attributes
# +
# central tendency - mean, median
ct1 = pd.DataFrame(num_attributes.apply(np.mean)).T
ct2 = pd.DataFrame(num_attributes.apply(np.median)).T
# dispersion - standard deviation, min, max, range, skew, kurtosis
# NOTE(review): np.std computes the population std (ddof=0), unlike the
# pandas Series.std() default (ddof=1) — confirm this is intentional.
d1 = pd.DataFrame(num_attributes.apply(np.std)).T
d2 = pd.DataFrame(num_attributes.apply(np.min)).T
d3 = pd.DataFrame(num_attributes.apply(np.max)).T
d4 = pd.DataFrame(num_attributes.apply(lambda x: x.max()-x.min())).T
d5 = pd.DataFrame(num_attributes.apply(lambda x: x.skew())).T
d6 = pd.DataFrame(num_attributes.apply(lambda x: x.kurtosis())).T
# Concatenate in min/max/range/mean/median/std/skew/kurtosis order to match
# the column labels assigned below.
m1 = pd.concat([d2,d3,d4,ct1,ct2,d1,d5,d6]).T.reset_index()
m1.columns = ['attributes', 'min', 'max', 'range', 'mean', 'mediana','std', 'skew','kurtosis']
m1
# + [markdown] papermill={"duration": 0.043254, "end_time": "2022-03-16T23:56:42.814406", "exception": false, "start_time": "2022-03-16T23:56:42.771152", "status": "completed"} tags=[]
# ### Categorical Attributes
# + [markdown] papermill={"duration": 0.04294, "end_time": "2022-03-16T23:56:42.901586", "exception": false, "start_time": "2022-03-16T23:56:42.858646", "status": "completed"} tags=[]
# #### Invoice_No
# + papermill={"duration": 0.400891, "end_time": "2022-03-16T23:56:43.344566", "exception": false, "start_time": "2022-03-16T23:56:42.943675", "status": "completed"} tags=[]
# invoice_no should be purely numeric, but some invoices carry letters,
# so a straight int cast fails.
non_numeric_mask = df0['invoice_no'].apply(lambda inv: bool(re.search('[^0-9]+', inv)))
df_letter_invoices = df0.loc[non_numeric_mask, :]
df_letter_invoices.head()
print(f'Total number of invoices: {len(df_letter_invoices)}')
print(f"Total number of negative quantity: {len(df_letter_invoices[df_letter_invoices['quantity'] < 0])}")
# + [markdown]
# #### Stock_Code
# +
# Stock codes made of letters only are service rows, not products.
letters_only = df0['stock_code'].apply(lambda code: bool(re.search('^[a-zA-Z]+$', code)))
df0.loc[letters_only, 'stock_code'].unique()
# Action:
## 1. Remove stock_code in ['POST', 'D', 'M', 'PADS', 'DOT', 'CRUK']
# + [markdown] papermill={"duration": 0.046696, "end_time": "2022-03-16T23:56:43.923043", "exception": false, "start_time": "2022-03-16T23:56:43.876347", "status": "completed"} tags=[]
# # VARIABLE FILTERING
# + papermill={"duration": 0.060834, "end_time": "2022-03-16T23:56:44.028447", "exception": false, "start_time": "2022-03-16T23:56:43.967613", "status": "completed"} tags=[]
df1 = df0.copy()
# +
# === Numerical attributes ====
# Keep only rows priced at or above 4 cents (filters free/placeholder items).
df1 = df1.loc[df1['unit_price'] >= 0.04, :]
# === Categorical attributes ====
# Drop service / non-product stock codes spotted during data description.
service_codes = ['POST', 'D', 'DOT', 'M', 'S', 'AMAZONFEE', 'm', 'DCGSSBOY',
                 'DCGSSGIRL', 'PADS', 'B', 'CRUK']
df1 = df1[~df1['stock_code'].isin(service_codes)]
# Description carries no signal for clustering.
df1 = df1.drop(columns='description')
# Country: drop aggregates that map to no lat/long coordinates.
df1 = df1[~df1['country'].isin(['European Community', 'Unspecified'])]
# Known bad customer.
df1 = df1[~df1['customer_id'].isin([16446])]
# Split purchases (quantity >= 0) from returns (quantity < 0).
df1_returns = df1.loc[df1['quantity'] < 0, :].copy()
df1_purchases = df1.loc[df1['quantity'] >= 0, :].copy()
# + [markdown] papermill={"duration": 0.044373, "end_time": "2022-03-16T23:56:44.408984", "exception": false, "start_time": "2022-03-16T23:56:44.364611", "status": "completed"} tags=[]
# # FEATURE ENGINEERING
# + papermill={"duration": 0.055198, "end_time": "2022-03-16T23:56:44.507496", "exception": false, "start_time": "2022-03-16T23:56:44.452298", "status": "completed"} tags=[]
df2 = df1.copy()
# + [markdown]
# ## Feature Creation
# +
# Reference table for the RFM features: one row per customer.
transaction_cols = ['invoice_no', 'stock_code', 'quantity', 'invoice_date',
                    'unit_price', 'country']
df_ref = df2.drop(transaction_cols, axis=1).drop_duplicates(ignore_index=True).copy()
# + [markdown]
# ### Gross Revenue
# +
# Gross revenue per line item = quantity * unit price.
df1_purchases.loc[:, 'gross_revenue'] = df1_purchases.loc[:, 'quantity'] * df1_purchases.loc[:, 'unit_price']
# Monetary: total spend per customer.
df_monetary = df1_purchases.loc[:, ['customer_id', 'gross_revenue']].groupby('customer_id').sum().reset_index()
df_ref = pd.merge(df_ref, df_monetary, on='customer_id', how='left')
df_ref.isna().sum()
# + [markdown] papermill={"duration": 0.041441, "end_time": "2022-03-16T23:56:44.978340", "exception": false, "start_time": "2022-03-16T23:56:44.936899", "status": "completed"} tags=[]
# ### Recency
# + papermill={"duration": 0.064355, "end_time": "2022-03-16T23:56:45.084701", "exception": false, "start_time": "2022-03-16T23:56:45.020346", "status": "completed"} tags=[]
# Recency: days since each customer's last purchase, measured against the
# most recent invoice date in the whole (filtered) dataset.
df_recency = df1_purchases.loc[:, ['customer_id', 'invoice_date']].groupby( 'customer_id' ).max().reset_index()
df_recency['recency_days'] = ( df1['invoice_date'].max() - df_recency['invoice_date'] ).dt.days
df_recency = df_recency[['customer_id', 'recency_days']].copy()
df_ref = pd.merge( df_ref, df_recency, on='customer_id', how='left' )
df_ref.isna().sum()
# + [markdown]
# ### Qty Products (different stock codes by customer)
# +
# NOTE(review): .count() tallies purchase line items, so repeated stock
# codes count multiple times; for truly *different* stock codes per
# customer (as the section title says) this would be .nunique() — confirm
# which was intended before changing, as it alters the feature downstream.
df_freq = (df1_purchases.loc[:, ['customer_id', 'stock_code']].groupby( 'customer_id' ).count()
                        .reset_index()
                        .rename( columns={'stock_code': 'qty_products'} ) )
df_ref = pd.merge( df_ref, df_freq, on='customer_id', how='left' )
df_ref.isna().sum()
# + [markdown] papermill={"duration": 0.044073, "end_time": "2022-03-16T23:56:45.389619", "exception": false, "start_time": "2022-03-16T23:56:45.345546", "status": "completed"} tags=[]
# ### Frequency
# + papermill={"duration": 0.808501, "end_time": "2022-03-16T23:56:46.245893", "exception": false, "start_time": "2022-03-16T23:56:45.437392", "status": "completed"} tags=[]
# Purchase frequency (rate of purchases per day of activity):
#   days_ = inclusive span between first and last purchase (minimum 1 day)
#   buy_  = number of distinct invoices
df_aux = ( df1_purchases[['customer_id', 'invoice_no', 'invoice_date']].drop_duplicates()
                                                                    .groupby( 'customer_id')
                                                                    .agg( max_ = ( 'invoice_date', 'max' ),
                                                                          min_ = ( 'invoice_date', 'min' ),
                                                                          days_= ( 'invoice_date', lambda x: ( ( x.max() - x.min() ).days ) + 1 ),
                                                                          buy_ = ( 'invoice_no', 'count' ) ) ).reset_index()
# Frequency = invoices per day; guard against a zero-day span.
df_aux['frequency'] = df_aux[['buy_', 'days_']].apply( lambda x: x['buy_'] / x['days_'] if x['days_'] != 0 else 0, axis=1 )
# Merge
df_ref = pd.merge( df_ref, df_aux[['customer_id', 'frequency']], on='customer_id', how='left' )
df_ref.isna().sum()
# + [markdown]
# ### Number of Returns
# +
# Total items returned per customer; returns carry negative quantity, so
# the sign is flipped to get a positive count.
df_returns = df1_returns[['customer_id', 'quantity']].groupby( 'customer_id' ).sum().reset_index().rename( columns={'quantity':'qty_returns'} )
df_returns['qty_returns'] = df_returns['qty_returns'] * -1
df_ref = pd.merge( df_ref, df_returns, how='left', on='customer_id' )
df_ref.loc[df_ref['qty_returns'].isna(), 'qty_returns'] = 0 # customers with no returned items
df_ref.isna().sum()
# + [markdown] papermill={"duration": 0.041617, "end_time": "2022-03-16T23:56:46.516935", "exception": false, "start_time": "2022-03-16T23:56:46.475318", "status": "completed"} tags=[]
# # EXPLORATORY DATA ANALYSIS (EDA)
# + papermill={"duration": 0.049873, "end_time": "2022-03-16T23:56:46.608544", "exception": false, "start_time": "2022-03-16T23:56:46.558671", "status": "completed"} tags=[]
# Drop customers with any missing feature.
df3 = df_ref.dropna().copy()
df3.isna().sum()
# + [markdown]
# ## Space Study
# +
# Features kept after the feature-selection analysis; customer_id is only
# an identifier and is excluded from the study space.
cols_selected = ['customer_id', 'gross_revenue', 'recency_days', 'qty_products', 'frequency', 'qty_returns']
df33 = df3[cols_selected].drop(columns='customer_id')
# +
df33.head()
# +
# Rescale every feature to [0, 1], fitting the scaler per column.
mm = pp.MinMaxScaler()
for feature in ['gross_revenue', 'recency_days', 'qty_products', 'frequency', 'qty_returns']:
    df33[feature] = mm.fit_transform(df33[[feature]])
X = df33.copy()
# +
X.shape
# + [markdown] papermill={"duration": 0.044126, "end_time": "2022-03-16T23:56:47.167167", "exception": false, "start_time": "2022-03-16T23:56:47.123041", "status": "completed"} tags=[]
# #### PCA
# + papermill={"duration": 0.121096, "end_time": "2022-03-16T23:56:47.335287", "exception": false, "start_time": "2022-03-16T23:56:47.214191", "status": "completed"} tags=[]
pca = dd.PCA( n_components = X.shape[1])
principal_components = pca.fit_transform(X)
# plot explained variables
features = range(pca.n_components_)
plt.bar(features, pca.explained_variance_ratio_, color = 'black') #quais componentes principais com a maior variação de dados
# pca component
df_pca = pd.DataFrame( principal_components )
# + papermill={"duration": 0.154426, "end_time": "2022-03-16T23:56:47.533749", "exception": false, "start_time": "2022-03-16T23:56:47.379323", "status": "completed"} tags=[]
sns.scatterplot(x = 0, y = 1, data = df_pca);
# + [markdown] papermill={"duration": 0.045189, "end_time": "2022-03-16T23:56:47.623609", "exception": false, "start_time": "2022-03-16T23:56:47.578420", "status": "completed"} tags=[]
# #### UMAP
# + papermill={"duration": 14.340643, "end_time": "2022-03-16T23:57:02.008775", "exception": false, "start_time": "2022-03-16T23:56:47.668132", "status": "completed"} tags=[]
reducer = umap.UMAP(random_state = 42)
embedding = reducer.fit_transform(X) #gera o espaço projetado - embedding é a projeção gerada em outro espaço
#embedding
df_umap = pd.DataFrame()
df_umap['embedding_X'] = embedding[:, 0]
df_umap['embedding_y'] = embedding[:, 1]
#plot UMAP - cluster projetado de alta dimencionalidade
sns.scatterplot(x = 'embedding_X', y = 'embedding_y',
data = df_umap);
# + [markdown] papermill={"duration": 0.04506, "end_time": "2022-03-16T23:57:02.102302", "exception": false, "start_time": "2022-03-16T23:57:02.057242", "status": "completed"} tags=[]
# #### t-SNE
# + papermill={"duration": 11.083715, "end_time": "2022-03-16T23:57:13.232449", "exception": false, "start_time": "2022-03-16T23:57:02.148734", "status": "completed"} tags=[]
# Project the scaled feature space down to 2-D with t-SNE.
reducer = mn.TSNE(n_components=2, n_jobs=-1, random_state=42)
embedding = reducer.fit_transform(X)
df_tsne = pd.DataFrame({'embedding_X': embedding[:, 0],
                        'embedding_y': embedding[:, 1]})
# Scatter of the projected space.
sns.scatterplot(x='embedding_X', y='embedding_y', data=df_tsne);
# + [markdown] papermill={"duration": 0.047428, "end_time": "2022-03-16T23:57:13.326650", "exception": false, "start_time": "2022-03-16T23:57:13.279222", "status": "completed"} tags=[]
# #### Tree-Based Embedding
# + papermill={"duration": 0.055105, "end_time": "2022-03-16T23:57:13.429388", "exception": false, "start_time": "2022-03-16T23:57:13.374283", "status": "completed"} tags=[]
df3.head()
# +
# Tree-based embedding: train a random forest to predict gross_revenue,
# then use each sample's leaf indices (one per tree) as a 100-dim embedding.
X = df33.drop(columns = ['gross_revenue'], axis = 1) # features (target removed)
y = df33['gross_revenue'] # target variable
# Boruta could be used here to select features for a better embedding space.
# model definition
rf_model = en.RandomForestRegressor(n_estimators = 100, random_state = 42)
# model training
rf_model.fit(X,y)
# leaf indices per sample: one column per tree
df_leaf = pd.DataFrame(rf_model.apply( X ))
# +
# Reduce the leaf-index space (100 columns) to 2-D with UMAP.
reducer = umap.UMAP(random_state = 42)
embedding = reducer.fit_transform(df_leaf)
#embedding
df_tree = pd.DataFrame()
df_tree['embedding_X'] = embedding[:, 0]
df_tree['embedding_y'] = embedding[:, 1]
# Scatter of the projected space.
sns.scatterplot(x = 'embedding_X', y = 'embedding_y',
                data = df_tree);
# + [markdown] papermill={"duration": 0.051896, "end_time": "2022-03-16T23:57:22.179925", "exception": false, "start_time": "2022-03-16T23:57:22.128029", "status": "completed"} tags=[]
# # DATA PREPARATION
# + papermill={"duration": 0.054524, "end_time": "2022-03-16T23:57:22.284886", "exception": false, "start_time": "2022-03-16T23:57:22.230362", "status": "completed"} tags=[]
# Embedding chosen for the downstream clustering: tree-based.
df4 = df_tree.copy()
# # UMAP embedding
# df4 = df_umap.copy()
# # t-SNE embedding
# df4 = df_tsne.copy()
# + [markdown]
# # HYPERPARAMETER FINE-TUNNING
# +
X = df4.copy()
# +
X.head()
# +
# Candidate cluster counts; the upper bound was raised to 30 because the
# silhouette kept improving with k.
clusters = np.arange(2, 31, 1)
clusters
# + [markdown] papermill={"duration": 0.050041, "end_time": "2022-03-16T23:57:22.799164", "exception": false, "start_time": "2022-03-16T23:57:22.749123", "status": "completed"} tags=[]
# ## K-Means
# + papermill={"duration": 54.455357, "end_time": "2022-03-16T23:58:17.305098", "exception": false, "start_time": "2022-03-16T23:57:22.849741", "status": "completed"} tags=[]
# Silhouette score of K-Means for each candidate k.
kmeans_sil = []
for n_clusters in clusters:
    # define and train the model
    kmeans_model = c.KMeans(n_clusters=n_clusters, n_init=100, random_state=42)
    kmeans_model.fit(X)
    # assign each point to its cluster and score the partition
    labels = kmeans_model.predict(X)
    kmeans_sil.append(m.silhouette_score(X, labels, metric='euclidean'))
# +
plt.plot(clusters, kmeans_sil, linestyle='--', marker='o', color='b')
plt.xlabel('K');
plt.ylabel('Silhouette Score');
plt.title('KMeans Silhouette Score per K ');
# + [markdown] papermill={"duration": 0.049528, "end_time": "2022-03-16T23:58:17.587456", "exception": false, "start_time": "2022-03-16T23:58:17.537928", "status": "completed"} tags=[]
# ## GMM
# + papermill={"duration": 685.392078, "end_time": "2022-03-17T00:09:43.029021", "exception": false, "start_time": "2022-03-16T23:58:17.636943", "status": "completed"} tags=[]
# Silhouette score of a Gaussian Mixture for each candidate k.
gmm_sil = []
for n_components in clusters:
    # define and train the model
    gmm_model = mx.GaussianMixture(n_components=n_components, n_init=100, random_state=42)
    gmm_model.fit(X)
    # assign each point to its component and score the partition
    labels = gmm_model.predict(X)
    gmm_sil.append(m.silhouette_score(X, labels, metric='euclidean'))
# +
plt.plot(clusters, gmm_sil, linestyle='--', marker='o', color='b')
plt.xlabel('K');
plt.ylabel('Silhouette Score');
plt.title('GMM Silhouette Score per K ');
# + [markdown] papermill={"duration": 0.071386, "end_time": "2022-03-17T00:09:43.332121", "exception": false, "start_time": "2022-03-17T00:09:43.260735", "status": "completed"} tags=[]
# ## Hierarchical Clustering
# + papermill={"duration": 0.73939, "end_time": "2022-03-17T00:09:44.124180", "exception": false, "start_time": "2022-03-17T00:09:43.384790", "status": "completed"} tags=[]
# model definition and training
hc_model = hc.linkage(X, 'ward')
# + [markdown] papermill={"duration": 0.052567, "end_time": "2022-03-17T00:09:44.229370", "exception": false, "start_time": "2022-03-17T00:09:44.176803", "status": "completed"} tags=[]
# ### H-Clustering Silhouette Score
# + code_folding=[] papermill={"duration": 30.772963, "end_time": "2022-03-17T00:10:15.054352", "exception": false, "start_time": "2022-03-17T00:09:44.281389", "status": "completed"} tags=[]
# Silhouette analysis for hierarchical clustering.
# Fix: the Ward linkage does not depend on k, so it is computed once here
# instead of being rebuilt on every loop iteration (the original recomputed
# the same dendrogram for each candidate k).
hc_model = hc.linkage(X, 'ward')
hc_sil = []
for k in clusters:
    # model predict: cut the dendrogram into k flat clusters
    labels = hc.fcluster(hc_model, k, criterion = 'maxclust')
    # metrics
    sil = m.silhouette_score(X, labels, metric = 'euclidean')
    hc_sil.append(sil)
# + papermill={"duration": 0.126439, "end_time": "2022-03-17T00:10:15.237681", "exception": false, "start_time": "2022-03-17T00:10:15.111242", "status": "completed"} tags=[]
# visualize hierarchical-clustering silhouette score per k
# (axis labels and title added for consistency with the KMeans and GMM plots)
plt.plot(clusters, hc_sil, linestyle = '--', marker = 'o', color = 'b')
plt.xlabel('K');
plt.ylabel('Silhouette Score');
plt.title('HC Silhouette Score per K');
# + [markdown] papermill={"duration": 0.054128, "end_time": "2022-03-17T00:10:15.346594", "exception": false, "start_time": "2022-03-17T00:10:15.292466", "status": "completed"} tags=[]
# ## Results
# + cell_style="split" papermill={"duration": 0.125174, "end_time": "2022-03-17T00:10:15.525905", "exception": false, "start_time": "2022-03-17T00:10:15.400731", "status": "completed"} tags=[]
## Results - Tree Based Embedding
# Silhouette score per algorithm (rows) and per k (columns); the best k for
# each algorithm is highlighted in green.
# Fix: row label 'KMeans:' carried a stray colon, inconsistent with 'GMM'/'HC'.
df_results = pd.DataFrame({'KMeans': kmeans_sil,
                           'GMM': gmm_sil,
                           'HC': hc_sil}
                          ).T
df_results.columns = clusters
df_results.style.highlight_max(color = 'lightgreen', axis = 1)
# + cell_style="split" papermill={"duration": 0.076171, "end_time": "2022-03-17T00:10:15.655538", "exception": false, "start_time": "2022-03-17T00:10:15.579367", "status": "completed"} tags=[]
## Results - UMAP Embedding
# Fix: row label 'KMeans:' carried a stray colon, inconsistent with 'GMM'/'HC'.
# NOTE(review): this cell reuses the same kmeans_sil/gmm_sil/hc_sil lists as
# the previous cell, so it shows identical numbers unless the silhouette loops
# were re-run on the UMAP embedding first — confirm the intended workflow.
df_results = pd.DataFrame({'KMeans': kmeans_sil,
                           'GMM': gmm_sil,
                           'HC': hc_sil}
                          ).T
df_results.columns = clusters
df_results.style.highlight_max(color = 'lightgreen', axis = 1)
# + cell_style="split" papermill={"duration": 0.077223, "end_time": "2022-03-17T00:10:15.787103", "exception": false, "start_time": "2022-03-17T00:10:15.709880", "status": "completed"} tags=[]
## Results - TSNE Embedding
# Fix: row label 'KMeans:' carried a stray colon, inconsistent with 'GMM'/'HC'.
# NOTE(review): as with the UMAP cell, the same sil lists are reused — the
# table only reflects the TSNE embedding if the loops were re-run on it.
df_results = pd.DataFrame({'KMeans': kmeans_sil,
                           'GMM': gmm_sil,
                           'HC': hc_sil}
                          ).T
df_results.columns = clusters
df_results.style.highlight_max(color = 'lightgreen', axis = 1)
# + [markdown] papermill={"duration": 0.056297, "end_time": "2022-03-17T00:10:15.901667", "exception": false, "start_time": "2022-03-17T00:10:15.845370", "status": "completed"} tags=[]
# # MACHINE LEARNING MODEL TRAINING
# + [markdown] papermill={"duration": 0.056092, "end_time": "2022-03-17T00:10:16.015123", "exception": false, "start_time": "2022-03-17T00:10:15.959031", "status": "completed"} tags=[]
# ## K-Means
# + papermill={"duration": 1.701353, "end_time": "2022-03-17T00:10:17.774838", "exception": false, "start_time": "2022-03-17T00:10:16.073485", "status": "completed"} tags=[]
# model definition
# final model: k = 8 chosen from the silhouette analysis above
# (stray trailing semicolon removed)
k = 8
kmeans = c.KMeans(init = 'random', n_clusters = k, n_init = 100, max_iter = 300, random_state = 42)
# model training
kmeans.fit(X)
# clustering: one cluster label per customer row of X
labels = kmeans.labels_
# + papermill={"duration": 0.065633, "end_time": "2022-03-17T00:10:17.904369", "exception": false, "start_time": "2022-03-17T00:10:17.838736", "status": "completed"} tags=[]
# # trying with GMM beacuse of its approach in the embedding space
# # k=11 ;
# # model definition
# gmm_model = mx.GaussianMixture(n_components = k,n_init = 10 ,random_state=42)
# # model training
# gmm_model.fit(X)
# # model prediction
# labels = gmm_model.predict(X)
# + [markdown] papermill={"duration": 0.058536, "end_time": "2022-03-17T00:10:18.033389", "exception": false, "start_time": "2022-03-17T00:10:17.974853", "status": "completed"} tags=[]
# ## Cluster Validation
# + papermill={"duration": 0.492535, "end_time": "2022-03-17T00:10:18.583461", "exception": false, "start_time": "2022-03-17T00:10:18.090926", "status": "completed"} tags=[]
# WSS (Within-cluster Sum of Square )
# print('WSS score: {}'.format(kmeans.inertia_))
# SS (Silhouette Score)
# overall quality of the final clustering (range [-1, 1]; higher is better)
print('SS score: {}'.format(m.silhouette_score(X, labels, metric = 'euclidean')))
# + [markdown] papermill={"duration": 0.062093, "end_time": "2022-03-17T00:10:18.705288", "exception": false, "start_time": "2022-03-17T00:10:18.643195", "status": "completed"} tags=[]
# # CLUSTER ANALYSIS
# + papermill={"duration": 0.063151, "end_time": "2022-03-17T00:10:18.828005", "exception": false, "start_time": "2022-03-17T00:10:18.764854", "status": "completed"} tags=[]
# attach the final cluster labels to a copy of the embedded feature space
df9 = X.copy()
df9['cluster'] = labels
# + [markdown] papermill={"duration": 0.058159, "end_time": "2022-03-17T00:10:18.946452", "exception": false, "start_time": "2022-03-17T00:10:18.888293", "status": "completed"} tags=[]
# ## Visualization Inspection
# + papermill={"duration": 0.432456, "end_time": "2022-03-17T00:10:19.435336", "exception": false, "start_time": "2022-03-17T00:10:19.002880", "status": "completed"} tags=[]
# k = 8 for KMeans
# 2-D embedding colored by cluster assignment
# NOTE(review): column names mix cases ('embedding_X' vs 'embedding_y') —
# verify they match the columns created when the embedding was built.
sns.scatterplot(x = 'embedding_X', y = 'embedding_y', hue = 'cluster', data = df9, palette = 'deep')
# + [markdown] papermill={"duration": 0.056165, "end_time": "2022-03-17T00:10:19.547746", "exception": false, "start_time": "2022-03-17T00:10:19.491581", "status": "completed"} tags=[]
# ## Cluster Profile
# + papermill={"duration": 0.076526, "end_time": "2022-03-17T00:10:19.681110", "exception": false, "start_time": "2022-03-17T00:10:19.604584", "status": "completed"} tags=[]
# build the cluster-profile table on the original (non-embedded) features
df92 = df3[cols_selected].copy()
df92['cluster'] = labels
df92.head()
# + papermill={"duration": 0.091623, "end_time": "2022-03-17T00:10:19.829540", "exception": false, "start_time": "2022-03-17T00:10:19.737917", "status": "completed"} tags=[]
# Explaining clusters profile based on these averages.
# A single named aggregation replaces the original chain of five separate
# groupby/merge pairs; the resulting table (columns, order and values) is
# unchanged.
df_cluster = (df92.groupby('cluster')
                  .agg(customer_id   = ('customer_id', 'count'),    # number of customers
                       gross_revenue = ('gross_revenue', 'mean'),   # avg gross revenue
                       recency_days  = ('recency_days', 'mean'),    # avg recency (days)
                       qty_products  = ('qty_products', 'mean'),    # avg qty of products
                       frequency     = ('frequency', 'mean'),       # avg purchase frequency
                       qty_returns   = ('qty_returns', 'mean'))     # avg qty of returns
                  .reset_index())
# percentage of the customer base in each cluster, placed right after the count
df_cluster.insert(2, 'perc_customer', 100 * (df_cluster['customer_id'] / df_cluster['customer_id'].sum()))
df_cluster
# + papermill={"duration": 0.061519, "end_time": "2022-03-17T00:10:19.949457", "exception": false, "start_time": "2022-03-17T00:10:19.887938", "status": "completed"} tags=[]
# during the new EDA we can do a analyse inside each cluster
# + [markdown] papermill={"duration": 0.056474, "end_time": "2022-03-17T00:10:20.063861", "exception": false, "start_time": "2022-03-17T00:10:20.007387", "status": "completed"} tags=[]
# Cluster Insiders (04):
#
# - Number of customers: 551 (9.67% of customers)
# - Avg Gross Revenue: $10,410.00
# - Recency Average: 45 days
# - Avg of Qty Products Purchased: 366 un
# - Purchase Frequency: 0.21 products per day
# + [markdown] papermill={"duration": 0.058235, "end_time": "2022-03-17T00:10:20.179710", "exception": false, "start_time": "2022-03-17T00:10:20.121475", "status": "completed"} tags=[]
# # Exploratory Data Analysis
# + papermill={"duration": 0.061214, "end_time": "2022-03-17T00:10:20.298990", "exception": false, "start_time": "2022-03-17T00:10:20.237776", "status": "completed"} tags=[]
# working copy of the profiled table for the post-clustering EDA
df10 = df92.copy()
# + papermill={"duration": 0.067878, "end_time": "2022-03-17T00:10:20.424227", "exception": false, "start_time": "2022-03-17T00:10:20.356349", "status": "completed"} tags=[]
df10.head()
# + [markdown] papermill={"duration": 0.064884, "end_time": "2022-03-17T00:10:20.555481", "exception": false, "start_time": "2022-03-17T00:10:20.490597", "status": "completed"} tags=[]
# # MODEL DEPLOYMENT
# + papermill={"duration": 0.070052, "end_time": "2022-03-17T00:10:20.688111", "exception": false, "start_time": "2022-03-17T00:10:20.618059", "status": "completed"} tags=[]
# inspect dtypes before persisting to SQLite
df92.dtypes
# + papermill={"duration": 0.065148, "end_time": "2022-03-17T00:10:20.817124", "exception": false, "start_time": "2022-03-17T00:10:20.751976", "status": "completed"} tags=[]
# cast count-like columns to int so they match the INTEGER columns of the
# insiders table created below
df92['recency_days'] = df92['recency_days'].astype(int)
df92['qty_products'] = df92['qty_products'].astype(int)
df92['qty_returns'] = df92['qty_returns'].astype(int)
# + papermill={"duration": 0.060789, "end_time": "2022-03-17T00:10:20.935345", "exception": false, "start_time": "2022-03-17T00:10:20.874556", "status": "completed"} tags=[]
# # create database
# conn = sqlite3.connect('insiders_db.sqlite')
# # create table
# query_create_insiders = """
# CREATE TABLE insiders (
# custer_id INTEGER,
# gross_revenue REAL,
# recency_days INTEGER,
# qty_products INTEGER,
# frequency INTEGER,
# qty_returns INTEGER,
# cluster INTEGER
# )
# """
# conn.execute(query_create_insiders)
# conn.commit()
# conn.close()
# + papermill={"duration": 0.081882, "end_time": "2022-03-17T00:10:21.075381", "exception": false, "start_time": "2022-03-17T00:10:20.993499", "status": "completed"} tags=[]
# database connection (SQLite file created next to the notebook)
conn = create_engine('sqlite:///insiders_db.sqlite')
# # drop table
# query_drop_insiders = """
#     DROP TABLE insiders
# """
# create table
# Fix: IF NOT EXISTS makes the cell re-runnable — the original CREATE TABLE
# raised an OperationalError whenever the table already existed.
# NOTE(review): Engine.execute() was removed in SQLAlchemy 2.0; under 2.x this
# must become `with conn.connect() as con: con.execute(text(query))` — confirm
# the pinned SQLAlchemy version.
query_create_insiders = """
    CREATE TABLE IF NOT EXISTS insiders (
        customer_id   INTEGER,
        gross_revenue REAL,
        recency_days  INTEGER,
        qty_products  INTEGER,
        frequency     INTEGER,
        qty_returns   INTEGER,
        cluster       INTEGER
    )
"""
conn.execute(query_create_insiders)
# + papermill={"duration": 0.098343, "end_time": "2022-03-17T00:10:21.230398", "exception": false, "start_time": "2022-03-17T00:10:21.132055", "status": "completed"} tags=[]
# insert into data
# append the customer profiles (including cluster labels) to the insiders table
df92.to_sql('insiders', con = conn, if_exists = 'append', index = False)
# + papermill={"duration": 0.088775, "end_time": "2022-03-17T00:10:21.378309", "exception": false, "start_time": "2022-03-17T00:10:21.289534", "status": "completed"} tags=[]
# consulting database
# get query
query_collect = """
SELECT * from insiders
"""
# read the table back to confirm the insert worked
df = pd.read_sql_query(query_collect, conn)
df.head()
| reports/c9.0-mc-deploy-2022-03-16T20:56:33.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # pandapower WLS State Estimation
# This is an introduction into the usage of the pandapower state estimation module. It shows how to
# create measurements for a pandapower network and how to perform a state estimation with the weighted least squares (WLS) algorithm.
#
# ## Example Network
#
# We will be using the reference network from the book "Power System State Estimation" by <NAME> and <NAME>.
# It contains 3 buses with connecting lines between buses 1-2, 1-3 and 2-3. 8 measurements of different types enable WLS state estimation.
#
# We first create this network in pandapower.
# +
import pandapower as pp
# three-bus reference system from Abur & Exposito, "Power System State Estimation"
net = pp.create_empty_network()
b1 = pp.create_bus(net, name="bus 1", vn_kv=1., index=1)
b2 = pp.create_bus(net, name="bus 2", vn_kv=1., index=2)
b3 = pp.create_bus(net, name="bus 3", vn_kv=1., index=3)
pp.create_ext_grid(net, 1)  # set the slack bus to bus 1
# lines 1-2, 1-3 and 2-3 with the impedances used in the book (1 km each)
l1 = pp.create_line_from_parameters(net, 1, 2, 1, r_ohm_per_km=.01, x_ohm_per_km=.03, c_nf_per_km=0., max_i_ka=1)
l2 = pp.create_line_from_parameters(net, 1, 3, 1, r_ohm_per_km=.02, x_ohm_per_km=.05, c_nf_per_km=0., max_i_ka=1)
l3 = pp.create_line_from_parameters(net, 2, 3, 1, r_ohm_per_km=.03, x_ohm_per_km=.08, c_nf_per_km=0., max_i_ka=1)
net
# -
# Now we can add our measurements, which are valid for one point in time.
#
# We add two voltage magnitude measurements on buses 1 / 2 with voltage magnitude of 1.006 pu / 0.968 pu and a standard deviation of 0.004 pu each:
# ## Adding Measurements
#
# Measurements are defined via the pandapower *create_measurement* function.
# The physical properties which can be measured are set with the *type* argument and can be one of the following: "p" for active power, "q" for reactive power, "v" for voltage and "i" for electrical current.
# The element is set with the *element_type* argument, it can be either "bus", "line" or "transformer".
# Power is measured in kW / kVar, voltage in per unit and current in A. Bus power injections are positive if power is generated at the bus and negative if it is consumed.
# voltage magnitude measurements (pu) with a standard deviation of 0.004 pu
pp.create_measurement(net, "v", "bus", 1.006, .004, element=b1)  # V at bus 1
pp.create_measurement(net, "v", "bus", 0.968, .004, element=b2)  # V at bus 2
net.measurement
# We add bus injection measurements on bus 2 with P=-501 kW and Q=-286kVar and standard deviations of 10kVA:
# bus injection measurements at bus 2 (negative values = consumption)
pp.create_measurement(net, "p", "bus", -0.501, 10, element=b2)  # P at bus 2
pp.create_measurement(net, "q", "bus", -0.286, 10, element=b2)  # Q at bus 2
net.measurement
# Finally, we add line measurements for lines 0 and 1, both placed at the side of bus 1. The bus parameter defines the bus at which the line measurement is positioned, the line argument is the index of the line.
# line flow measurements, all taken at the bus-1 end of the lines
pp.create_measurement(net, "p", "line", 0.888, 8, element=l1, side=b1)  # Pline (bus 1 -> bus 2) at bus 1
pp.create_measurement(net, "p", "line", 1.173, 8, element=l2, side=b1)  # Pline (bus 1 -> bus 3) at bus 1
pp.create_measurement(net, "q", "line", 0.568, 8, element=l1, side=b1)  # Qline (bus 1 -> bus 2) at bus 1
pp.create_measurement(net, "q", "line", 0.663, 8, element=l2, side=b1)  # Qline (bus 1 -> bus 3) at bus 1
net.measurement
# ## Performing the State Estimation
#
# The measurements are now set. We have to initialize the starting voltage magnitude and voltage angles for the state estimator. In continuous operation, this can be the result of the last state estimation. In our case, we set flat start conditions: 1.0 p.u. for voltage magnitude, 0.0 degree for voltage angles. This is easily done with the parameter "init", which we define as "flat".
# And now run the state estimation. Afterwards, the result will be stored in the table res_bus_est.
# +
from pandapower.estimation import estimate
# run WLS state estimation from a flat start (|V| = 1.0 pu, angle = 0.0 deg);
# returns True on convergence
success = estimate(net, init='flat')
print(success)
# -
# ## Handling of Bad Data
#
# The state estimation class additionally allows the removal of bad data, especially single or non-interacting false measurements. For detecting bad data the Chi-squared distribution is used to identify their presence. Afterwards follows the largest normalized residual test that identifies the actual measurements, which will be removed at the end.
#
# To test this function we will add a single false measurement to the network (active power flow of line 1 at bus 3):
# deliberately false measurement used to exercise the bad-data removal below
pp.create_measurement(net, "p", "line", 1.0, 8, element=l1, side=b3)  # Pline (bus 1 -> bus 2) at bus 3
net.measurement
# The next step is the call of the largest normalized residual test's wrapper function *remove_bad_data* that handles the removal of the added false measurement, and returns an indication of the success of the state estimation. The argument structure of this function is similar to the *estimate* function (compare above). It only provides further adjustments according to the maximum allowed normalized residual ("rn_max_threshold"), and the probability of false measurements required by the chi-squared test ("chi2_prob_false").
# +
from pandapower.estimation import remove_bad_data
import numpy as np
# largest-normalized-residual test: drops measurements with |r_N| > 3.0 and
# re-runs the WLS estimation; returns True if the estimation succeeds
success_rn_max = remove_bad_data(net, init='flat', rn_max_threshold=3.0)
print(success_rn_max)
# -
# The management of results will be the same like for the *estimate* function (see following section).
# ## Working with Results
#
# We can show the voltage magnitude and angles directly:
# estimated voltage magnitudes (pu) and angles (deg) per bus
net.res_bus_est.vm_pu
net.res_bus_est.va_degree
# The results match exactly with the results from the book: Voltages 0.9996, 0.9742, 0.9439; Voltage angles 0.0, -1.2475, -2.7457). Nice!
# Let's look at the bus power injections, which are available in res_bus_est as well
net.res_bus_est.p_mw
net.res_bus_est.q_mvar
# We can also compare the resulting line power flows with the measurements.
net.res_line_est.p_from_mw
net.res_line_est.q_from_mvar
# Again, these values match the estimated values from our reference book.
# This concludes the small tutorial how to perform state estimation with a pandapower network.
| tutorials/state_estimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GFmthnDoq6j6"
# # Taylor Series Expansion with Python from Data Science Fabric
#
# recovered from **[Data Science Fabric](https://dsfabric.org/taylor-series-expansion-with-python)**
# + [markdown] id="CqznuxbxCo1T"
# Note: for ease and organization, titles were placed on the notebook for quick reading
# + [markdown] id="1AMg8SM_Cmhv"
# ##Libraries
# + id="h7GY4uApqKr7"
from sympy import series, Symbol
from sympy.functions import sin, cos, exp
from sympy.plotting import plot
import matplotlib.pyplot as plt
# + id="CxmWO41N-VW1"
from sympy.functions import ln
# + id="Nnpm4V-AqdMs"
# Define symbol
# single SymPy symbol shared by every expansion in this notebook
x = Symbol('x')
# + id="_bF3LYe8qmNp"
# Function for Taylor Series Expansion
def taylor(function, x0, n):
    """
    Parameter "function" is our function which we want to approximate
    "x0" is the point where to approximate
    "n" is the order of approximation

    Note: relies on the module-level symbol ``x``; ``removeO()`` strips the
    O(x**n) remainder term so a plain polynomial is returned.
    """
    return function.series(x,x0,n).removeO()
# + [markdown] id="Klht6xkwChoF"
# ##First's Cases of Use
# + id="RHDTuup3q3r0" colab={"base_uri": "https://localhost:8080/"} outputId="4eeed4dd-f16d-435d-d216-b0d1f181692e"
# 4th-order Maclaurin polynomials of the classic examples
print('sin(x) ≅', taylor(sin(x), 0, 4))
print('cos(x) ≅', taylor(cos(x), 0, 4))
print('e(x) ≅', taylor(exp(x), 0, 4))
# + colab={"base_uri": "https://localhost:8080/"} id="Q1lna5FZ-ZgT" outputId="4eeed4dd-f16d-435d-d216-b0d1f181692e"
print("Ejercicio")
print('ln(x+1) ≅', taylor(ln(x+1), 0, 4))
# + id="WVlow_HJs9Uj" colab={"base_uri": "https://localhost:8080/"} outputId="a452f755-8658-401e-e5a4-ea8ac0d3f203"
# evaluate the 4th-order polynomials at x = 1
print('sin(1) =', taylor(sin(x), 0, 4).subs(x,1))
print('cos(1) =', taylor(cos(x), 0, 4).subs(x,1))
print('e(1) =', taylor(exp(x), 0, 4).subs(x,1))
# + colab={"base_uri": "https://localhost:8080/"} id="rUkClcyg-yQy" outputId="4f0f0438-6c9b-4a3a-ff59-e967d4090173"
print("Ejercicio")
print('ln((1)+1) =', taylor(ln(x+1), 0, 4).subs(x,1))
# + [markdown] id="NJGQ60AHCcWJ"
# ##Tests of Taylor's Series
# + id="U6mI5cVErKKb" colab={"base_uri": "https://localhost:8080/"} outputId="54991e80-108a-42e1-c149-6a6ead226632"
# print the expansion of exp(x) for every order from 0 through 8
# (a loop replaces nine copy-pasted print statements; output is identical)
for order in range(9):
    print(f'Taylor {order} exp(x) ≅', taylor(exp(x), 0, order))
# + colab={"base_uri": "https://localhost:8080/"} id="NZ-SQaG9_LUU" outputId="702b4e70-3e9d-4825-f161-1aa6004a38a3"
print("Ejercicio")
# orders 1 through 9 of the ln(x+1) expansion
for i in range(1,10):
    print('Taylor', i,'ln(x+1) ≅', taylor(ln(x+1), 0, i))
# + colab={"base_uri": "https://localhost:8080/"} id="CsqUwehb_110" outputId="2ef7dac4-e487-41c4-adda-92d7beb19bf1"
print("Ejercicio")
# orders 1 through 9 of the sin(x) expansion
for i in range(1,10):
    print('Taylor', i,'sin(x) ≅', taylor(sin(x), 0, i))
# + id="ZvqiDRLOtAFa" colab={"base_uri": "https://localhost:8080/"} outputId="57777e10-9ac8-4865-de83-1e3a1f66e922"
# evaluate low-order expansions at x = 2 (exact symbolic value and float)
print('Taylor 0 sin(x) ≅', taylor(sin(x), 0, 0).subs(x,2),' = ',taylor(sin(x), 0, 0).subs(x,2).evalf())
print('Taylor 1 cos(x) ≅', taylor(cos(x), 0, 1).subs(x,2),' = ',taylor(cos(x), 0, 1).subs(x,2).evalf())
print('Taylor 2 exp(x) ≅', taylor(exp(x), 0, 2).subs(x,2),' = ',taylor(exp(x), 0, 2).subs(x,2).evalf())
print('Taylor 3 exp(x) ≅', taylor(exp(x), 0, 3).subs(x,2),' = ',taylor(exp(x), 0, 3).subs(x,2).evalf())
print('Taylor 4 exp(x) ≅', taylor(exp(x), 0, 4).subs(x,2),' = ',taylor(exp(x), 0, 4).subs(x,2).evalf())
print('Taylor 5 exp(x) ≅', taylor(exp(x), 0, 5).subs(x,2),' = ',taylor(exp(x), 0, 5).subs(x,2).evalf())
print('Taylor 6 exp(x) ≅', taylor(exp(x), 0, 6).subs(x,2),' = ',taylor(exp(x), 0, 6).subs(x,2).evalf())
# fix: this line was labelled "Taylor 7" but expanded to order 8 on the left
# while evaluating order 7 on the right; both sides now use order 7
print('Taylor 7 exp(x) ≅', taylor(exp(x), 0, 7).subs(x,2),' = ',taylor(exp(x), 0, 7).subs(x,2).evalf())
# + colab={"base_uri": "https://localhost:8080/"} id="DD9hyrwRAccB" outputId="64f9f49f-27d4-4650-874d-0e35ed43446f"
print("Ejercicio")
# mixed sin / ln(x+1) examples evaluated at x = 2 (exact value and float)
print('Taylor 0 sin(x) ≅', taylor(sin(x), 0, 0).subs(x,2),' = ',taylor(sin(x), 0, 0).subs(x,2).evalf())
print('Taylor 1 sin(x) ≅', taylor(sin(x), 0, 1).subs(x,2),' = ',taylor(sin(x), 0, 1).subs(x,2).evalf())
print('Taylor 2 sin(x) ≅', taylor(sin(x), 0, 2).subs(x,2),' = ',taylor(sin(x), 0, 2).subs(x,2).evalf())
print('Taylor 3 sin(x) ≅', taylor(sin(x), 0, 3).subs(x,2),' = ',taylor(sin(x), 0, 3).subs(x,2).evalf())
print('Taylor 4 ln(x+1) ≅', taylor(ln(x+1), 0, 4).subs(x,2),' = ',taylor(ln(x+1), 0, 4).subs(x,2).evalf())
print('Taylor 5 ln(x+1) ≅', taylor(ln(x+1), 0, 5).subs(x,2),' = ',taylor(ln(x+1), 0, 5).subs(x,2).evalf())
print('Taylor 6 ln(x+1) ≅', taylor(ln(x+1), 0, 6).subs(x,2),' = ',taylor(ln(x+1), 0, 6).subs(x,2).evalf())
# fix: this line was labelled "Taylor 7" but used order 8 on the left and
# order 7 on the right; both sides now use order 7
print('Taylor 7 ln(x+1) ≅', taylor(ln(x+1), 0, 7).subs(x,2),' = ',taylor(ln(x+1), 0, 7).subs(x,2).evalf())
# + [markdown] id="O4EVQpjwCPIi"
# ##Comparison between methods
# + id="6CAcVG99Oxp4" colab={"base_uri": "https://localhost:8080/"} outputId="7275d303-4f85-404f-ef66-b54068bfb8f3"
import math
# compare SymPy's exact and float evaluation of e**2 with math.exp
print('sympy exp(x)subs(x,2) =', exp(x).subs(x,2))
print('sympy exp(x).subs(x,2).evalf() =', exp(x).subs(x,2).evalf())
print('math.exp(2) =', math.exp(2))
# + colab={"base_uri": "https://localhost:8080/"} id="2CBDt3hlBLOJ" outputId="8d45084b-fec2-461c-827b-d80d50dc5a9a"
print("Ejercicio")
import math
# compare SymPy's exact and float evaluation of ln(3) with math.log1p
print('sympy ln(x+1)subs(x,2) =', ln(x+1).subs(x,2))
print('sympy ln(x+1).subs(x,2).evalf() =', ln(x+1).subs(x,2).evalf())
print('math.ln(2+1) =', math.log1p(2))
# + colab={"base_uri": "https://localhost:8080/"} id="8tcpTiHSBqoo" outputId="ae343eae-4abe-43dd-e9af-010995b297b2"
print("Ejercicio")
import math
# compare SymPy's exact and float evaluation of sin(2) with math.sin
print('sympy sin(x)subs(x,2) =', sin(x).subs(x,2))
print('sympy sin(x).subs(x,2).evalf() =', sin(x).subs(x,2).evalf())
print('math.sin(2) =', math.sin(2))
# + [markdown] id="JncNv-rGB7G1"
# ##Plots of `exp()`
# + id="7-SIZbyzVb8I" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="82a655a3-8037-43da-8d8c-1377fd2397eb"
import math
import numpy as np
import matplotlib.pyplot as plt
# exp(x) vs its order-1 (constant) Maclaurin approximation on [-5, 5)
values = np.arange(-5,5,0.1)
p_exp = np.exp(values)
t_exp1 = [taylor(exp(x), 0, 1).subs(x,v) for v in values]
legends = ['exp() ','Taylor 1 (constant)']
fig, ax = plt.subplots()
ax.plot(values,p_exp, color ='red')
ax.plot(values,t_exp1)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + id="nEX84A5GV6-g" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="65521930-d668-46ff-8623-295ba7e71235"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# exp(x) vs its order-2 (linear) approximation
values = np.arange(-5,5,0.1)
p_exp = np.exp(values)
t_exp2 = [taylor(exp(x), 0, 2).subs(x,v) for v in values]
legends = ['exp() ','Taylor 2 (linear)']
fig, ax = plt.subplots()
ax.plot(values,p_exp, color ='red')
ax.plot(values,t_exp2)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + id="0tgel982WPXX" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="7b5c5972-36fc-4c81-e925-602752f264e5"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# exp(x) vs its order-3 (quadratic) approximation
values = np.arange(-5,5,0.1)
p_exp = np.exp(values)
t_exp3 = [taylor(exp(x), 0, 3).subs(x,v) for v in values]
legends = ['exp() ','Taylor 3 (quadratic)']
fig, ax = plt.subplots()
ax.plot(values,p_exp, color ='red')
ax.plot(values,t_exp3)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + id="coXe9XZGWbLO" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="971d25ee-7f82-4893-9f63-5a527997cf8b"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# exp(x) vs its order-4 (cubic) approximation
values = np.arange(-5,5,0.1)
p_exp = np.exp(values)
t_exp4 = [taylor(exp(x), 0, 4).subs(x,v) for v in values]
legends = ['exp() ','Taylor 4 (cubic)']
fig, ax = plt.subplots()
ax.plot(values,p_exp, color ='red')
ax.plot(values,t_exp4)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + id="9xTFhhEMPS1w" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="5bb2827d-1c28-4e45-f9b8-2e8400f2b22e"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# exp(x) together with its order-1..4 approximations in one figure
values = np.arange(-5,5,0.1)
p_exp = np.exp(values)
t_exp1 = [taylor(exp(x), 0, 1).subs(x,v) for v in values]
t_exp2 = [taylor(exp(x), 0, 2).subs(x,v) for v in values]
t_exp3 = [taylor(exp(x), 0, 3).subs(x,v) for v in values]
t_exp4 = [taylor(exp(x), 0, 4).subs(x,v) for v in values]
# fix: the legend entry for the linear curve said "Taylor 3 (linear)";
# it is the order-2 expansion
legends = ['exp() ','Taylor 1 (constant)','Taylor 2 (linear)','Taylor 3 (quadratic)','Taylor 4 (cubic)']
fig, ax = plt.subplots()
ax.plot(values,p_exp)
ax.plot(values,t_exp1)
ax.plot(values,t_exp2)
ax.plot(values,t_exp3)
ax.plot(values,t_exp4)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + [markdown] id="hvOYNvmnCCAv"
# ##Plots of $\ln(x+1)$
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="1iVxZSwmDgp2" outputId="5d49b983-008e-42e4-964e-d7ea4f72d8a9"
import math
import numpy as np
import matplotlib.pyplot as plt
# ln(x+1) vs its order-1 (constant) expansion; the constant term is 0
values = np.arange(0,5,0.1)
p_ln = [math.log1p(value) for value in values]
t_ln1 = [taylor(ln(x+1), 0, 1).subs(x,v) for v in values]
legends = ['ln(x+1) ','Taylor 1 (constant)']
fig, ax = plt.subplots()
ax.plot(values,p_ln, color ='red')
ax.plot(values,t_ln1)
ax.set_ylim([-5,5])
#ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
#ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
print("Note that the blue line is in y=0")
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="SXnLoH43Dgp4" outputId="535ef6ee-7de3-4be4-b4e9-e9fdae4ee4d2"
import math
import numpy as np
import matplotlib.pyplot as plt
# ln(x+1) vs its order-2 (linear) expansion
# NOTE(review): legend uses "Lineal" (Spanish) while sibling cells use
# "linear" — consider unifying the labels
values = np.arange(0,5,0.1)
p_ln = [math.log1p(value) for value in values]
t_ln2 = [taylor(ln(x+1), 0, 2).subs(x,v) for v in values]
legends = ['ln(x+1) ','Taylor 2 (Lineal)']
fig, ax = plt.subplots()
ax.plot(values,p_ln, color ='red')
ax.plot(values,t_ln2)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="f_Cr9KbFDgp4" outputId="836b7129-6a2a-4afd-f02e-5456b6994bac"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# ln(x+1) vs its order-3 (quadratic) expansion
values = np.arange(0,5,0.1)
p_ln = [math.log1p(value) for value in values]
t_ln3 = [taylor(ln(x+1), 0, 3).subs(x,v) for v in values]
legends = ['ln(x+1) ','Taylor 3 (Quadratic)']
fig, ax = plt.subplots()
ax.plot(values,p_ln, color ='red')
ax.plot(values,t_ln3)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="xHpktqdTDgp5" outputId="c2cbbd54-6188-4d26-a643-69209116f53a"
import math
import numpy as np
import matplotlib.pyplot as plt
# ln(x+1) vs its order-4 (cubic) expansion
values = np.arange(0,5,0.1)
p_ln = [math.log1p(value) for value in values]
t_ln4 = [taylor(ln(x+1), 0, 4).subs(x,v) for v in values]
legends = ['ln(x+1) ','Taylor 4 (Cubic)']
fig, ax = plt.subplots()
ax.plot(values,p_ln, color ='red')
ax.plot(values,t_ln4)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="4r0bj2SpDgp5" outputId="5c906641-8eb8-4a48-ba82-55876778d987"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# ln(x+1) together with its order-1..4 expansions in one figure
values = np.arange(0,5,0.1)
p_ln = [math.log1p(value) for value in values]
t_ln1 = [taylor(ln(x+1), 0, 1).subs(x,v) for v in values]
t_ln2 = [taylor(ln(x+1), 0, 2).subs(x,v) for v in values]
t_ln3 = [taylor(ln(x+1), 0, 3).subs(x,v) for v in values]
t_ln4 = [taylor(ln(x+1), 0, 4).subs(x,v) for v in values]
# fix: the legend entry for the linear curve said "Taylor 3 (linear)";
# it is the order-2 expansion
legends = ['ln(x+1) ','Taylor 1 (constant)','Taylor 2 (linear)','Taylor 3 (quadratic)','Taylor 4 (cubic)']
fig, ax = plt.subplots()
ax.plot(values,p_ln)
ax.plot(values,t_ln1)
ax.plot(values,t_ln2)
ax.plot(values,t_ln3)
ax.plot(values,t_ln4)
ax.set_ylim([-2,3])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + [markdown] id="7n2IjpcICJXw"
# ##Plots of $\sin(x)$
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="7UBBbAHHHje5" outputId="7191134d-d3c6-4561-8ece-c3ee589d6435"
import math
import numpy as np
import matplotlib.pyplot as plt
# sin(x) vs its order-1 (constant) expansion over two full periods
values = np.arange(-2*math.pi,2*math.pi,0.1)
p_sin = [math.sin(value) for value in values]
t_sin1 = [taylor(sin(x), 0, 1).subs(x,v) for v in values]
legends = ['sin() ','Taylor 1 (constant)']
fig, ax = plt.subplots()
ax.plot(values,p_sin, color ='red')
ax.plot(values,t_sin1)
ax.set_ylim([-5,5])
#ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
#ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Y7rdbsiDHjfH" outputId="1e30482f-34b8-42c8-cd1c-896ab558517b"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# sin(x) vs its order-2 (linear) expansion
values = np.arange(-2*math.pi,2*math.pi,0.1)
p_sin = [math.sin(value) for value in values]
t_sin2 = [taylor(sin(x), 0, 2).subs(x,v) for v in values]
legends = ['sin() ','Taylor 2 (linear)']
fig, ax = plt.subplots()
ax.plot(values,p_sin, color ='red')
ax.plot(values,t_sin2)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="5VeV9P2THjfI" outputId="3cbd1e0a-c5e1-49b5-8533-861d6d5362f1"
import math
import numpy as np
import matplotlib.pyplot as plt
# sin(x) vs its order-3 (quadratic) expansion
values = np.arange(-2*math.pi,2*math.pi,0.1)
p_sin = [math.sin(value) for value in values]
t_sin3 = [taylor(sin(x), 0, 3).subs(x,v) for v in values]
legends = ['sin()','Taylor 3 (quadratic)']
fig, ax = plt.subplots()
ax.plot(values,p_sin, color ='red')
ax.plot(values,t_sin3)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="MQFCH9J7HjfI" outputId="43c8b4d2-7b18-406b-dfbd-03180fcb2d3d"
import math
import numpy as np
import matplotlib.pyplot as plt
# if using a Jupyter notebook, include:
# %matplotlib inline
# sin(x) vs its order-4 (cubic) expansion
values = np.arange(-2*math.pi,2*math.pi,0.1)
p_sin = [math.sin(value) for value in values]
t_sin4 = [taylor(sin(x), 0, 4).subs(x,v) for v in values]
legends = ['sin() ','Taylor 4 (cubic)']
fig, ax = plt.subplots()
ax.plot(values,p_sin, color ='red')
ax.plot(values,t_sin4)
ax.set_ylim([-5,5])
ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="QbARf2p5HjfI" outputId="8a41e5bc-b4c9-4525-ddc7-2b269901ebfc"
import math
import numpy as np
import matplotlib.pyplot as plt
values = np.arange(-2*math.pi,2*math.pi,0.1)
p_sin = [math.sin(value) for value in values]
# Taylor expansions of sin about 0 with 1 to 4 terms.
t_sin1 = [taylor(sin(x), 0, 1).subs(x,v) for v in values]
t_sin2 = [taylor(sin(x), 0, 2).subs(x,v) for v in values]
t_sin3 = [taylor(sin(x), 0, 3).subs(x,v) for v in values]
t_sin4 = [taylor(sin(x), 0, 4).subs(x,v) for v in values]
# Bug fix: the 2-term curve was mislabelled 'Taylor 3 (linear)'; it is the
# 2-term (linear) expansion.
legends = ['sin() ','Taylor 1 (constant)','Taylor 2 (linear)','Taylor 3 (quadratic)','Taylor 4 (cubic)']
fig, ax = plt.subplots()
ax.plot(values,p_sin)
ax.plot(values,t_sin1)
ax.plot(values,t_sin2)
ax.plot(values,t_sin3)
ax.plot(values,t_sin4)
ax.set_ylim([-5,5])
#ax.axhline(y=0.0, xmin=-5.0, xmax=5.0, color='black')
#ax.axvline(x=0.0, ymin=-10.0, ymax=10.0, color='black')
ax.legend(legends)
plt.show()
| Lab9/TaylorSymPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/datascience-1.0
# ---
print('hello world!')
import sagemaker
# Open a SageMaker session, then resolve the account's default S3 bucket and
# the IAM execution role attached to this notebook environment.
session = sagemaker.Session()
bucket = session.default_bucket()
role = sagemaker.get_execution_role()
print(bucket)
print(role)
| chapter02/01-hello_world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solving MDPs: Linear Programming
# 
# The goal of this exercise is to solve the MDP in figure using Linear Programming. In this MDP the environment model is extremely simple, the transition function is deterministic so it is determined uniquely by the action.
#
# The variables of the linear program are the state values, the coefficients are given by the initial state distribution, that in our case is a deterministic function, as the state 1 is the initial state.
# So the coefficients of the objective function are [1, 0, 0].
#
# In the following we will use scipy.optimize.linprog function to optimize a linear program.
#
# We will use the following notation:
#
# 
#
# To rephrase the problem using upper bounds we have:
#
# $$
# V >= R + \gamma P V
# $$
#
# That becomes:
#
# $$
# (\gamma P - I)V <= - R
# $$
import os

import numpy as np
import scipy.optimize
# number of states and number of actions
n_states = 3
n_actions = 2
# initial state distribution (deterministic: the episode always starts in state 1)
mu = np.array([[1, 0, 0]]).T
mu
# Build the upper bound coefficients for the action A
# define the reward matrix for action A
# (plain `float` throughout: the deprecated `np.float` alias was removed in
# NumPy 1.24 and raises AttributeError there)
R_A = np.zeros((n_states, 1), float)
R_A[0, 0] = 1
R_A[1, 0] = 0
R_A[2, 0] = 0
R_A
# Define the transition matrix for action A (deterministic: one 1 per row)
P_A = np.zeros((n_states, n_states), float)
P_A[0, 1] = 1
P_A[1, 0] = 1
P_A[2, 1] = 1
P_A
gamma = 0.9
# Upper bound A matrix for action A: (gamma*P - I) V <= -R
A_up_A = gamma * P_A - np.eye(3,3)
A_up_A
# The same for action B
# define the reward matrix for action B
R_B = np.zeros((n_states, 1), float)
R_B[0, 0] = 10
R_B[1, 0] = 1
R_B[2, 0] = 10
R_B
# Define the transition matrix for action B (fixed comment: was "action A")
P_B = np.zeros((n_states, n_states), float)
P_B[0, 2] = 1
P_B[1, 2] = 1
P_B[2, 2] = 1
P_B
# Upper bound A matrix for action B
A_up_B = gamma * P_B - np.eye(3,3)
A_up_B
# Upper bound matrix for all actions and all states
A_up = np.vstack((A_up_A, A_up_B))
# verify the shape: number of constraints are equal to |actions| * |states|
assert A_up.shape[0] == n_states * n_actions
# Reward vector is obtained by stacking the two vectors
R = np.vstack((R_A, R_B))
c = mu
b_up = -R
# Solve the linear program: minimize mu^T V s.t. A_up V <= b_up.
# linprog expects 1-D c and b_ub, so flatten the column vectors explicitly.
res = scipy.optimize.linprog(c.flatten(), A_up, b_up.flatten())
# Obtain the results: state values
V_ = res.x
V_
V = V_.reshape((-1, 1))
V
# Bug fix: np.savetxt does not create directories; make sure it exists first.
os.makedirs("solution", exist_ok=True)
np.savetxt("solution/V.txt", V)
# 
# Let's analyze the results.
# We have that the value of state 2 is the lowest one, as expected.
# The values of states 1 and 3 are very close to each other and approximately equal to 1e+2.
# Now we can calculate the optimal policy by calculating the optimal action value function for each state action couple.
# transition matrix. On the rows we have states and actions, on the columns we have next states
P = np.vstack((P_A, P_B))
P
# Use the action value formula to calculate the action values for each state action pair.
Q_sa = R + gamma * P.dot(V)
# The first three rows are associated to action A, the last three are associated to action B
Q_sa
# Reshape so that it is easier to understand best actions
Q_sa_2 = np.stack((Q_sa[:3, 0], Q_sa[3:, 0]), axis=1)
Q_sa_2
best_actions = np.reshape(np.argmax(Q_sa_2, axis=1), (3, 1))
best_actions
# Action 1 (B) is the best action in each state.
# 
# As expected the best action in state 1 is action B. The action B is the best action for all states.
| Chapter02/Exercise02_02/Exercise02_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
# %matplotlib inline
sns.set(style="darkgrid")
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0)
def get_data(symbol):
df = pd.read_parquet(f"/data/stocks/{symbol}_1min.parquet.gzip")
return df
main = get_data("ACC")
# Restrict to the single trading day 2019-08-26.
df = main.query('20190826 == date')
# main.date.dt.year
df
# d = main.groupby('date')
# main = main.drop('volume',1)
# main.plot(x='date')
# +
df = main.copy()
# NOTE(review): '%Y-%d-%m' formats as year-DAY-month; confirm this ordering is
# intended rather than the ISO '%Y-%m-%d' — the string filter below relies on it.
df['date'] = pd.to_datetime(df['datetime'])
df['date'] = df['date'].dt.strftime('%Y-%d-%m')
dates = df.date.unique()
df.head(15)
# df1['date'] = pd.to_datetime(df1['datetime'])
# df1['date'] = df1['date'].dt.strftime('%Y-%d-%m')
# df1.set_index(df1['date'], inplace = True)
# g = df1.groupby(df1['date'])
# df.head()
# df.datetime.dt.
dates
d = df[df['date'] == '2016-08-06']
d
# +
def convert_to_24(date):
    """Shift the 01-04 o'clock hours of a timestamp string forward by 12
    (to 13-16); any other hour leaves the string unchanged."""
    hour = int(date[11:13])
    if hour in (1, 2, 3, 4):
        return date[:11] + str(hour + 12) + date[13:]
    return date

convert_to_24("2016-06-08 09:55:00+00:00")
convert_to_24("2016-06-08 11:00:00+00:00")
convert_to_24("2016-06-08 10:25:00+00:00")
convert_to_24("2016-06-08 12:00:00+00:00")
convert_to_24("2016-06-08 03:00:00+00:00")
convert_to_24("2016-06-08 03:30:00+00:00")
# -
sns.lmplot(x='open', y='close', data=df)
# +
from datetime import datetime
from pytz import timezone
date_str = "2019-08-26 23:48:00"
datetime_obj = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
# NOTE(review): pytz zones attached via replace() get the zone's raw base
# offset; the localize() approach used in the next cell is the documented way.
datetime_obj_utc = datetime_obj.replace(tzinfo=timezone('US/Eastern'))
print(datetime_obj_utc.strftime("%Y-%m-%d %H:%M:%S"))
now_asia = datetime_obj_utc.astimezone(timezone('Asia/Kolkata'))
# Bug fix: the original passed the builtin `format` function to strftime,
# which raises TypeError; use an explicit format string.
print(now_asia.strftime("%Y-%m-%d %H:%M:%S"))
# +
from datetime import datetime
from pytz import timezone
# Attach the New York zone with localize() (the pytz-correct way), convert to
# India Standard Time, then drop tzinfo to get a naive local timestamp.
newyork = timezone('America/New_York').localize(datetime.strptime("2019-08-26 23:48:00", "%Y-%m-%d %H:%M:%S"))
berlin = newyork.astimezone(timezone('Asia/Kolkata')).replace(tzinfo=None)
print(berlin)
| plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_mxnet_p36)
# language: python
# name: conda_mxnet_p36
# ---
# # Mxnet BYOM: Train locally and deploy on SageMaker.
#
# 1. [Introduction](#Introduction)
# 2. [Prerequisites and Preprocessing](#Prequisites-and-Preprocessing)
# 1. [Permissions and environment variables](#Permissions-and-environment-variables)
# 2. [Data Setup](#Data-setup)
# 3. [Training the network locally](#Training)
# 4. [Set up hosting for the model](#Set-up-hosting-for-the-model)
# 1. [Export from MXNet](#Export-the-model-from-mxnet)
# 2. [Import model into SageMaker](#Import-model-into-SageMaker)
# 3. [Create endpoint](#Create-endpoint)
# 5. [Validate the endpoint for use](#Validate-the-endpoint-for-use)
#
#
# __Note__: Compare this with the [tensorflow bring your own model example](../tensorflow_iris_byom/tensorflow_BYOM_iris.ipynb)
# ## Introduction
# In this notebook, we will train a neural network locally on the location from where this notebook is run using MXNet. We will then see how to create an endpoint from the trained MXNet model and deploy it on SageMaker. We will then inference from the newly created SageMaker endpoint.
#
# The neural network that we will use is a simple fully-connected neural network. The definition of the neural network can be found in the accompanying [mnist.py](mnist.py) file. The ``build_graph`` method contains the model definition (shown below).
#
# ```python
# def build_graph():
# data = mx.sym.var('data')
# data = mx.sym.flatten(data=data)
# fc1 = mx.sym.FullyConnected(data=data, num_hidden=128)
# act1 = mx.sym.Activation(data=fc1, act_type="relu")
# fc2 = mx.sym.FullyConnected(data=act1, num_hidden=64)
# act2 = mx.sym.Activation(data=fc2, act_type="relu")
# fc3 = mx.sym.FullyConnected(data=act2, num_hidden=10)
# return mx.sym.SoftmaxOutput(data=fc3, name='softmax')
# ```
#
# From this definitnion we can see that there are two fully-connected layers of 128 and 64 neurons each. The activations of the last fully-connected layer is then fed into a Softmax layer of 10 neurons. We use 10 neurons here because the datatset on which we are going to predict is the MNIST dataset of hand-written digit recognition which has 10 classes. More details can be found about the dataset on the [creator's webpage](http://yann.lecun.com/exdb/mnist/).
# ## Prequisites and Preprocessing
#
# ### Permissions and environment variables
#
# Here we set up the linkage and authentication to AWS services. In this notebook we only need the roles used to give learning and hosting access to your data. The SageMaker SDK will use S3 default buckets when needed. Supply the role in the variable below.
# + isConfigCell=true
import boto3, re
from sagemaker import get_execution_role
# IAM role that grants SageMaker training and hosting access to your data.
role = get_execution_role()
# -
# ### Data setup
#
# Next, we need to pull the data from the author's site to our local box. Since we have ``mxnet`` utilities, we will use the utilities to download the dataset locally.
import mxnet as mx
# Downloads MNIST locally; returns a dict of train/test image and label arrays.
data = mx.test_utils.get_mnist()
# ### Training
#
# It is time to train the network. Since we are training the network locally, we can make use of mxnet training tools. The training method is also in the accompanying [mnist.py](mnist.py) file. The method is shown below.
#
# ```python
# def train(data, hyperparameters= {'learning_rate': 0.11}, num_cpus=0, num_gpus =1 , **kwargs):
# train_labels = data['train_label']
# train_images = data['train_data']
# test_labels = data['test_label']
# test_images = data['test_data']
# batch_size = 100
# train_iter = mx.io.NDArrayIter(train_images, train_labels, batch_size, shuffle=True)
# val_iter = mx.io.NDArrayIter(test_images, test_labels, batch_size)
# logging.getLogger().setLevel(logging.DEBUG)
# mlp_model = mx.mod.Module(
# symbol=build_graph(),
# context=get_train_context(num_cpus, num_gpus))
# mlp_model.fit(train_iter,
# eval_data=val_iter,
# optimizer='sgd',
# optimizer_params={'learning_rate': float(hyperparameters.get("learning_rate", 0.1))},
# eval_metric='acc',
# batch_end_callback=mx.callback.Speedometer(batch_size, 100),
# num_epoch=10)
# return mlp_model
# ```
#
# The method above collects the ``data`` variable that ``get_mnist`` method gives you (which is a dictionary of data arrays) along with a dictionary of ``hyperparameters`` which only contains learning rate, and other parameters. It creates a [``mxnet.mod.Module``](https://mxnet.incubator.apache.org/api/python/module.html) from the network graph we built in the ``build_graph`` method and trains the network using the ``mxnet.mod.Module.fit`` method.
from mnist import train
# Train locally; `data` is the dict of MNIST arrays downloaded above.
model = train(data = data)
# ## Set up hosting for the model
#
# ### Export the model from mxnet
#
# In order to set up hosting, we have to import the model from training to hosting. We will begin by exporting the model from MXNet and saving it down. Analogous to the [TensorFlow example](../tensorflow_iris_byom/tensorflow_BYOM_iris.ipynb), some structure needs to be followed. The exported model has to be converted into a form that is readable by ``sagemaker.mxnet.model.MXNetModel``. The following code describes exporting the model in a form that does the same:
import os
# Bug fix: os.mkdir raises FileExistsError when the notebook is re-run;
# makedirs with exist_ok is idempotent.
os.makedirs('model', exist_ok=True)
model.save_checkpoint('model/model', 0000)
import tarfile
# Archive the checkpoint directory in the layout SageMaker expects.
with tarfile.open('model.tar.gz', mode='w:gz') as archive:
    archive.add('model', recursive=True)
# ### Import model into SageMaker
#
# Open a new sagemaker session and upload the model on to the default S3 bucket. We can use the ``sagemaker.Session.upload_data`` method to do this. We need the location of where we exported the model from MXNet and where in our default bucket we want to store the model(``/model``). The default S3 bucket can be found using the ``sagemaker.Session.default_bucket`` method.
# +
import sagemaker

sagemaker_session = sagemaker.Session()
# Upload the model archive to the session's default S3 bucket under /model.
inputs = sagemaker_session.upload_data(path='model.tar.gz', key_prefix='model')
# -
# Use the ``sagemaker.mxnet.model.MXNetModel`` to import the model into SageMaker that can be deployed. We need the location of the S3 bucket where we have the model, the role for authentication and the entry_point where the model defintion is stored (``mnist.py``). The import call is the following:
from sagemaker.mxnet.model import MXNetModel
# Wrap the S3 artifact so it can be deployed; mnist.py supplies the model code.
sagemaker_model = MXNetModel(model_data = 's3://' + sagemaker_session.default_bucket() + '/model/model.tar.gz',
                             role = role,
                             entry_point = 'mnist.py')
# ### Create endpoint
#
# Now the model is ready to be deployed at a SageMaker endpoint. We can use the ``sagemaker.mxnet.model.MXNetModel.deploy`` method to do this. Unless you have created or prefer other instances, we recommend using 1 ``'ml.c4.xlarge'`` instance for this training. These are supplied as arguments.
# Deploy a real-time inference endpoint on a single ml.c4.xlarge instance.
predictor = sagemaker_model.deploy(initial_instance_count=1,
                                   instance_type='ml.c4.xlarge')
# ### Validate the endpoint for use
#
# We can now use this endpoint to classify hand-written digits.
predict_sample = data['test_data'][0][0]
# Bug fix: the original sent the entire `data` dict to the endpoint instead of
# the single sample extracted above.
response = predictor.predict(predict_sample)
print('Raw prediction result:')
print(response)
# (Optional) Delete the Endpoint
print(predictor.endpoint)
# If you do not want continued use of the endpoint, you can remove it. Remember, open endpoints are charged. If this is a simple test or practice, it is recommended to delete them.
# Tear the endpoint down to stop incurring charges.
sagemaker.Session().delete_endpoint(predictor.endpoint)
# Clear all stored model data so that we don't overwrite them the next time.
os.remove('model.tar.gz')
import shutil
# Bug fix: the checkpoint directory created above is 'model'; 'export' never
# exists in this notebook, so rmtree('export') raised FileNotFoundError.
shutil.rmtree('model')
| under_development/mxnet_mnist_byom/mxnet_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Portfolio Exercise: Starbucks
# <br>
#
# <img src="https://opj.ca/wp-content/uploads/2018/02/New-Starbucks-Logo-1200x969.jpg" width="200" height="200">
# <br>
# <br>
#
# #### Background Information
#
# The dataset you will be provided in this portfolio exercise was originally used as a take-home assignment provided by Starbucks for their job candidates. The data for this exercise consists of about 120,000 data points split in a 2:1 ratio among training and test files. In the experiment simulated by the data, an advertising promotion was tested to see if it would bring more customers to purchase a specific product priced at $10. Since it costs the company 0.15 to send out each promotion, it would be best to limit that promotion only to those that are most receptive to the promotion. Each data point includes one column indicating whether or not an individual was sent a promotion for the product, and one column indicating whether or not that individual eventually purchased that product. Each individual also has seven additional features associated with them, which are provided abstractly as V1-V7.
#
# #### Optimization Strategy
#
# Your task is to use the training data to understand what patterns in V1-V7 to indicate that a promotion should be provided to a user. Specifically, your goal is to maximize the following metrics:
#
# * **Incremental Response Rate (IRR)**
#
# IRR depicts how many more customers purchased the product with the promotion, as compared to if they didn't receive the promotion. Mathematically, it's the ratio of the number of purchasers in the promotion group to the total number of customers in the purchasers group (_treatment_) minus the ratio of the number of purchasers in the non-promotional group to the total number of customers in the non-promotional group (_control_).
#
# $$ IRR = \frac{purch_{treat}}{cust_{treat}} - \frac{purch_{ctrl}}{cust_{ctrl}} $$
#
#
# * **Net Incremental Revenue (NIR)**
#
# NIR depicts how much is made (or lost) by sending out the promotion. Mathematically, this is 10 times the total number of purchasers that received the promotion minus 0.15 times the number of promotions sent out, minus 10 times the number of purchasers who were not given the promotion.
#
# $$ NIR = (10\cdot purch_{treat} - 0.15 \cdot cust_{treat}) - 10 \cdot purch_{ctrl}$$
#
# For a full description of what Starbucks provides to candidates see the [instructions available here](https://drive.google.com/open?id=18klca9Sef1Rs6q8DW4l7o349r8B70qXM).
#
# Below you can find the training data provided. Explore the data and different optimization strategies.
#
# #### How To Test Your Strategy?
#
# When you feel like you have an optimization strategy, complete the `promotion_strategy` function to pass to the `test_results` function.
# From past data, we know there are four possible outcomes:
#
# Table of actual promotion vs. predicted promotion customers:
#
# <table>
# <tr><th></th><th colspan = '2'>Actual</th></tr>
# <tr><th>Predicted</th><th>Yes</th><th>No</th></tr>
# <tr><th>Yes</th><td>I</td><td>II</td></tr>
# <tr><th>No</th><td>III</td><td>IV</td></tr>
# </table>
#
# The metrics are only being compared for the individuals we predict should obtain the promotion – that is, quadrants I and II. Since the first set of individuals that receive the promotion (in the training set) receive it randomly, we can expect that quadrants I and II will have approximately equivalent participants.
#
# Comparing quadrant I to II then gives an idea of how well your promotion strategy will work in the future.
#
# Get started by reading in the data below. See how each variable or combination of variables along with a promotion influences the chance of purchasing. When you feel like you have a strategy for who should receive a promotion, test your strategy against the test dataset used in the final `test_results` function.
# +
# load in packages
from itertools import combinations
from test_results import test_results, score
import numpy as np
import pandas as pd
import scipy as sp
import sklearn as sk
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
# load in the data
train_data = pd.read_csv('./training.csv')
train_data.head()
# +
# Cells for you to work and document as necessary -
# definitely feel free to add more cells as you need
# -
def promotion_strategy(df):
    '''
    INPUT
    df - a dataframe with *only* the columns V1 - V7 (same as train_data)

    OUTPUT
    promotion_df - np.array with the values
                   'Yes' or 'No' related to whether or not an
                   individual should receive a promotion
                   should be the length of df.shape[0]

    Ex:
    INPUT: df

    V1	V2	  V3	V4	V5	V6	V7
    2	30	-1.1	1	1	3	2
    3	32	-0.6	2	3	2	2
    2	30	0.13	1	1	4	2

    OUTPUT: promotion

    array(['Yes', 'Yes', 'No'])
    indicating the first two users would receive the promotion and
    the last should not.
    '''
    # TODO: build the promotion model here and assign its 'Yes'/'No'
    # predictions to `promotion`.
    # NOTE(review): `promotion` is never assigned in this exercise stub, so
    # calling the function as-is raises NameError.
    return promotion
# +
# This will test your results, and provide you back some information
# on how well your promotion_strategy will work in practice
test_results(promotion_strategy)
| portfolio_exercises/Experiments/Starbucks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''finetune-vs-scratch-gHiQbun3-py3.8'': poetry)'
# name: python3
# ---
# +
import glob
sorted(glob.glob("../../data/irony/*.csv"))
# +
import pandas as pd

dfs_train = []
# Column renames from the raw IroSvA CSV headers to our canonical names.
mapping = {
    "ID": "id",
    "TOPIC": "topic",
    "IS_IRONIC": "is_ironic",
    "MESSAGE": "text"
}
# Bug fix: iterate a tuple rather than a set literal so the concatenation
# order of the per-language frames is deterministic across runs
# (set iteration order depends on PYTHONHASHSEED).
for lang in ("cu", "es", "mx"):
    name = f"../../data/irony/irosva.{lang}.training.csv"
    print(name)
    df = pd.read_csv(name).rename(columns=mapping).set_index("id")
    df["lang"] = lang
    dfs_train.append(df)
df_train = pd.concat(dfs_train)
df_train["split"] = "train"
df_train
# +
dfs_test = []
# Bug fix: iterate a tuple rather than a set literal so the concatenation
# order is deterministic across runs (see the training loop above).
for lang in ("cu", "es", "mx"):
    name = f"../../data/irony/irosva.{lang}.test.csv"
    gold_name = f"../../data/irony/irosva.{lang}.test.truth.csv"
    print(name)
    df = pd.read_csv(name).rename(columns=mapping)
    # Drop the placeholder label column and merge in the gold labels by id.
    del df["is_ironic"]
    df_gold = pd.read_csv(gold_name).rename(columns=mapping)
    df = df.merge(df_gold.loc[:, ["id", "is_ironic"]], on="id")
    df["lang"] = lang
    dfs_test.append(df)
df_test = pd.concat(dfs_test).set_index("id")
df_test["split"] = "test"
df_test
# +
# Persist the combined train/test splits for downstream experiments.
df = pd.concat([df_train, df_test])
df.to_csv("../../data/irony/irosva_dataset.csv")
# -
# ## Reload
# +
df = pd.read_csv("../../data/irony/irosva_dataset.csv")
df
# -
# ## Datasets
df["split"].value_counts()
# +
# %load_ext autoreload
# %autoreload 2
from finetune_vs_scratch.irony import load_datasets, run
# Presumably fine-tunes and evaluates the irony classifier on GPU with the
# BETO (Spanish BERT) checkpoint — see finetune_vs_scratch.irony.run.
run("dccuchile/bert-base-spanish-wwm-cased", "cuda")
# +
# %load_ext autoreload
# %autoreload 2
from finetune_vs_scratch.irony import load_datasets, run
# Same run with the RoBERTuito tweet-domain checkpoint.
run("finiteautomata/robertuito-base-uncased", "cuda")
# -
| notebooks/misc/Irony.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# `ApJdataFrames` McClure
# ---
# `Title`: THE EVOLUTIONARY STATE OF THE PRE-MAIN SEQUENCE POPULATION IN OPHIUCHUS: A LARGE INFRARED SPECTROGRAPH SURVEY
# `Authors`: <NAME>.
#
# Data is from this paper:
# http://iopscience.iop.org/0067-0049/188/1/75/
import warnings
warnings.filterwarnings("ignore")
from astropy.io import ascii
import pandas as pd
# ## Table 1 - Target Information for Ophiuchus Sources
# Tab-separated machine-readable table from the journal site; the first three
# lines are header text and the final line is a footer note.
tbl1 = pd.read_csv("http://iopscience.iop.org/0067-0049/188/1/75/suppdata/apjs330182t1_ascii.txt",
                   sep="\t", na_values=" ... ", skiprows=[0,1,2], skipfooter=1, usecols=range(9))
tbl1.head()
# ## Table 2 - Spectral Type Information for the Entire Sample
tbl2 = pd.read_csv("http://iopscience.iop.org/0067-0049/188/1/75/suppdata/apjs330182t2_ascii.txt",
                   sep="\t", na_values=" ... ", skiprows=[0,1,2,4], skipfooter=4)
# NOTE(review): presumably an artifact column from a trailing delimiter in the
# source file — verify before relying on the column count.
del tbl2["Unnamed: 13"]
tbl2.head()
# ##Merge the two catalogs
# Outer merge keeps sources that appear in only one of the two tables.
tbl1_2_merge = pd.merge(tbl1[["Name", "R.A. (J2000)", "Decl. (J2000)"]], tbl2, how="outer")
tbl1_2_merge.tail()
# ##Save data
# Boolean selection masks over the merged catalog.
# Bug fix: the original chained assignment `lowAv = nonBinary = ...` assigned
# the A_V mask to both names before nonBinary was overwritten on the next line.
lowAv = tbl1_2_merge['A_V'] < 10.0
# A value never equals itself only when it is NaN, so this mask selects
# sources with no multiplicity flag.
nonBinary = tbl1_2_merge['Mult.'] != tbl1_2_merge['Mult.']
classIII = tbl1_2_merge['Class'] == 'III'
wtts = tbl1_2_merge['TT Type'] == 'WTTS'
diskless = tbl1_2_merge['State'] == 'Photosphere'
for val in [lowAv, nonBinary, classIII, wtts, diskless]:
    print(val.sum())
# +
sample = nonBinary & diskless
sample.sum()
# -
import os
# Bug fix: the first to_csv ran before the `! mkdir` shell magic, which fails
# on a fresh checkout; create the output directory up front instead.
os.makedirs('../data/McClure2010', exist_ok=True)
tbl1_2_merge.to_csv('../data/McClure2010/tbl1_2_merge_all.csv', index=False)
tbl1_2_merge.columns
tbl1_2_merge[wtts]
tbl1_2_merge.to_csv("../data/McClure2010/tbl1_2_merge.csv", index=False, sep='\t')
# *The end*
| notebooks/McClure2010.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.0
# language: julia
# name: julia-1.0
# ---
# Helpful packages for working with images and factorizations
# using Pkg; Pkg.add("Images")
# using Pkg; Pkg.add("ImageMagick") # And this allows us to load JPEG-encoded images
using Images, LinearAlgebra, Interact
# ### Using a SVD to compress an image
#
# In this exercise, we'll use a singular value decomposition (SVD) to compress an image -- so that we can store an image without keeping around "unnecessary" information.
#
# To start, let's define a singular value decomposition. In a SVD, we take a matrix $A$ and factorize it so that
#
# $$A = USV^T$$
#
# where matrices $U$ and $V$ are unitary and hold our singular vectors. Matrix $S$ is diagonal and stores our singular values in decreasing order from top/left to bottom/right.
#
# In Julia, our images are stored as arrays, so we can think of `yellowbanana` as a matrix
file = download("https://uploads6.wikiart.org/images/salvador-dali/the-persistence-of-memory-1931.jpg!Large.jpg")
img = load(file)
size(img)
img[24,24] # Each element in the array is a color
dump(img[24,24])
# We can extract each "channel" of red, green, and blue and view each independently:
channels = Float64.(channelview(img))
Gray.(channels[1, :, :])
# That means we can take the SVD of this image. So, we can store this picture of a banana as sets of singular vectors and singular values.
#
# **The reason this is important** is that we'll find that we do **not** need to keep track of *all* the singular vectors and *all* the singular values to store an image that still looks like a banana! This means we can choose to keep only the important information, throw away the rest, and thereby "compress" the image.
# If we don't throw away any data, we get back what we started with:
U, S, V = svd(channels[1,:,:])
Gray.(U * Diagonal(S) * V')
# But of course we're not doing any compression here — the sizes of U, S, and V are bigger than our original matrix! This is like the opposite of compression. The key is that the values are stored in decreasing order so we can start throwing things away.
sum(length.((U, S, V)))
length(img)
Gray.(U[:, 1:25] * Diagonal(S[1:25]) * V[:, 1:25]')
sum(length.((U[:, 1:25], S[1:25], V[:, 1:25])))/length(img)
# Of course this is just one channel of the image. Let's put it all back together and see how we can compress the different channels to find an acceptable compression level.
# +
# rank_approx(M, k): best rank-k approximation of M via a truncated SVD,
# clamped into the displayable grayscale range [0, 1].
function rank_approx(M, k)
    U, S, V = svd(M)
    M = U[:, 1:k] * Diagonal(S[1:k]) * V[:, 1:k]'
    M = min.(max.(M, 0.0), 1.)
end
n = 100
# Interactive sliders: pick a separate truncation rank per RGB channel and
# recombine the approximated channels into one color image.
@manipulate for k1 in 1:n, k2 in 1:n, k3 in 1:n
    colorview( RGB,
        rank_approx(channels[1,:,:], k1),
        rank_approx(channels[2,:,:], k2),
        rank_approx(channels[3,:,:], k3)
    )
end
# -
# **So how can we use a SVD to determine what information in an image is really important?**
#
# The singular values tell us!
#
# If we have matrices $U$, $S$, and $V$ from our image, we can rebuild that image with the matrix product $USV^T$.
#
# Taking this matrix product is the same as adding together the outer products of each corresponding pair of vectors from $U$ and $V$, scaled by a singular value ($\sigma$) from $S$. In other words, for a (100 x 100) pixel image,
#
# $$A_{image} = USV^T = \sum_{i = 1}^{100} \sigma_i \mathbf{u_i}\mathbf{v_i'} $$
#
# Every outer product $u_i * v_i'$ creates a (100 x 100) matrix. Here we're summing together one hundred (100 x 100) matrices in order to create the original matrix $A_{image}$. The matrices at the beginning of the series -- those that are scaled by **large** singular values -- will be **much** more important in recreating the original matrix $A_{image}$.
#
# This means we can approximate $A_{image}$ as
#
# $$A_{image} \approx \sum_{i = 1}^{n} \sigma_i \mathbf{u_i}\mathbf{v_i'}$$
#
# where $n < 100$.
#
#
# #### Exercise
#
# Write a function called `compress_image`. Its input arguments should be an image and the factor by which you want to compress the image. A compressed grayscale image should display when `compress_image` is called.
#
# For example,
#
# ```julia
# compress_image("images/104_100.jpg", 33)
# ```
#
# will return a compressed image of a grayscale banana built using 3 singular values. (This image has 100 singular values, so use `fld(100, 33)` to determine how many singular values to keep. `fld` performs "floor" division.)
#
# *Hints*:
#
# * Perform the SVD on the `channelview` of a grayscale image.
# * In an empty input cell, execute `?svd` to find a function that wil perform an SVD for you.
| introductory-tutorials/intro-to-julia/long-version/050 Compressing an Image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (base)
# language: python
# name: base
# ---
import time
start= time.time()
from PIL import Image
import numpy as np
# Load the maze image. Per the searches below: green (45,204,113) marks the
# start, red (231,76,60) the goal, and white (255,255,255) cells are treated
# as impassable walls.
im = Image.open("Task_1_Low.png")
np_im = np.array(im)
# PIL reports (width, height); the numpy array is indexed [row, col].
width,height = im.size
def find_start():
    """Return the (row, col) of the first green pixel (45, 204, 113) — the
    maze's start cell — or None if no such pixel exists."""
    for row in range(height):
        for col in range(width):
            r, g, b = np_im[row, col]
            if (r, g, b) == (45, 204, 113):
                return row, col
def find_end():
    """Return the (row, col) of the first red pixel (231, 76, 60) — the
    maze's goal cell — or None if no such pixel exists."""
    for row in range(height):
        for col in range(width):
            r, g, b = np_im[row, col]
            if (r, g, b) == (231, 76, 60):
                return row, col
def min_index(list):
    """Return the index of the entry of *list* whose value in the global
    `dist` array is smallest (ties resolve to the last occurrence).

    NOTE: the parameter name shadows the builtin ``list`` (kept for
    interface compatibility)."""
    best = 0
    for i in range(len(list)):
        if dist[list[i]] <= dist[list[best]]:
            best = i
    return best
x,y=find_start()  # start cell (row, col)
# parent[r, c] stores the direction code of the move that reached (r, c);
# path() below decodes these codes to walk the route back to the start.
parent = np.zeros((height,width))
# Tentative distances, initialised to a large "infinity" sentinel.
dist = np.ones((height,width))*10000000
dist[x,y]=0
# Frontier of cells to process.
# NOTE(review): this shadows the builtin open() for the rest of the script.
open = [(x,y)]
# Neighbour offsets paired with the parent-direction code recorded for each
# move; the codes must stay in sync with the decoding table in path() below.
# NOTE: diagonal steps also cost 1, so distances are chessboard-style.
neighbour_moves = [
    (1, 0, 1), (-1, 0, 3), (0, 1, 4), (0, -1, 2),
    (1, 1, 8), (1, -1, 5), (-1, 1, 7), (-1, -1, 6),
]
while len(open):
    # Pop the frontier cell with the smallest tentative distance
    # (min_index hoisted so it is computed once, not twice).
    best = min_index(open)
    c_x, c_y = open.pop(best)
    found_end = False
    for d_x, d_y, code in neighbour_moves:
        n_x, n_y = c_x + d_x, c_y + d_y
        if not (0 <= n_x < height and 0 <= n_y < width):
            continue
        a, b, c = np_im[n_x, n_y]
        # Relax the neighbour unless it is a wall (white pixel).
        if dist[n_x, n_y] > dist[c_x, c_y] + 1 and (a, b, c) != (255, 255, 255):
            dist[n_x, n_y] = dist[c_x, c_y] + 1
            open.append((n_x, n_y))
            parent[n_x, n_y] = code
            np_im[n_x, n_y] = (255, 0, 0)  # mark as visited
            # Bug fix: the original compared a tuple against a *list*
            # ((a,b,c)==[231,76,60]), which is always False in Python, so the
            # intended early exit on reaching the red goal pixel never fired.
            if (a, b, c) == (231, 76, 60):
                found_end = True
                break
    if found_end:
        break
# new_im = Image.fromarray(np_im)
# new_im.show()
def path(a,b):
    # Walk backwards from (a, b) to the start cell, recolouring the route
    # green. Each parent code written by the search above is inverted here:
    # the code maps to the step that returns to the predecessor cell.
    while 1:
        x,y,z = np_im[a,b]
        # Stop once the start cell's green marker (45, 204, 113) is reached.
        if (x,y,z) ==(45,204,113):
            break
        np_im[a,b]=(0,255,0)
        if(parent[a,b]==1):      # reached by (+row) -> step back (-row)
            a=a-1
        elif (parent[a,b]==2):   # reached by (-col) -> step back (+col)
            b=b+1
        elif (parent[a,b]==3):   # reached by (-row) -> step back (+row)
            a=a+1
        elif (parent[a,b]==4):   # reached by (+col) -> step back (-col)
            b=b-1
        elif (parent[a,b]==5):   # diagonal (+row,-col) -> back (-row,+col)
            a=a-1
            b=b+1
        elif (parent[a,b]==6):   # diagonal (-row,-col) -> back (+row,+col)
            a=a+1
            b=b+1
        elif (parent[a,b]==7):   # diagonal (-row,+col) -> back (+row,-col)
            a=a+1
            b=b-1
        elif (parent[a,b]==8):   # diagonal (+row,+col) -> back (-row,-col)
            a=a-1
            b=b-1
# NOTE(review): the goal cell (82, 96) is hard-coded here even though
# find_end() is defined above — confirm it matches the red pixel's location.
path(82,96)
new_im = Image.fromarray(np_im)
new_im.show()
new_im.save("Dijsktra Case2 Path.png")
end=time.time()
s= end-start
print("Time=",s)
print("Cost=",dist[82,96])
# Upscale the solved maze with BOX resampling so the cells stay crisp
# blocks in the saved high-resolution copy.
c = Image.open('Dijsktra Case2 Path.png')
d = c.resize((1000,1000), resample=Image.BOX)
d.save('Dijsktra Case2 Path High.png')
| Task 1/.ipynb_checkpoints/Dijsktra Case2 Path better1.4-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Load the required libraries
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl

# os.system('cls||clear') clears the console window
os.system('cls||clear')
print('Введите путь до файла с данными для обучения модели:')
# Path (including file name and extension) of the training dataset
file_path = input()
# Load the dataset
data = pd.read_excel(file_path)

# Progress message
os.system('cls||clear')
print('Идет предобработка данных (исключение выбросов)')
# Outlier removal, repeated three times: per column, values outside the
# Tukey fences (1.5 * IQR beyond the quartiles) are replaced with NaN, and
# rows containing any NaN are then dropped.
# Fix: the original shadowed the built-ins `max` and `min` with the fence
# values; they are now named upper_bound / lower_bound.
for _ in range(3):
    for column in data.columns:
        q75, q25 = np.percentile(data.loc[:, column], [75, 25])
        intr_qr = q75 - q25
        upper_bound = q75 + (1.5 * intr_qr)
        lower_bound = q25 - (1.5 * intr_qr)
        data.loc[data[column] < lower_bound, column] = np.nan
        data.loc[data[column] > upper_bound, column] = np.nan
    # Drop the rows flagged as outliers (NaN in some column)
    data = data.dropna(axis=0)

# Progress message
os.system('cls||clear')
print('Идет нормализация данных')
# Normalize the data into the [0, 1] range
from sklearn import preprocessing
minmaxscalar = preprocessing.MinMaxScaler()
col = data.columns
result = minmaxscalar.fit_transform(data)
minmaxresult = pd.DataFrame(result, columns=col)
# Progress message
os.system('cls||clear')
print('Идет настройка библиотек')
# +
#Импортируем библиотеки для построения моделей
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
#Импорт TensorFlow
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# -
# Names of the forecasting models offered in the selection menu below.
models = ["LinearRegression", "KNeighborsRegressor", "GradientBoostingRegressor", "NeuralNetwork"]
def LRmodel(test_features, test_labels):
    """Fit and return a linear-regression model on the given training data.

    Args:
        test_features: training input columns.
        test_labels: training target column.
    """
    # positive=True was selected during hyper-parameter tuning.
    regressor = LinearRegression(positive=True)
    regressor.fit(test_features, test_labels)
    return regressor
def KNRmodel(test_features, test_labels):
    """Fit and return a k-nearest-neighbours regressor on the training data.

    The hyper-parameters (brute-force search, 100 distance-weighted
    neighbours) were selected during tuning.
    """
    regressor = KNeighborsRegressor(algorithm='brute', leaf_size=10, n_neighbors=100, weights='distance')
    regressor.fit(test_features, test_labels)
    return regressor
def GBRmodel(test_features, test_labels):
    """Fit and return a gradient-boosting regressor on the training data.

    Uses the tuned hyper-parameters (least-absolute-deviation loss,
    depth-2 trees). np.ravel flattens the target to the 1-D vector the
    booster expects.
    """
    regressor = GradientBoostingRegressor(loss='lad', max_depth=2)
    regressor.fit(test_features, np.ravel(test_labels))
    return regressor
# NNmodel: builds, trains, evaluates and interactively queries a TF1 network.
def NNmodel(test_features, test_labels, Xtest1, Ytest1, colni, colno, data_v):
    """Train a 4-hidden-layer TF1 regression network and run one forecast.

    Trains on (test_features, test_labels), reports the mean absolute error
    on (Xtest1, Ytest1), then prompts the user for input values (via
    InputValues), predicts the target, denormalizes it against data_v and
    prints the forecast.

    Args:
        test_features, test_labels: training inputs / target (DataFrames).
        Xtest1, Ytest1: held-out test inputs / target.
        colni: list of input column names (prompted for one by one).
        colno: list holding the single target column name.
        data_v: original (unnormalized) dataset, used to undo the scaling.

    Returns:
        The open tf.Session holding the trained graph.
    """
    # Reshape the training frames into plain arrays
    trgn_data = test_labels.values
    trnn_data = test_features.values
    trgn_data = np.ravel(trgn_data)
    Xtrnn = trnn_data
    Ytrnn = trgn_data
    # Placeholders.
    # NOTE(review): the input placeholder hard-codes 10 features — this
    # assumes exactly 10 input columns are selected; confirm with callers.
    X = tf.placeholder(dtype=tf.float32, shape=[None, 10])
    Y = tf.placeholder(dtype=tf.float32, shape=[None])
    # Weight / bias initializers
    sigma = 1
    weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=sigma)
    bias_initializer = tf.zeros_initializer()
    # Architecture sizes: 10 -> 32 -> 16 -> 8 -> 4 -> 1
    n_start = 10
    n_neurons_1 = 32
    n_neurons_2 = 16
    n_neurons_3 = 8
    n_neurons_4 = 4
    n_target = 1
    # Layer 1: hidden weights and biases
    W_hidden_1 = tf.Variable(weight_initializer([n_start, n_neurons_1]))
    bias_hidden_1 = tf.Variable(bias_initializer([n_neurons_1]))
    # Layer 2: hidden weights and biases
    W_hidden_2 = tf.Variable(weight_initializer([n_neurons_1, n_neurons_2]))
    bias_hidden_2 = tf.Variable(bias_initializer([n_neurons_2]))
    # Layer 3: hidden weights and biases
    W_hidden_3 = tf.Variable(weight_initializer([n_neurons_2, n_neurons_3]))
    bias_hidden_3 = tf.Variable(bias_initializer([n_neurons_3]))
    # Layer 4: hidden weights and biases
    W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3, n_neurons_4]))
    bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))
    # Output layer: weights and biases
    W_out = tf.Variable(weight_initializer([n_neurons_4, n_target]))
    bias_out = tf.Variable(bias_initializer([n_target]))
    # Hidden layers (ReLU activations)
    hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
    hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))
    hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2, W_hidden_3), bias_hidden_3))
    hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3, W_hidden_4), bias_hidden_4))
    # Output layer (transposed to line up with the 1-D label placeholder)
    out = tf.transpose(tf.add(tf.matmul(hidden_4, W_out), bias_out))
    # Cost function: mean squared error
    mse = tf.reduce_mean(tf.squared_difference(out, Y))
    # Optimizer
    opt = tf.train.AdamOptimizer().minimize(mse)
    # Create the session
    netn2 = tf.Session()
    # Run the variable initializer
    netn2.run(tf.global_variables_initializer())
    # Number of epochs and mini-batch size
    epochs = 100
    batch_size = 50
    for e in range(epochs):
        # Shuffle the training data each epoch
        shuffle_indices = np.random.permutation(np.arange(len(Ytrnn)))
        Xtrnn = Xtrnn[shuffle_indices]
        Ytrnn = Ytrnn[shuffle_indices]
        # Mini-batch training
        for i in range(0, len(Ytrnn) // batch_size):
            start = i * batch_size
            batch_x = Xtrnn[start:start + batch_size]
            batch_y = Ytrnn[start:start + batch_size]
            netn2.run(opt, feed_dict={X: batch_x, Y: batch_y})
    # Estimate accuracy on the test set
    pred1 = netn2.run(out, feed_dict={X: Xtest1})
    predict = np.reshape(pred1,(pred1.size, 1))
    errors = abs(predict - Ytest1)
    print('Средняя абсолютная ошибка оценки параметра', end =" ")
    print(np.mean(errors))
    # Prompt for (and normalize) the values to forecast from
    imp_values = InputValues(colni, data_v)
    # Run the forecast
    predict_label = netn2.run(out, feed_dict={X: imp_values})
    # Convert back from normalized to original units
    col = colno
    # Parameters that were used for the normalization
    minv = np.min(data_v[col])
    maxv = np.max(data_v[col])
    predict_label[0] = predict_label[0]*(maxv - minv) + minv
    print(f"Прогнозное значение параметра {col[0]} составляет {predict_label[0][0]}")
    return netn2
def evaluate(model, test_features, test_labels):
    """Print the model's mean absolute error and return its accuracy.

    Accuracy is 100 - MAPE, where MAPE is the mean of |error| / actual
    over the test set.
    """
    predictions = model.predict(test_features)
    # Absolute deviations between predicted and actual values.
    residuals = abs(predictions - test_labels)
    mape = 100 * np.mean(residuals / test_labels)
    accuracy = 100 - mape
    print('Средняя абсолютная ошибка: {:0.4f}'.format(np.mean(residuals.values)))
    #print('Точность = {:0.2f}%.'.format(accuracy[0]))
    return accuracy
def evaluate_2(model, test_features, test_labels):
    """Like evaluate(), but for models whose predict() returns a flat array.

    The 1-D prediction vector is reshaped to a column before it is compared
    with the label frame. Returns 100 - MAPE.
    """
    predictions = model.predict(test_features)
    # Column-vector view of the flat prediction array: [[p1], [p2], ...]
    predict = np.reshape(predictions, (predictions.size, 1))
    residuals = abs(predict - test_labels)
    mape = 100 * np.mean(residuals / test_labels)
    accuracy = 100 - mape
    print('Средняя абсолютная ошибка: {:0.4f}'.format(np.mean(residuals.values)))
    #print('Точность = {:0.2f}%.'.format(accuracy[0]))
    return accuracy
def InputValues(cols, maindata):
    """Prompt the user for one value per column; return a 1-row DataFrame.

    Each entered value is min-max normalized against the corresponding
    column of maindata, matching the scaling applied to the training data.
    """
    input_val = pd.DataFrame()
    for name in cols:
        prompt = f"Введите значение параметра ({name}): "
        raw_value = float(input(prompt))
        # Rescale into [0, 1] using the original column's value range.
        lo = np.min(maindata[name])
        hi = np.max(maindata[name])
        input_val[name] = [(raw_value - lo) / (hi - lo)]
    return input_val
# Interactive application loop: choose a target column, pick the input
# features, train the selected model and run forecasts until the user quits.
exitway = 'Y'  # loop sentinel: entering 'N' exits the application
while exitway != 'N':
    os.system('cls||clear')
    print('Выберите целевую переменную для прогнозирования из предложенного списка:')
    colnames_in = []
    # List every available column; all start as candidate input features
    i = 0
    while i < len(data.columns):
        print(data.columns[i])
        colnames_in.append(data.columns[i])
        i += 1
    label = input()  # the chosen target column name
    colnames_out = []
    colnames_out.append(label)
    colnames_in.remove(label)  # the target is removed from the input list
    param = 'Y'  # sentinel for the feature-exclusion loop below
    # Loop that lets the user exclude columns from the input features
    while param != 'N':
        os.system('cls||clear')
        print('Выберите переменную для исключения из списка параметров, либо введите N для завершения исключения параметров:')
        # Show the remaining candidate features
        k = 0
        while k < len(colnames_in):
            print(colnames_in[k])
            k += 1
        param = input()
        if param != 'N':
            colnames_in.remove(param)  # drop the selected column
            k = 0
    # Split the normalized data into target (trg) and inputs (trn)
    trg = minmaxresult[colnames_out]
    trn = minmaxresult[colnames_in]
    # Build the train and test samples (70/30 split)
    Xtrn, Xtest, Ytrn, Ytest = train_test_split(trn, trg, test_size=0.3)
    os.system('cls||clear')
    print('Для продолжения нажмите любую клавишу, для переподбора параметров нажмите Y')
    exitway = input()
    if exitway != 'Y':
        os.system('cls||clear')
        print('Выберите прогнозную модель из списка:')
        # Show the available forecasting models
        k = 0
        while k < len(models):
            print(models[k])
            k += 1
        model_name = input()
        # Progress message
        print('Идет обучение модели')
        if model_name == 'LinearRegression':
            model = LRmodel(Xtrn, Ytrn)
            os.system('cls||clear')
            # Accuracy on the held-out test set
            base_accuracy = evaluate(model, Xtest, Ytest)
            # Repeatedly read input values and forecast until 'END' is typed
            endparam = 'Y'
            while endparam != 'END':
                # Read and normalize the values to forecast from
                imp_values = InputValues(colnames_in, data)
                # Run the forecast
                predict_label = model.predict(imp_values)
                # Convert the normalized prediction back to original units
                col = colnames_out
                # Parameters that were used for the normalization
                minv = np.min(data[col])
                maxv = np.max(data[col])
                # Rescale to the original value range
                predict_label[0] = predict_label[0]*(maxv - minv) + minv
                print(f"Прогнозное значение параметра {col[0]} составляет {predict_label[0][0]}")
                print("Для продолжения нажмите любую кнопку, для завершения введите END")
                endparam = input()
        elif model_name == 'KNeighborsRegressor':
            model = KNRmodel(Xtrn, Ytrn)
            os.system('cls||clear')
            # Accuracy on the held-out test set
            base_accuracy = evaluate(model, Xtest, Ytest)
            # Repeatedly read input values and forecast until 'END' is typed
            endparam = 'Y'
            while endparam != 'END':
                imp_values = InputValues(colnames_in, data)
                predict_label = model.predict(imp_values)
                col = colnames_out
                minv = np.min(data[col])
                maxv = np.max(data[col])
                predict_label[0] = predict_label[0]*(maxv - minv) + minv
                print(f"Прогнозное значение параметра {col[0]} составляет {predict_label[0][0]}")
                print("Для продолжения нажмите любую кнопку, для завершения введите END")
                endparam = input()
        elif model_name == 'GradientBoostingRegressor':
            model = GBRmodel(Xtrn, Ytrn)
            os.system('cls||clear')
            # evaluate_2 handles the booster's flat (1-D) prediction array
            base_accuracy = evaluate_2(model, Xtest, Ytest)
            # Repeatedly read input values and forecast until 'END' is typed
            endparam = 'Y'
            while endparam != 'END':
                imp_values = InputValues(colnames_in, data)
                predict_label = model.predict(imp_values)
                col = colnames_out
                minv = np.min(data[col])
                maxv = np.max(data[col])
                predict_label[0] = predict_label[0]*(maxv - minv) + minv
                print(f"Прогнозное значение параметра {col[0]} составляет {predict_label[0]}")
                print("Для продолжения нажмите любую кнопку, для завершения введите END")
                endparam = input()
        elif model_name == 'NeuralNetwork':
            os.system('cls||clear')
            # NNmodel trains, evaluates and runs its own forecast dialogue
            model = NNmodel(Xtrn, Ytrn, Xtest, Ytest, colnames_in, colnames_out, data)
        else:
            os.system('cls||clear')
            print('Ошибка при выборе модели, вернитесь к подбору параметров')
    print('Для возвращения к выбору параметров нажмите любую клавишу. Для завершения введите N')
    exitway = input()
| app.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# ### 生成数据
def gen_data():
    """Create a random linear-regression problem.

    Returns (x, y): x is a (20, 5) integer matrix with entries in [0, 30),
    and y = x @ w.T + b for a hidden integer weight row w of shape (1, 5)
    and a random bias b in [0, 1). The ground-truth w and b are printed.
    """
    x = np.random.randint(0, 30, (20, 5))
    w = np.random.randint(0, 15, (1, 5))
    b = np.random.rand()
    y = np.dot(x, w.T) + b
    print(f'w:{w}')
    print(f'b:{b}')
    return x, y
# Draw one synthetic dataset (also prints the hidden w and b for reference).
gt_x,gt_y = gen_data()
# ### 预测y
def predict(w, b, x):
    """Return the linear-model output x @ w.T + b."""
    return np.dot(x, w.T) + b
# ### loss:MSE
# NOTE: both the loss and the gradient must be built from (pred_y - gt_y);
# with the sign reversed, gradient descent diverges.
def loss_MSE(gt_x, gt_y, w, b):
    """Mean squared error of the linear model (w, b) on (gt_x, gt_y).

    Bug fix: the original computed np.sum(diff)**2 / n — the square of the
    *summed* residuals — which lets errors of opposite sign cancel out.
    True MSE averages the squared residuals.
    """
    pred_y = predict(w, b, gt_x)
    return np.sum((pred_y - gt_y) ** 2) / len(gt_y)
# ### 梯度计算
def cal_step_gradient(gt_x, gt_y, w, b, lr):
    """Perform one gradient-descent step; return the updated (w, b).

    Bug fix: the original assigned the raw residual *vector* to the bias
    gradient (db = diff), silently turning the scalar bias into an (n, 1)
    array after the first step. The bias gradient is the sum of the
    residuals, matching the (unscaled) weight gradient so the learning
    rate tuned for this code keeps its meaning. The dead, hard-coded
    np.zeros((1, 5)) initialisation is removed, so any feature count works.
    """
    pred_y = predict(w, b, gt_x)
    diff = pred_y - gt_y
    # Unscaled MSE gradients (constant factors are folded into lr).
    dw = np.dot(diff.T, gt_x)
    db = np.sum(diff)
    w = w - lr * dw
    b = b - lr * db
    return w, b
# ### 训练
def train(gt_x, gt_y, lr, max_iter):
    """Run max_iter gradient-descent steps and return the fitted (w, b).

    The loss and the current parameters are printed after every step.
    """
    # Start from the zero model.
    w = np.zeros((1, 5))
    b = 0
    for _ in range(max_iter):
        w, b = cal_step_gradient(gt_x, gt_y, w, b, lr)
        print(f'loss is {loss_MSE(gt_x, gt_y, w, b)}')
        print(f'w:{w}')
        print(f'b:{b}')
    return w, b
# Fit the linear model on the generated data (lr = 1e-5, 800 iterations).
train(gt_x,gt_y,0.00001,800)
| week3/linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In This notebook we utilize the model previously created to run RUL prediction on the engines in the test data files.
# +
import pandas as pd
import numpy as np
import math
import glob
import os
from tqdm import tqdm, tqdm_notebook
# Setting seed for reproducability
np.random.seed(101)
PYTHONHASHSEED = 0
import tensorflow as tf
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, LSTM, GRU, Masking, Activation, RepeatVector
from sklearn import preprocessing
from sklearn.externals import joblib
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from data_generator import TSDataGenerator
import data_util
from util import rmse
# -
# ## Setup
DATA_DIR = os.path.abspath("./data")
MODEL_DIR = os.path.abspath("./model/engine20190122T0916")

# +
# Raw-file column layout: engine id, cycle counter, three operational
# settings and twenty-one sensor channels.
cols = ['id', 'cycle']
setting_cols = ['setting' + str(n) for n in range(1, 4)]
cols = cols + setting_cols
sensor_cols = ['s' + str(n) for n in range(1, 22)]
cols = cols + sensor_cols

# The data already comes sorted, but to be on the safe side we keep an
# explicit ordering by engine id and ascending cycle.
sort_cols = ['id', 'cycle']
# -
# ### Load Model
# +
# Network inputs: normalized cycle, a 'condition' feature, the three
# operational settings and the twenty-one sensors; the single label is RUL.
feature_cols = ['cycle_norm', 'condition']
label_cols = ['RUL']

setting_cols = ['setting' + str(n) for n in range(1, 4)]
feature_cols = feature_cols + setting_cols
sensor_cols = ['s' + str(n) for n in range(1, 22)]
feature_cols = feature_cols + sensor_cols
# -
# Keras requires that you use the same batch size when predicting as you used when training. Here we will create the same model, but parameterize the shapes. This will allow us to specify a model with a batch size of 1.
# +
def create_model(batch_size, seq_length, num_features, num_labels):
    """Build the RUL regression network (Masking -> LSTM -> 3 Dense layers).

    Bug fix: the body previously ignored the ``seq_length`` parameter and
    read the module-level ``sequence_length`` global instead, so the
    parameter had no effect; it is now used throughout.

    Args:
        batch_size: fixed batch size baked into the input shape (Keras
            requires the same batch size at predict time as at train time).
        seq_length: number of time steps per input sequence.
        num_features: features per time step.
        num_labels: output dimensionality (RUL -> 1).
    Returns:
        The uncompiled keras Sequential model.
    """
    model = Sequential()
    # Zero rows are padding; mask them out of the recurrence.
    model.add(Masking(mask_value=0., input_shape=(seq_length, num_features)))
    model.add(LSTM(
        input_shape=(seq_length, num_features),
        units=128,
        batch_input_shape=(batch_size, seq_length, num_features),
        stateful=False,
        dropout=0.05,
        return_sequences=False))
    model.add(Dense(units=64, activation='relu'))
    model.add(Dense(units=32, activation='relu'))
    model.add(Dense(units=num_labels, activation='relu'))
    return model
# +
sequence_length = 25 # Size of the series time window.
batch_size = 1 # Predict on a single engine sequence at a time.
num_features = len(feature_cols)
num_labels = len(label_cols)
model_path = os.path.join(MODEL_DIR,'engine_model.h5')
print("Loading model: ", model_path)
# Device to load the neural network on. /cpu:0 or /gpu:0
# Use CPU device to avoid conflict if training has the GPU
DEVICE = "/cpu:0"
#DEVICE = "/gpu:0"
with tf.device(DEVICE):
    # Rebuild the architecture with batch size 1, then load trained weights.
    inf_model = create_model(batch_size, sequence_length, num_features, num_labels)
    inf_model.load_weights(model_path)
# custom_objects={'rmse':rmse}
# inf_model = load_model(model_path, custom_objects=custom_objects)
inf_model.summary()
# -
# ### Load Pipeline
#
# +
# Load the preprocessing pipeline saved at training time — presumably the
# scaler/transformer applied to the features; confirm against the trainer.
pipeline_path = os.path.join(MODEL_DIR, 'engine_pipeline.pkl')
print("Loading pipeline: ", pipeline_path)
pipeline = joblib.load(pipeline_path)
# -
# ## Prediction
# We will iterate over all the test files to run a prediction on a single batch for each engine's containing its last sequence_length events.
# +
# Collect the test and RUL files; sorting keeps the two lists aligned by
# dataset index (test_FD001 <-> RUL_FD001, etc.).
path = os.path.join(DATA_DIR, "test_FD*.txt")
all_test_files = sorted(glob.glob(path))
path = os.path.join(DATA_DIR, "RUL_FD*.txt")
all_rul_files = sorted(glob.glob(path))
all_test_files
# +
# For every test file, predict each engine's RUL from its final window and
# collect [engine_id, prediction, actual RUL, prediction - RUL] rows.
test_prediction_results = []

for i, test_X_path in enumerate(all_test_files):
    # Read in the features
    test_df = data_util.load_data([test_X_path], cols, sort_cols)
    # Read in the labels (RUL)
    test_rul_df = data_util.load_rul_data([all_rul_files[i]], ['id', 'RUL_actual'])
    # Calculate the RUL and merge back to the test dataframe
    test_df = data_util.calc_test_rul(test_df, test_rul_df)
    # Transform with the training-time preprocessing pipeline
    test_df = data_util.transform(test_df, pipeline)

    results = []
    for engine_id in tqdm_notebook(test_df['id'].unique()):
        engine_df = test_df[ test_df['id'] == engine_id]

        # The last sequence_length entries form the single inference batch.
        # Engines with fewer cycles than that are skipped (padding is
        # handled by the later cell).
        # Fix: use >= instead of > so engines with exactly sequence_length
        # cycles — which the padded loop below also accepts — are kept.
        if engine_df.shape[0] >= sequence_length:
            last_batch = engine_df[feature_cols].iloc[-sequence_length:].values.reshape(batch_size, sequence_length, num_features)
            rul = engine_df['RUL'].iloc[-1:].values[0]
            prediction = inf_model.predict_on_batch(last_batch)[0,0]
            results.append([engine_id, prediction, rul, prediction-rul])

    test_prediction_results.append(np.array(results))
# -
# Plot the results by engine id. Those predictions above zero have over-estimated the actual RUL, while those below zero under-estimate the RUL. It is preferable to be less optimistic.
# +
# Scatter of (prediction - actual RUL) per engine, one subplot per test
# file; points above zero over-estimate the RUL, below zero under-estimate.
sns.set_style('darkgrid')
fig = plt.figure(figsize=(20, 4))
for i, r in enumerate(test_prediction_results):
    ax = plt.subplot(1, 4, i+1)
    # Subtract the base to show the original engine id
    ax.scatter(r[:,0] - data_util.path_to_id_base(all_test_files[i]), r[:,3], marker='.',)
    ax.set_ylim(-100, 140)
    ax.set_xlabel("Engine Id")
    ax.set_ylabel("Prediction - RUL")
    ax.set_title(os.path.basename(all_test_files[i]))
    ax.axhline(y=0, color='r', marker='.')
plt.tight_layout()
plt.show()
# -
# The histogram illustrates the bins that the differences fall in.
# +
# Distribution of the prediction errors (column 3) for each test file.
sns.set_style('darkgrid')
fig = plt.figure(figsize=(20, 6))
for i, r in enumerate(test_prediction_results):
    ax2 = plt.subplot(2, 4, i+5)
    sns.distplot(r[:,3], ax=ax2)
    ax2.set_title(os.path.basename(all_test_files[i]))
plt.tight_layout()
plt.show()
# -
# Let's plot a single file for a close-up view.
# Close-up on a single test file (index 1 -> test_FD002).
file_index = 1 # 1 is test_FD002
results = test_prediction_results[file_index]
base_id = data_util.path_to_id_base(all_test_files[file_index])
# +
# Per-engine error scatter for the selected file only.
fig = plt.figure(figsize=(10, 4))
plt.scatter(results[:,0] - base_id, results[:,3], marker='.', label="Diff")
plt.xlabel("Engine Id")
plt.ylabel("Prediction - RUL")
plt.title(os.path.basename(all_test_files[file_index]))
plt.axhline(y=0, color='r', marker='.')
plt.show()
# -
# This is another view based on the RUL. The red line indicates what perfect prediction would have looked like. The majority of the predictions were optimistic.
# +
# Predicted vs actual RUL; the red identity line marks perfect prediction.
fig = plt.figure(figsize=(6, 6))
plt.scatter(results[:,2], results[:,1], marker='.')
plt.plot(results[:,2], results[:,2], 'r')
plt.xlabel("RUL actual")
plt.ylabel("RUL predicted")
plt.title(os.path.basename(all_test_files[file_index]))
plt.show()
# -
# Show the prediction of 3 randomly selected engines from each test file.
# +
# Slide a sequence_length window over 3 randomly chosen engines per test
# file, predicting RUL at every step so the curve can be plotted over time.
test_prediction_results = []
for i, test_X_path in enumerate(all_test_files):
    # Read in the features
    test_df = data_util.load_data([test_X_path], cols, sort_cols)
    # Read in the labels (RUL)
    test_rul_df = data_util.load_rul_data([all_rul_files[i]], ['id', 'RUL_actual'])
    # Calculate the RUL and merge back to the test dataframe
    test_df = data_util.calc_test_rul(test_df, test_rul_df)
    # Transform
    test_df = data_util.transform(test_df, pipeline)
    results = []
    # Filter out engines too small to fit a complete sequence (see the
    # padded loop below for how short engines are handled).
    candidates = pd.DataFrame(test_df.groupby('id')['cycle'].max()).reset_index()
    candidates = candidates[ candidates['cycle'] >= sequence_length ]
    engine_ids = np.random.choice(candidates['id'], 3)
    for engine_id in engine_ids:
        engine_df = test_df[ test_df['id'] == engine_id]
        start_idx = 0
        end_idx = start_idx + sequence_length
        # Advance the window one cycle at a time until the engine runs out.
        while end_idx < engine_df.shape[0]:
            last_batch = engine_df[feature_cols].iloc[start_idx:end_idx].values.reshape(batch_size, sequence_length, num_features)
            rul = engine_df['RUL'].iloc[end_idx]
            cycle = engine_df['cycle'].iloc[end_idx]
            prediction = inf_model.predict_on_batch(last_batch)[0,0]
            results.append([engine_id, cycle, prediction, rul])
            start_idx += 1
            end_idx += 1
        # One result array per engine, then reset for the next engine.
        test_prediction_results.append(np.array(results))
        results = []
# +
def display_engine_prediction(prediction_results):
    """Plot actual RUL against predicted RUL over cycles for each engine.

    Args:
        prediction_results (list): list of per-engine arrays of shape
            (cycles, 4), whose columns are engine_id, cycle, prediction
            and actual RUL.
    """
    # Arrange the subplots in a grid of roughly n/3 columns.
    n = len(prediction_results)
    ncols = max(n//3,1)
    nrows = math.ceil(n/ncols)
    fig = plt.figure(figsize=(20, 10))
    for i, r in enumerate(prediction_results):
        engine_id = r[0,0]
        cycle = r[:,1]
        prediction = r[:,2]
        rul = r[:,3]
        ax = plt.subplot(nrows, ncols, i+1)
        l1, = ax.plot( cycle, rul, label='RUL')
        l2, = ax.plot( cycle, prediction, label='Prediction')
        ax.set_xlabel('cycle')
        ax.set_ylabel('RUL')
        ax.set_ylim(0)
        plt.title("Engine: {:.0f}".format(engine_id))
    # One shared legend for the whole figure (handles from the last axes).
    fig.legend((l1,l2), ('Actual', 'Prediction'), 'upper right')
    plt.tight_layout()
    plt.show()
# -
# Render the RUL-vs-prediction curves for the sampled engines.
display_engine_prediction(test_prediction_results)
# +
# Final prediction pass: like the first loop, but engines shorter than
# sequence_length are zero-padded at the front instead of being skipped.
test_prediction_results = []
for i, tmp_X_path in enumerate(all_test_files):
    # Read in the features
    tmp_df = data_util.load_data([tmp_X_path], cols, sort_cols)
    # Read in the labels (RUL)
    tmp_rul_df = data_util.load_rul_data([all_rul_files[i]], ['id', 'RUL_actual'])
    # Calculate the RUL and merge back to the test dataframe
    tmp_df = data_util.calc_test_rul(tmp_df, tmp_rul_df)
    # Transform
    tmp_df = data_util.transform(tmp_df, pipeline)
    results = []
    for engine_id in tqdm_notebook(tmp_df['id'].unique()):
        engine_df = tmp_df[ tmp_df['id'] == engine_id]
        # Long enough: use the last sequence_length cycles directly.
        if engine_df.shape[0] >= sequence_length:
            last_batch = engine_df[feature_cols].iloc[-sequence_length:].values.reshape(batch_size, sequence_length, num_features)
        else:
            # Here shape[0] < sequence_length, so the modulo equals the row
            # count and num_pad_rows is the shortfall to a full window.
            gap = engine_df.shape[0] % sequence_length
            num_pad_rows = sequence_length - gap
            # Zero rows are masked out by the model's Masking layer.
            pad_df = pd.DataFrame(np.zeros((num_pad_rows, num_features)),
                                  columns=feature_cols)
            last_batch = pd.concat([pad_df, engine_df[feature_cols]]).values.reshape(batch_size, sequence_length, num_features)
            padded_batch = last_batch
        rul = engine_df['RUL'].iloc[-1:].values[0]
        prediction = inf_model.predict_on_batch(last_batch)[0,0]
        results.append([engine_id, prediction, rul, prediction-rul])
        # Sanity check: the window dimension is exactly sequence_length
        # (trivially true given the reshape above).
        assert (last_batch.shape[1] % sequence_length) == 0
    test_prediction_results.append(np.array(results))
# -
| model_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Janani-harshu/Machine_Learning_Projects/blob/main/Covid19_death_prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="3dyBMQQq8RHo"
# Covid-19 is one of the deadliest viruses you’ve ever heard of. Mutations in covid-19 make it either more deadly or more infectious. We have seen a large number of deaths from covid-19 during the higher waves of cases. We can use historical data on covid-19 cases and deaths to predict the number of deaths in the future.
# In this notebook, I will take you through the task of Covid-19 deaths prediction with machine learning using Python.
# + [markdown] id="iDTK8rqw8aHF"
# ## Covid-19 Deaths Prediction (Case Study)
# You are given a dataset of Covid-19 in India from 30 January 2020 to 18 January 2022. The dataset contains information about the daily confirmed cases and deaths. Below are all the columns of the dataset:
#
# Date: Contains the date of the record
#
# Date_YMD: Contains date in Year-Month-Day Format
#
# Daily Confirmed: Contains the daily confirmed cases of Covid-19
#
# Daily Deceased: Contains the daily deaths due to Covid-19
#
# You need to use this historical data of covid-19 cases and deaths to predict the number of deaths for the next 30 days
# + id="IpO0VAy74-gn"
# Importing the libraries
import pandas as pd
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="OQpQgFFI5NGY" outputId="39218ea3-8544-47cd-dbdb-6c66edf1535a"
# Load the India-wide Covid-19 time series (daily confirmed / deceased).
data = pd.read_csv("COVID19 data for overall INDIA.csv")
print(data.head())
# + colab={"base_uri": "https://localhost:8080/"} id="BhuGsJPu5SeQ" outputId="e817e25c-e3a3-4347-db8f-a4b51a9f9c0d"
# Count missing values per column.
data.isnull().sum()
# + id="d_Cxc8LK5Sav"
# Drop the free-form "Date" column — presumably redundant with Date_YMD;
# confirm the two columns always agree.
data = data.drop("Date", axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="20keXH175SYj" outputId="9b89bcfc-bb65-4831-df95-e91523b82ee5"
# Box plot of daily confirmed Covid-19 cases over time.
import plotly.express as px
fig = px.box(data, x='Date_YMD', y='Daily Confirmed')
fig.show()
# + [markdown] id="OHdffxl_8t9m"
# ## Covid-19 Death Rate Analysis
# Now let’s visualize the death rate due to Covid-19:
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="e6fXjawh5SWM" outputId="6efb92a5-2bae-4341-b86e-4d01fffb1ec9"
# Compare total confirmed cases against total deaths in a funnel-area chart.
cases = data["Daily Confirmed"].sum()
deceased = data["Daily Deceased"].sum()
labels = ["Confirmed", "Deceased"]
values = [cases, deceased]
fig = px.funnel_area(data, values=values,
                     names=labels,
                     title='Daily Confirmed Cases vs Daily Deaths')
fig.show()
# + colab={"base_uri": "https://localhost:8080/"} id="2Aqd9cMx5SUW" outputId="8abf717f-27d0-4b55-a655-c4c5263a8546"
# Overall death rate: total deaths as a percentage of total confirmed cases.
death_rate = (data["Daily Deceased"].sum() / data["Daily Confirmed"].sum()) * 100
print(death_rate)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="xyAXifTb5SR2" outputId="5e3979ab-572f-4ca3-e76c-2c5f6c560584"
# Box plot of daily Covid-19 deaths over time.
import plotly.express as px
fig = px.box(data, x='Date_YMD', y='Daily Deceased')
fig.show()
# + id="ZtPhm56x7p55"
# !pip install AutoTS
# + [markdown] id="PedOfSgS89Jn"
# ## Covid-19 Deaths Prediction Model
# Now let’s move to the task of covid-19 deaths prediction for the next 30 days. Here I will be using the AutoTS library, which is one of the best Automatic Machine Learning libraries for Time Series Analysis.
# + colab={"base_uri": "https://localhost:8080/"} id="ld-je6305SPj" outputId="deef3802-34fd-4b87-da82-e8b850f095a9"
from autots import AutoTS
# AutoTS searches over many time-series models; target the next 30 days of
# daily deaths with the date frequency inferred from Date_YMD.
model = AutoTS(forecast_length=30, frequency='infer', ensemble='simple')
model = model.fit(data, date_col="Date_YMD", value_col='Daily Deceased', id_col=None)
# + colab={"base_uri": "https://localhost:8080/"} id="zyjh9y8T5SFP" outputId="e690c3ed-8699-41d4-87f0-3586d8942b9c"
# Forecast Covid-19 deaths for the next 30 days with the fitted model.
prediction = model.predict()
forecast = prediction.forecast
print(forecast)
# + [markdown] id="Q2tWeVXA9cll"
# So this is how we can predict covid-19 deaths with machine learning using the Python programming language. We can use the historical data of covid-19 cases and deaths to predict the number of deaths in the future. You can apply the same method for predicting covid-19 deaths and waves to the latest dataset.
| Covid19_death_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''py39-nlp'': conda)'
# language: python
# name: python3
# ---
# # Python - Object Oriented
# Source: https://www.tutorialspoint.com/python/python_classes_objects.htm
#
# #### Built-In Class Attributes
# +
class Employee:
    "Common base class for all employees"

    # Shared tally of how many Employee objects have been created.
    empCount = 0

    def __init__(self, name, salary):
        self.name = name
        self.salary = salary
        # Counted on the class, so the total is shared by every instance.
        Employee.empCount += 1

    def displayCount(self):
        print(f"Total Employee {Employee.empCount}")

    def displayEmployee(self):
        print("Name : ", self.name, ", Salary: ", self.salary)
# Built-in class attributes: docstring, class name, defining module, base
# classes, and the class namespace dictionary.
print("Employee.__doc__:", Employee.__doc__)
print("Employee.__name__:", Employee.__name__)
print("Employee.__module__:", Employee.__module__)
print("Employee.__bases__:", Employee.__bases__)
print("Employee.__dict__:", Employee.__dict__)
# -
# #### Class Inheritance
# +
class Parent: # define parent class
    # Attribute shared by the whole hierarchy; setAttr rebinds it on Parent.
    parentAttr = 100

    def __init__(self):
        print("Calling parent constructor")

    def parentMethod(self):
        print("Calling parent method")

    def setAttr(self, attr):
        # Written on the class, not the instance, so every object sees it.
        Parent.parentAttr = attr

    def getAttr(self):
        print("Parent attribute :", Parent.parentAttr)


class Child(Parent): # define child class
    def __init__(self):
        # Deliberately does NOT chain to Parent.__init__.
        print("Calling child constructor")

    def childMethod(self):
        print("Calling child method")


# Demo: the child instance reaches both inherited behaviour and class state.
c = Child()       # instance of child
c.childMethod()   # child calls its own method
c.parentMethod()  # calls the inherited parent method
c.setAttr(200)    # mutate the shared class attribute via the parent
c.getAttr()       # read it back via the parent
# -
# #### Overriding Methods
# +
# Method resolution starts at type(instance), so Child.myMethod wins.
class Parent:        # define parent class
    def myMethod(self):
        print("Calling parent method")
class Child(Parent): # define child class
    def myMethod(self):
        # Overrides Parent.myMethod for Child instances.
        print("Calling child method")
c = Child()          # instance of child
c.myMethod()         # child calls overridden method
# -
# #### Overloading Operators
# +
class Vector:
    """A 2-component vector supporting `+` and a readable string form."""

    def __init__(self, a, b):
        # Components keep their original public attribute names.
        self.a = a
        self.b = b

    def __str__(self):
        # Format string kept byte-identical so printed output is unchanged.
        return "Vector (%d, %d)" % (self.a, self.b)

    def __add__(self, other):
        """Component-wise addition; returns a new Vector."""
        total_a = self.a + other.a
        total_b = self.b + other.b
        return Vector(total_a, total_b)

v1 = Vector(2, 10)
v2 = Vector(5, -2)
print(v1 + v2)
# -
# #### Data Hiding
# +
class JustCounter:
    # Double leading underscore triggers name mangling: stored as
    # _JustCounter__secretCount, which discourages (not prevents) outside use.
    __secretCount = 0
    def count(self):
        # Augmented assignment reads the class attribute, then binds the
        # incremented value as an *instance* attribute.
        self.__secretCount += 1
        print(self.__secretCount)
counter = JustCounter()
counter.count()
counter.count()
# The mangled name is still reachable from outside the class:
print(counter._JustCounter__secretCount)
# -
# #### Python Accessing Attributes
# +
class Employee:
    """Base class for all employees; tracks how many have been created."""

    empCount = 0  # running total of Employee instances

    def __init__(self, name, salary):
        self.name, self.salary = name, salary
        # Count on the class itself so every instance shares the total.
        Employee.empCount = Employee.empCount + 1

    def displayCount(self):
        """Print the running instance total."""
        print("Total Employee %d" % Employee.empCount)

    def displayEmployee(self):
        """Print this employee's name and salary."""
        print("Name : ", self.name, ", Salary: ", self.salary)

# First object of the Employee class.
emp1 = Employee("Zara", 2000)
# Second object of the Employee class.
emp2 = Employee("Manni", 5000)
emp1.displayEmployee()
emp2.displayEmployee()
print("Total Employee %d" % Employee.empCount)
| novice/01-04/01-04_Latihan-Python - Object Oriented.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from astropy.io import fits
from astropy.wcs import WCS
from astropy.nddata import Cutout2D
from astropy.coordinates import SkyCoord
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
# -
# Parse the object catalogue: one "name<TAB>glon<TAB>glat" row per line
# (line[:-1] strips the trailing newline before splitting).
with open('all.tsv') as f:
    object_names = [{
        'name': line[:-1].split('\t')[0],
        'glon': np.float64(line[:-1].split('\t')[1]),
        'glat': np.float64(line[:-1].split('\t')[2])
    } for line in f]
object_names[:3]
# Cutout size in pixels.
size = (192, 192)
# +
ROOT = '../data/resaved_HPXcvt'
def perform_object(glon, glat):
    """Cut out a (5, size[0], size[1]) stack of Planck maps at one sky position.

    Parameters
    ----------
    glon, glat : float
        Galactic longitude/latitude of the cutout centre, in degrees.

    Returns
    -------
    numpy.ndarray of shape (5, size[0], size[1]) with one plane per Planck
    frequency (100/143/217/353/545 GHz), or None when any cutout has the
    wrong shape (map edge) or contains NaNs.
    """
    # Open the five frequency maps once and cache them on the function;
    # the original reopened all five FITS files on *every* call, which
    # dominated the runtime of the random-search loop that calls this.
    if not hasattr(perform_object, "_datasets"):
        datasets = []
        for freq in (100, 143, 217, 353, 545):
            hdu = fits.open(f'{ROOT}/Planck{freq}.fits')[0]
            datasets.append((hdu, WCS(hdu.header)))
        perform_object._datasets = datasets
    obj = np.zeros((5, size[0], size[1]))
    # The target position is the same for every frequency — build it once.
    position = SkyCoord(glon, glat, frame='galactic', unit='deg')
    for freq_idx, (hdu, wcs) in enumerate(perform_object._datasets):
        cutout = Cutout2D(hdu.data, position, size, wcs=wcs)
        data = np.array(cutout.data)
        # Reject partial cutouts and cutouts containing NaNs.
        if data.shape != size or np.isnan(data).any():
            return None
        obj[freq_idx] = data.astype(np.float32)
    return obj
results = []
# Rejection-sample random sky positions until 1564 clean cutouts are found.
# NOTE(review): no retry cap — loops forever if clean positions are too rare.
while len(results) < 1564:# * 5:
    glon, glat = np.random.uniform(0, 360), np.random.uniform(-90, 90)
    obj = None
    # try:
    obj = perform_object(glon, glat)
    # except Exception as e:
    #     print(e)
    if obj is not None and obj.shape == (5, size[0], size[1]):
        # Second tuple slot keeps the (data, name) layout of the SZ dataset.
        results.append((obj, None))
        if len(results) % 100 == 0:
            print(len(results))
# -
# NOTE(review): entries are (obj, None) tuples, never None, so this counter
# is always 0 and the `if x is not None` filters below are no-ops.
isnone = 0
for i in results:
    if i is None:
        isnone += 1
print(isnone)
sz_name = np.array([x[1] for x in results if x is not None])
sz_data = np.stack([x[0] for x in results if x is not None])
# Persist the negative ("without SZ") sample set for the training pipeline.
np.savez_compressed('wo_sz.npz', sz_data=sz_data, sz_names=sz_name)
sz_name.shape
| rebuild_dataset/crop_resaved-wo_sz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Distributed Neural Network Compression using Reinforcement Learning
# ------------------------------------------------
# ## Introduction
# In this notebook, we demonstrate how to compress a neural network (Resnet-18) using reinforcement learning. The work in this notebook is based on [1], even though heavily adapted to work with Amazon SageMaker RL. The following are the key highlights of AWS SageMaker RL demonstrated in this notebook.
# 1. A custom environment for neural network compression.
# 2. Usage of the Ray container in SageMaker with distributed training.
# 3. Using tensorflow within the environment in the container.
# 4. Network compression through RL.
#
# [1] [<NAME>, <NAME>, <NAME>, and <NAME>. "N2N learning: network to network compression via policy gradient reinforcement learning." arXiv preprint arXiv:1709.06030 (2017)](https://arxiv.org/abs/1709.06030).
#
# The RL problem here can be defined as follows:
#
# **Objective:** Search and find the smallest possible network architecture from a pre-trained network architecture, while producing the best accuracy possible.
#
# **Environment:** A custom developed environment that accepts a Boolean array of layers to remove from the RL agent and produces an observation that is some description of every layer in the network. This environment is sub-classed from OpenAI Gym's environment. It can be found in the [environment file](./src/environment.py).
#
# **State:** For every layer in the network there is a $1 \times 8$ array of floats. In Resnet-18, there are 40 removable layers.
#
# **Action:** A boolean array one for each layer. ```False``` implies don't remove the layer and ```True``` implies remove the layer.
#
# **Reward:** Consider, $C = 1 - \frac{M_s}{M}$, where $C$ is the compression ratio, $M_s$ is the number of parameters in a network that the RL agent explores, $M$ is the number of parameters in the master network to be compressed. The reward is $r = \frac{CA_s}{(2-C)A}$, where $A_s$ is the accuracy of the network that the RL agent explores and $A$ is the accuracy of the master network. If the explored network can't even train or is out-of-memory, the reward is $r = -1$.
#
# ## Attribution
#
# 1. Cifar10 Dataset: We use the cifar10 dataset in this notebook [2] to conduct our experiments.
# 2. We rely on the open-source codebase from [tensorflow/models repository](https://github.com/tensorflow/models), released under Apache 2.0 to build the backend resnet models. Please refer to the [license](https://github.com/tensorflow/models/blob/master/LICENSE) of that repository.
#
# [2] [Learning Multiple Layers of Features from Tiny Images, <NAME>, 2009.](https://www.cs.toronto.edu/~kriz/cifar.html)
#
# ## Pre-requisites
#
# ### Roles and permissions
# To get started, we'll import the sagemaker python library and setup the permissions and IAM role.
#
from time import gmtime, strftime
import sagemaker
role = sagemaker.get_execution_role()
# ### Auxiliary services and settings
#
# To run this notebook, we require the use of AWS services all of which are accessible right from the sagemaker library using the role that we just created. For instance, we need an S3 bucket where we need to store our output models, which can be created as follows:
# Default SageMaker session and S3 bucket used for all job artifacts.
sage_session = sagemaker.session.Session()
s3_bucket = sage_session.default_bucket()
s3_output_path = 's3://{}/'.format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))
# Prefix used to name training jobs (visible in the SageMaker console and CloudWatch logs).
job_name_prefix = 'rl-nnc'
# Running the RL containers in sagemaker produces logs on cloudwatch. It is tedious to migrate to cloudwatch just to monitor the algorithm logs. Let us therefore create some metric crawlers using simple regex that will help us bring the detail we need here. Since we are using the Ray container image, the following regex definitions will work.
# +
# Regex fragment matching a (possibly signed, possibly exponent-bearing)
# float in Ray's training logs.  Raw string: the original used a plain
# string, where "\." is an invalid escape sequence (SyntaxWarning on
# Python 3.12+, and a source of subtle breakage if escapes ever change).
float_regex = r"[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?"

# One crawler per reward statistic emitted by the Ray container; SageMaker
# applies each Regex to the job's CloudWatch log stream.
metric_definitions = [
    {'Name': 'episode_reward_mean',
     'Regex': 'episode_reward_mean: (%s)' % float_regex},
    {'Name': 'episode_reward_max',
     'Regex': 'episode_reward_max: (%s)' % float_regex},
    {'Name': 'episode_reward_min',
     'Regex': 'episode_reward_min: (%s)' % float_regex},
]
# -
# ## The gamification of neural network compression
#
# We now need an environment for our RL agent to work on. This environment has the following behavior. It accepts from our RL agent, a list of layers to remove from the master network. Once it received its list, it will create a network with the removed layers. It will then use the master network's original weights to initialize the smaller network. Once initialized, the environment will train the small network with both a cross-entropy loss and a distillation loss from the master network as described in [2]. It will then output the reward.
#
# [2] [<NAME>., <NAME>. and <NAME>., 2015. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531.](https://arxiv.org/abs/1503.02531)
#
# ### A custom gym environment
# To construct and formalize this world, we use the gym environment's formulations. The environment itself is described in the [environment.py](./src/environment.py) file. The environment implements a constructor that sets it up, a `step` method that accepts actions and produces reward, and other functions that describe how the environment behaves. This is consistent with OpenAI Gym interfaces for defining an environment. Let us briefly look at the environment definition below.
# !pygmentize ./src/environment.py
# Of prominent notice in this file is the `NetworkCompression` class described in the [network_compression.py](./src/tensorflow_resnet/network_compression.py). This file contains all of the tensorflow implementation of ResNet-18, its training, distillation and others that are abstracted away from the environment. By changing the definition here, other networks can be implemented as well without altering the environment file.
#
# ### Setup data and upload to S3
#
# The next step is to download the dataset and have it uploaded to S3. We use some helper codes from [tensorflow's model](https://github.com/tensorflow/models) repository to download and setup the [Cifar10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). The cifar10 dataset contains 50,000 training images and 10,000 validation images each $32 \times 32$ in RGB. Running the cell below will download the data into `cifar10_data` directory and upload to S3.
# %%time
# !python src/tensorflow_resnet/dataset/cifar10_download_and_extract.py
cifar_inputs = sage_session.upload_data(path='cifar10_data', key_prefix='cifar10_data')
# ### Prepare teacher weights
#
# A teacher network is used to train the child network using distillation loss. The code uses a pickle file dumped from the checkpoint for loading teacher weights and already has a pickle file for cifar10 dataset in the teacher directory
_ = sage_session.upload_data(path='teacher', key_prefix='cifar10_data')
# ### The RL agent
#
# For an RL agent we use the [asynchronous advantage actor-critic (A3C)](https://arxiv.org/abs/1602.01783) agent from the [Ray toolkit](https://ray.readthedocs.io/en/latest/example-a3c.html). We run training with 5 rollouts (architectures searched). We train the agent for 20 iterations in a GPU machine. The GPUs are also used to train the network in the environment. The A3C definitions and parameters of training can be found in the launcher file. We can also find the code that will register the custom environment that we have created below.
# !pygmentize ./src/train-ray.py
# ### Training
# Now that everything is set up, we can run our training job. For the training, we can use `sagemaker.rl.RLEstimator`. This class is a simple API that will take all our parameters and create the SageMaker job for us. The following cell will do this. Refer to the cell for a description of each parameter.
# +
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
estimator = RLEstimator(entry_point="train-ray.py", # Our launcher code
source_dir='src', # Directory where the supporting files are at. All of this will be
# copied into the container.
dependencies=["common/sagemaker_rl"], # some other utils files.
toolkit=RLToolkit.RAY, # We want to run using the Ray toolkit against the ray container image.
framework=RLFramework.TENSORFLOW, # The code is in tensorflow backend.
toolkit_version='0.5.3', # Toolkit version. This will also choose an apporpriate tf version.
role=role, # The IAM role that we created at the begining.
train_instance_type="ml.p3.2xlarge", # Since we want to run fast, lets run on GPUs.
train_instance_count=2, # Single instance will also work, but running distributed makes things
# fast, particularly in the case of multiple rollout training.
output_path=s3_output_path, # The path where we can expect our trained model.
base_job_name=job_name_prefix, # This is the name we setup above to be to track our job.
hyperparameters = { # Some hyperparameters for Ray toolkit to operate.
"s3_bucket": s3_bucket,
"rl.training.stop.training_iteration": 1, # Number of iterations.
"rl.training.checkpoint_freq": 1,
},
metric_definitions=metric_definitions, # This will bring all the logs out into the notebook.
)
# -
# Now that the training job is setup, all that is needed is to run the `fit` call with the appropriate input buckets. The training should take about 25 mins to complete.
estimator.fit(cifar_inputs)
# ## Process Outputs
# Now that the training is complete, we can look at the best compressed network architectures that were found during training. The list of networks with their accuracies and other metrics is stored in the output S3 bucket. This can be downloaded from S3. The file is named `output.tar.gz` and is at the same location as the model file `model.tar.gz`. Let us download and extract this output directory. But before that, we need to clean any files left over from previous runs, if any.
# !rm *_metrics.txt
# +
model_data = estimator.model_data
print('Model data path: ', model_data)
output_data = model_data.replace('model', 'output')
print('Output data path: ', output_data)
#Download the output file and extract.
# !aws s3 cp {output_data} .
# !tar xvfz output.tar.gz
# -
# Since the training runs across multiple workers, each worker stores the best model that it generates in it's own file. We will consolidate the files from all the workers to get the top networks from the training job.
# Each worker wrote its best models to its own *_metrics.txt; concatenate
# them into a single CSV for analysis.
metrics_file_name = 'consolidated_metrics.csv'
# !cat *_metrics.txt > {metrics_file_name}
import pandas as pd
df = pd.read_csv(metrics_file_name, sep=',', names = ["reward", "x-factor", "accuracy", "dir"])
# Ascending sort: the highest-reward networks end up at the tail (printed below).
df = df.sort_values('reward')
print(df.tail(10).to_string(index=False))
# The code above prints the best networks that were found during training and these are printed in the ascending order of reward. `x-factor` is how much compression has been performed and `accuracy` is the accuracy of the compressed model (trained only for 1 epoch). The `dir` is the directory where the compressed model is stored. This is in comparison with the master accuracy of `0.81`. While the best models produced here are trained, it always gives a performance boost when fine-tuned. We only train the network for a few epochs during reward calculation and hence the accuracy of the network can further be improved by fine-tuning. This can be done by using the checkpoint of the best network and fine-tuning it further for more epochs. While we only ran `1` iteration for the sake of demonstration, running more iterations will provide better results. For instance, by running for `1500` timesteps, we were able to achieve `5.7x` compression with `0.71` accuracy, which when fine-tuned further gave an accuracy of `.80`.
| reinforcement_learning/rl_network_compression_ray_custom/rl_network_compression_ray_custom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#The goal is to do a search on Amazon and download the images from all the pages
import requests
from bs4 import *
link = input('Direction: ')
req = requests.get(link)
soup = BeautifulSoup(req.text, 'lxml')
# I get the last page that appears on the web (the disabled pagination span).
# NOTE(review): raises IndexError if no 'pagnDisabled' span is on the page.
pagination = soup.find_all('span', {'class': 'pagnDisabled'})
#I take out the text and add 1 (as it is going to be used in an array, I also need to reach the last page)
last_pagination = int(pagination[0].get_text()) + 1
urls = []
#The urls that are going to be opened are created
for x in range (1, last_pagination):
    urls.append(link + '&page=' + str(x))
# open all the URLs and export the images that I find
k = 1   # running file number for saved images
pag = 1 # page counter, for progress display only
for x in urls:
    print('Pagination : ' + str(pag))
    req = requests.get(x)
    soup = BeautifulSoup(req.text, 'lxml')
    imgs = soup.find_all('img')
    for i in imgs:
        # NOTE(review): substring test on the tag's repr, not an attribute
        # check — `'src' in i.attrs` would be the robust form.
        if str(i).find('src') != -1:
            url = i['src']
            # NOTE(review): assumes the 'images/' directory already exists.
            name_image_folder = 'images/' + str(k) + '.jpg'
            image = requests.get(url).content
            with open(name_image_folder, 'wb') as handler:
                handler.write(image)
            k += 1
    pag += 1
# -
from IPython.display import Image
from IPython.core.display import HTML
# Inline screenshots documenting the run.
Image(url= 'images/web.png')
Image(url= 'images/pagination.png')
Image(url= 'images/dowloads.png')
import glob
# Count how many .jpg files were downloaded into the images folder.
Numero=len(glob.glob("images/*.jpg"))
print('Files inside the folder: ' + str(Numero))
| .ipynb_checkpoints/Download images of an Amazon search-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <h1 id="Metode-NumerikPendahuluan-Analisis-Error-" style="text-align: center;"><strong>Metode Numerik</strong><br /><strong><span style="color: blue;">EigenValue dan EigenVektor Numerik</span></strong><br /><img style="" src="images/MetNum.png" alt="" /></h1>
# <h2 id="(C)-Taufik-Sutanto---2019" style="text-align: center;">(C) <NAME> - 2020</h2>
# <h2 id="tau-data-Indonesia-~-https://tau-data.id" style="text-align: center;">tau-data Indonesia ~ <a href="https://tau-data.id" target="_blank"><span style="color: #0009ff;">https://tau-data.id/mfds-nm-05/</span></a></h2>
# + [markdown] slideshow={"slide_type": "slide"}
# # Eigenvalue (Eigenvector) Numerik:
#
# * Pendahuluan Eigenvalue dan eigen vektor
# * Aplikasi & Intuisi
# * Karakteristik Polinomial
# * Power Method
# * Rayleigh Quotient
# * Deflated Matrix Method
# * Jacobi Method
# -
# <p><img alt="" src="images/intro_eigen.png" style="width: 800px; height: 404px;" /></p>
#
# <p><img alt="" src="images/eigen_apps.png" style="width: 800px; height: 382px;" /></p>
#
# <p><img alt="" src="images/eigen_intuition.png" style="width: 800px; height: 316px;" /></p>
#
# # Karektiristik Polynomial ~ Permasalahan Mencari Akar
#
# <p><img alt="" src="images/eigen_characteristic_polymoial.png" style="width: 800px; height: 481px;" /></p>
#
# <h1 id="Keterbatasan">Keterbatasan - Companion Matrix</h1>
#
# <p><img alt="" src="images/eigen_companion_matrix.png" style="width: 800px; height: 544px;" /></p>
#
# # Contoh lain permasalahan numerik menggunakan AKP
#
# <p><img alt="" src="images/eigen_numeric_problem.png" style="width: 800px; height: 518px;" /></p>
#
# +
# Python implementation
import numpy as np
A = np.array([[ 1, 1 ],
[ 0, 2 ]])
A
# -
# Operasi Matrix
print('A \n',A)
print('A*A \n',A.dot(A))
print('A+A \n',A+A)
print("A' \n",A.transpose())
# +
# Eigenvalue dan eigenvector
A = np.array([[ 3, -1 ],[ -1, 3 ]])
print(A)
eVa, eVe = np.linalg.eig(A)
print('Eigenvalues \n',eVa)
print('Eigenvectors \n',eVe)
# -
A = [[1,2,3],[0,4,5],[0,0,6]]
A = np.array(A)
eVa, eVe = np.linalg.eig(A)
print('Eigenvalues \n',eVa)
print('Eigenvectors \n',eVe)
eVe[1]/eVe[1].max()
# # Latihan 1:
#
# * Diberikan matrix sbb:
# $$ \begin{bmatrix}
# -2 & -3 \\
# 6 & 7
# \end{bmatrix} $$
# * Tentukan **Error Relatif** aproksimasi eigenvalue terkait matrix tersebut
# * jika persamaan karakteristik polinomial-nya diselesaikan dengan metode Newton
# * **tiga iterasi** ($\lambda_3$) dengan $\lambda_0 = 1$
def F(x):
    """Characteristic polynomial p(x) = x^2 - 5x + 4 (roots at 1 and 4)."""
    return x ** 2 - 5 * x + 4
def f(x):
    """Derivative of F: p'(x) = 2x - 5, used by the Newton iteration below."""
    return 2 * x - 5
# Newton (L3, Lo=1)
# Three unrolled Newton steps lambda_{k+1} = lambda_k - F(lambda_k)/f(lambda_k)
# on the characteristic polynomial, starting from lambda_0 = 1.
Lo =1
L1 = Lo - F(Lo)/f(Lo)
L2 = L1 - F(L1)/f(L1)
L3 = L2 - F(L2)/f(L2)
L3
# Exact eigenvalue for comparison.
eksak = 1
abs(L3-eksak)/abs(eksak) # relative error of the third iterate
# <p><img alt="" src="images/eigen_Pwr_Method_1.png" /></p>
#
# <p><img alt="" src="images/eigen_Pwr_Method_Alg.png" style="width: 680px; height: 240px;" /></p>
#
# <h1 id="Power-Method---Contoh">Power Method - Contoh</h1>
#
# <p><img alt="" src="images/eigen_Pwr_Method_eg.png" style="width: 800px; height: 528px;" /></p>
#
# <p><img alt="" src="images/eigen_Pwr_Method_inv.png" style="width: 736px; height: 183px;" /></p>
#
# Example: plain power iteration — repeatedly apply A; the ratio of the
# dominant components of successive iterates approaches |lambda_max|.
A = np.array([[ 2, 9 ],[ 9, 1 ]])
x = np.array([1,1]).transpose()
N = 5
for i in range(N):
    xo = x          # previous iterate
    x = A.dot(x)
    x1 = x          # current iterate
eigen = max(abs(x1))/max(abs(xo))
print('Eigenvalue = ', eigen)
print('Eigenvector = ', x)
# Reference answer from numpy for comparison.
eVa, eVe = np.linalg.eig(A)
print('Eigenvalues \n',eVa)
print('Eigenvectors \n',eVe)
# Usually the power method normalises the vector at every step
# (keeps the iterate's entries bounded instead of growing geometrically).
x = np.array([1,1]).transpose()
N = 6
for i in range(N):
    x = x/x.max()   # normalise before applying A
    xo = x
    x = A.dot(x)
    x1 = x
eigen = max(abs(x1))/max(abs(xo))
print('Eigenvalue = ', eigen)
print('Eigenvector = ', x/x.max())
# # Latihan 2:
#
# * Diberikan matrix sbb:
# $$ \begin{bmatrix}
# 1 & 1 \\
# 4 & 1
# \end{bmatrix} $$
# * Tentukan **Error Relatif** aproksimasi eigenvalue
# * terkait matrix tersebut jika eigenvalue-nya didekati
# * dengan metode **Rayleigh** power Method **dua iterasi** ($\lambda_2$) dengan $x_0 = [1,1]'$
# Example: power iteration for Exercise 2; the exact dominant eigenvalue is 3.
A = np.array([[ 1, 1 ],[ 4, 1 ]])
x = np.array([1,1]).transpose()
N = 4
for i in range(N):
    xo = x          # previous iterate
    x = A.dot(x)
    x1 = x          # current iterate
eigen = max(abs(x1))/max(abs(xo))
print('Eigenvalue = ', eigen)
# Relative error against the exact eigenvalue 3.
print('Error Relatif = ', abs(3-eigen)/3)
# <h1 id="Rayleigh-Quotient">Rayleigh Quotient</h1>
#
# <ul>
# <li>Secara umum Power method Lambat, methode Rayleigh mempercepat iterasi Power Method</li>
# <li><img alt="" src="images/eigen_rayleigh_Quotient_1.png" style="width: 609px; height: 296px;" /></li>
# </ul>
#
# +
# Example (from before): inverse power iteration, comparing the plain
# power-method estimate with the Rayleigh quotient after two iterations.
import numpy as np
A = np.array([[ 1, 0, 2 ],[ 0, 1, 1 ], [2, 1, 2]])
# Invert A so the iteration converges to (the reciprocal of) the smallest eigenvalue.
A = np.linalg.inv(A)
x = np.array([1,1, 1]).transpose()
N = 2
# Exact eigenvalue used as the reference.
eksak = -1
for i in range(N):
    xo = x          # previous iterate
    x = A.dot(x)
    x1 = x          # current iterate
eigen = max(abs(x1))/max(abs(xo))
# Rayleigh quotient x'Ax / x'x: converges faster than the component ratio.
eigenRayleigh = x.transpose().dot(A).dot(x)/x.transpose().dot(x)
print('EigenValue Menurut metode Rayleigh 2 iterasi =', eigenRayleigh)
print('EA Eigenvalue (Power Method)= ', abs(eigen-eksak))
print('EA Eigenvalue (Rayleigh)= ', abs(eigenRayleigh-eksak))
print('Eigenvector = ', x)
# + [markdown] slideshow={"slide_type": "slide"}
# <h1 id="End-of-Week-13">End of Module</h1>
#
# <hr />
# ...
| mfds-nm-05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
# %load_ext zipline
# +
# %%zipline --start=2011-1-1 --end=2013-1-1
from zipline.api import order, record, symbol
import matplotlib.pyplot as plt
def initialize(context):
    """zipline setup hook; this strategy needs no one-time initialization."""
    pass
def handle_data(context, data):
    # Buy 10 shares of AAPL every bar and record its price for analyze().
    # NOTE(review): `data[symbol(...)].price` is the legacy (pre-1.0) data
    # accessor — newer zipline expects data.current(symbol('AAPL'), 'price').
    order(symbol('AAPL'), 10)
    record(AAPL=data[symbol('AAPL')].price)
def analyze(context, perf):
    """Plot portfolio value and the recorded AAPL price after the backtest."""
    ax1 = plt.subplot(211)
    perf.portfolio_value.plot(ax=ax1)
    ax2 = plt.subplot(212, sharex=ax1)
    perf.AAPL.plot(ax=ax2)
    plt.gcf().set_size_inches(18, 8)
    plt.show()
# -
| zipline/examples/buyapple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''.venv'': venv)'
# name: python3
# ---
# # Test Coffea
#
# This will test Coffea to see if we can figure out how to use it with our code.
#
# First are the includes from coffea. This is based on the [example written by Ben](https://github.com/CoffeaTeam/coffea/blob/master/binder/servicex/ATLAS/LocalExample.ipynb).
# +
from servicex import ServiceXDataset
from coffea.processor.servicex import DataSource, FuncAdlDataset, Analysis
from coffea.processor.servicex import LocalExecutor
import matplotlib.pyplot as plt
from coffea import hist, processor
from IPython.display import display, update_display, HTML
# -
# And imports connected with running servicex.
# +
from func_adl import ObjectStream
from func_adl_servicex import ServiceXSourceUpROOT
from hist import Hist
import mplhep as mpl
import awkward as ak
from utils import files
# -
# Methods copied to help us get all leptons from the source files
def apply_event_cuts (source: ObjectStream) -> ObjectStream:
    '''Event-level cuts for the analysis.

    Keeps only events that fired the electron or muon trigger, so data we
    are not going to need is never shipped back from ServiceX.
    '''
    return (source
        .Where(lambda e: e.trigE or e.trigM))
def good_leptons(source: ObjectStream) -> ObjectStream:
    '''Select all lepton columns from each event: pt, eta, phi, E, and the
    other per-lepton quantities needed downstream.

    Because uproot doesn't tie together the objects, we can't do any
    per-lepton cuts at this point; the output is written as Parquet files.
    '''
    return source.Select(lambda e:
        {
            'lep_pt': e.lep_pt,
            'lep_eta': e.lep_eta,
            'lep_phi': e.lep_phi,
            'lep_energy': e.lep_E,
            'lep_charge': e.lep_charge,
            'lep_ptcone30': e.lep_ptcone30,
            'lep_etcone20': e.lep_etcone20,
            'lep_type': e.lep_type,
            'lep_trackd0pvunbiased': e.lep_trackd0pvunbiased,
            'lep_tracksigd0pvunbiased': e.lep_tracksigd0pvunbiased,
            'lep_z0': e.lep_z0,
        }) \
        .AsParquetFiles('junk.parquet')
# Create the `func_adl` cuts to get the data. The dataset we use here doesn't matter, as long as it "looks" like all the datasets we are going to be processing.
# The concrete dataset is a placeholder ('dummy'); only the query shape
# matters here — real datasets are attached later via make_ds().
ds = ServiceXSourceUpROOT('cernopendata://dummy', files['ggH125_ZZ4lep']['treename'], backend='open_uproot')
ds.return_qastle = True
leptons = good_leptons(apply_event_cuts(ds))
# The analysis code that will apply the 4 lepton cuts and make the 4 lepton mass plot.
class ATLAS_Higgs_4L(Analysis):
    """Coffea Analysis that fills a lepton-mass histogram per dataset."""
    @staticmethod
    def process(events):
        import awkward as ak
        from collections import defaultdict
        # Per-dataset event counts (sum of weights placeholder).
        sumw = defaultdict(float)
        mass_hist = hist.Hist(
            "Events",
            hist.Cat("dataset", "Dataset"),
            hist.Bin("mass", "$Z_{ee}$ [GeV]", 60, 60, 120),
        )
        dataset = events.metadata['dataset']
        leptons = events.lep
        # We need to look at 4 lepton events only.
        # NOTE(review): `cut` is computed but never applied below — the
        # histogram is filled from all events, not just 4-lepton ones.
        cut = (ak.num(leptons) == 4)
        # Form the invar mass, plot.
        # diele = electrons[cut][:, 0] + electrons[cut][:, 1]
        # diele.mass
        # NOTE(review): `dilepton` is unused, and `mass_4l` is the per-lepton
        # mass, not the 4-lepton invariant mass — looks like work in
        # progress; confirm the intended combination before trusting plots.
        dilepton = leptons[:,0] + leptons[:,1]
        mass_4l = leptons.mass
        # Fill the histogram
        sumw[dataset] += len(events)
        print(len(events))
        mass_hist.fill(
            dataset=dataset,
            mass=ak.flatten(mass_4l),
        )
        return {
            "sumw": sumw,
            "mass": mass_hist
        }
# Create the data source that we will be running against.
def make_ds(name: str, query: ObjectStream):
    '''Create a ServiceX DataSource for a particular ATLAS Open Data file.

    `name` must be a key of the `files` dict (supplies file list and tree
    name); `query` is the func_adl selection to run against it.
    '''
    datasets = [ServiceXDataset(files[name]['files'], backend_type='open_uproot')]
    return DataSource(query=query, metadata={'dataset': name}, datasets=datasets)
# And run!
# +
analysis = ATLAS_Higgs_4L()
# TODO: It would be good if datatype was determined automagically (there is enough info)
executor = LocalExecutor(datatype='parquet')
datasource = make_ds('ggH125_ZZ4lep', leptons)
async def run_updates_stream(accumulator_stream):
    """Drain the async accumulator stream, printing each update; return the last one."""
    # NOTE(review): `global first` references a name never defined/used — dead code.
    global first
    count = 0
    async for coffea_info in accumulator_stream:
        count += 1
        print(count, coffea_info)
    return coffea_info
# Why do I need run_updates_stream, why not just await on execute (which fails with async gen can't).
# Perhaps something from aiostream can help here?
result = await run_updates_stream(executor.execute(analysis, datasource))
# -
hist.plot1d(result['mass'])
| notebooks/atlas-demo-coffea.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests
import time
# ## Step 1 - Scraping
# ### NASA Mars News
# # https://splinter.readthedocs.io/en/latest/drivers/chrome.html
# !which chromedriver
# Drive a real Chrome instance so JS-rendered content is present in the HTML.
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
nasa_url = 'https://mars.nasa.gov/news/'
browser.visit(nasa_url)
html = browser.html
bsoup = bs(html,"lxml")
# +
# First matching element corresponds to the most recent article.
# NOTE(review): raises AttributeError if the class names change on the site.
news_title = bsoup.find('div', class_='content_title').text
print(news_title)
news_p=bsoup.find('div', class_='article_teaser_body').text
print(news_p)
# -
# ### JPL Mars Space Images - Featured Image
# Scrape the JPL "featured image" from the space-images carousel.
jpl_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(jpl_url)
html = browser.html
soup = bs(html, 'lxml')
# BUG FIX: the original queried `bsoup` here — the soup of the *NASA news*
# page parsed in the previous cell — so the carousel lookup ran against the
# wrong document. Use `soup` (the JPL page just parsed) consistently.
image_name= soup.find('article', class_='carousel_item')['alt']
print(image_name)
base_url = 'https://www.jpl.nasa.gov'
# The full-size image URL lives on the element whose data-title matches.
img_url = soup.find(attrs={'data-title':image_name})["data-fancybox-href"]
combo_url = base_url + img_url
print(combo_url)
# ### Mars Weather
weather_url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(weather_url)
html = browser.html
bsoup = bs(html, 'lxml')
# NOTE(review): matching on Twitter's generated utility-class string is
# brittle — it changes whenever Twitter redeploys; confirm it still matches.
mars_weather= bsoup.find('p', class_='css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0')
print(mars_weather)
# ### Mars Facts
facts_url = 'https://space-facts.com/mars/'
# read_html returns every <table> on the page as a DataFrame.
mars_facts_tables = pd.read_html(facts_url)
mars_facts_tables
# Table 1 is the Mars/Earth comparison table.
mars_df = mars_facts_tables[1]
mars_df
mars_df = mars_df.drop(columns=['Earth'])
mars_df
mars_df = mars_df.rename(columns=
    {"Mars - Earth Comparison": "Measure"})
mars_df
html_table = mars_df.to_html(header=None,index=False)
# BUG FIX: str.replace returns a *new* string; the original discarded the
# result, so the newlines were never actually removed from html_table.
html_table = html_table.replace('\n', '')
html_table
mars_df.to_html('mars_table.html')
# !open mars_table.html
# ### Mars Hemispheres
# +
hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(hemisphere_url)
html = browser.html
bsoup = bs(html, 'lxml')
hemisphere_urls = []
#include all 4 hemispheres
xpath = '//*[@id="product-section"]/div[2]/div/div/a'
hemisphere_anchors = browser.find_by_xpath(xpath)
# Loop through results
for anchor in hemisphere_anchors:
    try:
        hemisphere_title = anchor.find_by_tag('h3').text
        hemisphere_href = anchor['href']
        #request the next page using the href
        hemisphere_page = requests.get(hemisphere_href).text
        bsoup = bs(hemisphere_page, 'lxml')
        anchor_tag_page2 = bsoup.select('#wide-image > div > ul > li:nth-child(1) > a')
        # NOTE(review): rebinding `hemisphere_url` here shadows the search URL above.
        hemisphere_url = anchor_tag_page2[0]['href']
        img_dict = { "image title": hemisphere_title, "image url": hemisphere_url }
        hemisphere_urls.append(img_dict)
    # NOTE(review): broad catch — any scraping failure is only printed, so a
    # hemisphere can silently be missing from the result list.
    except Exception as e:
        print(e)
        print("This is an exception being thrown")
hemisphere_urls
# -
| Missions_to_Mars/.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Collect questions until the user types "q", then prompt for an answer to each.
total_list = []
while True:
    question = input("질문을 입력해주세요 : ")
    if question == "q":
        break
    # A new entry starts with an empty answer string.
    total_list.append({"질문" : question, "답변" : ""})

for entry in total_list:
    print(entry["질문"])
    # Dicts are mutated in place, so total_list is updated directly.
    entry["답변"] = input("답변을 입력해주세요 : ")

print(total_list)
# +
# Variant that writes each answered entry back by index.
total_list = []
while True:
    question = input("질문을 입력해주세요 : ")
    if question == "q":
        break
    else:
        total_list.append({"질문" : question, "답변" : ""})

# BUG FIX: `x = 0` was inside the for loop, resetting the index on every
# iteration so `total_list[x] = i` always overwrote slot 0. Initialize the
# index once, before the loop. (The write-back is redundant anyway, since
# `i` is the same dict object already stored in the list, but it is kept
# to preserve the intent of this variant.)
x = 0
for i in total_list:
    print(i["질문"])
    answer = input("답변을 입력해주세요 : ")
    i["답변"] = answer
    total_list[x]=i
    x = x + 1
print(total_list)
| QA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to nbdev
#
# > Create delightful python projects using Jupyter Notebooks
# `nbdev` is a library that allows you to fully develop a library in [Jupyter Notebooks](https://jupyter.org/), putting all your code, tests and documentation in one place. That is: you now have a true [literate programming](https://en.wikipedia.org/wiki/Literate_programming) environment, as envisioned by D<NAME> back in 1983!
#
# Using the interactive environment, you can easily debug and refactor your code. Add `#export` flags to the cells that define the functions you want to include in your python modules. Here, for instance, is how `combined_cos` is defined and documented in the `fastai` library:
# <img alt="Exporting from nbdev" width="700" caption="An example of a function defined in one cell (marked with the export flag) and explained, along with a visual example, in the following cells" src="images/export_example.png" />
# Using notebooks written like this, `nbdev` can create and run any of the following with a single command:
#
# - Searchable, hyperlinked documentation; any word you surround in backticks will by *automatically* hyperlinked to the appropriate documentation
# - Python modules, following best practices such as automatically defining `__all__` ([more details](http://xion.io/post/code/python-all-wild-imports.html)) with your exported functions, classes, and variables
# - Pip installers (uploaded to pypi for you)
# - Tests (defined directly in your notebooks, and run in parallel)
# - Navigate and edit your code in a standard text editor or IDE, and export any changes automatically back into your notebooks
#
# Since you are in a notebook, you can also add charts, text, links, images, videos, etc, that will be included automatically in the documentation of your library. The cells where your code is defined will be hidden and replaced by standardized documentation of your function, showing its name, arguments, docstring, and link to the source code on github. For instance, the cells above are converted to:
# <img alt="Documentation in nbdev" width="600" caption="An example of automated documentation from the fastai library" src="images/doc_example.png" />
# See below for *Installing* and *Getting Started*. In the other pages of the documentation, you can get more details about:
#
# - the [export](http://nbdev.fast.ai/export.html) functionality from jupyter notebooks to a python library
# - the [cli](http://nbdev.fast.ai/cli.html) commands you can use with nbdev in a terminal
# - how [export2html](http://nbdev.fast.ai/export2html.html) builds a documentation for your library
# - how [sync](http://nbdev.fast.ai/sync.html) can allow you to export back from the python modules to the jupyter notebook
# - how to put [test](http://nbdev.fast.ai/test.html)s in your notebooks, which can be run in parallel, and exported to CI from your notebooks
# - get more info about the [additional functionality](http://nbdev.fast.ai/#Additional-functionality)
# ## Installing
# nbdev is on PyPI so you can just run:
# ```
# pip install nbdev
# ```
#
# For an [editable install](https://stackoverflow.com/questions/35064426/when-would-the-e-editable-option-be-useful-with-pip-install), use the following:
# ```
# git clone https://github.com/fastai/nbdev
# pip install -e nbdev
# ```
# ## Getting Started
# To begin your own project, click here: [nbdev template](https://github.com/fastai/nbdev_template/generate). Fill in the requested info and click *Create repository from template*, and a new GitHub repo will be created for you.
#
# Now, open your terminal, and clone the repo you just created.
#
# Next, edit the `settings.ini` file. Note that it contains all the necessary information for when you'll be ready to package your library, so you shouldn't need to change the `setup.py` file provided by the template. The basic structure (that can be personalized provided you change the relevant information in `settings.ini`) is that the root of the repo will contain your notebooks, along with a folder `docs` where the doc will be auto-generated that contains everything for a [jekyll](https://jekyllrb.com/)-powered website. Because [GitHub Pages supports Jekyll](https://help.github.com/en/github/working-with-github-pages/setting-up-a-github-pages-site-with-jekyll), you can host your site for free on GitHub without any additional setup.
#
# Your `settings.ini` is where all parts of nbdev look for any required configuration information. Once you've edited it, run the command `nbdev_build_lib` (which is automatically installed for you when you install `nbdev`. You'll now find that you have a new directory, with the name of whatever you set `lib_name` to in `settings.ini`.
#
# Now, run `jupyter notebook`, and click `00_core.ipynb`. This is where you'll create your first module! Create Jupyter cells as you would in any notebook. For any cells that you want to be included in your python module, type `#export` as the first line of the module.
#
# In the last cell of your notebook, you can then run:
# Export every cell flagged with `#export` in this notebook into the
# project's python modules (equivalent to running `nbdev_build_lib`).
from nbdev.export import *
notebook2script()
# Or in the command line, you can run:
# ``` bash
# nbdev_build_lib
# ```
# as long as you are somewhere in the folder where you are developing your library. Either of these will do the same thing: update your module to include all exported cells in your notebook.
#
# To enable documentation in your GitHub repo, click 'Settings' on the main repo page, scroll down to 'GitHub Pages', and under 'Source' choose 'master branch /docs folder'. GitHub will then show you a link to your working documentation site.
#
# Finally, edit `index.ipynb`. This will be converted into your projects *README* file, and will also be the index for your documentation (the page you're reading right now actually comes from an `index.ipynb` file!) You can use the module you just exported in this library, which means you can show real working code, and actual outputs. Once you have everything as you want it, run `nbdev_build_docs` in the terminal. This will export HTML versions of your notebooks to the `docs` directory, and will create hyperlinks for any words in backticks (as long as they exist in your module). It will also create a menu for all notebooks you have created, and a table of contents for each.
# ## Additional functionality
# There's a lot of functionality in `nbdev`; see the docs for each module in the sidebar to learn about all the features. Here we'll briefly highlight a couple.
# ### Adding your project to pypi
# If you want people to be able to install your project by just typing `pip install your-project` then you need to upload it to [pypi](https://pypi.org/). The good news is, we've already created a fully pypi compliant installer for your project! So all you need to do is register at pypi, if you haven't previously done so, and then create a file called `~/.pypirc` with your login details. It should have these contents:
#
# ```
# [pypi]
# username = your_pypi_username
# password = <PASSWORD>
# ```
#
# To upload your project to pypi, just type `make pypi` in your project root directory. Once it's complete, a link to your project on pypi will be printed.
#
# **NB**: make sure you increment the version number in `settings.py` each time you want to push a new release to pypi.
# ### Avoiding and handling git conflicts
# Jupyter Notebooks can cause challenges with git conflicts, but life becomes much easier when you use `nbdev`. As a first step, run `nbdev_install_git_hooks` in the terminal from your project folder. This will set up git hooks which will remove metadata from your notebooks when you commit, greatly reducing the chance you have a conflict.
#
# But if you do get a conflict, simply run `nbdev_fix_merge filename.ipynb`. This will replace any conflicts in cell outputs with your version, and if there are conflicts in input cells, then both cells will be included in the merged file, along with standard conflict markers (e.g. `=====`). Then you can open the notebook in Jupyter and choose which version to keep.
# ### Using nbdev as part of your CI
# You can use [GitHub actions](https://github.com/features/actions) to leverage the functionality of nbdev and easily make a CI that:
# - check the notebooks are readable (with `nbdev_read_nbs`)
# - check the notebooks have been cleaned of needless metadata to avoid merge conflicts (with `nbdev_clean_nbs`)
# - check there is no diff between the notebooks and the exported library (with `nbdev_diff_nbs`)
# - run the tests in your notebooks (with `nbdev_test_nbs`)
#
# The template contains a basic CI that uses the four points above, edit the file `.github/workflows/main.yml` to your liking and comment out the parts you don't want.
# ## Contributing
# If you want to contribute to `nbdev`, be sure to review the [contributions guidelines](https://github.com/fastai/nbdev/blob/master/CONTRIBUTING.md). This project adheres to fastai`s [code of conduct](https://github.com/fastai/nbdev/blob/master/CODE-OF-CONDUCT.md). By participating, you are expected to uphold this code. In general, the fastai project strives to abide by generally accepted best practices in open-source software development.
#
# Make sure you have the git hooks we use installed by running
# ```
# nbdev_install_git_hooks
# ```
# in the cloned repository folder.
# ## Copyright
# Copyright 2019 onwards, fast.ai, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project's files except in compliance with the License. A copy of the License is provided in the LICENSE file in this repository.
| nbs/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Neural networks with PyTorch
#
# Deep learning networks tend to be massive with dozens or hundreds of layers, that's where the term "deep" comes from. You can build one of these deep networks using weight matrices manually, but in general it's very cumbersome and difficult to implement. PyTorch has a nice module `nn` that provides a nice way to efficiently build large neural networks. We will soon learn how to work with this module. But first, let's import all of the required packages.
#
# Before proceeding with this running code cells in this notebook, please ensure you have followed the instructions [here](https://github.com/beginners-machine-learning-london/intro_to_machine_learning_with_pytorch/tree/master/setup).
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import torch
from torch import nn, optim
from torchvision import datasets, transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import helper
import matplotlib.pyplot as plt
# -
#
# We are now going to build a larger network that can solve a (formerly) difficult problem, identifying text in an image. Here we'll use the MNIST dataset which consists of greyscale handwritten digits. Each image is 28x28 pixels, you can see a sample below
#
# <img src='assets/mnist.png'>
#
# > Our goal is to build a neural network that can take one of these images and predict the digit in the image.
# There are a few steps to follow to build and train the neural network. These are as follows:
#
# 1. Loading the dataset
# 2. Building the neural network
# 3. Training the neural network
# # 1. Loading the dataset
# ## 1.1 Defining our transforms
# We need to get our dataset but before doing so we need to define a set of transformations that can be applied to the dataset that we can pass to Pytorch's dataset downloader. This is provided through the `torchvision` package.
# PyTorch provides the tools to perform common image transformations and processing before passing them to your neural network as training data.
# These transforms can be chained together using `transforms.Compose([list_of_transforms])`.
#
# > **Exercise**: Using the pytorch's [Compose](https://pytorch.org/docs/master/torchvision/transforms.html#torchvision.transforms.Compose) and [Normalize](https://pytorch.org/docs/master/torchvision/transforms.html#torchvision.transforms.Normalize) define a chain of transforms where we convert the images to tensors, then normalize them with the mean and standard deviation of 0.5.
# Assign the transform object to a variable called `transform` to be used when we're downloading our dataset.
# When you are done, your code should follow the same format as the one below:
#
# ```python
# >>> transform = transforms.Compose([transforms.CenterCrop(10), transforms.ToTensor()])
# ```
#
# In the above example, we are defining a chain of transforms where our images are cropped at center with desired output size of 10px, then they are converted to tensors.
# Exercise placeholder (intentionally blank): fill in Compose, ToTensor, and
# Normalize with mean/std of 0.5, per the instructions above.
transform = transforms.____([transforms.____(), transforms.____(____, ____)])
# ## 1.2. Downloading the MNIST Data
# Pytorch ships with a set of packaged datasets to get you started with training models instantly. These are located on `torchvision.datasets` module. See the [documentation](https://pytorch.org/docs/stable/torchvision/datasets.html) for more information on all the available datasets.
#
# For this workshop, we are going to download the MNIST dataset which consists of 50,000 images of handwritten drawings.
#
# You will write the code below to download the dataset of images called [MNIST](http://yann.lecun.com/exdb/mnist/) while transforming the images to be suitable for training.
#
# When downloading the data from Pytorch's datasets, it expects the chain of transform instructions to be passed in. You can download the MNIST data using `datasets.MNIST()` by passing in the following parameters:
#
# - `download_dir` for example `"~/test/MNIST_data/"` - Where in your computer you would like the data to be downloaded to.
# - `download= True` to indicate you would like pytorch to download this dataset for you from the Internet onto the `download_dir`.
# - `train=True` to indicate you want to use this data for training. If you set this attribute to false, Pytorch grabs the data from the testing set when downloading from MNIST dataset. We will cover networking testing and validation in another workshop.
# - `transform = transform` to load the transforms onto the downloaded object. This can then be passed to the dataloader which then takes care of **batching**, **transforming** and **shuffling** the data for us. All great endeavours for making sure our data is as random as possible for our model training.
#
# > **Exercise:** Use Pytorch's [documentation](https://pytorch.org/docs/stable/torchvision/datasets.html) and above instructions to download MNIST data to a directory of your choice. Ensure you are passing in your chain of transforms and asking Pytorch to download from the training dataset. Save your data to a variable called `trainset`.
# Exercise placeholder (intentionally blank): datasets.MNIST with a download
# directory, download=True, train=True, and transform=transform (see above).
trainset = datasets.____("~/____/____/", ____=____, ____=____, ____=____)
# If you complete this step correctly, you should see the following output when you run the code cell below:
#
# ```
# Dataset MNIST
# Number of datapoints: 60000
# Root location: C:\Users\Username/MNIST_data/ (Note: Yours will be different)
# Split: Train
# ```
# Check to see if you have downloaded the dataset correctly
# (evaluating the Dataset object displays its summary repr shown above)
trainset
# ## 1.3. Loading the downloaded data into a dataloader
# Final step of preparing the data is to load it onto a python generator so that you can feed the network in batches which improves the training speed. It also lets you shuffle the images each time you request the batch, making sure the training process is as random as possible to avoid possible **overfitting** to the data. What **overfitting** is and how we can avoid it further is covered in the future workshops.
#
# Pytorch provides a utility module called `torch.utils.data.DataLoader()`. This module accepts your training dataset alongside a set of parameters on how to create the dataloader for you. The main parameters are `shuffle` and `batch_size`. The batch size is the number of images we get in one iteration from the data loader and pass through our network, often called a *batch*. And `shuffle=True` tells it to shuffle the dataset every time we start going through the data loader again.
#
# > **Exercise:** Use Pytorch's [documentation](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) and above instructions to load the MNIST dataset you have just downloaded onto the `dataloader`, which will be a python generator that serves the data for us. Set the `batch_size` to `64` and `shuffle` to `true`.
# Exercise placeholder (intentionally blank):
# torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
dataloader = torch.____.____.____(____, ____=64, ____=True)
# We have the training data loaded into `dataloader` and we make that an iterator with `iter(dataloader)`. Later, we'll use this to loop through the dataset for training, like
#
# ```python
# for image, label in dataloader:
# ## do things with images and labels
# ```
#
# You'll notice we created the `dataloader` with a batch size of 64, and `shuffle=True`. As a demonstration, run the code cells below which grab the first batch so we can check out the data. We can see below that `images` is just a tensor with size `(64, 1, 28, 28)`. So, 64 images per batch, 1 color channel, and 28x28 images.
# Pull one batch from the loader so we can inspect what the network will see.
dataiter = iter(dataloader)
# FIX: use the builtin next(); the Python-2-style .next() method has been
# removed from recent PyTorch DataLoader iterators and raises AttributeError.
images, labels = next(dataiter)

print(type(images))
print(images.shape)   # (64, 1, 28, 28): batch, channel, height, width
print(labels.shape)   # (64,): one label per image
# Pytorch also allows us to convert an image loaded as a tensor on `dataloader` to an image again. To do so, we first convert it to a numpy array with `numpy()`. Considering an image is a matrix of numbers with the size of 28x28, we can use `squeeze()` to remove all the information from the numpy array that is not necessary for plotting an image (number of images per batch, colour channel).
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
# # 2. Building a Neural Network
# PyTorch provides a module `nn` that makes building networks much simpler. Below we will see how we can create a neural network of our own with 784 inputs, 256 hidden units, 10 output units and a softmax output.
# ## 2.1 Your Turn to Build a Network
#
# <img src="assets/mlp_mnist.png" width="600px">
# PyTorch provides a convenient way to build networks like the picture shown above where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)).
# Here is an example from the documentation
#
# ```python
# # Example of using Sequential
# model = nn.Sequential(
# nn.Conv2d(1,20,5),
# nn.ReLU(),
# nn.Conv2d(20,64,5),
# nn.ReLU()
# )
#
# ```
#
# In the above example, the network architecture is made out of convolutional layers (covered in future workshops) which take a long time to train on our CPUs and can be an overkill for our classification problem. The classification of handwritten digits is simple enough to use a linear network. You can create linear layers using `nn.Linear(no_input_node, no_output_nodes)`.
# > **Exercise:** Create a network with **784 input units**, a hidden layer with **128 units and a ReLU activation**, then a hidden layer with **64 units and a ReLU activation**, and finally an output layer with **a softmax activation** as shown above. You can use a ReLU activation with the `nn.ReLU` module and softmax activation with `nn.Softmax(dim=1)`.
from torch import nn

# Exercise placeholder (intentionally blank): nn.Sequential chaining
# Linear(784,128) -> ReLU -> Linear(128,64) -> ReLU -> Linear(64,10) -> Softmax(dim=1)
model = nn.____(nn.____(784, 128), nn.____(), nn.____(128, 64), nn.____(), nn.____(64, 10), nn.____(dim = 1))
print(model.state_dict)
# If you complete the previous exercise correctly you should see the following output when you run the above code cells.
# ```
# <bound method Module.state_dict of Sequential(
# (0): Linear(in_features=784, out_features=128, bias=True)
# (1): ReLU()
# (2): Linear(in_features=128, out_features=64, bias=True)
# (3): ReLU()
# (4): Linear(in_features=64, out_features=10, bias=True)
# (5): Softmax()
# )>
# ```
# ## 2.2 Forward pass
#
# Now that we have a network, let's see what happens when we pass in an image. The **forward pass** refers to calculation process of the values of the output layers from the inputs data. We can perform a forward pass through the network using `model.forward(image)`.
# +
# Grab some data
dataiter = iter(dataloader)
# FIX: use the builtin next(); the .next() method was removed in newer PyTorch.
images, labels = next(dataiter)

# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size

# Forward pass through the network: class probabilities for one image
img_idx = 0
probabilities = model.forward(images[img_idx,:])

img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), probabilities)
# -
# We'll now see how we can train a neural network to accurately predict the numbers appearing in the MNIST images.
# # 3.Training Neural Networks
#
# ## 3.1 Overview of the Process
#
# The network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.
#
# <img src="assets/function_approx.png" width="500px">
#
# At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.
#
# The general process for training a neural network with PyTorch is as follows. Don't worry if you do not understand what each term means. We will cover them soon:
#
# 1. Make a forward pass through the network to get the network `output`
# 2. Use the network `output` to calculate the `loss` with the help of a `criterion`
# 3. Perform a backward pass through the network with `loss.backward()` to calculate the gradients
# 4. Take a step with Pytorch's `optimizer` to update the weights
#
# We've already seen how to perform the first step. Let's see in the next section how `loss` can be calculated.
# ## 3.2 Gradient Descent
#
# For our network to classify the images correctly, we need the right network parameters (the weights and biases in each layer). To find the network parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems
#
# $$
# \large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}
# $$
#
# where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the network outputs.
#
# By minimising this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base.
#
# <img src='assets/gradient_descent.png' width="350px">
# ## 3.2 Losses in PyTorch
#
# Let's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the negative-log likelihood loss (`nn.NLLLoss`). You'll usually see the loss assigned to `criterion`.
#
#
# <img src='assets/nllloss.png' width="650px">
#
# To actually calculate the loss, you first define the `criterion` then pass in the output of your network and the correct labels.
#
# ```python
# raw_output = model.forward(images)
# logits = torch.log(raw_output)
# criterion = nn.NLLLoss()
# loss = criterion(logits, labels)
# ```
#
# This means we need to pass in the output of our network into the loss which is the log of the output of the softmax function. The log of the output of the network is usually called the **logits** or **scores**. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities and perform this log operation on our network's `raw_output` using `torch.log()`.
#
# > **Exercise:** Define a negative log likelihood loss `criterion`
# Exercise placeholder (intentionally blank): negative log likelihood loss, nn.NLLLoss
criterion = nn.____()
print(type(criterion)) # <class 'torch.nn.modules.loss.NLLLoss'>
# ## 3.3. Backpropagation (Backwardpass)
#
# For single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one you've built above. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.
#
# Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we look at the process visually. Check out this [video](https://www.youtube.com/watch?v=Ilg3gGewQ5U) to learn more about backpropagation.
#
# <img src='assets/backpropagation.png' width="800px">
# ## 3.4 Optimizer
#
# One of the final pieces we need to start training is an optimizer that we'll use to update the weights with the gradients. The formula below shows that after calculating the gradients (change in error with respect to weights), we multiply them by a learning rate and then add them to the old weights to update the weights.
#
# <img src='assets/weight_update_formula.png' width="350px">
#
# We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use **stochastic gradient descent** with `optim.SGD` which is simply the algorithm being used to network the train in batches. There are ofcourse other algorithms for performing weight updates such as `Adam` which you can find more about in Pytorch's [documentation](https://pytorch.org/docs/stable/optim.html#algorithms).
#
# You can see how to define this optimizer in the code cell below.
# +
from torch import optim

# Stochastic gradient descent: each step() nudges every parameter by
# -learning_rate * its gradient. The optimizer is handed the generator of
# trainable parameters plus the learning rate.
learning_rate = 0.01
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# -
# ## 3.5 Updating the network parameters
#
# When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent.
#
# <img src='assets/weight_update_formula.png' width="350px">
#
# Below we will see an example of calculating the gradients using a backwards pass. Let's see what our current weights are:
# Inspect the first layer's weights before training (model[0] is the first Linear layer).
print('Initial weights - ', model[0].weight)
# Let's go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
# +
# One manual training step: forward pass, loss, then a backward pass.
# The statement order matters: gradients must be zeroed BEFORE backward().
images, labels = next(iter(dataloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model.forward(images) # output torch.Size([64, 10])
loss = criterion(torch.log(output), labels) # Scalar
# +
print('Before backward pass: \n', model[0].weight.grad) # None
loss.backward()
print('After backward pass: \n', model[0].weight.grad) #torch.Size([128, 784]) - gradients for 128 hidden to 784 input nodes
# -
# ## 3.6 Training For Real
# Now that we have a bit of understanding on how neural networks learn, let's implement it. Below I have provided you with the code that trains the neural network and also have commented what each line of code does. Run the code cell below to see the network getting **trained**!! You will see the results of this training in a minute.
#
# You should notice the training loss dropping in each iteration or `epoch`.
epochs = 5
for epoch in range(epochs):
    epoch_loss = 0
    for images, labels in dataloader:
        # Flatten each 28x28 MNIST image into a 784-long vector
        images = images.view(images.shape[0], -1)

        # Reset gradients accumulated from the previous batch; without this,
        # gradients from earlier batches would pile up and training diverges
        optimizer.zero_grad()

        # Forward pass -> loss -> backward pass -> weight update
        output = model.forward(images)
        loss = criterion(torch.log(output), labels)
        loss.backward()
        optimizer.step()

        # Accumulate this batch's scalar loss
        epoch_loss += loss.item()

    # Average loss over all batches in this epoch
    print(f"Training loss: {epoch_loss/len(dataloader)}")
# Now that we have a trained network, let's make a few **inferences**. Passing in 10 images to the trained network to see what it predicts. Run the code cells below to witness the magic of neural networks!
# +
images, labels = next(iter(dataloader))

# Run the trained model on ten images and visualise the class probabilities.
for idx in range(10):
    img = images[idx].view(1, 784)
    # Inference only: disabling autograd skips gradient bookkeeping
    with torch.no_grad():
        ps = model.forward(img)
    helper.view_classify(img.view(1, 28, 28), ps)
# -
# # CONGRATULATIONS
# Well done on completing the above assignments! This was a hard exercise and you have learned a lot about how neural networks work and how to work with Pytorch. We hope that you enjoyed this **Introduction to machine learning with Pytorch Workshop**. To learn more about neural networks and deep learning check out a few resources we have provided in our repo's [README.md](https://github.com/beginners-machine-learning-london/intro_to_machine_learning_with_pytorch).
#
# Also make sure to sign up on our meetup group to be informed of future workshops!
# [London Beginners Machine Learning Meetup](https://www.meetup.com/beginners-machine-learning-london/)
| notebooks/Introduction to Pytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kaggle House Prices Contest
# ### import all packages that will be needed to finish analysis.
import pandas as pd
# %matplotlib notebook
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from pivottablejs import pivot_ui
import statsmodels.api as sm
# ### import data from csv files
# Load the Kaggle train/test splits from the working directory.
test = pd.read_csv('test.csv', sep = ',')
train = pd.read_csv('train.csv', sep = ',')
train.head()
# Separate SalePrice from DF. Combine the two DFs.
SalePrice = train.SalePrice
train = train.drop(['SalePrice'], axis = 1)
# Stack train and test so cleaning/encoding is applied consistently to both.
combine = pd.concat([train, test])
combine.shape
combine.columns = combine.columns.str.replace(" ", "_")
combine.head()
# check for NaNs
combine.isnull().sum().sum()
NaNlist = pd.DataFrame(combine.isnull().sum().sort_values(ascending = False), columns = ['total'])
NaNlist = NaNlist[NaNlist.total > 0]
NaNlist
NaNcolnames = NaNlist.index.values.tolist()
combine[NaNcolnames].dtypes
# Fill missing values: the literal string 'None' for categorical (object)
# columns — in this dataset NaN usually means "feature absent" — and 0 for
# numeric columns.
colnames = combine.columns
for name in colnames:
    if combine[name].dtype == 'O':
        combine[[name]] = combine[[name]].fillna('None')
    else:
        combine[[name]] = combine[[name]].fillna(0)
combine.isnull().sum().sum()
combine.head()
# Align all dtypes with correct types
cols = combine.columns.tolist()
cols
types = combine.dtypes.tolist()
data_types = pd.DataFrame({'Column_Names':cols,'Data_Type':types})
pd.options.display.max_rows
data_types
data_types.loc[data_types.Data_Type ==
'int64']
# INT64 that should be object.
# MSSubClass is a dwelling type.
# OverallQual is an ordinal 1-10 value for quality.
# OverallCondition is an ordinal 1-10 for quality.
# YearBuilt is a year.
# YearRemodAdd is a year.
# MoSold is a month.
# YrSold is a year.
combine = combine.astype({'MSSubClass': object, 'OverallQual': object, 'OverallCond': object, 'YearBuilt': object,
'YearRemodAdd': object, 'MoSold': object, 'YrSold': object})
combine.dtypes
data_types.loc[data_types.Data_Type == 'float64']
# All seem fine.
# Descriptive Stats on combine DF
combine.describe()
# Get dummies for objects
# One-hot encode every categorical (object-dtype) column of `combine`.
# FIX: the original called select_dtypes twice in a row with the same
# effect; the redundant first assignment was removed.
combine_obj = combine.select_dtypes(include = 'object')
combine_obj.head()
combine_obj = pd.get_dummies(combine_obj)
combine_obj.head()
# Index by float variables
# Split the numeric part of `combine` by dtype for separate handling.
combine_float = combine.select_dtypes('float64')
# +
combine_int = combine.select_dtypes('int64')
# -
combine_float.head()
combine_float.columns
combine_int.columns
# Keep the row identifier aside; it is not a predictor.
Id = combine_int.Id
combine_int = combine_int.drop(['Id'], axis = 1)
# BUG FIX: the float and int frames hold *different columns* for the same
# rows, so they must be joined side by side (axis=1).  The original used
# the default axis=0, stacking them vertically so every row was NaN in
# roughly half the columns — and TotalBaths below came out all-NaN.
combine_nums = pd.concat([combine_float, combine_int], axis = 1)
combine_nums.columns
# ## creation of some predictors
# Aggregate the four bathroom counts into one feature.
combine_nums['TotalBaths'] = combine_nums['BsmtFullBath'] + combine_nums['BsmtHalfBath'] + combine_nums['FullBath'] + combine_nums['HalfBath']
#
# # Visualizations
# Distribution of the four basement square-footage variables.
fig, axes = plt.subplots(2,2)
axes[0,0].hist(combine.BsmtFinSF1, bins = 50, color = 'k', alpha = 0.5)
axes[0,1].hist(combine.BsmtFinSF2, bins = 50, color = 'k', alpha = 0.5)
axes[1,0].hist(combine.BsmtUnfSF, bins = 50, color = 'k', alpha = 0.5)
axes[1,1].hist(combine.TotalBsmtSF, bins = 50, color = 'k', alpha = 0.5)
combine.head()
# Frequency counts for a few categorical features.
# NOTE(review): positional Series arguments to sns.countplot/regplot are
# deprecated in newer seaborn — confirm the pinned seaborn version.
f, axes = plt.subplots(2,2)
sns.countplot(combine.Street, ax = axes[0,0], palette = 'Set1')
sns.countplot(combine.LotShape, ax = axes[0,1], palette = 'Set2')
sns.countplot(combine.MiscFeature, ax = axes[1,0], palette = 'Set3')
sns.countplot(combine.MoSold, ax = axes[1,1], palette = 'Set3')
plt.subplots_adjust(wspace = 0, hspace = 0)
# Lot area vs basement size, and the lot-area distribution.
f, (ax1, ax2) = plt.subplots(1,2)
ax1.scatter(combine.LotArea, combine.TotalBsmtSF)
ax2.hist(combine.LotArea, bins = 50)
fig, axs = plt.subplots(2,2, sharex = True, sharey = True)
sns.countplot(combine.MoSold, ax = axs[0,0])
sns.countplot(combine.YrSold, ax = axs[0,1])
sns.regplot(combine.LotArea, combine.TotalBsmtSF, ax = axs[1,0])
sns.regplot(combine.LotArea, combine.TotalBsmtSF, ax = axs[1,1])
fig, ax = plt.subplots(1,1)
ax.bar(combine.MSSubClass, combine.Street)
combine.columns
# Mean sale price broken down by roof material and fireplace count.
# BUG FIX: `SalePrice` was dropped from `train` earlier in the script, so
# `train.SalePrice` would raise AttributeError; use the Series saved
# before the drop (it is index-aligned with `train`).
cross = pd.crosstab(index = train.RoofMatl, columns = train.Fireplaces, values = SalePrice, aggfunc = np.mean).round(0)
cross
# Simple one-variable OLS of sale price on lot area.
# NOTE(review): no intercept term is added (sm.add_constant), so the fit
# is forced through the origin — confirm that is intended.
Y = SalePrice
X = train.LotArea
model = sm.OLS(Y,X).fit()
model.params
model.summary()
model.resid
# Residual distribution, clipped to a readable x-range.
fig, ax = plt.subplots(1,1)
plt.xlim(-500000,500000)
ax = sns.distplot(model.resid)
ax
| house prices kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Optimizing RNN using Genetic Algorithm
# #### Importing required packages
# +
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split as split
from keras.layers import LSTM, Input, Dense
from keras.models import Model
from deap import base, creator, tools, algorithms
from scipy.stats import bernoulli
from bitstring import BitArray
# Fix the NumPy RNG so data splits and GA initialisation are reproducible.
np.random.seed(1120)
# -
# #### Reading dataset and using first 17,257 points as training/validation and rest of the 1500 points as test set.
# +
data = pd.read_csv('train.csv')
# Keep only the 'wp1' wind-power series, as an (n, 1) column vector.
data = np.reshape(np.array(data['wp1']),(len(data['wp1']),1))
train_data = data[0:17257]
test_data = data[17257:]
# -
# #### Defining Helper Functions
# +
def prepare_dataset(data, window_size):
    """Turn an (n, 1) series into supervised (window -> next value) samples.

    Args:
        data: numpy array of shape (n, 1).
        window_size: number of consecutive values per input window.

    Returns:
        X: array of shape (n - window_size, window_size, 1).
        Y: array of shape (n - window_size, 1), the value following each window.
    """
    # Number of complete (window, target) pairs.  BUG FIX: the original
    # looped over range(len(data) - window_size - 1), an off-by-one that
    # silently dropped the last usable sample.
    n_samples = max(0, len(data) - window_size)
    # Pre-allocate instead of np.vstack/np.append per iteration (O(n^2)).
    X = np.empty((n_samples, window_size))
    Y = np.empty(n_samples)
    for i in range(n_samples):
        X[i] = data[i:(i + window_size), 0]
        Y[i] = data[i + window_size, 0]
    # Add the trailing feature axis expected by the LSTM input layer.
    X = np.reshape(X, (len(X), window_size, 1))
    Y = np.reshape(Y, (len(Y), 1))
    return X, Y
def train_evaluate(ga_individual_solution):
    """GA fitness function.

    Decodes a 10-bit chromosome into (window_size, num_units), trains a
    small LSTM regressor on the wind-power series, and returns validation
    RMSE as a 1-tuple (DEAP expects an iterable fitness).
    """
    # First six bits encode the window size, the remaining bits the LSTM width.
    window_size = BitArray(ga_individual_solution[0:6]).uint
    num_units = BitArray(ga_individual_solution[6:]).uint
    print('\nWindow Size: ', window_size, ', Num of Units: ', num_units)
    # A degenerate genome (empty window or zero-width layer) cannot be
    # trained; give it a large penalty fitness instead.
    if window_size == 0 or num_units == 0:
        return 100,
    # Re-window the training series and carve out a 20% validation split.
    X, Y = prepare_dataset(train_data, window_size)
    X_train, X_val, y_train, y_val = split(X, Y, test_size = 0.20, random_state = 1120)
    # Single-LSTM-layer regressor, briefly trained.
    net_input = Input(shape=(window_size, 1))
    hidden = LSTM(num_units, input_shape=(window_size, 1))(net_input)
    output = Dense(1, activation='linear')(hidden)
    net = Model(inputs=net_input, outputs=output)
    net.compile(optimizer='adam', loss='mean_squared_error')
    net.fit(X_train, y_train, epochs=5, batch_size=10, shuffle=True)
    # Fitness = RMSE on the held-out validation set (lower is better).
    rmse = np.sqrt(mean_squared_error(y_val, net.predict(X_val)))
    print('Validation RMSE: ', rmse,'\n')
    return rmse,
# -
# #### Genetic Representation of the Solution
# <img src="genetic_representation.png" alt="Genetic representation of a solution">
# #### Genetic Algorithm Overview
# <img src="ga.png" alt="Genetic Algorithm">
# +
# GA hyper-parameters: tiny population/generation counts because each
# fitness evaluation trains an LSTM.
population_size = 4
num_generations = 4
gene_length = 10
# As we are trying to minimize the RMSE score, that's why using -1.0.
# In case, when you want to maximize accuracy for instance, use 1.0
# NOTE(review): the class is named 'FitnessMax' but the -1.0 weight makes
# this a *minimisation* fitness — 'FitnessMin' would be less misleading.
creator.create('FitnessMax', base.Fitness, weights = (-1.0,))
creator.create('Individual', list , fitness = creator.FitnessMax)
toolbox = base.Toolbox()
# Each gene is an independent Bernoulli(0.5) bit; an individual is 10 bits.
toolbox.register('binary', bernoulli.rvs, 0.5)
toolbox.register('individual', tools.initRepeat, creator.Individual, toolbox.binary, n = gene_length)
toolbox.register('population', tools.initRepeat, list , toolbox.individual)
# NOTE(review): cxOrdered and mutShuffleIndexes are permutation operators;
# for a bitstring genome cxTwoPoint/mutFlipBit are the conventional choice
# — confirm the operator selection is intentional.
toolbox.register('mate', tools.cxOrdered)
toolbox.register('mutate', tools.mutShuffleIndexes, indpb = 0.6)
toolbox.register('select', tools.selRoulette)
toolbox.register('evaluate', train_evaluate)
population = toolbox.population(n = population_size)
# Run a plain generational GA (eaSimple) for `num_generations` generations.
r = algorithms.eaSimple(population, toolbox, cxpb = 0.4, mutpb = 0.1, ngen = num_generations, verbose = False)
# -
# #### Print top N solutions - (1st only, for now)
# +
# Decode the best surviving chromosome back into hyper-parameters,
# mirroring the bit layout used in train_evaluate.
best_individuals = tools.selBest(population,k = 1)
best_window_size = None
best_num_units = None
for bi in best_individuals:
    window_size_bits = BitArray(bi[0:6])  # first 6 bits -> window size
    num_units_bits = BitArray(bi[6:])     # remaining bits -> LSTM units
    best_window_size = window_size_bits.uint
    best_num_units = num_units_bits.uint
    print('\nWindow Size: ', best_window_size, ', Num of Units: ', best_num_units)
# -
# #### Train the model using best configuration on complete training set and make predictions on the test set
# +
# Retrain with the winning hyper-parameters on the full training split
# and report RMSE on the held-out test points.
X_train,y_train = prepare_dataset(train_data,best_window_size)
X_test, y_test = prepare_dataset(test_data,best_window_size)
inputs = Input(shape=(best_window_size,1))
x = LSTM(best_num_units, input_shape=(best_window_size,1))(inputs)
predictions = Dense(1, activation='linear')(x)
model = Model(inputs = inputs, outputs = predictions)
model.compile(optimizer='adam',loss='mean_squared_error')
model.fit(X_train, y_train, epochs=5, batch_size=10,shuffle=True)
y_pred = model.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print('Test RMSE: ', rmse)
| Genetic-Algorithm-RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="992dfd65-f2ec-486c-8ec5-7df93a068664" _uuid="c41ed77d44f9c07f6f95a8fcfa1b6d2ace658b2c"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from scipy import stats
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input/kkbox-churn-prediction-challenge"]).decode("utf8"))
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing
import os # os commands
from datetime import datetime as dt #work with date time format
# %matplotlib notebook
# initiate matplotlib backend
import seaborn as sns # work over matplotlib with improved and more graphs
import matplotlib.pyplot as plt #some easy plotting
# + _cell_guid="c202a678-b1d4-4444-b313-3ce3594b5a51" _uuid="c53865c744f95bd7aaca04a2a5b0a6c9782d89d4"
transactions = pd.read_csv('../input/transactions.csv', engine = 'c', sep=',')#reading the transaction file
# + _cell_guid="6dfae882-9843-4684-90d5-3dae3a9d9fba" _uuid="5e88e2a22692b4beb3f8cf1605a24f6774587f01"
transactions_v2 =pd.read_csv('../input/transactions_v2.csv', engine = 'c', sep=',')
# + _cell_guid="f0c509de-543b-4b21-80c8-042e4851fedb" _uuid="c3034308f73022c7f0e10471bee30f00e1eb89ac"
transactions.info()
# + _cell_guid="80bdf630-74c9-45b4-9f75-f598207cc1f8" _uuid="24588ae07d30f249e3ef8bbd7e0a2cad354582b6"
transactions.describe()
# + [markdown] _cell_guid="57156994-d74a-405f-889d-0d3332c9bcb8" _uuid="89b02545f0df9688966eb48243c14b0e0a330deb"
# ** to reduce the size of transactions dataframe**
# + _cell_guid="db6dcf20-bbc9-41c6-b293-b06fb2396887" _uuid="e56d4992246465bb85c3bd4489948833ea20aa00"
# Inspect observed ranges first so the integer downcasts below are known to be safe.
print("payment_plan_days min: ",transactions['payment_plan_days'].min())
print("payment_plan_days max: ",transactions['payment_plan_days'].max())
print('payment_method_id min:', transactions['payment_method_id'].min())
print('payment_method_id max:', transactions['payment_method_id'].max())
# + _cell_guid="d67efbad-5db5-4048-b745-b2dc4cceb5dc" _uuid="19e54896bdf6ed40c77dbb662c5d8143d7c606f9"
# Downcast to the smallest integer type that holds the observed range.
transactions['payment_method_id'] = transactions['payment_method_id'].astype('int8')
transactions['payment_plan_days'] = transactions['payment_plan_days'].astype('int16')
transactions_v2['payment_method_id'] = transactions_v2['payment_method_id'].astype('int8')
transactions_v2['payment_plan_days'] = transactions_v2['payment_plan_days'].astype('int16')
# + _cell_guid="73a8dfac-6d30-443a-8be6-1e4eb9e43d7f" _uuid="28800f1e04569fa1cacbe041adb917dc96d91cfd"
print('plan list price varies from ', transactions['plan_list_price'].min(), 'to ',transactions['plan_list_price'].max() )
print('actual amount varies from ', transactions['actual_amount_paid'].min(),'to ', transactions['actual_amount_paid'].max() )
# + _cell_guid="4a8213ae-dfa3-4637-adaa-61b129a3d29e" _uuid="4112175a854ebf3660de3cbde8695cb410d94030"
transactions['plan_list_price'] = transactions['plan_list_price'].astype('int16')
transactions['actual_amount_paid'] = transactions['actual_amount_paid'].astype('int16')
transactions_v2['plan_list_price'] = transactions_v2['plan_list_price'].astype('int16')
transactions_v2['actual_amount_paid'] = transactions_v2['actual_amount_paid'].astype('int16')
# + [markdown] _cell_guid="7e8be115-f1ac-4578-a88b-b0f7190c385b" _uuid="eb3635d153583c9c7bc445340b2a5e372523f121"
# ** size of file has decreased by almost 33% **
# + _cell_guid="c638a90a-f422-4ef9-a596-2d7003bd4543" _uuid="7b856482c5decef187f0e4f7bf991778c66778d4"
transactions.info()
# + _cell_guid="8d3384ca-1406-4366-9fe8-e588fa5e01bb" _uuid="3999b02e2840b612162848657caa5c59a078c0fe"
transactions['is_auto_renew'] = transactions['is_auto_renew'].astype('int8') # 0/1 flag stored as int8
transactions['is_cancel'] = transactions['is_cancel'].astype('int8')# 0/1 flag stored as int8
transactions_v2['is_auto_renew'] = transactions_v2['is_auto_renew'].astype('int8') # 0/1 flag stored as int8
transactions_v2['is_cancel'] = transactions_v2['is_cancel'].astype('int8')# 0/1 flag stored as int8
# + _cell_guid="8ca24ff3-a71c-432f-b20c-033b21b66911" _uuid="7fe2e77bbacba9cedaba3a9f1e1e6d259b4f9e23"
sum(transactions.memory_usage()/1024**2) # memory usage in MB
# + _cell_guid="66632670-5d34-4705-bc76-0e1ed0d86bf0" _uuid="a6e7ced0c0fb4d0efe065278ca18fd71c8518c2d"
transactions['membership_expire_date'] = pd.to_datetime(transactions['membership_expire_date'].astype(str), infer_datetime_format = True, exact=False)
transactions_v2['membership_expire_date'] = pd.to_datetime(transactions_v2['membership_expire_date'].astype(str), infer_datetime_format = True, exact=False)
# converting the series to string and then to datetime format for easy manipulation of dates
sum(transactions.memory_usage()/1024**2) # this wouldn't change the size of df as memory occupied by object is similar to datetime
# + _cell_guid="3162f650-043a-4da2-a542-997b7559d78f" _uuid="2ffd3e1b9e5f588ba13739748a9ead2f4345e79b"
transactions['transaction_date'] = pd.to_datetime(transactions['transaction_date'].astype(str), infer_datetime_format = True, exact=False)
transactions_v2['transaction_date'] = pd.to_datetime(transactions_v2['transaction_date'].astype(str), infer_datetime_format = True, exact=False)
# + [markdown] _cell_guid="6ca16009-a400-4bd1-aafd-06e4faec29fe" _uuid="b8e56edae280cad06f5c36e22f606a7f475b7e37"
# ** repeating the same process on members file/df**
# + _cell_guid="921ba6d3-d65d-4364-a6e3-0ab49669e134" _uuid="6c0abd6580322cbecca38b3ad8d42faa1540aad8"
members = pd.read_csv('../input/members_v3.csv')
# + _cell_guid="5decaaa9-777f-451f-9d53-682c6829bd08" _uuid="4676eb918e00ec0bf358e65dd0c01b22acc37783"
members.info()
# + _cell_guid="2dd34052-6cb3-4b81-87f1-86678c92af4e" _uuid="b0160c34048e62381b5217b113f22fc875ad55ed"
members.describe()
# + _cell_guid="2fa3d846-0a5b-4d49-a0eb-ff1844dd9f50" _uuid="c35ae39dc57167193b90754dabc14d54aa5d17b6"
members['city']=members['city'].astype('int8');
# BUG FIX: the original cast bd to int16 and then immediately re-cast it to
# int8.  Later cells in this script filter bd for values < 0 or > 100, so
# such out-of-range ages exist; an int8 cast silently wraps them (e.g. a
# bogus age of 300 becomes 44) and corrupts the outlier filtering.  Keep
# int16, which covers the full observed range.
members['bd'] = members['bd'].astype('int16');
members['registration_init_time'] = pd.to_datetime(members['registration_init_time'].astype(str), infer_datetime_format = True, exact=False)
#members['expiration_date'] = pd.to_datetime(members['expiration_date'].astype(str), infer_datetime_format = True, exact=False)
# + [markdown] _cell_guid="fdac1bc7-c92b-44df-a1d2-53adb070a017" _uuid="e031bbdecd32bc9719f627bf84eccbc15ae97c8b"
# ** doing the same with train data**
# + _cell_guid="928b36ac-bdce-4281-a987-4f8b7b24cb3d" _uuid="2a518c3a9450b3e3ce88e9a96209fc9cdfcaff53"
# Load the churn labels for both competition phases.
train = pd.read_csv('../input/train.csv')
train_v2 = pd.read_csv('../input/train_v2.csv')
train.head()
train['is_churn'] = train['is_churn'].astype('int8');
train_v2['is_churn'] = train_v2['is_churn'].astype('int8');
# + _cell_guid="5008ab3c-1a2a-4b72-a365-0093f3c5ed27" _uuid="1461b020f6b94faa2927dae91258fa0ab746cd5e"
transactions['msno'].head(10)
# + _cell_guid="7d9f7bdd-0405-4f09-af59-14645dfe232c" _uuid="0a710eda79c8cf79108f3d0f90cd0d5e897518b0"
# Compare distinct user counts between the label and transaction tables.
train['msno'].nunique()#.append(train_v2)['msno'].nunique()
# + _cell_guid="9ac1d580-bf7d-43a1-b32c-f226d6bad9f2" _uuid="a819b894fc2e98c0572a84d5e8c2c3708cf9155b"
transactions['msno'].nunique()
# + _cell_guid="a207329b-e379-40a5-8499-11cefccbd34a" _uuid="e3c71326abda081b6b8707527794f18f20efae7a"
len(transactions_v2['msno'])#.nunique()
# + _cell_guid="457c3631-2236-4cac-a8e6-2cf3e36795a5" _uuid="5d7a7e4b16d9f3740e602f765cfb4f57f7ac5534"
# Spot-check one user's full transaction history in date order.
transactions[transactions['msno']=='uQxbyACsPOEkTIrv9jZgoGXelGBW81ZsSZKy9fhj5Z8='].sort_values('membership_expire_date')
# + [markdown] _cell_guid="9e414366-d42a-4827-9c5b-8bf44f199372" _uuid="9fae3814ba776e2a5ee6357d74cc80d413dcb4bf"
# ** now merging all the dataframe with inner joint as we would not want half information about users**
# + _cell_guid="d0bf3fe1-08ad-4339-8ac8-a1568a6dce9d" _uuid="54a7c0fb9ee1b6e46c2450e4b73f8b2512713e83"
# Inner-join members + transactions + labels so only fully-described users
# remain; the train-phase result is `data`.
members_trans = members.merge(transactions, how='inner', on='msno')
data = members_trans.merge(train, how='inner', on='msno')
# deleting the previously imported df as they occupy space in memory
del transactions
del train
del members_trans
# + _cell_guid="9b32f587-7feb-4cbc-a9f3-f1f4a7901b45" _uuid="3de865b24deb4a31dd1a680f4fdca58edbc1a8ab"
# Same joins for the v2 (test-phase) tables, producing `test_data`.
members_trans = members.merge(transactions_v2, how='inner', on='msno')
test_data = members_trans.merge(train_v2, how='inner', on='msno')
# deleting the previously imported df as they occupy space in memory
del transactions_v2
del members
del train_v2
del members_trans
# + _cell_guid="d610f3e8-2855-47bc-b977-ce988334934e" _uuid="fd053aff9672050a8e21bc295a54db1f7316e1d4"
#total memory consumptions by all these data frame
sum(data.memory_usage()/1024**2)
# + [markdown] _cell_guid="2656f461-8917-4950-ab72-2ce4325c2002" _uuid="e44920e006ed654afb2ae2fdc9f464206e89fedc"
# Fraction of rows with a missing gender value
# + _cell_guid="cfe04136-73c3-47ad-bfc6-d03014e72e21" _uuid="ac6ad32a19769c06f81e2818f8c298457396dd79"
sum(data['gender'].isnull())/len(data)
# + _cell_guid="bcfe3d15-8a50-44e4-8ecc-0a0e52511843" _uuid="398d43c56de9cdafa692ec7c9ddd2a90736ff768"
sum(test_data['gender'].isnull())/len(test_data)
# + [markdown] _cell_guid="fefbafa2-6922-4d4c-9f8e-c0d3e2719877" _uuid="3462ee4d5a5fe659a42d09f6bc77e9c76258c903"
# ~52% of gender data is missing or the users did not provide the data. This can be ascertained by seeing whether there is a significant difference in churn between those whose gender is available and those whose gender data is not available.
# + [markdown] _cell_guid="26bd67d5-b36a-421b-86f6-18896d09201f" _uuid="4f8759651c8b8baa3a75dac94d5bcc46ee2ea428"
# **EDA for train data and dummy variables**
# + _cell_guid="d658f580-46ad-478a-af9e-737fd079e5cf" _uuid="c13cddb889fd489d402bb93d7ed2584f3b4cad10"
# Mean churn rate per gender (per-user first, then per-gender).
plt.figure()
data.groupby(['msno','gender'])['is_churn'].mean().groupby(level=1).mean().plot.bar();
#taking the mean will give us a fraction of people churning as values are 0/1
# + [markdown] _cell_guid="dda2a74a-8f80-4a71-80e8-821a690c65c4" _uuid="6403871dcc5b73eb851ad3f07d26f146ed30a69c"
# ** There seems to be no correlation between gender and churn, but let's check for a correlation between people who provided gender and who did not**
# + _cell_guid="68226b2f-d8f1-4dce-b165-ed85462c8ab7" _uuid="77fc5359ff1d50dac4bafcaf8bf78965a418b858"
def assign_gender(item):
    """Map a raw gender value to a "gender provided" flag.

    Returns 1 when the value is the string 'male' or 'female', and 0 for
    anything else (including NaN / missing values).
    """
    return 1 if item in ('male', 'female') else 0
# Replace the gender column with a provided/not-provided 0/1 flag.
data['gender'] = data['gender'].apply(assign_gender)
# + _cell_guid="eb767a15-bcc3-4a59-b7f4-db71f3993ea3" _uuid="998143a997fc24ff93c6ff2781f759593aa99c63"
test_data['gender'] = test_data['gender'].apply(assign_gender)
# + _cell_guid="9a8efefa-f1d8-4027-97a7-cb71356671b3" _uuid="4090dae2458f426d1b4c4ec7369738983dfe9854"
# Churn rate split by whether gender was provided.
plt.figure()
data.groupby(['msno','gender'])['is_churn'].mean().groupby(level=1).mean().plot.bar();
# + [markdown] _cell_guid="367d3ad3-9fe4-49a2-860f-6141d2ffb558" _uuid="bc81fa0576238f0c67c24a4458ba242528346fe3"
# ** we found a new dummy variable**
# + _cell_guid="2442b8ac-3828-4d7a-961d-60f3cba317ce" _uuid="3eb6715422994e73db068e27f61303cb72eb1120"
# plotting the correlation heatmap between the variables
correl = data.corr()
# FIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` is the correct dtype here.
mask = np.zeros_like(correl, dtype=bool)
# Mask the upper triangle so each pair is drawn only once.
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(correl, mask=mask, cmap=cmap,
            square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
# + [markdown] _cell_guid="814d89a1-6bc9-4075-ad14-f642da545a0c" _uuid="664b62101f7251e0a5d2d58a00a1f2d68ca6250e"
# but this doesn't tell us a lot about any correlation
# + [markdown] _cell_guid="65c944b5-e394-450e-8210-5674ebe17c37" _uuid="9f77b4696ab590abf7cb6f1a900a8a33ab058a07"
# **Also, there is a difference in the plan listprice and actual amount paid. So surely there is a new variable in discount. **
# + _cell_guid="f4365e62-46c9-49de-9e07-5319f29dc38e" _uuid="7527acafc160a65425793031944bd29acdd2706a"
# New feature: the discount a user received on each transaction.
data['discount'] = data['plan_list_price'] - data['actual_amount_paid']
test_data['discount'] = test_data['plan_list_price'] - test_data['actual_amount_paid']
# + [markdown] _cell_guid="b4dec409-77a3-415a-8902-039b5fada886" _uuid="6c0e2b7f726c4db360a07243b7427849846ddf48"
# **See how many people churn from different cities**
# + _cell_guid="39fbf9cc-f99c-4475-b9b4-44895622e19c" _uuid="6249ed6c0b2492e50d400cac1e4e3d25778535ff"
# %matplotlib notebook
# Per-city churn rate (per-user mean first, then per-city mean).
plt.figure()
data.groupby(['msno','city'])['is_churn'].mean().groupby(level=1).mean().plot.bar();
ax= plt.gca();
ax.set_xticks(np.linspace(0,22,23));
# + [markdown] _cell_guid="caec6d92-f6a9-4d89-a49c-1522bf41b242" _uuid="053d41d64eb26999f2082087bb1c149bcfc65621"
# **surely, there is a huge variation in churn proportion from different cities**
# + [markdown] _cell_guid="fdcba41b-72e1-4179-825f-fd8d0aadd52a" _uuid="8ebd13263367d5e109668324aad4bdfc062f9758"
# Doing the same analysis for other variables such as registered_via, payment_method_id, is_churn
# + _cell_guid="923abcbc-def1-4611-99d5-285e43b7a807" _uuid="7c5ff6f9c2af682f32ce2dac09406639ae12163c"
data['registered_via'].unique()
# + _cell_guid="3fcda9f0-5446-4096-aebd-87f3a3e94455" _uuid="1f17de977ea79b1fa3253fc700297656a66a1536"
plt.figure()
data.groupby(['msno','registered_via'])['is_churn'].mean().groupby(level=1).mean().plot.bar();
# + _cell_guid="5b0d275a-25d1-46d7-b02c-3d5fb8f2ceca" _uuid="8948e29f524face6beb3db65984a61625221be49"
plt.figure()
data.groupby(['msno','payment_method_id'])['is_churn'].mean().groupby(level=1).mean().plot.bar();
# + _cell_guid="8169c651-1dec-4d59-972e-2b8607ab3014" _uuid="e3661520d0272c7ad62977f8e4e06154462b8883"
plt.figure()
data.groupby(['msno','is_cancel'])['is_churn'].mean().groupby(level=1).mean().plot.bar();
# + [markdown] _cell_guid="043aab96-e34e-4d36-a6e4-97c5df8f8c02" _uuid="7bc51d8c02cd0fe3541607a67cfa64fcda00071b"
# ** now plotting a pairplot for each variable**
# + _cell_guid="eece31d7-0dcb-4e82-a1a4-1b65b825fef2" _uuid="3135c454f30b30c6f86773e280d69eb24fb220a2"
# %matplotlib inline
# NOTE(review): pairplot is drawn on data.head() — only the first 5 rows.
fig = plt.figure();
sns.pairplot(data.head(), hue='is_churn', diag_kind='kde');
fig.canvas.draw();
# + [markdown] _cell_guid="49c9302b-93a1-4560-9a9d-ed0127eec6ed" _uuid="6e1b91ab2cdb1f7c9ac3b4d7769a073de44cb878"
# ** now creating dummy variables for gender(whether gender priovided or not), registeres_via and city**
# + _cell_guid="df8c7e3c-c7e4-4c36-8945-d61a6418e3de" _uuid="5a31e0b0c90cb5ebefea09766f5d25363282a034"
# One-hot encode the gender flag.  NOTE: gender is 0/1 here (set by
# assign_gender), so get_dummies produces integer columns 0 and 1; the
# True/False rename keys still match them because True == 1 / False == 0.
newdf= data.join(pd.get_dummies(data['gender'])) #creating a new columns for gender dummy variable
gender = {}
gender.update({True:'gender_provided'}) # create a dictionary to automatic renaming of columns
gender.update({False:'gender_not_provided'})
newdf = newdf.rename(columns=gender) #renaming the new columns
del newdf['gender']# deleting the extra columns
# + _cell_guid="37aa6768-1e42-4321-a081-3ca45c0157df" _uuid="95cd37f2fb4e5f4d8daa71e3b60d3c3597e3be76"
# Same gender encoding for the test-phase frame.
test_newdf= test_data.join(pd.get_dummies(test_data['gender'])) #creating a new columns for gender dummy variable
gender = {}
gender.update({True:'gender_provided'}) # create a dictionary to automatic renaming of columns
gender.update({False:'gender_not_provided'})
test_newdf = test_newdf.rename(columns=gender) #renaming the new columns
del test_newdf['gender']# deleting the extra columns
# + _cell_guid="134154a0-cf6d-4871-b418-c781f225013f" _uuid="a1bb74116ca4047b50104dbc5e2ad6a106075f83"
# One-hot encode registration channel, renaming columns to registered_viaN.
newdf= newdf.join(pd.get_dummies(newdf['registered_via'])) #creating a new columns for registered_via dummy variable
registered_via = {}
for i in data['registered_via'].unique():
    registered_via.update({i:'registered_via{}'.format(i)}) # create a dictionary to automatic renaming of columns
newdf = newdf.rename(columns=registered_via) #renaming the new columns
del newdf['registered_via']# deleting the extra columns
# + _cell_guid="55930072-ea40-411e-a32e-2fca73f84e32" _uuid="ab90dcf6e21e680a6ff06e3d52e8a32b33f69971"
test_newdf= test_newdf.join(pd.get_dummies(test_newdf['registered_via'])) #creating a new columns for registered_via dummy variable
registered_via = {}
for i in data['registered_via'].unique():
    registered_via.update({i:'registered_via{}'.format(i)}) # create a dictionary to automatic renaming of columns
test_newdf = test_newdf.rename(columns=registered_via) #renaming the new columns
del test_newdf['registered_via']# deleting the extra columns
# + _cell_guid="0cf2a800-cd51-4938-87fe-9d329a9c6638" _uuid="c1938360605b8bebf09922317a4e5698257feee5"
# One-hot encode city, renaming columns to cityN.
newdf= newdf.join(pd.get_dummies(newdf['city'])) #creating a new columns for city dummy variable
city = {}
for i in data['city'].unique():
    city.update({i:'city{}'.format(i)}) # create a dictionary to automatic renaming of columns
newdf = newdf.rename(columns=city) #renaming the new columns
del newdf['city']# deleting the extra columns
newdf.head(10)
# + _cell_guid="97455b48-92b8-4c66-acc0-2846404a4808" _uuid="8aa52424bb620e1b21b0430851e1f2dce8e29b94"
test_newdf= test_newdf.join(pd.get_dummies(test_newdf['city'])) #creating a new columns for city dummy variable
city = {}
for i in data['city'].unique():
    city.update({i:'city{}'.format(i)}) # create a dictionary to automatic renaming of columns
test_newdf = test_newdf.rename(columns=city) #renaming the new columns
del test_newdf['city']# deleting the extra columns
# + _cell_guid="4449a554-72ff-40f1-9afe-48297cddd3dd" _uuid="1b808e6b9ad19e78ba606b092070e78e3dc0ef2b"
def reject_outliers(data, m=2):
    """Drop values farther than m standard deviations from the mean."""
    center = np.mean(data)
    spread = np.std(data)
    keep = abs(data - center) < m * spread
    return data[keep]
# + _cell_guid="afed6430-1b12-42b9-b46e-62a14fa2b50f" _uuid="56421bcde48e35e0241f66736594f8682c10391e"
# Free the pre-dummy frames; only newdf/test_newdf are used from here on.
del test_data
del data
# + [markdown] _cell_guid="58357143-a29d-4c43-abc0-158f87196253" _uuid="46b453c94538a2acc803aadcb60018310bc9bf99"
# ** calculating the percentage of people aged negative or more than 100**
#
#
# + _cell_guid="0030093b-b28c-47d6-bc77-fc505b7a6eab" _uuid="0e4b03515c10057cdee1daa0378e26612bfc4773"
# Mean age computed *before* replacement (so it includes the outliers).
bd_mean = np.mean(newdf['bd']);
test_bd_mean = np.mean(test_newdf['bd']);
# + _cell_guid="005b6b4b-446a-4080-a93e-6ba94fa1626d" _uuid="c98fa7410623e257a28112998013f423558b63ba"
print(len(newdf[(newdf['bd']<0)|(newdf['bd']>100)])/len(newdf)*100,'%')
# + _cell_guid="e6771de1-3612-4072-9749-093ffd886bf0" _uuid="49436bb6b9c41081e356807e64b9eb69b80be8e8"
# BUG FIX: the original used chained indexing (df[mask].loc[:, 'bd'] = ...),
# which assigns into a temporary copy and silently leaves the frame
# unchanged.  .loc with the boolean row mask assigns in place.
newdf.loc[(newdf['bd']<0)|(newdf['bd']>100), 'bd'] = bd_mean # filling the odd aged people with value = mean of age of users
test_newdf.loc[(test_newdf['bd']<0)|(test_newdf['bd']>100), 'bd'] = test_bd_mean # filling the odd aged people with value = mean of age of users
# + _cell_guid="2d452991-1d0b-477a-a5da-5a6f826162ef" _uuid="df5746e79d6938b5139250f9282d1eec042f1a98"
# Constant 1 per transaction row; summing it in the groupby below yields
# each user's number of transactions ("recharges").
test_newdf['count_of_recharge'] = 1
newdf['count_of_recharge'] = 1
# + _cell_guid="7c960764-1efb-4189-b342-8b7d8d3fb13b" _uuid="cff5df2c7bca3ebfeb9b05ff2151c028f9f08a42"
print(newdf.columns)
# + _cell_guid="0b4a8dd8-2d94-4e0e-93f5-160f10dccff3" _uuid="786bb9d4f652424aab8d887d78f80b69bfaf9bae"
print(test_newdf.columns)
# + _cell_guid="5dca6c77-07c4-4df2-aee0-d1132eb30fe3" _uuid="70c8907d4748335ed98cc53ce87fcd56ce1eabf9"
# Collapse the transaction-level frames to one row per user: averages for
# rates/amounts, min/max for dates, sum for counts and discount, max for
# the churn label.
test_newdf_grouped = test_newdf.groupby('msno').agg({ 'bd':np.mean, 'registration_init_time':min,
'payment_plan_days':np.mean, 'plan_list_price':np.mean,'count_of_recharge':'sum', 'actual_amount_paid':np.mean,
'is_auto_renew':np.mean, 'transaction_date':min, 'membership_expire_date':max,
'is_cancel':np.mean, 'is_churn':max, 'discount':'sum', 'gender_not_provided':np.mean, 'gender_provided':np.mean,
'registered_via3':np.mean, 'registered_via4':np.mean, 'registered_via7':np.mean,
'registered_via9':np.mean, 'registered_via13':np.mean, 'city1':np.mean, 'city3':np.mean,
'city4':np.mean,'city5':np.mean, 'city6':np.mean, 'city7':np.mean, 'city8':np.mean,
'city9':np.mean, 'city10':np.mean, 'city11':np.mean, 'city12':np.mean, 'city13':np.mean,
'city14':np.mean, 'city15':np.mean, 'city16':np.mean, 'city17':np.mean, 'city18':np.mean,
'city19':np.mean, 'city20':np.mean, 'city21':np.mean, 'city22':np.mean})
# + _cell_guid="ddd86c6d-c271-4590-a10c-6f808ff9b624" _uuid="564253730b6274dc0b8aba30ce65a4e875c62339"
newdf_grouped = newdf.groupby('msno').agg({ 'bd':np.mean, 'registration_init_time':min,
'payment_plan_days':np.mean, 'plan_list_price':np.mean,'count_of_recharge':'sum', 'actual_amount_paid':np.mean,
'is_auto_renew':np.mean, 'transaction_date':min, 'membership_expire_date':max,
'is_cancel':np.mean, 'is_churn':max, 'discount':'sum', 'gender_not_provided':np.mean, 'gender_provided':np.mean,
'registered_via3':np.mean, 'registered_via4':np.mean, 'registered_via7':np.mean,
'registered_via9':np.mean, 'registered_via13':np.mean, 'city1':np.mean, 'city3':np.mean,
'city4':np.mean,'city5':np.mean, 'city6':np.mean, 'city7':np.mean, 'city8':np.mean,
'city9':np.mean, 'city10':np.mean, 'city11':np.mean, 'city12':np.mean, 'city13':np.mean,
'city14':np.mean, 'city15':np.mean, 'city16':np.mean, 'city17':np.mean, 'city18':np.mean,
'city19':np.mean, 'city20':np.mean, 'city21':np.mean, 'city22':np.mean})
# + _cell_guid="123f651d-38de-400b-8d1b-cf6a5fd33a5b" _uuid="1c410cb091b041f6e028fd952cf4f3c1a0b4d1ca"
newdf_grouped.head(10)
# + _cell_guid="61184554-b98c-4cee-adef-09c524e9b098" _uuid="4b3640ef043bed8fa8610fb416a137ddaeb11757"
newdf_grouped.columns
# + _cell_guid="2f62bc55-71b6-4b6c-bb3c-9cc42c271a3e" _uuid="66bc10a969a303a0353373b054b7640befedf8b7"
# Re-binarise the averaged dummy columns (any value > 0 -> 1) as int8.
# NOTE(review): the second line indexes test_newdf_grouped with
# newdf_grouped's column slice — confirm the two frames share column order.
newdf_grouped[newdf_grouped.columns[-28:]] = newdf_grouped[newdf_grouped.columns[-28:]].applymap(lambda x: 1 if x>0 else 0).apply(lambda x: x.astype('int8'))
test_newdf_grouped[newdf_grouped.columns[-28:]] = test_newdf_grouped[newdf_grouped.columns[-28:]].applymap(lambda x: 1 if x>0 else 0).apply(lambda x: x.astype('int8'))# converting 0/1 for city
# + [markdown] _cell_guid="1b5e6ad6-0354-4d0e-b1ab-32917e7fc487" _uuid="43dd51be7297bd13d9cb21f87b7e07607ef51463"
# newdf_grouped[newdf_grouped.columns[11]].describe()
# + _cell_guid="a59a1ebb-e26e-4a0f-a10d-2f2f84097bf1" _uuid="0c606d0f8864e553848b1a8200dd8fad3bb577e9"
# Downcast the summed discount to int16.
newdf_grouped['discount'] = newdf_grouped['discount'].astype('int16')
# + _cell_guid="058807d6-8a58-4e64-865d-8fec71a9148c" _uuid="078ee0e38337c3da4ffbcd05142c8ee8d31e39a5"
newdf_grouped[newdf_grouped.columns[2:5]].describe()
# + _cell_guid="66be7175-df52-4711-a66f-f65d6ee53e93" _uuid="681276e2235137ddacfd1e23395fea53d1525ed5"
# Round the averaged plan/payment columns back to whole-number int16.
newdf_grouped[newdf_grouped.columns[2:5]] = newdf_grouped[newdf_grouped.columns[2:5]].apply(lambda x: round(x).astype('int16'))
test_newdf_grouped[newdf_grouped.columns[2:5]] = test_newdf_grouped[newdf_grouped.columns[2:5]].apply(lambda x: round(x).astype('int16'))
# + _cell_guid="acd68c9a-251f-4391-9a21-84ce293acdb7" _uuid="5366af324533944f7d493cb8d81179fa7307e13e"
#churn is 6.5% which is not as bad.
np.divide(np.sum(newdf_grouped['is_churn']),newdf_grouped.index.nunique())*100
# + _cell_guid="c4d8edb4-ae1b-41c4-8e27-44e6e5d0341e" _uuid="2152a512939915e866d2eb35f4f55d6b4756a97d"
np.divide(np.sum(test_newdf_grouped['is_churn']),test_newdf_grouped.index.nunique())*100
# + _cell_guid="2a6a93ee-5a1d-4eed-98fc-0d8f5426b45b" _uuid="d542c86c9f101afb49c8ec6e7f65c08f8aceb27b"
newdf_grouped.head(10)
# + _cell_guid="9abbbd13-50ee-4a22-ad98-25cccb820dfc" _uuid="19e4c37effb98ea74479b5e994b71fd929f71ba5"
newdf_grouped['days_to_buy_membership'] = newdf_grouped['transaction_date'] - newdf_grouped['registration_init_time']
newdf_grouped['tenure_membership'] = newdf_grouped['membership_expire_date']-newdf_grouped['transaction_date']
newdf_grouped['days_to_buy_membership'] = (newdf_grouped['days_to_buy_membership']/ np.timedelta64(1, 'D')).astype(int)
newdf_grouped['tenure_membership'] = (newdf_grouped['tenure_membership']/ np.timedelta64(1, 'D')).astype(int)
# + _cell_guid="cd369462-30bd-4d46-9072-4304148d2704" _uuid="00408f4ca4f9e2c244a3c2dc317866ed025e55a6"
test_newdf_grouped['days_to_buy_membership'] = test_newdf_grouped['transaction_date'] - test_newdf_grouped['registration_init_time']
test_newdf_grouped['tenure_membership'] = test_newdf_grouped['membership_expire_date']-test_newdf_grouped['transaction_date']
test_newdf_grouped['days_to_buy_membership'] = (test_newdf_grouped['days_to_buy_membership']/ np.timedelta64(1, 'D')).astype(int)
test_newdf_grouped['tenure_membership'] = (test_newdf_grouped['tenure_membership']/ np.timedelta64(1, 'D')).astype(int)
# + _cell_guid="560acc0a-1ec0-4712-9385-3646bde8fd77" _uuid="0dbdf0b9ec632adb2599453dc41f623c3411e992"
newdf_grouped.columns
# + _cell_guid="e7aed230-e64b-43b7-ad62-1ca1621af565" _uuid="ba7841d6c0162f30e5df0220f2b0ef6fec896e5d"
# + _cell_guid="021824d4-8b57-45cc-a4df-dfb5b919dc2c" _uuid="c5514b764ba12a19ea327cc9f8f2dc8681cf0937"
from sklearn.preprocessing import MinMaxScaler
# Scale features to [0, 1]; the scaler is fit on the training split only,
# so the test transform uses training-set ranges (no leakage from test).
scale = MinMaxScaler();
scale.fit(X_train);
# NOTE(review): `X_train_train` looks like a typo for `X_train_trans`
# (mirroring `X_test_trans` below); the name is never reused in this
# excerpt — confirm before renaming.
X_train_train = scale.transform(X_train);
X_test_trans = scale.transform(X_test);
# + [markdown] _cell_guid="155d0064-3f17-4b4d-b040-8c46a08e1a4d" _uuid="a70641efae143602e1ea606afc90d5bd7bf564dc"
# '''
# # h=change the type of these series
# transactions = pd.read_csv('../input/transactions.csv', engine = 'c', sep=',')#reading the transaction file
# transactions['payment_method_id'] = transactions['payment_method_id'].astype('int8')
# transactions['payment_plan_days'] = transactions['payment_plan_days'].astype('int16')
# transactions['plan_list_price'] = transactions['plan_list_price'].astype('int16')
# transactions['actual_amount_paid'] = transactions['actual_amount_paid'].astype('int16')
# transactions['is_auto_renew'] = transactions['is_auto_renew'].astype('int8') # chainging the type to boolean
# transactions['is_cancel'] = transactions['is_cancel'].astype('int8')#changing the type to boolean
# transactions['membership_expire_date'] = pd.to_datetime(transactions['membership_expire_date'].astype(str), infer_datetime_format = True, exact=False)
# # converting the series to string and then to datetime format for easy manipulation of dates
# sum(transactions.memory_usage()/1024**2) # this wouldn't change the size of df as memory occupied by object is similar to datetime
# transactions['transaction_date'] = pd.to_datetime(transactions['transaction_date'].astype(str), infer_datetime_format = True, exact=False)
# members = pd.read_csv('../input/members_v3.csv')
# members['city']=members['city'].astype('int8');
# members['bd'] = members['bd'].astype('int16');
# members['bd']=members['bd'].astype('int8');
# members['registration_init_time'] = pd.to_datetime(members['registration_init_time'].astype(str), infer_datetime_format = True, exact=False)
# #members['expiration_date'] = pd.to_datetime(members['expiration_date'].astype(str), infer_datetime_format = True, exact=False)
# train = pd.read_csv('../input/train.csv')
# train.head()
# train['is_churn'] = train['is_churn'].astype('int8');
# members_trans = members.merge(transactions, how='inner', on='msno')
# data = members_trans.merge(train, how='inner', on='msno')
# # deleting the previously imported df as they occupy space in memory
# del transactions
# del members
# del train
# del members_trans
# def assign_gender(item):
# if (item == 'male')|(item == 'female'):
# return 1
# else:
# return 0
# data['gender'] = data['gender'].apply(assign_gender)
# data['discount'] = data['plan_list_price'] - data['actual_amount_paid']
# newdf= data.join(pd.get_dummies(data['payment_method_id'])) #creating a new columns for paymenth method id dummyvariable
#
# payment_method_id = {}
# for i in data['payment_method_id'].unique():
# payment_method_id.update({i:'payment_method_id{}'.format(i)}) # create a dictionary to automatic renaming of columns
#
# newdf = newdf.rename(columns=payment_method_id) #renaming the new columns
# del newdf['payment_method_id']# deleting the extra columns
# newdf.head()
# newdf= newdf.join(pd.get_dummies(newdf['gender'])) #creating a new columns for paymenth method id dummyvariable
#
# gender = {}
# gender.update({True:'gender_provided'}) # create a dictionary to automatic renaming of columns
# gender.update({False:'gender_not_provided'})
# newdf = newdf.rename(columns=gender) #renaming the new columns
# del newdf['gender']# deleting the extra columns
# newdf.columns
# newdf= newdf.join(pd.get_dummies(newdf['registered_via'])) #creating a new columns for paymenth method id dummyvariable
#
# registered_via = {}
# for i in data['registered_via'].unique():
# registered_via.update({i:'registered_via{}'.format(i)}) # create a dictionary to automatic renaming of columns
#
#
# newdf = newdf.rename(columns=registered_via) #renaming the new columns
# del newdf['registered_via']# deleting the extra columns
# newdf.columns
# newdf= newdf.join(pd.get_dummies(newdf['city'])) #creating a new columns for paymenth method id dummyvariable
#
# city = {}
# for i in data['city'].unique():
# city.update({i:'city{}'.format(i)}) # create a dictionary to automatic renaming of columns
#
#
# newdf = newdf.rename(columns=city) #renaming the new columns
# del newdf['city']# deleting the extra columns
# newdf.head(10)
# def reject_outliers(data, m=2):
# return data[abs(data - np.mean(data)) < m * np.std(data)]
# bd_mean = np.mean(newdf['bd'])
# newdf[(newdf['bd']<0)|(newdf['bd']>100)].loc[:,'bd'] = bd_mean # filling the odd aged people with value = mean of age of users
#
# newdf['count_of_recharge'] = 1
# newdf_grouped = newdf.groupby('msno').agg({ 'bd':np.mean, 'registration_init_time':min,
# 'payment_plan_days':np.mean, 'plan_list_price':np.mean,'count_of_recharge':'sum', 'actual_amount_paid':np.mean,
# 'is_auto_renew':np.mean, 'transaction_date':min, 'membership_expire_date':max,
# 'is_cancel':np.mean, 'is_churn':min, 'discount':'sum', 'payment_method_id2':np.mean,
# 'payment_method_id3':sum, 'payment_method_id4':np.sum, 'payment_method_id5':np.sum,
# 'payment_method_id6':np.sum, 'payment_method_id8':np.sum, 'payment_method_id10':np.sum,
# 'payment_method_id11':np.sum, 'payment_method_id12':np.sum, 'payment_method_id13':np.sum,
# 'payment_method_id14':np.sum, 'payment_method_id15':np.sum, 'payment_method_id16':np.sum,
# 'payment_method_id17':np.sum, 'payment_method_id18':np.sum, 'payment_method_id19':np.sum,
# 'payment_method_id20':np.sum, 'payment_method_id21':np.sum, 'payment_method_id22':np.sum,
# 'payment_method_id23':np.sum, 'payment_method_id24':np.sum, 'payment_method_id25':np.sum,
# 'payment_method_id26':np.sum, 'payment_method_id27':np.sum, 'payment_method_id28':np.sum,
# 'payment_method_id29':np.sum, 'payment_method_id30':np.sum, 'payment_method_id31':np.sum,
# 'payment_method_id32':np.sum, 'payment_method_id33':np.sum, 'payment_method_id34':np.sum,
# 'payment_method_id35':np.sum, 'payment_method_id36':np.sum, 'payment_method_id37':np.sum,
# 'payment_method_id38':np.sum, 'payment_method_id39':np.sum, 'payment_method_id40':np.sum,
# 'payment_method_id41':np.sum, 'gender_not_provided':np.mean, 'gender_provided':np.mean,
# 'registered_via3':np.mean, 'registered_via4':np.mean, 'registered_via7':np.mean,
# 'registered_via9':np.mean, 'registered_via13':np.mean, 'city1':np.mean, 'city3':np.mean,
# 'city4':np.mean,'city5':np.mean, 'city6':np.mean, 'city7':np.mean, 'city8':np.mean,
# 'city9':np.mean, 'city10':np.mean, 'city11':np.mean, 'city12':np.mean, 'city13':np.mean,
# 'city14':np.mean, 'city15':np.mean, 'city16':np.mean, 'city17':np.mean, 'city18':np.mean,
# 'city19':np.mean, 'city20':np.mean, 'city21':np.mean, 'city22':np.mean})
#
# newdf_grouped[newdf_grouped.columns[-28:]] = newdf_grouped[newdf_grouped.columns[-28:]].applymap(lambda x: 1 if x>0 else 0).apply(lambda x: x.astype('int8')) # converting 0/1 for city
# newdf_grouped[newdf_grouped.columns[12:-28]] = newdf_grouped[newdf_grouped.columns[12:-28]].apply(lambda x:x.astype('int8'))
#
# newdf_grouped['discount'] = newdf_grouped['discount'].astype('int16')
# newdf_grouped[newdf_grouped.columns[2:5]] = newdf_grouped[newdf_grouped.columns[2:5]].apply(lambda x: round(x).astype('int16'))
# newdf_grouped['days_to_buy_membership'] = newdf_grouped['transaction_date'] - newdf_grouped['registration_init_time']
# newdf_grouped['days_to_buy_membership'] = (newdf_grouped['days_to_buy_membership']/ np.timedelta64(1, 'D')).astype(int)'''
# + _cell_guid="6b4ce8d2-d120-4cdf-9fff-50285e056f0d" _uuid="769594bcfaed0c599479d2540d4040e14707a3b6"
# Distribution of the average amount paid per user.
newdf_grouped.hist('actual_amount_paid',bins= 100);
# + _cell_guid="3f6f00c2-4e3e-45a7-99cc-94906aaa54db" _uuid="65eb95a536c131fba71c8dd1d69c0c1177711777"
# Only logistic regression is imported; other candidate models stay
# commented out.  NOTE(review): the later `gauss`/`bern`/`multi` fits need
# the naive-Bayes imports that are still commented here — confirm.
#from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
#from sklearn.model_selection import train_test_split
#from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
#from sklearn.neural_network import MLPClassifier
# + _cell_guid="71b0c8c6-60ec-4491-a643-67bf687caf7b" _uuid="180d8a14f4e257c8b2ae76f73ea7554498afd622"
def evaluation_plots(est, X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test):
    """Print scores and a classification report, and plot a confusion
    matrix and ROC curve for the fitted classifier `est`.

    NOTE(review): the defaults bind the module-level X_train/y_train/
    X_test/y_test at definition time, so later reassignment of those
    globals is not picked up unless the arrays are passed explicitly
    (the call site below does pass them explicitly).
    """
    from sklearn import metrics
    from sklearn.model_selection import learning_curve
    #model: estimator function
    y_pred=est.predict(X_test);
    # NOTE(review): a classifier's `score` is mean accuracy, not R2,
    # despite the message text — confirm the intended metric.
    print('Model R2 on train data is: ', est.score(X_train, y_train));
    print('Model R2 on test data is: ', est.score(X_test, y_test));
    print('\n\nClassification report: \n', metrics.classification_report(y_test, y_pred));
    # Confusion matrix drawn as an annotated heatmap without a colorbar.
    plt.figure();
    sns.heatmap(metrics.confusion_matrix(y_test, y_pred),
            cmap='viridis',annot=True, annot_kws={'fontsize':13},
           cbar=False, xticklabels=['Predicted Negative','Predicted Positive '], yticklabels=['Actual Negative','Actual Positive']);
    plt.title("Confusion Matrix", fontsize=20);
    # Learning-curve plotting kept disabled (expensive: refits the model
    # several times on growing subsets).
    # train_size, train_score, CV_score = learning_curve(est, X_train, y_train, train_sizes=np.linspace(0.1,1, 5));
    # plt.figure();
    # plt.plot(train_size, np.mean(train_score, axis=1), 'o-', color='b', label='training score');
    # plt.plot(train_size, np.mean(CV_score, axis=1), 'o-', color='r', label='CV score');
    # plt.xlabel("Training examples");
    # plt.ylabel("Score");
    # plt.fill_between(train_size, np.mean(train_score, axis=1) - np.std(train_score, axis=1),
    #                  np.mean(train_score, axis=1) + np.std(train_score, axis=1), alpha=0.1,
    #                  color="b");
    # plt.fill_between(train_size, np.mean(CV_score, axis=1) - np.std(CV_score, axis=1),
    #                  np.mean(CV_score, axis=1) + np.std(CV_score, axis=1), alpha=0.1,
    #                  color="r");
    # plt.title("Learning Curve", fontsize=20);
    # plt.legend();
    # plt.show();
    # ROC curve.  NOTE(review): built from hard labels rather than
    # predict_proba scores, so the curve has a single operating point and
    # the AUC shown differs from the usual probabilistic AUC — confirm.
    plt.figure();
    fpr, tpr, _ = metrics.roc_curve(y_test, y_pred);
    auc = metrics.auc(fpr, tpr);
    plt.plot(fpr, tpr, 'g-', lw=2);
    plt.plot([0,1],[0,1], 'b--', lw=3);
    #plt.gca().ylabel("True Positive rate");
    #plt.gca().xlabel("False Positive rate");
    plt.gca().text(.6,.05,"Area under the curve {}".format(np.round(auc, 2)));
# + _cell_guid="46b9df93-ec30-4476-b11f-1681fcbb706c" _uuid="0d2187dfcb165e0bf3be2610da98386a2b1af8a4"
# Logistic-regression baseline tuned by grid search.
# FIX: the default lbfgs solver only supports the 'l2' penalty, so the
# 'l1' half of the grid could never be fit; 'liblinear' supports both
# 'l1' and 'l2', making every grid point valid.
Logit = LogisticRegression(solver='liblinear')
from sklearn.model_selection import GridSearchCV
# Grid over inverse regularization strength C and the penalty norm.
param_grid = {'C':[0.01,.1,1,10,100], 'penalty':['l1','l2']}
gscv = GridSearchCV(Logit, param_grid)
gscv.fit(X_train, y_train);
# + _cell_guid="8b69687c-ca08-4a92-a5a0-834b15f1fdcb" _uuid="6c4e7bacbc2012840f1a36a8524d7cb82b617853"
evaluation_plots(gscv, X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test);
# + _cell_guid="ec32550a-1403-44a4-817e-e59fdccc7978" _uuid="d7342579051de70eee5391e1746a17d880dcebb5"
# Training window: memberships expiring before April 2017.
train = newdf_grouped[newdf_grouped['membership_expire_date']<'2017-04-01']
#test = newdf_grouped[(newdf_grouped['membership_expire_date']>='2017-03-01')&(newdf_grouped['membership_expire_date']<'2017-04-01')]
# + _cell_guid="144aa601-c623-480f-80ef-b90b0b372d4a" _uuid="eb0887b1320867672d521a6d3193b4ddbbcb199e"
# FIX: the original line read `predictgscv.predict(X_test)`, which raises a
# NameError (`predictgscv` is never defined).  The intent — see the later
# `submission['is_churn'] = predict` — is to store the grid-search
# predictions under the name `predict`.
predict = gscv.predict(X_test)
# + _cell_guid="b64f44ca-522c-44ec-b568-786d6a7f284c" _uuid="9e22673ab8def47e88ef6ea03389da2d1f22d222"
# Share of churners in the training window.
print("Churn rate in february: ", sum(train['is_churn'])/len(train['is_churn']))
#print("Chaurn rate in march: ",sum(test['is_churn'])/len(test['is_churn']))
# + _cell_guid="1ef06d6b-59f4-4810-a511-b3fd0fb22ce9" _uuid="4285df5ebed9eba8e0b6b33835db9f12da33ffcc"
# + _cell_guid="eec844d3-4fd9-497b-8e3c-9a08dc01fbe8" _uuid="83581db81bc310555eb30af94800bc6f412392ee"
from sklearn.preprocessing import normalize
# + _cell_guid="6d36efbd-8582-4499-b82e-0494701ae7ed" _uuid="6dc2b9c9515367d21c9bb198f34c6a42e1880192"
# Feature matrix selected by column position.
# NOTE(review): the positions appear to skip dates and the target — verify
# against `train.columns` printed above before changing the list.
X_train = train[train.columns[[0,2,4,5,6,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,
                41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78]]]
y_train = train['is_churn']
# + _cell_guid="322ff10c-17c4-4937-ad6c-e2cb7bd414f2" _uuid="841f791006e4c0ace729b00be89aa90259ec62df"
X_train.head()
# + _cell_guid="092bb5c2-fe0d-4dae-81e9-5ce3c646869e" _uuid="d6a6d30e9eb81d2b48f8693d8211b2dd34fbe448"
# Binary (0/1) features — presumably for the Bernoulli NB fit below.
bern_X = X_train[[ 'is_auto_renew', 'city1', 'city3', 'city4',
       'city5', 'city6', 'city7', 'city8', 'city9', 'city10', 'city11',
       'city12', 'city13', 'city14', 'city15', 'city16', 'city17', 'city18',
       'city19', 'city20', 'city21', 'city22', 'gender_not_provided', 'is_cancel', 'registered_via3', 'registered_via4', 'registered_via7',
       'registered_via9', 'registered_via13']]
# + _cell_guid="299ea3fa-2c5d-4b1e-a2e8-0a935c269bcf" _uuid="814d5a617446d04172f15f6f884f608edb2531e6"
# Count-valued features (recharge count, per-method payment counts) —
# presumably for the multinomial NB fit below.
mult_X = X_train[['count_of_recharge','payment_method_id2',
       'payment_method_id3', 'payment_method_id4', 'payment_method_id5',
       'payment_method_id6', 'payment_method_id8', 'payment_method_id10',
       'payment_method_id11', 'payment_method_id12', 'payment_method_id13',
       'payment_method_id14', 'payment_method_id15', 'payment_method_id16',
       'payment_method_id17', 'payment_method_id18', 'payment_method_id19',
       'payment_method_id20', 'payment_method_id21', 'payment_method_id22',
       'payment_method_id23', 'payment_method_id24', 'payment_method_id25',
       'payment_method_id26', 'payment_method_id27', 'payment_method_id28',
       'payment_method_id29', 'payment_method_id30', 'payment_method_id31',
       'payment_method_id32', 'payment_method_id33', 'payment_method_id34',
       'payment_method_id35', 'payment_method_id36', 'payment_method_id37',
       'payment_method_id38', 'payment_method_id39', 'payment_method_id40',
       'payment_method_id41']]
# + _cell_guid="4879ff52-09f7-4efa-a114-8ae4c969d46d" _uuid="79a3b4e6fd6b213a04a99bb4ebb353c631e03d10"
# Continuous features, normalized per sample — presumably for the Gaussian
# NB fit below.
gaus_X = normalize(X_train[['bd', 'payment_plan_days', 'actual_amount_paid',
       'discount', 'days_to_buy_membership']])
# + _cell_guid="0c8b1ba7-2fc2-443c-8280-c39c1ecf89b8" _uuid="7b2b720894a5f8af40dbf1613aab5139a3634ba7"
# + _cell_guid="e3fee947-8e0f-4db8-bbec-973f9c301aad" _uuid="9468a98a0bcc70ca73c9acbbc24171dda0466794"
# Test-side counterparts of gaus_X / mult_X / bern_X above.
# NOTE(review): `test` is commented out where it was defined earlier in
# this file, so these lines raise NameError unless `test` is created in a
# cell outside this excerpt — confirm.
gaus_X_test = normalize(test[['bd', 'payment_plan_days', 'actual_amount_paid',
       'discount', 'days_to_buy_membership']])
mult_X_test = test[['count_of_recharge','payment_method_id2',
       'payment_method_id3', 'payment_method_id4', 'payment_method_id5',
       'payment_method_id6', 'payment_method_id8', 'payment_method_id10',
       'payment_method_id11', 'payment_method_id12', 'payment_method_id13',
       'payment_method_id14', 'payment_method_id15', 'payment_method_id16',
       'payment_method_id17', 'payment_method_id18', 'payment_method_id19',
       'payment_method_id20', 'payment_method_id21', 'payment_method_id22',
       'payment_method_id23', 'payment_method_id24', 'payment_method_id25',
       'payment_method_id26', 'payment_method_id27', 'payment_method_id28',
       'payment_method_id29', 'payment_method_id30', 'payment_method_id31',
       'payment_method_id32', 'payment_method_id33', 'payment_method_id34',
       'payment_method_id35', 'payment_method_id36', 'payment_method_id37',
       'payment_method_id38', 'payment_method_id39', 'payment_method_id40',
       'payment_method_id41']]
bern_X_test = test[[ 'is_auto_renew', 'city1', 'city3', 'city4',
       'city5', 'city6', 'city7', 'city8', 'city9', 'city10', 'city11',
       'city12', 'city13', 'city14', 'city15', 'city16', 'city17', 'city18',
       'city19', 'city20', 'city21', 'city22', 'gender_not_provided', 'is_cancel', 'registered_via3', 'registered_via4', 'registered_via7',
       'registered_via9', 'registered_via13']]
# + _cell_guid="db5b17f7-0d60-42c5-991c-7e35075c8dc3" _uuid="16b42671846e3496ec2568867c4cf0ee3b63dfc3"
# + _cell_guid="8a6c745a-d746-48f1-9bf7-88af4a0f840c" _uuid="92b49be2e50a056182e4856f126ba687517b8c99" active=""
#
# + _cell_guid="4ea21652-299e-4d29-8e13-b97457fe1c79" _uuid="a3b549f5d902876cc2187a8623277ab3d7b02eb6"
# NOTE(review): `gauss`, `bern` and `multi` are not instantiated anywhere
# in this excerpt (the naive-Bayes imports above are commented out) —
# these fits presumably used objects created in a removed cell; confirm.
gauss.fit(gaus_X, y_train)
bern.fit(bern_X, y_train)
multi.fit(mult_X, y_train)
# + _cell_guid="c52613c4-239b-4cbd-b295-530688ca473f" _uuid="1026d92ef09ea0bedbedf2f8ffa17ab12cb38862"
# Duplicate of the previous cell (fits the same three models again).
gauss.fit(gaus_X, y_train)
bern.fit(bern_X, y_train)
multi.fit(mult_X, y_train)
# + _cell_guid="55e590ba-616e-4f26-94c9-3ffe595623f9" _uuid="af95aa698b6b10652909d856fb426c8972b7f9f2"
# NOTE(review): `model` is undefined in this excerpt — confirm which
# estimator the final predictions were meant to come from.
predict = model.predict(X_test)
# + _cell_guid="35de1d58-e98d-4c82-b8f1-9311cae4edf9" _uuid="c304e54d44bf33f24b0ee3cf4d9b54ae64f8c350"
# Build the submission frame of user ids and predicted churn.
submission = pd.DataFrame()
submission['msno'] = newdf[(newdf['expiration_date']>='2017-04-01')&(newdf['expiration_date']<'2017-05-01')].index
submission['is_churn'] = predict
# NOTE(review): set_index returns a new frame and is not assigned here, so
# the index change is discarded and to_csv writes the default RangeIndex.
submission.set_index('msno')
# + _cell_guid="a40516dc-602f-440b-a710-001c673f0a10" _uuid="4836476bd911f37c68a86b7762b1e69628c72e2b"
submission.to_csv('submission.csv')
# + _cell_guid="e891d533-5cff-4efa-9705-ad128e7cfeff" _uuid="73836d9c0f967c8bf004a85785399ccaf084f1d5"
# Source notebook: WSDM - KKBox's Churn Prediction Challenge/Size Reduction, Merge and features.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Project 2: Model Project
# **Name:** <NAME> (pvw581) // Spring Semester 2020
# # Ramsey model
# Firstly, modules needed are imported.
import numpy as np
import mpmath as mp
import sympy
import matplotlib.pyplot as plt
from scipy import optimize
import sympy as sm
import ipywidgets as widgets
# ## Model description
# In this project, we will focus on the neoclassic **Ramsey model** of economic growth.
#
# In the Ramsey model, there is **a finite number of homogeneous agents** with **an infinite time horizon**. Therefore, it is considered as a representative agent model. A **closed economy** is considered and time is continuous. At any point in time, there are **three markets** - one for output goods, one for labor, and the rental market for capital goods.
#
# There are **two types of agents**. On the one hand, there are **firms**. They rent capital and hire labor. They want to maximize their profit. On the other hand, **households** own capital, they supply labor and consume. They decide how much it is optimal to consume and save, so they can maximize their utility.
#
# We have **four variables**:
# - Output: $Y_t$
# - Capital: $K_t$
# - Labor supply: $L_t$
# - Consumption: $C_t$
#
# However, we will mostly consider these variables per worker, i.e.
# - Output per worker: $y_t = \frac{Y_t}{L_t}$
# - Capital per worker: $k_t = \frac{K_t}{L_t}$
# - Labor per worker: $l_t = 1$
# - Consumption per worker: $c_t = \frac{C_t}{L_t}$
#
# Moreover, prices are taken as given by both firms and households. We have
# - Wage rate: $w_t$
# - Rental rate of capital: $r_t$
#
# Capital depreciates at rate $\delta>0$ by period. Therefore, the return on household savings, $R_t$, is given by
#
# $$ R_t = 1 + r_t - \delta$$
# ### 1. Firms
# Firms are owned by households. They rent capital for $r_t$ and hire labor for $w_t$ to produce goods. Then profits are transferred to the households. Firms have access to production technology that is given by **production function**:
#
# $$
# Y_t = F(K_t,L_t) = f(k_t)L_t
# $$
#
# The representative firm maximizes profit:
#
# $$
# \underset{K_t,L_t}{\mathrm{max}} F(K_t,L_t) - r_tK_t - w_tL_t
# $$
#
# with first order conditions
#
# $$
# F_K(K_t,L_t)= f'(k_t) = r_t \\
# $$
#
# $$
# F_L(K_t,L_t) = f(k_t) - f'(k_t)k_t = w_t
# $$
#
# that define demand functions for capital and labor.
#
# ### 2. Households
# Households work for a real wage. They use it for either consumption or they save it in the form of
# accumulating assets, on which they later get interest income. Households try to maximize their current happiness and happiness of all their future descendants, i.e. **overall utility**. They maximize the discounted sum of utility from consumption:
#
# $$
# \underset{\{c_t\}_{t=0}^{\infty}}{\mathrm{max}} \sum_{t=0}^{\infty} \beta^tu(c_t)
# $$
#
# where $u'(c_t)>0$, $u''(c_t)<0$ and $\beta \in (0,1)$ is a preference factor. Moreover constrains are given by
#
# $$
# k_{t+1} = R_tk_t + w_t - c_t \\
# $$
#
# $$
# \underset{t \to \infty}{\mathrm{lim}}\frac{k_t}{\mathcal{R}_t } \geq 0 \\
# $$
#
# $$
# \mathcal{R}_t = \prod_{j=0}^{t}R_j
# $$
# with $k_0$, and paths for $\{R_t\}_{t=0}^{\infty}$ and $\{w_t\}_{t=0}^{\infty}$ given.
#
# Optimal behavior of household is implied by Euler equation:
#
# $$
# \frac{u'(c_t)}{u'(c_{t+1})} = \beta R_{t+1}
# $$
# We consider CRRA utility function
# $$
# u(c_t) = \frac{c_t^{1- \gamma}}{1- \gamma}
# $$
# where $\gamma>0$, and Cobb-Douglas production function defined as
# $$
# F(K_t,L_t) = AK_t^{\alpha}L_t^{1-\alpha}
# $$
# where $\alpha \in (0,1)$ and $A>0$.
# This implies that
# $$
# f(k_t) = Ak_t^{\alpha}
# $$
# and therefore
# $$
# r_t = A \alpha k_t^{\alpha-1} \\
# $$
# $$
# w_t = A(1-\alpha)k_t^{\alpha}
# $$
# ## Steady state
# ### Analytical solution
# To find an analytical solution of **steady-state**, we use **sympy**. Firstly, we find an analytical expression for the Euler equation with respect to $c_{t+1}$.
#
# Then, we need to define all symbols.
# Symbolic variables for the analytical steady-state derivation.  `R` is
# first declared as a symbol and then immediately rebound to an expression
# below; the scalar calibration later in the notebook shadows
# gamma/delta/beta/A/alpha with numbers.
gamma, delta, beta, A, alpha = sm.symbols('gamma delta beta A alpha')
c1, c2 = sm.symbols('c_t c_{t+1}')
k1, kss, css = sm.symbols('k_t k_{ss} c_{ss}')
R = sm.symbols('R_{t+1}')
u = sm.symbols('u')
# Factor prices implied by the firm's first-order conditions ...
r = A*alpha*k1**(alpha-1)
w = A*(1-alpha)*k1**alpha
# ... and the gross return on household savings.
R = 1 + r - delta
# Utility function is given by
# Symbolic CRRA period utility for current and next period consumption.
utilityfun1 = c1**(1-gamma)/(1-gamma)
utilityfun2 = c2**(1-gamma)/(1-gamma)
display(utilityfun1,utilityfun2)
# Euler equation is then given by
# u'(c_t)/u'(c_{t+1}) = beta * R_{t+1}
eulereq = sm.Eq(sm.diff(utilityfun1,c1)/sm.diff(utilityfun2,c2),beta*R)
eulereq
# We solve Euler equation with respect to $c_{t+1}$ and obtain:
eulereq_sol = sm.solve(eulereq,c2)[0]
sm.Eq(c2,eulereq_sol)
# In steady state, it holds that the level of capital and consumption is constant, i.e. $k_{t+1}=k_t$ and $c_{t+1}=c_t$.
# From the Euler equation, steady-state of capital can be expressed as
#
# Impose c_{t+1} = c_t in the Euler equation and solve for k_t.
steady_state_c = sm.Eq(eulereq_sol,c1)
steady_state_c
steady_state_c_sol = sm.solve(steady_state_c,k1)
sm.Eq(kss,steady_state_c_sol[0])
# From budget constraint $k_{t+1} = R_tk_t + w_t - c_t $, steady-state of consumption is expressed as
# Impose k_{t+1} = k_t in the budget constraint and solve for c_t.
steady_state_k = sm.Eq(k1,(R*k1+w-c1))
steady_state_k
steady_state_k_sol = sm.solve(steady_state_k,c1)
sm.Eq(css,steady_state_k_sol[0])
# These two equations define steady-state level of capital and consumption in Ramsey model.
# ### Numerical solution
# Firstly, we choose parameters:
# Calibration.  These scalar assignments shadow the sympy symbols of the
# analytical section above; from here on the model is purely numerical.
beta = 0.96   # preference (discount) factor, 0 < beta < 1
delta = 0.10  # depreciation rate of capital
alpha = 1/3   # capital exponent in the Cobb-Douglas technology
gamma = 0.3   # CRRA curvature of utility
A = 2         # total factor productivity, A > 0
# We want to find steady-state levels of consumption, $c_{ss}$, and capital, $k_{ss}$.
#
# To do that, we need to define functions from the setup.
# +
# CRRA utility of a single period's consumption; uses the module-level
# curvature parameter `gamma`.
def u(c):
    """Return the CRRA utility of consumption per worker *c*.

    For gamma == 1 the CRRA form reduces to log utility.
    """
    if gamma == 1:
        return np.log(c)
    return (c**(1 - gamma) - 1) / (1 - gamma)
# Discounted lifetime utility for an agent who lives two periods; `beta`
# is the module-level preference factor, 0 < beta < 1.
def U(c1, c2):
    """Return u(c1) + beta * u(c2) for a two-period consumption plan."""
    period1 = u(c1)
    period2 = u(c2)
    return period1 + beta * period2
# Intensive-form production technology and the factor prices it implies.
# All four functions read the module-level calibration A, alpha, delta.
def f(k):
    """Return output per worker, y_t = A * k_t**alpha."""
    return A*k**alpha
def r(k):
    """Return the rental rate of capital, r_t = A * alpha * k_t**(alpha-1).

    This is the firm's first-order condition for capital.
    """
    return A*alpha*k**(alpha-1)
def w(k):
    """Return the wage rate, w_t = A * (1-alpha) * k_t**alpha.

    This is the firm's first-order condition for labor.
    """
    return A*(1-alpha)*k**alpha
def R(k):
    """Return the gross return on savings, R_t = 1 + r_t - delta."""
    return 1 + r(k) - delta
# -
# We define another two functions. The first on is a **budget constraint**, i.e. $k_{t+1} = R_tk_t + w_t - c_t $, that gives us the level of next period's capital per worker. The second one is the Euler equation with respect to $c_{t+1}$, i.e. $c_{t+1}=c_t(\beta R_{t+1})^{1/\gamma}$.
# +
# One-step transition equations of the two-dimensional (k, c) system.
def capital(k, c):
    """Return next period's capital per worker.

    Household budget constraint: k_{t+1} = R(k_t) * k_t + w(k_t) - c_t.
    """
    return R(k)*k + w(k) - c
def euler(k, c):
    """Return next period's consumption per worker.

    Euler equation under CRRA utility:
    c_{t+1} = (beta * R(k_t)) ** (1 / gamma) * c_t.
    """
    growth = (beta*R(k))**(1/gamma)
    return growth*c
# -
# We define functions that give us the difference in capital and consumption between any two periods. In steady-state, it hold that $k_{t+1}=k_t$ and $c_{t+1}=c_t$.
# +
# Steady state requires both k and c to be constant across periods, so the
# one-period changes below must vanish simultaneously.
def F1(k, c):
    """Return the one-period change in capital, capital(k, c) - k."""
    return capital(k, c) - k
def F2(k, c):
    """Return the one-period change in consumption, euler(k, c) - c."""
    return euler(k, c) - c
def ramseySteadyState(X):
    """Return [F1(k, c), F2(k, c)] for X = (k, c).

    System handed to the root finder: both entries are zero exactly at the
    steady state.
    """
    k, c = X[0], X[1]
    return [F1(k, c), F2(k, c)]
# -
# Now, we want to find steady-state values of capital and consumption. Equations *capital* and *euler* form a two-equation dynamical system with two unknowns. It must, therefore, hold that
#
# F1(k_ss, c_ss) = 0
#
# F2(k_ss, c_ss) = 0
#
# That is a root-finding problem. We fill use the function *fsolve()* to find these roots.
# +
# Initial guess for the root finder.
k0, c0 = 1, 1
# Solve F1 = F2 = 0 numerically; fsolve returns the steady-state (k, c).
k_SS, c_SS = optimize.fsolve(func=ramseySteadyState, x0=(k0, c0))
# Implied steady-state factor prices.
r_ss = r(k_SS)
w_ss = w(k_SS)
# Print results
print('Steady state for given parameters is defined by:')
print(f"k_ss = {k_SS:.4f}")
print(f"c_ss = {c_SS:.4f}")
print(f"r_ss = {r_ss:.4f}")
print(f"w_ss = {w_ss:.4f}")
# -
# Now, we would like to visualize a steady-state by the phase diagram. We define functions for a steady-state.
# +
# Closed-form steady-state values (derived symbolically above) and the
# capital isocurve used for the phase diagram.
def k_ss():
    """Return the analytical steady-state capital per worker."""
    return ((beta*delta-beta+1)/(A*alpha*beta))**(1/(alpha-1))
def c_ss():
    """Return the analytical steady-state consumption per worker.

    Output at k_ss minus replacement investment delta * k_ss.
    """
    return A*k_ss()**alpha-delta*k_ss()
def locusK(k):
    """Return the consumption level c with F1(k, c) = 0 at capital *k*.

    This is the k_{t+1} = k_t isocurve, c = f(k) - delta * k.
    """
    return f(k) - delta * k
# -
# And we construct the diagram.
# +
# Phase diagram of the (k, c) system with the steady state marked.
# Create a grid of points for plotting
gridmax, gridsize = 200, 10000
grid = np.linspace(0, gridmax, gridsize)
# Create a new figure
plt.figure(figsize=(16,8))
# Add the c and k isocurves and mark their intersection (the steady state)
plt.plot(grid, locusK(grid), '-', color='royalblue', label=r'$k_{t+1}=k_t$, the capital isocurve')
plt.axvline(k_ss(), color='black', label=r'$c_{t+1}=c_t$, the consumption isocurve')
plt.plot(k_ss(), c_ss(), marker='.', markersize=20, color='skyblue')
# Create annotation for steady state
ss = '$k_{ss}$ = '+ str(round(k_ss(),2)) + ', ' + '$c_{ss}$ = '+ str(round(c_ss(),2))
plt.annotate(ss,
            xy=(k_ss(), c_ss()),
            xytext=(-60, 30), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                            shrinkA=20, shrinkB=10,
                            connectionstyle="angle,angleA=0,angleB=90,rad=10"))
# Add arrows to indicate out of steady-state dynamics (one horizontal and
# one vertical arrow per quadrant around the steady state)
x_len = 0.025 * gridmax
y_len = 1 / x_len
plt.arrow(x=0.5 * k_ss(), y=0.5 * c_ss(), dx=0, dy=y_len, color='slategray')
plt.arrow(x=0.5 * k_ss(), y=0.5 * c_ss(), dx=x_len, dy=0, color='slategray')
plt.arrow(x=0.5 * k_ss() + x_len, y=1.5 * c_ss(), dx=0, dy=y_len, color='slategray')
plt.arrow(x=0.5 * k_ss() + x_len, y=1.5 * c_ss(), dx=-x_len, dy=0, color='slategray')
plt.arrow(x=1.5 * k_ss(), y=0.5 * c_ss() + y_len, dx=0, dy=-y_len, color='slategray')
plt.arrow(x=1.5 * k_ss(), y=0.5 * c_ss() + y_len, dx=x_len, dy=0, color='slategray')
plt.arrow(x=1.5 * k_ss() + x_len, y=1.5 * c_ss() + y_len, dx=0, dy=-y_len, color='slategray')
plt.arrow(x=1.5 * k_ss() + x_len, y=1.5 * c_ss() + y_len, dx=-x_len, dy=0, color='slategray')
# Label axis
plt.xlabel('$k_t$')
plt.ylabel('$c_t$', rotation='horizontal')
# Add a title to the plot
plt.title('Phase Diagram for the Ramsey Model', size=20)
# Add the legend
plt.legend(frameon=False)
plt.grid(True)
# Change range of axis
plt.ylim((0, 6))
plt.xlim((-1, 100))
plt.savefig('Ramsey-Phase-Diagram.png')
# -
# ## Permanent change in preference parameter $\beta$
# Preference parameter $\beta$ controls consumption preferences. The change in this parameter will affect the Euler equation. We assume that the economy is now in the steady-state that can be represented by the phase diagram above and we want to see what happens when the parameter $\beta$ changes.
#
#
# To see how the economy responds to the change in parameter $\beta$, we construct an interactive plot with the slider.
#
# We define functions of steady-state once again, however this time with input *beta*.
# +
# Same steady-state expressions as k_ss/c_ss above, but with beta passed
# explicitly so the interactive slider below can vary it.
def k_star(beta):
    """Return steady-state capital per worker for preference factor *beta*."""
    return ((beta*delta-beta+1)/(A*alpha*beta))**(1/(alpha-1))
def c_star(beta):
    """Return steady-state consumption per worker for preference factor *beta*."""
    return A*k_star(beta)**alpha-delta*k_star(beta)
def locusK_beta(k):
    """Return the consumption level on the capital isocurve at capital *k*.

    Identical to locusK: c = f(k) - delta * k (independent of beta).
    """
    return f(k) - delta * k
# -
# We define function *beta_shock*:
def beta_shock(betas):
    """Plot the Ramsey phase diagram for a given preference parameter.

    Draws the capital isocurve, the (vertical) consumption isocurve and the
    implied steady state for discount factor *betas*, so the figure can be
    driven by an interactive slider.

    Inputs:
        1) betas: preference (discount) parameter, 0 < betas < 1
    """
    # Create a grid of points for plotting
    gridmax, gridsize = 200, 10000
    grid = np.linspace(0, gridmax, gridsize)
    # Create a new figure
    plt.figure(figsize=(16,8))
    # Add the c and k
    plt.plot(grid, locusK_beta(grid), '-', color='royalblue', label=r'$k_{t+1}=k_t$, the capital isocurve')
    plt.axvline(k_star(betas), color='black', label=r'$c_{t+1}=c_t$, the consumption isocurve')
    plt.plot(k_star(betas), c_star(betas), marker='.', markersize=20, color='skyblue')
    # Create annotation for steady state
    ss_beta = '$k_{ss}$ = '+ str(round(k_star(betas),2)) + ', ' + '$c_{ss}$ = '+ str(round(c_star(betas),2))
    plt.annotate(ss_beta,
                 xy=(k_star(betas), c_star(betas)),  # theta, radius
                 xytext=(-60, 30), textcoords='offset points',
                 bbox=dict(boxstyle="round", fc="0.8"),
                 arrowprops=dict(arrowstyle="->",
                                 shrinkA=20, shrinkB=10,
                                 connectionstyle="angle,angleA=0,angleB=90,rad=10"))
    # Label axis
    plt.xlabel('$k_t$')
    plt.ylabel('$c_t$', rotation='horizontal')
    # Add a title to the plot (fixed typo: "pernament" -> "permanent")
    plt.title('Phase Diagram for the Ramsey Model, permanent shock in preference parameter', size=20)
    # Add the legend
    plt.legend(frameon=False)
    plt.grid(True)
    # Change range of axis
    plt.ylim((0, 6))
    plt.xlim((-1, 100))
# And we can construct an interactive phase diagram with *beta slider*. This plot shows the response of the economy to an unforeseen shock to the preference parameter $\beta$.
# Interactive phase diagram: drag the beta slider to watch the steady state move.
# NOTE(review): `widgets` is not imported in this chunk — presumably
# `import ipywidgets as widgets` appears earlier in the file; confirm.
widgets.interact(beta_shock,
                 betas=widgets.FloatSlider(description='beta', min=0.01, max=0.99, step=0.01, value=0.96),
                 );
# It can be seen that a decrease in $\beta$ results in a decrease in both steady-state capital, $k_{ss}$, and consumption, $c_{ss}$.
# ## Government
# Now, we introduce government in the economy. Government expenditure, $G_t>0$, does not affect households' preferences for consumption and firms' current and future productivity. That means that government expenditure does not affect the consumption
# isocurve, it only shifts the capital isocurve downwards, i.e. it is now defined as
#
# $$
# c_t = Ak_t^{\alpha}-\delta k_t - G_t
# $$
# We define functions of steady-state once again, however, this time we include government expenditure, $G$.
# +
def k_govern():
    """Steady-state capital per effective worker with government spending.

    Identical to k_star evaluated at the module-level beta: government
    expenditure leaves steady-state capital unchanged in this model.
    """
    ratio = (beta * delta - beta + 1) / (A * alpha * beta)
    return ratio ** (1 / (alpha - 1))
def c_govern(G):
    """Steady-state consumption per effective worker with government expenditure.

    Inputs:
        1) G: government expenditure per effective worker
    Returns:
        1) steady-state consumption, A*k_ss**alpha - delta*k_ss - G
    """
    # Consistency: use k_govern() (defined alongside this function) rather
    # than k_star(beta); the two are numerically identical here.
    k = k_govern()
    return A * k ** alpha - delta * k - G
def locusK_G(k, G):
    """Capital isocurve with government spending.

    Inputs:
        1) k: capital stock per effective worker
        2) G: government expenditure per effective worker
    Returns:
        1) c: consumption per effective worker consistent with k_{t+1} = k_t
    """
    output_net_of_depreciation = f(k) - delta * k
    return output_net_of_depreciation - G
# -
# We define function *government_exp*:
def government_exp(G):
    """Plot the Ramsey phase diagram for a given level of government expenditure G.

    Government spending shifts the capital isocurve down by G while leaving
    the (vertical) consumption isocurve — and hence steady-state capital —
    unchanged.
    """
    # Create a grid of points for plotting
    gridmax, gridsize = 200, 10000
    grid = np.linspace(0, gridmax, gridsize)
    # Create a new figure
    plt.figure(figsize=(16,8))
    # Add the c and k
    plt.plot(grid, locusK_G(grid,G), '-', color='royalblue', label=r'$k_{t+1}=k_t$, the capital isocurve')
    plt.axvline(k_govern(), color='black', label=r'$c_{t+1}=c_t$, the consumption isocurve')
    plt.plot(k_govern(), c_govern(G), marker='.', markersize=20, color='skyblue')
    # Create annotation for steady state
    ss_g = '$k_{ss}$ = '+ str(round(k_govern(),2)) + ', ' + '$c_{ss}$ = '+ str(round(c_govern(G),2))
    plt.annotate(ss_g,
                 xy=(k_govern(), c_govern(G)),  # theta, radius
                 xytext=(-60, 30), textcoords='offset points',
                 bbox=dict(boxstyle="round", fc="0.8"),
                 arrowprops=dict(arrowstyle="->",
                                 shrinkA=20, shrinkB=10,
                                 connectionstyle="angle,angleA=0,angleB=90,rad=10"))
    # Label axis
    plt.xlabel('$k_t$')
    plt.ylabel('$c_t$', rotation='horizontal')
    # Add a title to the plot
    plt.title('Phase Diagram for the Ramsey Model, government expenditures', size=20)
    # Add the legend
    plt.legend(frameon=False)
    plt.grid(True)
    # Change range of axis
    plt.ylim((0, 6))
    plt.xlim((-1, 100))
# And we can construct an interactive phase diagram with *G slider*. This plot shows the response of the economy to a permanent change in government expenditures.
# Interactive phase diagram: drag the G slider to shift the capital isocurve.
widgets.interact(government_exp,
                 G=widgets.FloatSlider(description='G', min=0, max=3, step=0.2, value=0),
                 );
# It can be seen that a permanent increase in government expenditures shifts the blue curve that illustrates the level of consumption downwards.
# ## Conclusion
# In this project, we focused on the Ramsey model. We found both analytical and numerical solutions for a steady-state that are given by
print('Analytical solution for steady state:')
# NOTE(review): the pairing looks swapped — kss is displayed with
# steady_state_c_sol and css with steady_state_k_sol; confirm against the
# cell where these symbols are solved (not visible in this chunk).
display(sm.Eq(kss,steady_state_c_sol[0]),sm.Eq(css,steady_state_k_sol[0]))
print('Numerical solution for steady state for given parameters:')
print(f"k_ss = {k_SS:.4f}")
print(f"c_ss = {c_SS:.4f}")
# Then we visualized this solution by the phase diagram. For our further analysis, we decided to focus on a permanent change in preference parameter $\beta$ and the introduction of government. Using an interactive phase diagram, one can see that decrease in preference parameter $\beta$ results in the shift of the consumption isocurve to the left which implies a decrease in steady-state capital and consumption. After that, we introduced government expenditure, $G_t>0$. Government expenditure does not affect households' preferences for consumption and firms' current and future productivity. Using an interactive phase diagram once again, it can be seen that an increase in government expenditure, $G_t$, results in the shift of the capital isocurve downwards. This implies that a steady-state level of capital remains the same as before, but a steady-state level of consumption decreases.
| modelproject/modelproject.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="_K2AEUdFOPpM" colab_type="text"
# # **Loading and initial analysis of dataset from dataworld**
# + [markdown] id="qcjKh_vNOgCw" colab_type="text"
# ## Installing dataworld and importing libraries
# + id="iVKup3oVPAOn" colab_type="code" colab={}
# #!pip install datadotworld
# #!pip install datadotworld[pandas]
# + id="xcZR9fCRPY-3" colab_type="code" colab={}
# #!dw configure
# + id="5CmtrOObnV3R" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw
# + id="OFR7Eue3oWkf" colab_type="code" outputId="81cc148f-abc5-4fc1-dc5d-735e4f45e3b1" executionInfo={"status": "ok", "timestamp": 1581860583063, "user_tz": -60, "elapsed": 1758, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# ls
# + id="sXqsFE9Boee_" colab_type="code" outputId="a0f9c5d2-696f-4ee4-cec1-8bf331c7da5c" executionInfo={"status": "ok", "timestamp": 1581860585487, "user_tz": -60, "elapsed": 528, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd "drive/My Drive/Colab Notebooks/dw_matrix"
# + id="wLto3q6IovtQ" colab_type="code" outputId="55e6d0be-7cd9-472a-b8df-43cc688bf0cb" executionInfo={"status": "ok", "timestamp": 1581531740062, "user_tz": -60, "elapsed": 1917, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# ls
# + id="6gM9GmbToy90" colab_type="code" colab={}
# !mkdir data
# + id="jOPM9cKco2if" colab_type="code" colab={}
# !echo 'data' > .gitignore
# + id="5Bw_61pFpJ9j" colab_type="code" colab={}
# !git add .gitignore
# + [markdown] id="oc0vXWl_OwZT" colab_type="text"
# ## Loading dataset
# + id="y0sZk1rWpPmG" colab_type="code" colab={}
# Fetch the men's shoe prices dataset from data.world (requires prior `dw configure` auth).
data = dw.load_dataset('datafiniti/mens-shoe-prices')
# + [markdown] id="MrMWSwLFO1kQ" colab_type="text"
# ## Getting to know the dataset
# + id="fy8a0bRepjg5" colab_type="code" outputId="7d462fdb-835d-48ef-81f7-4577c43e0a48" executionInfo={"status": "ok", "timestamp": 1581860668710, "user_tz": -60, "elapsed": 2365, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 122}
# '7004_1' is the table name within the downloaded dataset; check rows/columns.
df = data.dataframes['7004_1']
df.shape
# + id="obdEdxpJpzt3" colab_type="code" outputId="aee8f091-fac6-4c03-a9d1-b92428042edb" executionInfo={"status": "ok", "timestamp": 1581860670852, "user_tz": -60, "elapsed": 709, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 649}
# Inspect a random handful of rows to get a feel for the data.
df.sample(5)
# + id="Ytp2DjmFp4Ag" colab_type="code" outputId="6e514a58-9f6b-4bfd-f1b9-b84f1af9d3d7" executionInfo={"status": "ok", "timestamp": 1581860675291, "user_tz": -60, "elapsed": 673, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 221}
# List all available columns.
df.columns
# + id="U2jCQbJdqHz4" colab_type="code" outputId="b9ca1340-d3a9-4d30-c4e2-730c701fd6de" executionInfo={"status": "ok", "timestamp": 1581860677127, "user_tz": -60, "elapsed": 513, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 102}
# Which currencies occur in the price column?
df.prices_currency.unique()
# + id="MYfnESxmqLU4" colab_type="code" outputId="a18be76c-1df3-494b-ffae-47c22a970a62" executionInfo={"status": "ok", "timestamp": 1581860681159, "user_tz": -60, "elapsed": 665, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 255}
# Share of each currency — used to justify keeping USD rows only.
df.prices_currency.value_counts(normalize=True)
# + [markdown] id="EAn2ruCjQRoy" colab_type="text"
# Because most of the prices are in USD, we will leave only these rows for now (instead of checking exchange rates of others and converting them into USD). We also see that there are some other values put in 'prices_currency' mistakenly.
# + id="7M8xp7B7qR_t" colab_type="code" outputId="50be8b91-1a5c-45a8-9c37-6814074a418e" executionInfo={"status": "ok", "timestamp": 1581860684202, "user_tz": -60, "elapsed": 473, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Keep only rows priced in USD (the dominant currency in the data);
# .copy() avoids SettingWithCopy warnings on later assignments.
df_usd = df[ df.prices_currency == 'USD'].copy()
df_usd.shape
# + id="BPIAOA_JqyhT" colab_type="code" outputId="36f06c83-d5d0-430a-d80f-ec33c9c06ccb" executionInfo={"status": "ok", "timestamp": 1581860690156, "user_tz": -60, "elapsed": 522, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
# Prices arrive as strings; cast to float for numeric work.
# Fixed: `np.float` was deprecated in NumPy 1.20 and removed in 1.24 —
# the builtin `float` is the documented replacement and is equivalent here.
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
df_usd['prices_amountmin'].hist();
# + [markdown] id="wWO-kJxHPoCu" colab_type="text"
# It looks like there are some outliers, so the next step will be to filter out these values.
# + [markdown] id="xajEG_0nPcsm" colab_type="text"
# ## Filtering and saving the data to a file
# + id="iPCKH9pPrcJI" colab_type="code" outputId="6f368872-5a61-4ff6-cc14-5b17b04532ce" executionInfo={"status": "ok", "timestamp": 1581860692217, "user_tz": -60, "elapsed": 521, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Use the 99th percentile of the minimum price as an upper cut-off for outliers.
filter_max = np.percentile( df_usd['prices_amountmin'], 99)
filter_max
# + id="YZkduoUBtuYA" colab_type="code" colab={}
# Drop rows at or above the 99th-percentile price.
df_usd_filter = df_usd[ df_usd['prices_amountmin'] < filter_max ]
# + id="PIANtDtcuO_0" colab_type="code" outputId="47d9cf17-e622-4016-9670-52ad1e321d9a" executionInfo={"status": "ok", "timestamp": 1581860699618, "user_tz": -60, "elapsed": 1898, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06127576755732744416"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
# Re-plot the distribution after filtering; the heavy right tail is gone.
df_usd_filter.prices_amountmin.hist(bins = 100);
# + id="uZfiWIG1uSQo" colab_type="code" colab={}
# df.to_csv('data/shoes_prices.csv', index=False)
| day3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Download-embeddings-from-PostgresSQL" data-toc-modified-id="Download-embeddings-from-PostgresSQL-1"><span class="toc-item-num">1 </span>Download embeddings from PostgresSQL</a></span></li><li><span><a href="#Try-to-load-embeddings" data-toc-modified-id="Try-to-load-embeddings-2"><span class="toc-item-num">2 </span>Try to load embeddings</a></span></li></ul></div>
# +
from esper.stdlib import *
from esper.prelude import *
import os
import struct
import json
def decode(features):
    """Decode a binary feature blob (e.g. a Postgres bytea buffer) into Python objects via JSON."""
    raw = features.tobytes()
    return json.loads(raw.decode())
# -
# # Download embeddings from PostgresSQL
# +
# Output locations and binary layout for the exported embeddings:
# ids are 8-byte little-endian integers, embeddings are 128 float32 values.
EMBS_DIR = '/app/data/embs/'
IDS_FILE = os.path.join(EMBS_DIR, 'face_ids.bin')
EMBS_FILE = os.path.join(EMBS_DIR, 'face_embs.bin')
ENDIAN = 'little'
DIMENSIONS = 128
# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(EMBS_DIR, exist_ok=True)
# Stream every face embedding out of the Django ORM into two parallel binary
# files ordered by face id: row i of the ids file corresponds to row i of the
# embeddings file.
# NOTE(review): the final print references `i` from the loop — it raises
# NameError if the queryset is empty; confirm that case cannot occur.
with open(IDS_FILE, 'wb') as f_ids, open(EMBS_FILE, 'wb') as f_embs:
    for i, x in enumerate(FaceFeatures.objects.all().values(
        'face__id', 'features'
    ).order_by('face__id')):
        # Progress marker every 10k rows.
        if i % 10000 == 0:
            print('Wrote {} embeddings'.format(i))
        fid = x['face__id']
        emb = decode(x['features'])
        assert len(emb) == DIMENSIONS, 'Incorrect dimensions: {} != {}'.format(
            len(emb), DIMENSIONS)
        # 8-byte little-endian id, then 128 packed float32 values.
        f_ids.write(fid.to_bytes(8, byteorder=ENDIAN))
        for v in emb:
            f_embs.write(struct.pack('f', v))
print('Done! ({} embeddings written)'.format(i + 1))
# -
# # Try to load embeddings
import esper.face_embeddings as face_embed
| app/notebooks/store_face_features_locally.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Structural Covariance Network Analysis
#
# `scona` is a tool to perform network analysis over structural covariance networks of brain regions.
#
# This tutorial will go through the basic functionality of `scona`, taking us from our inputs (a matrix of structural regional measures over subjects) to a report of local network measures for each brain region, and network level comparisons to a cohort of random graphs of the same degree.
# +
import matplotlib.pylab as plt
# %matplotlib inline
import networkx as nx
import numpy as np
import seaborn as sns
sns.set(context="notebook", font_scale=1.5, style="white")
import scona as scn
import scona.datasets as datasets
from scona.scripts.visualisation_commands import view_corr_mat
# -
# ## 0 Import your data
#
# A `scona` analysis starts with four inputs.
#
# * **regional_measures**: A pandas DataFrame with subjects as rows. The columns should include structural measures for each brain region, as well as any subject-wise covariates.
#
# * **names**: A list of names of the brain regions. This will be used to specify which columns of the **regional_measures** matrix to want to correlate over.
#
# * **covars** *(optional)*: A list of your covariates. This will be used to specify which columns of **regional_measure** you wish to correct for.
#
# * **centroids**: A list of tuples representing the cartesian coordinates of brain regions. This list should be in the same order as the list of brain regions to accurately assign coordinates to regions. The coordinates are expected to obey the convention that the x=0 plane is the plane that separates the left and right hemispheres of the brain.
# Read in sample data from the NSPN WhitakerVertes PNAS 2016 paper.
# Returns the regional-measures DataFrame, region names, covariate list
# (empty for this dataset) and region centroid coordinates.
df, names, covars, centroids = datasets.NSPN_WhitakerVertes_PNAS2016.import_data()
# ### Let's take a look at these different inputs:
#
# `df` is the **regional_measures** file.
#
# Each row is a participant in the study, and each column is a measure associated with that person.
#
# `names` are the columns in the regional measures data frame that you care about.
#
# The regional measures file can contain as many columns as you'd like. Some will be demographic measures (such as `centre`, `age_scan`, or `male`), others will be brain measures that you care about (see the **names** list below), and there can even be useless ones such as `Unnamed:` and `lh_unknown_part1` that are created as part of your preprocessing.
# The first 5 rows of our regional measures data frame
df.head()
# The top 10 regional names
names[:10]
# `centroids` are the x, y, z coordinates of the centres of each of the brain regions (listed in `names`)
# The centroids for the top 10 regions
centroids[:10]
# In this case `covars` is an empty list because the analyses published by Whitaker, Vertes et al did not correct for any measures, but you *could* give it a list of column names containing information that you'd like to covary out of the correlation matrices.
#
# A good example would be `age_scan` and `male` so lets set up that alternate covars list (`covars_age_male`) to compare the results as we go through this tutorial.
# Show the covars list that ships with the example data set
covars
# Create a list of covariates to check their effect
covars_age_male = ['age_scan', 'male']
covars_age_male
# ## 1 Create a correlation matrix
#
# ### 1.0 Correct for covariates
#
# The `scona` command `create_residuals_df` will calculate the residual variation in each of the brain regions after correcting for the covariates defined in `covars`.
#
# We give the function the data frame (`df`) the columns that correspond to the brain regions we care about (`names`) and the list of columns we want to covary out (`covars` or `covars_age_male`).
#
# Note that it's totally fine to pass an empty list for `covars`. In fact, that's the default behaviour! In this case the values will simply be de-meaned.
#
# You can see the slight variations in the values when we remove the (linear) effects of age and gender (`df_res_age_male`) compared to no covariates (`df_res`).
# Residualize the regional measures: with an empty covars list the values are
# simply de-meaned; with covariates the linear effects are regressed out.
df_res = scn.create_residuals_df(df, names, covars)
df_res.head()
# Same, but removing the (linear) effects of age and gender.
df_res_age_male = scn.create_residuals_df(df, names, covars_age_male)
df_res_age_male.head()
# ### 1.1 Create a correlation matrix
#
# Now we're ready to create a correlation matrix.
#
# You'll notice (above) that `df_res` doesn't contain the other measures from `df` - it only has the brain measures we're interested in. The default behaviour of `create_corrmat` is to correlate all of the columns. You can pass an optional measure (`names`) if you want to only correlate a subset of measures.
#
# The default method of creating a correlation matrix is pearson correlation. If you'd prefer you can use any of the options implemented by pandas, which are `pearson`, `kendall`, `spearman` at time of writing.
#
# Our correlation matrix `M` is a square pandas data frame with the rows and columns both corresponding to the `names` of the regions you're interested in.
# Pearson correlation matrix across regions (rows/columns = region names).
M = scn.create_corrmat(df_res)
M.head()
M_age_male = scn.create_corrmat(df_res_age_male)
# Take a look at the two different correlation matrices
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].imshow(M, cmap='RdBu_r', vmin=-0.8, vmax=0.8)
ax[0].set_title('No covariates')
ax[1].imshow(M_age_male, cmap='RdBu_r', vmin=-0.8, vmax=0.8)
ax[1].set_title('Remove age and gender effects')
plt.show()
# ## 2 Create a weighted graph
#
# From our correlation matrix we're ready to make a graph!
#
# A short sidenote on the `BrainNetwork` class. This is a very lightweight subclass of the [`Networkx.Graph`](https://networkx.github.io/documentation/stable/reference/classes/graph.html) class. This means that any methods you can use on a `Networkx.Graph` object can also be used on a `BrainNetwork` object, although the reverse is not true. We have **added** various methods that allow us to keep track of measures that have already been calculated. This is particularly useful later on when one is dealing with 1000 random graphs (or more!) and saves a lot of time.
#
# Initialise a weighted graph `G` from the correlation matrix `M`. The `parcellation` and `centroids` arguments are used to label nodes with names and coordinates respectively.
#
# `G` can be created from a pandas data frame (as we're doing here), a numpy array, or a networkx graph. So this is a great place to jump in from another processing package if you have a preferred way of creating your correlation matrix.
#
# The `scn.BrainNetwork` options `parcellation` and `centroids` are optional, but recommended. If you don't give `scn.BrainNetwork` the names of your regions or the coordinates of each region, there will be some graph measures that can't be calculated (for example euclidean distance) and the output might be somewhat harder to interpret (you won't have a nice mapping between the nodes back to your regional data).
#
# In this section of the tutorial we'll just take the data with no covariates forward (to help with readability), but a comparison of values with and without the age and gender covariates is at the very end of this notebook :)
# Build the weighted graph; parcellation/centroids attach names and coordinates to nodes.
G = scn.BrainNetwork(network=M, parcellation=names, centroids=centroids)
# Take a look at the information stored for the first node
# of the graph we created
G.nodes[0]
# Or you can see the weight (the correlation strength)
# of the edge between nodes 0 and 1
G.edges[(0,1)]
# ## 3 Threshold to create a binary graph
#
# Next we'll threshold `G` at cost 10 to create a binary graph with 10% as many edges as the complete graph `G`.
#
# Ordinarily when thresholding one takes the 10% of edges with the *highest* weight. However, this can sometimes leave a disconnected graph, which in the context of brain networks is not biologically plausible nor easily interpretable. Therefore, in order to ensure that the resulting graph is connected, we will calculate a **minimum spanning tree** first. A "tree" is the smallest possible graph where you can travel from every node to every other node. The minimum spanning tree builds that tree from the strongest available edges.
#
# This calculation happens within the `BrainNetwork.threshold` method. If you want to omit the minimum spanning tree step, you can pass the argument `mst=False` to `G.threshold`.
#
# The `threshold` method does not edit objects inplace.
# Binarize at 10% cost: keep the minimum spanning tree plus the strongest
# remaining edges up to 10% of the complete graph's edge count.
G10 = G.threshold(10)
# Note that edges that still exist now have a weight of 1
G10.edges[(0,1)]
# Some of these edges don't exist any more!
# (90% of them to be exact)
try:
    G10.edges[(0,50)]
except KeyError:
    print("This edge does not exist!")
# ## 4 Calculate nodal measures
#
# ### 4.0 Calculate standard network measures
#
# The `BrainNetwork` method `calculate_nodal_measures` computes and records the following nodal measures **by default**:
#
# * `betweenness`: betweenness centrality
# * `closeness`: closeness centrality
# * `clustering`: clustering coefficient
# * `degree`: number of edges from that node
# * `module`: which module this node belongs to according to the louvain community detection algorithm (starting from 0)
# * `participation_coefficient`: participation coefficient according to the partition defined above
# * `shortest_path_length`: shortest path length from this node to any other in the network
#
#
# Lets start by seeing the nodal attributes we already have. The useful method `report_nodal_measure` returns all these values in a DataFrame.
# Nodal attributes currently stored (name, centroid coordinates, etc.).
G10.report_nodal_measures().head()
# We can run `calculate_nodal_measures` to fill in a lot more information to that DataFrame.
#
# This method takes around 6 seconds to run. Which isn't long but adds up when you're creating random graphs for comparison with your real network data. Therefore this `BrainNetwork` method will check to see if the measures already exist before it re-calculates them. If they're already there it will just move on to the next one.
#
# You can re-calculate the nodal measures by setting the option `force=True`.
# Three timings: the first call computes, the second hits the cache, the third
# forces recomputation — demonstrating BrainNetwork's measure caching.
# %%timeit -n 1
G10.calculate_nodal_measures()
# %%timeit -n 1
G10.calculate_nodal_measures()
# %%timeit -n 1
G10.calculate_nodal_measures(force=True)
# The `calculate_nodal_measures` method assigns these calculated values to the `BrainNetwork` graph.
#
# We can look at a nice summary using `report_nodal_measures` which outputs a pandas DataFrame.
# The DataFrame now includes degree, closeness, clustering, module, etc.
G10.report_nodal_measures().head()
# ### 4.1 Calculate spatial nodal measures
#
# The method `calculate_spatial_measures` calculates nodal and edge values that relate to the euclidean distances between nodes. They can only be calculated if the `centroids` file is available.
#
# The edge attributes are:
#
# * `euclidean`: the distance between two nodes (in the units given in the `centroids` file, likely mm).
# * `interhem`: whether the node connects regions in two different hemispheres (`1` if yes, `0` if no).
#
# The nodal measures are:
#
# * `average_dist`: the average distance across all edges connected to that node
# * `total_dist`: the total distance of all edges connected to that node
# * `hemisphere`: whether the node is in the left (`L`) or right (`R`) hemisphere
# * `interhem`: number of interhemispheric edges from that node
# * `interhem_proportion`: proportion of interhemispheric edges from that node
#
# Add euclidean-distance-based edge and nodal measures (needs centroids).
G10.calculate_spatial_measures()
# Lets take a look at all the nodal measures again
G10.report_nodal_measures().head()
# ### 4.2 Add your own measures
#
# It's quite likely that you may want to attribute additional values to each node. For example, in their paper in 2016, Whitaker, Vertes and colleagues correlated the cross sectional decrease in cortical thickness between ages 14 and 24 for each region with its degree. They showed that the hubs - the best connected regions - changed the most in this late adolescent age range.
#
# As we're using this data, we can replicate that degree-versus-thickness-change analysis below.
#
# We can also add measures as one might normally add nodal attributes to a networkx graph
# +
# Correlate age at scan with each region's cortical thickness: the first row
# of the correlation matrix (age_scan vs regions) gives the change measure dCT.
dCT = df.loc[:, ['age_scan'] + names].corr().iloc[0, 1:]
dCT.head()
# -
# Attach dCT to each node, looking its value up by the node's region name.
# Fixed: use the public `G10.nodes[...]` view instead of the private `_node` dict.
nx.set_node_attributes(G10, name="dCT", values={ node: dCT[G10.nodes[node]['name']] for node in G10.nodes()})
# These show up in our DataFrame too
nodal_df = G10.report_nodal_measures(columns=['name', 'degree', 'dCT'])
nodal_df.head()
# Replicate the paper's key scatter: regional degree vs thickness change.
nodal_df = G10.report_nodal_measures(columns=['degree', 'dCT'])
sns.regplot(data=nodal_df, y='dCT', x='degree')
sns.despine()
# ### Calculate Global measures
#
# These are attributes of the WHOLE GRAPH.
# Whole-graph summary statistics and the rich-club coefficient curve.
G10.calculate_global_measures()
G10.rich_club()
# ## Create a GraphBundle
#
# The `GraphBundle` object is the `scona` way to handle across-network comparisons. What is it? Essentially it's a python dictionary with `BrainNetwork` objects as values.
# A GraphBundle is a dict-like container of BrainNetwork objects.
brain_bundle = scn.GraphBundle([G10], ['NSPN_cost10'])
# This creates a dictionary-like object with BrainNetwork `G10` keyed by `'NSPN_cost10'`
brain_bundle
# Now add a series of random graphs created by edge swap randomisation of G10 (keyed by `'NSPN_cost10'`).
#
# The `create_random_graphs` method of the `GraphBundle` class takes in a **real** network (in our case `G10`) and creates a number (`5` in the example below) of random graphs. The output is a dictionary of all these graphs. The original one has the name you gave it (here: `"NSPN_cost10"`) and the random graphs have a suffix of `_Ri` where `i` is an integer counter starting at `0`.
#
# What's pretty cool is that this function again saves the work you've done before. So if you generate a few random graphs and save them to the `brain_bundle` dictionary and then later want to add some more, the `create_random_graphs` method will recognise that there are some random graphs there already and append the new ones to the `brain_bundle`. The index for these additional random graphs will start counting from where the original run left off.
#
#
# KW: DO A TIMEIT FUN DEMO HERE.
# Note that 5 is not usually a sufficient number of random graphs to do meaningful analysis,
# it is used here for time considerations
brain_bundle.create_random_graphs('NSPN_cost10', 5)
brain_bundle
# ### Report on a GraphBundle
#
# The following method will calculate global measures (if they have not already been calculated) for all of the graphs in `brain_bundle` and report the results in a DataFrame. We can do the same for rich club coefficients below.
# Global measures and rich-club coefficients for the real graph and all random graphs.
brain_bundle.report_global_measures()
rich_club_df = brain_bundle.report_rich_club()
# Show a mid-range slice of degrees for comparison.
rich_club_df.iloc[55:65, :]
# +
# PEP 8 (E731): named functions instead of lambda assignments.
def calc_closeness(graph):
    """Calculate the 'closeness' nodal measure on a BrainNetwork."""
    return graph.calculate_nodal_measures(measure_list=["closeness"])

def report_closeness(graph):
    """Return a DataFrame of node names and closeness values."""
    return graph.report_nodal_measures(columns=["name", "closeness"])

# Apply to every graph in the bundle, then collect per-graph reports.
brain_bundle.apply(graph_function=calc_closeness)
closeness_dict = brain_bundle.apply(graph_function=report_closeness)
for graph_key, closeness_df in closeness_dict.items():
    print(closeness_df.head())
# +
# PEP 8 (E731): named functions instead of lambda assignments.
def calc_degree(graph):
    """Calculate the 'degree' nodal measure on a BrainNetwork."""
    return graph.calculate_nodal_measures(measure_list=["degree"])

def report_degree(graph):
    """Return a DataFrame of node names and degree values."""
    return graph.report_nodal_measures(columns=["name", "degree"])

brain_bundle.apply(graph_function=calc_degree)
degree_dict = brain_bundle.apply(graph_function=report_degree)
for graph_key, degree_df in degree_dict.items():
    # Compare each graph's degrees against the real network's.
    print(degree_dict["NSPN_cost10"].head() == degree_df.head())
# -
degree = G10.report_nodal_measures(columns=["degree"])
# sns.distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot with a KDE overlay and density normalisation is the modern equivalent.
sns.histplot(degree, kde=True, stat="density")
# 90th percentile of nodal degree.
np.percentile(degree, 90)
| tutorials/introductory_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Attention on MNIST (Saliency and grad-CAM)
# Lets build the mnist model and train it for 5 epochs. It should get to about ~99% test accuracy.
# +
from __future__ import print_function
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

batch_size = 128
num_classes = 10
epochs = 5
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape to the (channels, H, W) or (H, W, channels) layout the backend expects.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Scale pixel values into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Simple convnet: two conv layers, max-pooling, dropout, dense head.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', name='preds'))
# BUG FIX: removed a redundant `model.add(Activation('softmax'))` here.
# The Dense layer above already applies softmax; stacking a second softmax
# distorts the output distribution during training AND would keep squashing
# the outputs after the softmax->linear swap performed later in this notebook.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
# ## Saliency
# To visualize activation over final dense layer outputs, we need to switch the `softmax` activation out for `linear` since gradient of output node will depend on all the other node activations. Doing this in keras is tricky, so we provide `utils.apply_modifications` to modify network parameters and rebuild the graph.
#
# If this swapping is not done, the results might be suboptimal. We will start by swapping out 'softmax' for 'linear' and compare what happens if we dont do this at the end.
# Lets pick an input over which we want to show the attention.
# +
# Select a test image belonging to class 0 (the digit '0').
class_idx = 0
indices = np.where(y_test[:, class_idx] == 1.)[0]
# pick some random input from here.
# (note: this actually always takes the FIRST matching image, not a random one)
idx = indices[0]
# Lets sanity check the picked image.
from matplotlib import pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)
plt.imshow(x_test[idx][..., 0])
# -
# Time for saliency visualization.
# +
# keras-vis helpers for saliency maps and for rebuilding a modified model.
from vis.visualization import visualize_saliency
from vis.utils import utils
from keras import activations
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'preds')
# Swap softmax with linear: gradients through softmax depend on all the
# other output nodes, which muddies per-class saliency.
model.layers[layer_idx].activation = activations.linear
# apply_modifications rebuilds the graph so the activation change takes effect.
model = utils.apply_modifications(model)
# Saliency: gradient of the class_idx output w.r.t. the input pixels.
grads = visualize_saliency(model, layer_idx, filter_indices=class_idx, seed_input=x_test[idx])
# Plot with 'jet' colormap to visualize as a heatmap.
plt.imshow(grads, cmap='jet')
# -
# To use guided saliency, we need to set `backprop_modifier='guided'`. For rectified saliency or deconv saliency, use `backprop_modifier='relu'`. Let's try these options quickly and see how they compare to vanilla saliency.
# Compare guided and rectified ('relu') backprop against the vanilla
# saliency computed above.
for modifier in ['guided', 'relu']:
    grads = visualize_saliency(model, layer_idx, filter_indices=class_idx,
                               seed_input=x_test[idx], backprop_modifier=modifier)
    plt.figure()
    plt.title(modifier)
    plt.imshow(grads, cmap='jet')
# Both of them look a lot better than vanilla saliency! This is in line with the observations in the paper.
#
# We can also visualize negative gradients to see the parts of the image that contribute negatively to the output by using `grad_modifier='negate'`.
# grad_modifier='negate' highlights pixels that contribute *negatively*
# to the class_idx output.
grads = visualize_saliency(model, layer_idx, filter_indices=class_idx, seed_input=x_test[idx],
                           backprop_modifier='guided', grad_modifier='negate')
plt.imshow(grads, cmap='jet')
# Let's try all the classes and show the original inputs and their heatmaps side by side. We cannot overlay the heatmap on the original image since it's grayscale.
#
# We will also compare the outputs of guided and rectified or deconv saliency.
# This corresponds to the Dense linear layer.
# For every digit class, show the input image next to its vanilla, guided
# and rectified ('relu') saliency maps.
for class_idx in np.arange(10):
    indices = np.where(y_test[:, class_idx] == 1.)[0]
    idx = indices[0]
    # One row of four panels: original image + three saliency variants.
    f, ax = plt.subplots(1, 4)
    ax[0].imshow(x_test[idx][..., 0])
    for i, modifier in enumerate([None, 'guided', 'relu']):
        grads = visualize_saliency(model, layer_idx, filter_indices=class_idx,
                                   seed_input=x_test[idx], backprop_modifier=modifier)
        if modifier is None:
            modifier = 'vanilla'  # None == plain (unmodified) backprop
        ax[i+1].set_title(modifier)
        ax[i+1].imshow(grads, cmap='jet')
# Guided saliency seems to give the best results.
# ## grad-CAM - vanilla, guided, rectified
# These should contain more detail since they use `Conv` or `Pooling` features that contain more spatial detail which is lost in `Dense` layers. The only additional detail compared to saliency is the `penultimate_layer_idx`. This specifies the pre-layer whose gradients should be used. See this paper for technical details: https://arxiv.org/pdf/1610.02391v1.pdf
#
# By default, if `penultimate_layer_idx` is not defined, it searches for the nearest pre layer. For our architecture, that would be the `MaxPooling2D` layer after all the `Conv` layers. Lets look at all the visualizations like before.
# +
from vis.visualization import visualize_cam
# This corresponds to the Dense linear layer.
# Same layout as the saliency grid above, but using grad-CAM, which pulls
# gradients from the nearest preceding spatial (Conv/Pooling) layer.
for class_idx in np.arange(10):
    indices = np.where(y_test[:, class_idx] == 1.)[0]
    idx = indices[0]
    f, ax = plt.subplots(1, 4)
    ax[0].imshow(x_test[idx][..., 0])
    for i, modifier in enumerate([None, 'guided', 'relu']):
        grads = visualize_cam(model, layer_idx, filter_indices=class_idx,
                              seed_input=x_test[idx], backprop_modifier=modifier)
        if modifier is None:
            modifier = 'vanilla'
        ax[i+1].set_title(modifier)
        ax[i+1].imshow(grads, cmap='jet')
# -
# In this case it appears that saliency is better than grad-CAM, as the penultimate `MaxPooling2D` layer has `(12, 12)` spatial resolution, which is relatively large compared to the input of `(28, 28)`. It is likely that the conv layer hasn't captured enough high-level information and most of that is likely within the `dense_4` layer.
#
# Here is the model summary for reference.
model.summary()  # print layer names, output shapes and parameter counts
# ## Visualization without swapping softmax
# As alluded at the beginning of the tutorial, we want to compare and see what happens if we didnt swap out softmax for linear activation. Lets try this with guided saliency which gave us the best results so far.
# +
# Swap linear back with softmax to demonstrate why the swap matters.
model.layers[layer_idx].activation = activations.softmax
model = utils.apply_modifications(model)
# Guided saliency computed with softmax still in place, for comparison.
for class_idx in np.arange(10):
    indices = np.where(y_test[:, class_idx] == 1.)[0]
    idx = indices[0]
    grads = visualize_saliency(model, layer_idx, filter_indices=class_idx,
                               seed_input=x_test[idx], backprop_modifier='guided')
    f, ax = plt.subplots(1, 2)
    ax[0].imshow(x_test[idx][..., 0])
    ax[1].imshow(grads, cmap='jet')
# -
# It does not work as well!
#
# It does not work! The reason is that maximizing an output node can be done by minimizing other outputs. Softmax is weird that way. It is the only activation that depends on other node output(s) in the layer.
| examples/mnist/attention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Notebook 8:
#
# This is an IPython notebook. Most of the code is composed of bash scripts, indicated by %%bash at the top of the cell, otherwise it is IPython code. This notebook includes code to download, assemble and analyze a published RADseq data set.
# +
### Notebook 8
### Data set 8: Barnacles
### Authors: <NAME> al. 2015
### Data Location: SRP051026
# -
# ## Download the sequence data
# Sequence data for this study are archived on the NCBI sequence read archive (SRA). Below I read in SraRunTable.txt for this project which contains all of the information we need to download the data.
#
# + Project SRA: SRP051026
# + BioProject ID: PRJNA269631
# + SRA link: http://trace.ncbi.nlm.nih.gov/Traces/study/?acc=SRP051026
# + language="bash"
# ## make a new directory for this analysis
# mkdir -p empirical_8/fastq/
# -
# #### For each ERS (individuals) get all of the ERR (sequence file accessions).
# +
## IPython code
import pandas as pd
import numpy as np
import urllib2  # Python 2 only: this notebook runs on a python2 kernel
import os
## open the SRA run table from github url
url = "https://raw.githubusercontent.com/"+\
      "dereneaton/RADmissing/master/empirical_8_SraRunTable.txt"
intable = urllib2.urlopen(url)
indata = pd.read_table(intable, sep="\t")
## print first few rows (sanity check of the parsed run table)
print indata.head()
# -
def wget_download(SRR, outdir, outname):
    """Build the wget command that fetches one .sra run from NCBI.

    The resulting file is saved as <outdir>/<outname>.sra. The command
    string is executed by the IPython shell escape on the last line.
    """
    # Destination path for the downloaded run.
    output = os.path.join(outdir, outname + ".sra")
    # Runs are sharded on the SRA FTP server by the first six characters
    # of the run accession, hence SRR[:6] in the directory path.
    tail = "{}/{}/{}.sra;".format(SRR[:6], SRR, SRR)
    call = ("wget -q -r -nH --cut-dirs=9 -O " + output +
            " ftp://ftp-trace.ncbi.nlm.nih.gov/"
            "sra/sra-instant/reads/ByRun/sra/SRR/" + tail)
    ## call bash script
    # ! $call
# Here we pass the SRR number and the sample name to the `wget_download` function so that the files are saved with their sample names.
# Download every run via wget, naming each .sra file after its sample ID.
for ID, SRR in zip(indata.Sample_Name_s, indata.Run_s):
    wget_download(SRR, "empirical_8/fastq/", ID)
# + language="bash"
# ## convert sra files to fastq using fastq-dump tool
# ## output as gzipped into the fastq directory
# fastq-dump --gzip -O empirical_8/fastq/ empirical_8/fastq/*.sra
#
# ## remove .sra files
# rm empirical_8/fastq/*.sra
# + language="bash"
# ls -lh empirical_8/fastq/
# -
# ## Make a params file
# + language="bash"
# pyrad --version
# + language="bash"
# ## remove old params file if it exists
# rm params.txt
#
# ## create a new default params file
# pyrad -n
# -
# #### Note:
# The data here are from Illumina Casava <1.8, so the phred scores are offset by 64 instead of 33, so we use that in the params file below.
# + language="bash"
# ## substitute new parameters into file
# sed -i '/## 1. /c\empirical_8/ ## 1. working directory ' params.txt
# sed -i '/## 6. /c\TGCAGG ## 6. cutters ' params.txt
# sed -i '/## 7. /c\20 ## 7. N processors ' params.txt
# sed -i '/## 9. /c\6 ## 9. NQual ' params.txt
# sed -i '/## 10./c\.85 ## 10. clust threshold ' params.txt
# sed -i '/## 12./c\4 ## 12. MinCov ' params.txt
# sed -i '/## 13./c\10 ## 13. maxSH ' params.txt
# sed -i '/## 14./c\empirical_8_m4 ## 14. output name ' params.txt
# sed -i '/## 18./c\empirical_8/fastq/*.gz ## 18. data location ' params.txt
# sed -i '/## 29./c\2,2 ## 29. trim overhang ' params.txt
# sed -i '/## 30./c\p,n,s ## 30. output formats ' params.txt
# -
# cat params.txt
# ## Assemble in _pyrad_
#
# + language="bash"
# pyrad -p params.txt -s 234567 >> log.txt 2>&1
# + language="bash"
# sed -i '/## 12./c\2 ## 12. MinCov ' params.txt
# sed -i '/## 14./c\empirical_8_m2 ## 14. output name ' params.txt
# + language="bash"
# pyrad -p params.txt -s 7 >> log.txt 2>&1
# -
# ## Results
# We are interested in the relationship between the amount of input (raw) data between any two samples, the average coverage they recover when clustered together, and the phylogenetic distances separating samples.
# #### Raw data amounts
# The average number of raw reads per sample is 1.36M.
# +
import pandas as pd
## read in the data (step-2 read-filtering stats, one row per sample)
s2dat = pd.read_table("empirical_8/stats/s2.rawedit.txt", header=0, nrows=42)
## print summary stats of reads passing filtering per sample
print s2dat["passed.total"].describe()
## find which sample has the most raw data
maxraw = s2dat["passed.total"].max()
print "\nmost raw data in sample:"
# NOTE: the column name really does contain a trailing space ('sample ')
print s2dat['sample '][s2dat['passed.total']==maxraw]
# -
# ### Look at distributions of coverage
# pyrad v.3.0.63 outputs depth information for each sample which I read in here and plot. First let's ask which sample has the highest depth of coverage. The std of coverages is pretty low in this data set compared to several others.
# +
## read in the s3 results (clustering stats; one row per sample)
s8dat = pd.read_table("empirical_8/stats/s3.clusters.txt", header=0, nrows=14)
## print summary stats of mean depth per sample
print "summary of means\n=================="
print s8dat['dpt.me'].describe()
## print summary stats of the std-dev of depth per sample
print "\nsummary of std\n=================="
print s8dat['dpt.sd'].describe()
## print summary stats: fraction of loci *below* the depth>5 cutoff
print "\nsummary of proportion lowdepth\n=================="
print pd.Series(1-s8dat['d>5.tot']/s8dat["total"]).describe()
## find which sample has the greatest depth of retained loci
max_hiprop = (s8dat["d>5.tot"]/s8dat["total"]).max()
print "\nhighest coverage in sample:"
print s8dat['taxa'][s8dat['d>5.tot']/s8dat["total"]==max_hiprop]
# -
# NOTE(review): maxprop recomputes the same quantity as max_hiprop above.
maxprop =(s8dat['d>5.tot']/s8dat['total']).max()
print "\nhighest prop coverage in sample:"
print s8dat['taxa'][s8dat['d>5.tot']/s8dat['total']==maxprop]
# +
import numpy as np
## print mean and std of coverage for the highest coverage sample
# The .depths file is one comma-separated line of per-locus read depths.
with open("empirical_8/clust.85/82121_15.depths", 'rb') as indat:
    depths = np.array(indat.read().strip().split(","), dtype=int)
print "Means for sample 82121_15"
print depths.mean(), depths.std()
# Same stats restricted to retained loci (depth > 5).
print depths[depths>5].mean(), depths[depths>5].std()
# -
# #### Plot the coverage for the sample with highest mean coverage
# Green shows the loci that were discarded and orange the loci that were retained. The majority of data were discarded for being too low of coverage.
# +
import toyplot
import toyplot.svg
import numpy as np
## read in the depth information for this sample (comma-separated ints)
with open("empirical_8/clust.85/82121_15.depths", 'rb') as indat:
    depths = np.array(indat.read().strip().split(","), dtype=int)
## make a barplot in Toyplot
canvas = toyplot.Canvas(width=350, height=300)
axes = canvas.axes(xlabel="Depth of coverage (N reads)",
                   ylabel="N loci",
                   label="dataset8/sample=82121_15")
## select the loci with depth > 5 (kept)
keeps = depths[depths>5]
## plot kept and discarded loci (overlaid histograms, depths binned 0-29)
edat = np.histogram(depths, range(30)) # density=True)
kdat = np.histogram(keeps, range(30)) #, density=True)
axes.bars(edat)
axes.bars(kdat)
#toyplot.svg.render(canvas, "empirical_8_depthplot.svg")
# -
# ### Print final stats table
# cat empirical_8/stats/empirical_8_m4.stats
# cat empirical_8/stats/empirical_8_m2.stats
# ## Infer ML phylogeny in _raxml_ as an unrooted tree
# + language="bash"
# ## raxml argumement w/ ...
# raxmlHPC-PTHREADS-AVX -f a -m GTRGAMMA -N 100 -x 12345 -p 12345 -T 20 \
# -w /home/deren/Documents/RADmissing/empirical_8/ \
# -n empirical_8_m4 -s empirical_8/outfiles/empirical_8_m4.phy
#
# + language="bash"
# ## raxml argumement w/ ...
# raxmlHPC-PTHREADS-AVX -f a -m GTRGAMMA -N 100 -x 12345 -p 12345 -T 20 \
# -w /home/deren/Documents/RADmissing/empirical_8/ \
# -n empirical_8_m2 -s empirical_8/outfiles/empirical_8_m2.phy
#
# + language="bash"
# head -n 20 empirical_8/RAxML_info.empirical_8
# -
# ### Plot the tree in R using `ape`
#
# %load_ext rpy2.ipython
# + magic_args="-h 800 -w 800" language="R"
# library(ape)
# tre <- read.tree("empirical_8/RAxML_bipartitions.empirical_8")
# ltre <- ladderize(tre)
#
# par(mfrow=c(1,2))
# plot(ltre, use.edge.length=F)
# nodelabels(ltre$node.label)
#
# plot(ltre, type='u')
# -
# ### Get phylo distances (GTRgamma dist)
# + language="R"
# mean(cophenetic.phylo(ltre))
#
| emp_nb_Barnacles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Luis-Martinez-Bautista/daa_2021_1/blob/master/11_enero.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="AWdrl4GU7LTs"
class NodoArbol:
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, value, left=None, right=None):
        self.data = value   # payload stored at this node
        self.left = left    # left child (NodoArbol or None)
        self.right = right  # right child (NodoArbol or None)

    def __repr__(self):
        # Debug-friendly representation; added for easier inspection in the
        # notebook — does not change any existing behavior.
        return "NodoArbol({!r})".format(self.data)
# + id="r384Kj-f8t7g"
# Root "R" with children "C" (left) and "H" (right).
arbol = NodoArbol("R", NodoArbol("C"), NodoArbol("H"))
# + id="zlc4ElG_9ry4"
# Two standalone nodes; these are not attached to `arbol` above.
nodo1 = NodoArbol("C")
nodo2 = NodoArbol("H")
# + colab={"base_uri": "https://localhost:8080/"} id="XJ4G4fWW-R6w" outputId="b63d1de4-51d9-4bcd-db3e-f905ea96c1c8"
print(arbol.right.data)
# NOTE(review): `arbol_v2` is never defined anywhere in this notebook — this
# line raises a NameError as written; presumably a leftover from a draft.
print(arbol_v2.right.data)
# NOTE: `arbol2` is defined in a *later* cell; this line only runs cleanly
# after that cell has been executed (out-of-order notebook execution).
print(arbol2.left.left.left.data)
# + id="n9GNrmyM-8-g"
# Left-skewed chain 4 -> 3 -> 2 -> 2, with a single right child 5.
arbol2 = NodoArbol(4, NodoArbol(3, NodoArbol(2, NodoArbol(2))), NodoArbol(5))
# + colab={"base_uri": "https://localhost:8080/"} id="siwNei8oEGvu" outputId="fb247611-f2c7-44ad-e2da-e2264e55f900"
# Descend along left children to the leftmost node of arbol2 and print it.
aux = arbol2
while aux.left is not None:  # idiom fix: `is not None` rather than `!= None`
    aux = aux.left
print(aux.data)
# + colab={"base_uri": "https://localhost:8080/"} id="S5XfRmbpE2SF" outputId="3bebeb14-71c2-438c-b5c3-c4b6e24e0357"
# Right-leaning tree; only right/left links that exist are shown.
arbol3 = NodoArbol("Santi", None, NodoArbol("Jesus", NodoArbol("Pedro", None, NodoArbol("Diana"))))
print(arbol3.right.left.right.data)  # path: Jesus -> Pedro -> Diana
| 11_enero.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# # Estimating molecular volumes to aid in powder X-ray diffraction indexing
# > An overview of using database-derived atomic volumes to aid PXRD indexing.
#
# - toc: true
# - badges: false
# - comments: true
# - categories: [PXRD, Indexing]
# - author: <NAME>
# - image: images/Volumes.png
# # Introduction
#
# An [article](http://scripts.iucr.org/cgi-bin/paper?S0108768101021814) published in 2001 by <NAME> describes how crystallographic databases can be used to derive the average volume occupied by atoms of each element in crystal structures. Using his tabulated values, it's possible to rapidly estimate the volume occupied by a given molecule, and use this to aid indexing of powder diffraction data. This is particularly useful for laboratory diffraction data, which is generally associated with lower figures of merit such as de Wolff's $M_{20}$ and Smith and Snyder's $F_N$, which can make discriminating between alternative options more challenging. Other volume estimation methods, notably the 18 ų rule are also commonly used, though Hofmann's volumes give generally more accurate results.
#
# I've put together a freely available web-app, *HofCalc*, which can be used to conveniently obtain these estimates. It should display reasonably well on mobile devices as well as PCs/laptops. You can access it at the following address:
#
# [https://hofcalc.herokuapp.com](https://hofcalc.herokuapp.com)
#
# 
#
# This post will explain how it works, and will look at some examples of how it can be used in practice. I'm grateful to <NAME> who provided invaluable feedback and assistance with debugging of the app.
# # Hofmann volumes
#
# After applying various filters to crystal structures deposited in the CSD, Hofmann ended up with a dataset comprised of 182239 structures. Hofmann only considers the elements up to atomic number 100 (fermium) in his work, and assumes that the volume of the unit cell is equivalent to:
#
# $$V_{est} = \sum\limits_{i=1}^{100} n_i\bar{v_i}(1+\bar{\alpha}T) = \mathbf{n}\bar{\mathbf{v}}(1+\bar{\alpha}T)$$
#
# Where $n_i$ is the number of atoms of element $i$ in the unit cell, and $\bar{v_i}$ is the average volume occupied by an atom of element $i$. He also assumes that atomic volumes vary linearly with temperature.
#
# He split the dataset into 20 subsets, then used an iterative least-squares method to solve the above equation for each of the subsets. This allowed him to find the average volumes occupied by atoms of each element, and due to the splitting of the data into subsets, he also obtains their standard deviations. The coefficient of thermal expansion, $\bar{\alpha}$, was found to be $0.95 \times 10^{-4} K^{-1}$. This temperature correction factor then allowed him to provide the average volumes for all of the elements represented in the CSD at 298 K.
#
# You can download a ```.json``` file containing the 298 K volumes [here](https://github.com/mspillman/blog/blob/master/_notebooks/files/Hofmann-volumes.json).
# # Comparison with other atomic volumes
#
# Let's compare Hofmann's volumes to those obtained from other sources. Hofmann's article compares his volumes to those derived by Mighell and coworkers, which were published in 1987. As additional comparison points, which highlight the importance of using *crystallographic volumes* in this context, I also downloaded some [atomic radii data](https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_%28data_page%29) from Wikipedia and converted these into atomic volumes (assuming spherical atoms). Sources for these radii may be found at the bottom of the Wikipedia article.
#
# Click on the coloured boxes in the top left to view individual types of volume, and shift-click to add other volumes back in again.
# +
#collapse-hide
import json
import pandas as pd
import numpy as np
import altair as alt

# Load Hofmann's 298 K average atomic volumes (element symbol -> A^3).
with open("files/Hofmann-volumes.json") as hv:
    hofmann_volumes = json.load(hv)
# (fix: removed a redundant hv.close(); the `with` block closes the file)

# Wide table: one row per element with its atomic number and Hofmann volume.
vols = []
for i, key in enumerate(hofmann_volumes.keys()):
    vols.append([i+1, key, hofmann_volumes[key]])
df = pd.DataFrame(vols)
df.columns = ["Atomic number", "Element", "Hofmann"]
df.reset_index(drop=True, inplace=True)
df.replace("N/A", np.NaN, inplace=True)

# Volumes derived from Wikipedia atomic radii (assuming spherical atoms);
# Mighell's values are already volumes and are copied through unchanged.
wikiradii = pd.read_excel("files/wikipedia_radii.xlsx")
wikiradii.replace("", np.NaN, inplace=True)
radtype = ["Mighell", "Empirical","Calculated","vdW","Covalent-single","Covalent-triple",
           "Metallic"]
for r in radtype:
    # Radii are in pm so /100 to convert to angstroms.
    if r == "Mighell":
        df[r] = wikiradii[r].values.astype(float)
    else:
        df[r] = (4*np.pi/3)*(wikiradii[r].values.astype(float)/100)**3

# Convert our dataframe to long-form as this is what is expected by altair
dflong = df.melt("Atomic number", var_name="Volume",
                 value_vars=["Hofmann"] + radtype)
# Restore the element symbols to the long dataframe
element = []
for an in dflong["Atomic number"]:
    element.append(df["Element"][df["Atomic number"] == an].item())
dflong["Element"] = element

# Clicking a legend entry filters the scatter to that volume type
# (shift-click adds further types back in).
click = alt.selection_multi(encodings=["color"])
# scatter plot, modify opacity based on selection
scatter = alt.Chart(dflong).mark_point().encode(
    x=alt.X('Element:N',sort=dflong["Atomic number"].values),
    y=alt.Y("value:Q", axis=alt.Axis(title='Volume / ų')),
    tooltip=['Element', 'Volume:N', 'value'],
    opacity=alt.value(0.85),
    color="Volume:N"
).transform_filter(click).properties(width=650, height=500).interactive()
# legend
legend = alt.Chart(dflong).mark_rect().encode(
    y=alt.Y('Volume:N', axis=alt.Axis(title='Select volume'), sort=[4,6,0,1,2,3,5,7]),
    color=alt.condition(click, 'Volume:N',
                        alt.value('lightgray'), legend=None),
).properties(
    selection=click,
)
chart = (legend | scatter)
chart
# -
# As you can see, the volumes of Hofmann and Mighell differ significantly from those I derived from the atomic radii.
#
# Let's print out some of the statistics describing the data, as well as compare the coefficient of variation for each type of volume.
#collapse-hide
# Summary statistics (count, mean, std, quartiles, ...) per volume type.
df.describe()[["Hofmann"]+radtype]
#collapse-hide
import matplotlib.pyplot as plt
plt.figure(figsize=(8,5))
# Coefficient of variation = std / mean, computed per volume type.
((df.describe().loc["std"] / df.describe().loc["mean"])[["Hofmann"]+radtype]).plot.bar()
plt.ylabel("Coefficient of variation")
plt.show()
# We see a much lower coefficient of variation for Hofmann's volumes than the others.
# # HofCalc - using the web app
#
# [HofCalc](http://hofcalc.herokuapp.com) makes use of two key python libraries to process chemical formulae ([pyvalem](https://github.com/xnx/pyvalem)) and resolve chemical names ([PubChemPy](https://github.com/mcs07/PubChemPy)) prior to processing. This allows the app to have a really convenient interface for specifying queries (see below), which enables users to easily mix and match between formulae and names to obtain the information they need.
#
# ## Formulae and names
#
# ### Basic use
# The simplest option is to enter the chemical formula or name of the material of interest. Names are resolved by querying [PubChem](https://pubchem.ncbi.nlm.nih.gov/), so common abbreviations for solvents can often be used e.g. DMF.
# Note that formulae can be prefixed with a multiple, e.g. 2H2O
#
# | Search term | Type | $V_{Hofmann}$ |
# |:-----------:|:-------:|:-------------:|
# | ethanol | name | 69.61 |
# | CH3CH2OH | formula | 69.61 |
# | water | name | 21.55 |
# | 2H2O | formula | 43.10 |
#
#
# ### Multiple search terms
#
# It is also possible to search for multiple items simultaneously, and mix and match name and formulae by separating individual components with a semicolon. This means that for example, 'amodiaquine dihydrochloride dihydrate' can also be entered as 'amodiaquine; 2HCl; 2H2O'.
#
# | Search term | Total $V_{Hofmann}$ |
# |:-------------------------------------:|:-------------------:|
# | carbamazepine; L-glutamic acid | 497.98 |
# | zopiclone; 2H2O | 496.02 |
# | C15H12N2O; CH3CH2COO-; Na+ | 419.79 |
# | sodium salicylate; water | 204.21 |
# | amodiaquine dihydrochloride dihydrate | 566.61 |
# | amodiaquine; 2HCl; 2H2O | 566.61 |
#
#
# ### More complex examples - hemihydrates
#
# In cases where fractional multiples of search components are required, such as with hemihydrates, care should be taken to check the evaluated chemical formula for consistency with the expected formula.
#
# | Search term | Evaluated as | $V_{Hofmann}$ | Divide by | Expected Volume |
# |:-----------------------------------------------------------:|:--------------------:|:-----------------:|:---------:|:---------------:|
# | Calcium sulfate hemihydrate | Ca2 H2 O9 S2 | 253.07 | 2 | 126.53 |
# | calcium; calcium; sulfate; sulfate; water | Ca2 H2 O9 S2 | 253.07 | 2 | 126.53 |
# | calcium; sulfate; 0.5H2O | Ca1 H1.0 O4.5 S1 | 126.53 | - | 126.53 |
# | Codeine phosphate hemihydrate | C36 H50 N2 O15 P2 | 1006.77 | 2 | 503.38 |
# | codeine; codeine; phosphoric acid; phosphoric acid; water | C36 H50 N2 O15 P2 | 1006.77 | 2 | 503.38 |
# | codeine; phosphoric acid; 0.5H2O | C18 H25.0 N1 O7.5 P1 | 503.38 | - | 503.38 |
#
# ### Charged species in formulae
#
# Charges could potentially interfere with the parsing of chemical formulae. For example, two ways of representing an oxide ion:
#
# | Search term | Evaluated as |
# |:-----------:|:------------:|
# | O-2 | 1 x O |
# | O2- | 2 x O |
#
# Whilst it is recommended that charges be omitted from HofCalc queries, if including charges in your queries, ensure that the correct number of atoms has been determined in the displayed atom counts or the downloadable summary file. For more information on formatting formulae, see the [pyvalem documentation](https://github.com/xnx/pyvalem).
#
#
# ## Temperature
#
# The temperature, $T$ (in kelvin) is automatically included in the volume calculation via the following equation:
#
# $$V = \sum{n_{i}v_{i}}(1 + \alpha(T - 298))$$
#
# Where $n_{i}$ and $v_{i}$ are the number and Hofmann volume (at 298 K) of the $i$th element in the chemical formula, and $\alpha = 0.95 \times 10^{-4} K^{-1}$.
#
#
# ## Unit cell volume
#
# If the volume of a unit cell is supplied, then the unit cell volume divided by the estimated molecular volume will also be shown.
#
# | Search term | $V_{cell}$ | $V_{Hofmann}$ | $\frac{V_{cell}}{V_{Hofmann}}$ |
# |:---------------:|:----------------:|:--------------:|:------------------------------:|
# | zopiclone, 2H2O | 1874.61 | 496.02 | 3.78 |
# | verapamil, HCl | 1382.06 | 667.57 | 2.07 |
#
#
# ## Summary Files
#
# Each time HofCalc is used, a downloadable summary file is produced. It is designed to serve both as a record of the query for future reference and also as a method to sense-check the interpretation of the entered terms, with links to the PubChem entries where relevant.
# An example of the contents of the summary file for the following search terms is given below.
#
# Search term = ```carbamazepine; indomethacin```
#
# T = 293 K
#
# Unit cell volume = 2921.6 ų
#
#
# ```json
# {
# "combined": {
# "C": 34,
# "H": 28,
# "N": 3,
# "O": 5,
# "Cl": 1
# },
# "individual": {
# "carbamazepine": {
# "C": 15,
# "H": 12,
# "N": 2,
# "O": 1
# },
# "indomethacin": {
# "C": 19,
# "H": 16,
# "Cl": 1,
# "N": 1,
# "O": 4
# }
# },
# "user_input": [
# "carbamazepine",
# "indomethacin"
# ],
# "PubChem CIDs": {
# "carbamazepine": 2554,
# "indomethacin": 3715
# },
# "PubChem URLs": {
# "carbamazepine": "https://pubchem.ncbi.nlm.nih.gov/compound/2554",
# "indomethacin": "https://pubchem.ncbi.nlm.nih.gov/compound/3715"
# },
# "individual_volumes": {
# "carbamazepine": 303.86,
# "indomethacin": 427.77
# },
# "V_Cell / V_Hofmann": 3.99,
# "Temperature": 293,
# "Hofmann Volume": 731.62,
# "Hofmann Density": 1.35
# }
# ```
# # Case study: CT-DMF2
#
# The crystal structure of chlorothiazide N,N-dimethylformamide, a.k.a [CT-DMF2](https://doi.org/10.1002/jps.20942), was solved from laboratory powder diffraction data back in 2007. I decided to try re-indexing the diffraction data to see if HofCalc would be of use.
#
# Using the *DASH* interface to *DICVOL*, the following unit cells are suggested:
#
# 
#
# Both monoclinic and triclinic cells are obtained with very different unit cell volumes. Whilst the figures of merit certainly push towards accepting the conclusion of a monoclinic unit cell, it's worth checking to see if this makes sense given the expected composition of the material. In addition, there may be more than one dimethylformamide molecule crystallising with the chlorothiazide - HofCalc may be able to shed some light there too.
#
# The [paper](https://doi.org/10.1002/jps.20942) states that the solvate was formed by recrystallisation of chlorothiazide from DMF solvent, so it seems logical to try the following permutations:
# 1. chlorothiazide alone
# 2. chlorothiazide + 1 DMF
# 3. chlorothiazide + 2 DMF (etc)
#
#
# | HofCalc query | $V_{Hofmann}$ | $V_{cell}$ | $\frac{V_{cell}}{V_{Hofmann}}$ |
# |:-------------------------------:|:-------------:|:------------------:|:------------------------------:|
# | chlorothiazide | 284.73 | 2422 (triclinic) | 8.51 |
# | chlorothiazide | 284.73 | 3950 (monoclinic) | 13.87 |
# | chlorothiazide; DMF | 385.09 | 2422 (triclinic) | 6.29 |
# | chlorothiazide; DMF | 385.09 | 3950 (monoclinic) | 10.26 |
# | chlorothiazide; DMF; DMF | 485.45 | 2422 (triclinic) | 4.99 |
# | chlorothiazide; DMF; DMF | 485.45 | 3950 (monoclinic) | 8.14 |
# | chlorothiazide; DMF; DMF; DMF | 585.81 | 2422 (triclinic) | 4.13 |
# | chlorothiazide; DMF; DMF; DMF | 585.81 | 3950 (monoclinic) | 6.74 |
#
# If we exclude those results with $\frac{V_{cell}}{V_{mol}}$ ratios > 0.25 away from a (crystallographically sensible) whole number, we can see from the table that the most favourable compositions are CT + 2xDMF (monoclinic) and CT + 3xDMF (triclinic). Given the higher figure of merit for the monoclinic unit cell, it seems reasonable to take this forward and attempt space-group determination. Doing this in *DASH* identifies the most probable space group as $P2_1/c$, which then implies $Z'=2$. This is indeed the correct result.
#
# If we compare this to the commonly used 18 ų rule, we end up with the following results:
#
# | Possible composition | $V_{18Å^{3}}$ | $V_{cell}$ | $\frac{V_{cell}}{V_{18Å^{3}}}$ |
# |:-------------------------------:|:-------------:|:------------------:|:--------------------------:|
# | chlorothiazide | 306 | 2422 (triclinic) | 7.92 |
# | chlorothiazide | 306 | 3950 (monoclinic) | 12.91 |
# | chlorothiazide; DMF | 396 | 2422 (triclinic) | 6.12 |
# | chlorothiazide; DMF | 396 | 3950 (monoclinic) | 9.97 |
# | chlorothiazide; DMF; DMF | 486 | 2422 (triclinic) | 4.98 |
# | chlorothiazide; DMF; DMF | 486 | 3950 (monoclinic) | 8.13 |
# | chlorothiazide; DMF; DMF; DMF | 576 | 2422 (triclinic) | 4.20 |
# | chlorothiazide; DMF; DMF; DMF | 576 | 3950 (monoclinic) | 6.86 |
#
# Again, CT + 2xDMF is in the candidates to check, however, using the 18 ų rule, a triclinic pure chlorothiazide unit cell also becomes a viable possibility. Had there been a less clear distinction in the indexing figure-of-merit, this may have resulted in time being wasted on testing this additional possibility.
# # Conclusions
#
# Hofmann's volumes give more accurate estimates of molecular volumes in crystals, and should be used in preference to the 18 ų rule where possible.
#
# To make this easier for people, the [HofCalc web-app](https://hofcalc.herokuapp.com) can be used to very rapidly and conveniently obtain these estimates.
| _notebooks/2021-11-10-Hofcalc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pyspark.sql
import pyspark.sql.functions as sf
# Obtain (or reuse) the active SparkSession for this notebook.
spark = pyspark.sql.SparkSession.Builder().getOrCreate()
# # Watson Sales Product Sample Data
#
# In this example, we want to have a look at the pivoting capabilities of Spark. Since pivoting is commonly used with sales data containing information for different product categories or countries, we will use a data set called "Watson Sales Product Sample Data" which was downloaded from https://www.ibm.com/communities/analytics/watson-analytics-blog/sales-products-sample-data/
# # 1 Load and inspect data
#
# First we load the data, which is provided as a single CSV file, which again is well supported by Apache Spark
basedir = "s3://dimajix-training/data"
data = (
spark.read.option("header", True)
.option("inferSchema", True)
.csv(basedir + "/watson-sales-products/WA_Sales_Products_2012-14.csv")
)
# ### Inspect schema
#
# Since we used the existing header information and also let Spark infer appropriate data types, let us inspect the schema now.
data.printSchema()
# ### Inspect pivoting candidates
#
# Now let us find some good candidates for a pivoting column. A pivoting column shouldn't have too many distinct entries, otherwise the result probably doesn't make too much sense and doesn't help the business expert in interpretation.
#
# We can either use
# ```
# data.select("Retailer type").distinct().count()
# ```
# which will give us the number of distinct values for a single column, or we can use the Spark aggregate function `countDistinct` which allows us to retrieve information for multiple columns within a single `select`.
# +
result = data.select(
sf.countDistinct("Retailer country"),
sf.countDistinct("Retailer type"),
sf.countDistinct("Product line"),
sf.countDistinct("Product type"),
sf.countDistinct("Quarter"),
)
result.toPandas()
# -
# # 2 Pivoting by Product Line
#
# The first example pivots by the product line, since there are only five different distinct values.
revenue_per_product_line = (
data.groupBy("Quarter", "Retailer Country")
.pivot("Product line")
.agg(sf.sum("Revenue"))
)
revenue_per_product_line.toPandas()
# ## 2.1 Exercise
#
# Create an aggregated table with
# * Country and Product Line in Rows
# * The quantity for each quarter in different columns
# +
# YOUR CODE HERE
# -
# # 3 Unpivoting again
#
# Sometimes you just need the opposite operation: You have a data set in pivoted format and want to unpivot it. There is no simple built-in function provided by Spark, but you can construct the unpivoted table as follows
# * For every pivoted column:
# * Project data frame onto non-pivot columns
# * Add a new column with an appropriate name containing the name of the pivot column as its value
# * Add a new column with an appropriate name containing the values of the pivot column
# * Union together all these data frames
# ## 3.1 Specific Example
#
# Now let us perform these steps for the pivoted table above
# +
revenue_camping = revenue_per_product_line.select(
sf.col("Quarter"),
sf.col("Retailer Country"),
sf.lit("Camping Equipment").alias("Product line"),
sf.col("Camping Equipment").alias("Revenue"),
)
revenue_golf = revenue_per_product_line.select(
sf.col("Quarter"),
sf.col("Retailer Country"),
sf.lit("Golf Equipment").alias("Product line"),
sf.col("Golf Equipment").alias("Revenue"),
)
revenue_mountaineering = revenue_per_product_line.select(
sf.col("Quarter"),
sf.col("Retailer Country"),
sf.lit("Mountaineering Equipment").alias("Product line"),
sf.col("Mountaineering Equipment").alias("Revenue"),
)
revenue_outdoor = revenue_per_product_line.select(
sf.col("Quarter"),
sf.col("Retailer Country"),
sf.lit("Outdoor Protection").alias("Product line"),
sf.col("Outdoor Protection").alias("Revenue"),
)
revenue_personal = revenue_per_product_line.select(
sf.col("Quarter"),
sf.col("Retailer Country"),
sf.lit("Personal Accessories").alias("Product line"),
sf.col("Personal Accessories").alias("Revenue"),
)
result = (
revenue_camping.union(revenue_golf)
.union(revenue_mountaineering)
.union(revenue_outdoor)
.union(revenue_personal)
)
result.limit(10).toPandas()
# -
# ## 3.2 Generic Approach
#
# Of course manually unpivoting is somewhat tedious, but we already see a pattern:
# * Select all non-pivot columns
# * Create a new column containing the pivot column name
# * Create a new column containing the pivot column values
# * Union together everything
#
# This can be done by writing some small Python functions as follows:
# +
import functools
# Unpivot a single column, thereby creating one data frame
def unpivot_column(df, other, pivot_column, pivot_value, result_column):
    """Unpivot a single pivoted column into a (name, value) column pair.

    Keeps every column listed in `other` unchanged, adds a literal column
    named `pivot_column` holding the pivoted column's name, and renames the
    pivoted column `pivot_value` to `result_column`.
    """
    kept = [df[name] for name in other]
    name_col = sf.lit(pivot_value).alias(pivot_column)
    value_col = df[pivot_value].alias(result_column)
    return df.select(*kept, name_col, value_col)
# Unpivot multiple columns by using the above method
def unpivot(df, pivot_column, pivot_values, result_column):
    """
    Unpivot (melt) a pivoted data frame back into long format.

    df - input data frame
    pivot_column - the name of the new column containing each pivot column name
    pivot_values - the list of pivoted column names
    result_column - the name of the column containing the values of the pivot columns
    """
    # Columns that are not pivoted are carried through unchanged into every
    # partial frame.  (Idiom fix: `f.name not in` instead of `not f.name in`.)
    common_columns = [f.name for f in df.schema.fields if f.name not in pivot_values]
    # One data frame per pivoted column, then union them all together.
    unpivot_dfs = [
        unpivot_column(df, common_columns, pivot_column, v, result_column)
        for v in pivot_values
    ]
    return functools.reduce(lambda x, y: x.union(y), unpivot_dfs)
# -
# Let's test the function
product_lines = [
"Camping Equipment",
"Golf Equipment",
"Mountaineering Equipment",
"Outdoor Protection",
"Personal Accessories",
]
result_per_product_line = unpivot(
revenue_per_product_line, "Product Line", product_lines, "Revenue"
)
result_per_product_line.toPandas()
# ## 3.3 Exercise
#
# Now unpivot the result of exercise 2.1. You can do that either manually or try using the generic function defined above.
| spark-training/spark-python/jupyter-advanced-pivoting/Pivoting - Full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import cv2
import glob
import random
import numpy as np
import glob
from scipy.ndimage import zoom
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"] #Emotion list
fishface = cv2.face.createFisherFaceRecognizer() #Initialize fisher face classifier
# ------------------- LIVE FACE RECOGNITION -----------------------------------
def detectFaces(frame):
    """Detect a single face in a BGR frame using several Haar cascades.

    Tries four different frontal-face cascade classifiers in turn and keeps
    the result of the first one that detects exactly one face.

    Returns a tuple (gray, facefeatures) where `gray` is the grayscale
    version of the full frame and `facefeatures` is either a sequence with
    one (x, y, w, h) rectangle or an empty list when no classifier found
    exactly one face.
    """
    # NOTE(review): the cascade XML files are reloaded on every call; if this
    # runs once per video frame, hoisting the classifiers to module level
    # would avoid repeated disk reads.
    faceDet = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    faceDet2 = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
    faceDet3 = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    faceDet4 = cv2.CascadeClassifier("haarcascade_frontalface_alt_tree.xml")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert image to grayscale
    # Detect face using 4 different classifiers (identical parameters).
    detect_args = dict(scaleFactor=1.1, minNeighbors=10, minSize=(5, 5),
                       flags=cv2.CASCADE_SCALE_IMAGE)
    face = faceDet.detectMultiScale(gray, **detect_args)
    face2 = faceDet2.detectMultiScale(gray, **detect_args)
    face3 = faceDet3.detectMultiScale(gray, **detect_args)
    face4 = faceDet4.detectMultiScale(gray, **detect_args)
    # Go over detected faces, stop at first classifier with exactly one
    # detection, return empty if no face.
    facefeatures = []
    if len(face) == 1:
        facefeatures = face
    elif len(face2) == 1:
        # BUG FIX: was `facefeatures == face2`, a no-op comparison that left
        # facefeatures empty whenever only the second classifier succeeded.
        facefeatures = face2
    elif len(face3) == 1:
        facefeatures = face3
    elif len(face4) == 1:
        facefeatures = face4
    return gray, facefeatures
def extract_face_features(gray, detected_face, offset_coefficients):
    """Crop a detected face out of a grayscale image and rescale it.

    gray - 2-D grayscale image array
    detected_face - (x, y, w, h) bounding rectangle of the face
    offset_coefficients - (horizontal, vertical) fractions of the box size
        trimmed from the crop (horizontal trim on both sides, vertical trim
        only at the top)

    Returns the cropped face resampled to roughly 350x350 pixels.
    """
    x, y, w, h = detected_face
    h_off = int(offset_coefficients[0] * w)
    v_off = int(offset_coefficients[1] * h)
    # Trim the crop by the requested offsets before resampling.
    cropped = gray[y + v_off : y + h, x + h_off : x + w - h_off]
    scale = (350.0 / cropped.shape[0], 350.0 / cropped.shape[1])
    return zoom(cropped, scale)
if __name__ == "__main__":
    # Live emotion recognition demo: read webcam frames, detect a face,
    # classify its emotion with the pre-trained Fisher face model, and
    # overlay the result on the video window until the user presses Esc.
    fishface.load("results_0.0.yml")  # pre-trained model weights (must exist on disk)
    video_capture = cv2.VideoCapture(0)  # default webcam
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        # detect faces (gray is the full grayscale frame)
        gray, detected_faces = detectFaces(frame)
        face_index = 0
        cv2.putText(frame, "Press Esc to QUIT", (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1)
        # predict output
        for face in detected_faces:
            (x, y, w, h) = face
            # ignore small detections -- presumably to filter false positives
            # or distant faces; threshold of 100 px appears empirical
            if w > 100:
                # draw rectangle around face
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                gray_temp = gray[y:y+h, x:x+w] #Cut the frame to size
                # resize to the 100x100 input size the recognizer was trained on
                extracted_face = cv2.resize(gray_temp, (100, 100))
                # extract features
                #extracted_face = extract_face_features(gray, face, (0.075, 0.05)) #(0.075, 0.05)
                # predict result
                prediction_result = fishface.predict(extracted_face)
                # draw extracted face in the top right corner
                frame[face_index * 100: (face_index + 1) * 100, -101:-1, :] = cv2.cvtColor(extracted_face * 255, cv2.COLOR_GRAY2RGB)
                # annotate main image with a label
                cv2.putText(frame, emotions[prediction_result],(x,y), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 2, 155, 5)
                # increment counter
                face_index += 1
        # Display the resulting frame
        cv2.imshow('Video', frame)
        # Esc key (code 27) quits the loop
        if cv2.waitKey(10) & 0xFF == 27:
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
# -
| Live Face Recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tflearn]
# language: python
# name: conda-env-tflearn-py
# ---
# # Hello, Tensor World!
# +
import tensorflow as tf
# Create TensorFlow object called hello_constant
hello_constant = tf.constant('Hello World!')
with tf.Session() as sess:
# Run the tf.constant operation in the session
output = sess.run(hello_constant)
print(output)
# +
x = tf.placeholder(tf.string)
with tf.Session() as sess:
output = sess.run(x, feed_dict={x: 'Hello World'})
print(output)
# +
x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)
with tf.Session() as sess:
output = sess.run(x, feed_dict={x: 'Test String', y: 123, z: 45.67})
print(output)
# -
# ## Quiz
# +
# Solution is available in the other "solution.py" tab
import tensorflow as tf
def run():
    """Feed the constant 123 through a TF1 int32 placeholder and return it."""
    x = tf.placeholder(tf.int32)
    with tf.Session() as sess:
        # The placeholder simply echoes whatever value is fed to it.
        result = sess.run(x, feed_dict={x: 123})
    return result
# -
# ## Quiz
# +
# Solution is available in the other "solution.py" tab
import tensorflow as tf
# TODO: Convert the following to TensorFlow:
x = tf.constant(10)
y = tf.constant(2)
z = tf.subtract(tf.divide(x, y), 1)
# TODO: Print z from a session
with tf.Session() as sess:
# TODO: Feed the x tensor 123
output = sess.run(z)
print(output)
# -
# ## Quiz
# +
# Solution is available in the other "quiz_solution.py" tab
import tensorflow as tf
def get_weights(n_features, n_labels):
    """
    Return TensorFlow weights
    :param n_features: Number of features
    :param n_labels: Number of labels
    :return: TensorFlow weights
    """
    # Initialise the (n_features x n_labels) weight matrix from a
    # truncated normal distribution.
    initial = tf.truncated_normal((n_features, n_labels))
    return tf.Variable(initial)
def get_biases(n_labels):
    """
    Return TensorFlow bias
    :param n_labels: Number of labels
    :return: TensorFlow bias
    """
    # Biases start at zero, one per output label.
    initial = tf.zeros(n_labels)
    return tf.Variable(initial)
def linear(input, w, b):
    """
    Return linear function in TensorFlow
    :param input: TensorFlow input
    :param w: TensorFlow weights
    :param b: TensorFlow biases
    :return: TensorFlow linear function
    """
    # Linear transform: xW + b
    weighted = tf.matmul(input, w)
    return tf.add(weighted, b)
# +
# Solution is available in the other "sandbox_solution.py" tab
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from quiz import get_weights, get_biases, linear
def mnist_features_labels(n_labels):
    """
    Gets the first <n> labels from the MNIST dataset
    :param n_labels: Number of labels to use
    :return: Tuple of feature list and label list
    """
    features, labels = [], []
    mnist = input_data.read_data_sets('/datasets/ud730/mnist', one_hot=True)
    # In order to make quizzes run faster, we're only looking at 10000 images
    batch = mnist.train.next_batch(10000)
    for feature, label in zip(*batch):
        # Keep the sample only if its one-hot label falls in the first
        # n_labels classes.
        if label[:n_labels].any():
            features.append(feature)
            labels.append(label[:n_labels])
    return features, labels
# Number of features (28*28 image is 784 features)
n_features = 784
# Number of labels
n_labels = 3
# Features and Labels
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# Weights and Biases
w = get_weights(n_features, n_labels)
b = get_biases(n_labels)
# Linear Function xW + b
logits = linear(features, w, b)
# Training data
train_features, train_labels = mnist_features_labels(n_labels)
with tf.Session() as session:
# TODO: Initialize session variables
session.run(tf.global_variables_initializer())
# Softmax
prediction = tf.nn.softmax(logits)
# Cross entropy
# This quantifies how far off the predictions were.
# You'll learn more about this in future lessons.
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
# You'll learn more about this in future lessons.
loss = tf.reduce_mean(cross_entropy)
# Rate at which the weights are changed
# You'll learn more about this in future lessons.
learning_rate = 0.08
# Gradient Descent
# This is the method used to train the model
# You'll learn more about this in future lessons.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: train_features, labels: train_labels})
# Print loss
print('Loss: {}'.format(l))
| Lesson16-Intro to TensorFlow/HelloWorld.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''itk'': conda)'
# metadata:
# interpreter:
# hash: 08284c7b8df3c43eacdc07b3a3e218c122fb5b477e5d2b89332e3279e5230b20
# name: python3
# ---
import os
import numpy
import vtk
import csv
from IPython.display import Image
def vtk_show(renderer, width=400, height=300):
    """Render a vtkRenderer off-screen and return the frame as an IPython Image.

    renderer - the vtk.vtkRenderer to render
    width, height - size of the rendered image in pixels
    """
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetOffScreenRendering(1)
    renderWindow.AddRenderer(renderer)
    renderWindow.SetSize(width, height)
    renderWindow.Render()

    # Grab the rendered frame into an image filter so it can be encoded.
    windowToImageFilter = vtk.vtkWindowToImageFilter()
    windowToImageFilter.SetInput(renderWindow)
    windowToImageFilter.Update()

    # Encode the frame as PNG into memory (no file is written).
    writer = vtk.vtkPNGWriter()
    writer.SetWriteToMemory(1)
    writer.SetInputConnection(windowToImageFilter.GetOutputPort())
    writer.Write()

    # BUG FIX: the original used `str(buffer(...))` -- the Python 2-only
    # `buffer` builtin does not exist in Python 3 (which this notebook's
    # kernel declares), and str() would mangle the binary PNG data anyway.
    # vtk's GetResult() array supports the buffer protocol, so convert it
    # to raw bytes for IPython.display.Image.
    data = bytes(memoryview(writer.GetResult()))
    return Image(data)
def createDummyRenderer():
    """Create a vtkRenderer with a white background and a fixed camera.

    The camera is aimed at z = 255 -- presumably the centre of the volume
    being rendered (TODO confirm against the data's extent) -- so that
    successive renders are framed consistently.
    """
    renderer = vtk.vtkRenderer()
    renderer.SetBackground(1.0, 1.0, 1.0)  # white background
    camera = renderer.MakeCamera()
    camera.SetPosition(-256, -256, 512)
    camera.SetFocalPoint(0.0, 0.0, 255.0)
    camera.SetViewAngle(30.0)
    camera.SetViewUp(0.46, -0.80, -0.38)
    renderer.SetActiveCamera(camera)
    return renderer
# Convenience converters between Python lists and numpy arrays.
# BUG FIX: l2n previously returned numpy.array(1) -- a constant scalar --
# regardless of its argument; it now converts the given sequence.
l2n = lambda l: numpy.array(l)
n2l = lambda n: list(n)
# +
# Path to the .mha file
filenameSegmentation = "./nac_brain_atlas/brain_segmentation.mha"
# Path to colorfile.txt
filenameColorfile = "./nac_brain_atlas/colorfile.txt"
# Opacity of the different volumes (between 0.0 and 1.0)
volOpacityDef = 0.25
# +
reader = vtk.vtkMetaImageReader()
reader.SetFileName(filenameSegmentation)
castFilter = vtk.vtkImageCast()
castFilter.SetInputConnection(reader.GetOutputPort())
castFilter.SetOutputScalarTypeToUnsignedShort()
castFilter.Update()
imdataBrainSeg = castFilter.GetOutputPort()
# +
fid = open(filenameColorfile, "r")
reader = csv.reader(fid)
dictRGB = {}
for line in reader:
dictRGB[int(line[0])] = [float(line[2])/255.0,
float(line[3])/255.0,
float(line[4])/255.0]
fid.close()
# +
funcColor = vtk.vtkColorTransferFunction()
for idx in dictRGB.keys():
funcColor.AddRGBPoint(idx,
dictRGB[idx][0],
dictRGB[idx][1],
dictRGB[idx][2])
# +
funcOpacityScalar = vtk.vtkPiecewiseFunction()
for idx in dictRGB.keys():
funcOpacityScalar.AddPoint(idx, volOpacityDef if idx != 0 else 0.0)
# +
funcOpacityGradient = vtk.vtkPiecewiseFunction()
funcOpacityGradient.AddPoint(1, 0.0)
funcOpacityGradient.AddPoint(5, 0.1)
funcOpacityGradient.AddPoint(100, 1.0)
# -
propVolume = vtk.vtkVolumeProperty()
propVolume.ShadeOff()
propVolume.SetColor(funcColor)
propVolume.SetScalarOpacity(funcOpacityScalar)
propVolume.SetGradientOpacity(funcOpacityGradient)
propVolume.SetInterpolationTypeToLinear()
# +
# ## Volume Rendering
# +
mapperVolume = vtk.vtkFixedPointVolumeRayCastMapper()
mapperVolume.SetInputConnection(imdataBrainSeg)
actorVolume = vtk.vtkVolume()
actorVolume.SetMapper(mapperVolume)
actorVolume.SetProperty(propVolume)
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)
renderer.AddVolume(actorVolume)
renderer.SetBackground(vtk.vtkNamedColors().GetColor3d("Black"))
renderWin.SetSize(800, 800)
renderWin.Render()
renderInteractor.Start()
# -
| Demo/w4/VolumeRendering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import torch
import torchvision
from PIL import Image
import torchvision.transforms as transforms
# %matplotlib inline
import matplotlib.pyplot as plt
import torchvision.transforms.functional as F
import numpy as np
import cv2
colors = torch.tensor([[0,0,128],[0,0,64],[0,0,32],[0,128,0],[0,64,0],[0,32,0],[0,16,0],[0,8,0],[0,4,0],[0,2,0],
[0,1,0],[128,0,0],[64,0,0],[32,0,0],[16,0,0],[8,0,0],[4,0,0],[1,0,0]])
colormap = torch.tensor([
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255]
])
# +
img = Image.open('./gtFine/test/VWI10001 2020-02-02-08-04-50_labelIds.png')
img = cv2.imread('./gtFine/test/VWI10001 2020-02-02-08-04-50_labelIds.png')
real = Image.open('./leftImg8bit/test/VWI10001 2020-02-02-08-04-50.png')
# img_transform = transforms.Compose([
# transforms.ToTensor(),
# ])
# img = img_transform(img)
# +
class ConvertMaskColors:
    """Convert Mask colors to reflect the pretrained data colors"""
    # NOTE(review): this transform is an unimplemented stub -- both the
    # constructor and __call__ are no-ops.  As written, __call__ returns
    # None, which would drop the image if used in a transform pipeline;
    # confirm whether it is still needed before wiring it in.
    def __init__(self,):
        pass
    def __call__(self,img):
        pass
class TopLeftCornerErase:
    """Erase an n_pixels x n_pixels square in the image's top-left corner,
    filling it with one pixel value sampled uniformly from the image.

    Note: the reshape below assumes a 4-channel image tensor.
    """
    def __init__(self, n_pixels: int):
        # Side length of the square region to erase.
        self.n_pixels = n_pixels
    def __call__(self, img: torch.Tensor) -> torch.Tensor:
        # Flatten to one row per pixel (H*W rows of 4 channels) and pick one.
        pixels = img.reshape(4, -1).transpose(1, 0)
        choice = torch.randint(len(pixels), (1,))[0]
        fill = pixels[choice][:, None, None]
        # Overwrite the top-left square with the sampled pixel value.
        return F.erase(img, 0, 0, self.n_pixels, self.n_pixels, fill)
# +
erase = transforms.Compose([
TopLeftCornerErase(100)
# reverse_preprocess,
])
trans = transforms.Compose([
transforms.ToPILImage(),
])
# -
cls = img.unique()
# res = torch.stack([torch.where(img==cls_val, torch.tensor(1), torch.tensor(0)) for cls_val in cls])
res = [torch.where(img==cls_val, torch.tensor(1), torch.tensor(0)) for cls_val in cls]
res = torch.stack(res)
img = cv2.imread('./gtFine/test/VWI10001 2020-02-02-08-04-50_labelIds.png')
b,g,r = cv2.split(img)
shape = b.shape
b = b.reshape(-1)
g = g.reshape(-1)
r = r.reshape(-1)
color_b = {}
color_g = {}
color_r = {}
# colors = colors.numpy()
# colormap = colormap.numpy()
for i,color in enumerate(colors):
# print(colors[i])
color_b[colors[i][0]] = colormap[i][0].item()
color_g[colors[i][1]] = colormap[i][1].item()
color_r[colors[i][2]] = colormap[i][2].item()
# colormap
# +
for i in color_b:
b = np.where(b == i, color_b[i], b)
for i in color_g:
g = np.where(g == i, color_g[i], g)
for i in color_r:
r = np.where(r == i, color_r[i], r)
# -
b = b.reshape(shape)
g = g.reshape(shape)
r = r.reshape(shape)
new_img = cv2.merge((b,g,r))
trans(img)
trans(new_img)
b
b = np.where(b == 128, 0, b)
b
| data/CityScapes/.ipynb_checkpoints/convert Image Mask-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: traffic
# language: python
# name: traffic
# ---
# # Reading image files
# +
import glob
import os
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
# -
# cd
# +
### Training
# -
# ! curl –O https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Training_Images.zip > Documents/hands-on-2021/GTSRB/GTSRB_Final_Training_Images.zip
import zipfile
with zipfile.ZipFile("Documents/hands-on-2021/GTSRB/GTSRB_Final_Training_Images.zip","r") as zip_ref:
zip_ref.extractall("Documents/hands-on-2021/GTSRB")
# +
### Test
# -
# ! curl –O https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Test_GT.zip > Documents/hands-on-2021/GTSRB/GTSRB_Final_Test_GT.zip
import zipfile
with zipfile.ZipFile("Documents/hands-on-2021/GTSRB/GTSRB_Final_Test_GT.zip","r") as zip_ref:
zip_ref.extractall("Documents/hands-on-2021/GTSRB")
# +
TRAINING_CSV_PATH = 'C:/Users/chahinez/Documents/GTSRB_Final_Training_Images/GTSRB/Final_Training/Images/*/*.csv'
IMAGE_WIDTH = 30
IMAGE_HEIGHT = IMAGE_WIDTH
# -
os.path.dirname(TRAINING_CSV_PATH)
# +
def build_images_database(path):
    """Build a pandas DataFrame with all images information.
    Parameters
    ----------
    path: path pattern to read csv files containing images information
    Returns
    -------
    A pandas DataFrame with one line per image
    """
    frames = []
    for csv_path in glob.glob(path):
        frame = pd.read_csv(csv_path, sep=';')
        # Full image path = directory of the csv file + image filename.
        frame['path'] = os.path.dirname(csv_path) + '/' + frame['Filename']
        frames.append(frame)
    return pd.concat(frames, ignore_index=True)
def build_images_list(filename_list, boxes_list=None):
    """Build a list of images as Numpy array
    Parameters
    ----------
    filename_list: list of images filenames
    boxes_list: optional list of boxes for clipping within images. Boxes are 4-tuples (x1, y1, x2, y2).
    Returns
    -------
    A list of images as Numpy arrays
    """
    # With no boxes given, pair every file with box=None so that
    # Image.resize keeps the full image.
    if boxes_list is None:
        boxes_list = [None] * len(filename_list)
    assert len(filename_list) == len(boxes_list), "filenames and boxes lists must have the same length"
    images = []
    for filename, box in zip(filename_list, boxes_list):
        # box argument clips the image to (x1, y1, x2, y2) before resizing
        resized = Image.open(filename).resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=box)
        images.append(np.array(resized))
    return images
def build_classes_array(df, col_name='ClassId'):
    """Build a vector (numpy array) of classes
    Parameters
    ----------
    df : pandas Dataframe with column containing classes id
    col_name : name of the column holding the class ids (default 'ClassId')
    Returns
    -------
    Numpy array with images classes
    """
    # .values exposes the column's underlying numpy array.
    return df[col_name].values
# -
images_df = build_images_database(TRAINING_CSV_PATH)
images_df.head()
# Reading only 200 images for the moment
images_list = build_images_list(images_df.head(200)['path'].values,
images_df.head(200).apply(lambda r: (r['Roi.X1'], r['Roi.Y1'], r['Roi.X2'], r['Roi.Y2']),
axis=1).values)
plt.imshow(images_list[190])
build_classes_array(images_df)
images_df['ClassId'].value_counts()
(images_df['Roi.X2'] - images_df['Roi.X1']).hist(bins=100);
(images_df['Roi.Y2'] - images_df['Roi.Y1']).median()
plt.imshow(Image.open(images_df.query('Width == 162')['path'].values[0]))
plt.imshow(Image.open(images_df.query('Width == 162')['path'].values[0]).resize((30, 30)))
# +
from PIL import Image
img = Image.open('../data/raw/GTSRB_Final_Training_Images/GTSRB/Final_Training/Images/00000/00000_00000.ppm')
# -
plt.imshow(img)
images_df.head(1)
img = Image.open('../data/raw/GTSRB_Final_Training_Images/GTSRB/Final_Training/Images/00010/00029_00010.ppm')
plt.imshow(img)
img = Image.open('../data/raw/GTSRB_Final_Training_Images/GTSRB/Final_Training/Images/00010/00005_00029.ppm')
plt.imshow(img)
plt.imshow(Image.open(images_df.query('ClassId == 20').iloc[20]['path']))
# ## Training the model
# +
# import cv2
import tensorflow as tf
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPool2D, Dense , Flatten , Dropout
# -
# #### Take a sample to test the code
# +
N = 10000
images_list = build_images_list(images_df.head(N)['path'].values,
images_df.head(N).apply(lambda r: (r['Roi.X1'], r['Roi.Y1'], r['Roi.X2'], r['Roi.Y2']),
axis=1).values)
data = np.array(images_list)
labels = build_classes_array(images_df)[:N]
print(data.shape, labels.shape)
# +
# Splitting training and testing dataset
X_train , X_test, y_train,y_test = train_test_split(data, labels , test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# +
#Converting the labels into one hot encoding
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
#Building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation='softmax'))
#Compilation of the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# -
epochs = 15
history = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_test, y_test))
#model.save("my_model.h5")
import tensorflow as tf
tf.__version__
test_model = tf.keras.models.load_model('C:/Users/chahinez/Documents/hands-on-2021/models/traffic_signs_2021-03-28_17-45-02.h5')
test_model.summary()
| notebooks/reading_images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tacotron 2 inference code
# Edit the variables **checkpoint_path** and **text** to match yours and run the entire code to generate plots of mel outputs, alignments and audio synthesis from the generated mel-spectrogram using Griffin-Lim.
# +
# # !pip install http://download.pytorch.org/whl/cu90/torch-0.4.0-cp36-cp36m-linux_x86_64.whl
# # !pip install torchvision
# # !pip install --upgrade tensorflow-gpu
# # !pip install unidecode inflect librosa tensorboardX
# -
# #### Import libraries and setup matplotlib
# +
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import IPython.display as ipd
import numpy as np
import torch
from hparams import create_hparams
from model import Tacotron2
from layers import TacotronSTFT
from audio_processing import griffin_lim
from train import load_model
from text import text_to_sequence
# %matplotlib inline
# -
def plot_data(data, figsize=(16, 4)):
    """Plot each 2-D array in `data` as a side-by-side image panel."""
    fig, axes = plt.subplots(1, len(data), figsize=figsize)
    for ax, array in zip(axes, data):
        ax.imshow(array, aspect='auto', origin='bottom',
                  interpolation='none')
# #### Setup hparams
hparams = create_hparams("distributed_run=False,mask_padding=False")
hparams.sampling_rate = 22050
hparams.filter_length = 1024
hparams.hop_length = 256
hparams.win_length = 1024
# #### Load model from checkpoint
checkpoint_path = "/home/qianhuang/tacotron2/outdir/checkpoint_8000"
model = load_model(hparams)
try:
model = model.module
except:
pass
model.load_state_dict({k.replace('module.',''):v for k,v in torch.load(checkpoint_path)['state_dict'].items()})
_ = model.eval()
# #### Prepare text input
text = "This is an example of text to speech synthesis after 20 hours training."
sequence = np.array(text_to_sequence(text, ['english_cleaners']))[None, :]
sequence = torch.autograd.Variable(
torch.from_numpy(sequence)).cuda().long()
# #### Decode text input and plot results
mel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)
plot_data((mel_outputs.data.cpu().numpy()[0],
mel_outputs_postnet.data.cpu().numpy()[0],
alignments.data.cpu().numpy()[0].T))
mel_np = mel_outputs_postnet.cpu().detach().numpy()
np.save('20hr_mel.npy',mel_np)
# #### Load TacotronSTFT and convert mel-spectrogram to spectrogram
taco_stft = TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
sampling_rate=hparams.sampling_rate)
mel_decompress = taco_stft.spectral_de_normalize(mel_outputs_postnet)
mel_decompress = mel_decompress.transpose(1, 2).data.cpu()
spec_from_mel_scaling = 1000
spec_from_mel = torch.mm(mel_decompress[0], taco_stft.mel_basis)
spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
spec_from_mel = spec_from_mel * spec_from_mel_scaling
# #### Synthesize audio from spectrogram using the Griffin-Lim algorithm
waveform = griffin_lim(torch.autograd.Variable(spec_from_mel[:, :, :-1]),
taco_stft.stft_fn, 60)
ipd.Audio(waveform[0].data.cpu().numpy(), rate=hparams.sampling_rate)
| .ipynb_checkpoints/inference-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Supplementary Script 1: Basic phylogenetic analysis
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
# %matplotlib inline
# +
colors = sns.color_palette()
# path to patient overview file of original cohort (Supplementary Table 1)
overview_fp = 'Supplementary_Table_1.xlsx'
# path to phylogenetic clustering file of original cohort (Supplementary Table 2)
clustering_fp = 'Supplementary_Table_2.xlsx'
col_mono_dm = 'Monophyletic distant mets'
col_mono_lm = 'Monophyletic lymphatic mets'
col_metmono_dm = 'Monophyletic distant mets ALT_CLF'
col_metmono_lm = 'Monophyletic lymphatic mets ALT_CLF'
col_no_dm = 'No distant mets'
col_no_lm = 'No lymphatic mets'
try:
all_overview_df = pd.read_excel(overview_fp)
# analyze Naxerova cohort (Kim cohort is analyzed separately for Fig. 2)
overview_df = all_overview_df[all_overview_df.Dataset == 'Naxerova']
except FileNotFoundError:
print('ERROR: File {} needs to be in the same folder!'.format(overview_fp))
raise
try:
clustering_df = pd.read_excel(clustering_fp)
except FileNotFoundError:
print('ERROR: File {} needs to be in the same folder!'.format(clustering_fp))
raise
# +
# create dataframe for bar plot depicting the observed frequency of monophyletic clades
d = defaultdict(list)
for index, row in overview_df.iterrows():
if not np.isnan(row[col_mono_dm]):
met_type = 'Distant'
d['Patient'].append(row.Patient)
d['MonophyleticClade'].append(True if row[col_mono_dm] == 1 else False)
d['MetMonophyleticClade'].append(True if row[col_metmono_dm] == 1 else False)
d['Type'].append(met_type)
d['NoSamples'].append(clustering_df[(clustering_df.Patient == row.Patient)
& (clustering_df['Metastasis type'] == met_type)].m.values[0])
if not np.isnan(row[col_mono_lm]):
met_type = 'Lymphatic'
d['Patient'].append(row.Patient)
d['MonophyleticClade'].append(True if row[col_mono_lm] == 1 else False)
d['MetMonophyleticClade'].append(True if row[col_metmono_lm] == 1 else False)
d['Type'].append(met_type)
d['NoSamples'].append(clustering_df[(clustering_df.Patient == row.Patient)
& (clustering_df['Metastasis type'] == met_type)].m.values[0])
phyletic_df = pd.DataFrame(data=d)
print('Mean and median number of lymphatic mets for subjects with at least 2 lymphatic mets: {}, {}'.format(
np.mean(phyletic_df[(phyletic_df['Type'] == 'Lymphatic')]['NoSamples']),
np.median(phyletic_df[(phyletic_df['Type'] == 'Lymphatic')]['NoSamples'])))
print('Mean and median number of distant mets for subjects with at least 2 distant mets: {}, {}'.format(
np.mean(phyletic_df[(phyletic_df['Type'] == 'Distant')]['NoSamples']),
np.median(phyletic_df[(phyletic_df['Type'] == 'Distant')]['NoSamples'])))
# +
def compare_monophyletic_clades(df, col_lm, col_dm, label):
    """
    Investigate the fraction of patients with a monophyletic clade of all lymphatic or distant metastases
    :param df: dataframe with patients and whether there were monophyletic clades
    :param col_lm: name of boolean dataframe column for monophyletic lymphatic mets
    :param col_dm: name of boolean dataframe column for monophyletic distant mets
    :param label: what is being investigated
    """
    # counts of evaluable (non-null) subjects and of monophyletic cases per met type
    n_dm = df[col_dm].count()
    n_lm = df[col_lm].count()
    dm_mono = len(df[df[col_dm] == 1])
    lm_mono = len(df[df[col_lm] == 1])
    print('{} of distant metastases: {:.3%} ({}/{})'.format(
        label, dm_mono / n_dm, dm_mono, n_dm))
    print('{} of lymphatic metastases: {:.3%} ({}/{})'.format(
        label, lm_mono / n_lm, lm_mono, n_lm))
    # Fisher's exact test on the 2x2 table of monophyletic vs non-monophyletic counts
    contingency = [[dm_mono, len(df[df[col_dm] == 0])],
                   [lm_mono, len(df[df[col_lm] == 0])]]
    _, pvalue = stats.fisher_exact(contingency)
    print('Probability to observe a more imbalanced ratio by chance is {:.4e}.'.format(pvalue))
def plot_monophyletic_clade(df, y_col, ylabel, filepath=None):
    """
    Create bar plot with the fraction of monophyletic clades per metastasis type
    :param df: dataframe with patients and whether there were monophyletic clades
    :param y_col: name of boolean column with monophyletic clade information
    :param ylabel: y-axis label
    :param filepath: if not None plot is stored at the given path
    """
    plot_height = 3  # plot height (inches)
    ci = 90  # confidence interval shown on the bars
    # NOTE(review): recent seaborn versions replaced the ci= keyword with errorbar= -- confirm version
    sns.set(font_scale = 1.17)
    with sns.axes_style('white', {'axes.grid': False, 'grid.linestyle': u':',
                                  'xtick.labelsize': 13, 'ytick.labelsize': 10,
                                  'ytick.major.size': 4.0}):
        # BUG FIX: plot the dataframe passed by the caller instead of the
        # module-level phyletic_df the original hard-coded (which silently
        # ignored the df argument)
        # colors is a module-level palette defined elsewhere in the notebook
        fg = sns.catplot(x='Type', y=y_col, data=df, ci=ci, height=plot_height,
                         aspect=0.9, palette=colors, kind='bar', order=['Lymphatic', 'Distant'])
        fg.set(ylim=[0, 1], ylabel=ylabel)
        fg.set(xlabel='')
        fg.fig.get_axes()[0].yaxis.set_tick_params(which='major', left='on')
        if filepath is not None:
            plt.savefig(filepath, dpi=150, bbox_inches='tight', transparent=True)
# -
# Compare the monophyletic clades of lymphatic and distant mets (Fig. 1b)
compare_monophyletic_clades(overview_df, col_mono_lm, col_mono_dm, label='Monophyletic clade')
plot_monophyletic_clade(phyletic_df, y_col='MonophyleticClade', ylabel='Monophyletic clade',
                        filepath='pnl_monophyletic_clade.pdf')
# Compare the monophyletic clades of lymphatic and distant mets when no primary tumor samples
# can be in a monophyletic clade (Supplementary Information)
compare_monophyletic_clades(overview_df, col_metmono_lm, col_metmono_dm, label='Monophyletic met clade')
plot_monophyletic_clade(phyletic_df, y_col='MetMonophyleticClade', ylabel='Monophyletic met clade',
                        filepath='pnl_monophyletic_met_clade.pdf')
# +
# Create boxplot to compare the number of samples of lymphatic and distant metastases (Fig. 1e)
# diamond marker for mean per group
meanpointprops = dict(marker='d', markeredgecolor='black', markeredgewidth=0.2,
                      markerfacecolor='magenta', markersize=9)
with sns.axes_style('white', {'axes.grid': False, 'grid.linestyle': u':',
                              'xtick.labelsize': 13, 'ytick.labelsize': 10,
                              'ytick.major.size': 4.0}):
    f, ax = plt.subplots(figsize=(2.0, 2.5))
    # outliers hidden in the boxplot because every point is drawn by the swarmplot below
    sns.boxplot(x='Type', y='NoSamples', data=phyletic_df, palette=colors, whis=True,
                showmeans=True, meanprops=meanpointprops, orient="v", width=0.7, showfliers=False,
                order=['Lymphatic', 'Distant'], ax=ax)
    sns.swarmplot(x='Type', y='NoSamples', data=phyletic_df, color=".3", size=6,
                  order=['Lymphatic', 'Distant'], ax=ax)
    ax.yaxis.set_tick_params(which='major', left='on')
    ax.set(xlabel='');
    ax.set(ylim=[0, 10], ylabel='No of sampled metastases')
    sns.despine()
    plt.savefig('pnl_met_samples.pdf', dpi=150, bbox_inches='tight', transparent=True)
# two-sided Mann-Whitney U test of lymphatic vs distant per-patient sample counts
mwus = stats.mannwhitneyu(phyletic_df[phyletic_df.Type == 'Lymphatic'].NoSamples,
                          phyletic_df[phyletic_df.Type == 'Distant'].NoSamples, alternative='two-sided')
print('Number of lymphatic (mean: {:.3f}, #{}) vs distant (mean {:.3f}, #{}) metastases samples: Mann-Whitney U statistic {:.3f}, p-value {:.4e}'.format(
    np.mean(phyletic_df[phyletic_df.Type == 'Lymphatic'].NoSamples),
    len(phyletic_df[phyletic_df.Type == 'Lymphatic'].NoSamples),
    np.mean(phyletic_df[phyletic_df.Type == 'Distant'].NoSamples),
    len(phyletic_df[phyletic_df.Type == 'Distant'].NoSamples),
    mwus[0], mwus[1]))
# -
| Supplementary_Script_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Housing Price Prediction
# ##### Input Features - size of house, number of bedrooms
# ##### Output Feature - price of house
# ##### Predict House Price given multiple features
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
# +
# Hypothesis Function
def hypothesis(thetarg, x):
    """Linear-model prediction h(x) = x @ theta; shape (m, 1) for an (m, k) design matrix."""
    return np.dot(x, thetarg)
# +
# Cost function used to calculate the error between hypothesis and actual value over m training examples
def cost(x, y, thetarg, m):
    """Half mean squared error J(theta) = (1/2m) * sum((h(x) - y)^2), as a plain float."""
    residual = hypothesis(thetarg, x) - y
    return float((1 / (2 * m)) * np.dot(residual.T, residual))
# +
# Gradient Descent method to minimize cost function in configurable alpha and iterations
def gradient_descent(x, y, thetarg, m, num_iterations=1000, alpha=0.01):
    """Run batch gradient descent, mutating and returning thetarg.

    :param x: (m, k) design matrix (bias column included)
    :param y: (m, 1) target column vector
    :param thetarg: (k, 1) initial parameters; updated in place
    :param m: number of training examples
    :param num_iterations: iteration count (default matches the notebook's global setting)
    :param alpha: learning rate (default matches the notebook's global setting)
    :returns: (final theta, per-iteration theta history, per-iteration cost list)
    """
    jvec = []
    theta_history = []
    for _ in range(num_iterations):
        theta_history.append(list(thetarg[:, 0]))
        jvec.append(cost(x, y, thetarg, m))
        # BUG FIX: update all components of theta simultaneously from one gradient
        # evaluation; the original per-component loop recomputed the hypothesis with
        # partially-updated theta (a coordinate-wise, not gradient-descent, update)
        gradient = np.dot(x.T, hypothesis(thetarg, x) - y) / m
        thetarg -= alpha * gradient
    return thetarg, theta_history, jvec
# -
# ### Load data using Pandas and extract input features and output feature
# NOTE(review): hard-coded absolute path -- runs only on the author's machine
muldata = pd.read_csv("/Users/akshatkumar/Downloads/machine-learning-ex1/ex1/ex1data2.txt")
indata = muldata.drop('price', axis=1)
# prepend a constant bias column x0 = 1 so theta[0] acts as the intercept
indata.insert(0, "x0", 1)
outdata = muldata['price']
# ### Normalization to bring input features to same scale
# +
# z-score normalization; the constant x0 column has std 0 and turns into NaN ...
normin = (indata - indata.mean()) / indata.std()
# ... which this fills with 0.
# NOTE(review): that makes the bias column all zeros, effectively disabling the
# intercept; harmless here only because normout also has zero mean -- confirm intent
normin.fillna(0, inplace=True)
normout = (outdata - outdata.mean()) / outdata.std()
# plain numpy arrays; y reshaped to an (m, 1) column vector
inmatrix = normin.values
outmatrix = normout.values.reshape(outdata.size, 1)
# -
# ### Price Visualization with respect to normalized input features
size = normin['size']
bedrooms = normin['bedrooms']
price = normout
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_label("Price Vizualization with respect to normalized input features")
ax.set_xlabel("size of house")
ax.set_ylabel("number bedrooms")
ax.set_zlabel("price of house")
# actual (size, bedrooms, price) data points
ax.scatter(size, bedrooms, price)
# ### Setting Parameters for Multivariate Linear Regression
theta = np.zeros([inmatrix.shape[1], 1])
msize = len(outmatrix)
num_iterations = 1000
alpha = 0.01
# ### Running Gradient Descent for cost function convergence
theta_final, theta_hist, compute_cost = gradient_descent(inmatrix, outmatrix, theta, msize)
theta_final
# drop the initial cost so the curve starts after the first update;
# range(1, num_iterations) then matches the remaining num_iterations - 1 costs
iterations = list(range(1, num_iterations))
compute_cost.pop(0)
# ### Visualize Cost Function convergence with increase in number of iterations
plt.title("Cost Function fall with iterations")
plt.xlabel("Number of iterations")
plt.ylabel("Cost Function")
plt.plot(iterations, compute_cost)
# ### PREDICTED Price Visualization with respect to normalized input features
size = normin['size']
bedrooms = normin['bedrooms']
price = normout
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_label("Price Vizualization with respect to normalized input features")
ax.set_xlabel("size of house")
ax.set_ylabel("number bedrooms")
ax.set_zlabel("price of house")
# model predictions at the observed (size, bedrooms) points
ax.plot(size, bedrooms, hypothesis(theta_final,inmatrix).flatten())
| House-Price-Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_geospatial-analysis)
# language: python
# name: conda_geospatial-analysis
# ---
import pandas as pd
import geopandas as gpd
from geopandas.tools import sjoin
import glob
import os
def loadFile(file_path):
    """Read a vector file from disk and return it as a GeoDataFrame."""
    # read_file already yields a GeoDataFrame; the explicit wrap keeps the type guaranteed
    return gpd.GeoDataFrame(gpd.read_file(file_path))
def polysWithPoints(parcels, damages, new_file):
    """Spatial-join damage points into parcel polygons and save the result.

    :param parcels: path to the parcel polygon shapefile
    :param damages: path to the damage point shapefile
    :param new_file: output shapefile path
    :returns: the joined GeoDataFrame (parcels that contain damage points)
    """
    parcel_gdf = loadFile(parcels)
    damage_gdf = loadFile(damages)
    # keep parcel polygons whose geometry contains a damage point
    # (renamed local: the original shadowed this function's own name)
    joined = sjoin(parcel_gdf, damage_gdf, op='contains')
    joined.to_file(new_file)
    return joined
def getBaseName(string):
    """Return the token after the last '_' and before the first '.' that follows it.

    Used to pull the county name out of paths like '..._bergen.shp' -> 'bergen'.
    """
    tail = string.rsplit('_', 1)[-1]
    return tail.partition('.')[0]
# base and per-dataset directories holding the county shapefiles
path_base = "/media/seanandrewchen/seanchen_ssd/gis-data/"
path_parcels = "parcels/parcels-by-county/"
path_damages = "damages/damages-by-county/"
# sorted so parcel and damage files line up county-for-county when zipped below
counties_parcels = sorted(glob.glob(path_base + path_parcels + "/*.shp"))
counties_damages = sorted(glob.glob(path_base + path_damages + "/*.shp"))
# +
# county name extracted from each parcel filename
counties = []
for e in counties_parcels:
    counties.append(getBaseName(e))
# -
# (parcel shapefile, damage shapefile, county name) triples
files = list(zip(counties_parcels, counties_damages, counties))
for file in files:
    newFileName = "affected-parcels-" + file[2] + ".shp"
    polysWithPoints(file[0], file[1], newFileName)
| notebooks/training-data-processing/select_parcels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NumPy Array Element Selection
import numpy as np
# +
# integers 10..19
nums = np.arange(10, 20)
nums
# +
# boolean mask of elements greater than 14
gt4 = nums > 14
print(type(gt4))
# nums[gt4]
# boolean-mask indexing: keep positions where the mask is True
nums[ [False, False, False, False, False, True, True, True, True, True] ]
# -
# fancy indexing with an explicit list of positions
nums[ [1,3,5 ] ]
# masked selection combined with arithmetic (returns a copy, nums unchanged)
nums[nums > 14] * 2
# +
# nums[nums > 14] = nums[nums > 14] * 2
gt14 = nums > 14
# NOTE: the masked elements are doubled twice here (in-place, then again by assignment)
nums[gt14] *= 2
nums[gt14] = nums[gt14] * 2
nums
# +
# plain index assignment
nums[0] = 999
nums
# +
# for index, value in np.ndenumerate(nums):
# nums[index[0]] = value * 2
# nums
# np.vectorize applies a scalar function elementwise (a convenience, not a speedup)
double_me_if_gt14 = lambda value: value * 2 if value > 14 else value
double_me_if_gt14 = np.vectorize(double_me_if_gt14)
print(double_me_if_gt14(nums))
nums
# +
matrix_nums = np.arange(36).reshape(6,6)
matrix_nums
# +
# reshape here returns a view sharing the same data (demonstrated below)
onedim_matrix_nums = matrix_nums.reshape(36)
onedim_matrix_nums
# -
# writing through the view also changes matrix_nums
onedim_matrix_nums[5] = 9999
onedim_matrix_nums
matrix_nums
# row 1
matrix_nums[1,:]
# column 1
matrix_nums[:,1]
# columns 1 and 3 (start 1, stop 5, step 2)
matrix_nums[:,1:5:2]
| jupyter_conda/05_Numpy_SelectElements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# ### PPP Calculation of Csh, Total Sandstone Porosity, Sw in Payzone Using Laminated Aquifer Slope Method
#
#
# Use this workflow having already computed CshAquifer(array),Rw,a,m,n from the picket plot in excel from the aquifer. Prepare arrays for:
# - Gamma Ray values every 2ft of pay
# - Neutron and Density values every 2ft of pay
# - Rt values every 2 ft of pay (no need to calculate Rss)
#
# use https://apps.automeris.io/wpd/
# link to github: https://github.com/AdrianSalinas98/Petroleum-Engineering-Repository
#
#
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#- Gamma Shale: 1 value (local max) **NEAR THE PAY ZONE**
#- Gamma Sand: 1 value (local min) **APPROXIMATING AQUIFER**
#- Gamma Aquifer: 1 value (local min)
#- Gamma Pay: Gamma ray of the Pay Zone **[Array]**
#- Csh Aquifer: shale concentration of Aquifer 1 value **MUST BE AT SAME DEPTH AS Gamma Aquifer**
#returns [Array]: CshPay: shale concentration along the payzone
def Csh(GammaShale,GammaAquifer,GammaPay,CshAquifer):
    """Shale concentration along the pay zone via the laminated-aquifer slope method.

    A line through (GammaShale, 1) and (GammaAquifer, CshAquifer) is evaluated
    at each GammaPay reading. Accepts scalars or numpy arrays for GammaPay.
    """
    scale = (1 - CshAquifer) / (GammaShale - GammaAquifer)  # Csh change per API unit
    return scale * (GammaPay - GammaShale) + 1
def CshNew(GammaShale,GammaSand,GammaPay):
    """Linear gamma-ray shale index: 0 at GammaSand, 1 at GammaShale."""
    span = GammaShale - GammaSand
    return (GammaPay - GammaSand) / span
#-----------------------------------------------------------------------------------------
#NPay: nuetron porosity every 2 ft of pay [Array]
#DPay: density every 2 ft of pay [Array]
#NShale: nuetron porosity of pure shale (1 value)
#DShale: density porosity of pure shale (1 value)
#CshPay: input the CshPay [Array] from above
#returns [Array]: PhiTotal: corrected, total porosity in the pay zone
def TotalPorosity(NPay,DPay,NShale,DShale,CshPay):
    """Shale-corrected total porosity: RMS of the corrected neutron and density porosities.

    Each log reading has the shale contribution (CshPay * shale value) removed
    and is rescaled to the clean-sand fraction before averaging.
    """
    clean_fraction = 1 - CshPay
    phi_density = (DPay - CshPay * DShale) / clean_fraction
    phi_neutron = (NPay - CshPay * NShale) / clean_fraction
    return ((phi_neutron ** 2 + phi_density ** 2) / 2) ** .5
#-----------------------------------------------------------------------------------------
#Rw: single value from picket plot
#Rt: DEEP resistivity every 2 ft of pay [Array]
#phiTotal: input array from above
#a,m,n single values from picket plot:
#returns [2-D Array]: of Sw and Shc
def Saturations(Rw,Rt,phiTotal,a,m,n):
    """Archie water and hydrocarbon saturations.

    Sw = ((Rw/Rt) * (a / phi^m))^(1/n); Shc = 1 - Sw.
    :returns: numpy array [Sw, Shc]
    """
    sw = ((Rw / Rt) * (a / phiTotal ** m)) ** (1 / n)
    return np.array([sw, 1 - sw])
#-----------------------------------------------------------------------------------------
#For loops using arrays: GammaPay,Rw,Npay,Dpay,Rt values. Return distribution, weighted
# per-depth-step log readings along the pay zone, digitized into template.csv
data = pd.read_csv('template.csv')
GammaPay = np.array(data['GammaPay'])
Rw = np.array(data['Rw'])
Rt = np.array(data['Rt'])
NPay = np.array(data['Npay'])
DPay = np.array(data['Dpay'])
#GammaAquifer is assumed with the I-1 log, so if your pay zone is a Haliburton log, subtract 15 from API reading
# gamma-ray end points and shale log responses (single values per well)
GammaShale = 135
GammaAquifer = 60
GammaSand = 45
CshAquifer = 0.1667
NShale = .36 #.402
DShale = .168 #.23
# Archie parameters from the picket plot
a = 1
m = 2
n = 2.5
# shale concentration per depth step, by both methods (aquifer-slope and gamma index)
CshPay = np.array([])
CshPayNew = np.array([])
for i in range(len(GammaPay)):
    calc = Csh(GammaShale,GammaAquifer,GammaPay[i],CshAquifer)
    CshPay = np.append(CshPay,calc)
    calcNew = CshNew(GammaShale,GammaSand,GammaPay[i])
    CshPayNew = np.append(CshPayNew,calcNew)
# shale-corrected total porosity per depth step, for both Csh variants
PorosityPay = np.array([])
PorosityPayNew = np.array([])
for i in range(len(CshPay)):
    calc1 = TotalPorosity(NPay[i],DPay[i],NShale,DShale,CshPay[i])
    PorosityPay = np.append(PorosityPay,calc1)
    calc1New = TotalPorosity(NPay[i],DPay[i],NShale,DShale,CshPayNew[i])
    PorosityPayNew = np.append(PorosityPayNew,calc1New)
# Archie saturations per depth step
WaterSaturationPay = np.array([])
WaterSaturationPayNew = np.array([])
OilSaturationPay = np.array([])
for i in range(len(PorosityPay)):
    calc2 = Saturations(Rw[i],Rt[i],PorosityPay[i],a,m,n)
    WaterSaturationPay = np.append(WaterSaturationPay,calc2[0])
    OilSaturationPay = np.append(OilSaturationPay,calc2[1])
    calc2New = Saturations(Rw[i],Rt[i],PorosityPayNew[i],a,m,n)
    WaterSaturationPayNew = np.append(WaterSaturationPayNew,calc2New[0])
# print(PorosityPay)
# print(WaterSaturationPay)
#data['PorosityPay'] = PorosityPay
#data['WaterSaturation'] = WaterSaturationPay
# append the "new method" columns and export everything for the spreadsheet workflow
data['PorosityPayNew'] = PorosityPayNew
data['WaterSaturationPayNew'] = WaterSaturationPayNew
# print(PorosityPayNew)
# print(WaterSaturationPayNew)
data.to_csv('SandExport.csv')
data
# -
# ## Piecewise function for Depth Correlation: Well I-4 (MD to TVD relationship)
# +
md = pd.read_csv('md.csv')
md.head()
# NOTE(review): MD and TVD are read but unused below -- presumably the fit inputs; confirm
MD = np.array(md['MD'])
TVD = np.array(md['TVD'])
depths = np.array(md['depths'])
newdepths = np.zeros(len(depths))
# piecewise-linear MD -> TVD conversion over three measured-depth intervals
for i in range(0,len(depths)):
    if depths[i] < 6500.1:
        newdepths[i] = (0.968*depths[i] + 180.991)
    elif depths[i] > 6500.1 and depths[i] < 7275.1:
        newdepths[i] = (0.865*depths[i] + 845.35)
    elif depths[i] > 7275.1:
        newdepths[i] = (0.8394*depths[i] + 1034.2)
# NOTE(review): a depth exactly equal to 6500.1 or 7275.1 matches no branch and stays 0
print(depths)
print(newdepths)
plt.plot(depths, 'r-',label = 'MD')
plt.plot(newdepths, 'b-', label = 'TVD')
md['depthTVD'] = newdepths
md.to_csv('DepthExport.csv')
# -
# ### Calculating Average Porosity and Water Saturation for each Sand
#
# - The average distributions across each sand will be weighted based on how much feet of pay each zone has.
# - Wells that have thicker payzones will contribute more weight to the average values of Porosity and water saturation.
#
# importing data for distribution visuals, weighted averages already calculated.
# +
# per-sand porosity and water-saturation distributions (weighted averages precomputed upstream)
sands = pd.read_csv('sands.csv')
F3Por = sands['F3Por']
F3w = sands['F3w']
F4Por = sands['F4Por']
F4w = sands['F4w']
F5Por = sands['F5Por']
F5w = sands['F5w']
# 3x2 grid: one row per sand, porosity (red) on the left, water saturation (blue) on the right
# NOTE(review): density=True normalizes the y-axis, so the 'Count' labels are not raw counts
bins1 = 20
ax1 = plt.subplot(3,2,1)
n, bins,patches = ax1.hist(F3Por,bins1,facecolor = 'r',density=True)
ax1.set_xlim([.1,.4])
ax1.set_xlabel('Porosity' )
ax1.set_ylabel('Count')
ax1.set_title('F3 Sand Porosity')
bins2 = 20
ax2 = plt.subplot(3,2,2)
n, bins,patches = ax2.hist(F3w,bins2,facecolor = 'b',density=True)
ax2.set_xlim([.2,1])
ax2.set_xlabel('Water Saturation')
ax2.set_ylabel('Count')
ax2.set_title('F3 Sand Water Saturation')
bins3 = 15
ax3 = plt.subplot(3,2,3)
n, bins,patches = ax3.hist(F4Por,bins3,facecolor = 'r',density=True)
ax3.set_xlim([.1,.4])
ax3.set_xlabel('Porosity' )
ax3.set_ylabel('Count')
ax3.set_title('F4 Sand Porosity')
bins4 = 15
ax4 = plt.subplot(3,2,4)
n, bins,patches = ax4.hist(F4w,bins4,facecolor = 'b',density=True)
ax4.set_xlim([.2,1])
ax4.set_xlabel('Water Saturation')
ax4.set_ylabel('Count')
ax4.set_title('F4 Sand Water Saturation')
bins5 = 30
ax5 = plt.subplot(3,2,5)
n, bins,patches = ax5.hist(F5Por,bins5,facecolor = 'r',density=True)
ax5.set_xlim([.1,.4])
ax5.set_xlabel('Porosity' )
ax5.set_ylabel('Count')
ax5.set_title('F5 Sand Porosity')
bins6 = 30
ax6 = plt.subplot(3,2,6)
n, bins,patches = ax6.hist(F5w,bins6,facecolor = 'b',density=True)
ax6.set_xlim([.2,1])
ax6.set_xlabel('Water Saturation')
ax6.set_ylabel('Count')
ax6.set_title('F5 Sand Water Saturation')
# bins7 = 15
# ax7 = plt.subplot(4,2,7)
# n, bins,patches = ax7.hist(F5DPor,bins7,facecolor = 'r',density=True)
# ax7.set_xlim([.1,.4])
# ax7.set_xlabel('Porosity' )
# ax7.set_ylabel('Count')
# ax7.set_title('F5D Sand Porosity')
# bins8 = 15
# ax8 = plt.subplot(4,2,8)
# n, bins,patches = ax8.hist(F5Dw,bins8,facecolor = 'b',density=True)
# ax8.set_xlim([.2,1])
# ax8.set_xlabel('Water Saturation')
# ax8.set_ylabel('Count')
# ax8.set_title('F5D Sand Water Saturation')
plt.subplots_adjust(left=0.0, bottom=0.0, right=1.5, top=2.5, wspace=.5, hspace=.5)
# +
# summary statistics with the precomputed thickness-weighted means prepended as a row
summary = sands.describe()
df2 = pd.DataFrame([[0.31,0.51,0.26,0.71,0.29,0.59]], columns=['F3Por','F3w','F4Por','F4w','F5Por','F5w'],
                   index = ['Weighted Mean Porosity'])
summaryR = pd.concat([df2, summary]).round(2)
summaryR
# df3 = pd.DataFrame([[]],columns=['F3Por','F3w','F4Por','F4w','F5BPor','F5Bw','F5DPor','F5Dw'],
# index = ['Net to Gross Ratio'])
# -
# ## Contour Map
#
# Lets start by reading in the digitized data from our plats
# +
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from scipy.interpolate import griddata
# digitized plat data: columns 2,3 = control-point x,y; column 4 = depth value;
# columns 0,1 = reservoir boundary polygon vertices (TODO confirm column layout vs Plat.csv)
plat = pd.read_csv('Plat.csv')
points = np.array(plat.iloc[0:23,[2,3]].copy()) # x,y
values = np.array(plat.iloc[0:23,4].copy()) # z
boundary_points = np.array(plat.iloc[:,[0,1]].copy())
print(values[-1])
print(points[-1])
#min and max of x and y points chosen for the boundary, @ z resolution
Nx = 500
Ny = 500
xi = np.linspace(-3000, 5000,Nx)
yi = np.linspace(0, 5000,Ny)
X,Y = np.meshgrid(xi,yi)
# first pass: cubic interpolation (NaN outside the convex hull of the control points)
Z = griddata(points,values,(X,Y),method = 'cubic')
boolean = ~np.isnan(Z) #does opposite of replacing nan with true(nan are false)
#choose values based on true index(real values)
#doing the same as spreadsheet but with real values (make x_column beside y_column and corresponding Z_column)
Z1 = Z[boolean]
Z1 = Z1.reshape(-1)
X1 = X[boolean]
X1 = X1.reshape(-1)
Y1 = Y[boolean]
Y1 = Y1.reshape(-1)
points2 = np.column_stack((X1,Y1))
# second pass: nearest-neighbour fill so every grid cell has a value
Z2 = griddata(points2,Z1,(X,Y),method = 'nearest')
#remove negative depth values
Z2 = np.where(Z2<0,0.0,Z2)
#set boundary as a path object
path = Path(boundary_points)
#flatten our figure space(X,Y meshgrid), then arrange in (N by 2 array for 'contains_points' function)
XX,YY = X.flatten(), Y.flatten()
XY = np.vstack((XX,YY)).T
path_boolean = path.contains_points(XY)
path_boolean = path_boolean.reshape(Ny,Nx)
#make points 'outside' path nan
outside = path_boolean == False
Z2[outside] = np.nan
# Plot the interpolated surface, the reservoir boundary, and the well locations.
fig,ax = plt.subplots(constrained_layout = True)
C = ax.contourf(X,Y,Z2)
cbar = fig.colorbar(C)
#visualize reservoir boundary
patch = PathPatch(path,facecolor = 'none',edgecolor='r')
ax.add_patch(patch)
#show wells: circles ('ro') = surface locations, crosses ('rx') = bottom-hole locations
ax.plot(1531.419458,2480.929597,'ro',label = 'I1')
ax.plot(1702.392344,1448.838004,'ro',label = 'I2')
ax.plot(635.9383307,1781.909319,'ro',label = 'I3')
ax.plot(2616.05529,957.5985418,'ro',label = 'I4_SH')
ax.plot(3255.077087,2229.756209,'rx',label = 'I4_BHL')
# dash-dot line from surface hole to bottom hole of deviated well I4
I4sh = [2616.05529,3255.077087]
I4bh = [957.5985418,2229.756209]
ax.plot(I4sh,I4bh,'r-.')
ax.plot(-329.6119086,1975.176578,'ro',label = 'N1')
# NOTE(review): N1 and NST_SH are plotted at identical coordinates -- confirm intended
ax.plot(-329.6119086,1975.176578,'ro',label = 'NST_SH')
ax.plot(-263.7958533,2592.891319,'rx',label = 'NST_BHL')
N1STsh = [-329.6119086,-263.7958533]
N1STbh = [1975.176578,2592.891319]
ax.plot(N1STsh,N1STbh,'r-.')
# BUG FIX: 'ax.legend = True' only overwrote the legend method with a boolean and
# never drew anything; call the method so the well labels actually appear
ax.legend()
print(boundary_points)
# -
# ## THICKNESS
# +
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from scipy.interpolate import griddata
# same gridding workflow as the depth contour map above, but on thickness values
plat = pd.read_csv('Thicc.csv')
points = np.array(plat.iloc[0:23,[2,3]].copy()) # x,y
values = np.array(plat.iloc[0:23,4].copy()) # z
boundary_points = np.array(plat.iloc[:,[0,1]].copy())
print(values[-1])
print(points[-1])
#min and max of x and y points chosen for the boundary, @ z resolution
Nx = 500
Ny = 500
xi = np.linspace(-3000, 5000,Nx)
yi = np.linspace(0, 5000,Ny)
X,Y = np.meshgrid(xi,yi)
# first pass: cubic interpolation (NaN outside the convex hull of the control points)
Z = griddata(points,values,(X,Y),method = 'cubic')
boolean = ~np.isnan(Z) #does opposite of replacing nan with true(nan are false)
#choose values based on true index(real values)
#doing the same as spreadsheet but with real values (make x_column beside y_column and corresponding Z_column)
Z1 = Z[boolean]
Z1 = Z1.reshape(-1)
X1 = X[boolean]
X1 = X1.reshape(-1)
Y1 = Y[boolean]
Y1 = Y1.reshape(-1)
points2 = np.column_stack((X1,Y1))
# second pass: nearest-neighbour fill so every grid cell has a value
Z2 = griddata(points2,Z1,(X,Y),method = 'nearest')
#remove negative thickness values
Z2 = np.where(Z2<0,0.0,Z2)
#set boundary as a path object
path = Path(boundary_points)
#flatten our figure space(X,Y meshgrid), then arrange in (N by 2 array for 'contains_points' function)
XX,YY = X.flatten(), Y.flatten()
XY = np.vstack((XX,YY)).T
path_boolean = path.contains_points(XY)
path_boolean = path_boolean.reshape(Ny,Nx)
#make points 'outside' path nan
outside = path_boolean == False
Z2[outside] = np.nan
# Plot the interpolated thickness surface, the reservoir boundary, and the wells.
fig,ax = plt.subplots(constrained_layout = True)
C = ax.contourf(X,Y,Z2)
cbar = fig.colorbar(C)
#visualize reservoir boundary
patch = PathPatch(path,facecolor = 'none',edgecolor='r')
ax.add_patch(patch)
#show wells: circles ('ro') = surface locations, crosses ('rx') = bottom-hole locations
ax.plot(1531.419458,2480.929597,'ro',label = 'I1')
ax.plot(1702.392344,1448.838004,'ro',label = 'I2')
ax.plot(635.9383307,1781.909319,'ro',label = 'I3')
ax.plot(2616.05529,957.5985418,'ro',label = 'I4_SH')
ax.plot(3255.077087,2229.756209,'rx',label = 'I4_BHL')
# dash-dot line from surface hole to bottom hole of deviated well I4
I4sh = [2616.05529,3255.077087]
I4bh = [957.5985418,2229.756209]
ax.plot(I4sh,I4bh,'r-.')
ax.plot(-329.6119086,1975.176578,'ro',label = 'N1')
# NOTE(review): N1 and NST_SH are plotted at identical coordinates -- confirm intended
ax.plot(-329.6119086,1975.176578,'ro',label = 'NST_SH')
ax.plot(-263.7958533,2592.891319,'rx',label = 'NST_BHL')
N1STsh = [-329.6119086,-263.7958533]
N1STbh = [1975.176578,2592.891319]
ax.plot(N1STsh,N1STbh,'r-.')
# BUG FIX: 'ax.legend = True' only overwrote the legend method with a boolean and
# never drew anything; call the method so the well labels actually appear
ax.legend()
print(boundary_points)
# -
| Calculation of Csh, Total Sandstone Porosity, Sw in Payzone.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Format DataFrame
# +
import pandas as pd
from sklearn.datasets import load_breast_cancer
# scikit-learn's bundled breast-cancer dataset as a DataFrame with the target appended
data = load_breast_cancer()
train_df = pd.DataFrame(data.data, columns=data.feature_names)
train_df["diagnosis"] = data.target
print(train_df.shape)
train_df.head()
# -
# # Set Up Environment
# +
from hyperparameter_hunter import Environment, CVExperiment
# the active Environment supplies data, metric, and CV settings to every experiment below
env = Environment(
    train_dataset=train_df,
    results_path="HyperparameterHunterAssets",
    target_column="diagnosis",
    metrics=["roc_auc_score"],
    cv_type="StratifiedKFold",
    # NOTE(review): recent scikit-learn raises if random_state is set without shuffle=True -- confirm pinned version
    cv_params=dict(n_splits=5, random_state=32),
    verbose=1,
)
# -
# We're initializing our `Environment` with `verbose=1` to tell our experiments to only log the essentials because we're about to run lots of experiments.
#
# Now that HyperparameterHunter has an active `Environment`, we can do two things:
#
# # 1. Perform Experiments
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
# each CVExperiment cross-validates one algorithm with default hyperparameters ({})
# under the active Environment and records the result for later optimization
experiment_0 = CVExperiment(KNeighborsClassifier, {})
# -
experiment_1 = CVExperiment(SVC, {})
experiment_2 = CVExperiment(LinearSVC, {})
experiment_3 = CVExperiment(NuSVC, {})
experiment_4 = CVExperiment(DecisionTreeClassifier, {})
experiment_5 = CVExperiment(RandomForestClassifier, {})
experiment_6 = CVExperiment(AdaBoostClassifier, {})
experiment_7 = CVExperiment(GradientBoostingClassifier, {})
experiment_8 = CVExperiment(GaussianNB, {})
experiment_9 = CVExperiment(LinearDiscriminantAnalysis, {})
experiment_10 = CVExperiment(QuadraticDiscriminantAnalysis, {})
experiment_11 = CVExperiment(MLPClassifier, {})
# Of course, SKLearn has many more algorithms than those shown here, but I think you get the idea.
#
# Notice that in all the above experiments, we gave `CVExperiment` `model_init_params={}`. Passing an empty dict tells it to use the default hyperparameters for the `model_initializer`, which it'll figure out on its own.
#
# # 2. Hyperparameter Optimization
#
# We're just going to do optimization on one of the algorithms used above (`AdaBoostClassifier`); although, HyperparameterHunter can certainly do consecutive optimization rounds.
#
# Notice below that `optimizer` correctly identifies `experiment_6` as being the only saved experiment it can learn from because it's optimizing `AdaBoostClassifier`.
# +
from hyperparameter_hunter import RandomForestOptPro, Real, Integer, Categorical
# 12 optimization iterations over the AdaBoost search space declared below
optimizer = RandomForestOptPro(iterations=12, random_state=42)
optimizer.forge_experiment(
    model_initializer=AdaBoostClassifier,
    model_init_params=dict(
        n_estimators=Integer(25, 100),
        learning_rate=Real(0.5, 1.0),
        algorithm=Categorical(["SAMME", "SAMME.R"]),
    ),
)
optimizer.go()
| examples/sklearn_examples/classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import os
import folium
import pickle
import json
import pandas as pd
import branca.colormap as cm
from collections import defaultdict
print(folium.__version__)
# +
#us_states = os.path.join('..\\example\\data', 'us-states.json')
#US_Unemployment_Oct2012 = os.path.join('..\\example\\data', 'US_Unemployment_Oct2012.csv')
# GeoJSON of French IRIS statistical zones
fr_iris = "../BIGBASE/iris_clean.json"
print("loading IRIS data")
geo_json_data = json.load(open(fr_iris))
print("loading JSON data")
with open('../BIGBASE/iris_revenu_dataviz.pickle', 'rb') as handle:
    FR_Revenue = pickle.load(handle)
# keep only the IRIS id and the RFPQ211 column (presumably a revenue indicator -- confirm)
FR_Revenue = FR_Revenue[["IRIS","RFPQ211"]]
print(FR_Revenue.shape)
#fr_revenue_dict = FR_Revenue.set_index('IRIS')['RFPQ211']
# IRIS id -> RFPQ211, with 1400 as the fallback for zones missing from the table
fr_revenue_dict = defaultdict(lambda: 1400)
for index, row in FR_Revenue.iterrows():
    fr_revenue_dict[row["IRIS"]]=row["RFPQ211"]
# -
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
plt.figure();
# distribution of the RFPQ211 values
myplot = FR_Revenue[["RFPQ211"]].plot.hist(bins=1000)
plt.show()
# +
def my_color_function(feature):
    """Maps low values to green and high values to red.

    Looks the zone's DCOMIRIS id up in the module-level fr_revenue_dict.
    """
    value = fr_revenue_dict[feature['properties']['DCOMIRIS']]
    return '#ff0000' if value > 22200 else '#008000'
#m = folium.Map([48.8566, 2.3522], tiles='cartodbpositron', zoom_start=2)
# small map centred on Paris
m = folium.Map(width=250,height=250,location=[48.8566, 2.3522], zoom_start=4)
# style each IRIS polygon with the red/green fill from my_color_function
folium.GeoJson(
    geo_json_data,
    style_function=lambda feature: {
        'fillColor': my_color_function(feature),
        'color': 'black',
        'weight': 2,
        'dashArray': '5, 5'
    }
).add_to(m)
# (commented-out reference example this cell was adapted from)
#
# def my_color_function(feature):
#     """Maps low values to green and high values to red."""
#     if unemployment_dict[feature['id']] > 6.5:
#         return '#ff0000'
#     else:
#         return '#008000'
#
# m = folium.Map([43, -100], tiles='cartodbpositron', zoom_start=4)
#
# folium.GeoJson(
#     geo_json_data,
#     style_function=lambda feature: {
#         'fillColor': my_color_function(feature),
#         'color': 'black',
#         'weight': 2,
#         'dashArray': '5, 5'
#     }
# ).add_to(m)
#
# m.save(os.path.join('results', 'Colormaps_0.html'))
#
# m
# -
#m.save(os.path.join('results', 'Colormaps_2.html'))
m
| python_dashboard/caf_insee_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="u__kG-T18Dlg"
from numpy import *
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
# Pricing a European option using Black-Scholes formula and Monte Carlo simulations
# Pricing a Barrier option using Monte Carlo simulations
S0 = 100 # spot price of the underlying stock today
K = 105 # strike at expiry
mu = 0.05 # expected return
sigma = 0.2 # volatility
r = 0.05 # risk-free rate
T = 1.0 # years to expiry
Sb = 110 # barrier
# NOTE: mu equals r here, so the Monte Carlo prices should agree with Black-Scholes
# Complete the following functions
def BS_european_price(S0, K, T, r, sigma):
    """Black-Scholes prices of a European call and put (valued at t = 0).

    :returns: (call_price, put_price)
    """
    sqrt_T = np.sqrt(T)
    d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sqrt_T)
    d2 = d1 - sigma * sqrt_T
    discount = np.exp(-r * T)
    call = S0 * norm.cdf(d1) - K * discount * norm.cdf(d2)
    put = K * discount * norm.cdf(-d2) - S0 * norm.cdf(-d1)
    return call, put
def MC_european_price(S0, K, T, r, mu, sigma, numSteps, numPaths):
    """Monte Carlo price of a European call and put under GBM.

    Simulates geometric Brownian motion with drift mu and volatility sigma over
    numSteps time steps and numPaths paths, then discounts the mean terminal
    payoff at rate r. When mu == r this is the risk-neutral price.

    PERF: the original per-path/per-step Python double loop (numPaths * numSteps
    iterations; 2.5e8 for the notebook's settings) is replaced by a vectorized
    simulation drawing all normals at once -- the terminal-price distribution is
    identical.

    :returns: (call_price, put_price)
    """
    # dT is the time increment (in years)
    dT = T / numSteps
    # one standard-normal draw per (step, path)
    z = np.random.normal(0, 1, size=(numSteps, numPaths))
    log_returns = (mu - 0.5 * sigma ** 2) * dT + sigma * np.sqrt(dT) * z
    # terminal prices: accumulate the log-returns of every step
    ST = S0 * np.exp(log_returns.sum(axis=0))
    # discounted mean payoffs
    disc = np.exp(-r * T)
    c = disc * np.mean(np.maximum(ST - K, 0))
    p = disc * np.mean(np.maximum(K - ST, 0))
    return c, p
def MC_barrier_knockin_price(S0, Sb, K, T, r, mu, sigma, numSteps, numPaths):
    """Monte Carlo price of up-and-in barrier call and put options.

    A path's payoff counts only if the simulated price touches or exceeds the
    barrier Sb at some observation time before expiry; otherwise the payoff is
    zero. The initial price S0 is included in the barrier check, matching the
    original implementation (so Sb <= S0 means every path is knocked in).

    PERF: replaces the original per-path/per-step Python double loop with a
    vectorized simulation; the path distribution is identical.

    :returns: (call_price, put_price)
    """
    # dT is the time increment (in years)
    dT = T / numSteps
    z = np.random.normal(0, 1, size=(numSteps, numPaths))
    increments = (mu - 0.5 * sigma ** 2) * dT + sigma * np.sqrt(dT) * z
    # full price paths, including the initial-price row used by the barrier test
    paths = np.empty((numSteps + 1, numPaths))
    paths[0] = S0
    paths[1:] = S0 * np.exp(np.cumsum(increments, axis=0))
    # knocked in if any observation of the path reached the barrier
    knocked_in = (paths >= Sb).any(axis=0)
    ST = paths[-1]
    call_payoff = np.where(knocked_in, np.maximum(ST - K, 0), 0.0)
    put_payoff = np.where(knocked_in, np.maximum(K - ST, 0), 0.0)
    # discounted mean payoffs
    disc = np.exp(-r * T)
    return disc * np.mean(call_payoff), disc * np.mean(put_payoff)
# + colab={"base_uri": "https://localhost:8080/"} id="o0Un9zya8Dlq" outputId="9d9d0d8c-7e1d-4e01-81ef-32ed32e5ac91"
# Define variable numSteps to be the number of steps for multi-step MC
# numPaths - number of sample paths used in simulations
numSteps = 252
numPaths = 1000000
# Implement your Black-Scholes pricing formula
call_BS_European_Price, putBS_European_Price = BS_european_price(S0, K, T, r, sigma)
# Implement your one-step Monte Carlo pricing procedure for European option
callMC_European_Price_1_step, putMC_European_Price_1_step = MC_european_price(S0, K, T, r, mu, sigma, 1, numPaths)
# Implement your multi-step Monte Carlo pricing procedure for European option
callMC_European_Price_multi_step, putMC_European_Price_multi_step = MC_european_price(S0, K, T, r, mu, sigma, numSteps, numPaths)
# Implement your one-step Monte Carlo pricing procedure for Barrier option
callMC_Barrier_Knockin_Price_1_step, putMC_Barrier_Knockin_Price_1_step = MC_barrier_knockin_price(S0, Sb, K, T, r, mu, sigma, 1, numPaths)
# # Implement your multi-step Monte Carlo pricing procedure for Barrier option
callMC_Barrier_Knockin_Price_multi_step, putMC_Barrier_Knockin_Price_multi_step = MC_barrier_knockin_price(S0, Sb, K, T, r, mu, sigma, numSteps, numPaths)
# report every price; since mu == r the MC European prices should be close to Black-Scholes
print('Black-Scholes price of an European call option is ' + str(call_BS_European_Price))
print('Black-Scholes price of an European put option is ' + str(putBS_European_Price))
print('One-step MC price of an European call option is ' + str(callMC_European_Price_1_step))
print('One-step MC price of an European put option is ' + str(putMC_European_Price_1_step))
print('Multi-step MC price of an European call option is ' + str(callMC_European_Price_multi_step))
print('Multi-step MC price of an European put option is ' + str(putMC_European_Price_multi_step))
print('One-step MC price of an Barrier call option is ' + str(callMC_Barrier_Knockin_Price_1_step))
print('One-step MC price of an Barrier put option is ' + str(putMC_Barrier_Knockin_Price_1_step))
print('Multi-step MC price of an Barrier call option is ' + str(callMC_Barrier_Knockin_Price_multi_step))
print('Multi-step MC price of an Barrier put option is ' + str(putMC_Barrier_Knockin_Price_multi_step))
# + [markdown] id="SMX6FfB2M5dI"
# Plot one chart in Python that illustrates your Monte Carlo pricing procedure in the best way.
# + id="iYiHndH8snS5"
# Plot one chart illustrates Monte Carlo pricing procedure in the best way
def plot_MC (S0, Sb, K, T, r, mu, sigma, numSteps, numPaths):
    """Simulate GBM price paths, plot them with the barrier level, and return them.

    Parameters mirror the pricing routines: S0 initial price, Sb barrier level,
    K strike and r risk-free rate (unused here, kept for a uniform signature),
    T maturity in years, mu drift, sigma volatility, numSteps time steps and
    numPaths simulated scenarios.
    Returns the (numSteps + 1, numPaths) array of simulated prices.
    """
    # dT is the time increment (in years)
    dT = T / numSteps
    # Draw every normal shock at once instead of looping path-by-path:
    # same GBM recursion as before, vectorized for speed (the original nested
    # Python loops were very slow for 50000 paths).
    shocks = np.random.normal(0, 1, size=(numSteps, numPaths))
    log_increments = (mu - 0.5 * sigma ** 2) * dT + sigma * np.sqrt(dT) * shocks
    paths = np.empty((numSteps + 1, numPaths))
    # First asset price is the initial price
    paths[0] = S0
    # Cumulative sums of log-returns turn per-step factors into full paths.
    paths[1:] = S0 * np.exp(np.cumsum(log_increments, axis=0))
    # Plot paths (each column of `paths` is one scenario)
    plt.plot(paths, linewidth=2, color='black')
    # Horizontal barrier line across the whole chart; the original plotted it
    # over numSteps points only, one step short of the final time.
    plt.axhline(Sb)
    plt.title('Monte Carlo pricing procedure')
    plt.xlabel('Time')
    plt.ylabel('Price of underlying')
    return paths
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="JCriaygo5C4S" outputId="92e87bad-0c00-4174-fa21-3fd9fd38f2d0"
# Illustrate 50000 simulated paths with monthly time steps (12 per year).
plot = plot_MC (S0, Sb, K, T, r, mu, sigma, 12, 50000)
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="Vhq1bLPj49be" outputId="198ca40a-9c12-4318-820c-e3d17b06cb05"
# Same illustration with daily time steps (252 trading days per year).
plot = plot_MC (S0, Sb, K, T, r, mu, sigma, 252, 50000)
# + [markdown] id="7WPN1bOoMrFC"
# Compute prices of Barrier options with volatility increased and decreased by 10% from the original inputs.
# + colab={"base_uri": "https://localhost:8080/"} id="CepxA1paNExI" outputId="4650228f-07c1-4988-c6ab-54eafc1ee464"
# volatility decreased by 10% from the original inputs
# One-step barrier price under the lower volatility (0.9 * sigma).
callMC_Barrier_Knockin_Price_1_step , putMC_Barrier_Knockin_Price_1_step = MC_barrier_knockin_price(S0, Sb, K, T, r, mu, sigma*0.9, 1, numPaths)
print('one-step MC price of an Barrier call option is ' + str(callMC_Barrier_Knockin_Price_1_step))
print('one-step MC price of an Barrier put option is ' + str(putMC_Barrier_Knockin_Price_1_step))
# + colab={"base_uri": "https://localhost:8080/"} id="ggoXwRUIL0lc" outputId="e9518410-cef6-4926-86bc-8ea6c1694720"
# Multi-step (252 steps, i.e. daily) barrier price under the lower volatility.
callMC_Barrier_Knockin_Price_multi_step, putMC_Barrier_Knockin_Price_multi_step = MC_barrier_knockin_price(S0, Sb, K, T, r, mu, sigma*0.9, 252, numPaths)
print('Multi-step MC price of an Barrier call option is ' + str(callMC_Barrier_Knockin_Price_multi_step))
print('Multi-step MC price of an Barrier put option is ' + str(putMC_Barrier_Knockin_Price_multi_step))
# + colab={"base_uri": "https://localhost:8080/"} id="wY-k3NWWLrcp" outputId="9c6f26eb-875e-4678-ed4a-8b65eeccfbc1"
# volatility increased by 10% from the original inputs
# One-step barrier price under the higher volatility (1.1 * sigma).
callMC_Barrier_Knockin_Price_1_step , putMC_Barrier_Knockin_Price_1_step = MC_barrier_knockin_price(S0, Sb, K, T, r, mu, sigma*1.1, 1, numPaths)
print('One-step MC price of an Barrier call option is ' + str(callMC_Barrier_Knockin_Price_1_step))
print('One-step MC price of an Barrier put option is ' + str(putMC_Barrier_Knockin_Price_1_step))
# + colab={"base_uri": "https://localhost:8080/"} id="7Z2CzDkoMdDR" outputId="8904e69d-8901-48e6-a17e-745ec5a46071"
# Multi-step (daily) barrier price under the higher volatility.
callMC_Barrier_Knockin_Price_multi_step, putMC_Barrier_Knockin_Price_multi_step = MC_barrier_knockin_price(S0, Sb, K, T, r, mu, sigma*1.1, 252, numPaths)
print('Multi-step MC price of an Barrier call option is ' + str(callMC_Barrier_Knockin_Price_multi_step))
print('Multi-step MC price of an Barrier put option is ' + str(putMC_Barrier_Knockin_Price_multi_step))
# + [markdown] id="6tqTSCp-NVKm"
# Design your own procedure for choosing a number of time steps and a number of scenarios in Monte Carlo pricing for European option to get the same price (up to the cent) as given by the Black-Scholes formula.
# + id="XYV35jCK8Dlu"
numSteps_list = [1, 2, 12, 24, 252]
c1_list = []
p1_list = []
# Price the European option with 100000 paths for each candidate step count.
for steps in numSteps_list:
    call_px, put_px = MC_european_price(S0, K, T, r, mu, sigma, steps, 100000)
    c1_list.append(call_px)
    p1_list.append(put_px)
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="xRfbtdrM8Dlv" outputId="0e9871e0-f1a7-4919-81a9-370324a0eb15"
# Compare MC prices against the flat Black-Scholes benchmarks as numSteps grows.
plt.plot(numSteps_list, c1_list, '-o', label='call')
plt.plot(numSteps_list, p1_list, '-o', label='put')
plt.plot(numSteps_list, [call_BS_European_Price]* len(numSteps_list), label='callBS')
plt.plot(numSteps_list, [putBS_European_Price]* len(numSteps_list), label='putBS')
plt.title('European Option Pricing (numPaths = 100000)')
plt.xlabel('numSteps')
plt.ylabel('Option Price')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="FnWHfqLAbdoZ" outputId="69eb2826-3607-4fe7-e9dc-6c4ba751fb3e"
# Monte Carlo pricing for European call option
# Search 12..23 steps for the first MC call price matching Black-Scholes
# to the cent (100000 paths per trial).
for i in np.arange(12, 24):
    c, _ = MC_european_price(S0, K, T, r, mu, sigma, i, 100000)
    if c.round(2) == call_BS_European_Price.round(2):
        # Typo fixed: the message previously read 'numSterps'.
        print('When numSteps =', i)
        print('MC pricing for European call = ', c.round(2))
        print('Black-Scholes price of an European call option = ', call_BS_European_Price.round(2))
        break
else:
    # Previously a failed search printed nothing; say so explicitly.
    print('No numSteps in [12, 24) matched the Black-Scholes call price to the cent')
# + colab={"base_uri": "https://localhost:8080/"} id="q_0XjQlmqHUO" outputId="f83d6bc4-6c6b-4297-e3ed-34bf134e65ab"
# Monte Carlo pricing for European put option
# Search 12..23 steps for the first MC put price matching Black-Scholes
# to the cent (100000 paths per trial).
for i in np.arange(12, 24):
    _, p = MC_european_price(S0, K, T, r, mu, sigma, i, 100000)
    if p.round(2) == putBS_European_Price.round(2):
        # Typo fixed: the message previously read 'numSterps'.
        print('When numSteps =', i)
        print('MC pricing for European put = ', p.round(2))
        print('Black-Scholes price of an European put option = ', putBS_European_Price.round(2))
        break
else:
    # Previously a failed search printed nothing; say so explicitly.
    print('No numSteps in [12, 24) matched the Black-Scholes put price to the cent')
# + id="XAYdrStNyeV8"
| A4/Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
variados = ['Rubens', ('Engenheiro', 10), {'Engenheiro': 10, 'Resíduos': 'Matéria Preferida'}, 24.2]
# Print each element; iterating directly produces the same output as
# indexing positions 0 through 3 one by one.
for elemento in variados:
    print(elemento)
| Python/exercicios_DSA/exercicios/ex009.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to weaponize the randomness of k-means
#
# ## Introduction: The Problem
#
# A typical introductory text about the k-means algorithm will state that one of the main issues in using this algorithm is that the user has to set the number of clusters beforehand. **Never mind the number of clusters: How do you know if your data actually has any structure at all?** If the probability distributions underlying your dataset (sometimes called the "data generating distributions") do not have any cluster structure, then any solution found by the k-means algorithm will be completely random. Even if your data has structure, then generally multiple runs of k-means will not produce identical clusterings.
#
# In this notebook I will show a way to use this randomness of clusterings found by k-means to identify whether your data has cluster structure and how many clusters k-means can reasonably find.
#
# I'll start with an example of how a dataset with two unrelated and normally distributed features leads to random solutions.
# +
# %matplotlib inline
import numpy as np
from numpy.random import normal
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
n = 10000
def genNormalData(n):
    """Return a list of n independent standard-normal draws."""
    return [normal() for _ in range(n)]
fig = plt.gcf()
fig.suptitle("No structure: Random solutions", fontsize=18)
fig.set_figheight(4)
fig.set_figwidth(15)
# Just so this notebook is exactly reproducible:
np.random.seed(0)
# Cluster four independent 2-D Gaussian samples (n points each, n defined
# above) with k=2; with no real structure the partition found is arbitrary.
for i in range(4):
    x1 = genNormalData(n)
    x2 = genNormalData(n)
    X = np.array([x1, x2]).T
    kmeans = KMeans(2, init='random')
    clusters = kmeans.fit_predict(X)
    plt.subplot(141 + i)  # 1 row x 4 columns, panel i+1
    plt.scatter(x1, x2, c=clusters)
# -
# In real life, you're unlikely to be able to conveniently visualize your data like I've done here. So we're going to need a more algorithmic approach to figuring out if your solution is random and if your data has any structure to it. So let's generate a toy dataset that we can play with.
#
# ## A Toy Dataset With Some Structure
#
# To investigate some possible approaches that may work to identify random solutions and/or structure, let's take a look at this dataset, with four clusters with uniformly distributed datapoints.
#
# +
import numpy as np
from numpy.random import uniform
def gen(n, l=1, split=True):
    """Draw n points uniform on (-1.25, -0.25) or (0.25, 1.25).

    Each draw is uniform on (-1, 1) and then pushed 0.25 away from zero,
    leaving a gap around the origin. The `l` and `split` parameters are
    accepted for signature compatibility but are not used.
    """
    values = []
    for _ in range(n):
        r = uniform() * 2 - 1
        values.append(r + 0.25 if r > 0 else r - 0.25)
    return values
# Just so this notebook is exactly reproducible:
np.random.seed(0)
# One coordinate per axis; n is defined earlier in the notebook.
x = gen(n)
y = gen(n)
plt.scatter(x, y, s=1)
# -
# ## Running K-Means on the Toy
#
# From the visualizations it's obvious that this dataset has 4 clusters, but of course in practical situations, this will not be so obvious. Let's see what happens when we try to cluster this dataset with 2, 3, 4 or 5 clusters.
# +
import matplotlib.pyplot as plt
X = np.array([x, y]).T
plt.figure(1, figsize=(12, 4))
# Cluster the toy data with k = 2..5 and show each partition side by side.
for i in range(4):
    k = 2 + i
    kmeans = KMeans(k, init='random')
    clusters = kmeans.fit_predict(X)
    plt.subplot(141 + i)
    plt.title(str(k)+ " clusters")
    plt.scatter(x, y, c=clusters, s=1)
# -
# If we look at any of these clusterings, all except for the solution with 4 clusters are random. The solution with 2 clusters could randomly join both left and right clusters, or it could form a cluster of the top squares and another cluster containing the bottom squares.
#
# Looking at the solution with 3 clusters, we see that K-means has chosen to join the top two squares. Given the data generating probability distributions, it could have equally probably chosen the bottom two squares, or two on the left or the two on the right. However, *given our data sample*, it will almost always choose to merge the bottom two.
#
# Here are the centroids from running K-means with k=3 100 times:
# +
np.random.seed(0)
def get_cluster_centers(X, k, loops):
    """Fit k-means `loops` times on X and collect the centroids of every run.

    Each fit starts from a fresh random initialization, so unstable problems
    produce scattered centroids. Returns a pair (x_coords, y_coords) with the
    centroid coordinates from all runs, ready for a scatter plot.
    """
    # n_jobs was deprecated in scikit-learn 0.23 and removed in 1.0;
    # passing it raises TypeError on current versions, so it is dropped here.
    kmeans = KMeans(k, init='random')
    allclusters = []
    for _ in range(loops):
        kmeans.fit(X)
        allclusters.extend(kmeans.cluster_centers_)
    x_ac = [p[0] for p in allclusters]
    y_ac = [p[1] for p in allclusters]
    return (x_ac, y_ac)
# Collect the centroids from 100 runs of k-means with k=3 and overlay them.
(x_ac, y_ac) = get_cluster_centers(X, 3, 100)
plt.scatter(x_ac, y_ac, marker='x', s=80)
# -
# ## So now we have two problems
#
# First, the solution is random, and second, rerunning k-means isn't displaying the random behaviour, suggesting actually a very stable solution.
#
# Knowing that the solution is random, we should find a KPI of some kind, that correctly reports that this is so.
# One solution is to apply a variant of k-fold cross validation. Using for instance 5 folds, the idea is normally to split the data into 5 chunks and run the clustering 5 times, each time on 80% of the data but a different subset of the sample. Unfortunately, we observe that using 80% of the data, the clustering remains equally stable. But this changes if we run on 20% instead: Running first on the first 20%, then on the second 20%, and so on.
#
# The higher the number of folds, the more reliably we can demonstrate randomness, but if we choose too many folds, for example 1000, in this case we would only have 10 points left per clustering, and using 3 clusters we would expect the results to show random results even if the underlying distributions contain three clusters.
#
# ## Using Folds to Bring out the Randomness
#
# We split the data into 10 folds and cluster each fold individually. Behold randomness:
# +
from sklearn.model_selection import KFold
np.random.seed(0)
n_clusters = 3
n_splits = 10
def get_clusters_using_folds(X, n_clusters, n_splits):
    """Split X into n_splits folds and cluster each small fold on its own.

    Clustering only ~1/n_splits of the data at a time exposes unstable
    (random) solutions. Returns (x_coords, y_coords) of the centroids
    collected from every fold.
    """
    kfold = KFold(n_splits=n_splits)
    xs_all = []
    ys_all = []
    # Deliberately cluster the small *test* slice of each split, not the
    # large training slice.
    for _, test_index in kfold.split(X):
        fold_xs, fold_ys = get_cluster_centers(X[test_index], k=n_clusters, loops=1)
        xs_all.extend(fold_xs)
        ys_all.extend(fold_ys)
    return (xs_all, ys_all)
plt.figure(1, figsize=(12, 4))
# For each k = 2..5, cluster every fold separately and overlay the centroids.
for i in range(4):
    k = 2 + i
    plt.subplot(141 + i)
    plt.title("Cluster centroids with k=" + str(k))
    (x_wfolds, y_wfolds) = get_clusters_using_folds(X, k, n_splits)
    plt.scatter(x_wfolds, y_wfolds, marker='x', s=80)
# -
# In the plot where we have clustered with k=3 we finally see the behaviour that we expected from the underlying distributions: There are cluster centers in each of the corners, as well as between each of the clusters at approximately (0, -0.8), (0, 0.8), (0.8, 0) and (-0.8, 0).
#
# When clustering with the correct number of clusters (4), we see no randomness as expected.
#
# ## Quantifying Randomness
#
# It's clear visually that the plot of cluster centroids using k=3 (3 clusters) does not correspond to a stable solution, and k=4, does. How to put a number on that?
#
# I suggest to run k-means again on the cluster centroids. With k=4 and 10 folds, we will have calculated 40 different cluster centroids. If we cluster these 40 centroids again with k=4, we will find 4 very compact clusters, indicating that k=4 is right for this dataset. If we do the same on the 30 clusters that were generated with k=3, we find 3 clusters that are not compact and not well separated. After clustering the cluster centroids, we can look at two measures:
#
# * "Cluster stability": The score of the k-means algorithm when run on the centroids (the objective of k-means, related to the cluster compactness).
# * "Cluster confidence": The number of cluster centroids in each cluster. If we generated the cluster centroids using 10 folds, then after clustering the centroids, each cluster of centroids should contain 10 points, indicating that each cluster is found every time. To measure this we can look at the variance on the number of times each cluster is found.
#
# One can argue that both these KPIs are defined the wrong way around: Both for the stability and the confidence, low values are better, which is contrary to what the name suggests. Sorry about that.
#
# We can now plot these measures, cluster stability and cluster confidence, as a function of k (the number of clusters). A traditional method is the "elbow method" which looks at the k-means score after clustering the data points directly, for different values of k.
# +
from collections import Counter
# Candidate numbers of clusters: k = 1..10.
k_arr = [k+1 for k in range(10)]
def get_stability_and_confidence_k(X, k, n_splits):
    """Compute the stability and confidence KPIs for a single value of k.

    Stability is the (negated) k-means score obtained by re-clustering the
    per-fold centroids; confidence is the variance of how many centroids
    landed in each of those clusters. Lower is better for both.
    """
    # Generate cluster centroids using folds:
    (xkx, xky) = get_clusters_using_folds(X, k, n_splits)
    Xk = np.array([xkx, xky]).T
    # Run k-means again on cluster centroids.
    # n_jobs dropped: the parameter was removed in scikit-learn 1.0.
    kmeans = KMeans(k, init='random')
    clusters = kmeans.fit_predict(Xk)
    # Calculate KPIs (a dead, unused np.histogram call was removed here).
    stability = -kmeans.score(Xk)
    confidence = np.var(list(Counter(clusters).values()))
    return (stability, confidence)
def get_stability_and_confidence(X, k_arr, n_splits):
    """Evaluate the stability/confidence KPIs for every k in k_arr.

    Prints one progress line per k and returns (stabilities, confidences)
    as two parallel lists.
    """
    stabilities = []
    confidences = []
    for k in k_arr:
        stability, confidence = get_stability_and_confidence_k(X, k, n_splits)
        print("k=%2d: Stab=%7.2f Conf=%5.2f" % (k, stability, confidence))
        stabilities.append(stability)
        confidences.append(confidence)
    return (stabilities, confidences)
# Evaluate stability and confidence for every candidate k on the toy data.
(stabs, confs) = get_stability_and_confidence(X, k_arr, n_splits)
# +
def get_elbow_scores(X, k_arr):
    """Return the (negated) k-means score on X for each k, for the elbow method."""
    scores = []
    for n_clusters in k_arr:
        model = KMeans(n_clusters, init='random')
        model.fit(X)
        scores.append(-model.score(X))
    return scores
# Classic elbow-method scores over the same range of k, for comparison.
elbow_scores = get_elbow_scores(X, k_arr)
# +
def plot_result(k_arr, x, y, colors, stabs, confs, elbow_scores):
    """Draw a 2x2 summary: the data plus elbow/stability/confidence curves.

    `colors` may be None (unlabeled data) or per-point labels for the
    scatter plot; the three curve panels share identical styling.
    """
    plt.figure(1, figsize=(12, 12))
    plt.subplot(221)
    plt.scatter(x, y, c=colors)
    plt.title('The data')
    # The three line plots differ only in data and titles; draw them in a loop.
    panels = [
        (222, elbow_scores, 'Elbow method', 'score'),
        (223, stabs, 'Cluster stability (lower is better)', 'score'),
        (224, confs, 'Cluster confidence (lower is better)', 'confidence'),
    ]
    for position, series, title, ylabel in panels:
        plt.subplot(position)
        plt.plot(k_arr, series, marker='o')
        plt.title(title)
        plt.xlabel('k')
        plt.ylabel(ylabel)
        plt.xticks(k_arr)
        # grid(b=...) was removed in Matplotlib 3.5; pass visibility positionally.
        plt.grid(True, which='major', linestyle='--')
# The toy data has no ground-truth labels, hence colors=None.
plot_result(k_arr, x, y, None, stabs, confs, elbow_scores)
# -
# These results show that for this data, we get both good stability and good confidence (low values for both) for the trivial solution (1 cluster) and for the solution with 4 clusters. The elbow method also indicates the correct number of clusters.
#
#
# ## Does it work on real data?
#
# Let's try this on a few datasets that are a bit more realistic.
#
# ### The Iris Dataset
#
# The Iris dataset is a well-known dataset with plant measurements, 4 measurements per plant from 3 different types of plants. There are 50 measurements of each type.
#
# Hopefully our method will indicate 3 clusters in this data. To visualize the data I apply PCA down to 2 dimensions. It only has a 150 examples, so I'll be using 5 folds.
# +
from sklearn.datasets import load_iris
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
np.random.seed(0)
# 150 samples, 4 features, 3 plant species in `target`.
(data, target) = load_iris(return_X_y=True)
# Standardize features before clustering.
X = scale(data)
# 2-D projection used only for the scatter plot, not for clustering.
X_pca = PCA(n_components=2).fit_transform(X)
# NOTE(review): the comprehension variable shadows the outer x/y names.
x = [x[0] for x in X_pca]
y = [x[1] for x in X_pca]
# Only 150 samples, so use 5 folds instead of 10.
(stabs, confs) = get_stability_and_confidence(X, k_arr, n_splits=5)
elbow_scores = get_elbow_scores(X, k_arr)
plot_result(k_arr, x, y, target, stabs, confs, elbow_scores)
# -
# I have trouble applying my method to this dataset because of the limited statistics. My impression from the 2-dimensional visualization of the data is that 2 clusters might be good, but the confidence plot shows that the identified clusters are not always the same ones. I won't argue that the case for 3 clusters is obvious from these plots, but 3 clusters does appear to be a reasonable solution.
#
# The elbow method does not show a clear kink.
#
# ### The MNIST Digits Dataset
#
# The MNIST dataset is a collection of 8x8 images of hand-written digits, where each digit is a gray-scale from 0 to 16. There are 1797 images in the dataset and 64 features, which is a lot for a clustering algorithm due to the curse of dimensionality. I will therefore again apply PCA to reduce the dimension of the data before clustering.
#
# I will apply the UMAP algorithm to create a 2-dimensional visualization. UMAP is similar in application to t-SNE, but in my experience it's faster and provides better separation. UMAP is a way to project high-dimensional features into a lower-dimensional space by creating a curved surface in the high-dimensional space.
# +
from sklearn.datasets import load_digits
from umap import UMAP
np.random.seed(0)
# 1797 8x8 digit images flattened to 64 features each.
(data, target) = load_digits(return_X_y=True)
# UMAP projection is for visualization only; clustering uses PCA features.
umapper = UMAP(n_neighbors=5, min_dist=0.3, metric='correlation')
vizdata = umapper.fit_transform(data)
x = [x[0] for x in vizdata]
y = [x[1] for x in vizdata]
# Reduce to 5 dimensions before clustering to soften the curse of dimensionality.
X = PCA(n_components=5).fit_transform(scale(data))
# Widen the candidate range to k = 1..20 since 10 digit classes are expected.
k_arr = [k+1 for k in range(20)]
(stabs, confs) = get_stability_and_confidence(X, k_arr, n_splits=10)
elbow_scores = get_elbow_scores(X, k_arr)
plot_result(k_arr, x, y, target, stabs, confs, elbow_scores)
# -
# It may be surprising that it works at all to interpret each image as a 64 dimensional vector and cluster them, since the exact same digit translated across the image by one pixel produces an entirely different vector. But it works because the pixels are relatively large.
#
# Since there are 10 digits in the data, one would hope to find 10 clusters.
#
# In this example I basically blindly applied the algorithm and there is no particular reason to use PCA to go down to 5 dimensions. Clustering in 5 dimensions favours approximately 9 to 11 clusters, with 9 clusters having the better "confidence" (always find the same ones) and 11 clusters the better "stability" (more stable centroids). Using values other than 5 for PCA works more or less the same way.
#
# The elbow method does not appear very useful for this case.
#
# ## Conclusions
#
# The fact that a clustering algorithm identifies clusters, does not mean these clusters have any significance. Using the idea of k-fold cross-validation, randomness can be generated in the clustering solutions, if the clusterings are artificial. Using the stability and confidence as defined proves useful to identify meaningful clusterings, and it scales well to large datasets.
#
# The traditional elbow method often proves useless on practical datasets. Other approaches use clustering evaluation metrics to understand the quality of a clustering, such as the Silhouette score. These metrics often do not evaluate the question whether other clusterings are equally valid and often do not scale well to large numbers of samples.
#
# In a [later notebook called "Clustering Quality Metrics"](https://github.com/pbalm/mlbits/blob/master/Clustering%20Quality%20Metrics.ipynb) I compare how well the Silhouette index and the Adjusted Rand Index hold up against this task.
| Weaponizing the randomness of k-means.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # How to use EstimatorStep in AML Pipeline
#
# This notebook shows how to use the EstimatorStep with Azure Machine Learning Pipelines. Estimator is a convenient object in Azure Machine Learning that wraps run configuration information to help simplify the tasks of specifying how a script is executed.
#
#
# ## Prerequisite:
# * Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning
# * If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to:
# * install the AML SDK
# * create a workspace and its configuration file (`config.json`)
# Let's get started. First let's import some Python libraries.
import azureml.core
# check core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
# ## Initialize workspace
# Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
from azureml.core import Workspace
# Load workspace details from the local config.json created during setup.
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep = '\n')
# ## Get default AmlCompute
# You can create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you use default `AmlCompute` as your training compute resource.
# +
cpu_cluster = ws.get_default_compute_target("CPU")
# use get_status() to get a detailed status for the current cluster.
print(cpu_cluster.get_status().serialize())
# -
# -
# Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named 'cpucluster' of type `AmlCompute`.
# ## Use a simple script
# We have already created a simple "hello world" script. This is the script that we will submit through the estimator pattern. It prints a hello-world message, and if the Azure ML SDK is installed, it will also log an array of values ([Fibonacci numbers](https://en.wikipedia.org/wiki/Fibonacci_number)).
# ## Build an Estimator object
# Estimator by default will attempt to use Docker-based execution. You can also enable Docker and let estimator pick the default CPU image supplied by Azure ML for execution. You can target an AmlCompute cluster (or any other supported compute target types). You can also customize the conda environment by adding conda and/or pip packages.
#
# > Note: The arguments to the entry script used in the Estimator object should be specified as *list* using
# 'estimator_entry_script_arguments' parameter when instantiating EstimatorStep. Estimator object's parameter
# 'script_params' accepts a dictionary. However 'estimator_entry_script_arguments' parameter expects arguments as
# a list.
#
# > Estimator object initialization involves specifying a list of DataReference objects in its 'inputs' parameter.
# In Pipelines, a step can take another step's output or DataReferences as input. So when creating an EstimatorStep,
# the parameters 'inputs' and 'outputs' need to be set explicitly and that will override 'inputs' parameter
# specified in the Estimator object.
# +
from azureml.core import Datastore
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import PipelineData
# Default blob datastore attached to the workspace.
def_blob_store = Datastore(ws, "workspaceblobstore")
# Input: a pickled 20-newsgroups dataset already uploaded to the datastore.
input_data = DataReference(
    datastore=def_blob_store,
    data_reference_name="input_data",
    path_on_datastore="20newsgroups/20news.pkl")
# Output: intermediate pipeline data written back to the same datastore.
output = PipelineData("output", datastore=def_blob_store)
# +
from azureml.train.estimator import Estimator
# Estimator wraps the run configuration for the training script.
est = Estimator(source_directory='.',
                compute_target=cpu_cluster,
                entry_script='dummy_train.py',
                conda_packages=['scikit-learn'])
# -
# ## Create an EstimatorStep
# [EstimatorStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.estimator_step.estimatorstep?view=azure-ml-py) adds a step to run Estimator in a Pipeline.
#
# - **name:** Name of the step
# - **estimator:** Estimator object
# - **estimator_entry_script_arguments:**
# - **runconfig_pipeline_params:** Override runconfig properties at runtime using key-value pairs each with name of the runconfig property and PipelineParameter for that property
# - **inputs:** Inputs
# - **outputs:** Output is list of PipelineData
# - **compute_target:** Compute target to use
# - **allow_reuse:** Whether the step should reuse previous results when run with the same settings/inputs. If this is false, a new run will always be generated for this step during pipeline execution.
# - **version:** Optional version tag to denote a change in functionality for the step
# +
from azureml.pipeline.steps import EstimatorStep
# Wrap the Estimator as a pipeline step; entry-script arguments must be a list.
est_step = EstimatorStep(name="Estimator_Train",
                         estimator=est,
                         estimator_entry_script_arguments=["--datadir", input_data, "--output", output],
                         runconfig_pipeline_params=None,
                         inputs=[input_data],
                         outputs=[output],
                         compute_target=cpu_cluster)
# -
# ## Build and Submit the Experiment
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
# Assemble a one-step pipeline and submit it as an experiment run.
pipeline = Pipeline(workspace=ws, steps=[est_step])
pipeline_run = Experiment(ws, 'Estimator_sample').submit(pipeline)
# ## View Run Details
from azureml.widgets import RunDetails
# Render an interactive widget tracking the pipeline run's progress.
RunDetails(pipeline_run).show()
| how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-estimatorstep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo 3 : Data Modeling, Creating a Table with Apache Cassandra
# <img src="images/cassandra-logo.png" width="250" height="250">
# ## Walk through the basics of Apache Cassandra:
# <br><li>Creating a table <li>Inserting rows of data<li>Running a simple SQL query to validate the information.
# ### Use a python `wrapper/python` driver called cassandra to run the Apache Cassandra queries. This library should be preinstalled but in the future to install this library you can run this command in a notebook to install locally:
# `! pip install cassandra-driver`<br>
#
# More documentation can be found here: https://datastax.github.io/python-driver/
# `conda install -c anaconda cassandra-driver`
#
# Anaconda driver check this https://anaconda.org/anaconda/cassandra-driver
# ### Import Apache Cassandra python package
import cassandra
# ### Create a connection to the database
# 1. Connect to the local instance of Apache Cassandra *['127.0.0.1']*.
# 2. The connection reaches out to the database (*dataengineering*) and uses the correct privileges to connect to the database (*user and password*).
# 3. Once we get back the cluster object, we need to connect and that will create our session that we will use to execute queries.<BR><BR>
#
# *Note 1:* This block of code will be standard in all notebooks
from cassandra.cluster import Cluster
# Connect to a single-node local Cassandra instance; the session object is
# what every subsequent cell uses to execute CQL statements.
try:
    cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance
    session = cluster.connect()
except Exception as e:
    # Teaching style: print the error instead of crashing the notebook.
    print(e)
# ### Test the Connection and Error Handling Code
# *Note:* The try-except block should handle the error: We are trying to do a `select *` on a table but the table has not been created yet.
# Intentionally query a table that does not exist yet: the except branch
# should print an "unconfigured table" / "keyspace not set" style error.
try:
    session.execute("""select * from en_sahih""")
except Exception as e:
    print(e)
# ### Create a keyspace to the work in
# *Note:* We will ignore the Replication Strategy and factor information right now. Remember, this will be the strategy and replication factor on a one node local instance.
# +
# Create the `quran` keyspace. SimpleStrategy with replication_factor 1 is
# only appropriate for a single-node development install like this one.
try:
    session.execute("""
    CREATE KEYSPACE IF NOT EXISTS quran
    WITH REPLICATION =
    { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
    print(e)
# -
# ### Connect to our Keyspace.<br>
# *Compare this to how a new session in PostgreSQL is created.*
# Scope the session to the `quran` keyspace so table names need no prefix.
try:
    session.set_keyspace('quran')
except Exception as e:
    print(e)
# ### We will create AL-QURAN, each surah has a lot of information we could add to the AL-QURAN database. We will design english translation by create quran_index, sura, aya, text.
# ### But ...Stop
#
# ### We are working with Apache Cassandra a NoSQL database. We can't model our data and create our table without more information.
#
# ### Think about what queries will you be performing on this data?
# <img src="images/quran-nosql-schema.png" width="500" height="500">
# <img src="images/cassandra-architecture.png">
# #### We want to be able to get every Ayah translations that was released in a particular Surah.
# `select * from en_sahih WHERE surah=1`
#
# *To do that:* <ol>
# <li> We need to be able to do a WHERE on surah.
# <li>Surah will become my partition key,
# <li>Ayah will be my clustering column to make each Primary Key unique.
# <li>**Remember there are no duplicates in Apache Cassandra.**</ol>
#
# `Table Name: English Saheeh International
# column 1: Quran Index
# column 2: Surah
# column 3: Ayah
# column 4: Text Translation`
#
# PRIMARY KEY(surah, ayah)
#
#
# ### Now to translate this information into a Create Table Statement.
# More information on Data Types can be found here: https://datastax.github.io/python-driver/<br>
# *Note:* Again, we will go in depth with these concepts in Lesson 3.
query = "drop table en_sahih"
try:
rows = session.execute(query)
except Exception as e:
print(e)
query = "CREATE TABLE IF NOT EXISTS en_sahih "
query = query + "(quran_index int, surah int, ayah int, text_translation_en text, PRIMARY KEY (surah, ayah))"
try:
session.execute(query)
except Exception as e:
print(e)
# The query should run smoothly.
# ### Insert rows of data
# To get the Quran dataset here:
# https://github.com/langsari/quran-dataset
# Insert a row of data
# +
# Parameterized INSERT (the driver substitutes the %s placeholders safely).
query = "INSERT INTO en_sahih (quran_index, surah, ayah, text_translation_en)"
query = query + " VALUES (%s, %s, %s, %s)"
try:
    session.execute(query, (1, 1, 1, 'In the name of Allah, the Entirely Merciful, the Especially Merciful.'))
except Exception as e:
    print(e)
# -
# Insert multiple rows of data
# +
# Parameterized INSERT reused for every row of surah 1 (ayat 2-7).
query = "INSERT INTO en_sahih (quran_index, surah, ayah, text_translation_en)"
query = query + " VALUES (%s, %s, %s, %s)"
# Rows are (quran_index, surah, ayah, text_translation_en); each one is
# executed in order and any per-row failure is printed without stopping
# the remaining inserts, exactly as the original one-try-per-row version did.
rows_to_insert = [
    (2, 1, 2, '[All] praise is [due] to Allah, Lord of the worlds -'),
    (3, 1, 3, 'The Entirely Merciful, the Especially Merciful,'),
    (4, 1, 4, 'Sovereign of the Day of Recompense.'),
    (5, 1, 5, 'It is You we worship and You we ask for help.'),
    (6, 1, 6, 'Guide us to the straight path -'),
    (7, 1, 7, 'The path of those upon whom You have bestowed favor, not of those who have evoked [Your] anger or of those who are astray.'),
]
for row_values in rows_to_insert:
    try:
        session.execute(query, row_values)
    except Exception as e:
        print(e)
# -
# Insert multiple rows of data with different Surah
# +
# Insert two rows for a second partition (surah 2) so the partition-key
# query below can demonstrate filtering by surah.
query = "INSERT INTO en_sahih (quran_index, surah, ayah, text_translation_en)"
query = query + " VALUES (%s, %s, %s, %s)"
try:
    session.execute(query, (8, 2, 1, '<NAME>.'))
except Exception as e:
    print(e)
try:
    session.execute(query, (9, 2, 2, 'This is the Book about which there is no doubt, a guidance for those conscious of Allah -'))
except Exception as e:
    print(e)
# -
# ### Validate your data was inserted into the table.
# *Note:* The for loop is used for printing the results. If executing queries in the cqlsh, this would not be required.
#
# *Note:* Depending on the version of Apache Cassandra you have installed, this might throw an "ALLOW FILTERING" error instead of printing the 2 rows that we just inserted. This is to be expected, as this type of query should not be performed on large datasets, we are only doing this for the sake of the demo.
# +
# Full-table scan for demo purposes only (see the ALLOW FILTERING note above);
# never do this on a production-sized Cassandra table.
query = 'SELECT * FROM en_sahih'
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
# -
# NOTE: if the SELECT above failed, `rows` still holds the previous result set.
for row in rows :
    print (row.quran_index, row.surah, row.ayah, row.text_translation_en)
# ### Validate the Data Model with the original query.
#
# `select * from en_sahih WHERE surah=1`
# +
# The query the table was modeled for: filtering on the partition key (surah)
# is efficient and needs no ALLOW FILTERING.
query = "select * from en_sahih WHERE surah=1"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print (row.quran_index, row.surah, row.ayah, row.text_translation_en)
# -
# ### Drop the table to avoid duplicates and clean up.
query = "drop table en_sahih"
try:
rows = session.execute(query)
except Exception as e:
print(e)
# ### Close the session and cluster connection
# Release the driver's connection pool and background threads.
session.shutdown()
cluster.shutdown()
| 5-data-modeling/5-3-data-modeling_apache-cassandra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Ch15 Figure1
# +
# They gave out customer coupons and had a one-day sales event. At the end of the month the data analyst ran a report that showed a 10% increase in sales.
# Simulate 1000 orders; 'None' is the sentinel for "no coupon used".
coupon_code = ['xmas70', 'happynewyear60', 'coupon80', 'freeshipping', 'family75', 'None']
discount = {'xmas70': .7, 'happynewyear60': .6, 'coupon80': .8, 'freeshipping': 1, 'family75': .75, 'None': 1}
data = []
for i in range(1000):
    sku = rd.randint(0,15)
    quantity = rd.randint(1,3)
    coupon = coupon_code[rd.randint(0, len(coupon_code)-1)]
    disc = discount[coupon]
    # BUG FIX: "no coupon" is encoded as the string 'None', so the original
    # test `coupon != ''` was always True and applied the 70%-chance quantity
    # boost to every order. Only orders that actually used a coupon should
    # get the bump.
    if coupon != 'None':
        if rd.random() <= .7:
            quantity = quantity + 2
    data.append([i, coupon, disc, sku, quantity])
df = pd.DataFrame(data, columns=['id', 'coupon_code', 'discount', 'sku', 'quantity'])
# df.to_csv('csv_output/ch15_fig1.csv', index=False)
# NOTE: the freshly generated frame is immediately replaced by the saved CSV,
# so downstream cells use the persisted dataset, not this run's random draw.
df = pd.read_csv('csv_output/ch15_fig1.csv')
df.head()
# +
df = pd.read_csv('csv_output/ch15_fig1.csv')
# %matplotlib inline
sns.set_style("white")
f, ax = plt.subplots(1, figsize=(8,6))
# Mean quantity per sku x coupon_code; column 0 of the pivot is the 'None'
# (no-coupon) baseline that the other columns are compared against.
pvt = df.pivot_table(index='sku', columns='coupon_code', values='quantity', aggfunc=np.mean)
df2 = pd.DataFrame((pvt.values - pvt.values[:,0:1])[:,1:], columns=pvt.columns[1:])
# FIX: matplotlib >= 2.0 removed the `left=` keyword from Axes.bar; the x
# positions are now the first positional argument and bars are centered on
# them, so the ticks go at the bar positions rather than offset by 0.5.
ax.bar(np.arange(5), df2.mean())
ax.set_xticks(np.arange(5));
ax.set_xticklabels(df2.columns);
ax.set_title('average sales quantity comparing with no coupon code')
f.savefig('svg_output/ch15_fig1.svg', format='svg')
# -
# Pivoting average sales quantity by item sku and by coupon code (including no coupon code), taking average sales quantity of each coupon code and minus the average sales quantity of no coupon code, we get how many more units sold on average when using each of the coupon code versus not using coupon code. With the coupon code with most discount (60%), it shows on averagely 0.1 more unit sales than not using any.
| ch15_fig1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Talking About, Talking About I:
# Machine Learning
# ====
#
# ##### Version 0.1
#
# ***
# By <NAME> & <NAME> 2018 Nov 10
# Today, we are going to examine the way in which different outlets discuss machine learning (the topic of focus for Session 7). Here we will examine two different news articles written on the same topic. As you read through the articles, consider these questions and discuss them within your groups:
#
# 1. Does the content of the article make sense? For example, does the article define jargon or other technical ideas well? Are any extrapolations it makes about the meaning of the research reasonable/justified, or are they wild/fanciful?
# 2. Is the article an effective piece of communication?
# * Is the purpose of the article clear?
# * Does it provide a reasonable description of the work?
# * Is it written at a level that matches the intended audience?
# * Are the adopted analogies effective?
# 3. What (simple) changes could be made to improve the article?
# ## Article 1
#
# ### ASTRONOMERS HARNESS MACHINE LEARNING TO BETTER UNDERSTAND THE UNIVERSE
#
# As the scope of usage of machine learning gradually grows in enterprises and scientific fields, another field where it has penetrated is astronomy, helping astronomers gain understanding of the properties of a large numbers of stars.
#
# “It’s like video-streaming services not only predicting what you would like to watch in the future, but also your current age, based on your viewing preferences,” quips Adam Miller of NASA’s Jet Propulsion Laboratory in Pasadena, California.
#
# Mr. Miller is the lead author of a new report on the findings which he presented at the annual American Astronomical Society meeting in Seattle earlier last week and also appearing in the Astrophysical Journal. “We are predicting fundamental properties of the stars,” he said.
#
# Utilizing this branch of Artificial Intelligence, scientists are sorting through thousands of stars in our galaxy and learn their sizes, compositions and other basic traits based on sky survey images.
#
# A news release explains that using the new technique, computer algorithms glean through available stacks of images, identifying patterns that reveal a star’s properties, garnering data on billions of stars in a comparatively less amount of time and expense. Normally, these kinds of details require a spectrum, which is a detailed sifting of the starlight into different wavelengths.
#
# The Machines went through a “training period” first where Miller and his colleagues started with 9,000 stars as their training set. A spectra for these stars revealed several of their basic properties like sizes, temperatures and the amount of heavy elements, such as iron while the varying brightness of the stars had recorded by the Sloan Digital Sky Survey, producing plots called light curves were fed into the machine to help it make associations between the two sets.
#
# After the training period the computer was able to make predictions on its own about other stars by only analyzing light-curves, and gather further data which humans alone cannot process. Herein, computers with their advanced algorithms helped.
#
#
#
# *source*: [https://dataconomy.com/2015/01/astronomers-harness-machine-learning-to-better-understand-the-universe/](https://dataconomy.com/2015/01/astronomers-harness-machine-learning-to-better-understand-the-universe/)
# *Write notes on the article here*
# ## Article 2
#
# ### Nasa Looks to Machine Learning to Faster Identify Stars
#
# Nasa astronomers are now turning to a method called "machine learning" to help them understand the properties of large numbers of stars.
# The research is part of the growing field of machine learning, in which computers learn from large data sets, finding patterns that humans might not otherwise see.
#
# "With 'machine learning', computer algorithms can quickly flip through available stacks of images, identifying patterns that reveal a star's properties," the US space agency said in a statement.
#
# The technique, that essentially works in the same way as e-mail spam filters, has the potential to gather information on billions of stars in a relatively short time and with less expense.
#
# "It is like video-streaming services not only predicting what you would like to watch in the future, but also your current age, based on your viewing preferences," said lead author <NAME> of Nasa's Jet Propulsion Laboratory in Pasadena, California.
#
# But before the machines can learn, they first need a "training period."
#
# Miller and his colleagues started with 9,000 stars as their training set.
#
# They obtained spectra for these stars which revealed several of their basic properties: sizes, temperatures and the amount of heavy elements, such as iron.
#
# The varying brightness of the stars had also been recorded by the Sloan Digital Sky Survey, producing plots called light curves.
#
# By feeding the computer both sets of data, it could then make associations between the star properties and the light curves.
#
# Once the training phase was over, the computer was able to make predictions on its own about other stars by only analysing light-curves.
#
# "We can discover and classify new types of stars without the need for spectra, which are expensive and time-consuming to obtain," Miller added.
#
# The team's next goal is to get their computers smart enough to handle the more than 50 million variable stars.
#
# The report was published in the Astrophysical Journal.
#
#
# *source*: [https://gadgets.ndtv.com/science/news/nasa-looks-to-machine-learning-to-faster-identify-stars-647554](https://gadgets.ndtv.com/science/news/nasa-looks-to-machine-learning-to-faster-identify-stars-647554)
# *Write notes on the article here*
| Session7/Day5/TalkingAboutTalkingAbout.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def water_level_correction(width_footing, depth_footing, water_depth=0):
    """Compute the two water-table correction factors for a footing.

    Parameters
    ----------
    width_footing : float
        Footing width B.
    depth_footing : float
        Footing embedment depth Df (must be non-zero).
    water_depth : float, optional
        Depth of the water table below ground surface (default 0, i.e. at
        the surface).

    Returns
    -------
    tuple of float
        ``(rw1, rw2)``; each factor is clamped to a maximum of 1, which
        corresponds to the water table being deep enough to have no effect.
        (Returning the values is new; the factors are still printed, so the
        original print-only behavior is preserved for existing callers.)
    """
    dw1 = water_depth
    # Depth of water below the footing base; clamp at 0 when the water
    # table sits above the base.
    dw2 = max(water_depth - depth_footing, 0)
    top_dist_ratio = dw1 / depth_footing
    bottom_dist_ratio = dw2 / width_footing
    # Clamp both factors at 1 (no correction needed beyond that depth).
    rw1 = min(0.5 * (1 + top_dist_ratio), 1)
    rw2 = min(0.5 * (1 + bottom_dist_ratio), 1)
    print(rw1, rw2)
    return rw1, rw2
water_level_correction(2, 2, 3)
| notebooks/water_correction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Getting Started
# ## Python Installation
# Download the latest version of Python3 from https://www.python.org/downloads/ and install Python3 locally.
#
# For Windows users, after installation, you may need to edit environmental variables to let the command shell to recognize `python` or `python3` command globally. See https://anthonydebarros.com/2015/08/16/setting-up-python-in-windows-10/
# ## Jupyter Notebook Installation
# Follow http://jupyter.org/install in section "Installing Jupyter with pip" to install Jupyter.
#
# Basically you need to run the following
# ```
# python3 -m pip install --upgrade pip
# python3 -m pip install jupyter
# ```
# You may find more information in the above link.
# ## Frequently Used Packages Installation
# Here is a list of frequently used packages for scientific computation
#
# * `numpy`, `scipy`: basic numerical computation, array manipulation and linear algebra
# * `pandas`: dataframe manipulation
# * `matplotlib`: the most frequently used plotting and visualization package
# * `seaborn`: statistical data visualization
# * `sympy`: symbolic computation, a python alternative of mathematica
# * `urllib`, `requests`, `beautifulsoup4`: access online material, parse HTML file, useful when you need to implement your own web crawler to collect raw data.
# * `sqlite3`, `json`: easy handler for SQL database and JSON files
# ## TensorFlow Installation with pip (CPU only)
# For full installation guide, please check https://www.tensorflow.org/install/
#
# But basically if all above are done properly, you only need
# ```
# pip3 install tensorflow
# ```
#
# To validate your TensorFlow is installed successfully, first enter python interactive environment by typing `python3` in command line and then run the following code
# ```python
# >>> import tensorflow as tf
# >>> hello = tf.constant('Hello, TensorFlow!')
# >>> sess = tf.Session()
# >>> print(sess.run(hello))
# ```
# If the system outputs the following, then you are ready to begin writing TensorFlow programs:
# ```
# Hello, TensorFlow!
# ```
| notebooks/00_Getting_Started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#### import libraries ###########################################
# %load_ext jupyternotify
import pandas as pd
import os
from nltk.corpus import stopwords
import string
import re
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import SnowballStemmer
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans, DBSCAN
from sklearn.metrics import adjusted_rand_score
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import operator
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import time
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.cluster import AgglomerativeClustering
from gensim.models import KeyedVectors
from gensim.test.utils import datapath
from sklearn.cluster import MiniBatchKMeans
import pickle
from gensim.models import Word2Vec
from xgboost import XGBClassifier
from sklearn.preprocessing import scale
import pandas as pd
# +
import pandas as pd
import codecs
from tqdm import tqdm
def load_dataset(path):
    """Read a whitespace-separated GermEval-style file into a DataFrame.

    Each line holds the tweet text followed by two labels: the last two
    whitespace-separated fields become y1/y2 and everything before them is
    rejoined with single spaces as X.

    Parameters
    ----------
    path : str
        Path to the dataset file (read as UTF-8, decode errors ignored).

    Returns
    -------
    pandas.DataFrame
        Columns ['X', 'y1', 'y2'], indexed 0..n-1.

    NOTE(review): the original implementation inserted each row at index -1
    and re-sorted the frame every iteration, which was O(n^2) *and* left the
    rows in reverse file order. The reverse order is deliberately preserved
    here so downstream train/test splits are byte-identical — confirm whether
    it was ever intentional.
    """
    parsed_rows = []
    with codecs.open(path, "r", encoding='utf-8', errors='ignore') as fdata:
        for line in tqdm(fdata.readlines()):
            line_split = line.split()
            formated = ' '.join(line_split[:-2])
            parsed_rows.append([formated, line_split[-2], line_split[-1]])
    # Reverse once at the end to match the original insert-at-front behavior.
    return pd.DataFrame(parsed_rows[::-1], columns=['X', 'y1', 'y2'])
# +
from nltk.tokenize import TweetTokenizer
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords
import re
# Module-level tokenizer resources shared by tokenize()/tokenize_tfidf().
stopwords_german = set(stopwords.words('german'))
usernamePattern = re.compile('@[A-Za-z0-9_]{1,15}')
urlPattern = re.compile('(https?:\/\/)[\/.:\w(1-9)]*\s?')
# BUG FIX: the original pattern '|LBR|' parses as an alternation whose first
# branch is empty, so it only ever matched the empty string and the literal
# '|LBR|' line-break markers were never stripped from the tweets. Escaping
# the pipes makes it match the marker literally.
lbrPattern = re.compile(r'\|LBR\|')
stemmer = SnowballStemmer("german")
tkz = TweetTokenizer(preserve_case=False, reduce_len=True, strip_handles=True)
def tokenize(text):
    """Tokenize a tweet: strip @handles, URLs and |LBR| markers, tokenize
    with the shared TweetTokenizer, drop German stopwords and single-char
    tokens, and remove a leading '#' from hashtags. Returns a token list."""
    for pattern in (usernamePattern, urlPattern, lbrPattern):
        text = pattern.sub("", text)
    kept = []
    for token in tkz.tokenize(text):
        # Skip stopwords and anything of length <= 1 (punctuation, etc.).
        if token in stopwords_german or len(token) <= 1:
            continue
        kept.append(token[1:] if token.startswith('#') else token)
    return kept
# -
def tokenize_tfidf(text):
    """Like tokenize(), but Snowball-stems every kept token — used as the
    TfidfVectorizer tokenizer so inflected German forms share features."""
    for pattern in (usernamePattern, urlPattern, lbrPattern):
        text = pattern.sub("", text)
    stems = []
    for token in tkz.tokenize(text):
        # Same filtering as tokenize(): no stopwords, no 1-char tokens.
        if token in stopwords_german or len(token) <= 1:
            continue
        if token.startswith('#'):
            token = token[1:]
        stems.append(stemmer.stem(token))
    return stems
def encode_label(y):
    """Binarize string labels: 'OFFENSE' -> 1, anything else -> 0.

    Removed a stray ``np.ones(len(y))`` whose result was discarded.
    """
    return [1 if label == 'OFFENSE' else 0 for label in y]
# Load the GermEval 2018 training data plus a self-collected export.
dataset = load_dataset(path = '/home/text_mining_project/text_mining_project_2018/evaluation/germeval2018.training.txt')
dataset_own = load_dataset(path = '/home/text_mining_project/export.tsv')
# The concatenated variant is disabled: only the official training set is used.
# dataset_full = pd.concat([dataset, dataset_own])
dataset_full = dataset
# +
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# Task 1 labels (y1: OFFENSE / OTHER) encoded as 1/0; 90/10 split with a
# fixed seed for reproducibility.
X = dataset_full['X'].values
y = dataset_full['y1'].values
y = encode_label(y)
X_train_dataset, X_test_dataset, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# +
from sklearn.feature_extraction.text import TfidfVectorizer
# Unlimited vocabulary; uni- to tri-grams over stemmed tokens.
max_features = None
stopwords_german = set(stopwords.words('german'))
# stopwords_german = set(stopwords.words('english'))
tfidf = TfidfVectorizer(tokenizer=tokenize_tfidf, stop_words=stopwords_german, max_features=max_features, ngram_range=(1,3))
print('Transforming documents...')
# Fit on the training split only; the test split is transformed with the
# fitted vocabulary to avoid leakage.
X_train_tfidf = tfidf.fit_transform(X_train_dataset)
X_test_tfidf = tfidf.transform(X_test_dataset)
print('Transformation finished!')
# -
# Load the pre-trained German Word2Vec model (trained elsewhere on
# unstemmed, stopword-filtered tokens, per the filename).
model = Word2Vec.load("word2vec_nostem_stopword.model")
# +
from numpy import zeros
def make_bag_of_centroids(X, word_centroid_map, cluster_size):
    """Turn tokenized sentences into bag-of-centroids count vectors.

    X : iterable of token lists.
    word_centroid_map : dict mapping a word to its k-means centroid id.
    cluster_size : number of centroids (vector length).
    Returns a list of float32 numpy arrays, one per sentence; words missing
    from the map are silently ignored.
    """
    bags = []
    for tokens in X:
        bag = zeros(cluster_size, dtype="float32")
        for token in tokens:
            centroid_id = word_centroid_map.get(token)
            if centroid_id is not None:
                bag[centroid_id] += 1
        bags.append(bag)
    return bags
# Shortcut bound to the module-level centroid map and cluster count defined below.
as_centroid = lambda s: make_bag_of_centroids(s, word2centroid, kmeans_args['n_clusters'])
# +
# Cluster the Word2Vec vocabulary vectors into 1000 centroids; each word's
# cluster id becomes its bag-of-centroids feature index.
kmeans_args = {
    'n_clusters': 1000,
}
clustering = MiniBatchKMeans(**kmeans_args).fit_predict(model.wv.vectors)
# FIX: the original `pickle.dump(clustering, open(...))` leaked the file
# handle; use a context manager so the file is flushed and closed.
with open('./minibatchkmeans.bin', 'wb') as cluster_file:
    pickle.dump(clustering, cluster_file)
# -
# Map each vocabulary word to the id of the k-means centroid it was assigned
# to (index2word order matches the row order of model.wv.vectors).
word2centroid = {k: v for k, v in zip(model.wv.index2word, clustering)}
def merge_vector_lists(x1, x2):
    """Combine two parallel lists element-wise with `+`.

    The meaning of `+` depends on the element type: numeric addition for
    numbers/numpy arrays, concatenation for lists. Indexing is driven by
    x1's length, so a shorter x2 raises IndexError (as before).
    """
    return [x1[i] + x2[i] for i in range(len(x1))]
# +
stopwords_german = set(stopwords.words('german'))
# stopwords_german = set(stopwords.words('english'))
def preprocess(X):
    # Tokenize every raw tweet into a token list (no stemming here).
    return [tokenize(sentence) for sentence in X]
X_preprocess_train = preprocess(X_train_dataset)
X_preprocess_test = preprocess(X_test_dataset)
# Bag-of-centroids features, standardized column-wise.
# NOTE(review): `scale` is fit on train and test independently here, which
# leaks slightly different scalings — confirm this is acceptable.
X_train_centroid = scale(as_centroid(X_preprocess_train))
X_test_centroid = scale(as_centroid(X_preprocess_test))
from scipy.sparse import csr_matrix, hstack
# Stack dense centroid features beside the sparse tf-idf matrix.
X_train_centroid_sparse = csr_matrix(X_train_centroid)
X_test_centroid_sparse = csr_matrix(X_test_centroid)
X_train = hstack([X_train_centroid_sparse, X_train_tfidf])
X_test = hstack([X_test_centroid_sparse, X_test_tfidf])
# +
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
# L1-penalized linear SVM drives uninformative feature weights to zero;
# SelectFromModel then keeps only the features with non-zero coefficients.
lsvc = LinearSVC(penalty="l1", dual=False).fit(X_train, y_train)
# NOTE(review): this rebinds `model`, shadowing the Word2Vec model loaded
# earlier — confirm no later cell still expects the Word2Vec object.
model = SelectFromModel(lsvc, prefit=True)
X_train = model.transform(X_train)
X_test = model.transform(X_test)
# +
# from sklearn.preprocessing import MinMaxScaler
# from sklearn.feature_selection import SelectKBest
# from sklearn.feature_selection import chi2
#
# scaler = MinMaxScaler(copy=True, feature_range=(0, 1))
# X_train = scaler.fit_transform(X_train.todense())
# X_test = scaler.transform(X_test.todense())
#
# ch2 = SelectKBest(chi2, k=2000)
# X_train = ch2.fit_transform(X_train, y_train)
# X_test = ch2.transform(X_test)
# +
import warnings
# Shared cross-validation settings: fixed seed, 7 folds, all CPU cores.
seed = 42
k = 7
jobs = -1
# Silence sklearn convergence/deprecation chatter during the CV sweeps below.
warnings.filterwarnings("ignore")
# +
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import cross_val_score
# Candidate models; `names` and `classifiers` are parallel lists.
names = ["Linear SVM", "SGDClassifier", "BernoulliNB", "LogisticRegression",
"KNeighborsClassifier", "AdaBoostClassifier", "Random Forest", "Decision Tree"]
classifiers = [
LinearSVC(random_state=seed),
SGDClassifier(max_iter=1000, tol=None),
BernoulliNB(),
LogisticRegression(random_state=seed, solver='sag', max_iter=1000),
KNeighborsClassifier(),
AdaBoostClassifier(),
RandomForestClassifier(random_state=seed),
DecisionTreeClassifier(random_state=seed)
]
print('Searching best estimator...')
print()
# Track (name, mean accuracy) of the best model across the k-fold sweep.
best_classifier = None
for name, clf in zip(names, classifiers):
    scores = cross_val_score(clf, X_train, y_train, cv=k, n_jobs=jobs)
    print('Mean accuracy %s: %0.3f (+/- %0.3f)' % (name, scores.mean(), scores.std() * 2))
    if not best_classifier:
        best_classifier = (name, scores.mean())
    else:
        if best_classifier[1] < scores.mean():
            best_classifier = (name, scores.mean())
print()
print('Best estimator: %s (mean acc %0.3f, %d-fold cross-validation)' % (best_classifier[0], best_classifier[1], k))
# +
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
# Same sweep as above but scored with F1, which is more informative than
# accuracy on the imbalanced OFFENSE/OTHER label distribution.
print('Searching best estimator (F1 score) ...')
print()
best_classifier = None
for name, clf in zip(names, classifiers):
    scores = cross_val_score(clf, X_train, y_train, cv=k, n_jobs=jobs, scoring='f1')
    print('Mean F1 score %s: %0.3f (+/- %0.3f)' % (name, scores.mean(), scores.std() * 2))
    if not best_classifier:
        best_classifier = (name, scores.mean())
    else:
        if best_classifier[1] < scores.mean():
            best_classifier = (name, scores.mean())
print()
print('Best estimator: %s (mean F1 score %0.3f, %d-fold cross-validation)' % (best_classifier[0], best_classifier[1], k))
# -
# Final evaluation of LinearSVC on the held-out test split.
clf = LinearSVC()
clf.fit(X_train, y_train)
predictions=clf.predict(X_test)
from sklearn.metrics import f1_score
# NOTE(review): arguments are (predictions, y_test) but the signature is
# f1_score(y_true, y_pred) — harmless for binary F1 (symmetric), but confirm.
f1=f1_score(predictions, y_test)
print("F1-Score predictions: ", f1)
# Final evaluation of a tuned BernoulliNB on the held-out test split.
clf = BernoulliNB(alpha=0.1, binarize=0, class_prior=None, fit_prior=False)
clf.fit(X_train, y_train)
predictions=clf.predict(X_test)
from sklearn.metrics import f1_score
f1=f1_score(predictions, y_test)
print("F1-Score predictions: ", f1)
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(predictions, y_test)
print("Accuracy-Score predictions: ", accuracy)
# Final evaluation of AdaBoost (default params) on the held-out test split.
clf = AdaBoostClassifier()
clf.fit(X_train, y_train)
predictions=clf.predict(X_test)
from sklearn.metrics import f1_score
f1=f1_score(predictions, y_test)
print("F1-Score predictions: ", f1)
| w2v_k_means_tfidf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: teach
# language: python
# name: teach
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Colecciones avanzadas
# + [markdown] slideshow={"slide_type": "slide"}
# ```python
# import collections
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ## ChainMap
# + [markdown] slideshow={"slide_type": "subslide"}
# `ChainMap` es una estructura de datos proporcionada por la biblioteca estándar de Python que permite tratar varios diccionarios como uno solo.
#
# *Diccionarios: constant-time lookups (búsquedas de tiempo constante)*
# + [markdown] slideshow={"slide_type": "subslide"}
# Ejercicio 🧠: construye un inventario hecho de diccionarios y crea un ChainMap de todos esos diccionarios.
# -
# Three independent department dicts that will be viewed as one inventory.
toys = {"Blocks": 30, "Monopoly": 20}
computers = {"iMac": 1000, "Chromebook": 800, "PC": 400}
clothing = {"Jeans": 40, "T-Shirt": 10}
# +
from collections import ChainMap
# ChainMap is a *view* over the dicts: lookups search them left to right.
inventory = ChainMap(toys, computers, clothing)
# -
inventory
inventory["Monopoly"]
# Mutating an underlying dict is immediately visible through the view.
computers["Macbook"] = 3000
inventory
inventory["Macbook"]
# Permite crear una nueva instancia con una asignación (diccionario) adicional al principio de la lista de mapas para que sea más fácil evitar modificar las estructuras de datos subyacentes existentes.
# +
import collections
# Both maps define "c"; the ChainMap resolves duplicates in favor of the
# first map, so repetido["c"] yields "C", not "D".
a = {"a": "A", "c": "C"}
b = {"b": "B", "c": "D"}
# -
repetido = ChainMap(a, b)
repetido["c"]
import argparse
import os
from collections import ChainMap
# Lowest-priority configuration layer.
por_defecto = {"verbose": False}
# + jupyter={"outputs_hidden": true}
parser = argparse.ArgumentParser()
parser.add_argument("--verbose")
args = parser.parse_args()
# + jupyter={"outputs_hidden": true}
# Keep only the CLI options the user actually supplied (truthy values).
cli_args = {key: value for key, value in vars(args).items() if value}
# -
# Priority: CLI > environment > defaults.
# BUG FIX: the defaults dict is named `por_defecto`; the original referenced
# an undefined `defaults`, raising NameError.
config = ChainMap(cli_args, os.environ, por_defecto)
# +
# dd["a"]["b"]
# +
m1 = collections.ChainMap(a, b)
# new_child() prepends a fresh empty dict, so writes land there and the
# original maps (and m1) stay untouched.
m2 = m1.new_child()
print("m1 before:", m1)
print("m2 before:", m2)
# +
m2["c"] = "E"
print("m1 after:", m1)
print("m2 after:", m2)
# + [markdown] slideshow={"slide_type": "subslide"}
# Más información:
# * https://docs.python.org/3.7/library/collections.html#collections.ChainMap
# * https://www.blog.pythonlibrary.org/2016/03/29/python-201-what-is-a-chainmap/
# * https://pymotw.com/3/collections/chainmap.html
# + [markdown] slideshow={"slide_type": "slide"}
# ## OrderedDict
# + [markdown] slideshow={"slide_type": "subslide"}
# `OrderedDict` es un diccionario en el que las claves mantienen el orden en que se insertan, lo que significa que si cambia el valor de una clave más adelante, no cambiará la posición de la clave.
# -
# Since Python 3.7 plain dicts also preserve insertion order.
d = {}
d["aaa"] = 1
d
# BUG FIX: `milista` was referenced before it was defined (it used to be
# created a few lines further down), raising NameError when the cells run
# top to bottom. Define it before building the Counter.
milista = ["a", "c", "c", "a", "b", "a", "a", "b", "c"]
cnt = collections.Counter(milista)
nod = dict(cnt.most_common())
from collections import OrderedDict
# + slideshow={"slide_type": "subslide"}
# OrderedDict seeded in most-common order: a:4, c:3, b:2.
od = collections.OrderedDict(cnt.most_common())
for key, value in od.items():
    print(key, value)
# + slideshow={"slide_type": "subslide"}
# Updating an existing key's value does not move its position.
od["a"] = 20
od
# +
nod["a"] = 40
for k, v in nod.items():
    print(k, v)
# -
# Más información:
#
# * https://stackabuse.com/introduction-to-pythons-collections-module/#theordereddict
# ## defaultdict
# The manual pattern that `defaultdict` replaces: check for the key, create
# the list on first sight, then append. Fixed so the cell actually runs: the
# dict must exist before the loop, membership is tested on the keys (the
# original tested `.values()`), and `isinstance` needs the type to compare
# against (the original one-argument call raised TypeError).
midiccionario = {}
for valor in milista:
    if valor in midiccionario and isinstance(midiccionario[valor], list):
        midiccionario[valor].append(123)
    else:
        midiccionario[valor] = []
        midiccionario[valor].append(123)
from collections import defaultdict
# defaultdict(list): reading a missing key creates an empty list on the fly,
# so no membership check is needed before appending.
bbdd = defaultdict(list)
bbdd["a"] = 1
bbdd["c"].append(45)
bbdd
# + [markdown] slideshow={"slide_type": "slide"}
# ## enum
# + slideshow={"slide_type": "subslide"}
from enum import Enum
class Color(Enum):
    # A closed set of named constants; each member has a .name and a .value.
    RED = 1
    GREEN = 2
    BLUE = 3
# + slideshow={"slide_type": "subslide"}
print(Color.RED)
# + slideshow={"slide_type": "subslide"}
# Members are instances of their enum class.
type(Color.RED)
# -
isinstance(Color.GREEN, Color)
print(Color.RED.name)
# Lookup by name (Color["RED"]) and by value (Color(1)) both work.
print(Color["RED"])
print(Color(1))
# No se puede repetir el nombre
def imprimir_color(color: Color) -> str:
    # Placeholder body: the point of the demo is annotating with an Enum type.
    return "asdasd"
# +
from enum import Enum, unique
class Mistake2(Enum):
    ONE = 1
    TWO = 2
    THREE = 3
    # Duplicate *value*: FOUR becomes an alias of THREE (allowed by default).
    FOUR = 3
# -
# Value lookup resolves to the canonical member, THREE, not the alias.
Mistake2(3)
# + jupyter={"outputs_hidden": true}
class Shape(Enum):
    # Deliberately broken demo: reusing the member *name* SQUARE raises
    # TypeError ("Attempted to reuse key") when the class body executes.
    SQUARE = 2
    SQUARE = 3
# -
# Por defecto, los `enum` permiten múltiples nombres como alias para un mismo valor. Cuando no queremos este comportamiento, se puede usar el siguiente decorador para garantizar que cada valor se use solo una vez en la enumeración:
#
# `@enum.unique`
def print_color(color):
    # Quirk of this demo: print(...) returns None, so the assert "message"
    # is None, and the print runs eagerly even when the assert passes; under
    # `python -O` the assert (i.e. the validation) is stripped entirely.
    assert isinstance(color, Color), print("not a color")
    print(color)
print_color(Color.RED)
# +
from enum import Enum, unique
# @unique forbids aliases: any duplicated value raises ValueError at class
# creation time.
@unique
class Mistake(Enum):
    ONE = 1
    TWO = 2
    THREE = 3
    FOUR = 4
# -
Mistake(4)
# Cual será el resultado de esto?
# +
from enum import auto, Enum
class Ordinal(Enum):
    # auto() assigns sequential integer values starting at 1.
    NORTH = auto()
    SOUTH = auto()
    EAST = auto()
    WEST = auto()
    AAA = auto()
list(Ordinal)
# +
class AutoName(Enum):
    # Overriding this hook changes what auto() produces for subclasses.
    def _generate_next_value_(name, start, count, last_values):
        return name # make auto() assign each member its own name as its value
class Ordinal(AutoName):
    NORTH = auto()
    SOUTH = auto()
    EAST = auto()
    WEST = auto()
list(Ordinal)
# -
# Por ejemplo, si quieres que los usuarios indiquen que no es "Verde", "verde", o "Veerde", sino Color.green: usad el objeto `enum.Enum`. Es a la vez explícito y específico (cosas a las que siempre aspirar en Python).
# +
from enum import Enum
class EstadoBug(Enum):
    """Lifecycle states of a bug report; lower value means closer to released."""
    nuevo = 7
    incompleto = 6
    invalido = 5
    no_arreglar = 4
    en_progreso = 3
    fix_subido = 2
    fix_publicado = 1
# -
# Demo: enum members of the same Enum compare with == / != directly.
estado_actual = EstadoBug.no_arreglar
estado_deseado = EstadoBug.fix_publicado
estado_actual == estado_deseado
if estado_actual != EstadoBug.fix_publicado:
    # BUG FIX: `enviar_email(direcion:"asdasd@asdasd")` was a SyntaxError
    # (annotation syntax inside a call); pass the address as a keyword
    # argument instead.
    enviar_email(direcion="asdasd@asdasd")
# ### El Zen the Python
import this
# Más información:
# * https://docs.python.org/3.7/library/enum.html 👈🏼
# * https://stackoverflow.com/questions/37601644/python-whats-the-enum-type-good-for
# * https://pymotw.com/3/enum/
# ### Counter
# **collections.Counter** nos permite, de forma automática, contar el número de veces que aparece cada elemento en, por ejemplo, una lista.
# *El ejemplo mostrado durante el curso se ha eliminado porque usaba un archivo local de mi ordenador.*
# +
from collections import Counter
# Counter tallies how many times each element appears in the iterable.
words = ["hola", "hola", "adios", "curso", "python", "python", "python"]
contador = Counter(words)
# -
contador
contador.most_common()  # (element, count) pairs, most frequent first
# +
import collections
# Same thing via the fully-qualified module name.
contador = collections.Counter(words)
contador.most_common(20) # show the 20 most common words and how many times each appears
# -
# lambda functions, comprehensions, map
# ### Ejercicios
# https://gist.githubusercontent.com/jsdario/6d6c69398cb0c73111e49f1218960f79/raw/8d4fc4548d437e2a7203a5aeeace5477f598827d/el_quijote.txt
# * Descargar el texto de la URL enviada por el chat usando la librería `requests`. **Si no podéis descargarlo**: se puede copiar todo el texto y pegarlo en un archivo `.txt`, el ejercicio entonces será abrir y leer ese archivo.
# * Convertir texto a MINÚSCULAS.
# * Crear una lista con todas las palabras que hay en el texto.
# * Usar `collections.Counter` con esta lista de palabras y ver cuales son las 10 palabras más comunes.
#
# **Extra**
#
# * Lo mismo que antes, pero sin tener en cuenta las palabras que están en esta lista: https://raw.githubusercontent.com/Alir3z4/stop-words/master/spanish.txt
# * ¿Cual es la palabra más larga del texto? (en longitud de caracteres)
# * Crear una lista con todas las palabras que empiezan por la letra -> **L** <- (minúscula)
# Cosas a tener en cuenta (o buscar como se hacen):
# * descargar contenidos de una URL
# * convertir texto a minusculas
# * dividir una cadena de texto
#
# ...
# # Título: cheatsheet de markdown
#
# ## Título 2
#
# **negrita**
#
# *cursiva*
#
# `código`
#
# * lista
# * lista
# <NAME>
#
# * 🖥: https://ricardoanderegg.com/
# * 🐦: https://twitter.com/ricardoanderegg
# * 👨🏻🎓: https://www.linkedin.com/in/ricardoanderegg/
# * https://docs.python.org/3.7/library/collections.html#collections.Counter
| colecciones_avanzadas/00_colecciones_avanzadas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Ejercicios con Python</b>
#
# <ol>
# <li>
# Escribir una función de Python que encuentre los números primos en los primeros N números naturales.
# <ul>
# <li>N es el argumento de la función.</li>
# <li>Llamar la función desde el código principal.</li>
# </ul>
# </li>
# <li>
# Escribir una función de Python que encuentre el producto cruz en vectores de 3 componentes.
# <ul>
# <li>La función recibe dos listas (vectores).</li>
# <li>La función regresa un tercer vector (lista) con el resultado.</li>
# <li>Llamar a la función desde el código principal.</li>
# </ul>
# </li>
# <li>
# Re-escribir el problema 2, pero ahora los vectores son leídos desde un archivo de texto.
# </li>
# </ol>
import math
def findPrimes(n):
    """Return the list of all prime numbers in ``[2, n]``.

    Uses trial division up to the integer square root of each candidate.
    Returns an empty list for ``n < 2``.
    """
    primes = []
    for candidate in range(2, n + 1):
        # math.isqrt is exact for arbitrarily large ints, unlike
        # int(math.sqrt(i)) which can be off by one from float rounding.
        limit = math.isqrt(candidate)
        for divisor in range(2, limit + 1):
            if candidate % divisor == 0:
                break
        else:
            # No divisor found: candidate is prime.
            primes.append(candidate)
    return primes
def crossProductA(a, b):
    """Return the cross product ``a x b`` of two 3-component vectors (lists)."""
    return [a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0]]

def crossProductB():
    """Read two comma-separated 3-vectors from ``vectors.txt`` (one per line)
    and return their cross product as a list of floats.

    BUG FIX: the file handle was previously opened and never closed;
    ``with`` now guarantees it is released even if parsing fails.
    """
    with open("vectors.txt", "r") as file:
        linea1 = file.readline().strip()
        linea2 = file.readline().strip()
    vector1 = [float(x) for x in linea1.split(",")]
    vector2 = [float(x) for x in linea2.split(",")]
    return crossProductA(vector1, vector2)
# Exercise 1: primes among the first 100 natural numbers.
primes100 = findPrimes(100)
print(primes100)
# Exercise 2: cross product of two hard-coded 3-vectors.
vectorA = [1, 2, 3]
vectorB = [4, 5, 6]
producto = crossProductA(vectorA, vectorB)
print(producto)
# Exercise 3: cross product of the two vectors read from vectors.txt.
print(crossProductB())
| ElArteDeLaAnalitica.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "slide"}
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
# + [markdown] slideshow={"slide_type": "fragment"}
# # Загрузим данные и отделим столбец калорий
# + slideshow={"slide_type": "fragment"}
data_train = pd.read_excel('./../description/train.xlsx')
data_test = pd.read_excel('./../description/test.xlsx')
# + slideshow={"slide_type": "fragment"}
Y = data_train['Energ_Kcal']
X = data_train.drop(['Energ_Kcal'], axis=1)
X.head()
# + [markdown] slideshow={"slide_type": "slide"}
# # Создадим общую таблицу для анализа данных
# + slideshow={"slide_type": "fragment"}
# BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported way to stack the train and test frames.
data = pd.concat([X, data_test], ignore_index=True)
data.head()
# + slideshow={"slide_type": "subslide"}
data.info()
# + [markdown] slideshow={"slide_type": "slide"}
# # Удалим последние 5 столбцов, они на калорийность не влияют
# + slideshow={"slide_type": "fragment"}
data = data.iloc[:, :-5]
# + slideshow={"slide_type": "subslide"}
data.describe().T
# + slideshow={"slide_type": "subslide"}
data.dtypes
# + [markdown] slideshow={"slide_type": "slide"}
# # Подсчет числа пустых ячеек в столбцах
# + slideshow={"slide_type": "fragment"}
data.isnull().sum(axis=0).sort_values(ascending=False).head(10)
# + [markdown] slideshow={"slide_type": "slide"}
# # Посмотрим распределение количества пропущенных значений
# + slideshow={"slide_type": "subslide"}
fig = plt.figure(figsize=(10,10))
fig = (X.isnull().sum(axis=0)/X.shape[0]).plot()
fig = (data_test.isnull().sum(axis=0)/data_test.shape[0]).plot()
fig
# + [markdown] slideshow={"slide_type": "slide"}
# # Посмотрим число уникальных значений
# + slideshow={"slide_type": "fragment"}
columns = data.columns.tolist()
unique_analyse = pd.DataFrame(columns=columns[1:], index=['count_unique_values', 'percentage'])
for index in range(1, len(columns)):
data_column_stat = data[columns[index]].value_counts()
unique_values = len(data_column_stat)
most_popular_values = data_column_stat.iloc[0] / data_column_stat.sum()
unique_analyse[columns[index]] = [unique_values, round(most_popular_values * 100,2)]
unique_analyse.T.sort_values(by='percentage', ascending=False).head(10)
# + slideshow={"slide_type": "slide"}
unique_analyse.T.sort_values(by='count_unique_values').head(10)
# + slideshow={"slide_type": "fragment"}
low_unique = unique_analyse.T.sort_values(by='percentage', ascending=False).head(10).index
# + slideshow={"slide_type": "fragment"}
data[low_unique].describe().T
# + slideshow={"slide_type": "subslide"}
data[low_unique].nunique()
# + [markdown] slideshow={"slide_type": "slide"}
# # Посмотрим схожие данные
# + slideshow={"slide_type": "fragment"}
x = data[['Vit_D_µg', 'Vit_D_IU']]
x.corr()
# + slideshow={"slide_type": "fragment"}
x = data[['Vit_A_RAE', 'Vit_A_IU', 'Retinol_(µg)']]
x.corr()
# + slideshow={"slide_type": "fragment"}
c = data.corr().abs()
k = 0
max_corr = pd.DataFrame(columns=[1,2,3])
for i in c.columns:
for j in c.columns:
if i != j:
max_corr.loc[k] = [i, j, c[i][j]]
k += 1
# + slideshow={"slide_type": "subslide"}
max_corr.sort_values(by=3).tail(15)
# + slideshow={"slide_type": "subslide"}
x = data[['Folate_Tot_(µg)', 'Folate_DFE_(µg)', 'Folic_Acid_(µg)']]
x.corr()
# + slideshow={"slide_type": "subslide"}
data = data.drop('Folate_DFE_(µg)', axis=1)
data = data.drop('Vit_D_IU', axis=1)
data = data.drop('Vit_A_RAE', axis=1)
data.head()
# + [markdown] slideshow={"slide_type": "slide"}
# # Посмотрим теперь корреляцию на тренировочных данных относительно целевой переменной
# + slideshow={"slide_type": "subslide"}
correlation = data_train.corr()['Energ_Kcal'].sort_values()
print('Наивысшая позитивная корреляция: \n', correlation.tail(15))
print('\nНаивысшая негативная корреляция: \n', correlation.head(15))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Таким образом мы видим как различные столбцы влияют на целевую переменную. Проанализируем полиномиальные свойства основных корреляционных величин.
# + slideshow={"slide_type": "fragment"}
poly_target = Y
col = ['Lipid_Tot_(g)', 'FA_Mono_(g)', 'FA_Sat_(g)', 'FA_Poly_(g)', 'Carbohydrt_(g)', 'Water_(g)']
poly_features = data[col]
# BUG FIX: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# SimpleImputer from sklearn.impute is its drop-in replacement here.
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy='median')
poly_features = imputer.fit_transform(poly_features)
from sklearn.preprocessing import PolynomialFeatures
poly_transformer = PolynomialFeatures(degree=3)
poly_transformer.fit(poly_features)
poly_features = poly_transformer.transform(poly_features)
#print(poly_transformer.get_feature_names())
poly_transformer.get_feature_names(input_features = col)[:15]
poly_features = pd.DataFrame(poly_features,
columns = poly_transformer.get_feature_names(col))
poly_features['Energ_Kcal'] = poly_target
corr = poly_features.corr()['Energ_Kcal'].sort_values()
print(corr.head(10))
print(corr.tail(10))
# + slideshow={"slide_type": "subslide"}
data_train.plot.scatter(x='Water_(g)', y='Energ_Kcal')
# + slideshow={"slide_type": "-"}
far_values = data_train.fillna(0).iloc[:, :-6]
far_values['check'] = far_values['Water_(g)'] + data_train['Energ_Kcal']
far_values.sort_values(by='check', ascending=False).tail(4)
# + slideshow={"slide_type": "-"}
indx = far_values.sort_values(by='check', ascending=False).tail(4).index
# Print the 'check' value and the non-zero columns for each outlier row.
for i in indx:
    print(far_values.loc[i]['check'])
    # BUG FIX: `tr` was never defined (NameError); iterate the columns of
    # far_values, the frame actually being inspected — TODO confirm intent.
    for j in far_values.columns:
        if far_values.loc[i][j] != 0:
            print(j)
# + slideshow={"slide_type": "subslide"}
data_train.plot.scatter(x='Lipid_Tot_(g)', y='Energ_Kcal')
# + slideshow={"slide_type": "subslide"}
fig = plt.figure(figsize=(9,9))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(data_train['Water_(g)'], data_train['Lipid_Tot_(g)'], data_train['Energ_Kcal'])
# + [markdown] slideshow={"slide_type": "slide"}
# # Переведем все в граммы
# + slideshow={"slide_type": "fragment"}
data['Vit_A(g)'] = data['Vit_A_IU'] * 0.3 / 100000
data = data.drop('Vit_A_IU', axis=1)
# + slideshow={"slide_type": "fragment"}
# Convert every µg / mg column of the combined table to grams.
columns_gr = data.columns
for i in columns_gr:
    if i.find('µg') != -1:
        new_col = i.replace('µg', 'g')
        # BUG FIX: the values must come from `data` (the combined
        # train+test table), not from `X` (train only) — the original
        # silently produced NaN for every test row.
        # NOTE(review): 1 g = 1_000_000 µg, so the divisor should arguably
        # be 1e6, not 1e5; kept as-is to avoid changing downstream results —
        # confirm with the data owner.
        data[new_col] = data[i] / 100000
        data = data.drop(i, axis=1)
    if i.find('mg') != -1:
        new_col = i.replace('mg', 'g')
        data[new_col] = data[i] / 1000
        data = data.drop(i, axis=1)
data.columns
# + [markdown] slideshow={"slide_type": "fragment"}
# Проверим, что мы ничего не потеряли
# + slideshow={"slide_type": "fragment"}
Y.shape
# + slideshow={"slide_type": "fragment"}
data_test.shape
# + slideshow={"slide_type": "fragment"}
data.shape
# + slideshow={"slide_type": "fragment"}
out = data.loc[:Y.shape[0]-1]
out = out.join(Y)
out.to_csv('main_train.csv', index=False)
# + slideshow={"slide_type": "fragment"}
out = data.loc[Y.shape[0]:]
out.to_csv('main_test.csv', index=False)
# -
| 1/.ipynb_checkpoints/main-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
df= pd.read_csv('player2.csv')
df.head()
df.drop_duplicates(inplace=True)
df.isnull().sum()
df.duplicated().sum()
df.columns
df.shape
df.index
df.info()
df.columns
df.count()
df.loc[[0],['Name']]
df.describe().T
# # work on the name
df['Name'].head()
df['Name'].describe()
df['Name'].nunique()
df['Name'].unique()
df[df['Name']=='<NAME>']
df['Name'].value_counts().head()
df[df['Name']=='Paulinho']
df['Name'].value_counts().tail()
df[df['Name']=='<NAME>']
# # work on the Position
df['Position'].head()
df['Position'].describe()
df['Position'].nunique()
df['Position'].unique()
df[df['Position']=='Right Winger']
df['Position'].value_counts().head()
df[df['Position']=='Defensive Midfield']
df['Position'].value_counts().tail()
df[df['Position']=='Defender']
# # work on the Age
df['Age'].head()
df['Age'].describe()
df['Age'].nunique()
df['Age'].unique()
df[df['Age']==22]
df[df['Age']==df['Age'].max()]
df[df['Age']==df['Age'].min()]
plt.hist(df['Age'],bins=10)
plt.show()
plt.plot(df['Position'],df['Age'])
plt.show()
df[df['Age']>25]
# # work in the Team_from
df['Team_from'].head()
df['Team_from'].describe()
df['Team_from'].nunique()
df['Team_from'].unique()
df['Team_to'].value_counts().head(10)
df[df['Team_from']=='Real Madrid']
df[df['Name']=='Ronaldo']
df['Team_from'].value_counts().tail(10)
df[df['Team_from']=='Al-Ahli (UAE)']
df[df['Team_to']=='FC Barcelona']['Position'].value_counts()
df[df['Team_to']=='FC Barcelona']['Age'].value_counts()
df[df['Team_to']=='Real Madrid']['Age'].value_counts()
# # work in the League_from
df['League_from'].head()
df['League_from'].describe()
df['League_from'].value_counts().head()
df[df['Age']==df['Age'].max()]['League_from']
# # work on the Season
df['Season'].head()
df['Season'].describe()
# # work on the Market_value
df['Market_value'].head()
df['Market_value'].describe()
df['Market_value'].max()
df['Market_value'].min()
df[df['Market_value']==df['Market_value'].max()]['Name']
df[df['Name']=='<NAME>??'].head(4213)
plt.plot(df['Market_value'],df['Season'])
plt.show()
plt.scatter(df['Season'],df['Market_value'])
plt.show()
# # work on the Transfer_fee
df['Transfer_fee'].head()
df['Transfer_fee'].describe()
df[df['Transfer_fee']==df['Transfer_fee'].max()]['Name']
plt.plot(df['Transfer_fee'],df['Season'])
plt.show()
| 2 Football Transfer/Project2_V2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SeCaPr - Sequence Capture Processor
# A computational pipeline for processing Illumina sequence capture data [](http://bioconda.github.io/recipes/secapr/README.html)
#
# ***
#
# ## Documentation
# The documentation, including an empirical data tutorial, is divided into the following steps (click on the links):
# - [Cleaning and adapter trimming](subdocs/cleaning_trimming.ipynb)
# - [De-novo contig assembly](subdocs/contig_assembly.ipynb)
# - [Extract target contigs](subdocs/extract_contigs.ipynb)
# - [Align contigs](subdocs/align_contigs.ipynb)
# - [Reference-based assembly](subdocs/reference_assembly.ipynb)
# - [Locus selection](subdocs/locus_selection.ipynb)
# - [Phasing alleles](subdocs/phasing.ipynb)
# - [Phylogeny estimation](subdocs/phylogeny_msc.ipynb)
#
#
# ***
#
# ## Installation & Setup
# SECAPR is available as a conda package on the bioconda channel. This makes installation very simple. Follow the instructions on this page to get the SECAPR pipeline set up and ready to use:
#
# <div class="alert alert-block alert-info">
# **INFO:** Commands in blue boxes have to be executed from a bash-command line terminal.
# </div>
#
#
# ### 1. Install conda
#
# Download the **Python2.7 version** of [Miniconda](https://conda.io/miniconda.html) and install it by executing the downloaded sh-file (see commands below). Conda is a software and environment manager, that makes installation of new software and of required dependencies very simple and straightforward.
#
# *Download conda (MacOS 64bit):*
# <div class="alert alert-block alert-info">
# wget https://repo.continuum.io/miniconda/Miniconda2-latest-MacOSX-x86_64.sh
# </div>
#
# *Download conda (Linux 64bit):*
# <div class="alert alert-block alert-info">
# wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh
# </div>
#
# *Download conda (Linux 32bit):*
# <div class="alert alert-block alert-info">
# wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86.sh
# </div>
#
# *Install conda:*
# <div class="alert alert-block alert-info">
# sh Miniconda2-latest-*.sh
# </div>
#
# *Add Bioconda channels (containing bioinformatics software):*
# <div class="alert alert-block alert-info">
# conda config --add channels defaults; conda config --add channels conda-forge; conda config --add channels bioconda; conda config --add channels https://conda.anaconda.org/faircloth-lab
# </div>
#
#
# ***
#
# ### 2. Install the SECAPR environment
# Conda automatically downloads and installs all necessary software dependencies. We strongly recommend to **install SECAPR and all it's dependencies in a separate virtual environment**, in order to not interfer with potentially already installed verisons of the software dependencies.
#
# *Install SECAPR in virtual environment (here named `secapr_env`):*
# <div class="alert alert-block alert-info">
# conda create -n secapr_env secapr
# </div>
#
# Alternatively you can also just plainly install the software on your computer (without creating an environment) by clicking on the icon below and following the instructions (**not recommended!**):
#
# [](http://bioconda.github.io/recipes/secapr/README.html)
#
# ***
#
# ### 3. Activate the environment
# To activate the newly created environment, type:
#
# *Activate environment*:
# <div class="alert alert-block alert-info">
# source activate secapr_env
# </div>
#
# When the environment is activated, all the necessary software dependencies will be available in the standarad path, e.g. when you type `samtools` the samtools version required for SECAPR will be executed. After you are done using secapr, you can deactivate the environment to switch back to your standard environment with this command:
#
# *De-activate environment*:
# <div class="alert alert-block alert-info">
# source deactivate
# </div>
#
# ***
#
# ### 4. Check active environment
# Check if you are connected to the correct environment (there should be a star in front of secapr_env in the output of this command):
#
# *Check active environment*:
# <div class="alert alert-block alert-info">
# conda info --envs
# </div>
#
#
# <div class="alert alert-block alert-warning">IMPORTANT : When you are using the SECAPR pipeline, make sure the secapr_env is activated. Activate with **source activate secapr_env**
# </div>
#
# ***
#
# ### 5. Install SECAPR development version
#
# The development version of SECAPR is stored on this GitHub page and contains the newest updates, which might not yet be available through the conda version. However you need to install the SECAPR environment with conda first by following the steps above. Once the environment is installed, you can update SECAPR to the development version by following these steps:
#
# 1. Connect to your secapr environment (`source activate secapr_env`)
# 2. Remove the current secapr installation (`conda remove secapr`)
# 3. Download the new version from github (`wget https://github.com/AntonelliLab/seqcap_processor/archive/master.zip`)
# 4. Unzip the downloaded file (`unzip master.zip`)
# 5. Move the unzipped directory to a safe location on your computer, i.e. not on your Desktop or Download folder, since this will be the path where secapr will be executed from in the future
# 6. Enter the unzipped secapr directory (`cd seqcap_processor-master`)
# 7. Install secapr from the folder (`python -m pip install -e .`)
| docs/notebook/.ipynb_checkpoints/main_doc-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Boolean Generator
# This notebook will show how to use the boolean generator to generate a boolean combinational function. The function that is implemented is a 2-input XOR.
# ### Step 1: Download the `logictools` overlay
# +
from pynq.overlays.logictools import LogicToolsOverlay
logictools_olay = LogicToolsOverlay('logictools.bit')
# -
# ### Step 2: Specify the boolean function of a 2-input XOR
# The logic is applied to the on-board pushbuttons and LED, pushbuttons **PB0** and **PB3** are set as inputs and LED **LD2** is set as an output
function = ['LD2 = PB3 ^ PB0']
# ### Step 3: Instantiate and setup of the boolean generator object.
# The logic function defined in the previous step is setup using the `setup()` method
boolean_generator = logictools_olay.boolean_generator
boolean_generator.setup(function)
# __Find the On-board pushbuttons and LEDs__
# 
# ### Step 4: Run the boolean generator verify operation
boolean_generator.run()
# #### Verify the operation of the XOR function
#
# | PB0 | PB3 | LD2 |
# |:---:|:---:|:---:|
# | 0 | 0 | 0 |
# | 0 | 1 | 1 |
# | 1 | 0 | 1 |
# | 1 | 1 | 0 |
# ### Step 5: Stop the boolean generator
boolean_generator.stop()
# ### Step 6: Re-run the entire boolean function generation in a single cell
# **Note**: The boolean expression format can be `list` or `dict`. We had used a `list` in the example above. We will now use a `dict`.
# <font color="DodgerBlue">**Alternative format:**</font>
# ```python
# function = {'XOR_gate': 'LD2 = PB3 ^ PB0'}
# ```
# +
from pynq.overlays.logictools import LogicToolsOverlay
logictools_olay = LogicToolsOverlay('logictools.bit')
boolean_generator = logictools_olay.boolean_generator
function = {'XOR_gate': 'LD2 = PB3 ^ PB0'}
boolean_generator.setup(function)
boolean_generator.run()
# -
# __Stop the boolean generator__
boolean_generator.stop()
| boards/Pynq-Z2/logictools/notebooks/boolean_generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Using the RDP Classifier to add taxonomic classification to the sequences.**
#
# **Taxonomic classification of Diptera sequences using RDP classifier**
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
#
# # Executing RDP classifier, this includes the setting of the maximum size or memeory -Xmx**** to 80g
# #java -Xmx80g -jar ~/bioinformatics/github/co1_metaanalysis/code/tools/RDPTools/classifier.jar classify -c 0.8 -t ~/bioinformatics/github/co1_metaanalysis/code/tools/CO1Classifierv3.2/mydata_trained/rRNAClassifier.properties -o enafroCOI_all_clean_Diptera_taxa1 enafroCOI_all_clean_Diptera.fasta
#
# source ../../../code/classify.sh
# RDPclassifyCOI enafroCOI_all_clean_Diptera_95g.fasta enafroCOI_all_clean_Diptera.fasta enafroCOI_diptera_all_data.fasta << EOF
# 80g
# 80g
# 80g
# EOF
#
# ls
# -
# **Reformating the taxa file to tsv**
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# source ../../../code/classify.sh
#
# RDPcoiresults2tsv enafroCOI_all_clean_Diptera_taxa enafroCOI_diptera_all_data_taxa
#
# head -10 enafroCOI_all_clean_Diptera_taxa.tsv
# -
# **Simple Statistical analysis using R**
# This exploits an Rscript whose details have been explaiined in [RDPclassifier_results.ipynb](./RDPclassifier_results.ipynb)
# It Generates 3 files:
# 1. .*families.tsv - contains a list of all families predifined and assigned at a bootstrap score of 1 and from sequences of >= 500 seqeunce length
# 2. .*genera.tsv - contains a list of all genera predifined and assigned at a bootstrap score of 1 and from sequences of >= 500 seqeunce length
# 3. .*clash.tsv - contains a data frame of all sequnces with unmatched predefined and assigned species at a bootstrap of 1.0 or sequence length of >= 500
#
# ">=" - greater or equals to
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# Rscript --vanilla ../../../code/RDPresults_eval.R enafroCOI_all_clean_Diptera_taxa.tsv
# Rscript --vanilla ../../../code/RDPresults_eval.R enafroCOI_diptera_all_data_taxa.tsv
# -
# ### **HEADERs**
# **Editting headers**
# As a result of RDPclassification:
# 1. Some records will have taxons assigned to them
# 2. Some will have assigned taxons that mismatch the predefined ones
#
# To deal with this headers are generated from the RDP classifation results and attention paid to only those records with over 500 nucleotides and species assignment bootstrap of 1.0
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# . ~/bioinformatics/github/co1_metaanalysis/code/classify.sh
#
# RDPcoiheaders enafroCOI_all_clean_Diptera_taxa_clash.tsv enafroCOI_all_clean_Diptera_taxa.tsv enafroCOI_diptera_all_data_taxa.tsv << EOF
# 500
# 1
# 500
# 1
# 500
# 1
# EOF
#
# ls
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# . ~/bioinformatics/github/co1_metaanalysis/code/process_all_input_files.sh
#
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# . ~/bioinformatics/github/co1_metaanalysis/code/process_all_input_files.sh
# awk -F'[>|]' '/^>/{print $2}' enafroCOI_all_clean_Diptera_taxa_clash_headers > process_IDs
#
# cp enafroCOI_all_clean_Diptera.fasta infile.fasta
# delete_unwanted infile.fasta << EOF
# 1
# process_IDs
# 4
# EOF
# mv infile_unwanted.fasta enafroCOI_all_clean_Diptera_taxa_clash.fasta
# rm process_IDs infile.fasta
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# . ~/bioinformatics/github/co1_metaanalysis/code/process_all_input_files.sh
#
# replacing_headers enafroCOI_all_clean_Diptera_taxa_clash.fasta << EOF
# enafroCOI_all_clean_Diptera_taxa_clash_headers
# EOF
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# #removing gaps. The output is "input_dgpd.fasta", without gaps, "-".
# . ~/bioinformatics/github/co1_metaanalysis/code/process_all_input_files.sh
# remove_gaps infile_unwanted.fasta
# #Introducing a field "l-xxx" that has the length of the sequence in the header
# awk '/^>/{hdr=$0; next}
# { seq=$0 } match(seq,/^.*$/) { LEN=RLENGTH }
# { print hdr"|l-"LEN; print seq }' infile_unwanted_dgpd.fasta > infile_unwanted_dgpd_edited.fasta
#
# mv infile_unwanted_dgpd_edited.fasta infile_unwanted.fasta
# rm infile_unwanted_dgpd.fasta
# grep ">" infile_unwanted.fasta > infile_headers_fasta
# head -20 infile_headers_fasta
# + language="bash"
# cd ~/bioinformatics/github/co1_metaanalysis/data/input/diptera
# #substituting headers from infile_headers_fasta into all *diptera*.fasta files.
# . ~/bioinformatics/github/co1_metaanalysis/code/process_all_input_files.sh
# replacing_headers diptera_clean_headers_fasta *diptera*.fasta << EOF
# infile_headers_fasta
# EOF
# #cleaning up residual files
# mv infile_headers_fasta dipt99_headers_fasta
# rm -rf infile.fasta infile_unwanted.fasta outputfile process_IDs sedeW4xeJ
| code/RDPclassifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import pydeck as pdk
# !jupyter nbextension install --sys-prefix --symlink --overwrite --py pydeck
# !jupyter nbextension enable --sys-prefix --py pydeck
# !jupyter nbextension enable --py widgetsnbextension --sys-prefix
# API KEY 입력(MapBox Access Token 발급 필요)
MAPBOX_API_KEY = "MapBox API Key"
# 데이터 불러오기
gps = pd.read_csv('C:/Users/anhis/data/tri_bike_parts1.csv')
# 데이터 샘플링 (50만개)
gps_sample = gps.sample(500000)
gps_sample=gps_sample[['latitude', 'longitude', 'h3_9_lng', 'h3_9_lat']]
gps_sample.head()
# +
view_state = pdk.ViewState(
longitude=126.986,
latitude=37.565,
zoom=10,
min_zoom=5,
max_zoom=15,
pitch=40.5
)
r = pdk.Deck(
map_style='mapbox://styles/mapbox/dark-v10',
initial_view_state=view_state,
layers=[
pdk.Layer(
'HexagonLayer',
data=gps_sample,
get_position='[h3_9_lng, h3_9_lat]',
get_fill_color='[255, 255, 120]',
radius=200,
elevation_scale=4,
elevation_range=[0, 1000],
pickable=True,
extruded=True,
),
],
)
r.show()
# +
view_state = pdk.ViewState(
longitude=126.986,
latitude=37.565,
zoom=10,
min_zoom=5,
max_zoom=15,
pitch=40.5
)
r = pdk.Deck(
map_style='mapbox://styles/mapbox/dark-v10',
initial_view_state=view_state,
layers=[
pdk.Layer(
'ScatterplotLayer',
data=gps_sample,
get_position='[longitude, latitude]',
get_radius=50,
get_fill_color='[255, 255, 255]',
pickable=True,
auto_highlight=True
),
],
)
r.show()
# +
view_state = pdk.ViewState(
longitude=126.986,
latitude=37.565,
zoom=10,
bearing=0,
pitch=0,
min_zoom=5,
max_zoom=15,
)
r = pdk.Deck(
map_style='mapbox://styles/mapbox/dark-v10',
initial_view_state=view_state,
layers=[
pdk.Layer(
'ScreenGridLayer',
data=gps_sample,
get_position='[longitude, latitude]',
cellSizePixels = 10,
get_radius=50,
get_fill_color='[255, 255, 255]',
pickable=True,
auto_highlight=True
),
],
)
r.show()
# -
| Data_Visualization/GPS_PYDECK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="O2aBT2GeQhb_"
import plotly.express as px
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="EemhDOKVQsPp" outputId="a06e1f94-8205-436d-94e9-f348759666c8"
penguins = pd.read_csv('https://raw.githubusercontent.com/Ramseths/data_visualization_workshop/main/data/penguins.csv')
histograma = px.histogram(data_frame=penguins, x = 'Body Mass (g)', nbins = 5)
histograma.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="8i4z4023Rih_" outputId="7089c8f6-82a3-46c6-bb1f-4e1853b55222"
caja_bigote = px.box(data_frame=penguins, y = 'Flipper Length (mm)', title='Diagrama de Caja')
caja_bigote.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="AkFGx76MSHAq" outputId="38c5d678-feaa-43af-f7d8-214651872305"
estudiantes = pd.read_csv('https://raw.githubusercontent.com/Ramseths/data_visualization_workshop/main/data/score_estudiantes.csv')
estudiantes.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="rTO_k0lST76G" outputId="557a0f8a-207b-4bba-e05f-14d533a09d50"
fig_color = px.bar(data_frame=estudiantes, x = 'nombre_estudiante', y = 'puntuacion', title='Puntaje por estudiantes',
color = 'ciudad', color_discrete_map = {'Irapuato' : 'rgb(255, 0, 0)', 'Celaya' : 'rgb(254, 255, 0)'})
fig_color.show()
# + id="p1r4uq9wUdGO"
temp_semanales = pd.DataFrame({'dia':['Lunes','Martes','Miercoles','Jueves','Viernes','Sabado','Domingo'], 'temp':[26,27,25,32,34,25,29]})
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="hveohWAEWuIs" outputId="cef1bcd0-6864-4ab8-e9bb-8f90a25e2b80"
temp_semanales
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="5v1aGtfQWvIZ" outputId="2079252f-356f-4268-a676-5cca8b8af818"
escala = [('rgb(255, 0, 0)'),
('rgb(255, 100, 100)'),
('rgb(200, 200, 0)')]
fig_px = px.bar(data_frame=temp_semanales, x = 'dia', y = 'temp', color_continuous_scale = escala, color = 'temp')
fig_px.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="NNg6cCdHYa8S" outputId="df2e161d-eb63-4398-a7a0-f8771f4fd78e"
# Gráfico de dispersión
fig_dispersion = px.scatter(data_frame=penguins, x='Body Mass (g)', y = 'Flipper Length (mm)', title = 'Gráfico de Dispersión')
fig_dispersion.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="voNB9HmNjPFP" outputId="b5b08a6a-a592-4069-d0e7-3a7d1cc4a26c"
# Gráfico de Líneas
msft = pd.read_csv('https://raw.githubusercontent.com/Ramseths/data_visualization_workshop/main/data/MSFT.csv')
msft.head()
fig_lineas = px.line(data_frame=msft, x = 'Date' , y = 'Open', title = 'MSFT Stock Price (5 años)')
fig_lineas.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="VYSapRVHkVAp" outputId="8a7d48fc-6e3e-4c5d-b992-2598be5d2396"
import plotly.graph_objects as go
# Same penguin scatter as above, but built with the lower-level graph_objects API.
# NOTE(review): `penguins` is assumed to be loaded in an earlier cell — confirm.
dispersion_go = go.Figure(
go.Scatter(x=penguins['Body Mass (g)'],
y=penguins['Flipper Length (mm)'],
mode = 'markers')
)
dispersion_go.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="ZcpWJjgUlUhq" outputId="007cfc53-0f24-44aa-ab3c-366d2d660011"
# Line version of the MSFT series using graph_objects (mode='lines').
dispersion_go_lines = go.Figure(
go.Scatter(x=msft['Date'],
y=msft['Open'],
mode = 'lines')
)
dispersion_go_lines.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="XY7WTcKzlyZy" outputId="0945b7f8-4ccb-491d-f216-b3d7dad027a7"
# Seoul bike-sharing dataset, used below for the correlation heatmap.
df_bike = pd.read_csv('https://raw.githubusercontent.com/Ramseths/data_visualization_workshop/main/data/seoul_bike_data.csv')
df_bike.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="zsIf0u05mqqP" outputId="b2741b04-0707-4e1d-a7cb-311702e475e0"
# Pearson correlation matrix of the numeric columns.
correlacion = df_bike.corr(method='pearson')
correlacion.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="eZDTZS5Fm-rU" outputId="8fa06e6b-15be-4c99-b355-f43cd0b5b453"
# Heatmap of the correlation matrix; color range pinned to [-1, 1] so the
# diverging 'rdylgn' scale is centered on zero correlation.
fig_correlacion = go.Figure(
go.Heatmap(
x = correlacion.columns,
y = correlacion.columns,
z = correlacion.values.tolist(),
colorscale = 'rdylgn', zmin = -1, zmax = 1
)
)
fig_correlacion.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Vsz8_HKYn14e" outputId="026b85c7-c947-45d3-b3ea-564cbcdbf151"
# Scatter plot with extra hover columns and categorical coloring
# (translated from: "Gráfico de dispersión con Hover Data").
fig_dispersion = px.scatter(data_frame=penguins, x='Body Mass (g)', y = 'Flipper Length (mm)', title = 'Gráfico de Dispersión',
hover_data = ['Individual ID', 'Sex'], color = 'Clutch Completion')
fig_dispersion.show()
# + id="e1ULPlXMpMz-"
| notebooks/dia_dos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Image pyramids
#
# Take a look at how downsampling with image pyramids works.
#
# First, we'll read in an image then construct and display a few layers of an image pyramid.
# +
import numpy as np
import matplotlib.pyplot as plt
import cv2
# %matplotlib inline
# Read in the image
image = cv2.imread('images/rainbow_flag.jpg')
# Change color to RGB (from BGR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
# +
# Each cv2.pyrDown call Gaussian-blurs the image and halves it in both
# dimensions, producing successive layers of the image pyramid.
level_1 = cv2.pyrDown(image)
level_2 = cv2.pyrDown(level_1)
level_3 = cv2.pyrDown(level_2)
# Display the images
f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10))
ax1.set_title('original')
ax1.imshow(image)
ax2.imshow(level_1)
# Fix every axis to the original image's extent so the shrinking size of
# each pyramid level is visible at a glance.
ax2.set_xlim([0, image.shape[1]])
ax2.set_ylim([image.shape[0], 0])
ax3.imshow(level_2)
ax3.set_xlim([0, image.shape[1]])
ax3.set_ylim([image.shape[0], 0])
ax4.imshow(level_3)
ax4.set_xlim([0, image.shape[1]])
ax4.set_ylim([image.shape[0], 0])
# -
| 4. Feature Vectors/Image Pyramids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('base')
# language: python
# name: python3
# ---
# ### Comparison of the GOES/XRS responses used in sswidl
# From the older CHIANTI v9 with wrong factors in GOES13/15 short
# https://hesperia.gsfc.nasa.gov/ssw/gen/idl/synoptic/goes/goes_chianti_resp_20200812.fits
#
# And the newer CHIANTI v10 which should be correct
# https://hesperia.gsfc.nasa.gov/ssw/gen/idl/synoptic/goes/goes_chianti_response_latest.fits
#
# * 24-05-2022 IGH
# +
from astropy.io import fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import goesxrs_temp as gtem
import warnings
warnings.simplefilter('ignore')
# Just setup plot fonts
plt.rcParams.update({'font.size': 18,'font.family':"sans-serif",\
'font.sans-serif':"Arial",'mathtext.default':"regular"})
# -
# ---
# For GOES 15 there should be minimal change for the long channel (only from CHIANTI v9 to v10) but more for the short channel (as the wrong factor is also removed)
#
# Is the same for GOES 13 and 14
# +
def plot_resp_comparison(sat):
    """Compare new (CHIANTI v10) and old (v9) GOES/XRS responses for one satellite.

    Left panel: absolute long/short channel responses, new (solid) vs old
    (dashed). Middle panel: new/old ratio per channel. Right panel:
    short/long channel ratio for both versions.

    Shows the figure as a side effect; returns nothing.

    This helper replaces three copy-pasted cells (GOES 15, 16, 12) that
    differed only in the satellite number.
    """
    # Load both response versions for this satellite.
    resps_old, resptmk_old = gtem.get_resps(sat=sat, old_ver=True)
    resps, resptmk = gtem.get_resps(sat=sat)

    fig = plt.figure(figsize=(23, 6))

    # Absolute responses; response temperatures are in MK, hence the 1e6 factor.
    ax1 = fig.add_subplot(1, 3, 1)
    plt.plot(1e6*resptmk, resps[:, 0], marker=None, color='firebrick', lw=2, label='$1-8\;\AA$ New')
    plt.plot(1e6*resptmk, resps[:, 1], marker=None, color='darkblue', lw=2, label='$0.5-4\;\AA$ New')
    plt.plot(1e6*resptmk_old, resps_old[:, 0], marker=None, ls='--', color='firebrick', lw=2, label='$1-8\;\AA$ Old')
    plt.plot(1e6*resptmk_old, resps_old[:, 1], marker=None, ls='--', color='darkblue', lw=2, label='$0.5-4\;\AA$ Old')
    ax1.set_ylabel("Response [$\mathrm{10^{-55}\;Wm^{-2}cm^{3}}$]")
    ax1.set_xlabel("Temperature [K]")
    ax1.set_xscale("log")
    ax1.set_yscale("log")
    plt.legend(title='XRS{:.0f}'.format(sat))

    # New/old ratio per channel; grey dashed line marks a ratio of 1.
    ax2 = fig.add_subplot(1, 3, 2)
    plt.plot(1e6*resptmk, resps[:, 0]/resps_old[:, 0], marker=None, color='firebrick', lw=2, label='$1-8\;\AA$')
    plt.plot(1e6*resptmk, resps[:, 1]/resps_old[:, 1], marker=None, color='darkblue', lw=2, label='$0.5-4\;\AA$')
    plt.plot([1e6*np.min(resptmk), 1e6*np.max(resptmk)], [1, 1], '--', color='grey')
    plt.ylim([0, 3])
    ax2.set_ylabel("New/Old")
    ax2.set_xlabel("Temperature [K]")
    ax2.set_xscale("log")
    ax2.set_yscale("linear")
    plt.legend()

    # Short/long channel ratio (the isothermal temperature diagnostic).
    ax3 = fig.add_subplot(1, 3, 3)
    plt.plot(1e6*resptmk, resps[:, 1]/resps[:, 0], marker=None, color='rebeccapurple', lw=2, label='New')
    plt.plot(1e6*resptmk, resps_old[:, 1]/resps_old[:, 0], marker=None, color='rebeccapurple', ls='--', lw=2, label='Old')
    ax3.set_ylabel("$(0.5-4\;\AA)$/$(1-8\;\AA)$")
    ax3.set_xlabel("Temperature [K]")
    ax3.set_xscale("log")
    ax3.set_yscale("log")
    plt.legend()
    plt.show()


# Load in and plot responses for GOES 15
plot_resp_comparison(15)
# -

# ---
# For GOES 16 should be minimal change - only from CHIANTI v9 to v10

# +
# Load in and plot responses for GOES 16
plot_resp_comparison(16)
# -

# ---
# For GOES 12 should be minimal change - only from CHIANTI v9 to v10

# +
# Load in and plot responses for GOES 12
plot_resp_comparison(12)
# -
# What does this mean for the T, EM?
# Just choose some value of the short and long channel flux and loop over the sat number
# Loop over satellite numbers and compare the derived temperature and
# emission measure for one fixed pair of fluxes
# (long = 5e-5, short = 1e-5 W m^-2) between old and new responses.
for sat in range(11,18):
    print("Sat{0:.0f}".format(sat))
    # Old (CHIANTI v9) responses
    tmko, emo=gtem.get_tem(5e-5,1e-5,sat=sat,old_ver=True)
    print("Old{0:.0f}: {1:.2f}MK, {2:.2e}cm^-3$".format(sat,tmko,emo))
    # New (CHIANTI v10) responses
    tmk, em=gtem.get_tem(5e-5,1e-5,sat=sat)
    print("New{0:.0f}: {1:.2f}MK, {2:.2e}cm^-3$".format(sat,tmk,em))
| xrs_compare_tresp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from tqdm import tqdm
import sys
import time
import random
import pickle
import json
from itertools import cycle
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Pool
import sqlalchemy
from sqlalchemy import create_engine
from urllib.parse import quote
# sys.path.insert(0, '../tools/')
# from specialRequests import specialRequests
# +
class AnimePlanetCrawler:
    """Breadth-first crawler for anime-planet.com backed by a Postgres work queue.

    Methods are defined in later notebook cells and attached to the class
    afterwards (notebook pattern).
    """

    def __init__(self):
        # Read DB credentials from the shared tools config.
        with open('../tools/credentials.json') as file:
            credentials = json.load(file)
        username = credentials["dblogin"]["username"]
        # BUG FIX: the password was read from credentials["<PASSWORD>"], a
        # placeholder key that does not exist in the config; read it from
        # the same "dblogin" entry as the username.
        password = credentials["dblogin"]["password"]
        # quote() percent-encodes characters (e.g. '@', '/', ':') that
        # would otherwise corrupt the connection URL.
        db_string = f"postgresql://{username}:{quote(password)}@localhost:5432/animeplanet"
        self.db = create_engine(db_string)
        # self.sr = specialRequests()
# -
def loadData(self):
    """Load crawl state from the web_scrape table.

    Rows with html_text populated are finished pages (self.done); rows with
    NULL html_text are still to be scraped (self.pending). self.novel (URLs
    first discovered this session) and self.batch (url -> html scraped this
    session but not yet persisted) start empty.
    """
    print('loading data...')
    with self.db.connect() as con:
        query = """SELECT url
                   FROM web_scrape
                   WHERE html_text IS NOT NULL;"""
        self.done = set(pd.read_sql(sqlalchemy.text(query), con)['url'].to_list())
        query = """SELECT url
                   FROM web_scrape
                   WHERE html_text IS NULL;"""
        self.pending = set(pd.read_sql(sqlalchemy.text(query), con)['url'].to_list())
    self.novel = set()
    self.batch = {}
def saveData(self):
    """Persist the current batch to Postgres and snapshot crawl state to disk.

    Writes done/pending/novel/batch to pickle + json as a local backup, then
    replaces this batch's placeholder rows in web_scrape with the scraped
    html and inserts newly discovered (novel) URLs as NULL-html placeholders.
    On insert failure the batch is rolled back both in memory and in the DB
    so those pages are re-scraped on the next run.
    """
    print('saving data...')
    # URLs scraped this session are no longer novel or pending.
    self.novel = self.novel.difference(set(self.batch.keys()))
    self.pending = self.pending.difference(set(self.batch.keys()))
    data_dict = {'done': list(self.done),
                 'pending': list(self.pending),
                 'novel': list(self.novel),
                 'batch': self.batch}
    # Local snapshots (binary pickle + human-readable json) as a safety net.
    with open('../data/urls.pkl','wb') as file:
        pickle.dump(data_dict, file)
    with open("../data/urls.json", 'w') as file:
        json.dump(data_dict, file, indent=2)
    batch_dict = {'url': list(self.batch.keys()),
                  'html_text': list(self.batch.values())}
    batch_df = pd.DataFrame(batch_dict)
    # Novel URLs get NULL html_text, i.e. "still to be scraped".
    novel_dict = {'url': list(self.novel),
                  'html_text': [np.NaN for _ in range(len(self.novel))]}
    novel_df = pd.DataFrame(novel_dict)
    batch_urls = batch_dict['url']
    novel_urls = novel_dict['url']
    with self.db.connect() as con:
        print('\tremoving popped pending data...')
        # Drop this batch's placeholder rows before re-inserting them with html.
        query = f"""DELETE FROM web_scrape
                    WHERE url in ({str(batch_urls)[1:-1]})"""
        con.execute(sqlalchemy.text(query))
        print('\tsaving done data...')
        batch_df.to_sql('web_scrape', con, index=False, if_exists='append', method='multi')
        try:
            print('\tsaving pending data...')
            novel_df.to_sql('web_scrape', con, index=False, if_exists='append', method='multi')
        except Exception as e:
            # Insert failed (e.g. a "novel" URL already exists): undo the
            # batch so it is treated as pending again on the next run.
            # NOTE(review): the nesting of this rollback block was
            # reconstructed from mangled indentation — confirm against the
            # original notebook.
            print(e)
            query = f"""UPDATE web_scrape
                        SET html_text = NULL
                        WHERE url IN ({str(batch_urls)[1:-1]})"""
            con.execute(sqlalchemy.text(query))
            self.done = self.done.difference(batch_urls)
            self.pending = self.pending.difference(novel_urls)
            self.pending = self.pending.union(batch_urls)
    # Reset the per-session accumulators.
    self.batch = {}
    self.novel = set()
# + tags=[]
def popBatch(self):
    """Pop pending URLs so the done-count can reach its next multiple of ten.

    Returns the set of popped URLs. Legacy forum-member URLs ending in '.'
    are rewritten to their users/ form and re-queued; the malformed original
    is still returned so it gets marked as done downstream.
    """
    needed = 10 - (len(self.done) % 10)
    popped_urls = set()
    while len(popped_urls) < needed:
        url = self.pending.pop()
        if url[-1] == '.':
            fixed = url.replace('forum/members', 'users')[:-1]
            self.pending.add(fixed)
        popped_urls.add(url)
    return popped_urls
# -
def scrapePage(url):
    """Fetch one URL through the local special-requests proxy.

    Returns a (url, html_text) tuple; an empty html_text marks a page that
    was deliberately skipped (malformed legacy forum-member URL).
    """
    if ('forum/members' in url) and (url[-1] == '.'):
        return (url, '')
    # The proxy service on the LAN does the actual fetching (rate limiting,
    # headers, etc. live there).
    resp = requests.get(f'http://192.168.0.3:5000/special-requests?url={quote(url)}')
    html_text = resp.text
    # html_text = self.sr.get(url)
    return (url, html_text)
def scrapePages(urls):
    """Fetch many URLs in a single POST to the local special-requests proxy.

    Returns an iterable of (url, html_text) pairs.
    """
    resp = requests.post(f'http://192.168.0.3:5000/special-requests', json={'url':urls})
    url_html_dict = resp.json()
    # html_text = self.sr.get(url)
    return url_html_dict.items()
def parsePage(html_text):
    """Return the set of in-domain anime-planet URLs linked from *html_text*.

    An empty string (failed/skipped scrape) yields an empty set. Only links
    whose href starts with '/' (site-relative) are kept.
    """
    if html_text == '':
        return set()
    soup = BeautifulSoup(html_text, 'html.parser')
    hrefs = (str(anchor.get('href')) for anchor in soup.find_all('a'))
    return {
        'https://www.anime-planet.com' + href
        for href in hrefs
        if href and href.startswith('/')
    }
def processCrawlResults(self, url_html_tup):
    """Record scraped pages and queue any newly discovered URLs.

    url_html_tup: iterable of (url, html_text) pairs from scrapePage.
    Marks each url done, stashes its html in self.batch, and adds any
    never-seen linked URLs to self.novel / self.pending.
    """
    html_text_list = [x[1] for x in url_html_tup]
    # Parse the batch in parallel; each result is a set of in-domain links.
    with Pool(4) as p:
        cur_urls_set_list = p.map(parsePage, html_text_list)
    cur_urls = set().union(*cur_urls_set_list)
    for url, html_text in url_html_tup:
        self.done.add(url)
        # Empty html marks a failed fetch; store a sentinel instead so the
        # row is not mistaken for a pending (NULL) one when persisted.
        self.batch[url] = 'failed scrape' if html_text == '' else html_text
    # Only URLs never seen before count as newly discovered.
    cur_urls = (cur_urls.difference(self.pending)).difference(self.done)
    self.novel.update(cur_urls)
    self.pending.update(cur_urls)
def printCrawlProgress(self):
    """Print pending count, done count and completion ratio; return the done count."""
    done_count = len(self.done)
    pending_count = len(self.pending)
    ratio = 0 if pending_count == 0 else done_count / (pending_count + done_count)
    print(pending_count, done_count, ratio)
    return done_count
def waiter(secs):
    """Sleep for *secs* seconds, ticking a tqdm progress bar once per second."""
    print(f'waiting {secs} secs...')
    for _ in tqdm(range(secs)):
        time.sleep(1)
# + tags=[]
def crawl(self):
    """Main crawl loop: pop a batch, scrape concurrently, checkpoint periodically.

    Persists to the DB every 100 completed pages, then backs off with a
    randomized sleep — larger round-number milestones get longer cool-downs
    to stay polite to the target site. Runs until no URLs remain pending.

    NOTE(review): loop nesting was reconstructed from mangled indentation —
    confirm the else-branch pairs with `if len_done % 100 == 0`.
    """
    self.loadData()
    print('starting crawl...')
    start_time = time.time()
    while len(self.pending) > 0:
        popped_urls = self.popBatch()
        # Fetch the batch concurrently (network-bound work).
        with ThreadPoolExecutor(max_workers=10) as executor:
            url_html_tup = list(executor.map(scrapePage, popped_urls))
        self.processCrawlResults(url_html_tup)
        len_done = self.printCrawlProgress()
        if len_done % 100 == 0:
            # Checkpoint and choose a cool-down based on milestone size.
            end_time = time.time()
            print('timer: ', end_time-start_time)
            self.saveData()
            if len_done % 500000 == 0:
                sleep_time = random.randint(500, 1000)
            elif len_done % 100000 == 0:
                sleep_time = random.randint(100, 200)
            elif len_done % 10000 == 0:
                sleep_time = random.randint(20, 30)
            elif len_done % 1000 == 0:
                sleep_time = random.randint(10, 20)
            else:
                sleep_time = random.randint(5, 10)
            waiter(sleep_time)
            print('starting crawl...')
            start_time = time.time()
        else:
            # Short randomized pause (1-5 s) between ordinary batches.
            time.sleep(max(min(np.random.poisson(2), 5), 1))
# -
# Attach the functions defined in the cells above as methods of the class —
# a common notebook pattern for spreading a class definition across cells.
AnimePlanetCrawler.loadData = loadData
AnimePlanetCrawler.saveData = saveData
AnimePlanetCrawler.popBatch = popBatch
AnimePlanetCrawler.processCrawlResults = processCrawlResults
AnimePlanetCrawler.printCrawlProgress = printCrawlProgress
AnimePlanetCrawler.crawl = crawl
# + tags=[]
# Instantiate (opens the DB connection) and run until the queue is empty.
crawler = AnimePlanetCrawler()
# -
crawler.crawl()
| notebooks/01_webCrawler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Sequitur (Nevill-Manning) algorithm
# For the algorithm reference see:
#
# 1. http://arxiv.org/abs/cs/9709102
# 2. https://en.wikipedia.org/wiki/Sequitur_algorithm
# 3. http://www.sequitur.info/
#
# I'm testing the pysequitur library against the web interface provided by sequitur.info.
#
# ## Background
#
# Main reason to make a new library is that I needed to have the Sequitur algorithm to work with "any" object pairs (digram), not just textual input. This is best achieved by working with list indexes rather than rudimentary text replacements on the algorithm.
#
# Other reason was to test efficiency of two different approaches for the algorithm:
#
# 1. array slicing method
# 2. digram storage method
#
# I was guessing the array slicer would be the easier way, but searching digrams from existing rules might take longer and consequently hurt the performance of the algorithm. Thus the second way, the "digram storage method", was implemented. Trivial tests show that the latter is indeed the faster implementation.
#
# Other Python libraries for the algorithm can be found from GitHub:
#
# 1. https://github.com/mspandit/sequitur-python
# 2. https://github.com/raysaagar/parallel-sequitur
#
# The latter provides both serial and parallel versions of the algorithm. Unexpectively the parallel version is slower on small data sets on my tests. Both algos work slower than the "digram storage method". I found _sequitur-python_ just recently so performance and features of that library are yet to be tested.
#
#
# ## Array slicing method
#
# I'm not going into the deep details of the source code of these two implementations. Basically, in the array slicing method every rule is a plain list of sequence items. So finding and comparing digrams needs to loop over all rules and then slice the root sequence into parts that are two items long. Comparison is thus made like this:
#
# <pre><code class="python">rule[i:i+2] == digram</code></pre>
#
# This is one of the most resource eating part on the algorithm. Of cource the rule utility is another similar loop over loop over loop place which takes most of the time on the algo:
#
# <pre><code class="python">for rule1 in self:
# for rule2 in self:
# l = len(rule2)
# ind = 0
# while ind < l:
# c = rule2[ind+1:]
# del rule2[ind:]
# rule2 += rule1 + c
# ind += 1
# </code></pre>
#
# ## Digram storage method
#
# In the digram storage method array slicing is eliminated by storing every item with the previous item. In this case digrams are stored in memory, which causes approximately twice the memory consumption but in return at least three times the performance. In the digram storage method the comparison can be done in a more efficient way:
#
# <pre><code class="python">digram in rule</code></pre>
#
# Also the rule utility behaves a bit differently after this data structure change. Still, I couldn't find a way to eliminate loops over loops, but the rule modification method seems to be more efficient, because there is no need to delete and reinstantiate the list of rule items. Simplified, it looks like this:
#
# <pre><code class="python">for rule1 in self:
# for rule2 in self:
# l = len(rule2)
# ind = 0
# while ind < l:
# self[ind:ind] = rule[:]
# </code></pre>
#
# All code with comments can be found from GitHub directory: https://github.com/markomanninen/pysequitur/blob/master/pysequitur/main.py
#
# ## Using library
#
# Library offers these two implementations of the algorithm with dedicated classes:
#
# - Sequencer
# - Sequencer2
#
# For nice output print_grammar function is provided. Five different constants can be used to alter output and representation of the classes used on the library's print utility:
#
# - **RULE_INDEX_STR**<br/>
# How to represent rule indexes on output, default is "^%s" where %s is replaced with a rule index number
# - **SEQUENCE_KEY**<br/>
# Define special key to be used to mark the main root sequence, default is "S"
# - **ARROW**<br/>
# Special character to be used as a separator between rule indices and rules, default "→"
# - **NEWLINE_REPLACEMENT**<br/>
# What to do with newlines on output, default replacement is "↵"
# - **SPACE_REPLACEMENT**<br/>
# What to do with space characters on output, default replacement is "_"
#
# ## Comparison
#
# Let's compare these two different sequencers and print_grammar utility with an example children nursery rhyme:
# +
from pysequitur import Sequencer, Sequencer2, print_grammar
seq = """pease porridge hot,
pease porridge cold,
pease porridge in the pot,
nine days old.
some like it hot,
some like it cold,
some like it in the pot,
nine days old."""
# -
print (Sequencer(seq).get())
print (Sequencer2(seq).get())
# As we can notice, data structures are different, latter is somewhat easier to understand, more straightforward. Now let's compare grammar generated by sequencers:
print_grammar(Sequencer(seq))
print_grammar(Sequencer2(seq))
# They should give similar output, just index numbers should be different. Then let's make some trivial efficiency check with __%%timeit__ notebook magics:
# %%timeit
Sequencer(seq)
# %%timeit
Sequencer2(seq)
# Sequencer 2 takes more than twice longer to create the grammar. Before trying with longer text samples, let's try a trivial tester, which compares generated sequence to the expected output. If they don't match, function will output error.
#
# ### Test output
def test(sequence, expect, sequencer_class):
""" Test sequence if result is as expected. This is a very trivial tester. """
s = sequencer_class()
for c in sequence:
s.stream(c)
try:
assert str(s) == expect
print ("Test ok!")
except AssertionError:
print ('Assertion error!', 'Sequence "%s" gave: "%s". Expected: "%s"' % (sequence, str(s), expect))
test('abcdbcabcd', '^3^1^3', Sequencer)
test('abcdbcabcdbc', '^3^1^3', Sequencer)
# ### Longer text sample
#
# Now it is time to compare longer text with sequencers. I'm using samples found from: http://www.sequitur.info/ to double check everything works as expeted. First import Genesis text from file:
input_string = open('genesis1.txt', 'r').read()
# Then processing with two different sequencers:
# %%timeit
Sequencer(input_string)
# %%timeit
Sequencer2(input_string)
# The result is that the "digram storage method" seems to be about three times faster than the simpler "array slicing method".
#
# Other long text sample ("I am Sam" poem) looks like this when converted to a grammar:
input_string = open('IamSam.txt', 'r').read()
s = Sequencer(input_string)
print_grammar(s)
# ### Stream sequence
#
# So far we have provided a input sequence as a single pass as a class constructor argument. Let's assume, that sequencer is used to find out repeating hierarchal structures from a constant stream of a numeric input data. On the next code block I will emulate constant data flow within a while loop. A small delay is used on a loop to create an effect of a continuous stream of timely data.
#
# To differentiate rule indexes from numeric data I will change the RULE_INDEX_STR constant to a more appropriate one, so that rule index numbers will be wrapped with curly brackets. Also, because index numbers in Python are plain integers, we need to make sure that rule indices and input values are not mixed up during processing. The class Int is presented here for that reason.
#
# _Use Kernel->Interrupt from the main menu to stop while loop in case of infinite recursion!_
# +
import time
from IPython.display import clear_output
import pysequitur
class Int(int):
def __repr__(self):
return str(self)
def __eq__(self, v):
return isinstance(v, Int) and int.__eq__(self, v)
# reinstantiate default rule index string: ^%s with: {%s} so rule indexes
# (curly brackets) are visually distinct from the numeric input values
pysequitur.main.RULE_INDEX_STR = "{%s}"
# init sequencer
s = Sequencer()
# init delay, 0.5 seconds
delay = 0.5
# init numeric data and map them to Int type (so values never compare equal
# to rule-index integers)
a = [1,2,3,4,2,3,1,2,3,4,1,2,3,4,2,3,1,2,3,4]
a = list(map(Int, a))
try:
    l = len(a)
    i = 0
    # Emulate a continuous data stream: feed one value at a time and redraw
    # the current grammar in place between items.
    while i < l:
        s.stream(a[i])
        print_grammar(s)
        time.sleep(delay)
        clear_output(wait=True)
        i+=1
except KeyboardInterrupt:
    # Allow Kernel -> Interrupt to stop the demo cleanly.
    pass
# -
# ## Resolve
#
# When sequencer is used to generate (which also means same as compress the data), we can use resolve method of the Sequencer class to decode main rule back to original sequence:
input_string = open('peaseporridge.txt', 'r').read()
s = Sequencer(input_string)
print (''.join(s.resolve()))
# ## Random sequence
#
# One more final case just to demonstrate sequencer in working is to use random letter pattern and see how it is processed by the library:
# +
import time
from random import shuffle, randint as rand
from IPython.display import clear_output
def test_random_seq(f=5, delay=0.1):
    """Stream random lowercase letters into a Sequencer, redrawing live.

    f: multiplier for the number of letters streamed (f * 24 items).
    delay: seconds between redraws.
    """
    # NOTE(review): 'x' is missing from this alphabet — presumably a typo,
    # left unchanged here since it only affects the demo's letter pool.
    abc = 'abcdefghijklmnopqrstuvwyz'
    l = len(abc)-1
    s = ''
    i = l*f
    # to stop script execution on Jupyter notebook: Kernel -> Interrupt
    try:
        # Seed the sequencer with one random letter, then stream the rest,
        # printing the evolving grammar after each item.
        s = Sequencer(abc[rand(0, l)])
        while i > 0:
            i -= 1
            s.stream(abc[rand(0, l)])
            print ('sequence:', s)
            print ('grammar:', s.get()[0])
            print ('rules:', s.get()[1:])
            print ('resolve:', s.resolve())
            time.sleep(delay)
            clear_output(wait=True)
    except KeyboardInterrupt:
        pass
# -
test_random_seq()
# ## The [MIT](https://github.com/markomanninen/pysequitur/blob/master/LICENSE) License
#
# Copyright © 2016 <NAME>
| Python Sequitur Algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="K9MWVyjeB-HZ" executionInfo={"status": "ok", "timestamp": 1624451415196, "user_tz": -330, "elapsed": 1002, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}}
# Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
plt.style.use('seaborn-dark')
import warnings
warnings.simplefilter('ignore')
# + id="fO3beXsx2XY2" executionInfo={"status": "ok", "timestamp": 1624451536458, "user_tz": -330, "elapsed": 1808, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}}
train = pd.read_csv('/content/drive/MyDrive/Data Science/AV/Churn Prediction/3. Data-Extraction-Collection/churn_prediction.csv')
data = train.copy()
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="updE0DtZ4zhc" executionInfo={"status": "ok", "timestamp": 1624452061603, "user_tz": -330, "elapsed": 634, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}} outputId="ed4283de-55e0-4947-cf04-f7e9cbbcf4e2"
train.head()
# + id="t2_34HsfQfEJ" executionInfo={"status": "ok", "timestamp": 1624451604415, "user_tz": -330, "elapsed": 386, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}}
ID_COL, TARGET_COL = 'customer_id', 'churn'
# + id="xUNJjigLT3cN" executionInfo={"status": "ok", "timestamp": 1624451652010, "user_tz": -330, "elapsed": 610, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}}
# non missing data variables
CATEGORICAL = ['customer_nw_category','gender', 'occupation',]
CONTINUOUS = ['dependents', 'city','vintage', 'age', 'branch_code', 'current_balance',
'previous_month_end_balance', 'average_monthly_balance_prevQ',
'average_monthly_balance_prevQ2', 'current_month_credit',
'previous_month_credit', 'current_month_debit', 'previous_month_debit',
'current_month_balance', 'previous_month_balance',]
# + id="l3sOkvpt3roX" executionInfo={"status": "ok", "timestamp": 1624452030210, "user_tz": -330, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}}
def Box_Plot(data, i):
    """Draw a horizontal box plot of column *i* (a column name) of *data* on its own small figure."""
    sns.set(font_scale=1.3)
    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(10, 3))
    # for i, k in enumerate(CONTINUOUS):
    _ = data[i].boxplot(ax =axes, vert=False)
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 767} id="0z81wMv12g4j" executionInfo={"status": "error", "timestamp": 1624452031092, "user_tz": -330, "elapsed": 893, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}} outputId="d1fff978-9494-4e57-efd2-2d8d4391cc05"
# Iterate the column names directly. BUG FIX: `for i in enumerate(CONTINUOUS)`
# bound i to (index, name) tuples, so Box_Plot's data[i] lookup failed —
# the saved notebook output for this cell shows "status": "error".
for col in CONTINUOUS:
    Box_Plot(data, col)
# + id="s8D2xxhF3G-X" executionInfo={"status": "ok", "timestamp": 1624451964015, "user_tz": -330, "elapsed": 21, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18342100915741616506"}}
| 5. Feature Engineering/2. Outlier-Treatment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Named tuples
# + active=""
# namedtuple is a function which generates a new class → class factory
# that new class inherits from tuple
# but also provides named properties to access elements of the tuple but an instance of that class is still a tuple
# -
from collections import namedtuple
class Point2D:
    """Mutable two-field demo class, contrasted with the namedtuple version below."""

    def __init__(self, fname, lname):
        self.fname = fname
        self.lname = lname

    def __repr__(self):
        return f'Point2D(name={self.fname},surname={self.lname})'

    def __eq__(self, other):
        # Field-by-field comparison against other Point2D instances only.
        if isinstance(other, Point2D):
            return self.fname == other.fname and self.lname == other.lname
        else:
            # BUG FIX: was `return false` (lowercase), which raised a
            # NameError whenever a Point2D was compared to any other type.
            return False
p2d = Point2D('Natiq', 'Vahabov')
p2d
# +
# such a long code and it is mutable object we can change name,surname in any time
# -
p2d.fname = 'Nail'
p2d
Point2DTuple = namedtuple('Point2DTuple', ['fname', 'lname'])
p2d_t = Point2DTuple('Natiq', 'Vahabov')
p2d_t
p2d_t.fname = "Nail"
# +
# convert tuple to orderedDict
dic = p2d_t._asdict()
# -
dic
dic['fname']
Point2DTuple._fields
# modifing the value of named tuple
p2d_t = p2d_t._replace(fname='Nail')
p2d_t
# +
# extending the named tuple
new_fields = p2d_t._fields + ('age',)
# -
new_fields
Point3DTuple = namedtuple('Point3DTuple', new_fields)
p3d_t = Point3DTuple(*p2d_t, 27)
p3d_t
Point3DTuple.__doc__
Point3DTuple.fname.__doc__
help(Point3DTuple)
Point3DTuple.__doc__ = "3D named tuple"
Point3DTuple.__doc__
# +
# setting defaults to namedtuple
Vector2D = namedtuple('Vector2D', 'x1 y1 x2 y2 origin_x origin_y') # adding default values to origins
Vector2D_zero = Vector2D(0,0,0,0,0,0)
Vector2D_zero
# -
v2 = Vector2D_zero._replace(x1=10,y1=10,x2=20,y2=20)
v2
# +
# alternative method with __new__
Vector2D.__new__.__defaults__ = (-10, -10)
v2 = Vector2D(x1=10,y1=10,x2=20,y2=20)
# -
v2
# +
# convert any list of dicts to namedtuple
def tuplify(dicts):
    """Convert a list of (possibly heterogeneous) dicts into namedtuples.

    The generated tuple class has one field per key seen anywhere in
    *dicts* (sorted; invalid identifiers renamed by namedtuple). Keys
    missing from a given dict default to None.
    """
    all_keys = set()
    for record in dicts:
        all_keys.update(record.keys())
    Struct = namedtuple('Struct', sorted(all_keys), rename=True)
    # Every field defaults to None so sparse dicts still construct cleanly.
    Struct.__new__.__defaults__ = (None,) * len(Struct._fields)
    return [Struct(**record) for record in dicts]
# -
data_list = [{'key1':10},
{'key2':20},
{'key1':10, 'key3':30}]
tuple_list = tuplify(data_list)
tuple_list
type(data_list)
| named_tuples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://github.com/awslabs/aws-data-wrangler)
#
# # 21 - Global Configurations
#
# [Wrangler](https://github.com/awslabs/aws-data-wrangler) has two ways to set global configurations that will override the regular default arguments configured in functions signatures.
#
# - **Environment variables**
# - **wr.config**
#
# *P.S. Check the [function API doc](https://aws-data-wrangler.readthedocs.io/en/2.14.0/api.html) to see if your function has some argument that can be configured through Global configurations.*
#
# *P.P.S. One exception to the above mentioned rules is the `botocore_config` property. It cannot be set through environment variables
# but only via `wr.config`. It will be used as the `botocore.config.Config` for all underlying `boto3` calls.
# The default config is `botocore.config.Config(retries={"max_attempts": 5}, connect_timeout=10, max_pool_connections=10)`.
# If you only want to change the retry behavior, you can use the environment variables `AWS_MAX_ATTEMPTS` and `AWS_RETRY_MODE`.
# (see [Boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables))*
# ## Environment Variables
# %env WR_DATABASE=default
# %env WR_CTAS_APPROACH=False
# %env WR_MAX_CACHE_SECONDS=900
# %env WR_MAX_CACHE_QUERY_INSPECTIONS=500
# %env WR_MAX_REMOTE_CACHE_ENTRIES=50
# %env WR_MAX_LOCAL_CACHE_ENTRIES=100
import awswrangler as wr
import botocore
# Smoke-test query; picks up the WR_* environment-variable overrides set above.
wr.athena.read_sql_query("SELECT 1 AS FOO")
# ## Resetting
# Specific: clear only the "database" override
wr.config.reset("database")
# All: clear every configured override
wr.config.reset()
# ## wr.config
# Same settings as the environment variables above, set programmatically.
wr.config.database = "default"
wr.config.ctas_approach = False
wr.config.max_cache_seconds = 900
wr.config.max_cache_query_inspections = 500
wr.config.max_remote_cache_entries = 50
wr.config.max_local_cache_entries = 100
# Set botocore.config.Config that will be used for all boto3 calls
# (botocore_config can only be set via wr.config, not environment variables)
wr.config.botocore_config = botocore.config.Config(
    retries={"max_attempts": 10},
    connect_timeout=20,
    max_pool_connections=20
)
wr.athena.read_sql_query("SELECT 1 AS FOO")
# ## Visualizing
wr.config
| tutorials/021 - Global Configurations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from datetime import datetime
# Scrape the CDC COVID-19 "cases in US" page: extract the "Updated" date and
# the summary table, then append the new row to a local CSV history file.
url = 'https://www.cdc.gov/coronavirus/2019-ncov/cases-updates/cases-in-us.html'
# ### Extract date
content = requests.get(url).content
soup = BeautifulSoup(content, 'lxml')
# Locate the "Updated <Month> <day>, 202x" caption text on the page.
data = soup.findAll(text=re.compile("Updated .*202"))
data
# Raw strings keep `\w`/`\d` as regex escapes; the non-raw originals relied
# on invalid string escapes (DeprecationWarning, future SyntaxError).
m = re.search(r"Updated (\w+ \d+, 202\d)", data[0])
data_date_str = m.group(1)
# Normalize "March 1, 2020" -> "2020-03-01" so dates sort lexicographically.
data_date = datetime.strptime(data_date_str, '%B %d, %Y').strftime('%Y-%m-%d')
data_date
# ### Extract data and add into Dataframe
dfs = pd.read_html(url)
if len(dfs) == 0:
    raise Exception('There is no dataframes detected')
df = dfs[0]
df
# +
# Transpose so the table's first column becomes the column headers.
col0 = df.columns[0]
df = df.set_index(col0).T
df
# +
# remove non alphanumeric characters from the columns
# also lowercase them
df.columns = df.columns.map(lambda x: re.sub(r'\W+', '', str.lower(x)))
# add column date to the front
df.insert(0, 'date', data_date)
# -
df
# ### Save to file
# +
from pathlib import Path
csv_file = '../data/cdc-us-cases.csv'
if Path(csv_file).exists():
    # read out the old data and merge it with today's row, deduplicating on date
    old_df = pd.read_csv(csv_file)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    df = pd.concat([df, old_df])
    df.drop_duplicates(subset=['date'], inplace=True)
# -
df.sort_values(by=['date'], inplace=True)
df
df.to_csv(csv_file, index=False)
# ### Plot graph
# +
# import seaborn as sns
# import matplotlib.pyplot as plt
# from pandas.plotting import register_matplotlib_converters
# register_matplotlib_converters()
# png_file = '../images/cdc-us-cases.png'
# # convert to pd.datetime
# df['datetime_idx'] = pd.to_datetime(df['date'], format="%Y-%m-%d")
# df.set_index('datetime_idx', inplace=True)
# df
# -
| notebooks-scrappers/CDC-cases-in-US.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="HHsuQMMJyms4"
# # HS RDF HydroShare Python Client Basic Resource Operation Examples
#
#
# ---
#
#
# The following code snippets show examples for how to use the HS RDF HydroShare Python Client for performing basic resource operations.
# + [markdown] id="b_Tj5gJx0fRj"
# ## Install the HS RDF Python Client
#
# The HS RDF Python Client for HydroShare won't be installed by default, so it has to be installed first before you can work with it. Use the following command to install the Python Client from the GitHub repository. Eventually we will distribute this package via the Python Package Index (PyPi) so that it can be installed via pip from PyPi.
# + id="hzriLgMl0oJ2"
# !pip install hsclient
# + [markdown] id="CZNOazcn9-23"
# ## Authenticating with HydroShare
#
# Before you start interacting with resources in HydroShare you will need to authenticate. To authenticate with HydroShare, you can either specify your username and password or you can call the `sign_in()` function, which will prompt you to input your username and password.
# + id="3njsiY73m7_V"
from hsclient import HydroShare
# NOTE(review): placeholder credentials — replace before running, and avoid
# committing real values to a shared notebook.
username = 'username'
password = 'password'
hs = HydroShare(username, password)
# -
# In most cases you will not want anyone to see your username and password, so you can also call the `sign_in()` function to be prompted for your username and password. This is better to use if you are sharing a Jupyter Notebook.
# +
from hsclient import HydroShare
# Prompt interactively instead of embedding credentials in the notebook.
hs = HydroShare()
hs.sign_in()
# + [markdown] id="xWITDJt79xiA"
# ## Basic Resource Operations
# + [markdown] id="TH3UUihSojIb"
# ### Create a New Empty Resource
#
# A "resource" is a container for your content in HydroShare. Think of it as a "working directory" into which you are going to organize the code and/or data you are using and want to share. The following code can be used to create a new, empty resource within which you can create content and metadata.
#
# This code creates a new resource in HydroShare. It also creates an in-memory object representation of that resource in your local environment that you can then manipulate with further code.
# + id="W9azvJ_Co87w"
# Create the new, empty resource (created server-side in HydroShare and
# mirrored as an in-memory object here)
new_resource = hs.create()
# Get the HydroShare identifier for the new resource
resIdentifier = new_resource.resource_id
print('The HydroShare Identifier for your new resource is: ' + resIdentifier)
# Construct a hyperlink for the new resource
print('Your new resource is available at: ' + new_resource.metadata.url)
# + [markdown] id="tBhL3LdOLCOx"
# ### Retrieving an Existing Resource
#
# If you want to work on an existing resource rather than creating a new one, you can retrieve an existing resource using its HydroShare Identifier. The resource identifier is passed as a string. The resource's metadata is retrieved and loaded into memory.
# + id="-My8I_cmLOIf"
# Get an existing resource using its identifier (a string); this loads the
# resource's metadata into memory
existing_resource = hs.resource(resIdentifier)
print('Just retrieved the resource with ID: ' + resIdentifier)
# + [markdown] id="R142RJv-xZBx"
# ### Deleting a Resource
#
# If you want to delete a resource you are currently working with, you can just call the `delete()` function on that resource. This will delete your local copy of the resource and the resource in HydroShare.
# + id="G9LBisuOx5WM"
new_resource.delete()  # deletes both the local copy and the resource in HydroShare
# + [markdown] id="7msplpxjHwqQ"
# Alternatively, if you know the HydroShare identifier of the resource you want to delete, you can use it to delete the resource.
# + id="Wf91WAB1H5KS"
# Delete the resource using its identifier (no local object needed)
hs.resource(resIdentifier).delete()
print('Deleted resource: ' + resIdentifier)
# + [markdown] id="lZY3F7gj1e_k"
# ## Download an Entire Resource
#
# HydroShare allows you to download an entire resource as a zipped file that uses the BagIt packaging standard. You can identify the resource you want to download using its HydroShare identifier. When you call the `download()` function on the resource, you can pass a path where you want to save the zipped file. Leaving the path blank downloads the files to the directory.
#
# This example downloads the HydroShare resource containing these example Jupyter Notebooks.
# + id="Xqgoq9yk15e4"
# Get the resource you want to download using its identifier
res = hs.resource(resIdentifier)
# Download the resource as a zipped (BagIt) file. Pass in a file path as a string if you
# want to download to a particular location.
res.download()
# -
| docs/examples/Basic_Operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import io
from tkinter import *
from PIL import Image, ImageTk
class Meowner():
    """Tiny Tk app that fetches and displays a random cat picture.

    Constructing the class builds the window, shows a first cat, and then
    blocks in Tk's mainloop until the window is closed.
    """

    def __init__(self):
        self.janela = Tk()
        self.janela.title('Meowner')
        self.janela.geometry('800x600')
        self.janela.tk_setPalette(background= '#7e7d80', foreground = '#ffFF00', activeBackground = 'blue')
        self.btn_update = Button(self.janela, text='OTRO GATO!', command=self.update_cat)
        self.btn_update.pack()
        self.update_cat()
        self.janela.mainloop()

    def update_cat(self):
        """Fetch a new random cat image and swap it into the window."""
        self.cat_img_url = self.get_random_cat()
        # Keeping the image on self prevents Tk's PhotoImage from being
        # garbage-collected while displayed.
        self.image = self.get_tkinter_image(self.cat_img_url)
        # On the first call no label exists yet; only that specific case is
        # swallowed (the original bare `except:` hid every error, including
        # real bugs).
        try:
            self.label.destroy()
        except AttributeError:
            pass
        self.label = Label(self.janela, image=self.image, width=600, height=400)
        self.label.pack()

    def get_tkinter_image(self, img_url):
        """Download *img_url* and return it as a Tk-compatible PhotoImage."""
        raw_data = requests.get(img_url, stream=True).raw.data
        im = Image.open(io.BytesIO(raw_data))
        image = ImageTk.PhotoImage(im)
        return image

    def get_random_cat(self):
        """Return the URL of a random cat image from the random.cat API."""
        url = 'https://aws.random.cat/meow'
        r = requests.get(url)
        r_json = r.json()
        return r_json['file']
m = Meowner()  # blocks here: __init__ enters Tk's mainloop
# -
| Projeto_Final/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # mapping-challenge-mask_rcnn-training
# 
#
# This notebook contains the baseline code for the training a vanilla [Mask RCNN](https://arxiv.org/abs/1703.06870) model for the [crowdAI Mapping Challenge](https://www.crowdai.org/challenges/mapping-challenge).
#
# This code is adapted from the [Mask RCNN]() tensorflow implementation available here : [https://github.com/matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN).
#
# First we begin by importing all the necessary dependencies :
# +
import os
import sys
import time
import numpy as np
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
#
# A quick one liner to install the library
# # !pip install git+https://github.com/waleedka/coco.git#subdirectory=PythonAPI
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
from mrcnn.evaluate import build_coco_results, evaluate_coco
from mrcnn.dataset import MappingChallengeDataset
import zipfile
import urllib.request
import shutil
# -
# ## Dataset location
# Now we have to download all the files in the datasets section and untar them to have the following structure :
# ```
# ├── data
# | ├── pretrained_weights.h5 (already included in this repository)
# │ ├── test
# │ │ └── images/
# │ │ └── annotation.json
# │ ├── train
# │ │ └── images/
# │ │ └── annotation.json
# │ └── val
# │ └── images/
# │ └── annotation.json
# ```
# Note that the `pretrained_weights.h5` (available at [https://www.crowdai.org/challenges/mapping-challenge/dataset_files](https://www.crowdai.org/challenges/mapping-challenge/dataset_files)) are the weights used for the baseline submission, and are obtained by running the learning schedule mentioned later in the experiment. In the said experiment, the initial weights used can be found [here](https://github.com/matterport/Mask_RCNN/releases/download/v2.1/mask_rcnn_balloon.h5).
# +
# Resolve project-relative paths from the current working directory.
ROOT_DIR = os.getcwd()
# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Pass the path components separately; the original relied on implicit
# adjacent-string concatenation ("data/" "pretrained_weights.h5"), which
# works but reads like a missing comma and hard-codes the separator.
PRETRAINED_MODEL_PATH = os.path.join(ROOT_DIR, "data", "pretrained_weights.h5")
LOGS_DIRECTORY = os.path.join(ROOT_DIR, "logs")
# -
# ## Experiment Configuration
# +
class MappingChallengeConfig(Config):
    """Configuration for training on data in MS COCO format.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "spacenet-rio-buildings"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 5
    # Train on 8 GPUs (the original comment said "uncomment", but this is active)
    GPU_COUNT = 8
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # 1 Background + 1 Building
    # Square 320x320 inputs
    IMAGE_MAX_DIM=320
    IMAGE_MIN_DIM=320
    # Per-channel (RGB) mean used for image centering.
    # NOTE(review): values look dataset-specific — confirm they match the
    # images actually being trained on.
    MEAN_PIXEL = [81.16231469, 86.53528546, 64.72005973]
    # Instance masks are shrunk to this size to save memory
    MINI_MASK_SHAPE = (128, 128)

config = MappingChallengeConfig()
config.display()
# -
# ## Instantiate Model
# Build a Mask R-CNN model in training mode; checkpoints go under LOGS_DIRECTORY.
model = modellib.MaskRCNN(mode="training", config=config, model_dir=LOGS_DIRECTORY)
# Load pretrained weights
model_path = PRETRAINED_MODEL_PATH
# by_name=True matches layers by name, so compatible layers load even when
# the architecture differs slightly from the checkpoint.
model.load_weights(model_path, by_name=True)
# ## Load Training and Validation Dataset
# +
# Load training dataset
dataset_train = MappingChallengeDataset()
dataset_train.load_dataset(dataset_dir=os.path.join("data", "train"))
dataset_train.prepare()
# Load validation dataset
dataset_val = MappingChallengeDataset()
val_coco = dataset_val.load_dataset(dataset_dir=os.path.join("data", "val"))
dataset_val.prepare()
# -
# ## Train
# +
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1: only the head layers, so the pretrained backbone is
# not disturbed by large initial gradients from randomly-initialized heads.
print("Training network heads")
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=40,
            layers='heads')
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=60,
            layers='4+')
# Training - Stage 3
# Fine tune all layers, at a 10x lower learning rate
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=80,
            layers='all')
# -
# Now you can monitor the training by running :
# ```
# tensorboard --logdir=logs/[path-to-your-experiment-logdir]
# ```
# and if everything works great, you should see something like :
# 
#
# # Author
# <NAME> [<EMAIL>](<EMAIL>)
| Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''ERA'': conda)'
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import os
from glob import glob
from functools import reduce
import tqdm.notebook as tq
def df_from_list(df_list: list, merge_col='date') -> pd.DataFrame:
    """Merge a list of DataFrames into a single frame.

    Successive inner joins on *merge_col* are applied left to right.

    Args:
        df_list (list): DataFrames to merge; each must contain *merge_col*.
        merge_col (str, optional): Join column. Defaults to 'date'.

    Returns:
        pd.DataFrame: The merged frame.
    """
    def _join(left, right):
        return pd.merge(left, right, on=merge_col)

    return reduce(_join, df_list)
# Input/output locations (Windows drive paths).
path_to_meteo = 'S:/education/HSI/aspirantura/Dissertation/conus_data/WS_meteo/'
path_to_geo = 'S:/education/HSI/aspirantura/Dissertation/conus_data/featureXtractor/'
save_path = 'S:/education/HSI/aspirantura/Dissertation/conus_data/geo_met/'
os.makedirs(save_path, exist_ok=True)
all_meteo = glob(path_to_meteo + '*.csv')
# merge HydroATLAS data to one dataframe (inner joins on gauge_id)
test_geo = df_from_list(df_list=[pd.read_csv(path, sep=';')
                                 for path in glob(path_to_geo + '*.csv')],
                        merge_col='gauge_id')
# Left-pad gauge_id with a single '0' when it is not 8 characters, to
# restore leading zeros dropped by numeric parsing.
# NOTE(review): this assumes ids are only ever 7 or 8 digits long — a
# 6-digit id would end up 7 characters; confirm against the source data.
test_geo['gauge_id'] = ['0' + str(gauge) if len(str(gauge)) != 8
                        else str(gauge)
                        for gauge in test_geo['gauge_id']]
# +
# Necessary columns to work with time-varying variables
# Month suffixes and climatology prefixes naming the time-varying (monthly)
# HydroATLAS columns, e.g. 'tmp_dc_s01' ... 'swc_pc_s12'.
monthes = ['01', '02', '03', '04', '05', '06',
           '07', '08', '09', '10', '11', '12']
_monthly_prefixes = ['tmp_dc_s', 'pre_mm_s', 'pet_mm_s',
                     'aet_mm_s', 'cmi_ix_s', 'snw_pc_s', 'swc_pc_s']
# Grouped by variable first, then month — same order as the original
# nested list-of-lists flattening.
time_varying_vars = [prefix + month
                     for prefix in _monthly_prefixes
                     for month in monthes]
# -
for meteo_file in tq.tqdm(all_meteo):
    # Gauge id = file name without directory prefix or '.csv' suffix.
    # NOTE(review): splitting on '\\' assumes Windows-style separators from
    # glob — confirm before running on POSIX.
    gauge = meteo_file.split('\\')[1][:-4]
    gauge_geo = test_geo.loc[np.where(test_geo['gauge_id'] == gauge)]
    # split variables into static and dynamic (monthly varying)
    varying_vars = gauge_geo[time_varying_vars]
    static_vars = gauge_geo[gauge_geo.columns.difference(time_varying_vars)]
    # read meteo file; it is modified in place to become the final output
    gauge_meteo = pd.read_csv(meteo_file)
    # replicate each static value once per meteo row
    expanded_values = [[value]*len(gauge_meteo)
                       for value in static_vars.values[0]]
    # assign static variables to the final frame
    for i, column in enumerate(static_vars.columns):
        gauge_meteo.loc[:, column] = expanded_values[i]
    # base names of the varying variables (strip the 2-char month suffix)
    unique_varies = np.unique([col[:-2] for col in varying_vars.columns])
    # assign dynamic variables row by row, picking the column matching the
    # row's month (date format assumed 'YYYY-MM-...')
    for i, date in enumerate(gauge_meteo['date']):
        month = date.split('-')[1]
        for variable in unique_varies:
            gauge_meteo.loc[i, variable] = varying_vars[variable + month].to_list()
    gauge_meteo.to_csv(save_path + f'{gauge}_met_geo.csv',
                       index=False)
| meteo_w_geo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# # Cleaning and preparing MoMA datasets for use in instruction
import pandas as pd
# Import MOMA Artworks collection from GitHub
# Load the full MoMA artworks collection straight from GitHub.
moma_data = pd.read_csv("https://github.com/MuseumofModernArt/collection/blob/master/Artworks.csv?raw=true")
moma_data['Classification'].unique()
# ## Cleaning and simplifying the datasets for Open Labs 1 and 2
# Filter by artworks classified as paintings, sculptures, photographs, audio,
# or video
moma_classification_filter = moma_data[
    moma_data['Classification'].isin(
        ['Painting', 'Sculpture', 'Photograph', 'Video', 'Audio']
    )
]
moma_classification_filter
moma_classification_filter.columns
# Drop the columns from the artworks dataset that will be merged into the
# dataset from the MoMA artists dataset in Open Lab 2
# Artist-level columns; these come back later via a merge with the artists dataset.
drop_columns = ['ArtistBio', 'Nationality', 'BeginDate', 'EndDate', 'Gender']
moma_classification_filter_drop = moma_classification_filter.drop(
    columns=drop_columns
)
moma_classification_filter_drop
# +
# Remove artworks that have multiple artists to simplify data cleaning steps in
# activities (multi-artist rows hold comma-separated ConstituentIDs)
moma_single_artists = moma_classification_filter_drop[
    ~moma_classification_filter_drop['ConstituentID'].astype(str).str.contains(',')
].copy()
# Output should only include False
moma_single_artists['ConstituentID'].astype(str).str.contains(',').unique()
# +
# Convert data type of ConstituentID column to numeric
moma_single_artists.loc[:, 'ConstituentID'] = pd.to_numeric(
    moma_single_artists['ConstituentID']
)
# dtype should be float64
moma_single_artists['ConstituentID'].dtype
# -
# ## Create dataset files for Open Labs 1 and 2
# Create data files for each classification type
moma_single_artists.query('Classification == "Painting"').to_csv('moma_paintings.csv', index_label='Index')
moma_single_artists.query('Classification == "Sculpture"').to_json('moma_sculptures.json')
moma_single_artists.query('Classification == "Video"').to_csv('moma_videos.csv', index_label='Index')
moma_single_artists.query('Classification == "Audio"').to_csv('moma_audios.csv', index_label='Index')
# Only recreate Excel file if data is modified... it takes a while to write
# this file
moma_single_artists.query('Classification == "Photograph"').to_excel('moma_photographs.xlsx', index_label='Index')
# Test new datasets, each output should be single classification type
print(
pd.read_json('moma_sculptures.json')['Classification'].unique(),
pd.read_csv('moma_paintings.csv')['Classification'].unique(),
pd.read_excel('moma_photographs.xlsx')['Classification'].unique(),
pd.read_csv('moma_videos.csv')['Classification'].unique(),
pd.read_csv('moma_audios.csv')['Classification'].unique()
)
# Fetch the artists data from the MoMA GitHub page for storage in our repo
# !curl https://media.githubusercontent.com/media/MuseumofModernArt/collection/master/Artists.csv -o moma_artists.csv
# ## Cleaning and simplifying the datasets for Open Lab 3
# Create a dataset of the full filtered artworks and artist info and drop
# extraneous columns
# Join artist metadata back onto the single-artist artworks, then drop
# columns not needed for the labs.
artist = pd.read_csv('moma_artists.csv')
complete_filtered_artworks = pd.merge(moma_single_artists, artist, how='left', on='ConstituentID').drop(
    columns=[
        'DisplayName', 'Wiki QID', 'ULAN', 'URL', 'ThumbnailURL', 'Seat Height (cm)'
    ]
)
complete_filtered_artworks.columns
# Save the full filtered dataset with artist info
# NOTE(review): index_label=False suppresses the index header but still
# writes the index column — was index=False intended? Confirm.
complete_filtered_artworks.to_csv('moma_artworks_subset.csv', index_label=False)
# +
# Data structure for start of lab 3
paintings_filtered = complete_filtered_artworks.query('Classification == "Painting"').reset_index(drop=True)
# Data structure for start of lab 3
photos_filtered = complete_filtered_artworks.query('Classification == "Photograph"').reset_index(drop=True)
# -
# ## Create datasets for Open Lab 3
# +
# Save a filtered dataset of just paintings with artist info for use in Open
# Lab 3 after completing Open Lab 2
paintings_filtered.to_csv('moma_paintings_full.csv', index_label=False)
# Save a filtered dataset of just photographs with artist info for use in Open
# Lab 3 after completing Open Lab 2
photos_filtered.query('Classification == "Photograph"').to_csv('moma_photographs_full.csv', index_label=False)
# -
# ## Cleaning and simplifying the datasets for Open Labs 4 and 5
#
# Apply most, but not all, modification steps in Open Lab 3 and some additional modifications
# +
# Create a copy of the state of the dataset used at the start of Lab 3
complete_artworks_final = complete_filtered_artworks.copy()
# Create column containing area of artwork.
# (Consistency fix: Height is now read from the same copy as Width — the
# original mixed `complete_filtered_artworks` in; values are identical
# because it is a fresh copy, but mixing the two frames invites bugs.)
complete_artworks_final['Area (cm^2)'] = complete_artworks_final['Width (cm)'] * complete_artworks_final['Height (cm)']
# Create column containing aspect ratio of artwork
complete_artworks_final['Aspect'] = complete_artworks_final['Width (cm)'] / complete_artworks_final['Height (cm)']
# Replace infinite values in "Aspect" column created by division by zero with NaN
import numpy as np
complete_artworks_final['Aspect'].replace(np.inf, np.nan, inplace=True)
def is_oil_based_painting(medium):
    """Classify a medium description as oil-based ('Y') or not ('N').

    Matches the whole word "oil" in the lowercased, comma-stripped
    description. Non-string inputs (e.g. NaN for missing descriptions)
    are reported as 'N'.
    """
    # isinstance is the idiomatic type test (and handles str subclasses);
    # pandas NaNs are floats and fall through to 'N'.
    if isinstance(medium, str):
        # Create a list of lowercase words, commas removed, from description
        description = medium.lower().replace(',', '').split(' ')
        # Whole-word test, so "oiled" does not match
        if 'oil' in description:
            return 'Y'
    return 'N'
# Use the Series method apply to call the "is_oil_based_painting" function on
# the column "Medium"
complete_artworks_final['OilPainting'] = complete_artworks_final['Medium'].apply(
    is_oil_based_painting
)
# Reassign the column "Gender" with the results of calling the string method
# lower() on the values in the "Gender" column
complete_artworks_final['Gender'] = complete_artworks_final['Gender'].str.lower()
# Create column "YearCreated" containing the year in which an artwork was
# created, extracting the first 4-digit run from the "Date" column string.
# (Raw string: the original '(\d{4})' relied on an invalid string escape.)
complete_artworks_final['YearCreated'] = complete_artworks_final['Date'].str.extract(r'(\d{4})')
# Return a string indicating the aspect of an artwork, either "Portrait",
# "Square", or "Landscape"
def set_aspect_string(val):
    """Label an aspect ratio: <1 'Portrait', ==1 'Square', >1 'Landscape'.

    NaN compares false in every test, so missing ratios fall through and
    return None (which pandas' apply stores as NaN) — do not collapse the
    last branch into a bare `else`.
    """
    for matched, label in (
        (val < 1, 'Portrait'),
        (val == 1, 'Square'),
        (val > 1, 'Landscape'),
    ):
        if matched:
            return label
# Create column "AspectString" containing a string value representing the
# aspect of a painting (NaN aspects stay NaN)
complete_artworks_final['AspectString'] = complete_artworks_final['Aspect'].apply(set_aspect_string)
# Sanity check: should be empty, since infinities were replaced with NaN above
complete_artworks_final[complete_artworks_final['Aspect'] == np.inf]
# -
# ## Save dataset files for Open Labs 4 and 5
#
# +
# Save a cleaned and modified dataset of paintings ready for use in Labs 4 & 5
# NOTE(review): index_label=False suppresses the index header but still
# writes the index column — was index=False intended? Confirm.
complete_artworks_final.query('Classification == "Painting"').reset_index(drop=True).to_csv('moma_paintings_final.csv', index_label=False)
# Save a cleaned and modified dataset of photos ready for use in Labs 4 & 5
complete_artworks_final.query('Classification == "Photograph"').reset_index(drop=True).to_csv('moma_photographs_final.csv', index_label=False)
# -
| moma_data/data-prep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # defaultdict of fixed-length buffers
from collections import deque
# Buffer with maximum length of 3; appending beyond that evicts from the left
buffer = deque([], 3)
buffer
buffer.append(1)
buffer
buffer.append(2)
buffer.append(3)
buffer
buffer.append(4)
# First item is kicked out
buffer
# Now let's say we want a number of these buffers that we want to reference via a dictionary
# ### `defaultdict`
from collections import defaultdict
# `defaultdict` takes a callable that'll be used as the default value if the requested key doesn't exist in the `dict`
x = defaultdict(list)
x['a']
x['b'].append(2)
x
# The default value is generated upon the first access
# ### `partial`
# `partial` allows you to partially apply a function. In our case it's nice since we can create a callable for `defaultdict`.
from functools import partial
# Zero-argument callable returning an empty, bounded deque
x = partial(deque, maxlen=3)
x
x()
# ### Putting the two together
# Each missing key now gets its own independent 3-element ring buffer
x = defaultdict(partial(deque, maxlen=3))
x
x['a']
x
x['a'].append(1)
x['a'].append(2)
x
x['a'].append(3)
x['a'].append(4)
x
x['b']
x['b'].append(1)
x['b'].append(2)
x['b'].append(3)
x
x['b'].append(4)
x
| python/defaultdict-of-fixed-length-buffers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cloud Tools - Firebase
import os
import sys
from pymagic.cloud_tools import Firebase
import firebase_admin
from firebase_admin import firestore
# # App Initialization
# To interact with Firebase's SDK through Python, we have to create an 'app' instance of our Firebase project.
#
# For this you specify the URLs of your project as well as your authentication token.
# +
# Build the Firebase app client from environment-configured project URLs and
# a service-account token, then open Firestore clients on top of it.
firebase_app_client = \
    Firebase.initapp(
        storage_url=os.environ['example_firebase_storage_url'],
        db_url=os.environ['example_firebase_db_url'],
        token_path=os.environ['example_firebase_token_path']
    )
firestore_client = \
    firebase_admin.firestore.client(
        app=firebase_app_client
    )
# NOTE(review): `database` is created without an explicit app while
# `firestore_client` is bound to `firebase_app_client` — confirm both
# clients are intended.
database = firestore.client()
# -
# # FireStorage
# FireStorage is Firebase's file storage service, similar to AWS's S3.
# ## Send File to Bucket
# +
# Pick a platform-appropriate directory holding the file to upload.
if sys.platform == "linux":
    sav_dir = "/home/collier/test_folder/"
else:
    sav_dir = "/Users/collier/Downloads/"
# Upload the local PNG to the bucket under the given blob name, with
# content type and custom metadata attached.
Firebase.file_to_bucket(
    blob_name="test.png",
    file_path=sav_dir + "test.png",
    content_type='image/png',
    metadata_d={"test_key": 'test_val'}
)
# -
# ## List Bucket Objects
# This function returns a list of bucket objects. From these objects you can retrieve object names, time_created, metadata and more! Let's print the names of our bucket objects.
bucket_blobs = Firebase.list_bucket_objects()
# Show only the most recently listed blob's name
[x.name for x in bucket_blobs[-1:]]
# ## Delete Bucket Object
Firebase.delete_bucket_object(
    blob_name="test.png"
)
# # FireStore
# FireStore is Firebase's flagship collection/document-based NoSQL database. Similar to AWS's DynamoDB.
# ## FireStore Objects
# From [this Firbease blog](https://firebase.googleblog.com/2019/06/understanding-collection-group-queries.html): "In Cloud Firestore, your data is divided up into documents and collections. Documents often point to subcollections that contain other documents, like in this example, where each restaurant document contains a subcollection with all the reviews of that restaurant.
# + active=""
# <img src="https://2.bp.blogspot.com/-nv3LtEi-3K4/XQql2xyOtQI/AAAAAAAADoQ/v7lMvVE4ElAr7ezloD7ZQrvJGCBG-mQoACLcBGAs/s1600/2.png" width=60 height=60 />
# -
# So in order to interact with Firestore, we need to specify the sequence of objects needed to reach the final object we are creating/updating/deleting.
# ## Creating/Updating Documents
# Let's create some objects in our database.
# ### Inside a Collection
# When we add a document to our database, we need to specify that the parent object type is a 'collection' as well as that collection's name.
#
# If the collection does not exist, Firebase will create it for us.
# Define the collection/document path: each entry of obj_types pairs with the
# name at the same index in obj_names.
obj_types = ["collection","document"]
obj_names = ["test_collection","test_document"]
# #### Creating
# The collection is created automatically if it does not exist yet.
Firebase.load_delete_firestore(
    method="insert",
    obj_types=obj_types,
    obj_names=obj_names,
    #new document contents
    d={"field":"val"}
)
# #### Updating
# If we want to update a document, we have several options.
#
# First, let's perform a straight update, which completely overwrites the document's data.
#
# Here we need to specify the tree of objects that lead to the document to update:
#define the collection/document path
obj_types = ["collection","document"]
obj_names = ["test_collection","test_document"]
# Straight update: completely overwrites the document's data.
Firebase.load_delete_firestore(
    method="update",
    obj_types=obj_types,
    obj_names=obj_names,
    #updated document contents
    d={"field_updated":"val_updated"}
)
# #### Updating with a Constraint
# What if we don't want to perform am update, but only want to update a document if it meets some constraint, such as a field having a certain value. This will search for documents that have the key constraint.
#
# To do that, we just need to include a 'constraint_key' and its value in our function call.
#
# Now we will only update documents that meet this condition.
#
# Notice that we left off the 'doc_key' key and value in our dictionary since this operation will keep the existing 'doc_key' in the database and simply update the 'doc_data' values.
#
# Also, notice that in our collection/document path, we fall back to our root collection containing the documents we are seeing to update.
# Define the path: just the root collection — the constraint search locates
# matching documents inside it.
obj_types = ["collection"]
obj_names = ["test_collection"]
Firebase.load_delete_firestore(
    method="update",
    obj_types=obj_types,
    obj_names=obj_names,
    # Only documents whose field matches this key/value pair are updated.
    constraint_key="field_updated",
    constraint_val="val_updated",
    #updated document contents
    d={"field_updated":"val_updated_new"}
)
# #### Upserting with a Constraint
# What if we attempt to update a document using a key constraint, but no document exists? In that case we would want to insert it into the collection. This type of operation is called an **Upsert**.
#
# To accomplish this, simply add a value to the 'upsert_doc_name' in the function call, this will create a new document with that name if the constraint key is not found.
#define the collection/document path
obj_types = ["collection"]
obj_names = ["test_collection"]
Firebase.load_delete_firestore(
    method="update",
    obj_types=obj_types,
    obj_names=obj_names,
    constraint_key="field_updated",
    constraint_val="val_update_old", #doesnt exist
    # Providing upsert_doc_name turns the update into an upsert: a new
    # document with this name is created when no match is found.
    upsert_doc_name="test_doc_new", #new doc name
    #updated document contents
    d={"field":"val_upsert"}
)
# ### Inside a Subcollection
# A sub-collection is a collection that lives inside a document. We can create a sub collection by specifying its name and the names of the parent collection and document.
#
# Notice we specify the object names based on the order of their hierarchy in the database. This is not required, but it helps to understand where data is being inserted/updated.
# Insert a document into a sub-collection nested under
# test_collection/test_doc; created implicitly if absent.
Firebase.load_delete_firestore(
    method="insert",
    parent_object_type="sub_collection",
    parent_collection_name="test_collection",
    parent_document_name="test_doc",
    parent_sub_collection_name="test_sub_collection",
    d={
        "doc_key":"test_sub_collection_doc",
        "doc_data":{"field":"val"}
    }
)
# Let's do the same operations we did on collection documents, this time on sub-collection documents.
# straight update: overwrite the named sub-collection document's data
Firebase.load_delete_firestore(
    method="update",
    parent_object_type="sub_collection",
    parent_collection_name="test_collection",
    parent_document_name="test_doc",
    parent_sub_collection_name="test_sub_collection",
    d={
        "doc_key":"test_sub_collection_doc",
        "doc_data":{"field":"val_updated"}
    }
)
# update with constraint: only documents matching constraint_key/val change;
# the document name ("doc_key") is preserved, so only doc_data is passed
Firebase.load_delete_firestore(
    method="update",
    parent_object_type="sub_collection",
    parent_collection_name="test_collection",
    parent_document_name="test_doc",
    parent_sub_collection_name="test_sub_collection",
    constraint_key="field",
    constraint_val="val_updated",
    #updated document contents
    d={
        "doc_data":{"field_updated":"val_updated_new"}
    }
)
# upsert with constraint: upsert=True inserts the document under "doc_key"
# when no document matches the constraint
Firebase.load_delete_firestore(
    method="update",
    parent_object_type="sub_collection",
    parent_collection_name="test_collection",
    parent_document_name="test_doc",
    parent_sub_collection_name="test_sub_collection",
    constraint_key="field_updated",
    constraint_val="val_updated_old",
    upsert=True,
    #updated document contents
    d={
        "doc_key":"upsert_test",
        "doc_data":{"field":"val_upsert"}
    }
)
# ## Deleting Objects
# When deleting objects in Firebase, we can do much of the same types of operations we did when we were updating objects.
#
# Let's create a few more objects to delete.
# +
# Fixture documents, one per delete scenario demonstrated below.
# single field to delete in a document
Firebase.load_delete_firestore(
    method="insert",
    parent_object_type="collection",
    parent_collection_name="test_collection",
    # new document contents
    d={
        "doc_key":"delete_field",
        "doc_data":{"field_to_delete":"val"}
    }
)
# single document to delete by name
Firebase.load_delete_firestore(
    method="insert",
    parent_object_type="collection",
    parent_collection_name="test_collection",
    # new document contents
    d={
        "doc_key":"delete_by_name",
        "doc_data":{"field":"val"}
    }
)
# single document to delete by key constraint
Firebase.load_delete_firestore(
    method="insert",
    parent_object_type="collection",
    parent_collection_name="test_collection",
    # new document contents
    d={
        "doc_key":"delete_by_key_constraint",
        "doc_data":{"field":"delete_me"}
    }
)
# single document to delete by key constraint,
# this time within a subcollection
Firebase.load_delete_firestore(
    method="insert",
    parent_object_type="sub_collection",
    parent_collection_name="test_collection",
    parent_document_name="test_doc",
    parent_sub_collection_name="test_sub_collection",
    d={
        "doc_key":"delete_by_key_constraint",
        "doc_data":{"field":"delete_me"}
    }
)
# -
# ### Deleting a Field
# Let's delete a single field in a document
# The field name to remove is passed via constraint_key.
Firebase.load_delete_firestore(
    method="delete",
    parent_object_type="field",
    parent_collection_name="test_collection",
    parent_document_name="delete_field",
    constraint_key="field_to_delete"
)
# ### Deleting Document by Name
# Let's delete a document by name.
Firebase.load_delete_firestore(
    method="delete",
    parent_object_type="document",
    parent_collection_name="test_collection",
    parent_document_name="delete_by_name",
)
# ### Deleting Document by Key Constraint
# collection: delete every document whose "field" equals "delete_me"
Firebase.load_delete_firestore(
    method="delete",
    parent_object_type="collection",
    parent_collection_name="test_collection",
    constraint_key="field",
    constraint_val="delete_me"
)
# subcollection: same constraint-based delete, one level deeper
Firebase.load_delete_firestore(
    method="delete",
    parent_object_type="sub_collection",
    parent_collection_name="test_collection",
    parent_document_name="test_doc",
    parent_sub_collection_name="test_sub_collection",
    constraint_key="field",
    constraint_val="delete_me"
)
# ### Deleting All Documents
# To delete all documents in a collection or subcollection, we just need to leave off the key constraint argument.
#
# You can also specify a 'document_delete_batch_size' to avoid memory errors if the size of the collection is too big. The function will call itself again if the deleted document count exceeds this limit.
# subcollection: no constraint, so everything inside is removed (in batches of 1 here)
Firebase.load_delete_firestore(
    method="delete",
    parent_object_type="sub_collection",
    parent_collection_name="test_collection",
    parent_document_name="test_doc",
    parent_sub_collection_name="test_sub_collection",
    document_delete_batch_size=1
    # constraint_key="field",
    # constraint_val="delete_me"
)
# collection: wipe the whole test collection, again batched
Firebase.load_delete_firestore(
    method="delete",
    parent_object_type="collection",
    parent_collection_name="test_collection",
    document_delete_batch_size=1
    # constraint_key="field",
    # constraint_val="delete_me"
)
| examples/cloud_tools - Firebase.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# hide
# %load_ext autoreload
# %autoreload 2
# # Tutorial
# Import modules:
import ratioimage.ratio_images as ri
import ratioimage.plot_results as rp
# Load in images and segmentations (label images):
# Each helper returns a dict keyed by image name; the two channels and the
# segmentation share keys so they can be matched up later.
C0_imgs = ri.create_img_dict_from_folder(load_pattern="data/PercevalHR_data/imgs/*C0.tiff")
C1_imgs = ri.create_img_dict_from_folder(load_pattern="data/PercevalHR_data/imgs/*C1.tiff")
lab_imgs = ri.create_img_dict_from_folder(
    load_pattern="data/PercevalHR_data/segs/*.tif", label=True
)
C0_imgs.keys()
# Display example pair of ratio images + segmentation using plotting function from `clonedetective`.
# +
from clonedetective.utils import plot_new_images
img_name = "a1g01"
plot_new_images(
    [C0_imgs[img_name], C1_imgs[img_name], lab_imgs[img_name]],
    ["C0 channel", "C1 channel", "label image (segmentation)"],
    interpolation="none",
)
# -
# Measure regionprops of images and calculate ratio of intensities. Output is a tidy dataframe
# +
df = ri.ratiometric_measure_region_props_to_tidy_df(C1_imgs, C0_imgs, lab_imgs)
# Split the image key into a sample id ("genotype") and replicate id at the "g" separator.
df = ri.split_img_key_col_to_sample_id_and_replicates(
    df, sample_id_colname="genotype", split_regex=r"g"
)
# Map the raw sample ids onto readable category labels.
df = ri.sample_id_to_categories(
    df, sample_id_colname="genotype", old_to_new_sample_ids={"a1": "ctrl", "a2": "mut"}
)
df.head()
# -
# Plot results - aggregated per cell:
# +
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(1.75, 2.5))
rp.tidy_create_strip_box_plot(
    x="genotype",
    y="ratio_mean_int",
    data=df,
    ax=ax,
    y_axis_limit=4,
    y_label="PercevalHR 488/405\n(Relative ATP/ADP)",
    palette=["#95a5a6", "#95a5a6"],
)
# -
# Plot results - aggregated per image/replicate:
# Average the per-cell ratios within each (genotype, replicate) group.
grouped_df = (
    df.groupby(["genotype", "rep_id"]).agg({"ratio_mean_int": "mean"}).reset_index()
)
# +
fig, ax = plt.subplots(figsize=(1.75, 2.5))
rp.tidy_create_swarm_box_plot(
    x="genotype",
    y="ratio_mean_int",
    ax=ax,
    data=grouped_df,
    y_axis_limit=2.5,
    y_label="PercevalHR 488/405\n(Relative ATP/ADP)",
    palette=["#95a5a6", "#95a5a6"],
)
# -
# Create ratiometric images:
ratio_imgs = ri.create_dict_of_ratio_images(C1_imgs, C0_imgs, lab_imgs)
ratio_imgs.keys()
# Side-by-side comparison of a control and a mutant ratio image.
plot_new_images(
    [ratio_imgs['a1g01'], ratio_imgs['a2g01']],
    ["a1g01 ratio image", "a2g01 ratio image"],
    img_cmap="magma",
    vmax=2.5,
    figure_shape=(1, 2),
    figure_size=(10, 5),
    colorbar=True,
    colorbar_title="PercevalHR 488/405"
)
| 02_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multiple Subplots
# Sometimes it is helpful to compare different views of data side by side.
# To this end, Matplotlib has the concept of *subplots*: groups of smaller axes that can exist together within a single figure.
# These subplots might be insets, grids of plots, or other more complicated layouts.
# In this section we'll explore four routines for creating subplots in Matplotlib.
# %matplotlib inline
import matplotlib.pyplot as plt

# The 'seaborn-white' style sheet was renamed to 'seaborn-v0_8-white' in
# Matplotlib 3.6, and plt.style.use raises OSError for unknown style names;
# fall back so the notebook keeps working on both old and new versions.
try:
    plt.style.use('seaborn-white')
except OSError:
    plt.style.use('seaborn-v0_8-white')

import numpy as np
# ## ``plt.axes``: Subplots by Hand
#
# The most basic method of creating an axes is to use the ``plt.axes`` function.
# As we've seen previously, by default this creates a standard axes object that fills the entire figure.
# ``plt.axes`` also takes an optional argument that is a list of four numbers in the figure coordinate system.
# These numbers represent ``[left, bottom, width, height]`` in the figure coordinate system, which ranges from 0 at the bottom left of the figure to 1 at the top right of the figure.
#
# For example, we might create an inset axes at the top-right corner of another axes by setting the *x* and *y* position to 0.65 (that is, starting at 65% of the width and 65% of the height of the figure) and the *x* and *y* extents to 0.2 (that is, the size of the axes is 20% of the width and 20% of the height of the figure):
ax1 = plt.axes()  # standard axes
ax2 = plt.axes([0.65, 0.65, 0.2, 0.2])  # inset axes in the top-right corner
# The equivalent of this command within the object-oriented interface is ``fig.add_axes()``. Let's use this to create two vertically stacked axes:
# +
fig = plt.figure()
# Upper panel: hide x tick labels since it shares its x range with the lower panel.
ax1 = fig.add_axes([0.1, 0.5, 0.8, 0.4],
                   xticklabels=[], ylim=(-1.2, 1.2))
ax2 = fig.add_axes([0.1, 0.1, 0.8, 0.4],
                   ylim=(-1.2, 1.2))
x = np.linspace(0, 10)
ax1.plot(np.sin(x))
ax2.plot(np.cos(x));
# -
# We now have two axes (the top with no tick labels) that are just touching: the bottom of the upper panel (at position 0.5) matches the top of the lower panel (at position 0.1 + 0.4).
# ## ``plt.subplot``: Simple Grids of Subplots
#
# Aligned columns or rows of subplots are a common-enough need that Matplotlib has several convenience routines that make them easy to create.
# The lowest level of these is ``plt.subplot()``, which creates a single subplot within a grid.
# As you can see, this command takes three integer arguments—the number of rows, the number of columns, and the index of the plot to be created in this scheme, which runs from the upper left to the bottom right:
for i in range(1, 7):
    plt.subplot(2, 3, i)
    plt.text(0.5, 0.5, str((2, 3, i)),
             fontsize=18, ha='center')
# The command ``plt.subplots_adjust`` can be used to adjust the spacing between these plots.
# The following code uses the equivalent object-oriented command, ``fig.add_subplot()``:
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, wspace=0.4)
for i in range(1, 7):
    ax = fig.add_subplot(2, 3, i)
    ax.text(0.5, 0.5, str((2, 3, i)),
            fontsize=18, ha='center')
# We've used the ``hspace`` and ``wspace`` arguments of ``plt.subplots_adjust``, which specify the spacing along the height and width of the figure, in units of the subplot size (in this case, the space is 40% of the subplot width and height).
# ## ``plt.subplots``: The Whole Grid in One Go
#
# The approach just described can become quite tedious when creating a large grid of subplots, especially if you'd like to hide the x- and y-axis labels on the inner plots.
# For this purpose, ``plt.subplots()`` is the easier tool to use (note the ``s`` at the end of ``subplots``). Rather than creating a single subplot, this function creates a full grid of subplots in a single line, returning them in a NumPy array.
# The arguments are the number of rows and number of columns, along with optional keywords ``sharex`` and ``sharey``, which allow you to specify the relationships between different axes.
#
# Here we'll create a $2 \times 3$ grid of subplots, where all axes in the same row share their y-axis scale, and all axes in the same column share their x-axis scale:
fig, ax = plt.subplots(2, 3, sharex='col', sharey='row')
# Note that by specifying ``sharex`` and ``sharey``, we've automatically removed inner labels on the grid to make the plot cleaner.
# The resulting grid of axes instances is returned within a NumPy array, allowing for convenient specification of the desired axes using standard array indexing notation:
# axes are in a two-dimensional array, indexed by [row, col]
for i in range(2):
    for j in range(3):
        ax[i, j].text(0.5, 0.5, str((i, j)),
                      fontsize=18, ha='center')
fig
# In comparison to ``plt.subplot()``, ``plt.subplots()`` is more consistent with Python's conventional 0-based indexing.
# ## ``plt.GridSpec``: More Complicated Arrangements
#
# To go beyond a regular grid to subplots that span multiple rows and columns, ``plt.GridSpec()`` is the best tool.
# The ``plt.GridSpec()`` object does not create a plot by itself; it is simply a convenient interface that is recognized by the ``plt.subplot()`` command.
# For example, a gridspec for a grid of two rows and three columns with some specified width and height space looks like this:
grid = plt.GridSpec(2, 3, wspace=0.4, hspace=0.3)
# From this we can specify subplot locations and extents using the familiar Python slicing syntax:
plt.subplot(grid[0, 0])
plt.subplot(grid[0, 1:])
plt.subplot(grid[1, :2])
plt.subplot(grid[1, 2]);
# This type of flexible grid alignment has a wide range of uses.
# I most often use it when creating multi-axes histogram plots like the ones shown here:
# +
# Create some normally distributed data
mean = [0, 0]
cov = [[1, 1], [1, 2]]
x, y = np.random.multivariate_normal(mean, cov, 3000).T
# Set up the axes with gridspec
fig = plt.figure(figsize=(6, 6))
grid = plt.GridSpec(4, 4, hspace=0.2, wspace=0.2)
main_ax = fig.add_subplot(grid[:-1, 1:])
y_hist = fig.add_subplot(grid[:-1, 0], xticklabels=[], sharey=main_ax)
x_hist = fig.add_subplot(grid[-1, 1:], yticklabels=[], sharex=main_ax)
# scatter points on the main axes
main_ax.plot(x, y, 'ok', markersize=3, alpha=0.2)
# histogram on the attached axes
x_hist.hist(x, 40, histtype='stepfilled',
            orientation='vertical', color='gray')
x_hist.invert_yaxis()
y_hist.hist(y, 40, histtype='stepfilled',
            orientation='horizontal', color='gray')
y_hist.invert_xaxis()
# -
# <!--NAVIGATION-->
# < [Customizing Colorbars](04.07-Customizing-Colorbars.ipynb) | [Contents](Index.ipynb) | [Text and Annotation](04.09-Text-and-Annotation.ipynb) >
#
# <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/04.08-Multiple-Subplots.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
| week4/day4/theory/matplotplib/2_matplotlib/8_Multiple-Subplots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="CupWNjTD_qCv" executionInfo={"status": "ok", "timestamp": 1620595152107, "user_tz": 300, "elapsed": 14824, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhTXJj0g9GwUhtudBJeOwFU0FzIiOeVg0oWkjNopA=s64", "userId": "16512623676434440228"}} outputId="7aac07b4-8133-4629-9478-faf15c0a2343"
# Mount Google Drive and change into the project directory (Colab only).
from google.colab import drive
drive.mount("/content/gdrive", force_remount=True)
import os
os.chdir("/content/gdrive/My Drive/CS498DL/CS498DL-project/cases")
# + id="ZdRvjuRkXiPf"
import numpy as np
import torch
import matplotlib.pyplot as plt
# + id="wW9mOGbCT4cX"
# Use the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# + [markdown] id="OmA_x16Lx6wC"
# # Define neural network
# + id="f2YDGGkcXpy8"
# Model
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Fully connected tanh network mapping (x, y) coordinate pairs to a scalar.

    Architecture: Linear(dim -> nof) + tanh, then `num_layers` identical
    Linear(nof -> nof) + Tanh stages, then a Linear(nof -> 1) read-out.
    """

    def __init__(self, nof=50, dim=2, num_layers=4):
        super().__init__()
        self.dim = dim
        # Project the coordinate input into the hidden width.
        self.input_layer = nn.Linear(dim, nof)
        # Stack of identical hidden stages, each a Linear followed by Tanh.
        stages = [nn.Sequential(nn.Linear(nof, nof), nn.Tanh())
                  for _ in range(num_layers)]
        self.hidden_layer = nn.Sequential(*stages)
        # Scalar read-out head.
        self.output_layer = nn.Linear(nof, 1)

    def forward(self, x, y):
        """Concatenate x and y along the last axis and evaluate the network."""
        h = torch.cat((x, y), dim=-1)
        h = torch.tanh(self.input_layer(h))
        h = self.hidden_layer(h)
        return self.output_layer(h)
# + [markdown] id="2oMf9FMVx_zw"
# # Initialize data and forcing function
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="8XFlzbTNZXY6" executionInfo={"status": "ok", "timestamp": 1620594889149, "user_tz": 300, "elapsed": 456, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhTXJj0g9GwUhtudBJeOwFU0FzIiOeVg0oWkjNopA=s64", "userId": "16512623676434440228"}} outputId="337e292b-0f93-4b16-9660-e6d6cf405e6b"
# Initialize data
gridsize_x = 100
gridsize_y = 100
x = torch.linspace(0, 1, gridsize_x, device=device)
y = torch.linspace(0, 1, gridsize_y, device=device)
X, Y = torch.meshgrid(x, y)
X = X.to(device)
Y = Y.to(device)
num_modes = 3
# coef = np.random.rand(num_modes) * 10
coef = np.array([10.0, 1.0, 5.0])
# Forcing field built from a few sine modes (not used by the Burgers residual below).
f = torch.zeros_like(X)
for i in range(num_modes):
    f += coef[i] * torch.sin((i+1) * np.pi * X) * torch.sin((i+4) * np.pi * Y)
# load f data into torch
# Reshape everything to (gridsize_x, gridsize_y, 1) so the network sees a trailing feature axis.
X = X.reshape((gridsize_x, gridsize_y, 1))
Y = Y.reshape((gridsize_x, gridsize_y, 1))
f = f.reshape((gridsize_x, gridsize_y, 1)).to(device)
# Cloned copies with requires_grad so autograd can differentiate w.r.t. the coordinates.
Xc = X.clone().to(device)
Xc.requires_grad = True
Yc = Y.clone().to(device)
Yc.requires_grad = True
# load BC data into torch
bound_left = 0.0
bound_right = 0.0
u0 = -torch.sin(2.0 * np.pi * y)  # initial profile imposed along the first x-slice
pe = 100
def plot(*args):
    # Convenience: detach tensors and move them to CPU numpy for matplotlib.
    return map(lambda x : x.cpu().detach().numpy(), args)
plt.plot(*plot(y, u0))
# + [markdown] id="9Rk_ngdyyMWk"
# # Loss functions for different pdes
# + id="ZgZtiix7bwMP"
# loss functions
def pde_loss_poisson(pred, x, y, pe, lambda_pde):
    """Mean squared PDE residual over the collocation grid.

    NOTE: despite the name, the residual implemented here is the viscous
    Burgers operator du/dx + u * du/dy - (1/pe) * d2u/dy2, with x playing
    the role of time.

    Parameters
    ----------
    pred : torch.Tensor
        Network output evaluated on the grid built from `x` and `y`.
    x, y : torch.Tensor
        Coordinate tensors with requires_grad=True, used for autograd derivatives.
    pe : float
        Peclet-like number scaling the diffusion term.
    lambda_pde : float
        Weight applied to this loss term.

    Returns
    -------
    torch.Tensor
        Scalar weighted loss, normalized by the module-level grid size
        (gridsize_x * gridsize_y).
    """
    # Fix: the original also computed d2u/dx2 via a second autograd backward
    # pass but never used it; dropping it avoids that wasted computation.
    du_dx = torch.autograd.grad(pred.sum(), x, create_graph=True)[0]
    du_dy = torch.autograd.grad(pred.sum(), y, create_graph=True)[0]
    du2_dy2 = torch.autograd.grad(du_dy.sum(), y, create_graph=True)[0]
    loss = torch.sum(torch.square(du_dx + pred * du_dy - du2_dy2 / pe))
    return lambda_pde * loss / (gridsize_x * gridsize_y)
def boundary_loss_dirichlet(pred, bound_left, bound_right, u0, lambda_bound=1.0):
    """Weighted penalty enforcing the Dirichlet edge values and the initial profile.

    Relies on the module-level gridsize_x / gridsize_y for normalization.
    """
    # Squared mismatch at the y = 0 and y = 1 edges, averaged over gridsize_x.
    left_term = (pred[:, 0, 0] - bound_left) ** 2 / gridsize_x
    right_term = (pred[:, -1, 0] - bound_right) ** 2 / gridsize_x
    loss = torch.sum(left_term + right_term)
    # Squared mismatch against the initial profile u0 along the first x-slice.
    loss = loss + torch.sum((pred[0, :, 0] - u0) ** 2 / gridsize_y)
    return lambda_bound * loss
# + [markdown] id="MjX-iZxUzE2u"
# # Define analytical solutions for comparison
# + id="YClA376Xbwmk"
# define network
net = Net(nof=50, num_layers=4).to(device)
# define optimizer
learning_rate = 0.0025
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
# + colab={"base_uri": "https://localhost:8080/", "height": 472} id="l08QAJcSj_r7" executionInfo={"status": "ok", "timestamp": 1620594991315, "user_tz": 300, "elapsed": 100686, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhTXJj0g9GwUhtudBJeOwFU0FzIiOeVg0oWkjNopA=s64", "userId": "16512623676434440228"}} outputId="c896deaa-5cfd-41f6-8546-3647abfaa54b"
# train
num_epochs = 10000
lambda_pde = 1.0
lambda_bound = 5.0
# Per-epoch history of each loss component, for the semilog plot below.
log = {"loss_pde" : [],
       "loss_bound" : [],
       "loss_total" : []}
for epoch in range(num_epochs):
    # Decay the learning rate by 1% every 1000 epochs.
    if epoch % 1000 == 0:
        learning_rate *= 0.99
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
    net.train()
    upred = net.forward(Xc, Yc)
    loss_bound = boundary_loss_dirichlet(upred, bound_left, bound_right, u0, lambda_bound=lambda_bound)
    loss_pde = pde_loss_poisson(upred, Xc, Yc, pe, lambda_pde)
    loss = loss_pde + loss_bound
    log["loss_pde"].append(loss_pde.item())
    log["loss_bound"].append(loss_bound.item())
    log["loss_total"].append(loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 1000 == 0:
        print(f"Epoch {epoch} --- Loss : {loss}, pde_loss : {loss_pde}, bound_loss : {loss_bound}")
print(loss)
plt.semilogy(log["loss_pde"], 'r', label = 'loss_pde')
plt.semilogy(log["loss_bound"], 'g', label = 'loss_bound')
plt.semilogy(log["loss_total"], 'b', label = 'loss_total')
plt.legend()
# + id="N_VT6Ewx45vF"
# Persist the training log to disk so it can be plotted later in another
# environment (the Colab session lacks the plotting dependencies used offline).
import pickle
with open('loss_burgers_rare_fan.pickle', 'wb') as handle:
    pickle.dump(log, handle)
# + [markdown] id="phl-smh1zkjS"
# # Preview results
# + colab={"base_uri": "https://localhost:8080/", "height": 515} id="aH-2V-GxlBZN" executionInfo={"status": "ok", "timestamp": 1620594991796, "user_tz": 300, "elapsed": 100206, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhTXJj0g9GwUhtudBJeOwFU0FzIiOeVg0oWkjNopA=s64", "userId": "16512623676434440228"}} outputId="b0658cfd-98a2-440d-e540-134abc51006f"
# Evaluate the trained network on the full grid and show the solution contour.
pinn_sol = net.forward(X, Y)
plt.contourf(*plot(X[:, :, 0], Y[:, :, 0], pinn_sol[:, :, 0]), levels=100)
plt.colorbar()
freq = 5
plt.figure()
# Line plots of every 5th x-slice of the predicted solution.
for i in range(10):
    plt.plot(*plot(y, pinn_sol[i * 5, :, 0]))
# + [markdown] id="sC1Mc1kQSAIs"
# # Compute FDM solution for comparison
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="r6XeqPBdSB0d" executionInfo={"status": "ok", "timestamp": 1620595001322, "user_tz": 300, "elapsed": 108341, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhTXJj0g9GwUhtudBJeOwFU0FzIiOeVg0oWkjNopA=s64", "userId": "16512623676434440228"}} outputId="ebbf3b50-d79d-4dc6-9d19-2452f4f1a5ca"
def fdm_sol_stationary_viscous_burgers(u0, pe, final_time):
    """
    Explicit-Euler finite-difference solution of viscous Burgers on [0, 1].

    Time-marches u_t = -u u_x + u_xx / pe from the initial profile `u0`
    up to `final_time`, assuming Dirichlet boundaries u = 0.0 at x = 0.0, 1.0.

    Parameters
    ----------
    u0 : numpy.ndarray
        1-D initial condition sampled on a uniform grid including both endpoints.
    pe : float
        Peclet-like number scaling the diffusion term.
    final_time : float
        Time up to which the solution is advanced (0.0 returns u0's interior
        with zeroed endpoints).

    Returns
    -------
    numpy.ndarray
        Same shape as `u0`, with zero boundary values re-attached.
    """
    # Fix: the original imported spsolve and numpy.linalg but never used them.
    import scipy.sparse as spp

    fdm_grid_size = u0.shape[0] - 2  # interior points; the 2 endpoints are fixed at 0
    dx = 1.0 / (fdm_grid_size + 1)
    # Central first- and second-difference operators on the interior grid.
    gradient = (spp.diags([-0.5, 0.5], [-1, 1], shape=(fdm_grid_size, fdm_grid_size),
                          format="csr") / dx)
    laplacian = (spp.diags([1.0, -2.0, 1.0], [-1, 0, 1],
                           shape=(fdm_grid_size, fdm_grid_size), format="csr") / (dx ** 2))

    fdm_sol = u0[1:-1].copy()
    time = 0.0
    base_dt = 0.01 * dx  # hoisted: the step size is constant except for the final clamp
    while time < final_time:
        dt = base_dt
        if time + dt > final_time:
            dt = final_time - time  # clamp the last step to land exactly on final_time
        # Explicit Euler update: advection term -u * du/dx plus diffusion / pe.
        # (`@` makes the sparse matrix-vector products explicit.)
        fdm_sol[...] += dt * (-fdm_sol * (gradient @ fdm_sol) + (laplacian @ fdm_sol) / pe)
        time += dt

    # Re-attach the homogeneous Dirichlet boundary values.
    final_fdm_sol = np.zeros_like(u0)
    final_fdm_sol[1:-1] = fdm_sol
    return final_fdm_sol
import numpy as np
import matplotlib.pyplot as plt
pe = 100.0
fdm_grid_size = gridsize_y
x_fdm = np.linspace(0.0, 1.0, fdm_grid_size)
u0 = -np.sin(2 * np.pi * x_fdm)
final_time = np.linspace(0.0, 0.5, 10)
# plt.plot(x_fdm, u0, '--')
# Build the reference solution: one FDM run per x-slice, treating x as time.
fdm_sol = 0 * pinn_sol
final_time = np.linspace(0.0, 1.0, gridsize_x)
for i, t in enumerate(final_time):
    fdm_sol[i, ...] = torch.from_numpy(fdm_sol_stationary_viscous_burgers(u0, pe, t)).unsqueeze(-1)
# plt.plot(x_fdm, fdm_sol)
plt.contourf(*plot(X[:, :, 0], Y[:, :, 0], fdm_sol[:, :, 0]), levels=100)
# + [markdown] id="wR6HAdZvznbG"
# # Generate solution and error contours
# + colab={"base_uri": "https://localhost:8080/", "height": 585} id="ttGmXW3Z-XfC" executionInfo={"status": "ok", "timestamp": 1620595209791, "user_tz": 300, "elapsed": 1040, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhTXJj0g9GwUhtudBJeOwFU0FzIiOeVg0oWkjNopA=s64", "userId": "16512623676434440228"}} outputId="0de27a3f-9660-44f2-8c11-5c2a23773001"
# Use the FDM result as the reference ("analytical") solution for the error plots.
ana_sol = fdm_sol
pinn_sol = net.forward(Xc, Yc)
# Pointwise relative error in percent, normalized by the reference's max magnitude.
error = torch.abs(pinn_sol - ana_sol) / torch.max(torch.abs(ana_sol)) * 100.0
import numpy.linalg as la
# Grid-normalized L2 error between the PINN and FDM solutions.
error_l2 = la.norm((pinn_sol - ana_sol).cpu().detach().numpy()) / np.sqrt(gridsize_x * gridsize_y)
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
import matplotlib.ticker as ticker
# specify your own formatter for colorbar labels in return select desired format
def myfmt(x, pos):
    """Colorbar tick formatter: render the tick value with one decimal place.

    `pos` is required by Matplotlib's FuncFormatter signature but unused.
    """
    return f"{x:.1f}"
# Two stacked panels: the PINN solution and its percent error vs the reference.
fig = plt.figure(figsize=(6,8))
ax1 = plt.subplot(211)
cntr1 = ax1.contourf(*plot(X[:, :, 0], Y[:, :, 0], pinn_sol[:, :, 0]), cmap="Spectral", levels=20)
cbar1 = fig.colorbar(cntr1, ax=ax1, label='u(t, y)', format=ticker.FuncFormatter(myfmt))
# ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('Solution')
# This is the fix for the white lines between contour levels
for c in cntr1.collections:
    c.set_edgecolor("face")
ax2 = plt.subplot(212)
# Small offset keeps log-scaled color handling safe where the error is exactly zero.
cntr2 = ax2.contourf(*plot(X[:,:,0], Y[:,:,0], (error[:,:,0]) + 1e-12), cmap="inferno", levels=100)
cbar2 = fig.colorbar(cntr2, ax=ax2, label='Error (%)', format=ticker.FuncFormatter(myfmt))
ax2.set_xlabel('t')
ax2.set_ylabel('y')
ax2.set_title('Error')
for c in cntr2.collections:
    c.set_edgecolor("face")
plt.tight_layout()
# + id="GGSULGo6_gRf"
# Export the two-panel figure as vector EPS for the report.
fig.savefig('burgers_rare_fan.eps', dpi=300)
# + colab={"base_uri": "https://localhost:8080/"} id="cfr2S8egAjCq" executionInfo={"status": "ok", "timestamp": 1620596216819, "user_tz": 300, "elapsed": 419, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhTXJj0g9GwUhtudBJeOwFU0FzIiOeVg0oWkjNopA=s64", "userId": "16512623676434440228"}} outputId="5086fd94-8c3c-4804-e91e-c13b27f8c7d9"
# Display the grid-normalized L2 error as the cell output.
error_l2
# + id="9ZlGh8wcAioj"
| cases/1d_burgers_time_rare_fan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # * Visualizing of genetic similarity with Lightning + GraphX *
# ## Setup lightning
# %libraryDependencies += "org.viz.lightning" %% "lightning-scala" % "0.1.6"
# %update
import org.viz.lightning._
import org.apache.spark.graphx._
// Connect to a hosted Lightning visualization server and render inline.
val lgn = Lightning(host="https://lightning-spark-summit.herokuapp.com" )
lgn.enableNotebook()
# ## Load structure similarity data
# Public data from http://www.brain-map.org/
val source = "/Users/mathisonian/projects/spark-summit/notebooks/data/allen-connectivity.txt"
// Build a GraphX graph from the edge-list file (one "src dst" pair per line).
val g = GraphLoader.edgeListFile(sc, source)
# ## Show the network (unlabeled)
# +
// Collect edges to the driver as [src, dst] integer pairs for the force layout.
val links = g.edges.collect().map(e => Array(e.srcId.toInt, e.dstId.toInt))
lgn.force(links)
# -
# ## Show the network colored by degree
# +
val links = g.edges.collect().map(e => Array(e.srcId.toInt, e.dstId.toInt))
// Log-scale the vertex degrees (sorted by vertex id) to use as color values.
val degrees = g.degrees.sortBy(_._1).collect().map(x => Math.log(x._2))
lgn.force(links, value=degrees, colormap="Lightning")
# -
# ## Show the network colored by connected components
# +
val links = g.edges.collect().map(e => Array(e.srcId.toInt, e.dstId.toInt))
// Label each vertex with the id of its connected component.
val connectedComponents = g.connectedComponents().vertices.sortBy(_._1).map(_._2.toInt).collect()
lgn.force(links, label=connectedComponents)
# -
| scala/graphx.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# # Rigid Body Work-Precision Diagrams
# ### <NAME>
# +
using OrdinaryDiffEq, ParameterizedFunctions, ODE, ODEInterfaceDiffEq, LSODA,
      Sundials, DiffEqDevTools

# Time-dependent forcing applied to the third component.
k(t) = 0.25*sin(t)^2

# Euler rigid-body equations with inertia parameters I₁, I₂, I₃.
g = @ode_def RigidBody begin
  dy1 = I₁*y2*y3
  dy2 = I₂*y1*y3
  dy3 = I₃*y1*y2 + k(t)
end I₁ I₂ I₃

p = [-2.0,1.25,-0.5]
prob = ODEProblem(g,[1.0;0.0;0.9],(0.0,10.0),p)

# Tolerance sweeps for the work-precision diagrams.
abstols = 1.0 ./ 10.0 .^ (6:13)
reltols = 1.0 ./ 10.0 .^ (3:10);
# High-accuracy reference solution used to measure each solver's error.
sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14)
test_sol = TestSolution(sol)
using Plots; gr()
# -
plot(sol)
# Medium-order explicit Runge-Kutta comparison.
setups = [Dict(:alg=>DP5())
          #Dict(:alg=>ode45()) # fails
          Dict(:alg=>dopri5())
          Dict(:alg=>Tsit5())
          Dict(:alg=>Vern6())
]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=true,numruns=100,maxiters=10000)
plot(wp)
# The DifferentialEquations.jl algorithms once again pull ahead. This is the first benchmark we've ran where `ode45` doesn't fail. However, it still doesn't do as well as `Tsit5`. One reason why it does so well is that the maximum norm that ODE.jl uses (as opposed to the L2 norm of Sundials, DifferentialEquations, and ODEInterface) seems to do really well on this problem. `dopri5` does surprisingly bad in this test.
#
# ## Higher Order
# High-order explicit Runge-Kutta comparison.
setups = [Dict(:alg=>DP8())
          #Dict(:alg=>ode78()) # fails
          Dict(:alg=>Vern7())
          Dict(:alg=>Vern8())
          Dict(:alg=>dop853())
          Dict(:alg=>Vern6())
]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000)
plot(wp)
# Verner methods against extrapolation, Adams, and ARKODE alternatives.
setups = [Dict(:alg=>Vern7())
          Dict(:alg=>Vern8())
          Dict(:alg=>odex())
          Dict(:alg=>CVODE_Adams())
          Dict(:alg=>lsoda())
          Dict(:alg=>ddeabm())
          Dict(:alg=>ARKODE(Sundials.Explicit(),order=6))
]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000)
plot(wp)
# ## Comparison with Non-RK methods
#
# Now let's test Tsit5 and Vern9 against parallel extrapolation methods and an
# Adams-Bashforth-Moulton:
setups = [Dict(:alg=>Tsit5())
          Dict(:alg=>Vern9())
          Dict(:alg=>VCABM())
          Dict(:alg=>AitkenNeville(min_order=1, max_order=9, init_order=4, threading=true))
          Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=4, threading=true))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))]
solnames = ["Tsit5","Vern9","VCABM","AitkenNeville","Midpoint Deuflhard","Midpoint Hairer Wanner"]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,names=solnames,
                      save_everystep=false,verbose=false,numruns=100)
plot(wp)
# Effect of threading and step-number sequence on the extrapolation methods.
setups = [Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=9, threading=false))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=false))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :romberg, threading=true))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :bulirsch, threading=true))]
solnames = ["Deuflhard","No threads","standard","Romberg","Bulirsch"]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,names=solnames,
                      save_everystep=false,verbose=false,numruns=100)
plot(wp)
# Sensitivity of the Hairer-Wanner extrapolation to order-range choices.
setups = [Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=10, threading=true))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=11, init_order=10, threading=true))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=15, init_order=10, threading=true))
          Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=7, init_order=6, threading=true))]
solnames = ["1","2","3","4","5"]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,names=solnames,
                      save_everystep=false,verbose=false,numruns=100)
plot(wp)
# ### Conclusion
#
# Once again, the OrdinaryDiffEq.jl pull far ahead in terms of speed and accuracy.
using DiffEqBenchmarks
DiffEqBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
| notebook/NonStiffODE/RigidBody_wpd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Basic NumPy array creation, inspection, and slicing demo.
import numpy as np
a = np.arange(10)
type(a)
# NOTE: wildcard imports are generally discouraged outside quick demos like
# this one; they shadow builtins and make name origins unclear.
from numpy import *
a = arange(10)
a
a.dtype
a.shape
# Stack two 1-D ranges into a 2x4 matrix.
m = array([arange(4), arange(4)])
m
m.shape
a = array([arange(5,9), arange(4,8)])
a
a[1,2]
a = arange(10)
a
a[1:6]
a[::-1]  # reversed view
a[::2]  # every other element
| chapter3/section3.1_numpy_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework 2: Stanford Sentiment Treebank
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2019"
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [Methodological note](#Methodological-note)
# 1. [Set-up](#Set-up)
# 1. [A softmax baseline](#A-softmax-baseline)
# 1. [RNNClassifier wrapper](#RNNClassifier-wrapper)
# 1. [Error analysis](#Error-analysis)
# 1. [Homework questions](#Homework-questions)
# 1. [Sentiment words alone [2 points]](#Sentiment-words-alone-[2-points])
# 1. [A more powerful vector-summing baseline [3 points]](#A-more-powerful-vector-summing-baseline-[3-points])
# 1. [Your original system [4 points]](#Your-original-system-[4-points])
# 1. [Bake-off [1 point]](#Bake-off-[1-point])
# ## Overview
#
# This homework and associated bake-off are devoted to the Stanford Sentiment Treebank (SST). The homework questions ask you to implement some baseline systems, and the bake-off challenge is to define a system that does extremely well at the SST task.
#
# We'll focus on the ternary task as defined by `sst.ternary_class_func`.
#
# The SST test set will be used for the bake-off evaluation. This dataset is already publicly distributed, so we are counting on people not to cheat by developing their models on the test set. You must do all your development without using the test set at all, and then evaluate exactly once on the test set and turn in the results, with no further system tuning or additional runs. __Much of the scientific integrity of our field depends on people adhering to this honor code__.
#
# Our only additional restriction is that __you cannot make any use of the subtree labels__. This corresponds to the 'Root' condition in the paper. As we discussed in class, the subtree labels are a really interesting feature of SST, but bringing them in results in a substantially different learning problem.
#
# One of our goals for this homework and bake-off is to encourage you to engage in __the basic development cycle for supervised models__, in which you
#
# 1. Write a new feature function. We recommend starting with something simple.
# 1. Use `sst.experiment` to evaluate your new feature function, with at least `fit_softmax_classifier`.
# 1. If you have time, compare your feature function with `unigrams_phi` using `sst.compare_models` or `sst.compare_models_mcnemar`. (For discussion, see [this notebook section](sst_02_hand_built_features.ipynb#Statistical-comparison-of-classifier-models).)
# 1. Return to step 1, or stop the cycle and conduct a more rigorous evaluation with hyperparameter tuning and assessment on the `dev` set.
#
# [Error analysis](#Error-analysis) is one of the most important methods for steadily improving a system, as it facilitates a kind of human-powered hill-climbing on your ultimate objective. Often, it takes a careful human analyst just a few examples to spot a major pattern that can lead to a beneficial change to the feature representations.
# ## Methodological note
#
# You don't have to use the experimental framework defined below (based on `sst`). However, if you don't use `sst.experiment` as below, then make sure you're training only on `train`, evaluating on `dev`, and that you report with
#
# ```
# from sklearn.metrics import classification_report
# classification_report(y_dev, predictions)
# ```
# where `y_dev = [y for tree, y in sst.dev_reader(class_func=sst.ternary_class_func)]`. We'll focus on the value at `macro avg` under `f1-score` in these reports.
# ## Set-up
#
# See [the first notebook in this unit](sst_01_overview.ipynb#Set-up) for set-up instructions.
from collections import Counter
import numpy as np
import os
import pandas as pd
import random
from sklearn.linear_model import LogisticRegression
import sst
import torch.nn as nn
from torch_rnn_classifier import TorchRNNClassifier
from torch_tree_nn import TorchTreeNN
import utils
# Root directory of the SST tree files (train/dev/test splits).
SST_HOME = os.path.join('data', 'trees')
# ## A softmax baseline
#
# This example is here mainly as a reminder of how to use our experimental framework with linear models.
def unigrams_phi(tree):
    """Bag-of-words (unigram) feature function.

    Parameters
    ----------
    tree : nltk.tree
        The tree to represent.

    Returns
    -------
    Counter
        Maps each leaf token of `tree` to the number of times it occurs.
    """
    counts = Counter()
    for token in tree.leaves():
        counts[token] += 1
    return counts
# Thin wrapper around `LogisticRegression` for the sake of `sst.experiment`:
def fit_softmax_classifier(X, y):
    """Fit and return a one-vs-rest logistic-regression ("softmax") model."""
    classifier = LogisticRegression(
        solver='liblinear',
        multi_class='ovr',
        fit_intercept=True)
    classifier.fit(X, y)
    return classifier
# The experimental run with some notes:
# Bake-off protocol: train on `train`, assess on `dev`, ternary labels.
softmax_experiment = sst.experiment(
    SST_HOME,
    unigrams_phi, # Free to write your own!
    fit_softmax_classifier, # Free to write your own!
    train_reader=sst.train_reader, # Fixed by the competition.
    assess_reader=sst.dev_reader, # Fixed until the bake-off.
    class_func=sst.ternary_class_func) # Fixed by the bake-off rules.
# `softmax_experiment` contains a lot of information that you can use for analysis; see [this section below](#Error-analysis) for starter code.
# ## RNNClassifier wrapper
#
# This section illustrates how to use `sst.experiment` with RNN and TreeNN models.
# To featurize examples for an RNN, we just get the words in order, letting the model take care of mapping them into an embedding space.
def rnn_phi(tree):
    """Feature function for sequence models: the ordered leaf tokens."""
    tokens = tree.leaves()
    return tokens
# The model wrapper gets the vocabulary using `sst.get_vocab`. If you want to use pretrained word representations in here, then you can have `fit_rnn_classifier` build that space too; see [this notebook section for details](sst_03_neural_networks.ipynb#Pretrained-embeddings).
def fit_rnn_classifier(X, y):
    """Build a 10k-word vocabulary from `X` and fit a bidirectional RNN."""
    vocab = utils.get_vocab(X, n_words=10000)
    classifier = TorchRNNClassifier(
        vocab,
        embedding=None,          # learn embeddings from scratch
        embed_dim=50,
        hidden_dim=50,
        hidden_activation=nn.ReLU(),
        bidirectional=True,
        eta=0.05,
        batch_size=1000,
        max_iter=50,
        l2_strength=0.001)
    classifier.fit(X, y)
    return classifier
# RNN experiment: featurize as raw token sequences. `vectorize=False`
# because the model itself maps tokens to vectors (no DictVectorizer).
rnn_experiment = sst.experiment(
    SST_HOME,
    rnn_phi,
    fit_rnn_classifier,
    vectorize=False, # For deep learning, use `vectorize=False`.
    assess_reader=sst.dev_reader)
# ## Error analysis
#
# This section begins to build an error-analysis framework using the dicts returned by `sst.experiment`. These have the following structure:
#
# ```
# 'model': trained model
# 'train_dataset':
# 'X': feature matrix
# 'y': list of labels
# 'vectorizer': DictVectorizer,
# 'raw_examples': list of raw inputs, before featurizing
# 'assess_dataset': same structure as the value of 'train_dataset'
# 'predictions': predictions on the assessment data
# 'metric': `score_func.__name__`, where `score_func` is an `sst.experiment` argument
# 'score': the `score_func` score on the assessment data
# ```
# The following function just finds mistakes, and returns a `pd.DataFrame` for easy subsequent processing:
def find_errors(experiment):
    """Tabulate assessment-set predictions for error analysis.

    Parameters
    ----------
    experiment : dict
        As returned by `sst.experiment`.

    Returns
    -------
    pd.DataFrame
        Columns 'raw_examples' (example as a plain string), 'predicted',
        'gold', and a boolean 'correct' flag.
    """
    assess = experiment['assess_dataset']
    texts = []
    for tree in assess['raw_examples']:
        texts.append(" ".join(tree.leaves()))
    frame = pd.DataFrame({
        'raw_examples': texts,
        'predicted': experiment['predictions'],
        'gold': assess['y']})
    frame['correct'] = frame['predicted'] == frame['gold']
    return frame
# Per-example prediction tables for each model.
softmax_analysis = find_errors(softmax_experiment)
rnn_analysis = find_errors(rnn_experiment)
# Here we merge the softmax and RNN experiments into a single DataFrame:
# +
# Join on the example text; pandas suffixes the clashing columns as
# predicted_x/predicted_y etc. The two gold columns are duplicates, so
# drop one and rename the survivor back to 'gold'.
analysis = softmax_analysis.merge(
    rnn_analysis, left_on='raw_examples', right_on='raw_examples')
analysis = analysis.drop('gold_y', axis=1).rename(columns={'gold_x': 'gold'})
# -
# The following code collects a specific subset of examples; small modifications to its structure will give you different interesting subsets:
# +
# Examples where the softmax model is correct, the RNN is not,
# and the gold label is 'positive'
# (the _x columns come from the softmax table, _y from the RNN table).
error_group = analysis[
    (analysis['predicted_x'] == analysis['gold'])
    &
    (analysis['predicted_y'] != analysis['gold'])
    &
    (analysis['gold'] == 'positive')
]
# -
# Size of this error subset.
error_group.shape[0]
# Print a random handful of the raw texts for manual inspection.
for ex in error_group['raw_examples'].sample(5):
    print("="*70)
    print(ex)
# ## Homework questions
#
# Please embed your homework responses in this notebook, and do not delete any cells from the notebook. (You are free to add as many cells as you like as part of your responses.)
# ### Sentiment words alone [2 points]
#
# NLTK includes an easy interface to [Minqing Hu and <NAME>'s __Opinion Lexicon__](https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html), which consists of a list of positive words and a list of negative words. How much of the ternary SST story does this lexicon tell?
#
# For this problem, submit code to do the following:
#
# 1. Create a feature function `op_unigrams` on the model of `unigrams_phi` above, but filtering the vocabulary to just items that are members of the Opinion Lexicon. Submit this feature function.
#
# 1. Evaluate your feature function with `sst.experiment`, with all the same parameters as were used to create `softmax_experiment` in [A softmax baseline](#A-softmax-baseline) above, except of course for the feature function.
#
# 1. Use `utils.mcnemar` to compare your feature function with the results in `softmax_experiment`. The information you need for this is in `softmax_experiment` and your own `sst.experiment` results. Submit your evaluation code. You can assume `softmax_experiment` is already in memory, but your code should create the other objects necessary for this comparison.
# +
# Hu & Liu Opinion Lexicon: word lists used by the sentiment-words-only
# homework question below.
from nltk.corpus import opinion_lexicon
# Use set for fast membership checking:
positive = set(opinion_lexicon.positive())
negative = set(opinion_lexicon.negative())
# -
# ### A more powerful vector-summing baseline [3 points]
#
# In [Distributed representations as features](sst_03_neural_networks.ipynb#Distributed-representations-as-features), we looked at a baseline for the ternary SST problem in which each example is modeled as the sum of its 50-dimensional GloVe representations. A `LogisticRegression` model was used for prediction. A neural network might do better here, since there might be complex relationships between the input feature dimensions that a linear classifier can't learn.
#
# To address this question, rerun the experiment with `torch_shallow_neural_classifier.TorchShallowNeuralClassifier` as the classifier. Specs:
# * Use `sst.experiment` to conduct the experiment.
# * Using 3-fold cross-validation, exhaustively explore this set of hyperparameter combinations:
# * The hidden dimensionality at 50, 100, and 200.
# * The hidden activation function as `nn.Tanh` or `nn.ReLU`.
# * (For all other parameters to `TorchShallowNeuralClassifier`, use the defaults.)
#
# For this problem, submit code to do the following:
#
# 1. Your model wrapper function around `TorchShallowNeuralClassifier`. This function should implement the requisite cross-validation; see [this notebook section](sst_02_hand_built_features.ipynb#Hyperparameter-search) for examples.
# 1. Your average F1 score according to `sst.experiment`.
# 2. The optimal hyperparameters chosen in your experiment. (You can just paste in the dict that `sst._experiment` prints.)
#
# We're not evaluating the quality of your model. (We've specified the protocols completely, but there will still be a lot of variation in the results.) However, the primary goal of this question is to get you thinking more about this strikingly good baseline feature representation scheme for SST, so we're sort of hoping you feel compelled to try out variations on your own.
# ### Your original system [4 points]
#
# There are many options for modeling this task. If you spend more than a few hours on this homework problem, you should consider letting it grow into your final project! Here are some relatively manageable ideas that you might try:
#
# 1. We didn't systematically evaluate the `bidirectional` option to the `TorchRNNClassifier`. Similarly, that model could be tweaked to allow multiple LSTM layers (at present there is only one), and you could try adding layers to the classifier portion of the model as well.
#
# 1. We've already glimpsed the power of rich initial word representations, and later in the course we'll see that smart initialization usually leads to a performance gain in NLP, so you could perhaps achieve a winning entry with a simple model that starts in a great place.
#
# 1. The [practical introduction to contextual word representations](contextualreps.ipynb) (to be discussed later in the quarter) covers pretrained representations and interfaces that are likely to boost the performance of any system.
#
# 1. The `TreeNN` and `TorchTreeNN` don't perform all that well, and this could be for the same reason that RNNs don't perform well: the gradient signal doesn't propagate reliably down inside very deep trees. [Tai et al. 2015](https://aclanthology.info/papers/P15-1150/p15-1150) sought to address this with TreeLSTMs, which are fairly easy to implement in PyTorch.
#
# 1. In the [distributed representations as features](#Distributed-representations-as-features) section, we just summed all of the leaf-node GloVe vectors to obtain a fixed-dimensional representation for all sentences. This ignores all of the tree structure. See if you can do better by paying attention to the binary tree structure: write a function `glove_subtree_phi` that obtains a vector representation for each subtree by combining the vectors of its daughters, with the leaf nodes again given by GloVe (any dimension you like) and the full representation of the sentence given by the final vector obtained by this recursive process. You can decide on how you combine the vectors.
#
# 1. If you have a lot of computing resources, then you can fire off a large hyperparameter search over many parameter values. All the model classes for this course are compatible with the `scikit-learn` and [scikit-optimize](https://scikit-optimize.github.io) methods, because they define the required functions for getting and setting parameters.
# ## Bake-off [1 point]
#
# The bake-off will begin on April 22. The announcement will go out on Piazza. As we said above, the bake-off evaluation data is the official SST test set release. For this bake-off, you'll evaluate your original system from the above homework problem on this test set. Rules:
#
# 1. Only one evaluation is permitted.
# 1. No additional system tuning is permitted once the bake-off has started.
#
# To enter the bake-off, upload this notebook on Canvas:
#
# https://canvas.stanford.edu/courses/99711/assignments/187246
#
# The cells below this one constitute your bake-off entry.
#
# Systems that enter will receive the additional homework point, and systems that achieve the top score will receive an additional 0.5 points. We will test the top-performing systems ourselves, and only systems for which we can reproduce the reported results will win the extra 0.5 points.
#
# The bake-off will close at 4:30 pm on April 24. Late entries will be accepted, but they cannot earn the extra 0.5 points. Similarly, you cannot win the bake-off unless your homework is submitted on time.
# +
# Enter your bake-off assessment code in this cell.
# Please do not remove this comment.
# -
# On an otherwise blank line in this cell, please enter
# your macro-average F1 value as reported by the code above.
# Please enter only a number between 0 and 1 inclusive.
# Please do not remove this comment.
| hw2_sst.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense
# NOTE(review): `Convolution2D(32, 3, 3, ...)` is the old Keras 1 positional
# kernel-size style; Keras 2 expects `Conv2D(32, (3, 3), ...)` -- confirm
# the installed Keras version accepts this form.
model = Sequential() # the Keras model
model.add(Convolution2D(32, 3, 3, input_shape = (64, 64, 3), activation = 'relu')) # convolution layer; input is 64x64 RGB
model.add(MaxPooling2D(pool_size = (2, 2) )) # max-pooling layer
model.add(Convolution2D(32, 3, 3, activation = 'relu')) # convolution layer
model.add(MaxPooling2D(pool_size = (2, 2) )) # max-pooling layer
model.add(Flatten()) # flatten feature maps to a 1-D vector
model.add(Dense(128, activation = 'relu')) # fully connected layer, 128 units
model.add(Dense(1, activation = 'sigmoid')) # single sigmoid output (binary classification)
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# **shear_range: Float. Shear Intensity (Shear angle in counter-clockwise direction in degrees) | Sair diyor ki; float deger alir. kesme yogunlugu yani oransal girili ve saat yonunun tersine shear acisi alir**
#
# **zoom_range: Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].**
# -> **object** : the Keras Object model.
#
# -> **generator** : a generator whose output must be a list of the form:
# - (inputs, targets)
# - (input, targets, sample_weights)
# a single output of the generator makes a single batch and hence all arrays in the list
# must be having the length equal to the size of the batch. The generator is expected
# to loop over its data infinite no. of times, it should never return or exit.
#
# -> **steps_per_epoch** : it specifies the total number of steps taken from the generator
# as soon as one epoch is finished and next epoch has started. We can calculate the value
# of steps_per_epoch as the total number of samples in your dataset divided by the batch size.
#
# -> **Epochs** : an integer and number of epochs we want to train our model for.
#
# -> **Verbose** : specifies verbosity mode(0 = silent, 1= progress bar, 2 = one line per epoch).
#
# -> **callbacks** : a list of callback functions applied during the training of our model.
#
# -> **validation_data** can be either:
# - an inputs and targets list
# - a generator
# - an inputs, targets, and sample_weights list which can be used to evaluate
# the loss and metrics for any model after any epoch has ended.
#
# -> **validation_steps** :only if the validation_data is a generator then only this argument
# can be used. It specifies the total number of steps taken from the generator before it is
# stopped at every epoch and its value is calculated as the total number of validation data points
# in your dataset divided by the validation batch size.
# +
from keras.preprocessing.image import ImageDataGenerator

# Augmentation/normalization pipeline for the training images.
trainDatagen = ImageDataGenerator(rescale = 1./255,
                                  shear_range = 0.2, # shear ("leaning") augmentation
                                  zoom_range = 0.2,
                                  horizontal_flip = True)
# NOTE(review): augmenting the *test* data (shear/zoom/flip) is unusual;
# typically the test generator should only rescale. Kept as written.
testDatagen = ImageDataGenerator(rescale = 1./255,
                                 shear_range = 0.2,
                                 zoom_range = 0.2,
                                 horizontal_flip = True)
trainingSet = trainDatagen.flow_from_directory(
        'veriler/training_set',
        target_size = (64, 64),
        batch_size = 2,
        class_mode = 'binary'
)
# BUGFIX: this previously flowed from `trainDatagen`, leaving `testDatagen`
# entirely unused; the test set should come from the test generator.
# Each yielded item is (x, y) with x of shape (batch_size, *target_size, channels).
testSet = testDatagen.flow_from_directory(
        'veriler/test_set',
        target_size = (64, 64),
        batch_size = 2,
        class_mode = 'binary'
)
model.fit_generator(
    generator = trainingSet,            # training batches
    steps_per_epoch = len(trainingSet), # batches consumed per epoch
    epochs = 1,                         # number of epochs
    validation_data = testSet,          # validation data
    validation_steps = len(testSet)/2   # validation batches per epoch
)
# +
import pandas as pd
import numpy as np
# Restart the generator so predictions line up with `filenames`.
testSet.reset()
pred = model.predict_generator(testSet, verbose = 1)
# Threshold sigmoid outputs into hard 0/1 class labels (in place).
pred[pred > 0.5] = 1
pred[pred <= 0.5] = 0
testLabel = []
for i in range(len(testSet)):
    testLabel.extend(np.array(testSet[i][1])) # indexing the generator returns (x, y); collect the y part
#print(testSet.filenames)
# Columns: file names, predictions, true labels (Turkish column names kept).
d = {'dosyaIsimleri' : testSet.filenames, 'tahminler' : pred.tolist() , 'test' : testLabel}
cikti = pd.DataFrame(d)
cikti
# +
"""**Confusion Matrix**"""  # typo fixed; this bare string only serves as a cell heading
from sklearn.metrics import confusion_matrix
# Rows = true class, columns = predicted class.
cm = confusion_matrix(testLabel, pred)
cm
# -
# Overall accuracy: correct predictions / total predictions. `cm.sum()`
# returns a scalar; the previous `sum(cm.reshape(-1, 1))` produced a
# one-element array with the same value.
(cm[0, 0] + cm[1, 1])/cm.sum()
| CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Gathering and Data Wrangling
# Importing Neccessary Modules
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import requests
# Wikipedia page listing Toronto postal codes (forward sortation areas).
url = 'https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M'
# Download the raw page HTML (echoed for inspection).
html = requests.get(url).text
html
# Parse the HTML so table rows can be queried.
soup = BeautifulSoup(html,'html.parser')
soup
# Collect every table row that has exactly three cells
# (postcode, borough, neighbourhood).
data = []
for row in soup.find_all('tr'):
    cells = row.find_all('td')
    if len(cells) == 3:
        data.append(cells)
# Keep rows whose borough is assigned; store the stripped cell texts.
final = []
for row in data:
    cells = [str(cell.text.strip()) for cell in row]
    if cells[1] != 'Not assigned':
        final.append(cells)
# Build the postcode table, collapsing all neighbourhoods that share a
# (PostCode, Borough) pair into one comma-separated string.
pc_data = pd.DataFrame(data=final,columns=['PostCode','Borough','Neighbourhood'])
pc_data = pc_data.Neighbourhood.groupby([pc_data.PostCode,pc_data.Borough]).apply(list).reset_index()
pc_data['Neighbourhood'] = pc_data['Neighbourhood'].apply(lambda x : ", ".join(x))
pc_data
pc_data.shape
pc_data.to_csv('pc_data.csv')
# Join latitude/longitude onto each postcode.
# NOTE(review): assumes Geospatial_Coordinates.csv columns are ordered
# postcode, latitude, longitude -- verify before renaming positionally.
df = pd.read_csv('Geospatial_Coordinates.csv')
df.columns = ['PostCode', 'Latitude', 'Longitude']
pc_data = pd.merge(pc_data,df,on='PostCode')
pc_data.head()
# +
# Create a map centred on Toronto with one circle marker per postcode area.
import folium
canada_map = folium.Map(location=[43.651070, -79.347015])
for lat, lng, borough, neighborhood in zip(pc_data['Latitude'], pc_data['Longitude'], pc_data['Borough'], pc_data['Neighbourhood']):
    # Popup text: "neighbourhood, borough".
    label = '{}, {}'.format(neighborhood, borough)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker(
        [lat, lng],
        radius=5,
        popup=label,
        color='blue',
        fill=True,
        fill_color='#3186cc',
        fill_opacity=0.7,
        parse_html=False).add_to(canada_map)
# -
canada_map
| Data Gathering and Data Wrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="SE8ipjHXO_F3" colab_type="text"
# ### Import libraries
# + id="lUQlzSWNIZKe" colab_type="code" colab={}
import random
import numpy as np
from skimage.transform import resize
from keras.datasets import mnist
from keras.utils import np_utils
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# + [markdown] id="sBf6yylnPJaG" colab_type="text"
# ### Load MNIST
# + id="J9HpF25SIj8o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="7fc9d839-35c8-4e2f-bd97-e36c9cc9d6ee"
# MNIST digits are 28x28 grayscale images.
mnist_img_height , mnist_img_width = 28 , 28
(x_train,y_train), (x_test, y_test) = mnist.load_data()
# + id="GaaXaeD8IoWH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="4c74e0d4-019f-4a1f-d31c-bfbb0222e139"
print(f"Number of train images:{x_train.shape[0]}")
print(f"Number of test images:{x_test.shape[0]}")
# + [markdown] id="gi4MH0t_JMPQ" colab_type="text"
# ### Build two-digit mnist sequences
# + id="IEo7zjLwJOVf" colab_type="code" colab={}
def build_sequence_mnist(data,labels,dataset_size,IS_TRAIN=True):
    """Build a synthetic dataset of two-digit MNIST sequences.

    Each example places two randomly chosen digit images side by side and
    resizes the resulting 28x56 strip to 64x64.

    Parameters
    ----------
    data : np.ndarray
        Source digit images, shape (n, 28, 28).
    labels : np.ndarray
        Digit labels aligned with `data`.
    dataset_size : int
        Number of sequence examples to generate.
    IS_TRAIN : bool, optional
        Kept only for backward compatibility. BUGFIX: the original body
        ignored its `data`/`labels` parameters and read the module-level
        x_train/y_train or x_test/y_test depending on this flag; the body
        now uses the parameters directly, so the flag is unused. Existing
        call sites (which pass the matching arrays) behave identically.

    Returns
    -------
    (np.ndarray, list[tuple])
        Images of shape (dataset_size, 64, 64) (float32) and the matching
        two-digit label tuples.
    """
    # sequence data size
    seq_img_height = 64
    seq_img_width = 64
    seq_data = np.ndarray(shape=(dataset_size, seq_img_height, seq_img_width),
                          dtype=np.float32)
    seq_labels = []
    for i in range(0, dataset_size):
        # Pick two random source digits.
        s_indices = [random.randint(0, len(data) - 1) for p in range(0, 2)]
        # Concatenate the chosen images horizontally and pair their labels.
        new_image = np.hstack([data[index] for index in s_indices])
        new_label = [labels[index] for index in s_indices]
        # Resize the 28x56 strip to 64x64.
        new_image = resize(new_image, (seq_img_height, seq_img_width))
        seq_data[i, :, :] = new_image
        seq_labels.append(tuple(new_label))
    # Return the synthetic dataset
    return seq_data, seq_labels
# + id="J7-CO7tXKyOQ" colab_type="code" colab={}
# Generate 60k training and 10k test two-digit sequences.
x_seq_train,y_seq_train = build_sequence_mnist(x_train,y_train,60000)
x_seq_test,y_seq_test = build_sequence_mnist(x_test,y_test,10000,IS_TRAIN=False)
# + id="IOE49QEmLzCZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="5e3c4433-9d50-4bdc-a76a-2c1bf5df72f7"
# Sanity-check one generated example and its two-digit label.
RANDOM_IMG_TO_SHOW = 2
plt.figure()
plt.imshow(x_seq_test[RANDOM_IMG_TO_SHOW], cmap='gray')
plt.title("Labeled as: {}".format(''.join(str(i) for i in y_seq_test[RANDOM_IMG_TO_SHOW])))
plt.grid(b=None)
plt.show()
# + [markdown] id="Xcht9iebXJeT" colab_type="text"
# ### One-Hot encoding
# + id="xbajoq1gXVMt" colab_type="code" colab={}
# One-hot encode the two-digit label tuples: shape (set_size, digits, classes).
possible_classes = 10
def convert_labels(labels):
    """Return [digit0_onehots, digit1_onehots], each an array of
    shape (len(labels), possible_classes)."""
    onehots = [np.ndarray(shape=(len(labels), possible_classes))
               for _ in range(2)]
    for row, label in enumerate(labels):
        for position, target in enumerate(onehots):
            target[row, :] = np_utils.to_categorical(label[position], possible_classes)
    return onehots
# + id="voq2ld3CXwha" colab_type="code" colab={}
# One-hot encode the label tuples: each result is a list of two (n, 10) arrays.
train_labels = convert_labels(y_seq_train)
test_labels = convert_labels(y_seq_test)
# + id="OLJmhQrYPpVe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="312efd2d-dc66-4daf-c567-94620d31cd44"
np.shape(test_labels[0])
# + [markdown] id="wY3NHcsto0iZ" colab_type="text"
# ### Second Model --> LRCN (CNN-LSTM)
# + id="XhwNcNBAo5ik" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 707} outputId="afdc31b5-1dc4-4b56-f4d9-4c314683d9dd"
from keras import applications
# BUGFIX: Input, Activation, Dropout, Dense, LSTM, TimeDistributed and Model
# are referenced below but were never imported in this cell (presumably an
# earlier "first model" cell imported them); importing here makes the cell
# self-contained.
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, Flatten, BatchNormalization, Lambda
from keras.layers import Input, Activation, Dropout, Dense, LSTM, TimeDistributed
nb_classes = 10
# Input frames are 64x64 single-channel images.
img_rows = 64
img_cols = 64
img_channels = 1
#number of convolution filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
#defining the input: (time_steps, height, width, channels); None = variable-length sequence
inputs = Input(shape=(None,img_rows,img_cols,img_channels))
#Model taken from keras example. Worked well for a digit, dunno for multiple
# TimeDistributed applies the wrapped layer to every time step independently.
cov = TimeDistributed(Conv2D(nb_filters,kernel_size,padding='same'))(inputs)
cov = TimeDistributed(Activation('relu'))(cov)
cov = TimeDistributed(BatchNormalization())(cov)
cov = TimeDistributed(Conv2D(nb_filters,kernel_size))(cov)
cov = TimeDistributed(Activation('relu'))(cov)
cov = TimeDistributed(BatchNormalization())(cov)
cov = TimeDistributed(MaxPooling2D(pool_size=pool_size))(cov)
cov = TimeDistributed(Dropout(0.25))(cov)
cov_out = TimeDistributed(Flatten())(cov)
# return_sequences = true -> to share output of lstm to all dense layers
encoded_vid = LSTM(64,return_sequences=True)(cov_out)
encoded_vid = TimeDistributed(Dense(32, activation='relu'))(encoded_vid)
encoded_vid = Dropout(0.25)(encoded_vid)
#Prediction layers: one softmax head per digit position
c0 = TimeDistributed(Dense(nb_classes, activation='softmax'),name='digit1')(encoded_vid)
c1 = TimeDistributed(Dense(nb_classes, activation='softmax'),name='digit2')(encoded_vid)
model = Model(inputs=[inputs],outputs=[c0,c1], name='custom-cnn-lstm')
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])
model.summary()
# + [markdown] id="PvZ9tiPfoSc1" colab_type="text"
# ### Prepare Data
# + id="BNT6JXd4oTy1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3e223273-b4f0-4aec-9c05-2f2bcd2db273"
def prep_data_keras(img_data):
    """Group the 64x64 images into sequences of 10 time steps, add a
    trailing channel axis, and scale pixel values into [0, 1] as float32."""
    time_steps = 10
    batched = img_data.reshape(len(img_data) // time_steps, time_steps, 64, 64, 1)
    return batched.astype('float32') / 255.0
# Shape the generated sequences for the CNN-LSTM: (batches, 10, 64, 64, 1).
train_images = prep_data_keras(x_seq_train)
test_images = prep_data_keras(x_seq_test)
print(f"Test Image Shape: {test_images.shape}")
# + id="Gh7G6V8D5uBP" colab_type="code" colab={}
# Reshape the (60000, 10) one-hot arrays to the model's
# (batch, time_steps, classes) output shape: 60000 = 6000 batches x 10 steps.
train_labels[0] = train_labels[0].reshape(6000,10,10)
train_labels[1] = train_labels[1].reshape(6000,10,10)
# + [markdown] id="RW7yO7RKW_OM" colab_type="text"
# ### Define callbacks and call model.fit()
# + id="FjUIChSFpB3F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="abb57078-c6c5-4aed-fb29-9d111e5fc532"
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
import numpy as np
save_dir = 'two-digit-custom-model-test3-with-cnn-lstm.h5'
# Keep only the checkpoint with the lowest validation loss.
model_checkpoint = ModelCheckpoint(save_dir,
                                  monitor='val_loss', verbose=1,
                                  save_best_only=True, mode='min')
# Decay the learning rate by 25% every 2 epochs, starting from 1e-4.
lr_sched = LearningRateScheduler(lambda epoch: 1e-4 * (0.75 ** np.floor(epoch / 2)))
# Stop if validation loss has not improved for 5 epochs.
early = EarlyStopping(monitor='val_loss', patience=5, mode='min', verbose=1)
hist = model.fit(train_images,train_labels,batch_size=32,epochs=50,verbose=2,validation_split=0.2, callbacks=[model_checkpoint, lr_sched, early])
# + [markdown] id="hSs9CHhRhXY-" colab_type="text"
# ### Model Visualization
# + id="8Il0BKnlhaBC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 329} outputId="84b8c11d-1234-4773-bd94-b4204e3f9a75"
def plot_accuracy_loss(history):
    """
    Plot per-digit training vs. validation accuracy curves.

    Parameters
    ----------
    history : keras History
        As returned by `model.fit`.
    """
    # BUGFIX: the body previously read the global `hist` instead of the
    # `history` parameter; the sole call site passes `hist`, so behavior
    # for that call is unchanged.
    fig = plt.figure(figsize=(16,10))
    # First digit: accuracy vs. validation accuracy
    plt.subplot(221)
    plt.plot(history.history['digit1_acc'],'bo--', label = "digit1_acc")
    plt.plot(history.history['val_digit1_acc'], 'ro--', label = "val_digit1_acc")
    plt.title("digit1_acc vs val_digit1_acc")
    plt.ylabel("digit1_accuracy")
    plt.xlabel("epochs")
    plt.legend()
    # Second digit: accuracy vs. validation accuracy
    plt.subplot(222)
    plt.plot(history.history['digit2_acc'],'bo--', label = "digit2_acc")
    plt.plot(history.history['val_digit2_acc'], 'ro--', label = "val_digit2_acc")
    plt.title("digit2_acc vs val_digit2_acc")
    plt.ylabel("digit2_accuracy")
    plt.xlabel("epochs")
    plt.legend()
    plt.show()
plot_accuracy_loss(hist)
# + [markdown] id="3-8d4BKoXRjS" colab_type="text"
# ### Save model weights
# + id="SuCHIJgFXqZd" colab_type="code" colab={}
# !cp two-digit-custom-model-test3-with-cnn-lstm.h5 drive/My\ Drive/mnist-generator/.
# + [markdown] id="_f2oBOFeXbRT" colab_type="text"
# ### Load Model
# + id="VMGmy9fqXiWr" colab_type="code" colab={}
# !cp drive/My\ Drive/mnist-generator/two-digit-custom-model-test2-with-batchnormalization.h5 .
# + id="7jgdWDDcXeVb" colab_type="code" colab={}
# NOTE(review): this loads the earlier *test2* (batch-norm) checkpoint, not
# the test3 CNN-LSTM weights saved above -- confirm the file matches this
# model's architecture before evaluating.
model.load_weights('two-digit-custom-model-test2-with-batchnormalization.h5')
# + [markdown] id="sdEWDUgrXUqL" colab_type="text"
# ### Evaluate model on test
# + id="OxDhqAXtFGif" colab_type="code" colab={}
# Reshape test labels to the model's sequence output shape:
# 10000 labels = 1000 batches x 10 time steps.
test_labels[0] = test_labels[0].reshape(1000,10,10)
test_labels[1] = test_labels[1].reshape(1000,10,10)
# + id="wUsheTedYP-L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="00e838c0-3bf4-47b6-9230-ce534085b7d8"
# scores presumably ordered [total_loss, digit1_loss, digit2_loss,
# digit1_acc, digit2_acc] -- confirm against model.metrics_names.
scores = model.evaluate(test_images, test_labels)
print(f"First digit accuracy: {scores[3]} , Second digit accuracy: {scores[4]}")
# + [markdown] id="vpKuFFTVZE_0" colab_type="text"
# ### Model Inference
# + id="CGf1rN2VZdjD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="57014f8a-dd96-4a77-e57f-2f3593e1f4e6"
#Printing some examples of real and predicted labels
predictions = model.predict(test_images)
# Flatten the (1000, 10, 10) sequence outputs back to one row per example.
predictions[0] = predictions[0].reshape(10000,10)
predictions[1] = predictions[1].reshape(10000,10)
test_labels[0] = test_labels[0].reshape(10000,10)
test_labels[1] = test_labels[1].reshape(10000,10)
for i in random.sample(range(0,10000),50):
    actual_labels = []
    predicted_labels = []
    for j in range(0,2):
        # argmax over the 10 classes recovers the digit at position j.
        actual_labels.append(np.argmax(test_labels[j][i]))
        predicted_labels.append(np.argmax(predictions[j][i]))
    print("Actual labels: {}".format(actual_labels))
    print("Predicted labels: {}\n".format(predicted_labels))
| LRCN_(CNN_LSTM)_two_digit_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # TP Denial of Service
# ### <NAME>
# + pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from pyspark.sql import SparkSession
# + pycharm={"name": "#%%\n"}
# Create (or reuse) a Spark session for the analysis.
spark = SparkSession.builder.appName("Python Spark").getOrCreate()
sc = spark.sparkContext
# + [markdown] pycharm={"name": "#%% md\n"}
# # Chargement des données csv
# + pycharm={"name": "#%%\n"}
# Load the NetFlow export as CSV: comma-delimited, with a header row,
# letting Spark infer the column types.
df_ddos = spark.read\
    .option("delimiter", ",")\
    .option("header", "true")\
    .option("inferSchema", "true")\
    .csv('dataset.netflow')
# + [markdown] pycharm={"name": "#%% md\n"}
# Show the inferred column names and types
# + pycharm={"name": "#%%\n"}
df_ddos.printSchema()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### extraction des adresses sources les plus fréquentes
# je veux afficher un tableau des 10 adresses sources les plus actives
# + pycharm={"name": "#%%\n"}
# Count flows per source address and keep the 10 busiest.
df_source_sum =(
    df_ddos.groupBy('SourceIP')\
    .count()\
    .sort('count', ascending=False).limit(10))
df_source_sum.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Display the equivalent bar chart
# + pycharm={"name": "#%%\n"}
import seaborn as sns
# Convert to pandas so seaborn can plot it.
pd_sum = df_source_sum.toPandas()
sns.catplot(x='SourceIP', y='count', data=pd_sum,
            kind='bar', order=pd_sum['SourceIP'])
# + [markdown] pycharm={"name": "#%% md\n"}
# ### extraction des adresse destination les plus fréquentes
# + pycharm={"name": "#%%\n"}
# Count flows per destination address and keep the top 20.
df_dest_sum = (df_ddos.groupBy('DestIP')\
    .count()\
    .sort('count', ascending=False).limit(20))
# + [markdown] pycharm={"name": "#%% md\n"}
# List the most frequently contacted destination addresses
# + pycharm={"name": "#%%\n"}
df_dest_sum.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Plot the most-solicited destinations
# + pycharm={"name": "#%%\n"}
import seaborn as sns
pd_sum_dest = df_dest_sum.toPandas()
sns.catplot(x='DestIP', y='count', data=pd_sum_dest,
            kind='bar', order=pd_sum_dest['DestIP'])
# + [markdown] pycharm={"name": "#%% md\n"}
# on remarque que c'est l'adresse 192.168.3.11 qui est le plus touché
#
# du coup je me focalise uniquement sur cette adresse en filtrant les logs
# + pycharm={"name": "#%%\n"}
# Keep only flows whose destination is the most-targeted host.
df_ddos_filtered = df_ddos.filter(df_ddos['DestIP']=="192.168.3.11")
# + pycharm={"name": "#%%\n"}
df_ddos_filtered.show(10)
# + [markdown] pycharm={"name": "#%% md\n"}
# Look at the top source addresses hitting this destination
# + pycharm={"name": "#%%\n"}
df_source_filtered_sum =(
    df_ddos_filtered.groupBy('SourceIP')\
    .count()\
    .sort('count', ascending=False).limit(50))
df_source_filtered_sum.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### j'affiche le diagramme des adresse IP qui se connectent à l'IP 192.168.3.11
# + pycharm={"name": "#%%\n"}
import seaborn as sns
# Bar chart of connection counts per source IP toward 192.168.3.11.
pd_filtered_sum = df_source_filtered_sum.toPandas()
sns.catplot(x='SourceIP', y='count', data=pd_filtered_sum,
            kind='bar', order=pd_filtered_sum['SourceIP'])
# + pycharm={"name": "#%% je regroupe par type de port\n"}
# Total bytes sent per source IP (top 50) toward the victim host.
df_source_filtered_agg_sum =(df_ddos_filtered.groupBy('SourceIP')\
    .agg({'Bytes': 'sum'})\
    .sort('sum(Bytes)', ascending=False)\
    .limit(50))
df_source_filtered_agg_sum.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Visualize it once more:
# + pycharm={"name": "#%%\n"}
import seaborn as sns
pd_filtered_agg_sum = df_source_filtered_agg_sum.toPandas()
sns.catplot(x='SourceIP', y='sum(Bytes)', data=pd_filtered_agg_sum,
            kind='bar', order=pd_filtered_agg_sum['SourceIP'])
| MServajean/TP_NOTE/jupiter_epellegrin_tp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Support Vector Classifier Modeling
# +
# import libraries
# import libraries
from warnings import filterwarnings
filterwarnings("ignore")  # NOTE(review): blanket-silences ALL warnings, including deprecations
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from imblearn.over_sampling import SMOTENC
from sklearn.model_selection import train_test_split, GridSearchCV
# NOTE(review): plot_roc_curve was deprecated in scikit-learn 1.0 and removed in
# 1.2; on current versions use sklearn.metrics.RocCurveDisplay.from_estimator.
from sklearn.metrics import plot_roc_curve
from sklearn.svm import SVC
from sklearn.feature_selection import SelectPercentile
from src.seed import SEED  # project-wide RNG seed for reproducibility
from src.helper import praf1, confmat  # presumably precision/recall/accuracy/f1 table and confusion-matrix plot -- confirm in src/helper
# %matplotlib inline
sns.set(font_scale=1.2)  # slightly larger fonts on all seaborn/matplotlib figures
# +
# Load the processed training data; the target is the last column.
train = pd.read_csv("../data/processed/train.csv")
X, y = train.iloc[:, :-1], train.iloc[:, -1]
# Stratified split keeps the class ratio identical in both partitions.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=SEED, stratify=y)
# -
# Oversample the minority class with SMOTE-NC.
# assumes columns 19-68 are the categorical features -- TODO confirm against the
# preprocessing notebook
smote = SMOTENC(categorical_features=np.arange(19, 69), random_state=SEED, n_jobs=-1)
X_train, y_train = smote.fit_resample(X_train, y_train)
# ## Baseline Model
# +
# Fit an out-of-the-box SVC as the baseline and predict on both splits.
svc = SVC(random_state=SEED)
svc.fit(X_train, y_train)
train_pred, test_pred = svc.predict(X_train), svc.predict(X_test)
# +
# Tabulate the scoring metrics for the two splits side by side.
train_scores = praf1(y_train, train_pred, "Training")
test_scores = praf1(y_test, test_pred, "Testing")
pd.concat([train_scores, test_scores])
# -
# output confusion matrix
# Side-by-side confusion matrices for the training and testing splits.
confmat([y_train, y_test], [train_pred, test_pred], ["Training", "Testing"])
# +
# output roc/auc curve
# plot_roc_curve was removed in scikit-learn 1.2; RocCurveDisplay.from_estimator
# is the supported replacement and produces the same figure. Fall back to the
# old function only on scikit-learn < 1.0.
try:
    from sklearn.metrics import RocCurveDisplay
    _plot_roc = RocCurveDisplay.from_estimator
except (ImportError, AttributeError):
    _plot_roc = plot_roc_curve
fig, ax = plt.subplots(figsize=(12, 8))
_plot_roc(svc, X_train, y_train, name="Training", ax=ax)
_plot_roc(svc, X_test, y_test, name="Testing", ax=ax)
ax.plot([0, 1], [0, 1], "--")  # chance diagonal for reference
plt.title("Baseline SVC ROC/AUC")
plt.show()
# -
# Our baseline model has terrible recall during testing. Although our AUC score is .82 during tests, we see that we are missing about 95% of customers who will soon leave. This model is therefore performing very badly. We'll seek to increase our f1 score and balance out our precision and recall when tuning our model in the future.
# ## Parameter Tuning
# We'll test out a few different models and try to identify which one works best.
# +
# Exhaustive grid search over kernel/degree/C/gamma, optimizing f1.
param_grid = {
    "kernel": ["poly", "rbf"],  # degree only applies to the poly kernel
    "degree": [2, 3, 4, 5],
    "C": [1, 10, 100, 1000],
    "gamma": [1, 10, 100],
}
gs = GridSearchCV(SVC(random_state=SEED), param_grid, scoring="f1", n_jobs=-1)
gs.fit(X_train, y_train)
# Display the winning parameter combination.
gs.best_params_
# +
# Predict with the refitted best estimator and tabulate metrics for both splits.
train_pred = gs.predict(X_train)
test_pred = gs.predict(X_test)
train_scores = praf1(y_train, train_pred, "Training")
test_scores = praf1(y_test, test_pred, "Testing")
pd.concat([train_scores, test_scores])
# -
# output confusion matrix
# Confusion matrices for the tuned model on both splits.
confmat([y_train, y_test], [train_pred, test_pred], ["Training", "Testing"])
# +
# output roc/auc curve
# plot_roc_curve was removed in scikit-learn 1.2; use the supported replacement
# with a fallback for scikit-learn < 1.0.
try:
    from sklearn.metrics import RocCurveDisplay
    _plot_roc = RocCurveDisplay.from_estimator
except (ImportError, AttributeError):
    _plot_roc = plot_roc_curve
fig, ax = plt.subplots(figsize=(12, 8))
_plot_roc(gs, X_train, y_train, name="Training", ax=ax)
_plot_roc(gs, X_test, y_test, name="Testing", ax=ax)
ax.plot([0, 1], [0, 1], "--")  # chance diagonal for reference
# Fixed copy-paste title: this figure shows the tuned (grid-searched) model,
# not the baseline.
plt.title("Tuned SVC ROC/AUC")
plt.show()
# -
# With a parameter space of 96 models, our best model, optimized for f1, is a polynomial kernel with degree 5. This model suffers by having a lower AUC score, but it does have a better recall score of 25%, and an f1 of .31.
| notebooks/09-Skellet0r-svc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="fig/cover-small.jpg">
# *This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by <NAME>; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*
#
# *The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# <!--NAVIGATION-->
# < [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) | [Contents](Index.ipynb) | [Basic Python Semantics: Variables and Objects](03-Semantics-Variables.ipynb) >
# # A Quick Tour of Python Language Syntax
# Python was originally developed as a teaching language, but its ease of use and clean syntax have led it to be embraced by beginners and experts alike.
# The cleanliness of Python's syntax has led some to call it "executable pseudocode", and indeed my own experience has been that it is often much easier to read and understand a Python script than to read a similar script written in, say, C.
# Here we'll begin to discuss the main features of Python's syntax.
#
# Syntax refers to the structure of the language (i.e., what constitutes a correctly-formed program).
# For the time being, we'll not focus on the semantics – the meaning of the words and symbols within the syntax – but will return to this at a later point.
#
# Consider the following code example:
# + jupyter={"outputs_hidden": false}
from random import randint  # bug fix: `import randint` raises ModuleNotFoundError -- randint is a function inside the random module
# set the midpoint
midpoint = randint(3, 7)
# make two empty lists
lower = []; upper = []
# split the numbers into lower and upper
for i in range(10):
    if (i < midpoint):
        lower.append(i)
    else:
        upper.append(i)
print("lower:", lower)
print("upper:", upper)
# -
# This script is a bit silly, but it compactly illustrates several of the important aspects of Python syntax.
# Let's walk through it and discuss some of the syntactical features of Python
# ## Comments Are Marked by ``#``
# The script starts with a comment:
# ``` python
# # set the midpoint
# ```
# Comments in Python are indicated by a pound sign (``#``), and anything on the line following the pound sign is ignored by the interpreter.
# This means, for example, that you can have stand-alone comments like the one just shown, as well as inline comments that follow a statement. For example:
# ``` python
# x += 2 # shorthand for x = x + 2
# ```
# Python does not have any syntax for multi-line comments, such as the ``/* ... */`` syntax used in C and C++, though multi-line strings are often used as a replacement for multi-line comments (more on this in [String Manipulation and Regular Expressions](14-Strings-and-Regular-Expressions.ipynb)).
# ## End-of-Line Terminates a Statement
# The next line in the script is
# ``` python
# midpoint = randint(3, 7)
# ```
# This is an assignment operation, where we've created a variable named ``midpoint`` and assigned it the random value returned by ``randint(3, 7)``.
# Notice that the end of this statement is simply marked by the end of the line.
# This is in contrast to languages like C and C++, where every statement must end with a semicolon (``;``).
#
# In Python, if you'd like a statement to continue to the next line, it is possible to use the "``\``" marker to indicate this:
# + jupyter={"outputs_hidden": false}
# The trailing backslash continues this statement onto the next line.
x = 1 + 2 + 3 + 4 +\
    5 + 6 + 7 + 8
# -
# It is also possible to continue expressions on the next line within parentheses, without using the "``\``" marker:
# + jupyter={"outputs_hidden": false}
# Open parentheses allow the expression to span lines without a backslash.
x = (1 + 2 + 3 + 4 +
     5 + 6 + 7 + 8)
# -
# Most Python style guides recommend the second version of line continuation (within parentheses) to the first (use of the "``\``" marker).
# ## Semicolon Can Optionally Terminate a Statement
# Sometimes it can be useful to put multiple statements on a single line.
# The next portion of the script is
# ``` python
# lower = []; upper = []
# ```
# This shows the example of how the semicolon (``;``) familiar in C can be used optionally in Python to put two statements on a single line.
# Functionally, this is entirely equivalent to writing
# ``` python
# lower = []
# upper = []
# ```
# Using a semicolon to put multiple statements on a single line is generally discouraged by most Python style guides, though occasionally it proves convenient.
# ## Indentation: Whitespace Matters!
# Next, we get to the main block of code:
# ``` Python
# for i in range(10):
# if i < midpoint:
# lower.append(i)
# else:
# upper.append(i)
# ```
# This is a compound control-flow statement including a loop and a conditional – we'll look at these types of statements in a moment.
# For now, consider that this demonstrates what is perhaps the most controversial feature of Python's syntax: whitespace is meaningful!
#
# In programming languages, a *block* of code is a set of statements that should be treated as a unit.
# In C, for example, code blocks are denoted by curly braces:
# ``` C
# // C code
# for(int i=0; i<100; i++)
# {
# // curly braces indicate code block
# total += i;
# }
# ```
# In Python, code blocks are denoted by *indentation*:
# ``` python
# for i in range(100):
# # indentation indicates code block
# total += i
# ```
# In Python, indented code blocks are always preceded by a colon (``:``) on the previous line.
# The use of indentation helps to enforce the uniform, readable style that many find appealing in Python code.
# But it might be confusing to the uninitiated; for example, the following two snippets will produce different results:
# ```python
# >>> if x < 4: >>> if x < 4:
# ... y = x * 2 ... y = x * 2
# ... print(x) ... print(x)
# ```
# In the snippet on the left, ``print(x)`` is in the indented block, and will be executed only if ``x`` is less than ``4``.
# In the snippet on the right ``print(x)`` is outside the block, and will be executed regardless of the value of ``x``!
x = 5
if x < 4:
    x = x*2
print(x)  # prints 5: the doubling was skipped, but print runs unconditionally (it is outside the block)
# Python's use of meaningful whitespace often is surprising to programmers who are accustomed to other languages, but in practice it can lead to much more consistent and readable code than languages that do not enforce indentation of code blocks.
# If you find Python's use of whitespace disagreeable, I'd encourage you to give it a try: as I did, you may find that you come to appreciate it.
#
# Finally, you should be aware that the *amount* of whitespace used for indenting code blocks is up to the user, as long as it is consistent throughout the script.
# By convention, most style guides recommend to indent code blocks by four spaces, and that is the convention we will follow in this report.
# Note that many text editors like Emacs and Vim contain Python modes that do four-space indentation automatically.
# ## Whitespace *Within* Lines Does Not Matter
# While the mantra of *meaningful whitespace* holds true for whitespace *before* lines (which indicate a code block), white space *within* lines of Python code does not matter.
# For example, all three of these expressions are equivalent:
# + jupyter={"outputs_hidden": false}
# All three assignments are equivalent: whitespace *within* a line is ignored.
x=1+2
x = 1 + 2
x = 1 + 2
# -
# Abusing this flexibility can lead to issues with code readability – in fact, abusing white space is often one of the primary means of intentionally obfuscating code (which some people do for sport).
# Using whitespace effectively can lead to much more readable code,
# especially in cases where operators follow each other – compare the following two expressions for exponentiating by a negative number:
# ``` python
# x=10**-2
# ```
# to
# ``` python
# x = 10 ** -2
# ```
# I find the second version with spaces much more easily readable at a single glance.
# Most Python style guides recommend using a single space around binary operators, and no space around unary operators.
# We'll discuss Python's operators further in [Basic Python Semantics: Operators](04-Semantics-Operators.ipynb).
# ## Parentheses Are for Grouping or Calling
#
# In the previous code snippet, we see two uses of parentheses.
# First, they can be used in the typical way to group statements or mathematical operations:
# + jupyter={"outputs_hidden": false}
2 * (3 + 4)
# -
# They can also be used to indicate that a *function* is being called.
# In the next snippet, the ``print()`` function is used to display the contents of a variable (see the sidebar).
# The function call is indicated by a pair of opening and closing parentheses, with the *arguments* to the function contained within:
# + jupyter={"outputs_hidden": false}
print('first value:', 1)  # parentheses call the function; arguments go inside
# + jupyter={"outputs_hidden": false}
print('second value:', 2)
# -
# Some functions can be called with no arguments at all, in which case the opening and closing parentheses still must be used to indicate a function evaluation.
# An example of this is the ``sort`` method of lists:
# + jupyter={"outputs_hidden": false}
L = [4,2,3,1]
L.sort()  # sorts in place; the () is required even with no arguments
print(L)  # [1, 2, 3, 4]
# -
# The "``()``" after ``sort`` indicates that the function should be executed, and is required even if no arguments are necessary.
# ## Aside: A Note on the ``print()`` Function
#
# Above we used the example of the ``print()`` function.
# The ``print()`` function is one piece that has changed between Python *2.x* and Python *3.x*. In Python 2, ``print`` behaved as a statement: that is, you could write
# ``` python
# # Python 2 only!
# >> print "first value:", 1
# first value: 1
# ```
# For various reasons, the language maintainers decided that in Python 3 ``print()`` should become a function, so we now write
# ``` python
# # Python 3 only!
# >>> print("first value:", 1)
# first value: 1
# ```
# This is one of the many backward-incompatible constructs between Python 2 and 3.
# As of the writing of this book, it is common to find examples written in both versions of Python, and the presence of the ``print`` statement rather than the ``print()`` function is often one of the first signs that you're looking at Python 2 code.
# ## Finishing Up and Learning More
#
# This has been a very brief exploration of the essential features of Python syntax; its purpose is to give you a good frame of reference for when you're reading the code in later sections.
# Several times we've mentioned Python "style guides", which can help teams to write code in a consistent style.
# The most widely used style guide in Python is known as PEP8, and can be found at https://www.python.org/dev/peps/pep-0008/.
# As you begin to write more Python code, it would be useful to read through this!
# The style suggestions contain the wisdom of many Python gurus, and most suggestions go beyond simple pedantry: they are experience-based recommendations that can help avoid subtle mistakes and bugs in your code.
# <!--NAVIGATION-->
# < [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) | [Contents](Index.ipynb) | [Basic Python Semantics: Variables and Objects](03-Semantics-Variables.ipynb) >
| 02-Basic-Python-Syntax.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.2 64-bit (''venv'': venv)'
# name: python382jvsc74a57bd0d46d14bfc67a8d69f82f77af2e60e5b02fcabf25a759a56cf61072a2d590541d
# ---
import pandas as pd
# FIFA player dataset; NOTE(review): absolute local path will not work on other machines
df = pd.read_csv("/Users/alexigna/projects/skillfactory/unit1/data_sf.csv")
# # Задача 1
# С помощью сводной таблицы и функции loc посчитайте, сколько получают ("Wage") русские футболисты ("Russia"), играющие за ФК "AS Monaco".
# Total wages of Russian players per club. Filtering to one nationality first
# means the pivot has a single "Russia" column, so the first value of the
# "AS Monaco" row is the answer.
russians = df.loc[df["Nationality"] == "Russia"]
wages_by_club = russians.pivot_table(
    index="Club",
    columns="Nationality",
    values="Wage",
    aggfunc="sum",
    margins=0,
)
wages_by_club.loc["AS Monaco"].to_list()[0]
# # Задача 2
# Создайте сводную таблицу, содержащую сведения о средней скорости футболистов (SprintSpeed), занимающих разные позиции (Position) в разных футбольных клубах (Club).
#
# Основываясь на данных таблицы, отметьте три позиции, представители которых в среднем обладают самой высокой скоростью.
# +
# Average sprint speed per position and club. margins=True adds an "All" column
# holding the overall mean per position; sorting it descending ranks positions
# from fastest to slowest.
df.pivot_table(
    values="SprintSpeed",
    aggfunc="mean",
    columns="Club",
    index="Position",
    fill_value=0,
    margins=True,
)["All"].sort_values(ascending=False).index.to_list()
# Candidate answers read off the ranking:
# RB  (right back)
# LM  (left midfielder)
# RM  (right midfielder)
# CF  (centre forward)
# RWM (right wing midfielder)
# RS  (right striker)
# -
# # Задача 3
# Используя таблицу, созданную на предыдущем шаге, отметьте названия трёх клубов, в которых центральные форварды (ST) обладают наибольшей средней скоростью.
# Same position x club pivot; the "ST" row gives each club's average striker
# sprint speed, sorted descending to reveal the three fastest clubs.
df.pivot_table(
    values="SprintSpeed",
    aggfunc="mean",
    columns="Club",
    index="Position",
    fill_value=0,
    margins=True,
).loc["ST"].sort_values(ascending=False)
| unit_1/python-6.14.ipynb |