code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import random
from torch.utils.data import Dataset,sampler,DataLoader
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.autograd import Variable
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
# -
from data_functions import *
from models import es_rnn,holt_winters_no_trend
# Load the aggregated energy-production data and index it chronologically.
data = pd.read_pickle("aggreg_PRODUCTION_data.pkl")
data.Date = pd.to_datetime(data['Date'])
data = data.sort_values(by='Date')
# Keep observations from 2013 onwards only.
data = data[data.Date > '2012-12-31']
data=data.set_index('Date')
data.Energy_Generated=pd.to_numeric(data.Energy_Generated)
# Chronological train/test split.
# NOTE(review): both comparisons are strict, so rows dated exactly
# '2018-12-31' fall in neither split -- confirm this is intended.
train_data = data[data.index < '2018-12-31']
test_data =data[data.index > '2018-12-31']
#daily_data_with_weather = train_data[['Condition','Min_Temperature','Max_Temperature','Energy_Generated']]
daily_data_without_weather = train_data[['Energy_Generated']]
# Weekly and monthly mean aggregates of the generated energy.
weekly_data_mean = train_data.Energy_Generated.resample('W').mean().dropna()
monthly_data_mean = train_data.Energy_Generated.resample('M').mean().dropna()
# normalize features: MinMax-scale the monthly means into [0, 1]
monthly_data_mean = monthly_data_mean.values.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(monthly_data_mean)
scaled = scaled.flatten()
print(scaled)
series = scaled
# Hold out the last 5 points for validation; 'test' keeps the full series.
train=series[:-5]
test=series
# +
# Wrap the series in sequence-labeling datasets and batch them;
# shuffle=False preserves the temporal order of the windows.
# NOTE(review): sequence_labeling_dataset comes from data_functions --
# the meaning of the second (100) and third (False) arguments is not
# visible here; confirm against that module before changing them.
sl=sequence_labeling_dataset(train,100,False)
sl_t=sequence_labeling_dataset(test,100,False)
train_dl= DataLoader(dataset=sl,
                     batch_size=4,
                     shuffle=False)
test_dl= DataLoader(dataset=sl_t,
                    batch_size=4,
                    shuffle=False)
# -
# Instantiate the hybrid ES-RNN model and its optimizer.
hw=es_rnn()
print(sl)
opti = torch.optim.Adam(hw.parameters(), lr=0.01)#,weight_decay=0.0001
#Initial Prediction (untrained model), to visualise the starting point
overall_loss=[]
batch=next(iter(test_dl))
# batch layout: [0] input window, [1] target window, [2] seasonal shifts
inp=batch[0].float()#.unsqueeze(2)
out=batch[1].float()#.unsqueeze(2).float()
print(inp,out)
shifts=batch[2].numpy()
pred=hw(inp,shifts)
# Green: input followed by the true continuation; red: input + prediction.
plt.plot(torch.cat([inp[0],out[0,:]]).detach().numpy(),"g")
plt.plot(torch.cat([inp[0],pred[0,:]]).detach().numpy(),"r")
plt.show()
print(len(series))
#Initial Loss RMSE (notebook display expression, not assigned)
(torch.mean((pred-out)**2))**(1/2)
#Baseline Loss: RMSE of naively repeating the last seen value at each step
(torch.mean((inp[0][-1]-out)**2))**(1/2)
# Train for 20 epochs, tracking mean train and validation RMSE per epoch.
overall_loss_train = []
overall_loss = []
for j in tqdm(range(20)):
    loss_list_b = []
    train_loss_list_b = []
    # here we use batches of past, and to be forecasted values
    # batches are determined by a random start integer
    for batch in iter(train_dl):
        opti.zero_grad()
        inp = batch[0].float()
        out = batch[1].float()
        shifts = batch[2].numpy()
        # it returns the whole sequence atm
        pred = hw(inp, shifts)
        loss = (torch.mean((pred - out) ** 2)) ** (1 / 2)  # RMSE
        train_loss_list_b.append(loss.detach().cpu().numpy())
        loss.backward()
        opti.step()
    # here we use all the available values to forecast the future ones and eval on it.
    # torch.no_grad() skips building the autograd graph during evaluation,
    # saving memory without changing the computed loss values.
    with torch.no_grad():
        for batch in iter(test_dl):
            inp = batch[0].float()
            out = batch[1].float()
            shifts = batch[2].numpy()
            pred = hw(inp, shifts)
            loss = (torch.mean((pred - out) ** 2)) ** (1 / 2)
            loss_list_b.append(loss.detach().cpu().numpy())
    print(np.mean(loss_list_b))
    print(np.mean(train_loss_list_b))
    overall_loss.append(np.mean(loss_list_b))
    overall_loss_train.append(np.mean(train_loss_list_b))
# Plot of Train (red) and Validation (green) loss, we nicely converge
plt.plot(overall_loss, "g")
plt.plot(overall_loss_train, "r")
# +
#Forecasting on the Validation set: feed input+target so the model
#forecasts the window that follows all the known data.
batch=next(iter(test_dl))
inp=batch[0].float()#.unsqueeze(2)
out=batch[1].float()#.unsqueeze(2).float()
shifts=batch[2].numpy()
pred=hw(torch.cat([inp,out],dim=1),shifts)
#plt.plot(torch.cat([inp,out,pred],dim=1)[0].detach().numpy(),"r")
# Green: known series; red: known input followed by the model's forecast.
plt.plot(torch.cat([inp[0],out[0,:]]).detach().numpy(),"g")
plt.plot(torch.cat([inp[0],pred[0,:]]).detach().numpy(),"r")
plt.show()
# +
#Forecasting to the Future looks good.
batch=next(iter(test_dl))
inp=batch[0].float()#.unsqueeze(2)
out=batch[1].float()#.unsqueeze(2).float()
shifts=batch[2].numpy()
pred=hw(torch.cat([inp,out],dim=1),shifts)
plt.plot(torch.cat([inp,out],dim=1)[0].detach().numpy(),"b")
plt.plot(torch.cat([inp,out,pred],dim=1)[0].detach().numpy(),"r")
plt.show()
# +
# Inspect the learnt (sigmoid-squashed) smoothing parameters.
param_list=[]
for params in hw.parameters():
    param_list.append(params)
# NOTE(review): 'params' below is the LAST parameter tensor from the loop,
# and param_list is immediately overwritten -- presumably the exponential-
# smoothing coefficients live in that final tensor; confirm against es_rnn.
param_list=torch.sigmoid(params[0:2]).detach().numpy()
param_list
# -
# -
| MODEL_PREDIC_BENCHMARK/hybrid_rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Wrangle and Analyse of WeRateDogs's Tweets
# <pre>
# Udacity - Nanodegree Data Analyst
# Author: <NAME>
# </pre>
# ## Table of Contents
# - [Introduction](#intro)
# - [Data Wrangling](#data_wrangling)
# - [Gather](#gather)
# - [Assessing of Tidiness](#assessing_tidiness)
# - [Clean of Tidiness](#clean_tidiness)
# - [Assessing of Quality](#assessing_quality)
# - [Clean of Data Quality](#clean_quality)
# - [Exploratory Analysis](#eda)
# <a id='intro'></a>
# ## Introduction
# The dataset used is a Twitter user's tweet log called @dog_rates or WeRateDogs. This Twitter profile classifies dogs by assigning notes and kind comments. The main objective of this project is to evaluate and clean the data set provided, but some analyzes and visualizations will also be carried out.
#
# +
import numpy as np
import pandas as pd
import json
import requests
import os
from functools import reduce
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# suppress warnings from final output
import warnings
warnings.simplefilter("ignore")
# -
# <a id='data_wrangling'></a>
# ## Data Wrangling
#
# The assessing and cleaning steps will be divided into two each, for Data Organization and Data Quality.
# <a id='gather'></a>
# ### Gather
# * Dataset 1:
#Open dataset 1: the WeRateDogs enhanced tweet archive
df = pd.read_csv('twitter-archive-enhanced.csv')
df.head()
# * Dataset 2:
#Dataset 2 url and name information
url_tsv = 'https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv'
name_tsv = 'image-predictions.tsv'
#Download dataset 2 programmatically
response = requests.get(url_tsv)
with open(name_tsv, 'wb') as file:
    file.write(response.content)
#Open dataset 2 (tab-separated image-prediction results)
df2 = pd.read_csv(name_tsv, sep='\t')
df2.head()
# Dataset 3:
#Reads JSON file with .txt extension. The file is a stream of JSON objects;
#joining all lines and splitting on '}{' recovers the individual objects,
#re-adding the braces that the split removed (the first and last fragments
#each keep one of their original braces).
# NOTE(review): this assumes '}{' never appears inside a JSON string value;
# confirm, or parse the file line by line if it is one object per line.
with open('tweet-json.txt','r') as json_file:
    lines = json_file.readlines()
lines = [line.strip("\n") for line in lines]
lines = ''.join(lines).split('}{')
data_json = [json.loads('%s}' % line) if idx == 0 else
             json.loads('{%s' % line) if idx == len(lines)-1
             else json.loads('{%s}' % line)
             for idx, line in enumerate(lines)]
#Open dataset 3 as a dataframe
df3 = pd.DataFrame(data_json)
df3.head()
# <a id='assessing_tidiness'></a>
# ### Assessing of Tidiness
# First, the verification will be only of the structures (rows, size, columns) of the data frames.
#Number of lines / observations of each dataset
df.shape[0], df2.shape[0], df3.shape[0]
#Number of columns of each dataset
df.shape[1], df2.shape[1], df3.shape[1]
# Checking the columns of the dataframes:
#Features (columns) of each dataset
df.columns, df2.columns, df3.columns
# - All of these columns and information should not be separated into three different dataframes.
# - Most df3 columns are not absolutely necessary, as specified in the project proposal.
#
# Checking the columns common to dataframes:
#Common columns between df and df2
np.intersect1d(df.columns.values, df2.columns.values)
#Common columns between df and df3
np.intersect1d(df.columns.values, df3.columns.values)
#Common columns between df2 and df3
np.intersect1d(df2.columns.values, df3.columns.values)
# - There should be a column in df3 with the name 'tweet_id'. The column is present with the name 'id'.
#
# Checking the 'doggo', 'floofer', 'pupper', 'puppo' columns:
df.sample(5)[['doggo', 'floofer', 'pupper', 'puppo']].head()
df['doggo'].value_counts()
df['floofer'].value_counts()
df['pupper'].value_counts()
df['puppo'].value_counts()
# - The 'doggo', 'floofer', 'pupper', 'puppo' columns should not exist, at least, if they were dummies variables to train a machine learning model. There should only be one column for 'stages' for these values.
#Number of instances that have at least 1 stage
((df['doggo'] == "doggo") + (df['floofer']=="floofer") + (df['pupper']=='pupper') + (df['puppo']=='puppo')).value_counts()
#Number of instances that have exactly 1 stage
((df['doggo'] == "doggo") ^ (df['floofer']=="floofer") ^ (df['pupper']=='pupper') ^ (df['puppo']=='puppo')).value_counts()
# - In 380 lines there is at least one stage. Exactly 1 stage appears in 366 lines. In 14 lines there are 2 or more stages.
# #### Tidiness:
# - Discard unnecessary columns in df3_copy.
# - The 'id' column in df3_copy should be 'tweet_id'.
# - All columns and information should be on a single dataframe.
# - A new 'stages' column must be created.
# - The 'doggo', 'floofer', 'pupper', 'puppo' columns must be removed.
# <a id='clean_tidiness'></a>
# ### Clean (Tidiness)
df1_copy, df2_copy, df3_copy = df.copy(), df2.copy(), df3.copy()
# #### Discard unnecessary columns in df3_copy.
#
# #### Define
# - Remove unnecessary columns in df3_copy, leaving only 'id', 'retweet_count', 'favorite_count':
#
# In the project proposal it was defined that, at least, the columns 'id', 'retweet_count', 'favorite_count' would be necessary.
# #### Code
df3_copy = df3_copy[['id', 'retweet_count', 'favorite_count']]
# #### Test
df3_copy.head()
# #### The 'id' column in df3_copy should be 'tweet_id'.
# #### Define
# - Rename the 'id' column to 'tweet_id' in df3_copy:
# #### Code
df3_copy.rename(columns={'id':'tweet_id'}, inplace=True)
# #### Test
assert 'tweet_id' in df3_copy.columns and 'id' not in df3_copy.columns
df3_copy.head()['tweet_id']
# #### All columns and information should be on a single dataframe. (1)
# #### Define
# - Joining the df1_copy and df2_copy dataframes:
# #### Code
df_clean = pd.merge(df1_copy, df2_copy, on = 'tweet_id', how = 'left')
# ### Test
df_clean.head()
df_clean.shape[1]
df_clean.columns
# #### All columns and information should be on a single dataframe. (2)
# #### Define
# - Joining the df_clean (df_copy and df2_copy) and df3_copy dataframes:
# #### Code
df_clean = pd.merge(df_clean, df3_copy, on = 'tweet_id', how = 'left')
# #### Test
df_clean.head()
df_clean.shape[1]
df_clean.columns
# #### A new 'stages' column must be created.
# #### Define
# Create a stage column for the values 'doggo', 'floofer', 'pupper', 'puppo'.
# #### Code
# Build the combined 'stages' column from the four one-hot-style stage
# columns. A row's label is the comma-joined list of the stages present,
# always in the fixed order doggo, floofer, pupper, puppo (exactly the
# combinations the data contains), or 'none' when no stage applies.
# This replaces fifteen hand-written mask assignments with one rule that
# covers every combination, producing identical labels.
stage_cols = ['doggo', 'floofer', 'pupper', 'puppo']
df_clean['stages'] = df_clean[stage_cols].apply(
    lambda row: ', '.join(col for col in stage_cols if row[col] == col) or 'none',
    axis=1,
)
# #### Test
df_clean.sample(5)['stages']
assert 'stages' in df_clean.columns.values
assert df_clean['stages'].isna().sum() == 0
df_clean['stages'].value_counts()
# #### The 'doggo', 'floofer', 'pupper', 'puppo' columns must be removed.
# #### Define
# Remove the 'doggo', 'floofer', 'pupper', 'puppo' columns.
# #### Code
df_clean.drop(columns=['doggo', 'floofer', 'pupper', 'puppo'], inplace=True)
# #### Test
assert 'doggo' not in df_clean.columns.values
assert 'floofer' not in df_clean.columns.values
assert 'pupper' not in df_clean.columns.values
assert 'puppo' not in df_clean.columns.values
# * The dataframes df, df2 and df3 will no longer be used, so they can be deleted.
del df, df2, df3
# * The dataframes df1_copy, df2_copy and df3_copy will no longer be used, so they can be deleted.
del df1_copy, df2_copy, df3_copy
# <a id='assessing_quality'></a>
# ### Assessing of Data Quality
#
# * Checking general dataframe information:
df_clean.info()
# * Checking values of numeric features:
#Checking statistics for quantitative numerical features
num_columns = ['rating_numerator', 'rating_denominator','img_num','p1_conf',
'p2_conf', 'p3_conf', 'retweet_count', 'favorite_count']
df_clean[num_columns].describe()
# * Checking which columns have missing values (NaN):
df_clean.isnull().sum()
# * Displaying columns have missing values (NaN) and the amount of these values:
#Count of NaN values more than 0
na_values = df_clean.isna().sum()
na_values = na_values[na_values > 0]
na_values
#Plot of the NaN values per feature
plt.figure(figsize=(10,8))
na_values.sort_values().plot(kind='barh', position=0, color='blue')
plt.title('Missing values by feature');
plt.xlabel('Count of missing values (NaN)');
plt.ylabel('Feature');
# Columns with NaN values must be treated in some way, like filling with some value.
# * Checking for duplicate whole lines:
df_clean.duplicated().sum()
# No duplicate lines.
# * Evaluating the 'source' column:
df_clean['source'].value_counts()
# The values in the source column will be the values between the anchor tags.
# * Evaluating the 'rating_denominator' column:
df_clean.rating_denominator.unique()
df_clean.query('rating_denominator == 0')[['rating_numerator', 'rating_denominator']]
# The value of 'rating_denominator' must not be 0, because calculating the grade is not feasible with that.
# * Evaluating the 'stage' column:
df_clean['stages'].value_counts()
# * Evaluating the 'p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf', 'p2_dog', 'p3', 'p3_conf', 'p3_dog' columns:
p_columns = ['stages', 'p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf', 'p2_dog', 'p3', 'p3_conf', 'p3_dog']
df_clean[p_columns].head()
df_clean.query('p1.isnull()', engine='python')[p_columns].head()
#Query used to check the number of times that all columns in p_columns are NaN
query_ps_is_null = 'p1.isnull() & p2.isnull() & p3.isnull() & '
query_ps_is_null = query_ps_is_null + 'p1_conf.isnull() & p2_conf.isnull() & p3_conf.isnull() & '
query_ps_is_null = query_ps_is_null + 'p1_dog.isnull() & p2_dog.isnull() & p3_dog.isnull()'
query_ps_is_null
#number of times that all columns in p_columns are NaN
len(df_clean.query(query_ps_is_null, engine='python'))
# When one of the columns 'p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf', 'p2_dog', 'p3', 'p3_conf', 'p3_dog' is NaN, the others are also.
# * Evaluating the 'retweet_count' and 'favorite_count' columns:
df_clean[['retweet_count', 'favorite_count']].describe().min()
df_clean[['retweet_count', 'favorite_count']].isna().sum()
# The 'retweet_count', 'favorite_count', as seen above had NaN values twice each. The smallest value for these columns is 0.
df_clean['retweet_count'].dtype
df_clean['favorite_count'].dtype
# These columns with suffix 'count' should not be of type float, but int.
# * Evaluating the 'img_num' column:
df_clean['img_num'].min(), df_clean['img_num'].max()
df_clean['img_num'].unique()
df_clean['img_num'].dtype
# This column with suffix 'num' should not be of type float, but int. The column values are {1., 2., 3. and 4.} but should be {1, 2, 3, 4}.
# #### Quality:
#
# - The 'rating_denominator' column must not have a value of 0.
# - The values in the source column will be the values between the anchor tags.
# - The 'retweet_count' column must have its NaN values filled in.
# - The 'favorite_count' column must have its NaN values filled in.
# - Unrated tweets, considering columns p1, p2 and p3, should be removed.
# - The column type 'img_num' should be int64 and not float64.
# - The column type 'favorite_count' should be int64 and not float64.
# - The column type 'retweet_count' should be int64 and not float64.
# - Only lines that do not represent a retweet should be kept.
# - If confirmed that the tweets are not retweets, the columns for retweets are unnecessary.
# <a id='clean_quality'></a>
# ### Clean of Data Quality
# #### The 'rating_denominator' column must not have a value of 0.
# #### Define
# Replace the 0 values of rating_denominator with 1.
# #### Code
# Replace the invalid 0 denominators with 1 so ratings stay computable.
df_clean.loc[df_clean.rating_denominator == 0, 'rating_denominator'] = 1
# #### Test
assert 0 not in df_clean.rating_denominator.unique()
# #### The values in the source column will be the values between the anchor tags.
# #### Define
# Change the values in 'source' to just the desired part of the string.
# #### Code
# Map each raw HTML anchor in 'source' to its human-readable label.
# A single replace() with an explicit mapping performs the same four
# substitutions as the original repeated loc-assignments, without the
# old_source/new_source variable reuse; unknown values are left untouched.
source_map = {
    '<a href="http://twitter.com/download/iphone" rel="nofollow">Twitter for iPhone</a>':
        'Twitter for iPhone',
    '<a href="http://vine.co" rel="nofollow">Vine - Make a Scene</a>':
        'Vine - Make a Scene',
    '<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>':
        'Twitter Web Client',
    '<a href="https://about.twitter.com/products/tweetdeck" rel="nofollow">TweetDeck</a>':
        'TweetDeck',
}
df_clean['source'] = df_clean['source'].replace(source_map)
# #### Test
df_clean['source'].value_counts()
# #### The 'retweet_count' column must have its NaN values filled in.
# #### Define
#
# Fill NaN values of The 'retweet_count' with 0
# #### Code
# Fill missing retweet counts with 0. Column-level fillna(inplace=True) is
# deprecated in modern pandas and may operate on a copy; assigning the
# result back is the supported, equivalent form.
df_clean['retweet_count'] = df_clean['retweet_count'].fillna(0)
# #### Test
assert df_clean['retweet_count'].isna().sum() == 0
# #### The 'favorite_count' column must have its NaN values filled in.
# #### Define
#
# Fill NaN values of The 'favorite_count' with 0
# #### Code
# Fill missing favorite counts with 0. Column-level fillna(inplace=True) is
# deprecated in modern pandas and may operate on a copy; assigning the
# result back is the supported, equivalent form.
df_clean['favorite_count'] = df_clean['favorite_count'].fillna(0)
# #### Test
assert df_clean['favorite_count'].isna().sum() == 0
# #### Unrated tweets, considering columns p1, p2 and p3, should be removed.
#
# #### Define
# Drop the lines where p1 is NaN. This should make the columns 'p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf', 'p2_dog', 'p3', 'p3_conf', 'p3_dog' do not have NaN values.
# #### Code
df_clean.head()
# Drop rows whose image-prediction columns are NaN; these nine columns were
# shown above to be missing together, so filtering on the full set removes
# exactly the unrated tweets.
p_columns = ['p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf', 'p2_dog', 'p3', 'p3_conf', 'p3_dog']
df_clean.dropna(subset=p_columns, inplace=True)
# #### Test
p_columns = ['p1', 'p1_conf', 'p1_dog', 'p2', 'p2_conf', 'p2_dog', 'p3', 'p3_conf', 'p3_dog']
for column in p_columns:
    assert df_clean[column].isna().sum() == 0
df_clean.shape
# #### The column type 'img_num' should be int64 and not float64.
# #### Define
# Change column type 'img_num' from float64 to int64.
# #### Code
df_clean['img_num'] = df_clean['img_num'].astype(np.int64)
# #### Test
assert df_clean['img_num'].dtype == np.dtype(np.int64)
# #### The column type 'favorite_count' should be int64 and not float64.
#
# #### Define
# Change column type 'favorite_count' from float64 to int64.
# #### Code
df_clean['favorite_count'] = df_clean['favorite_count'].astype(np.int64)
# #### Test
assert df_clean['favorite_count'].dtype == np.dtype(np.int64)
# #### The column type 'retweet_count' should be int64 and not float64.
#
# #### Define
# Change column type 'retweet_count' from float64 to int64.
# #### Code
df_clean['retweet_count'] = df_clean['retweet_count'].astype(np.int64)
# #### Test
assert df_clean['retweet_count'].dtype == np.dtype(np.int64)
# #### Only lines that do not represent a retweet should be kept.
# #### Define
# Remove lines where 'retweeted_status_id' is not null.
# #### Code
# Keep only original tweets: rows with a retweeted_status_id are retweets.
df_clean = df_clean[df_clean.retweeted_status_id.isnull()]
# #### Test
assert df_clean.retweeted_status_id.isnull().sum() == df_clean.shape[0]
df_clean.retweeted_status_id.value_counts()
# #### After confirming that tweets are not retweets, columns for retweets are unnecessary.
#
# #### Define
#
# Remove 'retweeted_status_id', 'retweeted_status_user_id', and 'retweeted_status_timestamp' columns.
df_clean.drop(columns=['retweeted_status_id', 'retweeted_status_user_id','retweeted_status_timestamp'], inplace=True)
# #### Test
assert 'retweeted_status_id' not in df_clean.columns
assert 'retweeted_status_user_id' not in df_clean.columns
assert 'retweeted_status_timestamp' not in df_clean.columns
# ### Save dataframe to .csv
df_clean.to_csv('twitter_archive_master.csv', index=False)
# <a id='eda'></a>
# ## Exploratory Analysis
# Analysis - 3 insights and 1 visualization: rating values and breed frequencies
df_clean['rating'] = df_clean['rating_numerator']/df_clean['rating_denominator']
#Numeric columns that are quantitative
numeric_columns = ['rating', 'rating_numerator','rating_denominator','img_num','p1_conf','p2_conf','p3_conf',
'retweet_count','favorite_count']
# ### Which tweet got the highest score?
#Highest rating
df_clean['rating'].max()
#Highest rated Tweet
highest_grade_tweet = df_clean[df_clean['rating'] == df_clean['rating'].max()]
highest_grade_tweet
# Below are some of the characteristics of the highest rated tweet:
highest_grade_tweet['name'].values[0]
highest_grade_tweet['text'].values[0]
highest_grade_tweet['jpg_url']
highest_grade_tweet['expanded_urls'].values[0]
highest_grade_tweet['retweet_count'].values[0]
highest_grade_tweet['favorite_count'].values[0]
#Classification of the p1 algorithm and its confidence in the result:
highest_grade_tweet['p1'].values[0], highest_grade_tweet['p1_conf'].values[0]
#Classification of the p1 algorithm and its confidence in the result:
highest_grade_tweet['p2'].values[0], highest_grade_tweet['p2_conf'].values[0]
#Classification of the p1 algorithm and its confidence in the result:
highest_grade_tweet['p3'].values[0], highest_grade_tweet['p3_conf'].values[0]
# ### How are the measurements of each stage?
stages_count = df_clean['stages'].value_counts()
stages_count
sum_stages = stages_count.sum()
sum_stages
#Percentage of frequency of each stage in total
stages_count.apply(lambda x : x/sum_stages)
stages_count = stages_count.drop(index='none')
sum_stages = stages_count.sum()
sum_stages
#Percentage of frequency of each stage in total, except 'none'
stages_count.apply(lambda x : x/sum_stages)
#Colors used in the plot
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
color = sns.color_palette(flatui)
#Plot bar chart for counting stages
plt.figure(figsize=[11.69,8.27]);
# seaborn >= 0.12 removed positional x/y arguments on barplot;
# passing them by keyword works on both old and new versions.
sns.barplot(x=stages_count.index, y=stages_count.values, palette=color)
plt.title('Count of the stages');
plt.ylabel('Number of dogs');
plt.xlabel('Stage');
# There is a big difference in the number of dogs per stage. 84% of the stages are 'none'. Considering those that are classified (excluding stage 'none'), 66% are 'pupper', 20% are 'doggo', 7.5% are 'puppo', 2.3% are 'floofer', and the rest are classified with 2 stages.
# * Checking the average values for each stage:
stages = df_clean.groupby('stages')
stages[numeric_columns].mean()
# The rating do not show significant differences by stages.
#
# Considering only the unique stages, the 'puppo' stage has the highest average of favorites count, and the 'doggo' stage has the highest average retweet count.
# ### How are the results of the algorithms p1, p2 and p3 compared to each other?
#
# * Checking the number of cases where each algorithm identified a dog in the image:
ps_dogs = ['p1_dog','p2_dog','p3_dog']
df_clean[ps_dogs].sum()
# The p2 algorithm was the one that identified the largest number of dogs, 1495. Followed by p1 with 1477 and p3 with 1446. This metric slightly differentiated the efficiency of the algorithms, but the difference between them was not very large.
#
# * Checking the average confidence of each algorithm:
ps_confs = ['p1_conf','p2_conf','p3_conf']
df_clean[ps_confs].mean()
# The p1 algorithm is the one with the highest average confidence, around 59%. The p2 and p3 algorithms have an average confidence of approximately 13% and 6%, respectively. This metric was able to quite distinguish the supposed efficiency of the algorithms, p1 being reasonably efficient, and p2 and p3 relatively inefficient.
# * Checking the total number of dog breeds identified by the algorithms:
len(df_clean.query('p1_dog == True')['p1'].unique())
len(df_clean.query('p2_dog == True')['p2'].unique())
len(df_clean.query('p3_dog == True')['p3'].unique())
# The algorithms p1, p2 and p3 identified 111, 113 and 116 different breeds of dogs, respectively.
# ### References
#
# UDACITY - Data Analyst Nanodegree Program: https://www.udacity.com/course/data-analyst-nanodegree--nd002
#
# StackOverflow: https://stackoverflow.com/questions/21104592/json-to-pandas-dataframe
#
# WeRateDogs, Twitter profile (@dog_rates): https://twitter.com/dog_rates/status/749981277374128128
| wrangle_act.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import math
class Polygon:
    """A filled 2D polygon: parallel vertex coordinate lists plus a colour.

    Attributes:
        x:   list of vertex x coordinates (filled in by the shape builders)
        y:   list of vertex y coordinates
        col: fill colour, an RGB list such as [1, 0, 0]
    """

    def __init__(self):
        self.x = []
        self.y = []
        self.col = []
def rect(x1, y1, x2, y2, c):
    """Build an axis-aligned rectangle from two opposite corners.

    (x1, y1) and (x2, y2) are diagonal corners; c is the fill colour.
    Returns a Polygon whose vertices run counter-clockwise.
    """
    shape = Polygon()
    shape.x = [x1, x2, x2, x1]
    shape.y = [y1, y1, y2, y2]
    shape.col = c
    return shape
def moon(xpos, ypos, rad, c, shift=0.8, mask_col=None):
    """Build a crescent moon as two overlapping circles.

    The first circle (colour ``c``) is the moon itself; the second circle,
    offset horizontally by ``shift`` and filled with ``mask_col``, is drawn
    on top to mask part of the first and leave a crescent.

    Parameters:
        xpos, ypos: centre of the moon circle
        rad:        circle radius
        c:          fill colour of the visible crescent
        shift:      horizontal offset of the masking circle (default 0.8,
                    the value previously hard-coded)
        mask_col:   colour of the masking circle; defaults to red [1, 0, 0]
                    (the flag background colour previously hard-coded)

    Returns:
        A list [body, mask] of the two Polygons, in drawing order.
    """
    # Avoid a mutable default argument for the colour list.
    if mask_col is None:
        mask_col = [1, 0, 0]
    m1 = Polygon()
    m2 = Polygon()
    res = 50  # number of segments approximating each circle
    a = 2 * math.pi / res
    for i in range(res):
        m1.x.append(xpos + math.cos(i * a) * rad)
        m1.y.append(ypos + math.sin(i * a) * rad)
        m2.x.append(xpos + math.cos(i * a) * rad + shift)
        m2.y.append(ypos + math.sin(i * a) * rad)
    m1.col = c
    m2.col = mask_col
    return [m1, m2]
def star(xpos, ypos, rad, peaks, c, inner_ratio=0.4):
    """Build a star-shaped Polygon.

    Vertices alternate between an inner and an outer radius around the
    centre, giving ``peaks`` points.

    Parameters:
        xpos, ypos:  centre of the star
        rad:         outer radius (tip of each point)
        peaks:       number of points on the star
        c:           fill colour
        inner_ratio: inner-vertex radius as a fraction of ``rad``
                     (default 0.4, the value previously hard-coded as sf)

    Returns:
        The star Polygon.
    """
    s = Polygon()
    res = peaks * 2  # one inner + one outer vertex per point
    a = 2 * math.pi / res
    for i in range(res):
        radius = rad * inner_ratio if i % 2 == 0 else rad
        # The -a/2 phase offset preserves the original star orientation.
        s.x.append(xpos + math.cos(i * a - a / 2) * radius)
        s.y.append(ypos + math.sin(i * a - a / 2) * radius)
    s.col = c
    return s
#def display_flag(canvas,objects,m_objects,s_objects):
def display_flag(canvas,m_objects,objects,s_objects):
    """Render all shapes on a matplotlib figure of size ``canvas``.

    NOTE: the argument order differs from the commented-out older
    signature above -- moons now come BEFORE the plain shapes.

    canvas:    figure size tuple, e.g. (15, 10)
    m_objects: list of moons, each itself a list of Polygons (body + mask)
    objects:   flat list of background Polygons, drawn first
    s_objects: list of star Polygons, drawn last (on top)
    """
    plt.figure(figsize=canvas)
    plt.axis('off')
    for o in objects:
        plt.fill (o.x, o.y, c=o.col)
    for moon in m_objects:
        for ci in moon:
            plt.fill (ci.x, ci.y, c=ci.col)
    for st in s_objects:
        plt.fill (st.x, st.y, c=st.col)
# +
# Assemble the Singapore flag: red background, white lower half,
# a white crescent moon and five white stars in the canton.
objs=[]   # plain background polygons
mobjs=[]  # moons (each a [body, mask] pair)
sobjs=[]  # stars
bg=rect(0,0,15,10,[1,0,0])
objs.append(bg)
#cross1=rect(4,2,6,8,[1,1,1])
#objs.append(cross1)
white=rect(0.03,0.05,14.96,5,[1,1,1])
objs.append(white)
#objs.append(moon(9,7,7,[0,0,1]))
mobjs.append(moon(3, 7.5, 2, [1,1,1]))
# Five stars arranged in a circle beside the crescent
sobjs.append(star(4,8.8,0.3,5,[1,1,1]))
sobjs.append(star(2.8,7.8,0.3,5,[1,1,1]))
sobjs.append(star(5.2,7.8,0.3,5,[1,1,1]))
sobjs.append(star(3.3,6.5,0.3,5,[1,1,1]))
sobjs.append(star(4.7,6.5,0.3,5,[1,1,1]))
#display_flag((15,10), objs, mobjs)
display_flag((15,10), mobjs, objs, sobjs)
# -
| T1/02_fun_with_flags/Santos_Singapore Flag.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from urllib.request import urlopen
import pickle
from bs4 import BeautifulSoup
home_url = 'https://ur.wikipedia.org'
# NOTE(review): the line below immediately overwrites the Urdu host with the
# Kashmiri one, while the seed link and the output filename still refer to
# Urdu Wikipedia -- confirm which wiki is actually intended.
home_url = 'https://ks.wikipedia.org'
# Seed: the Special:AllPages listing to crawl for article links.
links = ['https://ur.wikipedia.org/w/index.php?title=%D8%AE%D8%A7%D8%B5:%D8%AA%D9%85%D8%A7%D9%85_%D8%B5%D9%81%D8%AD%D8%A7%D8%AA&from=1057']
all_links = []
# Main code: follow the "next page" navigation of Special:AllPages,
# collecting every article href on each page.
prev_len = 0
for link in links:
    while link:
        # Download the page and flatten it into a single string.
        html_doc = ''
        with urlopen(link) as response:
            for line in response:
                line = line.decode('utf-8')
                html_doc = html_doc + line.replace('\n','')
        soup = BeautifulSoup(html_doc, 'html.parser')
        div = soup.find('div',{'class':'mw-allpages-body'})
        if div:
            anchors = div.find_all('a');
            all_links = all_links + [home_url + anchor['href'] for anchor in anchors]
        print(len(set(all_links)))
        # Stop when a page contributes no new links (also the guard that
        # ends the crawl if the navigation link below is never updated;
        # NOTE(review): if the very first page has no 'mw-allpages-body'
        # div this loop could repeat the same URL -- confirm).
        if prev_len == len(set(all_links)):
            break
        # Advance to the next listing page when a prev/next nav pair exists.
        nav_div = soup.find('div',{'class':'mw-allpages-nav'})
        if nav_div and len(nav_div.find_all('a')) == 2:
            link = home_url + nav_div.find_all('a')[1]['href']
        prev_len = len(set(all_links))
len(set(all_links))
# Deduplicate and persist the collected article URLs.
all_links = list(set(all_links)); len(all_links)
all_links[500]
with open('all_urdu_wikipedia_links.pkl', 'wb') as f:
    pickle.dump(all_links, f)
| datasets-preparation/get-all-article-links-for-urdu-wikipedia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.0
# language: julia
# name: julia-0.6
# ---
# Starting with Julia
# =====
# * Basics in Julia
# * Check the [Julia WikiBook](https://en.wikibooks.org/wiki/Introducing_Julia/) for much more detailed introduction to Julia
# * Or visit Julia documentation [pages](https://docs.julialang.org/en/stable/)
#
# ### Content
# <ul>
# <li><a href="#vectors">Vector &amp; Matrices</a></li>
# <li><a href="#indexing">Indexing in Julia</a></li>
# <li><a href="#collections">Other collections</a></li>
# <li><a href="#functions">Functions</a></li>
# <li><a href="#flow">Flow control</a></li>
# <li><a href="#scope">Scope</a></li>
# <li><a href="#packages">Packages</a></li>
# <li><a href="#notes">Other important notes</a></li>
# </ul>
#
# ### Start Julia Terminal
# Lets start slowly
1+2
# Check-out how Julia syntax is simple and similar to Matlab
freq = 1/pi; # frequency
x = 1.5; # x value
y = sin(2*pi*freq*x); # y value
# Now print the result (just like Matlab "fprintf" function)
@printf("So y equals %.2f (rounded to 2 decimal places)\n",y)
# # Vector and Matrix<a name="vectors"></a>
# * Vectors and Matrices are stored in Julia as ["Arrays"](https://en.wikibooks.org/wiki/Introducing_Julia/Arrays_and_tuples#Creating_arrays)
# * Vector elements must be separated by commas
# * Matrix rows are separated by semicolumns
# * Size matters when multiplying vectors/matrices. Not like in R!
# +
# Vector separated by commas! Otherwise 2D Array (=Matrix)
vector = [1,2,3,4,5];
# Matrix: rows separated by semicolumn
matrix = [1 2 3 4 5;6 7 8 9 10];
println(matrix);
# Matrices or vectors (=arrays) can also store strings (or ther types)
string_vec = ["one","two","and so on"];
println(string_vec)
# Size matters:
[1 1 1]*[10 10 10]'
# Dimension Mismatch Error: [1 1 1]*[10 10 10]
# try this in R: c(1,1,1)*c(10,10,10) and c(1,1,1)*t(c(10,10,10))
# None of the above would return an error in R! Scary...
# -
# Unlike in Matlab, 1:1:10 is not a vector, but `StepRange`
# * Conversion is possible using `collect` function
range = 1:1:10
# Convert range to vector use `collect` function
v = collect(range);
println("Type of range = ",typeof(range))
println("Type of v = ",typeof(v))
# #### Indexing in Julia<a name="indexing"></a>
# * Same principle as in _Matlab_, i.e. [row,column]
# * Use **[] instead of ()**!
# * Indexing starts with 1 (not like in, e.g. Python) and ends with an `end`
# * Use `:` symbol for "all elements"
# * In Julia, dot `.` is used to signify "element-wise" operations
# +
vector = [1,2,3,4];
println("Second element of the vector = ",vector[2])
matrix = [1 2 3;4 5 6;7 8 9];
println("Second row and last column of the matrix = ",matrix[2,end])
# Conditional indexing: in Matlab>> matrix(matrix>=8) = 100;
# Following command replace all elements > 8 with 100.
# In Julia, dot `.` is used to signifies "element-wise" operations.
matrix[matrix.>=8] = 100;
println("Matrix with modified elements")
println(matrix)
# -
# ### Other collections<a name="collections"></a>
# * [Dictionaries](https://en.wikibooks.org/wiki/Introducing_Julia/Dictionaries_and_sets#Dictionaries): similar to _Matlab_ structure or cell array
# * [Sets](https://en.wikibooks.org/wiki/Introducing_Julia/Dictionaries_and_sets#Sets): just like Dictionaries but to avoid duplicated entries
# * [Tuples](https://en.wikibooks.org/wiki/Introducing_Julia/Arrays_and_tuples#Tuples): used often in Python
# * [Use-defined](https://en.wikibooks.org/wiki/Introducing_Julia/Types#Creating_types) types
# * [DataFrames](https://en.wikibooks.org/wiki/Introducing_Julia/DataFrames): requires external package (see section Packages)
# +
# Create dictionary
dict1 = Dict("a" => 1, "b" => 2, "c" => 3)
# Indexing via keys (like in DataFrames)
a = dict1["a"];
println("Show variable for 'a' = $a\n")
# Create a tuple
tup = (1,2,3);
# Standard indexing
t = tup[2];
println("Tuple use standard indexing, tup[2] = $t\n")
# -
# ## Functions<a name="functions"></a>
# There are several ways how to write a [function](https://en.wikibooks.org/wiki/Introducing_Julia/Functions) in Julia:
# * Standard: use `function` keyword
# * Natural: write just like in Math books
# * Anonymous functions: without names (handy to use as input)
#
# We can write multiple **functions with the same name**!
# * Inputs, or type of input, make then the difference!
# * Principle of multiple dispatch
# +
# Lets create a function with 2 input variables and print them
function printinput(a,b)
    # one write that emits the same three lines as three println calls
    print("printinput function test\nFirst input = $a\nSecond input = $b\n")
end
# Invoke the 2-argument method
printinput(1,2)
# A function with a return value (not like Matlab's  c = outfce(a,b)  syntax)
function outfce(a,b)
    # `return` is optional: the value of the last expression is returned
    a + b
end
c = outfce(1,2);
println("\noutfce test output = $c\n")
# The short, "natural" (math-like) definition form
f(x,y) = 2x + 4y;
o = f(10,20);
println("f(x,y) test output = $o\n");
# We can write multiple functions with the same name (multiple dispatch):
# this 3-argument method coexists with the 2-argument one defined above
function printinput(a,b,c)
println("calling 'printinput' function with 3 inputs")
end
printinput(1,2,3)
# The *type* of the input selects the method as well (dispatch on type)
function printtype(a::Int)
println("\nprinttype function input is an Integer")
end
# Same name, different argument type -> a separate method
function printtype(a::Float64)
println("\nprinttype function input is a Float64")
end
printtype(1)    # dispatches to the ::Int method
printtype(1.0)  # dispatches to the ::Float64 method
# -
# ## Flow control<a name="flow"></a>
# Julia offers similar [flow control](https://en.wikibooks.org/wiki/Introducing_Julia/Controlling_the_flow) as other languages
# * `if`, `for` and `while` work similar to _Matlab_
# * No `switch` command
# +
# If / elseif / else example (no parentheses or `then` keyword needed)
a = 1;
if a == 1
println("'a' equals 1\n")
elseif a == 2
println("'a' equals 2\n")
else
println("'a' does not equal 1 or 2\n")
end
# One-line conditional assignment via the ternary operator  cond ? x : y
b = a == 1 ? "'b' equals 1" : "b does not equal 1";
println(b);
# For loops
vec = [1,2,3,4]
# Same as in Matlab
# for i = 1:length(vec)
#	println(vec[i]);
# end
# Same as above but without explicit indexing
println("\nTest for loop")
for i in vec
println(i);
end
# While loop counting down from 4
println("\nThe while loop")
i = 4;
while i > 0
println(i);
i = i - 1;
end
# -
# ## Scope<a name="scope"></a>
# The [scope](https://docs.julialang.org/en/release-0.4/manual/variables-and-scoping/#man-variables-and-scoping) of a variable is the region of code within which a variable is visible
# * All variables defined in a script (not inside a function) are **global** (no keyword is needed)
# * Functions can access and **modify** global variables!
# * `for`, `while`, `try` and `try-catch-finally` introduce new **local** scope
# * Variables with the same name in different scopes can be used simultaneously
#
# If you are switching from Matlab, make sure you understand the concept of Scopes in Julia, and the differences compared to Matlab/Octave
# +
# Lets have a look at global and local scopes
# Lets have a look at global and local scopes
workspace(); # clear the workspace first (Julia <= 0.6 only)
a = [1,2,3]; # test vector 'a'
b = [1,2,3]; # NOTE: `b = a` would NOT copy -- it would alias the same array!
# Functions can read global variables without any declaration
function scope1()
println("accessing 'a' inside a function scope1 = $a\n")
end
# call the function
scope1()
# Mutating *elements* of a global array works inside a function
function scope2()
a[1] = 10;    # element assignment mutates the global array 'a'
d_inside = 1; # local variable -- exists only inside scope2
end
# Modify global 'a' vector by running scope2 function!
scope2()
# 'd_inside' variable is not accessible outside the function!
println("'a' after calling scope2 = $a\n")
# Variables with same name in different scopes
function scope3()
b = [100,200]; # assignment creates a LOCAL 'b' -- the global 'b' is untouched!
b[1] = b[1]*2;
end
scope3()
println("'b' vector after calling scope3 = $b\n")
# IF does not introduce new scope
if b[1] == 1
c = 1;
end
println("New variable 'c' was created = $c\n")
# For loop, however, introduces a new scope
for i = 1:10
e_for = 1;
end
# 'e_for' (as well as 'i') do not exist outside of for loop (can be changed using `global` keyword)
# -
# ### Other important notes<a name="notes"></a>
# * Be careful when [copying](https://docs.julialang.org/en/release-0.4/stdlib/base/?highlight=deepcopy#Base.deepcopy) variables: Use either `copy` or `deepcopy` function
# * Heard of Pipelines before? It's awesome feature and you can use the principle in Julia!
workspace()
a = [1,2,3];
b = a; # 'b' is another name for the SAME array -- no copy is made
a[1] = 10;
println("Modifying 'a' affects also 'b'! See: b = $b\n")
# To avoid that use 'copy' function (shallow copy; use 'deepcopy' for nested data)
c = copy(a);
a[2] = 20;
println("'c' variable is now (almost fully) independent of 'a'. See: c = $c\n")
# pipeline: pass first input (2.123) to function sqrt, take the output and pass it to sine,
# and then take the result and use it as the second input to function round.
# The result is then stored in `a`
a = 2.123 |> sqrt |> sin |> x -> round(Int,x);
println("a = $a")
# ## Packages<a name="packages"></a>
# The Julia library can be easily extended via [Package](https://docs.julialang.org/en/stable/manual/packages/) installation
# * Julia provides "official" list of **tested** packages: pkg.julialang.org
# * These packages must be on GitHub >> requires Internet connection
# * It is possible to checkout different version of installed Packages
# * Packages can be updated directly from Julia shell
#
# Installed packages can be fully loaded to workspace or imported
# * Full name must be used when Package is imported
# * Be careful when loading (='using') multiple packages due to name-conflicts
#
# You can write your own packages = [modules](https://en.wikibooks.org/wiki/Introducing_Julia/Modules_and_packages#Structure_of_a_package)
# * load modules/functions using the 'include' command
# +
# Update installed packages (Julia <= 0.6 Pkg API; from 1.0 run `using Pkg` first)
Pkg.update()
# Install DataFrames package (fetched from GitHub -- requires Internet)
Pkg.add("DataFrames")
# Load package to the workspace (all exported names become visible)
using DataFrames
# Load package but use the full Pkg name to avoid name-conflict
# For example, Gadfly and PyPlot packages contain 'plot' function
import Gadfly
import PyPlot
# To call Gadfly plot:
Gadfly.plot(x=[1,2,3],y=[10,11,12])
# To call PyPlot plot:
PyPlot.plot([1,2,3],[10,11,12])
# Load your own package.
# WARNING: Windows users, use double backslash \\ or forward slash / in paths!
# include("f:/path/to/your/julia/Package/src/PackageName.jl")
# Or, add Path to Julia 'LOAD_PATH':
# push!(LOAD_PATH, "f:/path/to/your/julia/Package/src/src") # where is the source code?
# To ensure this path is loaded every time you start julia, copy it to:
# C:\Users\username\AppData\Local\Julia-0.6.0\etc\julia\juliarc.jl
| notebooks/.ipynb_checkpoints/JuliaIntro-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:minds] *
# language: python
# name: conda-env-minds-py
# ---
# # Stonks
#
# ### Dependencies and data
# +
# Dependencies
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# Config: ggplot-like styling and a wide default figure for time series
mpl.style.use('ggplot')
mpl.rcParams['figure.figsize'] = [16, 4]
# AAL data: parse the 'Date' column into datetime64 at load time
# (assumes the CSV has at least 'Date' and 'Close' columns -- confirm schema)
aal_df = pd.read_csv('data/nasdaq/AAL.csv', parse_dates=['Date'])
aal_df.head()
# Plot AAL closing price against row position, then relabel every 500th
# x-tick with the actual calendar date taken from the 'Date' column
aal_df['Close'].plot(title='AAL Daily Closing Price', ylabel='Price (USD)', xlabel='Date')
plt.xticks(ticks=range(0, aal_df.shape[0], 500),
           labels=aal_df.loc[range(0, aal_df.shape[0], 500), 'Date'].dt.date)
plt.show()
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DS104 Data Wrangling and Visualization : Lesson One Companion Notebook
# ### Table of Contents <a class="anchor" id="DS104L1_toc"></a>
#
# * [Table of Contents](#DS104L1_toc)
# * [Page 1 - Introduction ](#DS104L1_page_1)
# * [Page 2 - Adding Columns in R](#DS104L1_page_2)
# * [Page 3 - Adding Columns in Python](#DS104L1_page_3)
# * [Page 4 - Renaming Columns](#DS104L1_page_4)
# * [Page 5 - Renaming Columns in Python](#DS104L1_page_5)
# * [Page 6 - Combining Columns in R](#DS104L1_page_6)
# * [Page 7 - Separating Columns in R](#DS104L1_page_7)
# * [Page 8 - Combining Columns in Python](#DS104L1_page_8)
# * [Page 9 - Separating Columns in Python](#DS104L1_page_9)
# * [Page 10 - Subsetting Data in R](#DS104L1_page_10)
# * [Page 11 - Subsetting Data in Python](#DS104L1_page_11)
# * [Page 12 - Key Terms](#DS104L1_page_12)
# * [Page 13 - Hands On](#DS104L1_page_13)
# * [Page 14 - Hands On Practice - Solution](#DS104L1_page_14)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 1 - Introduction <a class="anchor" id="DS104L1_page_1"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
from IPython.display import VimeoVideo
# Tutorial Video Name: Manipulating Columns and Rows
# Embeds the lesson's video player (720x480) inline in the notebook output
VimeoVideo('241243071', width=720, height=480)
# # Introduction
#
# This lesson marks the start of your data wrangling journey. One of the best-kept secrets in data science is that you will spend most of your time wrangling the data into the right format, not actually running analyses. Prior to this, data has mostly been clean and ready for your immediate usage, so you'll now start to get a feel for the work that goes into preparing a dataset for analysis. You'll start with the basics of data manipulation - learning to play around with columns and rows.
#
# By the end of this lesson, you should be able to:
#
# * Add new columns
# * Rename columns
# * Split columns up
# * Combine columns together
# * Subset your data to select only some columns and/or rows
#
# This lesson will culminate with a hands on in which you will manipulate a dataset about fake news stories.
#
# <div class="panel panel-success">
# <div class="panel-heading">
# <h3 class="panel-title">Additional Info!</h3>
# </div>
# <div class="panel-body">
# <p>You may want to watch this <a href="https://vimeo.com/429857151"> recorded live workshop on the Python material in this lesson </a> or this <a href="https://vimeo.com/436301467"> recorded live workshop on the R material in this lesson </a> </p>
# </div>
# </div>
#
#
from IPython.display import VimeoVideo
# Recorded live workshop on the Python material in this lesson
VimeoVideo('429857151', width=720, height=480)
# Re-import is intentional: each notebook cell should run standalone
from IPython.display import VimeoVideo
# Recorded live workshop on the R material in this lesson
VimeoVideo('436301467', width=720, height=480)
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 2 - Adding Columns in R<a class="anchor" id="DS104L1_page_2"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# There will come a time in any data scientist’s life when you will need to add or remove columns and rows. You may also need to take what’s in one column and make it two, or combine two columns into one. The formal term for smooshing columns together is *concatenation*. You'll be playing around with data manipulation using **[this dataset](https://repo.exeterlms.com/documents/V2/DataScience/Data-Wrang-Visual/babies.zip)**.
#
# ---
# ## Adding Columns in R
#
# Adding in a new column in R is relatively easy. All that needs to happen is to specify the dataset and the name of the new column before the ```=``` , and then you can add anything you want into the column. In the example shown below, you are creating a new column named ```Footprint``` that is blank, because you have contained a space between the double quotes. However, you could instead add in any character string you wanted in the quotes or add a number (not in quotes). You could even conditionally format that column based on information contained in other columns, which is called *recoding*. You will learn how to recode soon.
#
# ```{r}
# babies$Footprint = " "
# ```
#
# Now it’s important to note that you cannot create a new column and a new dataset at the same time, so if you think you are doing anything you might need to revert later, it is a good idea to save it as a new data frame first, just in case.
#
# ---
#
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 3 - Adding Columns in Python<a class="anchor" id="DS104L1_page_3"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Adding Columns in Python
#
# For the next few lessons, you'll need the Python package `pandas`, so make sure you run the following code if you're following along:
#
# ```python
# import pandas as pd
# ```
#
# Adding columns in Python is also a snap. Simply call the data frame, and then place in square brackets the name of the new column, and provide the value after the equals sign, like so:
#
# ```python
# babies['Footprint'] = 'Y'
# ```
#
# You have now created a column in this data set that indicates whether or not a baby has had his or her footprint taken yet, and filled every instance of this column with the value ```Y``` standing for “yes.”
#
# ---
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 4 - Renaming Columns<a class="anchor" id="DS104L1_page_4"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Renaming Columns
#
# Column names can also be changed in both R and Python with little fuss. Although it’s always nice to be able to rename columns to something that is meaningful for you to work with, it becomes especially important if the source of your data allows spaces in the header row. For instance, in MS Excel, you are allowed to have headers at the top of your data with spaces in them. However, R and Python tend to throw up errors when you try to call any columns that have spaces embedded, and while R will default to removing spaces and instead placing a period as a separator in most cases, Python will not. So renaming columns becomes particularly essential then!
#
# <div class="panel panel-info">
# <div class="panel-heading">
# <h3 class="panel-title">Tip!</h3>
# </div>
# <div class="panel-body">
# <p>You may be thinking to yourself, "Isn't it so much easier to just remove the spaces in MS Excel then hard-coding it in R or Python?" The answer is probably "yes," but there will come a time when your data is so big that you can't even open MS Excel without it crashing. So it's imperative you know how to do it in another program as well.</p>
# </div>
# </div>
#
# ---
#
# ## Renaming Columns in R
#
# Below is the code to rename in R. First you call the ```names``` function, and then specify the dataset that you want to name. Then, in square brackets, you again specify ```names``` and the dataset, but in addition, place after the double equals sign the name of the original variable. Lastly, after the ```<-``` , you will place your new name for the column in double quotes.
#
# ```{r}
# names(babies)[names(babies) == "ParentPhoneNumber"] <- "Phone"
# ```
#
# The code above will use the ```names``` function to rename the column ```ParentPhoneNumber``` to ```Phone``` in the ```babies``` dataset.
#
# ---
#
# ## Choosing Column Names
#
# Make sure to choose column names with care. Good column names can make the data analysis process go much easier, since it is easy to tell what data is contained with them and it does not take long to reference the columns. Poor column name choices can increase the complexity of your data analysis by confusing yourself and others, both with a poor name/data fit and with an increased chance of making typos. A good column name will be short, succinct, and easily understood by someone who does not work with the data regularly. It will also be easily read – so make sure that column names with more than one word are either in *camel case* (EveryFirstLetterCapitalized), or have some other delineation like periods (periods.between.words) or underscores (underscore_between_words). Using some typical naming conventions will save you many headaches later on.
#
# ---
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 5 - Renaming Columns in Python<a class="anchor" id="DS104L1_page_5"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Renaming Columns in Python
#
# When compared to R, renaming columns in Python has slightly simpler syntax, using the ```.rename``` function. Specify the data frame, then use ```.rename```. The arguments you'll include are ```columns=```, which is where you'll put a key-value pair consisting of: ```{'OldValue' : 'NewValue'}```, and ```inplace=True```, which allows you to make this change permanently added to your data frame.
#
# ```python
# babies.rename(columns={'ParentPhoneNumber' : 'Phone'}, inplace=True)
# ```
#
# <div class="panel panel-danger">
# <div class="panel-heading">
# <h3 class="panel-title">Caution!</h3>
# </div>
# <div class="panel-body">
# <p>Python does not support periods between words, so this naming convention will not convert between programs!</p>
# </div>
# </div>
#
# ---
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 6 - Combining Columns in R<a class="anchor" id="DS104L1_page_6"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Combining Columns in R
#
# The opposite of splitting columns is being able to put those columns back together again. For instance, they may be stored in your database separately for ease of use and analysis (sorting by last name, anyone?), but when you actually want to provide a data printout to a customer, such as a list of names and email address to contact, you may want it in a format that is easier to read. In R, this function is called ```unite```, and it is contained within the `tidyr` package:
#
# ```{r}
# install.packages("tidyr")
# library("tidyr")
# ```
#
# The code is shown below, where you specify the name of the new dataset before the arrow, the call the unite function, type in the name of the original dataset, type in the name of the column you want to create that will contain the information from the current columns, and then specify the columns that you want to combine and how you want the data to be combined. Take a look:
#
# ```{r}
# babies2 <- unite(babies1, Address, StreetAddress, City, Zipcode, sep = "/")
# ```
#
# So this function above will create a new column named ```Address``` that will be made from the three columns ```StreetAddress```, ```City```, and ```Zipcode```. The argument ```sep=``` specifies how your data is broken up. The separator in this case is a forward slash.
#
# You can choose to mush everything together with no separators, but typically for readability you might want to add a space, comma, or other separator. Adding a separator also means that you can take them apart again easily later if you need to.
#
# It is very important to choose a separator that will not occur naturally in your data. For instance, if you look up to the previous figure in the ```StreetAddress``` column, you will find that dashes, periods, commas, and even hashtags all appear within that column. If you had chosen to use one of those separators rather than the front slash, it would mess with your data! R starts parsing columns as soon as it sees the separator, so if it comes earlier or later than expected, your columns won’t all have the same data in them. You will know you’ve made this mistake if you see this warning:
#
# 
#
# <div class="panel panel-danger">
# <div class="panel-heading">
# <h3 class="panel-title">Caution!</h3>
# </div>
# <div class="panel-body">
# <p>This problem will generate a warning, but will STILL RUN. So always make sure you examine your columns carefully to make sure things went as expected.</p>
# </div>
# </div>
#
# ---
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 7 - Separating Columns in R<a class="anchor" id="DS104L1_page_7"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Separating Columns in R
#
# Typically, if you are transferring data from a file into R, even if all the data is stored in one giant chunk, you can delimit it upon import. However, there may be times when you still need to break columns apart. The way you do this in R is with the ```separate()``` function, which is also part of the ```tidyr``` package. It allows you to go from data like this, where your ```City``` and ```Zipcode``` columns are all in one column…
#
# 
#
# To data like this, where you were able to split those columns apart.
#
# 
#
# Here are some common examples of when you might separate columns:
#
# * First and Last name are stored together, but you need to use them separately.
# * Addresses are stored as we would write them out, not separated into street address, city, state, and zipcode.
# * Inches and feet for height are stored together.
# * Month, Day, and Year are stored together, but you’d like to look at only month or year.
#
# Here is the code you will use to separate columns in R for the ```Address``` column:
#
# ```{r}
# babies1 <- separate(babies, Address, c("StreetAddress", "City", "Zipcode"), sep="/")
# ```
#
# After you call the ```separate()``` function, you will then put in the name of the current data set, followed by the name of the column you are breaking apart. You will place the names of the new columns you would like to create from the original column in a vector, denoted by the ```c()```. Each new column to be created should be in quotes and separated by a comma. Lastly, you will provide the argument ```sep=```, which is for specifying your separator, or the way in which you will break up the columns. This could be a character like a ```/``` or ```,```, or it could be blank in the quotes, indicating a space. Whenever R finds the thing placed within the quotes, it will make a new column, and those chunks of information will be placed, in order, into the new columns that you specified. In the case of the code above, you are splitting out the ```Address``` column into three columns, separating at the forward slash, and those three new columns will be named ```StreetAddress```, ```City```, and ```Zipcode```.
#
# ---
#
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 8 - Combining Columns in Python<a class="anchor" id="DS104L1_page_8"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Combining Columns in Python
#
# The Python syntax for combining columns reads much more like a sentence, which makes it rather user friendly. First, specify the name of the new data frame and column you'll create, then after the equals sign, place the name of the first column you would like to combine, the separator, and the remaining columns you would like to add. Adding the ```(str)``` at the end means that the variable will become a string (character) if it was not already.
#
# ```python
# babies['FullName'] = babies["Name"] + " " + babies["First"].map(str)
# ```
#
# Unlike R, Python not only creates a new column with the concatenation, but also leaves the old columns untouched, which is a nice feature.
#
# This function becomes slightly more lengthy if you are concatenating more than one column; however, it still reads and has the logic of a sentence:
#
# ```python
# babies['Address'] = babies["Street Address"] + " / " + babies["City"] + " / " + babies["Zipcode"].map(str)
# ```
#
# The above code combines three columns into the new column of ```Address```, and between each part, there is a separator of a forward slash surrounded by spaces.
#
# ---
#
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 9 - Separating Columns in Python <a class="anchor" id="DS104L1_page_9"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Separating Columns in Python
#
# Unfortunately, separating columns in Python can be a bit unwieldy. Although it is quite easy to actually separate them, the new separated columns are automatically placed inside a new dataframe instead of the one that contains the original column. This means that in addition to separating the column, you also need to add into your current data frame the new separated columns.
#
# ---
#
# ## Splitting the Columns
#
# The first step is to separate the column. As shown below, you will create a new data frame, which will then contain your columns after you’ve split them. You call the ```str.split()``` function, and then specify what your data is broken up by (your separator). In this case, it is a front slash. The ```expand=True``` argument is very important – it makes each separated section its own column in a data frame rather than just producing a list.
#
# ```python
# babies1 = babies['Address'].str.split('/', expand=True)
# ```
#
# This code separates the ```Address``` column based on the forward slash; however, you will notice that the columns are not labeled at all, just zero indexed.
#
# 
#
# ---
#
# ## Renaming the Columns
#
# The easy fix for that is to add in a ```.rename()``` function to the whole shebang like this:
#
# ```python
# babies2 = babies['Address'].str.split('/', expand=True).rename(columns = lambda x: "Address"+str(x+1))
# ```
#
# In the ```.rename()``` function, you are specifying that you want to rename columns with the argument ```columns=```, and then you'll use the ```lambda x``` function to say that you want the trunk of ```Address``` to be repeated every time, and then you’ll add numbers on. You can leave them like this, or rename the columns like you learned above – it’s a personal preference.
#
# 
#
# ---
#
# ## Adding the Columns Back In
#
# The next step is to add those columns back into your dataset. To do this, you will append your data side by side. In Python, the ```pandas``` package has a function called ```.concat()``` that will add in columns or rows. All you need to do is specify the names of the datasets that you want to place side by side, in the order in which you want to see them side by side, in the square brackets and then specify ```axis=1``` to tell Python that you are adding columns, not rows.
#
# ```python
# babies3 = pd.concat([babies, babies2], axis=1)
# ```
#
# 
#
# ---
#
#
#
#
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 10 - Subsetting Data in R<a class="anchor" id="DS104L1_page_10"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
# # Subsetting Data in R
#
# *Subsetting* is when you take a portion of your old data set and turn it into its own new dataset. It's also a way to drop columns or rows you don't need. When you subset, you can choose which columns and which rows you’d like to take with you into the new dataset.
#
# ---
#
# ## Subsetting Using Indexes
#
# In R, subsetting data is this easy:
#
# ```{r}
# babies6 <- babies[1:5, 1:3]
# ```
#
# This will keep only the first five rows of data and the first three columns. You will always specify the rows first, with the index of the starting row, followed by a colon and the index of the ending row. Then you'll add a column, and put in your information with the columns. The first number in the second set of brackets is the index of the starting column, and the second number is the index of your ending column.
#
# Said another way, the first set of numbers is for the rows you want to keep. The format is ```first : last```. The second set of numbers is for the columns you want to keep. Again, the format is ```first : last```.
#
# ---
#
# ## Subsetting Using Column Names
#
# Alternatively, when dealing with columns, you can specify the names of the columns you want to keep. This way, you don't need to worry about whether they are adjacent to each other or not. You'll create a new vector, in this case, called ```keeps```, that has the names of the columns you want to retain. Then, you'll apply that vector to your dataset, placing it in square brackets, like this:
#
# ```{r}
# keeps <- c("Name", "Birthday", "ParentEmail")
# babies7 <- babies[keeps]
# ```
#
# The resulting dataset looks like this:
#
# 
#
# ---
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 11 - Subsetting Data in Python<a class="anchor" id="DS104L1_page_11"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
# # Subsetting Data in Python
#
# You will now learn how to subset your data in Python.
#
# ---
#
# ## Subsetting Rows
#
# A very similar subset command can be done in Python to limit the number of rows you have. This particular code takes only the first three rows. Remember Python has zero indexing, so choosing three rows means that the index number will actually read ```2```.
#
# ```python
# babies7 = babies[:3]
# ```
#
# ---
#
# ## Keeping Columns
#
# For columns, you can select by column names, placing them in double square brackets:
#
# ```python
# babies8 = babies[['Name', 'Birthday', 'ParentEmail']]
# ```
#
# This new dataset, ```babies8```, will only be left with the columns of ```Name```, ```Birthday```, and ```ParentEmail```.
#
# ---
#
# ## Dropping Columns
#
# You can also just drop columns, if you are keeping most of the columns and only getting rid of one or two. In the code below, you will be dropping ```ParentPhone``` out of the ```babies``` dataset using the function ```.drop```:
#
# ```python
# babies.drop(['ParentPhone'], axis=1)
# ```
#
# As with other Python code, the ```axis=1``` argument tells Python to apply this information to the columns, not the rows.
#
# ---
#
# ## Summary
#
# In this lesson, you got started with the fundamentals of data wrangling! You learned how to manipulate columns and rows in both Python and R, and you should now be able to complete the following tasks:
#
# * Adding columns
# * Renaming columns
# * Combining columns
# * Separating columns
# * Subsetting columns and rows
#
# You will need these skills in order to tame your raw data and turn it into something useful!
#
# ---
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 12 - Key Terms<a class="anchor" id="DS104L1_page_12"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
# # Key Terms
#
# Below is a list and short description of the important keywords learned in this lesson. Please read through and go back and review any concepts you do not fully understand. Great Work!
#
# <table class="table table-striped">
# <tr>
# <th>Keyword</th>
# <th>Description</th>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>Concatenation</td>
# <td>Combining columns together.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>Separator</td>
# <td>A punctuation mark that can be used to break up your data into additional columns; often but not always a comma.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>Measures</td>
# <td>What Tableau calls continuous variables.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>Dimensions</td>
# <td>What Tableau calls categorical variables.</td>
# </tr>
# </table>
#
# ---
#
# ## Key R Code
#
# <table class="table table-striped">
# <tr>
# <th>Keyword</th>
# <th>Description</th>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>names</td>
# <td>A function used to rename columns.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>separate()</td>
# <td>A function in the tidyr library to split up columns by a separator.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>sep=</td>
# <td>An argument to separate() and unite() that specifies the separator within double quotes.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>unite()</td>
# <td>A function in the tidyr library to combine columns together.</td>
# </tr>
# </table>
#
# ---
#
# ## Key R Libraries
#
# <table class="table table-striped">
# <tr>
# <th>Keyword</th>
# <th>Description</th>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>tidyr</td>
# <td>A package used for data manipulation and wrangling.</td>
# </tr>
# </table>
#
#
#
# ---
#
# ## Key Python Code
#
# <table class="table table-striped">
# <tr>
# <th>Keyword</th>
# <th>Description</th>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>.rename()</td>
# <td>A function to rename columns.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>inplace=True</td>
# <td>An argument to .rename() that changes the names permanently.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>.str.split()</td>
# <td>A function that will split columns based on a separator.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>expand=True</td>
# <td>An argument for str.split() that will ensure each separated section becomes its own column, rather than just being part of a list.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>pd.concat()</td>
# <td>A function in pandas that adds two dataframes back together.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>.drop()</td>
# <td>A function where the specified columns are removed from the dataset.</td>
# </tr>
# </table>
#
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 13 - Hands On<a class="anchor" id="DS104L1_page_13"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
# # Jupyter Notebook and Pandas Hands-On
#
# This Hands-On will be graded. The best way to become a data scientist is to practice!
#
# <div class="panel panel-danger">
# <div class="panel-heading">
# <h3 class="panel-title">Caution!</h3>
# </div>
# <div class="panel-body">
# <p>Do not submit your project until you have completed all requirements, as you will not be able to resubmit.</p>
# </div>
# </div>
#
# You are working for an ecology company, and they have been tracking bison throughout North America. They've collected **[data on the location, number, genus, and species of bison](https://repo.exeterlms.com/documents/V2/DataScience/Data-Wrang-Visual/BisonTracking.zip)**. They'd like to know some basic information about the bison, to determine whether the species is still in danger or whether it is recovering.
#
# Please perform the following tasks:
#
# * Read in your data as a CSV file
# * Look at the first seven rows of your data
# * Look at the last ten rows of your data
# * Determine the number of rows and columns your dataset has
#
#
# And answer the following questions:
#
# * How many bison are of the species antiquus?
# * What is the mean and standard deviation of Length?
# * What is the median length of the bison?
#
# Please annotate your code with markdown to explain each step, then attach your ipynb or an HTML copy of your notebook here, so your work can be graded.
#
# <div class="panel panel-danger">
# <div class="panel-heading">
# <h3 class="panel-title">Caution!</h3>
# </div>
# <div class="panel-body">
# <p>Be sure to zip and submit your entire directory when finished!</p>
# </div>
# </div>
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 14 - Practice Hands on R Solution<a class="anchor" id="DS104L1_page_14"></a>
#
# [Back to Top](#DS104L1_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
# # Fake News Stories
#
# For your Lesson 1 Practice Hands-On, you will be completing the following requirements. This Hands-On will **not** be graded, but you are encouraged to complete it. The best way to become a great data scientist is to practice! Please submit your actual R script / Python notebook.
#
# <div class="panel panel-danger">
# <div class="panel-heading">
# <h3 class="panel-title">Caution!</h3>
# </div>
# <div class="panel-body">
# <p>Do not submit your project until you have completed all requirements, as you will not be able to resubmit.</p>
# </div>
# </div>
#
# ---
# ## Requirements
#
# Here is a dataset on fake news stories: **[Fake News Dataset](https://repo.exeterlms.com/documents/V2/DataScience/Data-Wrang-Visual/FakeNews.zip)**. You will be practicing your column and row manipulations with it.
#
# ---
# ### Part 1: Please complete the following tasks in R:
#
# 1. Add a column labeled ```StoryType``` and fill it with ```Fake```.
# 2. Remove the ```when``` column.
# 3. Separate the ```url``` column out so that you can see in one column the website and in the second column the domain. For example, if you have the following in ```url```, it should be broken out like this:
#
# http://wayback.archive.org/web/20161004072420id_/http://alertchild.com/
#
# Website: http://wayback.archive.org/web/20161004072420id
# Domain: /http://alertchild.com/
#
# 4. Put back together the domain column.
# 5. Keep only the first ten rows of the data.
#
# ---
# ### Part 2: Please complete the same list of tasks above in Python.
#
# <div class="panel panel-danger">
# <div class="panel-heading">
# <h3 class="panel-title">Caution!</h3>
# </div>
# <div class="panel-body">
# <p>Be sure to zip and submit your entire directory when finished!</p>
# </div>
# </div>
#
#
#
#
#
#
# # Part 1 Solution in R
#
# To add a column labeled ```StoryType``` and fill it with ```Fake```:
#
# ```{r}
# FakeNews$StoryType = "Fake"
# ```
#
# To remove the "when" column:
#
# ```{r}
# FakeNews1 <- FakeNews[, 2:4]
# ```
#
# To separate the URL column so you can see the website in one column and the domain in the other:
#
# ```{r}
# library("tidyr")
# FakeNews2 <- separate(FakeNews1, url, c("Website", "Domain"), sep="_")
# ```
#
# To put back together the domain column you broke apart:
#
# ```{r}
# FakeNews3 <- unite(FakeNews2, FullSiteName, Website, Domain, sep = "_")
# ```
#
# To keep only the first ten rows of data:
#
# ```{r}
# FakeNews4 <- FakeNews3[1:10,]
# ```
#
# ---
# ## Part 2 Solution in Python
#
# ```python
# import pandas as pd
#
# FakeNews = pd.read_excel('C:/Users/meredith.dodd/Documents/New Curriculum/104 L1/FakeNews.xlsx')
# FakeNews.head()
#
# #Add a column labeled StoryType and fill it with Fake
#
# FakeNews['StoryType'] = "Fake"
# FakeNews.head()
#
# #Remove the when column
#
# FakeNews.drop(['when'], axis=1, inplace=True)
# FakeNews.head()
#
# #Separate the URL column into Website and Domain
#
# FakeNews1 = FakeNews['url'].str.split('_', expand=True).rename(columns = lambda x: "URL"+str(x+1))
# FakeNews1.head()
#
# FakeNews2 = pd.concat([FakeNews, FakeNews1], axis=1)
# FakeNews2.head()
#
# FakeNews2.drop(['url'], axis=1, inplace=True)
# FakeNews2.head()
#
# #Put back together the domain column
#
# FakeNews2['url'] = FakeNews2["URL1"] + "_" + FakeNews2["URL2"].map(str)
# FakeNews2.head()
#
# FakeNews2.drop(['URL1', 'URL2'], axis=1, inplace=True)
# FakeNews2.head()
#
# #Keep only the first ten rows of the data
# FakeNews3 = FakeNews2[:10]
# FakeNews3
# ```
#
#
| Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/RECAP_DS/03_DATA_WRANGLING_AND_VISUALISATION/L01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import IBMQ, Aer, execute
from qiskit.tools.visualization import plot_histogram
# +
## oracle_initialize_part
def OR(qubit_1, qubit_2, k):
    # enter qubit numbers here
    """Classical OR of qubits `qubit_1` and `qubit_2`, XOR-ed into qubit `k`.

    Uses De Morgan's law: OR(a, b) = NOT(AND(NOT a, NOT b)) — X gates on
    both inputs, a Toffoli onto `k`, then an X on `k`. Operates on the
    module-level register `q` / circuit `qc`. The result equals the OR
    exactly when `k` starts in |0>; the input qubits are restored to their
    original state afterwards.
    """
    # qc.barrier(q)
    qc.x(q[qubit_1])
    qc.x(q[qubit_2])
    # qc.barrier(q)
    qc.ccx(q[qubit_1], q[qubit_2], q[k])
    qc.x(q[k])
    # qc.barrier(q)
    qc.x(q[qubit_1])
    qc.x(q[qubit_2])
    # qc.barrier(q)
def are_not_equal(a_0, b_0, k):
    # enter node numbers here. For example, a is node 0, b is node 1 and c is node 2
    """XOR into qubit `k` whether nodes `a_0` and `b_0` have different colours.

    Each node i is encoded in the two qubits (2*i, 2*i+1). The CNOTs XOR
    node a's bits onto node b's qubits; the OR of those XOR bits (non-zero
    iff the colours differ) is written to qubit `k`; the trailing CNOTs
    uncompute node b's qubits back to their original values. Node and
    qubit numbering both start from 0, as in the problem statement.
    """
    # qc.barrier(q)
    qc.cx(q[2*a_0], q[2*b_0])
    qc.cx(q[(2*a_0) + 1], q[(2*b_0) + 1])
    OR(2*b_0, (2*b_0)+1, k)
    qc.cx(q[2*a_0], q[2*b_0])
    qc.cx(q[(2*a_0) + 1], q[(2*b_0) + 1])
    # qc.barrier(q)
def is_not_3(a, k):
    """Set qubit `k` to 1 iff node `a`'s two-qubit value is not 3 (i.e. not |11>).

    CCX ANDs the node's two bits into `k`, the X inverts the flag; exact
    when `k` starts in |0>. Applied a second time with the same arguments
    it uncomputes (X and CCX acting on the same target commute).
    """
    qc.ccx(q[2*a], q[(2*a)+1], q[k])
    qc.x(q[k])
def initialize_oracle_part(n):
    """Compute the colouring-validity flag into qubit 10.

    A colouring of the three free nodes (qubit pairs 0-1, 2-3, 4-5) is
    valid when the nodes are pairwise different AND none of them carries
    the unused colour 3. The pairwise checks go to qubits 6-8, the
    "not colour 3" checks to qubits 11-13, and a multi-controlled Toffoli
    ANDs all six flags into qubit 10 before the flags are uncomputed.

    The main script calls this once to compute and once more to undo the
    oracle bookkeeping (each helper is its own inverse). `n` is unused and
    kept only for backward compatibility with existing callers.
    """
    are_not_equal(0, 1, 6)   # nodes a and b differ
    are_not_equal(0, 2, 7)   # nodes a and c differ
    are_not_equal(1, 2, 8)   # nodes b and c differ
    is_not_3(0, 11)
    is_not_3(1, 12)
    is_not_3(2, 13)
    # Answer is stored in qubit 10. Qubit 9 must stay clean — it is used as
    # an ancilla here, together with qubits 14-16.
    qc.mct([q[6], q[7], q[8], q[11], q[12], q[13]], q[10], [q[9], q[14], q[15], q[16]])
    # Uncompute the intermediate flag qubits.
    is_not_3(0, 11)
    is_not_3(1, 12)
    is_not_3(2, 13)
    are_not_equal(0, 1, 6)
    are_not_equal(0, 2, 7)
    are_not_equal(1, 2, 8)
# +
## distance_black_box
# Edge lengths of the 4-node graph. Keys are two-character strings "uv"
# naming an edge's endpoint nodes (node 3 is the fixed start node handled
# by `dist_single`); values are the corresponding distances.
distances = {
    "32": 3,
    "31": 2,
    "30": 4,
    "21": 7,
    "20": 6,
    "10": 5,
}
def dist_single():
    """Build the distance oracle for edges incident to the start node 3.

    Returns a sub-circuit over a 2-qubit node register and a 5-qubit
    target register: when the node register holds the binary index of a
    node adjacent to node 3, that edge's distance bits are XOR-ed into
    the target register via Toffoli gates. X gates select the matching
    bit pattern and are undone afterwards.
    """
    node_reg = QuantumRegister(2)
    target_reg = QuantumRegister(5)
    circuit = QuantumCircuit(node_reg, target_reg, name='dist_single')
    for edge, weight in distances.items():
        if edge[0] != '3':
            continue
        other_bits = format(int(edge[1]), '02b')
        weight_bits = format(weight, '02b')
        # Positions where the node index has a 0 bit: flip so the CCX
        # controls fire only for this node's pattern.
        zero_positions = [i for i, bit in enumerate(other_bits) if bit == '0']
        for i in zero_positions:
            circuit.x(node_reg[i])
        for i, bit in enumerate(weight_bits):
            if bit == '1':
                circuit.ccx(node_reg[0], node_reg[1], target_reg[i])
        for i in zero_positions:
            circuit.x(node_reg[i])
    return circuit
def dist():
    """Build the distance oracle for edges between nodes 0-2.

    Registers: two 2-qubit node registers (binary node indices), a 5-qubit
    target register, and 2 ancillas for the multi-controlled Toffolis.
    For every edge not touching node 3, when the node registers hold that
    edge's endpoints, the edge's distance bits are XOR-ed into the target
    register. X gates select the matching bit pattern and are undone after.
    """
    qr1 = QuantumRegister(2)
    qr2 = QuantumRegister(2)
    qr_target = QuantumRegister(5)
    qr_anc = QuantumRegister(2)
    qc = QuantumCircuit(qr1, qr2, qr_target, qr_anc, name='dist')
    for edge in distances:
        if edge[0] != '3':
            # convert to binaries
            node1 = format(int(edge[0]), '02b')
            node2 = format(int(edge[1]), '02b')
            d_bin = format(distances[edge], '02b')
            for idx in range(len(node1)): # assume node1 and node2 have the same length
                if node1[idx] == '0':
                    qc.x(qr1[idx])
            for idx in range(len(node2)):
                if node2[idx] == '0':
                    qc.x(qr2[idx])
            for idx in range(len(d_bin)):
                if d_bin[idx] == '1':
                    qc.mct(qr1[:]+qr2[:], qr_target[idx], qr_anc)
            for idx in range(len(node2)): # invert back
                if node2[idx] == '0':
                    qc.x(qr2[idx])
            for idx in range(len(node1)):
                if node1[idx] == '0':
                    qc.x(qr1[idx])
    return qc
# +
## multi_adder_1
def maj(a, b, k):
    """MAJ building block of a ripple-carry adder (cf. Cuccaro et al.).

    `a`, `b`, `k` are indices into the global register `q`; after the
    gates, qubit `k` holds the majority (carry) of the three input bits.
    Undone by `unmaj` with the same arguments.
    """
    qc.cx(q[k], q[b])
    qc.cx(q[k], q[a])
    qc.ccx(q[a], q[b], q[k])
def unmaj(a, b, k):
    """UMA ("UnMajority and Add") step: reverses `maj` and leaves the sum
    bit on qubit `b` (ripple-carry adder companion to `maj`)."""
    qc.ccx(q[a], q[b], q[k])
    qc.cx(q[k], q[a])
    qc.cx(q[a], q[b])
def multiple_adder(a, b, c_0, z):
    """Ripple-carry addition over qubit-index lists `a` and `b` (LSB first).

    `c_0` is the carry-in qubit and `z` receives the carry-out (by XOR).
    As used by the calling script, the sum is accumulated in `b` while
    `a` and `c_0` are restored — the standard MAJ/UMA construction.
    """
    arr_size = len(a)
    maj(c_0, b[0], a[0])
    for i in range(arr_size-1):
        maj(a[i], b[i+1], a[i+1])
    qc.cx(q[a[arr_size-1]], q[z])
    for i in reversed(range(arr_size-1)):
        unmaj(a[i], b[i+1], a[i+1])
    unmaj(c_0, b[0], a[0])
# -
## diffusion
def diffusion():
    """Grover diffusion (inversion about the mean) on input qubits 0-5.

    Standard H-X / multi-controlled-Z / X-H sandwich; the controlled-Z is
    realised as an H on qubit 5 around an MCT with ancillas 7-9.
    """
    qc.h(q[0:6])
    qc.x(q[0:6])
    qc.h(q[5])
    qc.barrier()
    qc.mct(q[0:5], q[5], q[7:10])
    qc.barrier()
    qc.h(q[5])
    qc.x(q[0:6])
    qc.h(q[0:6])
# +
qubit_num = 25 # max is 32 if you're using the simulator
# Ancilla indices
# Qubit-layout bookkeeping: each list names the qubit indices of one
# logical register. Each name is then collapsed to its first index and
# used as a base offset into the flat register `q`.
inputs = [0, 1, 2, 3, 4, 5]
init_ancillae = [6, 7, 8, 9]
valid = [10]
temp_dist = [11, 12, 13, 14, 15]
total_dist = [16, 17, 18, 19, 20]
gate_ancillae = [21, 22, 23]
check_dist = [11, 12, 13, 14, 15] # initialize 13 here
carry_check = [24]
inputs = inputs[0]
init_ancillae = init_ancillae[0]
valid = valid[0]
temp_dist = temp_dist[0]
total_dist = total_dist[0]
gate_ancillae = gate_ancillae[0]
check_dist = check_dist[0]
carry_check = carry_check[0]
q = QuantumRegister(qubit_num)
c = ClassicalRegister(6)
qc = QuantumCircuit(q, c)
# Uniform superposition over the 6 input qubits; carry_check starts in |1>.
qc.h(q[0:6])
qc.x(q[carry_check])
# forward oracle
# Validity flag (qubit 10), then three edge-distance lookups each added
# into total_dist (16-20) and uncomputed from temp_dist (11-15).
initialize_oracle_part(4)
qc.append(dist_single(), q[inputs:inputs+2] + q[temp_dist:temp_dist+5])
multiple_adder([11, 12, 13, 14], [16, 17, 18, 19], init_ancillae, 20)
qc.append(dist_single().inverse(), q[inputs:inputs+2] + q[temp_dist:temp_dist+5])
qc.append(dist(), q[inputs:inputs+4] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
multiple_adder([11, 12, 13, 14], [16, 17, 18, 19], init_ancillae, 20)
qc.append(dist().inverse(), q[inputs:inputs+4] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
qc.append(dist(), q[inputs+2:inputs+6] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
multiple_adder([11, 12, 13, 14], [16, 17, 18, 19], init_ancillae, 20)
qc.append(dist().inverse(), q[inputs+2:inputs+6] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
qc.x(q[check_dist:check_dist+3]) # init 15
# Threshold comparison via an adder whose carry-out lands on carry_check.
multiple_adder([11, 12, 13, 14, 15], [16, 17, 18, 19, 20], init_ancillae, carry_check)
# carry_check
# qc.barrier()
# Phase flip only when the colouring is valid AND the distance check passes.
qc.cz(q[valid], q[carry_check])
# qc.barrier()
# inverse oracle
# Mirror image of the forward oracle to restore all work qubits.
multiple_adder([11, 12, 13, 14, 15], [16, 17, 18, 19, 20], init_ancillae, carry_check)
qc.x(q[check_dist:check_dist+3]) # init 15
qc.append(dist().inverse(), q[inputs+2:inputs+6] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
multiple_adder([11, 12, 13, 14], [16, 17, 18, 19], init_ancillae, 20)
qc.append(dist(), q[inputs+2:inputs+6] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
qc.append(dist().inverse(), q[inputs:inputs+4] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
multiple_adder([11, 12, 13, 14], [16, 17, 18, 19], init_ancillae, 20)
qc.append(dist(), q[inputs:inputs+4] + q[temp_dist:temp_dist+5] + q[gate_ancillae:gate_ancillae+2])
qc.append(dist_single().inverse(), q[inputs:inputs+2] + q[temp_dist:temp_dist+5])
multiple_adder([11, 12, 13, 14], [16, 17, 18, 19], init_ancillae, 20)
qc.append(dist_single(), q[inputs:inputs+2] + q[temp_dist:temp_dist+5])
initialize_oracle_part(4)
diffusion()
qc.measure(q[:6], c)
# qc.draw()
# +
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
# Unroll to the {u3, cx} basis to count elementary gates (cost estimate).
pass_ = Unroller(['u3', 'cx'])
pm = PassManager(pass_)
new_circuit = pm.run(qc)
print(new_circuit.count_ops())
# -
# Simulate and show the 20 most frequent measured bitstrings; the
# Grover-amplified solutions should dominate.
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend, shots=1024)
counts = job.result().get_counts()
print(sorted(counts.items(), key=lambda x:x[1], reverse=True)[0:20])
plot_histogram(counts)
| final_circuit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Info
# This file trains credibility classifier based on the NIST assessors credibility judgments to be used to improve BM25 results in credibility and correctness. To this end,
#
# - we split the 50 topics into 10 validation sets of 5 topics and trained classifiers on the remaining 45 topics.
# - this ensured the classifier is alien to the documents discussing validation topics
# - and it predicts the documents' credibility.
#
# The inputs
#
# - BM25 run: Our BM25 run computed using Anserini
# - 10fold_groups.txt: the 10 validation sets of 5 topics
# - qrels_correctness.txt shared by NIST
# - infected.txt to filter out malicious documents.
#
# Outputs:
#
# - 10 logistic regression classifiers
# +
import pandas as pd
import os,sys,re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from collections import Counter
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from joblib import dump, load
# -
# List of documents flagged as malicious; these are filtered everywhere.
infecteds = pd.read_csv('/home/ludwig/Documents/DecisionRUN/trec/infected.txt', header=None, sep = ' ')
infecteds.columns = ['DOCID']
# +
# BM25 run produced with Anserini, minus the infected documents.
bm25run = pd.read_csv('treceval/UWatMDS_BM25.txt', header=None, sep = ' ')
bm25run.columns = ['TID','QID','DOCID','REL','COR','CRE']
bm25run = bm25run[~bm25run.DOCID.isin(infecteds.DOCID)]
# NIST credibility/correctness qrels; keep only rows with a binary
# credibility label (CRE in {0, 1}).
qrels = pd.read_csv('qrels_correctness.txt', header=None, sep = ' ')
qrels.columns = ['TID','QID','DOCID','REL','COR','CRE']
qrels = qrels[~qrels.DOCID.isin(infecteds.DOCID)]
qrels = qrels[qrels.CRE.isin([0,1])]
qrels.head()
# -
# ## Classification
qrels.shape
DOCS_DIR = '/media/ludwig/story/DecisionRunDocs/trec_decision_parts/trec_decision_docs/'
SAVE_DIR = 'model/'

# Read the raw text of every judged document. On a read failure a
# placeholder is appended instead, so `docs` stays aligned with the
# de-duplicated DOCID list.
counter = 1
docs = []
for docname in qrels['DOCID'].drop_duplicates():
    try:
        with open(DOCS_DIR + docname) as fh:
            docs.append(fh.read())
    # Was a bare `except:` — only catch the errors reading can raise.
    except (OSError, UnicodeDecodeError):
        docs.append('!DOCTYPE')
        # Report the failing document, not the loop counter.
        print('there is a problem with %s' % docname)
    if counter % 1000 == 0:
        print(counter)
    counter += 1
# Map each unique DOCID to its raw text, then join the text back onto the
# per-topic qrels rows (one row per (topic, document) pair).
mapper = pd.DataFrame(qrels.DOCID.drop_duplicates())
mapper['DOCS'] = docs
mapper.head()
qrels = qrels.merge(mapper, on = 'DOCID', how = 'left')
qrels.head()
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from joblib import dump, load
# ## Kfold
# Exploratory cell: inspect one validation fold (group 7) and fit a single
# character-4-gram logistic-regression pipeline on its training topics.
topics = sorted(qrels['TID'].unique())
splts = pd.read_csv('10fold_groups.txt', header=None)
splts.shape
splts[splts[0] == 1][1].values
counter = 7
splts[splts[0] == 7]
train_index = splts[splts[0] != 7][1].values
test_index = splts[splts[0] == 7][1].values
test_index
test_topics = test_index
train_topics = train_index
train, test = qrels[~qrels['TID'].isin(test_topics)], qrels[qrels['TID'].isin(test_topics)]
# Drop training docs that also appear under held-out topics (no leakage).
train = train[~train['DOCID'].isin(test['DOCID'])]
X_train = train['DOCS'].values.tolist()
X_test = test['DOCS'].values.tolist()
y_train, y_test = train['CRE'], test['CRE']
# (The original cell built the vectorizer/pipeline twice and assigned
# train/test twice; the dead duplicates are removed here.)
vect = CountVectorizer(ngram_range=(4,4), analyzer='char', binary=True)
logreg = LogisticRegression()
pline = Pipeline([('vectorizer', vect), ('logreg', logreg)])
pline.fit(X_train, y_train)
# accs = []
# Train one credibility classifier per fold: a character-4-gram logistic
# regression fit on the 45 training topics, saved with joblib.
counter = 1
for gr in splts[0].unique():
    train_index = splts[splts[0] != gr][1].values
    test_index = splts[splts[0] == gr][1].values
    test_topics = test_index
    train_topics = train_index
    train, test = qrels[~qrels['TID'].isin(test_topics)], qrels[qrels['TID'].isin(test_topics)]
    print(train.TID.unique())
    print(test.TID.unique())
    # Drop training rows whose document also appears under a held-out
    # topic, so the classifier never sees a validation document.
    train = train[~train['DOCID'].isin(test['DOCID'])]
    X_train = train['DOCS'].values.tolist()
    X_test = test['DOCS'].values.tolist()
    y_train, y_test = train['CRE'], test['CRE']
    print('# of datapoint is %s' % len(X_train))
    # (The original built an identical vectorizer/pipeline twice per
    # iteration; the unused first construction is removed.)
    vect = CountVectorizer(ngram_range=(4,4), analyzer='char', binary=True)
    logreg = LogisticRegression()
    pline = Pipeline([('vectorizer', vect), ('logreg', logreg)])
    pline.fit(X_train, y_train)
    dump(pline, 'LOGREG_10fold_v2_%s.joblib' % counter)
    counter += 1
# y_pred_class = pline.predict(X_test)
# print(metrics.accuracy_score(y_test, y_pred_class))
# accs.append(metrics.accuracy_score(y_test, y_pred_class))
| classifier/.ipynb_checkpoints/TREC-Kfold_Train-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Diffusion Limited Aggregation**
# 
#
# Diffusion limited aggregation is a process in which randomly walking particles come close enough to the already existing particles in the region that they stick (*Stickiness*: a hyperparameter that can be varied). These particles follow a random motion, called Brownian Motion.
#
# For the purpose of simulation, the surface is considered to be a canvas of MxM dimensions [a matrix], initialised to all zeros except one cell — the centre-most point in the plane. That is the seed particle with which the aggregation begins. Particles spawning at random on the boundary start a random walk across the canvas, and when one comes into the proximity of an existing particle in the aggregation it gets stuck there, becoming part of the aggregation. Then a new particle spawns and follows the same procedure until it finds its place in the aggregation, and the process goes on till every particle has found its place in the aggregation.
# # Imports
# +
# General
import numpy as np
import random
from IPython.display import clear_output
from tqdm import tqdm_notebook
# (Removed a stray bare `tqdm_notebook()` call here: with no iterable it
# only created an empty, never-updated progress-bar widget.)
import time
import matplotlib as mpl
from datetime import datetime
import pandas as pd
import os
# Visualisation
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid')
# Algorithms
from scipy.optimize import curve_fit
import statsmodels.api as sm
# -
# # Paths
# +
# General
root_path = '../'
data_path = 'Data/'
# Prepared Data
prepared_data_path = 'Prepared Data/'
plotting_path = root_path+'Plots/'
# Per-particle timestamps recording when each particle joined the
# aggregate (filled in by DLA.initiate).
aggregateTS = pd.DataFrame(columns=['Timestamp'])
plt.rcParams['figure.dpi'] = 180
plt.rcParams['figure.figsize'] = (25,15)
# -
# # DLA Simulations
# + code_folding=[]
class DLA:
    """Diffusion-limited aggregation on a square canvas.

    The canvas is a shape x shape 0/1 matrix seeded with one particle at
    the centre. `initiate` spawns particles on the boundary and random-
    walks them until they stick to the aggregate, snapshotting the canvas
    after each particle and timestamping it in the module-level
    `aggregateTS` frame.
    """
    def __init__(self):
        # Default configuration: 100x100 canvas, 2000 particles.
        self.instantiate(100, 2000, mode='Default')
    def instantiate(self, _shape, _particles, mode='Custom', verbose=False):
        """(Re)build the canvas and the spawn-point bookkeeping.

        _shape: side length of the square canvas. _particles: number of
        particles to release. `boundary_points` lists every edge cell
        (particles spawn there); `cache` collects one canvas snapshot per
        released particle.
        """
        self.canvas_mode = mode
        self.shape = _shape
        self.particles = _particles
        self.canvas = np.zeros([self.shape, self.shape])
        # Seed particle at the centre of the canvas.
        self.canvas[int(self.shape/2)][int(self.shape/2)] = 1
        # length : 4(n-1)
        self.boundary_points = [(w, h) for w in [0, self.shape-1] for h in range(0, self.shape)]
        self.boundary_points += [(h, w) for w in [0, self.shape-1] for h in range(0, self.shape)]
        # De-duplicate the four corners, then keep as mutable [row, col] pairs.
        self.boundary_points = set(self.boundary_points)
        self.boundary_points = [list(e) for e in self.boundary_points]
        self.cache = []
        if verbose:
            print('Number of Spawning points for a player : ', len(self.boundary_points))
    def spawnParticle(self):
        """Pick a random unoccupied boundary cell; return -999 if none remain."""
        if self.boundary_points == []:
            spawnIdx = -999
            return spawnIdx
        spawnIdx = random.choice(self.boundary_points)
        # Re-draw until the chosen boundary cell is empty.
        # NOTE(review): loops forever if every boundary cell is occupied
        # while boundary_points is non-empty — acceptable for these runs.
        while self.canvas[spawnIdx[0], spawnIdx[1]] != 0:
            spawnIdx = random.choice(self.boundary_points)
        return spawnIdx
    def checkState(self, idx, _stck):
        """Decide the particle's next step from cell `idx`.

        Returns (True, next_cell) while the walk continues, or
        (False, [-999, -999]) when the particle sticks: an occupied
        neighbour exists and a Bernoulli(_stck) draw succeeds, or the
        particle is completely boxed in.
        """
        # All 8-connected neighbours inside the canvas (excluding idx itself).
        allProximities = set([(idx[0]+i, idx[1]+e) for i in [0, 1, -1] for e in [0, 1, -1]])
        allProximities = [list(e) for e in allProximities if e != tuple(
            idx) if (0 <= e[0] < self.shape) and (0 <= e[1] < self.shape)]
        allVacantProximities = [e for e in allProximities if self.canvas[e[0]][e[1]] == 0]
        if sum([self.canvas[e[0]][e[1]] for e in allProximities]) == 0:
            # No occupied neighbour: keep walking to a random adjacent cell.
            return True, random.choice(allProximities)
        else:
            if random.random() < _stck:
                # Stick next to the aggregate.
                return False, [-999, -999]
            else:
                if allVacantProximities != []:
                    return True, random.choice(allVacantProximities)
                else:
                    print('Nowhere to go!')
                    return False, [-999, -999]
    def initiate(self, stickiness=1):
        """Run the simulation; return the list of per-particle canvas snapshots."""
        for eachParticles in tqdm_notebook(range(self.particles)):
            inMotion = True
            spawnP_Idx = self.spawnParticle()
            if spawnP_Idx == -999:
                print('No boundary point left. Terminating!')
                break
            self.canvas[spawnP_Idx[0], spawnP_Idx[1]] = 1
            while inMotion:
                inMotion, nextMove = self.checkState(spawnP_Idx, stickiness)
                if inMotion:
                    # Move: clear the old cell, occupy the new one.
                    self.canvas[spawnP_Idx[0], spawnP_Idx[1]] = 0
                    self.canvas[nextMove[0], nextMove[1]] = 1
                    spawnP_Idx = nextMove
                else:
                    # Stuck: a particle frozen on the rim removes that
                    # spawn point from the pool.
                    if spawnP_Idx in self.boundary_points:
                        self.boundary_points.remove(spawnP_Idx)
                        print(len(self.boundary_points))
                    break
            t = self.canvas.copy()
            aggregateTS.loc[eachParticles, 'Timestamp'] = datetime.now()
            self.cache.append(t)
        return self.cache
# +
# Run a 200x200 canvas with 4000 particles at the default stickiness (1).
dlaSim = DLA()
p = 4000
s = 200
dlaSim.instantiate(s, p, verbose=True)
CACHE = dlaSim.initiate()
# + [markdown] heading_collapsed=true
# # Simulation Animation
# + hidden=true
import matplotlib.animation as anim
class AnimatedGif:
    """Collect matplotlib image frames and export them as an animated GIF."""

    def __init__(self, size=(680, 520)):
        """Create a borderless, tick-free figure sized `size` in pixels (at 100 dpi)."""
        width_px, height_px = size
        self.fig = plt.figure()
        self.fig.set_size_inches(width_px / 100, height_px / 100)
        axes = self.fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
        axes.set_xticks([])
        axes.set_yticks([])
        self.images = []

    def add(self, image, label='', _stck=-999):
        """Append one frame: the canvas image plus a frame counter and the
        stickiness annotation (both drawn in red)."""
        frame = plt.imshow(image, vmin=0, cmap='copper', vmax=1, animated=True)
        counter_label = int(int(label)/100 * 100)
        if counter_label == 0:
            counter_label = ''
        counter_artist = plt.text(-30, 10, counter_label, fontsize=12, color='red')
        stickiness_artist = plt.text(210, 10, 'Stickiness : {0}'.format(_stck), fontsize=12, color='red')
        self.images.append([frame, counter_artist, stickiness_artist])

    def save(self, filename):
        """Render the collected frames to `filename` via imagemagick at 200 fps."""
        animation = anim.ArtistAnimation(self.fig, self.images)
        animation.save(filename, writer='imagemagick', fps=200)
# + hidden=true
# Render every cached canvas snapshot into an animated GIF, annotated with
# the frame number and the stickiness used for this run.
m = 580
n = 320
animated_gif = AnimatedGif(size=(m, n))
images = []
for i in tqdm_notebook(range(len(CACHE))):
    animated_gif.add(CACHE[i], label=str(i), _stck = '1.0')
animated_gif.save('dla-animated_P4000_C200_S1_fps200.gif')
# -
# # DLA Time Dependence
# +
# Aggregate size vs. wall-clock time: the row index is the particle count
# and the Timestamp column records when each particle attached.
fig = aggregateTS.reset_index().set_index('Timestamp').plot()
_=fig.set_xlabel('Timestamp', fontsize=25)
_=fig.set_ylabel('#Particles', fontsize=25)
_=fig.set_title('DLA aggregate growth vs Time', fontsize=40)
plt.savefig(root_path+'Material/dlaTime.jpg', bbox_inches='tight')
# -
# # Random Testing Space
# Scratch cells: introspect the Axes object and a sample time delta.
[k for k in fig.__dir__() if 'legend' in k]
baseTS = aggregateTS.iloc[0,0]
(aggregateTS.iloc[300,0] - baseTS).microseconds
| Diffusion Limited Aggregation/DLA-Simulator/DLA-Simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # License
# ***
# Copyright (C) 2017 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ***
# # Simple oversampling - Pandas and imbalanced-learn
# ## Imports
# +
import pandas as pd # pandas for handling mixed data sets
import numpy as np # numpy for basic math and matrix operations
# imbalanced-learn for oversampling
from imblearn.over_sampling import RandomOverSampler
# -
# ## Proportional oversampling
# #### Create a sample data set
# Toy data set: x = 0..9, y is a rare binary event (two 1s in ten rows),
# i.e. a deliberately imbalanced target.
scratch_df = pd.DataFrame({'x': pd.Series(np.arange(0, 10)),
                           'y': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0]})
scratch_df
# If the event in a classification problem or the value in a prediction problem is imbalanced (usually toward zero) this can lead to biased models, single class predictions for classification models, and biased predictions for prediction models. The simplest approach for an imbalanced target is to *oversample* the data set.
# +
# fit random oversampling function
# cannot pass single array for X, must use numpy.reshape(-1, 1)
ros = RandomOverSampler()
# `fit_resample` replaces the long-deprecated `fit_sample`; `.to_numpy()`
# replaces the DataFrame/Series `.get_values()` removed in pandas 1.0.
over_sample_x, over_sample_y = ros.fit_resample(scratch_df.x.to_numpy().reshape(-1, 1),
                                                scratch_df.y)

# create Pandas dataframe from oversampling results
# ravel() instead of a hard-coded reshape(16,): the resampled length
# depends on the class balance of the input, so do not assume 16 rows.
over_sample_df = pd.DataFrame({'over_sample_x': over_sample_x.ravel(),
                               'over_sample_y': over_sample_y})
over_sample_df
| 02_analytical_data_prep/src/py_part_2_over_sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Making the Logo
# ==================================
#
# Having some funky ellipses in a simplex inspired some interest when I put the logo
# together for pyrolite, so I put together a cleaned-up example of how you can create
# these kinds of plots for your own data. These examples illustrate different methods to
# show distribution of (homogeneous, or near so) compositional data for exploratory
# analysis.
#
#
#
# +
import matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.cm
from pyrolite.comp.codata import *
from pyrolite.util.skl.transform import ILRTransform, ALRTransform
from pyrolite.util.synthetic import random_composition
import pyrolite.plot
from pyrolite.util.plot.helpers import plot_pca_vectors, plot_stdev_ellipses
np.random.seed(82)
# ignore sphinx_gallery warnings
import warnings
warnings.filterwarnings("ignore", "Matplotlib is currently using agg")
# -
# First we choose some colors, create some log-distributed synthetic data. Here I've
# generated a synthetic dataset with four samples having means equidistant from the
# log-space centre and with varying covariance. This should illustrate the spatial
# warping of the simplex nicely. Additionally, I chose a log-transform here to go
# from and to compositional space (:class:`~pyrolite.util.skl.ILRTransform`, which uses
# the isometric log-ratio function :func:`~pyrolite.comp.codata.ilr`). Choosing
# another transform will change the distortion observed in the simplex slightly.
# This synthetic dataset is added into a :class:`~pandas.DataFrame` for convenient access
# to plotting functions via the pandas API defined in :class:`pyrolite.plot.pyroplot`.
#
#
#
# Convert 0-255 RGB triplets to the 0-1 floats matplotlib expects.
t10b3 = [ # tableau 10 colorblind safe colors, a selection of 4
    (0, 107, 164),
    (171, 171, 171),
    (89, 89, 89),
    (95, 158, 209),
]
t10b3 = [(r / 255.0, g / 255.0, b / 255.0) for r, g, b in t10b3]
# +
d = 1.0 # distance from centre
sig = 0.1 # scale for variance
# means for logspace (D=2)
means = np.array(np.meshgrid([-1, 1], [-1, 1])).T.reshape(-1, 2) * d
# means = np.array([(-d, -d), (d, -d), (-d, d), (d, d)])
covs = ( # covariance for logspace (D=2)
    np.array(
        [
            [[1, 0], [0, 1]],
            [[0.5, 0.15], [0.15, 0.5]],
            [[1.5, -1], [-1, 1.5]],
            [[1.2, -0.6], [-0.6, 1.2]],
        ]
    )
    * sig
)
means = ILRTransform().inverse_transform(means) # compositional means (D=3)
size = 2000 # logo @ 10000
# One synthetic compositional sample per (mean, covariance) pair.
pts = [random_composition(mean=M, cov=C, size=size) for M, C in zip(means, covs)]
T = ILRTransform()
to_log = T.transform
from_log = T.inverse_transform
df = pd.DataFrame(np.vstack(pts))
df.columns = ["SiO2", "MgO", "FeO"]
# Label each block of `size` rows with its sample index (0-3).
df["Sample"] = np.repeat(np.arange(df.columns.size + 1), size).flatten()
chem = ["MgO", "SiO2", "FeO"]
# -
# 2x2 grid of ternary axes (pyrolite's "ternary" projection); strip all
# ternary-axis ticks for a clean logo look.
fig, ax = plt.subplots(
    2, 2, figsize=(10, 10 * np.sqrt(3) / 2), subplot_kw=dict(projection="ternary")
)
ax = ax.flat
_ = [[x.set_ticks([]) for x in [a.taxis, a.laxis, a.raxis]] for a in ax]
# First, let's look at the synthetic data itself in the ternary space:
#
#
#
# Raw scatter of the four synthetic samples on the first ternary axis.
kwargs = dict(marker="D", alpha=0.2, s=3, no_ticks=True, axlabels=False)
for ix, sample in enumerate(df.Sample.unique()):
    comp = df.query("Sample == {}".format(sample))
    comp.loc[:, chem].pyroplot.scatter(ax=ax[0], c=t10b3[ix], **kwargs)
plt.show()
# We can take the mean and covariance in log-space to create covariance ellipses and
# vectors using principal component analysis:
#
#
#
# Per-sample standard-deviation ellipses and PCA vectors, computed in
# logratio space and mapped back to the simplex via `from_log`.
kwargs = dict(ax=ax[1], transform=from_log, nstds=3)
ax[1].set_title("Covariance Ellipses and PCA Vectors")
for ix, sample in enumerate(df.Sample.unique()):
    comp = df.query("Sample == {}".format(sample))
    tcomp = to_log(comp.loc[:, chem])
    plot_stdev_ellipses(tcomp.values, color=t10b3[ix], resolution=1000, **kwargs)
    plot_pca_vectors(tcomp.values, ls="-", lw=0.5, color="k", **kwargs)
plt.show()
# We can also look at data density (here using kernel density estimation)
# in logratio-space:
#
#
#
# +
# Per-sample kernel density estimates with 68%/95% percentile contours.
kwargs = dict(ax=ax[-2], bins=100, axlabels=False)
ax[-2].set_title("Individual Density, with Contours")
for ix, sample in enumerate(df.Sample.unique()):
    comp = df.query("Sample == {}".format(sample))
    comp.loc[:, chem].pyroplot.density(cmap="Blues", vmin=0.05, **kwargs)
    comp.loc[:, chem].pyroplot.density(
        contours=[0.68, 0.95],
        cmap="Blues_r",
        contour_labels={0.68: "σ", 0.95: "2σ"},
        **kwargs,
    )
plt.show()
# -
# We can also do this for individual samples, and estimate percentile contours:
#
#
#
# Pooled density of all four samples on the last axis, then tidy up every
# panel (equal aspect, transparent patch) for the final figure.
kwargs = dict(ax=ax[-1], axlabels=False)
ax[-1].set_title("Overall Density")
df.loc[:, chem].pyroplot.density(bins=100, cmap="Greys", **kwargs)
plt.show()
for a in ax:
    a.set_aspect("equal")
    a.patch.set_visible(False)
plt.show()
| docs/source/tutorials/logo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Melanoma analysis with MobileNet V2
# This notebook shows how good is [MobileNet V2](#MobileNet-V2) for [melanoma](#Melanoma) analysis.
import datetime
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
# Check if a GPU is available.
# Returns the list of physical GPU devices visible to TensorFlow
# (an empty list means training will run on CPU).
tf.config.list_physical_devices('GPU')
# # Melanoma
# __Melanoma__, also redundantly known as __malignant melanoma__, is a type of skin cancer that develops from the pigment-producing cells known as melanocytes. Melanomas typically occur in the skin, but may rarely occur in the mouth, intestines, or eye (uveal melanoma). In women, they most commonly occur on the legs, while in men, they most commonly occur on the back. About 25% of melanomas develop from moles. Changes in a mole that can indicate melanoma include an increase in size, irregular edges, change in color, itchiness, or skin breakdown.
# 
# <div style="text-align: center; font-weight: bold">Pic.1. A melanoma of approximately 2.5 cm (1 in) by 1.5 cm (0.6 in)</div>
# The primary cause of melanoma is ultraviolet light (UV) exposure in those with low levels of the skin pigment melanin. The UV light may be from the sun or other sources, such as tanning devices. Those with many moles, a history of affected family members, and poor immune function are at greater risk. A number of rare genetic conditions, such as xeroderma pigmentosum, also increase the risk. Diagnosis is by biopsy and analysis of any skin lesion that has signs of being potentially cancerous.
# Melanoma is the most dangerous type of skin cancer. Globally, in 2012, it newly occurred in 232,000 people. In 2015, 3.1 million people had active disease, which resulted in 59,800 deaths. Australia and New Zealand have the highest rates of melanoma in the world. High rates also occur in Northern Europe and North America, while it is less common in Asia, Africa, and Latin America. In the United States, melanoma occurs about 1.6 times more often in men than women. Melanoma has become more common since the 1960s in areas mostly populated by people of European descent.
# # MobileNet V2
# __MobileNetV2__ is a convolutional neural network architecture that seeks to perform well on mobile devices. It is based on an inverted residual structure where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains the initial fully convolution layer with 32 filters, followed by 19 residual bottleneck layers.
# 
# <div style="text-align: center; font-weight: bold">Pic.2. MobileNet V2 architecture</div>
# If you want to learn more about MobileNet V2, read [here](https://paperswithcode.com/method/mobilenetv2).
# # Data loading
# +
# Augmenting data generator: rescales pixel values to [0, 1] and applies
# random rotation/flips/brightness jitter; 20% of the images are held out
# as a validation split.
generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    rotation_range=180,
    horizontal_flip=True,
    vertical_flip=True,
    brightness_range=(0.2, 1.5),
    validation_split=0.2,
)
# Training subset: 224x224 images (MobileNetV2's input size), one-hot labels.
training_set = generator.flow_from_directory(
    '/small-data',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
    subset='training'
)
# Validation subset drawn from the same directory via `validation_split`.
validation_set = generator.flow_from_directory(
    '/small-data',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
    subset='validation'
)
# -
# Number of diagnosis classes, inferred from the directory structure.
CLASS_NUMBER = len(training_set.class_indices)
# ### Data source
# As a data source, we use the ISIC Archive.
# The ISIC Archive is an open source platform with publicly available images of skin lesions under Creative Commons licenses. The images are associated with ground-truth diagnoses and other clinical metadata. Images can be queried using faceted search and downloaded individually or in batches. The initial focus of the archive has been on dermoscopy images of individual skin lesions, as these images are inherently standardized by the use of a specialized acquisition device and devoid of many of the privacy challenges associated with clinical images. To date, the images have been provided by specialized melanoma centers from around the world. The archive is designed to accept contributions from new sources under the Terms of Use and welcomes new contributors. There are ongoing efforts to supplement the dermoscopy images in the archive with close-up clinical images and a broader representation of skin types. The images in the Archive are used to support educational efforts through linkage with Dermoscopedia and are used for Grand Challenges and Live Challenges to engage the computer science community for the development of diagnostic AI.
# For more information, go to [ISIC Archive web site](https://www.isic-archive.com/)
# # Model training
# ### Building the model
# We take the model from TensorFlow Hub. [Look here](https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4).
# Transfer learning: a frozen MobileNetV2 feature extractor from TF Hub
# (1280-d feature vector) topped with a trainable softmax classifier.
model = tf.keras.models.Sequential([
    hub.KerasLayer("https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4", output_shape=[1280],
                   trainable=False),
    tf.keras.layers.Dense(CLASS_NUMBER, activation='softmax')
])
# Input: batches of 224x224 RGB images.
model.build([None, 224, 224, 3])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# ### Preparing TensorFlow callbacks
# For our convenience, we create a few TensorFlow callbacks.
# #### The TensorBoard callback
# We want to see how the training is going. We add the callback, which will log the metrics to TensorBoard.
# BUGFIX: the original `datetime.now().strftime('mobilenetv2')` contains no
# % format directives, so strftime simply returned the literal string
# 'mobilenetv2' — every training run logged to the same directory,
# overwriting previous TensorBoard histories. Keep the model-name tag but
# append a real timestamp so each run gets its own log directory.
log_dir = '../logs/fit/' + 'mobilenetv2-' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
# #### The EarlyStopping callback
# This callback stops training when the metrics (e.g. validation loss) are not improving,
# Stop training once validation loss has not improved by at least 0.01
# for 10 consecutive epochs, and roll back to the best weights seen.
early_stop_callback = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss",
    min_delta=0.01,
    patience=10,
    restore_best_weights=True
)
# #### The ModelCheckpoint callback
# This callback saves the model with the best metrics during training.
# +
checkpoint_path = 'checkpoints/mobilenetv2.ckpt'
# Persist the full model whenever validation loss improves (checked each epoch).
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path,
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    mode='auto'
)
# -
# -
# ### Training the model
# Train for up to 200 epochs; EarlyStopping will usually end training sooner,
# while the checkpoint callback keeps the best model on disk.
model.fit(
    training_set,
    validation_data=validation_set,
    epochs=200,
    callbacks=[
        tensorboard_callback,
        checkpoint_callback,
        early_stop_callback
    ]
)
# # Model validation
# ### Loading the model
# We load the model with the best metrics (e.g. validation loss) from the checkpoint.
# Rebuild the same architecture and restore the best checkpointed weights
# (the in-memory model after fit() may not be the best epoch).
model = tf.keras.models.Sequential([
    hub.KerasLayer("https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4", output_shape=[1280],
                   trainable=False),
    tf.keras.layers.Dense(CLASS_NUMBER, activation='softmax')
])
model.build([None, 224, 224, 3])
model.load_weights('./checkpoints/mobilenetv2.ckpt')
# ### Loading the test data
# BUGFIX: `shuffle=False` is essential here. The true labels are later
# collected by iterating this generator and compared against
# `model.predict(testing_set)` — a second pass over the generator. With the
# default `shuffle=True` the two passes yield different orderings, so labels
# and predictions would be misaligned and the ROC/AUC results meaningless.
# NOTE(review): `generator` also applies random augmentation (rotation,
# flips, brightness); a plain rescale-only generator would give a less
# distorted evaluation — confirm intent.
testing_set = generator.flow_from_directory(
    '/small-data-test',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
    shuffle=False
)
# ### Making diagnoses
# Gather the one-hot ground-truth labels batch by batch, then predict
# class probabilities for the whole test set.
# NOTE(review): this assumes the generator yields batches in the same order
# on both passes (i.e. it was created with shuffle disabled) — confirm.
true_labels = np.concatenate([testing_set[i][1] for i in range(len(testing_set))], axis=0)
predicted_labels = model.predict(testing_set)
# ### Plot the ROC Curve
# +
# One ROC curve + AUC per diagnosis class (one-vs-rest).
fpr = dict()
tpr = dict()
auc_metric = dict()
# Invert class_indices (name -> index) so we can look up names by column index.
diagnosis_index_dict = {v: k for k, v in testing_set.class_indices.items()}
for i in range(CLASS_NUMBER):
    diagnosis = diagnosis_index_dict[i]
    fpr[diagnosis], tpr[diagnosis], _ = roc_curve(true_labels[:, i], predicted_labels[:, i])
    auc_metric[diagnosis] = auc(fpr[diagnosis], tpr[diagnosis])
# +
for diagnosis in testing_set.class_indices:
    plt.plot(fpr[diagnosis], tpr[diagnosis], label=diagnosis)
# Diagonal = performance of a random classifier, for reference.
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
# -
# ### Show AUC
auc_metric
| mobilenet_v2/notebook-ENG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
# # Compare corner finders in TEMCA optical images
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
import boto3
import cv2
import skimage.io as io
import skimage
from skimage import data
from skimage import feature
from skimage import color
from skimage import filters
from skimage import transform
from skimage import draw
from skimage import measure
from skimage import morphology
import skdemo
from scipy import ndimage as ndi
import time
# get an image
from toolbox.TAO.tao import TAO
from toolbox.TAO.taoDB import TAOdb
# Fetch a sample aperture image and the specimen's master template from the
# TAO database, displaying each alongside its intensity histogram.
tao = TAO()
taoDB = TAOdb()
url = taoDB.get_aperture_image_url('donaldtrump', 'test1', '000040')
im = io.imread(url)
skdemo.imshow_with_histogram(im);
# ## Retrieve the template for this specimen
# get the master template for the specimen
url = taoDB.get_specimen_template_url('testspecimen')
template_im = io.imread(url)
skdemo.imshow_with_histogram(template_im)
# ## Get the boundaries of the template
# +
# Extract the template outline: Canny edges for display, and a simplified
# polygon via contour tracing + Douglas-Peucker approximation.
w, h = template_im.shape
image = template_im
edges = feature.canny(image, sigma=4.0)
# Much better solution: find contours and then subsample the polygon
# NOTE(review): `contours` below is assigned but never used — the loop
# recomputes contours at level 0 instead of 0.5.
contours = measure.find_contours(image, 0.5)
tolerance1 = 2.5
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 5), sharex=True,
                               sharey=True)
ax0.imshow(image, 'gray')
ax0.set_title('template')
ax1.imshow(edges, 'gray')
ax1.set_title('template polygon: ' + str(tolerance1))
for contour in measure.find_contours(image, 0):
    # Simplify each contour to a polygon within `tolerance1` pixels.
    coords = measure.approximate_polygon(contour, tolerance=tolerance1)
    ax1.plot(coords[:, 1], coords[:, 0], '-r', linewidth=4)
    print("Number of coordinates:", len(contour), len(coords))
#print coords
# -
# ### goodFeaturesToTrack doesn't work on binary
# +
# Live capture loop: grab webcam frames, compute Canny edges, run OpenCV's
# goodFeaturesToTrack on the edge image, and display the annotated frames
# inline. The loop exits once the camera has warmed up (> 5 frames) and the
# scene goes mostly dark (mean gray intensity < 50).
cap = cv2.VideoCapture(0)
frame_count = 0
tolerance1 = 2.5
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(12, 4), sharex=True,
                                    sharey=True)
ax0.set_title('gray')
ax1.set_title('edges')
ax2.set_title('corners')
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = feature.canny(gray, sigma=4.0)
    # Convert the boolean edge mask to a 0/255 uint8 image for OpenCV.
    edges2 = gray.copy()
    edges2[~edges] = 0
    edges2[edges] = 255
    ax1.imshow(edges2, 'gray')
    ax2.clear()
    corners = cv2.goodFeaturesToTrack(edges2, 6, 0.01, 40)
    # BUGFIX: `corners != None` performs an element-wise NumPy comparison
    # whose truth value is ambiguous; `is not None` is the correct check.
    if corners is not None:
        corners = np.int0(corners)
        for i in corners:
            x, y = i.ravel()
            cv2.circle(frame, (x, y), 10, 255, -1)
    ax0.imshow(frame)
    display.display(plt.gcf())
    display.clear_output(wait=True)
    time.sleep(0.01)
    #
    if (frame_count > 5 and np.mean(gray) < 50):
        break
    frame_count = frame_count + 1
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
print ('exit capture')
# -
# Safety cleanup in case the loop above was interrupted before releasing
# the camera (releasing twice is harmless).
cap.release()
cv2.destroyAllWindows()
# ## Get line endpoints
# +
# NOTE(review): this cell is an unmodified copy of the corner-finding loop
# above — the "Get line endpoints" step announced in the heading was
# evidently never implemented. Kept as-is apart from the None-comparison fix.
cap = cv2.VideoCapture(0)
frame_count = 0
tolerance1 = 2.5
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(12, 4), sharex=True,
                                    sharey=True)
ax0.set_title('gray')
ax1.set_title('edges')
ax2.set_title('corners')
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = feature.canny(gray, sigma=4.0)
    # Convert the boolean edge mask to a 0/255 uint8 image for OpenCV.
    edges2 = gray.copy()
    edges2[~edges] = 0
    edges2[edges] = 255
    ax1.imshow(edges2, 'gray')
    ax2.clear()
    corners = cv2.goodFeaturesToTrack(edges2, 6, 0.01, 40)
    # BUGFIX: `corners != None` performs an element-wise NumPy comparison
    # whose truth value is ambiguous; `is not None` is the correct check.
    if corners is not None:
        corners = np.int0(corners)
        for i in corners:
            x, y = i.ravel()
            cv2.circle(frame, (x, y), 10, 255, -1)
    ax0.imshow(frame)
    display.display(plt.gcf())
    display.clear_output(wait=True)
    time.sleep(0.01)
    #
    if (frame_count > 5 and np.mean(gray) < 50):
        break
    frame_count = frame_count + 1
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
print ('exit capture')
# -
| TEMCA/OpticalCornerFinding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Get 3RWW Calibrated Radar Rainfall Data from the Teragon Rainfall API
#
# > *This notebook is a work-in-progress*
#
# *Get timeseries rainfall data for an area of interest within Allegheny County*
#
# ---
#
# When radar estimates of rainfall are calibrated with actual rain gauge data, a highly accurate and valuable source of rainfall data can be calculated over large geographic areas. The result is called *Calibrated Radar Rainfall Data*, or *Gauge-Adjusted Radar Rainfall Data (GARRD)*
#
# 3 Rivers Wet Weather (3RWW), with support from [Vieux Associates](http://www.vieuxinc.com/), uses calibrated data from the NEXRAD radar located in Moon Township, PA with rain gauge measurements collected during the same time period and rain event for every square kilometer in Allegheny County. The resulting rainfall data is equivalent in accuracy to having 2,276 rain gauges placed across the County.
#
# You can view and explore this data on 3RWW's calibrated radar rainfall data site at [www.3riverswetweather.org/municipalities/calibrated-radar-rainfall-data](http://www.3riverswetweather.org/municipalities/calibrated-radar-rainfall-data)
#
# This notebook walks through how to programmatically access 3RWW's massive repository of high resolution spatiotemporal rainfall data for Allegheny County via the ***Teragon Rainfall Dataset API*** for an area of interest. The Teragon Rainfall Dataset API accesses a cache of the historic rainfall data that has been processed by Vieux Associates. It is the same data you'll find on 3RWW's [calibrated-radar-rainfall-data](http://www.3riverswetweather.org/municipalities/calibrated-radar-rainfall-data) website (which is also managed by Teragon).
#
# Complete documentation for the Teragon Rainfall Dataset API is available at [3rww.github.io/api-docs](https://3rww.github.io/api-docs/?language=Python#teragon-rainfall-dataset-api-10).
# ## First: Notebook Setup
#
# ~~This assumes you've got things set up following the recommendations in the ***Getting Started*** notebook.~~
# imports from the Python standard library
import json #read and write JSON
# We're going to use a few external Python packages to make our lives easier:
# imports from 3rd-party libraries
# Requests - HTTP requests for humans
import requests
# PETL - an Extract/Transform/Load toolbox
import petl as etl
# sortedcontainers provides a way to have sorted dictionaries (before Python 3.7)
from sortedcontainers import SortedDict
# Python DateUtil (parser) - a helper for reading timestamps
from dateutil.parser import parse
# ArcGIS API for Python - for accessing 3RWW's open reference datasets in ArcGIS Online
from arcgis.gis import GIS
from arcgis import geometry, GeoAccessor
# for displaying things from the ArcGIS Online in this Jupyter notebook
from IPython.display import display
# ## 1. The basics of getting calibrated radar rainfall data
#
# Getting rainfall data programmatically is a mostly straightforward endeavor: it requires you to submit a HTTP request with parameters specifying locations of interest and a time range. It returns a `csv`-like plain-text response where time intervals are on the x-axis, locations are the y-axis, and values are rainfall amounts. Data quality metadata is included.
#
# Complete API documentation is available at [3rww.github.io/api-docs](https://3rww.github.io/api-docs/?language=Python#teragon-rainfall-dataset-api-10).
#
# To demonstrate the basics of making a call to the API, we'll first use some pre-selected pixel values; we'll demonstrate how to get pixel locations from geodata later on, and then revisit submitting the request for specific locations.
#
#
# ### Assemble the request payload
#
# We'll use the Python `requests` library to make our calls to the API.
#
# The Hurricane Ivan rainfall event in 2004 will serve as an example (2004-09-17 03:00 to 2004-09-18 00:00); the pixels used are pre-selected for this example (we'll get to identifying pixels for an area of interest in a bit).
#
# The request payload for that event, as a Python `dictionary` looks like this:
# Request payload for the Hurricane Ivan example event:
# hourly totals from 2004-09-17 03:00 through 2004-09-18 00:00,
# zero-filled, for four pre-selected pixels.
data = dict(
    startyear=2004,
    startmonth=9,
    startday=17,
    starthour=3,
    endyear=2004,
    endmonth=9,
    endday=18,
    endhour=0,
    interval='Hourly',
    zerofill='yes',
    pixels='148,134;149,134;148,133;149,133',
)
# The Teragon Rainfall Dataset API only accepts `POST` requests. Using the Python `requests` library, then, we construct our call like this, using the `post` method:
# Submit the payload to the Teragon Rainfall Dataset API (POST only).
response = requests.post(
    url="http://web.3riverswetweather.org/trp:API.pixel",
    data=data
)
# Note that the `data` argument for `request.post` is explicitly
# used here: the API does not accept the request payload as
# query string parameters (`params`), which is the default behavior
# for the requests library.
# That's it. The data is contained in the `response` variable. As mentioned earlier, the API returns a `csv`-like plain-text response where time intervals are on the x-axis, locations are the y-axis, and values are rainfall amounts. You can print the response:
print(response.text)
# ## 2. Working with the data from the API
#
# That raw response was a little hard to read, so we'll use the wonderful Python `PETL` library to get something human-readable (you might just as easily swap in the Python Pandas library to do this sort of thing).
# Load the CSV-like response body into a PETL table and render every row.
table = etl.fromcsv(etl.MemorySource(response.text.encode()))
etl.vis.displayall(table)
# That's better. Note that each pixel column has a column that follows it: the API response includes data quality metadata for every value if it exists. In this example, there aren't any data quality issues noted, thus the `-` following every value.
#
# Once you've noted any data quality issues, you might consider removing those additional columns and clean things up to make working with the data a bit simpler, as follows:
def clean_up_response(response_text):
    """Clean the raw Teragon API response into a typed PETL table.

    The API returns a CSV-like body in which every pixel column is
    immediately followed by a data-quality notes column. This helper
    gives those notes columns explicit names and drops them, removes the
    trailing 'Total' row, normalises timestamps to ISO-8601, replaces
    'N/D' (no-data) markers with None, and casts rainfall values to float.

    params:
        response_text = a Python `requests` library `response.text` object
    returns:
        a PETL table object
    """
    table = etl.fromcsv(etl.MemorySource(response_text.encode()))
    # get a list of the existing table header values
    h = list(etl.header(table))
    # every other column after the timestamp is a notes column, so pair
    # each pixel-id column with the notes column that follows it
    xy_cols = zip(*[iter(h[1:])] * 2)
    # store some things:
    new_header = ['Timestamp']
    fields_to_cut = []
    # unpack each (pixel-id, notes) pair directly; the raw notes header is
    # unused — we derive an explicit "<pixel>-n" name instead so the new
    # header contains no duplicates
    for id_col, _ in xy_cols:
        notes_col = "{0}-n".format(id_col)
        # add those to our new header (array)
        new_header.extend([id_col, notes_col])
        # track fields that we might want to remove
        fields_to_cut.append(notes_col)
    # the columns that survive: the timestamp plus the pixel-id columns
    short_header = list(set(new_header).difference(set(fields_to_cut)))
    # transform the table
    table_cleaned = etl \
        .setheader(table, new_header) \
        .select('Timestamp', lambda v: v.upper() != 'TOTAL') \
        .convert('Timestamp', lambda t: parse(t).isoformat()) \
        .replaceall('N/D', None)\
        .cutout(*tuple(fields_to_cut))\
        .convert(
            {h: float for h in short_header if h != 'Timestamp'}
        )
    return table_cleaned
# +
table_cleaned = clean_up_response(response.text)
etl.vis.displayall(table_cleaned)
# -
# There it is. Export that to CSV with PETL like this:
#
# ```python
# etl.tocsv(table_cleaned, "path/to/save/your/data.csv")
# ```
#
# Now what if we want to work with this a key-value store? Try this:
# Build a nested, key-sorted mapping: timestamp -> {pixel-id -> rainfall}.
data = SortedDict()
for row in etl.transpose(table_cleaned).dicts():
    inside = SortedDict()
    for d in row.items():
        # skip the 'Timestamp' key itself; it becomes the outer dict key
        if d[0] != 'Timestamp':
            # coerce non-empty values to float; keep empties/None as-is
            if d[1]:
                v = float(d[1])
            else:
                v = d[1]
            inside[d[0]] = v
    data[row['Timestamp']] = inside
print(json.dumps(data, indent=2))
# This provides a quick rainfall time-series per pixel.
#
# ---
#
# > *EndNote: We've started codifying the above processes in a "wrapper API" available at http://3rww-rainfall-api.civicmapper.com/apidocs/, so you don't have to post-process the data like we just demonstrated. Check it out.*
# ## 3. Getting reference geodata
#
# As we've seen above, 3RWW's Rainfall Data API is not spatial: it returns rainfall values for locations at points in time, but those locations are only represented by 'Pixel' IDs; it does not provide actual geometry or coordinates for those pixels.
#
# The challenge in using that API comes in formulating the location for the request. Location is specified with a "pixel ID", which translates to a location on a 1-kilometer grid set over Allegheny County, PA. The pixel (or pixels) is a required parameter; finding and entering those raw values is somewhat tedious.
#
# To do anything that is location specific with this data (e.g., query rainfall in a specific watershed), you'll want some geodata for reference.
#
# ### Vieux Pixel Polygons
#
# A copy of the pixels used for all calibrated radar rainfall products (created by Vieux) are available on [3RWW's Open Data Portal](http://data-3rww.opendata.arcgis.com/) and 3RWW's regular ArcGIS Online site at:
#
# * [data-3rww.opendata.arcgis.com/datasets/228b1584b89a45308ed4256c5bedd43d_1](https://data-3rww.opendata.arcgis.com/datasets/228b1584b89a45308ed4256c5bedd43d_1), and
# * [3rww.maps.arcgis.com/home/item.html?id=228b1584b89a45308ed4256c5bedd43d](https://3rww.maps.arcgis.com/home/item.html?id=228b1584b89a45308ed4256c5bedd43d)
#
# ...respectively. We can retrieve it programmatically a couple of ways:
#
# * with the [ArcGIS API for Python](https://developers.arcgis.com/python/); or
# * by using the [Python Requests library](http://docs.python-requests.org/en/master/) to make a call directly to the Portal's ArcGIS REST API.
#
# We'll show both ways below.
# #### Using the ArcGIS API for Python
# Establish a connection to your 3RWW's ArcGIS Online portal.
# Anonymous connection to 3RWW's ArcGIS Online organization.
gis = GIS('https://3rww.maps.arcgis.com')
# We can search for the feature layer by name:
search_results = gis.content.search('Gauge Adjusted Radar Rainfall Data')
for item in search_results:
    display(item)
garrd_item = search_results[0]
# Alternatively, we can use the item `id` to directly find the feature layer:
garrd_id = "228b1584b89a45308ed4256c5bedd43d"
garrd_item = gis.content.get(itemid=garrd_id)
garrd_item
# Either way gets us `garrd_item`: a feature layer *collection* item, which contains individual feature layers. This one (we know from clicking on the item above), has both points and polygons variants of the GARRD reference geometry. We're interested in the polygons (grid). Get that as follows:
garrd_item.layers
# it's the second item, index 1
garrd_grid = garrd_item.layers[1]
garrd_grid
# Since we're in a notebook now, the ArcGIS API for Python lets you put that on a map:
m = gis.map('Pittsburgh')
m.add_layer(garrd_grid)
m
# Finally, we can turn that into a `geojson`-like Python dictionary.
# Query all features reprojected to WGS 84 (EPSG:4326).
q = garrd_grid.query(out_sr=4326)
garrd_grid_geojson = q.to_geojson
# #### Using `requests`
#
# This approach is a little more hands on, but works without fuss and without the overhead of the ArcGIS API for Python used above.
#
# We need to get the service `url` from the item detail page on 3RWW's Open Data Portal, and then construct query parameters for the request as a Python dictionary.
# service URL - note how '/query' is at the end of the URL
service_url = 'https://services6.arcgis.com/dMKWX9NPCcfmaZl3/ArcGIS/rest/services/garrd/FeatureServer/1/query'
# query string parameters
params = {
    'where': '1=1', # Esri's convention for returning everything from the ArcGIS REST API
    'outFields': 'PIXEL', # only include the GARRD 'PIXEL' field
    'outSR': '4326', # project as WGS 1984
    'f': 'geojson' # return as geojson
}
# make the request (GET with query-string params works for this endpoint)
garrd_grid_geojson_response = requests.get(service_url, params=params)
garrd_grid_geojson_response
# this gets us the response as a geojson-like Python dictionary.
garrd_grid_geojson = garrd_grid_geojson_response.json()
# That gets us a `geojson` object of all pixels as a python dictionary.
# ### Area of Interest Polygons
#
# Next, we'll establish an area of interest using a polygon from an existing dataset: the Saw Mill Run watershed.
#
# Allegheny County has a watershed dataset in ArcGIS Online, so we'll use that for this example. It's available here:
#
# * http://openac-alcogis.opendata.arcgis.com/datasets/364f4c3613164f79a1d8c84aed6c03e0_0
#
# (Note that you could swap this out for any online geodata service that provides polygons, and this will work)
# #### With ArcGIS API for Python
# use the item ID from the link above to get the layer
watersheds_item = gis.content.get(itemid="364f4c3613164f79a1d8c84aed6c03e0")
watersheds_layer = watersheds_item.layers[0]
# NOTE(review): the WHERE clause selects GIRTYS Run, although the
# surrounding prose and variable names refer to Saw Mill Run — confirm
# which watershed is intended.
basin = watersheds_layer.query(where="DESCR like '%GIRTYS%'", out_sr=4326)
m2 = gis.map('Pittsburgh')
m2.add_layer(basin)
m2
# #### With `requests` library
# service URL - note how '/query' is at the end of the URL
service_url = 'https://services1.arcgis.com/vdNDkVykv9vEWFX4/arcgis/rest/services/Watersheds/FeatureServer/0/query'
# query string parameters
params = {
    'where': "DESCR like '%GIRTYS%'", # get GIRTYS RUN
    'outFields': ['DESCR', 'AREA'], # return only the DESCR and AREA fields
    'outSR': '4326', # project as WGS 1984
    'f': 'geojson' # return as geojson
}
# make the request
watershed_geojson_response = requests.get(service_url, params=params)
watershed_geojson_response.json()
# > *Note that while we're pulling our data from online sources, you could also read in your own geometry here from a shapefile on disk.*
# ## 4. Intersecting Pixels w/ the Area of Interest
#
# Now that we know how to get pixel data, and know how to get area of interest data, we can perform a spatial intersection to IDs of the pixels in the area of interest, which we'll use in a query to the Teragon API.
#
# ### With the ArcGIS API for Python
#
# Using the `garrd_grid` feature layer and the `basin` feature set, running an intersect is pretty easy:
# construct the filter using the geometry module
sa_filter = geometry.filters.intersects(geometry=basin.features[0].geometry, sr=4326)
# then use that filter in a query of the pixel data
pixels_of_interest = garrd_grid.query(geometry_filter=sa_filter, out_sr=4326)
m3 = gis.map('Pittsburgh')
m3.add_layer(pixels_of_interest)
m3
# There they are: pixels covering the Saw Mill Run watershed. Let's get a list of IDs, since that's what we're after.
# NOTE(review): the basin query earlier actually selected GIRTYS Run — the
# "Saw Mill Run" naming here appears stale; confirm intended watershed.
#
# First, let's introspect so we know what to go after:
pixels_of_interest.features[0]
# We can see that each Feature object is represented as a Python dictionary, and the ID is stored under `attributes` in the `PIXEL` property. We can get all the Pixel IDS out into a list with a one-liner:
# set() removes any duplicate pixel IDs before listing them.
pixel_ids = list(set([f.attributes['PIXEL'] for f in pixels_of_interest.features]))
print(pixel_ids)
# ### With GeoPandas
#
# Alternatively, we can use the raw `geojson` that we've acquired in previous steps and find the spatial intersection using the GeoPandas library.
#
# > *To be completed*
# ## 5. Half Circle: getting calibrated radar rainfall data (for an area of interest)
#
# So far you've learned how to:
#
# * make a request to Teragon Rainfall Dataset API
# * get the Pixel reference geodata
# * get area of interest reference data
# * find the Pixels that are in the area of interest
#
# Now it's time to bring it all together.
#
# ### First, though...
#
# You'll recall that the Pixel list in the Teragon API looks something like this:
#
# `'pixels': '148,134;149,134;148,133;149,133'`.
#
# THAT is a semi-colon delimited list of Pixel IDs as a Python `str` object; each Pixel ID is split into two 3-digit IDs (which represents non-spatial grid coordinates). Our list of Pixel IDs does not look like that!
#
# However, we can construct that exact thing from our list above with another one-liner.
# Convert each 6-digit pixel ID (e.g. "148134") into the "XXX,YYY"
# grid-coordinate pair the Teragon API expects, joined with semicolons.
pixel_ids_for_api = ";".join(["{0},{1}".format(i[:3], i[-3:]) for i in pixel_ids])
pixel_ids_for_api
# Boom. A list of Pixel IDs in the format expected by the Teragon API.
#
# ### Make the Request
#
# Let's use that in a request to the Teragon API, just like before
# Same Hurricane Ivan time window as before, but now requesting every
# pixel intersecting the area of interest.
data = {
    'startyear': 2004,
    'startmonth': 9,
    'startday': 17,
    'starthour': 3,
    'endyear': 2004,
    'endmonth': 9,
    'endday': 18,
    'endhour': 0,
    'interval': 'Hourly',
    'zerofill': 'yes',
    'pixels': pixel_ids_for_api
}
rainfall_for_saw_mill_run = requests.post(
    url="http://web.3riverswetweather.org/trp:API.pixel",
    data=data
)
# And a quick reformat and print (using the helper function we defined earlier):
# +
rainfall_for_saw_mill_run_clean = clean_up_response(rainfall_for_saw_mill_run.text)
etl.vis.displayall(rainfall_for_saw_mill_run_clean)
# -
# ## 6. Full Circle: putting a summary of that calibrated radar rainfall data for an area of interest on a map
from statistics import mean, stdev
# +
# Per-pixel summary statistics: transpose so pixels become rows, aggregate,
# then compute sum/mean/stdev/min/max of each pixel's hourly values.
# NOTE(review): the aggregation spec passes `(list(etl.values(...)), list)`
# for every statistic — an unusual petl usage; the real math happens in the
# convert() steps below on v[0]. Verify this produces per-pixel (not
# per-timestamp) statistics before relying on it.
rainfall_smr_totals = etl\
    .transpose(rainfall_for_saw_mill_run_clean)\
    .aggregate(
        'Timestamp',
        {
            'sum': (list(etl.values(rainfall_for_saw_mill_run_clean, 'Timestamp')), list),
            'mean': (list(etl.values(rainfall_for_saw_mill_run_clean, 'Timestamp')), list),
            'stdev': (list(etl.values(rainfall_for_saw_mill_run_clean, 'Timestamp')), list),
            'min': (list(etl.values(rainfall_for_saw_mill_run_clean, 'Timestamp')), list),
            'max': (list(etl.values(rainfall_for_saw_mill_run_clean, 'Timestamp')), list)
        }
    )\
    .convert('sum', lambda v: round(sum(list(v[0])), 3))\
    .convert('mean', lambda v: round(mean(list(v[0])), 3))\
    .convert('stdev', lambda v: round(stdev(list(v[0])), 3))\
    .convert('min', lambda v: round(min(list(v[0])), 3))\
    .convert('max', lambda v: round(max(list(v[0])), 3))\
    .rename('Timestamp', 'PIXEL')\
    .convert('PIXEL', lambda v: str("".join(v.split("-"))))
etl.vis.display(rainfall_smr_totals)
# -
# Attach the per-pixel statistics to the pixel features, then render a
# classed-color map of total rainfall.
pixels_geojson = json.loads(pixels_of_interest.to_geojson)
pixels_geojson
# NOTE(review): `new_fs` is assigned but never used (leftover from the
# commented-out geojson-based approach below).
new_fs = []
# for f in pixels_geojson['features']:
for f in pixels_of_interest.features:
    #p = f['properties']['PIXEL']
    p = f.attributes['PIXEL']
    # look up this pixel's statistics row and merge it into the feature's
    # attributes (dropping the redundant PIXEL column)
    t = etl\
        .selecteq(rainfall_smr_totals, 'PIXEL', p)\
        .cutout('PIXEL')\
        .dicts()
    #print(p, t[0])
    #f['properties'].update(t[0])
    f.attributes.update(t[0])
# +
# Register the new statistic columns in the feature set's schema so ArcGIS
# renders them as double-precision fields.
new_fields = [
    {
        'name': f,
        'type': 'esriFieldTypeDouble',
        'alias': f,
        'sqlType': 'sqlTypeOther',
        'domain': None,
        'defaultValue': None
    } for f in [h for h in list(etl.header(rainfall_smr_totals)) if h != "PIXEL"]
]
pixels_of_interest.fields.extend(new_fields)
# -
# Color pixels by their 'sum' (total rainfall) attribute.
options={"opacity":1, "renderer": "ClassedColorRenderer", "field_name":"sum"}
map_widget = gis.map('Pittsburgh')
map_widget.add_layer(pixels_of_interest, options=options)
map_widget
| rainfall/Getting Rainfall Data (3RWW Teragon API).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["header"]
# <table width="100%">
# <tr style="border-bottom:solid 2pt #009EE3">
# <td style="text-align:left" width="10%">
# <a href="generation_of_time_axis.dwipynb" download><img src="../../images/icons/download.png"></a>
# </td>
# <td style="text-align:left" width="10%">
# <a href="https://mybinder.org/v2/gh/biosignalsnotebooks/biosignalsnotebooks/biosignalsnotebooks_binder?filepath=biosignalsnotebooks_environment%2Fcategories%2FPre-Process%2Fgeneration_of_time_axis.dwipynb" target="_blank"><img src="../../images/icons/program.png" title="Be creative and test your solutions !"></a>
# </td>
# <td></td>
# <td style="text-align:left" width="5%">
# <a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png"></a>
# </td>
# <td style="text-align:left" width="5%">
# <a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png"></a>
# </td>
# <td style="text-align:left" width="5%">
# <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png"></a>
# </td>
# <td style="border-left:solid 2pt #009EE3" width="15%">
# <img src="../../images/ost_logo.png">
# </td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_title"]
# <link rel="stylesheet" href="../../styles/theme_style.css">
# <!--link rel="stylesheet" href="../../styles/header_style.css"-->
# <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
#
# <table width="100%">
# <tr>
# <td id="image_td" width="15%" class="header_image_color_4"><div id="image_img" class="header_image_4"></div></td>
# <td class="header_text"> Generation of a time axis (conversion of samples into seconds) </td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_tags"]
# <div id="flex-container">
# <div id="diff_level" class="flex-item">
# <strong>Difficulty Level:</strong> <span class="fa fa-star checked"></span>
# <span class="fa fa-star checked"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# </div>
# <div id="tag" class="flex-item-tag">
# <span id="tag_list">
# <table id="tag_list_table">
# <tr>
# <td class="shield_left">Tags</td>
# <td class="shield_right" id="tags">pre-process☁time☁conversion</td>
# </tr>
# </table>
# </span>
# <!-- [OR] Visit https://img.shields.io in order to create a tag badge-->
# </div>
# </div>
# -
# All electrophysiological signals, collected by *Plux* acquisition systems, are, in its essence, time series.
#
# Raw data contained in the generated .txt, .h5 and .edf files consists in samples and each sample value is in a raw value with 8 or 16 bits that needs to be converted to a physical unit by the respective transfer function.
#
# Plux have examples of conversion rules for each sensor (in separate .pdf files), which may be accessed at <a href="http://biosignalsplux.com/en/learn/documentation">"Documentation>>Sensors" section <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> of <strong><span class="color2">biosignalsplux</span></strong> website.
#
# <img src="../../images/pre-process/sensors_section.gif">
#
# Although each file returned by <strong><span class="color2">OpenSignals</span></strong> contains a sequence number linked to each sample, giving a notion of "time order" and that can be used as x axis, working with real time units is, in many occasions, more intuitive.
#
# So, in the present **<span class="color5">Jupyter Notebook</span>** is described how to associate a time axis to an acquired signal, taking into consideration the number of acquired samples and the respective sampling rate.
# <hr>
# <p class="steps">1 - Importation of the needed packages </p>
# + tags=["hide_out"]
# Package dedicated to download files remotely
from wget import download
# Package used for loading data from the input text file and for generation of a time axis
from numpy import loadtxt, linspace
# Package used for loading data from the input h5 file
import h5py
# biosignalsnotebooks own package.
import biosignalsnotebooks as bsnb
# -
# <p class="steps"> A - Text Files</p>
# <p class="steps">A1 - Load of support data inside .txt file (described in a <span class="color5">Jupyter Notebook</span> entitled <a href="../Load/open_txt.ipynb"><strong> "Load acquired data from .txt file" <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>) </p>
# + tags=["hide_out"]
# Download of the text file followed by content loading.
# NOTE: wget's download() returns the local path it actually wrote to; if the
# target name already exists it may append a numeric suffix, which is why the
# rename step in the hidden cell below is needed.
txt_file_url = "https://drive.google.com/uc?export=download&id=1m7E7PnKLfcd4HtOASH6vRmyBbCmIEkLf"
txt_file = download(txt_file_url, out="download_file_name.txt")
txt_file = open(txt_file, "r")
# + tags=["hide_in"]
# [Internal code for overwrite file if already exists]
import os
import shutil
# Close the handle before moving the file, then reopen under the canonical name.
txt_file.close()
if os.path.exists("download_file_name.txt"):
    shutil.move(txt_file.name,"download_file_name.txt")
txt_file = "download_file_name.txt"
txt_file = open(txt_file, "r")
# -
# <p class="steps">A2 - Load of acquisition samples (in this case from the third column of the text file - list entry 2)</p>
# Parse the whitespace-separated text file and keep only the third column
# (index 2), which holds the acquired channel samples.
txt_signal = loadtxt(txt_file)[:, 2]
# <p class="steps">A3 - Determination of the number of acquired samples</p>
# Number of acquired samples
nbr_samples_txt = len(txt_signal)
# <p class="steps"> B - H5 Files</p>
# <p class="steps">B1 - Load of support data inside .h5 file (described in the <span class="color5">Jupyter Notebook</span> entitled <a href="../Load/open_h5.ipynb"><strong> "Load acquired data from .h5 file"<img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>) </p>
# + tags=["hide_out"]
# Download of the .h5 file followed by content loading.
h5_file_url = "https://drive.google.com/uc?export=download&id=1UgOKuOMvHTm3LlQ_e7b6R_qZL5cdL4Rv"
h5_file = download(h5_file_url, out="download_file_name.h5")
# Open read-only. Modern h5py deprecates calling File() without an explicit
# mode; "r" matches the previous effective behavior for an existing file.
h5_object = h5py.File(h5_file, "r")
# + tags=["hide_in"]
# [Internal code for overwrite file if already exists]
import os
import shutil
# Close before moving, then reopen the canonical name (read-only again).
h5_object.close()
if os.path.exists("download_file_name.h5"):
    shutil.move(h5_file,"download_file_name.h5")
h5_file = "download_file_name.h5"
h5_object = h5py.File(h5_file, "r")
# -
# <p class="steps">B2 - Load of acquisition samples inside .h5 file</p>
# +
# Device mac-address: OpenSignals stores one top-level HDF5 group per device,
# keyed by its MAC address; this acquisition has a single device.
mac_address = list(h5_object.keys())[0]
# Access to signal data acquired by the device identified by "mac_address" in "channel_1"
# (raw, unconverted sample values).
h5_signal = list(h5_object.get(mac_address).get("raw").get("channel_1"))
# -
# <p class="steps">B3 - Determination of the number of acquired samples</p>
# Number of acquired samples
nbr_samples_h5 = len(h5_signal)
# As it can be seen, the number of samples is equal for both file types.
# + tags=["hide_in"]
from sty import fg, rs
# Pretty-print both counts with ANSI colours (blue for .txt, orange for .h5).
print(fg(98,195,238) + "\033[1mNumber of samples (.txt file):\033[0m" + fg.rs + " " + str(nbr_samples_txt))
print(fg(232,77,14) + "\033[1mNumber of samples (.h5 file):\033[0m" + fg.rs + " " + str(nbr_samples_h5))
# -
# So, we can simplify and reduce the number of variables:
# Both files describe the same acquisition, so keep a single sample count.
nbr_samples = nbr_samples_txt
# Like described in the Notebook intro, for generating a time-axis it is needed the <strong><span class="color4">number of acquired samples</span></strong> and the <strong><span class="color7">sampling rate</span></strong>.
#
# Currently the only unknown parameter is the <strong><span class="color7">sampling rate</span></strong>, which can be easily accessed for .txt and .h5 files as described in <a href="../Load/signal_loading_preparatory_steps.ipynb" target="_blank">"Signal Loading - Working with File Header"<img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>.
#
# For our acquisition the sampling rate is:
sampling_rate = 1000 # Hz
# <p class="steps">AB4 - Determination of acquisition time in seconds</p>
# Conversion between sample number and seconds (duration = N / fs).
acq_time = nbr_samples / sampling_rate
# + tags=["hide_in"]
print ("Acquisition Time: " + str(acq_time) + " s")
# -
# <p class="steps">AB5 - Creation of the time axis (between 0 and 417.15 seconds) through <span class="color4">linspace</span> function</p>
# linspace yields exactly nbr_samples evenly spaced instants, one per sample.
time_axis = linspace(0, acq_time, nbr_samples)
print ("Time-Axis: \n" + str(time_axis))
# <p class="steps">AB6 - Plot of the acquired signal (first 10 seconds) with the generated time-axis</p>
# 10 s at 1000 Hz -> first 10*sampling_rate samples of both axes.
bsnb.plot(time_axis[:10*sampling_rate], txt_signal[:10*sampling_rate])
# *This procedure can be automatically done by **generate_time** function in **conversion** module of **<span class="color2">biosignalsnotebooks</span>** package*
# + tags=["hide_out"]
# The package helper performs the same steps (sample count + sampling rate
# read from the file header) and returns the time axis directly.
time_axis_auto = bsnb.generate_time(h5_file_url)
# + tags=["hide_in"]
from numpy import array
print ("Time-Axis returned by generateTime function:")
print (array(time_axis_auto))
# -
# Time is a really important "dimension" in our daily lives and particularly on signal processing analysis. Without a time "anchor" like <strong><span class="color7">sampling rate</span></strong> it is very difficult to link the acquired digital data with real events.
#
# Concepts like "temporal duration" or "time rate" become meaningless, being more difficult to take adequate conclusions.
#
# However, as can be seen, a researcher in possession of the data to process and a single parameter (sampling rate) can easily generate a time-axis, following the demonstrated procedure.
#
# <strong><span class="color7">We hope that you have enjoyed this guide. </span><span class="color2">biosignalsnotebooks</span><span class="color4"> is an environment in continuous expansion, so don't stop your journey and learn more with the remaining <a href="../MainFiles/biosignalsnotebooks.ipynb">Notebooks <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a></span></strong> !
# + [markdown] tags=["footer"]
# <hr>
# <table width="100%">
# <tr>
# <td style="border-right:solid 3px #009EE3" width="20%">
# <img src="../../images/ost_logo.png">
# </td>
# <td width="40%" style="text-align:left">
# <a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">☌ Project Presentation</a>
# <br>
# <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank">☌ GitHub Repository</a>
# <br>
# <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">☌ How to install biosignalsnotebooks Python package ?</a>
# <br>
# <a href="../MainFiles/signal_samples.ipynb">☌ Signal Library</a>
# </td>
# <td width="40%" style="text-align:left">
# <a href="../MainFiles/biosignalsnotebooks.ipynb">☌ Notebook Categories</a>
# <br>
# <a href="../MainFiles/by_diff.ipynb">☌ Notebooks by Difficulty</a>
# <br>
# <a href="../MainFiles/by_signal_type.ipynb">☌ Notebooks by Signal Type</a>
# <br>
# <a href="../MainFiles/by_tag.ipynb">☌ Notebooks by Tag</a>
# </td>
# </tr>
# </table>
# + tags=["hide_both"]
from biosignalsnotebooks.__notebook_support__ import css_style_apply
# Apply the biosignalsnotebooks CSS theme to this notebook's HTML output.
css_style_apply()
# + tags=["hide_both"] language="html"
# <script>
# // AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
# require(
# ['base/js/namespace', 'jquery'],
# function(jupyter, $) {
# $(jupyter.events).on("kernel_ready.Kernel", function () {
# console.log("Auto-running all cells-below...");
# jupyter.actions.call('jupyter-notebook:run-all-cells-below');
# jupyter.actions.call('jupyter-notebook:save-notebook');
# });
# }
# );
# </script>
| notebookToHtml/biosignalsnotebooks_html_publish/Categories/Pre-Process/generation_of_time_axis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3
# language: python
# name: python3
# ---
# # Tune a CNN on MNIST
#
# This tutorial walks through using Ax to tune two hyperparameters (learning rate and momentum) for a PyTorch CNN on the MNIST dataset trained using SGD with momentum.
#
# +
import torch
import numpy as np
from ax.plot.contour import plot_contour
from ax.plot.trace import optimization_trace_single_method
from ax.service.managed_loop import optimize
from ax.utils.notebook.plotting import render, init_notebook_plotting
from ax.utils.tutorials.cnn_utils import load_mnist, train, evaluate, CNN
init_notebook_plotting()
# -
# Fix the RNG seed so the tutorial's trials are reproducible.
torch.manual_seed(12345)
dtype = torch.float
# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ## 1. Load MNIST data
# First, we need to load the MNIST data and partition it into training, validation, and test sets.
#
# Note: this will download the dataset if necessary.
BATCH_SIZE = 512
train_loader, valid_loader, test_loader = load_mnist(batch_size=BATCH_SIZE)
# ## 2. Define function to optimize
# In this tutorial, we want to optimize classification accuracy on the validation set as a function of the learning rate and momentum. The function takes in a parameterization (set of parameter values), computes the classification accuracy, and returns a dictionary of metric name ('accuracy') to a tuple with the mean and standard error.
def train_evaluate(parameterization):
    """Train a fresh CNN under ``parameterization`` and score it.

    Ax calls this with a dict of hyperparameter values (here: lr and
    momentum). The return value is whatever ``evaluate`` reports for the
    validation split, i.e. the metric dictionary being optimized.
    """
    trained_net = train(
        net=CNN(),
        train_loader=train_loader,
        parameters=parameterization,
        dtype=dtype,
        device=device,
    )
    return evaluate(net=trained_net, data_loader=valid_loader, dtype=dtype, device=device)
# ## 3. Run the optimization loop
# Here, we set the bounds on the learning rate and momentum and set the parameter space for the learning rate to be on a log scale.
# Run Ax's managed optimization loop. The learning rate spans several orders
# of magnitude, hence the log-scale search; momentum is searched linearly.
best_parameters, values, experiment, model = optimize(
    parameters=[
        {"name": "lr", "type": "range", "bounds": [1e-6, 0.4], "log_scale": True},
        {"name": "momentum", "type": "range", "bounds": [0.0, 1.0]},
    ],
    evaluation_function=train_evaluate,
    objective_name='accuracy',
)
# We can introspect the optimal parameters and their outcomes:
best_parameters
# `values` bundles the model's predicted means and covariances at the best point.
means, covariances = values
means, covariances
# ## 4. Plot response surface
#
# Contour plot showing classification accuracy as a function of the two hyperparameters.
#
# The black squares show points that we have actually run, notice how they are clustered in the optimal region.
render(plot_contour(model=model, param_x='lr', param_y='momentum', metric_name='accuracy'))
# ## 5. Plot best objective as function of the iteration
#
# Show the model accuracy improving as we identify better hyperparameters.
# `plot_single_method` expects a 2-d array of means, because it expects to average means from multiple
# optimization runs, so we wrap our best-objectives array in another array.
# Percentages per trial, in trial order (one row because we have one run).
best_objectives = np.array([[trial.objective_mean*100 for trial in experiment.trials.values()]])
best_objective_plot = optimization_trace_single_method(
    # Running maximum turns the per-trial trace into a best-so-far curve.
    y=np.maximum.accumulate(best_objectives, axis=1),
    title="Model performance vs. # of iterations",
    ylabel="Classification Accuracy, %",
)
render(best_objective_plot)
# ## 6. Train CNN with best hyperparameters and evaluate on test set
# Note that the resulting accuracy on the test set might not be exactly the same as the maximum accuracy achieved on the evaluation set throughout optimization.
# Fetch per-arm results and pick the arm with the best observed mean accuracy.
data = experiment.fetch_data()
df = data.df
best_arm_name = df.arm_name[df['mean'] == df['mean'].max()].values[0]
best_arm = experiment.arms_by_name[best_arm_name]
best_arm
# Merge train + validation data for the final fit.
# NOTE(review): `.dataset.dataset` assumes each loader wraps a Subset of the
# underlying MNIST dataset; unwrapping both to `.dataset` may reference the
# same full dataset twice rather than two disjoint splits — confirm against
# the load_mnist implementation.
combined_train_valid_set = torch.utils.data.ConcatDataset([
    train_loader.dataset.dataset,
    valid_loader.dataset.dataset,
])
combined_train_valid_loader = torch.utils.data.DataLoader(
    combined_train_valid_set,
    batch_size=BATCH_SIZE,
    shuffle=True,
)
# Retrain from scratch with the best hyperparameters found above.
net = train(
    net=CNN(),
    train_loader=combined_train_valid_loader,
    parameters=best_arm.parameters,
    dtype=dtype,
    device=device,
)
# Final, held-out evaluation on the test split.
test_accuracy = evaluate(
    net=net,
    data_loader=test_loader,
    dtype=dtype,
    device=device,
)
print(f"Classification Accuracy (test set): {round(test_accuracy*100, 2)}%")
| tutorials/tune_cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. Regression with Images and Text
#
# In this notebook we will go through a series of examples on how to combine all Wide & Deep components, the Wide component, the stack of dense layers for the "categorical embeddings" and numerical column (deepdense), the text data (deeptext) and images (deepimage).
#
# To that aim I will use the Airbnb listings dataset for London, which you can download from [here](http://insideairbnb.com/get-the-data.html). I have taken a sample of 1000 listings to keep the data tractable in this notebook. Also, I have preprocessed the data and prepared it for this exercise. All preprocessing steps can be found in the notebook `airbnb_data_preprocessing.ipynb` in this `examples` folder. Note that you do not need to go through that notebook to get an understanding of how to use this library.
# +
import numpy as np
import pandas as pd
import os
import torch
from pytorch_widedeep.preprocessing import WidePreprocessor, DeepPreprocessor, TextPreprocessor, ImagePreprocessor
from pytorch_widedeep.models import Wide, DeepDense, DeepText, DeepImage, WideDeep
from pytorch_widedeep.initializers import *
from pytorch_widedeep.callbacks import *
from pytorch_widedeep.optim import RAdam
# -
df = pd.read_csv('data/airbnb/airbnb_sample.csv')
df.head()
# ### 1.1 Regression with the defaults
# There are a number of columns that are already binary. Therefore, no need to one hot encode them
crossed_cols = (['property_type', 'room_type'],)
already_dummies = [c for c in df.columns if 'amenity' in c] + ['has_house_rules']
wide_cols = ['is_location_exact', 'property_type', 'room_type', 'host_gender',
'instant_bookable'] + already_dummies
# (column, embedding_dim) pairs for the categorical embeddings.
cat_embed_cols = [(c, 16) for c in df.columns if 'catg' in c] + \
    [('neighbourhood_cleansed', 64), ('cancellation_policy', 16)]
continuous_cols = ['latitude', 'longitude', 'security_deposit', 'extra_people']
# it does not make sense to standardise Latitude and Longitude
already_standard = ['latitude', 'longitude']
# text and image colnames
text_col = 'description'
img_col = 'id'
# path to pretrained word embeddings and the images
word_vectors_path = 'data/glove.6B/glove.6B.100d.txt'
img_path = 'data/airbnb/property_picture'
# target
target_col = 'yield'
# ### 1.1.1 Prepare the data
#
# I will focus here on how to prepare the data and run the model. Check notebooks 1 and 2 to see what's going on behind the scenes
#
# Preparing the data is rather simple
target = df[target_col].values
# One preprocessor per model component; each fit_transform returns the
# numpy input matrix expected by the corresponding WideDeep branch.
wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols)
X_wide = wide_preprocessor.fit_transform(df)
deep_preprocessor = DeepPreprocessor(embed_cols=cat_embed_cols, continuous_cols=continuous_cols)
X_deep = deep_preprocessor.fit_transform(df)
# Tokenises the text column and builds the GloVe embedding matrix.
text_preprocessor = TextPreprocessor(word_vectors_path=word_vectors_path)
X_text = text_preprocessor.fit_transform(df, text_col)
# Resizes/normalises the property pictures found under img_path.
image_processor = ImagePreprocessor()
X_images = image_processor.fit_transform(df, img_col, img_path)
# ### 1.1.2. Build the model components
# Linear model
# Linear model over the (crossed) one-hot wide features.
wide = Wide(wide_dim=X_wide.shape[1], output_dim=1)
# DeepDense: 2 Dense layers over categorical embeddings + continuous columns.
deepdense = DeepDense(hidden_layers=[128,64], dropout=[0.5, 0.5],
                      deep_column_idx=deep_preprocessor.deep_column_idx,
                      embed_input=deep_preprocessor.embeddings_input,
                      continuous_cols=continuous_cols)
# DeepText: 2 LSTMs seeded with the pretrained GloVe embedding matrix.
deeptext = DeepText(vocab_size=len(text_preprocessor.vocab.itos), hidden_dim=64,
                    n_layers=2, rnn_dropout=0.5,
                    embedding_matrix=text_preprocessor.embedding_matrix)
# Pretrained Resnet 18 (default is all but last 2 conv blocks) plus a FC-Head 512->256->128
deepimage = DeepImage(pretrained=True, head_layers=[512, 256, 128])
# Combine the four branches; their outputs are summed into one prediction.
model = WideDeep(wide=wide, deepdense=deepdense, deeptext=deeptext, deepimage=deepimage)
# ### 1.1.3. Compile and fit
model.compile(method='regression')
model.fit(X_wide=X_wide, X_deep=X_deep, X_text=X_text, X_img=X_images,
          target=target, n_epochs=1, batch_size=32, val_split=0.2)
# ### 1.2 Regression with varying parameters and a FC-Head receiving the deep side
#
# This would be the second architecture shown in the README file
# Same four branches as before, but this time the concatenated deep output is
# passed through an extra fully-connected head (head_layers=[128, 64]).
wide = Wide(wide_dim=X_wide.shape[1], output_dim=1)
deepdense = DeepDense(hidden_layers=[128,64], dropout=[0.5, 0.5],
                      deep_column_idx=deep_preprocessor.deep_column_idx,
                      embed_input=deep_preprocessor.embeddings_input,
                      continuous_cols=continuous_cols)
# Note the wider text hidden state (128 vs 64 in the first model).
deeptext = DeepText(vocab_size=len(text_preprocessor.vocab.itos), hidden_dim=128,
                    n_layers=2, rnn_dropout=0.5,
                    embedding_matrix=text_preprocessor.embedding_matrix)
deepimage = DeepImage(pretrained=True, head_layers=[512, 256, 128])
model = WideDeep(wide=wide, deepdense=deepdense, deeptext=deeptext, deepimage=deepimage, head_layers=[128, 64])
# Let's have a look to the model
# Both, the Text and Image components allow FC-heads on their own (referred very creatively as `texthead` and `imagehead`). Following this nomenclature, the FC-head that receives the concatenation of the whole deep component is called `deephead`.
#
# Now let's go "kaggle crazy". Let's use different optimizers, initializers and schedulers for different components. Moreover, let's use a different learning rate for different parameter groups, for the `DeepDense` component
# Build per-parameter-group learning rates for the DeepDense component:
# embedding layers get 1e-4, every other parameter 1e-3.
deep_params = []
for childname, child in model.named_children():
    if childname == 'deepdense':
        for n,p in child.named_parameters():
            if "emb_layer" in n: deep_params.append({'params': p, 'lr': 1e-4})
            else: deep_params.append({'params': p, 'lr': 1e-3})
# One optimizer per model component.
wide_opt = torch.optim.Adam(model.wide.parameters())
deep_opt = torch.optim.Adam(deep_params)
text_opt = RAdam(model.deeptext.parameters())
img_opt = RAdam(model.deepimage.parameters())
head_opt = torch.optim.Adam(model.deephead.parameters())
# One LR scheduler wrapping each optimizer.
wide_sch = torch.optim.lr_scheduler.StepLR(wide_opt, step_size=5)
deep_sch = torch.optim.lr_scheduler.MultiStepLR(deep_opt, milestones=[3,8])
text_sch = torch.optim.lr_scheduler.StepLR(text_opt, step_size=5)
# BUGFIX: this scheduler was previously constructed around deep_opt, so the
# image optimizer's learning rate was never scheduled (and deep_opt was
# stepped by two schedulers). It must wrap img_opt.
img_sch = torch.optim.lr_scheduler.MultiStepLR(img_opt, milestones=[3,8])
head_sch = torch.optim.lr_scheduler.StepLR(head_opt, step_size=5)
# +
# remember: one optimizer per model component; for lr schedulers and initializers this is not necessary
optimizers = {'wide': wide_opt, 'deepdense':deep_opt, 'deeptext':text_opt, 'deepimage': img_opt, 'deephead': head_opt}
schedulers = {'wide': wide_sch, 'deepdense':deep_sch, 'deeptext':text_sch, 'deepimage': img_sch, 'deephead': head_sch}
# Now...we have used pretrained word embeddings, so you do not want to
# initialise these embeddings. However you might still want to initialise the
# other layers in the DeepText component. No probs, you can do that with the
# "pattern" parameter and your knowledge of regular expressions. Here we are
# telling the KaimingNormal initializer to NOT touch the parameters whose
# name contains the string word_embed.
initializers = {'wide': KaimingNormal, 'deepdense':KaimingNormal,
                'deeptext':KaimingNormal(pattern=r"^(?!.*word_embed).*$"),
                'deepimage':KaimingNormal}
# NOTE(review): these are the ImageNet stats in reversed channel order —
# presumably the image loader yields BGR arrays; confirm.
mean = [0.406, 0.456, 0.485] #BGR
std = [0.225, 0.224, 0.229] #BGR
transforms = [ToTensor, Normalize(mean=mean, std=std)]
callbacks = [LRHistory(n_epochs=10), EarlyStopping, ModelCheckpoint(filepath='model_weights/wd_out')]
# -
model.compile(method='regression', initializers=initializers, optimizers=optimizers,
              lr_schedulers=schedulers, callbacks=callbacks, transforms=transforms)
model.fit(X_wide=X_wide, X_deep=X_deep, X_text=X_text, X_img=X_images,
          target=target, n_epochs=1, batch_size=32, val_split=0.2)
# we have only run one epoch, but let's check that the LRHistory callback recorded the lr values for each group
model.lr_history
| examples/04_Regression_with_Images_and_Text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#take boxes determined by marisol from kathleens biforcation data and calculate average environmental information for them
#want to calculate average SST, SSS, u^2+v^2, and var(u^2+v^2)
#recaluclate spd, dir from u,v after averaging in subset routine
import xarray as xr
import numpy as np
from math import pi
import datetime as dt
import os
from os.path import exists
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from datetime import *; from dateutil.relativedelta import *
from scipy.interpolate import make_interp_spline, BSpline
from scipy.signal import savgol_filter
import sys
import geopandas as gpd
import shapefile
import numpy as np
import matplotlib.pyplot as plt
import geopandas as gpd
import matplotlib
from shapely.geometry import Polygon
import pyproj
from shapely.geometry import Point
import matplotlib.path as mpltPath
from shapely.geometry.multipolygon import MultiPolygon
import xarray as xr
import cartopy.crs as ccrs
from cartopy.examples.arrows import sample_data
sys.path.append('./subroutines/')
from shapefile_reading import explode_polygon
from shapefile_reading import get_pices_mask
# +
# Output/input directories for figures, time series and the LME shapefiles.
dir_figs = 'F:/data/NASA_biophysical/pices/figures/'
dir_timeseries = 'F:/data/NASA_biophysical/timeseries_data/'
dir_shp = 'F:/data/NASA_biophysical/pices/shapefiles/LME/'
data_dir = 'F:/data/NASA_biophysical/pices/shapefiles/LME/'
# +
#process 66_LME file into individual masks and one mask with all values.
# Target grid: 0.1 degree global lat/lon.
lats,lons=np.arange(-90,90,.1),np.arange(-180,180,.1)
shp_file_base = 'LME66.shp'
#create 2d grid from lats and lons
[lon2d,lat2d]=np.meshgrid(lons,lats)
#create a list of (lon, lat) coordinates of all points within the grid,
#used below for point-in-polygon tests
points=[]
for latit in range(0,lats.size):
    for lonit in range(0,lons.size):
        point=(lon2d[latit,lonit],lat2d[latit,lonit])
        points.append(point)
#turn into np array for later
points=np.array(points)
#read the shapefile with all 66 LME regions and assign its CRS
df = gpd.read_file(data_dir+shp_file_base)
crs_source = ('+proj=natearth +ellps=GRS80 +unit=m +lon_0=180')
df.crs = crs_source
df.plot(cmap='Set2', figsize=(10, 10),vmin=0,vmax=100);
#plt.legend()
plt.savefig('F:/data/NASA_biophysical/LME66.png')
indf = df.copy(deep=True)
outdf = gpd.GeoDataFrame(columns=indf.columns)
# mask_all accumulates every region's number on one grid.
mask_all=np.zeros_like(lon2d)
init_data=0
# NOTE(review): np.empty(...).astype('str') leaves garbage strings in unused
# slots; consider np.full(67, '', dtype=object) — confirm downstream use.
sv_names=np.empty(67).astype('str')
# One pass per LME region (OBJECTID 1..66): rasterise its polygons onto the
# grid and write a per-region netcdf mask.
for iob in range(1,67):
    mask=np.zeros_like(lon2d)
    subset = df.loc[df['OBJECTID']==iob]
    name_region = subset['LME_NAME'][iob-1].replace(" ", "_")
    num_region = subset['LME_NUMBER'][iob-1].astype('int')
    # Split multi-polygons into simple polygons before the inside test.
    Edf2=explode_polygon(subset)
    for index,row in Edf2.iterrows():
        #print('working on polygon', index)
        mypolygon=[]
        for pt in list(row['geometry'].exterior.coords):
            #print(index,', ',pt)
            mypolygon.append(pt)
        # Point-in-polygon test for every grid point, then reshape the flat
        # boolean result back to the 2-d grid.
        path=mpltPath.Path(mypolygon)
        inside=path.contains_points(points)
        inside=np.array(inside).reshape(lon2d.shape)
        i=np.where(inside == True)
        mask[i]=1
        mask_all[i]=num_region
    ds_mask_tem=xr.Dataset(data_vars={'region_mask': (('lat','lon'),mask) },coords={'lat':lats,'lon':lons})
    ds_mask_tem['region_mask'].attrs['LME_name'] = name_region
    # NOTE(review): mask_name already ends in '_mask', so the file is written
    # as '<N>_mask_mask.nc' — possibly unintended; confirm before renaming.
    mask_name = str(num_region)+'_mask'
    filename_out = data_dir+mask_name+'_mask.nc'
    ds_mask_tem.to_netcdf(filename_out)
    sv_names[num_region]=name_region
# NOTE(review): sv_names[num_region] is stored at array index num_region, but
# the 'region' coordinate runs 1..67, pairing index k with region k+1 — the
# LME_names variable looks shifted by one relative to region_mask; confirm.
ds_mask=xr.Dataset(data_vars={'region_mask': (('lat','lon'),mask_all),'LME_names':(('region'),sv_names )},coords={'lat':lats,'lon':lons,'region':np.arange(1,68,1)})
filename_out = data_dir+'all_mask.nc'
ds_mask.to_netcdf(filename_out)
# -
# Reload the combined mask and plot it on a global map.
filename_out = data_dir+'all_mask.nc'
ds_mask=xr.open_dataset(filename_out)
ds_mask.close()
# Replace the 0 "no region" fill with NaN so pcolormesh leaves it blank.
ds_mask2 = ds_mask.where(ds_mask!=0,np.nan)
import cartopy.crs as ccrs
from cartopy.examples.arrows import sample_data
plt.figure(figsize=(13,6.2))
ax = plt.subplot(111, projection=ccrs.PlateCarree(central_longitude=180))#,
#                                             min_latitude=-80, max_latitude=80))
ax.background_img(name='ne_shaded', resolution='low')
#ax.background_img(name='BM', resolution='high')
ax.set_extent([1,-1,-80,80])
mm = ax.pcolormesh(ds_mask.lon,\
                   ds_mask.lat,\
                   ds_mask2.region_mask,vmin=11, vmax=24, transform=ccrs.PlateCarree(),cmap='jet' )
ax.coastlines(resolution='10m');
plt.title('LME regions')
plt.colorbar(mm,ax=ax,shrink=.7,pad=.01,label='LME region number')
plt.savefig('F:/data/NASA_biophysical/pices/figures/LME_all_regions_mask_be.png', dpi=100)
# +
#process PICES file into individual masks and one mask with all values.
#some are in lat/lon and some are in a projected coordinate reference frame, so a check was added for that
dir_shp = 'F:/data/NASA_biophysical/pices/shapefiles/pices/'
data_dir = 'F:/data/NASA_biophysical/pices/shapefiles/pices/'
data_fig = 'F:/data/NASA_biophysical/pices/figures/'
# Same 0.1 degree global target grid as for the LME masks above.
lats,lons=np.arange(-90,90,.1),np.arange(-180,180,.1)
#shp_file_base = 'PICES_NPESR_Region_12.shp'
#create 2d grid from lats and lons
[lon2d,lat2d]=np.meshgrid(lons,lats)
#create a list of (lon, lat) coordinates of all points within the grid
points=[]
for latit in range(0,lats.size):
    for lonit in range(0,lons.size):
        point=(lon2d[latit,lonit],lat2d[latit,lonit])
        points.append(point)
#turn into np array for later
points=np.array(points)
#some masks wrap 180 which causes problems for the 'find inside' test.
#For these, instead of +lon_0=180, +lon_0=0 is used: the mask is created in
#that frame and then shifted back onto the -180..180 grid before saving.
mask_all=np.zeros_like(lon2d)
dir_data='F:/data/NASA_biophysical/pices/shapefiles/pices/'
# Walk the shapefile directory and rasterise each PICES region shapefile.
for root, dirs, files in os.walk(dir_data, topdown=False):
    # Skip hidden sub-directories.
    if root[len(dir_data):len(dir_data)+1]=='.':
        continue
    for name in files:
        if not name.endswith('.shp'):
            continue
        filename=os.path.join(root, name)
        print(name[:-4])
        # Region number is encoded in the last two filename characters.
        name_region = name[:-4]
        num_region = int(name[-6:-4])
        df = gpd.read_file(filename)
        # Regions crossing the dateline get the lon_0=0 projection + the
        # column-shift fix below (iwrap flag).
        if ((num_region == 13) | (num_region==14) | (num_region==23) | (num_region==24) | (num_region==15)):
            crs_source = ('+proj=natearth +ellps=GRS80 +unit=m +lon_0=0')
            iwrap=1
        else:
            crs_source = ('+proj=natearth +ellps=GRS80 +unit=m +lon_0=180')
            iwrap=0
        df.crs = crs_source
        #check if in projected coordinates or geocoordinates by inspecting a
        #polygon vertex: values inside -180..180 are taken as lat/lon already
        Edf2=explode_polygon(df)
        for index,row in Edf2.iterrows():
            #print('working on polygon', index)
            mypolygon=[]
            for pt in list(row['geometry'].exterior.coords):
                #print(index,', ',pt)
                mypolygon.append(pt)
        if (pt[0]>-180) & (pt[0]<180):
            df2 = df.copy(deep=True)
            print('nope: dont change coordinates')
        else:
            df2 = df.to_crs(epsg=4326)
            print('yup, change coordinates')
#        df2.plot(cmap='Set2', figsize=(10, 10),vmin=0,vmax=100);
        #plt.legend()
#        plt.savefig(data_fig + name_region+'.png')
        indf = df2.copy(deep=True)
        outdf = gpd.GeoDataFrame(columns=indf.columns)
        init_data=0
        mask=np.zeros_like(lon2d)
#        subset = df.loc[df['OBJECTID']==iob]
        # Rasterise: mark every grid point inside any polygon of this region.
        Edf2=explode_polygon(df2)
        for index,row in Edf2.iterrows():
            #print('working on polygon', index)
            mypolygon=[]
            for pt in list(row['geometry'].exterior.coords):
                mypolygon.append(pt)
            path=mpltPath.Path(mypolygon)
            inside=path.contains_points(points)
            #reshape the flat point-in-polygon result to the main grid array
            inside=np.array(inside).reshape(lon2d.shape)
            i=np.where(inside == True)
            mask[i]=1
#            mask_all[i]=num_region
        # Shift lon_0=0 masks back onto the -180..180 grid by swapping the
        # two 180-degree halves (1800 columns each).
        # NOTE(review): columns use 1-based starts (1:1800 / 1:1801), so
        # column 0 is never assigned — looks like an off-by-one; confirm.
        if (iwrap==1):
            mask2=np.zeros(mask.shape)
            mask2[:,1:1800]=mask[:,1801:3600]#,mask[:,1:1800]]
            mask2[:,1800:3600]=mask[:,1:1801]
        else:
            mask2=mask
        # Region 15 is exempted from the shift despite iwrap=1.
        if (num_region==15):
            mask2=mask
        mask_all= np.where(mask2==1,num_region,mask_all)
        # Write the per-region mask with its bounding box as attributes.
        ds_mask_tem=xr.Dataset(data_vars={'region_mask': (('lat','lon'),mask2) },coords={'lat':lats,'lon':lons})
        ds_masked = ds_mask_tem.where(ds_mask_tem['region_mask'] != 0)
        min_lat,max_lat = lat2d[np.isfinite(ds_masked.region_mask)].min(),lat2d[np.isfinite(ds_masked.region_mask)].max()
        min_lon,max_lon = lon2d[np.isfinite(ds_masked.region_mask)].min(),lon2d[np.isfinite(ds_masked.region_mask)].max()
        ds_mask_tem['region_mask'].attrs['PICES_name'] = name_region
        ds_mask_tem['region_mask'].attrs['min_lon'] = min_lon
        ds_mask_tem['region_mask'].attrs['max_lon'] = max_lon
        ds_mask_tem['region_mask'].attrs['min_lat'] = min_lat
        ds_mask_tem['region_mask'].attrs['max_lat'] = max_lat
        mask_name = str(num_region)
        filename_out = data_dir+mask_name+'_mask.nc'
        ds_mask_tem.to_netcdf(filename_out)
# Finally write the combined mask holding every region's number.
ds_mask_tem=xr.Dataset(data_vars={'region_mask': (('lat','lon'),mask_all) },coords={'lat':lats,'lon':lons})
ds_masked = ds_mask_tem.where(ds_mask_tem['region_mask'] != 0)
min_lat,max_lat = lat2d[np.isfinite(ds_masked.region_mask)].min(),lat2d[np.isfinite(ds_masked.region_mask)].max()
min_lon,max_lon = lon2d[np.isfinite(ds_masked.region_mask)].min(),lon2d[np.isfinite(ds_masked.region_mask)].max()
ds_mask_tem['region_mask'].attrs['PICES_name'] = 'all'
ds_mask_tem['region_mask'].attrs['min_lon'] = min_lon
ds_mask_tem['region_mask'].attrs['max_lon'] = max_lon
ds_mask_tem['region_mask'].attrs['min_lat'] = min_lat
ds_mask_tem['region_mask'].attrs['max_lat'] = max_lat
mask_name = 'PICES_all'
filename_out = data_dir+mask_name+'_mask.nc'
ds_mask_tem.to_netcdf(filename_out)
# -
# Blank out the 0 "no region" fill before plotting.
ds_mask_tem2 = ds_mask_tem.where(ds_mask_tem>0,np.nan)
ds_mask_tem2.region_mask.plot(cmap='jet',vmin=11,vmax=24)
import cartopy.crs as ccrs
from cartopy.examples.arrows import sample_data
plt.figure(figsize=(13,6.2))
# North Pacific view in a Mercator projection centred on the dateline.
ax = plt.subplot(111, projection=ccrs.Mercator(central_longitude=180,
                                               min_latitude=20, max_latitude=70))
#ax.background_img(name='ne_shaded', resolution='low')
ax.background_img(name='BM', resolution='high')
ax.set_extent([118,-110,20,70])
mm = ax.pcolormesh(ds_mask_tem2.lon,\
                   ds_mask_tem2.lat,\
                   ds_mask_tem2.region_mask,vmin=11, vmax=24, transform=ccrs.PlateCarree(),cmap='jet' )
ax.coastlines(resolution='10m');
plt.title('PICES regions')
plt.colorbar(mm,ax=ax,shrink=.7,pad=.01,label='PICES region number')
plt.savefig('F:/data/NASA_biophysical/pices/figures/PICES_all_regions_mask_be.png', dpi=100)
#example on how to mask data for just one region (here: region 15)
filename = 'F:/data/NASA_biophysical/pices/shapefiles/pices/PICES_all_mask.nc'
ds_pices = xr.open_dataset(filename)
ds_pices.close()
ds_mask = ds_pices.where(ds_pices==15,np.nan)
ds_mask.region_mask.plot(cmap='jet',vmin=11,vmax=24)
| Take LME GIS files and put into netcdf mask array.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="https://image.aladin.co.kr/product/16902/16/cover500/k202534274_1.jpg" width="200" height="200"><br>
#
# # Chapter 5 데이터 연결하기
#
# 이번에는 분석하기 좋은 데이터 집합을 만들기 위한 데이터 전처리를 알아보자.
#
# ## 5-1 분석하기 좋은 데이터
#
# ### 분석하기 좋은 데이터란?
# 분석하기 좋은 데이터란 데이터 집합을 분석하기 좋은 상태로 만들어 놓은 것을 의미한다. 데이터 분석 단계에서 데이터 정리는 아주 중요한데, 실제로 데이터 분석 작업의 70% 이상을 차지하고 있는 작업이 데이터 전처리이기 때문이다. 분석하기 좋은 데이터는 다음 조건을 만족해야 하며 이 조건을 만족하는 데이터를 Tidy Data라고 부른다.
#
# #### 깔끔한 데이터의 조건
#
# - 데이터 분석 목적에 맞는 데이터를 모아 새로운 Table을 만든다.
# - 측정한 값은 row를 구성해야 한다.
# - 변수는 열로 구성되어야 한다.
#
# ## 5-2 데이터 연결 기초
#
# ### 데이터 연결하기
#
# #### 1. Concatenating data with the concat method
# Use the concat method to concatenate data.
# +
import pandas as pd

df1 = pd.read_csv("data/concat_1.csv")
df2 = pd.read_csv("data/concat_2.csv")
df3 = pd.read_csv("data/concat_3.csv")
# -
# #### 2.
# Passing a list of DataFrames to concat returns a single concatenated DataFrame.
# concat stacks the frames vertically, top to bottom, in list order.
row_concat = pd.concat([df1, df2, df3])
row_concat
# #### 3.
# Extract a row from the concatenated DataFrame. concat keeps each frame's
# original index labels, so positional (iloc) indexing is used here.
# The following extracts the fourth row.
row_concat.iloc[3,]
# #### 4. Appending a Series to a DataFrame
new_row_series = pd.Series(['n1','n2', 'n3', 'n4'])
new_row_series
# #### 5.
# Concatenate a DataFrame and a Series with concat.
# (Note: the Series has no matching column labels, so it is appended as a new
# column and the result is padded with NaN — shown on purpose in the book.)
pd.concat([df1, new_row_series])
# ### Even a single row should be wrapped in a DataFrame before concatenating.
#
# ### Creating and concatenating a one-row DataFrame
#
# #### 1.
# A Series can be thought of as a one-row DataFrame. The following builds a
# one-row DataFrame with matching column labels and concatenates it to df1.
new_row_df = pd.DataFrame([['n1', 'n2', 'n3', 'n4']], columns=['A','B','C','D'])
new_row_df
# #### 2.
# concat can join two or more DataFrames at once. The book used
# DataFrame.append for the single-frame case, but append was deprecated in
# pandas 1.4 and removed in pandas 2.0, so pd.concat is used instead
# (same result: new_row_df stacked under df1).
pd.concat([df1, new_row_df])
# #### 3.
# A dict can be appended as a single row by first wrapping it in a one-row
# DataFrame. ignore_index=True renumbers the resulting index from 0.
data_dict = {'A': 'n1', 'B': 'n2', 'C': 'n3', 'D': 'n4'}
pd.concat([df1, pd.DataFrame([data_dict])], ignore_index=True)
# ### Concatenating data in various ways
#
# pandas provides several ways to concatenate data. The following examples look at them in turn.
#
# ### Concatenating data in various ways
#
# #### 1. Using the ignore_index argument
# With ignore_index=True the index of the concatenated DataFrame is renumbered from 0.
row_concat_i = pd.concat([df1, df2, df3], ignore_index=True)
row_concat_i
# #### 2. Concatenating column-wise
#
# Set concat's axis argument to 1 to concatenate the data column-wise (side by side).
col_concat = pd.concat([df1, df2, df3], axis=1)
col_concat
# #### 3.
# If the frames share a column name, selecting that name returns every matching column.
col_concat['A']
# #### 4.
# A new column can be added simply by assigning a list of values.
col_concat['new_col_list'] = ['n1', 'n2', 'n3', 'n4']
col_concat
# #### 5.
# Setting ignore_index=True renumbers the column labels, which avoids duplicated column names.
col_concat = pd.concat([df1, df2, df3], axis=1, ignore_index=True)
col_concat
# #### 6. Concatenating only common columns and common indexes
# +
df1.columns = ['A','B', 'C','D']
df2.columns = ['E','F', 'G','H']
df3.columns = ['A','C', 'F','H']
print(df1)
print(type(df1))
# -
print(df2)
print(type(df2))
print(df3)
print(type(df3))
# #### 7.
# Concatenating the three renamed DataFrames produces missing values; the next steps show how to avoid them.
# #### 8.
# Keeping only the columns the frames have in common avoids missing values.
# Pass join='inner' to keep common columns only. No column is shared by all
# three frames here, so the result is an empty DataFrame.
pd.concat([df1, df2, df3], join='inner')
# #### 9.
# Concatenate only the common columns of df1 and df3; only A and C are kept.
pd.concat([df1, df3], ignore_index=False, join='inner')
# #### 10.
# Now align the DataFrames on their row index. First reassign the indexes of df1, df2 and df3.
# +
df1.index = [0,1,2,3]
df2.index = [4,5,6,7]
df3.index = [0,2,5,7]
print(df1)
print(df2)
print(df3)
# -
# #### 11.
# Concatenating df1, df2 and df3 with axis=1 aligns on the index, so index
# labels missing from a frame produce missing values in the result.
col_concat = pd.concat([df1, df2, df3], axis=1)
col_concat
# #### 12.
# This time concatenate only the rows (index labels) that df1 and df3 share.
pd.concat([df1, df3], axis=1, join = 'inner')
# ### Outer joins and inner joins
#
# - Inner join:
#   connects the rows of two or more DataFrames that satisfy the join condition.
# - Outer join:
#   depending on which DataFrame is taken as the base, an outer join is a left, right or full outer join.
# ## 5-3 Wrapping up data concatenation
#
# pandas provides merge, a method dedicated to joining data. Let's see how to use it.
#
# ### Using the merge method
#
# #### 1.
# The following loads every data set needed for the weather information of
# particular sites. person holds the observers' names, site the observation
# locations, visited the observation dates and survey the weather readings.
person = pd.read_csv("data/survey_person.csv")
site = pd.read_csv("data/survey_site.csv")
survey = pd.read_csv("data/survey_survey.csv")
visited = pd.read_csv("data/survey_visited.csv")
person
site
visited
survey
# #### 2.
# Use only a subset of the visited DataFrame.
visited_subset = visited.loc[[0,2,6],]
visited_subset
# #### 3.
# merge performs an inner join by default. The DataFrame the method is called
# on is the left side, and the DataFrame passed as the first argument is the
# right side. left_on and right_on name the columns of the left and right
# DataFrames whose values must match; matching rows are joined, keyed on the
# left DataFrame.
o2o_merge = site.merge(visited_subset, left_on='name', right_on='site')
o2o_merge
# #### 4.
# The following joins the full site and visited DataFrames.
m2o_merge = site.merge(visited, left_on='name', right_on='site')
m2o_merge
# #### 5.
# The following joins person with survey, and visited with survey, using merge.
# +
ps = person.merge(survey, left_on = 'ident', right_on = 'person')
vs = visited.merge(survey, left_on = 'ident', right_on = 'taken')
ps
# -
vs
# #### 6.
# left_on and right_on accept multiple values: pass a list of column names.
# Here the ident, taken, quant and reading columns of ps are matched against
# the person, ident, quant and reading columns of vs to join the two frames.
ps_vs = ps.merge(vs, left_on=['ident', 'taken', 'quant', 'reading'], right_on = ['person', 'ident', 'quant', 'reading'])
ps_vs
# #### 7.
# In ps_vs, column names duplicated in both DataFrames are suffixed with _x
# and _y: _x marks columns from the left DataFrame, _y those from the right.
print(ps_vs.loc[0,])
| Chapter_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Using the Metadata Explorer
# How do we know specifically **where** and **when** data is available? Before we start any analysis, we can answer this question by verifying existing data on the [Digital Earth Africa Metadata Explorer](https://explorer.digitalearth.africa/). The Metadata Explorer can be found at https://explorer.digitalearth.africa/.
# ## Map or Explorer?
#
# The [Digital Earth Africa Metadata Explorer](https://explorer.digitalearth.africa/) and [Digital Earth Africa Map](https://maps.digitalearth.africa/) look similar, but they are designed for different purposes.
#
# Use the [Digital Earth Africa Map](https://maps.digitalearth.africa/) if you:
#
# * Want to see what the product or dataset looks like
#
# Use the [Digital Earth Africa Metadata Explorer](https://explorer.digitalearth.africa/) if you:
#
# * Want to know exactly where and when you can find data
# ## Open the Digital Earth Africa Metadata Explorer
# 1. Open [https://explorer.digitalearth.africa/](https://explorer.digitalearth.africa/). This will display the Metadata Explorer user interface.
# <img align="middle" src="../_static/web_services/explorer/explorer_ui1.png" alt="The DE Africa Metadata Explorer." width=600>
# The Digital Earth Africa Metadata Explorer has four main sections.
#
# 1. **Product selection:** This shows the currently-selected product. Click the selected product name to open the products dropdown menu.
# 2. **Time period:** This shows the time period for which the selected product is being displayed. Click the selected time period to open the time selection dropdown menu.
# 3. **Map display:** This shows where data is available, for the selected time and product. Blue shaded tiles indicate the presence of data.
# 4. **Product information:** The sidebar shows more information about the data for the selected time and product. For example, this includes how many datasets are selected, the name of the datasets in the product, and its coordinate reference system.
# ### Select the Landsat 8 product
# 1. Click the **product selection** bar to open the dropdown menu. Select **ls8_sr**. This selects the Landsat 8 product.
# <img align="middle" src="../_static/web_services/explorer/explorer_ls8.png" alt="Metadata Explorer Landsat 8." width=600>
# 2. Click the **time** bar to open the dropdown menu. Select **2018**. This will show all Landsat 8 datasets for 2018.
# <img align="middle" src="../_static/web_services/explorer/explorer_2018.png" alt="Metadata Explorer Landsat 8 2018." width=600>
# 3. Click the **all months** bar to open the dropdown menu. Select **August**. This will show all the Landsat 8 datasets for August 2018.
# <img align="middle" src="../_static/web_services/explorer/explorer_august.png" alt="Metadata Explorer Landsat 8 2018." width=600>
# 4. Click the **all days** bar to open the dropdown menu. Select **11th**. This will show all the Landsat 8 datasets for 11 August 2018.
# <img align="middle" src="../_static/web_services/explorer/explorer_11august2018.png" alt="Metadata Explorer Landsat 8 2018." width=600>
# 5. The **map display** will now show all the Landsat 8 datasets for 11 August 2018 as blue shaded boxes. Use the **+** button on the map to zoom in, and click and drag to pan the map.
# <img align="middle" src="../_static/web_services/explorer/explorer_zoom.png" alt="Metadata Explorer Landsat 8 2018." width=600>
# We can see that there is only data available for some African countries. Let's take a closer look at Tanzania.
# ### Zoom in on Tanzania
# Use the map's **+** button to zoom in on Africa. Click and drag to see Tanzania.
#
# <img align="middle" src="../_static/web_services/explorer/explorer_tanzania.png" alt="Metadata Explorer Landsat 8 2018." width=600>
# We can see the data for 11 August 2018 (blue shaded boxes) covers certain parts of Tanzania, which means areas within Tanzania are a suitable location to choose for data analysis for that day. For example, for that day (11 August 2018) we would not be able to do any Landsat 8 analysis over the city of Dodoma in Tanzania, as it does not have any data available for that time.
| web_services/explorer_guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="4DFrxkVDditH" outputId="f4d70991-d369-4a7e-d8a3-dcdb4c0c962c"
# !wget https://raw.githubusercontent.com/mln00b/eva6-assignments/main/sess10/sample_coco.csv
# + id="IXHM0WfhmRZd"
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="0FcJgJ6OeIiu" outputId="b7a9acfd-57fd-4de8-8f16-e7ad87a78eca"
# Load the sampled COCO annotations; each row describes one bounding box.
df = pd.read_csv("sample_coco.csv")
df.head()
# + id="u4gHZyo7esFA"
# Tally how many bounding boxes fall into each class id.
unique, counts = np.unique(df["class_id"], return_counts=True)
# + colab={"base_uri": "https://localhost:8080/"} id="vRMMpPMqmOqr" outputId="dbe042e0-0242-4f3d-a4a6-bcba722780ad"
print("Num Classes: ", len(unique))
# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="TKe75VNWmryR" outputId="88021fc0-d0e5-4aba-ff8a-fbf7cc9b440a"
# Visualize the class-frequency distribution.
plt.bar(unique, counts)
plt.title('Class Distribution')
plt.xlabel('Class Id')
plt.ylabel('Frequency')
# + colab={"base_uri": "https://localhost:8080/"} id="Znoqn7aRncQW" outputId="8dfa40a1-bb16-40b2-c53d-da65ecc8ee2d"
# Cluster the normalized box widths/heights with k-means to derive anchor
# boxes; the cluster centers for each k are collected in anchors_ls.
wh = df[["norm_bbox_w", "norm_bbox_h"]]
num_clusters = [3, 4, 5, 6]
anchors_ls = []
for num_cluster in num_clusters:  # FIX: the enumerate index was never used
    # Fixed random_state keeps the anchors reproducible across runs.
    kmeans = KMeans(n_clusters=num_cluster, init='k-means++', max_iter=300, n_init=10, random_state=0)
    # FIX: fit() is sufficient — the per-sample labels from fit_predict()
    # were assigned to `pred` but never used.
    kmeans.fit(wh)
    anchors = kmeans.cluster_centers_
    print("Anchors: ", anchors)
    anchors_ls.append(anchors)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="YqBCU0Woov_M" outputId="d37f54b0-6fd5-4cb9-cffc-affc3e9c40a4"
# Draw each k's anchor boxes as unfilled rectangles, one subplot per cluster count.
fig, axes = plt.subplots(len(num_clusters), 1, figsize=(5, 20))
for subplot_ax, k, k_anchors in zip(axes, num_clusters, anchors_ls):
    subplot_ax.set_title(f"{k} clusters")
    for w, h in k_anchors:
        # Anchors are centered at (0.5, 0.5) in normalized coordinates.
        subplot_ax.add_patch(matplotlib.patches.Rectangle(
            (-w / 2 + 0.5, -h / 2 + 0.5), w, h,
            fill=False))
plt.show()
# + id="lZNwB2Y1r3jw"
| sess10/A10_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
# default_exp models.widedeep
# -
# # WideDeep
# > A pytorch implementation of wide&deep model.
#
# Wide and Deep Learning Model, proposed by Google, 2016, is a DNN-Linear mixed model, which combines the strength of memorization and generalization. It's useful for generic large-scale regression and classification problems with sparse input features (e.g., categorical features with a large number of possible feature values). It has been used for Google App Store for their app recommendation.
#
# 
#
# To understand the concept of deep & wide recommendations, it’s best to think of it as two separate, but collaborating, engines. The wide model, often referred to in the literature as the linear model, memorizes users and their past product choices. Its inputs may consist simply of a user identifier and a product identifier, though other attributes relevant to the pattern (such as time of day) may also be incorporated.
#
# 
#
# The deep portion of the model, so named as it is a deep neural network, examines the generalizable attributes of a user and their product choices. From these, the model learns the broader characteristics that tend to favor users’ product selections.
#
# Together, the wide and deep submodels are trained on historical product selections by individual users to predict future product selections. The end result is a single model capable of calculating the probability with which a user will purchase a given item, given both memorized past choices and generalizations about a user’s preferences. These probabilities form the basis for user-specific product rankings, which can be used for making recommendations.
#
# The goal with wide and deep recommenders is to provide the same level of customer intimacy that, for example, our favorite barista does. This model uses explicit and implicit feedback to expand the considerations set for customers. Wide and deep recommenders go beyond simple weighted averaging of customer feedback found in some collaborative filters to balance what is understood about the individual with what is known about similar customers. If done properly, the recommendations make the customer feel understood and this should translate into greater value for both the customer and the business.
#
# The intuitive logic of the wide-and-deep recommender belies the complexity of its actual construction. Inputs must be defined separately for each of the wide and deep portions of the model and each must be trained in a coordinated manner to arrive at a single output, but tuned using optimizers specific to the nature of each submodel. Thankfully, the **[Tensorflow DNNLinearCombinedClassifier estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNLinearCombinedClassifier)** provides a pre-packaged architecture, greatly simplifying the assembly of the overall model.
#hide
from nbdev.showdoc import *
from fastcore.nb_imports import *
from fastcore.test import *
# ## v1
# +
#export
import torch
from torch import nn
from recohut.models.layers.embedding import EmbeddingLayer
from recohut.models.layers.common import MLP_Layer, LR_Layer
from recohut.models.bases.ctr import CTRModel
# -
#export
class WideDeep(CTRModel):
    """Wide & Deep CTR model (Cheng et al., 2016).

    Sums the logit of a linear ("wide", memorization) component and an MLP
    ("deep", generalization) component computed over shared feature
    embeddings, then applies the task's final activation.
    """

    def __init__(self,
                 feature_map,
                 model_id="WideDeep",
                 task="binary_classification",
                 learning_rate=1e-3,
                 embedding_initializer="torch.nn.init.normal_(std=1e-4)",
                 embedding_dim=10,
                 hidden_units=[64, 64, 64],  # NOTE(review): mutable default — safe only if never mutated downstream; confirm
                 hidden_activations="ReLU",
                 net_dropout=0,
                 batch_norm=False,
                 **kwargs):
        super(WideDeep, self).__init__(feature_map,
                                       model_id=model_id,
                                       **kwargs)
        # Shared embedding table feeding the deep tower.
        self.embedding_layer = EmbeddingLayer(feature_map, embedding_dim)
        # Wide part: linear layer over the raw inputs; no activation/bias here
        # because the logits are summed with the deep part before activation.
        self.lr_layer = LR_Layer(feature_map, output_activation=None, use_bias=False)
        # Deep part: MLP over the flattened concatenation of all field embeddings.
        self.dnn = MLP_Layer(input_dim=embedding_dim * feature_map.num_fields,
                             output_dim=1,
                             hidden_units=hidden_units,
                             hidden_activations=hidden_activations,
                             output_activation=None,
                             dropout_rates=net_dropout,
                             batch_norm=batch_norm,
                             use_bias=True)
        self.output_activation = self.get_final_activation(task)
        self.init_weights(embedding_initializer=embedding_initializer)

    def forward(self, inputs):
        # Embeddings for the deep tower; the wide layer consumes inputs directly.
        feature_emb = self.embedding_layer(inputs)
        y_pred = self.lr_layer(inputs)                        # wide logit
        y_pred += self.dnn(feature_emb.flatten(start_dim=1))  # + deep logit
        if self.output_activation is not None:
            y_pred = self.output_activation(y_pred)
        return y_pred
# Example
# NOTE(review): `ds` (the prepared dataset) and `pl_trainer` are not defined in
# this notebook chunk — presumably supplied by an earlier recohut setup cell;
# confirm before running this cell in isolation.
params = {'model_id': 'WideDeep',
          'data_dir': '/content/data',
          'model_root': './checkpoints/',
          'learning_rate': 1e-3,
          'optimizer': 'adamw',
          'task': 'binary_classification',
          'loss': 'binary_crossentropy',
          'metrics': ['logloss', 'AUC'],
          'embedding_dim': 10,
          'hidden_units': [300, 300, 300],
          'hidden_activations': 'relu',
          'net_regularizer': 0,
          'embedding_regularizer': 0,
          'batch_norm': False,
          'net_dropout': 0,
          'batch_size': 64,
          'epochs': 3,
          'shuffle': True,
          'seed': 2019,
          'use_hdf5': True,
          'workers': 1,
          'verbose': 0}

model = WideDeep(ds.dataset.feature_map, **params)

pl_trainer(model, ds, max_epochs=5)
# ## v2
# +
#export
import torch
from recohut.models.layers.common import FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
# -
#export
class WideDeep_v2(torch.nn.Module):
    """
    A pytorch implementation of wide and deep learning.

    Reference:
        HT Cheng, et al. Wide & Deep Learning for Recommender Systems, 2016.
    """

    def __init__(self, field_dims, embed_dim, mlp_dims, dropout):
        super().__init__()
        # Wide (linear) component over the raw field indices.
        self.linear = FeaturesLinear(field_dims)
        # Shared embeddings feeding the deep MLP component.
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.embed_output_dim = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        wide_logit = self.linear(x)
        flat_embeddings = self.embedding(x).view(-1, self.embed_output_dim)
        deep_logit = self.mlp(flat_embeddings)
        combined = wide_logit + deep_logit
        return torch.sigmoid(combined.squeeze(1))
# > **References:-**
# - HT Cheng, et al. Wide & Deep Learning for Recommender Systems, 2016. https://arxiv.org/abs/1606.07792.
# - https://github.com/rixwew/pytorch-fm/blob/master/torchfm/model/wd.py
#hide
# %reload_ext watermark
# %watermark -a "Sparsh A." -m -iv -u -t -d -p recohut
| nbs/models/models.widedeep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Meraki Python SDK Demo: Uplink Preference Backup
#
# *This notebook demonstrates using the Meraki Python SDK to back up Internet (WAN) and VPN traffic uplink preferences, as well as custom performance classes, to a single Excel file. If you have hundreds of WAN/VPN uplink preferences, they can be a challenge to manipulate. This demo seeks to prove how using the Meraki API and Python SDK can substantially streamline such complex deployments.*
#
# If an admin has any Internet traffic or VPN traffic uplink preferences, or custom performance classes, this tool will download them to an Excel file. This is a more advanced demo, intended for intermediate to advanced Python programmers, but has been documented thoroughly with the intention that even a determined Python beginner can understand the concepts involved.
#
# ---
#
# >NB: Throughout this notebook, we will print values for demonstration purposes. In a production Python script, the coder would likely remove these print statements to clean up the console output.
# In this first cell, we import the required `meraki` and `os` modules, and open the Dashboard API connection using the SDK. We also import `openpyxl` for working with Excel files, and `netaddr` for working with IP addresses.
# + tags=[]
# Install the relevant modules. If you are using a local editor (e.g. VS Code, rather than Colab) you can run these commands, without the preceding %, via a terminal. NB: Run `pip install meraki==` to find the latest version of the Meraki SDK.
# %pip install meraki
# %pip install openpyxl

# If you are using Google Colab, please ensure you have set up your environment variables as linked above, then delete the two lines of ''' to activate the following code:
'''
%pip install colab-env -qU
import colab_env
'''

# The Meraki SDK
import meraki
# The built-in OS module, to read environment variables
import os
# The openpyxl module, to manipulate Excel files
import openpyxl
# The datetime module, to generate timestamps
import datetime
# We're also going to import Python's built-in JSON module, but only to make the console output pretty. In production, you wouldn't need any of the printing calls at all, nor this import!
import json

# Setting the API key this way, and storing it in the env variables, lets us keep the sensitive API key out of the script itself.
# The meraki.DashboardAPI() method does not require explicitly passing this value; it will check the environment for a variable
# called 'MERAKI_DASHBOARD_API_KEY' on its own. In this case, API_KEY is shown simply as a reference to where that information
# is stored.
API_KEY = os.getenv('MERAKI_DASHBOARD_API_KEY')

# Initialize the Dashboard connection.
dashboard = meraki.DashboardAPI()

# We'll also create a few reusable strings for later interactivity.
CONFIRM_STRING = 'OK, are you sure you want to do this? This script does not have an "undo" feature.'
CANCEL_STRING = 'OK. Operation canceled.'
WORKING_STRING = 'Working...'
COMPLETE_STRING = 'Operation complete.'
NETWORK_SELECTED_STRING = 'Network selected.'

# Some of the parameters we'll work with are optional. This string defines what value will be put into a cell corresponding with a parameter that is not set on that rule.
NOT_APPLICABLE_STRING = 'N/A'
# -
# Let's make a basic pretty print formatter, `printj()`. It will make reading the JSON later a lot easier, but won't be necessary in production scripts.
def printj(ugly_json_object):
    """Pretty-print a JSON-serialisable object to the console.

    Purely a readability aid for the demo; returns None.
    """
    formatted = json.dumps(ugly_json_object, indent=2, sort_keys=False)
    print(formatted)
# ## Introducing a Python class
#
# To streamline user interaction in a re-usable way, we'll create a class called UserChoice. Think of classes like a superset of functions, where you can store related functions and variables. Later, we'll create an instance of this class to prompt the user for input, and validate that input.
#
# It may look complex, but it will streamline our code later, and is a great example of code-reuse in Python. For more information on classes, [click here](https://docs.python.org/3/tutorial/classes.html).
class UserChoice:
    """A re-usable CLI option prompt.

    Presents a numbered list of options, asks the user (via input()) to pick
    one, and exposes the choice through .id and .name. If options_list is
    empty, no_valid_options_message is printed and .id/.name are None.
    """

    def __init__(self, options_list=None, subject_of_choice='available options', single_option_noun='option', id_parameter='id', name_parameter='name', action_verb='choose', no_valid_options_message='no valid options'):
        # options_list is a list of dictionaries containing attributes id_parameter and name_parameter.
        # FIX: the original default was a shared mutable list literal; None avoids that pitfall
        # while keeping the caller-visible behavior (no argument -> empty list) unchanged.
        self.options_list = [] if options_list is None else options_list
        # subject_of_choice names the subject of the user's choice (typically a plural noun).
        self.subject_of_choice = subject_of_choice
        # single_option_noun is the singular noun corresponding to subject_of_choice.
        self.single_option_noun = single_option_noun
        # id_parameter names the sub-parameter serving as the unique ID of each option.
        self.id_parameter = id_parameter
        # name_parameter names the sub-parameter serving as the display name of each option.
        self.name_parameter = name_parameter
        # action_verb is the verb of the user's action, e.g. "choose".
        self.action_verb = action_verb
        # no_valid_options_message is shown when options_list is empty.
        self.no_valid_options_message = no_valid_options_message

        # Confirm there are options in the list
        if len(self.options_list):
            option_count = len(self.options_list)
            print(f'We found {option_count} {self.subject_of_choice}:')
            # Label each option and show the user their choices.
            for option_index, option in enumerate(self.options_list):
                print(f"{option_index}. {option[self.id_parameter]} with name {option[self.name_parameter]}")
            print(f'Which {self.single_option_noun} would you like to {self.action_verb}?')
            self.active_option = int(input(f'Choose 0-{option_count-1}:'))
            # Ask until the user provides valid input.
            while self.active_option not in range(option_count):
                print(f'{self.active_option} is not a valid choice. Which {self.single_option_noun} would you like to {self.action_verb}?')
                self.active_option = int(input(f'Choose 0-{option_count-1}:'))
            print(f'Your {self.single_option_noun} is {self.options_list[self.active_option][self.name_parameter]}.')
            self.id = self.options_list[self.active_option][self.id_parameter]
            self.name = self.options_list[self.active_option][self.name_parameter]
        else:
            # BUGFIX: no_valid_options_message was accepted but never used; surface it so
            # the user learns why no prompt appeared. Also define .id/.name so later
            # attribute access fails predictably (None) instead of raising AttributeError.
            print(self.no_valid_options_message)
            self.active_option = None
            self.id = None
            self.name = None
# ## Pulling organization and network IDs
#
# Most API calls require passing values for the organization ID and/or the network ID. In the below cell, we fetch a list of the organizations the API key can access, then pick the first org in the list, and the first network in that organization, to use for later operations. You could re-use this code presuming your API key only has access to a single organization, and that organization only contains a single network. Otherwise, you would want to review the organizations object declared and printed here to review its contents. As a side exercise, perhaps you could use the class that we defined above, `UserChoice`, to let the user decide which organization to use!
# + tags=[]
# Let's make it easier to call this data later.
# getOrganizations will return all orgs to which the supplied API key has access.
organizations = dashboard.organizations.getOrganizations()

print('Organizations:')
printj(organizations)

# This example presumes we want to use the first organization as the scope for later operations.
# NOTE(review): assumes the API key can see at least one organization — an empty list raises IndexError here.
firstOrganizationId = organizations[0]['id']
firstOrganizationName = organizations[0]['name']

# Print a blank line for legibility before showing the firstOrganizationId
print('')
print(f'The firstOrganizationId is {firstOrganizationId}, and its name is {firstOrganizationName}.')
# -

# Let's see what networks are in the chosen organization.

# + tags=[]
# Fetch every network belonging to the chosen organization.
networks = dashboard.organizations.getOrganizationNetworks(organizationId=firstOrganizationId)
print('Networks:')
printj(networks)
# -
# ## Identifying networks with MX appliances
#
# Now that we've got the organization and network values figured out, we can get to the task at hand:
#
# > Download any existing uplink selection preferences.
# > Optionally, upload a replacement list of preferences.
#
# We can only run this on networks that have appliance devices, so we have a `for` loop that checks each entry in the `networks` list. If the network's `productTypes` value contains `appliance`, then we'll ask the user to pick one, then pull the uplink selection rules from it.
# + tags=[]
# Keep only the networks whose product types include an appliance (MX).
networks_with_appliances = [
    network for network in networks
    if 'appliance' in network['productTypes']
]

# Message shown by UserChoice if no candidate networks exist.
NO_VALID_OPTIONS_MESSAGE = 'There are no networks with appliances in this organization. Please supply an API token that has access to an organization with an appliance in one of its networks.'
# -
# ## Prompt the user to choose a network
#
# Now let's ask the user which network they'd like to use. Remember that `UserChoice` class we created earlier? We'll call that and supply parameters defining what the user can choose. Notice how, having defined the class earlier, we can re-use it with only a single declaration.
# + tags=[]
# If any are found, let the user choose a network. Otherwise, let the user know that none were found. The logic for this class is defined in a cell above.
# The chosen network is then available via network_choice.id and network_choice.name.
network_choice = UserChoice(
    options_list=networks_with_appliances,
    subject_of_choice='networks with appliances',
    single_option_noun='network',
    no_valid_options_message=NO_VALID_OPTIONS_MESSAGE
)
# -
# ## Pulling uplink preferences for the network
#
# Let's pull the uplink preferences via the API using the SDK's `getNetworkApplianceTrafficShapingUplinkSelection` and `getNetworkApplianceTrafficShapingCustomPerformanceClasses` methods. The associated endpoints will return all of the relevant information for the chosen network. We'll create an Excel file of it later.
#
# Let's define this operation as a function so we can re-use it later.
# + tags=[]
def getNetworkClassesAndPrefs(*, networkId):
    """Fetch the uplink-selection preferences (WAN and VPN) and the custom
    performance classes for one network, bundled in a single dict.
    """
    # Both calls go through the module-level `dashboard` API client.
    return {
        'uplinkPrefs': dashboard.appliance.getNetworkApplianceTrafficShapingUplinkSelection(networkId=networkId),
        'customPerformanceClasses': dashboard.appliance.getNetworkApplianceTrafficShapingCustomPerformanceClasses(networkId=networkId)
    }


current_classes_and_prefs = getNetworkClassesAndPrefs(networkId = network_choice.id)
# -
# The above returns all uplink preferences (e.g. Internet traffic and VPN traffic). We can review the output of each individually if we'd like to see the raw data:
# + tags=[]
# Each of the next cells dumps one section of the fetched data for visual inspection.
# We can review the wanTrafficUplinkPreferences in plain text.
printj(current_classes_and_prefs['uplinkPrefs']['wanTrafficUplinkPreferences'])
# + tags=[]
# We can also review the vpnTrafficUplinkPreferences in plain text.
printj(current_classes_and_prefs['uplinkPrefs']['vpnTrafficUplinkPreferences'])
# + tags=[]
# We can also review the customPerformanceClasses in plain text.
printj(current_classes_and_prefs['customPerformanceClasses'])
# -
# ## Let's make a backup
#
# Before we start modifying any of these rules or classes, it'd be good to make a backup, don't you think? We'll use `openpyxl` to create a new Excel workbook with two worksheets: one for Internet traffic preferences and the other for VPN traffic preferences. If you don't care to use Excel, you could instead create a CSV or potentially any other type of file you like, using an appropriate Python module. Consider a self-paced excercise where you save these values into some other file format.
#
# We'll start with the creation of the workbook. We'll make this a function for easy calling later.
def create_workbook():
    """Build the backup workbook: three worksheets (WAN uplink preferences,
    VPN uplink preferences, custom performance classes), each seeded with a
    bold title row, and return the openpyxl Workbook.
    """

    def _append_bold_title(worksheet, headers):
        # Write the header row, then embolden every populated cell
        # (only the title row exists at this point).
        worksheet.append(headers)
        for row in worksheet.iter_rows():
            for cell in row:
                cell.font = openpyxl.styles.Font(bold=True)

    new_workbook = openpyxl.Workbook()

    # The workbook's default sheet becomes the WAN uplink-preferences sheet.
    wan_sheet = new_workbook.active
    wan_sheet.title = 'wanUplinkPreferences'
    _append_bold_title(wan_sheet, (
        'Protocol',
        'Source',
        'Src port',
        'Destination',
        'Dst port',
        'Preferred uplink'
    ))

    # Separate worksheet for the VPN uplink preferences.
    vpn_sheet = new_workbook.create_sheet(title='vpnUplinkPreferences')
    _append_bold_title(vpn_sheet, (
        'Type',
        'Protocol or App ID',
        'Source or App Name',
        'Src port',
        'Destination',
        'Dst port',
        'Preferred uplink',
        'Failover criterion',
        'Performance class type',
        'Performance class name',
        'Performance class ID'
    ))

    # Separate worksheet for the custom performance classes.
    classes_sheet = new_workbook.create_sheet(title='customPerformanceClasses')
    _append_bold_title(classes_sheet, (
        'ID',
        'Name',
        'Max Latency',
        'Max Jitter',
        'Max Loss Percentage'
    ))

    return new_workbook
# ### Function to add custom performance classes to a workbook
#
# VPN uplink prefs might reference performance classes. Accordingly, we need to back those up as well.
def add_custom_performance_classes_to_workbook(workbook):
    """Append every custom performance class to the 'customPerformanceClasses' worksheet.

    Class definitions are read from the module-level current_classes_and_prefs
    dict. The (mutated) workbook is returned so calls can be chained.
    """
    # Target the worksheet that create_workbook() prepared for these classes.
    classes_sheet = workbook['customPerformanceClasses']
    # Track how many classes we add so the user can see progress.
    added = 0
    for perf_class in current_classes_and_prefs['customPerformanceClasses']:
        # Column order must match the title row: ID, Name, Max Latency,
        # Max Jitter, Max Loss Percentage.
        classes_sheet.append((
            perf_class['customPerformanceClassId'],
            perf_class['name'],
            perf_class['maxLatency'],
            perf_class['maxJitter'],
            perf_class['maxLossPercentage'],
        ))
        added += 1
    print(f'Added {added} performance classes to customPerformanceClasses.')
    return workbook
# ### Function to add WAN preferences to a workbook
#
# Transposing the settings into a two-dimensional table doesn't require anything too fancy. We simply iterate through the rules in the `uplinkPrefs['wanTrafficUplinkPreferences']` list, and pull out the relevant information for each key-value pair. We'll also make this a function so we can call it later.
# + tags=[]
def add_wan_prefs_to_workbook(workbook):
    """Append every WAN traffic uplink preference rule to 'wanUplinkPreferences'.

    Rules are read from the module-level current_classes_and_prefs dict.
    The (mutated) workbook is returned so calls can be chained.
    """
    sheet = workbook['wanUplinkPreferences']
    # Track how many rules we add so the user can see progress.
    rules_added = 0
    for rule in current_classes_and_prefs['uplinkPrefs']['wanTrafficUplinkPreferences']:
        value = rule['trafficFilters'][0]['value']
        protocol = value['protocol']
        # When the protocol is 'any', the API omits the port keys entirely,
        # so substitute a literal 'any' rather than leaving the cells blank.
        if protocol == 'any':
            src_port = 'any'
            dst_port = 'any'
        else:
            src_port = value['source']['port']
            dst_port = value['destination']['port']
        # Column order must match the title row added by create_workbook().
        sheet.append((
            protocol,
            value['source']['cidr'],
            src_port,
            value['destination']['cidr'],
            dst_port,
            rule['preferredUplink'],
        ))
        rules_added += 1
    print(f'Added {rules_added} rules to wanUplinkPreferences.')
    return workbook
# -
# ### Function to add VPN preferences to a workbook
#
# We'll do almost the exact same for the VPN traffic uplink preferences, but since there are more parameters available here, we employ a bit more `if`/`else` logic to handle the varied key structures.
def add_vpn_prefs_to_workbook(workbook):
    """Append every VPN traffic uplink preference rule to 'vpnUplinkPreferences'.

    Rules are read from the module-level current_classes_and_prefs dict.
    Application-type rules and port-less protocols ('any'/'icmp') carry fewer
    keys in the API response, so the missing cells are filled with 'any' or
    NOT_APPLICABLE_STRING as appropriate. Returns the workbook for chaining.
    """
    # We'll specify that the active worksheet is our vpn_prefs_worksheet
    vpn_prefs_worksheet = workbook['vpnUplinkPreferences']
    # We'll also count the number of rules to help the user know that it's working.
    rule_count = 0
    # Let's add all the vpnTrafficUplinkPreferences to the VPN worksheet
    for rule in current_classes_and_prefs['uplinkPrefs']['vpnTrafficUplinkPreferences']:
        rule_preferred_uplink = rule['preferredUplink']
        rule_type = rule['trafficFilters'][0]['type']
        # Application rules have different parameters. This checks, and assigns values accordingly.
        if 'application' in rule_type:
            rule_protocol = rule['trafficFilters'][0]['value']['id']
            rule_source = rule['trafficFilters'][0]['value']['name']
            rule_src_port = NOT_APPLICABLE_STRING
            rule_destination = NOT_APPLICABLE_STRING
            rule_dst_port = NOT_APPLICABLE_STRING
        else:
            rule_protocol = rule['trafficFilters'][0]['value']['protocol']
            rule_source = rule['trafficFilters'][0]['value']['source']['cidr']
            rule_destination = rule['trafficFilters'][0]['value']['destination']['cidr']
            # An 'any' or 'icmp' protocol removes the need for port numbers, so
            # the API omits them; fill those cells explicitly instead of
            # leaving them blank.
            if rule_protocol == 'any':
                rule_src_port = 'any'
                rule_dst_port = 'any'
            elif rule_protocol == 'icmp':
                rule_src_port = NOT_APPLICABLE_STRING
                rule_dst_port = NOT_APPLICABLE_STRING
            else:
                rule_src_port = rule['trafficFilters'][0]['value']['source']['port']
                rule_dst_port = rule['trafficFilters'][0]['value']['destination']['port']
        # failOverCriterion and performanceClass are optional parameters, so we
        # check for their presence before pulling their values.
        if rule.get('failOverCriterion', False):
            rule_failover_criterion = rule['failOverCriterion']
        else:  # No failOverCriterion set
            rule_failover_criterion = NOT_APPLICABLE_STRING
        # Check if the rule has performanceClass set
        if rule.get('performanceClass', False):
            rule_performance_class_type = rule['performanceClass']['type']
            # 'builtin' classes are identified by name; 'custom' classes by ID.
            if rule_performance_class_type == 'builtin':
                rule_performance_class_id = NOT_APPLICABLE_STRING
                rule_performance_class_name = rule['performanceClass']['builtinPerformanceClassName']
            else:
                rule_performance_class_id = rule['performanceClass']['customPerformanceClassId']
                # Default the name before searching: if the ID lookup below finds
                # no match we report NOT_APPLICABLE_STRING instead of raising a
                # NameError (first iteration) or carrying over a stale name from
                # a previous loop iteration (latent bug in the original).
                rule_performance_class_name = NOT_APPLICABLE_STRING
                # Search current_classes_and_prefs['customPerformanceClasses'] for
                # the class with that ID, then pull the corresponding name.
                for performance_class in current_classes_and_prefs['customPerformanceClasses']:
                    if rule_performance_class_id == performance_class['customPerformanceClassId']:
                        rule_performance_class_name = performance_class['name']
        # Else, there's no performanceClass set, so we'll set these values accordingly.
        else:
            rule_performance_class_type = NOT_APPLICABLE_STRING
            rule_performance_class_name = NOT_APPLICABLE_STRING
            rule_performance_class_id = NOT_APPLICABLE_STRING
        # We assemble the parameters into a tuple that will represent a row.
        rule_row = (
            rule_type,
            rule_protocol,
            rule_source,
            rule_src_port,
            rule_destination,
            rule_dst_port,
            rule_preferred_uplink,
            rule_failover_criterion,
            rule_performance_class_type,
            rule_performance_class_name,
            rule_performance_class_id
        )
        # We then add that row to the worksheet.
        vpn_prefs_worksheet.append(rule_row)
        # increase the rule_count
        rule_count += 1
    # Fixed: this message previously (incorrectly) said 'wanUplinkPreferences'.
    print(f'Added {rule_count} rules to vpnUplinkPreferences.')
    return(workbook)
# ### Function to save a workbook
#
# This function takes a single workbook as an argument, and saves it.
# Function that saves a workbook
def save_workbook(workbook):
    """Save the workbook to disk under a timestamped filename.

    Uses an explicit strftime format so the filename contains no spaces,
    colons, or microsecond dots — str(datetime.now()) (used previously)
    embeds all three, and colons are illegal in Windows filenames.
    """
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
    downloaded_rules_workbook_filename = f'downloaded_rules_workbook_{timestamp}.xlsx'
    workbook.save(downloaded_rules_workbook_filename)
    print(f'Saved {downloaded_rules_workbook_filename}.')
# ### Build and save the workbook
#
# Now that we've defined the functions that will build the workbook object in Python and save it to a file, we need to run them.
# + tags=[]
# Build the workbook skeleton (three titled worksheets with bold header rows),
# populate each worksheet from the module-level current_classes_and_prefs data,
# then write the result to disk. Each add_* function returns the workbook, but
# the object is mutated in place, so the return values need not be captured.
uplink_prefs_workbook = create_workbook()
add_custom_performance_classes_to_workbook(uplink_prefs_workbook)
add_wan_prefs_to_workbook(uplink_prefs_workbook)
add_vpn_prefs_to_workbook(uplink_prefs_workbook)
save_workbook(uplink_prefs_workbook)
# -
# # Final thoughts
#
# Hopefully this was a useful deep dive into Python programming and interacting with the Meraki SDK and Excel workbooks. We tackled a problem that is tough to solve in the Dashboard GUI and showed how it can be done very quickly via API and the Python SDK.
#
# But what if we want to RESTORE that backup? Well, that's the next challenge! [Return to the notebooks folder](https://github.com/meraki/dashboard-api-python/tree/master/notebooks) for an example notebook that can restore such a backup to the Dashboard.
#
# Here we used Excel workbooks, but you can imagine that there are all types of data structures that might be used instead of Excel workbooks, e.g. CSVs, plain text, YAML, XML, LibreOffice files or others, and with the right code you can use those instead of Excel.
#
# ## Further learning
#
# [Meraki Interactive API Docs](https://developer.cisco.com/meraki/api-v1/#!overview): The official (and interactive!) Meraki API and SDK documentation repository on DevNet.
| notebooks/merakiUplinkPreferenceBackup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# https://www.tensorflow.org/tutorials/text/word_embeddings
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# +
vocab_size = 1000
embedding_size = 5
embedding_layer = layers.Embedding(vocab_size, embedding_size)
# -
result = embedding_layer(tf.constant([1, 2, 3]))
result.numpy()
result.shape # (samples, sequence_length, embedding_size)
# ### Sideroad - loading raw text into a word vocabulary
FILE_PATHS = ["b:/!DATASETS/oral2013/transcripts_debug/labels.txt",
"b:/!DATASETS/PDTSC/transcripts_debug/labels.txt"]
ENCODING = "windows-1250"
def labeler(example, index):
    """Pair *example* with its dataset index, cast to an int64 label tensor."""
    label = tf.cast(index, tf.int64)
    return example, label
# Build one labeled TextLineDataset per transcript file: every line from
# file i is paired with label i.
labeled_data_sets = []
for i, file_path in enumerate(FILE_PATHS):
    lines_dataset = tf.data.TextLineDataset(file_path)
    # Bind the current value of i as a default argument. A plain closure over
    # the loop variable is the classic Python late-binding pitfall; the
    # original only worked because Dataset.map traces the lambda immediately.
    labeled_dataset = lines_dataset.map(lambda ex, i=i: labeler(ex, i))
    labeled_data_sets.append(labeled_dataset)
# Sanity check: show the first few raw lines of the first dataset.
for ex, lab in labeled_data_sets[0].take(5):
    print(ex.numpy().decode(ENCODING))
# +
BUFFER_SIZE = 50000
BATCH_SIZE = 64
TAKE_SIZE = 5000
all_labeled_data = labeled_data_sets[0]
for labeled_dataset in labeled_data_sets[1:]:
all_labeled_data = all_labeled_data.concatenate(labeled_dataset)
all_labeled_data = all_labeled_data.shuffle(
BUFFER_SIZE, reshuffle_each_iteration=False)
# -
for ex, lab in all_labeled_data.take(5):
print(ex.numpy().decode(ENCODING))
# ## Build a vocabulary
# +
tokenizer = tfds.features.text.Tokenizer()
# Collect the set of unique tokens seen across every labeled line. Each text
# tensor is decoded with the file encoding before tokenizing.
vocabulary_set = set()
for text_tensor, _ in all_labeled_data:
    some_tokens = tokenizer.tokenize(text_tensor.numpy().decode(ENCODING))
    vocabulary_set.update(some_tokens)
# Rebinds the earlier demo value of vocab_size to the real vocabulary size.
vocab_size = len(vocabulary_set)
vocab_size
# -
# ## Encode examples
encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)
example_text = next(iter(all_labeled_data))[0].numpy().decode(ENCODING)
print(example_text)
encoded_example = encoder.encode(example_text)
print(encoded_example)
# ## Encode the dataset
# +
def encode(text_tensor, label):
    """Decode the raw text tensor and map it to token IDs via the module-level encoder."""
    text = text_tensor.numpy().decode(ENCODING)
    return encoder.encode(text), label
def encode_map_fn(text, label):
    """Adapter so the eager-only encode() can run inside Dataset.map.

    tf.py_function wraps the plain-Python encode() for graph execution.
    """
    # py_func doesn't set the shape of the returned tensors.
    encoded_text, label = tf.py_function(encode,
                                         inp=[text, label],
                                         Tout=(tf.int64, tf.int64))
    # Restore the static shape info lost by py_function: a 1-D token
    # sequence of unknown length and a scalar label.
    encoded_text.set_shape([None])
    label.set_shape([])
    return encoded_text, label
# -
all_encoded_data = all_labeled_data.map(encode_map_fn)
for ex, lab in all_encoded_data.take(1):
print(ex, lab)
| dev/embedding-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Understanding Types and Tags
# Using Woodwork effectively requires a good understanding of physical types, logical types, and semantic tags, all concepts that are core to Woodwork. This guide provides a detailed overview of types and tags, as well as how to work with them.
# ## Definitions of Types and Tags
#
# Woodwork has been designed to allow users to easily specify additional typing information for a DataFrame while providing the ability to interface with the data based on the typing information. Because a single DataFrame might store various types of data like numbers, text, or dates in different columns, the additional information is defined on a per-column basis.
#
# There are 3 main ways that Woodwork stores additional information about user data:
#
# * Physical Type: defines how the data is stored on disk or in memory.
# * Logical Type: defines how the data should be parsed or interpreted.
# * Semantic Tag(s): provides additional data about the meaning of the data or how it should be used.
# ### Physical Types
# Physical types define how the data is stored on disk or in memory. You might also see the physical type for a column referred to as the column’s `dtype`.
#
# For example, typical Pandas dtypes often used include `object`, `int64`, `float64` and `datetime64[ns]`, though there are many more. In Woodwork, there are 10 different physical types that are used, each corresponding to a Pandas dtype. When Woodwork is initialized on a DataFrame, the dtype of the underlying data is converted to one of these values, if it isn't already one of these types:
#
# * `bool`
# * `boolean`
# * `category`
# * `datetime64[ns]`
# * `float64`
# * `int64`
# * `Int64`
# * `object`
# * `string`
# * `timedelta64[ns]`
#
# The physical type conversion is done based on the `LogicalType` that has been specified or inferred for a given column.
#
# When using Woodwork with a Koalas DataFrame, the physical types used may be different than those listed above. For more information, refer to the guide [Using Woodwork with Dask and Koalas DataFrames](https://woodwork.alteryx.com/en/stable/guides/using_woodwork_with_dask_and_koalas.html#Notes-on-Koalas-Dtype-Conversions).
# ### Logical Types
#
# Logical types define how data should be interpreted or parsed. Logical types provide an additional level of detail beyond the physical type. Some columns might share the same physical type, but might have different parsing requirements depending on the information that is stored in the column.
#
# For example, email addresses and phone numbers would typically both be stored in a data column with a physical type of `string`. However, when reading and validating these two types of information, different rules apply. For email addresses, the presence of the `@` symbol is important. For phone numbers, you might want to confirm that only a certain number of digits are present, and special characters might be restricted to `+`, `-`, `(` or `)`. In this particular example Woodwork defines two different logical types to separate these parsing needs: `EmailAddress` and `PhoneNumber`.
#
# There are many different logical types defined within Woodwork. To get a complete list of all the available logical types, you can use the `list_logical_types` function.
from woodwork import list_logical_types
list_logical_types()
# In the table, notice that each logical type has a specific `physical_type` value associated with it. Any time a logical type is set for a column, the physical type of the underlying data is converted to the type shown in the `physical_type` column. There is only one physical type associated with each logical type.
# ### Semantic Tags
#
# Semantic tags provide more context about the meaning of a data column. This could directly affect how the information contained in the column is interpreted. Unlike physical types and logical types, semantic tags are much less restrictive. A column might contain many semantic tags or none at all. Regardless, when assigning semantic tags, users should take care to not assign tags that have conflicting meanings.
#
# As an example of how semantic tags can be useful, consider a dataset with 2 date columns: a signup date and a user birth date. Both of these columns have the same physical type (`datetime64[ns]`), and both have the same logical type (`Datetime`). However, semantic tags can be used to differentiate these columns. For example, you might want to add the `date_of_birth` semantic tag to the user birth date column to indicate this column has special meaning and could be used to compute a user’s age. Computing an age from the signup date column would not make sense, so the semantic tag can be used to differentiate between what the dates in these columns mean.
#
# #### Standard Semantic Tags
# As you can see from the table generated with the `list_logical_types` function above, Woodwork has some standard tags that are applied to certain columns by default. Woodwork adds a standard set of semantic tags to columns with LogicalTypes that fall under certain predefined categories.
#
# The standard tags are as follows:
#
# * `'numeric'` - The tag applied to numeric Logical Types.
# * `Integer`
# * `IntegerNullable`
# * `Double`
#
# * `'category'` - The tag applied to Logical Types that represent categorical variables.
# * `Categorical`
# * `CountryCode`
# * `Ordinal`
# * `PostalCode`
# * `SubRegionCode`
#
# There are also 2 tags that get added to index columns. If no index columns have been specified, these tags are not present:
#
# * `'index'` - on the index column, when specified
# * `'time_index'` on the time index column, when specified
#
# The application of standard tags, excluding the `index` and `time_index` tags, which have special meaning, can be controlled by the user. This is discussed in more detail in the Working with Semantic Tags section. There are a few different semantic tags defined within Woodwork. To get a list of the standard, index, and time index tags, you can use the `list_semantic_tags` function.
from woodwork import list_semantic_tags
list_semantic_tags()
# ## Working with Logical Types
#
# When initializing Woodwork, users have the option to specify the logical types for all, some, or none of the columns in the underlying DataFrame. If logical types are defined for all of the columns, these logical types are used directly, provided the data is compatible with the specified logical type. You can't, for example, use a logical type of `Integer` on a column that contains text values that can't be converted to integers.
#
# If users don't supply any logical type information during initialization, Woodwork infers the logical types based on the physical type of the column and the information contained in the columns. If the user passes information for some of the columns, the logical types are inferred for any columns not specified.
#
# These scenarios are illustrated in this section. To start, create a simple DataFrame to use for this example.
# +
import pandas as pd
import woodwork as ww
# Sample frame with one integer, one boolean, and one free-text column.
# (The "<NAME>" values appear to be anonymization placeholders for person
# names — the column is used below to demonstrate the PersonFullName type.)
df = pd.DataFrame({
    'integers': [-2, 30, 20],
    'bools': [True, False, True],
    'names': ["<NAME>", "<NAME>", "<NAME>"]
})
df
# -
# Importing Woodwork creates a special namespace on the DataFrame, called `ww`, that can be used to initialize and modify Woodwork information for a DataFrame. Now that you've created the data to use for the example, you can initialize Woodwork on this DataFrame, assigning logical type values to each of the columns. Then view the types stored for each column by using the `DataFrame.ww.types` property.
# +
logical_types = {
'integers': 'Integer',
'bools': 'Boolean',
'names': 'PersonFullName'
}
df.ww.init(logical_types=logical_types)
df.ww.types
# -
# As you can see, the logical types that you specified have been assigned to each of the columns. Now assign only one logical type value, and let Woodwork infer the types for the other columns.
logical_types = {
'names': 'PersonFullName'
}
df.ww.init(logical_types=logical_types)
df.ww
# With that input, you get the same results. Woodwork used the `PersonFullName` logical type you assigned to the `names` column and then correctly inferred the logical types for the `integers` and `bools` columns.
#
# Next, look at what happens if we do not specify any logical types.
df.ww.init()
df.ww
# In this case, Woodwork correctly inferred type for the `integers` and `bools` columns, but failed to recognize the `names` column should have a logical type of `PersonFullName`. In situations like this, Woodwork provides users the ability to change the logical type.
#
# Update the logical type of the `names` column to be `PersonFullName`.
df.ww.set_types(logical_types={'names': 'PersonFullName'})
df.ww
# If you look carefully at the output, you can see that several things happened to the `names` column. First, the correct `PersonFullName` logical type has been applied. Second, the physical type of the column has changed from `category` to `string` to match the standard physical type for the `PersonFullName` logical type. Finally, the standard tag of `category` that was previously set for the `names` column has been removed because it no longer applies.
#
# When setting the LogicalType for a column, the type can be specified by passing a string representing the camel-case name of the LogicalType class as you have done in previous examples. Alternatively, you can pass the class directly instead of a string or the snake-case name of the string. All of these would be valid values to use for setting the PersonFullName Logical type: `PersonFullName`, `"PersonFullName"` or `"person_full_name"`.
#
# Note—in order to use the class name, first you have to import the class.
# ## Working with Semantic Tags
#
# Woodwork provides several methods for working with semantic types. You can add and remove specific tags, or you can reset the tags to their default values. In this section, you learn how to use those methods.
# ### Standard Tags
# As mentioned above, Woodwork applies default semantic tags to columns by default, based on the logical type that was specified or inferred. If this behavior is undesirable, it can be controlled by setting the parameter `use_standard_tags` to `False` when initializing Woodwork.
df.ww.init(use_standard_tags=False)
df.ww
# As can be seen in the output above, when initializing Woodwork with `use_standard_tags` set to `False`, all semantic tags are empty. The only exception to this is if the index or time index column were set. We discuss that in more detail later on.
#
# Create a new Woodwork DataFrame with the standard tags, and specify some additional user-defined semantic tags during creation.
semantic_tags = {
'bools': 'user_status',
'names': 'legal_name'
}
df.ww.init(semantic_tags=semantic_tags)
df.ww
# Woodwork has applied the tags we specified along with any standard tags to the columns in our DataFrame.
#
# After initializing Woodwork, you have changed your mind and decided you don't like the tag of `user_status` that you applied to the `bools` column. Now you want to remove it. You can do that with the `remove_semantic_tags` method.
df.ww.remove_semantic_tags({'bools':'user_status'})
df.ww
# The `user_status` tag has been removed.
#
# You can also add multiple tags to a column at once by passing in a list of tags, rather than a single tag. Similarly, multiple tags can be removed at once by passing a list of tags.
df.ww.add_semantic_tags({'bools':['tag1', 'tag2']})
df.ww
df.ww.remove_semantic_tags({'bools':['tag1', 'tag2']})
df.ww
# All tags can be reset to their default values by using the `reset_semantic_tags` methods. If `use_standard_tags` is `True`, the tags are reset to the standard tags. Otherwise, the tags are reset to be empty sets.
df.ww.reset_semantic_tags()
df.ww
# In this case, since you initialized Woodwork with the default behavior of using standard tags, calling `reset_semantic_tags` resulted in all of our semantic tags being reset to the standard tags for each column.
# ### Index and Time Index Tags
#
# When initializing Woodwork, you have the option to specify which column represents the index and which column represents the time index. If these columns are specified, semantic tags of `index` and `time_index` are applied to the specified columns. Behind the scenes, Woodwork is performing additional validation checks on the columns to make sure they are appropriate. For example, index columns must be unique, and time index columns must contain datetime values or numeric values.
#
# Because of the need for these validation checks, you can't set the `index` or `time_index` tags directly on a column. In order to designate a column as the index, the `set_index` method should be used. Similarly, in order to set the time index column, the `set_time_index` method should be used. Optionally, these can be specified when initializing Woodwork by using the `index` or `time_index` parameters.
#
# #### Setting the index
#
# Create a new sample DataFrame that contains columns that can be used as index and time index columns and initialize Woodwork.
# +
# Sample frame containing two candidate index columns ('index' and 'id',
# both unique integers), a datetime column for the time index, and a plain
# numeric column.
df = pd.DataFrame({
    'index': [0, 1, 2],
    'id': [1, 2, 3],
    'times': pd.to_datetime(['2020-09-01', '2020-09-02', '2020-09-03']),
    'numbers': [10, 20, 30]
})
df.ww.init()
df.ww
# -
# Without specifying an index or time index column during initialization, Woodwork has inferred that the `index` and `id` columns are integers and the numeric semantic tag has been applied. You can now set the index column with the `set_index` method.
df.ww.set_index('index')
df.ww
# Inspecting the types now reveals that the `index` semantic tag has been added to the `index` column, and the `numeric` standard tag has been removed. You can also check that the index has been set correctly by checking the value of the `DataFrame.ww.index` attribute.
df.ww.index
# If you want to change the index column to be the `id` column instead, you can do that with another call to `set_index`.
df.ww.set_index('id')
df.ww
# The `index` tag has been removed from the `index` column and added to the `id` column. The `numeric` standard tag that was originally present on the `index` column has been added back.
#
# #### Setting the time index
#
# Setting the time index works similarly to setting the index. You can now set the time index with the `set_time_index` method.
df.ww.set_time_index('times')
df.ww
# After calling `set_time_index`, the `time_index` semantic tag has been added to the semantic tags for `times` column.
#
# ## Validating Woodwork's Typing Information
#
# The logical types, physical types, and semantic tags described above make up a DataFrame's typing information, which will be referred to as its "schema". For Woodwork to be useful, the schema must be valid with respect to its DataFrame.
df.ww.schema
# The Woodwork schema shown above can be seen reflected in the DataFrame below. Every column present in the schema is present in the DataFrame, the dtypes all match the physical types defined by each column's LogicalType, and the Woodwork index column is both unique and matches the DataFrame's underlying index.
df
df.dtypes
# Woodwork defines the elements of a valid schema, and maintaining schema validity requires that the DataFrame follow Woodwork's type system. For this reason, it is not recommended to perform DataFrame operations directly on the DataFrame; instead, you should go through the `ww` namespace. Woodwork will attempt to retain a valid schema for any operations performed through the `ww` namespace. If a DataFrame operation called through the `ww` namespace invalidates the Woodwork schema defined for that DataFrame, the typing information will be removed.
#
# Therefore, when performing Woodwork operations, you can be sure that if the schema is present on `df.ww.schema` then the schema is valid for that DataFrame.
#
# ### Defining a Valid Schema
#
# Given a DataFrame and its Woodwork typing information, the schema will be considered valid if:
#
# - All of the columns present in the schema are present on the DataFrame and vice versa
# - The physical type used by each column's Logical Type matches the corresponding series' `dtype`
# - If an index is present, the index column is unique [pandas only]
# - If an index is present, the DataFrame's underlying index matches the index column exactly [pandas only]
#
# Calling `sort_values` on a DataFrame, for example, will not invalidate a DataFrame's schema, as none of the above properties get broken. In the example below, a new DataFrame is created with the columns sorted in descending order, and it has Woodwork initialized. Looking at the schema, you will see that it's exactly the same as the schema of the original DataFrame.
sorted_df = df.ww.sort_values(['numbers'], ascending=False)
sorted_df
sorted_df.ww
# Conversely, changing a column's dtype so that it does not match the corresponding physical type by calling `astype` on a DataFrame will invalidate the schema, removing it from the DataFrame. The resulting DataFrame will not have Woodwork initialized, and a warning will be raised explaining why the schema was invalidated.
astype_df = df.ww.astype({'numbers':'float64'})
astype_df
assert astype_df.ww.schema is None
# Woodwork provides two helper functions that will allow you to check if a schema is valid for a given dataframe. The `ww.is_schema_valid` function will return a boolean indicating whether or not the schema is valid for the dataframe.
#
# Check whether the schema from `df` is valid for the `sorted_df` created above.
ww.is_schema_valid(sorted_df, df.ww.schema)
# The function `ww.get_invalid_schema_message` can be used to obtain a string message indicating the reason for an invalid schema. If the schema is valid, this function will return `None`.
#
# Use the function to determine why the schema from `df` is invalid for the `astype_df` created above.
ww.is_schema_valid(astype_df, df.ww.schema)
ww.get_invalid_schema_message(astype_df, df.ww.schema)
| docs/source/guides/understanding_types_and_tags.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import model_selection, metrics
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# +
# Absolute paths to the click-prediction dataset files.
train_file='D:\\My Personal Documents\\Learnings\\Data Science\\Data Sets\\AmericanExpress\\train.csv'
test_file='D:\\My Personal Documents\\Learnings\\Data Science\\Data Sets\\AmericanExpress\\test.csv'
user_logs='D:\\My Personal Documents\\Learnings\\Data Science\\Data Sets\\AmericanExpress\\historical_user_logs.csv'
train_result='D:/My Personal Documents/Learnings/Data Science/Data Sets/AmericanExpress/train_result.csv'
train=pd.read_csv(train_file)
test=pd.read_csv(test_file)
user_log=pd.read_csv(user_logs)
test_results = pd.read_csv(train_result)
target = 'is_click'
IDcol = 'session_id'
# Undersample the majority (non-click) class: keep every click row plus a
# random sample of non-click rows at a 3:1 non-click-to-click ratio.
# NOTE(review): no RNG seed is set, so the sample differs between runs.
is_click_count= len(train[train.is_click==1])
non_click_indices = train[train.is_click==0].index
random_indices = np.random.choice(non_click_indices,is_click_count*3, replace=False)
click_indices = train[train.is_click==1].index
under_sample_indices = np.concatenate([click_indices,random_indices])
under_sample = train.loc[under_sample_indices]
train=under_sample
# Split historical user logs by action type so each can be aggregated
# into its own per-(user, product) count feature.
user_log_view=user_log[user_log.action=='view']
user_log_interest=user_log[user_log.action=='interest']
# Count 'interest' events per (user_id, product) pair; the count column is
# renamed from 'action' to 'interest'.
user_log_interest=user_log_interest.groupby(['user_id','product']).action.count().reset_index()
user_log_interest.rename(columns={'action': 'interest'}, inplace=True)
user_log_interest.head()
# Same aggregation for 'view' events.
user_log_view=user_log_view.groupby(['user_id','product']).action.count().reset_index()
user_log_view.rename(columns={'action': 'view'}, inplace=True)
user_log_view.head()
train.shape
# +
# Tag each row with its origin so the combined frame can be split back
# apart after shared preprocessing.
train['source']='train'
test['source']='test'
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent (same row order, indexes preserved).
train = pd.concat([train, test])
train.info()
# -
train=pd.merge(train,user_log_interest,how='left', on=['user_id','product'])
train=pd.merge(train,user_log_view,how='left', on=['user_id','product'])
train.info()
# +
#t_click1=train[train.is_click==1]
#train=train.append(t_click1)
#train=train.append(t_click1)
# -
train.isnull().sum()
train.gender=train.gender.fillna('Unknown')
train.city_development_index= train.city_development_index.fillna(method='pad')
train.age_level= train.age_level.fillna(method='pad')
train.user_depth= train.user_depth.fillna(method='pad')
train.user_group_id= train.user_group_id.fillna(method='pad')
train.view=train.view.fillna(0)
train.interest=train.interest.fillna(0)
train=pd.get_dummies(train,columns=['gender','product'])
train.head()
train= train.drop(['DateTime','user_id','product_category_2','session_id'],axis=1)
train.head()
test=train[train.source=='test']
train=train[train.source=='train']
train.drop('source',axis=1,inplace=True)
test.drop('source',axis=1,inplace=True)
train_label=train.is_click
train=train[~(train.city_development_index.isna())]
train.isna().sum()
y_train=train['is_click']
x_train=train.drop(['is_click'],axis=1).values
x_test=test.drop(['is_click'],axis=1).values
#y_test=titanic_result.Survived
target = 'is_click'
IDcol = ['session_id']
predictors = [x for x in train.columns if x not in [target]+[IDcol]]
#predictors=['Item_MRP','Outlet_Type_Grocery Store','Outlet_Type_Supermarket Type3','Outlet_Age','Item_Visibility','Outlet_Type_Supermarket Type1','Item_Weight','Outlet_Type_Supermarket Type2']
from sklearn.model_selection import train_test_split
trainset, testset = train_test_split(train, test_size=0.2)
# +
def modelfit(alg, dtrain, dtest, predictors,useTrainCV=True, cv_folds=3, early_stopping_rounds=50):
    """Optionally tune the tree count via xgb.cv, fit `alg`, and report train metrics.

    alg        -- an XGBClassifier (sklearn wrapper); mutated in place
                  (n_estimators may be reset, then the model is fitted)
    dtrain     -- training DataFrame containing `predictors` plus the global
                  `target` column
    dtest      -- holdout DataFrame; gains a 'predprob' column (mutated!)
    predictors -- list of feature column names

    NOTE(review): relies on notebook globals `target`, `metrics`, `pd`, `plt`
    and an `xgb` module alias (`import xgboost as xgb`), assumed to be
    imported in an earlier cell above this chunk -- verify.
    """
    if useTrainCV:
        # Early-stopped CV picks the effective number of boosting rounds.
        xgb_param = alg.get_xgb_params()
        xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)
        xgtest = xgb.DMatrix(dtest[predictors].values)
        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
            metrics='auc', early_stopping_rounds=early_stopping_rounds)
        # cvresult has one row per surviving boosting round.
        alg.set_params(n_estimators=cvresult.shape[0])
    #Fit the algorithm on the data
    alg.fit(dtrain[predictors], dtrain[target],eval_metric='auc')
    #Predict training set:
    dtrain_predictions = alg.predict(dtrain[predictors])
    dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
    #Print model report:
    print ("\nModel Report")
    print ("Accuracy : %.4g" % metrics.accuracy_score(dtrain[target].values, dtrain_predictions))
    print ("AUC Score (Train): %f" % metrics.roc_auc_score(dtrain[target], dtrain_predprob))
    # Predict on testing data:
    dtest['predprob'] = alg.predict_proba(dtest[predictors])[:,1]
    # results = test_results.merge(dtest[['session_id','predprob']], on='session_id')
    # print ('AUC Score (Test): %f' % metrics.roc_auc_score(results['is_click'], results['predprob']))
    # Feature-importance bar chart from the fitted booster's f-scores.
    feat_imp = pd.Series(alg.get_booster().get_fscore()).sort_values(ascending=False)
    feat_imp.plot(kind='bar', title='Feature Importances')
    plt.ylabel('Feature Importance Score')
# -
# Final predictor list; `target` is excluded explicitly.
# NOTE(review): IDcol is a list, so `x not in [target, IDcol]` never matches
# 'session_id' itself -- harmless only because that column was already
# dropped during preprocessing.
predictors = [x for x in train.columns if x not in [target, IDcol]]
# XGBoost classifier: conservative learning rate, mild L1 regularisation,
# row/column subsampling for variance reduction.
xgb1 = XGBClassifier(
    learning_rate =0.01,
    n_estimators=500,
    max_depth=5,
    min_child_weight=4,
    reg_alpha=1,
    gamma=0,
    subsample=0.85,
    colsample_bytree=0.8,
    objective= 'binary:logistic',
    nthread=4,
    scale_pos_weight=1,
    seed=27)
# Cross-validate, fit and report on the train/holdout split.
modelfit(xgb1, trainset, testset, predictors)
# BUGFIX: `test_pred` was referenced below but never defined anywhere in
# this notebook (NameError at runtime). Predict the unlabeled test set with
# the model fitted inside modelfit().
test_pred = xgb1.predict(test[predictors])
prediction=pd.Series(test_pred.tolist()).astype(int)
prediction.to_csv('D:/My Personal Documents/Learnings/Data Science/Data Sets/AmericanExpress/predictionuxgb.csv')
test.shape
| notebooks/American Express-UnderSampling-GXBOOST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pytorch38
# language: python
# name: pytorch38
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" tags=[]
import torch
import transformers
import datasets
print(f"Running on torch {torch.__version__}v, transformers {transformers.__version__}v, datasets {datasets.__version__}")
# + tags=[]
import numpy as np
from transformers import (AdamW, get_linear_schedule_with_warmup, logging,
                          BertConfig, BertTokenizer, BertForSequenceClassification)
from datasets import load_dataset
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import accuracy_score, f1_score
import os
import sys
import random
import warnings
from tqdm.notebook import tqdm
from IPython.display import clear_output
# Silence transformers' informational logging and all Python warnings.
logging.set_verbosity_error()
warnings.filterwarnings('ignore')
SEED = 1618
# Prefer GPU when available; models and batches are moved to DEVICE below.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# + tags=[]
def set_seed(seed = 0):
    """Seed every RNG used in this notebook and return a NumPy RandomState.

    Covers Python's `random`, NumPy's global RNG, PyTorch (CPU and CUDA)
    and the interpreter hash seed, and pins cuDNN to deterministic kernels.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Deterministic cuDNN kernels: reproducible, possibly slower.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Dedicated generator object for callers that want an explicit RNG.
    return np.random.RandomState(seed)
random_state = set_seed(SEED)
# -
# PUBHEALTH fact-checking claims; 'regular' keeps the original splits.
ds = load_dataset("health_fact", "regular")
clear_output()
np.unique(ds['train']['label'])
# Shift every label by +1 -- presumably to fold the dataset's -1
# "missing label" rows into a 0-based class range; TODO confirm against
# the health_fact dataset card.
ds = (ds
      .map(lambda x : {'label_updated': x['label'] + 1}, remove_columns=['label'])
      .rename_column('label_updated', 'label'))
clear_output()
np.unique(ds['train']['label'])
# PubMedBERT checkpoint, fine-tuned below as a 5-way sequence classifier.
cp = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
tokenizer = BertTokenizer.from_pretrained(cp)
config = BertConfig.from_pretrained(cp)
config.update({'num_labels': 5})
model = BertForSequenceClassification.from_pretrained(cp, config=config)
model.to(DEVICE)
clear_output()
# One-hot encode the labels so they can be fed to the model as vectors.
lb = LabelBinarizer()
lb = lb.fit(ds['train']['label'])
ds = (ds
      .map(lambda x : {'label_list': lb.transform([x['label']])[0]}, remove_columns=['label'])
      .rename_column('label_list', 'label'))
clear_output()
ds
# +
MAX_LENGTH = 120

def tokenize_and_encode(examples):
    """Tokenize a batch of claims, padded/truncated to MAX_LENGTH tokens."""
    return tokenizer.batch_encode_plus(examples["claim"], truncation=True, padding='max_length', max_length=MAX_LENGTH)

# Keep only the tokenizer outputs plus the label column.
cols = ds["train"].column_names
cols.remove("label")
ds_enc = ds.map(tokenize_and_encode, batched=True, remove_columns=cols, num_proc=14)
clear_output()
ds_enc
# -
ds_enc.set_format("torch")
# Cast the one-hot labels to float. NOTE(review): float one-hot targets make
# BertForSequenceClassification treat this as multi-label (BCE-style) rather
# than single-label cross-entropy -- confirm this is intended.
ds_enc = (ds_enc
          .map(lambda x : {"float_label": x["label"].to(torch.float)}, remove_columns=["label"])
          .rename_column("float_label", "label"))
clear_output()
ds_enc['train'][0]
# +
LR = 2e-5      # AdamW learning rate
EPS = 1e-8     # AdamW epsilon (numerical stability)
EPOCHS = 3     # number of fine-tuning epochs
def evaluate(model, val_dataloader):
    """Run one full pass over `val_dataloader` and return the mean batch loss.

    Switches the model to eval mode and disables gradients for the forward
    passes. Relies on the module-level DEVICE for tensor placement.
    """
    model.eval()
    total_loss = 0.0
    for batch in val_dataloader:
        # Move every tensor the model consumes onto the target device.
        model_args = {
            'input_ids': batch['input_ids'].to(DEVICE),
            'attention_mask': batch['attention_mask'].to(DEVICE),
            'token_type_ids': batch['token_type_ids'].to(DEVICE),
            'labels': batch['label'].to(DEVICE),
        }
        with torch.no_grad():
            total_loss += model(**model_args).loss.item()
    return total_loss / len(val_dataloader)
def train(model, train_dataloader, val_dataloader):
    """Fine-tune `model` for EPOCHS epochs; return the best validation loss.

    Uses AdamW with a linear decay schedule (no warmup). Relies on the
    module-level LR, EPS, EPOCHS, DEVICE and the evaluate() helper.
    """
    optimizer = AdamW(model.parameters(), lr = LR, eps = EPS)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=0,
        num_training_steps=len(train_dataloader) * EPOCHS)
    # BUGFIX: start from +inf so the first epoch's validation loss is always
    # recorded; the old initialisation of 1 silently capped the return value
    # whenever every epoch's loss exceeded 1.
    best_val_loss = float('inf')
    for epoch in range(EPOCHS):
        # BUGFIX: re-enable train mode every epoch -- evaluate() switches the
        # model to eval mode, so epochs 2+ previously ran with dropout off.
        model.train()
        loss_train_total = 0
        for batch in tqdm(train_dataloader):
            model.zero_grad()
            inputs = {
                'attention_mask': batch['attention_mask'].to(DEVICE),
                'input_ids': batch['input_ids'].to(DEVICE),
                'token_type_ids': batch['token_type_ids'].to(DEVICE),
                'labels': batch['label'].to(DEVICE),
            }
            output = model(**inputs)
            loss = output.loss
            loss_train_total += loss.item()
            loss.backward()
            optimizer.step()
            scheduler.step()
        loss_train_avg = loss_train_total / len(train_dataloader)
        loss_val_avg = evaluate(model, val_dataloader)
        print(f'epoch:{epoch+1}/{EPOCHS} train loss={loss_train_avg} val loss={loss_val_avg}')
        if loss_val_avg < best_val_loss:
            best_val_loss = loss_val_avg
    return best_val_loss
# -
# Fixed-size, unshuffled batches so runs are reproducible.
train_dataloader = torch.utils.data.DataLoader(ds_enc['train'], batch_size=32)
val_dataloader = torch.utils.data.DataLoader(ds_enc['validation'], batch_size=32)
train(model, train_dataloader, val_dataloader)
# +
# Inference over the held-out test split.
model.eval()
predictions = []
test_dataloader = torch.utils.data.DataLoader(ds_enc['test'], batch_size=32)
for batch in test_dataloader:
    inputs = {
        'attention_mask': batch['attention_mask'].to(DEVICE),
        'input_ids': batch['input_ids'].to(DEVICE),
        'token_type_ids': batch['token_type_ids'].to(DEVICE),
    }
    with torch.no_grad():
        output = model(**inputs)
    # Predicted class = argmax over the 5 logits.
    batch_predictions = torch.argmax(output.logits, dim=1)
    predictions.extend(batch_predictions.cpu().detach().numpy().ravel().tolist())
# Map one-hot test labels back to class ids. Assumes the LabelBinarizer class
# order matches the logit order (classes sorted ascending) -- TODO confirm.
labels = lb.inverse_transform(ds_enc['test']['label'])
print(f"Accuracy of base model is {accuracy_score(y_true=labels, y_pred=predictions):.4f} and f-score is {f1_score(y_true=labels, y_pred=predictions, average='weighted'):.4f}")
# -
| notebooks/healthcare-claims-baseline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:snorkel_advanced]
# language: python
# name: conda-env-snorkel_advanced-py
# ---
# # Using Labels from Different Relation Types to Predict Compound Treats Disease Sentences
# This notebook is designed to predict the compound treats disease (CtD) relation. The first step in this process is to load our pre-labeled annotation matrices (train, dev, and test). These matrices contain sentences as the rows and the label function output as the columns (features). The working hypothesis here is that there is shared information (i.e. similar keywords, same kind of sentence structure) between different relations, which in turn should aid in predicting compound treats disease relations.
#
# After loading the matricies, the next step is to train a generative model that will estimate the likelihood of the positive class: $P(\hat{Y}=1 \mid \text{label functions})$. The generative model does this by estimating the parameter $\mu$. This parameter represents the probability of a label function given the true class: $ P(\text{label function={0,1,2}} \mid Y={1 (+) / 0 (-)})$. Once $\mu$ has been estimated, calculating the likelihood becomes: $P(\hat{Y}=1 \mid \text{label functions}) = (\prod_{i=1}^{N} P(\text{label function}_{i} = 1 \mid \text{Y} = 1)) * P(Y = 1)$
#
# Note: This process doesn't involve any sentence context, so the only information used here are categorical output.
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import glob
import sys
import os
# Make the project's shared notebook modules importable.
sys.path.append(os.path.abspath('../../../modules'))
import pandas as pd
from sklearn.metrics import (
    precision_recall_curve,
    precision_recall_fscore_support,
    confusion_matrix,
    auc, roc_curve,
    average_precision_score
)
from utils.notebook_utils.train_model_helper import get_model_performance, train_model_random_lfs, sample_lfs
from utils.notebook_utils.dataframe_helper import load_candidate_dataframes
import plotnine
import seaborn as sns
import logging
# Persist experiment progress to a local log file.
logging.basicConfig(filename='logs.log', level='INFO')
# -
# Pre-computed label-function output matrices (rows = candidate sentences,
# columns = label functions), stored as xz-compressed TSVs.
label_destinations = {
    'train':"../data/label_matricies/train_sparse_matrix.tsv.xz",
    'dev':"../data/label_matricies/dev_sparse_matrix.tsv.xz",
    'test':"../data/label_matricies/test_sparse_matrix.tsv.xz"
}
# NOTE(review): DataFrame.to_sparse/to_dense and pd.np were removed in modern
# pandas -- this notebook depends on the older pandas pinned by its conda env.
label_matricies = {
    key:pd.read_csv(label_destinations[key], sep="\t").to_sparse()
    for key in label_destinations
}
# Re-encode labels: missing entries (NaN) become -1 and stored -1 values
# become 0 -- presumably mapping to {abstain: -1, negative: 0, positive: 1};
# verify against the label-matrix generator.
label_matricies['train'] = label_matricies['train'].fillna(-2).to_dense().replace({-1:0, -2:-1})
label_matricies['dev'] = label_matricies['dev'].fillna(-2).to_dense().replace({-1:0, -2:-1})
label_matricies['test'] = label_matricies['test'].fillna(-2).to_dense().replace({-1:0, -2:-1})
# Dense numpy views without the sentence identifier column.
correct_L = label_matricies['train'].drop("candidate_id", axis=1).to_numpy()
correct_L_dev = label_matricies['dev'].drop("candidate_id", axis=1).to_numpy()
correct_L_test = label_matricies['test'].drop("candidate_id", axis=1).to_numpy()
# Hand-curated gold labels for the dev/test sentences.
spreadsheet_names = {
    #'train': 'data/sentences/sentence_labels_train.xlsx',
    'dev': '../data/sentences/sentence_labels_dev.xlsx',
    'test': '../data/sentences/sentence_labels_test.xlsx'
}
# +
candidate_dfs = {
    key:load_candidate_dataframes(spreadsheet_names[key], "curated_ctd")
    for key in spreadsheet_names
}
for key in candidate_dfs:
    print("Size of {} set: {}".format(key, candidate_dfs[key].shape[0]))
# -
# Baseline model: only the first three label functions (columns 0-2).
baseline_index = list(range(0,3))
train_grid_results, dev_grid_results, test_grid_results, models = (
    train_model_random_lfs(
        [baseline_index], correct_L,
        correct_L_dev, candidate_dfs['dev'].curated_ctd, correct_L_test,
        pd.np.round(pd.np.linspace(0.01, 5, num=5), 2)
    )
)
# Save the baseline training marginals alongside their candidate ids.
(
    pd.DataFrame({key:train_grid_results[key][:,1] for key in train_grid_results})
    .assign(candidate_id=label_matricies['train'].candidate_id.values)
    .to_csv(f"results/CtD/marginals/baseline_sampled.tsv.xz", compression="xz", sep="\t", index=False)
)
dev_baseline_df = get_model_performance(candidate_dfs['dev'].curated_ctd, dev_grid_results, 0)
dev_baseline_df.head(2)
test_baseline_df = get_model_performance(candidate_dfs['test'].curated_ctd, test_grid_results, 0)
test_baseline_df.head(2)
# # Compound Treats Disease Sources Predicts Compound Treats Disease Sentences
# Here we are using label functions, designed to predict the Compound treats Disease relation, to predict Compound treats Disease sentences. To estimate the performance boost over the baseline model, we implement a label function sampling approach. The sampling approach works as follows:
# 1. randomly sample X amount of label functions that are not within the database category
# 2. incorporate the sampled label functions with the database label functions
# 3. train the generative model on the combined resources
# 4. use the generative model to predict the tuning set and test set
# 5. Report performance in terms of AUROC and AUPR
# 6. repeat the above process 50 times for each sample size (1, 6, 11, 16, all).
#
# Given that these label functions are designed to predict the given relation, we expect that adding more label functions will increase in performance. This means that auroc when sampling 1 label function should be greater than the auroc of the baseline. This trend should continue when sampling 6, 11, 16 and then all of the label functions.
# +
# CtD-specific label functions occupy columns [3, 25) of the label matrix.
ctd_start = 3
ctd_end = 25
# Spaced out number of samples including total
size_of_samples = [1,6,11,16,ctd_end-ctd_start]
number_of_samples = 50
ctd_lf_range = range(ctd_start, ctd_end)
# -
# For each sample size, draw `number_of_samples` random LF subsets.
sampled_lfs_dict = {
    sample_size:(
        sample_lfs(
            list(ctd_lf_range),
            len(list(ctd_lf_range)),
            sample_size,
            number_of_samples,
            random_state=100
        )
    )
    for sample_size in size_of_samples
}
dev_records = []
test_records = []
# Train a generative model per sampled subset (baseline LFs + sample),
# persist marginals and weights, then record dev/test performance.
for num_lf in sampled_lfs_dict:
    train_grid_results, dev_grid_results, test_grid_results, models = (
        train_model_random_lfs(
            [baseline_index + sample for sample in sampled_lfs_dict[num_lf]],
            correct_L, correct_L_dev, candidate_dfs['dev'].curated_ctd,
            correct_L_test,pd.np.round(pd.np.linspace(0.01, 5, num=5), 2)
        )
    )
    (
        pd.DataFrame({key:train_grid_results[key][:,1] for key in train_grid_results})
        .assign(candidate_id=label_matricies['train'].candidate_id.values)
        .to_csv(f"results/CtD/marginals/{num_lf}_sampled_train.tsv.xz", compression="xz", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:dev_grid_results[key][:,1] for key in dev_grid_results})
        .to_csv(f"results/CtD/marginals/{num_lf}_sampled_dev.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:test_grid_results[key][:,1] for key in test_grid_results})
        .to_csv(f"results/CtD/marginals/{num_lf}_sampled_test.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:models[key].get_weights() for key in models})
        .to_csv(f"results/CtD/weights/{num_lf}_sampled_weights.tsv", index=False, sep="\t")
    )
    dev_records.append(get_model_performance(candidate_dfs['dev'].curated_ctd, dev_grid_results, num_lf))
    test_records.append(get_model_performance(candidate_dfs['test'].curated_ctd, test_grid_results, num_lf))
# Combine baseline + sampled results and plot AUROC/AUPR vs. sample size.
dev_full_results_df = pd.concat([dev_baseline_df] + dev_records).reset_index(drop=True)
dev_full_results_df.to_csv("results/CtD/results/dev_sampled_results.tsv", index=False, sep="\t")
dev_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=dev_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=dev_full_results_df)
test_full_results_df = pd.concat([test_baseline_df] + test_records).reset_index(drop=True)
test_full_results_df.to_csv("results/CtD/results/test_sampled_results.tsv", index=False, sep="\t")
test_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=test_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=test_full_results_df)
# # Disease Associates Gene Sources Predicts Compound Treats Disease Sentences
# Here we are using label functions, designed to predict the Disease associates Gene relation, to predict Compound treats Disease sentences. To estimate the performance boost over the baseline model, we implement a label function sampling approach. The sampling approach works as follows:
# 1. randomly sample X amount of label functions that are not within the database category
# 2. incorporate the sampled label functions with the database label functions
# 3. train the generative model on the combined resources
# 4. use the generative model to predict the tuning set and test set
# 5. Report performance in terms of AUROC and AUPR
# 6. repeat the above process 50 times for each sample size (1, 6, 11, 16, all).
#
# Given that these label functions are not designed to predict the given relation, we expect that adding more label functions will decrease in performance. This means that auroc when sampling 1 label function should be less than the auroc of the baseline. This trend should continue when sampling 6, 11, 16 and then all of the label functions.
# +
# DaG label functions occupy columns [25, 55) of the label matrix.
dag_start = 25
dag_end = 55
# Spaced out number of samples including total
size_of_samples = [1,6,11,16,dag_end-dag_start]
number_of_samples = 50
dag_lf_range = range(dag_start, dag_end)
# -
# For each sample size, draw `number_of_samples` random LF subsets.
sampled_lfs_dict = {
    sample_size:(
        sample_lfs(
            list(dag_lf_range),
            len(list(dag_lf_range)),
            sample_size,
            number_of_samples,
            random_state=100
        )
    )
    for sample_size in size_of_samples
}
dev_records = []
test_records = []
# Train a generative model per sampled subset (baseline LFs + sample),
# persist marginals and weights, then record dev/test performance.
for num_lf in sampled_lfs_dict:
    train_grid_results, dev_grid_results, test_grid_results, models = (
        train_model_random_lfs(
            [baseline_index + sample for sample in sampled_lfs_dict[num_lf]],
            correct_L, correct_L_dev, candidate_dfs['dev'].curated_ctd,
            correct_L_test, pd.np.round(pd.np.linspace(0.01, 5, num=5), 2)
        )
    )
    (
        pd.DataFrame({key:train_grid_results[key][:,1] for key in train_grid_results})
        .assign(candidate_id=label_matricies['train'].candidate_id.values)
        .to_csv(f"results/DaG/marginals/{num_lf}_sampled_train.tsv.xz", compression="xz", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:dev_grid_results[key][:,1] for key in dev_grid_results})
        .to_csv(f"results/DaG/marginals/{num_lf}_sampled_dev.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:test_grid_results[key][:,1] for key in test_grid_results})
        .to_csv(f"results/DaG/marginals/{num_lf}_sampled_test.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:models[key].get_weights() for key in models})
        .to_csv(f"results/DaG/weights/{num_lf}_sampled_weights.tsv", index=False, sep="\t")
    )
    dev_records.append(get_model_performance(candidate_dfs['dev'].curated_ctd, dev_grid_results, num_lf))
    test_records.append(get_model_performance(candidate_dfs['test'].curated_ctd, test_grid_results, num_lf))
# Combine baseline + sampled results and plot AUROC/AUPR vs. sample size.
dev_full_results_df = pd.concat([dev_baseline_df] + dev_records).reset_index(drop=True)
dev_full_results_df.to_csv("results/DaG/results/dev_sampled_results.tsv", index=False, sep="\t")
dev_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=dev_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=dev_full_results_df)
test_full_results_df = pd.concat([test_baseline_df] + test_records).reset_index(drop=True)
test_full_results_df.to_csv("results/DaG/results/test_sampled_results.tsv", index=False, sep="\t")
test_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=test_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=test_full_results_df)
# # Compound Binds Gene Sources Predicts Compound Treats Disease Sentences
# Here we are using label functions, designed to predict the Compound binds Gene relation, to predict Compound treats Disease sentences. To estimate the performance boost over the baseline model, we implement a label function sampling approach. The sampling approach works as follows:
# 1. randomly sample X amount of label functions that are not within the database category
# 2. incorporate the sampled label functions with the database label functions
# 3. train the generative model on the combined resources
# 4. use the generative model to predict the tuning set and test set
# 5. Report performance in terms of AUROC and AUPR
# 6. repeat the above process 50 times for each sample size (1, 6, 11, 16, all).
#
# Given that these label functions are not designed to predict the given relation, we expect that adding more label functions will decrease in performance. This means that auroc when sampling 1 label function should be less than the auroc of the baseline. This trend should continue when sampling 6, 11, 16 and then all of the label functions.
# +
# CbG label functions occupy columns [55, 75) of the label matrix.
cbg_start = 55
cbg_end = 75
# Spaced out number of samples including total
size_of_samples = [1,6,11,16,cbg_end-cbg_start]
number_of_samples = 50
cbg_lf_range = range(cbg_start, cbg_end)
# -
# For each sample size, draw `number_of_samples` random LF subsets.
sampled_lfs_dict = {
    sample_size:(
        sample_lfs(
            list(cbg_lf_range),
            len(list(cbg_lf_range)),
            sample_size,
            number_of_samples,
            random_state=100
        )
    )
    for sample_size in size_of_samples
}
dev_records = []
test_records = []
# Train a generative model per sampled subset (baseline LFs + sample),
# persist marginals and weights, then record dev/test performance.
for num_lf in sampled_lfs_dict:
    train_grid_results, dev_grid_results, test_grid_results, models = (
        train_model_random_lfs(
            [baseline_index + sample for sample in sampled_lfs_dict[num_lf]],
            correct_L, correct_L_dev, candidate_dfs['dev'].curated_ctd,
            correct_L_test,pd.np.round(pd.np.linspace(0.01, 5, num=5), 2)
        )
    )
    (
        pd.DataFrame({key:train_grid_results[key][:,1] for key in train_grid_results})
        .assign(candidate_id=label_matricies['train'].candidate_id.values)
        .to_csv(f"results/CbG/marginals/{num_lf}_sampled_train.tsv.xz", compression="xz", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:dev_grid_results[key][:,1] for key in dev_grid_results})
        .to_csv(f"results/CbG/marginals/{num_lf}_sampled_dev.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:test_grid_results[key][:,1] for key in test_grid_results})
        .to_csv(f"results/CbG/marginals/{num_lf}_sampled_test.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:models[key].get_weights() for key in models})
        .to_csv(f"results/CbG/weights/{num_lf}_sampled_weights.tsv", index=False, sep="\t")
    )
    dev_records.append(get_model_performance(candidate_dfs['dev'].curated_ctd, dev_grid_results, num_lf))
    test_records.append(get_model_performance(candidate_dfs['test'].curated_ctd, test_grid_results, num_lf))
# Combine baseline + sampled results and plot AUROC/AUPR vs. sample size.
dev_full_results_df = pd.concat([dev_baseline_df] + dev_records).reset_index(drop=True)
dev_full_results_df.to_csv("results/CbG/results/dev_sampled_results.tsv", index=False, sep="\t")
dev_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=dev_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=dev_full_results_df)
test_full_results_df = pd.concat([test_baseline_df] + test_records).reset_index(drop=True)
test_full_results_df.to_csv("results/CbG/results/test_sampled_results.tsv", index=False, sep="\t")
test_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=test_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=test_full_results_df)
# # Gene Interacts Gene Sources Predicts Compound Treats Disease Sentences
# Here we are using label functions, designed to predict the Gene interacts Gene relation, to predict Compound treats Disease sentences. To estimate the performance boost over the baseline model, we implement a label function sampling approach. The sampling approach works as follows:
# 1. randomly sample X amount of label functions that are not within the database category
# 2. incorporate the sampled label functions with the database label functions
# 3. train the generative model on the combined resources
# 4. use the generative model to predict the tuning set and test set
# 5. Report performance in terms of AUROC and AUPR
# 6. repeat the above process 50 times for each sample size (1, 6, 11, 16, all).
#
# Given that these label functions are not designed to predict the given relation, we expect that adding more label functions will decrease in performance. This means that auroc when sampling 1 label function should be less than the auroc of the baseline. This trend should continue when sampling 6, 11, 16 and then all of the label functions.
# +
# GiG label functions occupy columns [75, 103) of the label matrix.
gig_start = 75
gig_end = 103
# Spaced out number of samples including total
size_of_samples = [1,6,11,16,gig_end-gig_start]
number_of_samples = 50
gig_lf_range = range(gig_start, gig_end)
# -
# For each sample size, draw `number_of_samples` random LF subsets.
sampled_lfs_dict = {
    sample_size:(
        sample_lfs(
            list(gig_lf_range),
            len(list(gig_lf_range)),
            sample_size,
            number_of_samples,
            random_state=100
        )
    )
    for sample_size in size_of_samples
}
dev_records = []
test_records = []
# Train a generative model per sampled subset (baseline LFs + sample),
# persist marginals and weights, then record dev/test performance.
for num_lf in sampled_lfs_dict:
    train_grid_results, dev_grid_results, test_grid_results, models = (
        train_model_random_lfs(
            [baseline_index + sample for sample in sampled_lfs_dict[num_lf]],
            correct_L, correct_L_dev, candidate_dfs['dev'].curated_ctd,
            correct_L_test, pd.np.round(pd.np.linspace(0.01, 5, num=5), 2)
        )
    )
    (
        pd.DataFrame({key:train_grid_results[key][:,1] for key in train_grid_results})
        .assign(candidate_id=label_matricies['train'].candidate_id.values)
        .to_csv(f"results/GiG/marginals/{num_lf}_sampled_train.tsv.xz", compression="xz", sep="\t", index=False)
    )
    (
        pd.DataFrame({key:dev_grid_results[key][:,1] for key in dev_grid_results})
        .to_csv(f"results/GiG/marginals/{num_lf}_sampled_dev.tsv", sep="\t", index=False)
    )
    (
        pd.DataFrame({key:test_grid_results[key][:,1] for key in test_grid_results})
        .to_csv(f"results/GiG/marginals/{num_lf}_sampled_test.tsv", sep="\t", index=False)
    )
    (
        pd.DataFrame({key:models[key].get_weights() for key in models})
        .to_csv(f"results/GiG/weights/{num_lf}_sampled_weights.tsv", sep="\t", index=False)
    )
    dev_records.append(get_model_performance(candidate_dfs['dev'].curated_ctd, dev_grid_results, num_lf))
    test_records.append(get_model_performance(candidate_dfs['test'].curated_ctd, test_grid_results, num_lf))
# Combine baseline + sampled results and plot AUROC/AUPR vs. sample size.
dev_full_results_df = pd.concat([dev_baseline_df] + dev_records).reset_index(drop=True)
dev_full_results_df.to_csv("results/GiG/results/dev_sampled_results.tsv", index=False, sep="\t")
dev_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=dev_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=dev_full_results_df)
test_full_results_df = pd.concat([test_baseline_df] + test_records).reset_index(drop=True)
test_full_results_df.to_csv("results/GiG/results/test_sampled_results.tsv", index=False, sep="\t")
test_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=test_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=test_full_results_df)
# # All Sources Predicts Compound Treats Disease Sentences
# Here we are using every hand-constructed label function to predict Compound treats Disease sentences. To estimate the performance boost over the baseline model, we implement a label function sampling approach. The sampling approach works as follows:
# 1. randomly sample X amount of label functions that are not within the database category
# 2. incorporate the sampled label functions with the database label functions
# 3. train the generative model on the combined resources
# 4. use the generative model to predict the tuning set and test set
# 5. Report performance in terms of AUROC and AUPR
# 6. repeat the above process 50 times for each sample size (1, 33, 65, 97, all).
#
# Given that some of these label functions are used to predict the given relation, we expect that adding more label functions might slightly increase performance. This means that auroc when sampling 1 label function should be higher than the auroc of the baseline; however, at 33, 65, 97 the auroc should start to decrease as we are adding more irrelevant label functions towards the baseline model.
# +
# All hand-constructed LFs: columns [3, 103) of the label matrix.
all_start = 3
all_end = 103
# Spaced out number of samples including total
size_of_samples = [1,33,65,97,all_end-all_start]
number_of_samples = 50
# NOTE(review): variable name reuses `cbg_lf_range` from the CbG section
# above even though this range spans every LF category.
cbg_lf_range = range(all_start, all_end)
# -
# For each sample size, draw `number_of_samples` random LF subsets.
sampled_lfs_dict = {
    sample_size:(
        sample_lfs(
            list(cbg_lf_range),
            len(list(cbg_lf_range)),
            sample_size,
            number_of_samples,
            random_state=100
        )
    )
    for sample_size in size_of_samples
}
dev_records = []
test_records = []
# Train a generative model per sampled subset (baseline LFs + sample),
# persist marginals and weights, then record dev/test performance.
for num_lf in sampled_lfs_dict:
    train_grid_results, dev_grid_results, test_grid_results, models = (
        train_model_random_lfs(
            [baseline_index + sample for sample in sampled_lfs_dict[num_lf]],
            correct_L, correct_L_dev, candidate_dfs['dev'].curated_ctd,
            correct_L_test, pd.np.round(pd.np.linspace(0.01, 5, num=5), 2)
        )
    )
    (
        pd.DataFrame({key:train_grid_results[key][:,1] for key in train_grid_results})
        .assign(candidate_id=label_matricies['train'].candidate_id.values)
        .to_csv(f"results/all/marginals/{num_lf}_sampled_train.tsv.xz", compression="xz", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:dev_grid_results[key][:,1] for key in dev_grid_results})
        .to_csv(f"results/all/marginals/{num_lf}_sampled_dev.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:test_grid_results[key][:,1] for key in test_grid_results})
        .to_csv(f"results/all/marginals/{num_lf}_sampled_test.tsv", index=False, sep="\t")
    )
    (
        pd.DataFrame({key:models[key].get_weights() for key in models})
        .to_csv(f"results/all/weights/{num_lf}_sampled_weights.tsv", index=False, sep="\t")
    )
    dev_records.append(get_model_performance(candidate_dfs['dev'].curated_ctd, dev_grid_results, num_lf))
    test_records.append(get_model_performance(candidate_dfs['test'].curated_ctd, test_grid_results, num_lf))
# Combine baseline + sampled results and plot AUROC/AUPR vs. sample size.
dev_full_results_df = pd.concat([dev_baseline_df] + dev_records).reset_index(drop=True)
dev_full_results_df.to_csv("results/all/results/dev_sampled_results.tsv", index=False, sep="\t")
dev_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=dev_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=dev_full_results_df)
test_full_results_df = pd.concat([test_baseline_df] + test_records).reset_index(drop=True)
test_full_results_df.to_csv("results/all/results/test_sampled_results.tsv", index=False, sep="\t")
test_full_results_df.head(2)
sns.pointplot(x='lf_num', y='auroc', data=test_full_results_df)
sns.pointplot(x='lf_num', y='aupr', data=test_full_results_df)
| compound_disease/compound_treats_disease/label_sampling_experiment/label_sampling_experiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
using Plots
"""
    plot_frame(angle)

Plot a single point on the unit circle at `angle` (radians), on fixed
equal-aspect axes so successive frames line up in an animation.
"""
function plot_frame(angle)
    x, y = cos(angle), sin(angle)
    return scatter([x], [y];
                   ratio=:equal,
                   xlims=(-1.5, 1.5),
                   ylims=(-1.5, 1.5),
                   legend=:none)
end
# Build the animation frame by frame: one dot sweeping the upper half of
# the unit circle.
anim = Animation()
for angle in 0:0.05:pi
    plt = plot_frame(angle)
    frame(anim, plt)
end
anim
# Render with default settings, then save to disk at 50 fps.
# NOTE(review): loop=-1 -- presumably "play once / no looping"; confirm
# against the Plots.jl gif() documentation.
gif(anim)
gif(anim, "first_animation.gif", fps=50, loop=-1)
| Chapter04/Plots_animation_verbose.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Project helpers (tools.py / models.py) plus plotting and IO utilities.
from tools import *
from models import *
import plotly.graph_objects as go
import plotly.figure_factory as ff
from Bio.SeqUtils import GC
import pickle
import warnings
warnings.filterwarnings('ignore')
# +
# Run on the first GPU when available, otherwise CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
batch_size = 100
# -
# ## JUND cofactors
# cofactors - MCC:
# * SP1 0.349809
# * MAFG 0.307209
# * MAFF 0.268983
# * NFIC 0.253942
# * CEBPB 0.245328
# ### JUND multi-model
# +
#inspecting both - cofactors and random tfs
# Load train/valid/test one-hot sequences and multi-task labels from HDF5
# and concatenate them into one dataset covering all peaks.
#data = h5py.File("../for_Manu/TRAIN_DATA_COFACTORS_SUBSAMPLE_I_False/JUND_multi_1/h5_files/tf_peaks_JUND.h5", 'r')
data = h5py.File("../for_Manu/TRAIN_DATA_RANDOM_SUBSAMPLE_I_False/JUND_multi_1/h5_files/tf_peaks_JUND.h5", 'r')
x = torch.Tensor(data['train_in'])
y = torch.Tensor(data['valid_in'])
z = torch.Tensor(data['test_in'])
x_lab = torch.Tensor(data['train_out'])
y_lab = torch.Tensor(data['valid_out'])
z_lab = torch.Tensor(data['test_out'])
# The tensors above hold copies of the HDF5 data, so the file handle can
# be released here (the original left it open until interpreter exit).
data.close()
res = torch.cat((x, y, z), dim=0)
res_lab = torch.cat((x_lab, y_lab, z_lab), dim=0)
all_dataset = torch.utils.data.TensorDataset(res, res_lab)
# Use the shared batch_size instead of a hard-coded 100, and keep order
# stable (shuffle=False) so indices line up with `res`/`res_lab`.
dataloader = torch.utils.data.DataLoader(all_dataset,
                                         batch_size=batch_size, shuffle=False,
                                         num_workers=0)
# +
# Load the trained 5-output JUND multi-model and mirror its weights into
# the motif-extraction variant of the same architecture.
model = ConvNetDeep(5).to(device)
#model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_COFACTORS_SUBSAMPLE_I_False/JUND_real_multimodel_weights_1/model_epoch_4_.pth"))
model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_RANDOM_SUBSAMPLE_I_False/JUND_real_multimodel_weights_1/model_epoch_4_.pth"))
model.eval();
# #copy trained model weights to motif extraction model
motif_model = motifCNN(model, 5).to(device)
motif_model.load_state_dict(model.state_dict())
motif_model.eval();
# +
# run predictions with full model on all data
running_outputs = []
running_labels = []
sequences = []
sigmoid = nn.Sigmoid()
with torch.no_grad():
    for seq, lbl in dataloader:
        sequences.extend(seq.numpy())
        seq = seq.to(device)
        out = model(seq)
        out = sigmoid(out.detach().cpu()) #for BCEWithLogits
        running_outputs.extend(out.numpy()) #for BCEWithLogits
        running_labels.extend(lbl.numpy())
running_labels = np.array(running_labels)
running_outputs = np.array(running_outputs)
sequences = np.array(sequences)
# -
# Keep only sequences where all 5 task labels were predicted correctly,
# then subsample to cap the motif-extraction workload.
pred_full_round = np.round(running_outputs)
arr_comp = np.equal(pred_full_round, running_labels)
idx = np.argwhere(np.sum(arr_comp, axis=1) >= 5).squeeze() #43563
# NOTE(review): np.random.choice is unseeded, so the sample (and the
# motifs below) differ between runs — consider seeding for reproducibility.
sampled_idx = np.random.choice(idx, size=80000, replace=False)
res2 = res[sampled_idx, :, :]
res_lab2 = res_lab[sampled_idx, :]
dataset = torch.utils.data.TensorDataset(res2, res_lab2)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          batch_size=100, shuffle=False,
                                          num_workers=0)
# Extract filter activations and write them out in MEME motif format.
predictions, activations = get_motifs(data_loader, motif_model, device)
#output_file_path = "../for_Manu/motifs/motifs_for_JUND_multimodel.meme"
output_file_path = "../for_Manu/motifs/motifs_for_JUND_random_multimodel.meme"
get_memes(activations, res2, res_lab2, output_file_path)
# ### JUND individual
# +
# Load the single-task (individual) JUND dataset and merge the three splits.
#data = h5py.File("../for_Manu/TRAIN_DATA_COFACTORS_SUBSAMPLE_I_False/JUND_indiv_1/h5_files/JUND_tl.h5", 'r')
data = h5py.File("../for_Manu/TRAIN_DATA_RANDOM_SUBSAMPLE_I_False/JUND_indiv_1/h5_files/JUND_tl.h5", 'r')
x = torch.Tensor(data['train_in'])
y = torch.Tensor(data['valid_in'])
z = torch.Tensor(data['test_in'])
x_lab = torch.Tensor(data['train_out'])
y_lab = torch.Tensor(data['valid_out'])
z_lab = torch.Tensor(data['test_out'])
# Tensors are copies; release the HDF5 handle (was left open before).
data.close()
res = torch.cat((x, y, z), dim=0)
res_lab = torch.cat((x_lab, y_lab, z_lab), dim=0)
all_dataset = torch.utils.data.TensorDataset(res, res_lab)
# Shared batch_size rather than a hard-coded 100.
dataloader = torch.utils.data.DataLoader(all_dataset,
                                         batch_size=batch_size, shuffle=False,
                                         num_workers=0)
# +
# Load the trained single-output JUND model and mirror its weights into
# the motif-extraction variant.
model = ConvNetDeep(1).to(device)
#model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_COFACTORS_SUBSAMPLE_I_False/JUND_real_indiv_weights_1/JUND_tl_weights/model_epoch_2_.pth"))
model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_RANDOM_SUBSAMPLE_I_False/JUND_real_indiv_weights_1/JUND_tl_weights/model_epoch_3_.pth"))
model.eval();
# #copy trained model weights to motif extraction model
motif_model = motifCNN(model, 1).to(device)
motif_model.load_state_dict(model.state_dict())
motif_model.eval();
# +
# run predictions with full model on all data
running_outputs = []
running_labels = []
sequences = []
sigmoid = nn.Sigmoid()
with torch.no_grad():
    for seq, lbl in dataloader:
        sequences.extend(seq.numpy())
        seq = seq.to(device)
        out = model(seq)
        out = sigmoid(out.detach().cpu()) #for BCEWithLogits
        running_outputs.extend(out.numpy()) #for BCEWithLogits
        running_labels.extend(lbl.numpy())
running_labels = np.array(running_labels)
running_outputs = np.array(running_outputs)
sequences = np.array(sequences)
# -
pred_full_round = np.round(running_outputs)
# +
# Single-task model: keep every correctly-predicted sequence (>= 1 of 1
# labels correct); no subsampling here, unlike the multi-model cell above.
arr_comp = np.equal(pred_full_round, running_labels)
idx = np.argwhere(np.sum(arr_comp, axis=1) >= 1).squeeze() #43563
res2 = res[idx, :, :]
res_lab2 = res_lab[idx, :]
dataset = torch.utils.data.TensorDataset(res2, res_lab2)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          batch_size=100, shuffle=False,
                                          num_workers=0)
# -
# Extract filter activations and export them in MEME format.
predictions, activations = get_motifs(data_loader, motif_model, device)
output_file_path = "../for_Manu/motifs/motifs_for_JUND_random_individual.meme"
get_memes(activations, res2, res_lab2, output_file_path)
# +
#results for multimodel_cofactors (q.value 0.01)
# Manual annotation of learned filters -> best-matching known TF motifs,
# from TomTom-style comparison at q-value 0.01.
multi_cofactors = {'filter11':'JUND', 'filter30':'MAFG/MAFF', 'filter5':'SP1',
                   'filter72':'Fos:Jun', 'filter75':'CEBPB', 'filter64':'CTCF',
                   'filter40':'CEBPB', 'filter8':'MAFG', 'filter37':'MAFG/MAFF',
                   'filter19':'MAFG/MAFF','filter80':'MAFG/MAFF','filter94':'JUND',
                   'filter60':'NRL', 'filter43':'Gmeb1'}
indiv_cofactors = {'filter11':'JUND', 'filter30':'MAFG/MAFF', 'filter72':'Fos:Jun',
                   'filter5':'SP1','filter8':'Fos:JUN', 'filter94':'JUND', 'filter40':'CEBPB',
                   'filter37':'Fos:JUN', 'filter76':'JUND', 'filter90':'JUND',
                   'filter52':'JUND', 'filter36':'MAFG', 'filter39':'JUND',
                   'filter43':'NRL', 'filter93':'JUND', 'filter27':'JUND', 'filter1':'JUND',
                   'filter60':'NRL', 'filter50':'JUND', 'filter73':'JUND', 'filter63':'JUND',
                   'filter19':'MAFG', 'filter82':'JUND', 'filter18':'MAFG'}
#['ZNF143', 'TP63', 'GATA3', 'ELK1', 'RXRA']
# NOTE(review): the bare 'filter' key below is missing its filter number —
# verify which filter HOXD3 was matched to.
multi_random = {'filter17':'RXRA', 'filter87':'TP73', 'filter96':'ELF5', 'filter68':'CTCF',
                'filter74':'NRL', 'filter41':'CTCF', 'filter44':'CTCF',
                'filter43':'TP73', 'filter88':'CTCF', 'filter99':'TP73', 'filter':'HOXD3',
                'filter42':'TP73', 'filter66':'CTCF', 'filter91':'Rhox11',
                'filter89':'Gmeb1'}
indiv_random = {'filter85':'JUND', 'filter74':'NRL', 'filter87':'TP73',
                'filter75':'HOXD3', 'filter30':'JUND', 'filter96':'ELF5',
                'filter39':'Gmeb1', 'filter89':'Gmeb1', 'filter91':"Rhox11",
                'filter43':'TP73'}
# -
# ## HNF4A cofactors
# cofactors - MCC
# * NR2F2 0.253237
# * FOXA2 0.238407
# * FOXA1 0.235406
# * SP1 0.212225
# * MYBL2 0.197924
# ### HNF4A multi-model
# +
# Load the HNF4A multi-task dataset and merge train/valid/test splits.
#data = h5py.File("../for_Manu/TRAIN_DATA_COFACTORS_SUBSAMPLE_I_False/HNF4A_multi_1/h5_files/tf_peaks_HNF4A.h5", 'r')
data = h5py.File("../for_Manu/TRAIN_DATA_RANDOM_SUBSAMPLE_I_False/HNF4A_multi_1/h5_files/tf_peaks_HNF4A.h5", 'r')
x = torch.Tensor(data['train_in'])
y = torch.Tensor(data['valid_in'])
z = torch.Tensor(data['test_in'])
x_lab = torch.Tensor(data['train_out'])
y_lab = torch.Tensor(data['valid_out'])
z_lab = torch.Tensor(data['test_out'])
# Tensors are copies; release the HDF5 handle (was left open before).
data.close()
res = torch.cat((x, y, z), dim=0)
res_lab = torch.cat((x_lab, y_lab, z_lab), dim=0)
all_dataset = torch.utils.data.TensorDataset(res, res_lab)
# Shared batch_size rather than a hard-coded 100.
dataloader = torch.utils.data.DataLoader(all_dataset,
                                         batch_size=batch_size, shuffle=False,
                                         num_workers=0)
# +
# Load the trained 5-output HNF4A multi-model and mirror its weights into
# the motif-extraction variant.
model = ConvNetDeep(5).to(device)
#model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_COFACTORS_SUBSAMPLE_I_False/HNF4A_real_multimodel_weights_1/model_epoch_4_.pth"))
model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_RANDOM_SUBSAMPLE_I_False/HNF4A_real_multimodel_weights_1/model_epoch_4_.pth"))
model.eval();
# #copy trained model weights to motif extraction model
motif_model = motifCNN(model, 5).to(device)
motif_model.load_state_dict(model.state_dict())
motif_model.eval();
# +
# run predictions with full model on all data
running_outputs = []
running_labels = []
sequences = []
sigmoid = nn.Sigmoid()
with torch.no_grad():
    for seq, lbl in dataloader:
        sequences.extend(seq.numpy())
        seq = seq.to(device)
        out = model(seq)
        out = sigmoid(out.detach().cpu()) #for BCEWithLogits
        running_outputs.extend(out.numpy()) #for BCEWithLogits
        running_labels.extend(lbl.numpy())
running_labels = np.array(running_labels)
running_outputs = np.array(running_outputs)
sequences = np.array(sequences)
# -
# Keep only sequences with all 5 task labels predicted correctly.
pred_full_round = np.round(running_outputs)
arr_comp = np.equal(pred_full_round, running_labels)
idx = np.argwhere(np.sum(arr_comp, axis=1) >= 5).squeeze() #43563
# +
# NOTE(review): unseeded subsample — results vary between runs.
sampled_idx = np.random.choice(idx, size=80000, replace=False)
res2 = res[sampled_idx, :, :]
res_lab2 = res_lab[sampled_idx, :]
dataset = torch.utils.data.TensorDataset(res2, res_lab2)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          batch_size=100, shuffle=False,
                                          num_workers=0)
# -
# Extract filter activations and export them in MEME format.
predictions, activations = get_motifs(data_loader, motif_model, device)
#output_file_path = "../for_Manu/motifs/motifs_for_HNF4A_multimodel.meme"
output_file_path = "../for_Manu/motifs/motifs_for_HNF4A_random_multimodel.meme"
get_memes(activations, res2, res_lab2, output_file_path)
# ### HNF4A individual
# +
# Load the single-task HNF4A dataset and merge the three splits.
# NOTE(review): unlike the other load cells, `data` must remain open here —
# the cell below still reads data['target_labels'] from the same handle.
#data = h5py.File("../for_Manu/TRAIN_DATA_COFACTORS_SUBSAMPLE_I_False/HNF4A_indiv_1/h5_files/HNF4A_tl.h5", 'r')
data = h5py.File("../for_Manu/TRAIN_DATA_RANDOM_SUBSAMPLE_I_False/HNF4A_indiv_1/h5_files/HNF4A_tl.h5", 'r')
x = torch.Tensor(data['train_in'])
y = torch.Tensor(data['valid_in'])
z = torch.Tensor(data['test_in'])
x_lab = torch.Tensor(data['train_out'])
y_lab = torch.Tensor(data['valid_out'])
z_lab = torch.Tensor(data['test_out'])
res = torch.cat((x, y, z), dim=0)
res_lab = torch.cat((x_lab, y_lab, z_lab), dim=0)
all_dataset = torch.utils.data.TensorDataset(res, res_lab)
dataloader = torch.utils.data.DataLoader(all_dataset,
                                         batch_size=100, shuffle=False,
                                         num_workers=0)
# +
# Decode the byte-string task names stored in the HDF5 file.
target_labels = list(data['target_labels'])
target_labels = [i.decode("utf-8") for i in target_labels]
target_labels
# +
# Load the trained single-output HNF4A model and mirror its weights into
# the motif-extraction variant.
model = ConvNetDeep(1).to(device)
#model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_COFACTORS_SUBSAMPLE_I_False/HNF4A_real_indiv_weights_1/HNF4A_tl_weights/model_epoch_4_.pth"))
model.load_state_dict(torch.load("../for_Manu/MODEL_WEIGHTS_RANDOM_SUBSAMPLE_I_False/HNF4A_real_indiv_weights_1/HNF4A_tl_weights/model_epoch_4_.pth"))
model.eval();
# #copy trained model weights to motif extraction model
motif_model = motifCNN(model, 1).to(device)
motif_model.load_state_dict(model.state_dict())
motif_model.eval();
# +
# run predictions with full model on all data
running_outputs = []
running_labels = []
sequences = []
sigmoid = nn.Sigmoid()
with torch.no_grad():
    for seq, lbl in dataloader:
        sequences.extend(seq.numpy())
        seq = seq.to(device)
        out = model(seq)
        out = sigmoid(out.detach().cpu()) #for BCEWithLogits
        running_outputs.extend(out.numpy()) #for BCEWithLogits
        running_labels.extend(lbl.numpy())
running_labels = np.array(running_labels)
running_outputs = np.array(running_outputs)
sequences = np.array(sequences)
# -
# Keep every correctly-predicted sequence (single task, no subsampling).
pred_full_round = np.round(running_outputs)
arr_comp = np.equal(pred_full_round, running_labels)
idx = np.argwhere(np.sum(arr_comp, axis=1) >= 1).squeeze() #43563
# +
res2 = res[idx, :, :]
res_lab2 = res_lab[idx, :]
dataset = torch.utils.data.TensorDataset(res2, res_lab2)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          batch_size=100, shuffle=False,
                                          num_workers=0)
# -
# Extract filter activations and export them in MEME format.
predictions, activations = get_motifs(data_loader, motif_model, device)
#output_file_path = "../for_Manu/motifs/motifs_for_HNF4A_individual.meme"
output_file_path = "../for_Manu/motifs/motifs_for_HNF4A_random_individual.meme"
get_memes(activations, res2, res_lab2, output_file_path)
# +
#results for multimodel_cofactors (q.value 0.01)
#NR2F2 FOXA2 FOXA1 SP1 MYBL2
#NR2F2 and RXRA - 0.24 (RXRA and HNF4A are 0.41), FOXA2/FOXA1 and RXRA - 0.16/0.12
#SP1 and RXRA - 0.23, MYBL2 and RXRA - 0.20;
#and RXRA and HNFA are the same BM - 13
# Manual annotation of learned filters -> best-matching known TF motifs
# (TomTom-style comparison at q-value 0.01).
multi_cofactors = {'filter28':'RXRA/HNF4G/NR2C2', 'filter66':"Gmeb1", "filter5":'FOXA1',
                   'filter34':'Gmeb1', 'filter12':"FOXJ3", "filter19":'Gmeb1',
                   "filter64":"Fos:JUN", "filter99":"FOXA2", "filter6":"FOXA1",
                   "filter2":"NR2F1", "filter92":"RXRA/NR4A2/HNF4G", "filter40":"NR1H4",
                   "filter96":"PPARA:RXRA/HNF4G", "filter29":"PPARA:RXRA/HNF4G",
                   "filter62":"RARA:RXRG/FOXA1", "filter71":"MEOX1",
                   "filter78":"Gmeb1", "filter54":"Gmeb1", "filter13":"FOXJ3/NR1H3:RXRA",
                   "filter21":"Fos:JUN/CREB1"}
indiv_cofactors = {'filter28':'HNF4G', 'filter96':'HNF4G', 'filter66':'Gmeb1',
                   'filter5':'FOXA1', 'filter29':'HNF4G', 'filter2':'RXRA/HNF4G',
                   'filter99':'FOXA2', 'filter92':'HNF4G', 'filter12':'FOXJ3',
                   'filter34':'Gmeb1', 'filter16':"HNF4G", "filter19":"HNF4G",
                   'filter71':'MEOX2', 'filter62':'FOXA1', 'filter27':'Gmeb1',
                   'filter67':'FOXA1', 'filter95':'Gmeb1', 'filter13':'FOXJ3',
                   'filter64':'Gmeb1', 'filter47':'Gmeb1', 'filter6':'FOXK1',
                   'filter40':'NR1A4:RXRA'}
#['NR3C1', 'MEF2A', 'TFAP4 (NEUROD1/TWIST1/FIGLA)', 'KLF1', 'ATF1 (FOS:JUN)']
multi_random = {'filter18':'Gmeb1/FOS:JUN', 'filter64':'Gmeb1', 'filter14':'HES2/MYC',
                'filter17':'FIGLA/NEUROD1', 'filter28':'FOS:JUN', 'filter79':'NEUROD1/TWIST1',
                'filter2':'Gmeb1', 'filter76':'MITF/USF2', 'filter9':"NEUROD1/TWIST1",
                'filter5':'Gmeb1', 'filter31':'Gmeb1', 'filter70':'TWIST1',
                'filter43':'RARA:RXRG', 'filter33':'Gmeb1', 'filter29':'FOS:JUN',
                'filter85':'GATA1:TAL1', 'filter27':'Gmeb1', 'filter7':'USF2/ZEB1',
                'filter47':'FOS:JUN', 'filter60':'Gmeb1', 'filter12':'Myog',
                'filter36':'Gmeb1'}
# No filters annotated for the individual model trained with random TFs.
indiv_random = {}
# -
| notebooks/Interpretation_of_models_finetuned_with_cofactors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h2>Load data</h2>
# +
from numpy import loadtxt
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.utils import shuffle

# Read the iris data set and view it as a raw numpy matrix.
df = pd.read_csv("../data/iris.csv")
np_data = df.values

# The last column is the class label; everything before it is numeric.
X_raw = np_data[:, :-1].astype(float)
y_raw = np_data[:, -1]

# Encode the text labels as integers 0..n_classes-1.
encoder = LabelEncoder()
y = encoder.fit_transform(y_raw)

# Standardize attributes so no feature dominates by scale.
scaler = StandardScaler()
X = scaler.fit_transform(X_raw)

# Shuffle examples; fixed seed keeps runs comparable.
X, y = shuffle(X, y, random_state=0)

print("Examples: {}".format(X.shape[0]))
print("Attributes: {}".format(X.shape[1]))
print("Possible categories: {} encoded to {}".format(np.unique(y_raw),np.unique(y)))
# -
# <h2>Function for evaluating model accuracy</h2>
# +
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
def _print_report(y_true, y_pred, accuracy_label):
    """Print accuracy, confusion matrix and per-class report for one prediction set."""
    accuracy = accuracy_score(y_true, y_pred)
    print("%s: %.2f%%" % (accuracy_label, accuracy * 100.0))
    print("Confusion Matrix:")
    conf_mx = confusion_matrix(y_true, y_pred)
    print(conf_mx)
    print(classification_report(y_true, y_pred))


def evaluate(model):
    """Fit `model` on the full dataset and report training-set and 5-fold CV metrics.

    Uses the module-level X (features) and y (labels). Prints the reports;
    returns None. The training/CV reporting was previously duplicated inline;
    both paths now share _print_report.
    """
    print("-- Training data --")
    # train model on training dataset
    model.fit(X, y)
    # evaluate on the (seen) training data
    y_pred = model.predict(X)
    _print_report(y, y_pred, "Accuracy")
    print("")
    print("-- 5-fold CV --")
    # out-of-fold predictions give an unbiased performance estimate
    y_pred = cross_val_predict(model, X, y, cv=5)
    _print_report(y, y_pred, "Average accuracy")
# -
# <h2>Linear classifier</h2>
# +
from sklearn import linear_model

# Linear model fit with stochastic gradient descent; seeded for reproducibility.
model = linear_model.SGDClassifier(max_iter=1000, tol=1e-5, random_state=42)
evaluate(model)
# -
# <h2>Neural Network classifier</h2>
# +
from sklearn.neural_network import MLPClassifier

# Multi-layer perceptron with default architecture.
model = MLPClassifier(max_iter=2000, random_state=42)
evaluate(model)
# -
# <h2>Decision Tree classifier</h2>
# +
from sklearn.tree import DecisionTreeClassifier

# Depth/leaf limits guard against overfitting the small data set.
model = DecisionTreeClassifier(min_samples_leaf=10, max_depth=8, random_state=42)
evaluate(model)
# -
# <h2>SVM classifier</h2>
# +
from sklearn import svm

model = svm.SVC(random_state=42, gamma="scale")
evaluate(model)
# -
# <h2>kNN classifier</h2>
# +
from sklearn.neighbors import KNeighborsClassifier

model = KNeighborsClassifier(n_neighbors=3)
evaluate(model)
# -
# <h2>XGBoost classifier</h2>
# +
from xgboost import XGBClassifier

# Ignore deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

model = XGBClassifier(random_state=42)
evaluate(model)
# -
# ## Random Forest
# +
from sklearn.ensemble import RandomForestClassifier

# random_state pinned for reproducibility, consistent with every other
# classifier in this notebook (previously the only unseeded model).
model = RandomForestClassifier(n_estimators=5, random_state=42)
evaluate(model)
# -
# <h2>Plot attributes</h2>
# %matplotlib inline
import matplotlib.pyplot as plt
# One histogram per attribute to eyeball each feature's distribution.
df.hist(bins=50, figsize=(20,15))
plt.show()
| sklearn/Iris.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import glob
import os
import numpy as np
import pickle as pkl
import time
from fastparquet import write
import fastparquet
import dask.dataframe as dd
# Scan the GoogleV3 pickle directory, collecting file names and sizes.
all_pkl_files = []
pkl_file_sizes = []
path = '/data/dharp/compounds/datasets/googleV3/'
for filename in glob.glob(path + '*pkl'):
    pkl_file_sizes.append(os.path.getsize(filename))
    all_pkl_files.append(filename)
#df = pd.concat(dfs)
# Build the frame explicitly; the original passed the sizes as the *index*
# of pd.DataFrame and recovered them via reset_index, which was fragile.
# Column order ('fsize' first) is preserved.
pkl_df = pd.DataFrame({'fsize': pkl_file_sizes, 'fname': all_pkl_files})
# Share of total bytes per file, computed before the unit conversion below.
pkl_df['fsize_perc'] = pkl_df.fsize / pkl_df.fsize.sum() * 100
pkl_df.sort_values(by=['fsize'], ascending=False, inplace=True, ignore_index=True)
pkl_df.fsize /= 1024 * 1024  # bytes -> MiB
pkl_df
# +
# Greedily partition the (size-sorted) files into bins whose cumulative
# size stays at or under `maxvalue` MiB; `fcat` is the bin label.
maxvalue = 30_000
lastvalue = 0
newcum = []
labels = []
cur_label = 1
for row in pkl_df.itertuples():
    thisvalue = row.fsize + lastvalue
    if thisvalue > maxvalue:
        # Start a new bin seeded with the overflowing file's own size.
        # (The original reset the running total to 0 here, silently dropping
        # this file's size from its new bin's cumulative sum.)
        thisvalue = row.fsize
        cur_label += 1
    newcum.append(thisvalue)
    labels.append(cur_label)
    lastvalue = thisvalue
pkl_df['fcat'] = labels
pkl_df
# -
# results[0][['modifier','head']].value_counts().sample(50)
# def write_to_hdf(data_bin):
# if data_bin.shape[0]!=0:
# df_list=[]
# print(data_bin.shape[0])
# for row in data_bin.itertuples():
# #print(row.fname)
# tmp_pkl=pd.read_pickle(row.fname)
# tmp_pkl.reset_index(inplace=True)
# df_list.append(tmp_pkl)
# tmp_df=pd.concat(df_list,ignore_index=True,sort=False)
# print('Done concatenating')
# tmp_df.groupby(['fivegram_pos','year'])['count'].sum().to_frame().reset_index(inplace=True)
# print('Done grouping')
# tmp_df.to_hdf(path+'/entire_df_'+str(row.fcat)+'.h5',format='table', key='df',complib='zlib', complevel=5)
def write_to_parquet(data_bin):
    """Concatenate one bin of pickled frames, aggregate counts, write parquet.

    data_bin: sub-frame of pkl_df for a single `fcat` bin; each row's `fname`
    points at a pickled DataFrame on disk. Writes df_<fcat>.parq under the
    module-level `save_path` (defined before the groupby-apply call).
    """
    # `fcat` is constant within the group; capture it once instead of
    # relying on the loop variable `row` leaking out of the for-loop below.
    fcat = data_bin.iloc[0].fcat
    print(fcat)
    cur_time = time.time()
    df_list = []
    print(data_bin.shape[0])
    for row in data_bin.itertuples():
        #print(row.fname)
        cur_df = pd.read_pickle(row.fname)
        cur_df.reset_index(inplace=True)
        df_list.append(cur_df)
    concat_df = pd.concat(df_list, ignore_index=True, sort=False)
    print(concat_df.shape[0])
    total_df_shape = concat_df.shape[0]  # row count before aggregation
    print('Done concatenating')
    # Aggregate duplicate (lemma, sentence-pos, year, class) rows in parallel.
    ddf = dd.from_pandas(concat_df, npartitions=30)
    ddf = ddf.groupby(['lemma_pos', 'pos_sent', 'year', 'comp_class'])['count'].sum()
    print('Done grouping')
    ddf = ddf.to_frame().reset_index().compute()
    print(ddf.shape[0])
    after_shape = ddf.shape[0]
    ddf.to_parquet(
        path=f'{save_path}/df_{fcat}.parq',
        engine='fastparquet',
        compression='snappy',
        row_group_offsets=10_000_000)
    print(f"Finished df {fcat} ; Before : {total_df_shape}, After : {after_shape} Change in percentage : {(total_df_shape-after_shape)/total_df_shape*100:0.2f}%")
    print(f'Time taken {time.time()-cur_time} secs')
# Destination directory; read by write_to_parquet at call time.
save_path='/data/dharp/compounds/datasets/entire_df'
# Process every size bin.
pkl_df.groupby('fcat').apply(write_to_parquet)
# NOTE(review): the bare groupby below only displays the GroupBy object in
# the notebook; it performs no work.
pkl_df.groupby('fcat')
| Pickle_Files_Combiner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Author: <NAME>
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
torch.manual_seed(1)
# +
def to_scalar(var):
    """Return the single value held by `var` as a plain Python number."""
    flat = var.view(-1)
    return flat.data.tolist()[0]
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def prepare_sequence(seq, to_ix):
idxs = [to_ix[w] for w in seq]
tensor = torch.LongTensor(idxs)
return autograd.Variable(tensor)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
    """Stable log(sum(exp(vec))) over a 1 x tagset_size row vector.

    Subtracts the row maximum before exponentiating so exp() cannot
    overflow, then adds it back outside the log.
    """
    max_score = vec[0, argmax(vec)]
    max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + \
        torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
# -
class BiLSTM_CRF(nn.Module):
    """Bidirectional LSTM encoder with a CRF output layer for sequence tagging.

    The LSTM produces per-token emission scores over the tag set; the CRF
    layer adds learned tag-transition scores. Training minimizes the negative
    log-likelihood of the gold tag path (forward score minus gold score);
    inference uses Viterbi decoding.
    """

    def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
        """vocab_size: number of words; tag_to_ix: tag -> index map (must
        contain START_TAG and STOP_TAG); embedding_dim/hidden_dim: layer sizes
        (hidden_dim is split across the two LSTM directions)."""
        super(BiLSTM_CRF, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag_to_ix = tag_to_ix
        self.tagset_size = len(tag_to_ix)

        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=1, bidirectional=True)

        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)

        # Matrix of transition parameters. Entry i,j is the score of
        # transitioning *to* i *from* j.
        self.transitions = nn.Parameter(
            torch.randn(self.tagset_size, self.tagset_size))

        # These two statements enforce the constraint that we never transfer
        # to the start tag and we never transfer from the stop tag
        self.transitions.data[tag_to_ix[START_TAG], :] = -10000
        self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000

        self.hidden = self.init_hidden()

    def init_hidden(self):
        """Fresh (h0, c0) for the bidirectional LSTM, batch size 1."""
        return (autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)),
                autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)))

    def _forward_alg(self, feats):
        """Return log of the partition function Z over all tag paths for `feats`."""
        # Do the forward algorithm to compute the partition function
        init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)
        # START_TAG has all of the score.
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.

        # Wrap in a variable so that we will get automatic backprop
        forward_var = autograd.Variable(init_alphas)

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].view(
                    1, -1).expand(1, self.tagset_size)
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions[next_tag].view(1, -1)
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = forward_var + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            forward_var = torch.cat(alphas_t).view(1, -1)
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha

    def _get_lstm_features(self, sentence):
        """Run the BiLSTM over `sentence` (word-id tensor); return per-token emission scores."""
        self.hidden = self.init_hidden()
        embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
        lstm_feats = self.hidden2tag(lstm_out)
        return lstm_feats

    def _score_sentence(self, feats, tags):
        """Score of the given gold tag sequence under the current parameters."""
        # Gives the score of a provided tag sequence
        score = autograd.Variable(torch.Tensor([0]))
        tags = torch.cat([torch.LongTensor([self.tag_to_ix[START_TAG]]), tags])
        for i, feat in enumerate(feats):
            score = score + \
                self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
        score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
        return score

    def _viterbi_decode(self, feats):
        """Best-path decoding: return (path score, list of tag indices)."""
        backpointers = []

        # Initialize the viterbi variables in log space
        init_vvars = torch.Tensor(1, self.tagset_size).fill_(-10000.)
        init_vvars[0][self.tag_to_ix[START_TAG]] = 0

        # forward_var at step i holds the viterbi variables for step i-1
        forward_var = autograd.Variable(init_vvars)
        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = forward_var + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0][best_tag_id])
            # Now add in the emission scores, and assign forward_var to the set
            # of viterbi variables we just computed
            forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0][best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we dont want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag_to_ix[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path

    def neg_log_likelihood(self, sentence, tags):
        """Training loss: log Z minus the score of the gold path (>= 0)."""
        feats = self._get_lstm_features(sentence)
        forward_score = self._forward_alg(feats)
        gold_score = self._score_sentence(feats, tags)
        return forward_score - gold_score

    def forward(self, sentence):  # dont confuse this with _forward_alg above.
        """Inference: return (Viterbi score, best tag sequence) for `sentence`."""
        # Get the emission scores from the BiLSTM
        lstm_feats = self._get_lstm_features(sentence)

        # Find the best path, given the features.
        score, tag_seq = self._viterbi_decode(lstm_feats)
        return score, tag_seq
# +
START_TAG = "<START>"
STOP_TAG = "<STOP>"
EMBEDDING_DIM = 5
HIDDEN_DIM = 4

# Make up some training data
training_data = [(
    "the wall street journal reported today that apple corporation made money".split(),
    "B I I I O O O B I O O".split()
), (
    "georgia tech is a university in georgia".split(),
    "B I O O O O B".split()
)]

# Assign each distinct word a consecutive integer id.
word_to_ix = {}
for sentence, tags in training_data:
    for word in sentence:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)

tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}

model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)

# Check predictions before training
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
precheck_tags = torch.LongTensor([tag_to_ix[t] for t in training_data[0][1]])
print(model(precheck_sent))

# Make sure prepare_sequence from earlier in the LSTM section is loaded
for epoch in range(
        300):  # again, normally you would NOT do 300 epochs, it is toy data
    for sentence, tags in training_data:
        # Step 1. Remember that Pytorch accumulates gradients.
        # We need to clear them out before each instance
        model.zero_grad()

        # Step 2. Get our inputs ready for the network, that is,
        # turn them into Variables of word indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = torch.LongTensor([tag_to_ix[t] for t in tags])

        # Step 3. Run our forward pass.
        neg_log_likelihood = model.neg_log_likelihood(sentence_in, targets)

        # Step 4. Compute the loss, gradients, and update the parameters by
        # calling optimizer.step()
        neg_log_likelihood.backward()
        optimizer.step()

# Check predictions after training
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
print(model(precheck_sent))
# We got it!
# We got it!
| doc/test/NLP_DL/Word Embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Developing an AI application
#
# Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
#
# In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below.
#
# <img src='assets/Flowers.png' width=500px>
#
# The project is broken down into multiple steps:
#
# * Load and preprocess the image dataset
# * Train the image classifier on your dataset
# * Use the trained classifier to predict image content
#
# We'll lead you through each part which you'll implement in Python.
#
# When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
#
# First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
# +
# Define Imports
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import seaborn as sb
import torch
from torch import nn, optim
from torchvision import datasets, models, transforms
# -
# ## Load the data
#
# Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
#
# The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
#
# The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
#
# +
# Load Image Data
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'

# Dataset split keys used throughout the notebook.
TRAIN = 'train'
VALID = 'valid'
TEST = 'test'

# ImageNet channel statistics, required by the pre-trained backbones.
normal_means = [0.485, 0.456, 0.406]
normal_std_dev = [0.229, 0.224, 0.225]

# Transforms:
#  - All: Resize and crop to 224x224
#  - All: Apply normalization via mean & std dev
#  - Train Only: Apply random scaling, cropping & flipping (augmentation)

# Define your transforms for the training, validation, and testing sets
data_transforms = {TRAIN: transforms.Compose([transforms.RandomRotation(30),
                                              transforms.RandomResizedCrop(224),
                                              transforms.RandomHorizontalFlip(),
                                              transforms.ToTensor(),
                                              transforms.Normalize(normal_means, normal_std_dev)]),
                   VALID: transforms.Compose([transforms.Resize(256),
                                              transforms.CenterCrop(224),
                                              transforms.ToTensor(),
                                              transforms.Normalize(normal_means, normal_std_dev)]),
                   TEST: transforms.Compose([transforms.Resize(256),
                                             transforms.CenterCrop(224),
                                             transforms.ToTensor(),
                                             transforms.Normalize(normal_means, normal_std_dev)])}

# Load the datasets with ImageFolder
image_datasets = {TRAIN: datasets.ImageFolder(train_dir, transform=data_transforms[TRAIN]),
                  VALID: datasets.ImageFolder(valid_dir, transform=data_transforms[VALID]),
                  TEST: datasets.ImageFolder(test_dir, transform=data_transforms[TEST])}

# Using the image datasets and the transforms, define the dataloaders
dataloaders = {TRAIN: torch.utils.data.DataLoader(image_datasets[TRAIN], batch_size=64, shuffle=True),
               VALID: torch.utils.data.DataLoader(image_datasets[VALID], batch_size=32),
               TEST: torch.utils.data.DataLoader(image_datasets[TEST], batch_size=32)}

print("Number of training images: {}".format(len(dataloaders[TRAIN].dataset)))
print("Number of validation images: {}".format(len(dataloaders[VALID].dataset)))
print("Number of test images: {}".format(len(dataloaders[TEST].dataset)))
# -
# ### Label mapping
#
# You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
# +
# Load Label Mapping
import json

# cat_to_name: maps integer-encoded category id (as a string) -> flower name.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
# -
# # Building and training the classifier
#
# Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
#
# We're going to leave this part up to you. If you want to talk through it with someone, chat with your fellow students! You can also ask questions on the forums or join the instructors in office hours.
#
# Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
#
# * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
# * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
# * Train the classifier layers using backpropagation using the pre-trained network to get the features
# * Track the loss and accuracy on the validation set to determine the best hyperparameters
#
# We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!
#
# When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
# +
# Create class that creates our network and model
# Create class that creates our network and model
class Network():
    """VGG16 transfer-learning classifier with a custom feed-forward head.

    The convolutional backbone is frozen; only the replacement classifier
    (Linear/ReLU/Dropout stacks ending in LogSoftmax) is trainable.
    """

    def __init__(self, learning_rate=0.0001, dropout_rate=0.2, input_size=25088, hidden_sizes=[12544], output_size=102, model_state_dict=None, optimizer_state_dict=None, epochs=None, class_to_idx=None):
        # Keep hyperparameters on the instance so checkpoints can record them
        self.learning_rate = learning_rate
        self.dropout_rate = dropout_rate
        self.input_size = input_size
        self.hidden_sizes = hidden_sizes
        self.output_size = output_size
        self.epochs = epochs
        self.class_to_idx = class_to_idx
        # Transfer learning: start from pretrained VGG16 features
        self.model = models.vgg16(pretrained=True)
        # Freeze the backbone so backprop never touches the pretrained weights
        for frozen_param in self.model.parameters():
            frozen_param.requires_grad = False
        self.model.classifier = self.create_classifier(dropout_rate, input_size, hidden_sizes, output_size)
        # NLLLoss pairs with the LogSoftmax output of the classifier head
        self.criterion = nn.NLLLoss()
        self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=learning_rate)
        # Optionally restore previously trained weights / optimizer state
        if model_state_dict:
            self.model_state_dict = model_state_dict
            self.model.load_state_dict(model_state_dict)
        if optimizer_state_dict:
            self.optimizer_state_dict = optimizer_state_dict
            self.optimizer.load_state_dict(optimizer_state_dict)

    def create_classifier(self, dropout_rate, input_size, hidden_sizes, output_size):
        """Build the feed-forward head as an OrderedDict-backed Sequential."""
        stack = OrderedDict()
        stack['fcstart'] = nn.Linear(input_size, hidden_sizes[0])
        stack['relustart'] = nn.ReLU()
        stack['dropoutstart'] = nn.Dropout(dropout_rate)
        # One Linear/ReLU/Dropout triple per additional hidden layer
        for idx, (n_in, n_out) in enumerate(zip(hidden_sizes, hidden_sizes[1:])):
            stack['fc{}'.format(idx)] = nn.Linear(n_in, n_out)
            stack['relu{}'.format(idx)] = nn.ReLU()
            stack['dropout{}'.format(idx)] = nn.Dropout(dropout_rate)
        stack['output'] = nn.Linear(hidden_sizes[-1], output_size)
        # LogSoftmax over the class dimension; pairs with NLLLoss
        stack['logsoftmax'] = nn.LogSoftmax(dim=1)
        return nn.Sequential(stack)
# +
# Create a new network from our class
# Define hyperparameters for our network
learning_rate=0.0001
dropout_rate=0.2
# 25088 = 512 * 7 * 7, the flattened size of VGG16's final feature map
input_size=25088
hidden_sizes=[12544]
# 102 flower categories in the dataset
output_size=102
nw = Network(learning_rate=learning_rate, dropout_rate=dropout_rate, input_size=input_size, hidden_sizes=hidden_sizes, output_size=output_size)
# Display the architecture (notebook cell output)
nw.model
# +
# Define Function to test loss & accuracy on training/validation sets
def validate_test_loader(model, testloader, criterion):
    """Evaluate `model` over `testloader`.

    Returns the summed per-batch loss and the summed per-batch accuracy
    (callers divide by len(testloader) to get averages).
    """
    # Run on GPU when available, otherwise fall back to CPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Evaluation mode disables dropout and other training-only behavior
    model.eval()
    total_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for batch_images, batch_labels in testloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            log_probs = model(batch_images)
            total_loss += criterion(log_probs, batch_labels).item()
            # Highest-probability class per sample vs. ground truth
            predicted = torch.exp(log_probs).max(dim=1)[1]
            matches = (batch_labels.data == predicted)
            total_accuracy += matches.type(torch.FloatTensor).mean()
    # Restore training mode before handing the model back
    model.train()
    return total_loss, total_accuracy
# +
# Define a function to train the network
def train_network(model, epochs, criterion, optimizer):
    """Train `model` on the TRAIN dataloader, printing validation metrics
    every `print_every` optimizer steps."""
    # Prefer GPU when available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Training mode enables dropout and related behavior
    model.train()
    steps = 0
    print_every = 32
    for epoch in range(epochs):
        running_loss = 0
        for batch_images, batch_labels in dataloaders[TRAIN]:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            steps += 1
            # Gradients accumulate by default; reset before each batch
            optimizer.zero_grad()
            batch_loss = criterion(model.forward(batch_images), batch_labels)
            batch_loss.backward()
            optimizer.step()
            running_loss += batch_loss.item()
            # Periodically evaluate on the validation split
            if steps % print_every == 0:
                model.eval()
                testloader = dataloaders[VALID]
                test_loss, accuracy = validate_test_loader(model, testloader, criterion)
                print("Epoch: {}/{}.. ".format(epoch+1, epochs),
                      "Training Loss: {:.3f}.. ".format(running_loss/print_every),
                      "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
                      "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
                running_loss = 0
                # Make sure training is back on
                model.train()
# +
# Train the network
num_epochs = 6
train_network(nw.model, num_epochs, nw.criterion, nw.optimizer)
# Record the epoch count and class->index mapping on the wrapper so they
# are included when the model is checkpointed later.
nw.epochs = num_epochs
nw.class_to_idx = image_datasets[TRAIN].class_to_idx
# -
# ## Testing your network
#
# It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
# +
# Define a function to test a model with the given loader and criterion
def test_network(model, testloader, criterion):
    """Evaluate `model` on `testloader` and print average loss and accuracy."""
    print("Validating Test Set...")
    # Inference mode (no dropout) for a fair measurement
    model.eval()
    total_loss, total_accuracy = validate_test_loader(model, testloader, criterion)
    # validate_test_loader sums over batches, so divide by the batch count
    print("Test Loss: {:.3f}.. ".format(total_loss/len(testloader)),
          "Test Accuracy: {:.3f}".format(total_accuracy/len(testloader)))
# +
# Test the network
test_network(nw.model, dataloaders[TEST], nw.criterion)
# -
# ## Save the checkpoint
#
# Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.
#
# ```model.class_to_idx = image_datasets['train'].class_to_idx```
#
# Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
# +
# Write a function that saves a checkpoint
# NOTE: We are not saving the optimizer because the file is too large. This needs to be saved, however if you want to
# train this network further after loading it from a checkpoint. To save the optimizer, use the following:
#
# 'optimizer_state_dict': network.optimizer.state_dict()
checkpoint_filepath = 'checkpoint.pth'
def save_checkpoint(network, checkpoint_filepath):
    """Serialize the hyperparameters and model weights of `network` to disk.

    The optimizer state is intentionally omitted to keep the file small;
    add 'optimizer_state_dict' here if you plan to resume training.
    """
    torch.save(
        {
            'learning_rate': network.learning_rate,
            'dropout_rate': network.dropout_rate,
            'input_size': network.input_size,
            'hidden_sizes': network.hidden_sizes,
            'output_size': network.output_size,
            'epochs': network.epochs,
            'class_to_idx': network.class_to_idx,
            'model_state_dict': network.model.state_dict(),
        },
        checkpoint_filepath,
    )
save_checkpoint(nw, checkpoint_filepath)
# -
# ## Loading the checkpoint
#
# At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
# +
# Write a function that loads a checkpoint and rebuilds the model
# NOTE: To load the optimizer, use the following.
#
# optimizer_state_dict=checkpoint['optimizer_state_dict']
checkpoint_filepath = 'checkpoint.pth'
def load_checkpoint(checkpoint_filepath):
    """Rebuild a Network from a checkpoint produced by save_checkpoint."""
    ckpt = torch.load(checkpoint_filepath)
    # Reconstruct with the exact hyperparameters the model was saved with;
    # the state dict restores the trained classifier weights.
    return Network(
        learning_rate=ckpt['learning_rate'],
        dropout_rate=ckpt['dropout_rate'],
        input_size=ckpt['input_size'],
        hidden_sizes=ckpt['hidden_sizes'],
        output_size=ckpt['output_size'],
        epochs=ckpt['epochs'],
        class_to_idx=ckpt['class_to_idx'],
        model_state_dict=ckpt['model_state_dict']
    )
network_loaded = load_checkpoint(checkpoint_filepath)
# +
# Test the newly loaded network. It should perform exactly the same (given the same dataset) as the test run before
# the network was saved.
test_network(network_loaded.model, dataloaders[TEST], network_loaded.criterion)
# -
# # Inference for classification
#
# Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
#
# First you'll need to handle processing the input image such that it can be used in your network.
#
# ## Image Preprocessing
#
# You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
#
# First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.
#
# Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
#
# As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.
#
# And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
# +
# Given a path to a file, preprocess that image in preparation for making a prediction.
def process_image(image):
    """Load an image file and preprocess it into a normalized (3, 224, 224) numpy array."""
    # Same deterministic pipeline used for validation/testing during training
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        # ImageNet channel statistics, matching the training normalization
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # PIL image -> torch tensor -> numpy array
    pil_image = Image.open(image)
    tensor = pipeline(pil_image)
    return np.array(tensor)
# -
# To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
def imshow(image, ax=None, title=None):
    """Display a preprocessed (C, H, W) image array, undoing the normalization."""
    if ax is None:
        fig, ax = plt.subplots()
    if title:
        ax.set_title(title)
    # Matplotlib wants channels last; PyTorch-style arrays have them first
    img = image.transpose((1, 2, 0))
    # Invert Normalize(mean, std): x * std + mean, channel-wise
    img = img * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
    # Clamp to [0, 1] so the de-normalized values render correctly
    ax.imshow(np.clip(img, 0, 1))
    return ax
# +
# Image to use for testing
# image = 'flowers/test/1/image_06743.jpg'
# image = 'flowers/test/1/image_06752.jpg'
# image = 'flowers/test/1/image_06754.jpg'
image = 'flowers/test/1/image_06760.jpg'
# image = 'flowers/test/1/image_06764.jpg'
# +
# Test out image processing
np_image = process_image(image)
imshow(np_image, title="Pretty Flower")
# -
# ## Class Prediction
#
# Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.
#
# To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.
#
# Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
def predict(image_path, network, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Returns (probabilities, class_labels) for the topk most likely classes.
    '''
    model = network.model
    # Prefer GPU when available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Inference mode: no dropout
    model.eval()
    # Preprocess to a normalized tensor and add a batch dimension
    image_tensor = torch.from_numpy(process_image(image_path)).to(device).unsqueeze(0)
    with torch.no_grad():
        log_probs = model(image_tensor)
    # The model outputs log-probabilities; exponentiate before ranking
    top_probs, top_indices = torch.exp(log_probs).topk(topk)
    probs_list = top_probs[0].cpu().numpy().tolist()
    indices_list = top_indices[0].cpu().numpy().tolist()
    # class_to_idx maps class label -> index; invert it to decode indices
    idx_to_class = {index: label for label, index in network.class_to_idx.items()}
    return probs_list, [idx_to_class[index] for index in indices_list]
# +
# Test out prediction function
probs, classes = predict(image, network_loaded, 5)
print(probs)
print(classes)
# -
# ## Sanity Checking
#
# Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
#
# <img src='assets/inference_example.png' width=300px>
#
# You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
# +
# Test bar chart
def display_chart(probs, classes, ax=None):
    """Horizontal bar chart of class probabilities, most likely class on top."""
    if ax is None:
        fig, ax = plt.subplots()
    # Reverse so the highest probability is drawn at the top of the chart
    probs_rev = list(reversed(probs))
    classes_rev = list(reversed(classes))
    # Decode category ids to human-readable flower names
    names = [cat_to_name[c] for c in classes_rev]
    positions = np.arange(len(classes_rev))
    ax.barh(positions, probs_rev, align='center')
    ax.set_yticks(positions)
    ax.set_yticklabels(names)
    ax.set_xlabel('Probability')
probs, classes = predict(image, network_loaded, 5)
display_chart(probs, classes)
# +
# Create function to display an image along with the top 5 classes
def prediction_display(network, image, topk=5):
    """Show the image alongside a bar chart of its top-k predicted classes."""
    fig, axes = plt.subplots(2, 1)
    processed = process_image(image)
    probs, classes = predict(image, network, topk)
    # Title the image with the most probable class name
    imshow(processed, ax=axes[0], title=cat_to_name[classes[0]])
    display_chart(probs, classes, ax=axes[1])
prediction_display(network_loaded, image)
| Image Classifier Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: fengine
# language: python
# name: fengine
# ---
# ## Discretisation plus Encoding
#
# What shall we do with the variable after discretisation? should we use the buckets as a numerical variable? or should we use the intervals as categorical variable?
#
# The answer is, you can do either.
#
# If you are building decision tree based algorithms and the output of the discretisation are integers (each integer referring to a bin), then you can use those directly, as decision trees will pick up non-linear relationships between the discretised variable and the target.
#
# If you are building linear models instead, the bins may not necessarily hold a linear relationship with the target. In this case, it may help improve model performance to treat the bins as categories and to one hot encoding, or target guided encodings like mean encoding, weight of evidence, or target guided ordinal encoding.
#
# We can easily do so by combining feature-engine's discretisers and encoders.
#
# ## In this demo
#
# We will perform equal frequency discretisation followed by target guided ordinal encoding using the titanic dataset
#
# If instead you would like to do weight of evidence or mean target encoding, you need only replace the Feature-engine's encoder.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from feature_engine.discretisation import EqualFrequencyDiscretiser
from feature_engine.encoding import OrdinalEncoder
# +
# Load the Titanic dataset, keeping only the columns used in this demo
data = pd.read_csv('../titanic.csv', usecols=['age', 'fare', 'survived'])
data.head()
# +
# Let's separate into train and test set
X_train, X_test, y_train, y_test = train_test_split(
data[['age', 'fare']],
data['survived'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# -
# The variables Age and Fare contain missing data, that I will fill by extracting a random sample of the variable.
def impute_na(data, variable, sample_source=None):
    """Fill missing values of `variable` in `data` by random sampling.

    Parameters
    ----------
    data : pd.DataFrame
        Frame containing the column to impute.
    variable : str
        Name of the column to impute.
    sample_source : pd.DataFrame, optional
        Frame whose observed values of `variable` are sampled to fill the
        gaps. Defaults to the module-level training set ``X_train`` (the
        original behavior), so test-set imputation draws only from training
        data and does not leak test information.

    Returns
    -------
    pd.Series
        Copy of the column (named ``<variable>_random``) with NAs replaced.
    """
    df = data.copy()
    # Previously this function reached for the global X_train implicitly;
    # the explicit parameter keeps that default but makes it overridable.
    source = X_train if sample_source is None else sample_source
    # random sampling
    df[variable + '_random'] = df[variable]
    # extract exactly as many observed values as there are NAs to fill
    random_sample = source[variable].dropna().sample(
        df[variable].isnull().sum(), random_state=0)
    # pandas fills by index alignment, so point the sample at the NA rows
    random_sample.index = df[df[variable].isnull()].index
    df.loc[df[variable].isnull(), variable + '_random'] = random_sample
    return df[variable + '_random']
# +
# replace NA in both train and test sets
# NOTE(review): the full `data` frame is passed here (not X_train/X_test);
# pandas aligns the returned series on index, so each subset picks up its
# own rows. The replacement values are drawn from X_train inside impute_na.
# Verify this whole-frame imputation is intentional.
X_train['age'] = impute_na(data, 'age')
X_test['age'] = impute_na(data, 'age')
X_train['fare'] = impute_na(data, 'fare')
X_test['fare'] = impute_na(data, 'fare')
# +
# let's explore the distribution of age
X_train[['age', 'fare']].hist(bins=30, figsize=(8,4))
plt.show()
# -
# ## Equal frequency discretisation with Feature-Engine
# +
# set up the equal frequency discretiser (10 quantile-based bins per variable);
# return_object=True makes the bins come back as objects so the downstream
# feature-engine encoder treats them as categories
disc = EqualFrequencyDiscretiser(
    q=10, variables=['age', 'fare'], return_object=True)
# learn the interval boundaries from the training set only
disc.fit(X_train)
# transform train and test
train_t = disc.transform(X_train)
test_t = disc.transform(X_test)
# -
train_t.dtypes
train_t.head()
# +
# let's explore if the bins have a linear relationship
# with the target:
pd.concat([train_t, y_train], axis=1).groupby('age')['survived'].mean().plot()
plt.ylabel('mean of survived')
# -
pd.concat([train_t, y_train], axis=1).groupby('fare')['survived'].mean().plot()
plt.ylabel('mean of survived')
# None of the variables show a monotonic relationship between the intervals of the discrete variable and the mean of survival. We can encode the intervals to return a monotonic relationship:
#
# # Ordinal encoding with Feature-Engine
# +
enc = OrdinalEncoder(encoding_method = 'ordered')
enc.fit(train_t, y_train)
train_t = enc.transform(train_t)
test_t = enc.transform(test_t)
# +
# in the map, we map bin to position
enc.encoder_dict_
# -
pd.concat([train_t, y_train], axis=1).groupby('age')['survived'].mean().plot()
plt.ylabel('mean of survived')
pd.concat([train_t, y_train], axis=1).groupby('fare')['survived'].mean().plot()
plt.ylabel('mean of survived')
# Now we obtained a monotonic relationship between variables and target.
# **That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
| Section-08-Discretisation/08.04-Discretisation-plus-Encoding.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# +
using CSV
using DataFrames
using PyPlot
using ScikitLearn # machine learning package
using StatsBase
using Random
using LaTeXStrings # for L"$x$" to work instead of needing to do "\$x\$"
using Printf
using PyCall
sns = pyimport("seaborn")
# (optional)change settings for all plots at once, e.g. font size
rcParams = PyPlot.PyDict(PyPlot.matplotlib."rcParams")
rcParams["font.size"] = 16
# (optional) change the style. see styles here: https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html
PyPlot.matplotlib.style.use("seaborn-white")
# -
# ## classifying breast tumors as malignant or benign
#
# source: [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic))
#
# > Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image.
#
# The mean radius and smoothness of the cell nuclei (the two features) and the outcome (M = malignant, B = benign) of the tumor are in the `breast_cancer_data.csv`.
df = CSV.read("breast_cancer_data.csv")
df[!, :class] = map(row -> row == "B" ? 0 : 1, df[:, :outcome])
first(df, 5)
# ## visualize the two classes distributed in feature space
# Where SVM just computes a dividing plane, logistic regression calculates a probability that each point is in a particular class
# +
markers = Dict("M" => "x", "B" => "o")
fig, ax = subplots(figsize=(8, 8))
ax.set_xlabel("mean radius")
ax.set_ylabel("mean smoothness")
ax.set_facecolor("#efefef")
for df_c in groupby(df, :outcome)
outcome = df_c[1, :outcome]
ax.scatter(df_c[:, :mean_radius], df_c[:, :mean_smoothness], label="$outcome",
marker=markers[outcome], alpha=0.5)
end
legend()
axis("equal")
sns.despine()
# -
# ## get data ready for classifiation in scikitlearn
#
# scikitlearn takes as input:
# * a feature matrix `X`, which must be `n_samples` by `n_features`
# * a target vector `y`, which must be `n_samples` long (of course)
# +
n_tumors = nrow(df)
X = zeros(n_tumors, 2)
y = zeros(n_tumors)
for (i, tumor) in enumerate(eachrow(df))
X[i, 1] = tumor[:mean_radius]
X[i, 2] = tumor[:mean_smoothness]
y[i] = tumor[:class]
end
X # look at y too!
# -
# ## logistic regression
#
# let $\mathbf{x} \in \mathbb{R}^2$ be the feature vector describing a tumor. let $T$ be the random variable that denotes whether the tumor is benign (0) or malignant (1). the logistic model is a probabilistic model for the probability that a tumor is malignant given its feature vector:
#
# \begin{equation}
# \log \frac{Pr(T=1 | \mathbf{x})}{1-Pr(T=1 | \mathbf{x})} = \beta_0 + \boldsymbol \beta^\intercal \mathbf{x}
# \end{equation}
# where $\beta_0$ is the intercept and $\boldsymbol \beta \in \mathbb{R}$ are the weights for the features.
#
# we will use scikitlearn to learn the $\beta_0$ and $\boldsymbol \beta$ that maximize the likelihood.
@sk_import linear_model : LogisticRegression
# $$\vec{\nabla}_{\vec{B}}\ell = \vec{0}$$
# +
# default LR in sklearn has an L1 regularization, so we have to set penalty to none to fit this model
# solver minimizes grad_b l = 0
lr = LogisticRegression(penalty="none", solver="newton-cg")
lr.fit(X, y)
println("β = ", lr.coef_)
println("β₀ = ", lr.intercept_)
# -
# prediction of the probability that a new tumor is 0 (benign) or 1 (malignant)
# x = [20.0 5.0]
x = [15.0 2.5]
lr.predict(x) # should be malignant for x 0
lr.predict_proba(x) # [Pr(y=0|x) PR(y-1|x)]
# ## visualize the learned model $Pr(T=1|\mathbf{x})$
# +
radius = 5:0.25:30
smoothness = 0.0:0.25:20.0
lr_prediction = zeros(length(smoothness), length(radius))
for i = 1:length(radius)
for j = 1:length(smoothness)
# consider this feature vector
x = [radius[i] smoothness[j]]
# use logistic regression to predict P(y=1|x)
lr_prediction[j, i] = lr.predict_proba(x)[2] # second elem bc want y=1
end
end
# -
fig, ax = subplots(figsize=(8, 8))
ax.set_xlabel("mean radius")
ax.set_ylabel("mean smoothness")
asdf = ax.pcolor(radius, smoothness, lr_prediction, cmap="viridis", vmin=0.0, vmax=1.0)
colorbar(asdf, label="Pr(y=1|x)")
sns.despine()
# TODO: add the data points in the above plot
# ## making decisions: the ROC curve
#
# this depends on the cost of a false positive versus false negative. (here, "positive" is defined as testing positive for "malignant")
#
# > "I equally value minimizing (1) false positives and (2) false negatives."
#
# $\implies$ choose $Pr(T=1|\mathbf{x})=0.5$ as the decision boundary.
#
# > "I'd rather predict that a benign tumor is malignant (false positive) than predict that a malignant tumor is benign (false negative)."
#
# $\implies$ choose $Pr(T=1|\mathbf{x})=0.2$ as the decision boundary. Even if there is a relatively small chance that the tumor is malignant, we still take action and classify it as malignant...
#
# the receiver operator characteristic (ROC) curve is a way we can evaluate a classification algorithm without imposing our values and specifying where the decision boundary should be.
@sk_import metrics : roc_curve
@sk_import metrics : auc
# DIY
# +
# Pr(y=1|x) for every training example.
# BUG FIX: predict_log_proba returns log-probabilities, which broke the
# threshold sweep below (thresholds must lie in [0, 1]); use predict_proba.
prob_pred = lr.predict_proba(X)[:, 2]
p_star = 0.2 # choose some threshold
y_pred = prob_pred .> p_star
nb_positive_examples = sum(y)
# BUG FIX: element-wise AND of boolean vectors in Julia needs the broadcast
# operator .& — the scalar & raised an error on arrays.
FP = sum((y_pred .== 1) .& (y .== 0))
# calculate TPR, FPR, and sweep through p*
# -
# Using sklearn
#
# # NOTE(review): the thresholds returned by roc_curve should lie in [0, 1]; values outside that range indicate log-probabilities were passed instead of probabilities (predict_log_proba vs. predict_proba upstream).
fpr, tpr, p_stars = roc_curve(y, prob_pred)
# +
figure()
title("ROC Curve")
xlabel("FPR")
ylabel("TPR")
plot([0, 1], [0, 1], c="k", label="Pr(Y=1|x)=uniform(0, 1)")
plot(fpr, tpr, c="darkorange", label="LR Model")
scatter(fpr, tpr, c=p_stars, cmap="viridis")#, vmin=0.0, vmax=1.0)
colorbar(label="threshold")
legend()
println("AUC = ", auc(fpr, tpr))
# -
# tradeoff:
# * threshold too small: classify all of the tumors as malignant, false positive rate very high
# * threshold too large: classify all of the tumors as benign, false negative rate very high
#
# somewhere in the middle (but still depending on the cost of a false positive versus false negative) is where we should operate.
#
# the `auc`, area under the curve, has a probabilistic interpretation:
# > the area under the curve (often referred to as simply the AUC) is equal to the probability that a classifier will rank a randomly chosen positive instance higher than a randomly chosen negative one (assuming 'positive' ranks higher than 'negative') -[Wikipedia](https://en.wikipedia.org/wiki/Receiver_operating_characteristic)
# **warning**: always split your data into test or train or do cross-validation to assess model performance. we trained on all data here to see the mechanics of fitting a logistic regression model to data, visualizing the model, and creating an ROC curve.
| In-Class Notes/Logistic Regression/logistic regression_sparse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from pymongo import MongoClient
import urllib.parse
import os
import sys
import json
from json import dumps
# From within the docker-compose application, use "mongodb_container", for local development use "localhost"
dbServer = os.getenv("MONGO_DB_SERVER", "mongodb_container:27017")
dbUser = os.getenv("MONGO_USERNAME", "root")
# Placeholder default; supply the real password via the MONGO_PASSWORD env var
dbPW = os.getenv("MONGO_PASSWORD", "<PASSWORD>")
# BUG FIX: the password kwarg must reference the variable read above —
# the bare `<PASSWORD>` placeholder left here was invalid Python syntax.
client = MongoClient(dbServer, username=dbUser, password=dbPW)
print("Mongo DB Connection -----")
print("server:" + dbServer)
print("user:" + dbUser)
# All queries below run against the images.images collection
collection = client.images.images
# +
def imagelistpage(page_id: int):
    """Return one page (50 documents) of image metadata from the collection."""
    pagesize = 50
    # Pages are 1-based; page 1 starts at offset 0
    offset = (page_id - 1) * pagesize
    projection = {
        "datasetprovider": 1,
        "filenameHash": 1,
        "datasetname": 1,
        "imageFilename": 1,
        "timestamp": 1,
        "velocity_lon": 1,
        "yolov5": 1,
        "_id": 0,
    }
    cursor = collection.find({}, projection).skip(offset).limit(pagesize)
    return list(cursor)

imagelistpage(1)
imagelistpage(1)
# +
def find_velocity(min_velocity):
    """Return image docs whose longitudinal velocity exceeds `min_velocity`."""
    projection = {
        "datasetprovider": 1,
        "filenameHash": 1,
        "imageFilename": 1,
        "timestamp": 1,
        "velocity_lon": 1,
        "_id": 0,
    }
    cursor = collection.find({"velocity_lon": {"$gt": min_velocity}}, projection)
    return list(cursor)
find_velocity(12.0)
# +
def find_yolo(class_label):
    """Return image docs where YOLOv5 detected an object named `class_label`."""
    # $elemMatch: at least one detection in the yolov5 array has this name
    query = {"yolov5": {"$elemMatch": {"name": class_label}}}
    projection = {
        "datasetname": 1,
        "filenameHash": 1,
        "yolov5": 1,
        "_id": 0,
    }
    return list(collection.find(query, projection))
find_yolo('skateboard')
# +
def find_complex():
    """Sample compound query: given datasets AND given YOLO classes (first 5 hits)."""
    query = {
        "datasetname": {"$in": ['2011_09_26_drive_0002_sync', '2011_09_28_drive_0047_sync']},
        "yolov5.name": {"$in": ['skateboard', 'car']},
    }
    projection = {
        "datasetname": 1,
        "filenameHash": 1,
        "yolov5": 1,
        "_id": 0,
    }
    return list(collection.find(query, projection).limit(5))
find_complex()
# +
def distinct_datasetsnames():
    """Return the distinct dataset names in the collection, wrapped in a list.

    BUG FIX: the original had a trailing comma after the distinct() call,
    which silently wrapped the result in a 1-tuple before appending it.
    """
    # distinct() already returns a list of unique values
    return [collection.distinct("datasetname")]
distinct_datasetsnames()
# +
def distinct_clip_datasetsnames():
    """Return distinct dataset names that have a non-empty `clip` field.

    BUG FIX: removed the trailing comma that wrapped the distinct() result
    in a 1-tuple before it was appended.
    """
    return [collection.distinct(
        "datasetname", {"clip": {"$exists": "true", "$not": {"$size": 0}}})]
distinct_clip_datasetsnames()
# +
def distinct_yolo_classes():
    """Return the distinct YOLOv5 class names seen across all images.

    BUG FIX: removed the trailing comma that wrapped the distinct() result
    in a 1-tuple before it was appended.
    """
    return [collection.distinct("yolov5.name")]
distinct_yolo_classes()
# +
import matplotlib.pyplot as plt
def show_image(filenameHash):
    """Fetch the thumbnail for `filenameHash` from the local image service
    and display it with matplotlib."""
    import urllib.request  # local import: urllib.request is not imported in this cell
    url = 'http://localhost:8000/imagethumbnail/' + filenameHash
    # Context manager guarantees the HTTP response is closed (the original
    # left the handle open — a resource leak).
    with urllib.request.urlopen(url) as resp:
        img = plt.imread(resp, format='JPG')
    plt.imshow(img)
show_image('73b71cfcd213b9f75055dd9e7d327803')
# +
def delete_dataset(datasetname):
    """Delete every document belonging to `datasetname` and return the
    driver's DeleteResult.

    Bug fix: the original had a trailing comma after delete_many(), which
    wrapped the DeleteResult in a 1-tuple.
    """
    return collection.delete_many({"datasetname": datasetname})
x=delete_dataset('2011_09_28_drive_0047_sync')
# +
#var collection = "images"; var field = "datasetname";
#db[collection].distinct(field).forEach(function(value){print(field + ", " + value + ": " + db.images.count({[field]: value}))})
| datamining/notebooks/mongoDB_client.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/adityakalra581/Data-Science/blob/master/Deep%20Learning/Hand-Written-Digit-Recognizer/MNIST.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="HZ3FxiNnviP0" colab_type="code" colab={}
import tensorflow as tf
import tensorflow.keras as keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
# + id="mwZnMJt9xM6w" colab_type="code" outputId="53c69fe5-fce6-43f3-f0e3-21b076b7adb2" colab={"base_uri": "https://localhost:8080/", "height": 34}
tf.__version__
# + id="1CnL36oGxM23" colab_type="code" colab={}
mnist = tf.keras.datasets.mnist
# + id="TwcK_o33xM1M" colab_type="code" colab={}
(train_x,train_y),(test_x,test_y)=mnist.load_data()
# + id="y9g0qnUR0KEZ" colab_type="code" outputId="1b63dc7c-b0ca-4c10-9254-771e17b0354e" colab={"base_uri": "https://localhost:8080/", "height": 34}
type(test_y)
# + id="CUXUpC_ZxMyM" colab_type="code" outputId="27626d80-719d-42d8-b843-7ca139dd08e9" colab={"base_uri": "https://localhost:8080/", "height": 264}
plt.imshow(train_x[2])
plt.show()
# + id="JgRds9BYxMto" colab_type="code" outputId="64f18a69-2112-4e49-8161-1c30723d5fde" colab={"base_uri": "https://localhost:8080/", "height": 87}
print("Shape of train_x: ",train_x.shape)
print("Shape of train_y: ",train_y.shape)
print("Shape of test_x: ",test_x.shape)
print("Shape of test_y: ",test_y.shape)
# + id="ttkU9RFKxMrj" colab_type="code" outputId="ed19090b-559d-45ca-8ca3-96a8b79af9a7" colab={"base_uri": "https://localhost:8080/", "height": 264}
## Conversion into grayscale:
plt.imshow(train_x[2],cmap= plt.cm.binary)
plt.show()
# + id="SPiRiFI4xMpF" colab_type="code" outputId="411042fe-f714-47cc-94e5-ba5b10caa643" colab={"base_uri": "https://localhost:8080/", "height": 1000}
## Clearly the data in csv is in pixels.
train_x[0]
# + id="y0IXD9hFxMlp" colab_type="code" outputId="2ef232ed-6301-4e5a-bdfc-c79049b3f257" colab={"base_uri": "https://localhost:8080/", "height": 283}
## Let's see the structure of the data:
train_x[0:2]
# + id="tGtcqa6KxMR9" colab_type="code" colab={}
## Normalize the Datset:
## x_train = tf.keras.utils.normalize(x_train, axis=1)
train_x = tf.keras.utils.normalize(train_x,axis=1)
test_x = tf.keras.utils.normalize(test_x, axis=1)
# + id="18aapcbzw6F7" colab_type="code" colab={}
#train_x[0]
# + id="7Zc6PuOiw9ta" colab_type="code" colab={}
# Let's build a model:
## Reference: https://www.tensorflow.org/api_docs/python/tf/keras/Sequential
model = tf.keras.models.Sequential()
# + [markdown] id="B9Z-GMhIyqQg" colab_type="text"
# Now, we'll pop in layers. Recall our neural network image? Was the input layer flat, or was it multi-dimensional? It was flat. So, we need to take this 28x28 image, and make it a flat 1x784. There are many ways for us to do this, but keras has a Flatten layer built just for us, so we'll use that.
# + id="uoTsnFhuxw40" colab_type="code" colab={}
## Let's flatten the it
model.add(tf.keras.layers.Flatten())
# + id="gDopiNRJxw2h" colab_type="code" colab={}
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
# + [markdown] id="tN2E0Rsz1r2s" colab_type="text"
# ReLU stands for rectified linear unit, and is a type of activation function. Mathematically, it is defined as y = max(0, x). ... ReLU is the most commonly used activation function in neural networks, especially in CNNs. If you are unsure what activation function to use in your network, ReLU is usually a good first choice.
# + [markdown] id="22hlBDKL1tcf" colab_type="text"
# Softmax is an activation function. ... Softmax is exponential and enlarges differences - push one result closer to 1 while another closer to 0. It turns scores aka logits into probabilities. Cross entropy (cost function) is often computed for output of softmax and true labels (encoded in one hot encoding).
# + id="jh29mh_Dxwze" colab_type="code" colab={}
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + id="nbjcWHXfxww3" colab_type="code" outputId="0552ed8b-1d22-49ca-a0e2-6f218a4ab1b8" colab={"base_uri": "https://localhost:8080/", "height": 158}
model.fit(train_x,train_y, epochs=3)
# + id="FlRHsNGDxwuL" colab_type="code" outputId="b5f2e894-06b2-48cd-b87d-ff8a7a0a40d3" colab={"base_uri": "https://localhost:8080/", "height": 70}
val_loss, val_acc = model.evaluate(test_x, test_y)
print(val_loss)
print(val_acc)
# + id="3EfoFEdsxwrn" colab_type="code" colab={}
predictions = model.predict(test_x)
# + id="oh-z5ARyxwpQ" colab_type="code" outputId="921a6936-ee64-414c-8cb8-03ae21da1324" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(np.argmax(predictions[66]))
# + id="wO19Thmqxwmi" colab_type="code" outputId="a68f0fd9-dd38-423b-998d-fcd92e88e0d1" colab={"base_uri": "https://localhost:8080/", "height": 264}
plt.imshow(test_x[66])
plt.show()
# + id="v1iU1-P5xweq" colab_type="code" colab={}
# + [markdown] id="Wxd1C1n10lRT" colab_type="text"
# Importing Kaggle test data as dimensions may not be similar
# + id="HkMZdagF5p54" colab_type="code" colab={}
test=pd.read_csv(r'test.csv')
# + id="gMtTgd1m6KzG" colab_type="code" outputId="21fc32ec-3b11-452a-ce62-af7feca1c478" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(test.shape)
# + id="5Bz78qjD6WMw" colab_type="code" colab={}
test = test / 255.0
# + id="mylBgaIq7S2h" colab_type="code" colab={}
test = test.values.reshape(28000,28,28,1)
# + id="_kZYZzX87df3" colab_type="code" colab={}
# predict = model.predict(test)
# ## Need to convert into pandas series for kaggle submission:
# # select the indix with the maximum probability
# predict = np.argmax(predict,axis = 1)
# predict = pd.Series(predict,name="Label")
# + id="6EYUTp4c6c3t" colab_type="code" outputId="37da9c6b-9168-4fa8-8268-f7b5aca3ec55" colab={"base_uri": "https://localhost:8080/", "height": 52}
# NOTE(review): `predict` is only assigned in the commented-out cell above and
# again in a much later cell; running the notebook strictly top-to-bottom
# raises NameError here — confirm intended execution order.
print(type(predict))
print(predict.shape)
# + id="89O22CQr7jyQ" colab_type="code" colab={}
# submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),predict],axis = 1)
# submission.to_csv("mnist_tensorflow.csv",index=False)
# + id="TOA9tSN0Kt3i" colab_type="code" colab={}
# + [markdown] id="gRlpBE20_zMS" colab_type="text"
# Score achieved on first attempt: 0.97028
# Rank: 1729
# + id="xK-46ltbKve5" colab_type="code" colab={}
## Need to configure the dataset:
import tensorflow as tf
mnist = tf.keras.datasets.mnist
(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x=train_x.reshape(60000, 28, 28, 1)
train_x = train_x/255.0
test_x = test_x.reshape(10000, 28, 28, 1)
test_x = test_x/255.0
# + id="1GWApxRCLLSM" colab_type="code" colab={}
## Let's set Kaggle test Data:
# + id="U1hIR9TE534H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="4434364c-95bb-4cb6-ef17-01590af51b7b"
## Let's try building a model using CNN for better accuracy.
cnn_mod = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64,(3,3),activation = 'relu',
input_shape= (28,28,1)),
tf.keras.layers.MaxPooling2D(2,2),
(tf.keras.layers.Flatten()),
(tf.keras.layers.Dense(128, activation=tf.nn.relu)),
(tf.keras.layers.Dense(128, activation=tf.nn.relu)),
(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
])
cnn_mod.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
cnn_mod.fit(train_x,train_y, epochs=3)
val_loss, val_acc = cnn_mod.evaluate(test_x, test_y)
print(val_loss)
print(val_acc)
# + id="rJUYViwu53xU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="74db0243-b1f3-4e1f-90f7-96c12e00bb60"
## Let's apply the model on Kaggle test data:
predict = cnn_mod.predict(test)
## Need to convert into pandas series for kaggle submission:
# select the indix with the maximum probability
predict = np.argmax(predict,axis = 1)
predict = pd.Series(predict,name="Label")
print(type(predict))
print(predict.shape)
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),predict],axis = 1)
submission.to_csv("mnist_tensorflow2.csv",index=False)
# + [markdown] id="-im8HK7RP9-N" colab_type="text"
# - The Submission Score is 0.99242
#
# - Rank is 642.
# + id="0cx1fI3Y5gER" colab_type="code" colab={}
# + id="5I5TuoM25f_7" colab_type="code" colab={}
# + id="AlbnNm4u5f9L" colab_type="code" colab={}
# + id="EdtgY0oA5f6f" colab_type="code" colab={}
# + id="UTzZNgYG5f4R" colab_type="code" colab={}
# + id="dU1iFUZ45f11" colab_type="code" colab={}
# + id="AZUsf2oz5fmM" colab_type="code" colab={}
# + id="i3_STbtk0tjK" colab_type="code" colab={}
# + id="irbXHxh506k_" colab_type="code" colab={}
# + id="X8VAA6RR08Py" colab_type="code" colab={}
### Let's convert it into a numpy array>>>
# + id="bo_-cYOL2VNM" colab_type="code" colab={}
# + id="SLXBvPpE2Xb1" colab_type="code" colab={}
## Reshaping it
# + id="SszLkkGX2l13" colab_type="code" colab={}
## Now directly just fit the model:
# + id="6qcNGMeG28QP" colab_type="code" colab={}
# + id="uQfhsxoH2-na" colab_type="code" colab={}
# + id="Nku_ZFSW3ZOd" colab_type="code" colab={}
# + [markdown] id="B-uJN1UZ4h8x" colab_type="text"
# 5166 clearly
# needed 28000.
# + id="EoV_5TSP3byF" colab_type="code" colab={}
# + id="RmvKXukn3pKi" colab_type="code" colab={}
| Multiclass Image Classifiers/Hand-Written-Digit-Recognizer/MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Machine-Learning-Tokyo/MLT-x-fastai/blob/master/mini-lessons/text/ymr_classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="sTD7xCqqdZNX" colab_type="code" colab={}
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# + id="bl8mMgPvdZN4" colab_type="code" colab={}
from fastai import *
from fastai.text import *
# + [markdown] id="AJVerh6JdZOR" colab_type="text"
# ## Language Modeling
#
# One of the most common tasks done in NLP is called language modeling. A language model is an NLP model which learns to predict the next word in a sentence. We do this is because we assume that if a language model is quite accurate at guessing the next probable word in a sentnce, it needs a lot of world knowledge and a deep understanding of grammar, semantics, and other elements of natural language.
#
# We will show how to train a simple language model.
#
# + id="Sy9Js-0pdZOY" colab_type="code" colab={}
#install mecab dependencies
# #!sudo apt install swig
# #!sudo apt install mecab
# #!sudo apt install libmecab-dev
# #!sudo apt install mecab-ipadic-utf8
# #!sudo pip3 install mecab-python3
# + id="NHOyR3XudZOp" colab_type="code" colab={}
import re
import MeCab
tagger = MeCab.Tagger("-Owakati")
class MeCabTokenizer(BaseTokenizer):
    """fastai tokenizer backend that splits Japanese text with MeCab in
    wakati (space-separated surface form) mode."""
    def __init__(self, lang:str):
        # The `lang` argument is ignored; this tokenizer is hard-coded to Japanese.
        self.lang = 'ja'
    def add_special_cases(self, toks:Collection[str]): pass  # no special-case tokens needed
    def tokenizer(self,raw_sentence):
        # Uses the module-level Tagger("-Owakati"), which returns the
        # sentence as space-joined tokens terminated by a newline.
        result = tagger.parse(raw_sentence)
        words = result.split()
        if len(words) == 0:
            return []
        # NOTE(review): str.split() with no argument already drops all
        # whitespace, so a bare "\n" token should never appear; this guard
        # looks like dead code — confirm before removing.
        if words[-1] == "\n":
            words = words[:-1]
        return words
# + [markdown] id="ODK8iBwhdZO6" colab_type="text"
# ## Tokenization and Numericalization
#
# The most common preprocessing on NLP tasks in tokenization i.e splitting the sentence into words. This is much easier in space-seperated words like English however, for Japanese we require Morphological Analysis tools to get words from sentences.
#
# Numericalizing in the second preprocessing step. Since models can only take numbers as inputs, we make a dictionary mapping unique words to indices and replace the words with the words in the sentence with their corresponding index. Here we limit our dictionary size to 60000 words that appear at least twice in our corpus.
#
# + id="kAZ60tXYdZPA" colab_type="code" colab={}
tokenizer = Tokenizer(MeCabTokenizer, 'ja')
processor = [TokenizeProcessor(tokenizer=tokenizer), NumericalizeProcessor(max_vocab=60000,min_freq=2)]
# + id="ecCbXBp_dZPO" colab_type="code" colab={}
#get data
# #!wget https://github.com/dennybritz/sentiment-analysis/raw/master/data/yahoo-movie-reviews.json.tar.gz
# #!mkdir data
# #!tar xvzf yahoo-movie-reviews.json.tar.gz
# + id="x6Ypx_74dZPo" colab_type="code" colab={}
path = Path("data")
# + id="T2QKVGA8dZP7" colab_type="code" colab={} outputId="fdaa69dc-20d8-454b-abc8-de3affaf1298"
def load_ymr_data(path):
    """Load the Yahoo movie-review JSON at `path` into a DataFrame.

    Strips surrounding whitespace from the string columns and drops
    reviews whose text is empty after stripping.
    """
    with path.open() as fh:
        reviews = pd.read_json(fh)
    for col in ("movieName", "text", "title"):
        reviews[col] = reviews[col].str.strip()
    # Keep only reviews that still have text after stripping.
    reviews = reviews[reviews["text"].str.len() > 0]
    reviews["url"] = reviews["url"].str.strip()
    return reviews
def make_polar(data, balance=True):
    """Binarize review ratings: drop neutral (3), map <=2 to 0 and >=4 to 1.

    With balance=True, randomly subsample so both classes end up with the
    same number of rows (the size of the smaller class).
    """
    polar = data.loc[data.rating != 3].copy()
    polar.loc[polar.rating <= 2, 'rating'] = 0
    polar.loc[polar.rating >= 4, 'rating'] = 1
    if not balance:
        return polar
    groups = polar.groupby('rating')
    n_per_class = groups.rating.count().min()
    sampled = [np.random.choice(idx, n_per_class, replace=False)
               for _, idx in groups.groups.items()]
    polar = polar.reindex(itertools.chain(*sampled)).copy()
    return polar
mov_df = load_ymr_data(path/'yahoo-movie-reviews.json')
mov_df_polar = make_polar(mov_df)
mov_df_polar.head()
# + id="YNOCF0z7dZQZ" colab_type="code" colab={} outputId="6bdaeab4-d653-4797-968b-e231a83b8539"
sample_df = mov_df_polar.sample(n=1000,replace=True)
sample_df.head()
# + id="waG4nI5IdZQ-" colab_type="code" colab={} outputId="61b9edfc-5c58-4bea-b438-de89ccd1c9cd"
len(sample_df[sample_df["rating"]==0])
# + id="Qu8OzmcedZRP" colab_type="code" colab={} outputId="480b7ebb-92ee-4207-d77e-7c453d5cb5be"
len(sample_df)
# + id="zma83DJEdZRi" colab_type="code" colab={}
data_lm = (TextList.from_df(sample_df,path,cols='text',processor=processor)
.split_by_rand_pct()
.label_for_lm()
.databunch())
# + id="lGM7-_rGdZRv" colab_type="code" colab={} outputId="5397f03b-003e-4c39-8e7a-0ac52367ecaa"
data_lm.show_batch()
# + id="80OtQMLudZR-" colab_type="code" colab={}
config = awd_lstm_lm_config.copy()
config["n_hid"] = 1150
# + id="IjPM2Wbsdiok" colab_type="code" colab={}
#mounting your drive
#from google.colab import drive
#drive.mount('/content/gdrive')
# + [markdown] id="uhg9NiyNdmnR" colab_type="text"
# You can download the pretrained model file from this [link](https://drive.google.com/open?id=1KRUEV_3R-JVhcftvWJ66rwU7e_EZWtxE). Make sure to put them in your `path/models/` folder.
# + id="_KbVfyAFdZSD" colab_type="code" colab={}
learn = language_model_learner(data_lm,arch=AWD_LSTM,config=config,
drop_mult=0.3,pretrained_fnames=['ja-wiki','ja-wiki-itos'])
# + id="J7zEpXf3dZST" colab_type="code" colab={} outputId="936810e5-672f-4f88-9b66-1bd6c5b5b676"
learn.lr_find()
learn.recorder.plot(skip_end=15)
# + id="8MKiFMdadZSl" colab_type="code" colab={} outputId="9073fcb6-1543-413d-b8ea-7c981e70339d"
learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))
# + id="QBOiCkhodZS2" colab_type="code" colab={} outputId="1f684ad0-4e9c-4ce2-cf46-e04759f7c7c5"
learn.unfreeze()
learn.fit_one_cycle(10, 1e-3, moms=(0.8,0.7))
# + id="jxW6YRrJdZTF" colab_type="code" colab={}
learn.save_encoder("encoder")
# + id="Yev0gE6MdZTS" colab_type="code" colab={} outputId="01c07889-7cd7-4aae-b0d6-ebe03ccb1f51"
learn.predict("この映画を見て思ったのは", 70)
# + [markdown] id="rhPikwIIdZTh" colab_type="text"
# ## Text Classification
#
# One particular area that was challenging until recently with deep learning for NLP, was text classification.
#
# Similar to classifying images in text we can also use transfer learning to train accurate classifiers with few training examples. We will leverage weights from a language model trained on a large corpus as our pretrained weights. We will fine-tune the language model to our target dataset, attach a classification layer to our model and train by gradual unfreezing. The text classifying task will be sentiment analysis on Yahoo Movie Reviews.
#
# + id="_ynPAn-vdZTk" colab_type="code" colab={}
data_clas = (TextList.from_df(sample_df,path,cols="text",vocab=data_lm.vocab,processor=processor)
.split_by_rand_pct()
.label_from_df("rating")
.databunch())
# + id="kyGHodHDdZT2" colab_type="code" colab={} outputId="2eb59c06-7598-4cd0-8353-df48e2fd1484"
data_clas.show_batch()
# + id="yhKXV6_ZdZUC" colab_type="code" colab={}
config = awd_lstm_clas_config.copy()
config["n_hid"] = 1150
# + id="RPhBPzAzdZUH" colab_type="code" colab={}
learn = text_classifier_learner(data_clas, AWD_LSTM,config=config,drop_mult=0.5)
learn.load_encoder("encoder")
# + id="Xjqoq74IdZUT" colab_type="code" colab={} outputId="b2b71238-0352-4df3-efc6-e0069a8e4cd1"
learn.lr_find()
learn.recorder.plot()
# + id="LT-k6784dZUl" colab_type="code" colab={} outputId="5f83e457-e0fc-4696-e3c8-376b1187331d"
learn.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))
# + id="jv4rXOPsdZU0" colab_type="code" colab={} outputId="e56f0aa6-db83-4c8e-8daa-4b2bbac447b6"
learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
# + id="M5iQzvfddZU8" colab_type="code" colab={} outputId="f7673648-7009-43e9-b832-f8ed99f35c72"
learn.freeze_to(-3)
learn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
# + id="dGKypKa-dZVL" colab_type="code" colab={} outputId="381af1da-538b-4255-dd17-c4f306e227aa"
learn.unfreeze()
learn.fit_one_cycle(2, slice(1e-3/(2.6**4),1e-3), moms=(0.8,0.7))
# + id="G-y0zq2JdZVV" colab_type="code" colab={} outputId="274d91e9-8e33-471b-c4cc-c38559587832"
learn.predict("演技が悪かった")
# + id="9NVwgIhjdZVb" colab_type="code" colab={}
| mini-lessons/text/ymr_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
data = pd.read_excel("statewise_sugarcane.xlsx")
data
data['MADHYAPRADESH']= data['MADHYAPRADESH'].fillna(data['MADHYAPRADESH'].mean())
data
from scipy.stats import kruskal
# Kruskal-Wallis H-test: non-parametric check that the state-wise sugarcane
# samples could come from the same distribution.
stat, p = kruskal(data["ANDHRAPRADESH"],data["GUJARAT"],data["KARNATAKA"],data["MADHYAPRADESH"],data["MAHARASTRA"],data["ORISSA"],data["TAMILNADU"])
print('Statistics=%.3f, p=%.3f' % (stat, p))
alpha = 0.05  # significance level
if p > alpha:
    print('Same distributions (fail to reject H0)')
else:
    print('Different distributions (reject H0)')
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Unitary Learning with qgrad
#
# In this example, we shall try to learn an abitrary $8 \times 8$
# unitary matrix $U$, via gradient descent. We shall start with a
# random parametrized unitary matrix $U(\vec{\theta}, \vec{\phi}, \vec{\omega})$.
#
# Parametrized unitaries in `qgrad` are available via `Unitary` class
# with $\vec{\theta}, \vec{\phi}, \vec{\omega}$ as parameter
# vectors with $\vec{\theta}, \vec{\phi}$ being $\frac{(N) (N-1)}{2}$
# dimensional and $\vec{\omega}$ being $N$-dimensional.
#
#
# Here the input dataset consists of $8 \times 1$ random
# kets, call them
# $| \psi_{i} \rangle$ and output dataset is the action of the
# target unitary $U$ on these kets, $U |\psi_{i} \rangle$. The
# maximum value of $i$ is $80$, meaning that we merely use 80
# data points (kets in this case) to efficiently learn the
# target unitary, $U$.
#
#
# This tutorial is different from the
# [Qubit Rotation](https://github.com/qgrad/qgrad/blob/master/examples/QubitRotation.py),
# in that it learns the unitary matrix to not only take a fixed _specific_
# state to another _fixed_ state. Here the unitary
# $U( \vec{\theta}, \vec{\phi}, \vec{\omega})$ is learnt to evolve _any_ same
# dimensional ket as the target unitary, $U$ would evolve it.
#
#
# **Note**: Another version of this tutorial is implemented
# without `qgrad` that uses the parametrization used in
# [<NAME> and <NAME>, 2020](https://arxiv.org/pdf/1901.03431.pdf)
# and reproduces part of the results of that paper. This tutorial
# shows similar results, only with different unitary paramterization
# $U(\vec{\theta}, \vec{\phi}, \vec{\omega})$ since the
# parametrization used in the original paper
# uses hamiltonians
# in the powers of exponents, whose autodifferentiation is
# not currently supported in JAX. For further reading
# on this autodifferentiation incompatibility and unitary learning,
# please refer to companion blogs
# [here](https://araza6.github.io/posts/hamiltonian-differentiation/)
# and [here](https://araza6.github.io/posts/unitary-learning/)
#
# +
import jax.numpy as jnp
from jax import grad
from jax.experimental import optimizers
from jax.random import PRNGKey, uniform
import numpy as onp
#Visualization
import matplotlib.pyplot as plt
from qgrad.qgrad_qutip import fidelity, Unitary
from qutip import rand_ket # only to make the dataset
from scipy.stats import unitary_group
# +
def make_dataset(m, d):
    """Prepares a dataset of input and output
    kets to be used for training.
    Args:
        m (int): Number of data points, 80% of
            which would be used for training
        d (int): Dimension of a (square) unitary
            matrix to be approximated
    Returns:
        tuple: tuple of lists containing (JAX Device
            Arrays of) input and output kets
            respectively
    """
    ket_input = []
    ket_output = []
    for i in range(m):
        # NOTE(review): rand_ket is called with the same fixed seed on every
        # iteration, so all m input kets appear to be identical — confirm
        # whether a per-ket seed was intended for a meaningful dataset.
        ket_input.append(jnp.array(rand_ket(d, seed=300).full()))
        #Output data -- action of unitary on a ket states
        # Relies on the module-level target unitary `tar_unitr`.
        ket_output.append(jnp.dot(tar_unitr, ket_input[i]))
    return (ket_input, ket_output)
m = 100 # number of training data points
N = 8 # Dimension of the unitary to be learnt
train_len = int(m * 0.8)
# tar_unitr gives a different unitary each time
tar_unitr = jnp.asarray(unitary_group.rvs(N))
ket_input, ket_output = make_dataset(m, N)
# -
# ## Cost Function
#
# We use the same cost function as the authors
# [<NAME> and <NAME>, 2020](https://arxiv.org/pdf/1901.03431.pdf)
# define
#
# \begin{equation}
# E = 1 - (\frac{1}{M})\sum_{i} \langle \psi_{i}|U^{\dagger} U(\vec{\theta}, \vec{\phi}, \vec{\omega})|\psi_{i}\rangle
# \end{equation}
#
# where $|\psi_{i}\rangle$ is the training (or testing)
# data points -- in this case, kets, $U$ and
# $U(\vec{\theta}, \vec{\phi}, \vec{\omega})$ are the target and
# parameterized unitaries respectively and $M$ is the total
# number of training data points, which in our example is
# $80$
#
def cost(params, inputs, outputs):
    r"""Average infidelity-style cost over the first `train_len` data points.

    Builds the parametrized unitary :math:`U(\vec{\theta}, \vec{\phi},
    \vec{\omega})` from `params`, applies it to each input ket, and
    accumulates the overlap magnitude with the corresponding target ket.

    Args:
        params: tuple of parameter vectors (thetas, phis, omegas)
        inputs: input kets :math:`|\psi_i\rangle`
        outputs: target kets :math:`U|\psi_i\rangle`

    Returns:
        float: 1 minus the mean overlap — lower is better.
    """
    thetas, phis, omegas = params
    u_approx = Unitary(N)(thetas, phis, omegas)
    total = 0.0
    for idx in range(train_len):
        evolved = jnp.dot(u_approx, inputs[idx])
        total += jnp.absolute(jnp.real(jnp.dot(outputs[idx].conjugate().T, evolved)))
    # `total` is a 1x1 array at this point; index out the scalar.
    return (1 - (1 / train_len) * total)[0][0]
# ## Performance Metric -- Fidelity
#
# While cost is a valid metric to judge the learnability.
# We introduce another commonly used metric, the _average_
# fidelity between the predicted and the output (label)
# states, as another metric to track during training. Average
# fidelity over the dataset over a particular set of
# parameters is defined as:
#
# \begin{equation}
# F_{avg} = \frac{1}{M}\sum_{i}| \langle \psi_{in} | \psi_{pred} \rangle |^2
# \end{equation}
#
# where $\psi_{label}$
# represents the resulting (or the output)
# ket evolved under the target unitary,
# $U$ as $U|\psi_{i}\rangle$ and
# $\psi_{pred}$ represents the ket $\psi_{i}$
# evolved under
# $U(\vec{\theta}, \vec{\phi}, \vec{\omega})$
# as $U(\vec{\theta}, \vec{\phi}, \vec{\omega})|\psi_{i}\rangle$.
def test_score(params, inputs, outputs):
    r"""Average fidelity between predicted and label kets over the training set.

    Args:
        params: tuple of parameter vectors (thetas, phis, omegas)
        inputs: input kets :math:`|\psi_i\rangle`
        outputs: label kets :math:`U|\psi_i\rangle`

    Returns:
        float: mean fidelity of :math:`U(\vec{\theta}, \vec{\phi},
        \vec{\omega})|\psi_i\rangle` against the labels for these `params`.
    """
    thetas, phis, omegas = params
    u_approx = Unitary(N)(thetas, phis, omegas)
    total_fidelity = 0
    for idx in range(train_len):
        evolved = jnp.dot(u_approx, inputs[idx])
        total_fidelity += fidelity(evolved, outputs[idx])
    # The accumulated fidelity is a 1x1 array; index out the scalar.
    return (total_fidelity / train_len)[0][0]
# +
# Fixed PRNGKeys to pick the same starting params
params = uniform(PRNGKey(0), (N**2, ),
minval=0.0, maxval=2 * jnp.pi)
thetas = params[:N * (N-1) // 2]
phis = params[N * (N - 1) // 2 : N * (N - 1)]
omegas = params[N * (N - 1):]
params = [thetas, phis, omegas]
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-1)
opt_state = opt_init(params)
def step(i, opt_state, opt_update):
    """One optimizer update: differentiate `cost` at the current parameters
    over the whole dataset and apply the gradient."""
    current = get_params(opt_state)
    gradient = grad(cost)(current, ket_input, ket_output)
    return opt_update(i, gradient, opt_state)
epochs = 40
loss_hist = []
params_hist = []
fidel_hist = []
for i in range(epochs):
opt_state = step(i, opt_state, opt_update)
params = get_params(opt_state)
params_hist.append(params)
loss = cost(params, ket_input, ket_output)
loss_hist.append(loss)
avg_fidel = test_score(params, ket_input, ket_output)
fidel_hist.append(avg_fidel)
progress = [i+1, loss, avg_fidel]
if (i % 10 == 9):
print("Epoch: {:2f} | Loss: {:3f} | Fidelity: {:3f}".
format(*jnp.asarray(progress)))
# -
# ## Analyzing Learning Routine
#
# We see that we _efficiently_ (with $ \sim 99 \%$
# fidelity) reconstruct the target unitary $U$ starting
# from a random initial guess. We merely use $80$
# examples for training, and in $40$ crude gradient
# steps, we almost perfectly approximate the target
# unitary, $U$. Below is a plot of how fidelity increases
# and loss gets down to zero in the training schedule.
#
# In the graph below, $M$ represents
# the total size of the train set, $\psi_{label}$
# represents the resulting (or the output)
# ket evolved under the target unitary,
# $U$ as $U |\psi_{i} \rangle$ and
# $\psi_{pred}$ represents the ket $\psi_{i}$
# evolved under
# $U(\vec{\theta}, \vec{\phi}, \vec{\omega})$
# as $U(\vec{\theta}, \vec{\phi}, \vec{\omega})|\psi_{i}\rangle$.
#
# Each marker on the graph is the fidelity between
# the predicted and the target/label kets averaged over the
# whole train set and the cost on whole training set
# respectively.
# +
plt.figure(figsize=(9, 6))
plt.plot(fidel_hist, marker = 'o',
label=r"$F_{avg} = \frac{1}{M}\sum_{i}| \langle \psi_{label} | \psi_{pred} \rangle |^2$")
plt.plot(loss_hist, marker = 'x',
label=r'''$L = 1 - (\frac{1}{M})\sum_{i}\langle \psi_{i} | U ^{\dagger} U( \vec{\theta}, \vec{\phi}, \vec{\omega)} | \psi_{i} \rangle$''')
plt.title("Fidelity and Cost Trends", fontweight = "bold")
plt.legend(["Fidelity","Loss"])
plt.xlabel("epoch")
plt.legend(loc=0, prop = {'size': 15})
# -
# ## Testing on unseen kets
#
# We reserved the last $20$ (which is $20 \%$ of the total dataset)
# kets for testing.
# Now we shall apply our learned unitary matrix, call it
# $U_{opt}(\vec{\theta}, \vec{\phi}, \vec{\omega})$
# to the unseen kets and measure the fidelity of the evolved ket
# under $U_{opt}(\vec{\theta}, \vec{\phi}, \vec{\omega})$
# with those that evolved under the target unitary, $U$.
theta_opt, phi_opt, omega_opt = params_hist[-1]
opt_unitary = Unitary(N)(theta_opt, phi_opt, omega_opt)
fidel = []
for i in range(train_len, m): # unseen data
pred = jnp.dot(opt_unitary, ket_input[i])
fidel.append(fidelity(pred, ket_output[i])[0][0])
fidel
# ## Conclusion
#
# We see that the testing fidelity is
# $\sim 98 \%$, as opposed to training
# fidelity $\sim 99 \%$. One would expect
# this since drop as the unitary now
# acts on unseen data. We, however, note
# that we generalize well with
# $\sim 98 \%$ accuracy, if you will.
#
# This learnt unitary
# $U_{opt}(\vec{\theta}, \vec{\phi}, \vec{\omega})$
# can now be used to emulate the original
# target unitary, $U$, for more general
# datasets as well.
# ## References
# 1. Lloyd, Seth, and <NAME>. "Efficient implementation of unitary transformations." arXiv preprint arXiv:1901.03431 (2019).
| examples/notebooks/Unitary-Learning-qgrad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from matplotlib import pyplot as mp
import numpy as np
# import sys
# # !{sys.executable} -m pip install -U ovito
# %matplotlib inline
import numpy as np
import pandas as pd
import sys
import numpy as np
import pandas as pd
import sys
import os
# # !{sys.executable} -m pip install git+https://github.com/rajkubp020/fictive.git
# %matplotlib inline
import fictive
from fictive import *
from sklearn import linear_model
from sklearn import linear_model
import sys
import os
def peak(x, c):
    """Gaussian-shaped bump centred at `c` (unit height, fixed width)."""
    return np.exp(-((x - c) ** 2) / 16.0)
def lin_interp(x, y, i, half):
    """Linearly interpolate the x position where y crosses `half`
    between samples i and i+1."""
    frac = (half - y[i]) / (y[i + 1] - y[i])
    return x[i] + (x[i + 1] - x[i]) * frac
def half_max_x(x, y):
    """Return the two x positions where y crosses half of its maximum
    (the FWHM edges), linearly interpolated between samples.

    Args:
        x, y: equal-length sequences sampling a single-peaked curve.

    Returns:
        list: [left_crossing_x, right_crossing_x].

    Raises:
        IndexError: if fewer than two half-maximum crossings exist.
    """
    half = max(y) / 2.0
    signs = np.sign(np.add(y, -half))
    # Compare each sample's sign with its successor over the FULL array.
    # Bug fix: the original sliced signs[0:-2] != signs[1:-1], an off-by-one
    # that silently ignored a crossing in the last interval.
    zero_crossings = (signs[:-1] != signs[1:])
    zero_crossings_i = np.where(zero_crossings)[0]
    return [lin_interp(x, y, zero_crossings_i[0], half),
            lin_interp(x, y, zero_crossings_i[1], half)]
# # make some fake data
# x=np.linspace(0,9,201)
# y=np.sin(x)
file='SQ75B.csv'
sq=pd.read_csv(file,header=None,skiprows = 2)
sq.columns = ['a','b']
sq=sq.sort_values(by='a')
x=sq.a.to_numpy()
y=sq.b.to_numpy()
# find the two crossing points
hmx = half_max_x(x,y)
# print the answer
fwhm = hmx[1] - hmx[0]
x11=x[np.where(np.logical_and(x>=0, x<=hmx[1]))]
y11=y[np.where(np.logical_and(x>=0, x<=hmx[1]))]
fsdp=float(x11[y11==max(y11)][0])
fp=[fsdp ,max(y11)]
print("FWHM:{:.3f}".format(fwhm),"FSDP:{:.3f}".format(fsdp))
half = max(y)/2.0
plt.plot(x,y,'-b', label='Pristine ')
plt.plot(hmx, [half, half],'s-r', lw=3.5)
txt=str("FSDP: {:.2f}".format(fsdp))+ '\n'+str("FWHM: {:.2f}".format(fwhm))
plt.xlabel('Wave number (Å$^-$$^1$)')
plt.ylabel('Structure factor (a.u.)')
plt.text(half*2, .3,txt , fontsize=20)
plt.xlim([0, max(x)/3])
legend_on()
plt.legend(loc=3)
print('./plot/'+str(file.split('.')[0])+'_fsdp.png')
plt.savefig('./'+'_fsdp.png',dpi=200, bbox_inches='tight')
# +
# fig, ax = plt.subplots()
# Plot a smoothed pair distribution function g(r) (column 11 vs. column 0)
# and shade the area under the first peak used for the coordination number.
fig, [ax] = panel(1,1)  # panel()/legend_on()/squarefig() presumably local helpers; not defined here
legend_on(ax=ax)
# pd.read_csv('pdf_intial_62b.csv', header=None)[11].plot()
pdf=pd.read_csv('pdf_intial_62b.csv', header=None)#[[0,10]][200:]
# pdf[0]=pdf[0]-2
pdf1=pdf.rolling(window=50).mean()  # smooth the raw curve
# plt.plot(pdf1[0],pdf1[11])
# pdf[0],pdf[10]
pdf1=pdf1.dropna()[200:]
x=pdf1[0]-2.25  # shift distance axis -- presumably an origin correction; TODO confirm
y=pdf1[11]
# plt.plot(pdf1[0],pdf1[11].rolling(window=20).mean())
# x,y=x.rolling(window=20).mean(),y.rolling(window=20).mean()
# plt.plot(x.rolling(window=20).mean(),y.rolling(window=20).mean())
from IPython.display import display, Math, Latex
# display(Math(r'F(k) = \int_{-\infty}^{\infty} f(x) e^{2\pi i k} dx'))
plt.plot(x.rolling(window=20).mean(),y.rolling(window=20).mean(),'k',label=r'$g(r)$')
plt.xlabel('Distance (r)')
plt.ylabel('Pair distribution function g(r)')
# plt.ylabel(r'$\int_0^y du/(1+u^{2})$')
# plt.ylabel()
# ax.text(0.18, 0.18, Math(r'F(k) = \int_{-\infty}^{\infty} f(x) e^{2\pi i k} dx'), color="C0", fontsize=16)
pdf1[0]
limit=267  # index of the integration cutoff r_cut for the shaded CN region
plt.fill_between(x.rolling(window=20).mean()[:limit],y.rolling(window=20).mean()[:limit],facecolor='b', alpha=0.7,label=r'CN=$\int_0^{ r_{cut}} {{4\pi\rho}r^{2}g(r)dr}$')
plt.legend(loc = "upper right",labelcolor='linecolor')
# plt.plot(x,y)
# legend_on()
squarefig(8)
y
plt.savefig('_Fig1_gr_CN.png',dpi=300,bbox_inches='tight')
# -
| example/master_fsdp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# ### 1. What is the most popular programming language in 2019 ?
# **Business understanding** : There are three fundamental questions that arise when someone wants to learn a programming language.
# The first question is: which programming language is the most popular at the moment? This is important to know before you start learning a programming language, so that your expectations about getting a job are realistic. If you want to get a job after studying a programming language, you first need to understand the demand in the market.
#
#
# Let's take a look the necessary libraries we will need to wrangle our data.
# We need pandas, numpy, itertools, and matplotlib
import pandas as pd
import numpy as np
import itertools
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the 2019 Stack Overflow survey results and tag each row with its year.
df_2019 = pd.read_csv("./input/2019 Survey Result.csv")
df_2019['Year'] = '2019'
# **Data understanding** : let's take a look at statistics of the data we use.
df_2019.head()
df_2019.describe()
# The raw DataFrame above contains 88,883 respondents whose average age is 30 y.o.
# We only need information on the languages each respondent worked with,
# so the Respondent and LanguageWorkedWith columns are enough for the analysis.
df_2019_lang = df_2019[['Respondent','LanguageWorkedWith']]
df_2019_lang.head()
# As shown above the LanguageWorkedWith columns has grouping Language item, every language is separated by semicolon.
# We need to split the grouping item into one per row to be able to analyze it. With use of itertools library we can make functions to split it.
# +
def splitter(values):
    '''
    Description : split grouping values using the semicolon (;) separator.

    INPUT:
    values - one row of a dataframe; each cell may hold a ';'-joined group

    OUTPUT:
    list - list of tuples, the cartesian product of the split cells
    '''
    split_cells = (str(cell).split(';') for cell in values)
    return list(itertools.product(*split_cells))
def expand(df):
    '''
    Rebuild *df* so that every ';'-grouped cell gets its own row.

    INPUT:
    df - a dataframe whose columns may contain ';'-joined group values

    OUTPUT:
    a fresh dataframe with the same columns and one row per split combination
    '''
    records = []
    for _, row in df.iterrows():
        records.extend(splitter(row))
    return pd.DataFrame.from_records(records, columns=df.columns)
# -
# Now we can use expand function and take a look at the results
# Expand the ';'-grouped language lists into one row per (respondent, language).
df_2019_lang_expand = expand(df_2019_lang)
df_2019_lang_expand.head()
df_2019_lang_expand.describe()
# **Data understanding** : From the table above we can quickly see that **JavaScript** is the top language used by programmers; however let's look further.
# **Data Preparation** : we clean the data by removing missing values. I chose to drop missing values rather than fill them because the data we need here is the actual/real count, not a prediction. I also rename the column to represent what the value actually is. Now that the grouped languages are split, we can count how often each language appears; percentages are easier to interpret.
# +
# Count occurrences of each language, then rename the value_counts columns.
most_used_lang = pd.DataFrame(df_2019_lang_expand['LanguageWorkedWith'].value_counts().reset_index())
most_used_lang.rename(columns={'index':'LanguageWorkedWith','LanguageWorkedWith':'count'}, inplace=True)
most_used_lang = most_used_lang.sort_values('count', ascending=True)
# Besides null values can cause an error for 'Percentage' calculation,
# we only take into account respondents that answered the language question.
total_respondent = len(df_2019[df_2019['LanguageWorkedWith'].notnull()]) # total respondent that answer language question
most_used_lang['perc'] = (most_used_lang['count']/total_respondent)*100
# +
# Create plot function to visualize the result in bar chart
def plot_barh(df, x_col, y_col, title='Most Popular Language 2019'):
    '''
    Visualize one numeric column of *df* as a horizontal bar chart.

    INPUT :
    df - clean dataframe whose numbers are ready to be plotted
    x_col - str, column holding the category labels (object type)
    y_col - str, column holding the bar lengths (integer or float type)
    title - str, chart title; defaults to the original hard-coded title
            so existing callers are unaffected

    OUTPUT :
    bar chart (shown via plt.show(); returns None)
    '''
    # one y slot per row, labelled with the category names
    x_pos = list(range(len(df[x_col])))
    plt.figure(figsize=(10, 10))
    plt.barh(x_pos, df[y_col])
    plt.yticks(x_pos, df[x_col])
    plt.title(title, fontsize=20)
    plt.xlabel('Percentage of Respondents (%)', fontsize=12)
    plt.show()
# -
# Plot the percentage of respondents per language.
plot_barh(most_used_lang,'LanguageWorkedWith','perc')
# **Result Evaluation** : As the result above shows, JavaScript is the most popular language nowadays, followed by HTML/CSS and SQL. More than 60% of respondents have worked with JavaScript.
# We may also want to know who uses JavaScript, i.e. what kind of programmer is using it.
# Let's take a look at the simple analysis below.
# ### Users of Javascript
# +
# Now we want to know which developers are using JavaScript.
df_2019_lang_user = df_2019[['Respondent','LanguageWorkedWith','DevType']]
# for this analysis we need the real condition, not a prediction, so all NULL values can be deleted
df_2019_lang_user = df_2019_lang_user[df_2019_lang_user['DevType'].notnull()]
df_2019_lang_user = expand(df_2019_lang_user)
# -
# Calculate total users per (language, developer type) pair.
df_2019_lang_user = df_2019_lang_user.groupby(['LanguageWorkedWith','DevType']).size().reset_index().rename(columns={0:'count'}).sort_values('count', ascending=False)
# Show the top developer types for JavaScript.
df_2019_lang_user[df_2019_lang_user['LanguageWorkedWith']=='JavaScript'][:10]
# **Result Evaluation** : **JavaScript** is commonly used by **Full Stack Developer**, followed by Back End Developer.
# No wonder, as JavaScript is widely known for its ability to make websites and applications interactive.
| MostPopularLanguage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# cd ../Localization_and_Detection/NoahSim/
import GRBgenerator
# Below is the fastcube class, simulations for burstcube that run really fast but still pretty accurate. What sorts of customizations should I include?
#
# Sample, samples n,
#
# Keep that test feature:
# maybe have a bruteforce vs. quicker version, all about fullsky sims now though.
#
# Also need to remember how I treated past horizon spots, and what the goal should be for those.
# +
#The following cell contains the "FastCube" class. This is the simulation I hope to use to be able to run quicker simulations.
import numpy as np
import healpy as hp
import burstutils as bf
import random as rand
import statistics as s
import time as time
class FastCube():
    """Fast, approximate simulator of the BurstCube instrument.

    BurstCube carries four tilted scintillator detectors, labelled A-D.
    Given a background count level and the detector tilt(s), this class can
    simulate the instrument's noisy response to a sky of GRB sources and
    estimate the localization error at each sky position.

    Parameters
    ----------
    background : float
        Background counts assumed present in every detector.
    dettilt : float
        Tilt of the detectors away from zenith, in degrees.
    alternating : bool
        If True, detectors B and D use a second tilt which is requested
        interactively via input().
    """

    def __init__(self, background, dettilt, alternating=False):
        if not alternating:
            self.tilt = np.deg2rad(dettilt)
            self.tiltA = self.tiltB = self.tiltC = self.tiltD = self.tilt
        else:
            # the second tilt (for B and D) is entered interactively
            self.tiltB = (float(input("Please enter the second tilt (deg) ")))
            self.tiltB = np.deg2rad(self.tiltB)
            self.tiltC = self.tiltA = np.deg2rad(dettilt)
            self.tiltD = self.tiltB
        self.zenith = [0 , 0]       # spacecraft zenith [theta, phi] in radians
        self.bg = background

    @property
    def detA(self):
        """[theta, phi] pointing of detector A (tilted toward phi = 0)."""
        return [ self.zenith[0] + self.tiltA , self.zenith[1] ]

    @property
    def detB(self):
        """[theta, phi] pointing of detector B (tilted toward phi = pi/2)."""
        return [ self.zenith[0] + self.tiltB , self.zenith[1] + np.pi/2 ]

    @property
    def detC(self):
        """[theta, phi] pointing of detector C (tilted toward phi = pi)."""
        return [ self.zenith[0] + self.tiltC , self.zenith[1] + np.pi ]

    @property
    def detD(self):
        """[theta, phi] pointing of detector D (tilted toward phi = 3*pi/2)."""
        return [ self.zenith[0] + self.tiltD , self.zenith[1] + 3*np.pi/2 ]

    @property
    def normA(self):
        """Unit normal (cartesian) of detector A."""
        return hp.ang2vec(self.detA[0],self.detA[1])

    @property
    def normB(self):
        """Unit normal (cartesian) of detector B."""
        return hp.ang2vec(self.detB[0],self.detB[1])

    @property
    def normC(self):
        """Unit normal (cartesian) of detector C."""
        return hp.ang2vec(self.detC[0],self.detC[1])

    @property
    def normD(self):
        """Unit normal (cartesian) of detector D."""
        return hp.ang2vec(self.detD[0],self.detD[1])

    @property
    def dets(self):
        """Normals of all four detectors, in order A, B, C, D."""
        return [self.normA,self.normB,self.normC,self.normD]

    def _observed_counts(self, sourcexyz, norm, Ao):
        """Simulate the noisy counts one detector records from a burst.

        Parameters
        ----------
        sourcexyz : array-like
            Cartesian unit vector of the burst position.
        norm : array-like
            Cartesian unit normal of the detector.
        Ao : float
            Intrinsic source amplitude.

        Returns
        -------
        float
            Background plus angular-response-scaled signal, smeared with
            Gaussian (approximately Poisson) noise and clamped so the
            result never drops below the background level.
        """
        separation = bf.angle(sourcexyz, norm)
        if separation < np.pi / 2:
            # burst is above this detector's horizon
            expected = Ao * bf.response(separation)
        else:
            # a burst behind the detector plane contributes no signal
            expected = 0
        counts = expected + self.bg
        noisy = rand.gauss(counts, np.sqrt(counts))
        if noisy - self.bg < 0:
            noisy = self.bg
        return noisy

    def response2GRB(self, GRB, test=True):
        """
        Using least squares regression, determine the sky position of an array
        of GRB sources assuming inherent background noise within the
        detectors, along with Gaussian fluctuations of the counts.

        Parameters
        ----------
        GRB : object
            An instance of the separately defined "GRBs" class that contains a
            number of evenly spaced sky positions of a given strength.
        test : boolean
            For sanity purposes: test mode samples only the first sky position
            on a coarser grid, making potential errors quicker to spot.

        Returns
        ----------
        localizationerrors : list
            Average localization uncertainty (degrees) at each sky position.

        Side effects: populates self.X_all (simulated [A, B, C, D] counts per
        trial) and self.y_all (fitted [theta, phi, amplitude] per trial).
        """
        if test:
            # coarse, fast settings for sanity checking a single position
            sample = 1
            samples = 50            # trials per sky position
            bottheta, toptheta = 0, 90
            botphi, topphi = 0, 360
            botA, topA = 0, 1000
            ntheta, nphi, nA = 10, 37, 100
        else:
            sample = len(GRB.sourceangs)
            samples = 30            # trials per sky position
            bottheta, toptheta = 0, 90
            botphi, topphi = 0, 360
            botA, topA = 400, 1000
            ntheta, nphi, nA = 31, 120, 12
        self.localizationerrors = []
        self.X_all = []
        self.y_all = []
        for i in range(sample):
            sourceAng = GRB.sourceangs[i]
            sourcexyz = hp.ang2vec(sourceAng[0], sourceAng[1])  # cartesian burst position
            locunc = []
            for _ in range(samples):
                # simulate the four detectors' noisy counts for this trial
                detcounts = [self._observed_counts(sourcexyz, norm, GRB.Ao)
                             for norm in self.dets]
                self.X_all.append(detcounts)
                # coarse-to-fine optimization: chi-square grid over
                # (theta, phi, A) per detector, then summed over detectors
                chis = [bf.quad_solver(cnt, norm, bottheta, toptheta, botphi,
                                       topphi, botA, topA, ntheta, nphi, nA,
                                       self.bg)
                        for cnt, norm in zip(detcounts, self.dets)]
                chisquared = np.add(np.add(chis[0], chis[1]),
                                    np.add(chis[2], chis[3]))
                thetaloc, philoc, Aguess = bf.indexer(chisquared, bottheta,
                                                      toptheta, botphi, topphi,
                                                      botA, topA, ntheta, nphi,
                                                      nA)
                recvec = hp.ang2vec(np.deg2rad(thetaloc), np.deg2rad(philoc))
                locoffset = np.rad2deg(bf.angle(sourcexyz, recvec))
                self.y_all.append([thetaloc, philoc, Aguess])
                locunc.append(locoffset)
            self.localizationerrors.append(s.mean(locunc))
        return self.localizationerrors
# -
# Run the fast simulator over a generated GRB sky, then try to fit the
# (counts -> location) mapping with sklearn and Keras models.
GRBtest = GRBgenerator.Sky(8,500)
import time
fastguy = FastCube(1000,45,alternating=False)
# +
fastguy.response2GRB(GRBtest,test=False)
# -
len(fastguy.X_all)
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
# +
xall = np.array(fastguy.X_all)
yall = np.array(fastguy.y_all)
# -
model.fit(xall,yall)
yes = model.predict(xall)
yes
# Now the NN Part!
from keras.models import Sequential
from keras.layers import Dense
import tensorflow
# +
xall = np.array(fastguy.X_all)
yall = np.array(fastguy.y_all)
# +
x = np.array([1, 2, 3])
# row vector via reshape
np.shape(xall)
# -
x = x.reshape((1, 3))
np.shape(x)
# NOTE(review): np.split requires an indices_or_sections argument; as written
# this raises TypeError.  Presumably a train/test split was intended -- verify.
xtrain = np.split(xall)
model = Sequential()
model.add(Dense(12, input_dim=4, activation='linear'))
model.add(Dense(102, activation='linear'))
model.add(Dense(3, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# Mean Absolute Error (MAE): MAE measures the average magnitude of the errors in a set of predictions, without considering their direction. It’s the average over the test sample of the absolute differences between prediction and actual observation where all individual differences have equal weight.
#
# NOTE(review): X_train and y_train are never defined in this notebook; the
# lines below raise NameError.  Presumably they should come from splitting
# xall/yall -- confirm intent.
model.fit(X_train, y_train, epochs=50, batch_size=1000)
len(X_train)
model.predict(np.array([[1440.91398024, 1375.13145513, 1330.10174408, 1290.14606751]]))
X_train[0]
X_train
y_train
| Notebooks/NoahSim/Localization_fastCube.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import matplotlib as mpl
from tqdm.notebook import tqdm
# Load MNIST from CSV: first column is the label, remaining 784 are pixels.
rng = np.random.default_rng()
# Note -- place your own MNIST files in the appropriate directory
train_data = np.loadtxt("./data/mnist/mnist_train.csv", delimiter=',')
test_data = np.loadtxt("./data/mnist/mnist_test.csv", delimiter=',')
train_imgs = train_data[:, 1:] # (60000, 784)
test_imgs = test_data[:, 1:] # (10000, 784)
train_labels = train_data[:, 0] # (60000, )
test_labels = test_data[:, 0] # (10000, )
num_classes = 10
# Change the top k input values to 1, rest of the values to 0
# Change the top-k input values to 1, the rest of the values to 0
def k_cap(input, cap_size):
    """Binarize *input*, keeping only the `cap_size` largest entries (per row) as 1."""
    capped = np.zeros_like(input)
    if input.ndim == 1:
        winners = np.argsort(input)[-cap_size:]
        capped[winners] = 1
    else:
        winners = np.argsort(input, axis=-1)[:, -cap_size:]
        np.put_along_axis(capped, winners, 1, axis=-1)
    return capped
from scipy.signal import convolve
# k-cap on convolved input pixels
# Blur each training image with a 3x3 box filter, then k-cap the result so
# every example is a sparse binary vector with exactly cap_size active pixels.
cap_size = 200
n_examples = 5000
examples = np.zeros((num_classes, n_examples, 784))
for i in range(num_classes):
    examples[i] = k_cap(convolve(train_imgs[train_labels == i][:n_examples].reshape(-1, 28, 28), np.ones((1, 3, 3)), mode='same').reshape(-1, 28 * 28), cap_size)
# +
# Initialize sparse random connectivity: input->area (A), output->area (B),
# intra-area recurrent (W), and inter-area forward/feedback (C_fw/C_fb).
# All weight matrices are column-normalized after masking.
n_in = 784
n_neurons = cap_size * num_classes
n_out = n_neurons
sparsity = 0.1
n_rounds = 300
num_hidden = 1
beta = 1.0      # Hebbian learning rate multiplier (weights scale by 1 + beta)
# Random mask on input-learning area connections
mask_a = rng.random((n_in, n_neurons)) < sparsity
A = np.ones((n_in, n_neurons)) * mask_a
A /= A.sum(axis=0)
# Random mask on output-learning area connections
mask_b = rng.random((n_neurons, n_neurons)) < sparsity
B = np.ones((n_neurons, n_neurons)) * mask_b
B /= B.sum(axis=0)
# Random mask on intra-area recurrent connections
# Choose 10% of connections and not the diagonal (no self-connections)
mask_w = (rng.random((num_hidden*2+1, n_neurons, n_neurons)) < sparsity) \
    & np.repeat(np.logical_not(np.eye(n_neurons, dtype=bool))[np.newaxis, ...], num_hidden*2+1, axis=0)
W = np.ones((num_hidden*2+1, n_neurons, n_neurons)) * mask_w
W /= W.sum(axis=1, keepdims=True)
if num_hidden != 0:
    # Random mask on inter-area forward connections
    # Choose 10% of connections
    mask_fw = (rng.random((num_hidden, n_neurons, n_neurons)) < sparsity)
    C_fw = np.ones((num_hidden, n_neurons, n_neurons)) * mask_fw
    C_fw /= C_fw.sum(axis=1, keepdims=True)
    # Random mask on intra-area feedback connections
    # Choose 10% of connections
    mask_fb = (rng.random((num_hidden, n_neurons, n_neurons)) < sparsity)
    C_fb = np.ones((num_hidden, n_neurons, n_neurons)) * mask_fb
    C_fb /= C_fb.sum(axis=1, keepdims=True)
# +
# Model with reccurent, forward, and feedback connections
# Train the assembly model with recurrent, forward, and feedback connections:
# clamp the output-area assembly for each class (supervision), propagate
# k-capped activations through the areas, then apply Hebbian updates
# (co-active pre/post pairs scaled by 1 + beta) and renormalize columns.
bias = np.zeros((num_hidden*2+1, n_neurons))
b = -1      # homeostatic bias decrement applied to neurons that fired
activations = np.zeros((num_classes, n_rounds, num_hidden*2+1+1, n_neurons))
# Iterate over each class
for i in range(num_classes):
    # Iterate over several examples
    for t in range(n_rounds):
        input = examples[i, t]
        # Apply supervision: set assembly in the output area corresponding to class i to 1
        activations[i, t, -1, i*cap_size:i*cap_size+cap_size] = 1
        output = activations[i, t, -1]
        if t == 0:
            continue
        if num_hidden != 0:
            # calculate forward activations
            activations[i, t, 0] = k_cap(input@A + activations[i, t-1, 0]@W[0] + bias[0], cap_size)
            for l in range(1, num_hidden):
                activations[i, t, l] = k_cap(
                    activations[i, t-1, l-1]@C_fw[l-1] + \
                    activations[i, t-1, l]@W[l] + \
                    bias[l], cap_size
                )
            # calculate middle learning area activations
            activations[i, t, num_hidden] = k_cap(
                activations[i, t-1, num_hidden-1]@C_fw[num_hidden-1] + \
                activations[i, t-1, num_hidden]@W[num_hidden] + \
                activations[i, t-1, num_hidden+1]@C_fb[0] + \
                bias[num_hidden], cap_size
            )
            # calculate backward activations
            for l in range(num_hidden+1, num_hidden*2):
                activations[i, t, l] = k_cap(
                    activations[i, t-1, l]@W[l] + \
                    activations[i, t-1, l+1]@C_fb[l-num_hidden] + \
                    bias[l], cap_size
                )
            activations[i, t, -2] = k_cap(output@B + activations[i, t-1, -2]@W[-1] + bias[-1], cap_size)
            # update weights (Hebbian: strengthen co-active pre/post pairs)
            A[(input > 0)[:, np.newaxis] & (activations[i, t, 0] > 0)[np.newaxis, :]] *= 1 + beta
            for l in range(num_hidden):
                W[l][(activations[i, t-1, l] > 0)[:, np.newaxis] & (activations[i, t, l] > 0)[np.newaxis, :]] *= 1 + beta
                C_fw[l][((activations[i, t-1, l] > 0)[:, np.newaxis] & (activations[i, t, l+1] > 0)[np.newaxis, :]) & (mask_fw[l] > 0)] *= 1 + beta
            for l in range(num_hidden, num_hidden*2):
                W[l][(activations[i, t-1, l] > 0)[:, np.newaxis] & (activations[i, t, l] > 0)[np.newaxis, :]] *= 1 + beta
                C_fb[l-num_hidden][((activations[i, t-1, l+1] > 0)[:, np.newaxis] & (activations[i, t, l] > 0)[np.newaxis, :]) & (mask_fb[l-num_hidden] > 0)] *= 1 + beta
            W[-1][(activations[i, t-1, -1] > 0)[:, np.newaxis] & (activations[i, t, -1] > 0)[np.newaxis, :]] *= 1 + beta
            B[(output > 0)[:, np.newaxis] & (activations[i, t, -2] > 0)[np.newaxis, :]] *= 1 + beta
        else:
            activations[i, t, 0] = k_cap(
                input@A + activations[i, t-1, 0]@W[0] + output@B + bias[0], cap_size
            )
            A[(input > 0)[:, np.newaxis] & (activations[i, t, 0] > 0)[np.newaxis, :]] *= 1 + beta
            W[0][(activations[i, t-1, 0] > 0)[:, np.newaxis] & (activations[i, t, 0] > 0)[np.newaxis, :]] *= 1 + beta
            B[(output > 0)[:, np.newaxis] & (activations[i, t, 0] > 0)[np.newaxis, :]] *= 1 + beta
        # renormalize so each neuron's incoming weights sum to 1
        A /= A.sum(axis=0, keepdims=True)
        B /= B.sum(axis=0, keepdims=True)
        W /= W.sum(axis=1, keepdims=True)
        if num_hidden != 0:
            C_fw /= C_fw.sum(axis=1, keepdims=True)
            C_fb /= C_fb.sum(axis=1, keepdims=True)
        bias[activations[i, t, :-1] > 0] += b
# inverse feedback weights and normalize (transpose B and C_fb so they can be
# used in the forward, output-producing direction at test time)
B = B.T / B.T.sum(axis=0, keepdims=True)
if num_hidden != 0:
    C_fb = np.transpose(C_fb, (0, 2, 1)) / np.transpose(C_fb, (0, 2, 1)).sum(axis=1, keepdims=True)
    C = np.concatenate([C_fw, C_fb], axis=0)
# -
# Visualize a corner of the inter-area weight matrix for layer l.
# NOTE(review): both panels render the identical slice C[l, :100, :100];
# presumably the second panel was meant to show a different layer or matrix.
if num_hidden != 0:
    l = 0
    fig, ax = plt.subplots(1, 2, figsize=(10, 5), sharey=True)
    ax[0].imshow(C[l, :100, :100])
    ax[1].imshow(C[l, :100, :100])
# +
# Test-time forward pass: propagate every example through the trained areas
# for enough rounds that activity reaches the output area, with no learning.
num_learn_area = num_hidden * 2 + 1
n_rounds = num_learn_area + 2
act_test = np.zeros((num_classes, n_rounds+1, num_learn_area+1, n_examples, n_neurons))
for i in tqdm(np.arange(num_classes)):
    input = examples[i]
    # Run each example through the model n_round times
    for t in range(n_rounds):
        act_test[i, t+1, 0] = k_cap(input@A + act_test[i, t, 0]@W[0] + bias[0], cap_size)
        for l in range(1, num_learn_area):
            act_test[i, t+1, l] = k_cap(act_test[i, t, l-1]@C[l-1] + act_test[i, t, l]@W[l] + bias[l], cap_size)
        act_test[i, t+1, -1] = k_cap(act_test[i, t, -2]@B, cap_size)
# -
# output area activations on the final round
outputs = act_test[:, :, -1, ...]
outputs.shape
# middle learning area activations
act_l = act_test[:, :, num_hidden, ...]
act_l.shape
# We know the assembly of the output area by construction (class i owns the
# contiguous cap_size-wide slot starting at i*cap_size)
c = np.zeros((num_classes, n_neurons))
for i in range(num_classes):
    c[i, i*cap_size: i*cap_size+cap_size] = 1
# +
# outputs (10, n_rounds+1, n_examples, n_neurons): classify by which class
# assembly overlaps the final-round output the most, then compute accuracy.
predictions = (outputs[:, -1] @ c.T).argmax(axis=-1)
acc = (predictions == np.arange(num_classes)[:, np.newaxis]).sum(axis=-1) / n_examples
acc, acc.mean()
# -
idx = np.arange(n_out)
idx
# Order middle-area neurons by class: greedily assign each class the
# cap_size neurons most active for it (marking them so no neuron repeats).
idx_l = np.full(n_neurons, -1, dtype=int)
act = activations[:, -1, num_hidden, :].copy()
for i in range(num_classes):
    idx_l[i*cap_size:(i+1)*cap_size] = act[i].argsort()[-cap_size:][::-1]
    act[:, idx_l[i*cap_size:(i+1)*cap_size]] = -1
# Raster of output-area activity per class and round.
fig, axes = plt.subplots(num_classes, n_rounds, figsize=(10, 2 * num_classes), sharex=True, sharey=True)
for ax, output in zip(axes, outputs):
    for i in range(n_rounds):
        ax[i].imshow((output[i+1] > 0)[:n_neurons, idx])
        ax[i].set_axis_off()
fig.text(0.5, 0.04, 'Neurons in Learning Area', ha='center', va='center')
fig.text(0.04, 0.5, 'Samples', ha='center', va='center', rotation='vertical')
# Firing probability per neuron in the MIDDLE area, grouped by class.
fig, ax = plt.subplots(figsize=(10, 4))
for i in range(num_classes):
    # Pass each sample to the model and get its result
    ax.bar(np.arange(n_neurons), act_l[i, -1].mean(axis=0)[idx_l], label=i)
ax.legend(loc='upper right', ncol=2)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_ylim([0, 1.1])
ax.set_xticklabels([])
ax.set_xlabel('Neurons in Middle Area')
ax.set_ylabel('Firing Probability')
# Firing probability per neuron in the OUTPUT area, grouped by class.
fig, ax = plt.subplots(figsize=(10, 4))
for i in range(num_classes):
    # Pass each sample to the model and get its result
    ax.bar(np.arange(n_neurons), outputs[i, -1].mean(axis=0)[idx], label=i)
ax.legend(loc='upper right', ncol=2)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_ylim([0, 1.1])
ax.set_xticklabels([])
ax.set_xlabel('Neurons in Output Area')
ax.set_ylabel('Firing Probability')
def softmax(x):
    """Numerically stable softmax over the last axis.

    Subtracting the per-row maximum before exponentiating avoids overflow
    (inf/NaN) for large inputs; the result is mathematically identical to
    exp(x) / sum(exp(x)).
    """
    x = np.asarray(x)
    shifted = np.exp(x - x.max(axis=-1, keepdims=True))
    return shifted / shifted.sum(axis=-1, keepdims=True)
# +
# Train a linear softmax readout v on the final-round output activations with
# minibatch SGD + momentum, holding out the last 1000 examples per class.
batch_size = 10
v = 0.1 * rng.standard_normal((num_classes, n_neurons))
targets = np.zeros((batch_size*num_classes, num_classes))
momentum = 0.9
lr = 1e-2
epochs = 10
for i in range(10):
    targets[i*10:(i+1)*10, i] = 1   # one-hot targets, 10 rows per class
update = np.zeros_like(v)
# -
outputs.shape
for _ in tqdm(range(epochs)):
    permutation = rng.permutation(n_examples - 1000)
    for j in range((n_examples - 1000) // batch_size):
        batch = outputs[:, -1, permutation[j*10:(j+1)*10]].reshape(num_classes*batch_size, n_neurons)
        scores = softmax((batch[:, :, np.newaxis] * v.T[np.newaxis, :, :]).sum(axis=1))
        update = momentum * update + lr * (batch[:, np.newaxis, :] * (scores - targets)[:, :, np.newaxis]).sum(axis=0) / batch_size
        v -= update
# train / held-out accuracy of the linear readout
((outputs[:, -1, :-1000] @ v.T).argmax(axis=-1) == np.arange(10)[:, np.newaxis]).sum() / 40000
((outputs[:, -1, -1000:] @ v.T).argmax(axis=-1) == np.arange(10)[:, np.newaxis]).sum() / 10000
# +
from sklearn.linear_model import LogisticRegression
# Baseline: fit sklearn logistic regression on the same activations and
# score on the same train/held-out split.
train_X = outputs[:, -1, :-1000, :].reshape(-1, n_neurons)
train_y = np.arange(10).repeat(4000)
clf = LogisticRegression(random_state=0, max_iter=100).fit(train_X, train_y)
clf.score(train_X, train_y)
# -
test_X = outputs[:, -1, -1000:, :].reshape(-1, n_neurons)
test_y = np.arange(10).repeat(1000)
clf.score(test_X, test_y)
| structural_experiments/MNIST_reverse_supervision.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Debug ways to size the viewer
#
# Especially: how can we control the height of the viewer?
from IPython.display import display, HTML, Javascript
import gatenlp
from gatenlp import Document
from gatenlp.gatenlpconfig import gatenlpconfig
from gatenlp.serialization.default import HtmlAnnViewerSerializer
from gatenlp import init_notebook
# init_notebook()
# Build a small annotated document, export it with the HTML annotation
# viewer, and re-inject manually modified copies to experiment with sizing.
gatenlpconfig.notebook_js_initialized
doc = Document("This is just some document")
anns = doc.annset()
anns.add(0,2,"ANN")
anns.add(4,7,"ANN")
anns.add(9,13,"ANN")
html = doc.save_mem(fmt="html-ann-viewer", offline=True, add_js=True, notebook=True)
with open("debug-viewer-size-html.html", "wt" ) as outfp:
    print(html, file=outfp)
# read in the manually modified HTML and inject here
with open("debug-viewer-size-html-modified.html", "rt") as infp:
    mhtml = infp.read()
display(HTML(mhtml))
# Notes:
# * When I remove the height for -content alltogether, nothing changes
# * when I set the height to a smaller value, the docview size is the same, but shown in a smaller viewport with a vertical scrollbar
with open("debug1.html", "rt") as infp:
    mhtml = infp.read()
display(HTML(mhtml))
with open("debug2.html", "rt") as infp:
    html2 = infp.read()
display(HTML(html2))
from gatenlp import gatenlpconfig
gatenlpconfig.doc_html_repr_height1_nostretch = "max-height: 10em;"
doc = Document("this is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\nthis is just some text\n")
# Stack several annotations at the same span and render the tall document,
# first via the notebook repr and then with stretch_height enabled.
anns = doc.annset()
anns.add(0,1,"Ann1")
anns.add(0,1,"Ann2")
anns.add(0,1,"Ann3")
anns.add(0,1,"Ann4")
anns.add(0,1,"Ann5")
anns.add(0,1,"Ann6")
doc
html = doc.save_mem(fmt="html-ann-viewer", stretch_height=True, notebook=True)
display(HTML(html))
# Edge case: an empty document.
doc2 = Document("")
doc2
| debug/debug-viewer-size.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
import logging
import torch.nn as nn
import torch.nn.functional as F
import emmental
from cxr_dataset import CXR8Dataset
from emmental import Meta
from emmental.data import EmmentalDataLoader
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from emmental.scorer import Scorer
from emmental.task import EmmentalTask
from modules.classification_module import ClassificationModule
from modules.torch_vision_encoder import TorchVisionEncoder
from task_config import CXR8_TASK_NAMES
from transforms import get_data_transforms
# -
# Initialize Emmental and configure the multi-task learner (SGD with a
# linear warmup/decay schedule, 20 epochs, validating on the "val" split).
logger = logging.getLogger(__name__)
emmental.init("logs")
Meta.update_config(
    config={
        "meta_config": {"seed": 1701, "device": 0},
        "learner_config": {
            "n_epochs": 20,
            "valid_split": "val",
            "optimizer_config": {"optimizer": "sgd", "lr": 0.001, "l2": 0.000},
            "lr_scheduler_config": {
                "warmup_steps": None,
                "warmup_unit": "batch",
                "lr_scheduler": "linear",
                "min_lr": 1e-6,
            },
        },
        "logging_config": {"evaluation_freq": 4000, "checkpointing": False},
    }
)
# +
DATA_NAME = "CXR8"
CXRDATA_PATH = (
f"/dfs/scratch1/senwu/mmtl/emmental-tutorials/chexnet/data/nih_labels.csv"
)
CXRIMAGE_PATH = f"/dfs/scratch1/senwu/mmtl/emmental-tutorials/chexnet/data/images"
BATCH_SIZE = 16
CNN_ENCODER = "densenet121"
BATCH_SIZES = {"train": 16, "val": 64, "test": 64}
# -
# Build one CXR8Dataset per split, each with its split-specific transforms.
cxr8_transform = get_data_transforms(DATA_NAME)
# +
datasets = {}
for split in ["train", "val", "test"]:
    datasets[split] = CXR8Dataset(
        name=DATA_NAME,
        path_to_images=CXRIMAGE_PATH,
        path_to_labels=CXRDATA_PATH,
        split=split,
        transform=cxr8_transform[split],
        sample=0,
        seed=1701,
    )
    logger.info(f"Loaded {split} split for {DATA_NAME}.")
# -
# Each pathology task reads its label from the column of the same name.
task_to_label_dict = {task_name: task_name for task_name in CXR8_TASK_NAMES}
print(task_to_label_dict)
# +
# Wrap each split's dataset in an EmmentalDataLoader (shuffle train only).
dataloaders = []
for split in ["train", "val", "test"]:
    dataloaders.append(
        EmmentalDataLoader(
            task_to_label_dict=task_to_label_dict,
            dataset=datasets[split],
            split=split,
            shuffle=True if split == "train" else False,
            batch_size=BATCH_SIZES[split],
            num_workers=8,
        )
    )
    logger.info(f"Built dataloader for {datasets[split].name} {split} set.")
# -
# # Build Emmental task
from functools import partial
def ce_loss(task_name, immediate_ouput, Y, active):
    """Cross-entropy loss for one task.

    Pulls the task's logits out of the intermediate-output dict and compares
    them with the gold labels, which are 1-indexed and therefore shifted down
    by one for PyTorch.  (Parameter spelling kept for caller compatibility.)
    """
    logits = immediate_ouput[f"classification_module_{task_name}"][0]
    return F.cross_entropy(logits, Y.view(-1) - 1)
def output(task_name, immediate_ouput):
    """Return softmax class probabilities from the task head's logits."""
    task_logits = immediate_ouput[f"classification_module_{task_name}"][0]
    return F.softmax(task_logits, dim=1)
# +
# Shared CNN encoder: a pretrained DenseNet-121 trunk whose output feeds
# every per-task classification head.
input_shape = (3, 224, 224)  # RGB, 224x224 — standard torchvision input size
cnn_module = TorchVisionEncoder(CNN_ENCODER, pretrained=True)
# Flattened feature dimension the classification heads must accept.
classification_layer_dim = cnn_module.get_frm_output_size(input_shape)
# -
# One EmmentalTask per pathology.  All tasks share the single `cnn_module`
# instance (hard parameter sharing); each gets its own 2-way
# classification head.
tasks = [
    EmmentalTask(
        name=task_name,
        module_pool=nn.ModuleDict(
            {
                "cnn": cnn_module,  # shared trunk (same object in every task)
                f"classification_module_{task_name}": ClassificationModule(
                    classification_layer_dim, 2
                ),
            }
        ),
        # Dataflow: raw image -> cnn -> per-task head.
        task_flow=[
            {"name": "cnn", "module": "cnn", "inputs": [("_input_", "image")]},
            {
                "name": f"classification_module_{task_name}",
                "module": f"classification_module_{task_name}",
                "inputs": [("cnn", 0)],
            },
        ],
        # partial() binds the task name so the framework can call these
        # with its standard (immediate_output, Y, active) signature.
        loss_func=partial(ce_loss, task_name),
        output_func=partial(output, task_name),
        scorer=Scorer(metrics=["accuracy", "roc_auc"]),
    )
    for task_name in CXR8_TASK_NAMES
]
# Assemble the multi-task model from the per-task definitions and train it.
mtl_model = EmmentalModel(name="Chexnet", tasks=tasks)
emmental_learner = EmmentalLearner()
# Fix: the original called ``learn`` twice back-to-back, which would rerun
# the entire 20-epoch training loop a second time for no stated reason.
emmental_learner.learn(mtl_model, dataloaders)
| chexnet/CXR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python_3.6
# ---
# ### N Steps
# You have to climb N stairs. In how many ways can you climb N stairs in steps of 1 or 2 stairs?
#
# Example<br>
# Input: 4<br>
# Output:<br>
# Ways of climbing 4 stairs:<br>
# [2,2]<br>
# [2,1,1]<br>
# [1,2,1]<br>
# [1,1,2]<br>
# [1,1,1,1]
# ### Solution
# Before climbing the N stairs, the last step was either 1 or 2. So the number of ways of climbing N stairs, S(N), is equal to the number of ways of climbing the previous N-1 stairs, S(N-1), plus the number of ways of climbing the previous N-2 stairs, S(N-2). So we obtain that S(N) = S(N-1) + S(N-2).<br>
#
# Starting from S(1) = 1 and S(2) = 2, we can compute S(N) in a recursive way.
#
def steps(N):
    """Return the number of ways to climb ``N`` stairs in steps of 1 or 2.

    Satisfies S(N) = S(N-1) + S(N-2) with S(1) = 1 and S(2) = 2 (the
    Fibonacci recurrence).  Computed iteratively, so the cost is O(N)
    instead of the exponential blow-up of the naive double recursion.
    """
    if N == 1:
        return 1
    if N == 2:
        return 2
    two_back, one_back = 1, 2  # S(k-2), S(k-1)
    for _ in range(3, N + 1):
        two_back, one_back = one_back, two_back + one_back
    return one_back
steps(3)  # -> 3
steps(4)  # -> 5 (matches the five sequences listed above)
steps(5)  # -> 8
# Suppose we also want to print all possible combinations of steps. Here's how we can do that.
def print_steps(N, list_of_steps=None):
    """Print every sequence of 1- and 2-steps that sums to exactly ``N``.

    ``list_of_steps`` is the partial sequence built so far; callers
    normally omit it.  The default is ``None`` rather than a mutable
    ``[]`` to avoid the shared-mutable-default pitfall.
    """
    if list_of_steps is None:
        list_of_steps = []
    climbed = sum(list_of_steps)  # hoisted: computed once per call, not per branch
    if climbed == N:
        print(list_of_steps)
    elif climbed <= N - 2:
        # Room for either a 1-step or a 2-step.
        print_steps(N, list_of_steps + [1])
        print_steps(N, list_of_steps + [2])
    elif climbed == N - 1:
        # Exactly one stair left: only a 1-step fits.
        print_steps(N, list_of_steps + [1])
print_steps(5, [])
| Problem_010_-_N_Steps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Plotly tests ##
# Go to https://plot.ly/#/ and sign up for free account.
# Plug username and api key in the cell below.
# +
import json
import pandas as pd
import matplotlib.pyplot as plt
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.graph_objs as go
import plotly
# Register plotly cloud credentials (fill in your own username/API key).
plotly.tools.set_credentials_file(username='<your username>', api_key='<your api key>')
# The sample file contains one JSON payload per blank-line-separated chunk.
with open("smt_sample.json", "r") as f:
    data = f.read()
payloads = [json.loads(s) for s in data.split("\n\n")]
# NOTE(review): the `== "location"` filter is commented out below, so this
# keeps ANY payload with a truthy "type" — confirm whether the filter
# should be restored.
locations = [pl["data"] for pl in payloads if pl["type"] ]#== "location"]
line_crossings = [pl["data"] for pl in payloads if pl["type"] == "linecrossing"]
other = [pl for pl in payloads if pl["type"] != "location"]
df = pd.DataFrame(locations)
# One scatter trace per "no" group (presumably a device/track id — verify).
traces = []
for name, group in df.groupby("no"):
    traces.append(go.Scatter(
        x=group['lat'],
        y=group['lon'],
        mode='markers+lines',
        name=name
    ))
# Render the traces in the notebook via the plotly cloud service.
py.iplot(traces, filename='jupyter-basic_bar.html')
# -
| plotlytest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### ***Goal of this notebook:***
# #### The purpose of this notebook is to give instructions to visualize cosmoDC2 halos and redMaPPer detection positions on the composite co-add DC2 image in tract 3828, using the hscMap tool.
# ### ***Instructions***
#
# #### You can see the tract 3828 validation result by going to https://hscmap.mtk.nao.ac.jp/hscMap4/
# #### and by clicking on analysis->external_tile->add and then point to the CC URL provided by
# #### <NAME> on the desc-dm-DC2 slack channel.
# #### A little unlabelled square box should appear that you can zoom on.
#
# #### Then run this notebook to extract corresponding cosmoDC2 halos and redmaPPer detections
# #### in the field and save them in ascii. You can then simply drop the files on the HSC map window.
# +
import GCRCatalogs
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table as Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
from astropy.io import ascii
from cluster_validation.opening_catalogs_functions import *
from cluster_validation.association_methods import *
from cluster_validation.plotting_functions import *
from cluster_validation.association_statistics import *
# %matplotlib inline
plt.rcParams['figure.figsize'] = [9.5, 6]
plt.rcParams.update({'font.size': 18})
#plt.rcParams['figure.figsize'] = [10, 8] for big figures
# -
from astropy.io import ascii
# # 0 - opening catalogs
# +
# Catalog names for the redMaPPer detections and the underlying cosmoDC2 sim.
RM_cat_name = 'cosmoDC2_v1.1.4_redmapper_v0.2.1py'
DC2_cat_name = 'cosmoDC2_v1.1.4'
# Selection thresholds.
# NOTE(review): min_richness is not used in this chunk — confirm it is
# applied elsewhere or intended for a later cut.
min_richness = 20
min_halo_mass = 1e14 #Msun
#tract 3828 ra, dec
ra_min = 55.68
ra_max = 57.6
dec_min = -37.23
dec_max = -35.67
# -
# ## Open cosmoDC2 and select halos in the tract
# load the DC2 catalog
DC2_catalog = GCRCatalogs.load_catalog(DC2_cat_name)
# load quantities of interest (may take a few minutes)
# The query keeps only central galaxies of halos above the mass cut,
# inside the tract's RA/Dec box defined above.
query_dc2 = GCRCatalogs.GCRQuery('(is_central==True) & (ra>'+str(ra_min)+') & (ra<'+str(ra_max)+') & (dec>'+str(dec_min)+') & (dec<'+str(dec_max)+') & ( halo_mass >'+str(min_halo_mass)+')')
halo_data = Table(DC2_catalog.get_quantities(['ra', 'dec','redshift','halo_mass', 'halo_id','is_central'], [query_dc2]))
# ## Open redMaPPer on cosmoDC2 and select detections in the tract
# Get the redMaPPer catalog
gc = GCRCatalogs.load_catalog('cosmoDC2_v1.1.4_redmapper_v0.2.1py')
# Select out the cluster and member quantities into different lists
# Split the catalog's quantities by name: per-member fields contain
# 'member', cluster-level fields do not.
quantities = gc.list_all_quantities()
cluster_quantities = [q for q in quantities if 'member' not in q]
member_quantities = [q for q in quantities if 'member' in q]
# +
# Read in the cluster and member data
query_rm = GCRCatalogs.GCRQuery('(ra>'+str(ra_min)+') & (ra<'+str(ra_max)+') & (dec>'+str(dec_min)+') & (dec<'+str(dec_max)+')')
cluster_data = Table(gc.get_quantities(cluster_quantities, [query_rm]))
query_rm = GCRCatalogs.GCRQuery('(ra_member>'+str(ra_min)+') & (ra_member<'+str(ra_max)+') & (dec_member>'+str(dec_min)+') & (dec_member<'+str(dec_max)+')')
member_data = Table(gc.get_quantities(member_quantities, [query_rm]))
# -
#save DC2 sub-catalog as ascii
ascii.write(halo_data, 'DC2_halo_list_tract3828.csv', format='csv', fast_writer=False,overwrite=True)
#save redmapper sub-catalog as ascii
ascii.write(cluster_data, 'rm_detection_list_tract3828.csv', format='csv', fast_writer=False,overwrite=True)
| RedMapper_DC2_tract3828_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Convert a Trove list into a CSV file
#
# This notebook converts [Trove lists](https://trove.nla.gov.au/list/result?q=) into CSV files (spreadsheets). Separate CSV files are created for newspaper articles and works from Trove's other zones. You can also save the OCRd text, a PDF, and an image of each newspaper article.
# <div class="alert alert-block alert-warning">
# <p>If you haven't used one of these notebooks before, they're basically web pages in which you can write, edit, and run live code. They're meant to encourage experimentation, so don't feel nervous. Just try running a few cells and see what happens!</p>
#
# <p>
# Some tips:
# <ul>
# <li>Code cells have boxes around them.</li>
# <li>To run a code cell either click on the cell and then hit <b>Shift+Enter</b>. The <b>Shift+Enter</b> combo will also move you to the next cell, so it's a quick way to work through the notebook.</li>
# <li>While a cell is running a <b>*</b> appears in the square brackets next to the cell. Once the cell has finished running the asterisk will be replaced with a number.</li>
# <li>In most cases you'll want to start from the top of notebook and work your way down running each cell in turn. Later cells might depend on the results of earlier ones.</li>
# <li>To edit a code cell, just click on it and type stuff. Remember to run the cell once you've finished editing.</li>
# </ul>
# </p>
# </div>
# ## Add your values to these two cells
#
# This is the only section that you'll need to edit. Paste your API key and list id in the cells below as indicated.
#
# If necessary, follow the instructions in the Trove Help to [obtain your own Trove API Key](http://help.nla.gov.au/trove/building-with-trove/api).
#
# The list id is the number in the url of your Trove list. So [the list](https://trove.nla.gov.au/list/83774) with this url `https://trove.nla.gov.au/list/83774` has an id of `83774`.
# Paste your API key between the quotes, and then run the cell
API_KEY = 'YOUR API KEY GOES HERE'
print('Your API key is: {}'.format(API_KEY))
# Paste your list id below, and set your preferences for saving newspaper articles.
# +
# Paste your list id between the quotes, and then run the cell
list_id = '83777'
# If you don't want to save all the OCRd text, change True to False below
save_texts = True
# Change this to True if you want to save PDFs of newspaper articles
save_pdfs = False
# Change this to False if you don't want to save images of newspaper articles
save_images = True
# -
# ## Set things up
#
# Run the cell below to load the necessary libraries and set up some directories to store the results.
# +
import requests
from requests.exceptions import HTTPError, Timeout
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import pandas as pd
import os
import re
import shutil
from tqdm.auto import tqdm
from IPython.core.display import display, HTML
from pathlib import Path
from bs4 import BeautifulSoup
from PIL import Image
from io import BytesIO
import re
import time
# Shared HTTP session with automatic retries (exponential backoff) on
# transient server errors, for both http and https.
s = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[ 500, 502, 503, 504 ])
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
# -
# ## Define some functions
#
# Run the cell below to set up all the functions we'll need for the conversion.
# +
def listify(value):
    '''
    Sometimes values can be lists and sometimes not.
    Turn scalars (str/int) into single-element string lists so callers
    can always '|'.join() them; anything else is returned unchanged.
    '''
    # Fix: the original wrapped str(value) in a try/except ValueError, but
    # str() on a str or int can never raise ValueError — dead code removed.
    if isinstance(value, (str, int)):
        value = [str(value)]
    return value
def get_url(identifiers, linktype):
    '''
    Return the value of the first identifier whose linktype matches,
    or '' if there is no match.

    Uses .get() so identifier records that lack a 'linktype' key are
    skipped instead of raising KeyError.
    '''
    for identifier in identifiers:
        if identifier.get('linktype') == linktype:
            return identifier['value']
    return ''
def save_as_csv(list_dir, data, data_type):
    """Write *data* (a list of dicts) to ``{list_dir}/{list_id}-{data_type}.csv``.

    NOTE(review): this reads the module-level ``list_id`` global rather
    than deriving it from ``list_dir`` or taking it as a parameter —
    confirm before reusing this function outside the notebook.
    """
    df = pd.DataFrame(data)
    df.to_csv('{}/{}-{}.csv'.format(list_dir, list_id, data_type), index=False)
def make_filename(article):
    '''
    Build a sortable base filename for an article's text/PDF/image files.

    Format: PUBLICATIONDATE-NEWSPAPERID-ARTICLEID, with the dashes
    stripped from the ISO date so files sort/aggregate naturally by date.
    '''
    compact_date = article['date'].replace('-', '')
    return f"{compact_date}-{article['newspaper_id']}-{article['id']}"
def get_list(list_id):
    """Fetch a Trove list (with full item records) from the v2 API as parsed JSON.

    Uses the module-level retrying session ``s`` and ``API_KEY``.
    """
    list_url = f'https://api.trove.nla.gov.au/v2/list/{list_id}?encoding=json&reclevel=full&include=listItems&key={API_KEY}'
    response = s.get(list_url)
    return response.json()
def get_article(id):
    """Fetch one newspaper article's full record (including OCRd text) as parsed JSON.

    Uses the module-level retrying session ``s`` and ``API_KEY``.
    """
    article_api_url = f'https://api.trove.nla.gov.au/v2/newspaper/{id}/?encoding=json&reclevel=full&include=articletext&key={API_KEY}'
    response = s.get(article_api_url)
    return response.json()
def make_dirs(list_id):
    """Create (if needed) the output directory tree for a harvested list.

    Layout: data/converted-lists/{list_id}/{text,image,pdf}.
    Returns the list's root directory as a Path.
    """
    list_dir = Path('data', 'converted-lists', list_id)
    list_dir.mkdir(parents=True, exist_ok=True)
    for subdir in ('text', 'image', 'pdf'):
        Path(list_dir, subdir).mkdir(exist_ok=True)
    return list_dir
def ping_pdf(ping_url):
    '''
    Check whether a requested PDF rendition is ready for download.

    Returns True once the ping endpoint answers 200.  HTTP 423 ("Locked",
    still generating) or a timeout means "not ready yet" so the caller's
    polling loop can keep trying; any other HTTP error is re-raised.
    '''
    ready = False
    try:
        response = s.get(ping_url, timeout=30)
        response.raise_for_status()
    except Timeout:
        # Fix: Timeout is imported at the top of the file but was never
        # caught, so a slow ping crashed the harvest.  Treat it as
        # "not ready" and let the caller retry.
        ready = False
    except HTTPError:
        if response.status_code == 423:
            ready = False
        else:
            raise
    else:
        ready = True
    return ready
def get_pdf_url(article_id, zoom=3):
    '''
    Request generation of an article's PDF rendition and return its URL
    once ready, or None if it was not ready after ~5 polls.

    Note: this returns the URL only — the caller performs the actual
    download.  PDFs take a while to generate, so we ping the server
    (via ping_pdf) until it reports ready.
    '''
    pdf_url = None
    # Ask for the PDF to be created
    prep_url = f'https://trove.nla.gov.au/newspaper/rendition/nla.news-article{article_id}/level/{zoom}/prep'
    response = s.get(prep_url)
    # Get the hash
    prep_id = response.text
    # Url to check if the PDF is ready
    ping_url = f'https://trove.nla.gov.au/newspaper/rendition/nla.news-article{article_id}.{zoom}.ping?followup={prep_id}'
    tries = 0
    ready = False
    time.sleep(2) # Give some time to generate pdf
    # Poll up to 5 times, sleeping 2s between attempts.
    while ready is False and tries < 5:
        ready = ping_pdf(ping_url)
        if not ready:
            tries += 1
            time.sleep(2)
    # Download if ready
    if ready:
        pdf_url = f'https://trove.nla.gov.au/newspaper/rendition/nla.news-article{article_id}.{zoom}.pdf?followup={prep_id}'
    return pdf_url
def get_box(zones):
    '''
    Return the bounding box around an article, given the OCR line "zones"
    on a single page.

    Each zone carries string data-x/data-y/data-w/data-h attributes; the
    box is the min/max envelope of all zones.  Using min()/max() removes
    the original's hard-coded 10000 starting sentinel, which silently
    broke on page images larger than 10000px.
    '''
    page_id = zones[0]['data-page-id']
    lefts = [int(zone['data-x']) for zone in zones]
    tops = [int(zone['data-y']) for zone in zones]
    rights = [int(zone['data-x']) + int(zone['data-w']) for zone in zones]
    bottoms = [int(zone['data-y']) + int(zone['data-h']) for zone in zones]
    return {
        'page_id': page_id,
        'left': min(lefts),
        'top': min(tops),
        'right': max(rights),
        'bottom': max(bottoms),
    }
def get_article_boxes(article_url):
    '''
    Positional information about the article is attached to each line of the OCR output in data attributes.
    This function loads the HTML version of the article and scrapes the x, y, and width values for each line of text
    to determine the coordinates of a box around the article.

    Returns a list of box dicts (one per page the article spans), as
    produced by get_box().
    '''
    boxes = []
    response = s.get(article_url)
    soup = BeautifulSoup(response.text, 'lxml')
    # Lines of OCR are in divs with the class 'zone'
    # 'onPage' limits to those on the current page
    zones = soup.select('div.zone.onPage')
    boxes.append(get_box(zones))
    # Articles may continue on other pages ('offPage' zones); group those
    # zones by their page id and emit one box per continuation page.
    off_page_zones = soup.select('div.zone.offPage')
    if off_page_zones:
        current_page = off_page_zones[0]['data-page-id']
        zones = []
        for zone in off_page_zones:
            if zone['data-page-id'] == current_page:
                zones.append(zone)
            else:
                # Page changed: close out the previous page's box.
                boxes.append(get_box(zones))
                zones = [zone]
                current_page = zone['data-page-id']
        # Box for the final page's accumulated zones.
        boxes.append(get_box(zones))
    return boxes
def get_page_images(list_dir, article, size=3000):
    '''
    Crop an image of the article out of its page image(s) and save it as
    JPEG under {list_dir}/image/ (one file per page the article spans).

    size: maximum width/height in pixels for the saved crop (thumbnail
    bound); pass a falsy value to keep full resolution.
    '''
    # Get position of article on the page(s)
    boxes = get_article_boxes(f'http://nla.gov.au/nla.news-article{article["id"]}')
    image_filename = make_filename(article)
    for box in boxes:
        # Construct the url we need to download the page image
        page_url = f'https://trove.nla.gov.au/ndp/imageservice/nla.news-page{box["page_id"]}/level7'
        # Download the page image
        response = s.get(page_url)
        # Open download as an image for editing
        img = Image.open(BytesIO(response.content))
        # Crop the page down to the article's bounding box
        points = (box['left'], box['top'], box['right'], box['bottom'])
        cropped = img.crop(points)
        # Resize if necessary.
        # Fix: Image.ANTIALIAS was deprecated and removed in Pillow 10;
        # Image.LANCZOS is the long-standing equivalent filter.
        if size:
            cropped.thumbnail((size, size), Image.LANCZOS)
        # Save the cropped article image, one file per page
        cropped_file = Path(list_dir, 'image', f'{image_filename}-{box["page_id"]}.jpg')
        cropped.save(cropped_file)
def harvest_list(list_id, save_text=True, save_pdfs=False, save_images=False):
    '''
    Harvest a Trove list into CSV files, optionally saving each newspaper
    article's OCRd text, PDF, and page image(s).

    Returns (works, articles): metadata dicts for the list's non-newspaper
    and newspaper items respectively.

    Fixes over the original:
    - text/PDF files are named via make_filename() (date-newspaper-article)
      instead of a hard-coded "(unknown)" placeholder, which made every
      article overwrite the previous one;
    - the ``save_text`` parameter is now honoured (the body previously
      read the module-level ``save_texts`` global instead);
    - removed a duplicated ``text_file`` assignment and made the
      whitespace regex a raw string.
    '''
    list_dir = make_dirs(list_id)
    data = get_list(list_id)
    works = []
    articles = []
    for item in tqdm(data['list'][0]['listItem']):
        for zone, record in item.items():
            if zone == 'work':
                # Non-newspaper zones: capture the core work metadata.
                # Multi-valued fields are pipe-joined for CSV friendliness.
                work = {
                    'id': record.get('id', ''),
                    'title': record.get('title', ''),
                    'type': '|'.join(listify(record.get('type', ''))),
                    'issued': '|'.join(listify(record.get('issued', ''))),
                    'contributor': '|'.join(listify(record.get('contributor', ''))),
                    'trove_url': record.get('troveUrl', ''),
                    'fulltext_url': get_url(record.get('identifier', ''), 'fulltext'),
                    'thumbnail_url': get_url(record.get('identifier', ''), 'thumbnail')
                }
                works.append(work)
            elif zone == 'article':
                article = {
                    'id': record.get('id'),
                    'title': record.get('heading', ''),
                    'category': record.get('category', ''),
                    'date': record.get('date', ''),
                    'newspaper_id': record.get('title', {}).get('id'),
                    'newspaper_title': record.get('title', {}).get('value'),
                    'page': record.get('page', ''),
                    'page_sequence': record.get('pageSequence', ''),
                    'trove_url': f'http://nla.gov.au/nla.news-article{record.get("id")}'
                }
                # Second API call for full details (word count, OCR text, etc.).
                full_details = get_article(record.get('id'))
                article['words'] = full_details['article'].get('wordCount', '')
                article['illustrated'] = full_details['article'].get('illustrated', '')
                article['corrections'] = full_details['article'].get('correctionCount', '')
                if 'trovePageUrl' in full_details['article']:
                    page_id = re.search(r'page\/(\d+)', full_details['article']['trovePageUrl']).group(1)
                    article['page_url'] = f'http://trove.nla.gov.au/newspaper/page/{page_id}'
                else:
                    article['page_url'] = ''
                filename = make_filename(article)
                if save_text:
                    text = full_details['article'].get('articleText')
                    if text:
                        # Strip HTML tags and collapse runs of whitespace.
                        text = re.sub('<[^<]+?>', '', text)
                        text = re.sub(r'\s\s+', ' ', text)
                        text_file = Path(list_dir, 'text', f'{filename}.txt')
                        with open(text_file, 'wb') as text_output:
                            text_output.write(text.encode('utf-8'))
                if save_pdfs:
                    pdf_url = get_pdf_url(record['id'])
                    if pdf_url:
                        pdf_file = Path(list_dir, 'pdf', f'{filename}.pdf')
                        response = s.get(pdf_url, stream=True)
                        with open(pdf_file, 'wb') as pf:
                            for chunk in response.iter_content(chunk_size=128):
                                pf.write(chunk)
                if save_images:
                    get_page_images(list_dir, article)
                articles.append(article)
    if articles:
        save_as_csv(list_dir, articles, 'articles')
    if works:
        save_as_csv(list_dir, works, 'works')
    return works, articles
# -
# ## Let's do it!
#
# Run the cell below to start the conversion.
works, articles = harvest_list(list_id, save_texts, save_pdfs, save_images)
# ## View the results
#
# You can browse the harvested files in the `data/converted-lists/[your list id]` directory.
#
# Run the cells below for a preview of the CSV files.
# Preview newspaper articles CSV
df_articles = pd.DataFrame(articles)
df_articles
# Preview works CSV
df_works = pd.DataFrame(works)
df_works
# ## Download the results
#
# Run the cell below to zip up all the harvested files and create a download link.
list_dir = Path('data', 'converted-lists', list_id)
shutil.make_archive(list_dir, 'zip', list_dir)
display(HTML(f'<a download="{list_id}.zip" href="{list_dir}.zip">Download your harvest</a>'))
# ----
#
# Created by [<NAME>](https://timsherratt.org/) for the [GLAM Workbench](https://glam-workbench.github.io/).
| Convert-a-Trove-list-into-a-CSV-file.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hello World
#
# Este notebook é para verificação de pacotes.
# Importando a trindade de pacotes de datascience
import numpy as np
import matplotlib.pyplot as plt
data = np.array([1, 2, 3, 4, 5])
print(data)
# +
# Create my dataframe.
# Fix: pandas was never imported above, so ``pd.DataFrame`` raised
# ``NameError: name 'pd' is not defined`` (the error the original
# notebook noted); import it locally here.
import pandas as pd

df = pd.DataFrame(data)
# -
| notebooks/1-hello-world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math as math
# Load the 'Name' column as a Series.
# Fix: ``squeeze=True`` in read_csv was deprecated (pandas 1.4) and removed
# in pandas 2.0; calling ``.squeeze("columns")`` on the one-column result
# is the supported equivalent and still yields a Series.
pok = pd.read_csv("./Data/pokemon.csv", usecols=['Name']).squeeze("columns")
pok[73]  # single element by positional label
# +
#pok[64,73]
# -
pok[[64,73]]   # fancy indexing: elements 64 and 73
pok[2:7]       # slice: positions 2..6
pok[795:]      # from position 795 to the end
pok[795:799]   # positions 795..798
pok[:4]        # first four elements
pok[-2:]       # last two elements
| Data_Series/Pobieranie_dannych_przez_indeks_Data_Series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="DGPlYumZnO1t"
# # Exploratory Data Analysis Using Python and BigQuery
#
#
#
# ## Learning Objectives
#
# 1. Analyze a Pandas Dataframe
# 2. Create Seaborn plots for Exploratory Data Analysis in Python
# 3. Write a SQL query to pick up specific fields from a BigQuery dataset
# 4. Exploratory Analysis in BigQuery
#
#
# ## Introduction
# This lab is an introduction to linear regression using Python and Scikit-Learn. This lab serves as a foundation for more complex algorithms and machine learning models that you will encounter in the course. We will train a linear regression model to predict housing price.
#
# Each learning objective will correspond to a __#TODO__ in the [student lab notebook](../labs/python.BQ_explore_data.ipynb) -- try to complete that notebook first before reviewing this solution notebook.
#
# + [markdown] colab_type="text" id="AsHg6SD2nO1v"
# ### Import Libraries
# -
# Run the chown command to change the ownership
# !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Install the Google Cloud BigQuery library
# !pip install --user google-cloud-bigquery==1.25.0
# Please ignore any incompatibility warnings and errors.
#
# **Restart** the kernel before proceeding further (On the Notebook menu - Kernel - Restart Kernel).
#
# + colab={} colab_type="code" id="gEXV-RxPnO1w"
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the results of that search
# to a name in the local scope.
import os
import pandas as pd
import numpy as np
# Import matplotlib to visualize the model
import matplotlib.pyplot as plt
# Seaborn is a Python data visualization library based on matplotlib
import seaborn as sns
# %matplotlib inline
# + [markdown] colab_type="text" id="dr2TkzKRnO1z"
# ### Load the Dataset
#
#
# -
# Here, we create a directory called usahousing. This directory will hold the dataset that we copy from Google Cloud Storage.
# Create a directory to hold the dataset
# Create the directory that will hold the downloaded dataset.
# ``exist_ok=True`` replaces the racier and more verbose
# check-then-create pattern.
os.makedirs("../data/explore", exist_ok=True)
# Next, we copy the Usahousing dataset from Google Cloud Storage.
# Copy the file using `gsutil cp` from Google Cloud Storage in the required directory
# !gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/housing_pre-proc_toy.csv ../data/explore
# Then we use the "ls" command to list files in the directory. This ensures that the dataset was copied.
# `ls` shows the working directory's contents.
# The `l` flag list the all files with permissions and details
# !ls -l ../data/explore
# Next, we read the dataset into a Pandas dataframe.
# + colab={} colab_type="code" id="CzrXJI8VnO10"
# TODO 1
# Read a comma-separated values (csv) file into a DataFrame using the read_csv() function
df_USAhousing = pd.read_csv('../data/explore/housing_pre-proc_toy.csv')
# -
# ### Inspect the Data
# + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="Y6VJQ1tdnO12" outputId="7a1d4eed-3e83-44a8-f495-a9b74444d3ec"
# Get the first five rows using the head() method
df_USAhousing.head()
# -
# Let's check for any null values.
# `isnull()` finds a null value in a column and `sum()` counts it
df_USAhousing.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="nRTsvSzqnO17" outputId="f44ad14e-5fb4-4c70-e71c-9d149bca4869"
# Get some basic statistical details using describe() method
df_stats = df_USAhousing.describe()
# Transpose index and columns of the dataframe
df_stats = df_stats.transpose()
df_stats
# -
# Get a concise summary of a DataFrame
df_USAhousing.info()
# Let's take a peek at the first and last five rows of the data for all columns.
print ("Rows : " ,df_USAhousing.shape[0])
print ("Columns : " ,df_USAhousing.shape[1])
print ("\nFeatures : \n" ,df_USAhousing.columns.tolist())
print ("\nMissing values : ", df_USAhousing.isnull().sum().values.sum())
print ("\nUnique values : \n",df_USAhousing
.nunique())
# + [markdown] colab_type="text" id="QWVdsrmgnO1_"
# ## Explore the Data
#
# Let's create some simple plots to check out the data!
# -
# `heatmap` plots a rectangular data in a color-encoded matrix and
# `corr` finds the pairwise correlation of all columns in the dataframe
sns.heatmap(df_USAhousing.corr())
# Create a displot showing "median_house_value".
# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="SOsTLClWnO2B" outputId="b8a78674-5ddb-4706-90b4-37d7d83e8092"
# TODO 2a
# Plot a univariate distribution of observations using seaborn `distplot()` function
sns.displot(df_USAhousing['median_house_value'])
# -
# Set the aesthetic style of the plots
sns.set_style('whitegrid')
# Plot a histogram using `hist()` function
df_USAhousing['median_house_value'].hist(bins=30)
plt.xlabel('median_house_value')
# +
x = df_USAhousing['median_income']
y = df_USAhousing['median_house_value']
# Scatter plot of y vs x using scatter() and `show()` display all open figures
plt.scatter(x, y)
plt.show()
# -
# Create a jointplot showing "median_income" versus "median_house_value".
# TODO 2b
# `joinplot()` draws a plot of two variables with bivariate and univariate graphs.
sns.jointplot(x='median_income',y='median_house_value',data=df_USAhousing)
# `countplot()` shows the counts of observations in each categorical bin using bars
sns.countplot(x = 'ocean_proximity', data=df_USAhousing)
# takes numeric only?
# plt.figure(figsize=(20,20))
# Draw a multi-plot on every facet using `FacetGrid()`
g = sns.FacetGrid(df_USAhousing, col="ocean_proximity")
# Pass a function and the name of one or more columns in the dataframe
g.map(plt.hist, "households");
# takes numeric only?
# plt.figure(figsize=(20,20))
# Draw a multi-plot on every facet using `FacetGrid()`
g = sns.FacetGrid(df_USAhousing, col="ocean_proximity")
# Pass a function and the name of one or more columns in the dataframe
g.map(plt.hist, "median_income");
# You can see below that this is the state of California!
# +
x = df_USAhousing['latitude']
y = df_USAhousing['longitude']
# Scatter plot of y vs x and display all open figures
plt.scatter(x, y)
plt.show()
# -
# # Explore and create ML datasets
#
# In this notebook, we will explore data corresponding to taxi rides in New York City to build a Machine Learning model in support of a fare-estimation tool. The idea is to suggest a likely fare to taxi riders so that they are not surprised, and so that they can protest if the charge is much higher than expected.
#
# ## Learning objectives
# * Access and explore a public BigQuery dataset on NYC Taxi Cab rides
# * Visualize your dataset using the Seaborn library
#
#
# First, **restart the Kernel**. Now, let's start with the Python imports that we need.
# Import the python libraries
from google.cloud import bigquery
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# <h3> Extract sample data from BigQuery </h3>
#
# The dataset that we will use is <a href="https://console.cloud.google.com/bigquery?project=nyc-tlc&p=nyc-tlc&d=yellow&t=trips&page=table">a BigQuery public dataset</a>. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is one billion, and then switch to the Preview tab to look at a few rows.
#
# Let's write a SQL query to pick up interesting fields from the dataset. It's a good idea to get the timestamp in a predictable format.
# %%bigquery
# SQL query to get a fields from dataset which prints the 10 records
SELECT
FORMAT_TIMESTAMP(
"%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude, dropoff_longitude,
dropoff_latitude, passenger_count, trip_distance, tolls_amount,
fare_amount, total_amount
# TODO 3
FROM
`nyc-tlc.yellow.trips`
LIMIT 10
# Let's increase the number of records so that we can do some neat graphs. There is no guarantee about the order in which records are returned, and so no guarantee about which records get returned if we simply increase the LIMIT. To properly sample the dataset, let's use the HASH of the pickup time and return 1 in 100,000 records -- because there are 1 billion records in the data, we should get back approximately 10,000 records if we do this.
#
# We will also store the BigQuery result in a Pandas dataframe named "trips"
# %%bigquery trips
SELECT
FORMAT_TIMESTAMP(
"%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude,
dropoff_longitude, dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
print(len(trips))
# We can slice Pandas dataframes as if they were arrays
trips[:10]
# <h3> Exploring data </h3>
#
# Let's explore this dataset and clean it up as necessary. We'll use the Python Seaborn package to visualize graphs and Pandas to do the slicing and filtering.
# TODO 4
# Use Seaborn `regplot()` function to plot the data and a linear regression model fit.
ax = sns.regplot(
x="trip_distance", y="fare_amount",
fit_reg=False, ci=None, truncate=True, data=trips)
ax.figure.set_size_inches(10, 8)
# Hmm ... do you see something wrong with the data that needs addressing?
#
# It appears that we have a lot of invalid data that is being coded as zero distance and some fare amounts that are definitely illegitimate. Let's remove them from our analysis. We can do this by modifying the BigQuery query to keep only trips longer than zero miles and fare amounts that are at least the minimum cab fare ($2.50).
#
# Note the extra WHERE clauses.
# %%bigquery trips
# SQL query with where clause to save the results in the trips dataframe
SELECT
FORMAT_TIMESTAMP(
"%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude,
dropoff_longitude, dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
# TODO 4a
AND trip_distance > 0
AND fare_amount >= 2.5
print(len(trips))
# Use Seaborn `regplot()` function to plot the data and a linear regression model fit.
ax = sns.regplot(
x="trip_distance", y="fare_amount",
fit_reg=False, ci=None, truncate=True, data=trips)
ax.figure.set_size_inches(10, 8)
# What's up with the streaks around 45 dollars and 50 dollars? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable.
#
# Let's also examine whether the toll amount is captured in the total amount.
tollrides = trips[trips["tolls_amount"] > 0]
tollrides[tollrides["pickup_datetime"] == "2012-02-27 09:19:10 UTC"]
notollrides = trips[trips["tolls_amount"] == 0]
notollrides[notollrides["pickup_datetime"] == "2012-02-27 09:19:10 UTC"]
# Looking at a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool.
#
# Let's also look at the distribution of values within the columns.
# Print the distribution of values within the columns using `describe()`
trips.describe()
# Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| courses/machine_learning/deepdive2/launching_into_ml/solutions/python.BQ_explore_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Notes & Code Snippets from MIDS Fundamentals of Data Structures and Algorithms Bridge Course
#
# Trees & Graphs
# ## Implementing a Tree
#
# Represent each node as an object with links to its children.
# +
# NOTE: Check to make sure a child is not already somewhere else in the tree when adding it.
class TreeNode( ):
    """A single node of a general (n-ary) tree.

    Each node holds a value plus an ordered list of child nodes.
    """

    def __init__(self, value=None):
        # Value stored at this node; a fresh node starts with no children.
        self.value = value
        self.children = []

    def add_child(self, other):
        """Attach *other* as the last child of this node."""
        self.children.append(other)

    def get_children(self):
        """Return this node's (mutable) list of children."""
        return self.children
# -
# ## Implementing a Tree Traversal: Depth-First Search
#
# The Python code follows the recursive rule.
def Traverse(T):
    """Depth-first (pre-order) traversal: print every value in the tree.

    Iterative formulation of the recursive rule: visit a node, then
    traverse each of its children, left to right.
    """
    pending = [T]  # nodes still to visit; the top of the stack goes next
    while pending:
        node = pending.pop()
        print(node.value)
        # Push children reversed so the first child is popped (visited) first.
        pending.extend(reversed(node.get_children()))
# <img src="files/images/tree.png">
#
# - Traverse is first called on the root, and we visit the root.
# - We take the first child of the root and call traverse on it, thereby visiting it.
# - We then take the first child of that node and call traverse on it.
# ## Implementing a Tree Traversal: Breadth-First Search
#
# We use a queue to store nodes we need to visit.
def breadth_first_traverse(T):
    """Traverse a tree breadth-first and print every node's value.

    Visits the root, then all depth-1 nodes, then depth-2, and so on.

    Bugfix: the original called an undefined ``Queue`` class (NameError
    at runtime); ``collections.deque`` provides the same FIFO behaviour.
    """
    from collections import deque  # local import keeps the cell self-contained
    pending = deque([T])  # FIFO queue of nodes still to visit
    while pending:
        node = pending.popleft()  # take the node at the front of the queue
        print(node.value)
        # Every time we visit a node at level k, its children (level k+1)
        # are appended to the back of the queue.
        for child in node.get_children():
            pending.append(child)
# <img src="files/images/tree2.png">
#
# - This means that we visit the root.
# - Then we visit each level 1 node.
# - Then we visit each level 2 node.
#
# This might seem harder, because you have to explicitly keep track of nodes you've visited, **but it's actually easy with a queue**.
# ## Implementing a Tree: Binary Tree
class BinaryTreeNode(object):
    """A node of a binary tree: a value plus optional left/right children.

    Bugfix: the original ``add_left`` and ``add_right`` definitions were
    missing the trailing colon, which is a SyntaxError.
    """

    def __init__(self, value = None):
        self.value = value
        self.left = None   # left child node, or None
        self.right = None  # right child node, or None

    def add_left(self, other):
        """Attach *other* as the left child."""
        self.left = other

    def add_right(self, other):
        """Attach *other* as the right child."""
        self.right = other
# <img src="files/images/tree3.png">
#
# - This is constructed so that the left child of a node is always less than the node, and the right child is always greater.
# - New items are always added in the right place, to maintain the ordering of the tree.
# - If you want to find a specific item, you compare it to the root.
# - If the item you want is smaller, follow the left branch.
# - Otherwise, follow the right.
# ## Implementing a Tree: Array-Based Binary Tree
class ArrayTree(object):
    """A binary tree stored in a flat array, 1-indexed with the root at slot 1.

    For a node at index v: left child is 2v, right child is 2v + 1 and the
    parent is v // 2. Slot 0 is never used.
    """
    # Index of the root slot.
    root_position = 1

    def left_child(self, position):
        """Array index of the left child of *position*."""
        return 2 * position

    def right_child(self, position):
        """Array index of the right child of *position*."""
        return 2 * position + 1

    def parent(self, position):
        """Array index of the parent of *position* (None for the root)."""
        return None if position == 1 else position // 2

    def __init__(self):
        # Backing storage; slots with no node hold None.
        self.data = []

    def insert(self, value, position):
        """Store *value* at *position*, growing the array when required."""
        required = position + 1
        if len(self.data) < required:
            # Pad with None so self.data[position] becomes addressable.
            self.data += [None] * (required - len(self.data))
        self.data[position] = value

    def get_value(self, position):
        """Return the value stored at *position*."""
        return self.data[position]
# <img src="files/images/tree4.png">
#
# - Number the positions of the binary tree, row by row, left to right, starting from 1.
# - Write whether or not there's actually a node there.
# - Write the tree contents into an array in this order.
# - The number of the node becomes the array index.
# - We start with node v.
# - 2v is the left child.
# - 2v+1 is the right child
# - v//2 is the parent
#
#
# ## Graph Implementations: A Linked Graph
#
# The linked graph is similar to our linked tree data structure.
class GraphNode(object):
    """A node of an undirected graph, linked directly to its neighbours."""

    def __init__(self, value=None):
        self.value = value
        # Adjacency list: the neighbouring GraphNode objects.
        self.connections = []

    def add_connections(self, other):
        """Create an undirected edge between this node and *other*.

        Both endpoints record each other; a duplicate edge is skipped on
        each side, so repeated calls are harmless.
        """
        for node, neighbour in ((self, other), (other, self)):
            if neighbour not in node.connections:
                node.connections.append(neighbour)

    def get_connections(self):
        """Return this node's adjacency list."""
        return self.connections
# ## Dijkstra’s Shortest Path Algorithm
# <a href='https://www.geeksforgeeks.org/python-program-for-dijkstras-shortest-path-algorithm-greedy-algo-7/'>Code from GeeksforGeeks</a>
# +
# Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
# Library for INT_MAX
import sys
class Graph():
    """Dijkstra's single-source shortest paths on a dense adjacency matrix.

    ``self.graph[u][v]`` holds the weight of edge u->v; 0 means "no edge".
    """

    def __init__(self, vertices):
        self.V = vertices  # number of vertices
        self.graph = [[0 for column in range(vertices)]
                      for row in range(vertices)]

    def printSolution(self, dist):
        """Print the distance from the source to every vertex.

        Bugfix: the original printed a literal ``t`` instead of a tab
        character (missing backslash in the escape sequence).
        """
        print("Vertex \tDistance from Source")
        for node in range(self.V):
            print(node, "\t", dist[node])

    def minDistance(self, dist, sptSet):
        """Return the not-yet-finalized vertex with the smallest tentative
        distance, or -1 when every remaining vertex is unreachable."""
        best = sys.maxsize
        # Bugfix: initialize the index so a disconnected graph (no vertex
        # with dist < maxsize left) cannot raise UnboundLocalError.
        min_index = -1
        for v in range(self.V):
            if dist[v] < best and not sptSet[v]:
                best = dist[v]
                min_index = v
        return min_index

    def dijkstra(self, src):
        """Compute and print shortest distances from vertex *src*."""
        dist = [sys.maxsize] * self.V  # tentative distances
        dist[src] = 0
        sptSet = [False] * self.V      # True once a vertex's distance is final
        for cout in range(self.V):
            # Pick the closest unfinalized vertex; on the first iteration
            # this is always src itself.
            u = self.minDistance(dist, sptSet)
            sptSet[u] = True
            # Relax every edge out of u whose target is not yet finalized.
            for v in range(self.V):
                if self.graph[u][v] > 0 and \
                   not sptSet[v] and \
                   dist[v] > dist[u] + self.graph[u][v]:
                    dist[v] = dist[u] + self.graph[u][v]
        self.printSolution(dist)
# Driver program
# Build the 9-vertex example graph from the GeeksforGeeks article
# (symmetric adjacency matrix, 0 meaning "no edge") and run Dijkstra
# from vertex 0.
g = Graph(9)
g.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0],
           [4, 0, 8, 0, 0, 0, 0, 11, 0],
           [0, 8, 0, 7, 0, 4, 0, 0, 2],
           [0, 0, 7, 0, 9, 14, 0, 0, 0],
           [0, 0, 0, 9, 0, 10, 0, 0, 0],
           [0, 0, 4, 14, 10, 0, 2, 0, 0],
           [0, 0, 0, 0, 0, 2, 0, 1, 6],
           [8, 11, 0, 0, 0, 0, 1, 0, 7],
           [0, 0, 2, 0, 0, 0, 6, 7, 0]
           ]
g.dijkstra(0)
# This code is contributed by <NAME>
# -
# ## Page Rank Algorithm
#
# <a href='https://www.geeksforgeeks.org/page-rank-algorithm-implementation/'>Code from GeeksforGeeks</a>
def pagerank(G, alpha=0.85, personalization=None,
             max_iter=100, tol=1.0e-6, nstart=None, weight='weight',
             dangling=None):
    """Return the PageRank of the nodes in the graph.
    PageRank computes a ranking of the nodes in the graph G based on
    the structure of the incoming links. It was originally designed as
    an algorithm to rank web pages.
    Parameters
    ----------
    G : graph
      A NetworkX graph. Undirected graphs will be converted to a directed
      graph with two directed edges for each undirected edge.
    alpha : float, optional
      Damping parameter for PageRank, default=0.85.
    personalization: dict, optional
      The "personalization vector" consisting of a dictionary with a
      key for every graph node and nonzero personalization value for each node.
      By default, a uniform distribution is used.
    max_iter : integer, optional
      Maximum number of iterations in power method eigenvalue solver.
    tol : float, optional
      Error tolerance used to check convergence in power method solver.
    nstart : dictionary, optional
      Starting value of PageRank iteration for each node.
    weight : key, optional
      Edge data key to use as weight. If None weights are set to 1.
    dangling: dict, optional
      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
      any outedges. The dict key is the node the outedge points to and the dict
      value is the weight of that outedge. By default, dangling nodes are given
      outedges according to the personalization vector (uniform if not
      specified). This must be selected to result in an irreducible transition
      matrix (see notes under google_matrix). It may be common to have the
      dangling dict to be the same as the personalization dict.
    Returns
    -------
    pagerank : dictionary
      Dictionary of nodes with PageRank as value
    Notes
    -----
    The eigenvector calculation is done by the power iteration method
    and has no guarantee of convergence. The iteration will stop
    after max_iter iterations or an error tolerance of
    number_of_nodes(G)*tol has been reached.
    The PageRank algorithm was designed for directed graphs but this
    algorithm does not check if the input graph is directed and will
    execute on undirected graphs by converting each edge in the
    directed graph to two edges.
    """
    # NOTE(review): `nx` and `NetworkXError` are assumed to come from
    # networkx, which is not imported in this cell — verify an earlier
    # cell does `import networkx as nx` and imports NetworkXError.
    if len(G) == 0:
        return {}
    if not G.is_directed():
        D = G.to_directed()
    else:
        D = G
    # Create a copy in (right) stochastic form
    W = nx.stochastic_graph(D, weight=weight)
    N = W.number_of_nodes()
    # Choose fixed starting vector if not given
    if nstart is None:
        x = dict.fromkeys(W, 1.0 / N)
    else:
        # Normalized nstart vector
        s = float(sum(nstart.values()))
        x = dict((k, v / s) for k, v in nstart.items())
    if personalization is None:
        # Assign uniform personalization vector if not given
        p = dict.fromkeys(W, 1.0 / N)
    else:
        missing = set(G) - set(personalization)
        if missing:
            raise NetworkXError('Personalization dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        s = float(sum(personalization.values()))
        p = dict((k, v / s) for k, v in personalization.items())
    if dangling is None:
        # Use personalization vector if dangling vector not specified
        dangling_weights = p
    else:
        missing = set(G) - set(dangling)
        if missing:
            raise NetworkXError('Dangling node dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        s = float(sum(dangling.values()))
        dangling_weights = dict((k, v/s) for k, v in dangling.items())
    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
    # power iteration: make up to max_iter iterations
    for _ in range(max_iter):
        xlast = x
        x = dict.fromkeys(xlast.keys(), 0)
        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
        for n in x:
            # this matrix multiply looks odd because it is
            # doing a left multiply x^T=xlast^T*W
            for nbr in W[n]:
                x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
            x[n] += danglesum * dangling_weights[n] + (1.0 - alpha) * p[n]
        # check convergence, l1 norm
        err = sum([abs(x[n] - xlast[n]) for n in x])
        if err < N*tol:
            return x
    raise NetworkXError('pagerank: power iteration failed to converge '
                        'in %d iterations.' % max_iter)
| 0_Trees_Graphs_Notes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit
# name: python3
# ---
# # 🚀 **Aula 03 - Automação de processos, criação de interfaces e projetos!**
# ## Nesta aula estaremos construindo interfaces gráficas para o úsuario final e realizando automação de processos, utilizando as bibliotecas **pyautogui** para automação de processos, **Selenium** para automação web e **PySimpleGUI** para construção das interfaces.
# ---
# # **1. Criacao de interfaces** ⚡
# <img src="images/interface.jpg" alt="Drawing" style="width: 400px;"/>
# ## Geralmente, a criacao de interfaces para o usuario final é um procedimento demorado; no entanto, com Python é uma tarefa simples, utilizando a biblioteca **PySimpleGUI**.
# ## **1.1 O que é GUI?**
# ### GUI - Graphical user interface (Interface gráfica do utilizador) <br>
# ### Em informática, interface gráfica do utilizador ou usuário é um tipo de interface do utilizador que permite a interação com dispositivos digitais por meio de elementos gráficos como ícones e outros indicadores visuais, em contraste a interface de linha.
# ## **1.2 Instalando a biblioteca PySimpleGUI**
# ### Para instalarmos a biblioteca iremos utilizar o seguinte comando
# !pip install PySimpleGUI
# ## **1.3 Criando nossa primeira tela**
# ### Com a biblioteca PySimpleGUI é muito facil criar uma interface gráfica. <br>
# ### Segue abaixo a criacao de nossa primeira interface com apenas uma **linha de código**
import PySimpleGUI as sg
# One-line popup window: a title plus a message line.
sg.popup('Minha primeira tela!', 'Minha primeira tela com apenas uma linha de código!')
# ### Que tal uma GUI customizada para usuario final com apenas uma linha de código?
# One-line form: text label, input field with a file-browse button, OK/Cancel.
event, values = sg.Window('Colete o nome do arquivo', [[sg.Text('Nome do arquivo')], [sg.Input(), sg.FileBrowse(button_text = 'Buscar')], [sg.OK(), sg.Cancel(button_text = 'Cancelar')] ]).read(close=True)
# ## The Beauty of Simplicity
# ## One day I will find the right words, and they will be simple. ― <NAME>
# ### É bom que você possa dividir as coisas em uma linha, como no exemplo acima, mas não é legível. <br>
# ### Vamos adicionar alguns espaços em branco para que você possa ver a beleza do código **PySimpleGUI**.
# +
sg.theme('DarkRed') # Vamos definir o tema do programa
layout = [ [sg.Text('Nome do Arquivo')],
[sg.Input(), sg.FileBrowse(button_text = 'Buscar')],
[sg.OK(), sg.Cancel(button_text = 'Cancelar')]]
window = sg.Window('Colete o nome do arquivo', layout)
evento, valores = window.read()
window.close()
# -
# ## **1.4 Lista de temas**
# ### A biblioteca contém varios temas pré definidos. <br>
# ### Caso o programador deseje ver o nome de todos basta chamar a seguinte funcao
# All built-in theme names, as a list of strings.
lista_de_temas = sg.theme_list()
print(lista_de_temas)
# ### E para visualizar todos
# Render a preview window showing every theme.
sg.theme_previewer()
# ### Ou melhor, podemos criar uma GUI interativa para o usuario ter um preview de cada tema.
# +
sg.theme('DarkTeal9') #Tema padrão
layout = [[sg.Text('Lista de temas pré prontos')],
[sg.Text('Escolha um tema para ter uma pré visualicao')],
[sg.Listbox(values = sg.theme_list(),
size =(20, 12),
key ='-LIST-',
enable_events = True)],
[sg.Button('Sair')]]
janela = sg.Window('Lista de temas', layout)
# Evento para permanecer na tela
while True:
evento, valores = janela.read()
# Se o evento for sair ou cancelar quebrar o loop, o que fara a janela fechar.
if evento in (None, 'Sair'):
break
nome_tema = valores['-LIST-'][0]
sg.theme(nome_tema)
sg.popup_get_text(f'Este é o tema {nome_tema}')
# Fechar a janela
janela.close()
# -
# ---
# # **2. Automação de processos** ⚡
# ### Para automação de processos em geral iremos utilizar a biblioteca **Pyautogui** e para automacao na web especificos iremos utilizar a biblioteca **Selenium**
# ---
# # **3. Pyautogui** ⚡
# ### **pyautogui** é um módulo de automação de GUI para Python3 que fornece métodos para controlar mouse e teclado. <br>
# ### Esse módulo pode ser usado para criar bots para automatizar tarefas repetitivas, enquanto você pode desfrutar do seu café.
# ### “Pyautogui pode fazer qualquer coisa que um usuário humano sentado na frente do computador pode fazer, exceto derramar café no teclado”, diz o geek responsável por esse módulo.
# ## **3.1 Importando módulos**
# !pip install PyAutoGUI
import pyautogui as pg
import time
# ## **3.2 Escrevendo texto no bloco de notas**
# ### Para nosso primeiro exemplo, iremos criar uma funcao que escrever algumas frases no bloco de notas de forma automática.
def escrever_bloco_de_notas():
    """Open Notepad via the Start menu, type a list of foods and save it.

    NOTE(review): drives the real desktop GUI with fixed sleeps, so it
    assumes a Windows machine whose search finds Notepad for the query
    "bloco de notas" and that each window appears within the delay —
    confirm the timings on slower machines.
    """
    time.sleep(0.5)
    pg.press('winleft')  # open the Start menu
    time.sleep(1)
    pg.write('bloco de notas', interval = 0.1)  # search for Notepad
    time.sleep(1)
    pg.press('enter')  # launch the first search result
    time.sleep(2)
    comidas = ['arroz', 'macarrao', 'feijao', 'pao de queijo', 'frango', 'camarao']
    for comida in comidas:
        # Type each food on its own line.
        pg.write(comida.capitalize() + '\n', interval = 0.1)
    time.sleep(1)
    pg.hotkey('ctrl', 's')  # open the Save dialog
    time.sleep(2)
    pg.write('Lista de Comidas')  # file name
    pg.press("tab", presses=4, interval= 0.1)  # move focus to the Save button
    time.sleep(0.5)
    pg.press('enter')  # confirm the save
escrever_bloco_de_notas()
# ### **OBS:** <br>
#
# ### Para parar de executar o programa simplesmente coloque o mouse na posicao (0,0) da tela
# <img src="images/Coordenadas.png" alt="Drawing" style="width: 400px;"/>
# ## **3.3 clicando em coordenadas específicas**
pg.click(x=100, y=200)  # single left-click at screen pixel (100, 200)
# ---
# # **4. Selenium** ⚡
# ### **Selenium**, nada mais é, do que uma biblioteca que permite com que o programa abra o seu navegador para executar os comandos desejados.
# ### **Selenium** é totalmente implementado e compatível com JavaScript (Node.js), Python, Ruby, Java e C#
# ## **4.1 Instalando e importando pacotes**
# ### Para utilizarmos o Selenium, além de importar as bibliotecas necessárias iremos precisar de um web driver.
# ### Para esse curso, irei estar utilizando o web driver do google chrome.
# !pip install selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# ## **4.2 Automatizando a busca de produtos no AliExpress**
# ### Com selenium, podemos simular todos os tipos de comandos humanos em um website. <br>
# ### Vejamos um pequeno exemplo abaixo:
def busca_de_produtos(produto):
    """Open aliexpress.com in Chrome and search for *produto*.

    NOTE(review): uses the Selenium 3 ``find_element_by_xpath`` API,
    which was removed in Selenium 4 — pin ``selenium<4`` or migrate to
    ``driver.find_element(By.XPATH, ...)``.
    """
    driver = webdriver.Chrome()  # requires chromedriver on PATH
    site = 'https://www.aliexpress.com'
    driver.get(site)
    # Type the product name into the search bar.
    barra_de_pesquisa = driver.find_element_by_xpath('//*[@id="search-key"]')
    barra_de_pesquisa.send_keys(produto)
    driver.implicitly_wait(10)
    # Click the search control to submit the query.
    icone_pesquisa = driver.find_element_by_xpath('//*[@id="form-searchbar"]/div[1]/input')
    icone_pesquisa.click()
    driver.implicitly_wait(10)
    driver.close()
produto_procurado = input("Digite o nome do produto: ")
busca_de_produtos(produto_procurado)
# ---
# # **5. Projeto final - Desenvolvendo novos fornecedores no site do Alibaba** ⚡
# ### Como projeto final de nosso curso, estaremos desenvolvendo um programa para desenvolver novos fornecedores de determinado produto no site do alibaba, utilizando interface gráfica, coleta de dados, automacao de processos e web scraping.
# ## **5.1 Descrição do projeto**
# ### Criar um programa com interface gráfica para o usuário final, que recebe um input de um produto e seja capaz de
# - ### Coletar o website, nome da empresa e os principais produtos de cada fornecedor
# - ### Dar ao usuario final o controle de quantos fornecedores se deseja
# - ### Retornar um arquivo do tipo excel, com os dados coletados de cada fornecedor
# - ### Contatar os fornecedores automaticamente, com uma mensagem escolhida pelo usuário
# - ### Criar uma opção onde o usuário possa escolher se quer apenas os dados de cada fornecedor (1) ou contatar-los automaticamente (2)
# - ### Retornar o tempo total da operacao (1) ou (2)
# ---
| Aula 03 - Automacao de processos, criacao de interface e projetos/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Triangle areas for mesh spheres
#
# This notebook calculates the area histograms for the triangulated spheres.
# + jupyter={"outputs_hidden": false}
import numpy as np
import pyradi.ryfiles as ryfiles
import pyradi.ryplot as ryplot
# + jupyter={"outputs_hidden": false}
filelist = ryfiles.listFiles('.','trianglessphere*.*')
p=ryplot.Plotter(1,2,3,'Triangle area histograms',(18,12),doWarning=False)
for subplot,filename in enumerate( filelist):
    # The matching vertex file shares the name apart from the prefix.
    vtxfilename = filename.replace('triangles','vertex')
    tri=np.loadtxt(filename,dtype='i4')
    vtx = np.loadtxt(vtxfilename)
    areaLst = []
    angleLst = []
    areaTot = 0
    for triangle in tri:
        length = []
        peri = 0
        for i in range(0,3):
            vtxa = vtx[triangle[i]]
            vtxb = vtx[triangle[(i+1)%3]]
            # Angle between the two vertex vectors, used as the edge length.
            angle = np.arccos(np.dot(vtxa,vtxb))
            length.append(angle)
            angleLst.append(angle)
            peri += angle
        # from heron's theorem the planar (not spherical) area between three points
        # is given by Area=SQRT(s(s-a)(s-b)(s-c)), where s=(a+b+c)/2 or perimeter/2.
        s = peri/2
        area = np.sqrt(s * (s-length[0]) * (s-length[1]) * (s-length[2]) )
        areaLst.append(area )
        areaTot += area
    areaA = np.asarray(areaLst)
    hist,bins= np.histogram(areaA, bins=100)
    # NOTE(review): bins[:-1] + np.diff(bins) equals bins[1:], i.e. the RIGHT
    # bin edges; true bin centres would be bins[:-1] + np.diff(bins)/2 —
    # confirm which was intended for the x axis.
    binc = bins[:-1]+np.diff(bins)
    agvArea = np.mean(areaA)
    agvAngle = 180 * np.mean(angleLst) / np.pi
    # Normalise the x axis by the average triangle area.
    binc = binc / agvArea
    # print(areaTot)
    # print(f'tot area error={100*(4-(areaTot/np.pi)):.4f}')
    print(f'{vtxfilename:10s} avg area={agvArea:.2e} tot area error={100*(4-(areaTot/np.pi)):.4f} Resolution={agvAngle:.3f} deg')
    lowerExl = 2
    # Only files with index in (lowerExl, 8) are plotted.
    if(subplot > lowerExl and subplot < 8):
        ptitle = '{0} (TriArea={1:.4e})'.format(filename,agvArea)
        p.plot(subplot-lowerExl,binc,hist,xlabel='Normalised area',ylabel='Count',ptitle=ptitle)
p.saveFig('triangleareas.eps')
# -
# Evidently the total surface error is relatively small for spheres with large number of vertices, but the area distribution is slightly skew around the average value, towards smaller areas.
# + [markdown] jupyter={"outputs_hidden": false}
# See here http://corysimon.github.io/articles/uniformdistn-on-sphere/ for random placement on the sphere.
| pyradi/data/plotspherical/sphereanalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table class="ee-notebook-buttons" align="left">
# <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Visualization/nwi_wetlands_symbology.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
# <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Visualization/nwi_wetlands_symbology.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
# <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Visualization/nwi_wetlands_symbology.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Visualization/nwi_wetlands_symbology.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
# </table>
# ## Install Earth Engine API
# Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
# The magic command `%%capture` can be used to hide output from a specific cell.
# +
# # %%capture
# # !pip install earthengine-api
# # !pip install geehydro
# -
# Import libraries
import ee
import folium
import geehydro
# Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
# if you are running this notebook for this first time or if you are getting an authentication error.
# ee.Authenticate()
# Initialize the Earth Engine client (requires prior authentication).
ee.Initialize()
# ## Create an interactive map
# This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
# The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
# Folium map centred at (40N, 100W), zoom 4; switch basemap to HYBRID.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# ## Add Earth Engine Python script
# +
# NWI legend: https://www.fws.gov/wetlands/Data/Mapper-Wetlands-Legend.html
def nwi_add_color(fc):
    """Convert an NWI wetlands feature collection into an RGB image.

    Each feature gets R/G/B properties according to its WETLAND_TY value
    (NWI legend colours), and the merged collection is painted onto a
    fully-masked base image, one band per colour channel.
    """
    # (wetland type, R, G, B) per the NWI legend.
    legend = (
        ('Freshwater Emergent Wetland', 127, 195, 28),
        ('Freshwater Forested/Shrub Wetland', 0, 136, 55),
        ('Freshwater Pond', 104, 140, 192),
        ('Lake', 19, 0, 124),
        ('Riverine', 1, 144, 191),
    )
    merged = None
    for wetland_type, red, green, blue in legend:
        subset = fc.filter(ee.Filter.eq('WETLAND_TY', wetland_type))
        # Bind the colours as lambda defaults so each mapper keeps its own.
        subset = subset.map(
            lambda f, r=red, g=green, b=blue: f.set('R', r).set('G', g).set('B', b))
        merged = subset if merged is None else merged.merge(subset)
    fc = ee.FeatureCollection(merged)
    # Fully-masked int8 base; paint one band per colour property.
    base = ee.Image(0).mask(0).toInt8()
    img = base.paint(fc, 'R') \
        .addBands(base.paint(fc, 'G')
                  .addBands(base.paint(fc, 'B')))
    return img
fromFT = ee.FeatureCollection("users/wqs/Pipestem/Pipestem_HUC10")
# Outline-only rendering of the watershed boundary (value 0, width 2).
Map.addLayer(ee.Image().paint(fromFT, 0, 2), {}, 'Watershed')
huc8_id = '10160002'
nwi_asset_path = 'users/wqs/NWI-HU8/HU8_' + huc8_id + '_Wetlands' # NWI wetlands for the clicked watershed
clicked_nwi_huc = ee.FeatureCollection(nwi_asset_path)
# Colour the wetlands by type and centre the map on them.
nwi_color = nwi_add_color(clicked_nwi_huc)
Map.centerObject(clicked_nwi_huc, 10)
Map.addLayer(nwi_color, {'gamma': 0.3, 'opacity': 0.7}, 'NWI Wetlands Color')
# -
# ## Display Earth Engine data layers
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
| Visualization/nwi_wetlands_symbology.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def test_exercise_3_1a(x) -> bool:
countries = ["India", "USA", "France", "UK"]
return final_list_of_word == x
def test_exercise_3_1b(x) -> bool:
capitals = ["Delhi", "Washington", "Paris", "London"]
return final_list_of_word == x
def test_exercise_3_2(x) -> bool:
countries = ["India", "USA", "France", "UK"]
capitals = ["Delhi", "Washington", "Paris", "London"]
countries_and_capitals = [t for t in zip(countries, capitals)]
return countries_and_capitals == x
def test_exercise_3_3(x) -> bool:
countries = ["India", "USA", "France", "UK"]
capitals = ["Delhi", "Washington", "Paris", "London"]
countries_and_capitals_as_dict = dict(zip(countries, capitals))
return countries_and_capitals_as_dict == x
| Chapter06/unit_testing/Exercise 6.04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/KshitijMShah/Projectile_Air_Resistance-/blob/main/Projectile_Motion_with_Air_Friction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="uEuhhLyStUfy"
import numpy as np
import scipy as sp
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from sympy import*
# NOTE(review): this rebinds the name `sp` from scipy (two lines above)
# to sympy — after this line `sp` is sympy, not scipy. Confirm no later
# cell relies on `sp` being scipy.
import sympy as sp
from sympy.interactive import printing
printing.init_printing(use_latex = True)
import matplotlib.pyplot as plt
# + [markdown] id="hJhjwLN4uTpl"
# The net force for moving with air friction under gravity is
#
#
# $\vec{F_{net}} = \vec{F_{g}} + \vec{F_{f}} = -mg\hat{y} \ - b \mid \vec{\nu} \mid \vec{\nu} $
#
#
# and noting that $\vec{v} = \dot{x} \hat{x} + \dot{y} \hat{y}$
#
#
#
# or in vector form
#
# $\vec{F_{net}} = \begin{bmatrix}
# -b\dot{x}\sqrt{\dot{x}^{2} \ +\ \dot{y}^{2}}\\
# -mg\ -b\dot{y}\sqrt{\dot{x}^{2} +\dot{y}^{2}}
# \end{bmatrix}$
#
#
# Using the fact
#
#
# $\vec{F}_{net} = m \vec{a} = m \langle \ddot{x},\ \ddot{y}\rangle$
#
#
# $m \begin{bmatrix}\ddot{x} \\ \ddot{y} \end{bmatrix} = \begin{bmatrix}
# -b\dot{x}\sqrt{\dot{x}^{2} \ +\ \dot{y}^{2}}\\
# -mg\ -b\dot{y}\sqrt{\dot{x}^{2} +\dot{y}^{2}}
# \end{bmatrix}$
#
#
# and thus have two coupled differential equations
#
# $\ddot{x} = - \frac{b}{m} \dot{x} \sqrt{\dot{x}^2 + \dot{y}^2}$
#
#
# $\ddot{y} = -g - \frac{b}{m} \dot{y} \sqrt{\dot{x}^2 + \dot{y}^2}$
#
#
# Defining $x' = \frac{x}{g} $ and $y' = \frac{y}{g} $ we get
#
#
# $\ddot{x'} = -\frac{bg}{m} \dot{x'}\sqrt{\dot{x'}^2 + \dot{y'}^2}$
#
#
# $\ddot{y'} = -1 - \frac{bg}{m} \dot{y'} \sqrt{\dot{x'}^2 + \dot{y'}^2}$
#
# In python we can only solve ODEs, so defining $v_x = \dot{x}$ and $v_y = \dot{y}$ we get a system of 4 coupled first order ODEs
#
# * $\dot{x} = v_x$
# * $\dot{y} = v_y$
# * $\dot{v_x} = -\textbf{B}\dot{x} \sqrt{\dot{x}^2 + \dot{y}^2}$
# * $\dot{v_y} = -1 - \textbf{B}\dot{y} \sqrt{\dot{x}^2 + \dot{y}^2}$
#
#
# Where $\textbf{B} = \frac{bg}{m}$
#
# Define $\vec{S} = \langle x, v_x, y, v_y \rangle$. To solve ODEs in python, we need to write a fucntion that takes $\vec{S}$ and time t, and returns $d\vec{S}/dt$. In other words we want $f$ in
#
#
# <h2><center>$ \frac{d\vec{S}}{dt} = f(\vec{S}, t) $ </center></h2>
#
#
# + id="zUcJR72s1lfn"
#Define function f above
def dsdt(t, S, B):
    """Right-hand side of the nondimensional projectile ODE with drag.

    S is the state vector [x, vx, y, vy]; B = b*g/m is the drag
    parameter. Returns dS/dt (gravity is -1 in these units).
    """
    x, vx, y, vy = S
    speed = np.sqrt(vx**2 + vy**2)  # |v|, shared by both drag terms
    return [vx, -B * speed * vx, vy, -1 - B * speed * vy]
# + id="vCupXJQS6NRl"
B = 10                 # drag parameter B = b*g/m
V = 1                  # launch speed (nondimensional)
t1 = 40*np.pi / 180    # launch angles, converted from degrees to radians
t2 = 45 * np.pi /180
t3 = 50 * np.pi / 180
# + [markdown] id="ear2oBnP-dyR"
# Solve the ODE using scipy- The fucntion takes in $d\vec{S}/dt$ function, time period to solve over [0, 2] seconds, initial conditions, and additional arguments B(friction force) for the fucntion
# + id="DSS8ZgXp-dBG"
# Integrate from t=0 to 2 for each launch angle; the initial state is
# [x, vx, y, vy] = [0, V*cos(theta), 0, V*sin(theta)], sampled at 1000 points.
sol1 = solve_ivp(dsdt, [0, 2], y0=[0,V*np.cos(t1),0,V*np.sin(t1)], t_eval=np.linspace(0,2,1000), args=(B,))
sol2 = solve_ivp(dsdt, [0, 2], y0=[0,V*np.cos(t2),0,V*np.sin(t2)], t_eval=np.linspace(0,2,1000), args=(B,))
sol3 = solve_ivp(dsdt, [0, 2], y0=[0,V*np.cos(t3),0,V*np.sin(t3)], t_eval=np.linspace(0,2,1000), args=(B,))
# + colab={"base_uri": "https://localhost:8080/", "height": 509} id="qIIGUI7E_YaY" outputId="f79c064e-d77e-4fae-b093-f3a34834ec2f"
# sol.y is the array of solutions of the differential equation
# (row 0 is x(t), row 2 is y(t)) — not to be confused with the y coordinate.
plt.figure(figsize=(20,8))
plt.plot(sol1.y[0],sol1.y[2], label=r'$\theta_0=40^{\circ}$')
plt.plot(sol2.y[0],sol2.y[2], label=r'$\theta_0=45^{\circ}$')
plt.plot(sol3.y[0],sol3.y[2], label=r'$\theta_0=50^{\circ}$')
plt.ylim(bottom = 0)
plt.legend()
plt.xlabel('$x/g$', fontsize = 20)
plt.ylabel('$y/g$', fontsize = 20)
plt.show()
# + id="fd0tghIO_4b3"
def get_distance(angle, B= 0, V= 1, t= 2):
    """Horizontal distance travelled when the projectile returns to y=0.

    *angle* is in degrees; *B* is the drag parameter b*g/m; *V* the
    launch speed; *t* the integration horizon.
    """
    v0x = V*np.cos(angle*np.pi/180)
    v0y = V*np.sin(angle*np.pi/180)
    sol = solve_ivp(dsdt, [0, t], y0=[0,v0x,0,v0y], t_eval=np.linspace(0,t,10000), args=(B,), atol=1e-7, rtol=1e-4)
    # First sample where y changes sign from + to - (the projectile lands).
    # NOTE(review): this raises IndexError if the projectile has not landed
    # within the horizon t — confirm callers pass a large enough t.
    just_above_idx = np.where(np.diff(np.sign(sol.y[2])) < 0)[0][0]
    just_below_idx = just_above_idx + 1
    # Approximate the landing x as the midpoint of the bracketing samples.
    x_loc = (sol.y[0][just_above_idx] + sol.y[0][just_below_idx])/2
    return x_loc
# + colab={"base_uri": "https://localhost:8080/"} id="iRhLF_wBMlgM" outputId="e3421e15-0ae6-4789-f7d3-965f50b3e2e1"
# Typo fix in the printed message: "angel" -> "angle".
print(f'Launch angle 45 degrees distance travelled: {get_distance(45, B=0, V=1)}')
print(f'Launch angle 40 degrees distance travelled: {get_distance(40, B=0, V=1)}')
# + id="xwJ6MMIrOsUE"
angles = np.linspace(0, 90, 200)
# Distance for each launch angle at very strong drag (B = 1000).
x_locs = np.vectorize(get_distance)(angles, B= 1000, V= 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 523} id="5pRxI9EQcYvh" outputId="2710348a-6b0e-41da-b93c-3c529f42ad9c"
plt.figure(figsize=(20,8))
plt.plot(angles, x_locs)
plt.xlabel('Launch Angle [degrees]', fontsize = 20)
plt.ylabel('Maximum Distance[distance/gravity]', fontsize = 20)
# Mark the angle that maximises the range.
plt.axvline(angles[np.argmax(x_locs)], ls = '--', color = 'r')
plt.show()
print("Optimal Launch angle", angles[np.argmax(x_locs)])
# + id="mTWiHgNKAJag"
V1 = 1
V2 = 20
angles = np.linspace(35, 45, 200)
Bs = np.linspace(0, 1, 50)
# For each drag value, find the angle that maximises range, at two speeds
# (the faster projectile gets a longer integration horizon, t=6).
results_v1 = [np.vectorize(get_distance)(angles, B=B, V=V1) for B in Bs]
opt_angles_v1 = [angles[np.argmax(result)] for result in results_v1]
results_v2 = [np.vectorize(get_distance)(angles, B=B, V=V2, t=6) for B in Bs]
opt_angles_v2 = [angles[np.argmax(result)] for result in results_v2]
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="YP-QtPS9V9kz" outputId="ec194080-28c1-429c-a7b8-d418bbd3bde4"
plt.plot(Bs, opt_angles_v1, 'o--', label='$v_0/g=1$s')
#plt.plot(Bs, opt_angles_v2, 'o--', label='$v_0/g=2$s')
plt.legend(fontsize=17)
plt.xlabel('bg/m [1/$s^2$]', fontsize=20)
plt.ylabel('Optimal Angle', fontsize=20)
plt.grid()
# + id="pfQlaL0jWBFZ"
| Projectile_Motion_with_Air_Friction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
# %load_ext autoreload
# %autoreload 2
# + deletable=true editable=true
from __future__ import division, print_function
import itertools as it
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import tqdm
from ..src.ros.spirit.src.helpers import Pose, Frame
# %matplotlib inline
# + [markdown] deletable=true editable=true
# Let's generate the combinations:
# + deletable=true editable=true
def combos(combo_min, combo_max, combo_len):
    """Yield every length-`combo_len` tuple of integers in [combo_min, combo_max].

    Equivalent to the Cartesian product of the inclusive integer range with
    itself `combo_len` times.  Uses `range` instead of the Python-2-only
    `xrange`, so the cell also runs under the Python 3 kernel declared in the
    notebook header (range works identically here on Python 2 as well).
    """
    for combo in it.product(range(combo_min, combo_max + 1),
                            repeat=combo_len):
        yield combo
def combo_dicts(param_names, combo_min, combo_max, combo_len):
    """Yield an OrderedDict mapping `param_names` to each coefficient combo.

    Fixes two defects in the original:
    - it zipped against the *global* ``param1_names`` instead of the
      ``param_names`` argument, so the caller's names (e.g. ``param2_names``)
      were silently ignored;
    - ``it.izip`` does not exist on Python 3 (the notebook's declared
      kernel); the built-in ``zip`` works on both Python 2 and 3.
    """
    for combo in combos(combo_min, combo_max, combo_len):
        yield OrderedDict(zip(param_names, combo))
# + [markdown] deletable=true editable=true
# We have 5 coefficients. If we make the maximum ratio a 10:1 ratio, and use integer steps, that's 100,000 tests we need to do. 200,000 if we consider that centrality and centrality2 are mutually exclusive. Can we reduce them by taking equivalent ratios? (e.g., [1, 2, 3, 4, 5] and [2, 4, 6, 8, 10] are equivalent.)
# + deletable=true editable=true
def reduction_percentage(max_ratio, n_slots=5):
    """Percentage of coefficient combinations removed by normalising each
    tuple to its maximum entry (equivalent ratios collapse to one key)."""
    every = list(combos(1, max_ratio, n_slots))
    normalised = {tuple(np.array(c) / max(c)) for c in every}
    return 100 * (len(every) - len(normalised)) / len(every)
# + deletable=true editable=true
# `raw_input` is Python-2-only and raises NameError under the Python 3 kernel
# this notebook declares; fall back to `input` without changing Python 2
# behaviour.  (tqdm.tqdm_notebook is the notebook-aware progress bar.)
try:
    _prompt = raw_input  # Python 2
except NameError:
    _prompt = input  # Python 3
response = _prompt("This cell takes about 4 minutes to run. Press 'y' to continue: ").lower()
if response == "y":
    x = np.arange(1, 25)
    y = [reduction_percentage(i) for i in tqdm.tqdm_notebook(x)]
# + deletable=true editable=true
if response == "y":
    plt.plot(x, y)
    plt.title("Maximum iteration reduction")
    plt.ylabel("Reduction (%)")
    plt.xlabel("Maximum ratio")
# + [markdown] deletable=true editable=true
# Well, that's not very efficient. As the ratio increases, the computation takes an exponentially longer time. Checking up to 24:1 takes us almost 4 minutes, with almost 55 seconds needed for the last iteration. Even early on, the best improvement was about 3.5%, and that stayed consistent. It's not worth keeping the entire list in memory just for this tiny gain.
# + deletable=true editable=true
# Coefficient grid: every weight takes an integer value in [1, 10], for the
# five evaluator terms.  The two name lists differ only in their first entry
# (centrality vs centrality2), which the markdown above says are mutually
# exclusive.
combo_min = 1
combo_max = 10
combo_len = 5
param1_names = ["centrality", "direction", "distance", "direction_with_current", "distance_with_current"]
param2_names = ["centrality2", "direction", "distance", "direction_with_current", "distance_with_current"]
# combo_dicts returns lazy generators; nothing is materialised here.
params1 = combo_dicts(param1_names, combo_min, combo_max, combo_len)
params2 = combo_dicts(param2_names, combo_min, combo_max, combo_len)
# + deletable=true editable=true
# take the first coefficient combination for the smoke test below
combo = next(params1)
# + deletable=true editable=true
import rospy
# Stand up a ROS node and the parameters the Selector reads at construction.
rospy.init_node("selector", log_level=rospy.INFO)
rospy.set_param("~ref_distance", 1.5)
rospy.set_param("~image_queue_length", 60)
rospy.set_param("~eval_method", "Spirit")
rospy.set_param("~thresh_distance", 0.01)
rospy.set_param("~thresh_yaw", 0.02)
# + deletable=true editable=true
# NOTE(review): `from ..src...` relative imports have no package context in a
# top-level notebook and normally raise ImportError -- confirm how this ran.
from ..src.ros.spirit.src.past_image_selector import Selector
from ..src.ros.spirit.src.evaluators import Spirit
my_selector = Selector(debug=True)
spirit = Spirit(my_selector)
# + deletable=true editable=true
# A test pose and a frame at a slightly different position (z differs by 1).
p = Pose.from_components([0, 1, 2], [3, 4, 5, 6])
f = Frame(Pose.generate_stamped([0, 1, 3], [3, 4, 5, 6]), 1)
# + deletable=true editable=true
# Create first frame
my_selector.tracked = True
my_selector.pose_callback(p.pose_stamped)
my_selector.image_callback(1)
# Select current frame
my_selector.pose_callback(p.pose_stamped)
# Update current frame
my_selector.pose_callback(p.pose_stamped)
# + deletable=true editable=true
my_selector.frames
# + deletable=true editable=true
spirit.select_best_frame()
# + deletable=true editable=true
spirit._evaluate_frame(p, f)
# + deletable=true editable=true
# Weighted score for this combination: OrderedDict has no `iteritems` under
# the notebook's Python 3 kernel -- `items()` works on both Python 2 and 3.
sum(coeff * getattr(spirit, fn)(p, f) for fn, coeff in combo.items())
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
| notebooks/01_parameter-selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Open AMTI .bsf file
#
# <NAME>
# When using the [NetForce](http://www.amti.biz/NetForce.aspx) software to acquire data from [AMTI](http://www.amti.biz/) force platforms, the data is saved in a proprietary binary format with the extension .bsf, but the NetForce software has an option to export data to ASCII format. The open source software [b-tk Biomechanical ToolKit](https://code.google.com/p/b-tk/) can open a .bsf file, but it works only for the old file style of NetForce (version 100) and the current style is version 105. In the new version, 105, the acquired data is saved as 8-byte doubles and already converted to engineering units, at least when the new AMTI amplifier is used and the full conditioned mode is selected in the AMTI configuration software. In version 100, the raw data from the analog-to-digital converter was saved as 2-byte integers. In the NetForce manual, AMTI provides the necessary information for anyone to write a code and extract all the data from .bsf files (although the manual still refers to .bsf file version 100).
#
# `AMTIbsf.py` (code at the end of this notebook) is a Python function to open .bsf files in the new style and extract the file metadata and data from force platforms. The function signature for its typical use is:
# ```Python
# from AMTIbsf import loadbsf
# data, mh, ih = loadbsf(filename, plot=1, axs=None)
# ```
# Where:
# - filename, plot, axs are: string with path and name of the file to be opened; option to plot the data; and option with the plot axes handle.
# - data, mh, ih are: numpy array with all the data; object Main header; and object with all Instrument headers (iH[0], iH[1], ...).
#
# Or from command line:
# ```
# python AMTIbsf.py filename
# ```
# This code can also open the 'shfile' memory-mapped file by NetForce, just use `shfile` as the filename.
#
# Let's see this function in action, first the customary imports and customization for the Python environment:
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_context("notebook", font_scale=1.2,
rc={"lines.linewidth": 2, "lines.markersize": 8, "axes.titlesize": 'x-large'})
# In my setup, functions are in a specific diretory, which I have to add it to Python's path:
# +
import sys
sys.path.insert(1, r'./../functions')
from AMTIbsf import loadbsf
# -
# Opening a .bsf file with acquired data of one force platform and with the option to plot the data:
data, mh, ih = loadbsf(filename='./../data/AMTIdata.bsf', plot=1)
# The headers (metadata from the .bsf file) are objects of the class AMTIbsf. You can visualize all the information from the headers typing for instance `mh` or ih[0]. Each piece of information can be accessed as a property of the header typing for instance `mh.rate`. The headers can also be accessed as dictionaries typing for instance `mh.__dict__`.
mh.rate
mh
ih[0].__dict__
# ## Function `AMTIbsf.py`
# +
# # %load ./../functions/AMTIbsf.py
"""Reads .bsf file (version 105) from AMTI NetForce software.
This module reads .bsf file (version 105) from AMTI NetForce software and
extracts the file metadata and data from force platforms.
The typical use of this module is, for example:
from AMTIbsf import loadbsf
data, mh, ih = loadbsf(filename, plot=1, axs=None)
Or from command line, for example:
python AMTIbsf.py filename
Where:
filename, plot, axs are: string with path and name of the file to be
opened; option to plot the data; and option with the plot axes handle.
data, mh, ih are: numpy array with all the data; object Main header;
and object with all Instrument headers (iH[0], iH[1], ...).
This code can also open the 'shfile' memory-mapped file by NetForce.
All the data is converted to SI units.
"""
__author__ = "<NAME>, https://github.com/demotu/BMC"
__version__ = "1.0.1"
__license__ = "MIT"
import sys
import mmap
from struct import unpack
import numpy as np
def loadbsf(filename, plot=1, axs=None):
    """Load .bsf file (version 105) from AMTI NetForce software.
    Parameters
    ----------
    filename : string
        Path and name of the .bsf file (version 105) to be opened.
    plot : int or bool, optional (default = 1)
        If 1 (True), plot data in matplotlib figure.
    axs : a matplotlib.axes.Axes instance, optional (default = None).
    Returns
    -------
    data : 2-D numpy array (possibly [Fx, Fy, Fz, Mx, My, Mz])
        Data is expressed in engineering units (N and Nm for a force plate).
    mh : object
        Object with all the information from the Main header.
    ih : object
        Object with all the information from the Instrument header(s).
        (ih[0], ih[1], ...)
    Notes
    -----
    The headers are objects of the class AMTIbsf. You can visualize all the
    information from the headers typing mh or ih[0]. Each information can be
    accessed as a property of the header typing for instance mh.rate
    The headers can also be accessed as dictionaries (useful for saving) typing
    mh.__dict__
    AMTI .bsf file has two versions: 100 (old) and 105 (new).
    In the new version, 105, the acquired data is saved as 8-byte doubles and
    already converted to engineering units, at least when the new AMTI amplifier
    is used and the full conditioned mode is selected in the AMTI configuration
    software. In version 100, the raw data from the analog-to-digital converter
    was saved as 2-byte integers.
    This code can also open the 'shfile' memory-mapped file by NetForce.
    All the data is converted to SI units.
    See the NetForce Users Manual from AMTI [1]_
    See this IPython notebook [2]_
    References
    ----------
    .. [1] http://www.amti.biz/NetForce.aspx
    .. [2] https://github.com/demotu/BMC/blob/master/notebooks/AMTIbsfFile.ipynb
    Examples
    --------
    >>> from AMTIbsf import loadbsf
    >>> data, mh, ih = loadbsf(filename='./../data/AMTIdata.bsf', plot=1)
    """
    MNC = 32  # Maximum number of channels defined by AMTI
    lbforce_per_N = 1.0/4.44822162  # AMTI conversion factor (version 105)
    # this constant is derived from:
    # g = 9.80665 # standard acceleration of free fall in m/s2 by ISO 80000-3:2006
    # onelb = 0.45359237 # 1 lb in kg by International yard and pound
    plot = int(plot)  # in case of command line input
    if filename == 'shfile':  # memory-mapped file by NetForce
        # NOTE(review): mmap's `tagname` parameter is Windows-only -- confirm
        # the shfile path is never exercised on other platforms.
        try:
            # bug in Python mmap: file can't be opened with unknown size
            # read at least up to the first instrument:
            nbytes = 4 + 968 + 948
            f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile')
            f.seek(0, 0)
        except IOError as err:
            # BUGFIX: when mmap() itself raises, `f` was never bound, so the
            # old `f.close()` here raised NameError and masked the real error.
            print('{0} I/O error: {1}'.format(filename, err))
            return
    else:  # file in the hard disk
        try:
            f = open(filename, 'rb')
        except IOError as err:
            # BUGFIX: `f` is unbound when open() fails; there is nothing to close.
            print('{0} I/O error: {1}'.format(filename, err))
            return
    # read Main header
    mh = ReadMainHeader(f)
    if filename == 'shfile':
        try:
            # try to open for all bytes in file:
            nbytes = 4 + mh.size_header + 948*mh.instHeadCount + 8*int(mh.numDatasets*mh.TNC)
            f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile')
        except IOError:
            # fall through and retry with the smaller estimate below
            pass
        try:
            # instrument header may have size < 948, do not try to open for all bytes yet:
            nbytes = 4 + mh.size_header + 948*mh.instHeadCount + 4*int(mh.numDatasets*mh.TNC)
            f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile')
        except IOError as err:
            print('{0} I/O error: {1}'.format(filename, err))
            f.close()
            return
    # read Instrument header
    ih = []
    f.seek(4 + mh.size_header, 0)  # advances file to the first instrument header
    for i in range(mh.instHeadCount):
        ih.append(ReadInstHeader(f, MNC, mh.TNC))
        # go to the next instrument header
        # NOTE(review): the target offset uses only ih[i].size_header, not the
        # cumulative size of all previous headers -- verify against files with
        # more than one instrument before relying on multi-instrument reads.
        f.seek(4 + mh.size_header + ih[i].size_header - f.tell(), 1)
    # check the file size and adjust for the shfile:
    current = f.tell()
    f.seek(0, 2)
    filesize = f.tell()
    if filesize - current != 8*int(mh.numDatasets*mh.TNC):
        if filename == 'shfile':  # open the file for all its bytes
            try:
                nbytes = current + 8*int(mh.numDatasets*mh.TNC)
                f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile')
            except Exception:
                # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; also fixed the "unnexpected" typo.
                print('Error: unexpected number of bytes for data in %s.' %filename)
                f.close()
                return
        else:
            print('Error: unexpected number of bytes for data in %s.' %filename)
            f.close()
            return
    f.seek(current, 0)
    # read data
    try:
        data = unpack('<'+int(mh.numDatasets*mh.TNC)*'d', f.read(int(mh.numDatasets*mh.TNC)*8))
    except Exception:
        print('Error reading data in %s.' %filename)
        f.close()
        return
    # BUGFIX: mh.numDatasets is a float (trl_lth, a double, times rate);
    # current numpy rejects float shapes in reshape, so cast explicitly.
    data = np.array(data).reshape((int(mh.numDatasets), int(mh.TNC)))
    # In NetForce file, data is always in Imperial units, scale factor for force platform:
    scale = np.array([1, 1, 1, 0.0254, 0.0254, 0.0254]) / lbforce_per_N
    for i in range(mh.num_of_plats):
        # In the NetForce file version 105, raw data is already converted
        data[:, ih[i].chans] = data[:, ih[i].chans] * scale
    f.close()
    if plot:
        # BUGFIX: the caller's `axs` was silently discarded (hard-coded
        # axs=None); forward it so loadbsf(..., axs=my_axes) works as documented.
        plotGRF(data, mh, ih, axs=axs)
    return data, mh, ih
def deco(b):
    """Decode a fixed-width byte field from a .bsf file: latin-1 text,
    truncated at the first NUL padding byte."""
    text = b.decode('latin1', errors='ignore')
    head, _, _ = text.partition('\x00')
    return head
def print_attr(classe, header='\nHeader:'):
    """Print *header* followed by the public attributes of *classe* and
    their values, sorted case-insensitively by attribute name."""
    print(header)
    public = (name for name in vars(classe) if not name.startswith('__'))
    for name in sorted(public, key=str.lower):
        print(name, ':', classe.__dict__[name])
def plotGRF(grf, mh, ih, axs=None):
    """Plot ground reaction forces of a .bsf file (version 105) from AMTI NetForce software.
    Plots Fx, Fy, Fz, Mx, My, Mz and the centre of pressure (COPx, COPy)
    against time, one line per force platform (6 columns per platform).
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('matplotlib is not available.')
        return
    # time axis reconstructed from the main header's acquisition rate
    time = np.linspace(0, (grf.shape[0]-1)/mh.rate, grf.shape[0])
    if axs is None:
        fig, axs = plt.subplots(4, 2, figsize=(10, 6), sharex=True)
        fig.subplots_adjust(left = 0.1, right = 0.96, top = 0.92, wspace = 0.25, hspace = 0.15)
    axs = axs.flatten()
    ylabel = ['Fx (N)', 'Fy (N)', 'Fz (N)',
              'Mx (Nm)', 'My (Nm)', 'Mz (Nm)', 'COPx (m)', 'COPy (m)']
    # one pass per platform: columns [6p..6p+5] are Fx..Mz for platform p
    for p in range(int(grf.shape[1]/6)):
        # COPx = -My/Fz, COPy = Mx/Fz (columns 4, 3 and 2 of the platform)
        cop = np.vstack((-grf[:, 4 + 6*p], grf[:, 3 + 6*p])/grf[:, 2 + 6*p]).T
        for i, axi in enumerate(axs):
            if i < 6:
                axi.plot(time, grf[:, i + 6*p], label='FP %d'%p)
            else:
                axi.plot(time, cop[:, i - 6], label='FP %d'%p)
            if p == 0:
                axi.set_ylabel(ylabel[i])
                axi.yaxis.set_major_locator(plt.MaxNLocator(4))
                axi.yaxis.set_label_coords(-.16, 0.5)
    # legend only helps when there is more than one platform (p > 0 at loop end)
    if p:
        axs[7].legend(loc='best', frameon=True, framealpha=.5)
    axs[6].set_xlabel('Time (s)')
    axs[7].set_xlabel('Time (s)')
    title = 'GRF data'
    if len(mh.name) and len(mh.test_type):
        title = 'GRF data (%s, %s)' %(mh.name, mh.test_type)
    plt.suptitle(title, fontsize='x-large')
    #plt.tight_layout(h_pad=.1) # doesn't work well with suptitle
    plt.show()
class ReadMainHeader:
    """Reader of the Main header of a .bsf file (version 105) from AMTI NetForce software.
    A class is used for the convenience of introspection of the header metadata.
    """
    def __init__(self, f):
        """Parse the main header from file object *f* (reads from offset 0)."""
        f.seek(0, 0) # just in case
        self.version_number = unpack('<i', f.read(4))[0] # .bsf file version number
        if self.version_number != 105:
            print('Error: this function was written for NetForce file version 105;'
                  ' the version of this file seems to be %d.' %self.version_number)
            # NOTE(review): sys.exit() in library code kills the host process
            # (e.g. a notebook kernel); raising ValueError would be friendlier.
            sys.exit(1)
        self.size_header = unpack('<i', f.read(4))[0] # Size of the structure in bytes
        self.num_of_plats = unpack('<i', f.read(4))[0] # Number of active platforms
        self.num_of_instrs = unpack('<i', f.read(4))[0] # Number of active instruments
        name = deco(unpack('<100s', f.read(100))[0]) # Subject's name
        name = ' '.join(name.split(', ')[::-1]).strip() # name is saved as 'Lastname, Firstname'
        self.name = ' '.join(name.split()) # remove multiple whitespaces
        self.test_date = deco(unpack('<12s', f.read(12))[0 ]) # Test date
        self.sub_dob = deco(unpack('<12s', f.read(12))[0]) # Subject's date of birth
        self.weight = unpack('<d', f.read(8))[0] # Subject's weight
        self.height = unpack('<d', f.read(8))[0] # Subject's height
        self.sex = deco(unpack('<4c', f.read(4))[0]) # Subject's sex
        self.trl_num = unpack('<i', f.read(4))[0] # Number of trials
        self.trl_lth = unpack('<d', f.read(8))[0] # Length of the trial in seconds
        self.zmth = unpack('<i', f.read(4))[0] # Zero method
        self.wtmth = unpack('<i', f.read(4))[0] # Weight method
        self.delayks = unpack('<i', f.read(4))[0] # Delay after keystroke
        self.trigmth = unpack('<i', f.read(4))[0] # Trigger method, 0:keystroke, 1:by chan, 2:external
        self.trigchan = unpack('<i', f.read(4))[0] # Triggering platform
        self.pre_trig = unpack('<i', f.read(4))[0] # Pre trigger values
        self.post_trig = unpack('<i', f.read(4))[0] # Post trigger values
        self.trigval = unpack('<d', f.read(8))[0] # Trigger value
        f.seek(4, 1) # 4 unidentified bytes
        self.rate = unpack('<i', f.read(4))[0] # Rate of acquisition
        self.protocol = deco(unpack('<150s', f.read(150))[0]) # Protocol file used
        self.test_type = deco(unpack('<200s', f.read(200))[0]) # Type of test, e.g., eyes open
        self.cmnfl = deco(unpack('<150s', f.read(150))[0]) # A file name which contains comments
        self.trldscfl = deco(unpack('<150s', f.read(150))[0]) # A file name with trial descriptions
        self.test_by = deco(unpack('<100s', f.read(100))[0]) # Examiner's name
        f.seek(2, 1) # 2 unidentified bytes
        self.units = unpack('<i', f.read(4))[0] # Units where 0 is English and 1 is metric
        self.instHeadCount = self.num_of_plats + self.num_of_instrs # Instrument header count
        # NOTE: trl_lth is a double, so numDatasets is a float; callers cast
        # it with int() before using it as a count.
        self.numDatasets = self.trl_lth * self.rate # Number of data sets in the file
        # Total number of channels: peek ahead through every instrument header,
        # summing each one's channel count, then rewind to the first header.
        TNC = 0
        for i in range(self.instHeadCount):
            instsize_header = unpack('<l', f.read(4))[0]
            f.seek(28, 1)
            TNC += unpack('<l', f.read(4))[0] # add number of channels in this instrument
            if i < self.instHeadCount - 1:
                f.seek(instsize_header - 40, 1) # go to next instrument header
            else:
                f.seek(4 + self.size_header, 0) # rewinds file to first instrument header
        self.TNC = TNC
    def __repr__(self):
        print_attr(self, header='\nMain header:')
        return '\n'
class ReadInstHeader:
    """Reader of the Instrument header of a .bsf file (version 105) from AMTI NetForce software.
    In version 105, length is always in inch, do the conversion to meter.
    WARNING: AMTI NetForce version 3.5.3 uses 39.4 instead of 39.37 to convert inch to meter.
    A class is used for the convenience of introspection of the header metadata.
    """
    def __init__(self, f, MNC, TNC):
        """Parse one instrument header from the current position of *f*.
        MNC is the maximum number of channels (32, fixed by AMTI); TNC is the
        total channel count from the main header, used to trim the fixed
        MNC-slot arrays down to the channels actually present.
        """
        self.MNC = MNC # Maximum number of channels
        self.size_header = unpack('<i', f.read(4))[0] # Size of the structure in bytes
        self.ser_num = unpack('<i', f.read(4))[0] # Serial number of the instrument or platform
        self.layout_num = unpack('<i', f.read(4))[0] # The layout or platform number
        self.model = deco(unpack('<20s', f.read(20))[0]) # Name of the instrument
        self.num_chans = unpack('<i', f.read(4))[0] # Number of channels
        self.tr_strt_chan = unpack('<i', f.read(4))[0] # True start channel, depends on hardware setup
        self.tr_end_chan = unpack('<i', f.read(4))[0] # True end channel, depends on hardware setup
        self.data_strt_chan = unpack('<i', f.read(4))[0] # Start channel in the data file
        self.data_end_chan = unpack('<i', f.read(4))[0] # End channel in the data file
        self.length = unpack('<f', f.read(4))[0] * 0.0254 # Length of the platform
        self.width = unpack('<f', f.read(4))[0] * 0.0254 # Width of the platform
        self.offset = np.array(unpack('<fff', f.read(3*4))) / 39.4 # Instrument x,y,z offset
        self.sens = np.array(unpack('<'+MNC*'f', f.read(MNC*4))[:TNC]) # Sensitivities (possible 32 channels)
        self.chans = np.array(unpack('<'+MNC*'i', f.read(MNC*4))[:TNC]) # Channel numbers (possible 32 channels)
        self.coord = np.array(unpack('<'+16*'f', f.read(16*4))) # Coordinate transformation
        self.interdist = np.array(unpack('<fff', f.read(3*4))) / 39.4 # x,y,z distances from the previous platform
        self.ampgain = np.array(unpack('<'+MNC*'f', f.read(MNC*4))[:TNC]) # Amplifier Gains (possible 32 channels)
        self.extvoltage = np.array(unpack('<'+MNC*'f', f.read(MNC*4))[:TNC]) # Excitation voltage
        self.acqrange = np.array(unpack('<'+MNC*'f', f.read(MNC*4))[:TNC]) # Acquisition card range (bits/volt)
        self.zero_period = unpack('<f', f.read(4))[0] # Tz
        self.latency_period = unpack('<f', f.read(4))[0] # Tl
        self.trigger_time = unpack('<f', f.read(4))[0] # Ttrig
        self.end_time = unpack('<f', f.read(4))[0] # Tend
        self.post_trig_time = unpack('<f', f.read(4))[0] # Tpost
        self.zero = np.array(unpack('<'+MNC*'i', f.read(MNC*4))[:TNC]) # Zero values
        self.rate = unpack('<i', f.read(4))[0] # Data set interval
        self.trig_val = unpack('<f', f.read(4))[0] # Trigger value
        self.end_val = unpack('<f', f.read(4))[0] # End value
    def __repr__(self):
        print_attr(self, header='\nInstrument header:')
        return '\n'
if __name__ == "__main__":
    # CLI entry: python AMTIbsf.py filename [plot] -- extra argv strings map
    # positionally onto loadbsf's parameters (plot is coerced via int()).
    loadbsf(*sys.argv[1:])
| notebooks/AMTIbsfFile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py27]
# language: python
# name: conda-env-py27-py
# ---
# MVA - MCMC
# TP4 - Bayesian analysis of a one-way random effects model
# <NAME>
# # Exercise 3: Bayesian analysis of a one-way random effects model
# Inverse Gamma distribution :
# $$ x \rightarrow \frac{1}{x^{a+1}} \exp\left(-\frac{b}{x}\right) \mathbb{1}_{\mathbb{R}^+}(x) $$
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
from scipy.stats import invgamma
def inv_gamma(a, b):
    """Draw one sample from an Inverse-Gamma distribution with shape ``a``
    and scale ``b``, using scipy's sampler."""
    return invgamma.rvs(a=a, scale=b)
## Test of the inverse gamma sampler :
# Draw 6000 samples from InvGamma(a=2, scale=2) and histogram them on [0, 4].
l = []
for i in range(6000):
    l.append(inv_gamma(2,2))
# NOTE(review): linspace(0,4,17) gives 16 bins of width 0.25, but `width`=0.02
# forces much thinner bars than the bins -- confirm this styling is intended.
plt.hist(l , range = (0,4), bins = np.linspace(0,4,17) , width= 0.02 ) ;
| TP4/tp4_exo3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.4
# language: julia
# name: julia-1.5
# ---
# # Functions and Modeling Applications
# Contents:
#
# - [Functions and Modeling Applications](#Functions-and-Modeling-Applications)
# - [Creating Functions](#Creating-Functions)
# - [Solvers and Optimization](#Solvers-and-Optimization)
# - [Linear Algebra](#Linear-Algebra)
# - [Finite Markov Chains](#Finite-Markov-Chains)
#
#
# This lab covers:
#
# (1) User-defined functions;
#
# (2) Solvers and optimization;
#
# (3) Linear algebra applications;
#
# (4) Modeling finite Markov chains.
#
# ## Creating Functions
#
# ## Solvers and Optimization
#
# ## Linear Algebra
#
# ## Finite Markov Chains
#
| week02/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # MNIST Visualization Example
#
# Real-time visualization of MNIST training on a CNN, using TensorFlow and [TensorDebugger](https://github.com/ericjang/tdb)
#
# The visualizations in this notebook won't show up on http://nbviewer.ipython.org. To view the widgets and interact with them, you will need to download this notebook and run it with a Jupyter Notebook server.
# + [markdown] deletable=true editable=true
# ## Step 1: Load TDB Notebook Extension
# + deletable=true editable=true language="javascript"
# Jupyter.utils.load_extensions('tdb_ext/main')
# + deletable=true editable=true
#import sys
#sys.path.append('/home/evjang/thesis/tensor_debugger')
import tdb
from tdb.examples import mnist, viz
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
# + [markdown] deletable=true editable=true
# ## Step 2: Build TensorFlow Model
# + deletable=true editable=true
# Unpack every graph node returned by the example model builder: the input
# placeholders, the prediction ops, the four weight variables (visualised
# below), and the training ops.
(train_data_node,
 train_labels_node,
 validation_data_node,
 test_data_node,
 # predictions
 train_prediction,
 validation_prediction,
 test_prediction,
 # weights
 conv1_weights,
 conv2_weights,
 fc1_weights,
 fc2_weights,
 # training
 optimizer,
 loss,
 learning_rate,
 summaries) = mnist.build_model()
# + [markdown] deletable=true editable=true
# ## Step 3: Attach Plotting Ops
# + deletable=true editable=true
def viz_activations(ctx, m):
    """TDB plot op: show the batch's prediction activations as a grayscale
    matrix (`ctx` is the debugger context, unused here; `m` is transposed so
    batch runs along x and digit activation along y)."""
    transposed = m.T
    plt.matshow(transposed, cmap=plt.cm.gray)
    plt.ylabel("Digit Activation")
    plt.xlabel("Batch")
    plt.title("LeNet Predictions")
# + deletable=true editable=true
# plotting a user-defined function 'viz_activations'
p0=tdb.plot_op(viz_activations,inputs=[train_prediction])
# weight variables are of type tf.Variable, so we need to find the corresponding tf.Tensor instead
g=tf.get_default_graph()
p1=tdb.plot_op(viz.viz_conv_weights,inputs=[g.as_graph_element(conv1_weights)])
p2=tdb.plot_op(viz.viz_conv_weights,inputs=[g.as_graph_element(conv2_weights)])
p3=tdb.plot_op(viz.viz_fc_weights,inputs=[g.as_graph_element(fc1_weights)])
p4=tdb.plot_op(viz.viz_fc_weights,inputs=[g.as_graph_element(fc2_weights)])
p2=tdb.plot_op(viz.viz_conv_hist,inputs=[g.as_graph_element(conv1_weights)])
ploss=tdb.plot_op(viz.watch_loss,inputs=[loss])
# + [markdown] deletable=true editable=true
# ## Step 4: Download the MNIST dataset
#
# + deletable=true editable=true
# where mnist.get_data will place the downloaded dataset
download_dir='/tmp/'
# + [markdown] deletable=true editable=true
# ## Step 5: Debug + Visualize!
#
# Upon evaluating plot nodes p1,p2,p3,p4,ploss, plots will be generated in the Plot view on the right.
# + deletable=true editable=true
# return the TF nodes corresponding to graph input placeholders
(train_data,
 train_labels,
 validation_data,
 validation_labels,
 test_data,
 test_labels) = mnist.get_data(download_dir)
# + deletable=true editable=true
# start the TensorFlow session that will be used to evaluate the graph
s=tf.InteractiveSession()
tf.global_variables_initializer().run()
# + deletable=true editable=true
BATCH_SIZE = 64
NUM_EPOCHS = 5
TRAIN_SIZE=10000
for step in range(NUM_EPOCHS * TRAIN_SIZE // BATCH_SIZE):
    # cycle through the training set in contiguous BATCH_SIZE windows
    offset = (step * BATCH_SIZE) % (TRAIN_SIZE - BATCH_SIZE)
    batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
    batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
    feed_dict = {
        train_data_node: batch_data,
        train_labels_node: batch_labels
    }
    # run training node and visualization node
    status,result=tdb.debug([optimizer,p0], feed_dict=feed_dict, session=s)
    # every 10th step also evaluate the loss and the weight/loss plot ops
    if step % 10 == 0:
        status,result=tdb.debug([loss,p1,p2,p3,p4,ploss], feed_dict=feed_dict, breakpoints=None, break_immediately=False, session=s)
        print('loss: %f' % (result[0]))
# -
| mnist_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:aind]
# language: python
# name: conda-env-aind-py
# ---
# # Assignment 2 (Data Exploration and Preprocessing):
#
# #### import modules :
# import modules
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# #### load and explore dataset :
# read the data
data = pd.read_csv('segmentation.data.txt', header = None, delimiter = ",")
#get the attributes (20 attribute)
# NOTE(review): `data[:1]` is a ROW slice (the first row), not the attribute
# names, so len(atrribute_names) is 1 and the printed count is wrong; the
# variable name is also misspelled ("atrribute"). Confirm intent before fixing.
atrribute_names = data[:1]
print ("number of attributes : ", len(atrribute_names))
data = data.sort_values([0], ascending=True)
# get the data (2310 sample)
print ("number of dataset : ", len(data[1:]))
# get the number of classes (7 class)
print ("number of classes :",len(set(data[0])))
print("classes name: ", set(data[0]))
# all ordered index pairs (i, j) over the 20 attributes -- note this includes
# self-pairs (i, i) and both orderings of each pair
pair_attribute = [(i, j) for i in range(20) for j in range(20)]
# ### Data Exploration :
# #### Pearson’s correlation:
#
# NOTE(review): column 0 holds the class labels (used with set() above), so
# passing the whole frame to corrcoef will likely fail or be meaningless on
# non-numeric data -- presumably the numeric columns should be selected first.
np.corrcoef(data,data)
| lab_2/.ipynb_checkpoints/Image_Segmentation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
path = r"D:\datasets\mimiciii"
import pandas as pd
import vaex
import os
import datetime
import re
import time
os.chdir(path)
# %%time
chartevents = vaex.open("CHARTEVENTS.csv.hdf5")
labevents = vaex.open("LABEVENTS.csv.hdf5")
inputevents_cv = vaex.open("INPUTEVENTS_CV.csv.hdf5")
inputevents_mv = vaex.open("INPUTEVENTS_MV.csv.hdf5")
predcriptions = vaex.open("PRESCRIPTIONS.csv.hdf5")
# %%time
icustays = pd.read_csv("ICUSTAYS.csv",index_col=0,parse_dates=[9,10])
procedure_mv = pd.read_csv("PROCEDUREEVENTS_MV.csv",index_col=0,parse_dates=[4,5])
procedure_icd = pd.read_csv("PROCEDURES_ICD.csv")
os.chdir(r"F:\myresearch\TraumaHba1\data2")
admissions = pd.read_csv("trauma_admissions.csv",index_col=0,parse_dates=[3,4,5,14,15,21,22,23,24])
icu_trauma = admissions[admissions.HADM_ID.isin(icustays.HADM_ID)]
icu_hadm_id = icu_trauma.HADM_ID
# +
labevents_trauma = labevents[labevents.HADM_ID.isin(icu_hadm_id)]
chartevents_trauma = chartevents[chartevents.HADM_ID.isin(icu_hadm_id)]
predcriptions_trauma = predcriptions[predcriptions.HADM_ID.isin(icu_hadm_id)]
procedure_icd_trauma = procedure_icd[procedure_icd.HADM_ID.isin(icu_hadm_id)]
procedure_mv_trauma = procedure_mv[procedure_mv.HADM_ID.isin(icu_hadm_id)]
# -
"""
从labevents 表中提取首次指标,加入总表
lab_item_id:字符串,所需项目的ITEMID,为单一值
variable_name:目标表格中新变量(新列)的名称
注意事项:1、总表需要定义成全局变量,并且需要根据具体情况改名
2、仅适用vaex DataFrame格式数据表
"""
def add_vari_labevents_id_hdf(lab_item_id,variable_name):
    """Add each admission's first-recorded lab value to the master table.

    (English translation of the Chinese notes above:) extracts the *first*
    measurement of one LABEVENTS item and appends it to the summary table.
    Filters the global `labevents_trauma` (vaex) to ITEMID == `lab_item_id`,
    finds each HADM_ID's earliest CHARTTIME, and left-merges that first
    VALUENUM into the global `icu_trauma` as column `variable_name`.
    """
    global icu_trauma
    items = labevents_trauma[labevents_trauma["ITEMID"]==lab_item_id]
    items_df = items.to_pandas_df()
    # bpd_chart_df.info()
    items_df["CHARTTIME"] = items_df["CHARTTIME"].apply(lambda x:pd.to_datetime(x))
    first_time = items_df.groupby("HADM_ID").agg({"CHARTTIME":"min"})
    # NOTE(review): merging on CHARTTIME alone can match a *different*
    # admission's row when two admissions share an identical first timestamp;
    # merging on ["HADM_ID", "CHARTTIME"] would be safer -- confirm.
    events=pd.merge(first_time,items_df,on="CHARTTIME",how="left")[["HADM_ID","VALUENUM"]]
    events.drop_duplicates(subset=["HADM_ID"],inplace=True)
    icu_trauma = pd.merge(icu_trauma,events,on="HADM_ID",how="left")
    icu_trauma.rename(columns={"VALUENUM":variable_name},inplace=True)
labitems = pd.read_excel("labitems_id.xlsx")
# %%time
for itemid,lab_item in zip(labitems.itemid,labitems.lab_items):
#print(itemid)
#print(lab_item)
add_vari_labevents_id_hdf(itemid,lab_item)
print(lab_item+" has been added.")
"""
从chartevents 表中提取首次指标,加入总表
chart_id_list:列表,所需项目的ITEMID集合
variable_name:目标表格中新变量(新列)的名称
注意事项:1、总表需要定义成全局变量,并且需要根据具体情况改名
2、仅适用vaex DataFrame格式数据表
"""
def add_vari_chartevents(chart_id_list,variable_name):
    """Add each admission's first-recorded chart value to the master table.

    (English translation of the Chinese notes above:) extracts the *first*
    measurement matching any ITEMID in `chart_id_list` from CHARTEVENTS and
    appends it to the summary table.  Filters the global `chartevents_trauma`
    (vaex), finds each HADM_ID's earliest CHARTTIME, and left-merges that
    first VALUENUM into the global `icu_trauma` as column `variable_name`.
    """
    global icu_trauma
    chart = chartevents_trauma[chartevents_trauma["ITEMID"].isin(chart_id_list)]
    chart_df = chart.to_pandas_df()
    # bpd_chart_df.info()
    chart_df[["CHARTTIME","STORETIME"]] = chart_df[["CHARTTIME","STORETIME"]].apply(lambda x:pd.to_datetime(x))
    first_time = chart_df.groupby("HADM_ID").agg({"CHARTTIME":"min"})
    # NOTE(review): same CHARTTIME-only merge caveat as the labevents helper --
    # identical first timestamps across admissions could cross-match rows.
    events=pd.merge(first_time,chart_df,on="CHARTTIME",how="left")[["HADM_ID","VALUENUM"]]
    events.drop_duplicates(subset=["HADM_ID"],inplace=True)
    icu_trauma = pd.merge(icu_trauma,events,on="HADM_ID",how="left")
    icu_trauma.rename(columns={"VALUENUM":variable_name},inplace=True)
chartitems = pd.read_excel("chartitems_id.xlsx")

# ITEMID groups for each charted vital-sign variable.  (The original also
# evaluated ``dict(zip(chartitems.chart_items, chartitems.itemid))`` and
# discarded the result; that dead expression has been removed.)
chartitems_dict = {'HeartRate': [211, 220045],
                   'SysBP': [51, 442, 455, 6701, 220179, 220050],
                   'DiasBP': [8368, 8440, 8441, 8555, 220180, 220051],
                   'MeanBP': [456, 52, 6702, 443, 220052, 220181, 225312],
                   'RespRate': [615, 618, 220210, 224690],
                   'TempF': [223761, 678],
                   'TempC': [223762, 676],
                   'SpO2': [646, 220277],
                   'Glucose': [807, 811, 1529, 3745, 3744, 225664, 220621, 226537]
                   }

# %%time
# Add each chart variable's first recorded value to the master table.
for chart_item, itemid in chartitems_dict.items():
    add_vari_chartevents(itemid, chart_item)
    print(chart_item + " has been added.")

icu_trauma.shape
# index=False (was the falsy literal 0) keeps the row index out of the CSV.
icu_trauma.to_csv("icu_trauma.csv", index=False)
| icu_trauma.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlpbook
# language: python
# name: nlpbook
# ---
# +
import collections
import numpy as np
import pandas as pd
import re
from argparse import Namespace
# -
# Run configuration for the Yelp split-generation script.
_config = {
    "raw_train_dataset_csv": "data/yelp/raw_train.csv",
    "raw_test_dataset_csv": "data/yelp/raw_test.csv",
    "train_proportion": 0.7,   # train + val cover the whole training file
    "val_proportion": 0.3,
    "output_munged_csv": "data/yelp/reviews_with_splits_full.csv",
    "seed": 1337,
}
args = Namespace(**_config)
# Read raw data (no header row; columns are rating then review text) and
# drop any rows whose review is missing.
train_reviews = pd.read_csv(args.raw_train_dataset_csv, header=None, names=['rating', 'review'])
train_reviews = train_reviews[train_reviews.review.notna()]
test_reviews = pd.read_csv(args.raw_test_dataset_csv, header=None, names=['rating', 'review'])
test_reviews = test_reviews[test_reviews.review.notna()]
train_reviews.head()
test_reviews.head()
# Unique classes
set(train_reviews.rating)
# Group the training reviews by rating: rating -> list of row dicts.
by_rating = collections.defaultdict(list)
for _, row in train_reviews.iterrows():
    by_rating[row.rating].append(row.to_dict())
# +
# Create split data: shuffle within each rating class so the class balance
# is preserved across train/val.
final_list = []
np.random.seed(args.seed)
for _, item_list in sorted(by_rating.items()):
    np.random.shuffle(item_list)
    n_total = len(item_list)
    n_train = int(args.train_proportion * n_total)
    n_val = int(args.val_proportion * n_total)
    # Tag each data point with the split it belongs to (items beyond
    # n_train + n_val — rounding leftovers — keep no 'split' key, as before).
    for position, item in enumerate(item_list):
        if position < n_train:
            item['split'] = 'train'
        elif position < n_train + n_val:
            item['split'] = 'val'
    final_list.extend(item_list)
# -
# Every test-file row is labelled 'test'.
for _, row in test_reviews.iterrows():
    row_dict = row.to_dict()
    row_dict['split'] = 'test'
    final_list.append(row_dict)
# Write split data to file
final_reviews = pd.DataFrame(final_list)
final_reviews.split.value_counts()
final_reviews.review.head()
final_reviews[pd.isnull(final_reviews.review)]
# +
# Preprocess the reviews
def preprocess_text(text):
    """Normalise a review: lower-case, pad punctuation with spaces, and
    collapse every other non-letter run into a single space.

    Non-string floats (NaN leaking through the earlier null filter) are
    coerced to their string form instead of crashing on ``.lower()``.
    """
    if isinstance(text, float):
        # The original printed the float and then crashed on text.lower();
        # coerce so the apply() over the full column cannot fail.
        text = str(text)
    text = text.lower()
    # Surround . , ! ? with spaces so they become standalone tokens.
    text = re.sub(r"([.,!?])", r" \1 ", text)
    # Replace every other non-letter run (digits, symbols, whitespace).
    text = re.sub(r"[^a-zA-Z.,!?]+", r" ", text)
    return text
# Normalise every review in place.
final_reviews.review = final_reviews.review.apply(preprocess_text)
# -
# Map the numeric ratings to sentiment labels (1 -> negative, 2 -> positive);
# any other rating maps to None via dict.get.
final_reviews['rating'] = final_reviews.rating.apply({1: 'negative', 2: 'positive'}.get)
final_reviews.head()
final_reviews.to_csv(args.output_munged_csv, index=False)
| chapters/chapter_3/3_5_yelp_dataset_preprocessing_FULL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # EECS 445: Machine Learning
#
# ## Hands On 10: Bias Variance Tradeoff
#
# Consider a sequence of IID random variable:
# $$
# X_i =
# \begin{cases}
# 100 & \text{ with prob. } 0.02 \\
# 0 & \text{ with prob. } 0.97 \\
# -100 & \text{ with prob. } 0.01 \\
# \end{cases}
# $$
# The true mean of $X_i$ is
# $$
# 0.02 \times 100 + 0.97 \times 0 + 0.01 \times -100 = 1
# $$
#
# We want to estimate the true mean of this distribution. We will consider two different estimators of the true mean.
# Let's say you take three samples $X_1, X_2, X_3$, and you compute the **empirical mean** $Z=\frac{X_1 + X_2 + X_3}{3}$ and **empirical median** $Y$ of these three samples (recall that the median is obtained by sorting $X_1, X_2, X_3$ and then choosing the middle (2nd) entry).
#
#
# ### What is the bias-variance tradeoff of the $Y$ and $Z$ for estimating the true mean of the above distribution?
#
# * They are both unbiased estimators of the true mean, and have the same variance.
# * The median has higher bias and higher variance.
# * The mean has higher bias and higher variance.
# * They both have no bias, but the mean has lower variance.
# * The mean has no bias but some variance, and the median has non-zero bias but less variance
#
#
# ### Solution
# > The last answer is correct.
#
# > The empirical mean of a sample of random $n$ IID random variables is always an unbiased estimate of the true mean. However, the empirical mean estimator can have high variance. Here it is $ \text{Var}(Z) = \frac{\text{Var}(X_i)}{3} = \frac{(100-1)^2 \times 0.02 + (-100 - 1)^2 \times 0.01 + (0-1)^2 \times 0.97}{3} = 99 \frac 2 3.$
#
# >The median, on the other hand, is a biased estimator. It is a little bit hard to calculate exactly, but here goes:
# $$
# median = \begin{cases} 100 & w.p. 0.02^3 + \binom{3}{1} 0.02^2 \times 0.98 \\
# -100 & w.p. 0.01^3 + \binom{3}{1} 0.01^2 \times 0.99
# \end{cases}
# $$
# If you work this out, you see that the median on average is $0.089$. This means that the $\text{bias}^2 \approx (1-0.089)^2$ which is no more than 1. Using a similar argument, you can check that the variance of the median is no more than 20. This can be checked experimentally!
# ## Derivation of the Bias-Variance Tradeoff equation
# Assume that we have noisy data, modeled by $f = y + \epsilon$, where $\epsilon \in \mathcal{N}(0,\sigma)$. Given an estimator $\hat{f}$, the squared error can be derived as follows:
#
# $$
# \begin{align}
# \mathbb{E}\left[\left(\hat{f} - f\right)^2\right] &= \mathbb{E}\left[\hat{f}^2 - 2f\hat{f} + f^2\right]\\
# &= \mathbb{E}\left[\hat{f}^2\right] + \mathbb{E}\left[f^2\right] - 2\mathbb{E}\left[f\hat{f}\right] \text{ By linearity of expectation} \\
# \end{align}
# $$
# Now, by definition, $Var(x) = \mathbb{E}\left[x^2\right] - \left(\mathbb{E}\left[x\right]\right)^2$. Substituting this definition into the equation above, we get:
# $$
# \begin{align}
# \mathbb{E}\left[\hat{f}^2\right] + \mathbb{E}\left[f^2\right] - 2\mathbb{E}\left[f\hat{f}\right] &= Var(\hat{f}) + \left(\mathbb{E}[\hat{f}]\right)^2 + Var(f) + \left(\mathbb{E}[f]\right)^2 - 2\,\mathbb{E}[f]\,\mathbb{E}[\hat{f}] \\
# &= Var(\hat{f}) + Var(f) + \left(\mathbb{E}[\hat{f}] - f\right)^2\\
# &= \boxed{\sigma + Var(\hat{f}) + \left(\mathbb{E}[\hat{f}] - f\right)^2}
# \end{align}
# $$
#
# The first term $\sigma$ is the irreducible error due to the noise in the data (from the distribution of $\epsilon$). The second term is the **variance** of the estimator $\hat{f}$ and the final term is the **bias** of the estimator. There is an inherent tradeoff between the bias and variance of an estimator. Generally, more complex estimators (think of high-degree polynomials as an example) will have a low bias since they will fit the sampled data really well. However, this accuracy will not be maintained if we continued to resample the data, which implies that the variance of this estimator is high.
# ## Activity 1: Bias Variance Tradeoff
# We will now see try to see the inherent tradeoff between bias and variance of estimators through linear regression. Consider the following dataset.
# +
import numpy as np
import matplotlib.pyplot as plt
from numpy.matlib import repmat
# Bug fix: the original line was the incomplete statement ``from sklearn``
# (a SyntaxError).  PolynomialFeatures is what the fitting loop below uses.
from sklearn.preprocessing import PolynomialFeatures

degrees = [1, 2, 3, 4, 5]
# define data
n = 20        # samples per draw
sub = 1000    # number of resampled training sets
mean = 0      # noise mean
std = 0.25    # noise standard deviation
# define test set, drawn from the same noisy sine-wave model
Xtest = np.random.random((n, 1)) * 2 * np.pi
ytest = np.sin(Xtest) + np.random.normal(mean, std, (n, 1))
# pre-allocate result holders
preds = np.zeros((n, sub))
bias = np.zeros(len(degrees))
variance = np.zeros(len(degrees))
mse = np.zeros(len(degrees))
# dense grid used only for plotting the fitted curves
values = np.expand_dims(np.linspace(0, 2 * np.pi, 100), 1)
# -
# Let's try several polynomial fits to the data:
#
from sklearn.preprocessing import PolynomialFeatures  # local import so this cell stands alone

for j, degree in enumerate(degrees):
    # Feature map 1, x, x^2, ..., x^degree for this polynomial order.
    poly = PolynomialFeatures(degree)
    for i in range(sub):
        # create data - sample from sine wave
        x = np.random.random((n, 1)) * 2 * np.pi
        y = np.sin(x) + np.random.normal(mean, std, (n, 1))
        # design matrix corresponding to degree - ex: 1, x, x^2, x^3...
        A = poly.fit_transform(x)
        # fit model using least squares solution (linear regression)
        coeffs = np.linalg.lstsq(A, y, rcond=None)[0]
        # store predictions for each sampling
        preds[:, i] = poly.fit_transform(Xtest).dot(coeffs)[:, 0]
        # plot 9 images
        if i < 9:
            plt.subplot(3, 3, i + 1)
            plt.plot(values, poly.fit_transform(values).dot(coeffs), x, y, '.b')
            plt.axis([0, 2 * np.pi, -2, 2])
    plt.suptitle('PolyFit = %i' % (degree))
    plt.show()
    # bias^2: squared gap between the average prediction and the true function
    bias[j] = np.mean((preds.mean(axis=1, keepdims=True) - np.sin(Xtest)) ** 2)
    # variance: spread of the predictions around their own mean
    variance[j] = np.mean(np.var(preds, axis=1))
    # MSE against the noisy test targets
    mse[j] = np.mean((preds - repmat(ytest, 1, sub)) ** 2)
# Let's plot the data with the estimators!
# One panel per statistic, sharing the x-axis of polynomial degrees.
for panel, (series, label) in enumerate(
        [(bias, 'bias'), (variance, 'variance'), (mse, 'MSE')], start=1):
    plt.subplot(3, 1, panel)
    plt.plot(degrees, series)
    plt.title(label)
plt.show()
| handsOn_lecture10_bias-variance_tradeoff/draft/bias_variance_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Training a generative model on MNIST
#
# ### Preliminaries
#
# First, we have to import all the necessary packages and classes.
import torch
import numpy as np
from matplotlib import pyplot as plt
from torch import nn
from torch.autograd import Variable
from torch_two_sample import SmoothKNNStatistic, SmoothFRStatistic
from IPython.display import clear_output
from torchvision.utils import make_grid
# We will need the following function later on to show the generated images.
# +
# It is from the torchvision documentation on ``make_grid``.
# %matplotlib inline
def show(img):
    """Render a (C, H, W) image tensor with matplotlib."""
    pixels = img.numpy()
    # matplotlib expects channels last: (H, W, C).
    plt.imshow(pixels.transpose((1, 2, 0)), interpolation='nearest')
# We run on the GPU whenever possible.
cuda = torch.cuda.is_available()
# Let us fix the seed for reproducibility.
torch.manual_seed(0)
# CUDA keeps its own RNG state; seed it too so GPU runs are reproducible.
if cuda:
    torch.cuda.manual_seed(0)
# -
# ### Step 1: Define the generative model
#
# We will present a simple example of learning an implicit generative model on MNIST. As the base measure we will use a 10-dimensional Gaussian, and the following generative model:
# +
noise_dim = 10         # The dimension of Q_0.
ambient_dim = 28 * 28  # The dimension of the generated samples.

# Fully-connected generator: widen from the noise dimension up to the
# flattened image size, with ReLU activations between the linear layers.
_layers = [
    nn.Linear(noise_dim, 64), nn.ReLU(),
    nn.Linear(64, 256), nn.ReLU(),
    nn.Linear(256, 256), nn.ReLU(),
    nn.Linear(256, 1024), nn.ReLU(),
    nn.Linear(1024, ambient_dim),
    nn.Tanh(),  # Squash the output to [-1, 1].
]
generator = nn.Sequential(*_layers)
if cuda:  # Move the model to the GPU if necessary.
    generator = generator.cuda()
# -
# ### Step 2: Choose an optimizer
#
# To optimize over the parameters of the generator we will use the Adam optimizer.
# Adam over the generator parameters; lr=1e-3 is the conventional default.
optimizer = torch.optim.Adam(generator.parameters(), lr=1e-3)
# ### Step 3: Pick a loss function
#
# As a loss function we will use the smoothed 1-NN loss with a batch size of 256.
batch_size = 256
# The 1-NN is the fastest, but the results seem to look better with FR.
# Arguments: sizes of the two samples (both batch_size), the cuda flag,
# k=1 neighbour, and t-statistic output.
loss_fn = SmoothKNNStatistic(
    batch_size, batch_size, cuda, 1, compute_t_stat=True)
# loss_fn = SmoothFRStatistic(
#     batch_size, batch_size, cuda, compute_t_stat=True)
# ### Step 4: Load the data
#
# Next, let us load the MNIST dataset using torchvision.
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
# Download (if needed) into 'mnist_dir' and load MNIST as tensors.
dataset = MNIST('mnist_dir', transform=ToTensor(), download=True)
# ### Step 5: Train the model
#
# We can then train the model for 100 epochs.
# +
from torch.utils.data import DataLoader
# We will use tqdm to show a progress bar.
# If you don't have it, you can install it with ``pip install tqdm``.
from tqdm import tqdm_notebook as tqdm
alphas = [0.1]   # The smoothing strength used.
n_epochs = 100   # Number of epochs.
avg_losses = []  # Per-epoch average loss, plotted at the end.
for epoch in range(1, n_epochs + 1):
    data_loader = DataLoader(dataset, batch_size=batch_size, drop_last=True,
                             pin_memory=cuda, shuffle=True)
    # Note that we drop the last batch as each batch has to be of same size.
    # We create two tensors that will be filled with noise.
    # The first one is used for training, while the second one for visualization.
    if cuda:
        noise_tensor = torch.cuda.FloatTensor(batch_size, noise_dim)
        noise_plot = torch.cuda.FloatTensor(100, noise_dim)
    else:
        noise_tensor = torch.FloatTensor(batch_size, noise_dim)
        noise_plot = torch.FloatTensor(100, noise_dim)
    losses = []
    # Pre-0.4 PyTorch Variable wrapper; it shares storage with noise_tensor,
    # so the in-place normal_() below refreshes what the generator sees.
    noise = Variable(noise_tensor)
    for batch, _ in tqdm(data_loader, leave=False):
        # We want one observation per row.
        # We moreover scale the images to [-1, 1] as we use a Tanh layer.
        batch = 2 * batch.view(batch_size, -1) - 1
        if cuda:
            batch = batch.cuda()
        noise_tensor.normal_()  # Sample the noise.
        optimizer.zero_grad()
        # Two-sample statistic between the real batch and a generated batch.
        loss = loss_fn(Variable(batch), generator(noise), alphas=alphas)
        loss.backward()
        optimizer.step()
        # NOTE(review): ``loss.data[0]`` is the pre-0.4 idiom; on modern
        # PyTorch this would need to be ``loss.item()``.
        losses.append(loss.data[0])
    # Now that the epoch has finished, we will draw samples from the model.
    clear_output()
    avg_loss = sum(losses) / len(losses)
    print('epoch {0:>2d}, avg loss {1}'.format(epoch, avg_loss))
    avg_losses.append(avg_loss)
    # We will draw 100 images and show them on a 10x10 grid.
    noise_plot.normal_()
    samples = generator(Variable(noise_plot)).view(-1, 1, 28, 28).cpu().data
    show(make_grid(samples, nrow=10))
    plt.show()
# Loss curve over all epochs.
plt.plot(avg_losses)
plt.show()
| notebooks/mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/viaa-bot/Linear-Algebra-58019/blob/main/Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="UWQ4bxqP7EBw"
# ##Question 1. Create a Python code that displays a square matrix whose length is 5 (10 points)
#
# + colab={"base_uri": "https://localhost:8080/"} id="HogCW5UV60gv" outputId="0422ed55-ac12-4a73-8b4b-87f2a793be3f"
import numpy as np

# 5x5 square matrix whose rows hold the constants 5 through 9.
A = np.array([[value] * 5 for value in range(5, 10)])
print(A)
# + [markdown] id="M8kom2EI8OUU"
# ##Question 2. Create a Python code that displays a square matrix whose elements below the principal diagonal are zero (10 points)
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="nBjAci_s72Uj" outputId="cf152307-bf9a-4753-9fc7-e4ec98e52c8b"
import numpy as np

# Bug fix: the original called ``np.triu(a)`` with an undefined name ``a``
# (NameError).  Start from a fully-populated 5x5 matrix so the zeroing of
# the sub-diagonal entries is actually visible in the output.
C = np.arange(1, 26).reshape(5, 5)
# np.triu zeroes every element below the principal diagonal.
A = np.triu(C)
print(A)
# + [markdown] id="5q6NjamoBZw6"
# ##Question 3. Create a Python code that displays a square matrix which is symmetrical (10 points)
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="K_RBL4vX854X" outputId="db72db34-f309-4476-b8af-e4f99bcb4684"
import numpy as np

A = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])  # base matrix
print('Matrix A:')
print(A)
print('\nTranspose of A:')
print(A.T)
# The product of any matrix with its own transpose is symmetric.
B = A @ A.T
print('\nSymmetric Matrix:')
print(B)
# + [markdown] id="gqRIidHu5mqK"
# ##Question 4. What is the inverse of matrix C? Show your solution by python coding. (20 points)
# 
#
# + colab={"base_uri": "https://localhost:8080/"} id="MdwfXhxW6cfB" outputId="8ef7e681-ce7e-43df-f2ff-4ecab8d74135"
import numpy as np

# Matrix from the exam question (written as C there, named A in code).
A = np.array([[1, 2, 3],
              [2, 3, 3],
              [3, 4, -2]])
print("Matrix A:")
print(A)
inv_A = np.linalg.inv(A)  # inverse of the matrix
print("\nInverse of Matrix A:")
print(inv_A)
# + [markdown] id="C8T4GYGK4UGm"
# ##Question 5. What is the determinant of the given matrix in Question 4? Show your solution by python coding. (20 points)
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="cGB7tGGb3ho9" outputId="606644ef-fda1-46bb-a0ef-de380ff1479e"
import numpy as np

# Same matrix as Question 4.
A = np.array([[1, 2, 3],
              [2, 3, 3],
              [3, 4, -2]])
print("Matrix A:")
print(A)
print("\nDeterminant of Matrix A:")
determinant = np.linalg.det(A)
# det() returns a float; round it to report the exact integer value.
print(round(determinant))
# + [markdown] id="H0wskT9d4oP9"
# ##Question 6. Find the roots of the linear equations by showing its python codes (30 points)
#
# 5X1 + 4X2 + X3 = 3.4
#
# 10X1 + 9X2 + 4X3 = 8.8
#
# 10X1 + 13X2 + 15X3 = 19.2
# + colab={"base_uri": "https://localhost:8080/"} id="ZvF1tQ9w4rQF" outputId="725c2725-151d-425d-e480-be29813fca17"
import numpy as np

# Coefficient matrix of the linear system.
A = np.array([[5, 4, 1],
              [10, 9, 4],
              [10, 13, 15]])
print("Matrix A:")
print(A)
print ("\nInverse of Matrix A:")
inv_A = np.linalg.inv(A)
print(inv_A)
# Column vector of the right-hand-side constants.
B = np.array([[3.4], [8.8], [19.2]])
print("\nMatrix B:")
print(B)
print("\nValue of X1, X2, X3:")
# x = A^{-1} b gives the roots of the system.
X = inv_A @ B
print(X)
| Midterm_Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# 
# <center>
# Figure 1: <i>The graph showing the relationship between experts, documents and expertise topics</i>
# </center>
#
# In this notebook, we demonstrate the example of the entire ExpFinder process with restricted example, as shown in the Figure 1. In this example, we set some restrictions as follows:
#
# 1. $e1$ is expertise in NLP and its applications in healthcare based on the associated documents (e.g. $d1$ and $d2$).
# 2. $e2$ is expertise in three different areas such as NLP, computer vision and their applications in healthcare based on the associated documents (e.g. $d1$ and $d2$).
# 3. $e3$ is only expertise in NLP because he/she only associates to a document $d2$.
# # Libraries
# +
import pandas as pd
import numpy as np
from ast import literal_eval
from transformers import BertTokenizer, BertModel
import networkx as nx
from src.controller import generator, trainer
from src.lib import extractor
# -
# # Data preparation
DATA_PATH = './data/'

def _load_literal(path):
    # Read a text file containing a Python literal (list/dict) and parse it.
    with open(path) as handle:
        return literal_eval(handle.read())

def prepare_data():
    '''Read the raw documents, expert-document pairs, stopwords and topics
    from the existing data source.'''
    doc_df = pd.read_csv('{}raw_data.csv'.format(DATA_PATH))
    ed_df = pd.read_csv('{}ep_df.csv'.format(DATA_PATH))
    stopwords = _load_literal('{}stopword.txt'.format(DATA_PATH))
    topics = _load_literal('{}topics.txt'.format(DATA_PATH))
    return doc_df, ed_df, stopwords, topics

doc_df, ed_df, stopwords, topics = prepare_data()
display(doc_df)
display(ed_df)
# # Data generation
# ## Expert-document matrix
# Expert-document association matrix built by the project generator from
# the expert-document pair table.
ed_matrix = generator.generate_ed_matrix(ed_df)
display(ed_matrix)
# ## Document-phrase matrix
def dp_pipeline(doc_df, stopwords):
    '''Pipeline that builds the document-phrase matrix from the raw corpus.'''
    texts = doc_df['text'].values
    # Tokenise into plain tokens and noun phrases (up to 3 words each).
    tokenised = extractor.tokenise_doc(texts, stopwords, max_phrase_len=3)
    # Term frequencies for tokens and noun phrases feed the DP weighting.
    term_tf = generator.generate_tf(tokenised['tokens'])
    phrase_tf = generator.generate_tf(tokenised['np'])
    # Build the document-phrase matrix with the indirect weighting method.
    dp = generator.generate_dp_matrix(term_tf, phrase_tf,
                                      doc_df['doc_id'], method="indirect")
    # Densify the sparse payload into a labelled DataFrame.
    return pd.DataFrame(dp['matrix'].todense(),
                        index=dp['index'], columns=dp['columns'])

dp_matrix = dp_pipeline(doc_df, stopwords)
display(dp_matrix)
# ## Document-topic matrix
MODEL_PATH = './model/'

def dtopic_pipeline(dp_matrix, topics):
    '''Pipeline that generates the document-topic matrix, the topic weight
    vector and the topic-phrase mapping, using SciBERT embeddings to match
    topics against the document phrases.'''
    # Load Scibert model
    MODEL_DIR = '{}scibert_scivocab_uncased'.format(MODEL_PATH)
    model = BertModel.from_pretrained(MODEL_DIR)
    tokenizer = BertTokenizer.from_pretrained(MODEL_DIR)
    # Prepare model dictionary
    # Note: For the pretrained vectors of phrases, you will need to read here.
    # This example does not contain pretrained vectors
    model_dict = {
        'model': model,
        'tokenizer': tokenizer,
        'trained_vectors': None
    }
    # Generate document-topic matrix (top_n=1 keeps the best phrase per topic)
    dtopic_matrix, topic_phrase = generator.generate_dtop_matrix(dp_matrix, topics,
                                                                 model_dict, top_n=1)
    # Topic weight vector is derived from the raw (sparse) matrix payload.
    topic_vec = generator.generate_topic_vector(dtopic_matrix)
    # Densify into a labelled DataFrame for display and downstream use.
    dtopic_matrix = pd.DataFrame(dtopic_matrix['matrix'].todense(),
                                 index=dtopic_matrix['index'],
                                 columns=dtopic_matrix['columns'])
    return dtopic_matrix, topic_vec, topic_phrase

dtopic_matrix, topic_vec, topic_phrase = dtopic_pipeline(dp_matrix, topics)
display(topic_phrase)
display(dtopic_matrix)
display(topic_vec)
# ## Personalised matrices
def personalised_pipeline(ed_df, ed_matrix, dtopic_matrix, topic_vec):
    '''Run the personalised-PageRank step and return the expert-topic matrix,
    the document-topic matrix and the expert-document graph.'''
    # Expert-document graph used for propagation.
    graph = generator.generate_ecg(ed_df)
    # Personalised matrices; alpha=0.0 here.
    et_raw, dt_raw = generator.generate_pr_matrix(ed_matrix,
                                                  dtopic_matrix,
                                                  topic_vec['weights'].values,
                                                  graph, alpha=0.0)

    def _to_frame(payload):
        # Densify a sparse matrix payload into a labelled DataFrame.
        return pd.DataFrame(payload['matrix'].todense(),
                            index=payload['index'],
                            columns=payload['columns'])

    return _to_frame(et_raw), _to_frame(dt_raw), graph

exp_pr_df, doc_pr_df, ed_graph = personalised_pipeline(ed_df, ed_matrix, dtopic_matrix, topic_vec)
display(exp_pr_df.loc[['e1', 'e2', 'e3']])
display(doc_pr_df.loc[['d1', 'd2', 'd3']])
# ## Counted vectors
def cv_pipeline(ed_matrix, ed_graph):
    '''Return the counted vectors for experts and documents.'''
    # The generator returns the (expert, document) vector pair directly.
    return generator.generate_ed_vector(ed_matrix, ed_graph)

ed_count, de_count = cv_pipeline(ed_matrix, ed_graph)
display(ed_count)
display(de_count)
# # ExpFinder algorithm
def ef_pipeline(ed_matrix, ed_graph, exp_pr_df, doc_pr_df, ed_count, de_count):
    '''Run the ExpFinder algorithm with the prepared matrices and display
    the resulting expert-topic matrix.'''
    # Model settings expected by trainer.run_expfinder.
    settings = {
        'ed_graph': ed_graph,
        'ed_matrix': ed_matrix,
        'et_matrix': exp_pr_df,
        'dt_matrix': doc_pr_df,
        'lamb_e': 1.0,    # expert-side damping
        'lamb_d': 0.7,    # document-side damping
        'max_iter': 5,    # propagation iterations
        'ed_count': ed_count,
        'de_count': de_count,
    }
    topics = doc_pr_df.columns
    # Run model
    expert_topic = trainer.run_expfinder(topics, settings)
    display(expert_topic)

ef_pipeline(ed_matrix, ed_graph, exp_pr_df, doc_pr_df, ed_count, de_count)
| experimental pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # Machine Learning Engineer Nanodegree
# ## Reinforcement Learning
# ## Project: Train a Smartcab to Drive
#
# Welcome to the fourth project of the Machine Learning Engineer Nanodegree! In this notebook, template code has already been provided for you to aid in your analysis of the *Smartcab* and your implemented learning algorithm. You will not need to modify the included code beyond what is requested. There will be questions that you must answer which relate to the project and the visualizations provided in the notebook. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide in `agent.py`.
#
# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
# -----
#
# ## Getting Started
# In this project, you will work towards constructing an optimized Q-Learning driving agent that will navigate a *Smartcab* through its environment towards a goal. Since the *Smartcab* is expected to drive passengers from one location to another, the driving agent will be evaluated on two very important metrics: **Safety** and **Reliability**. A driving agent that gets the *Smartcab* to its destination while running red lights or narrowly avoiding accidents would be considered **unsafe**. Similarly, a driving agent that frequently fails to reach the destination in time would be considered **unreliable**. Maximizing the driving agent's **safety** and **reliability** would ensure that *Smartcabs* have a permanent place in the transportation industry.
#
# **Safety** and **Reliability** are measured using a letter-grade system as follows:
#
# | Grade | Safety | Reliability |
# |:-----: |:------: |:-----------: |
# | A+ | Agent commits no traffic violations,<br/>and always chooses the correct action. | Agent reaches the destination in time<br />for 100% of trips. |
# | A | Agent commits few minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 90% of trips. |
# | B | Agent commits frequent minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 80% of trips. |
# | C | Agent commits at least one major traffic violation,<br/> such as driving through a red light. | Agent reaches the destination on time<br />for at least 70% of trips. |
# | D | Agent causes at least one minor accident,<br/> such as turning left on green with oncoming traffic. | Agent reaches the destination on time<br />for at least 60% of trips. |
# | F | Agent causes at least one major accident,<br />such as driving through a red light with cross-traffic. | Agent fails to reach the destination on time<br />for at least 60% of trips. |
#
# To assist evaluating these important metrics, you will need to load visualization code that will be used later on in the project. Run the code cell below to import this code which is required for your analysis.
# +
# Import the visualization code
import visuals as vs
# Pretty display for notebooks
# %matplotlib inline
# -
# ### Understand the World
# Before starting to work on implementing your driving agent, it's necessary to first understand the world (environment) which the *Smartcab* and driving agent work in. One of the major components to building a self-learning agent is understanding the characteristics about the agent, which includes how the agent operates. To begin, simply run the `agent.py` agent code exactly how it is -- no need to make any additions whatsoever. Let the resulting simulation run for some time to see the various working components. Note that in the visual simulation (if enabled), the **white vehicle** is the *Smartcab*.
# ### Question 1
# In a few sentences, describe what you observe during the simulation when running the default `agent.py` agent code. Some things you could consider:
# - *Does the Smartcab move at all during the simulation?*
# - *What kind of rewards is the driving agent receiving?*
# - *How does the light changing color affect the rewards?*
#
# **Hint:** From the `/smartcab/` top-level directory (where this notebook is located), run the command
# ```bash
# 'python smartcab/agent.py'
# ```
# **Answer:**
# 1. Does the Smartcab move at all during the simulation?
# The Smartcab does not appear to move at all during the simulation.
# 2. What kind of rewards is the driving agent receiving?
# "Agent properly idled at a red light. (rewarded 2.09)" It appears that the traffic light state and oncoming traffic will determine the award.
# 3. How does the light changing color affect the rewards?
# From the environment.py file, if the agent consistently fails to move when it is safe to do so (meaning, a green light with no oncoming traffic), the agent will receive a penalty as well.
# * Red Light: Positive reward for idling, the car should not move.
# * Green Light: Negative reward for idling, the car should move.
# * Green Light with Oncoming Traffic: When the light is green and there is oncoming traffic, the car will receive a positive reward if it follows the Right-of-Way rule.
# ### Understand the Code
# In addition to understanding the world, it is also necessary to understand the code itself that governs how the world, simulation, and so on operate. Attempting to create a driving agent would be difficult without having at least explored the *"hidden"* devices that make everything work. In the `/smartcab/` top-level directory, there are two folders: `/logs/` (which will be used later) and `/smartcab/`. Open the `/smartcab/` folder and explore each Python file included, then answer the following question.
# ### Question 2
# - *In the *`agent.py`* Python file, choose three flags that can be set and explain how they change the simulation.*
# - *In the *`environment.py`* Python file, what Environment class function is called when an agent performs an action?*
# - *In the *`simulator.py`* Python file, what is the difference between the *`'render_text()'`* function and the *`'render()'`* function?*
# - *In the *`planner.py`* Python file, will the *`'next_waypoint()`* function consider the North-South or East-West direction first?*
# **Answer:**
# 1. In the agent.py Python file, choose three flags that can be set and explain how they change the simulation.
# agent.py three key flags for creating the driving agent:
# * 'learning': if it is True, the agent is forced to use Q-learning.
# * 'epsilon': This is the exploration factor, if it is too low, the agent will not learn new behavior because it will use previous learning behaviors. If it is too high, the agent will always take risks and won't learn from the past learning habits.
# * 'alpha': This is the learning rate, between 0 and 1. When it is equal to 0, the agent does not incorporate new experience and its behavior never changes; when it is equal to 1, the agent only considers the most recent experience and constantly changes its behavior.
# 2. In the environment.py Python file, what Environment class function is called when an agent performs an action?
# * In the act() function of 'environment.py.' file, when an agent performs an action, it will receive rewards or penalties based on the environmental states. Each update from environment.py will impact the rewards of the agent's action.
# 3. In the simulator.py Python file, what is the difference between the 'render_text()' function and the 'render()' function?
# * The simulator.py Python file, has two rendering functions, render_text and render. The 'render_text()' is the non-GUI render a display of the simulation and Simulated trial data will be rendered in the terminal/command prompt.
# * 'render()' is the GUI render a display of the simulation and it supplementary trial data can be found from render_text.
# 4. In the planner.py Python file, will the 'next_waypoint() function consider the North-South or East-West direction first?
# * In the 'planner.py' file, it considers the East-West direction first, then evaluates the North-South direction.
#
# -----
# ## Implement a Basic Driving Agent
#
# The first step to creating an optimized Q-Learning driving agent is getting the agent to actually take valid actions. In this case, a valid action is one of `None`, (do nothing) `'Left'` (turn left), `'Right'` (turn right), or `'Forward'` (go forward). For your first implementation, navigate to the `'choose_action()'` agent function and make the driving agent randomly choose one of these actions. Note that you have access to several class variables that will help you write this functionality, such as `'self.learning'` and `'self.valid_actions'`. Once implemented, run the agent file and simulation briefly to confirm that your driving agent is taking a random action each time step.
# ### Basic Agent Simulation Results
# To obtain results from the initial simulation, you will need to adjust following flags:
# - `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
# - `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
# - `'log_metrics'` - Set this to `True` to log the simluation results as a `.csv` file in `/logs/`.
# - `'n_test'` - Set this to `'10'` to perform 10 testing trials.
#
# Optionally, you may disable to the visual simulation (which can make the trials go faster) by setting the `'display'` flag to `False`. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!
#
# Once you have successfully completed the initial simulation (there should have been 20 training trials and 10 testing trials), run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!
# Run the agent.py file after setting the flags from projects/smartcab folder instead of projects/smartcab/smartcab.
#
# Load the 'sim_no-learning' log file from the initial simulation results
# Visualize the logged metrics from the no-learning (random agent) simulation.
no_learning_log = 'sim_no-learning.csv'
vs.plot_trials(no_learning_log)
# ### Question 3
# Using the visualization above that was produced from your initial simulation, provide an analysis and make several observations about the driving agent. Be sure that you are making at least one observation about each panel present in the visualization. Some things you could consider:
# - *How frequently is the driving agent making bad decisions? How many of those bad decisions cause accidents?*
# - *Given that the agent is driving randomly, does the rate of reliability make sense?*
# - *What kind of rewards is the agent receiving for its actions? Do the rewards suggest it has been penalized heavily?*
# - *As the number of trials increases, does the outcome of results change significantly?*
# - *Would this Smartcab be considered safe and/or reliable for its passengers? Why or why not?*
# **Answer:**
# 1. How frequently is the driving agent making bad decisions? How many of those bad decisions cause accidents?
# The frequency of adverse actions is between 0.25 to 0.38. The Major Accidents rate is about 0.038 to 0.045 range; the Minor Accidents rate is about 0.035 to 0.045 range; the Minor Violation rate is about 0.07 to 0.1 range; and the Major Violation rate is about 0.12 to 0.22 range.
# 2. Given that the agent is driving randomly, does the rate of reliability make sense?
# Given that the agent is driving randomly, the rate of reliability is as expected which is between 10% to 20%.
# 3. What kind of rewards is the agent receiving for its actions? Do the rewards suggest it has been penalized heavily?
# Given the overall agent reward per actions were all negative, which indicates that the agent only made bad decisions. Yes, it showed the agent had been penalized heavily.
# 4. As the number of trials increases, does the outcome of results change significantly?
# No, it has not changed as the number of trials increases. Since the agent made decisions randomly, it did not learn from the previous behaviors.
# 5. Would this Smartcab be considered safe and reliable for its passengers? Why or why not?
# Because the safety and reliability rating are F, I do not think this Smartcab is safe and/or reliable.
#
# -----
# ## Inform the Driving Agent
# The second step to creating an optimized Q-learning driving agent is defining a set of states that the agent can occupy in the environment. Depending on the input, sensory data, and additional variables available to the driving agent, a set of states can be defined for the agent so that it can eventually *learn* what action it should take when occupying a state. The condition of `'if state then action'` for each state is called a **policy**, and is ultimately what the driving agent is expected to learn. Without defining states, the driving agent would never understand which action is most optimal -- or even what environmental variables and conditions it cares about!
# ### Identify States
# Inspecting the `'build_state()'` agent function shows that the driving agent is given the following data from the environment:
# - `'waypoint'`, which is the direction the *Smartcab* should drive leading to the destination, relative to the *Smartcab*'s heading.
# - `'inputs'`, which is the sensor data from the *Smartcab*. It includes
# - `'light'`, the color of the light.
# - `'left'`, the intended direction of travel for a vehicle to the *Smartcab*'s left. Returns `None` if no vehicle is present.
# - `'right'`, the intended direction of travel for a vehicle to the *Smartcab*'s right. Returns `None` if no vehicle is present.
# - `'oncoming'`, the intended direction of travel for a vehicle across the intersection from the *Smartcab*. Returns `None` if no vehicle is present.
# - `'deadline'`, which is the number of actions remaining for the *Smartcab* to reach the destination before running out of time.
# ### Question 4
# *Which features available to the agent are most relevant for learning both **safety** and **efficiency**? Why are these features appropriate for modeling the *Smartcab* in the environment? If you did not choose some features, why are those features* not *appropriate?*
# **Answer:**
# 1. Which features available to the agent are most relevant for learning both safety and efficiency?
# * The features for learning safety and effectiveness will be the 'inputs' and 'waypoint' They both provide safety features and the effectiveness of actions. Assuming the agent is training in the right-hand traffic, the 'inputs' will give information for the agent, in term of the safety. On the other hand, a waypoint is used to achieve efficiency, which helps the agent to reach the destination promptly.
# 2. Why are these features appropriate for modeling the Smartcab in the environment?
# Because those features also are used in our daily driving actions. As the U.S. Right-of-Way rules, on a green light, a left turn is permitted if no oncoming traffic and on the red light, a right turn is allowed if no oncoming traffic.
# 3. If you did not choose some features, why are those features not appropriate?
# I did not choose the 'deadline' feature, for safety reasons: I do not want the agent to take risky actions because of an approaching deadline. Another value I excluded is 'None' for the waypoint, so the waypoint only takes three possible values (['left', 'right', 'forward']).
#
# ### Define a State Space
# When defining a set of states that the agent can occupy, it is necessary to consider the *size* of the state space. That is to say, if you expect the driving agent to learn a **policy** for each state, you would need to have an optimal action for *every* state the agent can occupy. If the number of all possible states is very large, it might be the case that the driving agent never learns what to do in some states, which can lead to uninformed decisions. For example, consider a case where the following features are used to define the state of the *Smartcab*:
#
# `('is_raining', 'is_foggy', 'is_red_light', 'turn_left', 'no_traffic', 'previous_turn_left', 'time_of_day')`.
#
# How frequently would the agent occupy a state like `(False, True, True, True, False, False, '3AM')`? Without a near-infinite amount of time for training, it's doubtful the agent would ever learn the proper action!
# ### Question 5
# *If a state is defined using the features you've selected from **Question 4**, what would be the size of the state space? Given what you know about the environment and how it is simulated, do you think the driving agent could learn a policy for each possible state within a reasonable number of training trials?*
# **Hint:** Consider the *combinations* of features to calculate the total number of states!
# **Answer:**
# 1. What would be the size of the state space, given what you know about the environment?
# Possible waypoint: left, right, forward
# Possible light: red, green
# Possible oncoming: left, right, forward, none
# Possible left: left, right, forward, none
# Possible right: left, right, forward, none
# state space = 3 x 2 x 4 x 4 x 4 = 384
# 2. how it is simulated, do you think the driving agent could learn a policy for each possible state within a reasonable number of training trials?
# I think so. Only 384 possible combinations of features. The agent shall learn a policy for each possible state within a reasonable number of training trials.
# ### Update the Driving Agent State
# For your second implementation, navigate to the `'build_state()'` agent function. With the justification you've provided in **Question 4**, you will now set the `'state'` variable to a tuple of all the features necessary for Q-Learning. Confirm your driving agent is updating its state by running the agent file and simulation briefly and note whether the state is displaying. If the visual simulation is used, confirm that the updated state corresponds with what is seen in the simulation.
#
# **Note:** Remember to reset simulation flags to their default setting when making this observation!
# -----
# ## Implement a Q-Learning Driving Agent
# The third step to creating an optimized Q-Learning agent is to begin implementing the functionality of Q-Learning itself. The concept of Q-Learning is fairly straightforward: For every state the agent visits, create an entry in the Q-table for all state-action pairs available. Then, when the agent encounters a state and performs an action, update the Q-value associated with that state-action pair based on the reward received and the interative update rule implemented. Of course, additional benefits come from Q-Learning, such that we can have the agent choose the *best* action for each state based on the Q-values of each state-action pair possible. For this project, you will be implementing a *decaying,* $\epsilon$*-greedy* Q-learning algorithm with *no* discount factor. Follow the implementation instructions under each **TODO** in the agent functions.
#
# Note that the agent attribute `self.Q` is a dictionary: This is how the Q-table will be formed. Each state will be a key of the `self.Q` dictionary, and each value will then be another dictionary that holds the *action* and *Q-value*. Here is an example:
#
# ```
# { 'state-1': {
# 'action-1' : Qvalue-1,
# 'action-2' : Qvalue-2,
# ...
# },
# 'state-2': {
# 'action-1' : Qvalue-1,
# ...
# },
# ...
# }
# ```
#
# Furthermore, note that you are expected to use a *decaying* $\epsilon$ *(exploration) factor*. Hence, as the number of trials increases, $\epsilon$ should decrease towards 0. This is because the agent is expected to learn from its behavior and begin acting on its learned behavior. Additionally, The agent will be tested on what it has learned after $\epsilon$ has passed a certain threshold (the default threshold is 0.01). For the initial Q-Learning implementation, you will be implementing a linear decaying function for $\epsilon$.
# ### Q-Learning Simulation Results
# To obtain results from the initial Q-Learning implementation, you will need to adjust the following flags and setup:
# - `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
# - `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
# - `'log_metrics'` - Set this to `True` to log the simluation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.
# - `'n_test'` - Set this to `'10'` to perform 10 testing trials.
# - `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.
#
# In addition, use the following decay function for $\epsilon$:
#
# $$ \epsilon_{t+1} = \epsilon_{t} - 0.05, \hspace{10px}\textrm{for trial number } t$$
#
# If you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!
#
# Once you have successfully completed the initial Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!
# Load the 'sim_default-learning' file from the default Q-Learning simulation
# Visualize the logged metrics from the default Q-Learning simulation.
default_learning_log = 'sim_default-learning.csv'
vs.plot_trials(default_learning_log)
# ### Question 6
# Using the visualization above that was produced from your default Q-Learning simulation, provide an analysis and make observations about the driving agent like in **Question 3**. Note that the simulation should have also produced the Q-table in a text file which can help you make observations about the agent's learning. Some additional things you could consider:
# - *Are there any observations that are similar between the basic driving agent and the default Q-Learning agent?*
# - *Approximately how many training trials did the driving agent require before testing? Does that number make sense given the epsilon-tolerance?*
# - *Is the decaying function you implemented for $\epsilon$ (the exploration factor) accurately represented in the parameters panel?*
# - *As the number of training trials increased, did the number of bad actions decrease? Did the average reward increase?*
# - *How does the safety and reliability rating compare to the initial driving agent?*
# **Answer:**
# *. Are there any observations that are similar to the basic driving agent and the default Q-Learning agent?
# Yes, there is similarity. Both agents (the basic and the default) both achieved F-scores for safety and reliability ratings. However, the basic driving agent's overall adverse actions have been reduced over time from 0.25 to 0.16.
# *. Approximately how many training trials did the driving agent require before testing? Does that number make sense given the epsilon-tolerance?
# The agent required 20 training trials; this makes sense given the epsilon tolerance takes 20 trials to reduce to 0 which is below the default tolerance level of 0.05 for testing to begin. (1/0.05 = 20).
# *. Is the decaying function you implemented for ϵ(the exploration factor) accurately represented in the parameters panel? As the number of training trials increased, did the number of bad actions decrease? Did the average reward increase?
# Even though the scores are F's, as the number of training trials increased, the number of adverse actions decreased and the average rewards increased. The rate of reliability remained the same, below 40%, but the graph showed an increase in positive average rewards, which indicates that increasing the number of trials may improve the reliability rating.
# -----
# ## Improve the Q-Learning Driving Agent
# The third step to creating an optimized Q-Learning agent is to perform the optimization! Now that the Q-Learning algorithm is implemented and the driving agent is successfully learning, it's necessary to tune settings and adjust learning paramaters so the driving agent learns both **safety** and **efficiency**. Typically this step will require a lot of trial and error, as some settings will invariably make the learning worse. One thing to keep in mind is the act of learning itself and the time that this takes: In theory, we could allow the agent to learn for an incredibly long amount of time; however, another goal of Q-Learning is to *transition from experimenting with unlearned behavior to acting on learned behavior*. For example, always allowing the agent to perform a random action during training (if $\epsilon = 1$ and never decays) will certainly make it *learn*, but never let it *act*. When improving on your Q-Learning implementation, consider the impliciations it creates and whether it is logistically sensible to make a particular adjustment.
# ### Improved Q-Learning Simulation Results
# To obtain results from the initial Q-Learning implementation, you will need to adjust the following flags and setup:
# - `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
# - `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
# - `'log_metrics'` - Set this to `True` to log the simluation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.
# - `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.
# - `'optimized'` - Set this to `'True'` to tell the driving agent you are performing an optimized version of the Q-Learning implementation.
#
# Additional flags that can be adjusted as part of optimizing the Q-Learning agent:
# - `'n_test'` - Set this to some positive number (previously 10) to perform that many testing trials.
# - `'alpha'` - Set this to a real number between 0 - 1 to adjust the learning rate of the Q-Learning algorithm.
# - `'epsilon'` - Set this to a real number between 0 - 1 to adjust the starting exploration factor of the Q-Learning algorithm.
# - `'tolerance'` - set this to some small value larger than 0 (default was 0.05) to set the epsilon threshold for testing.
#
# Furthermore, use a decaying function of your choice for $\epsilon$ (the exploration factor). Note that whichever function you use, it **must decay to **`'tolerance'`** at a reasonable rate**. The Q-Learning agent will not begin testing until this occurs. Some example decaying functions (for $t$, the number of trials):
#
# $$ \epsilon = a^t, \textrm{for } 0 < a < 1 \hspace{50px}\epsilon = \frac{1}{t^2}\hspace{50px}\epsilon = e^{-at}, \textrm{for } 0 < a < 1 \hspace{50px} \epsilon = \cos(at), \textrm{for } 0 < a < 1$$
# You may also use a decaying function for $\alpha$ (the learning rate) if you so choose, however this is typically less common. If you do so, be sure that it adheres to the inequality $0 \leq \alpha \leq 1$.
#
# If you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!
#
# Once you have successfully completed the improved Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!
# Load the 'sim_improved-learning' file from the improved Q-Learning simulation
# Visualize the logged metrics from the improved (optimized) Q-Learning simulation.
improved_learning_log = 'sim_improved-learning.csv'
vs.plot_trials(improved_learning_log)
# ### Question 7
# Using the visualization above that was produced from your improved Q-Learning simulation, provide a final analysis and make observations about the improved driving agent like in **Question 6**. Questions you should answer:
# - *What decaying function was used for epsilon (the exploration factor)?*
# - *Approximately how many training trials were needed for your agent before begining testing?*
# - *What epsilon-tolerance and alpha (learning rate) did you use? Why did you use them?*
# - *How much improvement was made with this Q-Learner when compared to the default Q-Learner from the previous section?*
# - *Would you say that the Q-Learner results show that your driving agent successfully learned an appropriate policy?*
# - *Are you satisfied with the safety and reliability ratings of the *Smartcab*?*
# **Answer:**
# * What decaying function was used for epsilon (the exploration factor)?
# I choose the ϵ=e^(−at),for 0<a<1. This was picked because it is important to have a great wealth of learning early on, with a gradual decline into exploration.
#
# * Approximately how many training trials were needed for your agent before beginning testing?
# I set tolerance=0.005, this allowed for around about 7000 training trials with alpha=0.002. Plenty of training trials is necessary to increase the safety rating. The small alpha number(0.002) which reduce the high fluctuations in the policy, and low tolerance(0.005) which increase the number of training trials.
#
# * How much improvement was made with this Q-Learner when compared to the default Q-Learner from the previous section?
# There is enormous improvement comparing with initial/default Q-Learning driving agent.
#
#
# | Agents                 | Safety Score | Reliability Score |
# |------------------------|--------------|-------------------|
# | initial/default Agents | F | F |
# | optimized Agent | A+ | A |
#
#
# Both the default agent and the optimized agent showed total bad actions, accidents, and violations decreasing as the trials increased. However, the optimized agent had ~7000 trials to learn, while the default agent only had ~20 trials.
#
# * Would you say that the Q-Learner results show that your driving agent successfully learned an appropriate policy?
# Yes, the rating scores show that the Smartcab agent successfully learned an appropriate policy.
# However, in the real world, traffic situations are much more complicated than these traffic rules, so the Smartcab still has a long way to go.
# ### Define an Optimal Policy
#
# Sometimes, the answer to the important question *"what am I trying to get my agent to learn?"* only has a theoretical answer and cannot be concretely described. Here, however, you can concretely define what it is the agent is trying to learn, and that is the U.S. right-of-way traffic laws. Since these laws are known information, you can further define, for each state the *Smartcab* is occupying, the optimal action for the driving agent based on these laws. In that case, we call the set of optimal state-action pairs an **optimal policy**. Hence, unlike some theoretical answers, it is clear whether the agent is acting "incorrectly" not only by the reward (penalty) it receives, but also by pure observation. If the agent drives through a red light, we both see it receive a negative reward but also know that it is not the correct behavior. This can be used to your advantage for verifying whether the **policy** your driving agent has learned is the correct one, or if it is a **suboptimal policy**.
# ### Question 8
# Provide a few examples (using the states you've defined) of what an optimal policy for this problem would look like. Afterwards, investigate the `'sim_improved-learning.txt'` text file to see the results of your improved Q-Learning algorithm. _For each state that has been recorded from the simulation, is the **policy** (the action with the highest value) correct for the given state? Are there any states where the policy is different than what would be expected from an optimal policy?_ Provide an example of a state and all state-action rewards recorded, and explain why it is the correct policy.
# **Answer:**
# Here are a few examples of policies produced by the Q-Learner. I went through the log and could not find any incorrect policy; the only case that could be considered suboptimal is Case 1, in which all state-action values are 0.00 (suggesting that state was rarely or never visited). Otherwise, all of the learned policies are optimal.
#
# * Case 1:
# ('right', 'green', 'right', 'left', 'right')
# * forward : 0.00
# * None : 0.00
# * right : 0.00
# * left : 0.00
#
# * Case 2:
#
# ('right', 'red', 'left', 'right', 'left')
# * forward : -0.04
# * None : 0.00
# * right : 0.03
# * left : -0.02
# * The agent took the 'Right' action when there is an red light which indicate that the agent had learn the 'Right-of-way' rule-- when there is red light and in the safe condition, it is ok to turn 'right'.
#
# * Case 3:
#
# ('forward', 'green', 'right', None, 'forward')
# * forward : 0.14
# * None : -0.02
# * right : 0.00
# * left : -0.16
# * The agent took the 'forward' action since it has the highest q-value. It indicates that the agent was successfully learned the policy and it moved forward when there was a green light with a right turn oncoming traffic.
#
# * Case 4:
#
# ('forward', 'green', 'left', 'left', 'right')
# * forward : 0.07
# * None : 0.00
# * right : 0.00
# * left : 0.00
# * It shows that the agent took the 'forward' action when the green light was on.
# -----
# ### Optional: Future Rewards - Discount Factor, `'gamma'`
# Curiously, as part of the Q-Learning algorithm, you were asked to **not** use the discount factor, `'gamma'` in the implementation. Including future rewards in the algorithm is used to aid in propogating positive rewards backwards from a future state to the current state. Essentially, if the driving agent is given the option to make several actions to arrive at different states, including future rewards will bias the agent towards states that could provide even more rewards. An example of this would be the driving agent moving towards a goal: With all actions and rewards equal, moving towards the goal would theoretically yield better rewards if there is an additional reward for reaching the goal. However, even though in this project, the driving agent is trying to reach a destination in the allotted time, including future rewards will not benefit the agent. In fact, if the agent were given many trials to learn, it could negatively affect Q-values!
# ### Optional Question 9
# *There are two characteristics about the project that invalidate the use of future rewards in the Q-Learning algorithm. One characteristic has to do with the *Smartcab* itself, and the other has to do with the environment. Can you figure out what they are and why future rewards won't work for this project?*
# **Answer:**
# * What kind of information does the smartcab receive from the environment? Does it receive "global" information, such as its position about the destination? Or merely "local" information such as the status of the intersection it's in?
#
# The smartcab only receive the 'local' information --the traffic and the light of the intersection.
#
# * Do the agent and the destination begin each trial in the same place, or do they move around? What about the lights and the other cars in the grid world, do they follow a predictable pattern? Can the agent learn the best route by route?
#
# No, the start point and the destination change with every trial, so the agent takes different actions to reach the goal each time. Because the Smartcab only has a localized view of the environment, given enough trials any extra reward would eventually be propagated away from every intersection rather than guiding the agent toward a fixed goal.
#
# * Is there an "extra reward" for reaching the destination? In other words, is there anything to propagate back, other than the reward for obeying traffic rules and following the planner?
#
# No, it does not have an extra reward for reaching the destination, according to the environment.py file. This setting ensure the safety behavior of the agent, which the agent will not try to take risks to get 'extra reward'.
#
# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
# **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
| Smartcab_project/smartcab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 3: Asking a Statistical Question
#
# ## <NAME>, partners with <NAME>
#
# Within this report we will be focusing on how to ask a statistical question. Asking and answering a statistical question generally requires the following three steps:
#
# 1) Writing down in words _precisely_ what question we are trying to ask.
# 2) Translating the precise english into a mathematical expression. Often containing the PDF of the background - determining of which can be considered a substep of this step. Then evaluating the integral.
# 3) Converting the probability from the previous step into a sigma.
#
# In this lab we will presume that we know the background distribution and will focus on asking the statistical question and getting the correct results in a clear manner.
# +
# general imports
# %matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy
from scipy import stats
# plotting setup
plt.rcParams["figure.figsize"] = (20,15)
# -
# ## Problem 1
#
# Let's presume that we are performing an experiment which requires precise temperature control to work reliably. Specifically, we would like our temperature to be 12 Kelvin. Our data shows that the temperature stays around a mean value of 12 Kelvin but has a standard deviation of 0.4 degrees which is the result of the thermal control systems.
#
# At some points our system misbehaves and we see situations where the temperature is not near 12 K; additionally the thermometry has various glitches which result in very high or low readings that do not correspond to the true temperature. While sometimes the outlier values are true readings they generally are not, so we would like to look at how we can eliminate these outliers.
#
# Let's begin by simulating some data for our hypothetical situation. Lets generate some good data from a Gaussian distribution and then add some outlier readings:
# +
# distribution information
mean_temp = 12    # Kelvin, setpoint of the thermal control system
std_temp = 0.4    # Kelvin, RMS scatter from the thermal control loop
# generation of good points and bad points
true_data_N = 100000
outliers = [10., 10.3, 2.1, 0., 0., 15.6, 22.3, 12.7]
draw = np.append(stats.norm.rvs(loc=mean_temp, scale=std_temp, size=true_data_N), outliers)
# plotting on a semi-log y axis so the rare outlier readings stay visible
fig, ax = plt.subplots(1, 1)
ax.hist(draw, 100, density=True, label="Lab Data")
plt.tick_params(labelsize = 24)
plt.yscale('log')
plt.xlabel('Temperature (Kelvin)', fontsize=24)  # fixed typo: was 'Kevlin'
plt.ylabel('Log Probability Density', fontsize=24)
plt.legend(fontsize=24)
plt.show()
# -
# ### Part A
#
# As previously stated, our goal is to identify and remove bad data.
#
# Let's begin by first exploring our data when it is viewed on a non-logarithmic scale:
# +
xs_true_gaus = np.linspace(0, 25, 5000)
probs_true_gaus = stats.norm.pdf(xs_true_gaus, loc=mean_temp, scale=std_temp)
fig, ax = plt.subplots(1, 1)
ax.hist(draw, 100, density=True, label="Lab Data")
ax.plot(xs_true_gaus, probs_true_gaus, linewidth=8, alpha=0.7, label="True Gaussian")
plt.tick_params(labelsize = 24)
plt.xlabel('Temperature (Kelvin)', fontsize=24)
plt.ylabel('Probability Density', fontsize=24)
plt.legend(fontsize=24)
plt.show()
# -
# From the above graph we see that when we view our distribution on a non-logarithmic plot the outliers are no longer visible because they are inconsequential compared to the rest of the data. This reinforces the benefits of always looking at our plots on a semi-log scale. Now let's go back to the semi-log plot and look at our true distribution and its comparison to our data points:
# +
xs_true_gaus_truncated = np.linspace(10.1, 13.9, 1000)
probs_true_gaus_truncated = stats.norm.pdf(xs_true_gaus_truncated, loc=mean_temp, scale=std_temp)
fig, ax = plt.subplots(1, 1)
ax.hist(draw, 100, density=True, label="Lab Data")
ax.plot(xs_true_gaus_truncated, probs_true_gaus_truncated, linewidth=8, alpha=0.7, label="True Gaussian")
plt.tick_params(labelsize = 24)
plt.yscale('log')
plt.xlabel('Temperature (Kelvin)', fontsize=24)
plt.ylabel('Log Probability Density', fontsize=24)
plt.legend(fontsize=24)
plt.show()
# -
# From the above plot we see that the outliers are unrelated to the true Gaussian distribution entirely. We also see from the plot on a non-logarithmic scale that the Gaussian rare events begin when the temperature is less than about 10 Kelvin or larger than around 14 Kelvin. Thus we can view these outliers as rare events when we have this Gaussian distribution - the event for which our background would produce one of these outliers has a very small probability.
#
# Now that we have explored our data, lets propose a statistical question for removing the outliers in our data:
#
# > What outlier temperature must I have measured in order for the background Gaussian distribution ($\mu=12$, $\sigma=0.4$) to have a probability of $5\sigma$ or $-5\sigma$ to have produced that signal?
#
# The reason I chose the above question is as follows: first we know that we would like to remove the outliers, so the goal of the question is to address how we can identify an outlier. I have chosen to identify an outlier as an event that has a probability equal to or smaller than the probability associated with a $5\sigma$ event or $-5\sigma$ event. A quantity of $\pm 5\sigma$ is chosen because the probability associated with this value is small - thus a rare event to be produced by the background. This bound will ensure that only rare events for the distribution are considered outliers and, we know, based on the shape of the distribution from the above graphs the outliers correspond to rare events.
#
# Transforming this calculation into math, we will find all data points that have an $x$ value which is greater than or equal to $12 + T_{threshold}$ Kelvin or less than or equal to $12 - T_{threshold}$ Kelvin where $T_{threshold}$ is calculated using the following equation:
#
# $$\int_5^\infty \text{pdf}_{normal}(x) dx = \int_{12 + T_{threshold}}^\infty \text{pdf}_{temperature \text{ } distribution}(x) dx$$
#
# $$\int_5^\infty \frac{1}{\sqrt{2\pi}} e^{-x^2 / 2} dx = \int_{12 + T_{threshold}}^\infty \frac{1}{0.4 * \sqrt{2\pi}} e^{-(x-12)^2/(2*0.4^2)} dx$$
#
# $$\int_5^\infty e^{-x^2 / 2} dx = \int_{12 + T_{threshold}}^\infty \frac{1}{0.4} e^{-(x-12)^2/(0.32)} dx$$
#
# Let's now perform the reverse integral (specifically the inverse survival function) to determine the value of $T_{threshold}$:
# +
# Translate a 5-sigma tail of the standard normal into a temperature offset
# under the lab Gaussian (mean_temp, std_temp).
# define the sigma value
sigma = 5
# tail probability of a 5-sigma event on the unit Gaussian: P(X >= 5)
prob_sigma = stats.norm.sf(sigma, loc=0, scale=1)
# invert the survival function of the temperature Gaussian at that same
# probability to find the matching absolute temperature
T_same_prob = stats.norm.isf(prob_sigma, loc=mean_temp, scale=std_temp)
# offset from the mean; for a Gaussian this equals sigma * std_temp = 2.0 K
T_threshold = T_same_prob - mean_temp
print(f'The value of T_threshold is {T_threshold}')
# -
# Now we know that for a data point to be considered an outlier based on the requirements previously outlined, that data point must have a temperature equal to or less than 10 Kelvin or greater than or equal to 14 Kelvin. Lets now calculate how many values fit this metric:
# +
# print information we already know about the true outlier and true data counts
print(f'In total we have {len(draw)} temperatures we would like to identify.')
print(f'The number of true temperature data points is: {true_data_N}')
print(f'The number of true outliers is: {len(outliers)}')
# now calculate how many outliers are identified
def is_outlier(value):
    """Return True when *value* sits at or beyond T_threshold from mean_temp."""
    return abs(value - mean_temp) >= T_threshold
# Count how many readings the +/- T_threshold rule flags as outliers; a sum
# over a generator replaces the manual counter loop.
N_identified_outliers = sum(1 for d in draw if is_outlier(d))
print(f'The number of identified outliers based on our metric is: {N_identified_outliers}')
print(f'The number of identified data points based on our metric is: {len(draw) - N_identified_outliers}')
# -
# Based on the above results, lets construct a truth table that captures the accuracy of our previously defined performance metric to determine if data should be considered an outlier or not:
#
# | | **True T** | **Bad T** |
# |--------------|-----------|------------|
# | Identified as Good T | 100000 | 2 |
# | Identified as Bad T | 0 | 6 |
# ### Part B
#
# In the above table we see that all of our real temperature data points were identified as such; however, two of our false readings were identified as true readings. We see that the proposed metric does a fairly good job of separating the true and false readings. The number of omissions of true data is directly related to the initial threshold parameter that we set. In our proposed method, we set that threshold at $5\sigma$. As this value is decreased, we will omit more and more of the true data but we will be more certain that we are not including any false readings. The percentage of true temperature readings that would be omitted is a predictable value because our data is from a Gaussian. We know that on a Gaussian, 68% of the readings, on average, will lie between $-1\sigma$ and $1\sigma$. Thus, with a $1\sigma$ threshold, we would omit, on average, 32% of our true temperature readings.
#
# If instead we had set our value at $2\sigma$ we know that 95% of our readings, on average, will lie between $-2\sigma$ and $2\sigma$ thus we will omit 5% of the true temperature readings. Similarly, if we set our value to $3\sigma$ we would omit 0.3% of our true temperature readings. Even at this point of 0.3% we see that the probability of omitting a true reading is already fairly small so any value above $3\sigma$ should perform fairly well in correctly discarding the outliers while keeping the true temperature readings.
# ### Part C
#
# Similar to the conversation in the previous part, we see that there are mistakes of commission (bad data being considered good) with our statistical threshold value of $5\sigma$ and, if we follow the same line of reasoning as in the previous part, we can conclude that the number of mistakes of commission is related to the statistical threshold. As we decrease the value of our statistical threshold, the number of mistakes of commission will also decrease. Additionally, these mistakes of commission are entirely avoidable, however it _can_ come with the trade off of throwing out true temperature readings as well.
# ## Problem 2
#
# In this example we will be looking for asteroids. If we look at the alignment of stars on subsequent images, they don't perfectly align due to atmospheric and instrumental effects (even ignoring proper motion). The resulting distribution is two dimensional, and for this lab let's assume it is a 2D Gaussian with 1 arcsecond RMS. Or said another way, if we histogram how far all the (stationary) stars appear to have moved then we get something like this:
a = np.vstack((stats.norm.rvs( scale = 1, size = 100000), stats.norm.rvs( scale = 1, size = 100000)))
a.shape
fig, ax = plt.subplots(1, 1)
h = ax.hist2d(a[0,:],a[1,:],bins=100, density=True);
ax.set_aspect('equal', 'box')
plt.xlim([-3 , 3])
plt.ylim([-3 , 3])
plt.title("2D Histogram of positional uncertainty", fontsize = 24)
plt.ylabel("$\Delta$y arcseconds", fontsize = 18)
plt.xlabel("$\Delta$x arcseconds", fontsize = 18)
plt.colorbar(h[3], ax=ax, label="Probability Density")
# **If we have a potential asteroid, it will have some true movement between the images. We would like a '5 sigma' detection of movement. What is that distance in arcseconds?**
#
# If we have a moving asteroid we would expect said asteroid to be a rare event on the above plot as the above plot shows that the background stars do not move very much - although they do move. We would like to determine what distance in arcseconds corresponds to a 5-sigma detection of movement. Recall that the distance to a point on the above plot will be defined as $\sqrt{X^2 + Y^2}$ where $X$ is the Gaussian random variable for $\delta x$ arc seconds and $Y$ is the Gaussian random variable for $\delta y$ arcseconds.
#
# Provided this information we need to determine what our background distribution for the distance is. Looking at the Wikipedia page for the Rayleigh distribution we find that when computing the length of a vector composed of random variables (here we have $V=X\widehat{i} + Y\widehat{j}$) we end up with a Rayleigh distribution if the two random variables $X$ and $Y$ are zero-mean Gaussian distributed with the same standard deviation - which is the case here. The final Rayleigh distribution will have the same scale parameter as the standard deviation of the two Gaussian random variables. Let's visualize our resulting probability distribution function, $\text{pdf}_{movement}(x)$:
# +
# variable setup
rayleigh_mean = 0
rayleigh_std = 1
rayleigh_xs = np.linspace(0, 10, 1000)
rayleigh_probs = stats.rayleigh.pdf(rayleigh_xs, loc=rayleigh_mean, scale=rayleigh_std)
# plot
plt.plot(rayleigh_xs, rayleigh_probs, linewidth=8, label="Rayleigh: $pdf_{movement}(x)$")
plt.tick_params(labelsize = 24)
plt.xlabel('Distance (arcseconds)', fontsize=24)
plt.ylabel('Probability Density', fontsize=24)
plt.xlim(0, 10)
plt.ylim(0, 0.63)
plt.legend(fontsize=24)
plt.show()
# -
# From this we can formulate our statistical question:
#
# > What distance in arcseconds would I have to measure, for the asteroid movement, in order for me to have a 5-sigma probability that my background - a Rayleigh distribution (parameters mean=0, std=1) - produced that distance measurement?
#
# Thus if we would like to determine the arcsecond distance, $D$, associated with a 5-sigma event we will begin by determining the probability of a 5-sigma event then we will use that probability to perform the inverse integral of the cumulative distribution function for the proper Rayleigh distribution to determine the required arcsecond distance for a 5-sigma detection of movement. Mathematically, we can write this in the following way:
#
# $$\int_5^\infty \text{pdf}_{normal}(x) dx = \int_{D}^\infty \text{pdf}_{movement}(x) dx$$
#
# $$\int_5^\infty \frac{1}{\sqrt{2\pi}} e^{-x^2 / 2} dx = \int_{D}^\infty x e^{-x^2 / 2} dx$$
#
# Now lets determine the distance, in arcseconds, required for a 5-sigma detection of motion.
# +
# Find the arcsecond distance whose Rayleigh tail probability matches a
# 5-sigma tail of the standard normal.
# define the sigma value
sigma = 5
# P(X >= 5) for a unit Gaussian
prob_sigma = stats.norm.sf(sigma, loc=0, scale=1)
# invert the Rayleigh survival function at that probability to get the
# distance with the same tail probability
D_same_prob = stats.rayleigh.isf(prob_sigma, loc=0, scale=1)
print(f'The distance required is: {D_same_prob:.3f} arcseconds')
# -
# From the above result we find that a distance of $5.489$ arcseconds is required for the background distribution to have a probability of $5\sigma$ to produce that same distance. This tells us that if we observe a movement larger than this value then we can confidently classify that movement to be produced by an asteroid rather than produced by the background distribution. Note that as this is a _distance_ a circle will be drawn out on the **2D Histogram of positional uncertainty** plot with a radius of $5.489$ arcseconds. Any distance measurement, in arcseconds, that lies outside of this circle can be confidently classified as not being produced by the background but rather as a true signal - or true asteroid.
# ## Problem 3
#
# As we have previously discussed, a key background for gamma-ray telescopes is the cosmic-ray background. Cosmic rays are charged particles—usually protons or electrons but can include atomic nuclei such as alpha particles (helium) or iron. Because of their charge cosmic rays spiral in the magnetic field of the galaxy. From the perspective of the Earth they appear to be coming uniformly from all directions like a high energy gas, and the direction the cosmic ray is traveling when it reaches the Earth tells us nothing about where it came from because we don't know what tortured path it has taken through the galaxy to reach us. However, at trillion electron volt energies and above, the spiral loops are fairly big and the sun and the moon will block cosmic rays. This means the sun and the moon appear as holes in the cosmic ray sky (cosmic rays from that direction are absorbed).
#
# Assume in a moon sized patch on the sky we normally have a cosmic ray rate of 1 cosmic ray per minute (arrivals are random in time). If we can observe where the moon is for 8 hours per night (not too close to the horizon) and we observe for 15 days and see 6800 cosmic rays, what is the significance of our moon shadow detection?
#
# Here the distribution for 1 minute will be a Poisson distribution with parameter 1 cosmic-ray / minute. We can draw this conclusion because we know that we must measure an integer amount of cosmic rays which restricts us to discrete distributions. Further, the Poisson distribution is widely applicable in particle physics and it is known that the cosmic background is Poisson distributed. Lets visualize this distribution:
# +
# variables
lambda_cosmic_ray = 1 # cosmic ray / minute
xs_cosmic_ray = np.linspace(0, 10, 11)
ys_cosmic_ray = stats.poisson.pmf(xs_cosmic_ray, lambda_cosmic_ray)
# plot
plt.plot(xs_cosmic_ray, ys_cosmic_ray, linewidth=8, label="Poisson: $pmf_{cosmic}$")
plt.tick_params(labelsize = 24)
plt.xlabel('Number of cosmic-rays', fontsize=24)
plt.ylabel('Probability Density', fontsize=24)
plt.xlim(0, 10)
plt.ylim(0, 0.38)
plt.legend(fontsize=24)
plt.show()
# -
# Now recall that we are looking at the moon for 8 hours every night for 15 days. This is a total time of $(15 days)*(8 hours/day)*(60 min/hour)=7200$ minutes. To get the overall distribution then we would then sum 7200 Poisson distributions by convolving the Poisson distribution with itself 7200 times. However rather than performing the convolution, it is known that the sum of $n$ Poisson random variables is Poisson distributed with a parameter $\lambda = \sum_{i=1}^n \lambda_i$ where $\lambda_i$ is the parameter of the $i$th Poisson random variable. Thus our overall distribution is as follows:
#
# $$\text{pdf}_{cosmic}(k) = \frac{7200^k e^{-7200}}{k!}$$
#
# Lets graph our overall distribution:
# +
# variables
lambda_cosmic_ray_sum = 7200 # cosmic ray / minute
xs_cosmic_ray_sum = np.linspace(0, 10000, 10001)
ys_cosmic_ray_sum = stats.poisson.pmf(xs_cosmic_ray_sum, lambda_cosmic_ray_sum)
# plot
plt.plot(xs_cosmic_ray_sum, ys_cosmic_ray_sum, linewidth=8, label="Poisson: $pmf_{cosmic}$")
plt.tick_params(labelsize = 24)
plt.xlabel('Number of cosmic-rays', fontsize=24)
plt.ylabel('Probability Density', fontsize=24)
plt.xlim(6000, 8000)
plt.legend(fontsize=24)
plt.show()
# -
# Now that we have our distribution, let's ask the statistical question that we would like to answer:
#
# > What is the probability that my background, a Poisson distribution (parameter=7200), produced a signal as signal-like or more than my signal of 6800 cosmic rays over the time period of 7200 minutes? Where being more 'signal-like' is defined as having a smaller number of cosmic-rays over the time period of 7200 minutes (i.e., a lack of cosmic-rays is more signal-like).
#
# Mathematically, we can write this question in the following way where we are looking for our statistical-significance $N\sigma$:
#
# $$\sum_{i=0}^{6800} \text{pmf}_{cosmic}(i,7200) = \int_{N}^{\infty} \text{pdf}_{normal}(x) dx$$
#
# $$\sum_{i=0}^{6800} \frac{7200^i e^{-7200}}{i!} = \int_{N}^{\infty} \frac{1}{\sqrt{2\pi}} e^{-x^2 / 2} dx$$
#
# Lets now calculate our statistical significance:
# +
# Significance of the cosmic-ray deficit: 6800 observed vs 7200 expected.
# define the signal
signal = 6800 # cosmic-rays
background_parameter = 7200
# probability that the Poisson background yields <= 6800 counts
# (a deficit at least this large)
prob_signal = stats.poisson.cdf(signal, background_parameter)
# convert that tail probability into an equivalent one-sided Gaussian sigma
signal_sigma = stats.norm.isf(prob_signal, loc=0, scale=1)
print(f'The statistical significance of the moon shadow detection is {signal_sigma:.3f} sigma')
# -
# Now we can conclude that the statistical significance of the moon shadow detection of 6800 cosmic-rays being detected over the time interval of 7200 minutes is $4.751\sigma$.
| labs/lab3/lab3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Deep Learning from Scratch
# > This directory contains Jupyter's notebook-based documentation for the **Deep Learning from Scratch** course, June 16-17, 2016, Barcelona
#
#
# Deep learning is one of the fastest growing areas of machine learning and a hot topic in both academia and industry.
# This course will cover the basics of deep learning by using a hands-on approach.
#
# ### Approach
# We will illustrate all contents with Jupyter notebooks, a web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text.
#
# ### Target Audience
# This course is targeted for developers, data scientists and researchers that have a basic knowledge of machine learning.
#
# ### Prerequisites
# Minimal experience on Python programming, basic knowledge of calculus, linear algebra, and probability theory. Attendees are expected to bring their own laptops for the hands-on practical work.
#
# ### Who
# This course is organized by the Data Science Group @ UB
#
# INSTRUCTORS: <NAME>, Adjunct Lecturer at UB, <NAME>, Associate Professor at UB. Santi Seguí, Lecturer at UB. <NAME>. Full Professor at UB.
#
# COURSE ASSISTANTS : <NAME>, <NAME>, <NAME>.
#
# ### Why
# By the end of this course, you will be able to:
# + Describe how a neural network works and combine different types of layers and activation functions.
# + Describe how these models can be applied in computer vision, text analytics, etc.
# + Develop your own models in Tensorflow.
#
# ## Topics
#
# * 1. Basic Concepts I
# * 2. Basic Concepts II
# * 3. Tensorflow
# * 4. Convolutional Neural Networks
# * 5. Recurrent Neural Networks
# * 6. Unsupervised Learning
# * 7. Advanced Applications
| 0. Deep Learning from Scratch .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # INF560 HW2 python scripts
# ## <NAME>
# ### This is the notebook for running the 3 scripts for generating, processing and visualizing random numbers
# Firstly, we run the random number generator to generate 1000 numbers between 0 and 100, and save the list into a JSON file.
# run the random number generator
# !pip install -r requirements.txt
# %run -i 'number_generator.py'
# Visualization of the distribution of the random numbers. It's a uniform distribution so the image looks like a line.
# + pycharm={"name": "#%%\n"}
import json
import matplotlib.pyplot as plt
import numpy as np
with open('output1.json') as f:
x = json.load(f)
data = x['result']
val = 0. # this is the value where you want the data to appear on the y-axis.
plt.plot(data, np.zeros_like(data) + val, 'x')
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Secondly, we run the script for applying the equation to the JSON data from the first step and save the result into a JSON file.
# -
# Secondly, we run the script for applying the equation to the JSON data from the first step and save the result into a JSON file.
# run the number transformer
# %run -i 'number_equition.py'
# Visualization of the intermediate result. The points are projected to a range from 0 to 300 instead of 0 to 100.
with open('output2.json') as f:
x = json.load(f)
data = x['result']
val = 0. # this is the value where you want the data to appear on the y-axis.
plt.plot(data, np.zeros_like(data) + val, 'x')
plt.show()
# Finally we run the visualization script for visualizing the points, in the figure, one point corresponds to a random number and the result of applying the equation to it.
# run the visualization script
# %run -i 'visualization.py'
| HW2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
# # Data Cleansing and Feature Engineering
#
# ## 1. Setting Up Spark Context
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
# +
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession \
.builder \
.getOrCreate()
# -
# ## 2. Download data from Object Store
# +
import os
import getpass
def get_or_set_environment_variable(variable):
    """Return the value of *variable* from the environment.

    If the variable is unset, prompt for it interactively (input hidden via
    getpass) and cache the answer in ``os.environ`` for subsequent calls.
    """
    value = os.environ.get(variable)
    if value is None:
        value = getpass.getpass('Please enter value for {:}: '.format(variable))
        os.environ[variable] = value
    return value
ibm_api_key_id = get_or_set_environment_variable('IBM_API_KEY_ID')
ibm_cloud_store_bucket = get_or_set_environment_variable('IBM_OBJECT_STORE_BUCKET')
# +
import json
import os
import types
from botocore.client import Config
import ibm_boto3
# Stub __iter__ that is monkey-patched onto botocore response bodies below
# (they lack the method) so the hasattr check passes; the body itself is only
# ever consumed via .read(), so returning 0 here is a harmless placeholder.
def __iter__(self): return 0
client = ibm_boto3.client(service_name='s3',
ibm_api_key_id=ibm_api_key_id,
ibm_auth_endpoint="https://iam.cloud.ibm.com/oidc/token",
config=Config(signature_version='oauth'),
endpoint_url='https://s3-api.us-geo.objectstorage.service.networklayer.com')
body = client.get_object(Bucket=ibm_cloud_store_bucket,
Key='etl_parquet_files.json')['Body']
# add missing __iter__ method
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
files = json.load(body)
files
# +
def load_dataframe(files, **kargs):
    """Download parquet objects from IBM COS and load them as one Spark DataFrame.

    Each key in *files* is fetched from the bucket, spilled to a local temp
    file (spark.read expects a path, not a stream) and read as parquet; the
    per-file frames are then unioned into a single DataFrame.  Extra keyword
    arguments are forwarded to ``spark.read.options``.
    """
    dfs = []
    for fn in files:
        body = client.get_object(Bucket=ibm_cloud_store_bucket,
                                 Key=fn)['Body']
        # botocore bodies may lack __iter__; patch in the module-level stub
        if not hasattr(body, "__iter__"):
            body.__iter__ = types.MethodType( __iter__, body )
        tfn = 'temp_{:}'.format(fn)
        with open(tfn, 'wb') as temp:
            temp.write(body.read())
        dfs.append(spark.read.options(**kargs).parquet(tfn))
    # fold the per-file frames into one DataFrame (requires at least one file)
    df = dfs.pop()
    for other in dfs:
        df = df.union(other)
    return df
df_train = load_dataframe(files['train'])
df_test = load_dataframe(files['test'])
df_label = load_dataframe(files['label'])
# -
df_train.schema == df_test.schema
# ## 2. Data Cleansing
# +
def drop_unused_cols(df):
    """Return *df* without the columns this analysis never uses."""
    unused = ('location', 'keyword')
    return df.drop(*unused)
df_train = drop_unused_cols(df_train)
df_test = drop_unused_cols(df_test)
df_train.limit(10).toPandas()
# -
# !pip install unidecode
# +
import re
import string
from unidecode import unidecode
import pyspark.sql.functions as sfun
def clean_text(text):
    """Build a Spark Column expression that lower-cases, ASCII-folds and
    strips noise (bracketed spans, URLs, angle-bracket tags, punctuation,
    newlines, digit-bearing words) from the text column *text*.

    Only ``normalize_alphabet`` is a Python UDF; the remaining steps are
    native ``regexp_replace`` column expressions.
    """
    @sfun.udf
    def normalize_alphabet(text):
        # transliterate accents, drop any remaining non-ASCII, lower-case
        text = unidecode(text)
        text = text.encode('ascii', errors='ignore').decode('utf-8', errors='ignore')
        text = text.lower()
        return text
    def no_squares(text):
        # remove [bracketed] spans
        return sfun.regexp_replace(text, '\[.*?\]', '')
    def no_links(text):
        # remove http(s):// and www. URLs
        return sfun.regexp_replace(text, 'https?://\S+|www\.\S+', '')
    def no_angles(text):
        # remove <angle-bracketed> (HTML-like) tags
        return sfun.regexp_replace(text, '<.*?>+', '')
    def no_punctuation(text):
        # remove all ASCII punctuation characters
        return sfun.regexp_replace(text, '[%s]' % re.escape(string.punctuation), '')
    def no_newline(text):
        return sfun.regexp_replace(text, '\n', '')
    def no_words_with_nums(text):
        # remove words containing a digit (plus one trailing space)
        return sfun.regexp_replace(text, '\w*\d\w*\s?', '')
    # apply the transformations in order; each wraps the previous Column
    text = normalize_alphabet(text)
    text = no_squares(text)
    text = no_links(text)
    text = no_angles(text)
    text = no_punctuation(text)
    text = no_newline(text)
    text = no_words_with_nums(text)
    return text
df_train = df_train.withColumn('clean_text', clean_text('text'))
df_test = df_test.withColumn('clean_text', clean_text('text'))
df_train.limit(10).toPandas()
# +
df_train = df_train.select('id', 'text', sfun.split('clean_text', '\s+').alias('clean_text'))
df_test = df_test.select('id', 'text', sfun.split('clean_text', '\s+').alias('clean_text'))
df_train.limit(10).toPandas()
# +
import re
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
stop = stopwords.words('english') + ["I'm", 'via', 'u'] + [word + "'s" for word in stopwords.words('english')]
stop = list(map(lambda word: re.sub('[{:}]'.format(re.escape(string.punctuation)), '', word).lower(),
stop))
stop[:10]
# +
@sfun.udf
def remove_stopwords(text):
    """UDF: drop tokens found in the module-level ``stop`` list from *text*.

    *text* is the tokenized clean_text column; the order of surviving
    tokens is preserved.
    """
    # `word not in stop` is the idiomatic spelling of `not word in stop`;
    # a list comprehension replaces the filter/lambda pair.
    return [word for word in text if word not in stop]
df_train = df_train.select('id', 'text', remove_stopwords('clean_text').alias('clean_text'))
df_test = df_test.select('id', 'text', remove_stopwords('clean_text').alias('clean_text'))
df_train.limit(10).toPandas()
# +
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
lemma = WordNetLemmatizer()
@sfun.udf
def lemmatize_words(words):
    """UDF: lemmatize each token in *words* with WordNet, trying noun, verb
    and adjective parts of speech in that order."""
    def exhaustive_lemmatization(word):
        # return the first lemma that differs from the input; otherwise the
        # word is already in base form (or unknown to WordNet)
        for pos in ['n', 'v', 'a']:
            lem = lemma.lemmatize(word, pos=pos)
            if lem != word:
                return lem
        return word
    return list(map(exhaustive_lemmatization, words))
df_train = df_train.select('id', 'text', lemmatize_words('clean_text').alias('clean_text'))
df_test = df_test.select('id', 'text', lemmatize_words('clean_text').alias('clean_text'))
df_train.limit(10).toPandas()
# +
@sfun.udf
def join_words(words):
    """UDF: rebuild a single space-separated string from the token list."""
    return ' '.join(words)
df_train = df_train.select('id', 'text', join_words('clean_text').alias('clean_text'))
df_test = df_test.select('id', 'text', join_words('clean_text').alias('clean_text'))
df_train.limit(10).toPandas()
# +
# Join tweets with their labels, then count per-word occurrences split by
# target class: each token maps to a (target_count, non_target_count) pair
# and reduceByKey sums the pairs element-wise.
df = df_train.join(df_label, on='id', how='inner')
words = df.rdd.flatMap(
    lambda row: [(word, (int(row['target']), 1 - int(row['target']))) for word in row['clean_text'].split()])
wordcount = words.reduceByKey(lambda agg, cat: (agg[0] + cat[0], agg[1] + cat[1]))
print(wordcount.count())
# top 20 words by combined (both-class) frequency
top20_words = wordcount.sortBy(keyfunc=lambda row: -sum(row[1])).take(20)
top20_words
# -
import plotly.io as pio
pio.renderers.default = 'notebook_connected'
# +
import pandas as pd
import plotly.express as px
c_words, counts = zip(*top20_words)
c_words *= 2
count1, count0 = zip(*counts)
c_counts = count0 + count1
c_target = ['0'] * len(counts) + ['1'] * len(counts)
fig = px.bar(pd.DataFrame({'word': c_words,
'count': c_counts,
'target': c_target
}),
x='word',
y='count',
text='count',
title='Top 20 Words per Target',
color='target')
fig.show()
# -
# ## 3. Feature engineering
#
# We create 3 features from the data and will evaluate performance.
#
# * `CountVectorizer`: Converts a text document to a sparse vector of token counts.
# * `TF-IDF`: Short for ‘term frequency–inverse document frequency’, is a numerical statistic that is intended to reflect how important a word is. In short we count the occurences of the word per document and normalize the count by taking the overall frequency of the term into account.
# * `Word2Vec`: A neural network model is trained to learn word associations from a large corpus of text.
# +
from pyspark.ml.feature import RegexTokenizer, CountVectorizer, HashingTF, IDF, Word2Vec
from pyspark.ml import Pipeline
regex_tokenizer = RegexTokenizer(inputCol='clean_text', outputCol='words', pattern=r'\W')
counter = CountVectorizer(inputCol='words', outputCol='features_count', vocabSize=2500, minDF=5)
hashing_tf = HashingTF(inputCol='words', outputCol='raw_features', numFeatures=2500)
idf = IDF(inputCol='raw_features', outputCol='features_tfidf', minDocFreq=5)
word2vec = Word2Vec(inputCol='words', outputCol='features_w2v', maxSentenceLength=50)
pipeline = Pipeline(stages=[regex_tokenizer,
counter,
hashing_tf,
idf,
word2vec]).fit(df_train)
df_eng_train = pipeline.transform(df_train).select('id', 'text', 'features_count', 'features_tfidf', 'features_w2v')
df_eng_test = pipeline.transform(df_test).select('id', 'text', 'features_count', 'features_tfidf', 'features_w2v')
df_eng_train.show()
# -
# ## 4. Serializing the dataframes in *Parquet* format
# !rm -r ./disaster_detection_*
# +
import glob
temp_parquet_file = os.path.join(os.path.curdir,
'disaster_detection_clean_{}')
df_eng_train.write.parquet(temp_parquet_file.format('train'), mode='overwrite')
df_eng_test.write.parquet(temp_parquet_file.format('test'), mode='overwrite')
glob.glob(temp_parquet_file.format('*'))
# -
# ## 5. Uploading the files to object cloud
# +
def upload_parquet(client, path):
    """Upload every ``*.parquet`` part file under *path* to the object-store
    bucket and return the list of object keys written.

    Keys are named '<basename of path>-<4-digit part index>.parquet'.
    """
    parts = glob.glob(os.path.join(path, '*.parquet'))
    parquets = ['{:s}-{:04d}.parquet'.format(os.path.split(path)[-1], i)
                for i in range(len(parts))]
    for part, parquet in zip(parts, parquets):
        with open(part, 'rb') as parquetF:
            client.put_object(Bucket=ibm_cloud_store_bucket,
                              Body=parquetF,
                              Key=parquet
                             )
    return parquets
client = ibm_boto3.client(service_name='s3',
ibm_api_key_id=ibm_api_key_id,
ibm_auth_endpoint="https://iam.cloud.ibm.com/oidc/token",
config=Config(signature_version='oauth'),
endpoint_url='https://s3-api.us-geo.objectstorage.service.networklayer.com')
parquets = {}
for dataset in ('train', 'test'):
parquets[dataset] = upload_parquet(client, temp_parquet_file.format(dataset))
print(parquets)
# +
import json
parquets['label'] = files['label']
client.put_object(Bucket=ibm_cloud_store_bucket,
Body=json.dumps(parquets),
Key='feature_eng_parquet_files.json')
# -
| disaster_detection.feature_eng.py37.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy as scp
from scipy.fftpack import fft
from scipy.fftpack import ifft
from scipy.fftpack import rfft
import matplotlib.pyplot as plt
dt = 0.001
t = np.arange(0, 1, dt)  # 1 s of samples at 1 kHz
# Amplitude-modulated 50 Hz sine (extra frequency components left commented out).
x_sin = t * np.sin(2 * np.pi * 50 * t) #+ np.sin(2 * np.pi * 120 * t) #+ np.sin(2 * np.pi * 180 * t) + np.sin(2 * np.pi * 240 * t)
x_cos = np.cos(2 * np.pi * 50 * t) + np.cos(2 * np.pi * 120 * t)
#x_sin_noise = x_sin + np.random.normal(size = len(x_sin))
plt.plot(t[:100], x_sin[:100])
N = len(t)
y = fft(x_sin, N)
psd = y * y.conjugate() / N  # (unnormalized) power spectral density
freq = 1 / (dt*N) * t * N    # frequency axis: bin k maps to k / (dt*N) Hz
freq_2 = 1 / (dt) * t
#L = np.arange(int(np.floor(N / 2)))
y[40:60]
plt.plot(freq, np.real(psd)) #* (psd[L] > 10000))
y_test = np.sin(2 * np.pi * t * - 50)
plt.plot(t, y_test)
# Threshold the spectrum (keep bins with power > 30) and invert to "denoise".
y_clean = y * (psd > 30)
x_clean = ifft(y_clean)
plt.plot(t[:100], np.real(x_clean)[:100])
np.random.uniform(size = (300, 300)) > 0.5
1e+02
| tmp/2_sin_example_fft.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Write fake accuracies
import tensorflow as tf
writer = tf.summary.create_file_writer('./graphs')
accuracy = [0.1, 0.4, 0.6, 0.8, 0.9, 0.95] # fake accuracy values
with writer.as_default():
    # one scalar summary per "training step" so TensorBoard draws a curve
    for step, acc in enumerate(accuracy):
        tf.summary.scalar('Accuracy', acc, step) # add summary
    writer.flush() # make sure everything is written to disk
writer.close() # not really needed, but good habit
# +
# Visualize a basic graph
"""
As TensorFlow 2.0 generally operates in eager mode, there is often no graph created, and thus no way to
visualize it.
The following is a possible workaround using @tf.function.
"""
import tensorflow as tf
from tensorflow.python.ops import summary_ops_v2
# Graph
a = tf.Variable(2, name='a')
b = tf.Variable(3, name='b')
@tf.function
def graph_to_visualize(a, b):
c = tf.add(a, b, name='Add')
# Visualize
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
graph = graph_to_visualize.get_concrete_function(a, b).graph # get graph from function
summary_ops_v2.graph(graph.as_graph_def()) # visualize
writer.close()
# +
# Visualize a giant graph
import tensorflow as tf
a = tf.Variable(2.0, name='a')
b = tf.Variable(3.0, name='b')
c = tf.Variable(7.0, name='c')
@tf.function
def graph_to_visualize(a, b, c):
d = tf.multiply(a, b, name='d-mul')
e = tf.add(b, c, name='e-add')
f = tf.subtract(e, a, name='f-sub')
g = tf.multiply(d, b, name='g-mul')
h = tf.divide(g, d, name='h-div')
i = tf.add(h, f, name='i-add')
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
graph = graph_to_visualize.get_concrete_function(a, b, c).graph # get graph from function
summary_ops_v2.graph(graph.as_graph_def()) # visualize
writer.close()
# -
# Visualize a U-Net generator
"""Using the previous techniques, it is needlessly hard to visualize a Keras model.
Instead, use the Keras TensorBoard callback when compiling your model.
We will talk more about this later.
"""
# +
# Make a simple normal distribution
import tensorflow as tf
normal_dist = tf.random.normal(shape=(10, 10, 10, 10, 10), mean=0, stddev=1) # tensor to be logged, shape is irrelevant
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
tf.summary.histogram("NormalDistribution", normal_dist, step=0) # log tensor
writer.flush()
writer.close()
# +
# Stack 100 normal distributions
import tensorflow as tf
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
for i in range(100):
normal_dist = tf.random.normal(shape=(10, 10, 10, 10, 10), mean=i/50, stddev=1) # tensor to be logged, shape is irrelevant
tf.summary.histogram("NormalDistribution", normal_dist, step=i) # summary that logs tensor
writer.flush()
writer.close()
# +
# Visualize a multimodal distribution
import tensorflow as tf
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
for i in range(100):
normal_dist_0 = tf.random.normal(shape=(10000,), mean=-i/20, stddev=0.5)
normal_dist_1 = tf.random.normal(shape=(10000,), mean=i/20, stddev=0.5)
normal_dist_cat = tf.concat([normal_dist_0, normal_dist_1], axis=0) # concatenate both normal dists
tf.summary.histogram("MultimodalDistribution", normal_dist_cat, step=i) # summary that logs tensor
writer.flush()
writer.close()
# +
# Visualize some random noise
import tensorflow as tf
image = tf.random.uniform((1, 210, 160, 3), 0, 1, dtype=tf.float32) # batch_size, height, width, channels
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
img_summary = tf.summary.image("InputImage", image, step=0)
writer.flush()
writer.close()
# +
# Visualize the first 4 examples of MNIST
import tensorflow as tf
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
images = x_train[:4].reshape((-1, 28, 28, 1))
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
tf.summary.image("TrainingExamples", images, max_outputs=4, step=0)
writer.flush()
writer.close()
# +
# Display some text
import tensorflow as tf
text = tf.convert_to_tensor('Hello world!')
writer = tf.summary.create_file_writer('./graphs')
with writer.as_default():
tf.summary.text("Text", text, step=0)
writer.flush()
writer.close()
# -
# Merge summaries
"""There is no equivalent to tf.summary.merge in TensorFlow 2.0"""
# Merge all summaries
"""There is no equivalent to tf.summary.merge_all in TensorFlow 2.0"""
# +
# Use the TensorBoard callback in a Keras model
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout

# Load MNIST and shape it for a 2D conv net: (N, 28, 28, 1) images, one-hot labels.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape((-1, 28, 28, 1))
x_test = x_test.reshape((-1, 28, 28, 1))
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(128), # we will visualize neurons in this layer
    Dropout(0.5),
    Dense(10, activation='softmax')
])
# NOTE: `write_grads` is not passed here — it is not a supported argument of
# the TF2 TensorBoard callback (ignored with a warning in early 2.x releases
# and rejected in later ones).
tensorboard_callback = tf.keras.callbacks.TensorBoard('./graphs',
                                                      histogram_freq=1, # how often to compute activation and weight histograms
                                                      write_graph=True, # visualize the graph
                                                      write_images=True, # visualize model weights as an image
                                                      # embeddings_freq=1, # how often to visualize embeddings
                                                      # embeddings_layer_names=['...'], # names of embedding layers to visualize; wouldn't work on this model
                                                      update_freq='epoch' # update TensorBoard every epoch
                                                      )
model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=32,
          epochs=15,
          verbose=1,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard_callback])
| pytorch_tutorials/using-tensorboard/tensorboard-examples_tf2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#import packages
import numpy as np
from numpy import loadtxt
import pylab as pl
from IPython import display
from RcTorch import *
from matplotlib import pyplot as plt
from scipy.integrate import odeint
import time
import matplotlib.gridspec as gridspec
#this method will ensure that the notebook can use multiprocessing (train multiple
#RC's in parallel) on jupyterhub or any other linux based system.
try:
mp.set_start_method("spawn")
except:
pass
torch.set_default_tensor_type(torch.FloatTensor)
# %matplotlib inline
start_time = time.time()
# +
# # ! pip install rctorch==0.7162
# -
lineW = 3
lineBoxW=2
plt.rcParams['text.usetex'] = True
# ### This notebook demonstrates how to use RcTorch to find optimal hyper-paramters for the differential equation $\dot y + q(t) y = f(t) $.
#
# Simple population: <font color='blue'>$\dot y + y =0$ </font>
# * Analytical solution: <font color='green'>$y = y_0 e^{-t}$</font>
#define a reparameterization function, empirically we find that g= 1-e^(-t) works well)
def reparam(t, order = 1):
    """Reparameterization g(t) = 1 - e^(-t) used to hard-wire initial conditions.

    Parameters
    ----------
    t: torch.tensor
        input time tensor
    order: int
        unused; kept for interface compatibility with callers that pass it

    Returns
    -------
    (g, g_dot): tuple of torch.tensor
        g = 1 - e^(-t) and its time derivative g' = e^(-t)
    """
    exp_t = torch.exp(-t)
    g = 1 - exp_t
    g_dot = 1 - g  # equals exp_t, since d/dt (1 - e^-t) = e^-t
    return g, g_dot
# +
def plot_predictions(RC, results, integrator_model, ax = None):
    """plots a RC prediction and integrator model prediction for comparison

    Parameters
    ----------
    RC: RcTorchPrivate.esn
        the RcTorch echostate network to evaluate. This model should already have been fit.
    results: dictionary
        the dictionary of results returned by the RC after fitting
    integrator model: function
        the model to be passed to odeint which is a gold standard integrator numerical method
        for solving ODE's written in Fortran. You may find the documentation here:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html
    ax: matplotlib.axes._subplots.AxesSubplot
        If provided, the function will plot on this subplot axes

    Notes
    -----
    Relies on the module-level globals `y0s` (initial conditions, assumed to
    line up one-to-one with results["ys"]) and `lineW` (line width).
    """
    X = RC.X.cpu()
    if not ax:
        fig, ax = plt.subplots(1,1, figsize = (6,6))
    for i, y in enumerate(results["ys"]):
        y = y.cpu()
        # label only the first pair of curves so the legend has one entry each
        if not i:
            labels = ["RC", "Integrator Solution"]
        else:
            labels = [None, None]
        ax.plot(X, y, color = "dodgerblue", label = labels[0], linewidth = lineW + 1, alpha = 0.9)
        #calculate the integrator prediction:
        int_sol = odeint(integrator_model, y0s[i], np.array(X.cpu().squeeze()))
        int_sol = torch.tensor(int_sol)
        #plot the integrator prediction
        ax.plot(X, int_sol, '--', color = "red", alpha = 0.9, label = labels[1], linewidth = lineW)
    plt.ylabel(r'$y(t)$');
    ax.legend();
    ax.tick_params(labelbottom=False)
    plt.tight_layout()
def covert_ode_coefs(t, ode_coefs):
    """ converts coefficients from the string 't**n' or 't^n' where n is any float

    Parameters
    ----------
    t: torch.tensor
        input time tensor
    ode_coefs: list
        list of associated coefficients. Items can be numbers (int/float or a
        tensor of the same type as t) or strings of the form 't**n' / 't^n'

    Returns
    -------
    ode_coefs: list
        the same list, mutated in place: every 't**n'/'t^n' string is
        replaced by the tensor t ** n
    """
    import re  # local import: `re` is not otherwise imported in this notebook
    type_t = type(t)
    for i, coef in enumerate(ode_coefs):
        if type(coef) == str:
            # BUG FIX: also accept the documented 't^n' form. The previous
            # condition only matched '*' (it tested coef[1] == "*" twice), so
            # 't^n' strings were silently left unconverted.
            if coef[0] == "t" and coef[1] in ("^", "*"):
                pow_ = float(re.sub("[^0-9.-]+", "", coef))
                ode_coefs[i] = t ** pow_
                print("alterning ode_coefs")
        elif type(coef) in [float, int, type_t]:
            pass
        else:
            assert False, "ode_coefs must be a list floats or strings of the form 't^pow', where pow is a real number."
    return ode_coefs
def plot_rmsr(RC, results, force, ax = None):
    """plots the residuals of a RC prediction directly from the loss function

    Each individual trajectory's root-mean-square residual is drawn in orange,
    and the RMSR averaged over all trajectories is drawn in blue on a log scale.

    Parameters
    ----------
    RC: RcTorchPrivate.esn
        the RcTorch echostate network to evaluate. This model should already have been fit.
    results: dictionary
        the dictionary of results returned by the RC after fitting
    force: function
        the force function describing the force term in the population equation
    ax: matplotlib.axes._subplots.AxesSubplot
        If provided, the function will plot on this subplot axes
    """
    if not ax:
        fig, ax = plt.subplots(1,1, figsize = (10, 4))
    X = RC.X.cpu()
    ys, ydots = results["ys"], results["ydots"]
    residuals = []
    force_t = force(X)  # evaluate the forcing term once on the time grid
    for i, y in enumerate(ys):
        ydot = ydots[i]
        y = y.cpu()
        ydot = ydot.cpu()
        # convert any 't^n' style coefficients before evaluating the loss
        ode_coefs = covert_ode_coefs(t = X, ode_coefs = RC.ode_coefs)
        resids = custom_loss(X, y, ydot, None,
                             force_t = force_t,
                             ode_coefs = RC.ode_coefs,
                             mean = False)  # per-point squared residuals
        if not i:
            resids_tensor = resids
            label = r'{Individual Trajectory RMSR}'
        else:
            resids_tensor = torch.cat((resids_tensor, resids), axis = 1)
            label = None
        resids_specific_rmsr = torch.sqrt(resids/1)
        ax.plot(X, resids_specific_rmsr, color = "orangered", alpha = 0.4, label = label, linewidth = lineW-1)
        residuals.append(resids)
    # RMSR averaged across all trajectories
    mean_resid = torch.mean(resids_tensor, axis =1)
    rmsr = torch.sqrt(mean_resid)
    ax.plot(X, rmsr,
            color = "blue",
            alpha = 0.9,
            label = r'{RMSR}',
            linewidth = lineW-0.5)
    ax.legend(prop={"size":16});
    ax.set_xlabel(r'$t$')
    ax.set_yscale("log")
    ax.set_ylabel(r'{RMSR}')
# -
# common cv arguments:
# shared keyword arguments reused by every EchoStateNetworkCV run below
cv_declaration_args = {"interactive" : True,
                       "batch_size" : 8, #batch size is parallel
                       "cv_samples" : 2, #number of cv_samples, random start points
                       "initial_samples" : 50, #number of random samples before optimization starts
                       "validate_fraction" : 0.3, #validation prop of tr+val sets
                       "log_score" : True, #log-residuals
                       "random_seed" : 209, # random seed
                       "ODE_order" : 1, #order of eq
                       #see turbo ref:
                       "length_min" : 2 ** (-7),#2 **(-7),
                       "success_tolerance" : 10}
# ## task 1: cross check burn in for all three experiments (burn in should be embedded into hps)
#
# +
def driven_force(X, A = 1):
    """Sinusoidal forcing term f(t) = A * sin(t).

    Parameters
    ----------
    X: torch.tensor
        the input time tensor
    A: float
        amplitude of the sine wave (default 1)

    Returns
    -------
    torch.tensor of the same shape as X holding A * sin(X)
    """
    return torch.sin(X) * A
def no_force(X):
    """Forcing term for the unforced equation: identically zero.

    Parameters
    ----------
    X: torch.tensor
        the input time tensor (ignored)

    Returns
    -------
    the scalar 0, which broadcasts cleanly against any tensor
    """
    return 0
lam = 1

def custom_loss(X, y, ydot, out_weights, lam = lam, force_t = None, reg = False,
                ode_coefs = None, init_conds = None,
                enet_alpha = None, enet_strength = None, mean = True):
    """Residual loss of the population ODE y' + lam*y = f(t).

    Parameters
    ----------
    X: torch.tensor
        the input (time t); not used directly, kept for the RC loss interface
    y: torch.tensor
        the response variable
    ydot: torch.tensor
        the time derivative of the response variable
    out_weights: torch.tensor or None
        readout weights; only relevant when elastic-net regularization is on
        (disabled in this notebook, so the argument is ignored)
    lam: float
        decay-rate coefficient of the ODE
    force_t: torch.tensor
        the force term f(t), already evaluated on the time grid
    reg, enet_alpha, enet_strength:
        elastic-net regularization switches; currently disabled and ignored
    ode_coefs, init_conds:
        kept for interface compatibility with the RC fitting code
    mean: bool
        True -> return the scalar cost (0-d tensor);
        False -> return the per-point squared residuals

    Returns
    -------
    torch.tensor: mean squared residual, or the squared-residual tensor
    """
    # squared residual of y' + lam*y - f(t) under the g(t) reparameterization
    residual = ydot + lam * y - force_t
    squared = torch.square(residual)
    return torch.mean(squared) if mean else squared
# -
#declare the initial conditions (each initial condition corresponds to a different curve)
y0s = np.arange(0.1, 2.1, 0.1)
len(y0s)
# ### Simple population
# +
#declare the bounds dict. We search for the variables within the specified bounds.
# if a variable is declared as a float or integer like n_nodes or dt, these variables are fixed.
bounds_dict = {"connectivity" : (-2.2, -0.12), #log space
               "spectral_radius" : (1, 10), #lin space
               "n_nodes" : 250,
               "regularization" : (-4, 4), #log space
               "leaking_rate" : (0, 1), #linear space
               "dt" : -2.5, #log space
               "bias": (-0.75,0.75) #linear space
               }
#set up data
x0, xf = 0, 5
# number of steps implied by the fixed time step 10**dt over [x0, xf]
nsteps = int(abs(xf - x0)/(10**bounds_dict["dt"]))
xtrain = torch.linspace(x0, xf, nsteps, requires_grad=False).view(-1,1)
int(xtrain.shape[0] * 0.5)
# -
# %%time
#declare the esn_cv optimizer: this class will run bayesian optimization to optimize the bounds dict.
#for more information see the github.
esn_cv = EchoStateNetworkCV(bounds = bounds_dict,
                            esn_burn_in = 500, #states to throw away before calculating output
                            subsequence_length = int(xtrain.shape[0] * 0.8), #combine len of tr + val sets
                            **cv_declaration_args
                            )
#optimize the network:
simple_pop_hps = esn_cv.optimize(x = xtrain,
                                 reparam_f = reparam,
                                 ODE_criterion = custom_loss,
                                 init_conditions = [y0s],
                                 force = no_force,
                                 ode_coefs = [1, 1],
                                 n_outputs = 1,
                                 reg_type = "simple_pop")
# +
# %%time
pop_RC = EchoStateNetwork(**simple_pop_hps,
random_state = 209,
dtype = torch.float32)
train_args = {"X" : xtrain.view(-1,1),
"burn_in" : 500,
"ODE_order" : 1,
"force" : no_force,
"reparam_f" : reparam,
"ode_coefs" : [1, 1]}
pop_results = pop_RC.fit(init_conditions = [y0s,1],
SOLVE = True,
train_score = True,
ODE_criterion = custom_loss,
**train_args)
# -
def simple_pop(y, t, t_pow = 0, force_k = 0, k = 1):
    """Right-hand side of the simple population ODE for scipy's odeint:
    dy/dt = -k * y * t**t_pow + force_k * sin(t).
    Defaults give the unforced equation dy/dt = -y."""
    decay = -k * y * t ** t_pow
    forcing = force_k * np.sin(t)
    return decay + forcing
# +
#TODO: show results outside BO range
# +
# some particularly good runs:
# simple_pop_hps = {'dt': 0.0031622776601683794,
# 'n_nodes': 250,
# 'connectivity': 0.13615401772200952,
# 'spectral_radius': 4.1387834548950195,
# 'regularization': 0.00028325262824591835,
# 'leaking_rate': 0.2962796092033386,
# 'bias': -0.5639935731887817}
# opt_hps = {'dt': 0.0031622776601683794,
# 'n_nodes': 250,
# 'connectivity': 0.7170604557008349,
# 'spectral_radius': 1.5755887031555176,
# 'regularization': 0.00034441529823729916,
# 'leaking_rate': 0.9272222518920898,
# 'bias': 0.1780446171760559}
# opt_hps = {'dt': 0.0017782794100389228,
# 'n_nodes': 250,
# 'connectivity': 0.11197846061157432,
# 'spectral_radius': 1.7452095746994019,
# 'regularization': 0.00012929296298723957,
# 'leaking_rate': 0.7733328938484192,
# 'bias': 0.1652531623840332}
# +
# Top panel: RC vs. odeint trajectories; bottom panel: residual RMSR.
fig = plt.figure(figsize = (9, 7)); gs1 = gridspec.GridSpec(3, 3);
ax = plt.subplot(gs1[:-1, :])
gts = plot_predictions(RC = pop_RC,
                       results = pop_results,
                       integrator_model = simple_pop,
                       ax = ax)
ax = plt.subplot(gs1[-1, :])
plot_data = plot_rmsr(pop_RC,
                      results = pop_results,
                      force = no_force,
                      ax = ax)
# -
# ### Driven population:
#declare the bounds dict. We search for the variables within the specified bounds.
# if a variable is declared as a float or integer like n_nodes or dt, these variables are fixed.
bounds_dict = {"connectivity" : (-2, -0.12), #log space
"spectral_radius" : (1, 10), #lin space
"n_nodes" : 400,
"regularization" : (-4, 4), #log space
"leaking_rate" : (0, 1), #linear space
"dt" : -2.5, #log space
"bias": (-0.75,0.75) #linear space
}
# %%time
#declare the esn_cv optimizer: this class will run bayesian optimization to optimize the bounds dict.
#for more information see the github.
esn_cv = EchoStateNetworkCV(bounds = bounds_dict,
esn_burn_in = 500, #states to throw away before calculating output
subsequence_length = int(xtrain.shape[0] * 0.8), #combine len of tr + val sets
**cv_declaration_args
)
#optimize the network:
driven_pop_hps = esn_cv.optimize(x = xtrain,
reparam_f = reparam,
ODE_criterion = custom_loss,
init_conditions = [y0s],
force = driven_force,
ode_coefs = [1, 1],
n_outputs = 1,
reg_type = "driven_pop")
y0s = np.arange(-10, 10.1, 1)
len(y0s)
# +
# %%time
driven_RC = EchoStateNetwork(**driven_pop_hps,
random_state = 209,
dtype = torch.float32)
train_args = {"X" : xtrain.view(-1,1),
"burn_in" : 500,
"ODE_order" : 1,
"force" : driven_force,
"reparam_f" : reparam,
"ode_coefs" : [1, 1]}
driven_results = driven_RC.fit(init_conditions = [y0s,1],
SOLVE = True,
train_score = True,
ODE_criterion = custom_loss,
**train_args)
# -
def driven_pop(y, t, t_pow = 0, force_k = 1, k = 1):
    """Right-hand side of the driven population ODE for scipy's odeint:
    dy/dt = -k * y * t**t_pow + force_k * sin(t)
    (sinusoidal forcing enabled by default)."""
    return force_k * np.sin(t) - k * y * t ** t_pow
driven_pop_hps
# +
fig = plt.figure(figsize = (9, 7)); gs1 = gridspec.GridSpec(3, 3);
ax = plt.subplot(gs1[:-1, :])
gts = plot_predictions(RC = driven_RC,
results = driven_results,
integrator_model = driven_pop,
ax = ax)
ax = plt.subplot(gs1[-1, :])
plot_data = plot_rmsr(driven_RC,
results = driven_results,
force = driven_force,
ax = ax)
# -
# #### Driven t^2 Population:
#declare the initial conditions (each initial condition corresponds to a different curve)
y0s = np.arange(-10, 10.1, 0.1)
len(y0s)
np.log10(0.005)
# +
#declare the bounds dict. We search for the variables within the specified bounds.
# if a variable is declared as a float or integer like n_nodes or dt, these variables are fixed.
t2_hps = {'n_nodes': 500,
'connectivity': 0.09905712745750006,
'spectral_radius': 1.8904799222946167,
'regularization': 714.156090350679,
'leaking_rate': 0.031645022332668304,
'bias': -0.24167031049728394,
'dt' : 0.005}
bounds_dict = {"connectivity" : (-1.1, -0.9), #log space
"spectral_radius" : (1.8, 2.0), #lin space
"n_nodes" : 500,
"regularization" : (2.5, 3.5), #log space
"leaking_rate" : (0.02, .04), #linear space
"dt" : -2.3, #log space
"bias": (0,1) #linear space
}
# -
# %%time
#declare the esn_cv optimizer: this class will run bayesian optimization to optimize the bounds dict.
#for more information see the github.
esn_cv = EchoStateNetworkCV(bounds = bounds_dict,
esn_burn_in = 1000, #states to throw away before calculating output
subsequence_length = int(xtrain.shape[0] * 0.8), #combine len of tr + val sets
**cv_declaration_args
)
#optimize the network:
t2_pop_hps = esn_cv.optimize(x = xtrain,
reparam_f = reparam,
ODE_criterion = custom_loss,
init_conditions = [y0s],
force = driven_force,
ode_coefs = ["t^2", 1],
n_outputs = 1,
reg_type = "driven_pop")
# +
#solution run:
# t2_hps = {'n_nodes': 500,
# 'connectivity': 0.09905712745750006,
# 'spectral_radius': 1.8904799222946167,
# 'regularization': 714.156090350679,
# 'leaking_rate': 0.031645022332668304,
# 'bias': -0.24167031049728394,
# 'dt' : 0.005}
# -
def t2_pop(y, t, t_pow = 2, force_k = 1, k = 1):
    """Right-hand side of the t^2-coefficient population ODE for odeint:
    dy/dt = -k * y * t**2 + force_k * sin(t)."""
    return force_k * np.sin(t) - k * y * t ** t_pow
# +
# %%time
t2_RC = EchoStateNetwork(**t2_pop_hps,
random_state = 209,
dtype = torch.float32)
train_args = {"X" : xtrain.view(-1,1),
"burn_in" : 1000,
"ODE_order" : 1,
"force" : driven_force,
"reparam_f" : reparam,
"ode_coefs" : ["t^2", 1]}
t2_results = t2_RC.fit(init_conditions = [y0s,1],
SOLVE = True,
train_score = True,
ODE_criterion = custom_loss,
**train_args)
# -
t2_RC.ode_coefs[0]
# +
fig = plt.figure(figsize = (9, 7)); gs1 = gridspec.GridSpec(3, 3);
ax = plt.subplot(gs1[:-1, :])
gts = plot_predictions(RC = t2_RC,
results = t2_results,
integrator_model = t2_pop,
ax = ax)
ax = plt.subplot(gs1[-1, :])
plot_data = plot_rmsr(t2_RC,
results = t2_results,
force = driven_force,
ax = ax)
# -
end_time = time.time()
print(f'Total notebook runtime: {end_time - start_time:.2f} seconds')
| final_notebooks/.ipynb_checkpoints/Linear_BO-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
import re
# +
#load list of links in
import pickle
with open('pickle-files/link_list_1004.pkl', 'rb') as picklefile:
link_list = pickle.load(picklefile)
#check how many url lists are loaded
print(len(link_list))
# -
def build_dict(test_soup):
    """Scrape one 42floors listing page (a parsed BeautifulSoup tree) into a
    flat dict of listing attributes.

    Missing optional fields are recorded as the string 'None'; amenity and
    feature flags are recorded as '1'/'0' strings. Required fields (title,
    sqft, address, floor level) raise — usually AttributeError — when the
    element is absent; the caller catches everything and logs the URL.
    """
    test = {}
    # listing title - string (required)
    test['title'] = test_soup.find(class_='bold margin-none').text.strip()
    #square feet - num
    test['sqft'] = (test_soup.find('div',itemtype = 'http://schema.org/Offer')
                    .find(class_='listing-size col-5-sm col-3-md')
                    .text.strip().rstrip('sqft').rstrip()
                    )
    # location address - string
    test['address'] = test_soup.find(text=re.compile('NY')).strip()
    # floor level - num
    test['floor_level'] = test_soup.find(class_='listing-name col-6-sm col-3-md col-center').text.strip()
    f_xpath = test_soup.find(class_='features section')
    # building construction year - num
    if f_xpath.find(text=re.compile('Constructed')):
        test['construct_year'] = (f_xpath.find(text=re.compile('Constructed'))
                                  .parent.parent.find(class_='strong').text.strip()
                                  )
    else:
        test['construct_year'] = 'None'
    # building renovation year - num
    if f_xpath.find(text=re.compile('Renovated')):
        test['renovate_year'] = (f_xpath.find(text=re.compile('Renovated'))
                                 .parent.parent.find(class_='strong')
                                 .text.strip()
                                 )
    else:
        test['renovate_year'] = 'None'
    # metro transition lines near the building - string
    if test_soup.find(class_='features section').find(text=re.compile('Public Transit')):
        test['public_transit'] = (test_soup.find(class_='features section')
                                  .find(text=re.compile('Public Transit'))
                                  .parent.parent.find(class_='strong')
                                  .text
                                  .strip()
                                  )
    else:
        test['public_transit'] = 'None'
    # building class - categorical
    if f_xpath.find(text=re.compile('Building')):
        test['building_class'] = (f_xpath.find(text=re.compile('Building'))
                                  .parent.parent.find(class_='strong')
                                  .text.strip()
                                  )
    else:
        test['building_class'] = 'None'
    #Common Kitchen
    if f_xpath.find(text=re.compile('Kitchen')):
        test['common_kitchen'] = '1'
    else:
        test['common_kitchen'] = '0'
    # Showers
    if f_xpath.find(text=re.compile('Showers')):
        test['showers'] = '1'
    else:
        test['showers'] = '0'
    # Key Card Access
    if f_xpath.find(text=re.compile('Key Card Access')):
        test['key_card_access'] = '1'
    else:
        test['key_card_access'] = '0'
    # On site Security
    if f_xpath.find(text=re.compile('Security')):
        test['on_site_security'] = '1'
    else:
        test['on_site_security'] = '0'
    #list posted date - datetime
    post_xpath = test_soup.find(class_='listing-touched_at col-3-md hide-sm')
    if post_xpath:
        test['post_date'] = post_xpath.text.strip().split('\n')[0]
    else:
        test['post_date'] = 'None'
    features_xpath = test_soup.find('div',class_='features grid grid-top grid-nest margin-v')
    #lease term length - num
    if features_xpath.find(text=re.compile('Term')):
        test['term_length'] = (features_xpath.find(text=re.compile('Term'))
                               .parent.parent.find('span', class_='text-nowrap text-bold')
                               .text)
    else:
        test['term_length'] = 'None'
    #building construction type -
    # NOTE: f_xpath is rebound here to a different page section
    f_xpath = test_soup.find("div",class_="grid grid-nest grid-top")
    if f_xpath.find(text=re.compile('Construction Type')):
        test['construction_type'] = (f_xpath.find(text=re.compile('Construction Type'))
                                     .parent.parent.find('div',class_='strong')
                                     .text
                                     .strip())
    else:
        test['construction_type'] = 'None'
    #Amenities
    amenity_xpath = test_soup.find("div",class_="margin-v grid grid-top grid-nest")
    #Furniture
    if amenity_xpath:
        # NOTE(review): this loop repeats the same 'Kitchen' check once per
        # child node of the amenities section — the net effect is a single
        # check, but 'furniture' is only set when the section has children.
        for i in amenity_xpath:
            if amenity_xpath.find(text=re.compile('Kitchen')):
                test['furniture'] = '1'
            else:
                test['furniture'] = '0'
        #Turnkey
        if amenity_xpath.find(text=re.compile('Turnkey')):
            test['turnkey'] = '1'
        else:
            test['turnkey'] = '0'
        #Natural Light
        if amenity_xpath.find(text=re.compile('Natural Light')):
            test['natural_light'] = '1'
        else:
            test['natural_light'] = '0'
        #High Ceilings
        if amenity_xpath.find(text=re.compile('High Ceilings')):
            test['high_ceilings'] = '1'
        else:
            test['high_ceilings'] = '0'
        #Plug and Play
        if amenity_xpath.find(text=re.compile('Plug')):
            test['plug_and_play'] = '1'
        else:
            test['plug_and_play'] = '0'
    #price rate
    rate_xpath = test_soup.find(class_='listing-rate col-2-md hide-sm ')
    if rate_xpath:
        test['rate_price'] = rate_xpath.text.strip().split('\n')[0]
        test['rate_term'] = rate_xpath.text.strip().split('\n')[1].strip().lstrip('/')
    else:
        test['rate_price'] = 'None'
        test['rate_term'] = 'None'
    return test
# +
# Crawl every listing URL, parse it, and collect the results; failing URLs
# are recorded in error_list so they can be inspected/retried later.
master_data_list = []
error_list = []
good_list = []
for url in link_list:
    test_url = 'https://42floors.com'+url
    test_page = requests.get(test_url).text
    test_soup = BeautifulSoup(test_page, "lxml")
    try:
        master_data_list.append(build_dict(test_soup))
        good_list.append(url)
    except:
        # any parse failure (layout change, missing element) just logs the url
        error_list.append(url)
        pass
print (len(master_data_list),len(error_list))
# -
print(len(master_data_list))
#save data
with open('pickle-files/master_data_1007.pkl', 'wb') as picklefile:
    pickle.dump(master_data_list,picklefile)
| code/2-scrape-date-from-link-lists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #GFF plotter
# ##Helping hands
#
# http://nbviewer.ipython.org/github/herrfz/dataanalysis/blob/master/week2/getting_data.ipynb
#
# http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/master/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb
#
# ##Imports
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib as plt
#plt.use('TkAgg')
import operator
import pylab
pylab.show()
# %pylab inline
# -
# ##Definitions
# +
fileUrl = "https://dl.dropboxusercontent.com/u/753166/jbrowse/gbrowse_repeats.gff3.Gypsy.gff3"
FULL_FIG_W , FULL_FIG_H = 16, 8
CHROM_FIG_W, CHROM_FIG_H = FULL_FIG_W, 20
# -
# ##Setup
# ###Figure sizes controller
class size_controller(object):
    """Context manager that temporarily overrides matplotlib's figure size.

    Usage: ``with size_controller(w, h): <plotting code>`` — the previous
    ``figure.figsize`` rcParam is restored on exit, even if plotting raises.
    """
    def __init__(self, w, h):
        self.w = w
        self.h = h
    def __enter__(self):
        # Use plt.rcParams explicitly: the bare `rcParams` name only exists
        # after the %pylab magic populates the namespace, so the original
        # code raised NameError when this file ran as a plain script.
        # (Under %pylab, `rcParams` IS plt.rcParams, so behavior is unchanged.)
        self.o = plt.rcParams['figure.figsize']
        plt.rcParams['figure.figsize'] = self.w, self.h
        return None
    def __exit__(self, type, value, traceback):
        plt.rcParams['figure.figsize'] = self.o
# ###Column type definition
# +
col_type_int = np.int64
col_type_flo = np.float64
col_type_str = np.str_ #np.object
col_type_char = np.character
col_info =[
[ "chromosome", col_type_str ],
[ "source" , col_type_str ],
[ "type" , col_type_str ],
[ "start" , col_type_int ],
[ "end" , col_type_int ],
[ "qual" , col_type_int ],
[ "strand" , col_type_char ],
[ "frame" , col_type_char ],
[ "info" , col_type_str ],
]
col_names=[cf[0] for cf in col_info]
col_types=dict(zip([c[0] for c in col_info], [c[1] for c in col_info]))
col_types
# -
# ##Read GFF
# ###Parse INFO column
# +
info_keys = set()

def filter_conv(fi):
    """Parse one GFF INFO-column entry into a dict.

    ``"a=1;INDEL;b=2"`` becomes ``{"a": "1", "INDEL": True, "b": "2"}``:
    ``key=value`` fields keep their string value, bare flags map to True.
    Every key seen is also accumulated in the module-level ``info_keys`` set,
    which is later used to split the INFO column into real columns.
    """
    global info_keys
    parsed = {}
    for field in fi.split(";"):
        parts = field.split("=")
        info_keys.add(parts[0])
        if len(parts) == 2:
            # key/value pair
            parsed[parts[0]] = parts[1]
        else:
            # flag such as INDEL (no '=' — or a malformed multi-'=' field)
            parsed[parts[0]] = True
    return parsed
# -
# ###Read GFF
# http://nbviewer.ipython.org/github/herrfz/dataanalysis/blob/master/week2/getting_data.ipynb
# Read the GFF3 file as a tab-separated table; the INFO column is parsed into
# a dict by filter_conv, and the first SKIP_ROWS header lines are dropped.
CONVERTERS = {
    'info': filter_conv
}
SKIP_ROWS = 3
NROWS = None
#index_col=['chromosome', 'start'], usecols=col_names,
gffData = pd.read_csv(fileUrl, header=None, names=col_names, dtype=col_types, nrows=NROWS, skiprows=SKIP_ROWS, converters=CONVERTERS, verbose=True, delimiter="\t", comment="#")
print gffData.shape
gffData.head()
# ###Add length column
gffData['length'] = gffData['end'] - gffData['start']
gffData.head()
# ###Split INFO column
info_keys = list(info_keys)
info_keys.sort()
info_keys
info_keys_types = {
'score': col_type_int
}
# +
def gen_val_extracter(info_keys_g):
    """Build a row-wise extractor for the parsed INFO dicts.

    The returned function maps one INFO dict to a list of values ordered by
    ``info_keys_g``; keys absent from the row yield None. It is intended for
    ``Series.apply`` (hence the ignored **kwargs such as ``axis``).
    """
    def val_extracter_l(info_row, **kwargs):
        vals = [None for _ in info_keys_g]
        for key, value in info_row.items():
            try:
                vals[info_keys_g.index(key)] = value
            except ValueError:
                # key not tracked in info_keys_g: ignore it
                pass
        return vals
    return val_extracter_l
gffData[info_keys] = gffData['info'].apply(gen_val_extracter(info_keys), axis=1).apply(pd.Series, 1)
gffData.head()
# -
# ##Good part
# http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/master/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb
#
# http://pandas.pydata.org/pandas-docs/dev/visualization.html
#
# https://bespokeblog.wordpress.com/2011/07/11/basic-data-plotting-with-matplotlib-part-3-histograms/
#
# http://nbviewer.ipython.org/github/mwaskom/seaborn/blob/master/examples/plotting_distributions.ipynb
#
# http://nbviewer.ipython.org/github/herrfz/dataanalysis/blob/master/week3/exploratory_graphs.ipynb
#
# http://pandas.pydata.org/pandas-docs/version/0.15.0/visualization.html
#
# http://www.gregreda.com/2013/10/26/working-with-pandas-dataframes/
# ###Column types
gffData.dtypes
# ###Global statistics
gffData.describe()
# ###List of chromosomes
chromosomes = np.unique(gffData['chromosome'].values)
chromosomes
# ###Quality distribution
with size_controller(FULL_FIG_W, FULL_FIG_H):
bq = gffData.boxplot(column='qual', return_type='dict')
# ###Quality distribution per chromosome
with size_controller(FULL_FIG_W, FULL_FIG_H):
bqc = gffData.boxplot(column='qual', by='chromosome', return_type='dict')
# ###Start position distribution per chromosome
with size_controller(FULL_FIG_W, FULL_FIG_H):
bqc = gffData.boxplot(column='start', by='chromosome', return_type='dict')
# ###Position distribution
with size_controller(FULL_FIG_W, FULL_FIG_H):
hs = gffData['start'].hist()
# ###Position distribution per chromosome
hsc = gffData['start'].hist(by=gffData['chromosome'], figsize=(CHROM_FIG_W, CHROM_FIG_H), layout=(len(chromosomes),1))
# ###Length distribution
with size_controller(FULL_FIG_W, FULL_FIG_H):
hl = gffData['length'].hist()
# ###Length distribution per chromosome
hlc = gffData['length'].hist(by=gffData['chromosome'], figsize=(CHROM_FIG_W, CHROM_FIG_H), layout=(len(chromosomes),1))
# +
#http://stackoverflow.com/questions/27934885/how-to-hide-code-from-cells-in-ipython-notebook-visualized-with-nbviewer
# Emit a button that hides/shows all code cells when the notebook is rendered.
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
 var classes_to_hide = ['div.input', 'div.output_stderr', 'div.output_prompt', 'div.input_prompt', 'div.prompt'];
 if (code_show){
 for ( var c in classes_to_hide ) {
   $(classes_to_hide[c]).hide();
 }
 } else {
 for ( var c in classes_to_hide ) {
   $(classes_to_hide[c]).show();
 }
 }
 code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
| opticalmapping/gff_reader.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import time
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
# +
# Load the UCI glass dataset and eyeball its shape and summary statistics.
filename_glass = './glass.csv'
df_glass = pd.read_csv(filename_glass)
print(df_glass.shape)
# `display` is a notebook-injected builtin; this cell is meant to run in Jupyter.
display(df_glass.head())
display(df_glass.describe())
# +
def get_train_test(df, y_col, x_cols, ratio):
    """Randomly split *df* into train/test partitions.

    A row lands in the training set with probability *ratio*, so the split
    is only approximately ratio/(1-ratio) and depends on the numpy RNG state.

    Returns (df_train, df_test, X_train, Y_train, X_test, Y_test).
    """
    in_train = np.random.rand(len(df)) < ratio
    df_train = df[in_train]
    df_test = df[~in_train]
    return (df_train, df_test,
            df_train[x_cols].values, df_train[y_col].values,
            df_test[x_cols].values, df_test[y_col].values)
# Target column and feature columns (every column except 'Type').
y_col_glass = 'Type'
x_cols_glass = list(df_glass.columns.values)
x_cols_glass.remove(y_col_glass)
train_test_ratio = 0.7
df_train, df_test, X_train, Y_train, X_test, Y_test = get_train_test(df_glass, y_col_glass, x_cols_glass, train_test_ratio)
# -
# Candidate models keyed by display name; batch_classify slices this dict
# with `no_classifiers`, so insertion order determines which ones run.
dict_classifiers = {
    "Logistic Regression": LogisticRegression(),
    "Nearest Neighbors": KNeighborsClassifier(),
    "Linear SVM": SVC(),
    "Gradient Boosting Classifier": GradientBoostingClassifier(n_estimators=1000),
    "Decision Tree": tree.DecisionTreeClassifier(),
    "Random Forest": RandomForestClassifier(n_estimators=1000),
    "Neural Net": MLPClassifier(alpha = 1),
    "Naive Bayes": GaussianNB(),
    #"AdaBoost": AdaBoostClassifier(),
    #"QDA": QuadraticDiscriminantAnalysis(),
    #"Gaussian Process": GaussianProcessClassifier()
}
# +
def batch_classify(X_train, Y_train, X_test, Y_test, no_classifiers = 5, verbose = True):
    """
    Fit up to *no_classifiers* models from the global ``dict_classifiers``.

    Takes the X, Y matrices of the train and test sets, fits each selected
    classifier, and returns a dict mapping classifier name to its fitted
    model, train/test accuracy and wall-clock training time (a dict is easy
    to persist with the pickle module).

    Note: SVM, Random Forest and Gradient Boosting can be slow to train —
    try a smaller dataset first before deciding which ones to keep.
    """
    dict_models = {}
    for classifier_name, classifier in list(dict_classifiers.items())[:no_classifiers]:
        # BUG FIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
        # perf_counter() is the documented replacement for interval timing.
        t_start = time.perf_counter()
        classifier.fit(X_train, Y_train)
        t_end = time.perf_counter()
        t_diff = t_end - t_start

        train_score = classifier.score(X_train, Y_train)
        test_score = classifier.score(X_test, Y_test)
        dict_models[classifier_name] = {'model': classifier, 'train_score': train_score, 'test_score': test_score, 'train_time': t_diff}
        if verbose:
            print("trained {c} in {f:.2f} s".format(c=classifier_name, f=t_diff))
    return dict_models
def display_dict_models(dict_models, sort_by='test_score'):
    """Show the batch_classify results as a DataFrame sorted by *sort_by*.

    Relies on the notebook-provided ``display`` function for rendering.
    """
    # Build the frame directly from records instead of pre-allocating a zero
    # matrix and assigning row by row with .loc (slow and type-unstable).
    rows = [
        {'classifier': name,
         'train_score': scores['train_score'],
         'test_score': scores['test_score'],
         'train_time': scores['train_time']}
        for name, scores in dict_models.items()
    ]
    df_ = pd.DataFrame(rows, columns=['classifier', 'train_score', 'test_score', 'train_time'])
    display(df_.sort_values(by=sort_by, ascending=False))
# -
# Train all eight configured classifiers and summarise their scores.
dict_models = batch_classify(X_train, Y_train, X_test, Y_test, no_classifiers = 8)
display_dict_models(dict_models)
| classification examples/simple classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="QdRPzSdYSAbf" outputId="7ab4726a-80d3-4bed-fbf2-749e264c5e4f"
import os
import json
import csv
import numpy as np
from lightgbm import LGBMClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import RFE
from tqdm import tqdm
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction import FeatureHasher
# 데이터 파일 경로: 본인의 컴퓨터에 맞게 수정
file_path = "/Users/bumseok/workspace/2021_Information_protection/Project 2/데이터"
# + id="bNVKxXs7SAbk"
SEED = 41

# helper function for read csv file
def read_label_csv(path):
    """Read a `filename,label` CSV (header skipped) into a dict.

    :param path: path to the label CSV file
    :return: dict mapping file name -> int label
    """
    label_table = {}
    with open(path, "r", encoding="ISO-8859-1") as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for row in reader:
            # Skip blank lines, which the old manual split() crashed on.
            if not row:
                continue
            fname, label = row[0], row[1]
            label_table[fname] = int(label)
    return label_table
# helper function for read json file
def read_json(path):
    """Parse the JSON document stored at *path* and return it."""
    with open(path, "r") as handle:
        raw_text = handle.read()
    return json.loads(raw_text)
# helper function for test model with these 8 algorithm
def load_model(**kwargs):
    """Instantiate the classifier named by ``kwargs["model"]``.

    Supported keys: rf, dt, lgb, svm, lr, knn, adaboost, mlp.
    ``kwargs["random_state"]`` seeds every estimator that accepts one.
    Prints "Unsupported Algorithm" and returns None for unknown keys.
    """
    # Lazy factories: kwargs["random_state"] is only read when the chosen
    # algorithm actually needs it (knn does not), matching the old branches.
    factories = {
        "rf": lambda: RandomForestClassifier(random_state=kwargs["random_state"], n_jobs=4),
        "dt": lambda: DecisionTreeClassifier(random_state=kwargs["random_state"]),
        "lgb": lambda: LGBMClassifier(random_state=kwargs["random_state"]),
        "svm": lambda: SVC(random_state=kwargs["random_state"], probability=True),
        "lr": lambda: LogisticRegression(
            random_state=kwargs["random_state"], n_jobs=-1, max_iter=25000
        ),
        "knn": lambda: KNeighborsClassifier(n_jobs=-1),
        "adaboost": lambda: AdaBoostClassifier(random_state=kwargs["random_state"]),
        "mlp": lambda: MLPClassifier(random_state=kwargs["random_state"]),
    }
    factory = factories.get(kwargs["model"])
    if factory is None:
        print("Unsupported Algorithm")
        return None
    return factory()
def train(X_train, y_train, model):
    """Fit the classifier named by *model* on the training data.

    :param X_train: 2-D feature matrix
    :param y_train: 1-D label vector
    :param model: string key of the ML algorithm to use (see load_model)
    :return: the fitted estimator
    """
    estimator = load_model(model=model, random_state=SEED)
    estimator.fit(X_train, y_train)
    return estimator
def evaluate(X_test, y_test, model):
    """Print the accuracy of a fitted *model* on validation data.

    :param X_test: 2-D feature matrix to score
    :param y_test: 1-D label vector
    :param model: a fitted estimator with a ``score`` method
    """
    # The previous version also called model.predict() and discarded the
    # result — model.score() predicts internally, so inference ran twice.
    print(f"{model}정확도", model.score(X_test, y_test))
# + [markdown] id="bpemrbrwSAbq"
# ## 특징 벡터 생성 예시
# - PEMINER 정보는 모두 수치형 데이터이므로 특별히 가공을 하지 않고 사용 가능
# - EMBER, PESTUDIO 정보는 가공해서 사용해야 할 특징들이 있음 (e.g. imports, exports 등의 문자열 정보를 가지는 데이터)
# - 수치형 데이터가 아닌 데이터(범주형 데이터)를 어떻게 가공할 지가 관건 >> 인코딩 (e.g. 원핫인코딩, 레이블인코딩 등)
# + id="p3rAFyogSAbz"
from sklearn.feature_extraction import FeatureHasher
class PeminerParser:
    """Turn a PEMINER JSON report into a flat numeric feature vector."""

    def __init__(self, path):
        self.report = read_json(path)
        self.vector = []

    def process_report(self):
        """Use every field of the report, ordered deterministically by key."""
        self.vector = [self.report[key] for key in sorted(self.report)]
        return self.vector
class EmberParser:
    """Turn an EMBER JSON report into a flat numeric feature vector."""

    def __init__(self, path):
        self.report = read_json(path)
        self.vector = []

    def get_histogram_info(self):
        """Byte histogram normalised to a probability distribution."""
        histogram = np.array(self.report["histogram"])
        total = histogram.sum()
        vector = histogram / total
        return vector.tolist()

    def get_string_info(self):
        """Scalar string stats plus the normalised printable-char distribution."""
        strings = self.report["strings"]
        # Guard against division by zero when no printable characters exist.
        hist_divisor = (
            float(strings["printables"]) if strings["printables"] > 0 else 1.0
        )
        parseList = [
            "numstrings",
            "avlength",
            "printables",
            "entropy",
            "paths",
            "urls",
            "registry",
            "MZ",
        ]
        vector = [strings[key] for key in parseList]
        vector += (np.asarray(strings["printabledist"]) / hist_divisor).tolist()
        return vector

    def get_general_file_info(self):
        """General PE attributes: sizes, counts and presence flags."""
        general = self.report["general"]
        parseList = [
            "size",
            "vsize",
            "has_debug",
            "exports",
            "imports",
            "has_relocations",
            "has_resources",
            "has_signature",
            "has_tls",
            "symbols",
        ]
        return [general[key] for key in parseList]

    ##############################
    # Hand-added feature helpers #
    ##############################
    # Feature-hash the imported libraries and imported symbols.
    def get_imports_info(self):
        data = self.report["imports"]
        vector = []
        libraries = list(set([l.lower() for l in data]))
        vector.extend(
            FeatureHasher(256, input_type="string").transform([libraries]).toarray()[0]
        )  # libraries_hashed
        imports = [lib.lower() + ":" + e for lib, elist in data.items() for e in elist]
        vector.extend(
            FeatureHasher(1024, input_type="string").transform([imports]).toarray()[0]
        )  # imports_hashed
        return vector

    # Malware is expected to export comparatively many symbols.
    def get_exports_info(self):
        exports = self.report["exports"]
        vector = []
        vector.extend(
            FeatureHasher(128, input_type="string").transform([exports]).toarray()[0]
        )  # exports_hashed
        return vector

    # Suspicious samples often carry inconsistent header sizes/versions.
    def get_header_info(self):
        header = self.report["header"]
        parseList = [
            "major_image_version",
            "minor_image_version",
            "major_linker_version",
            "minor_linker_version",
            "major_operating_system_version",
            "major_subsystem_version",
            "sizeof_code",
            "sizeof_headers",
            "sizeof_heap_commit",
        ]
        vector = [header["coff"]["timestamp"]]
        vector += [header["optional"][key] for key in parseList]
        return vector

    def process_report(self):
        """Concatenate every sub-feature vector into one flat vector.

        BUG FIX: the original appended get_general_file_info() twice
        (once at the top and again under "additional features"), duplicating
        those ten features; it is now added exactly once.
        """
        vector = []
        vector += self.get_general_file_info()
        vector += self.get_histogram_info()
        vector += self.get_string_info()
        # Additional features
        vector += self.get_header_info()
        vector += self.get_exports_info()
        vector += self.get_imports_info()
        return vector
class PestudioParser:
    """Turn a PESTUDIO JSON report into a small feature vector.

    Each helper returns a one-element list; a missing section maps to [-1]
    (except the certificate, which maps to [0]) and "n/a" maps to [0].
    """

    def __init__(self, path):
        self.report = read_json(path)
        self.vector = []

    """
    사용할 특징을 선택하여 벡터화 할 것을 권장
    """

    # Fraction of libraries whose blacklist flag is "x".
    def get_libraries_info(self):
        try:
            libraries = self.report["image"]["libraries"]
        except KeyError:
            # Section absent from the report.
            return [-1]
        # NOTE(review): when this section is the usual {"library": [...]}
        # mapping, len(libraries) == 1 and the function returns [0] without
        # inspecting the list — confirm this early-exit is intended.
        if libraries == "n/a" or len(libraries) == 1:
            return [0]
        try:
            vector = [
                sum(1 for i in libraries["library"] if i["@blacklist"] == "x")
                / len(libraries["library"])
            ]
        except TypeError:
            # A single entry is decoded as a dict rather than a list of dicts;
            # iterating it yields string keys, so i["@blacklist"] raises TypeError.
            vector = [1 if libraries["library"]["@blacklist"] == "x" else 0]
        return vector

    # Fraction of imports whose blacklist flag is "x".
    def get_imports_info(self):
        try:
            imports = self.report["image"]["imports"]
        except KeyError:
            return [-1]
        if imports == "n/a" or len(imports) == 1:
            return [0]
        try:
            vector = [
                sum(1 for i in imports["import"] if i["@blacklist"] == "x")
                / len(imports["import"])
            ]
        except TypeError:
            # Single import entry decoded as a dict (see get_libraries_info).
            vector = [1 if imports["import"]["@blacklist"] == "x" else 0]
        return vector

    # Number of exported symbols.
    def get_exports_info(self):
        try:
            exports = self.report["image"]["exports"]
        except KeyError:
            return [-1]
        if exports == "n/a":
            return [0]
        vector = [len(exports["export"])]
        return vector

    # 1 if the binary carries a certificate, else 0.
    def get_certificate(self):
        try:
            cert = self.report["image"]["certificate"]
            if cert == "n/a":
                return [0]
            else:
                return [1]
        except:
            # Missing section counts as "no certificate".
            return [0]

    def process_report(self):
        """Concatenate the four one-element feature lists."""
        vector = []
        vector += self.get_libraries_info()
        vector += self.get_imports_info()
        vector += self.get_exports_info()
        vector += self.get_certificate()
        return vector
# + [markdown] id="DPb9HsS4SAb2"
# ## 학습데이터 구성
# - 특징 벡터 구성은 2차원이 되어야함 e.g. [vector_1, vector_2, ..., vector_n]
#
# - 각 벡터는 1차원 리스트, 벡터 크기는 모두 같아야함
# + id="P3VFElF-KlPW"
# Feature vectors (2-D list): X
# Labels (1-D list): y
label_table = read_label_csv(f"{file_path}/학습데이터_정답.csv")
X, y = [], []
l = os.listdir(f"{file_path}/EMBER/학습데이터")
for fname in tqdm(l):
    feature_vector = []
    label = label_table[fname.split(".")[0]]
    for data in ["PEMINER", "EMBER", "PESTUDIO"]:
        path = f"{file_path}/{data}/학습데이터/{fname}"
        if data == "PEMINER":
            feature_vector += PeminerParser(path).process_report()
        elif data == "EMBER":
            feature_vector += EmberParser(path).process_report()
        # NOTE(review): "PESUDIO" can never equal the loop value "PESTUDIO",
        # so PESTUDIO features are silently skipped. The same typo exists in
        # the validation and test cells — fix all three together or none,
        # and mind the FileNotFoundError pass, which can yield ragged vectors.
        elif data == "PESUDIO":
            try:
                feature_vector += PestudioParser(path).process_report()
            except FileNotFoundError:
                pass
            # print(path)
    X.append(feature_vector)
    y.append(label)
np.asarray(X).shape, np.asarray(y).shape
# -
# ## 검증데이터 구성
# +
val_label_table = read_label_csv(f"{file_path}/검증데이터_정답.csv")
X_val, y_val = [], []
# List the validation sample file names.
l_val = os.listdir(f"{file_path}/EMBER/검증데이터")
for fname in tqdm(l_val):
    feature_vector = []
    label = val_label_table[fname.split(".")[0]]
    for data in ["PEMINER", "EMBER", "PESTUDIO"]:
        path = f"{file_path}/{data}/검증데이터/{fname}"
        if data == "PEMINER":
            feature_vector += PeminerParser(path).process_report()
        elif data == "EMBER":
            feature_vector += EmberParser(path).process_report()
        # NOTE(review): "PESUDIO" never matches "PESTUDIO", so these
        # features are silently skipped — same typo as the training cell.
        elif data == "PESUDIO":
            try:
                feature_vector += PestudioParser(path).process_report()
            except FileNotFoundError:
                pass
    X_val.append(feature_vector)
    y_val.append(label)
np.asarray(X_val).shape, np.asarray(y_val).shape
# + [markdown] id="LiaocTsNSAb7"
# ## 학습 및 검증
# +
models = []
model_list = ["rf", "dt", "lgb", "svm", "lr", "knn", "adaboost", "mlp"]
for model in tqdm(model_list):  # add/remove keys here to choose which models to fit
    clf = train(X, y, model)
    models.append(clf)
# Validation
# For the real evaluation, use the provided validation dataset.
for model in tqdm(models):
    evaluate(X_val, y_val, model)
# + id="MiyPyVGPSAb8"
# Training
models = []
for model in tqdm(["rf", "lgb"]):  # add dt/svm/lr/knn/adaboost/mlp here for more models
    clf = train(X, y, model)
    models.append(clf)
# Validation
# For the real evaluation, use the provided validation dataset.
for model in tqdm(models):
    evaluate(X_val, y_val, model)
# + [markdown] id="ViOp94FKSAb_"
# ## 앙상블 예제
# + id="w1q0M2r_SAb_"
def ensemble_result(X, y, models):
    """
    Evaluate a soft-voting ensemble of fitted models.

    :param X: 2-D feature matrix to score
    :param y: 1-D label vector
    :param models: list of one or more fitted estimators

    Soft vs hard voting:
    https://devkor.tistory.com/entry/Soft-Voting-%EA%B3%BC-Hard-Voting
    """
    # PERF FIX: call predict_proba once per model, not once per
    # (sample, model) pair — the original re-ran full inference for every
    # sample inside the inner loop.
    all_probs = [model.predict_proba(X) for model in models]
    predicts = []
    for i in tqdm(range(len(X))):
        # Positive-class probability (column 1) from each model.
        probs = [proba[i][1] for proba in all_probs]
        predict = 1 if np.mean(probs) >= 0.5 else 0
        predicts.append(predict)
    print("정확도", accuracy_score(y, predicts))
# + id="b0-seTGfSAcB"
# Soft-vote the trained models on the validation set.
ensemble_result(X_val, y_val, models)
# + [markdown] id="VvqfEqTsSAcE"
# ## 특징 선택 예제 (RFE 알고리즘 사용)
# + id="oXTMP0t2SAcE"
def select_feature(X, y, model):
    """
    Reduce the feature matrix using RFE (recursive feature elimination).

    https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html#sklearn.feature_selection.RFE.fit_transform

    :param X: 2-D feature matrix
    :param y: 1-D label vector
    :param model: string naming the estimator used to rank features
    :return: the transformed (reduced) feature matrix
    """
    estimator = load_model(model=model, random_state=SEED)
    selector = RFE(estimator=estimator)
    return selector.fit_transform(X, y)
# -
# Derive a reduced training set via RFE feature selection.
selected_X = select_feature(X, y, "rf")
print("특징 추출 전 특징의 개수 : ", np.asarray(X_val).shape[1])
print("특징 추출 후 특징의 개수 : ", selected_X.shape[1])
# RFE selected 993 features here.
# +
# Train a new model on the RFE-reduced training data.
new_model = train(selected_X, y, "rf")
# NOTE(review): this refits RFE on the validation set, which may select a
# DIFFERENT feature subset than the one used for training — the usual
# approach is to fit RFE once on train and only transform the validation set.
selected_X_val = select_feature(X_val, y_val, "rf")
# Evaluate the new model on the (independently) reduced validation data.
evaluate(selected_X_val, y_val, new_model)
# -
# # CSV 파일 추출
# +
X_test = []
l_test = os.listdir(f"{file_path}/EMBER/테스트데이터")
for fname in l_test:
    feature_vector = []
    for data in ["PEMINER", "EMBER", "PESTUDIO"]:
        path = f"{file_path}/{data}/테스트데이터/{fname}"
        if data == "PEMINER":
            feature_vector += PeminerParser(path).process_report()
        elif data == "EMBER":
            feature_vector += EmberParser(path).process_report()
        # NOTE(review): "PESUDIO" never matches "PESTUDIO", so these
        # features are silently skipped — same typo as the train/val cells;
        # all three must stay consistent for vector lengths to line up.
        elif data == "PESUDIO":
            try:
                feature_vector += PestudioParser(path).process_report()
            except FileNotFoundError:
                pass
    X_test.append(feature_vector)
    # y_valid.append(label)
np.asarray(X_test).shape
# Expected output: 10000 samples.
def ensemble_test_result(X, models):
    """Soft-vote *models* over *X* and write predictions to predict.csv.

    Writes one row per entry of the global ``l_test`` file listing, which is
    assumed to be aligned with (and the same length as) *X*.
    """
    predicts = []
    for model in models:
        # Positive-class probability (second column) for every sample.
        prob = [result for _, result in model.predict_proba(X)]
        predicts.append(prob)
    # Average per-model probabilities, then threshold at 0.5.
    predict = np.mean(predicts, axis=0)
    predict = [1 if x >= 0.5 else 0 for x in predict]
    with open("predict.csv", "w", encoding="utf-8", newline="") as f:
        wr = csv.writer(f)
        wr.writerow(["file", "predict"])
        # Was hard-coded to range(10000); use the actual prediction count
        # so the function works for any test-set size.
        for a in range(len(predict)):
            wr.writerow([os.path.splitext(l_test[a])[0], predict[a]])
    print(predict)

# Generate the submission CSV for the test samples.
ensemble_test_result(X_test, models)
| Project 2/ai_malware_detect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to Machine Learning Assignment 2
# ## Setup
import numpy as np
import pandas as pd
import io
import os
import IPython
from pycaret.regression import *
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
import seaborn as sns
import sklearn
import tarfile
import urllib.request
import matplotlib as mpl
import matplotlib.pyplot as plt
import requests
from pandas.plotting import scatter_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.impute import SimpleImputer
from zlib import crc32
# ## Importing the data
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
# ## Visualisation
train.head()
train.info()
train.runtime.min(), train.runtime.max()
# %matplotlib inline
train.hist(bins=50, figsize=(20,15))
plt.show()
#We see that a big part of our budget is zero so we'll have to fix that later
# %matplotlib inline
#test.hist(bins=50, figsize=(20,15))
#plt.show()
# We create a new attribute popularity per day, which gives the average of the popularity each day.
# NOTE(review): dividing by runtime yields inf/NaN for zero-runtime rows — confirm.
train["pop_per_day"] = train["popularity"]/train["runtime"]
train.plot(kind="scatter", x = "revenue", y = "pop_per_day", alpha = 0.1)
#These are the attributes we are currently working with
attributes = ["budget", "runtime", "popularity", "revenue"]
# NOTE(review): this selection drops the pop_per_day column created above.
train = train[attributes]
#Correlation matrix for the attributes we are using
corr_matrix = train.corr()
corr_matrix["revenue"].sort_values(ascending=False)
sns.heatmap(corr_matrix, annot=True,cmap='RdYlGn',linewidths=0.2)
# ## Prepare data for Algorithm
#Drop rows who are NaN
train = train.dropna()
# #### Filling in 0 values - Budget
#Function for filling in the values
def budget_fill(df, median_budget):
    """Replace zero budgets with *median_budget* (mutates *df*; also returns it)."""
    zero_budget_rows = df['budget'] == 0
    df.loc[zero_budget_rows, 'budget'] = median_budget
    return df
# +
#Get the median budget but without the 0 values, this would change the value drastically
median_budget = train[train['budget'] > 0]['budget'].median()
#Fill the 0 values with median values
train = budget_fill(train, median_budget)
# -
# ### Data Cleaning
#Feature scale the data, before feeding it to the algorithm
scaler = StandardScaler()
train_scaled = scaler.fit_transform(train)
# NOTE(review): train_scaled is never used below — train_ready is built from
# the UNSCALED `train` DataFrame; confirm whether scaling was meant to apply.
train_ready = pd.DataFrame(train, columns=['budget', 'runtime', 'popularity','revenue'])
train_ready.head(20)
# ## Random forest algorithm
# #### Setup for evaluation
#get the rmse from the trained model
def rmse(y_pred, y_true):
    """Root-mean-squared error between predictions and targets.

    Computed directly with numpy; numerically identical to
    sqrt(mean_squared_error(...)), avoiding a needless sklearn call whose
    (y_true, y_pred) argument order the original had reversed (harmless
    only because MSE is symmetric).
    """
    diff = np.asarray(y_pred, dtype=float) - np.asarray(y_true, dtype=float)
    return np.sqrt(np.mean(diff ** 2))
def print_scores(model):
    """Print train-set R^2 and RMSE for *model*.

    NOTE: reads the globals X_train / y_train defined later in the notebook.
    """
    print(f'Score Train R2: {model.score(X_train,y_train)}')
    # BUG FIX: this line was labelled "RMSE Train R2" — it reports RMSE, not R2.
    print(f'RMSE Train: {rmse(model.predict(X_train), y_train)}')
#Setup for Random forest
seed = 42
np.random.seed(seed)
X_train, X_valid, y_train, y_valid = train_test_split(
    train_ready.drop('revenue', axis = 1), train_ready['revenue'],
    test_size=0.1,
    random_state=42
)
rf = RandomForestRegressor(n_estimators = 30, min_samples_leaf = 4, max_features = 0.3, n_jobs = 1, oob_score = True, random_state = 42)
rf.fit(X_train, y_train)
print_scores(rf)
# Sanity-check prediction for one hand-picked (budget, runtime, popularity) row.
print(rf.predict([[8000000,96.0,6.496259]]))
# ## Export model
# NOTE(review): the file handle passed to pickle.dump is never closed —
# prefer a with-block.
pickle.dump(rf, open('../model/box_office_model.pkl', 'wb'))
| nbs/.ipynb_checkpoints/ML_Assignment-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Project for Fundamentals of Data Analytics ##
#
# ### An investigation into Box Plots and its uses ###
#
# From the Wikipedia page it is explained that the 'boxplot is a method for <br>graphically depicting groups of numerical data through their quartiles. Box plots may also have <br>lines extending vertically from the boxes (whiskers) indicating variability outside the <br>upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram'.
#
# I found this exercise useful in terms of understanding the method and results <br>behind a boxplot.
# https://www.khanacademy.org/math/statistics-probability/summarizing-quantitative-data/box-whisker-plots/v/constructing-a-box-and-whisker-plot <br>
# This video demonstrates the creation of a box plot manually using the scenario of a restaurant where the manager wishes to ascertain the distances customers travel.
#
# Reference to understanding boxplots: <br>
# https://medium.com/dayem-siddiqui/understanding-and-interpreting-box-plots-d07aab9d1b6c
#
# ### About the Box Plot ..The where and the why###
# The box plot was first introduced by the mathematician John W. Tukey. <br>One site said this occurred in 1969 and another stated the box plot was first presented <br>and discussed in 1977 in his book called "Exploratory Data Analysis".
#
# 
#
# The information presents as a five number summary, with each value describing a <br>specific part of the set from the low to high.<br>
#
# - Median: Centre of the Dataset, Q2
# - Lower Quartile and lower Extreme , Q1
# - Upper Quartile and upper extreme, Q3
#
#
# ### Its uses ###
# The spread of data provides a useful summary of the dataset. It does not provide <br>the high-level detail of other plots, but its advantage is the quick way large <br>sets of data can be summarized to give an extremely useful overview; it will also reveal <br>outliers.
# ### Whiskers & Outliers ###
# A plot whisker is a line that goes from the box to the whisker boundaries. <br>A crossbar-type line is sometimes drawn at the top of the boundary. Points beyond that are outliers.
#
# ### Percentiles ###
# The box part is defined by 2 lines at the 25th percentile & 75th percentile. <br>
# The 25th percentile is the value at which 25% of the data values are below this value.<br>
# The middle 50% of the data falls between the 25th percentile and 75th percentile.<br>
# The distance between the 75th percentile & the 25th percetnile is called the <br> interquartile range or IQR and is often used as measure for the spread of data.
#
# References:
#
# https://www150.statcan.gc.ca/n1/edu/power-pouvoir/ch12/5214889-eng.htm
#
# https://en.wikipedia.org/wiki/Box_plot
#
# https://citoolkit.com/articles/histograms-and-boxplots/
#
#
#
# https://machinelearningmastery.com/data-visualization-methods-in-python
#
# Boxplots are useful to summarize the distribution of a data sample as an alternative to the histogram. They can help to quickly get an idea of the range of common and sensible values in the box and in the whisker respectively. Because we are not looking at the shape of the distribution explicitly, this method is often used when the data has an unknown or unusual distribution, such as non-Gaussian.
# +
#https://machinelearningmastery.com/data-visualization-methods-in-python
# example of a box and whisker plot
from numpy.random import seed
from numpy.random import randn
from matplotlib import pyplot
# seed the random number generator
seed(1)
# random numbers drawn from a Gaussian distribution
# (three samples with standard deviations 1, 5 and 10)
x = [randn(1000), 5 * randn(1000), 10 * randn(1000)]
# create box and whisker plot
pyplot.boxplot(x)
# show line plot
pyplot.show()
# -
# https://www.khanacademy.org/math/statistics-probability/summarizing-quantitative-data/box-whisker-plots/v/constructing-a-box-and-whisker-plot
#
#
#
import pandas as pd
import numpy as np  # BUG FIX: np is used below but was never imported before this cell

# Random 10x4 frame, one column per month, to exercise DataFrame.boxplot.
np.random.seed(3675)
df = pd.DataFrame(np.random.randn(10,4),
                  columns=['June', 'July', 'Aug', 'Sep'])
boxplot = df.boxplot(column=['June', 'Aug', 'Sep','July'])
# +
# Mocked-up sales dataset to see how DataFrame box plots work.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# (Removed stray no-op statements `1`, `787` and an empty string literal
# that had crept into this cell.)
data = pd.DataFrame({'Country':['UK','IRE', 'FR','ESP'],
                     'Sales jan':[89,128,329,299],
                     'Sales Feb':[102,200,390,450]})
data
# NOTE(review): set_index is not in-place — this result is discarded, and the
# boxplot below relies on 'Country' still being a column, so keep it that way.
data.set_index('Country')
data
# -
# One box per country for the January sales figures.
data.boxplot(column='Sales jan', by='Country')
# ### Used dataset about life expectancy at###<br>
# https://citoolkit.com/articles/histograms-and-boxplots/
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# BUG FIX: the read_csv line was truncated to 'df =pd.read_csv("c)' — a syntax
# error; restored the gapminder URL used by the sibling cells below.
df = pd.read_csv("http://bit.ly/2cLzoxH")
df.columns
# -
df.boxplot(column='lifeExp', by='continent')
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# BUG FIX: restored the truncated read_csv call (was 'df =pd.read_csv("c)').
df = pd.read_csv("http://bit.ly/2cLzoxH")
df.columns
# +
### Same dataset using catplot from seaborn
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# %matplotlib inline
# Load data
world =pd.read_csv("http://bit.ly/2cLzoxH")
# Bar chart of life expectancy per continent for comparison with the box plot.
sns.catplot(x="lifeExp", y="continent", kind="bar", data=world)
plt.show()
# -
# My opinion is that the box plot provides more information — a lot of <br>information is quickly conveyed at a glance.
# +
# Example of violin plot from Seaborn using Tips dataset
import seaborn as sns
sns.set(style="whitegrid")
tips = sns.load_dataset("tips")
ax = sns.violinplot(x=tips["total_bill"])
# the plot looks very beautiful but at first look doesn't make
# the same immediate impact information-wise
# +
### https://seaborn.pydata.org/generated/seaborn.boxplot.html
import seaborn as sns # load the "iris" dataset
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
iris= sns.load_dataset("iris")
# Horizontal box plot of all four iris measurement columns at once.
sns.boxplot(data=iris, orient="h", palette="Set2")
# +
# Example of the Iris dataset using a violin plot.
import seaborn as sns # load the "iris" dataset
import matplotlib.pyplot as plt
iris = sns.load_dataset('iris')
df = iris.iloc[:,:4] # exclude the last column ("species")
# matplotlib's violinplot wants one sequence per variable, hence the transpose.
plt.violinplot(df.T)
plt.xticks(range(1,df.shape[1]+1), df.columns, rotation='vertical')
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df =pd.read_csv("http://bit.ly/2cLzoxH")
# +
# Import the necessary libraries
import matplotlib.pyplot as plt
import pandas as pd
# Initialize Figure and Axes object
fig, ax = plt.subplots()
# Load in data
# BUG FIX: this cell was scrambled — the URL was split across lines and the
# trailing statements fused into it; reassembled in the intended order.
tips = pd.read_csv("https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv")
# Create violinplot
ax.violinplot(tips["total_bill"], vert=False)
# Show the plot
plt.show()
# +
# Import the libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Load data
titanic = sns.load_dataset("titanic")
titanic
# +
# Import Titanic dataset
import matplotlib.pyplot as plt
import seaborn as sns
# Load data
titanic = sns.load_dataset("titanic")
titanic
# Age distribution split by survival outcome.
titanic.boxplot(column='age', by='survived',)
# -
# BUG FIX: removed the interactive '>>> ' prompts, which are a syntax error
# when the cell is executed as a script.
import seaborn as sns
sns.set(style="whitegrid")
titanic = sns.load_dataset("titanic")
ax = sns.violinplot(x=titanic["pclass"])
# +
#group by pandas
titanic = sns.load_dataset("titanic")
groupby_gender_survival = titanic.groupby(["sex", "survived"])
groupby_gender_survival.apply(len)
titanic.groupby
# +
import matplotlib.pyplot as plt
import seaborn as sns
# Load data
titanic = sns.load_dataset("titanic")
titanic
titanic.boxplot(column='pclass', by='sex')
ax = sns.stripplot(x="sex", y="pclass",
data=titanic,
edgecolor="gray")
# +
import matplotlib.pyplot as plt
import seaborn as sns
# Load data
titanic = sns.load_dataset("titanic")
# Set up a factorplot
g = sns.factorplot("class", "survived", "sex", data=titanic, kind="bar", size=6, aspect=2, palette="autumn", legend=False)
# Show plot
plt.show()
# +
import matplotlib.pyplot as plt
import seaborn as sns
# Reset default params
sns.set(rc={"font.size":8,"axes.labelsize":5})
# Load
titanic = sns.load_dataset("titanic")
sns.swarmplot(x="survived", y="age", data=titanic)
# Show plot
plt.show()
# +
import matplotlib.pyplot as plt
import seaborn as sns
# Load
titanic = sns.load_dataset("titanic")
ax = sns.boxplot(x="survived", y="age",
                 data=titanic)
ax = sns.stripplot(x="survived", y="age",
                   data=titanic, jitter=True,
                   edgecolor="red")
# BUG FIX: the sns.plt alias was removed from seaborn; use pyplot directly.
plt.title("Survival by age", fontsize=12)
# -
| Pro Funda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NUME
#
# $\newcommand{\e}[1]{\mathrm{e}^{#1}}$
#
# **Métodos Numéricos**
#
# Instructor: Dr. <NAME>
#
#
# Compare a lazy generator expression with a materialised list.
p = 3
x = range(10 ** p)
x_squared = (xi ** 2 for xi in x)  # generator: evaluated lazily, single-use
x_squared
x_squared_list = [xi ** 2 for xi in x]
type(x_squared_list), x_squared_list[-5:]
# %timeit for x2 in x_squared: x2
# %timeit for x2 in x_squared_list: x2
def squared_generator(x_iterable):
    """Lazily yield each element of *x_iterable* squared."""
    for value in x_iterable:
        yield value * value
# 0 - False
# 1 - True
# 2 - True
# 3 - False
# 0.0110
# .011
# Extract the binary expansion of 0.375 by repeated doubling: each doubling
# shifts out one bit; the comparison against 1.0 reads that bit.
(a:=0.375 * 2) > 1.0
a = a * 2
a, a >= 1.0
import math
# Drop the integer part and double again to get the next bit.
a = (a - int(math.trunc(a))) * 2
a, a >= 1.0
a = (a - int(math.trunc(a))) * 2
a, a >= 1.0
# 0.1 has no finite binary expansion; show the exact stored double value.
0.1
f'{0.1:0.55f}'
from decimal import Decimal
Decimal.from_float(0.1)
import numpy as np
a = np.array([0., 1, 2], dtype=np.float32)
type(a)
a.dtype
for e in [0., 1, '2']: print(type(e))
# IEEE 754
# +
# int a, -127, 128 ( 127)
# unsigned int, a, 0, 255--- 0 - 127 = -127, 255 -127 = 128
L = -3
U = 4
# BUG FIX: the lecture scratch notes below were left as bare text, which is a
# syntax error that breaks the whole file; kept as comments instead.
# Biased 3-bit encoding of the range [L, U] (bias = -L = 3):
# _ _ _
# 0 0 0 = 0
# 1 1 1 = 7
# -3 + 3 = 0
# 4 + 3 = 7
# -
# float16 cannot represent these integers exactly near its upper range
# (the representable values are spaced apart there), so distinct inputs
# may round to the same stored value — that is the point of this demo.
a = np.array([65450, 65443, 65447], dtype=np.float16)
a[:]
a[0]
| src/nume-s1-p1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# # Show EOG artifact timing
#
#
# Compute the distribution of timing for EOG artifacts.
#
#
#
# +
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
# Download (if needed) and locate the MNE sample dataset.
data_path = sample.data_path()
# -
# Set parameters
#
#
# +
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
events = mne.find_events(raw, 'STI 014')
# Detect EOG (blink) events and write them onto the stim channel with
# a dedicated event id so they can be counted inside each epoch later.
eog_event_id = 512
eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
raw.add_events(eog_events, 'STI 014')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False)
tmin, tmax = -0.2, 0.5
event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
# Get the stim channel data
pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
data = epochs.get_data()[:, pick_ch, :].astype(int)
# Per time sample, count how many epochs have the EOG bit (512) set.
data = np.sum((data.astype(int) & 512) == 512, axis=0)
# -
# Plot EOG artifact distribution
#
#
# Stem plot of blink counts across the epoch time axis (seconds -> ms).
plt.stem(1e3 * epochs.times, data)
plt.xlabel('Times (ms)')
plt.ylabel('Blink counts (from %s trials)' % len(epochs))
plt.show()
| 0.13/_downloads/plot_eog_artifact_histogram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from wordcloud import WordCloud
import numpy as np
import nltk
import re
import os
import matplotlib.pyplot as plt
from PIL import Image # From the Pillow library import the Image module.
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# %matplotlib inline
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
# !pip install wordcloud
# +
# Read the whole book as a list of lines.
f = open("./book_three_little_pigs.txt",'r',encoding='UTF8')
my_book = f.readlines()
f.close()
# -
n_min = 4 # Minimum number of characters.
corpus = []
lemmatizer = WordNetLemmatizer()
# Per-line cleaning pipeline: strip punctuation/digits, tokenize, filter,
# lowercase, drop stopwords, lemmatize, then extend the global corpus.
for a_line in my_book:
    pre = re.sub(r'\W', ' ', a_line) # Substitute the non-alphanumerics character by space.
    pre = re.sub(r'\d+','', pre) # Remove numbers.
    pre = nltk.word_tokenize(pre) # Tokenize into words.
    # NOTE(review): strictly greater-than, so 4-character words are dropped
    # despite n_min being described as the minimum.
    pre = [x for x in pre if len(x) > n_min] # Minimum length.
    pre = [x.lower() for x in pre] # Convert into the lowercase.
    # NOTE(review): stopwords.words('english') is re-built on every line;
    # hoisting it into a set before the loop would be faster.
    pre = [x for x in pre if x not in stopwords.words('english')] # Remove stopwords.
    pre = [lemmatizer.lemmatize(x) for x in pre] # Lemmatize.
    corpus += pre # Back to the corpus.
len(corpus)
# +
# WordCloud consumes a single whitespace-separated string.
a_long_sentence = ' '.join(corpus)
# -
wc = WordCloud(background_color='white', max_words=30) # Customize the output.
wc.generate(a_long_sentence)
# wc.words_ # Check for the top ranking words.
plt.figure(figsize=(10,10))
plt.imshow(wc, interpolation='bilinear')
plt.axis("off") # Turn off the axes.
plt.show()
# Pick a background mask.
#img = Image.open('background_1.png') # Elipse.
#img = Image.open('background_2.png') # Speech bubble.
img = Image.open('background_3.png') # Heart.
#img = Image.open('background_4.png') # Circle.
back_mask = np.array(img)
# Second cloud: same words, but shaped by the chosen mask image.
wc = WordCloud(background_color='white', max_words=30, mask=back_mask) # Customize the output.
wc.generate(a_long_sentence)
plt.figure(figsize=(10,10))
plt.imshow(wc, interpolation='bilinear')
plt.axis("off") # Turn off the axes.
plt.savefig("out.png") # Save to an external file.
plt.show()
| Rafay notes/Samsung Course/Chapter 7/Lecture/Lecture 3/.ipynb_checkpoints/Lecture 03 3rd november-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data download
# +
#XXX changes
# !wget ftp://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3/plink_format/draft_2/hapmap3_r2_b36_fwd.consensus.qc.poly.map.bz2
# !wget ftp://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3/plink_format/draft_2/hapmap3_r2_b36_fwd.consensus.qc.poly.ped.bz2
# !wget ftp://ftp.ncbi.nlm.nih.gov/hapmap/genotypes/hapmap3/plink_format/draft_2/relationships_w_pops_121708.txt
# -
# !bunzip2 hapmap3_r2_b36_fwd.consensus.qc.poly.map.bz2
# !bunzip2 hapmap3_r2_b36_fwd.consensus.qc.poly.ped.bz2
# # Preparation
import os
from collections import defaultdict
# ## Loading HapMap meta-data
# Build pop_ind: population code -> list of (family_id, individual_id),
# and collect every individual with a recorded parent (an offspring).
f = open('relationships_w_pops_121708.txt')
pop_ind = defaultdict(list)
f.readline() # header
offspring = []
for l in f:
    toks = l.rstrip().split('\t')
    fam_id = toks[0]
    ind_id = toks[1]
    mom = toks[2]
    dad = toks[3]
    # '0' denotes an absent/unknown parent in PLINK-style pedigrees.
    if mom != '0' or dad != '0':
        offspring.append((fam_id, ind_id))
    pop = toks[-1]
    pop_ind[pop].append((fam_id, ind_id))
f.close()
# ## Sub-sampling
# Thin markers to ~10% / ~1% and drop SNPs with >10% missing genotypes.
os.system('plink --recode --file hapmap3_r2_b36_fwd.consensus.qc.poly --noweb --out hapmap10 --thin 0.1 --geno 0.1')
os.system('plink --recode --file hapmap3_r2_b36_fwd.consensus.qc.poly --noweb --out hapmap1 --thin 0.01 --geno 0.1')
# ## Getting only autosomal data
def get_non_auto_SNPs(map_file, exclude_file):
    """Write the rs IDs of all non-autosomal SNPs to *exclude_file*.

    Reads a tab-separated PLINK .map file (*map_file*) and writes, one per
    line, the identifier (column 2) of every SNP whose numeric chromosome
    code (column 1) is above 22 — i.e. X/Y/XY/MT in PLINK coding.

    Fix: the original leaked the input file handle (opened, never closed);
    both files are now managed with a context manager.
    """
    with open(map_file) as f, open(exclude_file, 'w') as w:
        for l in f:
            toks = l.rstrip().split('\t')
            chrom = int(toks[0])  # PLINK numeric chromosome code
            rs = toks[1]          # SNP identifier
            if chrom > 22:
                w.write('%s\n' % rs)
# Collect non-autosomal SNP IDs for both thinned datasets, then run the
# PLINK pipeline (shell-magic lines) to filter, prune and re-code them.
get_non_auto_SNPs('hapmap10.map', 'exclude10.txt')
get_non_auto_SNPs('hapmap1.map', 'exclude1.txt')
#get_non_auto_SNPs('hapmap3_r2_b36_fwd.consensus.qc.poly.map', 'exclude.txt')
# Drop the non-autosomal SNPs collected above from both datasets.
# !plink --recode --file hapmap10 --noweb --out hapmap10_auto --exclude exclude10.txt
# !plink --recode --file hapmap1 --noweb --out hapmap1_auto --exclude exclude1.txt
#geno!!!
# #!plink --recode --file hapmap3_r2_b36_fwd.consensus.qc.poly --noweb --out hapmap_auto --exclude exclude.txt
# ## Removing offspring
# Keep founders only (individuals without parents in the pedigree).
# !plink --file hapmap10_auto --filter-founders --recode --out hapmap10_auto_noofs
# ## LD-prunning
# Window of 50 SNPs, step 10, r^2 threshold 0.1; keep.prune.in lists survivors.
# !plink --file hapmap10_auto_noofs --indep-pairwise 50 10 0.1 --out keep
# !plink --file hapmap10_auto_noofs --extract keep.prune.in --recode --out hapmap10_auto_noofs_ld
# ## Different coding
# !plink --file hapmap10_auto_noofs_ld --recode12 tab --out hapmap10_auto_noofs_ld_12
# !plink --make-bed --file hapmap10_auto_noofs_ld --out hapmap10_auto_noofs_ld
# ## Single chromosome
# !plink --recode --file hapmap10_auto_noofs --chr 2 --out hapmap10_auto_noofs_2
| Chapter04/Data_Formats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
# Descriptive statistics for an ADHD dataset. Bare string expressions below
# rely on notebook auto-display of the last expression in a cell.
df = pd.read_csv('df_all.csv')
'Number of female subjects = {}, Number of male subjects = {},'.format(df['Gender'].value_counts()[0.0], df['Gender'].value_counts()[1.0])
'Number of ADHD subjects = {}, Number of typically developing subjects = {}'.format(df['DX'].value_counts()[1]+df['DX'].value_counts()[2]+df['DX'].value_counts()[3], df['DX'].value_counts()[0])
'Number of ADHD-Combined subjects = {}, Number of ADHD-Hyperactive/Impulsive subjects = {}, Number of ADHD-Inattentive subjects = {}, Number of typically developing subjects = {}'.format(df['DX'].value_counts()[1], df['DX'].value_counts()[2], df['DX'].value_counts()[3], df['DX'].value_counts()[0])
'Number of subjects with pending diagnosis = {}'.format(df['DX'].value_counts()['pending'])
'{} Right-handed, {} Left-handed, {} Ambidextrous'.format(df['Handedness'].value_counts()[1], df['Handedness'].value_counts()[0], df['Handedness'].value_counts()[2])
print('some of the handedness values are decimals and also these handedness counts are simply not correct')
'Age: mean = {}, std = {}, range = {}-{}'.format(df['Age'].mean(), df['Age'].std(), df['Age'].min(), df['Age'].max())
# Age distribution over all subjects.
Age = df['Age']
plt.hist(Age,bins=15,color='peachpuff')
plt.title('Subject Age')
plt.xlabel('Age')
plt.ylabel('Number of Subjects')
# Drop subjects whose diagnosis is still pending; DX values here are strings.
ndf = df[df['DX'] != 'pending']
print('ndf = df without pending values')
ADHDdf = ndf[ndf['DX'] != '0']
controldf = ndf[ndf['DX'] == '0']
# Overlaid age histograms: ADHD vs typically developing.
ADHDage = ADHDdf['Age']
plt.hist(ADHDage, alpha=0.6, color = 'purple')
controlage = controldf['Age']
plt.hist(controlage, alpha=0.3, color='darkturquoise')
plt.title('Subject Age')
plt.xlabel('Age')
plt.ylabel('Number of Subjects')
plt.legend(['ADHD', 'Typically Developing'])
# NOTE(review): Gender was read as float (0.0/1.0) but is compared against
# the strings '1.0'/'0.0' below, so these filters likely exclude nothing —
# confirm against the CSV's actual dtypes.
femaledf = ndf[ndf['Gender']!='1.0']
maledf = ndf[ndf['Gender']!='0.0']
femaleDX = femaledf['DX']
plt.hist(femaleDX, color = 'orchid')
plt.title('Female Subject Diagnosis')
plt.xlabel('Diagnosis')
plt.ylabel('Number of Subjects')
plt.xticks([0,1,2,3],['Typically Developing', 'ADHD-Combined', 'ADHD-Hyperactive/Impulsive', 'ADHD-Inattentive'])
maleDX = maledf['DX']
plt.hist(maleDX, color = 'tomato')
plt.title('Male Subject Diagnosis')
plt.xlabel('Diagnosis')
plt.ylabel('Number of Subjects')
plt.xticks([0,1,2,3],['Typically Developing', 'ADHD-Combined', 'ADHD-Hyperactive/Impulsive', 'ADHD-Inattentive'])
plt.hist([femaleDX,maleDX], color = ['orchid', 'tomato'])
plt.title('Diagnosis by Gender')
plt.xlabel('Subject Diagnosis')
plt.ylabel('Number of Subjects')
plt.legend(['Female','Male'])
plt.xticks([0,1,2,3],['Typically Developing', 'ADHD-Combined', 'ADHD-Hyperactive/Impulsive', 'ADHD-Inattentive'])
print('why isnt this working they shouldnt have the same size bars and how to make labels not overlap')
# IQ statistics, excluding the -999 missing-value sentinel.
nndf = ndf[ndf['Full4 IQ']>0]
print('nndf = df without pending or -999 values')
'IQ: mean = {}, std = {}, range = {}-{}'.format(nndf['Full4 IQ'].mean(), nndf['Full4 IQ'].std(), nndf['Full4 IQ'].min(), nndf['Full4 IQ'].max())
# IQ histograms by diagnosis subtype.
ADHD1df = nndf[nndf['DX']=='1']
ADHD2df = nndf[nndf['DX']=='2']
ADHD3df = nndf[nndf['DX']=='3']
controliqdf = nndf[nndf['DX']=='0']
type1IQ = ADHD1df['Full4 IQ']
type2IQ = ADHD2df['Full4 IQ']
type3IQ = ADHD3df['Full4 IQ']
controlIQ = controliqdf['Full4 IQ']
plt.hist(type1IQ, alpha=0.7, color='cyan')
plt.hist(type2IQ, color='fuchsia')
plt.hist(type3IQ, alpha=0.7, color='salmon')
plt.hist(controlIQ, alpha=0.4, color='paleturquoise')
plt.title('IQ')
plt.xlabel('Subject IQ')
plt.ylabel('Number of Subjects')
plt.legend(['ADHD-Combined', 'ADHD-Hyperactive/Impulsive', 'ADHD-Inattentive', 'Typically Developing'])
print('1 the legend is covering the whole thing, 2 can barely see the hyperactive-impulsive')
| Data/.ipynb_checkpoints/DescriptiveStatistcsFinal-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
import numpy.random as rd
# import tensorflow as tf
# from tqdm import tqdm_notebook, tqdm
# import cv2
# +
# Local Windows paths for the cat-breed dataset used while experimenting.
EXAMPLE_IMG = os.path.join(
    'C:\\Users\\felix\\OneDrive\\Documentos\\Git\\cat-rec\\data\\test data\\Ashera',
    'pic_009.jpg')
EXAMPLE_FOLDER = 'C:\\Users\\felix\\OneDrive\\Documentos\\Git\\cat-rec\\data\\test data'
# Breed labels; the index in this array is the one-hot position per class.
ONE_HOT = np.array(['American Shorthair','Angora','Ashera','British Shorthair',
    'Exotic','Himalayan','Maine Coon','Persian','Ragdoll','Siamese','Sphynx'])
TRAIN_PATH = 'C:\\Users\\felix\\OneDrive\\Documentos\\Git\\cat-rec\\data\\vectorized data\\train_data.npy'
TEST_PATH = 'C:\\Users\\felix\\OneDrive\\Documentos\\Git\\cat-rec\\data\\vectorized data\\test_data.npy'
IMG_SIZE = 150  # images are resized to IMG_SIZE x IMG_SIZE pixels — TODO confirm in the loader
# -
test = np.array([0,1,2,3,4,5,6,7,8,9])
# NOTE(review): this is a drive-rooted path ('\testing\...'); np.save will
# fail unless that directory tree already exists at the drive root.
np.save('\\testing\\still testing\\ok\\done\\arr', test)
| pre processing/get_data/testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1. FeatureExtraction_data
#
# Reference:
# - https://www.kaggle.com/asraful70/talkingdata-new-features-in-lightgbm-lb-0-9784
# - https://www.kaggle.com/danieleewww/talkingdata-added-new-features-in-lightg-50cf9b/code
# - https://www.kaggle.com/anttip/talkingdata-wordbatch-fm-ftrl-lb-0-9769
# - https://www.kaggle.com/pranav84/talkingdata-eda-to-model-evaluation-lb-0-9683
# - https://www.kaggle.com/aharless/kaggle-runnable-version-of-baris-kanber-s-lightgbm
# - https://www.kaggle.com/pranav84/lgb-entire-dataset-in-2-hrs-lb-0-9718
# - https://www.kaggle.com/panjianning/talkingdata-simple-lightgbm-0-9772
# ## Run name
# +
import time

# Unique, timestamped run name used to tag every artefact of this run.
project_name = 'TalkingdataAFD2018'
step_name = 'FeatureExtraction_data'
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
run_name = '%s_%s_%s' % (project_name, step_name, time_str)
print('run_name: %s' % run_name)
t0 = time.time()  # wall-clock start, reported at the end of the notebook
# -
# ## Important params
# +
# Which day of the training data to process (key into day_rows below).
date = 8
# print('date: ', date)
# Debug mode switches to tiny row counts and limits the test read.
is_debug = False
print('is_debug: %s' % is_debug)
if is_debug:
    test_n_rows = 1 * 10000
else:
    test_n_rows = None
# test_n_rows = 18790469
# -
# Per-day slices of train.csv: rows to skip from the top and rows to read.
# Keys 0/1 are small debug slices; keys 6-9 are the real per-day row counts.
day_rows = {
    0: {
        'n_skiprows': 1,
        'n_rows': 1 * 10000
    },
    1: {
        'n_skiprows': 1 * 10000,
        'n_rows': 2 * 10000
    },
    6: {
        'n_skiprows': 1,
        'n_rows': 9308568
    },
    7: {
        'n_skiprows': 1 + 9308568,
        'n_rows': 59633310
    },
    8: {
        'n_skiprows': 1 + 9308568 + 59633310,
        'n_rows': 62945075
    },
    9: {
        'n_skiprows': 1 + 9308568 + 59633310 + 62945075,
        'n_rows': 53016937
    }
}
# n_skiprows = day_rows[date]['n_skiprows']
# n_rows = day_rows[date]['n_rows']
# ## Import PKGs
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from IPython.display import display
import os
import sys
import gc
import time
import random
import zipfile
import h5py
import pickle
import math
from PIL import Image
import shutil
from tqdm import tqdm
import multiprocessing
from multiprocessing import cpu_count
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
# Random tag available for artefact naming; not used elsewhere in this view.
random_num = np.random.randint(10000)
print('random_num: %s' % random_num)
# -
# ## Project folders
# +
# Conventional project layout rooted at the current working directory.
cwd = os.getcwd()
input_folder = os.path.join(cwd, 'input')
output_folder = os.path.join(cwd, 'output')
model_folder = os.path.join(cwd, 'model')
feature_folder = os.path.join(cwd, 'feature')
log_folder = os.path.join(cwd, 'log')
print('input_folder: \t\t\t%s' % input_folder)
print('output_folder: \t\t\t%s' % output_folder)
print('model_folder: \t\t\t%s' % model_folder)
print('feature_folder: \t\t%s' % feature_folder)
print('log_folder: \t\t\t%s' % log_folder)
train_csv_file = os.path.join(input_folder, 'train.csv')
train_sample_csv_file = os.path.join(input_folder, 'train_sample.csv')
test_csv_file = os.path.join(input_folder, 'test.csv')
sample_submission_csv_file = os.path.join(input_folder, 'sample_submission.csv')
print('\ntrain_csv_file: \t\t%s' % train_csv_file)
print('train_sample_csv_file: \t\t%s' % train_sample_csv_file)
print('test_csv_file: \t\t\t%s' % test_csv_file)
print('sample_submission_csv_file: \t%s' % sample_submission_csv_file)
# -
# ## Load data
# Column subsets to read (test has click_id instead of is_attributed).
train_columns = ['ip', 'app', 'device', 'os', 'channel', 'click_time', 'is_attributed']
test_columns = ['ip', 'app', 'device', 'os', 'channel', 'click_time', 'click_id']
# Narrow integer dtypes keep the very large CSVs within memory.
dtypes = {
    'ip' : 'uint32',
    'app' : 'uint16',
    'device' : 'uint16',
    'os' : 'uint16',
    'channel' : 'uint16',
    'is_attributed' : 'uint8',
    'click_id' : 'uint32'
}
# +
sample_submission_csv = pd.read_csv(sample_submission_csv_file)
print('sample_submission_csv.shape: \t', sample_submission_csv.shape)
display(sample_submission_csv.head(2))
print('train_csv: %.2f Mb' % (sys.getsizeof(sample_submission_csv)/1024./1024.))
# -
def do_click_time(df):
    """Decompose the 'click_time' datetime column into uint8 parts.

    Adds 'day', 'hour', 'minute' and 'second' columns in place and
    returns the (mutated) DataFrame.
    """
    ts = df['click_time'].dt
    for part, values in (('day', ts.day), ('hour', ts.hour),
                         ('minute', ts.minute), ('second', ts.second)):
        df[part] = values.astype('uint8')
    return df
# +
def do_prev_click(df, group_cols, agg_type='float32'):
    """Add a '<cols>_prevClick' column: seconds since the previous click
    within each *group_cols* group (NaN for a group's first click).

    Fix: removed the duplicated `new_feature = new_feature = ...` assignment.
    NOTE(review): `.dt.seconds` is only the seconds-of-day component of the
    gap, so gaps of a day or more wrap; kept as-is to preserve the existing
    feature semantics.
    """
    agg_suffix = 'prevClick'
    new_feature = '{}_{}'.format('_'.join(group_cols), agg_suffix)
    all_features = group_cols + ['click_time']
    df[new_feature] = (df.click_time - df[all_features].groupby(group_cols).click_time.shift(+1) ).dt.seconds.astype(agg_type)
    return df
def do_next_click(df, group_cols, agg_type='float32'):
    """Add a '<cols>_nextClick' column: seconds until the next click
    within each *group_cols* group (NaN for a group's last click).

    Fix: removed the duplicated `new_feature = new_feature = ...` assignment.
    NOTE(review): `.dt.seconds` is only the seconds-of-day component of the
    gap, so gaps of a day or more wrap; kept as-is to preserve the existing
    feature semantics.
    """
    agg_suffix = 'nextClick'
    new_feature = '{}_{}'.format('_'.join(group_cols), agg_suffix)
    all_features = group_cols + ['click_time']
    df[new_feature] = (df[all_features].groupby(group_cols).click_time.shift(-1) - df.click_time).dt.seconds.astype(agg_type)
    return df
# +
## Below a function is written to extract count feature by aggregating different cols
def do_count( df, group_cols, agg_type='uint32', show_max=False, show_agg=True ):
    """Add a '<cols>_count' column: the size of each *group_cols* group,
    broadcast back onto every member row via a left merge.

    Fix: the original selected the group columns twice
    (`df[group_cols][group_cols]`) — a redundant copy; one selection suffices.
    """
    agg_name='{}_count'.format('_'.join(group_cols))
    if show_agg:
        print( "Aggregating by ", group_cols , '... and saved in', agg_name )
    gp = df[group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
    df = df.merge(gp, on=group_cols, how='left')
    del gp
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return( df )
## Below a function is written to extract unique count feature from different cols
def do_countuniq( df, group_cols, counted, agg_type='uint32', show_max=False, show_agg=True ):
    """Add a '<cols>_by_<counted>_countuniq' column: the number of distinct
    *counted* values per *group_cols* group, broadcast onto every row."""
    agg_name = '{}_by_{}_countuniq'.format(('_'.join(group_cols)), (counted))
    if show_agg:
        print( "Counting unqiue ", counted, " by ", group_cols , '... and saved in', agg_name )
    uniq = (df[group_cols + [counted]]
            .groupby(group_cols)[counted]
            .nunique()
            .reset_index()
            .rename(columns={counted: agg_name}))
    df = df.merge(uniq, on=group_cols, how='left')
    del uniq
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return( df )
### Below a function is written to extract cumulative count feature from different cols
def do_cumcount( df, group_cols, counted,agg_type='uint32', show_max=False, show_agg=True ):
    """Add a '<cols>_by_<counted>_cumcount' column: each row's 0-based
    position within its *group_cols* group, in original row order."""
    agg_name = '{}_by_{}_cumcount'.format(('_'.join(group_cols)), (counted))
    if show_agg:
        print( "Cumulative count by ", group_cols , '... and saved in', agg_name )
    running = df[group_cols + [counted]].groupby(group_cols)[counted].cumcount()
    df[agg_name] = running.values
    del running
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return( df )
### Below a function is written to extract mean feature from different cols
def do_mean( df, group_cols, counted, agg_type='float32', show_max=False, show_agg=True ):
    """Add a '<cols>_by_<counted>_mean' column: the group-wise mean of
    *counted* per *group_cols* group, broadcast onto every row."""
    agg_name = '{}_by_{}_mean'.format(('_'.join(group_cols)), (counted))
    if show_agg:
        print( "Calculating mean of ", counted, " by ", group_cols , '... and saved in', agg_name )
    means = (df[group_cols + [counted]]
             .groupby(group_cols)[counted]
             .mean()
             .reset_index()
             .rename(columns={counted: agg_name}))
    df = df.merge(means, on=group_cols, how='left')
    del means
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return( df )
def do_var( df, group_cols, counted, agg_type='float32', show_max=False, show_agg=True ):
    """Add a '<cols>_by_<counted>_var' column: the group-wise (sample)
    variance of *counted* per *group_cols* group, broadcast onto every row."""
    agg_name = '{}_by_{}_var'.format(('_'.join(group_cols)), (counted))
    if show_agg:
        print( "Calculating variance of ", counted, " by ", group_cols , '... and saved in', agg_name )
    variances = (df[group_cols + [counted]]
                 .groupby(group_cols)[counted]
                 .var()
                 .reset_index()
                 .rename(columns={counted: agg_name}))
    df = df.merge(variances, on=group_cols, how='left')
    del variances
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return( df )
# +
def save_feature(x_data, y_data, file_name):
    """Persist a feature matrix and its labels to an HDF5 file.

    Removes any existing file first, then writes datasets 'x_data' and
    'y_data'. Fix: `h5py.File` is now opened with an explicit 'w' mode —
    h5py >= 3 defaults File() to read-only, which would make
    create_dataset fail; a context manager also guarantees the file is
    closed/flushed.
    """
    print(y_data[:5])
    if os.path.exists(file_name):
        os.remove(file_name)
        print('File removed: \t%s' % file_name)
    with h5py.File(file_name, 'w') as h:
        h.create_dataset('x_data', data=x_data)
        h.create_dataset('y_data', data=y_data)
    print('File saved: \t%s' % file_name)
def load_feature(file_name):
    """Read back the ('x_data', 'y_data') pair written by save_feature."""
    with h5py.File(file_name, 'r') as h:
        arrays = {key: np.array(h[key]) for key in ('x_data', 'y_data')}
    print('File loaded: \t%s' % file_name)
    print(arrays['y_data'][:5])
    return arrays['x_data'], arrays['y_data']
def save_test_feature(x_test, click_ids, file_name):
    """Persist the test feature matrix and its click ids to an HDF5 file.

    Removes any existing file first, then writes datasets 'x_test' and
    'click_ids'. Fix: `h5py.File` is now opened with an explicit 'w' mode —
    h5py >= 3 defaults File() to read-only, which would make
    create_dataset fail; a context manager also guarantees the file is
    closed/flushed.
    """
    print(click_ids[:5])
    if os.path.exists(file_name):
        os.remove(file_name)
        print('File removed: \t%s' % file_name)
    with h5py.File(file_name, 'w') as h:
        h.create_dataset('x_test', data=x_test)
        h.create_dataset('click_ids', data=click_ids)
    print('File saved: \t%s' % file_name)
def load_test_feature(file_name):
    """Read back the ('x_test', 'click_ids') pair written by save_test_feature."""
    with h5py.File(file_name, 'r') as h:
        arrays = {key: np.array(h[key]) for key in ('x_test', 'click_ids')}
    print('File loaded: \t%s' % file_name)
    print(arrays['click_ids'][:5])
    return arrays['x_test'], arrays['click_ids']
# +
def save_feature_map(feature_map, file_name):
    """Persist a list of (index, feature_name) pairs to an HDF5 file.

    Names are encoded to UTF-8 bytes before storage (load_feature_map
    decodes them back). Fixes: the encode loop is now a comprehension, and
    `h5py.File` is opened with an explicit 'w' mode — h5py >= 3 defaults
    File() to read-only, which would make create_dataset fail.
    """
    print(feature_map[:5])
    feature_map_encode = [(item[0], item[1].encode('UTF-8')) for item in feature_map]
    if os.path.exists(file_name):
        os.remove(file_name)
        print('File removed: \t%s' % file_name)
    with h5py.File(file_name, 'w') as h:
        h.create_dataset('feature_map', data=feature_map_encode)
    print('File saved: \t%s' % file_name)
def load_feature_map(file_name):
    """Read back the (index, name) pairs written by save_feature_map,
    decoding names from UTF-8 bytes and indices to int."""
    with h5py.File(file_name, 'r') as h:
        encoded = np.array(h['feature_map'])
    print('File loaded: \t%s' % file_name)
    feature_map = [(int(idx), raw_name.decode('UTF-8')) for idx, raw_name in encoded]
    print(feature_map[:5])
    return feature_map
# -
def do_feature(train_csv):
    """Run the full feature-engineering pipeline over *train_csv*.

    Applies, in order: click-time decomposition, previous/next-click time
    gaps, group counts, unique counts, cumulative counts, group means and
    group variances — each driven by the corresponding module-level
    do_*_cols list — then drops the raw 'click_time' column and prints a
    summary of the resulting frame. Returns the engineered DataFrame.
    """
    train_csv = do_click_time(train_csv)
    for cols in do_prev_click_cols:
        print('>> ', cols)
        train_csv = do_prev_click( train_csv, cols ); gc.collect()
    for cols in do_next_click_cols:
        print('>> ', cols)
        train_csv = do_next_click( train_csv, cols ); gc.collect()
    for cols in do_count_cols:
        print('>> ', cols)
        train_csv = do_count( train_csv, cols ); gc.collect()
    # For the remaining builders, each entry's last element is the column
    # being aggregated; the preceding elements are the grouping keys.
    for cols in do_countuniq_cols:
        print('>> ', cols[:-1], cols[-1])
        train_csv = do_countuniq( train_csv, cols[:-1], cols[-1] ); gc.collect()
    for cols in do_cumcount_cols:
        print('>> ', cols[:-1], cols[-1])
        train_csv = do_cumcount( train_csv, cols[:-1], cols[-1] ); gc.collect()
    for cols in do_mean_cols:
        print('>> ', cols[:-1], cols[-1])
        train_csv = do_mean( train_csv, cols[:-1], cols[-1] ); gc.collect()
    for cols in do_var_cols:
        print('>> ', cols[:-1], cols[-1])
        train_csv = do_var( train_csv, cols[:-1], cols[-1] ); gc.collect()
    # click_time has been fully decomposed into derived features by now.
    train_csv.drop(['click_time'], axis=1, inplace=True)
    print(train_csv.shape)
    display(train_csv.head())
    print(train_csv.columns)
    print('data_size: %.2f Mb' % (sys.getsizeof(train_csv)/1024./1024.))
    return train_csv
# +
# Reference tables of 2- and 3-column subsets of the five raw columns
# (ip/app/device/os/channel), with and without 'hour'.
# NOTE(review): `template` and `template_hour` are not referenced anywhere
# else in this notebook — they appear to be kept as a picking list.
template = [
    # 5 choice 2
    ['ip', 'app'],
    ['ip', 'device'],
    ['ip', 'os'],
    ['ip', 'channel'],
    ['app', 'device'],
    ['app', 'os'],
    ['app', 'channel'],
    ['device', 'os'],
    ['device', 'channel'],
    ['os', 'channel'],
    # 5 choice 3
    ['device', 'os', 'channel'],
    ['app', 'os', 'channel'],
    ['app', 'device', 'channel'],
    ['app', 'device', 'os'],
    ['ip', 'os', 'channel'],
    ['ip', 'device', 'channel'],
    ['ip', 'device', 'os'],
    ['ip', 'app', 'channel'],
    ['ip', 'app', 'os'],
    ['ip', 'app', 'device'],
]
template_hour = [
    # 5 choice 2
    ['ip', 'app', 'hour'],
    ['ip', 'device', 'hour'],
    ['ip', 'os', 'hour'],
    ['ip', 'channel', 'hour'],
    ['app', 'device', 'hour'],
    ['app', 'os', 'hour'],
    ['app', 'channel', 'hour'],
    ['device', 'os', 'hour'],
    ['device', 'channel', 'hour'],
    ['os', 'channel', 'hour'],
    # 5 choice 3
    ['device', 'os', 'channel', 'hour'],
    ['app', 'os', 'channel', 'hour'],
    ['app', 'device', 'channel', 'hour'],
    ['app', 'device', 'os', 'hour'],
    ['ip', 'os', 'channel', 'hour'],
    ['ip', 'device', 'channel', 'hour'],
    ['ip', 'device', 'os', 'hour'],
    ['ip', 'app', 'channel', 'hour'],
    ['ip', 'app', 'os', 'hour'],
    ['ip', 'app', 'device', 'hour'],
]
# +
# Column combinations consumed by do_feature(). For do_prev/next_click and
# do_count each entry is the full grouping key; for the remaining lists the
# last element is the aggregated column and the rest are the grouping keys.
# '# ref' marks combinations taken from the referenced Kaggle kernels.
do_prev_click_cols = [
    ['ip', 'device'],
    ['ip', 'app', 'device'],
    ['ip', 'app'],
    ['ip', 'app', 'device', 'os'],
    ['ip', 'app', 'device', 'os', 'channel'],
    ['ip', 'app', 'os', 'channel'],
    ['ip', 'device', 'os', 'channel'],
    ['ip', 'os'],
    ['ip', 'device', 'channel'],
    ['ip', 'channel'], # ref
]
do_next_click_cols = [
    ['ip', 'device'],
    ['ip', 'app', 'device'],
    ['ip', 'app'],
    ['ip', 'app', 'device', 'os'], # ref
    ['ip', 'os'],
    ['ip', 'device', 'os'], # ref
    ['ip', 'device', 'os', 'channel'],
    ['ip', 'os', 'channel'],
    ['ip', 'app', 'os', 'channel'],
    ['ip', 'app', 'os'],
    ['ip', 'device', 'channel'],
    ['ip', 'app', 'device', 'os', 'channel'], # ref
    ['device', 'channel'], # ref
    ['app', 'device', 'channel'], # ref
    ['device', 'hour'], # ref
    # ['ip', 'device'],
    # ['ip', 'app', 'device', 'channel'],
    # ['ip', 'os'],
    # ['ip', 'app', 'channel'],
    # ['ip' ,'channel'],
]
do_count_cols = [
    ['ip', 'device'],
    ['app', 'channel'],
    ['device', 'os', 'channel', 'hour'],
    ['ip', 'device', 'hour'],
    ['app', 'device', 'os'],
    ['app', 'os', 'channel', 'hour'],
    ['app', 'os'],
    ['app', 'hour'],
    ['ip', 'day', 'hour'], # ref
    ['ip', 'app'], # ref
    ['ip', 'app', 'os'], # ref
]
do_countuniq_cols = [
    ['ip', 'app'], # ref
    ['ip', 'device', 'channel'],
    ['ip', 'device', 'os'], # ref
    ['ip', 'channel'], # ref
    ['ip', 'device', 'os', 'hour'],
    ['ip', 'day', 'hour'], # ref
    ['ip', 'app', 'os'], # ref
    ['ip', 'device'],
    ['app', 'channel'],
]
do_cumcount_cols = [
    ['app', 'os', 'hour'],
    ['app', 'device', 'channel'],
    ['app', 'device'],
    ['app', 'device', 'os'],
    ['device', 'os'],
    ['app', 'channel', 'hour'],
    ['os', 'channel'],
    ['device', 'channel', 'hour'],
    ['device', 'os', 'channel'],
    ['os', 'channel', 'hour'],
    ['device', 'os', 'hour'],
    ['app', 'device', 'channel', 'hour'],
    ['app', 'os', 'channel'],
    ['ip', 'os'], # ref
    ['ip', 'device', 'os'], # ref
]
do_mean_cols = [
    ['ip', 'app'],
    ['ip', 'app', 'channel'],
    ['ip', 'os', 'channel'],
    ['ip', 'device', 'os'],
    ['ip', 'os'],
    ['ip', 'device', 'hour'],
    ['ip', 'channel'],
    ['ip', 'app', 'os'],
    ['ip', 'device', 'channel'],
    ['os', 'channel', 'hour'],
    ['app', 'os', 'channel'],
    ['device', 'channel', 'hour'],
    ['ip', 'app', 'channel', 'hour'],
    ['ip', 'app', 'hour'],
    ['ip', 'os', 'hour'],
    ['ip', 'device', 'os', 'hour'],
    ['ip', 'os', 'channel', 'hour'],
    ['app', 'channel', 'hour'],
    ['app', 'device', 'os', 'hour'],
    # ['ip', 'os', 'channel'],
    # ['ip', 'app', 'os'],
    # ['ip', 'device', 'channel']
]
do_var_cols = [
    ['ip', 'os', 'channel'],
    ['ip', 'app'],
    ['ip', 'app', 'channel'],
    ['ip', 'device', 'hour'],
    ['ip', 'device', 'channel'],
    ['ip', 'app', 'os'],
    ['ip', 'device', 'os'],
    ['ip', 'channel'],
    ['ip', 'os'],
    ['app', 'os', 'channel', 'hour'],
    ['ip', 'device', 'os', 'hour'],
    ['device', 'os', 'channel', 'hour'],
    ['os', 'channel'],
    ['app', 'channel', 'hour'],
    ['ip', 'device', 'channel', 'hour'],
    ['ip', 'app', 'device'],
    ['app', 'os', 'hour'],
    ['ip', 'app', 'hour'],
    ['ip', 'app', 'device', 'hour'],
    # ['ip', 'os', 'hour'],
    ['app', 'os', 'channel'],
    # ['ip', 'channel']
]
# Sanity print of the (group_cols, counted) split used by do_feature().
for cols in do_count_cols:
    print(cols[:-1], cols[-1])
feature_files = []  # paths of every feature file written during this run
# +
# # %%time
# test_csv = pd.read_csv(
# test_csv_file,
# nrows=test_n_rows,
# usecols=test_columns,
# dtype=dtypes,
# parse_dates=['click_time']
# )
# +
# # %%time
# print('test_csv.shape: \t\t', test_csv.shape)
# display(test_csv.head(2))
# print('test_csv: %.2f Mb' % (sys.getsizeof(test_csv)/1024./1024.))
# # print('*' * 80)
# click_ids = test_csv['click_id']
# test_csv.drop(['click_id'], axis=1, inplace=True)
# display(click_ids.head())
# display(test_csv.head())
# # print('*' * 80)
# test_csv = do_feature(test_csv)
# y_proba_file = os.path.join(feature_folder, 'feature_%s_test.p' % run_name)
# feature_files.append(y_proba_file)
# save_test_feature(
# test_csv,
# click_ids,
# y_proba_file
# )
# x_test, click_ids = load_test_feature(y_proba_file)
# print(x_test.shape)
# print(len(click_ids))
# feature_map = []
# print('[')
# for i, col in enumerate(test_csv.columns):
# feature_map.append((i, col))
# print(' (%s,\t"%s")' % (i, col))
# print(']')
# feature_map_file_name = y_proba_file = os.path.join(feature_folder, 'feature_map_%s.p' % run_name)
# save_feature_map(feature_map, feature_map_file_name)
# feature_map1 = load_feature_map(feature_map_file_name)
# print(len(feature_map1))
# print(feature_map1[:5])
# # del test_csv
# # del x_test
# # del click_ids
# # gc.collect()
# -
# NOTE(review): this loop has no effect beyond printing — every iteration
# either just prints or skips; the actual per-day work happens in the cell
# below with key = date.
for key in day_rows.keys():
    key_str = str(key)
    print('date key: %s' % key_str)
    if is_debug and key > 1:
        print('is_debug=%s, skip date: %s' % (is_debug, key_str))
        continue
    if not is_debug and key <= 1:
        print('is_debug=%s, skip date: %s' % (is_debug, key_str))
        continue
# +
# %%time
# for key in day_rows.keys():
print('*' * 80)
key = date  # the single day selected in "Important params"
key_str = str(key)
print('date key: %s' % key_str)
n_skiprows = day_rows[key]['n_skiprows']
n_rows = day_rows[key]['n_rows']
# Read only this day's slice of train.csv (row 0, the header, is kept
# because skiprows starts at 1).
train_csv = pd.read_csv(
    train_csv_file,
    skiprows=range(1, n_skiprows),
    nrows=n_rows,
    usecols=train_columns,
    dtype=dtypes,
    parse_dates=['click_time']
)
print('train_csv.shape: \t\t', train_csv.shape)
display(train_csv.head(2))
print('train_csv: %.2f Mb' % (sys.getsizeof(train_csv)/1024./1024.))
# print('*' * 80)
# Split off the label before feature engineering.
y_data = train_csv['is_attributed']
train_csv.drop(['is_attributed'], axis=1, inplace=True)
display(y_data.head())
display(train_csv.head())
# print('*' * 80)
train_csv = do_feature(train_csv)
y_proba_file = os.path.join(feature_folder, 'feature_%s_date%s.p' % (run_name, key_str))
feature_files.append(y_proba_file)
save_feature(
    train_csv,
    y_data,
    y_proba_file
)
# Round-trip load to verify the saved file is readable.
x_data, y_data = load_feature(y_proba_file)
print(x_data.shape)
print(y_data.shape)
# Print an (index, column-name) listing of the engineered features.
print('[')
for i, col in enumerate(train_csv.columns):
    print(' (%s,\t"%s")' % (i, col))
print('')
# del train_csv
# del x_data
# del y_data
# gc.collect()
# +
# NOTE(review): disabled stub for splitting the day's features into three
# part files. y_proba_file is reassigned three times with all saves
# commented out, so only the final '_p3' path value survives.
y_proba_file = os.path.join(feature_folder, 'feature_%s_date%s_p1.p' % (run_name, key_str))
# feature_files.append(y_proba_file)
# save_feature(
#     train_csv[: 2000*10000],
#     y_data[: 2000*10000],
#     y_proba_file
# )
y_proba_file = os.path.join(feature_folder, 'feature_%s_date%s_p2.p' % (run_name, key_str))
# feature_files.append(y_proba_file)
# save_feature(
#     train_csv[2000*10000: 4000*10000],
#     y_data[2000*10000: 4000*10000],
#     y_proba_file
# )
y_proba_file = os.path.join(feature_folder, 'feature_%s_date%s_p3.p' % (run_name, key_str))
# feature_files.append(y_proba_file)
# save_feature(
#     train_csv[4000*10000: ],
#     y_data[4000*10000: ],
#     y_proba_file
# )
# x_data, y_data = load_feature(y_proba_file)
# print(x_data.shape)
# print(y_data.shape)
# -
# print(x_data.shape)
# print(y_data.shape)
# print(x_test.shape)
# print(click_ids.shape)
# Report every feature file written this run plus total wall-clock time.
for name in feature_files:
    print(name)
print('Time cost: %.2f s' % (time.time() - t0))
print(run_name)
print('Done!')
| talkingdata-adtracking-fraud-detection/1-Copy2. FeatureExtraction_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn import model_selection, datasets, linear_model
# Load the scikit-learn diabetes regression dataset.
X, y = datasets.load_diabetes(return_X_y=True)
print(X.shape)
X[0]
# Keep only feature column 3 (as a 2-D column vector) so the fitted model
# can be visualised as a line against a single predictor.
X = X[:, np.newaxis, 3]
X.shape
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y)
model = linear_model.LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# Scatter the held-out points and overlay the model's predictions.
# NOTE(review): X_test is unsorted, so plt.plot draws a zigzag; sort by X
# before plotting for a clean regression line.
plt.scatter(X_test, y_test, color = 'black')
plt.plot(X_test, y_pred, color = 'blue', linewidth = 3)
plt.show()
| MY LEARNINGS/ML1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="S19jleua1_GE" outputId="5f02988d-b608-47a4-b718-1d312622694f"
import sys
print(sys.version)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="XGXh32Yl3E5s" outputId="ccdbcea1-bf6a-4d2b-c3e6-c07546fb2a1b"
# Importing dependencies
import numpy as np
np.random.seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential, load_model
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
from keras import optimizers
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error,r2_score
from math import sqrt
import datetime as dt
import time
plt.style.use('ggplot')
# + colab={} colab_type="code" id="FNioyc6mZUAJ"
# Setting up an early stop
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=80, verbose=1, mode='min')
callbacks_list = [earlystop]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="r5O0a39R9Z_Z" outputId="39b12f66-129a-463c-bcc4-63827d80558e"
# Loading the dataset
'''
Connect to internet for executing this block.
'''
# Loading the dataset
#url = 'https://raw.githubusercontent.com/ninja3697/dataset/master/CSV.csv'
url = '../CSV.csv'
df = pd.read_csv(url,parse_dates = True , index_col = 0)
df.drop(df[df['Volume']==0].index, inplace = True)
df['Close'].plot()
# -
print("Correlation =",df['Close'].corr(df['Adj Close'])) # to check if the stocks are splitted or not in the past
# + [markdown] colab_type="text" id="X42scYzM5y0n"
# Since the Adj. Close and Close columns are highly correlated, we do not need to think about splittting the series.
# + colab={} colab_type="code" id="zRVEzZ1FXj_p"
# Build and train the model
def fit_model(train,val,timesteps,hl,lr,batch,epochs):
    """Build a stacked-LSTM regressor and fit it on sliding windows.

    train, val : 2-D arrays of scaled values; each window of `timesteps`
        consecutive values predicts the next value.
    hl : list of hidden-layer sizes for the LSTM stack.
    lr, batch, epochs : Adam learning rate, batch size, max epochs.
    Returns (model, train_loss_history, val_loss_history).
    Uses the module-level `callbacks_list` (early stopping on val_loss).
    """
    def _to_windows(seq):
        # Slide a window of `timesteps` values; the value right after each
        # window is its regression target.
        xs = [seq[j - timesteps:j] for j in range(timesteps, seq.shape[0])]
        ys = [seq[j] for j in range(timesteps, seq.shape[0])]
        return np.array(xs), np.array(ys)

    X_train, Y_train = _to_windows(train)
    X_val, Y_val = _to_windows(val)

    # Stack: 1-unit input LSTM, the intermediate sizes from hl (all returning
    # sequences), a final LSTM, and a single-unit regression head.
    model = Sequential()
    model.add(LSTM(1, input_shape=(X_train.shape[1], 1), return_sequences=True, activation='relu'))
    for units in hl[:-1]:
        model.add(LSTM(units, activation='relu', return_sequences=True))
    model.add(LSTM(hl[-1], activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer=optimizers.Adam(lr=lr), loss='mean_squared_error')
    # Training the data (chronological order preserved: shuffle=False).
    history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch,
                        validation_data=(X_val, Y_val), verbose=0,
                        shuffle=False, callbacks=callbacks_list)
    model.reset_states()
    return model, history.history['loss'], history.history['val_loss']
# + colab={} colab_type="code" id="LpwHmJeQJqyI"
# Evaluating the model
def evaluate_model(model,test,timesteps):
    """Score `model` on `test` using sliding windows of length `timesteps`.

    NOTE: returns FIVE values — (mse, rmse, r2, Y_test, Y_hat); callers
    must unpack all of them.
    """
    windows = [test[j - timesteps:j] for j in range(timesteps, test.shape[0])]
    targets = [test[j] for j in range(timesteps, test.shape[0])]
    X_test, Y_test = np.array(windows), np.array(targets)
    # Prediction Time !!!!
    Y_hat = model.predict(X_test)
    mse = mean_squared_error(Y_test, Y_hat)
    r2 = r2_score(Y_test, Y_hat)
    return mse, sqrt(mse), r2, Y_test, Y_hat
# + colab={} colab_type="code" id="pI0q18ajCLx6"
# Plotting the predictions
def plot_data(Y_test,Y_hat):
    """Overlay actual (red) and predicted (yellow) prices on one chart."""
    for values, color in ((Y_test, 'r'), (Y_hat, 'y')):
        plt.plot(values, c=color)
    plt.title("Stock Price Prediction using Univariate-LSTM")
    plt.xlabel('Day')
    plt.ylabel('Price')
    plt.legend(['Actual', 'Predicted'], loc='lower right')
    plt.show()
# + colab={} colab_type="code" id="4NeqKRBZZr0Q"
# Plotting the training errors
def plot_error(train_loss,val_loss):
    """Plot training (red) and validation (blue) loss per epoch."""
    for values, color in ((train_loss, 'r'), (val_loss, 'b')):
        plt.plot(values, c=color)
    plt.title('Train Loss and Validation Loss Curve')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend(['train', 'val'], loc='upper right')
    plt.show()
# -
# ## Model Building
# + colab={"base_uri": "https://localhost:8080/", "height": 381} colab_type="code" id="gAvOMIyjIQO-" outputId="e9ba11a8-5f91-45d8-d5ba-3c75695511ab"
# Extracting the series
series = df['Close']
print(series.shape)
series.plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="Tjso-RjNDmbs" outputId="8d00fa2b-ec63-482e-dbfa-d5ae05514471"
# Train Val Test Split (chronological: 1997-2006 / 2007-2008 / 2009-2010)
train_start = dt.date(1997,1,1)
train_end = dt.date(2006,12,31)
train_data = series.loc[train_start:train_end].values.reshape(-1,1)
val_start = dt.date(2007,1,1)
val_end = dt.date(2008,12,31)
val_data = series.loc[val_start:val_end].values.reshape(-1,1)
test_start = dt.date(2009,1,1)
test_end = dt.date(2010,12,31)
test_data = series.loc[test_start:test_end].values.reshape(-1,1)
print(train_data.shape,val_data.shape,test_data.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="NWXR5oL2ZnY4" outputId="91b899d4-6aa1-4d85-fc58-577afd393ab7"
# Normalisation: the scaler is fit on the training split only, then applied
# to val/test, avoiding look-ahead leakage.
sc = MinMaxScaler()
train = sc.fit_transform(train_data)
val = sc.transform(val_data)
test = sc.transform(test_data)
print(train.shape,val.shape,test.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 55} colab_type="code" id="RKSRtn3o6SA2" outputId="455f23da-7554-4ceb-d766-6197a855f952"
# Hyperparameters: window length, LSTM layer sizes, learning rate, batch, epochs.
timesteps = 40
hl = [40,35]
lr = 1e-3
batch_size = 64
num_epochs = 250
# + colab={"base_uri": "https://localhost:8080/", "height": 10171} colab_type="code" id="FKY1Nsji6Y6Y" outputId="e2730667-b828-4dfb-87a9-f3818cb654b2"
model,train_error,val_error = fit_model(train,val,timesteps,hl,lr,batch_size,num_epochs)
plot_error(train_error,val_error)
# + colab={"base_uri": "https://localhost:8080/", "height": 435} colab_type="code" id="lP3iJINF65yp" outputId="6c145ea3-ecc0-468b-ae5e-62a710c8ec62"
# evaluate_model returns FIVE values (mse, rmse, r2, Y_test, Y_hat); the
# original unpacked only four of them, which raises ValueError at runtime.
mse, rmse, r2_value, true, predicted = evaluate_model(model,test,40)
print('RMSE = {}'.format(rmse))
print('MSE = {}'.format(mse))
print('R-Squared Score = {}'.format(r2_value))
plot_data(true,predicted)
# + colab={} colab_type="code" id="5mJnzFIPCZnw"
# Save a model (filename encodes timesteps_[layers]_lr_batch)
model.save('UV-LSTM_40_[40,35]_1e-3_64.h5')
del model
# Load a model
#model = load_model('UV-LSTM_40_[40,35]_1e-3_64.h5')
# + [markdown] colab_type="text" id="jTNRGuoTWRLj"
# ## Cross-Validation
# + colab={} colab_type="code" id="vF3LyPA67rDv"
# Hyperparameters (epochs reduced to 50 for the CV fine-tuning rounds)
timesteps = 40
hl = [40,35]
lr = 1e-3
batch_size = 64
num_epochs = 50
# + colab={"base_uri": "https://localhost:8080/", "height": 92} colab_type="code" id="dwBzALCYWc6A" outputId="37106252-758f-4895-c4cc-9ae14bcfcd17"
# Extracting the series
series = df['Close'].values.reshape(-1,1)
print(series.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 146} colab_type="code" id="DJTfM-wBWi5o" outputId="34df40c8-06a2-4902-bbeb-0103945f4eed"
# Normalisation over the first 5400 observations used for the walk-forward CV.
# NOTE(review): fitting the scaler on the full 5400 points leaks future scale
# information into earlier splits — acceptable here, but worth confirming.
sc = MinMaxScaler()
series = sc.fit_transform(series[:5400])
series
# -
# ### Split I
# Splitting the data for initial model creation (walk-forward scheme in
# chunks of `split_size` observations).
splits = 5
split_size = 600
train = series[:3*split_size]
# The chunk between 3*split_size and 4*split_size is held out as validation.
# The original call passed only six arguments to fit_model (whose signature
# is train, val, timesteps, hl, lr, batch, epochs) and raised a TypeError.
val = series[3*split_size:4*split_size]
test = series[4*split_size:5*split_size]
cross_val_results = list()
train_loss = pd.DataFrame()
val_loss = pd.DataFrame()
model,train_error,val_error = fit_model(train,val,timesteps,hl,lr,batch_size,num_epochs)
train_loss['Split1'] = train_error
val_loss['Split1'] = val_error
mse, rmse, r2_value,true,predicted = evaluate_model(model,test,timesteps)
print("Split 1")
print('MSE = {}'.format(mse))
print('RMSE = {}'.format(rmse))
print('R-Squared Score = {}'.format(r2_value))
plot_data(true,predicted)
# Train time recorded as 0 for the initial fit (not timed).
cross_val_results.append([mse,rmse,r2_value,0])
model.save("UV-LSTM-Split1.h5")
# ### Split II
# Each subsequent split extends the training window by one chunk and
# fine-tunes the SAME model (warm start) rather than retraining from scratch;
# 20% of the window is used as validation via validation_split.
# +
train = series[:4*split_size]
test = series[4*split_size:5*split_size]
X_train,Y_train = [],[]
# Loop for training data
for i in range(timesteps,train.shape[0]):
    X_train.append(train[i-timesteps:i])
    Y_train.append(train[i])
X_train,Y_train = np.array(X_train),np.array(Y_train)
start = time.time()
history = model.fit(X_train,Y_train,epochs = num_epochs,batch_size = batch_size,validation_split = 0.2,verbose = 0,
                    shuffle = False)
end = time.time()
train_loss["Split2"] = history.history['loss']
val_loss["Split2"] = history.history['val_loss']
mse, rmse, r2_value,true,predicted = evaluate_model(model,test,timesteps)
print("Split 2")
print('MSE = {}'.format(mse))
print('RMSE = {}'.format(rmse))
print('R-Squared Score = {}'.format(r2_value))
plot_data(true,predicted)
cross_val_results.append([mse,rmse,r2_value,end-start])
model.save("UV-LSTM-Split2.h5")
# -
# ### Split III
# +
train = series[:5*split_size]
test = series[5*split_size:6*split_size]
X_train,Y_train = [],[]
# Loop for training data
for i in range(timesteps,train.shape[0]):
    X_train.append(train[i-timesteps:i])
    Y_train.append(train[i])
X_train,Y_train = np.array(X_train),np.array(Y_train)
start = time.time()
history = model.fit(X_train,Y_train,epochs = num_epochs,batch_size = batch_size,validation_split = 0.2,verbose = 0,
                    shuffle = False)
end = time.time()
train_loss["Split3"] = history.history['loss']
val_loss["Split3"] = history.history['val_loss']
mse, rmse, r2_value,true,predicted = evaluate_model(model,test,timesteps)
print("Split 3")
print('MSE = {}'.format(mse))
print('RMSE = {}'.format(rmse))
print('R-Squared Score = {}'.format(r2_value))
plot_data(true,predicted)
cross_val_results.append([mse,rmse,r2_value,end-start])
model.save("UV-LSTM-Split3.h5")
# -
# ### Split IV
# +
train = series[:6*split_size]
test = series[6*split_size:7*split_size]
X_train,Y_train = [],[]
# Loop for training data
for i in range(timesteps,train.shape[0]):
    X_train.append(train[i-timesteps:i])
    Y_train.append(train[i])
X_train,Y_train = np.array(X_train),np.array(Y_train)
start = time.time()
history = model.fit(X_train,Y_train,epochs = num_epochs,batch_size = batch_size,validation_split = 0.2,verbose = 0,
                    shuffle = False)
end = time.time()
train_loss["Split4"] = history.history['loss']
val_loss["Split4"] = history.history['val_loss']
mse, rmse, r2_value,true,predicted = evaluate_model(model,test,timesteps)
print("Split 4")
print('MSE = {}'.format(mse))
print('RMSE = {}'.format(rmse))
print('R-Squared Score = {}'.format(r2_value))
plot_data(true,predicted)
cross_val_results.append([mse,rmse,r2_value,end-start])
model.save("UV-LSTM-Split4.h5")
# -
# ### Split V
# +
train = series[:7*split_size]
test = series[7*split_size:8*split_size]
X_train,Y_train = [],[]
# Loop for training data
for i in range(timesteps,train.shape[0]):
    X_train.append(train[i-timesteps:i])
    Y_train.append(train[i])
X_train,Y_train = np.array(X_train),np.array(Y_train)
start = time.time()
history = model.fit(X_train,Y_train,epochs = num_epochs,batch_size = batch_size,validation_split = 0.2,verbose = 0,
                    shuffle = False)
end = time.time()
train_loss["Split5"] = history.history['loss']
val_loss["Split5"] = history.history['val_loss']
mse, rmse, r2_value,true,predicted = evaluate_model(model,test,timesteps)
print("Split 5")
print('MSE = {}'.format(mse))
print('RMSE = {}'.format(rmse))
print('R-Squared Score = {}'.format(r2_value))
plot_data(true,predicted)
cross_val_results.append([mse,rmse,r2_value,end-start])
model.save("UV-LSTM-Split5.h5")
# + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" id="TJ3YoQUkXcCd" outputId="0016c16f-7baa-4d84-d4bb-868bb5aae445"
# Aggregate the per-split metrics and report the averages.
CV_results = pd.DataFrame(cross_val_results,columns=['MSE','RMSE','R2_Score','Train_Time'])
print("Avg. MSE = {}".format(CV_results['MSE'].mean()))
print("Avg. RMSE = {}".format(CV_results['RMSE'].mean()))
print("Avg. R2-score = {}".format(CV_results['R2_Score'].mean()))
# + colab={} colab_type="code" id="kPhF46XAXgYA"
# Persist metrics and loss curves for later comparison.
CV_results.to_csv('UV-LSTM_CrossValidation.csv')
train_loss.to_csv('UV-LSTM_CrossValidation_TrainLoss.csv')
val_loss.to_csv('UV-LSTM_CrossValidation_ValLoss.csv')
| Univariate -LSTM/Univariate_LSTM.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Matlab
% language: matlab
% name: matlab
% ---
% # Stability (a case study)
%
% Broadly speaking, stability refers to the sensitivity of an algorithm to perturbations. Since our algorithms are performed in double precision arithmetic, they are constantly undergoing small perturbations. If the result is more strongly affected by these than the conditioning of the underlying problem suggests, then we call the algorithm unstable.
%
% ## Quadratic formula
%
% Consider a polynomial whose roots are widely separated in scales.
format long e
% Polynomial with roots 1e-6 and 1e6 (widely separated scales).
p = poly([1.0e-6,1.0e6])
% The roots of this polynomial are well conditioned with respect to perturbations in its coefficients.
% Empirically: perturb the coefficients relatively by ~1e-11, 2000 times, and
% record the largest relative change in the (real parts of the) roots.
delta = [];
r = sort(roots(p));
for j = 1:2000
    pp = p + 1e-11*p.*(2*rand(1,3)-1);
    rr = roots(pp);
    delta = [ delta; norm( (sort(real(rr))-r)./r, Inf ) ];
end
histogram(log10(delta),24);
xlabel("relative change to roots (log)"); ylabel("number of cases");
% According to this experiment, we should be able to compute the polynomial roots accurately in floating point.
%
% Let's return to middle school and apply the quadratic formula to find the roots. First is the discriminant.
% Coefficients of p(x) = a*x^2 + b*x + c, then the discriminant.
a = p(1); b = p(2); c = p(3);
d = sqrt(b^2-4*a*c)
% And now, the roots via the textbook quadratic formula.
r1 = (-b+d)/(2*a)
r2 = (-b-d)/(2*a)
% Surprisingly, the r2 result has just 5 accurate digits!
% While multiplication and division are perfectly conditioned operations, addition/subtraction are not when cancellation is involved; the condition number in the infinity norm is inversely related to the absolute value of the result.
% Here -b and d nearly cancel, so the subtraction below is the unstable step.
s = -b-d;
kappa = 2*norm([-b,d],Inf)/abs(s)
% The condition number of a series of calculations is the product of the condition numbers of the steps (basically, the chain rule). By introducing this step into the process, we get a condition number of about $10^{10}$ in the calculation, rather than the underlying problem's condition number, which is close to 1.
%
% We can avoid this ill-conditioned step by using the identity
%
% $$r_1r_2 = \frac{c}{a}$$
%
% to compute the smaller root accurately (shown as its error vs the exact 1e-6).
c/(a*r1) - 1e-6
% ## Three levels of performance
%
% Ultimately we would like **accuracy**, that is, as small an error as the machine allows. For poorly conditioned problems, this is not a realistic expectation. Instead, we can hope for a small **backward error**, which is the distance between the original data and the data that produces the result we actually computed.
%
% It's interesting to check the backward error of our "bad" root calculation. One polynomial whose exact roots were found is
% Monic polynomial whose exact roots are the computed r1, r2.
pp = poly([r1,r2])
% All polynomials with these roots are multiples of this one. We can find the one that's closest to the original data of the problem using a linear least squares calculation:
alpha = pp(:)\p(:)
% Hence the minimal (2-norm) difference in coefficients (the backward error) is
p - alpha*pp
% The first and second values are both $\mathcal{O}(\epsilon_M)$ with respect to the original polynomial coefficients, but the one from the polynomial constant term is not. The backward error here, like the forward error, is $\mathcal{O}(10^{-5})$.
%
% An algorithm that can guarantee a small backward error is **backward stable**. The accuracy of a backward stable algorithm is proportional jointly to the condition number of its problem and the machine precision.
%
% Finally, even backward stability is not always realistic. For example, if a result is supposed to have a particular structure, like symmetry or orthogonality, we cannot hope that the perturbations of floating point representation will preserve that structure. Hence no data could have produced it exactly. Instead we have to look for nearby data that would produce a nearby result having the required structure. Algorithms that can guarantee such situations are called **stable**. In some computations that is all we can assert.
| matlab/Stability.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sakasa/notebooks/blob/master/pandas_test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="qIbKqlVfxps7"
import pandas as pd
# + id="RB5jO_wKxvKS"
# Tokyo Metropolitan Government COVID-19 open-data feed (JSON).
url = 'https://raw.githubusercontent.com/tokyo-metropolitan-gov/covid19/development/data/data.json'
# + id="wPHWE9-Axxre"
df = pd.read_json(url)
# + id="x6ZmdTh3Uf2P"
# + colab={"base_uri": "https://localhost:8080/"} id="6j_7YcMmx8nF" outputId="ceb6a9ba-dc7e-4969-994e-0452172aff4b"
# Peek at the nested per-day patient counts.
df.get('patients_summary').get('data')
# + id="v8fxLP5ZyOs8"
# Flatten the nested records into a DataFrame (columns include the date and
# daily-subtotal fields used below).
df = pd.DataFrame(df['patients_summary']['data'])
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="s5jL0YXLzoki" outputId="e05549ad-6657-4f5f-aa63-ce8857163fb7"
df
# + id="RK4D4jeNzpGq"
# Use the date column ('日付') as a DatetimeIndex for time-based grouping.
df.index = pd.to_datetime(df['日付'])
# + colab={"base_uri": "https://localhost:8080/", "height": 455} id="9un-pCSjzu5T" outputId="ccce1dd3-59ed-41e6-cc32-0c915f79c912"
# Daily subtotal ('小計') as its own frame.
_df = pd.DataFrame(df['小計'], index=df.index)
_df
# + colab={"base_uri": "https://localhost:8080/"} id="ql0BOGyFsBpY" outputId="ac2c97e4-0ae9-4e1f-c548-10ef5cff537f"
df.index
# + id="bhRnEB5Y1-a4"
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import ticker
from tqdm.notebook import tqdm
# %matplotlib inline
# + id="8KQ9i2Ze2ByF"
sns.set()
# + id="bByXHZZmzvph"
# Averages via groupby+Grouper at several frequencies:
# W = weekly, SM = semi-month end, M = month end, Q = quarter end.
# NOTE(review): df still contains the non-numeric date column; .mean() over it
# may raise on newer pandas versions — confirm, or select the numeric column.
w_mean = df.groupby(pd.Grouper(freq='W')).mean()
# + id="8iaj-Ufh0loX"
smonth_mean = df.groupby(pd.Grouper(freq='SM')).mean()
# + id="hFMEXnYv03pY"
month_mean = df.groupby(pd.Grouper(freq='M')).mean()
# + id="v-NVAy281CX1"
q_mean = df.groupby(pd.Grouper(freq='Q')).mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 665} id="xkycnuUg2XKN" outputId="d12787e2-46fc-43d9-b50c-ddf2979ba2ae"
# Plot the raw series against each smoothed version.
fig, ax = plt.subplots(figsize = (18,10))
x = df['日付']
ax.plot(_df, label='src')
ax.plot(w_mean, label='w')
ax.plot(smonth_mean, label='sm')
ax.plot(month_mean, label='m')
ax.plot(q_mean, label='q')
ax.set(xlabel='date',ylabel='cnt' )
ax.legend()
ax.set_xticklabels(labels=df.index, rotation=90, ha='center')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax.xaxis.set_major_locator(ticker.MultipleLocator(20))
# + id="bSzXT0eW1FPX"
# + id="EMWl8B8b1jEu"
# Same aggregation as above, but via resample() — equivalent results,
# shown for comparison with groupby(Grouper).
w_mean_2 = df.resample('W').mean()
# + id="hGM6Edas1qpI"
smonth_mean_2 = df.resample('SM').mean()
# + id="eE2o5Mpu1yJQ"
month_mean_2 = df.resample('M').mean()
# + id="mBtkPwY-12cY"
q_mean_2 = df.resample('Q').mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 665} id="qXjAYahN3D7z" outputId="f73fb1ef-825e-4888-cdf8-d30d13c70409"
fig, ax = plt.subplots(figsize = (18,10))
x = df['日付']
ax.plot(w_mean_2, label='w')
ax.plot(smonth_mean_2, label='sm')
ax.plot(month_mean_2, label='m')
ax.plot(q_mean_2, label='q')
ax.set(xlabel='date',ylabel='cnt' )
ax.legend()
ax.set_xticklabels(labels=df.index, rotation=90, ha='center')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax.xaxis.set_major_locator(ticker.MultipleLocator(20))
# + id="Wwu70tsx14_2"
# + id="bKAHfcZC4LxW"
| pandas_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('classic')
# +
# ------- file: myplot.py ------
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 10, 100)
# Keep an explicit handle on the figure so it can be saved afterwards;
# the original called fig.savefig() without ever defining `fig` (NameError).
fig = plt.figure()
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x))
plt.show()
# -
fig.savefig('my_figure.png')
| Geographical plottings/.ipynb_checkpoints/Geographical plottings 2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''genpen'': conda)'
# name: python3
# ---
# +
import itertools
import numpy as np
import os
import seaborn as sns
from tqdm import tqdm
from dataclasses import asdict, dataclass, field
import vsketch
import shapely.geometry as sg
from shapely.geometry import box, MultiLineString, Point, MultiPoint, Polygon, MultiPolygon, LineString
import shapely.affinity as sa
import shapely.ops as so
import matplotlib.pyplot as plt
import pandas as pd
import vpype_cli
from typing import List, Generic
from genpen import genpen as gp
from genpen.utils import Paper
from scipy import stats
import geopandas
from shapely.errors import TopologicalError
import functools
import vpype
from skimage import io
from pathlib import Path
from sklearn.preprocessing import minmax_scale
from skimage import feature
from skimage import exposure
from skimage import filters
from skimage.color import rgb2gray
from skimage.transform import rescale, resize, downscale_local_mean
from skimage.morphology import disk
from numpy.random import default_rng
def local_angle(dx, dy):
    """Fold the gradient direction given by (dx, dy) into [0, pi).

    Horizontal gradients map to 0, vertical ones to pi/2; opposite
    directions collapse onto the same angle.
    """
    raw = np.arctan2(dy, dx)
    return np.mod(raw, np.pi)
from PIL import Image
import cv2
from rasterio import features
import rasterio
# %load_ext autoreload
# %autoreload 2
# -
import pydiffvg as dg
import torch
import skimage
import numpy as np
from torchvision.transforms import functional as TF
from IPython import display
def finalize_image(img, gamma = 2.2, normalize = False, as_Image=False):
    """Gamma-correct a rendered float image and convert it to uint8.

    Accepts a numpy array or anything exposing `.data.numpy()` (e.g. a CPU
    torch tensor). Values are optionally min-max normalized, clipped to
    [0, 1], gamma-corrected on the first three channels (alpha untouched),
    and scaled to 0-255. With `as_Image` the result is a PIL Image.
    """
    if not isinstance(img, np.ndarray):
        img = img.data.numpy()
    if normalize:
        lo, hi = np.min(img), np.max(img)
        if hi - lo > 0:
            img = (img - lo) / (hi - lo)
    img = np.clip(img, 0.0, 1.0)
    if img.ndim == 2:
        # Promote grayscale to a single-channel 3-D array.
        img = np.expand_dims(img, 2)
    img[:, :, :3] = img[:, :, :3] ** (1.0 / gamma)
    img = (img * 255).astype(np.uint8)
    return Image.fromarray(img) if as_Image else img
class LineTensor(object):
    # Wraps a shapely LineString's coordinates in a torch tensor with
    # requires_grad=True so the points can be optimized (e.g. by diffvg),
    # while `ls` rebuilds a LineString from the current tensor values.
    def __init__(
        self,
        linestring,
    ):
        """Store `linestring` and expose its coords as a differentiable tensor."""
        self.init_ls = linestring
        # Raw (x, y) pairs of the original line.
        self.pts = [p for p in self.init_ls.coords]
        # Differentiable copy of the coordinates.
        self.tensor = torch.tensor(self.pts, requires_grad=True)
        # Centroid at construction time (tensor not yet modified, so this
        # equals the original line's centroid).
        self.init_loc_pt = self.ls.centroid
        # NOTE(review): np.array() of a shapely Point may not produce numeric
        # coordinates on all shapely versions — verify; np.array(pt.coords)
        # would be unambiguous.
        self.init_loc_tensor = torch.tensor(np.array(self.init_loc_pt), requires_grad=True)
    @property
    def ls(self):
        # Current line rebuilt from the (possibly updated) tensor values.
        return LineString(self.tensor.cpu().data.numpy())
# # diffvg
from copy import deepcopy
import fn
class SpinLine(object):
    """A short line segment placed on a circle around `offset_xy`.

    The segment's midpoint sits at polar position (radial_loc,
    angular_loc_deg) relative to `offset_xy`, and the segment itself is
    rotated by `rotation_deg` and has total length `length`.

    NOTE(review): despite the `_deg` parameter names, the angles are fed
    directly to cos/sin (i.e. treated as radians); preserved as-is since
    callers rely on that behavior.
    """
    def __init__(
        self,
        offset_xy=None,
        angular_loc_deg=0.,
        radial_loc=0.,
        rotation_deg=0.,
        length=1.
    ):
        self.offset_xy = np.array((0., 0.)) if offset_xy is None else offset_xy
        self.theta = angular_loc_deg
        self.r = radial_loc
        self.deg = rotation_deg
        self.length = length
        # Midpoint on the circle of radius r at angle theta.
        self.x = np.cos(self.theta) * self.r
        self.y = np.sin(self.theta) * self.r
        self.loc = np.array((self.x, self.y)) + self.offset_xy
        # Half-length direction vector, mirrored to produce both endpoints.
        direction = np.array((np.cos(self.deg), np.sin(self.deg)))
        self.rel_coords = [direction * self.length/2 * sign for sign in [-1, 1]]
        self.coords = [rel + self.loc for rel in self.rel_coords]
        self.line = LineString(self.coords)
# Use GPU if available
dg.set_use_gpu(torch.cuda.is_available())
width = 1200
height = 1600
# Canvas as a shapely box wrapped in a genpen Shape. (The original built
# this pair twice back to back; once is sufficient.)
drawbox = box(0, 0, width, height)
db = gp.Shape(drawbox)
# Raster output shape is (rows, cols) = (height, width).
out_shape = np.array((db.height, db.width)).round().astype(int)
# +
# Fresh output directory for this piece's frames.
nft_id = fn.new_nft_id()
raster_videos_dir = Path('/home/naka/art/raster_videos')
nft_dir = raster_videos_dir.joinpath(nft_id)
if not nft_dir.exists():
    os.mkdir(nft_dir)
# -
# # single
# +
# Render one frame: spin-lines arranged on concentric circles, rasterized
# with diffvg and composited over black.
t=0.00
center = np.array(db.p.centroid.xy).ravel()
n_circles = 7
max_rad = (db.width/2) * 0.8
min_rad = (db.width/2) * 0.05
radii = np.linspace(min_rad, max_rad, n_circles)
loc_xy_spacing = 55
sls = []
for radius in radii:
    # Evenly space segments along each circle's circumference; rotation,
    # length and radius are modulated by sinusoids of angle/radius/t.
    circumference = radius * 360
    angular_locs = np.arange(0, circumference, loc_xy_spacing) / radius
    for angular_loc_deg in angular_locs:
        rotation_deg = angular_loc_deg * 4 + 60 * np.sin(0.01 * angular_loc_deg * t + 0.02*radius) + np.sin(0.00013 * angular_loc_deg) * 20
        length = 8. + np.sin(radius * angular_loc_deg) * 90 + np.sin(angular_loc_deg*0.001) * 40 + np.sin(0.00013 * angular_loc_deg) * 70
        rad = radius + np.sin(t * angular_loc_deg) * 20
        sl = SpinLine(offset_xy=center, angular_loc_deg=angular_loc_deg, radial_loc=rad, rotation_deg=rotation_deg, length=length)
        sls.append(LineString(sl.coords))
gp.merge_LineStrings(sls)
# Wrap each segment as a differentiable tensor for diffvg.
lts = [LineTensor(ls) for ls in sls]
canvas_width, canvas_height = width, height
num_control_points = torch.tensor([0])
shapes = []
shape_groups = []
for ii, lt in enumerate(lts):
    path = dg.Path(num_control_points = num_control_points,
                   points = lt.tensor,
                   is_closed = False,
                   stroke_width = torch.tensor(0.1))
    shapes.append(path)
    path_group = dg.ShapeGroup(shape_ids = torch.tensor([ii]),
                               fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
                               stroke_color = torch.tensor([1., 1., 1., 1]))
    shape_groups.append(path_group)
scene_args = dg.RenderFunction.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = dg.RenderFunction.apply
img = render(canvas_width, # width
             canvas_height, # height
             2,   # num_samples_x
             2,   # num_samples_y
             0,   # seed
             None, # background_image
             *scene_args)
# target = img.clone()
angle_targets = [torch.tensor(0) for shape in shapes]
# init
rendered_img = finalize_image(img.cpu(), as_Image=True)
img = render(canvas_width, # width
             canvas_height, # height
             2,   # num_samples_x
             2,   # num_samples_y
             0,   # seed
             None, # background_image
             *scene_args)
img = finalize_image(img.cpu(), as_Image=True)
# Composite the RGBA render over an opaque black background.
background = Image.new('RGBA', img.size, (0, 0, 0))
alpha_composite = Image.alpha_composite(background, img)
alpha_composite
# -
quality_val = 100
now = fn.get_time()
filepath = nft_dir.joinpath(f'{nft_id}_{now}_0000.jpeg')
alpha_composite.convert('RGB').save(filepath, quality=quality_val)
# # movie
# +
# Render one frame per t in [0, 1): same pipeline as the single-frame cell,
# with per-frame parameter modulation, saved as numbered JPEGs.
ts = np.arange(0., 1., 0.001)
quality_val = 100
# + tags=[]
for img_no, t in enumerate(tqdm(ts)):
    center = np.array(db.p.centroid.xy).ravel()
    n_circles = 10
    max_rad = (db.width/2) * 0.8
    min_rad = (db.width/2) * 0.05
    radii = np.linspace(min_rad, max_rad, n_circles)
    loc_xy_spacing = 85
    sls = []
    for radius in radii:
        circumference = radius * 360
        angular_locs = np.arange(0, circumference, loc_xy_spacing) / radius
        for angular_loc_deg in angular_locs:
            # Rotation/length vary with t so the lines animate over frames.
            rotation_deg = angular_loc_deg * 0.5 + 40 * np.sin(t* 0.001 * radius ) + 40 * np.sin(0.01 * angular_loc_deg * t)
            length = 88. + np.sin(t)
            sl = SpinLine(offset_xy=center, angular_loc_deg=angular_loc_deg, radial_loc=radius, rotation_deg=rotation_deg, length=length)
            sls.append(LineString(sl.coords))
    gp.merge_LineStrings(sls)
    lts = [LineTensor(ls) for ls in sls]
    canvas_width, canvas_height = width, height
    num_control_points = torch.tensor([0])
    shapes = []
    shape_groups = []
    for ii, lt in enumerate(lts):
        path = dg.Path(num_control_points = num_control_points,
                       points = lt.tensor,
                       is_closed = False,
                       stroke_width = torch.tensor(0.1))
        shapes.append(path)
        path_group = dg.ShapeGroup(shape_ids = torch.tensor([ii]),
                                   fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
                                   stroke_color = torch.tensor([1., 1., 1., 1]))
        shape_groups.append(path_group)
    scene_args = dg.RenderFunction.serialize_scene(\
        canvas_width, canvas_height, shapes, shape_groups)
    render = dg.RenderFunction.apply
    img = render(canvas_width, # width
                 canvas_height, # height
                 2,   # num_samples_x
                 2,   # num_samples_y
                 0,   # seed
                 None, # background_image
                 *scene_args)
    # target = img.clone()
    angle_targets = [torch.tensor(0) for shape in shapes]
    # init
    rendered_img = finalize_image(img.cpu(), as_Image=True)
    img = render(canvas_width, # width
                 canvas_height, # height
                 2,   # num_samples_x
                 2,   # num_samples_y
                 0,   # seed
                 None, # background_image
                 *scene_args)
    img = finalize_image(img.cpu(), as_Image=True)
    background = Image.new('RGBA', img.size, (0, 0, 0))
    alpha_composite = Image.alpha_composite(background, img)
    # Zero-padded frame number keeps lexicographic order == frame order.
    filepath = nft_dir.joinpath(f'{nft_id}_{img_no:0004}.jpeg')
    alpha_composite.convert('RGB').save(filepath, quality=quality_val)
# -
# Collect the rendered frames in order. The original glob pattern '.jpeg)'
# matched nothing (should be '*.jpeg'), and glob order is arbitrary, so the
# list is sorted to keep frames in sequence.
filenames = sorted(fp.as_posix() for fp in nft_dir.glob('*.jpeg'))
# Import the submodule explicitly: a bare `import moviepy` does not make
# moviepy.video.io.ImageSequenceClip available.
import moviepy.video.io.ImageSequenceClip
clipout = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(filenames, fps=10)
# The original called `joinpathnpath` (typo) — AttributeError at runtime;
# write_videofile also expects a string path.
clipout.write_videofile(nft_dir.joinpath(f'{nft_id}.mp4').as_posix())
# # moire
# + heading_collapsed="false"
from sklearn.preprocessing import minmax_scale
from skimage import feature
from genpen.utils import Paper
from scipy import spatial, stats
from scipy.ndimage import gaussian_filter
from scipy.integrate import odeint
# make page (11x14 inch page with a 30 mm border)
paper_size = '11x14 inches'
border:float=30
paper = Paper(paper_size)
drawbox = paper.get_drawbox(border)
# + heading_collapsed="false"
center = drawbox.centroid
# + heading_collapsed="false"
# Seed points laid out along an Archimedean-style spiral: angle and radius
# both grow linearly across n_lines samples.
n_lines = 8421
thetas = np.linspace(0, np.pi*27, n_lines)
radii = np.linspace(0.8, 28, n_lines)
# + heading_collapsed="false"
pts = []
for theta, radius in zip(thetas, radii):
    x = np.cos(theta) * radius - 0
    y = np.sin(theta) * radius + 0.
    pts.append(Point(x, y))
# -
# -
def ode(y, t, a, b, c, d):
    """Right-hand side of the 2-D flow field used to trace the moire lines.

    y = (v, u) is the state; `t` is unused (autonomous system) but required
    by scipy.integrate.odeint's callback signature. Returns [dv/dt, du/dt].
    """
    v, u = y
    return [np.sin(b * u) + c * v,
            np.cos(a * u * v) + d * u]
# +
# Integrate the flow field from every spiral seed point and keep the
# resulting trajectories as LineStrings.
t_max = 5.7
t = np.linspace(0, t_max, 41)
a = 0.1
b = 0.95
c = - 0.02
d = -0.02
all_polys = Polygon()
break_dist = 0.01
lines = []
lfs = MultiLineString()
allowed_counter = 0
for ii, pt in enumerate(tqdm(pts)):
    sol = odeint(ode, [pt.x, pt.y], t, args=(a, b, c, d))
    mpt = MultiPoint(sol)
    if ii == 0:
        ls = LineString(mpt)
        lfs = gp.merge_LineStrings([lfs, ls])
        lines.append(ls)
    else:
        # The proximity early-break below is disabled, so allowed_counter
        # simply ends up counting every point of the trajectory.
        allowed_counter = 0
        for _pt in mpt:
            dist = _pt.distance(lfs)
#             if dist < break_dist:
#                 break
            allowed_counter += 1
        if allowed_counter > 1:
            ls = LineString(mpt[:allowed_counter])
            lfs = gp.merge_LineStrings([lfs, ls])
            lines.append(ls)
# Keep only non-trivial trajectories and rescale into the draw box.
lbs0 = gp.merge_LineStrings([l for l in lines if l.length > 0.9])
lbs0 = gp.make_like(gp.merge_LineStrings(lbs0), drawbox)
# -
# Use GPU if available
dg.set_use_gpu(torch.cuda.is_available())
# Square 2000x2000 canvas for the moire render.
width = 2000
height = 2000
drawbox = box(0, 0, width, height)
db = gp.Shape(drawbox)
# Rescale the traced lines to fill the raster canvas.
sls = gp.make_like(gp.merge_LineStrings(lbs0), drawbox)
out_shape = np.array((db.height, db.width)).round().astype(int)
# +
nft_id = fn.new_nft_id()
raster_videos_dir = Path('/home/naka/art/raster_videos')
nft_dir = raster_videos_dir.joinpath(nft_id)
if not nft_dir.exists():
    os.mkdir(nft_dir)
# -
sls
# Break every polyline into individual 2-point segments, each wrapped as a
# differentiable LineTensor for diffvg.
lts = []
for ls in sls:
    for ii in range(len(ls.coords)-1):
        sub_ls = LineString(ls.coords[ii:ii+2])
        lt = LineTensor(sub_ls)
        lts.append(lt)
# +
# Rasterize all segments with diffvg (thin, semi-transparent white strokes)
# and composite over black.
canvas_width, canvas_height = width, height
num_control_points = torch.tensor([0])
shapes = []
shape_groups = []
for ii, lt in enumerate(lts):
    path = dg.Path(num_control_points = num_control_points,
                   points = lt.tensor,
                   is_closed = False,
                   stroke_width = torch.tensor(0.45))
    shapes.append(path)
    path_group = dg.ShapeGroup(shape_ids = torch.tensor([ii]),
                               fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
                               stroke_color = torch.tensor([1., 1., 1., 0.6]))
    shape_groups.append(path_group)
scene_args = dg.RenderFunction.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = dg.RenderFunction.apply
img = render(canvas_width, # width
             canvas_height, # height
             2,   # num_samples_x
             2,   # num_samples_y
             0,   # seed
             None, # background_image
             *scene_args)
# target = img.clone()
angle_targets = [torch.tensor(0) for shape in shapes]
# init
rendered_img = finalize_image(img.cpu(), as_Image=True)
img = render(canvas_width, # width
             canvas_height, # height
             2,   # num_samples_x
             2,   # num_samples_y
             0,   # seed
             None, # background_image
             *scene_args)
img = finalize_image(img.cpu(), as_Image=True)
background = Image.new('RGBA', img.size, (0, 0, 0))
alpha_composite = Image.alpha_composite(background, img)
alpha_composite
# -
quality_val = 100
now = fn.get_time()
filepath = nft_dir.joinpath(f'{nft_id}_{now}_0000.jpeg')
alpha_composite.convert('RGB').save(filepath, quality=quality_val)
from genpen import subdivide as sd
from functools import partial
from genpen.grower import Grower, GrowerParams
# Use GPU if available
dg.set_use_gpu(torch.cuda.is_available())
drawbox  # display current drawbox
# + heading_collapsed="false"
# make page
# Recursively subdivide an A2 page into polygons, splitting more aggressively
# near the page center, then preview the boundaries with vsketch.
paper_size = 'A2'
# Page border in mm (module-level annotated assignment).
border:float=45
paper = Paper(paper_size)
drawbox = paper.get_drawbox(border)
# NOTE(review): this bezier split_func is overwritten two lines down; only
# the min-rectangle split is actually used.
split_func = functools.partial(sd.split_random_bezier, x0=0.2, x1=0.75, n_eval_points=50)
xgen = stats.uniform(loc=0.4, scale=0.01).rvs
split_func = functools.partial(sd.split_along_longest_side_of_min_rectangle, xgen=xgen)
# x0gen = ss.uniform(loc=0.15, scale=0.01).rvs
# x1gen = ss.uniform(loc=0.65, scale=0.01).rvs
# split_func = functools.partial(sd.split_random_line_gen, x0gen=x0gen, x1gen=x1gen)
# NOTE(review): the fixed target point is immediately replaced by the centroid.
target = Point(140, 325)
target = drawbox.centroid
# Continue-probability decays with distance from target (p_range over d_range).
dist_from_center = partial(sd.distance_from_pt, target=target, p_range=(0.99, 0.3,), d_range=(0, 200))
cp = sd.ContinuePolicy(dist_from_center)
polys = sd.very_flex_rule_recursive_split(poly=drawbox, split_func=split_func, continue_func=cp, depth_limit=14, buffer_kwargs={'distance':1e-6})
bps = gp.merge_Polygons(polys)
# Preview the subdivision boundaries.
sk = vsketch.Vsketch()
sk.size(paper.page_format_mm)
sk.scale('1mm')
sk.penWidth('0.5mm')
sk.geometry(bps.boundary)
# tolerance=0.5
sk.display()
# + heading_collapsed="false"
# Fill every subdivision polygon with jittered bezier hatching (one layer),
# then preview with vsketch after line merge/simplify passes.
n_layers = 1
layers = []
for ii in range(n_layers):
    fills = []
    for p in bps:
        # Horizontal jitter disabled; vertical jitter strength randomized per poly.
        xjitter_func = 0
        yjitter_func = stats.norm(loc=0, scale=np.random.uniform(0.1, 1)).rvs
        bhf = gp.BezierHatchFill(
            spacing=np.random.uniform(0.1, 0.5),
            degrees=np.random.uniform(10,80),  # random hatch angle per polygon
            poly_to_fill=p,
            xjitter_func=xjitter_func,
            yjitter_func=yjitter_func,
            fill_inscribe_buffer=1.4,
            n_nodes_per_line=5,
            n_eval_points=6,
        )
        fills.append(bhf.p)
    # Drop degenerate (zero-length) fills before merging.
    fills = [f for f in fills if f.length > 0]
    layer = gp.merge_LineStrings(fills)
    layers.append(layer)
# Preview: one vsketch stroke layer per hatch layer.
sk = vsketch.Vsketch()
sk.size(paper.page_format_mm)
sk.scale('1mm')
sk.penWidth('0.3mm')
for i, layer in enumerate(layers):
    sk.stroke(i+1)
    sk.geometry(layer)
for tolerance in [0.1, 0.3, 0.5, 0.7]:
    sk.vpype(f'linemerge --tolerance {tolerance}mm')
sk.vpype('linesimplify --tolerance 0.1 linesort')
sk.display(color_mode='layer')
# -
# Re-target the geometry onto a 1200x1600 raster canvas.
width = 1200
height = 1600
drawbox = box(0, 0, width, height)
db = gp.Shape(drawbox)
# NOTE(review): uses `layer` (last loop variable above) rather than `layers` —
# fine while n_layers == 1, but fragile if n_layers changes.
sls = gp.make_like(gp.merge_LineStrings(layer), drawbox)
# Break every hatch linestring into consecutive 2-point segments, each
# wrapped as a LineTensor for the diffvg renderer.
lts = []
for ls in sls:
    coords = list(ls.coords)
    for start, stop in zip(coords[:-1], coords[1:]):
        lts.append(LineTensor(LineString([start, stop])))
# +
# diffvg scene for the hatch-fill piece: straight segments, thin fully-opaque
# white strokes, no fill; render, composite over black, and save frame 0000.
canvas_width, canvas_height = width, height
num_control_points = torch.tensor([0])
shapes = []
shape_groups = []
for ii, lt in enumerate(lts):
    path = dg.Path(num_control_points = num_control_points,
                   points = lt.tensor,
                   is_closed = False,
                   stroke_width = torch.tensor(0.1))
    shapes.append(path)
    path_group = dg.ShapeGroup(shape_ids = torch.tensor([ii]),
                               fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
                               stroke_color = torch.tensor([1., 1., 1., 1]))
    shape_groups.append(path_group)
scene_args = dg.RenderFunction.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = dg.RenderFunction.apply
img = render(canvas_width, # width
             canvas_height, # height
             2, # num_samples_x
             2, # num_samples_y
             0, # seed
             None, # background_image
             *scene_args)
# target = img.clone()
# NOTE(review): angle_targets / rendered_img are unused below (leftovers).
angle_targets = [torch.tensor(0) for shape in shapes]
# init
rendered_img = finalize_image(img.cpu(), as_Image=True)
# Re-render the same scene, then composite over black.
img = render(canvas_width, # width
             canvas_height, # height
             2, # num_samples_x
             2, # num_samples_y
             0, # seed
             None, # background_image
             *scene_args)
img = finalize_image(img.cpu(), as_Image=True)
background = Image.new('RGBA', img.size, (0, 0, 0))
alpha_composite = Image.alpha_composite(background, img)
alpha_composite
# +
# New output directory for this piece.
nft_id = fn.new_nft_id()
raster_videos_dir = Path('/home/naka/art/raster_videos')
nft_dir = raster_videos_dir.joinpath(nft_id)
if not nft_dir.exists():
    os.mkdir(nft_dir)
# -
# Save frame 0000.
quality_val = 100
now = fn.get_time()
filepath = nft_dir.joinpath(f'{nft_id}_{now}_0000.jpeg')
alpha_composite.convert('RGB').save(filepath, quality=quality_val)
# # simple
# +
diffvg_images_dir = Path('/home/naka/art/diffvg_images')
# -
width = 1600
height = 1600
drawbox = box(0, 0, width, height)
db = gp.Shape(drawbox)
paper.page_format_mm
sk = vsketch.Vsketch()
sk.size(f'{width}mmx{height}mm')
sk.scale('1mm')
sk.penWidth('0.3mm')
n_circles = 1
circles = [db.p.centroid.buffer(600) for ii in range(n_circles)]
n_eval_points = 50
clipped_filled_polys = []
for c in circles:
filled = gp.BezierHatchFill(
poly_to_fill=c,
spacing=20,
degrees=0,
xjitter_func=stats.norm(loc=0, scale=0.1).rvs,
yjitter_func=stats.norm(loc=0, scale=5).rvs,
fill_inscribe_buffer=1.4,
n_nodes_per_line=10,
n_eval_points=40,
alternate_direction=False,
)
fills = filled.fill
random_walk = gp.gaussian_random_walk(len(fills), step_init=0.5, step_mu=0., step_std=3, scale=True)
clipped_lss = []
for ii, ls in enumerate(fills):
eval_pts = np.linspace(0, random_walk[ii], n_eval_points)
clipped_ls = LineString([ls.interpolate(pt, normalized=True) for pt in eval_pts])
clipped_lss.append(clipped_ls)
clipped_filled_polys.append(gp.merge_LineStrings(clipped_lss))
gp.merge_LineStrings(clipped_filled_polys)
sls = gp.merge_LineStrings(clipped_filled_polys)
# Segment-ize the clipped hatch lines for diffvg: one LineTensor per edge
# between consecutive coordinates.
lts = []
for ls in sls:
    n_segs = len(ls.coords) - 1
    for k in range(n_segs):
        lts.append(LineTensor(LineString(ls.coords[k:k + 2])))
# +
# diffvg scene for the clipped-circle piece; higher AA (4x4 samples),
# 0.8-alpha white strokes; composite over black and save a single still.
canvas_width, canvas_height = width, height
num_control_points = torch.tensor([0])
shapes = []
shape_groups = []
for ii, lt in enumerate(lts):
    path = dg.Path(num_control_points = num_control_points,
                   points = lt.tensor,
                   is_closed = False,
                   stroke_width = torch.tensor(0.1))
    shapes.append(path)
    path_group = dg.ShapeGroup(shape_ids = torch.tensor([ii]),
                               fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
                               stroke_color = torch.tensor([1., 1., 1., 0.8]))
    shape_groups.append(path_group)
scene_args = dg.RenderFunction.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = dg.RenderFunction.apply
img = render(canvas_width, # width
             canvas_height, # height
             4, # num_samples_x
             4, # num_samples_y
             0, # seed
             None, # background_image
             *scene_args)
img = finalize_image(img.cpu(), as_Image=True)
background = Image.new('RGBA', img.size, (0, 0, 0))
alpha_composite = Image.alpha_composite(background, img)
alpha_composite
# +
# Save the still into the shared diffvg images directory.
nft_id = fn.new_nft_id()
quality_val = 100
now = fn.get_time()
filepath = diffvg_images_dir.joinpath(f'{nft_id}.jpeg')
alpha_composite.convert('RGB').save(filepath, quality=quality_val)
# -
# # flow beam graph
# + heading_collapsed="false"
import networkx as nx
class GraphGram(object):
    """Lay out a networkx graph and expose it as shapely geometry.

    The node layout is computed lazily (once) via the named networkx layout
    function, centered on (xoff, yoff) and scaled by `scale`; nodes become
    Points and edges become straight LineStrings.
    """
    def __init__(self, graph, xoff=0, yoff=0, scale=1, layout_method='kamada_kawai_layout'):
        self.graph = graph
        self._nodes = None  # node -> (x, y) mapping, filled by get_layout()
        self.xoff = xoff
        self.yoff = yoff
        self.scale = scale
        self.layout_method = layout_method  # attribute name in networkx.layout

    @property
    def center(self):
        # Layout center, passed straight through to the networkx layout call.
        return np.array((self.xoff, self.yoff))

    @property
    def edges(self):
        return list(self.graph.edges)

    @property
    def layout_function(self):
        # Resolve the layout callable by name. BUGFIX: the original printed
        # the error message and then returned an unbound local (`f`), raising
        # UnboundLocalError; raise a descriptive AttributeError instead.
        try:
            return getattr(nx.layout, self.layout_method)
        except AttributeError:
            layout_functions = [a for a in dir(nx.layout) if 'layout' in a]
            raise AttributeError(
                f'{self.layout_method} not found in networkx.layout module; '
                f'choose from {layout_functions}')

    def get_layout(self, *args, **kwargs):
        # Compute (or recompute) the layout and cache it on the instance.
        # NOTE(review): the original decorated this with functools.lru_cache,
        # which keys on — and keeps alive — `self` (ruff B019) while caching a
        # None return; the lazy guard in `nodes` already provides the caching,
        # so the decorator is dropped.
        self._nodes = self.layout_function(
            self.graph,
            scale=self.scale,
            center=self.center,
            *args, **kwargs)

    @property
    def nodes(self):
        # Lazily compute the layout exactly once.
        if self._nodes is None:
            self.get_layout()
        return self._nodes

    @property
    def node_pts(self):
        return {k: Point(xy) for k, xy in self.nodes.items()}

    @property
    def pts(self):
        return MultiPoint([p for p in self.node_pts.values()])

    @property
    def lines(self):
        # One straight LineString per graph edge.
        lines = []
        for n0, n1 in self.edges:
            p0 = self.node_pts[n0]
            p1 = self.node_pts[n1]
            lines.append(LineString([p0, p1]))
        return MultiLineString(lines)
# +
diffvg_images_dir = Path('/home/naka/art/diffvg_images')
# -
# Square 1600x1600 canvas for the flow-beam-graph piece.
width = 1600
height = 1600
drawbox = box(0, 0, width, height)
db = gp.Shape(drawbox)
# + heading_collapsed="false"
# make page
# + heading_collapsed="false"
# Nodes per graph glyph, glyph scale, and a 400-unit grid of glyph centers.
DEGREE = 32
SCALE = 200
(xbins, ybins), (xs, ys) = gp.overlay_grid(drawbox, xstep=400, ystep=400, flatmesh=True)
# + heading_collapsed="false"
# Rewiring probability p ramps left-to-right across the grid (0 -> 0.5).
p_gen = lambda x: np.interp(x, [xs.min(), xs.max()], [0., 0.5] )
_p_gen = gp.make_callable(p_gen)
# + heading_collapsed="false"
# Constant nearest-neighbor count k for every glyph.
k_gen = 2
_k_gen = gp.make_callable(k_gen)
# + heading_collapsed="false"
# One row per grid point: its (x, y) center and its graph parameters.
df = pd.DataFrame({
    'x':xs,
    'y':ys,
    'k':_k_gen(xs),
    'p':_p_gen(xs)
})
df['k'] = df['k'].astype(int)
# + heading_collapsed="false"
# Build one small-world graph "glyph" per grid point: a connected
# Watts-Strogatz graph whose rewiring probability p varies across the page,
# laid out with a spring layout and rendered as (zero-jitter) bezier edges.
new_rows = []
for i, row in df.iterrows():
    # df['k'] was already cast to int above, so plain int() suffices (the
    # original's numpy-scalar .astype(int) was redundant).
    k = int(row['k'])
    G = nx.connected_watts_strogatz_graph(n=DEGREE, k=k, p=row['p'])
    gg = GraphGram(graph=G, layout_method='spring_layout',
                   xoff=row['x'], yoff=row['y'], scale=SCALE)
    # One bezier per graph edge, merged into a single multilinestring.
    bezs = gp.merge_LineStrings([
        gp.LineString_to_jittered_bezier(
            ls, xstd=0., ystd=0., normalized=True, n_eval_points=4)
        for ls in gg.lines
    ])
    new_row = row.to_dict()
    new_row['geometry'] = bezs
    new_rows.append(new_row)
gdf = geopandas.GeoDataFrame(new_rows)
layers = []
layers.append(gp.merge_LineStrings(gdf.geometry))
# -
layers[0]  # display merged graph geometry
# Per-glyph buffer / rotation / translation parameter generators.
buffer_gen = stats.uniform(loc=18, scale=20).rvs
d_buffer_gen = functools.partial(np.random.uniform, low=-0.8, high=-1.)
# NOTE(review): the random angle generator is overwritten by a constant 80.
angles_gen = stats.uniform(loc=0, scale=360).rvs
angles_gen = gp.make_callable(80)
d_translate_factor_gen = stats.uniform(loc=0.5, scale=0.8).rvs
fills = []
all_polys = Polygon()  # running union of already-drawn blobs, for occlusion
for i, row in gdf.iterrows():
    # Thicken the glyph's bezier edges into a blob, then pad it further.
    p = row.geometry.buffer(0.5, cap_style=2, join_style=2, resolution=8)
    p = p.buffer(buffer_gen(), cap_style=2, join_style=2)
    # Concentric shrink-rotate-translate fill of the blob, with noisy steps.
    stp = gp.ScaleTransPrms(d_buffer=d_buffer_gen(),angles=angles_gen(),d_translate_factor=d_translate_factor_gen(), n_iters=300)
    stp.d_buffers += np.random.uniform(-0.15, 0.15, size=stp.d_buffers.shape)
    P = gp.Poly(p)
    P.fill_scale_trans(**stp.prms)
    # Occlusion: only the part of this blob not covered by earlier blobs shows.
    visible_area = p.difference(all_polys)
    visible_fill = P.fill.intersection(visible_area.buffer(1e-6))
    fills.append(visible_fill)
    all_polys = so.unary_union([all_polys, p])
all_polys  # display union of blobs
fills = [f for f in fills if f.length > 0]
all_fills = gp.merge_LineStrings(fills)
# Fit the merged fill lines to the canvas with a 20-unit margin.
all_fills = gp.make_like(all_fills, db.p.buffer(-20))
# One LineTensor per consecutive-coordinate pair of every fill line.
lts = []
for ls in all_fills:
    pair_count = len(ls.coords) - 1
    lts.extend(
        LineTensor(LineString(ls.coords[j:j + 2])) for j in range(pair_count)
    )
# +
# diffvg scene for the flow-beam-graph piece: 4x4 AA, 0.2-width strokes in
# 0.8-alpha white; composite over black and save a single still.
canvas_width, canvas_height = width, height
num_control_points = torch.tensor([0])
shapes = []
shape_groups = []
for ii, lt in enumerate(lts):
    path = dg.Path(num_control_points = num_control_points,
                   points = lt.tensor,
                   is_closed = False,
                   stroke_width = torch.tensor(0.2))
    shapes.append(path)
    path_group = dg.ShapeGroup(
        shape_ids = torch.tensor([ii]),
        fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
        stroke_color = torch.tensor([1., 1., 1., 0.8]))
    shape_groups.append(path_group)
scene_args = dg.RenderFunction.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = dg.RenderFunction.apply
img = render(canvas_width, # width
             canvas_height, # height
             4, # num_samples_x
             4, # num_samples_y
             0, # seed
             None, # background_image
             *scene_args)
img = finalize_image(img.cpu(), as_Image=True)
background = Image.new('RGBA', img.size, (0, 0, 0))
alpha_composite = Image.alpha_composite(background, img)
alpha_composite
# -
# +
diffvg_images_dir = Path('/home/naka/art/diffvg_images')
# +
# Save the still.
nft_id = fn.new_nft_id()
quality_val = 100
now = fn.get_time()
filepath = diffvg_images_dir.joinpath(f'{nft_id}.jpeg')
alpha_composite.convert('RGB').save(filepath, quality=quality_val)
# -
# # bez circles
# 1600x1600 canvas packed with circles of decreasing radii.
width = 1600
height = 1600
drawbox = box(0, 0, width, height)
db = gp.Shape(drawbox)
bps = gp.circle_pack_within_poly(drawbox, rads=[400,200, 100, 55,35])
bps  # display first packing
# Subtract a second, independent packing's buffered boundaries to carve
# ring-shaped gaps through the first packing.
bps2 = gp.circle_pack_within_poly(drawbox, rads=[400,200, 100, 55,35])
bps = bps.difference(bps2.boundary.buffer(1.5))
# + heading_collapsed="false"
n_layers = 1
# + heading_collapsed="false"
# Hatch every packed circle; the hatch angle grows with the circle's distance
# from the packing centroid (0 deg at center -> 720 deg at 800 units).
layers = []
for ii in range(n_layers):
    fills = []
    for p in bps:
        # No horizontal jitter; vertical jitter strength randomized per circle.
        xjitter_func = 0
        yjitter_func = stats.norm(loc=0, scale=np.random.uniform(3, 8.5)).rvs
        dist_from_center = p.centroid.distance(bps.centroid)
        a = np.interp(dist_from_center, [0, 800], [0, 720])
        bhf = gp.BezierHatchFill(
            spacing=np.random.uniform(0.8, 1.2),
            degrees=a,
            poly_to_fill=p,
            xjitter_func=xjitter_func,
            yjitter_func=yjitter_func,
            fill_inscribe_buffer=1.4,
            n_nodes_per_line=15,
            n_eval_points=40,
        )
        fills.append(bhf.p)
    # Drop degenerate (zero-length) fills before merging.
    fills = [f for f in fills if f.length > 0]
    layer = gp.merge_LineStrings(fills)
    layers.append(layer)
# -
layer  # display last layer
# Clean each layer with a tiny buffer round-trip (dissolves near-duplicate
# lines), drop very short fragments, and histogram the fragment lengths.
mlayers = []
for layer in tqdm(layers):
    mlayers.append(layer.buffer(0.001).buffer(-0.001).boundary)
mlayers = [gp.merge_LineStrings([l for l in layer if l.length > 0.2]) for layer in mlayers]
sns.displot([np.log10(l.length) for l in mlayers[0]])
# NOTE(review): mlayers is not used below — the render consumes raw `layers`.
lss = gp.merge_LineStrings(layers)
# Convert every linestring edge into a LineTensor for rendering.
lts = []
for ls in lss:
    edge_starts = range(len(ls.coords) - 1)
    for e in edge_starts:
        segment = LineString(ls.coords[e:e + 2])
        lts.append(LineTensor(segment))
len(lts)
# +
# diffvg scene for the bez-circles piece: 4x4 AA, 0.1-width strokes in
# 0.8-alpha white; composite over black and save a single still.
canvas_width, canvas_height = width, height
num_control_points = torch.tensor([0])
shapes = []
shape_groups = []
for ii, lt in enumerate(lts):
    path = dg.Path(num_control_points = num_control_points,
                   points = lt.tensor,
                   is_closed = False,
                   stroke_width = torch.tensor(0.1))
    shapes.append(path)
    path_group = dg.ShapeGroup(
        shape_ids = torch.tensor([ii]),
        fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]),
        stroke_color = torch.tensor([1., 1., 1., 0.8]))
    shape_groups.append(path_group)
scene_args = dg.RenderFunction.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = dg.RenderFunction.apply
img = render(canvas_width, # width
             canvas_height, # height
             4, # num_samples_x
             4, # num_samples_y
             0, # seed
             None, # background_image
             *scene_args)
img = finalize_image(img.cpu(), as_Image=True)
background = Image.new('RGBA', img.size, (0, 0, 0))
alpha_composite = Image.alpha_composite(background, img)
alpha_composite
# -
# +
diffvg_images_dir = Path('/home/naka/art/diffvg_images')
# +
# Save the still.
nft_id = fn.new_nft_id()
quality_val = 100
now = fn.get_time()
filepath = diffvg_images_dir.joinpath(f'{nft_id}.jpeg')
alpha_composite.convert('RGB').save(filepath, quality=quality_val)
| scratch/059_diffvg_exploration.ipynb |