code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Initialize a game
# +
from ConnectN import ConnectN
game_setting = {'size':(6,6), 'N':4, 'pie_rule':True}
game = ConnectN(**game_setting)
# +
% matplotlib notebook
from Play import Play
gameplay=Play(ConnectN(**game_setting),
player1=None,
player2=None)
# -
# # Define our policy
#
# Please go ahead and define your own policy! See if you can train it under 1000 games and with only 1000 steps of exploration in each move.
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import *
import numpy as np
from ConnectN import ConnectN
game_setting = {'size':(6,6), 'N':4}
game = ConnectN(**game_setting)
class Policy(nn.Module):
    """Policy/value network for the 6x6 ConnectN game.

    Maps a board tensor to (move probabilities over the 6x6 grid,
    scalar value estimate in [-1, 1] for the player to move).
    """
    def __init__(self, game):
        # `game` is accepted for interface symmetry but is never used:
        # all layer sizes below are hard-coded for a 6x6 board.
        super(Policy, self).__init__()
        # input = 6x6 board (1 channel); conv1: 1x6x6 -> 16x5x5
        self.conv1 = nn.Conv2d(1, 16, kernel_size=2, stride=1, bias=False)
        # conv2: 16x5x5 -> 32x3x3
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, bias=False)
        # flattened feature size after the two convolutions
        self.size=3*3*32
        # the part for actions (policy head): size -> size//4 -> 36 logits
        self.fc_action1 = nn.Linear(self.size, self.size//4)
        self.fc_action2 = nn.Linear(self.size//4, 36)
        # the part for the value function: size -> size//6 -> 1, squashed by tanh
        self.fc_value1 = nn.Linear(self.size, self.size//6)
        self.fc_value2 = nn.Linear(self.size//6, 1)
        self.tanh_value = nn.Tanh()
    def forward(self, x):
        # x: board tensor, presumably shape (1, 1, 6, 6) — the final
        # .view(6, 6) and the global normalisation below only make sense
        # for batch size 1. TODO confirm with the caller (MCTS.explore).
        y = F.leaky_relu(self.conv1(x))
        y = F.leaky_relu(self.conv2(y))
        y = y.view(-1, self.size)
        # action head: raw logits for the 36 squares
        a = self.fc_action2(F.leaky_relu(self.fc_action1(y)))
        # Mask of playable squares: occupied cells hold +/-1, empty cells 0.
        # NOTE(review): .type(torch.FloatTensor) forces a CPU tensor; this
        # would break on CUDA inputs — confirm the model is CPU-only.
        avail = (torch.abs(x.squeeze())!=1).type(torch.FloatTensor)
        avail = avail.view(-1, 36)
        # Masked softmax: subtract the global max for numerical stability,
        # zero out unavailable moves, renormalise over the rest.
        maxa = torch.max(a)
        exp = avail*torch.exp(a-maxa)
        prob = exp/torch.sum(exp)
        # value head: tanh keeps the estimate in [-1, 1]
        value = self.tanh_value(self.fc_value2(F.leaky_relu( self.fc_value1(y) )))
        return prob.view(6,6), value
policy = Policy(game)
# -
# # Define a MCTS player for Play
# +
import MCTS
from copy import copy
def Policy_Player_MCTS(game):
    """Choose a move for *game*: run 1000 MCTS explorations guided by the
    global ``policy`` network, then step to the child sampled at a low
    (near-greedy) temperature and return the move that produced it."""
    root = MCTS.Node(copy(game))
    for _ in range(1000):
        root.explore(policy)
    chosen, (v, nn_v, p, nn_p) = root.next(temperature=0.1)
    return chosen.game.last_move
import random
def Random_Player(game):
    """Return a uniformly random move among the game's available moves."""
    legal_moves = game.available_moves()
    return random.choice(legal_moves)
# -
# # Play a game against a random policy
# +
% matplotlib notebook
from Play import Play
gameplay=Play(ConnectN(**game_setting),
player1=Policy_Player_MCTS,
player2=None)
# -
# # Training
# +
# initialize our alphazero agent and optimizer
import torch.optim as optim
game=ConnectN(**game_setting)
policy = Policy(game)
optimizer = optim.Adam(policy.parameters(), lr=.01, weight_decay=1.e-5)
# ! pip install progressbar
# -
# Beware, training is **VERY VERY** slow!!
# +
# train our agent
from collections import deque
import MCTS
# try a higher number
episodes = 2000
import progressbar as pb
widget = ['training loop: ', pb.Percentage(), ' ',
pb.Bar(), ' ', pb.ETA() ]
timer = pb.ProgressBar(widgets=widget, maxval=episodes).start()
outcomes = []
policy_loss = []
Nmax = 1000
# Self-play training loop: each episode is one full self-play game whose
# MCTS visit counts supply the policy targets (AlphaZero-style loss).
for e in range(episodes):
    mytree = MCTS.Node(game)
    logterm = []  # per-move policy cross-entropy terms
    vterm = []    # per-move network values, signed by the player to move
    while mytree.outcome is None:
        # Expand the search tree until it holds at least Nmax visits.
        for _ in range(Nmax):
            mytree.explore(policy)
            if mytree.N >= Nmax:
                break
        current_player = mytree.game.player
        # Advance to the sampled child; v/nn_v are the MCTS and network
        # values, p/nn_p the MCTS visit distribution and network policy.
        mytree, (v, nn_v, p, nn_p) = mytree.next()
        mytree.detach_mother()
        # Cross-entropy of nn_p against p, minus the entropy of p so the
        # term is zero when the network matches the search distribution.
        loglist = torch.log(nn_p)*p
        constant = torch.where(p>0, p*torch.log(p),torch.tensor(0.))
        logterm.append(-torch.sum(loglist-constant))
        vterm.append(nn_v*current_player)
    # we compute the "policy_loss" for computing gradient
    outcome = mytree.outcome
    outcomes.append(outcome)
    # Value loss (squared error vs. game outcome) plus policy loss.
    loss = torch.sum( (torch.stack(vterm)-outcome)**2 + torch.stack(logterm) )
    optimizer.zero_grad()
    loss.backward()
    policy_loss.append(float(loss))
    optimizer.step()
    if e%10==0:
        print("game: ",e+1, ", mean loss: {:3.2f}".format(np.mean(policy_loss[-20:])),
              ", recent outcomes: ", outcomes[-10:])
    if e%500==0:
        # Periodic checkpoint of the whole policy module.
        torch.save(policy,'6-6-4-pie-{:d}.mypolicy'.format(e))
    del loss
    timer.update(e+1)
timer.finish()
# -
# # setup environment to pit your AI against the challenge policy '6-6-4-pie.policy'
# +
challenge_policy = torch.load('6-6-4-pie.policy')
def Challenge_Player_MCTS(game):
    """Move chooser backed by the pre-trained ``challenge_policy``: run
    1000 MCTS explorations, then take the near-greedy (T=0.1) next node
    and return the move that led to it."""
    root = MCTS.Node(copy(game))
    for _ in range(1000):
        root.explore(challenge_policy)
    best_child, (v, nn_v, p, nn_p) = root.next(temperature=0.1)
    return best_child.game.last_move
# -
# # Let the game begin!
% matplotlib notebook
gameplay=Play(ConnectN(**game_setting),
player2=Policy_Player_MCTS,
player1=Challenge_Player_MCTS)
| alpha_go/alphazero-TicTacToe-advanced.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Read compressed dicom without success
# +
import SimpleITK as sitk
from pathlib import Path
import pydicom
pydicom.__version__
# -
# Path to a compressed CT DICOM slice (Google Drive mount on Windows).
pth = r"G:/Můj disk/data/biomedical/pigs/transplantation/Tx19D/Tx019D.CT.ThrAbd2FAdult.7.101.2020.06.02.20.25.52.269.43011157.dcm"
pth = Path(pth)
pth.exists()
# sitk.ReadImage(str(pth))
dataset = pydicom.dcmread(str(pth))
# dataset.pixel_array
# BUG FIX: numpy arrays expose tobytes(), not to_bytes() (that is the int
# method name) — the original call raised AttributeError.
dataset.pixel_array.tobytes()
| devel/read_compressed_dicom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import missingno as msno
import matplotlib.pyplot as plt
import datetime as dt
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# #### Importando dados de Top 250, TV-Shows e Movies:
#
# Fontes:
#
# https://www.imdb.com/chart/top/?ref_=nv_mv_250
#
# https://www.imdb.com/chart/toptv/?ref_=nv_tvv_250
#
# FIX: use raw strings for the Windows paths — '\D', '\A', '\T' are invalid
# escape sequences and emit DeprecationWarning/SyntaxWarning on modern Python
# (the resulting strings are unchanged, so behaviour is identical).
df_tv_250 = pd.read_csv(r'D:\Data Scientist\Accenture_Case\Data\Top250_IMDb_tvshows.csv',sep=';')
# Normalise the title strings (trim whitespace, capitalise first letter).
df_tv_250['title'] = df_tv_250['title'].apply(lambda x : x.strip().capitalize())
df_tv_250.info()
df_mov_250 = pd.read_csv(r'D:\Data Scientist\Accenture_Case\Data\Top250_IMDb_movies.csv',sep=';')
# Normalise the title strings the same way so joins/isin comparisons match.
df_mov_250['title'] = df_mov_250['title'].apply(lambda x : x.strip().capitalize())
df_mov_250.info()
# +
df = pd.read_csv('[SD] DESAFIO.csv')
# Padronizando a string:
df['title'] = df['title'].apply(lambda x : x.strip().capitalize())
# -
df.title
df_bool_mov = df.title.isin(df_mov_250.title)
# ### Número de filmes que a netflix contém que estão no top 250 IMDb.
df_bool_mov.value_counts()
# ### Número de tv-shows que a netflix contém que estão no top 250 IMDb.
df_bool_tv = df.title.isin(df_tv_250.title)
df_bool_tv.value_counts()
| Python/EDA_IMDB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#import dependencies
import pandas as pd
import json
import gzip
from collections import Counter
from datetime import datetime as dt
text_sample = ["I walk the dog! I have fun.", "Shrek 3 is a great movie. \n I love the scenes with Donkey in them", "You have to try a little tenderness <3."]
print(text_sample)
for words in text_sample:
print((int(len(words.split()))))
#reading in the data
df = pd.read_json('../data/original/DNA_DATA_FULL.gz', compression='gzip')
#dropping all the columns except for body
text = df['body'].fillna("Nothing")
print(text)
text_subset = text.head(5)
print(text_subset)
# +
word_count = []
for words in text_subset:
word_count.append(int(len(words.split())))
#print(word_count)
#did not work because it is not a data frame but a series
#text_subset.insert(1, 'Word Count', word_count)
#attempting with pandas series
# adding list data
# creating new column
#text_subset['Word count']= df['body'].add(word_count)
#also did not work trying original method
#text_subset["Word Count"] = word_count
#using assign method
#text_subset.assign(words = word_count)
#print(text_subset)
#print(type(word_count))
# +
text_body = []
for body in text_subset:
text_body.append(body)
# +
#Word Count – should probably not be over 10,000 words,
#we may want consider less than 100 words invalid as well (NA- agree?)
validity = []
#creates a loop where any text with less than 100 words or more than 10,000 words is considered an invalid
#data point. The reason for these numbers are that anything less than 100 words does not fit our definition of
#an article and anything longer than 10,000 words is too long for us to check?
for number in word_count:
if number < 100:
validity.append(0)
elif number > 10000:
validity.append(0)
else:
validity.append(1)
print(validity)
# +
#Validity percentage
total_valid = sum(validity)
print('{}% of the data are valid.'.format(total_valid/len(validity)*100) )
# +
#creates a data frame with all the
sample_df = pd.DataFrame()
sample_df['body'] = text_body
sample_df['word_count'] = word_count
sample_df['validity'] = validity
print(sample_df)
# +
word_count_all = []
for words in text:
word_count_all.append(int(len(words.split())))
# +
text_body_all = []
for body in text:
text_body_all.append(body)
# +
validity_all = []
#creates a loop where any text with less than 100 words or more than 10,000 words is considered an invalid
#data point. The reason for these numbers are that anything less than 100 words does not fit our definition of
#an article and anything longer than 10,000 words is too long for us to check?
for number in word_count_all:
if number < 100:
validity_all.append(0)
elif number > 10000:
validity_all.append(0)
else:
validity_all.append(1)
# +
#Validity percentage
all_total_valid = sum(validity_all)
print('{}%, or'.format(round(all_total_valid/len(validity_all)*100)), all_total_valid, 'of the data in body are valid')
# + jupyter={"outputs_hidden": true}
#creates a data frame with all the valid body points along with the body info
valid_df = pd.DataFrame()
valid_df['body'] = text_body_all
valid_df['word_count'] = word_count_all
valid_df['validity'] = validity_all
valid_df.head(25)
# -
#
dates = df['publication_datetime']
dates_sub = dates.head(10)
# +
dates_iso = []
#for date in dates:
# dates_iso.append(date.today())
from datetime import datetime as dt
for date in dates_sub:
dates_iso.append(dt.fromtimestamp(date/1000.0).strftime('%Y-%m-%d'))
#dates_is.append(timestamp.strftime('%Y-%m-%d %H:%M:%S'))
#print())
#print(timestamp)
#print(dt.strptime(timestamp, '%Y-%m-%d'))
print(dates_iso)
# +
#valid_date_df.iloc[valid_date_df[(valid_date_df['Date'] > '2015-01-01') & (valid_date_df['Date'] < '2020-02-01')]]
#
validity_date = list((valid_date_df['Date'] > '2015,1,1') & (valid_date_df['Date'] < '2018,1,1'))
#alidity_date = []
# (valid_date_df['Date'] > '2015,1,1') & (valid_date_df['Date'] < '2018,1,1') == 'False':
#lidity_date.append(int)
# +
valid_date_df = pd.DataFrame()
valid_date_df['Date'] = dates_iso
valid_date_df['Validity'] = validity_date
valid_date_df
# -
print('{}%, or'.format(round(sum(valid_date_df['Validity'] == True) / len(valid_date_df['Validity']) * 100)), '{} of the data in modification datetime are valid'.format(sum(valid_date_df['Validity'] == True)))
# +
dates_all_iso = []
for date in dates:
dates_all_iso.append(dt.fromtimestamp(date/1000.0).strftime('%Y'))
# -
valid_date_all_df = pd.DataFrame()
valid_date_all_df['Date'] = dates_all_iso
# +
#years should be 2010 and onwards. We realllly expect to see 2013-2018 though
validity_date_all = list((valid_date_all_df['Date'] > '2009'))
valid_date_all_df['Validity'] = validity_date_all
# -
valid_date_all_df.tail(25)
print('{}%, or'.format(round(sum(valid_date_all_df['Validity'] == True) / len(valid_date_all_df['Validity']) * 100)), '{} of the data in modification datetime are valid'.format(sum(valid_date_all_df['Validity'] == True)))
#valid_date_all_df.dtypes
type(valid_date_all_df['Date'][0])
# +
#Bar plot of the number of articles in DNA per year
#Trying to answer the question: does it meet our expecation?: We realllly expect to see 2013-2018 though
#count each occurence of year value in df and plot
valid_date_all_df['Date'].value_counts().sort_index().plot(kind = 'bar')
# -
df['Year'] = valid_date_all_df['Date']
# + jupyter={"outputs_hidden": true}
df.head(10)
# +
#csv subsets for 2013-2018 year
#df2013 = df[df['Year'] == '2013']
for i in range(2013, 2019):
df_year = df[df['Year'] == str(i)]
df_year.to_csv("../data/working/DNA_" + str(i) + ".csv")
| src/Profiling/.ipynb_checkpoints/validity_date_body-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pandas-cookbook-nNG2axCD
# language: python
# name: pandas-cookbook-nng2axcd
# ---
import pandas as pd
import numpy as np
pd.reset_option('all')
df = pd.read_csv('data/be_subscribers_auto_recurring.csv')
df.head()
df.columns
df = df[['Login ID (SSO ID)', 'Invoice',
'Cardholder Name', 'Last Charge Date',
'Nex Renewal Date', 'Service Enabled',
'Amount', 'E-mail', 'Company Name', 'AS Order #', 'Serial #']]
df.head()
pd.pivot_table(df, index=['Company Name', 'Login ID (SSO ID)'])
ucompanies = df['Company Name'].unique()
len(ucompanies)
# grouping columns: Company Name Cardholder name
# aggregating column: Amount
# aggregating function: sum
df.groupby(['Company Name', 'Cardholder Name']).agg({'Amount': 'sum'})
# flights.groupby(['AIRLINE', 'WEEKDAY']).agg({'CANCELLED': 'sum'}).head(7)
pd.pivot_table(df, index=['Company Name'])
# mylist = .pivot(index='Company Name', columns='Cardholder Name', values='Amount')
# mylist
df2 = pd.read_csv('data/be_subscribers_auto_recurring.csv')
df2.head()
df3 = df2[['Login ID (SSO ID)', 'Invoice',
'Cardholder Name', 'Last Charge Date',
'Nex Renewal Date', 'Service Enabled',
'Amount', 'E-mail', 'Company Name', 'AS Order #', 'Serial #']]
pd.pivot_table(df3, index=['Company Name', 'Cardholder Name', 'E-mail', 'Service Enabled', 'Serial #'], values=['Amount'],aggfunc=[np.sum], margins=True)
df4 = df3[['Company Name', 'Cardholder Name', 'E-mail', 'Serial #', 'Amount']]
df4
pd.pivot_table(df4, index=['Company Name', 'Cardholder Name', 'Serial #'])
df5 = pd.DataFrame(df4.stack())
df5
# +
be = pd.read_csv('data/be_subscribers_auto_recurring.csv', index_col=['Company Name', 'Cardholder Name', 'Serial #'], parse_dates=['Last Charge Date', 'Nex Renewal Date'])
be2 = be[['Login ID (SSO ID)', 'Invoice', 'Service Enabled', 'Last Charge Date',
'Nex Renewal Date',
'Amount', 'E-mail', 'AS Order #']]
be2.sort_values('Company Name')
# -
be2.dtypes
criteria = (be2['Nex Renewal Date'] < np.datetime64('2019-01-31')) & \
(be2['Nex Renewal Date'] > np.datetime64('2018-11-01'))
be2[criteria]
be3 = be.set_index('Nex Renewal Date')
be3.loc['2018-11-23':'2019-01-22'].sort_index()
be3.dtypes
be3.index
df_orgs = pd.read_csv('data/orgs.csv')
df_orgs.tail(10)
df_orgs.shape
df_orgs.columns
df_orgs.describe()
df_orgs.info()
import numpy as np
df_orgs.loc[df_orgs['SSO ID'] == 4263793]
result = df_orgs.loc[df_orgs['SSO ID'] == 4263793]
np.array(result['Licenses'])
result2 = df.loc[df['Login ID (SSO ID)'] == 4263793]
result2
series_id = df.loc[df['Login ID (SSO ID)'] == 4285717]
series_id
series2_id = df_orgs.loc[df_orgs['SSO ID'] == 4285717]
series2_id
# +
## I want to join the BE spreadsheet table (df) and the Organizations table (df_orgs)
## on the SSO ID column and return only those in the BE spreadsheet
# -
# !cat data/orgs.csv | grep "S696C2CA2DD6"
# !cat data/orgs.csv
| Auto-recurring BE subscribers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
def extract_city_name(df):
    """Derive a ``city_name`` column from ``city_and_code``.

    "Chicago, IL" -> "Chicago". Mutates *df* in place and returns it so
    the function composes with ``DataFrame.pipe``.
    """
    # Everything before the first comma is the city name.
    city_part = df["city_and_code"].str.split(",").str[0]
    df["city_name"] = city_part
    return df
def add_country_name(df, country_name=None):
    """Append *country_name* to ``city_name`` into ``city_and_country``.

    "Chicago" + "US" -> "ChicagoUS".  (DOC FIX: the original docstring
    claimed "Chicago-US", but no separator is inserted by the code.)
    Mutates *df* in place and returns it for use with ``DataFrame.pipe``.
    Raises TypeError if *country_name* is left as None.
    """
    col = "city_name"
    # Plain elementwise string concatenation, no separator.
    df["city_and_country"] = df[col] + country_name
    return df
df_p = pd.DataFrame({"city_and_code":["Chicago, IL"]})
add_country_name(extract_city_name(df_p), country_name="US")
# Pandas encourages us to use pipe() for the problem above, which is known as 'method chaining'. pipe makes it easy to use your own or another library’s functions in method chains, alongside Pandas’ methods. Compare the first approach with following:
(df_p.pipe(extract_city_name)
.pipe(add_country_name, country_name="US"))
# ## row or column-wise function application
# Arbitrary functions can be applied along the axes of a DataFrame using the apply() method, which, like the descriptive statistics methods, takes an optional axis argument.
df = pd.DataFrame({
'one': pd.Series(np.random.randn(3), index=['a', 'b', 'c']),
'two': pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']),
'three': pd.Series(np.random.randn(3), index=['b', 'c', 'd'])})
df
df.apply(np.mean) # on columns
df.apply(np.mean, axis=1) # on index
df.apply(lambda x: x.max() - x.min())
df.apply(np.cumsum)
df.apply(np.exp)
# +
def subtract_and_divide(x, sub, divide=1):
    """Return ``(x - sub) / divide``; works elementwise when *x* is a
    pandas Series/DataFrame (used with ``DataFrame.apply``)."""
    shifted = x - sub
    return shifted / divide
df.apply(subtract_and_divide, args=(5,3)) # pass function into apply()
# -
| w2/w2-Day_2/Pandas_apply_functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import jsonlines
from collections import defaultdict, Counter
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['font.family'] = 'Times New Roman'
# -
lang2name = {
'en': 'ENG',
'ar': 'ARB',
'be': 'BEL',
'bg': 'BUL',
'da': 'DAN',
'et': 'EST',
'de': 'DEU',
'el': 'ELL',
'fr': 'FRA',
'id': 'IND',
'ja': 'JPN',
'ko': 'KOR',
'zh': 'CMN',
'pt': 'POR',
'ru': 'RUS',
'es': 'SPA',
'sw': 'SWA',
'ta': 'TAM',
'tr': 'TUR',
'vi': 'VIE',
}
train_base = "/science/image/nlp-datasets/emanuele/data/wit/annotations/"
test_base = "/science/image/nlp-datasets/emanuele/data/wit/annotations/"
# +
langs = ['ar', 'bg', 'da', 'el', 'en', 'et', 'id', 'ja', 'ko', 'tr', 'vi']
print("lang", "#img", "#cap")
for lang in langs:
df = pd.read_csv(test_base + f"test_{lang}.tsv", sep='\t')
with jsonlines.open(test_base + f"test_{lang}.jsonl") as reader:
caps = [e for e in reader]
for cap in caps:
assert cap['image_url'] in df['image_url'].values
print(lang, len(df), len(caps))
# -
for cap in caps:
assert cap['image_url'] in df['image_url'].values
# ## Distribution of captions per image
lang2vals = {}
for lang in langs:
with jsonlines.open(test_base + f"test_{lang}.jsonl") as reader:
caps = [e for e in reader]
img_count = defaultdict(int)
for cap in caps:
img_count[cap['image_url']] += 1
lang2vals[lang] = list(img_count.values())
print(lang, np.mean(list(img_count.values())), np.max(list(img_count.values())))
f, ax = plt.subplots(figsize=(12,8))
width=0.1
n_langs = len(langs)
for ix, lang in enumerate(langs):
cnt = Counter(lang2vals[lang])
xs = list(cnt.keys())
ixs = np.argsort(xs)
xs = np.array(xs)[ixs]
ys = [v/len(lang2vals[lang]) for v in cnt.values()]
ys = np.array(ys)[ixs]
print(lang, ys[0])
ax.plot(xs, ys, '-o', label=lang)
ax.legend(title="Language")
ax.set_yscale("log")
ax.grid(alpha=0.2)
# ## Overlaps
langs = ['en', 'ar', 'bg', 'da', 'el', 'et', 'id', 'ja', 'ko', 'tr', 'vi']
# +
lang2urls = {l: set() for l in langs}
for lang in langs:
with jsonlines.open(test_base + f"test_{lang}.jsonl") as reader:
caps = [e for e in reader]
for cap in caps:
lang2urls[lang].add(cap['image_url'])
mat = np.zeros((len(lang2urls), len(lang2urls)), dtype=float)
for ix1, l1 in enumerate(langs):
for ix2, l2 in enumerate(langs):
if ix1 != ix2:
mat[ix1, ix2] = len(lang2urls[l1].intersection(lang2urls[l2]))
else:
mat[ix1, ix2] = np.nan
# +
import seaborn as sns

f, ax = plt.subplots(figsize=(12,12))
# BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin bool is the correct mask dtype.
mask = np.zeros_like(mat, dtype=bool)
# Hide the upper triangle so each language pair is shown once.
mask[np.triu_indices_from(mask)] = True
cmap = sns.color_palette("OrRd", as_cmap=True)
# Integer overlap counts annotate the lower triangle only (diagonal is NaN).
annot = [[np.nan for _ in range(len(langs))] for _ in range(len(langs))]
for (i, j), z in np.ndenumerate(mat):
    if i != j:
        annot[i][j] = int(mat[i][j])
sns_plot = sns.heatmap(mat, mask=mask, cmap=cmap, alpha=0.7, vmin=0, vmax=150, center=0, annot=annot, fmt=".0f",
                       square=True, linewidths=1, cbar=False, annot_kws={'fontsize': 22, 'color': 'w'})  #, cbar_kws={"shrink": .5})
ax.set_xticklabels([lang2name[lang] for lang in langs], fontsize=20)
ax.set_yticklabels([lang2name[lang] for lang in langs], fontsize=20)
f.savefig("wit-overlaps.pdf", bbox_inches="tight")
# -
# ## Captions length distribution
lang2test = {}
for lang in langs:
with jsonlines.open(test_base + f"test_{lang}.jsonl") as reader:
lang2test[lang] = [item for item in reader]
lang2test_lens = {lang: [len(e['caption_reference_description']) for e in l] for lang, l in lang2test.items()}
# +
from scipy import stats
f, ax = plt.subplots(1, 1, figsize=(14,8))
colors = ['#000000', '#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00', '#dddddd']
x = np.arange(0, 215, 1)
for ix, (lang, l) in enumerate(lang2test_lens.items()):
print(lang, max(l))
density = stats.kde.gaussian_kde(l)
ax.plot(x, density(x), lw=2, label=lang2name[lang], color=colors[ix])
ax.grid(alpha=0.3)
ax.tick_params(axis='both', which='major', labelsize=24)
ax.set_xlabel('Sentence length [# characters]', fontsize=32)
ax.set_ylabel('Density', fontsize=32)
ax.legend(title='Language', loc='upper right', ncol=1, fontsize=22, title_fontsize=24)
f.savefig("wit-lens.pdf", bbox_anchor="tight")
# -
# ## Leakage
# +
train_urls = set()
with jsonlines.open(train_base + f"train_en.jsonl") as reader:
caps = [e for e in reader]
for cap in caps:
train_urls.add(cap['image_url'])
len(caps), len(train_urls)
# -
# URL-based leakage
for l2 in langs:
print('train',l2, len(train_urls.intersection(lang2urls[l2])))
# base64-based leakage
# NOTE(review): `lang2imgs` is never defined anywhere in this notebook (only
# `lang2urls` exists), and `df` here is whatever the last per-language loop
# left behind — as written this cell raises NameError. Confirm the intended
# source of the per-language base64 image sets before running.
train_imgs = set(df['base64_image'].values)
for l2 in langs:
    print('train',l2, len(train_imgs.intersection(lang2imgs[l2])))
| notebooks/Analysis-WIT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from astropy.io import fits
from astropy.coordinates import SkyCoord
import astropy.units as u
import glob
import time
import gc
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib import rcParams
rcParams["font.family"] = "sans-serif"
rcParams['font.sans-serif'] = ['DejaVu Sans']
# %matplotlib notebook
# -
# ## Merge the PS1 RF catalogs and the Gaia obvi stars catalog
gaia_dir = "/Users/adamamiller/Desktop/PS1_fits/gaia_stars/"
ps1_files = glob.glob("/Users/adamamiller/Desktop/PS1_fits/hdf5/*h5")
# ### Parallax comparison
# Parallax-selected Gaia sample, indexed by PS1 objid for joining below.
gaia_df = pd.read_hdf(gaia_dir + "parallax_ps1_gaia_mag_pm_plx.h5")
gaia_df.set_index("objid", inplace=True)
tstart = time.time()
# Join each PS1 random-forest score chunk against the Gaia parallax sample,
# attach Galactic latitude, and write one temporary HDF5 file per chunk.
for file_num, rf_file in enumerate(ps1_files):
    rf_df = pd.read_hdf(rf_file)
    rf_df.set_index("objid", inplace=True)
    # Inner join keeps only objids present in both catalogs.
    tmp_df = gaia_df.join(rf_df, how='inner')
    coords = SkyCoord(np.array(tmp_df.raStack)*u.deg, np.array(tmp_df.decStack)*u.deg)
    tmp_df["b"] = coords.galactic.b
    tmp_df.to_hdf(gaia_dir + "tmp{}.h5".format(file_num), "d1")
    # Per-chunk timing: the first iteration is measured from tstart, later
    # ones from the end of the previous iteration (tnow, set below).
    if file_num == 0:
        print(file_num, rf_file.split("/")[-1],
          len(tmp_df),
          time.time() - tstart)
    else:
        print(file_num, rf_file.split("/")[-1],
          len(tmp_df),
          time.time() - tnow)
    tnow = time.time()
    # Free the chunk before loading the next one to bound memory use.
    del rf_df
    del tmp_df
    gc.collect()
del gaia_df
gc.collect()
# +
# Load every per-chunk temp file and concatenate into a single catalog.
# BUG FIX: the original exec()-based loop only read tmp_files[0:100] into
# numbered globals (tmp_df0 ... tmp_df99) but then listed tmp_df0-tmp_df365
# by hand, which raises NameError for the frames that were never loaded.
# A list comprehension reads every file and needs no exec() at all.
tmp_files = glob.glob(gaia_dir + "tmp*h5")
df_list = [pd.read_hdf(tmp_file) for tmp_file in tmp_files]
# -
merge_df = pd.concat(df_list)
merge_df.to_hdf("parallax_ps1_gaia_cat_merge.h5", "d1")
# Release the large frames before moving on to the proper-motion merge.
del merge_df
del df_list
gc.collect()
# ### Proper Motion comparison
# Same chunk-join procedure for the Gaia proper-motion-selected sample.
pm_df = pd.read_hdf(gaia_dir + "pm_ps1_gaia_mag_pm_plx.h5")
pm_df.set_index("objid", inplace=True)
tstart = time.time()
# Resume support: skip chunks already processed by an earlier partial run.
last_quit = 30
for file_num, rf_file in enumerate(ps1_files):
    if file_num < last_quit:
        continue
    rf_df = pd.read_hdf(rf_file)
    rf_df.set_index("objid", inplace=True)
    tmp_df = pm_df.join(rf_df, how='inner')
    coords = SkyCoord(np.array(tmp_df.raStack)*u.deg, np.array(tmp_df.decStack)*u.deg)
    tmp_df["b"] = coords.galactic.b
    tmp_df.to_hdf(gaia_dir + "pm{}.h5".format(file_num), "d1")
    # First processed chunk is timed from tstart, later ones from tnow.
    if file_num == last_quit:
        print(file_num, rf_file.split("/")[-1],
          len(tmp_df),
          time.time() - tstart)
    else:
        print(file_num, rf_file.split("/")[-1],
          len(tmp_df),
          time.time() - tnow)
    tnow = time.time()
    del rf_df
    del tmp_df
    gc.collect()
del pm_df
gc.collect()
# +
# Load the per-chunk proper-motion temp files and concatenate.
# (Same list-comprehension replacement for the fragile exec() pattern used
# for the parallax merge: no numbered globals, every file gets loaded.)
pm_files = glob.glob(gaia_dir + "pm*h5")
pm_list = [pd.read_hdf(pm_file) for pm_file in pm_files]
# -
merge_pm = pd.concat(pm_list)
# BUG FIX: the original called merge_df.to_hdf here, but merge_df was
# already deleted after the parallax merge -- the proper-motion catalog in
# merge_pm was never written to disk.
merge_pm.to_hdf("pm_ps1_gaia_cat_merge.h5", "d1")
# ## Start here if the merge file has already been created
#
# ### Parallax analysis
#
# Gaia-PS1 crossmatch sources with `parallax_over_error` > 8.
pxl_df = pd.read_hdf("parallax_ps1_gaia_cat_merge.h5")
pxl_df[0:10]
N_pxl = len(pxl_df)
print("There are {} stars with PS1 scores that pass the pxl cut".format(N_pxl))
print("\t")
# 38764553 = total number of parallax-selected Gaia sources before the join.
print("There are {} stars not in the PS1 RF catalog".format(38764553 - N_pxl))
# RF-score values at hand-picked quantiles (interpreted in the cell below).
np.percentile(pxl_df["rf_score"], [0.2325,1.925,50,86.39,86.4])
# The above percentile call reveals the following:
#
# - 14.6% of these Gaia stars have PS1_rf = 1
# - 50% of these Gaia stars have PS1_df > 0.99104167
# - 98.1% of these Gaia stars have PS1_df > .83 [the threshold for scanning]
# - 99.76% of these Gaia stars have PS1_df > .5 [classical classification threshold]
# BUG FIX: np.abs() must wrap the latitude column, not the comparison --
# np.abs(pxl_df['b'] > 30) takes the absolute value of booleans, so the
# original selected b > 30 / b < 10 rather than the intended |b| cuts.
high_lat = np.where(np.abs(pxl_df['b']) > 30)
low_lat = np.where(np.abs(pxl_df['b']) < 10)
# RGBA palette for the paper figures ("pale" variants are 40%-alpha fills).
col_dict = {'mustard': (226/256,153/256,48/256,1),
            'blue': (33/256,124/256,163/256, 1),
            'asphalt': (50/256,56/256,77/256, 1),
            'pale_mustard': (226/256,153/256,48/256,0.4),
            'pale_blue': (33/256,124/256,163/256,0.4),
            'pale_asphalt': (50/256,56/256,77/256,0.4)
           }
thresh = 0.83 #for nDet>2 sources
# +
# Histogram of RF scores for the full parallax sample and the low/high
# Galactic-latitude subsets; the dotted vertical line is the FoM threshold.
fig, ax = plt.subplots()
ax.hist(pxl_df["rf_score"], bins=50,
        histtype='stepfilled',
        edgecolor=col_dict['blue'], linewidth=2,
        facecolor=col_dict['pale_blue'], label="Gaia stars")
ax.hist(pxl_df["rf_score"].iloc[low_lat], bins=50,
        histtype='stepfilled',
        edgecolor=col_dict['mustard'], linewidth=2,
        facecolor=col_dict['pale_mustard'], label=r"$|b| < 10\degree$")
ax.hist(pxl_df["rf_score"].iloc[high_lat], bins=50,
        histtype='stepfilled',
        edgecolor=col_dict['asphalt'], linewidth=2,
        facecolor=col_dict['pale_asphalt'], label=r"$|b| > 30\degree$")
ax.set_yscale("log")
ax.set_xlim(-0.01, 1.01)
ax.set_ylim(3e1, 3e7)
ax.tick_params(which="both", top=True, right=True, labelsize=11)
ax.xaxis.set_major_locator(MultipleLocator(0.1))
ax.xaxis.set_minor_locator(MultipleLocator(0.025))
ax.set_xlabel("RF score", fontsize=14)
ax.set_ylabel("N", fontsize=14)
ax.vlines([thresh], 10, 1e9,
          linestyles=":",
          color='k', lw=2, zorder=11)
# ax.text(thresh, 6e5, 'FoM threshold',
#         color='k',
#         rotation=90, ha="right", fontsize=14)
ax.text(thresh, 8e6, r'$\longleftarrow \mathrm{Galaxies}$ ',
        color='k',
        ha="right", fontsize=13)
ax.text(thresh, 8e6, r' $\mathrm{Stars} \longrightarrow$',
        color='k',
        ha="left", fontsize=13)
ax.legend(loc=2, bbox_to_anchor = (0.05,0,1,1),
          bbox_transform=ax.transAxes)
fig.subplots_adjust(left=0.1,right=0.99,top=0.98,bottom=0.1)
fig.savefig("../paper/Figures/Gaia_PS1_cat_hist.pdf")
# -
fig, ax = plt.subplots()
# BUG FIX: the original plotted merge_df, which was deleted right after the
# parallax merge file was written; the reloaded catalog lives in pxl_df.
# Quick-look histogram of the first 100k RF scores.
ax.hist(pxl_df["rf_score"][0:100000], bins=50,
        histtype='stepfilled',
        edgecolor=col_dict['blue'], linewidth=2,
        facecolor=col_dict['pale_blue'], label="Gaia stars")
ax.set_yscale('log')
# ### Proper Motion Analysis
#
# Gaia sources with a pm SNR $\ge$ 7.5 are selected as stars in this subsample.
pm_df = pd.read_hdf("pm_ps1_gaia_cat_merge.h5")
N_pm = len(pm_df)
# Overlap with the parallax-selected sample (stars passing both cuts).
N_pm_and_pxl = len(np.where(pm_df["parallax_over_error"] >= 8)[0])
print("There are {} stars with PS1 scores that pass the pm cut".format(N_pm))
print("\tOf these, {:d} ({:d}) do (not) pass the pxl cut".format(N_pm_and_pxl, N_pm - N_pm_and_pxl))
print("\tThere are 35599830 stars in the pxl sample")
print("\t")
# 234176264 = total number of pm-selected Gaia sources before the join.
print("There are {} stars not in the PS1 RF catalog".format(234176264 - N_pm))
np.percentile(pm_df["rf_score"], [0.552,5.551,50,91.7])
# The above percentile call reveals the following:
#
# - 8.3% of these Gaia stars have PS1_rf = 1
# - 50% of these Gaia stars have PS1_df > 0.97919643
# - 94.45% of these Gaia stars have PS1_df > .83 [the threshold for scanning]
# - 99.45% of these Gaia stars have PS1_df > .5 [classical classification threshold]
np.percentile(pm_df["g_mag"], [1,5,25,50,75,95,99])
# BUG FIX: np.abs() must wrap the latitude column, not the comparison --
# the original applied abs to the booleans and so selected b > 30 / b < 10
# instead of the intended |b| cuts.
high_lat = np.where(np.abs(pm_df['b']) > 30)
low_lat = np.where(np.abs(pm_df['b']) < 10)
# RGBA palette for the paper figures ("pale" variants are 40%-alpha fills).
col_dict = {'mustard': (226/256,153/256,48/256,1),
            'blue': (33/256,124/256,163/256, 1),
            'asphalt': (50/256,56/256,77/256, 1),
            'pale_mustard': (226/256,153/256,48/256,0.4),
            'pale_blue': (33/256,124/256,163/256,0.4),
            'pale_asphalt': (50/256,56/256,77/256,0.4)
           }
thresh = 0.83 #for nDet>2 sources
# +
# Same RF-score histogram for the proper-motion sample, with the FoM
# threshold annotated.
fig, ax = plt.subplots(figsize=(7,5))
ax.hist(pm_df["rf_score"], range=(0,1), bins=50,
        histtype='stepfilled',
        edgecolor=col_dict['blue'], linewidth=2,
        facecolor=col_dict['pale_blue'], label="Gaia stars")
ax.hist(pm_df["rf_score"].iloc[low_lat], range=(0,1), bins=50,
        histtype='stepfilled',
        edgecolor=col_dict['mustard'], linewidth=2,
        facecolor=col_dict['pale_mustard'], label=r"$|b| < 10\degree$")
ax.hist(pm_df["rf_score"].iloc[high_lat], range=(0,1), bins=50,
        histtype='stepfilled',
        edgecolor=col_dict['asphalt'], linewidth=2,
        facecolor=col_dict['pale_asphalt'], label=r"$|b| > 30\degree$")
ax.set_yscale("log")
ax.set_xlim(-0.01, 1.01)
ax.set_ylim(5e3, 1.4e8)
ax.tick_params(which="both", top=True, right=True, labelsize=15)
ax.xaxis.set_major_locator(MultipleLocator(0.1))
ax.xaxis.set_minor_locator(MultipleLocator(0.025))
ax.set_xlabel("RF score", fontsize=15)
ax.set_ylabel("N", fontsize=15)
ax.vlines([thresh], 10, 3e10,
          linestyles=":",
          color='k', lw=2, zorder=11)
ax.text(thresh, 8e5, 'FoM threshold',
        color='k',
        rotation=90, ha="right", fontsize=14)
ax.text(thresh, 4e7, r'$\longleftarrow \mathrm{Galaxies}$ ',
        color='k',
        ha="right", fontsize=13)
ax.text(thresh, 4e7, r' $\mathrm{Stars} \longrightarrow$',
        color='k',
        ha="left", fontsize=13)
ax.legend(loc=2, bbox_to_anchor = (0.05,0,1,1),
          bbox_transform=ax.transAxes, fontsize=13)
fig.subplots_adjust(left=0.105,right=0.98,top=0.98,bottom=0.11)
fig.savefig("../paper/Figures/Gaia_PS1_pm_hist.pdf")
# -
| gaia/Gaia_PS1_compare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import requests
from IPython.display import HTML
import json
from bs4 import BeautifulSoup
import datetime
# +
# How many weeks back from the most recent complete week to report on.
delta_weeks = 1
df = pd.read_csv('df/stories_no_lm.csv', usecols = ['id', 'title', 'absolute_url', 'time_start', 'time_end', 'minutes',
       'delta_str', 'link', 'media', 'media_id', 'story_time', 'agency', 'editor', 'heading', 'domains'])
# +
# Get and check the last week's time frame
today = datetime.datetime.now()
# Days elapsed since the most recent Sunday (Sunday itself -> 0).
delta_days = (today.weekday() + 1) % 7
# (Removed a dead statement here: the original computed
# today - datetime.timedelta(7 + delta_days) and discarded the result.)
last_sunday = today - datetime.timedelta(delta_days)
last_monday = last_sunday - datetime.timedelta(6)
print('Last Sunday should be {}, {} days ago.'.format(last_sunday.strftime('%B %d') , delta_days ))
print('The previous Monday should be {}.'.format(last_monday.strftime('%B %d') , delta_days ))
# +
# For previous weeks
# Shift the window back by delta_weeks whole weeks.
last_monday = last_monday - datetime.timedelta(delta_weeks*7)
last_sunday = last_sunday - datetime.timedelta(delta_weeks*7)
print('Selected Monday is now {}.'.format(last_monday.strftime('%B %d')))
print('Selected Sunday is now {}.'.format(last_sunday.strftime('%B %d')))
# +
# Date index
# Parse timestamps and index by story start time so we can slice by date.
df['time_start'] = pd.to_datetime(df['time_start'])
df['time_end'] = pd.to_datetime(df['time_end'])
df.index = df['time_start']
start_timestamp = last_monday.strftime('%Y-%m-%d')
end_timestamp = last_sunday.strftime('%Y-%m-%d')
df[start_timestamp:end_timestamp].shape
# -
# Keep only the selected week.
df = df[start_timestamp:end_timestamp].copy()
# Export headings/URLs for media_id 19 to the d3 visualization data folder.
df[df['media_id'] == 19][['heading', 'absolute_url']].to_csv('/Users/paul/Sites/d3_v5/indices/data/time_series.csv')
| utils/Time_metrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''base'': conda)'
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load the Kaggle "Predict Future Sales" tables and check for missing data.
df_item_categories=pd.read_csv("./item_categories.csv")
df_item_categories
df_item_categories.isnull().sum()
df_items=pd.read_csv("./items.csv")
df_items
df_items.isnull().sum()
df_sales_train=pd.read_csv("./sales_train.csv")
df_sales_train
df_sales_train.isna().sum()
df_shops=pd.read_csv("./shops.csv")
df_shops
df_shops.isnull().sum()
# NOTE(review): the first merge result is immediately overwritten by the
# second assignment, so item categories/items never enter df_merge --
# confirm whether the item-level features were meant to be joined in.
df_merge=pd.merge(df_item_categories,df_items)
df_merge=pd.merge(df_sales_train,df_shops)
df_merge
df_merge.isnull().sum()
df_merge.drop(columns="shop_name",inplace=True)
df_merge
import xgboost
from xgboost import XGBRegressor
# Target: daily item counts; remaining columns become the feature matrix.
df_y=df_merge["item_cnt_day"]
df_y
df_merge.drop(columns="item_cnt_day",inplace=True)
df_merge.drop(columns="date",inplace=True)
# NOTE(review): "ID" here is just the row index of the training table, while
# test.csv's ID presumably identifies (shop, item) pairs -- verify this is
# a meaningful feature before trusting the model's predictions.
df_merge["ID"]=df_merge.index
df_merge
df_merge=df_merge[["ID","shop_id","item_id"]]
df_merge
model=XGBRegressor().fit(df_merge,df_y)
df_test=pd.read_csv("./test.csv")
df_test
y_pred=model.predict(df_test)
y_pred
submssion=pd.DataFrame({"ID":df_test["ID"],"item_cnt_month":y_pred})
submssion.to_csv("./submission1.csv",index=False)
| Predict_Future_Sales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Baseline Classifier Test
#
# Provides evidence that an SVM should be used over a Random Forest when operating on a bag of words representation.
# +
# Make common scripts visible
import sys
sys.path.append('../common/')
import numpy as np
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier
from experiments_common import shuffled_train_test_split
from term_document_matrix import TermDocumentMatrixCreator
from classification import run_support_vector_classifier
from loader import load_preprocessed_data
# To run a Random Forest on bag of words
def run_random_forest(train_x, train_y, test_x):
    """Fit a Random Forest on a bag-of-words representation of train_x and
    return its predictions for test_x."""
    # Vocabulary is built from the training documents only, then applied
    # to both splits so their feature spaces match.
    matrix_maker = TermDocumentMatrixCreator(train_x)
    features_train = matrix_maker.create_term_document_matrix(train_x)
    features_test = matrix_maker.create_term_document_matrix(test_x)
    forest = RandomForestClassifier(n_estimators=200, random_state=42, class_weight='balanced')
    forest.fit(features_train, train_y)
    return forest.predict(features_test)
# Load the already lowercased, lemmatised data
train_x, train_y = load_preprocessed_data('data/uvigomed_train.csv')
test_x, test_y = load_preprocessed_data('data/uvigomed_test.csv')
# Join the data back together and obtain a train/test split
x = train_x + test_x
y = train_y + test_y
train_x, train_y, test_x, test_y = shuffled_train_test_split(x, y)
svc_results = []
rf_results = []
# Run the classifiers at each training set size
# Learning-curve evaluation: micro-F1 for each classifier at growing
# training-set sizes, using the same shuffled split for both models.
train_sizes = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, len(train_x)]
for train_size in train_sizes:
    # Random Forest
    predict_y = run_random_forest(train_x[:train_size], train_y[:train_size], test_x)
    micro = f1_score(test_y, predict_y, average='micro')
    rf_results.append(micro)
    # Support Vector Machine
    predict_y = run_support_vector_classifier(train_x[:train_size], train_y[:train_size], test_x, C=0.01)
    micro = f1_score(test_y, predict_y, average='micro')
    svc_results.append(micro)
# -
# Now plot the results.
# +
import matplotlib.pyplot as plt
# Learning curves: micro-F1 vs. training-set size for both classifiers.
plt.title('SVM vs Random Forest on UVigoMED Bag of Words')
plt.ylabel('Micro $F_1$ Score')
plt.xlabel('Training Set Size')
plt.plot(train_sizes, svc_results,'-s', label='Support Vector Machine')
plt.plot(train_sizes, rf_results, '-o', label='Random Forest')
plt.legend(loc='lower right')
plt.savefig('rf_vs_svm_bow_uvigomed.pdf', bbox_inches='tight')
plt.show()
# -
| uvigomed/rf_vs_svm_bow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''caiman-online'': conda)'
# language: python
# name: python37664bitcaimanonlineconda66bc9a4a48f5458998df98246591df70
# ---
# +
import bokeh.plotting as bpl
import cv2
import logging
# Best-effort: disable OpenCV's internal threading (it conflicts with the
# multiprocessing used by caiman).
try:
    cv2.setNumThreads(0)
# BUG FIX: the original used 'except():', which catches the empty tuple --
# i.e. nothing at all -- so any failure still propagated. Catch Exception
# to make this the intended best-effort call.
except Exception:
    pass
import sys
import json
from glob import glob
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ScanImageTiffReader import ScanImageTiffReader
import caiman as cm
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.source_extraction.cnmf import params as params
from caiman_analysis import load_as_obj, load_and_parse_json, make_trialwise
from caiman.utils.visualization import plot_contours, nb_view_patches, nb_plot_contour
bpl.output_notebook()
# +
# Paths to the CNMF output written by the online pipeline (one JSON with
# traces/metadata, one HDF5 with the fitted model object).
data_path = 'E:/caiman_scratch/ori/out/'
js = glob(data_path + '*.json')[0]
hs = glob(data_path + '*.hdf5')[0]
jdat = load_and_parse_json(js)
cnm = load_as_obj(hs)
# -
c = jdat['c']
splits = jdat['splits']
splits = np.array([int(s) for s in splits]) #temporary for missed change in caiman_main
# Cut the continuous traces at the trial boundaries given by splits.
traces = make_trialwise(c, splits)
traces.shape # trials, cells, time
# +
# One cells-x-time heatmap per trial.
f, axes = plt.subplots(1,traces.shape[0], figsize=(10,5))
axes = axes.ravel()
for i,ax in enumerate(axes):
    ax.imshow(traces[i,:,:], aspect='auto')
# -
plt.plot(c[300,:])
# + tags=[]
# Local-correlation image from the memory-mapped movie, used as the
# background for the spatial contour plot.
Yr, dims, T = cm.load_memmap(cnm.mmap_file)
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Cn = cm.local_correlations(images.transpose(1,2,0))
Cn[np.isnan(Cn)] = 0
cnm.estimates.plot_contours_nb(img=Cn)
# -
| notebooks/.ipynb_checkpoints/online_test-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Pointwise-Local-Reconstruction-Error" data-toc-modified-id="Pointwise-Local-Reconstruction-Error-1"><span class="toc-item-num">1 </span>Pointwise Local Reconstruction Error</a></span></li></ul></div>
# -
# Pointwise Local Reconstruction Error
# ====================================
# Example for the usage of the `skcosmo.metrics.pointwise_local_reconstruction_error` as pointwise local reconstruction error (LFRE) on the degenerate CH4 manifold. We apply the local reconstruction measure on the degenerate CH4 manifold dataset. This dataset was specifically constructed to be representable by a 4-body features (bispectrum) but not by a 3-body features (power spectrum). In other words the dataset contains environments which are different, but have the same 3-body features. For more details about the dataset please refer to [Pozdnyakov 2020](https://doi.org/10.1103/PhysRevLett.125.166001) .
#
# The skcosmo dataset already contains the 3 and 4-body features computed with [librascal](https://github.com/cosmo-epfl/librascal) so we can load it and compare it with the LFRE.
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('font', size=20)
from skcosmo.datasets import load_degenerate_CH4_manifold
from skcosmo.metrics import pointwise_local_reconstruction_error
# load features
degenerate_manifold = load_degenerate_CH4_manifold()
power_spectrum_features = degenerate_manifold.data.SOAP_power_spectrum
bispectrum_features = degenerate_manifold.data.SOAP_bispectrum
# -
print(degenerate_manifold.DESCR)
# +
# Number of nearest neighbours used for each local reconstruction.
n_local_points = 20
print("Computing pointwise LFRE...")
# local reconstruction error of power spectrum features using bispectrum features
power_spectrum_to_bispectrum_pointwise_lfre = pointwise_local_reconstruction_error(
    power_spectrum_features,
    bispectrum_features,
    n_local_points,
    # Train on every second structure, evaluate on all of them.
    train_idx = np.arange(0, len(power_spectrum_features), 2),
    test_idx = np.arange(0, len(power_spectrum_features)),
    estimator=None,
    n_jobs=4,
)
# local reconstruction error of bispectrum features using power spectrum features
bispectrum_to_power_spectrum_pointwise_lfre = pointwise_local_reconstruction_error(
    bispectrum_features,
    power_spectrum_features,
    n_local_points,
    train_idx = np.arange(0, len(power_spectrum_features), 2),
    test_idx = np.arange(0, len(power_spectrum_features)),
    estimator=None,
    n_jobs=4,
)
print("Computing pointwise LFRE finished.")
# Report the global (root-mean-square) LFRE in each direction.
print(
    "LFRE(3-body, 4-body) = ",
    np.linalg.norm(power_spectrum_to_bispectrum_pointwise_lfre)/np.sqrt(len(power_spectrum_to_bispectrum_pointwise_lfre))
)
print(
    "LFRE(4-body, 3-body) = ",
    # CONSISTENCY FIX: normalize by the length of the array being reported;
    # the original reused the 3->4 array's length. Both share the same
    # test_idx here so the value is unchanged, but this is the intended form.
    np.linalg.norm(bispectrum_to_power_spectrum_pointwise_lfre)/np.sqrt(len(bispectrum_to_power_spectrum_pointwise_lfre))
)
# +
# Side-by-side contour maps of the pointwise LFRE over the manifold
# coordinates (u/π, v/π); only the second 9x9 grid (X^- environments,
# indices 81:) is shown.
fig, (ax34, ax43) = plt.subplots(
    1, 2, constrained_layout=True, figsize=(16, 7.5), sharey="row", sharex=True
)
vmax = 0.5
X, Y = np.meshgrid(np.linspace(0.7, 0.9, 9), np.linspace(-0.1, 0.1, 9))
pcm = ax34.contourf(
    X,
    Y,
    power_spectrum_to_bispectrum_pointwise_lfre[81:].reshape(9, 9).T,
    vmin=0,
    vmax=vmax,
)
ax43.contourf(
    X,
    Y,
    bispectrum_to_power_spectrum_pointwise_lfre[81:].reshape(9, 9).T,
    vmin=0,
    vmax=vmax,
)
# The red horizontal line marks the degenerate line of the manifold.
ax34.axhline(y=0, color="red", linewidth=5)
ax43.axhline(y=0, color="red", linewidth=5)
ax34.set_ylabel("v/π")
ax34.set_xlabel("u/π")
ax43.set_xlabel("u/π")
ax34.set_title(r"$X^-$ LFRE(3-body, 4-body)")
ax43.set_title(r"$X^-$ LFRE(4-body, 3-body)")
cbar = fig.colorbar(pcm, ax=(ax34, ax43), label="LFRE", location="bottom")
plt.show()
# -
# The environments span a manifold which is described by the coordinates v/π and u/π (please refer to [Pozdnyakov 2020](https://doi.org/10.1103/PhysRevLett.125.166001) for a concrete understanding of the manifold). The LFRE is presented for each environment in the manifold in the two contour plots. It can be seen that the reconstruction error of 4-body features using 3-body features (the left plot) is most significant along the degenerate line (the horizontal red line). This agrees with the fact that the 3-body features remain the same on the degenerate line and can therefore not reconstruct the 4-body features. On the other hand the 4-body features can perfectly reconstruct the 3-body features as seen in the right plot.
| examples/PlotLFRE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/thegreatgupta/ObjectRecognition/blob/master/MobileNet_02_TensorFlow_with_GPU.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="BlmQIFSLZDdc"
# # Confirm TensorFlow can see the GPU
#
# Simply select "GPU" in the Accelerator drop-down in Notebook Settings (either through the Edit menu or the command palette at cmd/ctrl-shift-P).
# + colab_type="code" id="3IEVK-KFxi5Z" outputId="c190e986-93ce-44d1-80cf-5cb4a9e9f1a7" colab={"base_uri": "https://localhost:8080/", "height": 33}
import tensorflow as tf
device_name = tf.test.gpu_device_name()
# Fail fast if the notebook runtime was not started with a GPU accelerator.
if device_name != '/device:GPU:0':
  raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# + [markdown] colab_type="text" id="QXRh0DPiZRyG"
# # Observe TensorFlow speedup on GPU relative to CPU
#
# This example constructs a typical convolutional neural network layer over a
# random image and manually places the resulting ops on either the CPU or the GPU
# to compare execution speed.
# + colab_type="code" id="t9ALbbpmY9rm" outputId="2243eeda-89d2-485a-caa5-0b90bd1e03ef" colab={"base_uri": "https://localhost:8080/", "height": 124}
import tensorflow as tf
import timeit
# See https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.device('/cpu:0'):
random_image_cpu = tf.random_normal((100, 100, 100, 3))
net_cpu = tf.layers.conv2d(random_image_cpu, 32, 7)
net_cpu = tf.reduce_sum(net_cpu)
with tf.device('/gpu:0'):
random_image_gpu = tf.random_normal((100, 100, 100, 3))
net_gpu = tf.layers.conv2d(random_image_gpu, 32, 7)
net_gpu = tf.reduce_sum(net_gpu)
sess = tf.Session(config=config)
# Test execution once to detect errors early.
try:
sess.run(tf.global_variables_initializer())
except tf.errors.InvalidArgumentError:
print(
'\n\nThis error most likely means that this notebook is not '
'configured to use a GPU. Change this in Notebook Settings via the '
'command palette (cmd/ctrl-shift-P) or the Edit menu.\n\n')
raise
def cpu():
sess.run(net_cpu)
def gpu():
sess.run(net_gpu)
# Runs the op several times.
print('Time (s) to convolve 32x7x7x3 filter over random 100x100x100x3 images '
'(batch x height x width x channel). Sum of ten runs.')
print('CPU (s):')
cpu_time = timeit.timeit('cpu()', number=10, setup="from __main__ import cpu")
print(cpu_time)
print('GPU (s):')
gpu_time = timeit.timeit('gpu()', number=10, setup="from __main__ import gpu")
print(gpu_time)
print('GPU speedup over CPU: {}x'.format(int(cpu_time/gpu_time)))
sess.close()
# + id="GO3sqkl_OSPj" colab_type="code" colab={}
import tarfile
# + id="ImN5tYJgPGHJ" colab_type="code" colab={}
# Use a context manager so the archive is closed even if extraction fails
# (the original left the file handle open on any error between open/close).
# SECURITY NOTE: extractall() trusts member paths; only run this on archives
# you control -- a malicious tar can write outside the working directory.
with tarfile.open("object-classification-dataset.tar") as tar:
    tar.extractall()
# + id="PM97xo3HbhVQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="dd8b0996-31ea-46ac-dfcb-5efa73147551"
import pandas as pd
import numpy as np
import os
import keras
import matplotlib.pyplot as plt
from keras.applications.xception import Xception
from keras.applications.resnet50 import ResNet50
from keras.applications.mobilenet import MobileNet
from keras.layers import Dense,GlobalAveragePooling2D
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
# + id="qz24FNC8bj0j" colab_type="code" colab={}
def model_Xception(img_shape=(299, 299, 3), n_classes=50,
                   load_pretrained=False, freeze_layers_from='base_model'):
    """Build a transfer-learning classifier on a MobileNet backbone.

    Despite the historical name, the backbone actually used is MobileNet;
    the Xception/ResNet50 alternatives are kept commented out below.

    Args:
        img_shape: input image shape (H, W, C).
        n_classes: number of output classes for the softmax head.
        load_pretrained: if True, initialise the backbone with ImageNet weights.
        freeze_layers_from: 'base_model' freezes the whole backbone; an int
            freezes layers [0, index); None freezes nothing.

    Returns:
        An uncompiled Keras Model.
    """
    # Decide if load pretrained weights from imagenet
    if load_pretrained:
        weights = 'imagenet'
    else:
        weights = None
    # Get base model
    #base_model = Xception(include_top=False, weights=weights,
    #input_tensor=None, input_shape=img_shape)
    #base_model = ResNet50(include_top=False, weights=weights,
    #input_tensor=None, input_shape=img_shape)
    base_model = MobileNet(include_top=False, weights=weights,
                           input_tensor=None, input_shape=img_shape)
    # Add final layers
    x = base_model.output
    # NOTE(review): a Dense layer is applied to the 4-D conv output *before*
    # global average pooling -- unconventional (GAP usually comes first);
    # confirm this ordering is intentional.
    x = Dense(n_classes, activation='relu')(x)
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(n_classes, activation='softmax')(x)
    # This is the model we will train
    # NOTE(review): Model(input=..., output=...) is the legacy Keras-1 kwarg
    # spelling; modern Keras expects inputs=/outputs=.
    model = Model(input=base_model.input, output=predictions)
    # Model Summary
    # print(model.summary())
    # # let's visualize layer names and layer indices to see how many layers/blocks to re-train
    # # uncomment when choosing based_model_last_block_layer
    # for i, layer in enumerate(model.layers):
    #    print(i, layer.name)
    # Freeze some layers
    if freeze_layers_from is not None:
        if freeze_layers_from == 'base_model':
            print ('   Freezing base model layers')
            for layer in base_model.layers:
                layer.trainable = False
        else:
            for i, layer in enumerate(model.layers):
                print(i, layer.name)
            print ('   Freezing from layer 0 to ' + str(freeze_layers_from))
            for layer in model.layers[:freeze_layers_from]:
                layer.trainable = False
            for layer in model.layers[freeze_layers_from:]:
                layer.trainable = True
    print(model.summary())
    return model
# + id="Tw4LCO5CbnBS" colab_type="code" colab={}
def train_Xception(train_set_path, val_set_path, validation_split = 0.2,
        batch_size = 32, class_mode = 'categorical', horizontal_flip = False,
        vertical_flip = False, rotation_range = None, target_size = (299, 299),
        model = None, epochs = 1):
    """Compile `model` and fit it on directory-structured image data.

    The train and validation generators are carved out of the same directory
    tree via `validation_split` (so `val_set_path` is normally the same path
    as `train_set_path`).

    Args:
        train_set_path / val_set_path: directories of per-class subfolders.
        validation_split: fraction of images reserved for validation.
        batch_size, class_mode, horizontal_flip, vertical_flip,
        rotation_range, target_size: forwarded to ImageDataGenerator /
            flow_from_directory.
        model: the keras Model to train (required).
        epochs: number of training epochs.

    Returns:
        The trained model.
    """
    # Load Image Data Set Using Keras (rescale to [0,1] + light augmentation)
    train_datagen = ImageDataGenerator(rescale = 1./255,
                                       shear_range = 0.2,
                                       zoom_range = 0.2,
                                       horizontal_flip = horizontal_flip,
                                       vertical_flip = vertical_flip,
                                       rotation_range=rotation_range,
                                       validation_split=validation_split)
    training_set = train_datagen.flow_from_directory(train_set_path,
                                                     target_size = target_size,
                                                     batch_size = batch_size,
                                                     class_mode = class_mode,
                                                     subset = 'training')
    validation_set = train_datagen.flow_from_directory(val_set_path,
                                                       target_size = target_size,
                                                       batch_size = batch_size,
                                                       class_mode = class_mode,
                                                       subset = 'validation')
    # Compile Model
    learning_rate = 0.0001
    loss = 'categorical_crossentropy'
    # opt_rms = keras.optimizers.rmsprop(lr=learning_rate,decay=1e-6)
    adam_optimizer = Adam(lr = learning_rate)
    model.compile(optimizer = adam_optimizer, loss = loss, metrics = ['accuracy'])
    # Fix: steps_per_epoch / validation_steps count *batches* per epoch, not
    # samples. Passing .samples directly made every epoch iterate the data
    # batch_size times over.
    model.fit_generator(training_set,
                        steps_per_epoch = max(1, training_set.samples // batch_size),
                        epochs = epochs,
                        validation_data = validation_set,
                        validation_steps = max(1, validation_set.samples // batch_size))
    return model
# + id="iYffUFt4br42" colab_type="code" colab={}
# Declare Constants
# Hyper-parameters and paths for this training run.
img_shape = (128, 128, 3)  # input shape fed to the backbone
target_size = (128, 128)   # resize target used by the data generators
n_classes = 49             # NOTE(review): builder default is 50 -- confirm dataset has 49 classes
epochs = 10
train_set_path = '101_ObjectCategories' #'dataset2/training_set_test'
val_set_path = train_set_path  # validation is split out of the same folder
validation_split = 0.1
batch_size = 32
horizontal_flip = True
rotation_range = None
learning_rate = 0.0001              # NOTE: unused here; train_Xception hard-codes its own
loss = 'categorical_crossentropy'   # NOTE: unused here; train_Xception hard-codes its own
# + id="uHSN8z05b3__" colab_type="code" outputId="6e96d642-bcbf-4c81-fea2-f8ad1b4d38ed" colab={"base_uri": "https://localhost:8080/", "height": 3450}
# Build Model
# MobileNet backbone with ImageNet weights; backbone layers frozen by default.
model = model_Xception(img_shape = img_shape, n_classes = n_classes, load_pretrained = True)
# + id="sT58uExSb7_Q" colab_type="code" outputId="f5c15a9a-5120-4ec9-e4a0-903c85449bc6" colab={"base_uri": "https://localhost:8080/", "height": 391}
# First training round.
model = train_Xception(train_set_path = train_set_path, val_set_path = val_set_path, validation_split = validation_split,
                       batch_size = batch_size, horizontal_flip = horizontal_flip, rotation_range = rotation_range,
                       model = model, epochs = epochs, target_size = target_size)
# + id="QDjWYQ69k2U-" colab_type="code" colab={}
# Persist the trained weights (filename says bt16 but batch_size above is 32).
model.save_weights('obj_recognition_xception_normal_bt16_ep10.h5')
# + id="URhAKBgSXVlA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 481} outputId="9e630cad-be98-4a23-e4c6-322ea551bcef"
# Second training round: continues fitting the same model for another `epochs` epochs.
model = train_Xception(train_set_path = train_set_path, val_set_path = val_set_path, validation_split = validation_split,
                       batch_size = batch_size, horizontal_flip = horizontal_flip, rotation_range = rotation_range,
                       model = model, epochs = epochs, target_size = target_size)
# + id="nRbOAiGYpoG9" colab_type="code" colab={}
| Colab_Old/MobileNet_02_TensorFlow_with_GPU.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Lattice paths
#
# Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.
#
# How many such routes are there through a 20×20 grid?
# +
# %%time
import math

# C(40, 20): choose which 20 of the 40 unit moves are "right" (the remaining
# 20 are "down") -- the number of monotone lattice paths through a 20x20 grid.
fact = math.factorial
npaths = fact(40) // (fact(20) * fact(20))
print(npaths)
# -
| 0001/Problem_15.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JimKing100/DS-Unit-2-Regression-Classification/blob/master/module4/assignment_regression_classification_4pop.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="jxFhqMBTqPji" colab_type="code" colab={}
# Installs
# %%capture
# !pip install --upgrade category_encoders plotly
# !pip install fiona
# !pip install geopandas
# + id="Cz1aBBrrpa3L" colab_type="code" outputId="086baaae-061f-4e01-9eb0-22107ab34850" colab={"base_uri": "https://localhost:8080/", "height": 102}
# Imports
# Colab-specific setup: clone the course repo into /content, work from module4.
import os, sys
os.chdir('/content')
# !git init .
# !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
# !git pull origin master
os.chdir('module4')
# + id="--8zMv9Iq_3B" colab_type="code" colab={}
# Disable warning
# Silence numpy FutureWarnings so notebook output stays readable.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + [markdown] id="MQeB6OCfrt04" colab_type="text"
# ### Load Data
# + id="md_dzn0CrUdE" colab_type="code" colab={}
import pandas as pd
# Tanzania waterpumps competition data; the asserts guard against a bad checkout.
train_features = pd.read_csv('../data/tanzania/train_features.csv')
train_labels = pd.read_csv('../data/tanzania/train_labels.csv')
test_features = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
assert train_features.shape == (59400, 40)
assert train_labels.shape == (59400, 2)
assert test_features.shape == (14358, 40)
assert sample_submission.shape == (14358, 2)
# + [markdown] id="tsv1j8i1ryE_" colab_type="text"
# ### Train/Validate/Test Split
# + id="I5MwknJoreoz" colab_type="code" outputId="7013c0f1-e294-43f5-89d4-26aeaf8e9e02" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Load initial train features and labels
from sklearn.model_selection import train_test_split
X_train = train_features
y_train = train_labels['status_group']
X_train.shape, y_train.shape
# + id="vwwYWwWxsGmP" colab_type="code" outputId="0e16a4f4-49d0-42c8-df4e-3a1994887c67" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Split the initial train features and labels 80% into new train and new validation
# Stratify on the label so class proportions match across both splits.
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, train_size = 0.80, test_size = 0.20,
    stratify = y_train, random_state=42
)
X_train.shape, X_val.shape, y_train.shape, y_val.shape
# + id="8XxcRPT4sLpg" colab_type="code" outputId="3dd31737-28f1-4bb6-ba24-1a309df80bed" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Check values of new train labels
y_train.value_counts(normalize=True)
# + id="snw_P3-asTCt" colab_type="code" outputId="98674e98-6b3d-409d-ddb9-60aa75574cda" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Check values of new validation labels
y_val.value_counts(normalize=True)
# + [markdown] id="hDyEKbbPsZsG" colab_type="text"
# ### One-Hot Encoding - Quantity
# + id="OgXptAiYsjPH" colab_type="code" outputId="4bc253aa-6f8c-463f-9b58-cf7d47a8429c" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Check values of quantity feature
X_train['quantity'].value_counts(normalize=True)
# + id="2-euYaI6sw7X" colab_type="code" outputId="e4b6d6bc-979f-48fe-d0ed-6523742518e2" colab={"base_uri": "https://localhost:8080/", "height": 306}
# Recombine X_train and y_train, for exploratory data analysis
train = X_train.copy()
train['status_group'] = y_train
train.groupby('quantity')['status_group'].value_counts(normalize=True)
# + id="m-T5P9Z5s_6-" colab_type="code" outputId="3cdc86bc-4e12-4495-f664-a1bd411ad4cb" colab={"base_uri": "https://localhost:8080/", "height": 399}
# Plot the values, dry shows a strong relationship to functional
import matplotlib.pyplot as plt
import seaborn as sns
# 'functional' = 1 if the pump works, else 0 -- used as the bar height below.
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='quantity', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Water Quantity')
# + [markdown] id="vIpG2-d53NtY" colab_type="text"
# ### One-Hot Encoding - Waterpoint Type
# + id="Lu_JKfVD0uQq" colab_type="code" outputId="77f2b27e-cf2d-4303-a8de-4adf800f6996" colab={"base_uri": "https://localhost:8080/", "height": 153}
X_train['waterpoint_type'].value_counts(normalize=True)
# + id="j22sVuil1Xry" colab_type="code" outputId="eed6d2b2-6642-4d10-e826-de6e93b39479" colab={"base_uri": "https://localhost:8080/", "height": 399}
# Recombine X_train and y_train, for exploratory data analysis
# Same EDA pattern: share of functional pumps per waterpoint type.
train = X_train.copy()
train['status_group'] = y_train
train.groupby('waterpoint_type')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='waterpoint_type', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Waterpoint Type')
# + [markdown] id="OpwyrxfE3n6s" colab_type="text"
# ### One-Hot Encoding - Extraction Type
# + id="uWh6pw4X2QoG" colab_type="code" outputId="53a3124d-f097-43d4-db6c-9c39dc0a039c" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Same EDA pattern: share of functional pumps per extraction type.
X_train['extraction_type'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('extraction_type')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='extraction_type', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Extraction Type')
# + [markdown] id="V4tmIM413yIE" colab_type="text"
# ### Bin and One-Hot Encoding - Installer
# + id="k_FZM59h4pGm" colab_type="code" outputId="54867481-2eed-4745-bed3-aae6dece6163" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Normalize installer names in train and validation, then bin the long tail.
# (Also removes the duplicated .str.lower() calls from the original cell.)
for _df in (X_train, X_val):
    _df['installer'] = _df['installer'].str.lower()
    # Map known truncated/variant spellings to canonical names.
    # Fix: the original used substring str.replace(), which also corrupted
    # values that were already correct (e.g. 'danida' -> 'danidaa',
    # 'community' -> 'communitynity'). Series.replace matches whole values.
    _df['installer'] = _df['installer'].replace({
        'danid': 'danida',
        'disti': 'district council',
        'commu': 'community',
        'central government': 'government',
        'kkkt _ konde and dwe': 'kkkt',
    })
X_train['installer'].value_counts(normalize=True)
# Keep only the 5 most frequent installers (variable name 'top10' kept for
# consistency with later cells); everything else becomes 'Other'.
top10 = X_train['installer'].value_counts()[:5].index
X_train.loc[~X_train['installer'].isin(top10), 'installer'] = 'Other'
X_val.loc[~X_val['installer'].isin(top10), 'installer'] = 'Other'
train = X_train.copy()
train['status_group'] = y_train
train.groupby('installer')['status_group'].value_counts(normalize=True)
# + id="M84Vm5jM8KkU" colab_type="code" outputId="7d007805-638b-47c2-d60e-e5de86d150ca" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Plot share of functional pumps per (binned) installer.
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='installer', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Installer')
# + [markdown] id="y1sLVgVJ4oyc" colab_type="text"
# ### New Feature - Pump Age
# + id="oUahkHzQCrRs" colab_type="code" outputId="fb2d411e-37a5-4958-ab22-4e800ab00f33" colab={"base_uri": "https://localhost:8080/", "height": 725}
# pump_age = years since construction (data snapshot year is 2013).
X_train['pump_age'] = 2013 - X_train['construction_year']
# construction_year == 0 (missing) would yield age 2013; reset those to 0 ...
X_train.loc[X_train['pump_age'] == 2013, 'pump_age'] = 0
X_val['pump_age'] = 2013 - X_val['construction_year']
X_val.loc[X_val['pump_age'] == 2013, 'pump_age'] = 0
# ... then impute age 10 for the missing values.
# NOTE(review): this also maps pumps genuinely built in 2013 (true age 0) to
# 10 -- confirm that is intended.
X_train.loc[X_train['pump_age'] == 0, 'pump_age'] = 10
X_val.loc[X_val['pump_age'] == 0, 'pump_age'] = 10
train = X_train.copy()
train['status_group'] = y_train
train.groupby('pump_age')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='pump_age', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Pump Age')
# + [markdown] id="26uJs-7GQAIk" colab_type="text"
# ### Bin and One-Hot Encoding - Funder
# + id="aP1m7Dar_zy9" colab_type="code" outputId="06209ad6-d30e-4008-f739-6338e21409b1" colab={"base_uri": "https://localhost:8080/", "height": 929}
# Lowercase and truncate funder to its first 3 characters to merge spelling variants.
X_train['funder'] = X_train['funder'].str.lower()
X_val['funder'] = X_val['funder'].str.lower()
X_train['funder'] = X_train['funder'].str[:3]
X_val['funder'] = X_val['funder'].str[:3]
X_train['funder'].value_counts(normalize=True)
# Keep the 20 most frequent prefixes (despite the variable name 'top10').
top10 = X_train['funder'].value_counts()[:20].index
X_train.loc[~X_train['funder'].isin(top10), 'funder'] = 'Other'
X_val.loc[~X_val['funder'].isin(top10), 'funder'] = 'Other'
train = X_train.copy()
train['status_group'] = y_train
train.groupby('funder')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='funder', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Funder')
# + [markdown] id="3umZyUzSQLVZ" colab_type="text"
# ### One-Hot Encoding - Water Quality
# + id="dggjPbnUHb2k" colab_type="code" outputId="624b6bc9-0b39-4645-8053-e93d0bb95682" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per water_quality value.
X_train['water_quality'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('water_quality')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='water_quality', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Water Quality')
# + [markdown] id="S9ymhThWUFu0" colab_type="text"
# ### One-Hot Encoding - Basin
# + id="7BDTgkUsQqo9" colab_type="code" outputId="0c666d43-306e-42a8-e240-33482de8df0c" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per basin.
X_train['basin'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('basin')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='basin', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Basin')
# + [markdown] id="C_cTtfqtUKgr" colab_type="text"
# ### One-Hot Encoding - Region
# + id="DM30270XSLrd" colab_type="code" outputId="ac5ae466-9332-4819-d2e0-4451c9dccea0" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per region.
X_train['region'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('region')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='region', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Region')
# + [markdown] id="RprzY6FEQVB5" colab_type="text"
# ### Use Mean for GPS Height Missing Values
# + id="p3sVlnFR3GcR" colab_type="code" outputId="381d5e72-59f6-411d-9e0a-f033550f77d7" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Impute gps_height == 0 (treated as missing) with the column mean.
# NOTE(review): the mean is computed over the full column *including* the
# zeros being replaced, and X_val uses its own mean rather than the train
# mean (leak-free but inconsistent) -- confirm both are intended.
X_train.loc[X_train['gps_height'] == 0, 'gps_height'] = X_train['gps_height'].mean()
X_val.loc[X_val['gps_height'] == 0, 'gps_height'] = X_val['gps_height'].mean()
train = X_train.copy()
train['status_group'] = y_train
train.groupby('gps_height')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']];
#sns.catplot(x='amount_tsh', y='functional', data=train, kind='bar', color='grey')
#plt.title('% of Waterpumps Functional by Pump Age')
# + [markdown] id="EZFGWgTEUUsT" colab_type="text"
# ### One-Hot Encoding - Payment
# + id="TBqAbmAgTT89" colab_type="code" outputId="23454a17-8ba0-46e8-cb6e-6d4e48799e0b" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per payment type.
X_train['payment'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('payment')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='payment', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Payment')
# + [markdown] id="OlqyEg2Gh-qg" colab_type="text"
# ### One-Hot Encoding - Source
# + id="wrXDpYMmWl9i" colab_type="code" outputId="a03ce49a-3ffc-411a-eff5-d12f49b26468" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per water source.
X_train['source'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('source')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='source', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Source')
# + [markdown] id="a1-u7T-piC3n" colab_type="text"
# ### Bin and One-Hot Encoding - LGA
# + id="t15-lLReXwS2" colab_type="code" outputId="c556da18-1175-4f52-bb17-8110d9ed886c" colab={"base_uri": "https://localhost:8080/", "height": 521}
# Keep the 10 most frequent LGA values; bin the rest as 'Other'.
X_train['lga'].value_counts(normalize=True)
top10 = X_train['lga'].value_counts()[:10].index
X_train.loc[~X_train['lga'].isin(top10), 'lga'] = 'Other'
X_val.loc[~X_val['lga'].isin(top10), 'lga'] = 'Other'
train = X_train.copy()
train['status_group'] = y_train
train.groupby('lga')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='lga', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by LGA')
# + [markdown] id="ao00IqhJiMqq" colab_type="text"
# ### Bin and One-Hot Encoding - Ward
# + id="q1P52FVDaCCh" colab_type="code" outputId="c1a87d39-8ab5-4835-cf01-bda35763c8da" colab={"base_uri": "https://localhost:8080/", "height": 521}
# Keep the 20 most frequent wards (despite the variable name 'top10').
X_train['ward'].value_counts(normalize=True)
top10 = X_train['ward'].value_counts()[:20].index
X_train.loc[~X_train['ward'].isin(top10), 'ward'] = 'Other'
X_val.loc[~X_val['ward'].isin(top10), 'ward'] = 'Other'
train = X_train.copy()
train['status_group'] = y_train
train.groupby('ward')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='ward', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Ward')
# + [markdown] id="xusT0WUqizpU" colab_type="text"
# ### One-Hot Encode - Scheme Management
# + id="eP93vksddCiU" colab_type="code" outputId="4a75e05e-034b-4be5-d1ab-5341f97a14b7" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per scheme_management value.
X_train['scheme_management'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('scheme_management')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='scheme_management', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Scheme Management')
# + [markdown] id="Gp1QWXm1i7b3" colab_type="text"
# ### One-Hot Encode - Management
# + id="cU4lUhvQ7BYA" colab_type="code" outputId="31342975-656a-49d3-b8c1-2c3ed637cea7" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per management value.
X_train['management'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('management')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='management', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Management')
# + [markdown] id="wZpED25kEsOI" colab_type="text"
# ### Create a Region/District Feature
# + id="Mb6j1BbQfSpN" colab_type="code" outputId="71b23246-4d3b-4c1a-8c17-8e77c79b82b2" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Share of functional pumps per region_code.
X_train['region_code'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('region_code')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='region_code', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Region Code')
# + id="ZAk00mJv-tfm" colab_type="code" outputId="900038b8-4838-43e5-8592-53585ff58b5f" colab={"base_uri": "https://localhost:8080/", "height": 623}
# Concatenate region and district codes into one categorical feature.
# NOTE(review): plain string concatenation is ambiguous ('1'+'12' == '11'+'2');
# a separator would make the key unique -- confirm this is acceptable.
X_train['region_district'] = X_train['region_code'].astype(str) + X_train['district_code'].astype(str)
X_val['region_district'] = X_val['region_code'].astype(str) + X_val['district_code'].astype(str)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('region_district')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='region_district', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Region/District')
# + [markdown] id="Q7548-TuLCSd" colab_type="text"
# ### One-Hot Encode - Subvillage
# + id="Fx77-ltOooSl" colab_type="code" outputId="f23a7b24-ecfd-4d73-8420-6fd4fddfb23e" colab={"base_uri": "https://localhost:8080/", "height": 521}
# Keep the 10 most frequent subvillages; bin the rest as 'Other'.
X_train['subvillage'].value_counts(normalize=True)
top10 = X_train['subvillage'].value_counts()[:10].index
X_train.loc[~X_train['subvillage'].isin(top10), 'subvillage'] = 'Other'
X_val.loc[~X_val['subvillage'].isin(top10), 'subvillage'] = 'Other'
train = X_train.copy()
train['status_group'] = y_train
train.groupby('subvillage')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='subvillage', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Subvillage')
# + [markdown] id="Bkh6JlEKMLcz" colab_type="text"
# ### One-Hot Encoding - Water Quality
# + id="UIxID3VTfZyu" colab_type="code" outputId="0667c8b1-88bb-4e97-87fd-d23a863a24fc" colab={"base_uri": "https://localhost:8080/", "height": 419}
# NOTE: this cell duplicates the earlier "Water Quality" EDA cell verbatim.
X_train['water_quality'].value_counts(normalize=True)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('water_quality')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']]
sns.catplot(x='water_quality', y='functional', data=train, kind='bar', color='grey')
plt.title('% of Waterpumps Functional by Quality')
# + [markdown] id="l1TtWafvZCX4" colab_type="text"
# ### Lat/Long Cleanup
# + id="qPlDqabSCv_2" colab_type="code" outputId="389ccc74-e8c2-439f-8d8c-99fcebd70d62" colab={"base_uri": "https://localhost:8080/", "height": 119}
#test['region'].value_counts()
# Replace obviously bad GPS coordinates (latitude near 0 / longitude == 0)
# with the per-region mean, for the two regions known to contain them.
average_lat = X_train.groupby('region').latitude.mean().reset_index()
average_long = X_train.groupby('region').longitude.mean().reset_index()
shinyanga_lat = average_lat.loc[average_lat['region'] == 'Shinyanga', 'latitude']
shinyanga_long = average_long.loc[average_lat['region'] == 'Shinyanga', 'longitude']
# NOTE(review): the scalar is pulled out by index label ([17], [13] below);
# this depends on the region's alphabetical position after reset_index and
# breaks if the region list changes -- .iloc[0] / .item() would be robust.
X_train.loc[(X_train['region'] == 'Shinyanga') & (X_train['latitude'] > -1), ['latitude']] = shinyanga_lat[17]
X_train.loc[(X_train['region'] == 'Shinyanga') & (X_train['longitude'] == 0), ['longitude']] = shinyanga_long[17]
mwanza_lat = average_lat.loc[average_lat['region'] == 'Mwanza', 'latitude']
mwanza_long = average_long.loc[average_lat['region'] == 'Mwanza', 'longitude']
X_train.loc[(X_train['region'] == 'Mwanza') & (X_train['latitude'] > -1), ['latitude']] = mwanza_lat[13]
X_train.loc[(X_train['region'] == 'Mwanza') & (X_train['longitude'] == 0) , ['longitude']] = mwanza_long[13]
# + id="jctHl8W0egLL" colab_type="code" colab={}
#X_train['water_pop'] = X_train['amount_tsh']/X_train['population']
#X_val['water_pop'] = X_val['amount_tsh']/X_val['population']
#train = X_train.copy()
#train['status_group'] = y_train
#train.groupby('water_pop')['status_group'].value_counts(normalize=True)
#train['functional']= (train['status_group'] == 'functional').astype(int)
#train[['status_group', 'functional']]
#sns.catplot(x='water_pop', y='functional', data=train, kind='bar', color='grey')
#plt.title('% of Waterpumps Functional by Pump Age')
# + id="7CHqONMio3CY" colab_type="code" outputId="34771ef3-7633-4c02-ca79-0168b6fa9aac" colab={"base_uri": "https://localhost:8080/", "height": 187}
import geopandas
import json
# NOTE(review): geopy's vincenty was deprecated and later removed; geodesic is
# the drop-in replacement under the same alias if this import fails.
from geopy.distance import vincenty as get_geodesic_distance
# City coordinates + population, used to attach a "nearest city population"
# feature to every well.
tnz_cities = pd.read_csv('https://raw.githubusercontent.com/JimKing100/DS-Unit-2-Regression-Classification/master/data/tanzania_cities_pop2.csv')
def well_distance(source_longitude, source_latitude):
    """Return the population of the Tanzanian city nearest to the given point.

    NOTE: despite the name, this returns the *population* of the closest city
    (from the module-level `tnz_cities` table), not the distance itself.
    Distances are geodesic, via the geopy alias `get_geodesic_distance`.
    """
    source_lonlat = source_longitude, source_latitude
    # Work on just the columns we need (module-level tnz_cities is not mutated).
    target_table = pd.DataFrame(tnz_cities, columns = ['Longitude', 'Latitude', 'Population'])

    def get_distance(row):
        # Distance in meters from this city to the query point.
        target_lonlat = row['Longitude'], row['Latitude']
        return get_geodesic_distance(target_lonlat, source_lonlat).meters

    distances = target_table.apply(get_distance, axis=1)
    # idxmin picks the first minimum, matching the original stable
    # sort_values + head(1) + reset_index dance without sorting every row.
    return target_table.loc[distances.idxmin(), 'Population']
# Attach the nearest-city population to every well.
# NOTE: row-wise apply over ~47k train rows recomputes all city distances per
# row -- simple but slow.
X_train['near_pop'] = X_train.apply(lambda x: well_distance(x['longitude'], x['latitude']), axis=1)
X_val['near_pop'] = X_val.apply(lambda x: well_distance(x['longitude'], x['latitude']), axis=1)
train = X_train.copy()
train['status_group'] = y_train
train.groupby('near_pop')['status_group'].value_counts(normalize=True)
train['functional']= (train['status_group'] == 'functional').astype(int)
train[['status_group', 'functional']];
# + id="0ZmeDoxqOKtp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="0488802d-f23f-4dae-8fed-e70af80d9c18"
# Inspect the fully engineered training frame.
X_train
# + id="yCCpk1giFicw" colab_type="code" colab={}
#X_train.loc[X_train['permit'].isnull(), 'permit'] = False
#X_val.loc[X_val['permit'].isnull(), 'permit'] = False
#train = X_train.copy()
#train['status_group'] = y_train
#train.groupby('permit')['status_group'].value_counts(normalize=True)
#train['functional']= (train['status_group'] == 'functional').astype(int)
#train[['status_group', 'functional']];
#sns.catplot(x='permit', y='functional', data=train, kind='bar', color='grey')
#plt.title('% of Waterpumps Functional by Permit')
# + id="MOq5hz42rxs8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="20cb9fb0-19dd-48e5-a928-0afc8cc4c7df"
def tsh_calc(tsh, source, base, waterpoint):
    """Impute a zero amount_tsh from the group median lookup table.

    Looks up the (source_class, basin, waterpoint_type_group) key in the
    module-level `tsh_dict`; non-zero values, and zero values with no matching
    key, are returned unchanged.
    """
    if tsh == 0:
        # dict.get collapses the original membership test + second lookup +
        # duplicated fall-through returns into one expression.
        return tsh_dict.get((source, base, waterpoint), tsh)
    return tsh
# Median amount_tsh of the non-zero records per (source_class, basin,
# waterpoint_type_group); used by tsh_calc to impute zeros.
temp = X_train[X_train['amount_tsh'] != 0].groupby(['source_class',
                                                    'basin',
                                                    'waterpoint_type_group'])['amount_tsh'].median()
tsh_dict = dict(temp)
# NOTE(review): this imputation is applied to X_train only, not X_val or the
# test set -- confirm that is intended.
X_train['amount_tsh'] = X_train.apply(lambda x: tsh_calc(x['amount_tsh'], x['source_class'], x['basin'], x['waterpoint_type_group']), axis=1)
# + [markdown] id="vBATDBn30mLX" colab_type="text"
# ### Run the Logistic Regression
# + id="Oc7PYIt2texi" colab_type="code" colab={}
import sklearn
sklearn.__version__  # displayed to confirm the environment version
# Import the class
from sklearn.linear_model import LogisticRegressionCV
# Import package and scaler
import category_encoders as ce
from sklearn.preprocessing import StandardScaler
# + id="fBcw2yiWtSVk" colab_type="code" outputId="25160907-8ee5-4bba-a900-3f0b50960f5f" colab={"base_uri": "https://localhost:8080/", "height": 165}
# use quantity feature and the numerical features but drop id
categorical_features = ['quantity', 'waterpoint_type', 'extraction_type', 'installer',
                        'basin', 'region', 'payment', 'source', 'lga', 'public_meeting',
                        'scheme_management', 'permit', 'management', 'region_district',
                        'subvillage', 'funder', 'water_quality', 'ward']
#
# All numeric columns except the identifier and two excluded features.
numeric_features = X_train.select_dtypes('number').columns.drop('id').drop('num_private').drop('population').tolist()
features = categorical_features + numeric_features
# make subsets using the quantity feature all numeric features except id
X_train_subset = X_train[features]
X_val_subset = X_val[features]
# Do the encoding (fit on train only; val reuses the fitted mapping)
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train_encoded = encoder.fit_transform(X_train_subset)
X_val_encoded = encoder.transform(X_val_subset)
# Use the scaler (fit on train only)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_encoded)
X_val_scaled = scaler.transform(X_val_encoded)
# Fit the model and check the accuracy
model = LogisticRegressionCV(n_jobs = -1)
model.fit(X_train_scaled, y_train)
print('Validation Accuracy', model.score(X_val_scaled, y_val));
# + [markdown] id="BJNF5xa4LPw1" colab_type="text"
# ### Run RandomForestClassifier
# + id="LAkZOXEX5rlk" colab_type="code" outputId="719b8f91-8f40-4d50-a1a5-8a75dc46925e" colab={"base_uri": "https://localhost:8080/", "height": 238}
# Random forest on the same encoded/scaled matrices (scaling is unnecessary
# but harmless for trees). `model` is rebound here, so the prediction cell
# below uses this forest, not the logistic regression.
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=1000,
                               random_state=42,
                               max_features = 'auto',
                               n_jobs=-1,
                               verbose = 1)
model.fit(X_train_scaled, y_train)
print('Validation Accuracy', model.score(X_val_scaled, y_val));
# + id="0o8dPt50N0MN" colab_type="code" outputId="c3283bc9-6cab-498b-873c-5c1066fca683" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Apply the training feature engineering to the test set, then predict.
test_features['pump_age'] = 2013 - test_features['construction_year']
test_features.loc[test_features['pump_age'] == 2013, 'pump_age'] = 0
# Fix: train/val also mapped pump_age 0 -> 10; without this line the test set
# had a value distribution the model never saw (train/serve skew).
test_features.loc[test_features['pump_age'] == 0, 'pump_age'] = 10
test_features['near_pop'] = test_features.apply(lambda x: well_distance(x['longitude'], x['latitude']), axis=1)
#test_features['water_pop'] = X_train['amount_tsh']/X_train['population']
test_features['region_district'] = test_features['region_code'].astype(str) + test_features['district_code'].astype(str)
# (Removed a no-op: `test_features.drop(columns=['num_private'])` discarded its
# return value, and 'num_private' is already excluded from `features`.)
# NOTE(review): gps_height imputation, installer/funder/lga/ward/subvillage
# binning and amount_tsh imputation are NOT applied to the test set; unseen
# categories are handled only by the one-hot encoder -- confirm intended.
X_test_subset = test_features[features]
X_test_encoded = encoder.transform(X_test_subset)
X_test_scaled = scaler.transform(X_test_encoded)
assert all(X_test_encoded.columns == X_train_encoded.columns)
y_pred = model.predict(X_test_scaled)
# + id="eUMIufHhUKu2" colab_type="code" colab={}
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('/content/submission-01.csv', index=False)
| module4/assignment_regression_classification_4pop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.1 64-bit
# name: python_defaultSpec_1597625217819
# ---
# Connect to the ERP DB2 database to fetch TACAIA2 (front end: ACAI25),
# TACACM4 (ACAC80, non-finished goods), TACACMM (ACACMM) and
# TACACJ5 (scrap-steel warehousing history).
import ibm_db
import pandas as pd
connStr='DATABASE=BHBXA1;HOSTNAME=192.168.2.28;PORT=50000;PROTOCOL=TCPIP;UID=bhbxa1;PWD=<PASSWORD>'
con=ibm_db.connect(connStr,'','')
import ibm_db_dbi
# Wrap the low-level handle in a DB-API connection so pandas can query it.
conn=ibm_db_dbi.Connection(con)
# + tags=[]
# Load the full TACAIA2 table; APP_TRNC_TIME is parsed as a timestamp.
tacaia2=pd.read_sql('select * from tacaia2',conn,parse_dates={'APP_TRNC_TIME':'%Y%m%d%H%M%S'})
# -
from pandas.api.types import CategoricalDtype
tacaia2['ACCOUNT_TITLE_ITEM']=tacaia2['ACCOUNT_TITLE_ITEM'].astype(CategoricalDtype(['01','33','03','31','50','51']))# Accounting codes are ordered 01-33-03-31-50-51: a material is produced first, and only afterwards dedicated, consumed or sold. A material being re-dedicated more than once within the same period (e.g. output of one dedication immediately dedicated to another order) is not handled here.
tacaia2['ACCOUNT_TITLE_ITEM'].cat.categories
tacaia2.set_index(['ACCOUNT_TITLE_ITEM','MAT_NO','UNIT_CODE','SALE_ORDER_SUB_NO'],inplace=True) # Put the accounting code first in the index so that, for one input accounting code, we can iterate over the material numbers that have input rows.
tacaia2.sort_index(level='MAT_NO',inplace=True) # If sorting on the full MultiIndex does not work, fall back to sorting by accounting code alone.
from numpy import nan as NA
tacaia2=tacaia2.dropna() # Drop the blank first row.
tacaia2['MAT_TYPE']=tacaia2['MAT_TYPE'].astype('int64')
# Append zero-initialised result columns that the allocation loop fills in.
tacaia2.insert(tacaia2.shape[1],'MAT_VALUE',0.00)
tacaia2.insert(tacaia2.shape[1],'MAT_PRICE',0.00)
tacaia2.insert(tacaia2.shape[1],'LOST_VALUE',0.00)
tacaia2.insert(tacaia2.shape[1],'LOST_PRICE',0.00)
tacaia2.insert(tacaia2.shape[1],'SALE_VALUE',0.00)
tacaia2.insert(tacaia2.shape[1],'SALE_PRICE',0.00)
def InputPrices(MAT_NO,PRODUCT_CODE,MTRL_NO,MAT_WT,ACC_TITLE):
    """Price a material consumed (input) under accounting code ACC_TITLE.

    Looks up the unit price in the finished-goods ledger (tacacm4, global) for
    product codes starting with 0/1/2 and in the materials ledger (tacacmm,
    global) for codes starting with 3/4/5/9, then derives the loss caused by
    downgraded output (MAT_TYPE 4/5 second-grade, 9 scrap) produced from this
    input, using the scrap price table tacacj5 (global).

    Returns a dict with keys Mat_Value, Mat_Price, Lost_Value, Lost_Price and
    Other_Lost_Value (loss to re-allocate when no first-grade output exists).
    """
    if PRODUCT_CODE[0] in ['0','1','2']:
        MatPrice=(tacacm4.loc[PRODUCT_CODE,'PRST_VRNC_AMOUNT']+tacacm4.loc[PRODUCT_CODE,'OTHR_INCM_VRNC_AMT']-tacacm4.loc[PRODUCT_CODE,'OTHER_SEND_VRNC_AMT']+tacacm4.loc[PRODUCT_CODE,'CRRT_OTPT_VRNC_AMT']+tacacm4.loc[PRODUCT_CODE,'IN_MATCH_VRNC_AMT'])/(tacacm4.loc[PRODUCT_CODE,'PERIOD_START_N']+tacacm4.loc[PRODUCT_CODE,'OTHERS_RECEIVE_N']-tacacm4.loc[PRODUCT_CODE,'OTHERS_SEND_N']+tacacm4.loc[PRODUCT_CODE,'CURRENT_N']+tacacm4.loc[PRODUCT_CODE,'IN_MATCH_N'])# input unit price = (opening + other income - other outgo + current output + current transfer-in) amounts / weights
    elif PRODUCT_CODE[0] in ['3','4','5','9']:
        MatPrice=(tacacmm.loc[PRODUCT_CODE,'START_AMOUNT']+tacacmm.loc[PRODUCT_CODE,'OTHR_INCM_ACT_AMT']-tacacmm.loc[PRODUCT_CODE,'OTHR_SEND_ACT_AMT']+tacacmm.loc[PRODUCT_CODE,'CRRT_OTPT_ACT_AMT']+tacacmm.loc[PRODUCT_CODE,'NEXT_MATCH_AMT']
        )/(tacacmm.loc[PRODUCT_CODE,'PERIOD_START_N']+tacacmm.loc[PRODUCT_CODE,'OTHERS_RECEIVE_N']-tacacmm.loc[PRODUCT_CODE,'OTHERS_SEND_N']+tacacmm.loc[PRODUCT_CODE,'CURRENT_N']+tacacmm.loc[PRODUCT_CODE,'NEXT_MATCH_USED_N']
        )# input unit price = (opening + other income - other outgo + current output + current transfer-in) amounts / weights
    else:
        raise Exception('MAT_TYPE not NORMAL!')
    MatValue=MatPrice*MAT_WT
    LostPercent=0.4# loss percentage for second-grade plate and grade-2 product (could later be read from the system parameter table; fixed at the usual 40% for now)
    # If this input produced any rows with MAT_TYPE > 3 (second-grade or scrap),
    # split the loss between MAT_TYPE 4/5 (percentage loss) and 9 (scrap price
    # differential); otherwise the loss is 0.
    if tacaia2.loc['0' + ACC_TITLE[1]][(tacaia2.loc['0' + ACC_TITLE[1]].IN_MAT_NO==MAT_NO) &(tacaia2.loc['0' + ACC_TITLE[1]].MAT_TYPE>3)].count()['MAT_WT']>0:
        LostValue=tacaia2.loc['0' + ACC_TITLE[1]][(tacaia2.loc['0' + ACC_TITLE[1]].IN_MAT_NO==MAT_NO) & ((tacaia2.loc['0' + ACC_TITLE[1]].MAT_TYPE==4) | (tacaia2.loc['0' + ACC_TITLE[1]].MAT_TYPE==5))].sum()['MAT_WT']*MatPrice*LostPercent+tacaia2.loc['0' + ACC_TITLE[1]][(tacaia2.loc['0' + ACC_TITLE[1]].IN_MAT_NO==MAT_NO) &(tacaia2.loc['0' + ACC_TITLE[1]].MAT_TYPE==9)].sum()['MAT_WT']*(MatPrice-tacacj5.loc['900' + MTRL_NO + '000000','CRRT_YEAR_STD_AMT'])
    else:
        LostValue=0.00
    # Spread the loss over the first-grade output weight if any exists.
    if tacaia2.loc['0' + ACC_TITLE[1]][(tacaia2.loc['0' + ACC_TITLE[1]].IN_MAT_NO==MAT_NO)&(tacaia2.loc['0' + ACC_TITLE[1]].MAT_TYPE<=3)].count()['MAT_WT']>0:
        LostPrice=LostValue/tacaia2.loc['0' + ACC_TITLE[1]][(tacaia2.loc['0' + ACC_TITLE[1]].IN_MAT_NO==MAT_NO)&(tacaia2.loc['0' + ACC_TITLE[1]].MAT_TYPE<=3)].sum()['MAT_WT']
        OtherLostValue=0# if the input produced no first-grade output, the aggregated loss must instead be re-allocated to the product/by-product with the largest output.
    else:
        OtherLostValue=LostValue
        LostPrice=0.00
    return {
        'Mat_Value':MatValue,
        'Mat_Price':MatPrice,
        'Lost_Value':LostValue,
        'Lost_Price':LostPrice,
        'Other_Lost_Value':OtherLostValue
    }
def SalePrices(PRODUCT_CODE,MAT_WT):
    """Return the sale value and unit price of a sold material.

    Unit price = (opening + other income - other outgo + current output
    + current transfer-in - current input - current transfer-out)
    amounts / weights, taken from the finished-goods ledger (tacacm4,
    global) for product codes starting with 0/1/2 and the materials
    ledger (tacacmm, global) for codes starting with 3/4/5/9.

    Returns a dict with keys Sale_Value and Sale_Price.
    Raises for any other leading digit (previously this fell through and
    crashed with UnboundLocalError on SalePrice).
    """
    if PRODUCT_CODE[0] in ['0','1','2']:
        SalePrice=(tacacm4.loc[PRODUCT_CODE,'PRST_VRNC_AMOUNT']+tacacm4.loc[PRODUCT_CODE,'OTHR_INCM_VRNC_AMT']-tacacm4.loc[PRODUCT_CODE,'OTHER_SEND_VRNC_AMT']+tacacm4.loc[PRODUCT_CODE,'CRRT_OTPT_VRNC_AMT']+tacacm4.loc[PRODUCT_CODE,'IN_MATCH_VRNC_AMT']-tacacm4.loc[PRODUCT_CODE,'NEXT_PRSP_VRNC_AMT']-tacacm4.loc[PRODUCT_CODE,'NEXT_MATCH_VRNC_AMT'])/(tacacm4.loc[PRODUCT_CODE,'PERIOD_START_N']+tacacm4.loc[PRODUCT_CODE,'OTHERS_RECEIVE_N']-tacacm4.loc[PRODUCT_CODE,'OTHERS_SEND_N']+tacacm4.loc[PRODUCT_CODE,'CURRENT_N']+tacacm4.loc[PRODUCT_CODE,'IN_MATCH_N']-tacacm4.loc[PRODUCT_CODE,'NEXT_PRSP_USED_N']-tacacm4.loc[PRODUCT_CODE,'NEXT_MATCH_USED_N'])
    elif PRODUCT_CODE[0] in ['3','4','5','9']:
        # NOTE(review): NEXT_MATCH_AMT is both added and subtracted in this
        # numerator (it cancels out) -- confirm whether one of the two terms
        # should be a different column (cf. IN_MATCH_* in the branch above).
        SalePrice=(tacacmm.loc[PRODUCT_CODE,'START_AMOUNT']+tacacmm.loc[PRODUCT_CODE,'OTHR_INCM_ACT_AMT']-tacacmm.loc[PRODUCT_CODE,'OTHR_SEND_ACT_AMT']+tacacmm.loc[PRODUCT_CODE,'CRRT_OTPT_ACT_AMT']+tacacmm.loc[PRODUCT_CODE,'NEXT_MATCH_AMT']-tacacmm.loc[PRODUCT_CODE,'NEXT_PRSP_STD_AMT']-tacacmm.loc[PRODUCT_CODE,'NEXT_MATCH_AMT']
        )/(tacacmm.loc[PRODUCT_CODE,'PERIOD_START_N']+tacacmm.loc[PRODUCT_CODE,'OTHERS_RECEIVE_N']-tacacmm.loc[PRODUCT_CODE,'OTHERS_SEND_N']+tacacmm.loc[PRODUCT_CODE,'CURRENT_N']+tacacmm.loc[PRODUCT_CODE,'NEXT_MATCH_USED_N']-tacacmm.loc[PRODUCT_CODE,'NEXT_PRSP_USED_N']-tacacmm.loc[PRODUCT_CODE,'NEXT_MATCH_USED_N']
        )
    else:
        # Consistent with InputPrices: fail loudly on unexpected product codes.
        raise Exception('MAT_TYPE not NORMAL!')
    SaleValue=SalePrice*MAT_WT
    return {
        'Sale_Value':SaleValue,
        'Sale_Price':SalePrice
    }
def ProducePrices(INPUT_MAT,ACC_TITLE,MAT_TYPE,MAT_WT,MTRL_NO):
    """Price an output (produced) material from the price of its input material.

    MAT_TYPE <= 3 (first-grade): inherit the input row's unit price and loss
    price from tacaia2 (global).
    MAT_TYPE 4/5 (second-grade/defective): input price discounted by the loss
    percentage; no loss is carried.
    MAT_TYPE 9 (scrap): priced at the standard scrap price from tacacj5 (global).

    Returns a dict with keys Mat_Value, Mat_Price, Lost_Value, Lost_Price.
    Raises for any other MAT_TYPE (previously this fell through and crashed
    with UnboundLocalError at the return statement).
    """
    if MAT_TYPE<=3:
        MatPrice=tacaia2.loc[('3'+ACC_TITLE[1],INPUT_MAT),'MAT_PRICE'][0]
        MatValue=MatPrice*MAT_WT
        LostPrice=tacaia2.loc[('3'+ACC_TITLE[1],INPUT_MAT),'LOST_PRICE'][0]
        LostValue=LostPrice*MAT_WT
    elif MAT_TYPE in [4,5]:
        LostPercent=0.4# loss percentage for second-grade plate and grade-2 product (could later be read from the system parameter table; fixed at the usual 40% for now)
        MatPrice=tacaia2.loc[('3'+ACC_TITLE[1],INPUT_MAT),'MAT_PRICE'][0]*(1-LostPercent)
        MatValue=MatPrice*MAT_WT
        LostPrice=0.00
        LostValue=LostPrice*MAT_WT
    elif MAT_TYPE==9:
        MatPrice=tacacj5.loc['900' + MTRL_NO + '000000','CRRT_YEAR_STD_AMT']
        MatValue=MatPrice*MAT_WT
        LostPrice=0.00
        LostValue=LostPrice*MAT_WT
    else:
        # Consistent with InputPrices: fail loudly instead of returning
        # unbound local names.
        raise Exception('MAT_TYPE not NORMAL!')
    return {
        'Mat_Value':MatValue,
        'Mat_Price':MatPrice,
        'Lost_Value':LostValue,
        'Lost_Price':LostPrice
    }
# +
#tacaia2.sort_index(level=['ACCOUNT_TITLE_ITEM','MAT_NO','UNIT_CODE','SALE_ORDER_SUB_NO'],inplace=True) #跟着前面重设索引,把账务代码提到第1位,对应改变索引排序
# +
#tacaia2.loc['31','KEY_SEQ']
# + tags=[]
#tacaia2.to_excel('tacaia2-202005.xlsx',sheet_name='tacaia2')
#tacaia2.loc[('B2003125AFA','LF03','33','MD2005061901'),'PRODUCT_CODE'][0][:4] #Pandas多层索引的引用
#[x[0] for x in tacaia2.index] #多层索引中一层所有元素的列表
# + tags=[]
# Accounting period of the loaded data, format YYYYMM; read from index 1
# (presumably the first data row after the blank row was dropped -- TODO confirm).
acc_period=tacaia2['ACCOUNT_PERIOD'][1]
print(acc_period)
# -
# Load the finished-goods cost ledger for the accounting period, using
# parameterized queries (YEAR = YYYY, MON = MM from acc_period).
tacacm4=pd.read_sql('select * from tacacm4 where YEAR=? and MON=?',conn,params=[acc_period[:4],acc_period[4:]])
tacacm4.set_index(['PRODUCT_CODE'],inplace=True)
# Drop audit/metadata and standard-amount columns not used by the allocation.
tacacm4.drop(['REC_CREATOR', 'REC_CREATE_TIME', 'REC_REVISOR', 'REC_REVISE_TIME','ARCHIVE_FLAG', 'COMPANY_CODE', 'COMPANY_CNAME','COST_CENTER', 'STD_PRICE',"CURRNT_STD_AMOUNT","CRRT_OTPT_STD_AMT","OTHR_INCM_STD_AMT","SALE_STD_AMOUNT","NEXT_PRSP_STD_AMT","NEXT_MATCH_STD_AMT","OTHER_SEND_STD_AMT","PERIOD_END_STD_AMT","IN_MATCH_STD_AMT"],axis=1,inplace=True)
# Zero the accumulator columns that WriteSFC and the main loop will fill.
tacacm4[["CURRENT_N","CRRT_OTPT_VRNC_AMT","SALE_N","SALE_VRNC_AMOUNT","NEXT_PRSP_USED_N","NEXT_PRSP_VRNC_AMT","NEXT_MATCH_USED_N","NEXT_MATCH_VRNC_AMT","PERIOD_END_N","PERIOD_END_VRNC_AMT","IN_MATCH_N","IN_MATCH_VRNC_AMT"]]=[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00]
# +
#tacacmm.to_excel('tacacmm-202006.xlsx')
# +
#tacacm4[['PERIOD_END_N','PERIOD_END_VRNC_AMT']]#test
# +
#tacacm4[['PERIOD_END_N','PERIOD_END_VRNC_AMT']]=[0.00,0.00]
#tacacm4[['PERIOD_END_N','PERIOD_END_VRNC_AMT']]#test
# -
tacacm4.columns#test
# Load the materials cost ledger for the same accounting period.
tacacmm=pd.read_sql('select * from tacacmm where YEAR=? and MON=?',conn,params=[acc_period[:4],acc_period[4:]])
tacacmm.set_index(['PRODUCT_CODE'],inplace=True)
tacacmm.drop(['REC_CREATOR', 'REC_CREATE_TIME', 'REC_REVISOR', 'REC_REVISE_TIME','ARCHIVE_FLAG', 'COMPANY_CODE', 'COMPANY_CNAME', 'STD_PRICE'],axis=1,inplace=True)
# Zero the accumulator columns that WriteSFC and the main loop will fill.
tacacmm[['CURRENT_N','CRRT_OTPT_ACT_AMT','NEXT_MATCH_AMT', 'NEXT_MATCH_USED_N', 'GM_N', 'GM_ACT_AMT', 'SY_N','SY_ACT_AMT', 'LL_N','LL_ACT_AMT','PERIOD_END_N', 'PERIOD_END_ACT_AMT','NEXT_PRSP_USED_N','NEXT_PRSP_STD_AMT', 'IN_MATCH_AMT', 'IN_MATCH_N']]=[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00]
# Load the scrap-steel price history for the period.
tacacj5=pd.read_sql_query('select * from tacacj5 where YEAR_MON=?',conn,params=[acc_period])
tacacj5.set_index(['PRODUCT_CODE'],inplace=True)
tacacj5.drop(['REC_CREATOR', 'REC_CREATE_TIME', 'REC_REVISOR', 'REC_REVISE_TIME','ARCHIVE_FLAG', 'COMPANY_CODE', 'COMPANY_CNAME'],axis=1,inplace=True)
# + tags=[]
# Sanity-check the three loaded ledgers.
print(tacacj5.head())
print(tacacm4.head())
print(tacacmm.head())
# +
#将ACAI25表按1、材料号升序;2、账务代码:按33 03 31 01 50 51 的顺序排列;3、销售合同号按升序排列
#tacaia2[tacaia2['MAT_WT'].rank(method='dense',ascending=False)==3]
# + tags=[]
#对账务代码自定义排序:先设排序的列表;将账务代码列的数据类型改成‘category’(astype);用set_category,设置账务代码的顺序;用sort_values使排序生效,同时,也将材料号和销售合同升序排列
'''acc_ttl_list=['33','03','31','01','50','51']
tacaia2['ACCOUNT_TITLE_ITEM']=tacaia2['ACCOUNT_TITLE_ITEM'].astype('category')
tacaia2['ACCOUNT_TITLE_ITEM'].cat.set_category(acc_ttl_list,inplace=True)
tacaia2.sortvalues(['MAT_NO','ACCOUNT_TITLE_ITEM','SALE_ORDER_SUB_NO'],ascending=True,inplace=True)
tacaia2'''
# -
def WriteSFC(PRODUCT_CODE,ACC_TITLE,MAT_VALUE,LOST_VALUE,SALE_VALUE,WT):
    """Accumulate weight and value into the ledger row for PRODUCT_CODE.

    Chooses the target ledger by the product code's first digit (tacacm4 for
    0/1/2, tacacmm for 3/4/5/9; both are module-level globals) and maps the
    accounting code ACC_TITLE to the ledger's weight and value columns.
    The value written is material + loss for output/input codes (01/03),
    material only for transfers (31/33) and the sale value for sales (50/51).
    WT is added to the weight column, the value to the value column, in place.
    Raises for any other leading digit (previously this fell through and
    crashed with UnboundLocalError on SFCColumnName).
    """
    if PRODUCT_CODE[0] in ['0','1','2']:
        SFCColumnName={
            'df':tacacm4,
            'Num':{
                '01':'CURRENT_N',
                '33':'NEXT_MATCH_USED_N',
                '03':'IN_MATCH_N',
                '31':'NEXT_PRSP_USED_N',
                '50':'SALE_N',
                '51':'SALE_N'
            },
            'Value':{
                '01':'CRRT_OTPT_VRNC_AMT',
                '33':'NEXT_MATCH_VRNC_AMT',
                '03':'IN_MATCH_VRNC_AMT',
                '31':'NEXT_PRSP_VRNC_AMT',
                '50':'SALE_VRNC_AMOUNT',
                '51':'SALE_VRNC_AMOUNT'
            }
        }
    elif PRODUCT_CODE[0] in ['3','4','5','9']:
        SFCColumnName={
            'df':tacacmm,
            'Num':{
                '01':'CURRENT_N',
                '33':'NEXT_MATCH_USED_N',
                '03':'IN_MATCH_N',
                '31':'NEXT_PRSP_USED_N',
                '50':'GM_N',
                '51':'SY_N'
            },
            'Value':{
                '01':'CRRT_OTPT_ACT_AMT',
                '33':'NEXT_MATCH_AMT',
                '03':'IN_MATCH_AMT',
                '31':'NEXT_PRSP_STD_AMT',
                '50':'GM_ACT_AMT',
                '51':'SY_ACT_AMT'
            }
        }
    else:
        # Consistent with InputPrices: fail loudly on unexpected product codes.
        raise Exception('MAT_TYPE not NORMAL!')
    # Which value goes into the ledger depends on the accounting code.
    SFCValue={
        '01':MAT_VALUE+LOST_VALUE,
        '03':MAT_VALUE+LOST_VALUE,
        '31':MAT_VALUE,
        '33':MAT_VALUE,
        '50':SALE_VALUE,
        '51':SALE_VALUE
    }
    SFCColumnName['df'].loc[PRODUCT_CODE,SFCColumnName['Num'][ACC_TITLE]]+=WT
    SFCColumnName['df'].loc[PRODUCT_CODE,SFCColumnName['Value'][ACC_TITLE]]+=SFCValue[ACC_TITLE]
# + tags=["outputPrepend"]
#tacaia2.index.get_level_values('MAT_NO')[0]==' ' #因为排序后第一行[0]为空行,所以迭代时从第二行[1]开始(上面已删除空行,不用从第二行开始了)
OTHER_LOST_VALUE=0.00
n=0
for mat in tacaia2.index.get_level_values('MAT_NO'):
for acc in ['01','33','03','31','50','51']:
print('No.' + str(n))
n=n+1
try:
print(tacaia2.loc[(acc,mat),'KEY_SEQ'])
if acc=='01':
tacaia2.loc[(acc,mat),['MAT_VALUE','MAT_PRICE','LOST_VALUE','LOST_PRICE']]=list(ProducePrices(tacaia2.loc[(acc,mat),'IN_MAT_NO'][0],acc,tacaia2.loc[(acc,mat),'MAT_TYPE'][0],tacaia2.loc[(acc,mat),'MAT_WT'][0],tacaia2.loc[(acc,mat),'MTRL_NO'][0]).values())
if acc=='33':
if tacaia2.loc[(acc,mat),'DIVVY_FLAG'][0]=='0':
INPUT_VALUE=list(InputPrices(mat,tacaia2.loc[(acc,mat),'PRODUCT_CODE'][0],tacaia2.loc[(acc,mat),'MTRL_NO'][0],tacaia2.loc[(acc,mat),'MAT_WT'][0],acc).values())
tacaia2.loc[(acc,mat),['MAT_VALUE','MAT_PRICE','LOST_VALUE','LOST_PRICE']]=INPUT_VALUE[:4]
OTHER_LOST_VALUE=OTHER_LOST_VALUE+INPUT_VALUE[4]
if acc=='03':
if tacaia2.loc[(acc,mat),'DIVVY_FLAG'][0]=='0':
tacaia2.loc[(acc,mat),['MAT_VALUE','MAT_PRICE','LOST_VALUE','LOST_PRICE']]=list(ProducePrices(tacaia2.loc[(acc,mat),'IN_MAT_NO'][0],acc,tacaia2.loc[(acc,mat),'MAT_TYPE'][0],tacaia2.loc[(acc,mat),'MAT_WT'][0],tacaia2.loc[(acc,mat),'MTRL_NO'][0]).values())
if acc=='31':
INPUT_VALUE=list(InputPrices(mat,tacaia2.loc[(acc,mat),'PRODUCT_CODE'][0],tacaia2.loc[(acc,mat),'MTRL_NO'][0],tacaia2.loc[(acc,mat),'MAT_WT'][0],acc).values())
tacaia2.loc[(acc,mat),['MAT_VALUE','MAT_PRICE','LOST_VALUE','LOST_PRICE']]=INPUT_VALUE[:4]
OTHER_LOST_VALUE=OTHER_LOST_VALUE+INPUT_VALUE[4]
if acc in ['50','51']:
tacaia2.loc[(acc,mat),['SALE_VALUE','SALE_PRICE']]=list(SalePrices(tacaia2.loc[(acc,mat),'PRODUCT_CODE'][0],tacaia2.loc[(acc,mat),'MAT_WT'][0]).values())
WriteSFC(tacaia2.loc[(acc,mat),'PRODUCT_CODE'][0],acc,tacaia2.loc[(acc,mat),'MAT_VALUE'][0],tacaia2.loc[(acc,mat),'LOST_VALUE'][0],tacaia2.loc[(acc,mat),'SALE_VALUE'][0],tacaia2.loc[(acc,mat),'MAT_WT'][0])
except:
continue
tacacmm.iloc[tacacmm['CRRT_OTPT_ACT_AMT'].rank(ascending=False)[0],tacacmm.columns.get_loc('CRRT_OTPT_ACT_AMT')]+=OTHER_LOST_VALUE
# -
# Export the fully priced TACAIA2 rows for review.
tacaia2.to_excel('tacaia2-202007-r4.xlsx',merge_cells=False)
# + tags=[]
print(tacaia2.loc[('01','B1804068AJAAA'),'KEY_SEQ'])
# -
# Ad-hoc spot checks of the pricing functions on individual materials.
list(ProducePrices(tacaia2.loc[('01','B1804068AJAAA'),'IN_MAT_NO'][0],'01',tacaia2.loc[('01','B1804068AJAAA'),'MAT_TYPE'][0],tacaia2.loc[('01','B1804068AJAAA'),'MAT_WT'][0],tacaia2.loc[('01','B1804068AJAAA'),'MTRL_NO'][0]).values())
list(InputPrices('B2007077AC','10068Z10993','6',284.52,'31').values())[:5]
tacaia2.loc[('31','B1804068AJAA'),'PRODUCT_CODE'][0]
tacaia2.head()
tacaia2.loc[('31','B1804068AJAA'),['MAT_VALUE','MAT_PRICE','LOST_VALUE','LOST_PRICE']]=list(InputPrices('B1804068AJAA','10033Z3124','3',122.96,'31').values())[:4]
# BUG FIX: DataFrame.ix was deprecated and removed in pandas >= 1.0; use
# positional access on the column instead (same value: row position 1).
tacaia2['IN_MAT_NO'].iloc[1]
| CostCalV0.1-20200616.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="aRcvXhinEhni"
# # Data Preparation for the Visualizer
# -
# The primary purpose of this notebook is to ensure that changes to the cleaned data files (ie. clean_northwestern.csv) do not automatically affect the visualizer. The copies of the data files used by the visualizer can only be altered by running this notebook.
#
# Additionally, some extra columns may be added that are helpful for the visualizer.
import requests
import pandas as pd
import os
# Load the cleaned Northwestern dataset for the visualizer.
nw = pd.read_csv('../../data/cleaned/clean_northwestern.csv')
pd.set_option("display.max.columns", None)
# For each (person, offense-date) incident, carry the per-incident maximum of
# 'sex' and 'murder' onto every row of that incident.
incident_keys = ['Person ID', 'Offense Date']
for target_col, source_col in [('Incident Sex', 'sex'), ('Incident Murder', 'murder')]:
    nw[target_col] = nw.groupby(incident_keys)[source_col].transform('max')
# Write the visualizer's private copy, overwriting any previous version.
nw_file = nw.to_csv('../../data/cleaned/visualizer_northwestern.csv', index=False)
| analyses/notebooks/prepare_visualizer_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#move this notebook to folder above syndef to run
from syndef import synfits #import synestia snapshot (impact database)
import numpy as np
import matplotlib.pyplot as plt
# Evaluation grid for the analytic density model.
test_rxy=np.linspace(7e6,60e6,100) #m
test_z=np.linspace(0.001e6,30e6,50) #m
rxy=np.log10(test_rxy/1e6) #Mm log10
z=np.log10(test_z/1e6) #Mm log10
TESTRXY,TESTZ=np.meshgrid(test_rxy,test_z) #2-D grid of rxy, z for color plot
#y=np.zeros(np.shape(rxy)) #array of zeros for residual fit
#rho1=synfits.resfuncspl(synfits.SNAP_Canup.rhomidfit[1],rxy,y)
#rho2=synfits.resfuncspl(synfits.SNAP_CukStewart.rhomidfit[1],rxy,y)
#rho3=synfits.resfuncspl(synfits.SNAP_Quintana.rhomidfit[1],rxy,y)
# log10 densities of the outer-region particles for the three impact snapshots.
snaprho1=np.log10(synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer])
snaprho2=np.log10(synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer])
snaprho3=np.log10(synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer])
# log10 densities of the outer midplane particles.
snaprhomid1=np.log10(synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer_mid])
snaprhomid2=np.log10(synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer_mid])
snaprhomid3=np.log10(synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer_mid])
# Corresponding cylindrical radii (log10 Mm) for outer and midplane particles.
snaprxy1=np.log10(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer]/1e6)
snaprxy2=np.log10(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer]/1e6)
snaprxy3=np.log10(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer]/1e6)
snaprxymid1=np.log10(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer_mid]/1e6)
snaprxymid2=np.log10(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer_mid]/1e6)
snaprxymid3=np.log10(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer_mid]/1e6)
# Vertical coordinates (log10 Mm) of the outer particles.
snapz1=np.log10(synfits.SNAP_Canup.z[synfits.SNAP_Canup.ind_outer]/1e6)
snapz2=np.log10(synfits.SNAP_CukStewart.z[synfits.SNAP_CukStewart.ind_outer]/1e6)
snapz3=np.log10(synfits.SNAP_Quintana.z[synfits.SNAP_Quintana.ind_outer]/1e6)
# Tunable constants of the power-law density model (see inline ranges).
const1=10.5#10 to 11; 10.55 (fiducial)
const2=0.86#0.85 to 0.9; 0.86 (fiducial)
const3=1e38 #0.9e35 (fiducial) / 1.5e33 (underestimate) / 1.1e37 (cross) / 1e38 (overestimate)
const4=-5.1 #-4.7 (fiducial) / -4.5 (underestimate) / -5 (cross) / -5.1 (overestimate)
# Model: power-law midplane density with a Gaussian vertical profile whose
# scale height is itself a power law in rxy.
test_z_s=const1*np.power(TESTRXY,const2) #scale height fit in m
test_rho_g=const3*np.power(TESTRXY,const4)*np.exp(-np.power(TESTZ/test_z_s,2))
test_rho_gmid=const3*np.power(test_rxy,const4)
# Figure 1: midplane density vs radius -- snapshot particles (red dots)
# against the power-law model (black line), one panel per impact scenario.
plt.figure(figsize=(16,5))
plt.subplot(131)
#plt.plot(rxy,rho1,'b')
plt.plot(snaprxymid1,snaprhomid1,'r.')
plt.plot(np.log10(test_rxy/1e6),np.log10(test_rho_gmid),'k')
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log midplane density (kg/m$^3$)')
plt.title('Canup')
plt.xlim([.8,2])
plt.ylim([-2,3])
plt.subplot(132)
#plt.plot(rxy,rho2,'b')
plt.plot(snaprxymid2,snaprhomid2,'r.')
plt.plot(np.log10(test_rxy/1e6),np.log10(test_rho_gmid),'k')
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log midplane density (kg/m$^3$)')
plt.title('Cuk and Stewart')
plt.xlim([.8,2])
plt.ylim([-2,3])
plt.subplot(133)
#plt.plot(rxy,rho3,'b')
plt.plot(snaprxymid3,snaprhomid3,'r.')
plt.plot(np.log10(test_rxy/1e6),np.log10(test_rho_gmid),'k')
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log midplane density (kg/m$^3$)')
plt.title('Quintana')
plt.xlim([.8,2])
plt.ylim([-2,3])
plt.show()
plt.close()
# Figure 2: density in the (rxy, z) plane -- model as the background color
# map, snapshot particles overplotted with the same color scale.
plt.figure(figsize=(16,5))
plt.subplot(131)
plt.pcolor(np.log10(TESTRXY/1e6),np.log10(TESTZ/1e6),np.log10(test_rho_g))
plt.scatter(snaprxy1,snapz1,c=snaprho1)
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log z (Mm)')
plt.colorbar(label='log density (kg/m$^3$)')
plt.xlim([.8,2])
plt.subplot(132)
plt.pcolor(np.log10(TESTRXY/1e6),np.log10(TESTZ/1e6),np.log10(test_rho_g))
plt.scatter(snaprxy2,snapz2,c=snaprho2)
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log z (Mm)')
plt.colorbar(label='log density (kg/m$^3$)')
plt.xlim([.8,2])
plt.subplot(133)
plt.pcolor(np.log10(TESTRXY/1e6),np.log10(TESTZ/1e6),np.log10(test_rho_g))
plt.scatter(snaprxy3,snapz3,c=snaprho3)
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log z (Mm)')
plt.colorbar(label='log density (kg/m$^3$)')
plt.xlim([.8,2])
plt.show()
plt.close()
# -
| synestia-book/docs/syndef/.ipynb_checkpoints/PowerLawFits-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # <center> 数值计算方法 - Python实现</center>
# <center>(Numerical Computational Methods based on Python Language)</center>
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#序言" data-toc-modified-id="序言-1"><span class="toc-item-num">1 </span>序言</a></span><ul class="toc-item"><li><span><a href="#课程目标" data-toc-modified-id="课程目标-1.1"><span class="toc-item-num">1.1 </span>课程目标</a></span></li><li><span><a href="#先修内容" data-toc-modified-id="先修内容-1.2"><span class="toc-item-num">1.2 </span>先修内容</a></span></li><li><span><a href="#内容组织" data-toc-modified-id="内容组织-1.3"><span class="toc-item-num">1.3 </span>内容组织</a></span></li><li><span><a href="#怎么阅读和学习本文档" data-toc-modified-id="怎么阅读和学习本文档-1.4"><span class="toc-item-num">1.4 </span>怎么阅读和学习本文档</a></span></li><li><span><a href="#为什么选择Python作为本课程的语言?" data-toc-modified-id="为什么选择Python作为本课程的语言?-1.5"><span class="toc-item-num">1.5 </span>为什么选择Python作为本课程的语言?</a></span></li><li><span><a href="#Python和其中package的版本" data-toc-modified-id="Python和其中package的版本-1.6"><span class="toc-item-num">1.6 </span>Python和其中package的版本</a></span></li></ul></li><li><span><a href="#致谢" data-toc-modified-id="致谢-2"><span class="toc-item-num">2 </span>致谢</a></span></li></ul></div>
# -
# ## 序言
# 本课程的文档都开源公布在一下网址:
# > [https://gitee.com/zhoumaoying/numerical_analysis/tree/master/note-py](https://gitee.com/zhoumaoying/numerical_analysis/tree/master/note-py)
#
# 大家有两种方式对该课程的内容进行学习:
# - 直接在网页上选定对应章节的内容,在网页端打开。这种方式可能会使文档的格式有所变化,且无法执行文档中嵌入的代码。
# - 从网页上下载所有的.ipynb文件(注意要包含所有的文件夹的内容,本文中的图片都是利用相对路径方式添加)。这种方式能够自由修改文档(但请提前保存),而且能够实时执行文档中嵌入的代码,前提是正确在电脑上配置了python开发环境。
# ### 课程目标
# 本文档主要配合针对本科生开设的《数值计算方法》课程。本文档的主要内容参考了开源文档[Python Programming and Numerical Methods - A Guide for Engineers and Scientists](https://pythonnumericalmethods.berkeley.edu/notebooks/Index.html) (参见[Berkeley Python Numerical Methods](https://pythonnumericalmethods.berkeley.edu/notebooks/Index.html) )目前阶段,本文档主要是该开源文档的中文翻译和简化,以适应本课程的学习。在未来的时间里,本文档的最终目标是能够完全自主化。
# 本教程主要讲解利用python编程语言进行数值计算方法课程的教学,其目的有三:
# + 一是简单介绍python编程语言及其在工程实践和科学研究中的应用;
# + 二是结合python语言的特性更好地展现数值计算方法的内容;
# + 三是通过将程序编写、运行和结果可视化融入到数值计算方法的讲解中,更好地讲解计算机在数值计算中的应用。
#
# 从心理上讲,本课程还有一个比较远大似乎不切实际的目标,就是希望能够带着同学们适应计算机化的工业时代,也就是熟悉利用计算机表达真实世界的各种方法,掌握运用计算机处理真实世界问题的各种途径,适应在工程中运用计算机工具从事各种工作。
# ### 先修内容
# 本文档的写作初衷是向无任何背景知识的同学介绍基于编程的数值计算方法。尽管如此,我们还是希望同学们在学习本课程的时候能够具备一些基本知识,包括
# + 基础的计算机编程操作,如命令行工具(command line tool),bash操作等
# + 基本的windows操作系统知识,如环境变量,文档结构等
# + 基本的线性代数
# + 微积分基础知识
#
# 在未来的文档更新中,我们可能会更新一些基础知识的小短文。
#
# ### 内容组织
# 本文档总的来说分为两个部分:第一部分以python语言为例介绍了基本的编程概念;第二部分以python为载体介绍工程常用的数值计算方法。
# 第一部分包括第01章到第13章,分别包含以下内容
# + 第01章:介绍Python语言和Jupyter Notebook,以便于后期文档的阅读,操作,以及日常作业的提交等
# + 第02章:Python中的变量和基本数据结构 (Variables and Data Structures)
# + 第03章:Python中函数的定义与使用(Function Definitions and Calls)
# + 第04章:Python中的逻辑操作与程序分支(Logic Operations and Program Branching)
# + 第05章:Python中的迭代(Iteration)
# + 第06章:Python中的递归(Recursion)
# + 第07章:Python中的面向对象编程(Object Oriented Programming, OOP)
# + 第08章:程序和算法的复杂度(Time and Space Complexity)
# + 第09章:数的计算机表示(Representation of Numbers)
# + 第10章:程序错误,编程规范与程序调试(Errors, Good Programming Practices, and Debugging)
# + 第11章:数据读写操作(I/O and Documents)
# + 第12章:可视化与数据做图(Visualization and Plotting)
# + 第13章:Python中程序的并行化(Parallelization)
# 第二部分包括第14章到第25章,分别包含以下内容
# + 第14章:线性代数与线性方程组(Systems of Linear Equations)
# + 第15章:特征值与特征向量(Eigenvalues and Eigenvectors)
# + 第16章:逼近与插值(Approximations and Interpolation)
# + 第17章:逼近与最小二乘拟合(Approximations and Least-Square Fitting)
# + 第18章:级数(Series)
# + 第19章:非线性方程求解(Roots of Nonlinear Functions)
# + 第20章:数值微分(Numerical Differentiation)
# + 第21章:数值积分(Numerical Integration)
# + 第22章:常微分方程-初始值问题(Ordinary Differential Equations - Initial Value Problems)
# + 第23章:常微分方程-边界值问题(Ordinary Differential Equations - Boundary Value Problems
# + 第24章:傅立叶变换和谱方法(Fourier Transform and Spectral Method)
# + 第25章:机器学习简介(An Introduction to Machine Learning)
#
# 注意到:理想而言,数值计算方法应该需要构成一个利用计算机语言解决工程问题的课程群:
# + 数学建模 (Mathematical Modeling in Engineering Applications)
# + 算法设计与分析 (Algorithm Design and Analysis)
# + 计算机编程 (Programming)
# + 数值计算与分析(有限精度计算,Finite Precision Computation)
# + 高性能计算 (High Performance Computation: Parallelization; Large-Scale Computation)
# + 可视化(Visualization; Discretization; Computer Graphics)
#
# 但是构建这么一个庞大的课程群并非易事,只能留到后面徐图之。
# ### 怎么阅读和学习本文档
# 这里其实分为两个部分:关于编程的部分和关于数值计算的部分。
# 针对编程部分,最好的方法就是不断练习和熟悉。
# 1. 在浏览本文档的过程中,希望你能够借助Jupyter notebook 不断地联系文档中给出的各个示例。记住,读10遍文档不如按照文档敲代码1遍
# 2. 文档的内容还是需要配合教师的课堂讲解进行理解。希望大家能够相互比较借鉴,从而能够提高学习效率。
# 针对数值计算方法部分,最好的办法是不断应用
# 1. 数值计算方法都需要针对特定的数学问题进行设计,这个一定要牢记
# 2. 本质上来说,数值计算方法的内容可以分为一下几个部分:数值线性代数、数值求根、数值近似、数值微分方程
# 3. 任何理论学习一定要结合具体问题进行分析。数值计算方法的发展是应工程问题解决过程中所需要了解的问题而产生的。所以我们一定要知道不同的方法的来龙去脉
# 4. 有机会的话,希望能够在日常的学习和工作过程中对数值计算方法进行应用
# ### 为什么选择Python作为本课程的语言?
# 实际上,从最早的编程语言以来,大量的语言都可以用进行数值计算,简单列举如下:
# + Fortran
# + C/C++
# + Matlab
# + Python
# + Julia
# + Mathematica
# + Maple
#
# 这些语言各有特点。本课程采用python的主要理由是Jupyter Notebook的优良特性,以及在现代化过程中丰富的packages。
# 从学习和应用的角度,不同语言之间的区别没那么大,主要是语法和编程方式的区别。因此希望大家以某一种语言作为主要载体,对其他语言大略了解其基本特征即可。
# ### Python和其中package的版本
# Python 版本:Python 3.9.10 (main, Jan 15 2022, 11:48:04)
# Python中所使用的主要package的版本:(pip3 list)
#
# + jupyter - 4.9.2
# + ipython - 8.1.0
# + numpy - 1.16.4
# + scipy - 1.8.0
# + matplotlib - 3.5.1
# + pandas - 1.3.5
# ## 致谢
# 本文档在编制过程中,得到了选修我主讲的《数值计算方法课程》的各班学生帮助,在此一并感谢并列举如下:
#
| note_jupyter/mc_numerics_ch00_index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Evaluate
# Can seasonal forecasts be used as 'alternate' realities? Here we show how a set of evaluation metrics can be used to answer this question. The evaluation metrics are available through an [R package](https://github.com/timokelder/UNSEEN) for easy evaluation of the UNSEEN ensemble. Here, we illustrate how this package can be used in the UNSEEN workflow. We will evaluate the generated UNSEEN ensemble of UK February precipitation and of MAM Siberian heatwaves.
#
# The framework to evaluate the UNSEEN ensemble presented here consists of testing the ensemble member independence, model stability and model fidelity, see also [NPJ preprint](https://eartharxiv.org/hyxeq/).
#
# <div class="alert alert-info">
#
# Note
#
# This is R code and not python!
#
# We switch to R since we believe R has a better functionality in extreme value statistics.
#
# </div>
# We load the UNSEEN package and read in the data.
# + nbsphinx="hidden"
setwd('../../..')
getwd()
# -
library(UNSEEN)
# The data that is imported here are the files stored at the end of the [preprocessing step](../2.Preprocess/2.Preprocess.ipynb).
#
# Siberia: SEAS5 (UNSEEN ensemble) and ERA5 (observations) event series for
# the full domain and for a zoomed domain centred on the 2020 heatwave.
SEAS5_Siberia_events <- read.csv("Data/SEAS5_Siberia_events.csv", stringsAsFactors=FALSE)
ERA5_Siberia_events <- read.csv("Data/ERA5_Siberia_events.csv", stringsAsFactors=FALSE)
SEAS5_Siberia_events_zoomed <- read.csv("Data/SEAS5_Siberia_events_zoomed.csv", stringsAsFactors=FALSE)
ERA5_Siberia_events_zoomed <- read.csv("Data/ERA5_Siberia_events_zoomed.csv", stringsAsFactors=FALSE)
# Convert 2-metre temperature from Kelvin to degrees Celsius.
SEAS5_Siberia_events$t2m <- SEAS5_Siberia_events$t2m - 273.15
ERA5_Siberia_events$t2m <- ERA5_Siberia_events$t2m - 273.15
SEAS5_Siberia_events_zoomed$t2m <- SEAS5_Siberia_events_zoomed$t2m - 273.15
ERA5_Siberia_events_zoomed$t2m <- ERA5_Siberia_events_zoomed$t2m - 273.15
head(SEAS5_Siberia_events_zoomed,n = 3)
head(ERA5_Siberia_events, n = 3)
EOBS_UK_weighted_df <- read.csv("Data/EOBS_UK_weighted_upscaled.csv", stringsAsFactors=FALSE)
SEAS5_UK_weighted_df <- read.csv("Data/SEAS5_UK_weighted_masked.csv", stringsAsFactors=FALSE)
# And then convert the time class to Date format, with the ymd function in lubridate:
# +
# Parse the time columns into Date objects with lubridate::ymd.
EOBS_UK_weighted_df$time <- lubridate::ymd(EOBS_UK_weighted_df$time)
str(EOBS_UK_weighted_df)
# Restrict EOBS to the SEAS5 hindcast period (Feb 1982 - Feb 2016).
EOBS_UK_weighted_df_hindcast <- EOBS_UK_weighted_df[
    EOBS_UK_weighted_df$time > '1982-02-01' &
    EOBS_UK_weighted_df$time < '2017-02-01',
]
SEAS5_UK_weighted_df$time <- lubridate::ymd(SEAS5_UK_weighted_df$time)
str(SEAS5_UK_weighted_df)
# -
# ## Timeseries
#
# Here we plot the timeseries of SEAS5 (UNSEEN) and ERA5 (OBS) for the entire domain and a zoomed domain for the Siberian Heatwave.
# Ensemble-vs-observations timeseries over the full Siberian domain.
unseen_timeseries(
    ensemble = SEAS5_Siberia_events,
    obs = ERA5_Siberia_events,
    ensemble_yname = "t2m",
    ensemble_xname = "year",
    obs_yname = "t2m",
    obs_xname = "year",
    ylab = "MAM Siberian temperature (C)")
# Same plot for the zoomed domain used in the rest of the analysis.
unseen_timeseries(
    ensemble = SEAS5_Siberia_events_zoomed,
    obs = ERA5_Siberia_events_zoomed,
    ensemble_yname = "t2m",
    ensemble_xname = "year",
    obs_yname = "t2m",
    obs_xname = "year",
    ylab = "MAM Siberian temperature (C)")
# This shows that there is a temperature trend over the entire domain. Here we will continue with the 'zoomed' domain because it better describes the 2020 event.
#
# The timeseries consist of hindcast (years 1982-2016) and archived forecasts (years 2017-2020). The datasets are slightly different: the hindcasts contains 25 members whereas operational forecasts contain 51 members, the native resolution is different and the dataset from which the forecasts are initialized is different.
#
# **For the evaluation of the UNSEEN ensemble we want to only use the SEAS5 hindcasts for a consistent dataset.** Note, 2017 is not used in either the hindcast nor the operational dataset in this example, since it contains forecasts both initialized in 2016 (hindcast) and 2017 (forecast), see [retrieve](../1.Download/1.Retrieve.ipynb).
# We split SEAS5 into hindcast and operational forecasts:
# +
# Hindcasts: initialisations before 2017, members 0-24 (the 25 hindcast members).
SEAS5_Siberia_events_zoomed_hindcast <- SEAS5_Siberia_events_zoomed[
    SEAS5_Siberia_events_zoomed$year < 2017 &
    SEAS5_Siberia_events_zoomed$number < 25,]
# Operational forecasts: 2018 onwards (2017 mixes hindcast and forecast
# initialisations and is excluded entirely, as explained above).
SEAS5_Siberia_events_zoomed_forecasts <- SEAS5_Siberia_events_zoomed[
    SEAS5_Siberia_events_zoomed$year > 2017,]
# -
# And we select the same years for ERA5.
ERA5_Siberia_events_zoomed_hindcast <- ERA5_Siberia_events_zoomed[
    ERA5_Siberia_events_zoomed$year < 2017 &
    ERA5_Siberia_events_zoomed$year > 1981,]
# +
# Hindcast-only timeseries for the zoomed Siberian domain (consistent dataset).
unseen_timeseries(
    ensemble = SEAS5_Siberia_events_zoomed_hindcast,
    obs = ERA5_Siberia_events_zoomed_hindcast,
    ensemble_yname = "t2m",
    ensemble_xname = "year",
    obs_yname = "t2m",
    obs_xname = "year",
    ylab = "MAM Siberian temperature")
# -
# Operational-forecast years only (2018 onwards).
unseen_timeseries(
    ensemble = SEAS5_Siberia_events_zoomed_forecasts,
    obs = ERA5_Siberia_events_zoomed[ERA5_Siberia_events_zoomed$year > 2017,],
    ensemble_yname = "t2m",
    ensemble_xname = "year",
    obs_yname = "t2m",
    obs_xname = "year",
    ylab = "MAM Siberian temperature")
# For the UK we have a longer historical record available from EOBS:
unseen_timeseries(ensemble = SEAS5_UK_weighted_df,
                  obs = EOBS_UK_weighted_df,
                  ylab = 'UK February precipitation (mm/d)')
# Same comparison restricted to the hindcast period.
unseen_timeseries(ensemble = SEAS5_UK_weighted_df,
                  obs = EOBS_UK_weighted_df_hindcast,
                  ylab = 'UK February precipitation (mm/d)')
# Call the documentation of the function with `?unseen_timeseries`
# ## Independence
# Significance ranges need fixing + detrend method (Rob)
# Test ensemble-member independence across the first 3 lead times.
independence_test(
    ensemble = SEAS5_Siberia_events,
    n_lds = 3,
    var_name = "t2m",
)
independence_test(
    ensemble = SEAS5_Siberia_events_zoomed,
    n_lds = 3,
    var_name = "t2m",
)
# NOTE(review): SEAS5_UK is not defined anywhere in this notebook (only
# SEAS5_UK_weighted_df is) -- presumably left over from an earlier session;
# confirm whether SEAS5_UK_weighted_df was intended here.
independence_test(ensemble = SEAS5_UK)
# ## Stability
#
# For the stability test we assess whether the events get more severe with leadtime, due to a potential 'drift' in the model. We need to use the consistent hindcast dataset for this.
stability_test(
    ensemble = SEAS5_Siberia_events_zoomed_hindcast,
    lab = 'MAM Siberian temperature',
    var_name = 't2m'
)
# NOTE(review): SEAS5_UK is not defined in this notebook (see the
# independence test above) -- confirm the intended object.
stability_test(ensemble = SEAS5_UK, lab = 'UK February precipitation (mm/d)')
# ## Fidelity
# Compare the model climate with observations (no bias correction).
fidelity_test(
    obs = ERA5_Siberia_events_zoomed_hindcast$t2m,
    ensemble = SEAS5_Siberia_events_zoomed_hindcast$t2m,
    units = 'C',
    biascor = FALSE
)
# Let's apply an additive bias correction
#
# +
# Shift the ensemble so its mean matches the observed mean, then re-test.
obs = ERA5_Siberia_events_zoomed_hindcast$t2m
ensemble = SEAS5_Siberia_events_zoomed_hindcast$t2m
ensemble_biascor = ensemble + (mean(obs) - mean(ensemble))
fidelity_test(
    obs = obs,
    ensemble = ensemble_biascor,
    units = 'C',
    biascor = FALSE
)
# -
fidelity_test(obs = EOBS_UK_weighted_df_hindcast$rr, ensemble = SEAS5_UK_weighted_df$tprate)
# To include a mean-bias correction, set `biascor = TRUE`:
fidelity_test(obs = EOBS_UK_weighted_df_hindcast$rr, ensemble = SEAS5_UK_weighted_df$tprate, biascor = TRUE)
?fidelity_test
# # Illustrate
# Here we use extreme value theory (EVT) to fit extreme value distributions to the SEAS5 (UNSEEN) and ERA5 (observed) data.
#
# To see example applications, have a look at the examples:
#
# * [Siberian Heatwave](https://unseen-open.readthedocs.io/en/latest/Notebooks/examples/Siberian_Heatwave.html)
# * [California fires](https://unseen-open.readthedocs.io/en/latest/Notebooks/examples/California_Fires.html)
# * [UK Precipitation](https://unseen-open.readthedocs.io/en/latest/Notebooks/examples/UK_Precipitation.html)
#
# We define a function to plot the extreme value distributions:
# +
library(extRemes)
library(ggplot2)
library(ggpubr)
# Plot return-period curves: an extreme-value fit (with 95% CI ribbon) to
# the observations versus the empirical UNSEEN ensemble datapoints.
#
# obs      : numeric vector of observed annual extremes
# ensemble : numeric vector of UNSEEN (model) extremes
# GEV_type : distribution passed to extRemes::fevd ("GEV", "Gumbel", ...)
# main     : plot title
# y_lab    : y-axis label
# ylim     : optional finite upper y-limit; NA (default) leaves the axis free
EVT_plot <- function(obs, ensemble, GEV_type, main, y_lab = "February average precipitation (mm/day)", ylim = NA) {
  ## We plot the GEV distribution for ERA5 and empirical data for SEAS5
  fit_obs <- fevd(
    x = obs, threshold = NULL, threshold.fun = ~1, location.fun = ~1,
    scale.fun = ~1, shape.fun = ~1, use.phi = FALSE,
    type = GEV_type, method = "MLE", initial = NULL, # type= c("GEV", "GP", "PP", "Gumbel", "Exponential"), method= c("MLE", "GMLE", "Bayesian", "Lmoments")
    span = NULL, units = NULL, time.units = "days", period.basis = "year", ## time and period only important for labelling and do not influence the calculation
    na.action = na.fail, optim.args = NULL, priorFun = NULL,
    priorParams = NULL, proposalFun = NULL, proposalParams = NULL,
    iter = 9999, weights = 1, blocks = NULL, verbose = FALSE
  )
  ## Now calculate the return levels and their confidence intervals for each return period within rperiods
  rperiods <- c(seq(from = 1.01, to = 1.5, by = 0.1), 1.7, 2, 3, 5, 10, 20, 50, 80, 100, 120, 200, 250, 300, 500, 800, 2000, 5000)
  rvs_obs <- ci.fevd(fit_obs, alpha = 0.05, type = "return.level", return.period = rperiods, method = "normal")
  colnames(rvs_obs) <- c("Obs_l", "Obs", "Obs_h") # Rename the col
  GEV_obs <- data.frame(cbind(rvs_obs, rperiods)) ## Make a dataframe for ggplot
  ## Add the empirical data
  rp_obs <- length(obs) / 1:length(obs) ## these are the (empirical) return periods for the sorted datapoints
  obs_sorted <- sort(obs, decreasing = T) ## For example, the highest extreme has a rp of 35 years, the second highest 17.5, third highest 11.7 etc.
  datapoints_obs <- data.frame(cbind(rp_obs, obs_sorted))
  rp_S5 <- length(ensemble) / 1:length(ensemble) # SEAS5 has return periods up to 3800 years
  ensemble_sorted <- sort(ensemble, decreasing = T)
  datapoints_S5 <- data.frame(cbind(rp_S5, ensemble_sorted))
  ## And plot
  cols <- c("UNSEEN" = "black", "OBS " = "blue") ## for the legend (note: "OBS " key carries a trailing space on purpose)
  ggplot(data = datapoints_S5, aes(x = rp_S5)) +
    geom_point(aes(y = ensemble_sorted, col = "UNSEEN"), alpha = 0.5, size = 1) +
    geom_ribbon(data = GEV_obs, aes(ymin = Obs_l, ymax = Obs_h, x = rperiods, fill = "OBS "), alpha = 0.1) +
    geom_point(data = datapoints_obs, aes(x = rp_obs, y = obs_sorted, col = "OBS "), size = 1) +
    scale_x_continuous(trans = "log10") +
    scale_fill_manual(name = "Data", values = cols) +
    scale_colour_manual(name = NULL, values = cols) +
    theme_classic() +
    theme(
      legend.position = c(.95, .05),
      legend.justification = c("right", "bottom"),
      legend.box.just = "right",
      legend.title = element_blank(),
      text = element_text(size = 11),
      axis.text = element_text(size = 11)
    ) +
    labs(title = main, y = y_lab, x = "Return period (years)") +
    # `+ if (...)` works because `if` without else yields NULL when FALSE,
    # and adding NULL to a ggplot is a no-op; with the default ylim = NA,
    # is.finite(NA) is FALSE and the axis is left free.
    if (is.finite(ylim)) {
      coord_cartesian(ylim = c(NA, ylim))
    }
}
# -
# First, we fit a gumber and a GEV distribution (including shape parameter) to the observed extremes over Siberia. With a likelihood ratio test we show that the Gumbel distribution best describes the data.
# Fit both a Gumbel (shape fixed at 0) and a full GEV to the observed
# extremes; the likelihood-ratio test checks whether the extra shape
# parameter is statistically justified.
fit_obs_Gumbel <- fevd(x = ERA5_Siberia_events_zoomed_hindcast$t2m,
                       type = "Gumbel"
)
fit_obs_GEV <- fevd(x = ERA5_Siberia_events_zoomed_hindcast$t2m,
                    type = "GEV"
)
# H0: the simpler Gumbel model is sufficient.
lr.test(fit_obs_Gumbel, fit_obs_GEV)
# We show the gumbel plot for the observed (ERA5) and UNSEEN (SEAS5 hindcast data). This shows that the UNSEEN simulations are not within the uncertainty range of the observations. This has likely two reasons, illustrated in the evaluation section: there is some dependence between the events and there is too little variability within the UNSEEN ensemble.
# +
options(repr.plot.width = 12)
# Gumbel plots for the raw hindcast (a) and the additively bias-corrected
# hindcast (b), arranged side by side with a shared legend.
GEV_hindcast <- EVT_plot(ensemble = SEAS5_Siberia_events_zoomed_hindcast$t2m,
                         obs = ERA5_Siberia_events_zoomed_hindcast$t2m,
                         main = "Gumbel fit",
                         GEV_type = "Gumbel",
                         ylim = 3,
                         y_lab = 'MAM Siberian temperature (C)'
)
GEV_hindcast_corrected <- EVT_plot(ensemble = ensemble_biascor, #SEAS5_Siberia_events_zoomed_hindcast$t2m,
                                   obs = ERA5_Siberia_events_zoomed_hindcast$t2m,
                                   main = "Additive correction",
                                   GEV_type = "Gumbel",
                                   ylim = 3,
                                   y_lab = 'MAM Siberian temperature (C)'
)
ggarrange(GEV_hindcast, GEV_hindcast_corrected,
          labels = c("a", "b"), # ,"c","d"),
          common.legend = T,
          font.label = list(size = 10, color = "black", face = "bold", family = NULL),
          ncol = 2, nrow = 1
)
# GEV_hindcast
# GEV_hindcast_corrected
# -
# So what can we get out of it? What if we look at the operational forecast? Even if we cannot use the dataset as a whole to estimate the likelihood of occurrence, have events similar to the 2020 event occurred?
#
# We select all archived SEAS5 (UNSEEN) events and all ERA5 (observed) events except for the 2020 event as reference.
# Split ERA5 into the reference record (all years but the last row) and
# the 2020 event itself (the last row).
# Fix: the original used `1:length(x)-1`, which `:`'s higher precedence
# parses as `(1:n) - 1`, i.e. 0:(n-1); it only worked because R silently
# drops index 0. The explicit parenthesization below selects rows
# 1..(n-1) as intended, with identical result.
ERA5_Siberia_events_zoomed_min1 <- ERA5_Siberia_events_zoomed[1:(length(ERA5_Siberia_events_zoomed$t2m) - 1),]
ERA5_Siberia_events_zoomed_2020 <- ERA5_Siberia_events_zoomed[length(ERA5_Siberia_events_zoomed$t2m),]
# ERA5_Siberia_events_zoomed_min1
# ERA5_Siberia_events_zoomed_2020
# Gumbel plot of the operational forecasts against all ERA5 events, with
# the observed 2020 value overlaid as a horizontal reference line.
GEV_forecasts <- EVT_plot(ensemble = SEAS5_Siberia_events_zoomed_forecasts$t2m,
                          obs = ERA5_Siberia_events_zoomed$t2m,
                          main = "",
                          GEV_type = "Gumbel",
                          ylim = 3,
                          y_lab = 'MAM Siberian temperature (C)'
) # %>%
GEV_forecasts + geom_hline(yintercept = ERA5_Siberia_events_zoomed_2020$t2m)#,
# color = "black", linetype = "dashed", size = 1
# color = "black", linetype = "dashed", size = 1
# Same data with the full GEV fit (shape parameter left free):
GEV1 <- EVT_plot(ensemble = SEAS5_Siberia_events_zoomed_forecasts$t2m,
                 obs = ERA5_Siberia_events_zoomed$t2m,
                 main = "GEV",
                 GEV_type = "GEV",ylim = 3) # %>%
GEV1
# +
# Gumbel fit, stacked under the GEV panel above.
# NOTE(review): GEV1 uses the full ERA5 record while Gumbel1 uses only the
# hindcast subset as obs — confirm this asymmetry is intended.
Gumbel1 <- EVT_plot(ensemble = SEAS5_Siberia_events_zoomed_forecasts$t2m,
                    obs = ERA5_Siberia_events_zoomed_hindcast$t2m,
                    main = "Gumbel",
                    GEV_type = "Gumbel")
ggarrange(GEV1, Gumbel1,
          labels = c("a", "b"), # ,"c","d"),
          common.legend = T,
          font.label = list(size = 10, color = "black", face = "bold", family = NULL),
          ncol = 1, nrow = 2
) # %>%
# ggsave(filename = "graphs/Biascor.png",width =180,height = 180, units='mm',dpi=300)
# -
# And for the UK:
# +
# UK: GEV (a) vs Gumbel (b) fits of SEAS5 precipitation against the
# EOBS hindcast-period record.
GEV1 <- EVT_plot(ensemble = SEAS5_UK_weighted_df$tprate, obs = EOBS_UK_weighted_df_hindcast$rr, main = "GEV", GEV_type = "GEV") # %>%
Gumbel1 <- EVT_plot(ensemble = SEAS5_UK_weighted_df$tprate, obs = EOBS_UK_weighted_df_hindcast$rr, main = "Gumbel", GEV_type = "Gumbel") # %>%
ggarrange(GEV1, Gumbel1,
          labels = c("a", "b"), # ,"c","d"),
          common.legend = T,
          font.label = list(size = 10, color = "black", face = "bold", family = NULL),
          ncol = 1, nrow = 2
) # %>%
| doc/_build/.doctrees/nbsphinx/Notebooks/3.Evaluate/3.Evaluate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Folium Flask App
# folium_flask_app is a function for serving an interactive Folium map via Flask that binds map clicks with python functions.
# +
import webbrowser
from werkzeug.serving import run_simple
from dpd.folium import folium_flask_app
# Serve the interactive Folium map locally and open it in the default
# browser. webbrowser.open() is called before run_simple() because
# run_simple() blocks until the server is interrupted; the browser's
# request is answered once the server comes up.
hostname = "localhost"
port = 9000
app = folium_flask_app()
webbrowser.open('http://' + hostname + ':' + str(port))
run_simple(hostname, port, app)
# -
| docs/notebooks/folium_flask_app.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preliminaries
# import modules
import numpy as np
from sklearn.datasets import load_digits
... # your code here
# +
# base classes
class Node:
    """Bare container for tree nodes; attributes are attached dynamically.

    Split nodes carry `feature`, `threshold`, `left`, `right`;
    leaf nodes do not have a `feature` attribute.
    """
    pass


class Tree:
    """Base class for binary trees made of `Node` objects."""

    def __init__(self):
        # A new tree is a single leaf: its root has no split attributes yet.
        self.root = Node()

    def find_leaf(self, x):
        """Descend from the root, following split decisions, until a leaf.

        x: feature vector, indexed by each split node's `feature`.
        Returns the leaf `Node` that contains x.
        """
        current = self.root
        # A node is a split node exactly while it carries a 'feature' attribute.
        while hasattr(current, "feature"):
            go_left = x[current.feature] <= current.threshold
            current = current.left if go_left else current.right
        return current
# -
# # Density Tree
class DensityTree(Tree):
    """Density-estimation tree for a single digit class.

    Each leaf stores a constant density estimate (`response`); the class
    prior is multiplied in at prediction time.
    """

    def __init__(self):
        super(DensityTree, self).__init__()

    def train(self, data, prior, n_min=10):
        '''
        data: the feature matrix for the digit under consideration
        prior: the prior probability of this digit
        n_min: termination criterion (don't split if a node contains fewer instances)
        '''
        self.prior = prior
        N, D = data.shape
        D_try = int(np.sqrt(D)) # number of features to consider for each split decision

        # find and remember the tree's bounding box,
        # i.e. the lower and upper limits of the training feature set
        m, M = np.min(data, axis=0), np.max(data, axis=0)
        self.box = m.copy(), M.copy()

        # identify invalid features and adjust the bounding box
        # (If m[j] == M[j] for some j, the bounding box has zero volume,
        # causing divide-by-zero errors later on. We must exclude these
        # features from splitting and adjust the bounding box limits
        # such that invalid features have no effect on the volume.)
        valid_features = np.where(m != M)[0]
        invalid_features = np.where(m == M)[0]
        M[invalid_features] = m[invalid_features] + 1

        # initialize the root node
        self.root.data = data
        self.root.box = m.copy(), M.copy()

        # build the tree depth-first with an explicit stack
        stack = [self.root]
        while len(stack):
            node = stack.pop()
            n = node.data.shape[0] # number of instances in present node
            if n >= n_min:
                # Call 'make_density_split_node()' with 'D_try' randomly selected
                # indices from 'valid_features'. This turns 'node' into a split node
                # and returns the two children, which must be placed on the 'stack'
                # NOTE(review): np.random.choice samples WITH replacement here, so
                # the D_try candidate features may contain duplicates — consider
                # replace=False; verify against the intended algorithm.
                #Split two nodes
                left, right = make_density_split_node(node,N,np.random.choice(valid_features, D_try))
                stack.append(left)
                stack.append(right)
            else:
                # Call 'make_density_leaf_node()' to turn 'node' into a leaf node.
                make_density_leaf_node(node, N)

    def predict(self, x):
        """Return p(x | digit) * p(digit) if x lies inside the tree's
        bounding box, and 0 otherwise (zero density outside the box)."""
        m, M = self.box
        leaf = self.find_leaf(x)
        p = leaf.response*self.prior
        # return p(x | y) * p(y) if x is within the tree's bounding box
        # and return 0 otherwise
        if (np.all(x<=M) and np.all(x>=m)):
            return p
        else:
            return 0
def make_density_split_node(node, N, feature_indices):
    '''
    Turn 'node' into a split node of the density tree.

    node: the node to be split (carries .data and .box)
    N: the total number of training instances for the current class
    feature_indices: a numpy array of length 'D_try', containing the feature
                     indices to be considered in the present split

    Returns the two children (to be placed on the caller's stack).
    Raises ValueError if none of the candidate features admits a split.
    '''
    n, D = node.data.shape
    m, M = node.box

    # Volume of the parent cell.
    # (np.product was removed in NumPy 2.0; np.prod is the supported name.)
    V = np.prod(M - m)

    # find best feature j (among 'feature_indices') and best threshold t,
    # minimizing the leave-one-out (LOO) error of the density estimate
    e_min = float("inf")
    j_min, t_min = None, None
    for j in feature_indices:
        # Duplicate values must be removed before forming thresholds:
        # candidates are midpoints between consecutive *distinct* sorted
        # values, so repeated values would yield degenerate thresholds
        # equal to a data value.
        values = np.unique(node.data[:, j])  # np.unique also sorts
        if len(values) < 2:
            continue  # feature is constant in this node; no split possible
        thresholds = (values[1:] + values[:-1]) / 2.0

        X = node.data[:, j]
        for t in thresholds:
            # Count *instances* on each side of t (the original counted
            # unique values, which biases the LOO error when duplicate
            # feature values are present).
            Nl = int(np.sum(X < t))
            Nr = n - Nl
            # Split the parent volume proportionally along feature j.
            Vl = V * (t - m[j]) / (M[j] - m[j])
            Vr = V - Vl
            # LOO error of the piecewise-constant estimate, summed over
            # the two prospective children.
            loo_error = (Nl / (N * Vl)) * (Nl / N - 2.0 * (Nl - 1) / (N - 1)) \
                      + (Nr / (N * Vr)) * (Nr / N - 2.0 * (Nr - 1) / (N - 1))
            if loo_error < e_min:
                e_min = loo_error
                j_min = j
                t_min = t

    if j_min is None:
        # Every sampled feature was constant within this node: fail loudly
        # instead of building nonsense children (the original failed
        # obscurely on t_min = None below).
        raise ValueError("no valid split found among the sampled features")

    # create children
    left = Node()
    right = Node()

    # Children inherit the parent's box, cut at t_min along feature j_min.
    # Copy *both* limit arrays so the children never alias the parent's
    # arrays (the original shared `m` with the left child and `M` with
    # the right child).
    m_left, M_left = m.copy(), M.copy()
    m_right, M_right = m.copy(), M.copy()
    M_left[j_min] = t_min
    m_right[j_min] = t_min

    # Partition the data; `>=` on the right side (instead of `>`) ensures
    # no instance is silently dropped should a value ever equal t_min.
    X = node.data[:, j_min]
    left.data = node.data[X < t_min, :]
    left.box = m_left, M_left
    right.data = node.data[X >= t_min, :]
    right.box = m_right, M_right

    # turn the current 'node' into a split node
    # (store children and split condition)
    node.left = left
    node.right = right
    node.feature = j_min
    node.threshold = t_min

    # return the children (to be placed on the stack)
    return left, right
def make_density_leaf_node(node, N):
    '''
    Turn 'node' into a leaf of the density tree.

    node: the node to become a leaf (must carry .data and .box)
    N: the total number of training instances for the current class

    The leaf response is the piecewise-constant density estimate
    n / (v * N): the fraction of class instances falling in this cell,
    per unit of cell volume.
    '''
    # compute and store leaf response
    m, M = node.box
    n = node.data.shape[0]
    # np.product was removed in NumPy 2.0; np.prod is the supported name.
    v = np.prod(M - m)
    node.response = n / (v * N)
# # Decision Tree
# + jupyter={"outputs_hidden": true}
class DecisionTree(Tree):
    """Classification tree over all digit classes (exercise stub).

    The `...` placeholders mark the parts left for the student to implement.
    """

    def __init__(self):
        super(DecisionTree, self).__init__()

    def train(self, data, labels, n_min=20):
        '''
        data: the feature matrix for all digits
        labels: the corresponding ground-truth responses
        n_min: termination criterion (don't split if a node contains fewer instances)
        '''
        N, D = data.shape
        D_try = int(np.sqrt(D)) # how many features to consider for each split decision

        # initialize the root node with the full training set
        self.root.data = data
        self.root.labels = labels

        # build the tree depth-first with an explicit stack
        stack = [self.root]
        while len(stack):
            node = stack.pop()
            n = node.data.shape[0] # number of instances in present node
            # split only if the node is large enough AND still impure
            if n >= n_min and not node_is_pure(node):
                # Call 'make_decision_split_node()' with 'D_try' randomly selected
                # feature indices. This turns 'node' into a split node
                # and returns the two children, which must be placed on the 'stack'.
                ... # your code here
            else:
                # Call 'make_decision_leaf_node()' to turn 'node' into a leaf node.
                ... # your code here

    def predict(self, x):
        # descend to the leaf containing x and return its class distribution
        leaf = self.find_leaf(x)
        # compute p(y | x)
        return ... # your code here
# + jupyter={"outputs_hidden": true}
def make_decision_split_node(node, feature_indices):
    '''
    Turn 'node' into a split node of the decision tree (exercise stub).

    node: the node to be split
    feature_indices: a numpy array of length 'D_try', containing the feature
                     indices to be considered in the present split
    '''
    n, D = node.data.shape

    # find best feature j (among 'feature_indices') and best threshold t for the split
    ... # your code here

    # create children
    left = Node()
    right = Node()

    # initialize 'left' and 'right' with the data subsets and labels
    # according to the optimal split found above
    left.data = ... # data in left node
    left.labels = ... # corresponding labels
    right.data = ...
    right.labels = ...

    # turn the current 'node' into a split node
    # (store children and split condition)
    node.left = left
    node.right = right
    node.feature = ...
    node.threshold = ...

    # return the children (to be placed on the stack)
    return left, right
# + jupyter={"outputs_hidden": true}
def make_decision_leaf_node(node):
    '''
    Turn 'node' into a leaf of the decision tree (exercise stub).

    node: the node to become a leaf
    '''
    # compute and store leaf response
    # (N = number of instances in the leaf; response = class distribution)
    node.N = ...
    node.response = ... # your code here
# + jupyter={"outputs_hidden": true}
def node_is_pure(node):
    '''
    Check if 'node' contains only instances of the same digit (exercise stub).
    '''
    return ... # your code here
# -
# # Evaluation of Density and Decision Tree
# read and prepare the digits data
digits = load_digits()
data = digits["data"]
target = digits["target"]
labels = [0,1,2,3,4,5,6,7,8,9]
# your code here

# +
# train trees, plot training error confusion matrices, and comment on your results

# Classify every training instance by the maximum posterior p(x|y)*p(y)
# over the ten per-class density trees.
prediction = np.zeros(len(target))
posterior = np.zeros(len(target))
for l in labels:
    # one density tree per digit class, trained only on that class's instances
    Density = DensityTree()
    data_l = data[target == l]
    prior_l = len(data_l)/len(data)  # class prior = relative class frequency
    Density.train(data_l,prior_l)
    for i in range(data.shape[0]):
        x = data[i,:]
        p = Density.predict(x)
        # keep the class with the highest posterior seen so far
        if p>posterior[i]:
            posterior[i] = p
            prediction[i] = l

# training error rate: fraction of misclassified instances
print(np.sum(1*(prediction != target))/len(data))
# your code here
# -
# # Density and Decision Forest
# + jupyter={"outputs_hidden": true}
class DensityForest():
    """Bagged ensemble of DensityTrees (exercise stub)."""

    def __init__(self, n_trees):
        # create ensemble
        self.trees = [DensityTree() for i in range(n_trees)]

    def train(self, data, prior, n_min=20):
        for tree in self.trees:
            # train each tree, using a bootstrap sample of the data
            ... # your code here

    def predict(self, x):
        # compute the ensemble prediction
        return ... # your code here
# + jupyter={"outputs_hidden": true}
class DecisionForest():
    """Bagged ensemble of DecisionTrees (exercise stub)."""

    def __init__(self, n_trees):
        # create ensemble
        self.trees = [DecisionTree() for i in range(n_trees)]

    def train(self, data, labels, n_min=0):
        for tree in self.trees:
            # train each tree, using a bootstrap sample of the data
            ... # your code here

    def predict(self, x):
        # compute the ensemble prediction
        return ... # your code here
# -
# # Evaluation of Density and Decision Forest
# + jupyter={"outputs_hidden": true}
# train forests (with 20 trees per forest), plot training error confusion matrices, and comment on your results
... # your code here
| ex04/.ipynb_checkpoints/tree-methods-stubs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Per-image inference times in seconds, keyed by test-image filename
# (the filename suffix encodes the image size, e.g. *_3002_3000 = 3002x3000 px).
# Values were transcribed from the benchmark log lines quoted above each dict.

# earth_apollo17_3002_3000.jpg - (3000, 3002) - 3.783627624511719 s
# horse_284_177.jpeg - (284, 177) - 3.6966145277023315 s
# woman_blonde_512_512.jpg - (512, 512) - 3.710903482437134 s
# Overall average inference speed: 3.7303818782170612s.
yolov3_gpu_v2_on_cpu_times = {
    'earth_apollo17_3002_3000.jpg': 3.783627624511719,
    'horse_284_177.jpeg': 3.6966145277023315,
    'woman_blonde_512_512.jpg': 3.710903482437134,
}

# earth_apollo17_3002_3000.jpg - (3000, 3002) - 0.3424176812171936 s - 0 boxes detected.
# horse_284_177.jpeg - (284, 177) - 0.2606033420562744 s - 1 boxes detected.
# woman_blonde_512_512.jpg - (512, 512) - 0.26442890644073486 s - 3 boxes detected.
# Overall average inference speed: 0.28914997657140096s.
yolov3_gpu_v2_on_gpu_times = {
    'earth_apollo17_3002_3000.jpg': 0.3424176812171936,
    'horse_284_177.jpeg': 0.2606033420562744,
    'woman_blonde_512_512.jpg': 0.26442890644073486,
}

# earth_apollo17_3002_3000.jpg - (3000, 3002) - 14.350386412143708 s
# horse_284_177.jpeg - (284, 177) - 18.490979447364808 s
# woman_blonde_512_512.jpg - (512, 512) - 14.17750740289688 s
faster_rcnn_on_cpu_times = {
    'earth_apollo17_3002_3000.jpg': 14.350386412143708,
    'horse_284_177.jpeg': 18.490979447364808,
    'woman_blonde_512_512.jpg': 14.17750740289688,
}

# earth_apollo17_3002_3000.jpg - (3000, 3002) - 1.2006240582466126 s - 1 boxes detected.
# horse_284_177.jpeg - (284, 177) - 1.447853364944458 s - 1 boxes detected.
# woman_blonde_512_512.jpg - (512, 512) - 1.1771916365623474 s - 3 boxes detected.
# Overall average inference speed: 1.2752230199178058s.
faster_rcnn_on_gpu_times = {
    'earth_apollo17_3002_3000.jpg': 1.2006240582466126,
    'horse_284_177.jpeg': 1.447853364944458,
    'woman_blonde_512_512.jpg': 1.1771916365623474,
}
| notebooks/inference speed results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/seopbo/nlp_tutorials/blob/main/pairwise_text_classification_(klue_nli)_BERT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8sJoxEU0fJ5f"
# # Pairwise text classification - BERT
# - pre-trained language model로는 `klue/bert-base`를 사용합니다.
# - https://huggingface.co/klue/bert-base
# - pairwise text classification task를 수행하는 예시 데이터셋으로는 klue의 nli를 사용합니다.
# - https://huggingface.co/datasets/klue
# + [markdown] id="Z8-sxczRgiNj"
# ## Setup
# 어떠한 GPU가 할당되었는 지 아래의 코드 셀을 실행함으로써 확인할 수 있습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="bbvZuVIdgim6" outputId="48a9bf05-2537-4994-c470-6e0ac5325ec6"
# gpu_info = !nvidia-smi
# NOTE(review): the `!nvidia-smi` shell-capture above is commented out
# (jupytext comments out IPython magics), so `gpu_info` is undefined when
# this cell is run as-is — re-enable that line in a notebook environment.
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
    print('Not connected to a GPU')
else:
    print(gpu_info)
# + [markdown] id="qA_Guaj4g-ME"
# 아래의 코드 셀을 실행함으로써 본 노트북을 실행하기위한 library를 install하고 load합니다.
# + colab={"base_uri": "https://localhost:8080/"} id="D3Djk64xhAPw" outputId="43ac2566-4995-4aeb-9756-300ff72cdaf0"
# !pip install torch
# !pip install transformers
# !pip install datasets
# !pip install -U scikit-learn
import torch
import transformers
import datasets
# + [markdown] id="v8uwFSCVhTlv"
# ## Preprocess data
# 1. `klue/bert-base`가 사용한 subword tokenizer를 load합니다.
# 2. `datasets` library를 이용하여 klue nli를 load합니다.
# 3. 1의 subword tokenizer를 이용 klue nli의 data를 pairwise text classification을 수행할 수 있는 형태, train example로 transform합니다.
# - `[CLS] premise_tokens [SEP] hypothesis_tokens [SEP]`
#
# + colab={"base_uri": "https://localhost:8080/", "height": 165, "referenced_widgets": ["60f004ad31a549f4b64fb1d54476abed", "f92140676576490f8244f8514c533716", "e35c34f233cf46ad85aef49e7fbac9cf", "fcb15a7c5f474515ae83af9b7f4c5340", "1f428c847fd948adb1910707354bce65", "08caddac51654b63ae6430c721530ce6", "f9bc50f0ce434ae68fd7015567cf8a0c", "8426af3cc4e54d34a7c9f0ab91b487fa", "1ba114eb81b6464ab462754e070e8ea2", "<KEY>", "803d08fff8e441adbcf01355248dcef0", "<KEY>", "d153144ca3d344a7a9b760c2a5f3acdc", "<KEY>", "<KEY>", "3c8d57eae12649758e105fd352b6a5c5", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "118451c5379f4373be933d7dc3e8bc27", "99d9b36cba164733863d9de7f5a119ed", "<KEY>", "c84a6dec16d147eb8cb88e604d8a8c9b", "c836089148154cfcab6e69a5b0154eff", "<KEY>", "<KEY>", "c0e658a839e64b0397a25dd3baa77f6e", "c40d275c153a4b08950e79366c25079b", "<KEY>", "<KEY>", "a254ae71f4594c15afbe78e87dd91ce8", "<KEY>", "cd2fd1f6fd884529a1fdbce48772ac1e", "3f7e41851e714173939bdcd7eec34aee", "83e055a83603478a96bfcc836e157cac", "0390863819294a2fa5af2074264255a3", "b639ecfb48f24ecea2f70ed39b09a402", "<KEY>", "766f88d9be0a4e1aaef2602298e4916d", "1a5a8a8d13cf4e35ac8cc7a2d5d06133", "<KEY>", "f2905940fa844f81aeb1cc4c38a2d9c1", "<KEY>", "<KEY>", "<KEY>", "02fda083c6e74180a17fb5d1b3ec49a0", "<KEY>", "46a6994705b6422590ff0ef4d9c18a13", "5ef252d800fa454884f18b1821bdba3d", "f2121f2075b745eca6dd949e8c044952", "2489f3223af5425e95b07d32b65ef1f4", "<KEY>", "3d9af7778a824b539a6d20209b70928a", "<KEY>"]} id="noK16hrChYyH" outputId="4c7952e0-1a11-413c-a30d-f6ec3da62506"
from datasets import load_dataset

# Load KLUE NLI: carve a 10% validation split out of the original train
# set, and keep the official validation split as the held-out test set.
cs = load_dataset("klue", "nli", split="train")
cs = cs.train_test_split(0.1)
test_cs = load_dataset("klue", "nli", split="validation")
train_cs = cs["train"]
valid_cs = cs["test"]
# + colab={"base_uri": "https://localhost:8080/"} id="1TnyhdO0hbGC" outputId="5522dfd8-b553-4e8a-efa9-88aed1dc4781"
cs["train"][:2]
# + [markdown] id="PZauV4B6kMPE"
# transform을 위한 함수를 정의하고 적용합니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 212, "referenced_widgets": ["8e255aad702e4e13897dd830c3edcb49", "b5c61fb1a6014d10b850269944b51e0d", "c72734df44c24eceb74ed72b1188f176", "be7b3133eeb945518596fd3df04d5a81", "bb40c6d6e7e24ded9258f084fec6f163", "cdf3a0c4430f44ab84de0b86d2dcc27c", "c96505a4ac2c44308d3064d8a4ce067e", "<KEY>", "<KEY>", "<KEY>", "f80cfaa900414acda4d2b814e351303d", "b3c9160a1b9e4554868d019be2a85030", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "ba65dd107d0944dbb144518204367782", "<KEY>", "971f3bc6ae174844adef7ed0aa15beee", "<KEY>", "<KEY>", "160daea234e9467aa3f2a517413fa127", "a94f3a8674ed4740ba7f9b7a9c006c1e", "71965f0699cf4ea39318fa4ee424a125", "1216d0af2b43471095eb76f84af86b5e", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "f4638fd96b8944e09f03ec07afe95098", "d7844982aced4d49b33e6765246d3cc6", "924000be100449ab8b0a8847ee9ef3a2", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "3a252c75705c4151add55939d5a82d87", "<KEY>", "499fe61004b54238b55dd39ad7e9e178", "<KEY>", "51f398ec0aba44099dc43a8ae15d988d", "a02f2ac8c6614475a02d1cde83e6b2a6", "<KEY>", "beafb153de3d42b2aadbbbd916c25fab", "<KEY>", "843f4925650841a0a4cde2351f8fafdc", "e1232716a98643adbe9b56508babdc31"]} id="rhSXDe4rkNi9" outputId="e8dd7d32-4518-4701-f520-da7165a2b816"
from transformers import AutoTokenizer, AutoConfig

# Load the tokenizer and config matching the pre-trained checkpoint;
# printing the classes shows which concrete implementations were resolved.
tokenizer = AutoTokenizer.from_pretrained("klue/bert-base")
config = AutoConfig.from_pretrained("klue/bert-base")
print(tokenizer.__class__)
print(config.__class__)
# + colab={"base_uri": "https://localhost:8080/"} id="nRnXXNyDEDY1" outputId="3a34c100-c10f-4a4c-8846-c2a42a210d84"
from typing import Union, List, Dict
def transform(hypotheses: Union[str, List[str]], premises: Union[str, List[str]], tokenizer) -> Dict[str, List[List[int]]]:
    """Tokenize premise/hypothesis pairs for pairwise classification.

    Produces `[CLS] premise_tokens [SEP] hypothesis_tokens [SEP]` encodings.
    Single strings are promoted to one-element lists so both the scalar and
    the batched case go through the same tokenizer call. Padding and
    truncation are deferred to the data collator.
    """
    hypothesis_list = [hypotheses] if isinstance(hypotheses, str) else hypotheses
    premise_list = [premises] if isinstance(premises, str) else premises
    return tokenizer(
        text=premise_list,
        text_pair=hypothesis_list,
        add_special_tokens=True,
        padding=False,
        truncation=False,
    )
# Sanity-check the transform on two raw examples before mapping it over
# the full datasets.
samples = train_cs[:2]
transformed_samples = transform(samples["hypothesis"], samples["premise"], tokenizer)
print(samples)
print(transformed_samples)
# + colab={"base_uri": "https://localhost:8080/", "height": 113, "referenced_widgets": ["8646e976406b49749cf5e8008ac80f56", "9d322c7c3ba64125a9ad9c560aab8ede", "756d23e95e1c43ac807c54e80757afc1", "d827f364d2a04d8181c2e4ef8c498521", "<KEY>", "87839edecc2548fba6bbadd99ffdc16f", "<KEY>", "51708c4c7249491ea90f1c0dd34297fa", "<KEY>", "b8e8282097dd40dfa0a2132039f358c8", "05d1dec10ca24233a5d9688ab01f6eb9", "<KEY>", "e07e4f545e0c455fb7ddcfe960612aae", "d7e90b69a8104788b74e6400dbe0b502", "<KEY>", "d25445fecf174ecaaba001de842ee421", "3d5ffcb192624345ac60186d5e905b5e", "342d2da2604f45abaa40590673baa12d", "3a16ba8eaec54adc8ba22d09baeea8f4", "309aca3b60c04ffe8839c2f361e0dc4a", "<KEY>", "a7e43d86da344384a932225801ffacb8", "bba7c292754f4098b195af91e9db08a5", "<KEY>", "<KEY>", "ba46211c75b84ebeb0deeff05925be4a", "<KEY>", "1e36db3be3744236bd29c96d4f8f3a84", "d009240b251e4cc886a23391c6dfc6a4", "aa5da192b0b8417a8940860f71a860b6", "<KEY>", "<KEY>", "75b3e7f628ba48b1bed564f26abc6e79"]} id="-l8dJuoroxXd" outputId="49222685-2a07-456d-faeb-0e5a57841380"
# Tokenize all splits: drop the raw-text columns and rename 'label' to
# 'labels' (the key the HF Trainer expects); batched=True passes whole
# batches to the tokenizer for speed.
train_ds = train_cs.map(lambda data: transform(data["hypothesis"], data["premise"], tokenizer), remove_columns=["guid", "source", "hypothesis", "premise"], batched=True).rename_column("label", "labels")
valid_ds = valid_cs.map(lambda data: transform(data["hypothesis"], data["premise"], tokenizer), remove_columns=["guid", "source", "hypothesis", "premise"], batched=True).rename_column("label", "labels")
test_ds = test_cs.map(lambda data: transform(data["hypothesis"], data["premise"], tokenizer), remove_columns=["guid", "source", "hypothesis", "premise"], batched=True).rename_column("label", "labels")
# + [markdown] id="rccBa-zIsvq_"
# ## Prepare model
# pairwise text classification을 수행하기위한 `klue/bert-base`를 load합니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 173, "referenced_widgets": ["efa74c0b9e5d4bfeb0ef8d08e74c8f34", "29489a29a6e64abdb4e626305240c1ea", "ba12d41565024a5ea3fbf44f5add9168", "946a7427ee3e4448bde6fda6ae71c9ee", "612d0ee22ac640fa89fec39f3ec92a92", "<KEY>", "19d32644185841e1ba018b685a41231c", "337fb0909f9f4a43ad276b2df964181c", "a989d7eb37954425b97254dadeede014", "fc8be71ec6154b69bfab1283e74a06e7", "ff23f82d662945a5ab4c629cc2098b19"]} id="OAAkXge5mapO" outputId="4bbe93c9-7da3-47e2-8245-2356a1e02146"
from transformers import AutoModelForSequenceClassification

# Sequence-classification model with a 3-way head (NLI: entailment /
# neutral / contradiction) on top of the pre-trained BERT body; the head
# weights are freshly initialized.
model = AutoModelForSequenceClassification.from_pretrained("klue/bert-base", num_labels=3)
print(model.__class__)
# + [markdown] id="zurAlg2TtooX"
# ## Train model
# `Trainer` class를 이용하여 train합니다.
#
# - https://huggingface.co/transformers/custom_datasets.html?highlight=trainer#fine-tuning-with-trainer
# + id="NGKtGBx4lMdP"
import numpy as np
from transformers.data.data_collator import DataCollatorWithPadding
from sklearn.metrics import accuracy_score
def compute_metrics(p):
    """Compute accuracy for the HF Trainer.

    p: an (logits, labels) pair — logits of shape (batch, num_labels),
       integer class labels of shape (batch,).
    Returns {"accuracy": float}.
    """
    pred, labels = p
    # hard prediction = argmax over the label dimension
    pred = np.argmax(pred, axis=1)
    # accuracy_score(labels, pred) is just the mean of exact matches;
    # compute it directly and cast to a plain float so the value
    # serializes cleanly in the Trainer's metric logs.
    accuracy = float(np.mean(pred == labels))
    return {"accuracy": accuracy}
# Dynamic padding: each mini-batch is padded to its own longest sequence
# instead of a fixed global maximum length.
batchify = DataCollatorWithPadding(
    tokenizer=tokenizer,
    padding="longest",
)
# + colab={"base_uri": "https://localhost:8080/"} id="2Ep0-DLdGibr" outputId="feb314cb-8ed4-415c-c596-2dd9ce94a41b"
# Inspect one collated mini-batch (check mini-batch composition).
batchify(train_ds[:2])
# + colab={"base_uri": "https://localhost:8080/", "height": 572} id="Brl_Z371mC82" outputId="68382dba-2842-42b8-8c6a-0319e7f90c85"
from transformers import Trainer, TrainingArguments

# Training configuration. Notable choices:
# - peak LR 1e-4 with linear decay after 100 warmup steps
# - adam_beta2 = 0.95 (lower than the usual 0.999)
# - evaluation and checkpointing once per epoch; logging every 100 steps
training_args = TrainingArguments(
    output_dir='./results',
    evaluation_strategy="epoch",
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    learning_rate=1e-4,
    weight_decay=0.01,
    adam_beta1=.9,
    adam_beta2=.95,
    adam_epsilon=1e-8,
    max_grad_norm=1.,
    num_train_epochs=2,
    lr_scheduler_type="linear",
    warmup_steps=100,
    logging_dir='./logs',
    logging_strategy="steps",
    logging_first_step=True,
    logging_steps=100,
    save_strategy="epoch",
    seed=42,
    dataloader_drop_last=False,
    dataloader_num_workers=2
)

trainer = Trainer(
    args=training_args,
    data_collator=batchify,
    model=model,
    train_dataset=train_ds,
    eval_dataset=valid_ds,
    compute_metrics=compute_metrics
)

trainer.train()
# + colab={"base_uri": "https://localhost:8080/", "height": 194} id="-jenC9DZnjZ5" outputId="9291af6b-738c-476d-939c-38a87ca8f5a0"
# Final held-out evaluation on the official KLUE validation split (test_ds).
trainer.evaluate(test_ds)
| pairwise_text_classification_(klue_nli)_BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import necessary module
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import scipy
from array_response import *
import itertools
# +
# Monte-Carlo generation of clustered mmWave channel parameters (single cluster,
# single ray) with dual-polarization path gains (hh/hv/vh/vv).
Nt = 64 # number of transmit antennas
Nr = 16 # number of receive antennas
Ns = 1 # number of streams
Nc = 1 # number of clusters
Nray = 1 # number of rays in each cluster
realization = 10000 # equivalent to number of samples taken
At = np.zeros((Nt,Nc*Nray,realization),dtype=complex)
Ar = np.zeros((Nr,Nc*Nray,realization),dtype=complex)
alpha_hh = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_hv = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_vh = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_vv = np.zeros((Nc*Nray,realization),dtype=complex)
var_hh = np.zeros((Nc*Nray,realization),dtype=float)
AoD = np.zeros((2,Nc*Nray),dtype=complex)
AoA = np.zeros((2,Nc*Nray),dtype=complex)
angle_sigma = 10/180*np.pi # standard deviation of the angles in azimuth and elevation both of Rx and Tx
gamma = np.sqrt((Nt*Nr)/(Nc*Nray))
count = 0
eps = 0.1 # 20dB isolation
sigma = np.sqrt(8/(1+eps**2)) # according to the normalization condition of H
for reali in range(realization):
    for c in range(1,Nc+1):
        # Cluster-mean angles drawn uniformly; per-ray angles are Laplacian around them.
        AoD_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Departure _ azimuth
        AoD_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Departure _ elevation
        AoA_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Arrival_ azimuth
        AoA_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Arrival_ elevation
        AoD[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_azi_m, angle_sigma, (1,Nray))
        AoD[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_ele_m, angle_sigma, (1,Nray))
        AoA[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_azi_m, angle_sigma, (1,Nray))
        AoA[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_ele_m, angle_sigma, (1,Nray))
    for j in range(Nc*Nray):
        At[:,j,reali] = array_response(AoD[0,j],AoD[1,j],Nt) # UPA array response
        Ar[:,j,reali] = array_response(AoA[0,j],AoA[1,j],Nr)
        # NOTE(review): these assignments rebind the preallocated `var_hh` array
        # to a scalar (compare the later section, which writes var_hh[j,reali]).
        # The scalars are only used immediately below, so behavior is unchanged,
        # but the preallocation above is dead — confirm intent.
        var_hh = ((sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[0,j])**2)).real
        var_hv = ((eps**2)*(sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[0,j])**2)).real
        var_vh = ((eps**2)*(sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[1,j])**2)).real
        var_vv = ((sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[1,j])**2)).real
        # Complex Gaussian path gains: variance split evenly between real/imag parts.
        alpha_hh[j,reali] = np.random.normal(0, np.sqrt(var_hh/2)) + 1j*np.random.normal(0, np.sqrt(var_hh/2))
        alpha_hv[j,reali] = np.random.normal(0, np.sqrt(var_hv/2)) + 1j*np.random.normal(0, np.sqrt(var_hv/2))
        alpha_vh[j,reali] = np.random.normal(0, np.sqrt(var_vh/2)) + 1j*np.random.normal(0, np.sqrt(var_vh/2))
        alpha_vv[j,reali] = np.random.normal(0, np.sqrt(var_vv/2)) + 1j*np.random.normal(0, np.sqrt(var_vv/2))
# +
# Apply a random antenna-rotation (azimuth/elevation) to the dual-polarized path
# gains and form the combined same-cluster / cross-polar / same-polar quantities.
azi_rot = np.random.uniform(0,2*np.pi,realization)
ele_rot = np.random.uniform(0,np.pi/2,realization) # Why PI/2 ??
ht_hr = np.zeros(realization,dtype=complex)
vt_hr = np.zeros(realization,dtype=complex)
ht_vr = np.zeros(realization,dtype=complex)
vt_vr = np.zeros(realization,dtype=complex)
same_cluster = np.zeros(realization,dtype=complex)
cross_polar = np.zeros(realization,dtype=complex)
same_polar = np.zeros(realization,dtype=complex)
for reali in range(realization):
    # Rotation mixes the h/v transmit components into the h/v receive components.
    ht_hr[reali] = np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[0,reali]+np.sin(ele_rot[reali])*alpha_vh[0,reali]
    vt_hr[reali] = np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[0,reali]+np.sin(ele_rot[reali])*alpha_vv[0,reali]
    ht_vr[reali] = -np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[0,reali]+np.cos(ele_rot[reali])*alpha_vh[0,reali]
    vt_vr[reali] = -np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[0,reali]+np.cos(ele_rot[reali])*alpha_vv[0,reali]
    same_cluster[reali] = ht_hr[reali]+vt_hr[reali]+ht_vr[reali]+vt_vr[reali]
    cross_polar[reali] = vt_hr[reali]+ht_vr[reali]
    same_polar[reali] = ht_hr[reali]+vt_vr[reali]
# -
# Display the cross-polar gains (notebook cell output).
cross_polar
fig = plt.figure(figsize=(10,10), dpi=70)
# ax1 = fig.add_subplot(611)
# ax2 = fig.add_subplot(612)
# ax3 = fig.add_subplot(613)
# ax4 = fig.add_subplot(614)
ax5 = fig.add_subplot(211)
ax6 = fig.add_subplot(212)
# sns.distplot(np.abs(alpha_hh).reshape(1,-1),kde=False, ax=ax1)
# sns.distplot(np.angle(alpha_hh).reshape(1,-1),bins=100, kde = False, ax=ax2)
# sns.distplot(np.abs(ht_hr),bins=100, kde = False, ax=ax2)
# sns.distplot(np.angle(ht_hr),bins=100, kde = False, ax=ax4)
# sns.distplot(np.abs(same_cluster)**2,bins=100, kde = False, ax=ax3)
# sns.distplot(np.abs(cross_polar)**2,bins=100, kde = False, ax=ax4)
# Histograms of the power difference between the combined/same-polar terms and
# the cross-polar term, plus their sample means.
sns.distplot(np.abs(same_cluster)**2 - np.abs(cross_polar)**2,bins=100, kde = False, ax=ax5)
sns.distplot(np.abs(same_polar)**2 - np.abs(cross_polar)**2,bins=100, kde = False, ax=ax6, )
print(np.mean(np.abs(same_cluster)**2 - np.abs(cross_polar)**2))
print(np.mean(np.abs(same_polar)**2 - np.abs(cross_polar)**2))
# Not used now !!
# np.mean(var_hh) == np.mean(sigma/8)
# +
# Second realization section: six clusters. Only the per-path variance var_hh is
# actually computed here; the alpha/H assignments are commented out below.
Ns = 1 # number of streams
Nc = 6 # number of clusters
Nray = 1 # number of rays in each cluster
Nt = 64 # number of transmit antennas
Nr = 16 # number of receive antennas
angle_sigma = 10/180*np.pi # standard deviation of the angles in azimuth and elevation both of Rx and Tx
gamma = np.sqrt((Nt*Nr)/(Nc*Nray))
realization = 1000 # equivalent to number of samples taken
count = 0
eps = 0.1 # 20dB isolation
sigma = np.sqrt(8/(1+eps**2)) # according to the normalization condition of H
H_pol = np.zeros((2*Nr,2*Nt,realization),dtype=complex)
At = np.zeros((Nt,Nc*Nray,realization),dtype=complex)
Ar = np.zeros((Nr,Nc*Nray,realization),dtype=complex)
# NOTE(review): with the assignments below commented out, these alpha arrays
# (and H/H_pol) stay all-zero in this cell — the later mean-abs prints over
# alpha_* therefore reflect this cell only if it was re-run after editing.
alpha_hh = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_hv = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_vh = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_vv = np.zeros((Nc*Nray,realization),dtype=complex)
var_hh = np.zeros((Nc*Nray,realization),dtype=float)
AoD = np.zeros((2,Nc*Nray),dtype=complex)
AoA = np.zeros((2,Nc*Nray),dtype=complex)
H = np.zeros((2*Nr,2*Nt,realization),dtype=complex)
azi_rot = np.random.uniform(0,2*np.pi,realization)
ele_rot = np.random.uniform(0,np.pi/2,realization) # Why PI/2 ??
# NOTE(review): R is built here but not used later in this cell.
R = np.array([[np.cos(ele_rot)*np.cos(azi_rot),np.sin(ele_rot)],[-np.sin(ele_rot)*np.cos(azi_rot),np.cos(ele_rot)]]) # rotation matrix
for reali in range(realization):
    for c in range(1,Nc+1):
        AoD_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Departure _ azimuth
        AoD_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Departure _ elevation
        AoA_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Arrival_ azimuth
        AoA_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Arrival_ elevation
        AoD[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_azi_m, angle_sigma, (1,Nray))
        AoD[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_ele_m, angle_sigma, (1,Nray))
        AoA[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_azi_m, angle_sigma, (1,Nray))
        AoA[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_ele_m, angle_sigma, (1,Nray))
    for j in range(Nc*Nray):
        At[:,j,reali] = array_response(AoD[0,j],AoD[1,j],Nt) # UPA array response
        Ar[:,j,reali] = array_response(AoA[0,j],AoA[1,j],Nr)
        # Here the preallocated array is filled properly (cf. the first section).
        var_hh[j,reali] = ((sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[0,j])**2)).real
        # var_hh = ((sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[0,j])**2)).real
        # var_hv = ((eps**2)*(sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[0,j])**2)).real
        # var_vh = ((eps**2)*(sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[1,j])**2)).real
        # var_vv = ((sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[1,j])**2)).real
        # alpha_hh[j,reali] = np.random.normal(0, np.sqrt(var_hh/2)) + 1j*np.random.normal(0, np.sqrt(var_hh/2))
        # alpha_hv[j,reali] = np.random.normal(0, np.sqrt(var_hv/2)) + 1j*np.random.normal(0, np.sqrt(var_hv/2))
        # alpha_vh[j,reali] = np.random.normal(0, np.sqrt(var_vh/2)) + 1j*np.random.normal(0, np.sqrt(var_vh/2))
        # alpha_vv[j,reali] = np.random.normal(0, np.sqrt(var_vv/2)) + 1j*np.random.normal(0, np.sqrt(var_vv/2))
        # alpha = np.vstack((np.hstack((alpha_hh[j,reali],alpha_hv[j,reali])),np.hstack((alpha_vh[j,reali],alpha_vv[j,reali]))))
# -
# Flatten the (Nc*Nray=6) x (realization=1000) variance matrix for inspection.
var_hh_test = var_hh.reshape(6000,1)
n, bins, patches = plt.hist(var_hh_test,bins=100)
plt.show()
plt.plot(var_hh_test[0:6000])
plt.show()
# print(alpha_hh.shape)
# print(alpha_hh[:,0])
# print(sigma)
# print(np.mean(alpha_hh))
# NOTE(review): in this section the alpha_* arrays were left at zero (their
# assignments are commented out above), so these means are 0 unless an earlier
# cell's values are still live — confirm which run produced the outputs.
print(np.mean(np.abs(alpha_hh)))
print(np.mean(np.abs(alpha_vv)))
print(np.mean(np.abs(alpha_hv)))
# Analytic reference value for comparison.
print((sigma*np.sqrt(np.pi)/4)/5*4)
# +
# Small Monte-Carlo experiments on products of cosines of uniform angles,
# used to sanity-check the variance expressions above.
Ns = 1 # number of streams
Nc = 6 # number of clusters
Nray = 1 # number of rays in each cluster
Nt = 64 # number of transmit antennas
Nr = 16 # number of receive antennas
angle_sigma = 10/180*np.pi # standard deviation of the angles in azimuth and elevation both of Rx and Tx
gamma = np.sqrt((Nt*Nr)/(Nc*Nray))
realization = 1000 # equivalent to number of samples taken
count = 0
eps = 0.1 # 20dB isolation
sigma = np.sqrt(8/(1+eps**2)) # according to the normalization condition of H
test_cos = np.zeros(10000)
test_cos_multi = np.zeros(10000)
test_var_hh_square = np.zeros(10000)
test_var_hh_cube = np.zeros(10000)
test_var_substract= np.zeros(10000)
test_substract_cossin = np.zeros(10000)
test_add_cossin = np.zeros(10000)
for i in range(10000):
    # Each draw uses fresh independent angles.
    test_cos[i] = np.cos(np.random.uniform(0,2*np.pi))
    test_cos_multi[i] = np.cos(np.random.uniform(0,2*np.pi))*np.cos(np.random.uniform(0,2*np.pi))
    test_var_hh_square[i] = (sigma*np.cos(np.random.uniform(0,2*np.pi))*np.cos(np.random.uniform(0,2*np.pi)))**2
    test_var_hh_cube[i] = (sigma*np.cos(np.random.uniform(0,2*np.pi))*np.cos(np.random.uniform(0,2*np.pi))*np.cos(np.random.uniform(0,2*np.pi)))**2
    test_var_substract[i] = test_var_hh_square[i] - test_var_hh_cube[i]
    alpha = np.random.uniform(0,np.pi)
    test_substract_cossin[i] = -np.sin(alpha) + np.cos(alpha)
    test_add_cossin[i] = np.sin(alpha) + np.cos(alpha)
# +
# Histograms of the Monte-Carlo quantities above, one subplot per experiment.
# NOTE(review): sns.distplot is deprecated in modern seaborn (use histplot/displot).
fig = plt.figure(figsize=(10,10), dpi=70)
ax1 = fig.add_subplot(611)
ax2 = fig.add_subplot(612)
ax3 = fig.add_subplot(613)
ax4 = fig.add_subplot(614)
ax5 = fig.add_subplot(615)
ax6 = fig.add_subplot(616)
sns.distplot(test_cos,bins=100, kde = False, ax=ax1)
sns.distplot(test_cos_multi,bins=100, kde = False, ax=ax2)
sns.distplot(test_var_hh_square,bins=100, kde = False, ax=ax3)
sns.distplot(test_var_substract,bins=100, kde = False, ax=ax4)
sns.distplot(test_substract_cossin,bins=100, kde = False, ax=ax5)
sns.distplot(test_add_cossin,bins=100, kde = False, ax=ax6)
| Test .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/harveenchadha/bol/blob/main/demos/hf/english/hf_english_enm_700_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="BUR_s3tt_IJF"
# # Vakyansh + Hugging Face : Indian English Speech To text Demo
#
# ---
#
#
# + [markdown] id="gQCiOn_9_ZeI"
# ## Install requirements
# + id="X_UDaMuH_GnL"
# %%capture
# !apt-get -y install sox ffmpeg
# !pip install transformers ffmpeg-python sox
# !wget https://raw.githubusercontent.com/harveenchadha/bol/main/demos/colab/record.py
# + [markdown] id="Ikp7WVluBacM"
# ## Load Indian English Model
# + id="pZ5rnYAt_rB1" colab={"base_uri": "https://localhost:8080/", "height": 311, "referenced_widgets": ["7b4269c5145e4c9493351887de627df8", "938b30973af144d89d0f9d0122c52742", "3b537d7979564b4b94f181f55237e58d", "40b59c1fabb94f3ead85328fb49f0381", "d0d644fd616f4e70b781ed52951a21fc", "<KEY>", "<KEY>", "<KEY>", "41aa162456764e48a489c832ee623bb1", "9848f2777f924fac8a72f341999a9233", "42374a176d174d1ababde928c03fcd0b", "7ebd1133e7344d8ba4292592ccb0d84a", "c9510eada774420384564aa6ba6abb27", "671adff362c04344aed3984aa9789576", "<KEY>", "989e2e1083794fe48dfe43f1db5b7b21", "565c9354f5fa4e20b1615ea53063d01f", "<KEY>", "7fb3603df1a94f03881a9bab45a4bce2", "14c625162c04401aad7e9ff7b04e31a0", "<KEY>", "6f4000b8a94d4d4bac7f6a174a660051", "0a726986955d40ff8d46dda5e4232aa4", "<KEY>", "<KEY>", "9a67cf44e11342dc873920261eada69d", "eb0301f636754e7f9ce3ee44e905b6ef", "3a3c45307e0d4477bc085eb358fe1835", "<KEY>", "<KEY>", "3dfdad09f6ae421297810c85adf3102f", "<KEY>", "<KEY>", "<KEY>", "5d6584d48d7542ff8362049c8825a2d4", "<KEY>", "<KEY>", "<KEY>", "5169322998f745f984ccccd3aee9f5e8", "019c09aed52b420791e3700687459174", "17d6fba0902c460caa7da6ba8626ebd2", "8764b6ace9a540a4a4f6d9fc0035ec35", "7f25b8ee60e74e4588ea48f6c56c8e47", "651bcee1ebea4788830d6345137293ac", "7a3a59a532834ff68639b15562e934a8", "<KEY>", "<KEY>", "be2b2d76fb4b425d8d6f382863deaeb3"]} outputId="316c59cb-da41-4a35-e589-fa46f6130c1b"
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
def load_model():
    """Download and return the pretrained Vakyansh Indian-English wav2vec2 (processor, model) pair."""
    model_id = "Harveenchadha/vakyansh-wav2vec2-indian-english-enm-700"
    return (
        Wav2Vec2Processor.from_pretrained(model_id),
        Wav2Vec2ForCTC.from_pretrained(model_id),
    )
processor, model = load_model()
# + id="eT5eskX5KqEZ"
def parse_transcription(wav_file):
    """Transcribe a 16 kHz WAV file using the globally loaded wav2vec2 processor/model."""
    # Read the raw waveform from disk (sample rate assumed to be 16 kHz).
    samples, _rate = sf.read(wav_file)
    # Tokenize/pad the waveform into a batched PyTorch tensor.
    features = processor(samples, sampling_rate=16_000, return_tensors="pt")
    # Greedy CTC decoding: per-frame argmax over the vocabulary logits.
    frame_ids = torch.argmax(model(features.input_values).logits, dim=-1)
    return processor.decode(frame_ids[0], skip_special_tokens=True)
# + [markdown] id="v6a1YbhPAXY0"
# ## Record file using colab
# + colab={"base_uri": "https://localhost:8080/", "height": 167} id="irfbRvhbAVs8" outputId="788e2dbd-d6eb-4bdf-d46c-4e63041ceac8"
# Record a short clip in Colab (helper script downloaded in the install cell).
from record import record_audio
record_audio('test')
# + [markdown] id="sHiDi3a0JKNk"
# ## Run Model on recorded file
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="uIccgORyCm0S" outputId="125da9ab-375a-445d-d3a0-2018f9415b7a"
# Transcribe the clip that record_audio saved as test.wav.
parse_transcription('test.wav')
# + id="AuaGGkRA-u7E"
| demos/hf/english/hf_english_enm_700_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
import numpy as np
import pandas as pd
import xgboost
import sklearn
import matplotlib.pyplot as plt #Matplotlib for Viz
import seaborn as sns #Seaborn for Viz
from scipy import stats #Outlier Analysis & Removal
import plotly.graph_objects as go #Plotly for Viz
import plotly.express as px # Plotly express
from plotly.subplots import make_subplots
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.preprocessing import StandardScaler #Scaling variables
from sklearn.model_selection import train_test_split
from skmultilearn.model_selection import iterative_train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier #RF, GB
from sklearn.metrics import f1_score, log_loss, confusion_matrix
from sklearn.multioutput import MultiOutputClassifier
from sklearn.datasets import make_classification
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from xgboost import XGBClassifier
# -
# Reading the MoA (lish-moa) data files and storing them in dataframes:
# features for train/test plus scored and non-scored target matrices.
df_train_features = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/train_features.csv')
df_test_features = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/test_features.csv')
df_train_target_nonscored = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/train_targets_nonscored.csv')
df_train_target_scored = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/train_targets_scored.csv')
# +
# Keep only compound-treated rows for training; control ('ctl_vehicle') samples
# have no mechanism of action by construction.
selected = df_train_features['cp_type'] == 'trt_cp'
# FIX: the positional `axis` argument to DataFrame.drop was deprecated and
# removed in pandas 2.0 — use the explicit `columns=` keyword instead.
df_train_features = df_train_features[selected].drop(columns='cp_type')
# Binary-encode the dose: True for D1, False for D2.
df_train_features['cp_dose'] = df_train_features['cp_dose'] == 'D1'
# Remember which test rows are real treatments (used to zero control predictions later).
selected_test = df_test_features['cp_type'] == 'trt_cp'
# Re-read the scored targets and apply the same row filter as the features.
df_train_target_scored = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/train_targets_scored.csv')[selected]
df_test_features = df_test_features.drop(columns='cp_type')
df_test_features['cp_dose'] = df_test_features['cp_dose'] == 'D1'
# -
# NOTE(review): this sum is computed but its result is discarded (notebook cell
# output only, or leftover exploration).
np.sum(df_train_features.iloc[:, 1:].values, axis = 0)
X = df_train_features.iloc[:, 1:]
# Collect "easy" rare categories: those a shallow decision tree can fit perfectly.
easy_cat = []
for category in df_train_target_scored.columns[1:]:
    tg = df_train_target_scored[category]
    total = tg.sum()
    # Only consider rare categories (at most 100 positive samples).
    if total > 100:
        continue
    # Allow roughly one split per two positive samples.
    depth = int(total/2)+1
    score = 0
    for cr in ['gini', 'entropy']:
        tree = DecisionTreeClassifier(criterion = cr, max_depth = depth)
        tree.fit(X, tg)
        # Training-set F1 — this measures memorization, not generalization.
        score = max(score, f1_score(tg, tree.predict(X)))
        print(f'best score for {category} (size - {total}) = {f1_score(tg, tree.predict(X))} on criterion {cr} with depth = {depth}')
        if score == 1:
            # Perfect fit: record the category and the winning tree settings,
            # then stop trying further criteria for this category.
            easy_cat.append((category, cr, tree.get_depth(),total))
            print()
            break
    print()
# +
# Unpack the (category, criterion, depth, positive-count) tuples collected above
# into a summary dataframe and persist it.
ct = [c[0] for c in easy_cat]
cr = [c[1] for c in easy_cat]
d = [c[2] for c in easy_cat]
s = [c[3] for c in easy_cat]
easy = pd.DataFrame({
    'categories': ct,
    'positive samples in dataset': s,
    'criterion is': cr,
    'with depth': d
})
easy.to_csv('light_categories.csv', index = False)
# Display sorted by rarity then tree depth (cell output; not assigned).
easy.sort_values(['positive samples in dataset', 'with depth'])
# +
# Build submission columns for the "easy" categories: refit the winning tree
# per category on the full training data and predict on the test features.
sub = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/sample_submission.csv')
sub = sub.loc[:, pd.Index(easy['categories'])]
for cat, cr, depth in zip(easy['categories'], easy['criterion is'], easy['with depth']):
    # BUG FIX: the target must come from the category being iterated (`cat`);
    # the original read `df_train_target_scored[category]`, reusing the stale
    # loop variable from the earlier search loop, so every tree was trained on
    # the same (wrong) target column.
    tg = df_train_target_scored[cat]
    tree = DecisionTreeClassifier(criterion = cr, max_depth = depth)
    tree.fit(X, tg)
    sub[cat] = tree.predict(df_test_features.iloc[:, 1:])
# Control (vehicle-treated) test samples have no mechanism of action: zero them.
sub[~selected_test] = 0
sub.to_csv('tree_columns_results.csv', index = False)
# +
# Random Forest
# NOTE(review): 'cp_type' was dropped and 'cp_dose' converted to bool in the
# earlier filtering cell, so the cp_type mapping is a no-op here and the D1/D2
# mapping cannot match boolean values — confirm which cell order was intended.
encode_values = {"cp_type": {"trt_cp": 0, "ctl_vehicle": 1},
                 "cp_time": {24: 0, 48: 1, 72: 2},
                 "cp_dose": {"D1": 0, "D2": 1}}
df_train_features.replace(encode_values, inplace=True)
df_test_features.replace(encode_values, inplace=True)
# -
# First column (sig_id) is excluded from the feature matrices.
X_train = df_train_features.iloc[:,1:].to_numpy()
X_test = df_test_features.iloc[:,1:].to_numpy()
y_train = df_train_target_scored.iloc[:,1:].to_numpy()
# NOTE(review): `y_test` is built from the TEST FEATURES (same data as X_test),
# not from any labels — the name is misleading; verify it is never used as truth.
y_test = df_test_features.iloc[:,1:].to_numpy()
model = RandomForestClassifier(n_estimators=200,max_depth=10, random_state=
0,min_samples_split=10)
model.fit(X_train,y_train)
# +
# Generating predictions from the Random Forest model and plotting feature importances.
feature_list=df_train_features.columns
pred_rf=model.predict(X_test)
pred_rf_proba=model.predict_proba(X_test)
# Skip the first column name (sig_id) to align names with the feature matrix.
feat_importances = pd.Series(model.feature_importances_, index=feature_list[1:])
feat_importances=feat_importances.sort_values()
feat_importances.plot(kind='barh',figsize=(16,16)) # Plotting feature importance
# +
import xgboost
from xgboost import XGBClassifier
from sklearn.multioutput import MultiOutputClassifier
# Wrap a GPU-accelerated XGBoost classifier so one model is fit per target column.
xgb = MultiOutputClassifier(XGBClassifier(tree_method='gpu_hist'))
# Hyperparameters (presumably found by tuning; `estimator__` prefixes address
# the wrapped XGBClassifier through the MultiOutputClassifier).
params = {'estimator__colsample_bytree': 0.6522,
          'estimator__gamma': 3.6975,
          'estimator__learning_rate': 0.0503,
          'estimator__max_delta_step': 2.0706,
          'estimator__max_depth': 10,
          'estimator__min_child_weight': 31.5800,
          'estimator__n_estimators': 166,
          'estimator__subsample': 0.8639
}
xgb.set_params(**params)
xgb.fit(X_train,y_train)
# -
pred_xg_proba = xgb.predict_proba(X_test)
print(pred_xg_proba)
# +
# NOTE(review): sklearn's GradientBoostingClassifier does not support
# multilabel y (unlike the MultiOutputClassifier wrapper above) — confirm
# this cell actually ran against the multi-column y_train.
grad_boost_classifier = GradientBoostingClassifier(learning_rate=0.01,max_depth=2,random_state=0)
#title = 'Gradient boosting binary dataset'
# plot_class_regions_for_classifier_subplot(grad_boost_classifier,X_train,y_train,X_test,y_test,title)
grad_boost_classifier.fit(X_train,y_train)
predicted_values = grad_boost_classifier.predict(X_test)
# -
from sklearn.preprocessing import OneHotEncoder, QuantileTransformer
from sklearn.model_selection import KFold
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.multioutput import MultiOutputClassifier
from xgboost import XGBClassifier
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import callbacks
import keras
from sklearn.metrics import log_loss
from keras.regularizers import L1, L2
# +
# Fresh copies of the raw features/targets for the neural-network pipeline.
drug = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/train_features.csv')
target = pd.read_csv('C:/Users/hp/Desktop/EAI 6000 Project/lish-moa/train_targets_scored.csv')
# setting the sig_id column as index
drug.set_index('sig_id', inplace= True)
target.set_index('sig_id', inplace= True)
# Treatment-only subsets (controls excluded), with targets aligned by index.
treat_drug = drug.query('cp_type == "trt_cp"')
treat_target = target.loc[treat_drug.index]
# -
# Column-name bookkeeping: categorical vs numerical features, plus the
# gene-expression ('g-') and cell-viability ('c-') subsets.
cat_cols = drug.select_dtypes(include = 'O').columns.tolist()
num_cols = drug.select_dtypes(exclude = 'O').columns.tolist()
gene_features = [i for i in num_cols if i.startswith('g-')]
cell_viability = [i for i in num_cols if i.startswith('c-')]
# cp_time is treated as categorical despite its numeric dtype.
cat_cols2 = cat_cols + ['cp_time']
# BUG FIX: take a copy — the original wrote `num_cols2 = num_cols`, aliasing the
# list, so the in-place remove() below silently dropped 'cp_time' from
# num_cols2 as well, defeating the point of keeping a second list.
num_cols2 = num_cols.copy()
num_cols.remove('cp_time')
# +
# Data preprocessing, i.e. label-encoding 'cp_dose', 'cp_time' and 'cp_type', or dropping vehicle/control-treated sample rows.
# Module-level transformer reused by data_preprocessing (fit once, transform later).
qt = QuantileTransformer()
def data_preprocessing(dataframe, only_treatment = True, fit = False, transform = False):
    """Label-encode dose/time(/type) and optionally quantile-transform numeric columns.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Raw feature frame; must contain 'cp_dose', 'cp_time' and 'cp_type'.
    only_treatment : bool
        If True, drop the 'cp_type' column; otherwise label-encode it
        (trt_cp -> 1, ctl_vehicle -> 0).
    fit : bool
        Fit the module-level QuantileTransformer `qt` on this frame's numeric
        columns (the global `num_cols`) and transform them.
    transform : bool
        Transform the numeric columns with the already-fitted `qt`.

    Returns
    -------
    pd.DataFrame
        A processed copy; the input frame is left untouched.
    """
    df = dataframe.copy()
    if fit:
        df[num_cols] = qt.fit_transform(df[num_cols])
    if transform:
        df[num_cols] = qt.transform(df[num_cols])
    df["cp_dose"] = df.cp_dose.map({"D1": 0, "D2":1})
    df["cp_time"] = df.cp_time.map({24: 0,48: 1, 72: 2})
    if only_treatment:
        # FIX: the positional `axis` argument to drop() was deprecated and
        # removed in pandas 2.0; use the explicit keyword form instead.
        df = df.drop(columns="cp_type")
    else:
        df["cp_type"] = df.cp_type.map({"trt_cp": 1, "ctl_vehicle":0})
    return df
# Two variants: one keeping (encoded) cp_type, one dropping it; both fit `qt`.
drug_cleaned = data_preprocessing(dataframe= drug, only_treatment= False, fit= True, transform= False)
drug_treatment = data_preprocessing(dataframe= drug, only_treatment= True,fit= True, transform= False)
# +
# Defining the NN model to be optimized via Optuna hyperparameter optimization.
def for_bayes_optimization2(dimension):
    """Build, train and return a 4-hidden-layer Keras MLP from a flat hyperparameter list.

    Relies on the globals x_train, y_train, x_val, y_val being set by the caller
    (see the cross-validation loop below). Returns the fitted model.
    """
    [dl1,dl2,dl3,dl4,dp1,dp2,dp3,dp4,regu,regu_val,activation,learning_rate] = dimension
    # NOTE(review): if `regu` is neither 'l1' nor 'l2', act_reg is never bound
    # and the first Dense layer raises NameError — confirm inputs are restricted.
    if (regu == 'l2'):
        act_reg = keras.regularizers.l2(regu_val)
    if (regu =='l1'):
        act_reg = keras.regularizers.l1(regu_val)
    # NOTE(review): this ReduceLROnPlateau callback is created but never passed
    # to model.fit (only `es` is) — confirm whether it was disabled on purpose.
    lr = callbacks.ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, patience = 5, verbose = 0)
    #x_train,x_val, y_train, y_val = train_test_split(drug_cleaned, target, test_size = 0.3, random_state = 42)
    es = callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 1e-4, mode = 'min', baseline = 0.3 ,
                                 restore_best_weights=False, patience= 30, verbose = 0)
    adam = keras.optimizers.Adam(learning_rate = learning_rate)
    # Four hidden Dense layers, each followed by dropout; sigmoid output per label.
    model = Sequential()
    model.add(Dense(dl1, input_dim = x_train.shape[1], activation = activation, activity_regularizer = act_reg))
    model.add(Dropout(dp1))
    model.add(Dense(dl2, activation = activation))
    model.add(Dropout(dp2))
    model.add(Dense(dl3, activation = activation))
    model.add(Dropout(dp3))
    model.add(Dense(dl4, activation = activation))
    model.add(Dropout(dp4))
    model.add(Dense(y_train.shape[1], activation = 'sigmoid'))
    # Multi-label setup: independent binary cross-entropy per output unit.
    model.compile(optimizer = adam, loss = 'binary_crossentropy', metrics = ['AUC'])
    model.fit(x = x_train, y = y_train, validation_data = (x_val, y_val), epochs = 200, batch_size = 128, callbacks = [es], verbose = 0)
    log_loss_data = log_loss(np.ravel(y_val), np.ravel(model.predict_proba(x_val)), eps = 1e-7)
    return model # or return log_loss_data (for optuna optimization)
# -
# Best parameters obtained from Optuna
# Order matches the unpacking in for_bayes_optimization2:
# [dl1, dl2, dl3, dl4, dp1, dp2, dp3, dp4, regu, regu_val, activation, learning_rate]
best_set_from_baysian_optimization = [2048, 1982, 708, 470, 0.6067766671093088, 0.1, 0.4973213653064633, 0.5950996340056243, 'l1', 1e-05, 'swish', 0.0001]
# Preparation of the sample submission file (Kaggle input paths).
submission_test = pd.read_csv('../input/lish-moa/test_features.csv')
submission_test_prob = pd.read_csv('../input/lish-moa/sample_submission.csv')
# transform=True reuses the QuantileTransformer fitted on the training frame.
submission_test_cleaned = data_preprocessing(dataframe= submission_test, only_treatment= False, fit= False, transform= True)
submission_test_prob.set_index('sig_id', inplace= True)
submission_test_cleaned.set_index('sig_id', inplace = True)
submission_test_cleaned
# Setting the initial prediction for all rows to zeros.
submission_test_prob[:] = np.zeros(submission_test_prob.shape)
submission_test_prob
# Submission-file prediction: 3 seeds x 5-fold CV. For every fold, train the NN
# (which reads the x_train/x_val/y_train/y_val globals set here) and an XGBoost
# model, average their test predictions, and accumulate them across all folds.
n_splits = 5
sub_file = submission_test_cleaned
sub_file_all_predict = np.zeros(submission_test_prob.shape)
nn_loss = [] # neural network loss
xgb_loss = [] # xgb loss
combined_loss = [] # loss of the ensemble of NN and XGB
for seed in [10, 20, 30]: # trying three different seeds
    for e, (train, val) in enumerate(KFold(n_splits = n_splits, shuffle = True, random_state = seed).split(drug_cleaned, target)):
        # These globals are consumed inside for_bayes_optimization2.
        x_train, y_train = drug_cleaned.iloc[train], target.iloc[train]
        x_val, y_val = drug_cleaned.iloc[val], target.iloc[val]
        model = for_bayes_optimization2(best_set_from_baysian_optimization)
        nn_predict = model.predict_proba(x_val)
        sub_file_nn_predict = model.predict_proba(sub_file)
        nn_loss_temp = log_loss(np.ravel(y_val), np.ravel(nn_predict), eps = 1e-7)
        nn_loss.append(nn_loss_temp)
        print(f"NN_log_loss fold {e}, seed {seed}: ", nn_loss_temp)
        xgb = MultiOutputClassifier(XGBClassifier(tree_method = 'gpu_hist', n_estimators = 130, max_depth = 3, reg_alpha = 2, min_child_weight = 2,
                                                  gamma = 3, learning_rate = 0.0580666601841646, colsample_bytree = 0.58)) # Parameters obtained after optimization with Optuna
        xgb.fit(x_train, y_train)
        # predict_proba returns one (n, 2) array per label; keep P(class=1) and
        # transpose to (n_samples, n_labels).
        xgb_predict = np.array(xgb.predict_proba(x_val))[:,:,1].T
        xgb_loss_temp = log_loss(np.ravel(y_val), np.ravel(xgb_predict), eps = 1e-7)
        xgb_loss.append(xgb_loss_temp)
        sub_file_xgb_predict = np.array(xgb.predict_proba(sub_file))[:,:,1].T
        # Simple mean ensemble of the two models' test predictions.
        avg_sub_file_predict = (sub_file_nn_predict + sub_file_xgb_predict)/2
        sub_file_all_predict = sub_file_all_predict + avg_sub_file_predict
        combined_loss_temp = log_loss(np.ravel(y_val), np.ravel((nn_predict + xgb_predict)/2), eps = 1e-7)
        combined_loss.append(combined_loss_temp)
        print(f"xgb_log_loss fold {e}, seed {seed}: ", xgb_loss_temp)
        print(f"combined_loss fold {e}, seed {seed}: ", combined_loss_temp)
print("Average log loss of NN is :", np.mean(nn_loss), " and standard deviation: ", np.std(nn_loss))
print("Average log loss of Xgboost is :", np.mean(xgb_loss), " and standard deviation: ", np.std(xgb_loss))
print("Combined log loss is :", np.mean(combined_loss), " and standard deviation: ", np.std(combined_loss))
# Average the accumulated predictions over all seed/fold combinations.
final_predictions = sub_file_all_predict/(n_splits * 3)
submission_test_prob[:] = final_predictions
# Clip away extreme probabilities to bound the log-loss penalty.
submission_test_prob = np.clip(submission_test_prob, 0.0005, 0.99)
# NOTE(review): this mixes positional .iloc with the label index of the
# unindexed `submission_test` frame — it only lines up because that frame kept
# its default RangeIndex; confirm the control rows really get zeroed.
submission_test_prob.iloc[submission_test.query('cp_type == "ctl_vehicle"').index] = 0.0
submission_test_prob.to_csv('submission.csv')
| EAI 6000 - FoAI/Discussions & Assignments/Week 3 - Fundamentals of AI Assignment (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: lark
# language: python
# name: lark
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
import torch
import torchaudio as ta
import torchaudio.functional as taf
import torchaudio.transforms as tat
from torchvision import transforms
print(torch.__version__)
print(ta.__version__)
import matplotlib
import matplotlib.pyplot as plt
from IPython.display import Audio, display
import pandas as pd
import os
import pprint
from typing import *
import itertools
from collections import Counter
import numpy as np
from datetime import datetime
from lark.config import Config
from lark.learner import Learner
from lark.ops import Sig2Spec, MixedSig2Spec
from lark.data import *
# -
# Pin all work to the second GPU.
torch.cuda.set_device(1)
torch.cuda.current_device()
# Training configuration for the first (partially frozen) run.
cfg = Config(
    n_workers=8,
    use_noise=True,
    use_overlays=False,
    use_secondary_labels=True,
    noise_nsr_dbs = [0, 3, 10],
    f1_threshold=0.5,
    # sites=['COR', 'SSW'],
    sites=None,
    use_neptune=True,
    log_batch_metrics=False,
    n_epochs=20,
    bs=32,
    lr=1e-3,
    model='resnet18',
    scheduler='torch.optim.lr_scheduler.OneCycleLR'
)
cfg.training_dataset_size
# as suggested here:
# https://stackoverflow.com/a/47212899/478746
# +
# Pretrained ResNet-18 backbone; freeze everything, then unfreeze the last two
# stages + pooling so only the top half fine-tunes.
main_model = torch.hub.load('pytorch/vision:v0.9.0', 'resnet18', pretrained=True)
for param in main_model.parameters():
    param.requires_grad = False
for layer in [main_model.layer3, main_model.layer4, main_model.avgpool]:
    for param in layer.parameters():
        param.requires_grad = True
# Prepend the signal->spectrogram front-end and replace the classifier head
# with one output per label.
prep = MixedSig2Spec(cfg)
posp = torch.nn.Linear(in_features=512, out_features=len(cfg.labels), bias=True)
main_model.fc = posp
model = torch.nn.Sequential(prep, main_model)
model = model.cuda()
# -
lrn = Learner("resnet18-half-frozen-bag-1", cfg, model)
# + tags=[]
lrn.learn()
# -
# Evaluate the final weights, then the best checkpoint, for comparison.
lrn.evaluate()
lrn.load_checkpoint('best')
lrn.evaluate()
lrn.scheduler.state_dict()
# Unfreeze the whole network for full fine-tuning.
for param in model.parameters():
    param.requires_grad = True
# Second run: single site, lower LR, cosine-annealed schedule.
cfg = Config(
    noise_nsr_dbs = [30],
    sites=['SSW'],
    use_neptune=True,
    log_batch_metrics=True,
    n_epochs=100,
    bs=32,
    lr=1e-4,
    model='resnet18',
    scheduler='torch.optim.lr_scheduler.CosineAnnealingLR'
)
lrn = Learner("resnet18-unfrozen", cfg, model)
lrn.learn()
lrn.evaluate()
lrn.load_checkpoint('best')
lrn.evaluate()
# Resume from the latest checkpoint before the third run.
lrn.load_checkpoint('latest')
lrn.scheduler.state_dict()
# Third run: slightly different noise level, even lower LR.
cfg = Config(
    noise_nsr_dbs = [35],
    sites=['SSW'],
    use_neptune=True,
    log_batch_metrics=True,
    n_epochs=100,
    bs=32,
    lr=1e-5,
    model='resnet18',
    scheduler='torch.optim.lr_scheduler.CosineAnnealingLR'
)
lrn = Learner("resnet18-unfrozen", cfg, model)
lrn.learn()
| nbs/botkop/014-resnet18-bag-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:balance] *
# language: python
# name: conda-env-balance-py
# ---
import cvxpy as cp
import numpy as np
import cmath
import hsbalance as hs
import matplotlib.pyplot as plt
# %matplotlib widget
# # Introduction
#
# This notebook discusses how the `hsbalance` package handles the ill-conditioned planes problem
# Darlow discussed in his paper `<NAME>. "The identification and elimination of non-independent balance planes in influence coefficient balancing." Turbo Expo: Power for Land, Sea, and Air. Vol. 79603. American Society of Mechanical Engineers, 1982.` three test cases can be found in test cases in `/test/`
# ## Parameters
# Influence-coefficient matrix in polar "magnitude@angle" notation:
# 4 measurement points x 3 balancing planes. Note columns 2 and 3 are identical,
# which is what makes the third plane ill-conditioned.
ALPHA_math=[
    ['1.41@45', '3.61@34', '3.61@34'],
    ['3.16@72', '2.24@27', '2.24@27'],
    ['2.83@45', '5@37', '5@37'],
    ['3.16@18', '3.61@34', '4.47@27']]
# Measured initial vibration at each of the 4 points.
A_math=[
    ['3.16@72'],
    ['3.16@18'],
    ['4.12@14'],
    ['5.39@68']]
# Convert to complex numbers (cartesian) form
A = hs.convert_matrix_to_cart(A_math)
ALPHA = hs.convert_matrix_to_cart(ALPHA_math)
# A, ALPHA
# Wrap the influence coefficients and run the conditioning check.
alpha = hs.Alpha()
alpha.add(direct_matrix=ALPHA)
alpha.check()
# A warning is raised that plane[1] (the second plane, remember python starts counting from 0) is ill-conditioned.
# Ill-conditioned planes are planes that depend algebraically on other planes, meaning an ill-conditioned plane carries no new information to the system. When solving (especially with the least squares method) the system tends to compensate by adding excessive masses at these planes.
# ## Solving with Least squares:
# Solve the full (ill-conditioned) system with ordinary least squares.
model_LeastSquares = hs.LeastSquares(A, alpha, name='Least_squares') # Instantiate least-squares model
W_LeastSquares = model_LeastSquares.solve() # solve for correction weights
hs.convert_matrix_to_math(W_LeastSquares)
residuals_LeastSquares = model_LeastSquares.expected_residual_vibration()
hs.convert_matrix_to_math(residuals_LeastSquares) # Expected residual vibrations
# Root mean square error:
rmse_LeastSquares = model_LeastSquares.rmse()
rmse_LeastSquares
# ### Discussion
# The second plane is ill-conditioned as seen from the check, caused the second and third planes to increase the masses excessively.
# Re-run the check, this time removing the ill-conditioned plane from alpha.
alpha.check(ill_condition_remove=True)
# Setting ill_condition_remove to True removes the second plane (ill-conditioned).
alpha.value
# ## Solving with Least squares:
# Re-solve on the reduced (well-conditioned) system.
model_LeastSquares = hs.LeastSquares(A, alpha, name='Least_squares') # Instantiate least-squares model
W_LeastSquares = model_LeastSquares.solve() # solve for correction weights
hs.convert_matrix_to_math(W_LeastSquares)
# Remember those are weights for the first and third planes, as plane 2 has been removed by check.
residuals_LeastSquares = model_LeastSquares.expected_residual_vibration()
hs.convert_matrix_to_math(residuals_LeastSquares) # Expected residual vibrations
# Root mean square error:
rmse_LeastSquares = model_LeastSquares.rmse()
rmse_LeastSquares
# ### Discussion
# by removing the second the correction weights at plane 3 has reduced to less than 4 times (from 5.14 kg to 1.14 kg). This causes a rise of RMSE and residual vibration on the other hand.
# It is left to the engineer's judgment whether to remove (or constrain) the ill-conditioned planes or leave them in to get the best RMSE possible.
# The target of this package is to give the engineer the best possible consulting service.
| Ill_conditioned_planes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Audio Event Classifier with Deep Learning
#
# Build a CNN sound classifier using melspectograms from ESC-50 data. Refer to *save_melspectorgrams.ipynb* for feature extraction.
# %matplotlib inline
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Input
from keras.applications.vgg16 import preprocess_input
from keras.utils import multi_gpu_model
import numpy as np
import json
import pickle
# # Set parameter values
# +
batch_size = 16 # initially 400; reduced for quicker experimentation
epochs = 15 # initial run used 200 epochs
# dimensions of our images (VGG16 expects 224x224 RGB).
img_width, img_height = 224, 224
input_tensor = Input(shape=(224,224,3))
nb_training_samples = 256 # full set was 1600
nb_validation_samples = 64 # full set was 400
# -
# # Configure training and validation data generators
#
# Provide paths to training and testing set directores
# +
# training generator configuration
training_data_dir = '/home/amnayak/Downloads/ESC-50-master/melspectrograms/training' #please change the path
training_datagen = image.ImageDataGenerator(
rescale=1./255)
training_generator = training_datagen.flow_from_directory(
training_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size)
# validation generator configuration
validation_data_dir ='/home/amnayak/Downloads/ESC-50-master/melspectrograms/testing/' #please change the path
validation_datagen = image.ImageDataGenerator(
rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size)
# -
# # Load base model
base_model = VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
print('Model loaded.')
base_model.summary()
# # Build top model
base_model.output_shape[1:]
# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(2, activation='softmax'))
top_model.summary()
# # Combine base model with top model
# top_model.load_weights('bootlneck_fc_model.h5')
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
model.summary()
# # Configure model training
num_layers_to_freeze = 15
# +
from keras import metrics, optimizers
def top_5_accuracy(y_true, y_pred):
return metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)
for layer in model.layers[:num_layers_to_freeze]:
layer.trainable = False
# use nesterov accelrated gradient descent ??
# optimizer=optimizers.SGD(lr=1e-4, momentum=0.9, decay=1e-6, nesterov=True)
'''model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
loss='categorical_crossentropy',
metrics=['accuracy', top_5_accuracy])'''
model.compile(optimizer=optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False,),
loss='categorical_crossentropy',
metrics=['accuracy', top_5_accuracy])
# parallel_model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
# loss='categorical_crossentropy',
# metrics=['accuracy', top_5_accuracy])
# serialize model to JSON
model_json = model.to_json()
model_filename = "vgg16_model_{}_frozen_layers.json".format(num_layers_to_freeze)
with open(model_filename, "w") as json_file:
json_file.write(model_json)
# -
# # Fine-tune the model
# +
from keras.callbacks import ModelCheckpoint, TensorBoard
from time import time
tensorboard = TensorBoard(log_dir="logs/layers_frozen_{}".format(num_layers_to_freeze))
# checkpoint
filepath="esc50_vgg16_stft_weights_train_last_2_base_layers.best.hdf5"
best_model_checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [best_model_checkpoint, tensorboard]
# parallel_model.fit_generator(
# training_generator,
# steps_per_epoch=nb_training_samples/batch_size,
# epochs=epochs,
# validation_data=validation_generator,
# validation_steps=nb_validation_samples/batch_size,
# callbacks=callbacks_list)
model.fit_generator(
training_generator,
steps_per_epoch=nb_training_samples/batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples/batch_size,
callbacks=callbacks_list)
# parallel_model.fit_generator(
# training_generator,
# samples_per_epoch=nb_training_samples,
# epochs=epochs,
# validation_data=validation_generator,
# validation_steps=nb_validation_samples/batch_size,)
# nb_val_samples=nb_validation_samples)
# -
# # Get top k predictions for selected test files
nb_validation_samples/batch_size
training_generator.image_shape
def get_top_k_predictions(preds, label_map, k=5, print_flag=False):
    """Return the names of the k highest-scoring classes in `preds`, best first.

    `preds` is a 1-D array of class scores; `label_map` maps class name -> index
    (as produced by a Keras generator's `class_indices`).  When `print_flag` is
    true, each chosen class is printed together with its score.
    """
    index_to_name = {idx: name for name, idx in label_map.items()}
    # argsort ascending, reversed, then truncated -> indices of the top-k scores.
    top_indices = np.argsort(preds)[::-1][:k]
    chosen = []
    for idx in top_indices:
        if print_flag:
            print("{} ({})".format(index_to_name[idx], preds[idx]))
        chosen.append(index_to_name[idx])
    return chosen
label_map  # NOTE(review): `label_map` is assigned in the next cell; this cell only works after re-running. Confirm intended cell order.
# +
# Persist the class-name -> index mapping used by the generator.
label_map = (training_generator.class_indices)
json1 = json.dumps(label_map)
f = open("cough_label_map.json","w")
f.write(json1)
f.close()

# Sanity-check the model on a single test image, using the same
# preprocessing as the generators (224x224, rescale to [0, 1]).
img_path = '/home/amnayak/Downloads/ESC-50-master/melspectrograms/testing/cough/2-108017-A-24.png'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)* 1./255

preds = model.predict(x)[0]
print(model.predict(x)[0])
print(get_top_k_predictions(preds, label_map, k=3))
# -
# # Calculate and plot confusion matrix
# +
import itertools
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as a colored image with per-cell values.

    When `normalize` is true, each row is rescaled to fractions of its total
    before plotting.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.figure(figsize=(10,10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=90)
    plt.yticks(positions, classes)

    cell_fmt = '.2f' if normalize else 'd'
    # Cells darker than half the maximum get white text for contrast.
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            text_color = "white" if cm[row, col] > cutoff else "black"
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color=text_color)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# +
import os
from sklearn.metrics import confusion_matrix

# Run the model over every test image, one class directory at a time,
# collecting true labels and top-1 predictions.
testing_dir = '/home/amnayak/Downloads/ESC-50-master/melspectrograms/testing/'
y_true = []
y_pred = []
xyz=0  # running count of predicted images, for progress output
for label in label_map.keys():
    file_list = os.listdir(testing_dir + label)
    for file_name in file_list:
        img_path = testing_dir + label + '/' + file_name
        print(xyz,' images predicted...')
        xyz+=1
        # Same preprocessing as the generators: 224x224, rescale to [0, 1].
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)* 1./255
        preds = model.predict(x)[0]
        y_true.append(label)
        # k=1 -> keep only the single best class name.
        y_pred.append(get_top_k_predictions(preds, label_map, k=1)[0])

cm = confusion_matrix(y_true, y_pred)
# confusion_matrix orders string labels sorted(), matching the tick labels below.
plot_confusion_matrix(cm, sorted(label_map.keys()), normalize=True)
# -
# NOTE(review): pickling a Keras model is fragile across library versions;
# model.save(...) is the supported persistence route -- confirm before relying
# on this file.
with open('noobs_model.pkl','wb') as file:
    pickle.dump(model,file)
| Noob_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/objectc/CNN-with-TensorFlow2.0-and-Keras/blob/master/reduce_overfitting_CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="03nhHVp_eMGS" colab_type="code" colab={}
# %matplotlib inline
# + id="Qve6MvyBiHJj" colab_type="code" colab={}
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import tensorflow as tf
from tensorflow import keras
# + [markdown] id="BHpPF2lQjT3_" colab_type="text"
# Load CIFAR10 using keras.datasets.cifar10.load_data()
# + id="W_Dwit3RjQ7A" colab_type="code" colab={}
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.cifar10.load_data()
# + [markdown] id="whY73Qe9jokg" colab_type="text"
# Create traning set, validation set, test set
# + id="opCOY1W9jge1" colab_type="code" colab={}
# Scale pixel values from [0, 255] down to [0, 1].
X_train_full = X_train_full/255.0
X_test = X_test/255.0
# BUG FIX: sklearn.utils.shuffle is NOT in-place -- it returns shuffled
# copies.  The original call discarded the return value, so the data was
# never actually shuffled before the train/validation split below.
X_train_full, y_train_full = sklearn.utils.shuffle(X_train_full, y_train_full)
# Hold out the last 5000 examples for validation.
X_train = X_train_full[0:-5000]
y_train = y_train_full[0:-5000]
X_valid = X_train_full[-5000:]
y_valid = y_train_full[-5000:]
# + id="Af7fP59rj-a6" colab_type="code" colab={}
classes = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
# + id="6x04koVNkFTC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0fc5aa7b-9268-4df3-8e29-618281fd28b6"
X_train[0].shape
# + id="wdFql6xojzcN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1823} outputId="38299b75-41cf-4b7c-a370-c9af2bb584bb"
plt.figure(figsize=(32, 32), facecolor='white')
n_rows, n_cols = 5, 4
for row in range(n_rows):
for col in range(n_cols):
i = row * n_cols + col
ax = plt.subplot(n_rows, n_cols, i + 1)
plt.axis("off")
plt.imshow(X_train[i])
title = classes[np.squeeze(y_train[i])]
ax.set_title(title, color = 'black')
# + id="ellhvo_PgBXl" colab_type="code" colab={}
def reset_weights(model):
    """Re-run each layer's kernel initializer so training restarts from fresh weights.

    NOTE(review): relies on keras.backend.get_session(), which exists only with
    a TF1-style backend -- confirm the runtime before reuse.  Only layers that
    have a kernel_initializer attribute (Dense/Conv) are reset; bias variables
    are left untouched.
    """
    session = keras.backend.get_session()
    for layer in model.layers:
        if hasattr(layer, 'kernel_initializer'):
            layer.kernel.initializer.run(session=session)
# + id="BbldfTiHiiGm" colab_type="code" colab={}
def plot_history(history):
    """Show the accuracy curves, then the loss curves, for train and validation."""
    def _show_pair(train_key, val_key, title, ylabel):
        # One figure per metric: train curve first, validation curve second.
        plt.plot(history.history[train_key])
        plt.plot(history.history[val_key])
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Validate'], loc='upper left')
        plt.show()

    _show_pair('sparse_categorical_accuracy', 'val_sparse_categorical_accuracy',
               'Model accuracy', 'Accuracy')
    _show_pair('loss', 'val_loss', 'Model loss', 'Loss')
# + id="DPC0dIo0j3ol" colab_type="code" colab={}
model = keras.models.Sequential(
[
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu, input_shape=(32, 32, 3)),
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(len(classes), activation=keras.activations.softmax)
]
)
# + id="SoyjYY2FqtuC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 442} outputId="dd7eec66-6324-45af-d69d-ccefc618d44c"
model.summary()
# + id="3XLSOscRl4p8" colab_type="code" colab={}
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.sparse_categorical_crossentropy,
metrics=[keras.metrics.sparse_categorical_accuracy])
# + id="f7F2L5yhgHL7" colab_type="code" colab={}
reset_weights(model)
# + id="A-dRxWRal7iy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 714} outputId="c8dd8811-1815-4c2e-b38c-3063d1a9867f"
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
# + id="OGbXnElul-Bs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="c1472408-f409-4de3-afaf-b406d0ef3266"
plot_history(history)
# + [markdown] id="GlBPEyUTpnv1" colab_type="text"
# The accuracy and loss plots above indicate that the model is overfitting: the training metrics keep improving while validation performance gets stuck. We therefore need some methods to overcome overfitting. Here we use 5 of them:
#
#
# 1. Early Stopping
# 2. Data Augmentation
# 3. Regularization
# 4. Batch Normalization
# 5. Dropout
#
#
# + [markdown] id="KXaxW8T4pTJP" colab_type="text"
# ## Early Stopping
# In Keras, you can implement early stopping easily by passing an EarlyStopping instance to the training callbacks. You can also customize your own callback.
# + id="icvZcZghpZhV" colab_type="code" colab={}
reset_weights(model)
# + id="IuXg1_r5poU5" colab_type="code" colab={}
early_stopping_monitor = keras.callbacks.EarlyStopping(patience=2)
# + id="MTZjcJ4upytc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="ed89d1ec-9001-4697-a67c-4e600efd6335"
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid), callbacks=[early_stopping_monitor])
# + id="OV1RLtQyqdOP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="f0d40b1e-732d-49b1-a8c4-4d5f1b75b868"
plot_history(history)
# + [markdown] id="UR_m8bqQqpLR" colab_type="text"
# ## Data Augmentation
# + id="8J2qDyHHqkrc" colab_type="code" colab={}
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
# + id="zQyYN1W7q8g1" colab_type="code" colab={}
datagen = ImageDataGenerator(
rotation_range = 10,
width_shift_range = 0.1,
height_shift_range = 0.1,
shear_range = 0.1,
zoom_range = 0.1,
horizontal_flip = True,
fill_mode = 'nearest'
)
# + id="yJwvEnBstrvs" colab_type="code" colab={}
X_train_gen = datagen.flow(X_train, y_train)
# + id="A79w9_7ntx18" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1037} outputId="c3a8fb93-d553-4465-9570-2da163cfbfb1"
history = model.fit(X_train_gen, epochs=20, validation_data=(X_valid, y_valid))
# + id="ov04VTQJv7Xt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="960d40fd-8d75-4be3-99e9-3708cde4333f"
plot_history(history)
# + [markdown] id="4wxIPxqRwPSJ" colab_type="text"
# ## Batch Normalization
# + id="Mncr_73ApmuO" colab_type="code" colab={}
model = keras.models.Sequential(
[
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu, input_shape=(32, 32, 3)),
keras.layers.BatchNormalization(),
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.BatchNormalization(),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.BatchNormalization(),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.BatchNormalization(),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(len(classes), activation=keras.activations.softmax)
]
)
# + id="WPPLNXe6xIhi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 578} outputId="64211385-ebc1-45f6-a669-bd82536eb091"
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.sparse_categorical_crossentropy,
metrics=[keras.metrics.sparse_categorical_accuracy])
model.summary()
# + id="vwwZlY90w4VX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 714} outputId="e2eaea62-c0da-4267-a1ab-38ffd3d953ed"
reset_weights(model)
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
# + id="RtfHFPRWyJUx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="45d39aeb-99c2-454f-c2d5-3bc2f9fb287f"
plot_history(history)
# + [markdown] id="UoH_g67J11pO" colab_type="text"
# ## Dropout
# Dropout randomly disables a fraction of units during training, which prevents co-adaptation of features and is one of the most widely used regularization techniques.
# + id="_V53KJj2zKGk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} outputId="1b2a2476-54cb-4eb0-d2e1-d87354176fad"
model = keras.models.Sequential(
[
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu, input_shape=(32, 32, 3)),
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Dropout(0.2),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Dropout(0.2),
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(len(classes), activation=keras.activations.softmax)
]
)
# + id="t3mO4tBH2kZJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 544} outputId="2ba5004f-3639-4c3e-ce7e-ad0a8c7551ea"
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.sparse_categorical_crossentropy,
metrics=[keras.metrics.sparse_categorical_accuracy])
model.summary()
# + id="xRpNpTwBq3PZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 714} outputId="452c7707-dc60-4808-b5a6-01e51330bd4d"
reset_weights(model)
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
# + id="_tmZP9I8434v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="51a57311-20bd-4581-8415-1db92aad4412"
plot_history(history)
# + id="dEHZK65aIs26" colab_type="code" colab={}
model = keras.models.Sequential(
[
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu, input_shape=(32, 32, 3), kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.Conv2D(32, (3,3), padding='same', activation=keras.activations.relu, kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu, kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.Conv2D(64, (3,3), padding='same', activation=keras.activations.relu, kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.MaxPooling2D(2, 2),
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)),
keras.layers.Dense(len(classes), activation=keras.activations.softmax)
]
)
# + id="D2Eb06nKOxmc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 442} outputId="4cec7b75-e3c9-4614-9446-ec5a59fe9dca"
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.sparse_categorical_crossentropy,
metrics=[keras.metrics.sparse_categorical_accuracy])
model.summary()
# + id="hIUjsOSoOVOj" colab_type="code" colab={}
reset_weights(model)
# + id="cpuJfV09OuD1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1394} outputId="e32793f4-0513-4eb2-bcfb-9d8d29494807"
history = model.fit(X_train, y_train, epochs=40, validation_data=(X_valid, y_valid))
# + id="BlzFuuLXQdbg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="7514d92b-02ad-444a-f1da-138651c5bdc5"
plot_history(history)
# + id="eLKz2FGgaoXE" colab_type="code" colab={}
| reduce_overfitting_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="nIh7bl4NsDe2"
# # Sphinx
# + id="cMhgW-nzdD5H" executionInfo={"status": "ok", "timestamp": 1621143185436, "user_tz": -330, "elapsed": 9326, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ec3510fe-9551-4961-dd37-28671982e04a" colab={"base_uri": "https://localhost:8080/", "height": 887}
# !pip install -U Sphinx
# + id="UKF95PEurHqy"
# import sys
# sys.path.append("/content/drive/MyDrive")
# import mykeys
# project_name = "4CED0278"
# path = "/content/" + project_name
# # !mkdir "{path}"
# # %cd "{path}"
# import sys
# sys.path.append(path)
# # !git config --global user.email "<email>"
# # !git config --global user.name "sparsh-ai"
# # !git init
# # !git remote add origin2 https://"{mykeys.git_token}":x-oauth-basic@github.com/sparsh-ai/"{project_name}".git
# # !git pull origin2 master
# + colab={"base_uri": "https://localhost:8080/"} id="Vlmc5BV3tHGs" executionInfo={"status": "ok", "timestamp": 1610473730740, "user_tz": -330, "elapsed": 119372, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e46dc2c2-3f06-47f5-ef02-df8726d33b7b"
# !sphinx-quickstart
# + id="EsYbN5n2t4QC"
# !pip install -q sphinx-rtd-theme
# + colab={"base_uri": "https://localhost:8080/"} id="BA0krP7CuO-A" executionInfo={"status": "ok", "timestamp": 1610480876265, "user_tz": -330, "elapsed": 2229, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="aafc42d6-271b-4590-cdd4-c5b7933ec062"
# %%writefile conf.py
project = 'MovieLens Recommender System'
copyright = '2021, <NAME>'
author = '<NAME>'
release = '1.0.0'
extensions = ['sphinx.ext.autodoc',
'nbsphinx',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'IPython.sphinxext.ipython_console_highlighting',
'rst2pdf.pdfbuilder'
]
source_suffix = ['.rst', '.ipynb']
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
master_doc = 'index'
pdf_documents = [('index', u'rst2pdf', u'Sample rst2pdf doc', u'<NAME>'),]
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# + id="1wZvoYtkufpV"
# !pip install sphinx
# !pip install nbconvert
# !pip install pandoc
# !pip install latex
# !pip install nbsphinx
# + id="WxYGwS2ru3tO"
# !cp "/content/drive/MyDrive/Colab Notebooks/tutorial_temp.ipynb" .
# + id="N4ijhJmK6SwO"
# # !mkdir documentation
# !rm -r ./documentation
# + colab={"base_uri": "https://localhost:8080/"} id="YjVl0I4EwHcu" executionInfo={"status": "ok", "timestamp": 1610474487407, "user_tz": -330, "elapsed": 2350, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bdcfc001-60b4-4d9b-a718-9fcd2e7d5c08"
# %%writefile index.rst
.. MovieLens Recommender System documentation master file, created by
sphinx-quickstart on Tue Jan 12 17:48:52 2021.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to MovieLens Recommender System's documentation!
========================================================
.. toctree::
:maxdepth: 2
:caption: Contents:
tutorial_temp
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
# + colab={"base_uri": "https://localhost:8080/"} id="pecM_NGywj-8" executionInfo={"status": "ok", "timestamp": 1610480484655, "user_tz": -330, "elapsed": 1246, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="13a9f355-e387-48f0-a229-de24e366cf46"
# %%writefile readthedocs.yml
# python version
python:
version: 3.8
method: pip
install:
- requirements: requirements.txt
# build a PDF
formats:
- none
sphinx:
configuration: conf.py
# + colab={"base_uri": "https://localhost:8080/"} id="WnQb7X7HHgKJ" executionInfo={"status": "ok", "timestamp": 1610480550730, "user_tz": -330, "elapsed": 1830, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cf41ca6c-2020-4a8c-8f12-7cdaf3754f5f"
# %%writefile requirements.txt
python==3.8
pandoc
nbformat
jupyter_client
ipython
nbconvert
sphinx>=1.5.1
ipykernel
sphinx_rtd_theme
nbsphinx
# + id="67BgFQa3xmkR"
# # !sphinx-build -b pdf . build/pdf
# # !make html
# + colab={"base_uri": "https://localhost:8080/"} id="NUjA3E7kykTr" executionInfo={"status": "ok", "timestamp": 1610480561416, "user_tz": -330, "elapsed": 5003, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2a68730f-3960-4c9c-cc8f-eca6068fe50e"
# # !git checkout -b sphinx
# !git add .
# !git commit -m 'commit'
# !git push origin2 sphinx
# + colab={"base_uri": "https://localhost:8080/"} id="d55898QQ31aL" executionInfo={"status": "ok", "timestamp": 1610480819345, "user_tz": -330, "elapsed": 6444, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="282f6ae9-edbe-4ec8-878e-7b318186eaf9"
# !pip install -q rst2pdf
# + id="PXyQs2DaIwL0"
| _notebooks/2022-01-15-sphinx.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Code for **"Flash/No Flash"** figure.
# # Import libs
# +
from __future__ import print_function
import matplotlib.pyplot as plt
# %matplotlib inline
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
from models import *
import torch
import torch.optim
from torch.autograd import Variable
from utils.denoising_utils import *
from utils.sr_utils import load_LR_HR_imgs_sr
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
dtype = torch.cuda.FloatTensor
imsize =-1
PLOT = True
# -
# # Load image
# +
imgs = load_LR_HR_imgs_sr('data/flash_no_flash/cave01_00_flash.jpg', -1, 1, enforse_div32='CROP')
img_flash = load_LR_HR_imgs_sr('data/flash_no_flash/cave01_00_flash.jpg', -1, 1, enforse_div32='CROP')['HR_pil']
img_flash_np = pil_to_np(img_flash)
img_noflash = load_LR_HR_imgs_sr('data/flash_no_flash/cave01_01_noflash.jpg', -1, 1, enforse_div32='CROP')['HR_pil']
img_noflash_np = pil_to_np(img_noflash)
g = plot_image_grid([img_flash_np, img_noflash_np],3,12)
# -
# # Setup
# +
pad = 'reflection'
OPT_OVER = 'net'
num_iter = 601
LR = 0.1
OPTIMIZER = 'adam'
reg_noise_std = 0.0
show_every = 50
figsize = 6
# We will use flash image as input
input_depth = 3
net_input =np_to_var(img_flash_np).type(dtype)
# +
net = skip(input_depth, 3, num_channels_down = [128, 128, 128, 128, 128],
num_channels_up = [128, 128, 128, 128, 128],
num_channels_skip = [4, 4, 4, 4, 4],
upsample_mode=['nearest', 'nearest', 'bilinear', 'bilinear', 'bilinear'],
need_sigmoid=True, need_bias=True, pad=pad).type(dtype)
mse = torch.nn.MSELoss().type(dtype)
img_flash_var = np_to_var(img_flash_np).type(dtype)
img_noflash_var = np_to_var(img_noflash_np).type(dtype)
# -
# # Optimize
# +
net_input_saved = net_input.data.clone()
noise = net_input.data.clone()

i = 0
def closure():
    """One optimization step: forward the net, take the MSE against the
    no-flash target, backprop, and periodically plot the reconstruction."""
    global i

    # reg_noise_std is set to 0.0 above, so this perturbation branch is inactive here.
    if reg_noise_std > 0:
        net_input.data = net_input_saved + (noise.normal_() * reg_noise_std)

    out = net(net_input)
    total_loss = mse(out, img_noflash_var)
    total_loss.backward()

    # NOTE(review): total_loss.data[0] is the pre-0.4 PyTorch scalar idiom;
    # this notebook targets the old Variable-based torch API.
    print ('Iteration %05d Loss %f' % (i, total_loss.data[0]), '\r', end='')
    if PLOT and i % show_every == 0:
        out_np = var_to_np(out)
        plot_image_grid([np.clip(out_np, 0, 1)], factor=figsize, nrow=1)

    i += 1
    return total_loss

p = get_params(OPT_OVER, net, net_input)
optimize(OPTIMIZER, p, closure, LR, num_iter)
# -
# Sometimes the process stucks at reddish image, just run the code from the top one more time.
# Final reconstruction next to the no-flash target.
out_np = var_to_np(net(net_input))
# BUG FIX: the original referenced `img_np`, which is never defined in this
# script; the intended comparison image is the no-flash input `img_noflash_np`.
q = plot_image_grid([np.clip(out_np, 0, 1), img_noflash_np], factor=13);
| 0-newbooks/deep-image-prior/flash-no-flash.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
using DifferentialEquations
using Plots
# Consider the simple reaction:
#
# \begin{align}
# A &\longrightarrow B\\
# \end{align}
#
# The reaction is elementary, and B acts as a solvent for A and the transition state. We will consider it in a few different solvent environments.
"""
    batch(du, u, p, t)

In-place ODE right-hand side for the elementary reaction A -> B in a
non-ideal binary mixture (two-suffix Margules activity model), with B
acting as the solvent.

`p` is a Dict with:
  - "MAR": Margules interaction parameters [A in B, B in A, TS1 in B]
  - "PAR": (k10, K10, V) = rate constant, equilibrium constant, volume

Mutates `du` (mole balances for A and B) and also returns `(du, r1)` so the
rate can be inspected after the solve; DifferentialEquations.jl only uses
the mutated `du`.
"""
function batch(du, u, p, t)
    MAR = p["MAR"]
    PAR = p["PAR"]
    k10, K10, V = PAR
    # Moles of A and B; column indexing because u may be a 1x2 row matrix.
    NA = u[:,1]
    NB = u[:,2]
    NT = NA + NB
    # Mole fractions.
    XA = NA./NT
    XB = NB./NT
    #For A in B
    A12A = MAR[1]
    #For B in A
    A12B = MAR[2]
    #For Transition State 1 in B
    A12TS1 = MAR[3]
    # Two-suffix Margules activity coefficients.
    gammaA = exp.(XB.^2*A12A)
    gammaB = exp.(XA.^2*A12B)
    gammaTS1 = exp.(XB.^2*A12TS1)
    # z1 measures the approach to equilibrium (z1 -> 1 at equilibrium).
    z1 = 1/K10*gammaB./gammaA.*XB./XA
    # Transition-state-theory rate with activity-coefficient corrections.
    r1 = k10*gammaA./gammaTS1.*XA.*(1 .- z1).*NT/V
    RA = -r1[1]
    RB = r1[1]
    du[1] = RA*V
    du[2] = RB*V
    return du, r1
end
# +
# Rate constant, equilibrium constant, and volume (dimensionless basis).
k10 = 1
K10 = 1
V = 1
# Initial charge: pure A.
NA0 = 100
NB0 = 0.0
var0 = [NA0 NB0]
# Integration time span.
span = (0.0, 25.0);
# +
#Solvate transition state relative to reactants
MARSET1 = zeros(3,3)
MARSET1[:,1] = [0.0, 0.0, 0.0] #no solvation
MARSET1[:,2] = [0.0, 0.0, 2.0] #destabilize TS1
MARSET1[:,3] = [0.0, 0.0, -2.0] #stabilize TS1
tcoar = range(0.0, stop = maximum(span), length = 20)
tfine = range(0.0, stop = maximum(span), length = 1000)
Xout = zeros(length(tfine), size(MARSET1, 2))
r1out = zeros(length(tfine), size(MARSET1, 2))
NAout = zeros(length(tfine), size(MARSET1, 2))
NBout = zeros(length(tfine), size(MARSET1, 2))
for i = 1:size(MARSET1, 2)
p0 = Dict("MAR" => MARSET1[:,i], "PAR" => [k10, K10, V])
prob = ODEProblem(batch, var0, span, p0)
sol = solve(prob, Rodas5(), abstol = 1e-18, reltol = 1e-18)
solf = sol(tfine)
NA = solf[1,:]
NB = solf[2,:]
NT = NA + NB
ext = NB/NA0
dut, rt1 = batch([0., 0.], [NA NB], p0, tfine)
Xout[:,i] = ext
r1out[:,i] = rt1
NAout[:,i] = NA
NBout[:,i] = NB
end
Xout[Xout .>= 0.9999] .= 0.9999
plt1 = plot(tfine/maximum(tfine), Xout, xlabel = "time", ylabel = "Conversion", labels = ["ideal" "destabilize TS" "stabilize TS"], legend = :bottomright)
plt1 =scatter!(plt1, tcoar/maximum(tcoar), 1 .- exp.(-k10*tcoar))
plt2 = plot(tfine/maximum(tfine), r1out, xlabel = "time", ylabel = "rate", labels = ["ideal" "destabilize TS" "stabilize TS"], legend = :topright)
plt3 = plot(tfine/maximum(tfine), log.(1 .- Xout))
plt4 = plot(Xout, r1out)
r1out[r1out .<= 0.000001] .= 0.000001
plt5 = plot(log.(1 .- Xout), log.(r1out), legend = nothing)
# plt2 = plot!(tfine[2:end]/maximum(tfine), r1out[2:end,:], labels = ["r1" nothing nothing])
# plt3 = plot(Xout, r2out, xlabel = "extent", ylabel = "rate", labels = ["r1" nothing nothing], legend = :topright)
# # plt3 = plot!(e2out, r2out, labels = ["r2" nothing nothing])
# plt4 = plot(Xout, z1out, xlabel = "extent", ylabel = "z", labels = ["z1" nothing nothing], legend = :topright)
# plt4 = plot!(Xout, z2out, labels = ["z2" nothing nothing])
# plt5 = plot(tfine/maximum(tfine), z1out, xlabel = "time", ylabel = "z", labels = ["z1" nothing nothing], legend = :topright)
# plt5 = plot!(tfine/maximum(tfine), z2out, labels = ["z2" nothing nothing])
display(plt1)
display(plt2)
display(plt3)
display(plt4)
display(plt5)
# plot(tfine/maximum(tfine), cov)
| 2021_JCAT_DeDonder_Solvents/Case Study 1c.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # jobshop_ft06_distance_sat
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/examples/jobshop_ft06_distance_sat.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/python/jobshop_ft06_distance_sat.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This model implements a variation of the ft06 jobshop.
A jobshop is a standard scheduling problem when you must sequence a
series of tasks on a set of machines. Each job contains one task per
machine. The order of execution and the length of each job on each
machine is task dependent.
The objective is to minimize the maximum completion time of all
jobs. This is called the makespan.
This variation introduces a minimum distance between all the jobs on each
machine.
"""
import collections
from ortools.sat.python import cp_model
def distance_between_jobs(x, y):
    """Return the (non-negative) distance between tasks of job x and tasks of job y."""
    return x - y if x >= y else y - x
def jobshop_ft06_distance():
    """Solves the ft06 jobshop with distances between tasks.

    Builds a CP-SAT model with one interval variable per (job, task),
    a no-overlap constraint plus a circuit constraint per machine (the
    circuit encodes the processing order so a pairwise minimum distance
    can be enforced between consecutive jobs), within-job precedences,
    and a minimised makespan objective.
    """
    # Creates the model.
    model = cp_model.CpModel()

    machines_count = 6
    jobs_count = 6
    all_machines = range(0, machines_count)
    all_jobs = range(0, jobs_count)

    # durations[i][j] / machines[i][j]: processing time and machine id of
    # the j-th task of job i (the classic ft06 instance data).
    durations = [[1, 3, 6, 7, 3, 6], [8, 5, 10, 10, 10, 4], [5, 4, 8, 9, 1, 7],
                 [5, 5, 5, 3, 8, 9], [9, 3, 5, 4, 3, 1], [3, 3, 9, 10, 4, 1]]
    machines = [[2, 0, 1, 3, 5, 4], [1, 2, 4, 5, 0, 3], [2, 3, 5, 0, 1, 4],
                [1, 0, 2, 3, 4, 5], [2, 1, 4, 5, 0, 3], [1, 3, 5, 0, 4, 2]]

    # Computes horizon statically (upper bound on every start/end time).
    horizon = 150

    task_type = collections.namedtuple('task_type', 'start end interval')

    # Creates jobs: one start/end/interval triple per (job, task).
    all_tasks = {}
    for i in all_jobs:
        for j in all_machines:
            start_var = model.NewIntVar(0, horizon, 'start_%i_%i' % (i, j))
            duration = durations[i][j]
            end_var = model.NewIntVar(0, horizon, 'end_%i_%i' % (i, j))
            interval_var = model.NewIntervalVar(start_var, duration, end_var,
                                                'interval_%i_%i' % (i, j))
            all_tasks[(i, j)] = task_type(start=start_var,
                                          end=end_var,
                                          interval=interval_var)

    # Create disjunctive constraints: one per machine.
    for i in all_machines:
        job_intervals = []
        job_indices = []
        job_starts = []
        job_ends = []
        # Collect every task that runs on machine i (one per job in ft06).
        for j in all_jobs:
            for k in all_machines:
                if machines[j][k] == i:
                    job_intervals.append(all_tasks[(j, k)].interval)
                    job_indices.append(j)
                    job_starts.append(all_tasks[(j, k)].start)
                    job_ends.append(all_tasks[(j, k)].end)
        model.AddNoOverlap(job_intervals)

        # Encode the processing order on this machine as a circuit over
        # nodes {0 (dummy start/end), 1..num_tasks}; true arc literals
        # define the sequence.
        arcs = []
        for j1 in range(len(job_intervals)):
            # Initial arc from the dummy node (0) to a task.
            start_lit = model.NewBoolVar('%i is first job' % j1)
            arcs.append([0, j1 + 1, start_lit])
            # Final arc from an arc to the dummy node.
            arcs.append([j1 + 1, 0, model.NewBoolVar('%i is last job' % j1)])
            for j2 in range(len(job_intervals)):
                if j1 == j2:
                    continue
                lit = model.NewBoolVar('%i follows %i' % (j2, j1))
                arcs.append([j1 + 1, j2 + 1, lit])
                # We add the reified precedence to link the literal with the
                # times of the two tasks.
                # NOTE(review): the distance is computed from the positions
                # j1/j2 in this machine's task list, not from
                # job_indices[j1]/job_indices[j2] (which are collected but
                # otherwise unused). For ft06 the two coincide because the
                # tasks are gathered in job order — confirm if reused.
                min_distance = distance_between_jobs(j1, j2)
                model.Add(job_starts[j2] >= job_ends[j1] +
                          min_distance).OnlyEnforceIf(lit)
        model.AddCircuit(arcs)

    # Precedences inside a job: task j+1 starts after task j ends.
    for i in all_jobs:
        for j in range(0, machines_count - 1):
            model.Add(all_tasks[(i, j + 1)].start >= all_tasks[(i, j)].end)

    # Makespan objective: maximum of the last task's end over all jobs.
    obj_var = model.NewIntVar(0, horizon, 'makespan')
    model.AddMaxEquality(
        obj_var, [all_tasks[(i, machines_count - 1)].end for i in all_jobs])
    model.Minimize(obj_var)

    # Solve model.
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    # Output solution.
    if status == cp_model.OPTIMAL:
        print('Optimal makespan: %i' % solver.ObjectiveValue())


jobshop_ft06_distance()
| examples/notebook/examples/jobshop_ft06_distance_sat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Compile Tensorflow Object Detection model for Jetson Nano
# ## Download Model
# !mkdir -p posenet
# !wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite -O posenet/posenet_mobile.tflite
# +
# Local path of the downloaded TFLite posenet model and the name used for
# the Neo compilation artifacts below.
model_filename = 'posenet/posenet_mobile.tflite'
model_name = 'posenet-mobile'

# Compress model into .tar.gz so SageMaker Neo can use it
model_tar = model_name + '.tar.gz'
# !tar -czf {model_tar} {model_filename}
# -
# ## Upload Model to S3
import sagemaker
from sagemaker import get_execution_role
import boto3
import botocore
import json
# SageMaker execution role and session used for S3 uploads and API calls.
role = get_execution_role()
sess = sagemaker.Session()
region = 'ap-northeast-1'

# +
# S3 bucket and folders for saving model artifacts.
# Feel free to specify different bucket/folders here if you wish.
bucket = '<your bucket here>'
folder = 'edge-cv-datalake-demo/models/pose-estimator'
training_output_folder = folder + '/training-output'
compilation_output_sub_folder = folder + '/compilation-output'
# NOTE(review): at this point the name holds only the sub-folder key; it is
# overwritten with the full s3:// URI three lines below — confusing reuse.
s3_packaging_output_location = folder + '/packaging-output'

# S3 Location to save the model artifact after compilation
s3_training_output_location = 's3://{}/{}'.format(bucket, training_output_folder)
s3_compilation_output_location = 's3://{}/{}'.format(bucket, compilation_output_sub_folder)
s3_packaging_output_location = 's3://{}/{}'.format(bucket, s3_packaging_output_location)

print(s3_compilation_output_location)
print(s3_packaging_output_location)
# -

sagemaker_client = boto3.client('sagemaker', region_name=region)

# Upload the .tar.gz model so Neo can read it from S3.
pose_estimator_s3_path = sess.upload_data(model_tar, bucket, training_output_folder)

## Update version
pose_estimator_model_version = '0.1.0'
# ## Start a Neo Compilation Job
# +
import time
def compile_model(model_type, model_path, model_framework, model_data_shape, target_device, compiler_options):
    """Launch a SageMaker Neo compilation job and block until it finishes.

    model_type - short name used to build the job name (e.g. 'pose-estimator')
    model_path - S3 URI of the .tar.gz model artifact
    model_framework - source framework (e.g. 'tflite'); upper-cased for the API
    model_data_shape - JSON string describing the model input shape(s)
    target_device - Neo target device (e.g. 'jetson_nano')
    compiler_options - JSON string of target-specific compiler options
    Returns the compilation job name; raises RuntimeError on failure.

    Relies on the module-level sagemaker_client, role,
    s3_compilation_output_location and pose_estimator_model_version.
    """
    compilation_job_name = 'edge-cv-datalake-demo-' + model_type + '-' + pose_estimator_model_version.replace('.', '-') + '-' + target_device.replace('_', '-')
    print('Compilation job for %s started' % compilation_job_name)

    create_response = sagemaker_client.create_compilation_job(
        CompilationJobName=compilation_job_name,
        RoleArn=role,
        InputConfig={
            'S3Uri': model_path,
            'DataInputConfig': model_data_shape,
            'Framework': model_framework.upper(),
        },
        OutputConfig={
            'S3OutputLocation': s3_compilation_output_location,
            'TargetDevice': target_device,
            'CompilerOptions': compiler_options,
        },
        StoppingCondition={'MaxRuntimeInSeconds': 1800},
    )
    print(create_response)

    # Poll every 30 sec until the job reaches a terminal state.
    while True:
        job = sagemaker_client.describe_compilation_job(CompilationJobName=compilation_job_name)
        job_status = job['CompilationJobStatus']
        if job_status == 'COMPLETED':
            break
        if job_status == 'FAILED':
            raise RuntimeError('Compilation failed')
        print('Compiling ...')
        time.sleep(30)

    print('Done!')
    return compilation_job_name
# -
def package_model(compilation_job_name, packaged_model_name, model_version):
    """Launch a SageMaker Edge packaging job for a compiled model and wait.

    compilation_job_name - name of the finished Neo compilation job (also
        reused as the packaging job name)
    packaged_model_name - name of the packaged model
    model_version - semantic version string for the packaged model
    Returns the packaging job name; raises RuntimeError on failure.

    Relies on the module-level sagemaker_client, role and
    s3_packaging_output_location.
    """
    model_packaging_job_name = compilation_job_name
    create_response = sagemaker_client.create_edge_packaging_job(
        RoleArn=role,
        OutputConfig={
            'S3OutputLocation': s3_packaging_output_location,
        },
        ModelName=packaged_model_name,
        ModelVersion=model_version,
        EdgePackagingJobName=model_packaging_job_name,
        CompilationJobName=compilation_job_name,
    )
    print(create_response)

    # Poll every 30 sec until the job reaches a terminal state.
    while True:
        job = sagemaker_client.describe_edge_packaging_job(EdgePackagingJobName=model_packaging_job_name)
        packaging_status = job['EdgePackagingJobStatus']
        if packaging_status == 'COMPLETED':
            break
        if packaging_status == 'FAILED':
            raise RuntimeError('Edge Packaging failed')
        print('Packaging ...')
        time.sleep(30)

    print('Done!')
    return model_packaging_job_name
# +
# Input shape for the Neo compiler: tensor name -> NHWC shape.
pose_estimator_model_data_shape = '{"sub_2":[1,257,257,3]}'
model_framework = 'tflite'
target_device = 'jetson_nano'

# Compile for JETPACK 4.5.1 on Jetson Nano
compiler_options = json.dumps({
    'NVIDIA': {
        'gpu_code': 'sm_53',
        'trt-ver': '7.1.3',
        'cuda-ver': '10.2'
    }
})
# -

pose_estimator_compilation_job_name = compile_model('pose-estimator', pose_estimator_s3_path, model_framework, pose_estimator_model_data_shape, target_device, compiler_options)

pose_estimator_packaged_model_name = "pose-estimator-model-packaged-nano"
pose_estimator_model_package = '{}-{}.tar.gz'.format(pose_estimator_packaged_model_name, pose_estimator_model_version.replace('.', '-'))
print(pose_estimator_model_package)

# NOTE(review): the next call hard-codes a job name that duplicates the
# variable-based call immediately below — looks like a stale notebook cell;
# confirm one of the two can be removed.
pose_estimator_packaging_job_name = package_model('edge-cv-datalake-demo-pose-estimator-0-1-0-jetson-nano', pose_estimator_packaged_model_name, pose_estimator_model_version)
pose_estimator_packaging_job_name = package_model(pose_estimator_compilation_job_name, pose_estimator_packaged_model_name, pose_estimator_model_version)

# Recompile for Raspberry Pi 3B (no target-specific compiler options).
target_device = 'rasp3b'
compiler_options = '{"":""}'
pose_estimator_compilation_job_name = compile_model('pose-estimator', pose_estimator_s3_path, model_framework, pose_estimator_model_data_shape, target_device, compiler_options)
| cloud/sagemaker/neo-tflite-pose.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fitting the "ionic model" of isotope fractionation
#
# The starting point is to imagine that the isotope vibrates in a potential well
# that somehow represents the effective bonding between the atom of interest and
# the rest of the crystal. We can follow Young et al. (2015) and represent the
# interaction via a Born–Mayer type interionic potential:
#
# $$ E(r) = \frac{z_1 z_2}{r}\left(\frac{e^2}{4\pi\epsilon_0}\right) + \frac{b}{r^n} + E_0,$$
#
# which gives the energy of the bond, $E$, as a function of the distance between the ions, $r$.
# The first term represents the Coulomb interaction between ions (which is attractive
# for ions of opposite charge since reducing $r$ makes the energy more negative). The
# second term represents repulsion between ions due to overlap of their electron clouds. At small
# $r$ this repulsion dominates and there is an $r$, the equilibrium bond length, $r_0$,
# which minimizes $E$. The parameters $z_1$ and $z_2$ represent the charges on the ions, $e$ is the
# charge of an electron, $\epsilon_0$ is the vacuum permittivity. The parameters $b$ and $n$
# define the strength and shape of the repulsion term. $E_0$ just sets the absolute energy (and is not further
# involved otherwise).
#
# The force acting between the ions is the derivative of the energy with respect to distance
# (I think the convention is usually that the force is the negative derivative, but that would
# either imply a sign error in Young et al. 2015 or that I cannot take the derivative of a
# polynomial), which leads to equation 30 of Young et al. 2015:
#
# $$ F(r) = \frac{\mathrm{d}E}{\mathrm{d}r}
# = -\frac{z_1 z_2}{r^2}\left(\frac{e^2}{4\pi\epsilon_0}\right)
# - \frac{bn}{r^{n+1}}.$$
#
# At the equilibrium bond distance, $r_0$, $\frac{\mathrm{d}E}{\mathrm{d}r} = 0$. This
# means we can find $b$ in terms of the other parameters such that we can choose $r_0$:
#
# $$ b = -\left(\frac{e^2}{4\pi\epsilon_0}\right)\frac{z_1 z_2 \, r_0^{n-1}}{n}. $$
#
# Commonly $n$ is set to 12, $r_0$ is taken from the ionic radii, and this sets $b$ for the
# mineral of interest.
#
# For isotopic fractionation, we need the force constant, $K_f$ for the effective bond. This is given
# by the second derivative of the energy with respect to distance:
#
# $$ K(r) = \frac{\mathrm{d}^2E}{\mathrm{d}r^2}
# = \frac{2 z_1 z_2}{r^3}\left(\frac{e^2}{4\pi\epsilon_0}\right)
# + \frac{b\,n(n+1)}{r^{n+2}},$$
#
# evaluated at $r_0$. Substituting $b$ and $r_0$ into this function gives $K_f$:
#
# $$K_f = K(r=r_0) = \frac{2z_1 z_2}{r_0^3}\left(\frac{e^2}{4\pi\epsilon_0}\right)
# - \left(\frac{z_1 z_2 e^2}{4\pi\epsilon_0}\right)\frac{n+1}{r_0^3}\\
# = \frac{z_1 z_2 e^2 (1 - n)}{4\pi\epsilon_0 r_0^3},$$
#
# where the final form is given as equation 31 in Young et al. (2015). The following cells implement
# and plot these various functions.
#
# It turns out we assume that the effective charge depends on $r_0$ and the coordination number, $n_c$.
# We set $z_1 = \zeta \times 2.0$ and $z_2 = \zeta \times -2.0$ and assume that:
#
# $$\zeta = \zeta_0 + r_0 \zeta_r + n_c \zeta_n$$
#
# fitting $\zeta_0$, $\zeta_r$ and $\zeta_n$ to the calculated reduced fractionation factors for the MgO
# structures at 300 K.
#
# Import modules
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Physical constants (SI units).
eps0 = 8.854187817E-12  # vacuum permittivity (F/m)
e = 1.60217662E-19      # elementary (electron) charge (C)

# Unit-conversion factors.
m2ang = 1.0E10   # metres -> Angstroms
j2ev = 6.242E18  # Joules -> electron volts


def energy(r, zi, zj, b, n):
    """
    Evaluate the Born-Mayer type interionic potential (without E_0).

    r - distance between ions (m); can be an array
    zi, zj - charges on the ions (units of electron charge)
    b - strength of the repulsive part (J m^n)
    n - exponent of the repulsive part (-); typically ~12
    returns the bond energy (J)
    """
    coulomb = (zi * zj * e**2) / (4.0 * np.pi * eps0 * r)
    repulsion = b / r**n
    return coulomb + repulsion
def de_by_dr(r, zi, zj, b, n):
    """
    First derivative of the Born-Mayer type interionic potential.

    r - distance between ions (m); can be array
    zi, zj - charges on ions (electrons)
    b - strength of repulsive part (J/m^n)
    n - exponent for repulsive part (-); typically ~12
    returns dE/dr (J/m = N)

    NB: this is +dE/dr; the conventional force would be -dE/dr —
    sign convention to be confirmed (see the notes above the cell).
    """
    coulomb_grad = (zi * zj * e**2) / (4.0 * np.pi * eps0 * r**2)
    repulsion_grad = (b * n) / r**(n + 1)
    return -coulomb_grad - repulsion_grad
def d2e_by_dr2(r, zi, zj, b, n):
    """
    Second derivative of the Born-Mayer type interionic potential.

    r - distance between ions (m); can be array
    zi, zj - charges on ions (electrons)
    b - strength of repulsive part (J/m^n)
    n - exponent for repulsive part (-); typically ~12
    returns d2E/dr2 (J/m^2 = N/m)
    """
    coulomb_curv = (2.0 * zi * zj * e**2) / (4.0 * np.pi * eps0 * r**3)
    # -(b * (-n-1) * n) == +b*n*(n+1): curvature of the repulsive term.
    repulsion_curv = (b * (-n - 1) * n) / r**(n + 2)
    return coulomb_curv - repulsion_curv
def cal_b(r0, zi, zj, n):
    """
    Calculate b for the Born-Mayer potential so its minimum sits at r0.

    r0 - equilibrium distance between ions (m); can be array
    zi, zj - charges on ions (electrons)
    n - exponent for repulsive part (-); typically ~12
    returns b such that the energy minimum is at r0 (J/m^n)
    """
    coulomb_grad = (zi * zj * e**2) / (4.0 * np.pi * eps0 * r0**2)
    return -coulomb_grad * r0**(n + 1) / n
def kf(r0, zi, zj, n):
    """
    Force constant of the Born-Mayer potential at its minimum
    (equation 31 of Young et al., 2015).

    r0 - equilibrium distance between ions (m); can be array
    zi, zj - charges on ions (electrons)
    n - exponent for repulsive part (-); typically ~12
    returns the force constant (J/m^2 = N/m)
    """
    return (zi * zj * e**2 * (1 - n)) / (4.0 * np.pi * eps0 * r0**3)
# +
# Plot an example and check some values
rs = np.linspace(1.5, 4.0) # Angstroms
n = 12
zi = 2.0
zj = -2.0
r0 = 2.1 # Angstroms

# Choose b so the potential minimum sits at r0.
b = cal_b(r0/m2ang, zi, zj, n)

fig, ax = plt.subplots(figsize=(12,6))
ax.plot(rs, energy(rs/m2ang, zi, zj, b, n)*j2ev)
ax.set_xlabel('Distance (Angstroms)')
ax.set_ylabel('Energy (eV)')
ax.axvline(r0)
plt.show()

fig, ax = plt.subplots(figsize=(12,6))
ax.plot(rs, de_by_dr(rs/m2ang, zi, zj, b, n)*j2ev/m2ang, 'r')
ax.axvline(r0)
ax.axhline(0.0)
ax.set_ylim(-25, 25)
ax.set_xlabel('Distance (Angstroms)')
ax.set_ylabel('Force (eV/Angstrom)')
plt.show()

# Sanity checks: energy minimum, zero slope and matching curvature at r0.
# kf takes no b because b has been eliminated via the minimum condition.
print("Energy around r_0:", energy(np.array([r0-0.001, r0, r0+0.001])/m2ang, zi, zj, b, n)*j2ev, "eV")
print("Force at r_0:", de_by_dr(r0/m2ang, zi, zj, b, n)*j2ev/m2ang, "eV/Ang")
print("Second derivative at r0:", d2e_by_dr2(r0/m2ang, zi, zj, b, n), "N/m")
print("Kf:", kf(r0/m2ang, zi, zj, n), "N/m") # No b
# -
# # de Koker melt
#
#
# Some useful functions...
import ionic_model
# +
# de Koker melt: bond length at 0, 25 and 50 GPa and the resulting force
# constants and betas. zi, zj and n are reused from the example cell above.
r_coefs_melt_dekoker = [1.9613, -0.00165, 0.0000019]
pressures = np.array([0, 25, 50]) # GPa
r_dekoker = ionic_model.melt_bond_length(pressures, r_coefs_melt_dekoker)
print(r_dekoker)
b = cal_b(r_dekoker, zi, zj, n)
print(b)
k = kf(r_dekoker, zi, zj, n)
print("Kf:",k, "N/m")
beta = ionic_model.ionic_model_beta(k, 1573.0)
print("beta at 1573 K", beta)
# +
# One potential curve per pressure (b[0], b[1], b[2]).
fig, ax = plt.subplots(figsize=(12,6))
ax.plot(rs, energy(rs/m2ang, zi, zj, b[0], n)*j2ev)
ax.plot(rs, energy(rs/m2ang, zi, zj, b[1], n)*j2ev)
ax.plot(rs, energy(rs/m2ang, zi, zj, b[2], n)*j2ev)
ax.set_xlabel('Distance (Angstroms)')
ax.set_ylabel('Energy (eV)')
plt.show()
# +
# Reduced fractionation factors at 300 K over a range of bond lengths, for
# several repulsion exponents n and charge-scaling factors (qfac).
r = np.linspace(1.95E-10, 2.80E-10)
k12 = kf(r, 2.0, -2.0, 12)
b12 = ionic_model.ionic_model_beta(k12, 300.0)
k10 = kf(r, 2.0, -2.0, 10)
b10 = ionic_model.ionic_model_beta(k10, 300.0)
k8 = kf(r, 2.0, -2.0, 8)
b8 = ionic_model.ionic_model_beta(k8, 300.0)
k6 = kf(r, 2.0, -2.0, 6)
b6 = ionic_model.ionic_model_beta(k6, 300.0)
kq7 = kf(r, 2.0*0.75, -2.0*0.75, 12)
bq7 = ionic_model.ionic_model_beta(kq7, 300.0)
kq2 = kf(r, 1.0, -1.0, 12)
bq2 = ionic_model.ionic_model_beta(kq2, 300.0)
kq4 = kf(r, 0.5, -0.5, 12)
bq4 = ionic_model.ionic_model_beta(kq4, 300.0)
# +
fig, ax = plt.subplots()
ax.plot(r, b12, label='n=12, qfac=1.0')
ax.plot(r, b10, label='n=10, qfac=1.0')
ax.plot(r, b8, label='n=8, qfac=1.0')
ax.plot(r, b6, label='n=6, qfac=1.0')
ax.plot(r, bq2, '.', label='n=12, qfac=0.5')
ax.plot(r, bq4, '.', label='n=12, qfac=0.25')
ax.plot(r, bq7, '.', label='n=12, qfac=0.75')
ax.legend()
plt.show()
# +
# Compare potentials with different charges, exponents and equilibrium
# bond lengths (solid r0 = 2.0 A, dashed r0 = 2.6 A).
r0 = 2.0E-10
rs = np.linspace(1.5E-10, 4.0E-10)
fig, ax = plt.subplots(figsize=(12,6))
ax.plot(rs*m2ang, energy(rs, 2.0, -2.0, cal_b(r0, 2.0, -2.0, 12), 12)*j2ev, label='q=1.0, n=12')
ax.plot(rs*m2ang, energy(rs, 2.0*0.75, -2.0*0.75, cal_b(r0, 2.0*0.75, -2.0*0.75, 12), 12)*j2ev, label='q=0.75, n=12')
ax.plot(rs*m2ang, energy(rs, 2.0, -2.0, cal_b(r0, 2.0, -2.0, 6), 6)*j2ev, label='q=1.0, n=6')
ax.plot(rs*m2ang, energy(rs, 2.0, -2.0, cal_b(2.6E-10, 2.0, -2.0, 12), 12)*j2ev, '--', label='q=1.0, n=12')
ax.plot(rs*m2ang, energy(rs, 2.0*0.75, -2.0*0.75, cal_b(2.6E-10, 2.0*0.75, -2.0*0.75, 12), 12)*j2ev, '--', label='q=0.75, n=12')
ax.plot(rs*m2ang, energy(rs, 2.0, -2.0, cal_b(2.6E-10, 2.0, -2.0, 6), 6)*j2ev, '--', label='q=1.0, n=6')
ax.set_ylim(-30, 100)
ax.legend()
ax.set_xlabel('Distance (Angstroms)')
ax.set_ylabel('Energy (eV)')
plt.show()
# -

# Scratch arithmetic check (notebook leftover).
2*0.75
# +
# Data - frac factor at 300 K
# For each MgO polymorph: 1000.ln(beta) (per mil) and the corresponding
# Mg-O bond length (Angstrom) at several volumes; the *_ref values mark the
# reference point circled in the figures below.
cscl_beta_permil = [43.657763, 35.181290, 28.256947, 22.598847, 18.12]
cscl_beta_ref = 18.12
cscl_r_ang = [2.07212, 2.12968, 2.18724, 2.24480, 2.30236]
cscl_r_ref = 2.302

# From /nfs/see-fs-02_users/earawa/lvs/Castep-isotopes-work/MgO_DFPT
mgo_beta_permil = [42.890104, 37.399128, 31.058124, 26.132359, 20.720653]
#mgo_beta_permil = [37.399128, 31.058124, 26.132359, 20.720653]
mgo_beta_ref = 26.132359
mgo_r_ang = [2.00354, 2.03985, 2.08651, 2.12726, 2.18267]
#mgo_r_ang = [2.03985, 2.08651, 2.12726, 2.18267]
mgo_r_ref = 2.12726

nias_first_beta_permil = [39.791693, 32.276645, 26.06, 20.885733, 16.716020] # Oct
nias_first_beta_ref = 26.06
nias_first_r_ang = [2.0329, 2.08647, 2.140, 2.19347, 2.24697]
nias_first_r_ref = 2.140

nias_second_beta_permil = [36.042615, 29.003766, 23.18, 18.325099, 14.440640] # trig pris
nias_second_beta_ref = 23.18
nias_second_r_ang = [2.04538, 2.09921, 2.153, 2.20686, 2.26069]
nias_second_r_ref = 2.153

cubzns_beta_permil = [30.05, 24.864859, 20.331742, 13.277411, 8.493347]# , 4.557974]
cubzns_beta_ref = 30.05
cubzns_r_ang = [2.000, 2.04407, 2.09393, 2.19364, 2.29335]# , 2.59952]
cubzns_r_ref = 2.000
# +
# Raw data plot: beta vs bond length for the five MgO polymorphs; open
# circles mark the reference structures.
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(cscl_r_ang, cscl_beta_permil, 'k*', label='CsCl structure', markersize=10)
ax.plot(cscl_r_ref, cscl_beta_ref, 'ko', fillstyle='none', markersize=20)
ax.plot(mgo_r_ang, mgo_beta_permil, 'ys', label='NaCl (periclase)', markersize=8)
ax.plot(mgo_r_ref, mgo_beta_ref, 'yo', fillstyle='none', markersize=20)
ax.plot(nias_first_r_ang, nias_first_beta_permil, 'gs', label='NiAs structure (octahedral)',
        markersize=8)
ax.plot(nias_first_r_ref, nias_first_beta_ref, 'go', fillstyle='none', markersize=20)
ax.plot(nias_second_r_ang, nias_second_beta_permil, 'bs', label='NiAs structure (trigonal prismatic)',
        markersize=8)
ax.plot(nias_second_r_ref, nias_second_beta_ref, 'bo', fillstyle='none', markersize=20)
ax.plot(cubzns_r_ang, cubzns_beta_permil, 'r^', label='cubic ZnS structure', markersize=10)
ax.plot(cubzns_r_ref, cubzns_beta_ref, 'ro', fillstyle='none', markersize=20)
ax.set_xlabel('Bond length (Angstroms)')
ax.set_ylabel('1000.ln(beta) (per mill)')
ax.legend()
plt.show()
# -
import scipy.optimize as sp_opt
def calc_beta_300_vary_qn(r, qfac0, qfac1):
    """Reduced fractionation factor at 300 K for bond length r (Angstrom),
    with the effective charge scaled linearly in r: zeta = qfac0 + r*qfac1.
    Fixed repulsion exponent n = 12. Intended as a curve_fit model."""
    charge_scale = qfac0 + r * qfac1
    spring_const = kf(r * 1E-10, 2.0 * charge_scale, -2.0 * charge_scale, 12)
    return ionic_model.ionic_model_beta(spring_const, 300.0)
# Fit the r-dependent charge-scaling model to each structure separately;
# initial guess: qfac0 = 1.0, qfac1 = 0.0.
mgo_popt, mgo_pcov = sp_opt.curve_fit(calc_beta_300_vary_qn, mgo_r_ang,
                                      mgo_beta_permil, [1.0, 0.0])

cubzns_popt, cubzns_pcov = sp_opt.curve_fit(calc_beta_300_vary_qn, cubzns_r_ang,
                                            cubzns_beta_permil, [1.0, 0.0])

# +
cscl_popt, cscl_pcov = sp_opt.curve_fit(calc_beta_300_vary_qn, cscl_r_ang,
                                        cscl_beta_permil, [1.0, 0.0])
# -

nias_first_popt, nias_first_pcov = sp_opt.curve_fit(calc_beta_300_vary_qn, nias_first_r_ang,
                                                    nias_first_beta_permil, [1.0, 0.0])

# Inspect the MgO fit: parameters, covariance, and the fitted zeta at a
# few bond lengths.
print(mgo_popt)
print(mgo_pcov)
print(mgo_popt[0] + 1.9*mgo_popt[1])
print(mgo_popt[0] + 2.0*mgo_popt[1])
print(mgo_popt[0] + 2.3*mgo_popt[1])
# +
# Same data as above with the per-structure fitted curves overlain
# (dashed); the dotted curve is the mean of the CsCl and ZnS fits.
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(cscl_r_ang, cscl_beta_permil, 'k*', label='CsCl structure', markersize=10)
ax.plot(cscl_r_ref, cscl_beta_ref, 'ko', fillstyle='none', markersize=20)
ax.plot(mgo_r_ang, mgo_beta_permil, 'ys', label='NaCl (periclase)', markersize=8)
ax.plot(mgo_r_ref, mgo_beta_ref, 'yo', fillstyle='none', markersize=20)
ax.plot(nias_first_r_ang, nias_first_beta_permil, 'gs', label='NiAs structure (octahedral)',
        markersize=8)
ax.plot(nias_first_r_ref, nias_first_beta_ref, 'go', fillstyle='none', markersize=20)
ax.plot(nias_second_r_ang, nias_second_beta_permil, 'bs', label='NiAs structure (trigonal prismatic)',
        markersize=8)
ax.plot(nias_second_r_ref, nias_second_beta_ref, 'bo', fillstyle='none', markersize=20)
r_points = np.linspace(1.98, 2.32)
ax.plot(r_points, calc_beta_300_vary_qn(r_points, *cscl_popt), 'k', linestyle='--')
ax.plot(r_points, calc_beta_300_vary_qn(r_points, *mgo_popt), 'y', linestyle='--')
ax.plot(r_points, calc_beta_300_vary_qn(r_points, *nias_first_popt), 'g', linestyle='--')
ax.plot(r_points, calc_beta_300_vary_qn(r_points, (cscl_popt[0]+cubzns_popt[0])/2.0,
                                        (cscl_popt[1]+cubzns_popt[1])/2.0), 'k', linestyle=':')
ax.plot(r_points, calc_beta_300_vary_qn(r_points, *cubzns_popt), 'r', linestyle='--')
ax.plot(cubzns_r_ang, cubzns_beta_permil, 'r^', label='cubic ZnS structure', markersize=10)
ax.plot(cubzns_r_ref, cubzns_beta_ref, 'ro', fillstyle='none', markersize=20)
ax.set_xlabel('Bond length (Angstroms)')
ax.set_ylabel('1000.ln(beta) (per mill)')
ax.legend()
plt.show()
# +
# Fitted parameters vs coordination number, with 1-sigma errors from the
# diagonal of each covariance matrix.
fig, ax = plt.subplots(nrows=2, figsize=(8,12))
ax[0].errorbar(8, cscl_popt[0], yerr=np.sqrt(np.diag(cscl_pcov))[0], fmt='k*')
ax[0].errorbar(4, cubzns_popt[0], yerr=np.sqrt(np.diag(cubzns_pcov))[0], fmt='r^')
ax[0].errorbar(6, mgo_popt[0], yerr=np.sqrt(np.diag(mgo_pcov))[0], fmt='ys')
ax[0].errorbar(6, nias_first_popt[0], yerr=np.sqrt(np.diag(nias_first_pcov))[0], fmt='gs')
ax[0].set_xlabel('Coordination number')
ax[0].set_ylabel('spring constant offset (units?)')
# NOTE(review): this panel plots qfac1 (slope of the charge scaling with
# r), not the potential exponent n — the y-label looks stale; confirm.
ax[1].errorbar(8, cscl_popt[1], yerr=np.sqrt(np.diag(cscl_pcov))[1], fmt='k*')
ax[1].errorbar(4, cubzns_popt[1], yerr=np.sqrt(np.diag(cubzns_pcov))[1], fmt='r^')
ax[1].errorbar(6, mgo_popt[1], yerr=np.sqrt(np.diag(mgo_pcov))[1], fmt='ys')
ax[1].errorbar(6, nias_first_popt[1], yerr=np.sqrt(np.diag(nias_first_pcov))[1], fmt='gs')
ax[1].set_xlabel('Coordination number')
ax[1].set_ylabel('"n" in potential function')
plt.show()
# -
def calc_beta_300_vary_q_coord(data, qfac0, qfac1, qfacgrd):
    """Reduced fractionation factor at 300 K with the effective charge
    scaling depending on both bond length and coordination number:
    zeta = qfac0 + r*qfac1 + coord*qfacgrd, n fixed at 12.

    data - (r, coord) pair: bond lengths (Angstrom) and coordination
           numbers (indexable as data[0], data[1]). curve_fit model form.
    """
    bond_length = data[0]
    coordination = data[1]
    charge_scale = qfac0 + bond_length * qfac1 + coordination * qfacgrd
    spring_const = kf(bond_length * 1E-10, 2.0 * charge_scale, -2.0 * charge_scale, 12)
    return ionic_model.ionic_model_beta(spring_const, 300.0)
def get_qfac(r, coord, qfac0, qfac1, qfacgrd):
    """Effective charge-scaling factor zeta = qfac0 + r*qfac1 + coord*qfacgrd."""
    return qfac0 + r * qfac1 + coord * qfacgrd
def calc_beta_model(r, coord, t, qfac0, qfac1, qfacgrd):
    """Ionic-model reduced fractionation factor at temperature t (K) for
    bond length r (Angstrom) and coordination number coord, with
    zeta = qfac0 + r*qfac1 + coord*qfacgrd and n fixed at 12."""
    charge_scale = qfac0 + r * qfac1 + coord * qfacgrd
    spring_const = kf(r * 1E-10, 2.0 * charge_scale, -2.0 * charge_scale, 12)
    return ionic_model.ionic_model_beta(spring_const, t)
# +
# Fit model to MgO polymorph data
# Joint fit over all five structures: predictors are (r, coord) pairs,
# targets the 300 K betas. Initial guess [qfac0, qfac1, qfacgrd].
rs = cscl_r_ang + mgo_r_ang + nias_first_r_ang + nias_second_r_ang + cubzns_r_ang
coords = [8.0, 8, 8, 8, 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4]
data = np.array((rs, coords))
predictors = np.array((cscl_beta_permil + mgo_beta_permil + nias_first_beta_permil
                       + nias_second_beta_permil + cubzns_beta_permil))
all_popt, all_pcov = sp_opt.curve_fit(calc_beta_300_vary_q_coord, data,
                                      predictors, [1.0, 0.0, 0.0])
# +
# Silicate data for comparison
# Bond lengths (Angstrom) and 300 K betas (per mil); *_0GPa values mark
# the zero-pressure points.
# Data from /nfs/see-fs-02_users/earawa/lvs/Castep-isotopes-work/free_energy/Mg2SiO4
# at -10, 0, 10, 20, 30 and 40 GPa
fo_data_bonds = np.array([2.18806, 2.1173566666666663, 2.06903, 2.0307566666666665, 2.000488333333333, 1.974421666666667])
fo_data_300k = np.array([18.799606, 25.343401, 30.932164, 36.051153, 40.674813, 44.893845])
fo_data_bonds_0GPa = 2.1173566666666663
fo_data_300k_0GPa = 25.343401

# Data from /nfs/see-fs-02_users/earawa/lvs/Castep-isotopes-work/free_energy/MgSiO3
# at 0 GPa, 20, 40, 60, 80, 100 and 120 GPa
pv_data_bonds = np.array([2.2277, 2.16657375, 2.1199, 2.0819075, 2.0497562499999997, 2.02188875, 1.9972662500000002])
pv_data_300k = np.array([23.443879, 30.005733, 35.920075, 41.051075, 47.352945, 52.504616, 57.403608])
pv_data_bonds_0GPa = 2.2277
pv_data_300k_0GPa = 23.443879
# -
# +
# Make the figure...
# Polymorph and silicate data with the joint-fit model evaluated along
# constant-coordination lines (dotted, labelled by coordination number).
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(fo_data_bonds, fo_data_300k, color='grey', linestyle='', marker=(6,0,30),
        label='Forsterite', markersize=5)
ax.plot(fo_data_bonds_0GPa, fo_data_300k_0GPa, color='grey', marker='o', fillstyle='none', markersize=15)
ax.plot(pv_data_bonds, pv_data_300k, color='grey', linestyle='', marker=(8,2,0),
        label='Bridgmanite', markersize=8)
ax.plot(pv_data_bonds_0GPa, pv_data_300k_0GPa, color='grey', marker='o', fillstyle='none', markersize=15)
ax.plot(cscl_r_ang, cscl_beta_permil, color='m', linestyle='', marker=(8,1,0),
        label='CsCl structure', markersize=12)
ax.plot(cscl_r_ref, cscl_beta_ref, 'mo', fillstyle='none', markersize=25)
ax.plot(mgo_r_ang, mgo_beta_permil, color='y', linestyle='', marker=(6,1,0),
        label='NaCl (periclase)', markersize=12)
ax.plot(mgo_r_ref, mgo_beta_ref, 'yo', fillstyle='none', markersize=25)
ax.plot(nias_first_r_ang, nias_first_beta_permil, color='cyan', linestyle='', marker=(6,2,0),
        label='NiAs structure (octahedral)', markersize=12)
ax.plot(nias_first_r_ref, nias_first_beta_ref, 'o', color='cyan', fillstyle='none', markersize=25)
ax.plot(nias_second_r_ang, nias_second_beta_permil, color='cornflowerblue', linestyle='', marker=(6,0,0),
        label='NiAs structure (trigonal prismatic)', markersize=10)
ax.plot(nias_second_r_ref, nias_second_beta_ref, color='cornflowerblue', marker='o', fillstyle='none', markersize=25)
ax.plot(cubzns_r_ang, cubzns_beta_permil, color='salmon', linestyle='', marker=(4,1,0),
        label='cubic ZnS structure', markersize=12)
ax.plot(cubzns_r_ref, cubzns_beta_ref, color='salmon', marker='o', fillstyle='none', markersize=25)
for coord in [3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]:
    r_points = np.linspace(1.98, 2.32)
    coords = np.ones_like(r_points) * coord
    data = np.stack((r_points, coords))
    values = calc_beta_300_vary_q_coord(data, *all_popt)
    ax.plot(r_points, values, 'k', linestyle=':')
    ax.text(1.965, values[0], str(coord))
# Mark the end-member (r, coord) combinations examined in the next cell.
ax.plot(1.9875, calc_beta_300_vary_q_coord([1.9875,4.0], *all_popt), 'o', fillstyle='none',
        color='darkcyan', markersize=6)
ax.plot(2.3125, calc_beta_300_vary_q_coord([2.3125,4.0], *all_popt), 'o',
        color='darkcyan', markersize=6)
ax.plot(1.9875, calc_beta_300_vary_q_coord([1.9875,8.0], *all_popt), 'o', fillstyle='none',
        color='saddlebrown', markersize=6)
ax.plot(2.3125, calc_beta_300_vary_q_coord([2.3125,8.0], *all_popt), 'o',
        color='saddlebrown', markersize=6)
ax.set_xlabel('Bond length (Angstroms)')
ax.set_ylabel('1000.ln(beta) (per mill)')
ax.legend()
plt.show()
# +
# Potentials and derivatives for the four end-member (r0, coord)
# combinations, using the fitted charge-scaling factor from the joint fit.
rs = np.linspace(1.5E-10, 4.0E-10)
fig, ax = plt.subplots(figsize=(8,12), nrows=2, sharex=True, gridspec_kw={'hspace':0.0125})

r0 = 1.9875
coord = 4.0
qfac = get_qfac(r0, coord, *all_popt)
print(qfac)
ax[0].plot(rs*m2ang, energy(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev,
           label='r0=1.9875, coord=4, n=12', color='darkcyan', ls='--')
ax[1].plot(rs*m2ang, de_by_dr(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev/m2ang,
           color='darkcyan', ls='--')
ax[1].axvline(r0, ls='--', color='k')
ax[0].axvline(r0, ls='--', color='k')
ax[1].axhline(0.0, color='k', lw=1)

r0 = 1.9875
coord = 8.0
qfac = get_qfac(r0, coord, *all_popt)
print(qfac)
ax[0].plot(rs*m2ang, energy(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev,
           label='r0=1.9875, coord=8, n=12', color='saddlebrown', ls='--')
ax[1].plot(rs*m2ang, de_by_dr(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev/m2ang,
           color='saddlebrown', ls='--')

r0 = 2.3125
coord = 4.0
qfac = get_qfac(r0, coord, *all_popt)
print(qfac)
ax[0].plot(rs*m2ang, energy(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev,
           label='r0=2.3125, coord=4, n=12', color='darkcyan', ls='-')
ax[1].plot(rs*m2ang, de_by_dr(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev/m2ang,
           color='darkcyan', ls='-')
ax[1].axvline(r0, color='k')
ax[0].axvline(r0, color='k')

r0 = 2.3125
coord = 8.0
qfac = get_qfac(r0, coord, *all_popt)
print(qfac)
ax[0].plot(rs*m2ang, energy(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev,
           label='r0=2.3125, coord=8, n=12',
           color='saddlebrown', ls='-')
ax[1].plot(rs*m2ang, de_by_dr(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev/m2ang,
           color='saddlebrown', ls='-')

ax[0].set_ylim(-30, 20)
ax[0].set_xlim(1.5, 4.0)
ax[1].set_xlim(1.5, 4.0)
ax[0].legend()
ax[0].set_ylabel('Energy (eV)')
ax[0].xaxis.set_ticks_position('none')
ax[1].set_ylim(-10.5, 10.5)
ax[1].set_xlabel('Distance (Angstroms)')
ax[1].set_ylabel('Force (eV/Angstrom)')
plt.show()
# +
# NOTE: qfac and r0 here are whatever the cell above left behind
# (the r0=2.3125, coord=8 case).
rs = np.linspace(1.5E-10, 4.0E-10)
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(rs*m2ang, de_by_dr(rs, 2.0*qfac, -2.0*qfac, cal_b(r0*1E-10, 2.0*qfac, -2.0*qfac, 12), 12)*j2ev/m2ang, 'r')
ax.axvline(r0)
ax.axhline(0.0)
ax.set_ylim(-25, 25)
ax.set_xlabel('Distance (Angstroms)')
ax.set_ylabel('Force (eV/Angstrom)')
plt.show()
# -
import scipy.interpolate as spi
# Coordination number of the melt vs pressure, interpolated with a spline
# through the tabulated values; plotted on linear and log pressure axes.
melt_coord = np.array(([4.93, 5.4, 6, 6.7, 7.25, 7.62, 7.85]))
melt_pressure = np.array(([0.1, 2.5, 7.2, 16.3, 34.3, 72.1, 159.4]))
coord_spline = spi.InterpolatedUnivariateSpline(melt_pressure, melt_coord)

fig, ax = plt.subplots(figsize=(8,6))
ax.plot(melt_pressure, melt_coord, 'o')
ps = np.linspace(-10.0, 170.0)
ax.plot(ps, coord_spline(ps))
ax.set_xlabel('Pressure (GPa)')
ax.set_ylabel('Coordination number')
plt.show()

fig, ax = plt.subplots(figsize=(8,6))
ax.semilogx(melt_pressure, melt_coord, 'o')
ps = np.linspace(0.1, 160, 10000)
ax.semilogx(ps, coord_spline(ps))
ax.set_xlabel('Pressure (GPa)')
ax.set_ylabel('Coordination number')
plt.show()
# Melt bond length vs pressure from the polynomial model (same
# coefficients as the de Koker cell above).
melt_poly_coef = [1.9613, -0.00165, 0.0000019]
melt_rs_model = ionic_model.melt_bond_length(melt_pressure, melt_poly_coef)

fig, ax = plt.subplots(figsize=(8,6))
ps = np.linspace(0.0, 160.0)
ax.plot(ps, ionic_model.melt_bond_length(ps, melt_poly_coef)*1E10)
ax.set_xlabel('Pressure (GPa)')
ax.set_ylabel('Bond length (angstroms)')
plt.show()
# +
# Melting point beta
# Predict the melt beta at 0 GPa from the fitted model, plus two variants
# with an adjusted qfac0.
r_melt = ionic_model.melt_bond_length(0.0, melt_poly_coef)
coord_melt = coord_spline(0.0)
print("Melt at 0GPa has r", r_melt, "coord", coord_melt)
beta_300_melt = calc_beta_model(r_melt*1E10, coord_melt, 300.0, *all_popt)
print("Melt beta at 300 K", beta_300_melt)
print("Melt beta at 1573 K", calc_beta_model(r_melt*1E10, coord_melt, 1573.0, *all_popt))
# NOTE(review): the 'corrected' qfac0 values below are hard-coded —
# presumably from an external calibration; confirm their provenance.
beta_300_melt_correct = calc_beta_model(r_melt*1E10, coord_melt, 300.0,
                                        2.1264451598128855, -0.93910997, 0.06109785)
print("Corrected Melt beta at 300 K", beta_300_melt_correct)
beta_300_melt_correct_athermal = calc_beta_model(r_melt*1E10, coord_melt, 300.0,
                                                 2.1807165400315522, -0.93910997, 0.06109785)
print("Corrected athermal Melt beta at 300 K", beta_300_melt_correct_athermal)
# +
# Polymorph data, constant-coordination model lines, and the three melt
# predictions at 0 GPa.
# Fix: legend labels previously read 'melt predictionm 0 GPa' (typo).
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(cscl_r_ang, cscl_beta_permil, 'k*', label='CsCl structure', markersize=10)
ax.plot(cscl_r_ref, cscl_beta_ref, 'ko', fillstyle='none', markersize=20)
ax.plot(mgo_r_ang, mgo_beta_permil, 'ys', label='NaCl (periclase)', markersize=8)
ax.plot(mgo_r_ref, mgo_beta_ref, 'yo', fillstyle='none', markersize=20)
ax.plot(nias_first_r_ang, nias_first_beta_permil, 'gs', label='NiAs structure (octahedral)',
        markersize=8)
ax.plot(nias_first_r_ref, nias_first_beta_ref, 'go', fillstyle='none', markersize=20)
ax.plot(nias_second_r_ang, nias_second_beta_permil, 'bs', label='NiAs structure (trigonal prismatic)',
        markersize=8)
ax.plot(nias_second_r_ref, nias_second_beta_ref, 'bo', fillstyle='none', markersize=20)
ax.plot(cubzns_r_ang, cubzns_beta_permil, 'r^', label='cubic ZnS structure', markersize=10)
ax.plot(r_melt*1E10, beta_300_melt, 'c*', fillstyle='none', markersize=20, label='melt prediction 0 GPa')
ax.plot(r_melt*1E10, beta_300_melt_correct, 'c*', fillstyle='none', markersize=20, label='corrected melt prediction 0 GPa')
ax.plot(r_melt*1E10, beta_300_melt_correct_athermal, 'c*', fillstyle='none', markersize=20, label='corrected melt (athermal) prediction 0 GPa')
for coord in [4.0, 5.0, 6.0, 7.0, 8.0, 9.0]:
    r_points = np.linspace(1.95, 2.32)
    coords = np.ones_like(r_points) * coord
    data = np.stack((r_points, coords))
    values = calc_beta_300_vary_q_coord(data, *all_popt)
    ax.plot(r_points, values, 'k', linestyle=':')
    ax.text(1.935, values[0], str(coord))
ax.set_xlabel('Bond length (Angstroms)')
ax.set_ylabel('1000.ln(beta) (per mill)')
ax.legend()
plt.show()
# +
import earthref

# Interpolator over the AK135 reference Earth model (pressure vs radius).
earth_model = earthref.EarthModel(earthref.ak135)
def depth_PT(depth):
"""Retrun liquidus P and T at a given depth in a magma ocean
Liquidus data Andrault et at. 2011 (EPSL doi:10.1016/j.epsl.2011.02.006)
who fit a modified Simmon and Glatzel equation:
T = T0 (P/a+1_^(1/c)
(see section 3.4) with parameters listed below. This replaces a
previous linear fit to data at 0 and 60 GPa.
"""
P = earth_model(6371-depth) # Interpolating AK135...
# We now have P, T is from TP plot
T_0 = 1940.0 # virtual liqidus temperature at 0 GPa
a = 26.0 # GPa
c = 1.9
T = T_0 * ((P / a) + 1)**(1/c)
return T, P
# +
# Evaluate the melt beta model along the magma-ocean liquidus from the
# surface down to 2800 km depth.
depths = np.linspace(0.0, 2800.0, num=200)
# Get our list of Ps and Ts
Ts, Ps = depth_PT(depths)
# Melt bond length and coordination at each pressure (melt_poly_coef and
# coord_spline are defined in earlier cells — TODO confirm).
r_melt = ionic_model.melt_bond_length(Ps, melt_poly_coef)
coord_melt = coord_spline(Ps)
beta_melt = calc_beta_model(r_melt*1E10, coord_melt, Ts, *all_popt)
# +
# Beta along the liquidus, as a function of pressure
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(Ps, beta_melt)
plt.show()
# +
# The liquidus itself (T vs P), for reference
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(Ps, Ts)
plt.show()
# -
# Echo the fitted model parameters used above
print(all_popt)
| potential_figure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15, 7) # make bigger plots
import numpy as np
import pandas as pd
import sklearn.metrics
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.arima_model import ARIMA,ARMA
# -
# %%capture
# create loading bar
from tqdm.notebook import tqdm
tqdm().pandas()
# +
# Paths to the raw social-media document-count / engagement CSV exports.
folder = 'C:/Users/Lorenzo/Desktop/MyStuff/Master/Paris/Bloom/data/'
count_ig_csv = folder + 'emerging_risks_doc_count_instagram.csv'
count_tw_csv = folder + 'emerging_risks_doc_count_twitter.csv'
engagement_fb_csv = folder + 'emerging_risks_local_engagement_facebook.csv'
engagement_ig_csv = folder + 'emerging_risks_local_engagement_instagram.csv'
engagement_tw_csv = folder + 'emerging_risks_local_engagement_twitter.csv'
files = [count_ig_csv, count_tw_csv, engagement_fb_csv, engagement_ig_csv, engagement_tw_csv]
# +
# %%
# Load the Twitter document counts and index the frame by calendar day.
df = pd.read_csv(count_tw_csv)
try:
    df.drop([381, 382, 383], inplace=True) # drop rows 381, 382, 383 as they are 0 values
    df.date = pd.to_datetime(df.date)
    df.set_index('date', inplace=True) # set date as index
    df = df.asfreq('d') # enforce a daily frequency on the index
except KeyError: # pass if rows 381, 382, 383 have already been dropped
    print("KeyError")
    pass
# -
# Smooth the daily series with trailing moving averages; the first
# window-length rows are NaN and are trimmed off.
weekly = df.rolling(7).mean()[7:]
smoothed = df.rolling(9).mean()[9:]
df.Pesticides.plot(label="Pesticides")
weekly.Pesticides.plot(label="Pesticides MA(7)")
plt.legend()
# + active=""
# pacf = plot_pacf(df.Pesticides)
#
# weekly_pacf = plot_pacf(weekly.Pesticides)
# + active=""
# acf = plot_acf(df.Pesticides)
#
# weekly_acf = plot_acf(weekly.Pesticides)
# -
# Fit three reference models on the first `train_len` smoothed points of
# one topic: a full ARIMA(9,0,4), a pure MA(4) and a pure AR(9).
# SARIMAX with no differencing/seasonality is used as the estimator.
train_len = 300
topic = "Farmer"
test_ARIMA_model = SARIMAX(weekly[topic][:train_len], order=(9, 0, 4)).fit()
test_MA_model = SARIMAX(weekly[topic][:train_len], order=(0, 0, 4)).fit()
test_AR_model = SARIMAX(weekly[topic][:train_len], order=(9, 0, 0)).fit()
# +
# Forecast 28 days ahead with the fitted ARIMA model and overlay the
# forecast and its confidence band on the observed series.
confidence_interval = 0.5
fcast = test_ARIMA_model.get_forecast(28).summary_frame(alpha=1 - confidence_interval)
prediction = fcast['mean']
# replace negative values with 0 because they are impossible
fcast['mean_ci_lower'] = [x if x>=0 else 0 for x in fcast['mean_ci_lower']]
fcast['mean_ci_upper'] = [x if x>=0 else 0 for x in fcast['mean_ci_upper']]
fig, ax = plt.subplots(figsize=(15, 5))
# Plot the data (here we are subsetting it to get a better look at the forecasts)
weekly[topic].plot(ax=ax, label=f"{topic}")
# BUG FIX: this cell referenced the undefined name `test_SARIMAX_model`;
# the model fitted above is `test_ARIMA_model`.
test_ARIMA_model.fittedvalues.plot(ax=ax, label="Model fitted values")
# Construct the forecasts
prediction.plot(ax=ax, style='k--', label="Model Forecast")
ax.fill_between(fcast.index, fcast['mean_ci_lower'], fcast['mean_ci_upper'], color='k', alpha=0.1,
                label=f"{int(confidence_interval*100)}% Conf Interval")
plt.legend(loc="upper left")
# +
# Zoom on a 45-day window and overlay a 15-step forecast with a 30% band.
CONF_INT = 0.3
fig, ax = plt.subplots(figsize=(15, 5))
# Plot the data (here we are subsetting it to get a better look at the forecasts)
weekly.Pesticides[220:265].plot(ax=ax, label="Pesticides")
# Construct the forecasts
# BUG FIX: `test_SARIMAX_model` was never defined; use the fitted
# `test_ARIMA_model` from above.
# NOTE(review): that model was fitted on the "Farmer" topic but is plotted
# here against Pesticides — confirm which topic is intended.
fcast = test_ARIMA_model.get_forecast(15).summary_frame(alpha=1-CONF_INT)
fcast['mean'].plot(ax=ax, style='k--', label="ARIMA Forecast")
ax.fill_between(fcast.index, fcast['mean_ci_lower'], fcast['mean_ci_upper'], color='k', alpha=0.1,
                label=f"{int(CONF_INT*100)}% Conf Interval")
plt.legend(loc="lower left")
# -
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent.

    Raises ValueError when the two sequences differ in length. The result
    is undefined (inf/nan) when y_true contains zeros; callers guard
    against that case.
    """
    if len(y_true) != len(y_pred):
        # BUG FIX: the original message reported len(y_true) twice.
        raise ValueError(f"len y_true is {len(y_true)}, len y_pred is {len(y_pred)}" )
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
# Automatic (p, q) order scan by AIC.
# NOTE(review): `x` is not defined in this notebook — presumably a leftover
# from an interactive session; the inactive cell near the end calls this on
# weekly.Pesticides instead. Verify before running.
res = sm.tsa.stattools.arma_order_select_ic(x, ic=['aic'])
# NOTE(review): statsmodels' ARMA takes a 2-tuple order=(p, q); the 3-tuple
# below looks like an ARIMA order — confirm (ARMA is deprecated in favour
# of SARIMAX anyway).
ARMA(weekly.CEO, order=(7,0,4)).fit()
# +
# THIS CELL TAKES 3 mins: store Arima model for every column and 10 days forecasts
# models = dict() # store models so no need to rerun
# forecasts10 = dict() # forecasts of next 10 days
#
# for col in tqdm(weekly.columns):
#     train = weekly[col][:200]
#     # test = weekly[col][200:]
#     ARIMA_model = SARIMAX(train, order=(7, 0, 4)).fit()
#     models[col] = ARIMA_model
#     prediction = ARIMA_model.predict(200, 210)
#     forecasts10[col] = prediction
# after a few tests, dict comprehension reduced execution time by 1 minute (33%)
# Fit one (7,0,4) model per topic on the first 200 smoothed points,
# skipping series that contain 200 or more zero values.
models = {col: SARIMAX(weekly[col][:200], order=(7, 0, 4)).fit() for col in tqdm(weekly.columns) if sum(
    weekly[col]==0) < 200}
forecasts10 = {col: model.predict(200, 210) for col, model in models.items()}
# -
len(forecasts10.keys())
# +
# RMSE of the first two forecast steps against the held-out observations.
rmses = dict()
for k,v in forecasts10.items():
    rmses[k] = sklearn.metrics.mean_squared_error(weekly[k][200:202], v[0:2], squared=False)
# +
# MAPE of the first two forecast steps; topics with a zero observation are
# recorded as None, since MAPE would divide by zero.
mapes = dict()
for k,v in forecasts10.items():
    if 0.0 not in weekly[k][200:202].values:
        mape = mean_absolute_percentage_error(weekly[k][200:202], v[0:2])
        mapes[k] = round(mape, 2)
    else:
        print(k)
        mapes[k] = None
# -
mapes
# +
# Average MAPE over the topics for which it could be computed.
mapes_val = list(mapes.values())
summa = 0
count = 0
for el in mapes_val:
    if el is not None:
        count += 1
        summa += el
summa /(count)
# -
def ci_accuracy(test, lower, upper):
    """
    Fraction of observed test values lying strictly inside the
    per-step confidence interval (lower[i], upper[i]).
    """
    hits = 0
    misses = 0
    for i, observed in enumerate(test):
        if lower[i] < observed < upper[i]:
            hits += 1
        else:
            misses += 1
    return hits / (hits + misses)
def get_mean(lst):
    """Arithmetic mean of the values in *lst* (any sized iterable)."""
    values = list(lst)
    return sum(values) / len(values)
# +
# How often does the true value fall inside each model's forecast interval
# over the first `check_length` held-out days?
ci_precision = dict()
check_length = 20
for column_name, model in models.items():
    test = weekly[column_name][200: 200 + check_length]
    # NOTE(review): alpha=0.4 gives a 60% interval, not 80% as the old
    # comment claimed — confirm which level is intended.
    ci = model.get_forecast(check_length).conf_int(alpha=0.4)
    lower_ci = list(ci[f"lower {column_name}"])
    upper_ci = list(ci[f"upper {column_name}"])
    ci_precision[column_name] = ci_accuracy(test, lower_ci, upper_ci)
    # Rough per-topic interval-width diagnostic
    print(column_name, abs(get_mean(upper_ci)) - abs(get_mean(lower_ci)))
# -
ci_precision
get_mean(ci_precision.values())
# Collect the 10-day forecasts in a frame and compare with observations.
forecasts_df = pd.DataFrame(forecasts10)
forecasts_df
weekly[200:211]
# Element-wise absolute percentage error of the forecasts
abs(forecasts_df - weekly[200:211]) / weekly[200:211]
weekly[200:211].to_csv("real.csv")
forecasts_df.to_csv("forecast.csv")
# + active=""
# AVERAGE MAPE of (7, 0, 4) forecast over weekly(excluding #DIV/0!)
#
# 08/03/2019 29%
# 09/03/2019 31%
# 10/03/2019 46%
# 11/03/2019 63%
# 12/03/2019 72%
# 13/03/2019 83%
# 14/03/2019 82%
# 15/03/2019 85%
# 16/03/2019 86%
# 17/03/2019 86%
# +
# THIS CELL TAKES 3 mins: store Arima model for every column and 10 days forecasts
# Same (7,0,4) fit as above but on the raw (un-smoothed) daily counts,
# with enforce_stationarity disabled.
models = dict() # store models so no need to rerun
forecasts10 = dict() # forecasts of next 10 days
for col in tqdm(df.columns):
    train = df[col][:200]
    test = df[col][200:]
    ARIMA_model = SARIMAX(train, order=(7, 0, 4), enforce_stationarity=False).fit()
    models[col] = ARIMA_model
    prediction = ARIMA_model.predict(200, 210)
    forecasts10[col] = prediction
# -
# + active=""
# res = sm.tsa.stattools.arma_order_select_ic(weekly.Pesticides[:200], ic=['aic'], max_ar=5, max_ma=5)
# -
# NOTE(review): `res` comes from the earlier arma_order_select_ic call (the
# cell above is inactive) — verify it actually ran.
res.aic_min_order
| code/old_models/Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: python
# name: pysparkkernel
# ---
# ## Training ResNet50 with distributed training on Maggy
# In this notebook we will train a ResNet-50 model from scratch with data from ImageNet. Note that a PyTorch Dataset and DataLoader is employed which results in large I/O overhead and doesn't fully utilize the GPU capabilities. For higher throughput, see `ImageNet_petastorm_training`.
# +
import time
import torch
from torchvision import models
from torchvision import transforms as T
import pandas as pd
import numpy as np
from PIL import Image
from hops import hdfs
# -
# ### Creating the PyTorch Dataset
# The metadata of our dataset is stored in a .csv file located in the root folder. It contains the labels of each image and its source path. For convenience, we relabel the classes into integers. In the `__getitem__` function, we enable custom transformations after reading the image and its label. The advantage of defining our own dataset is that we have no problems performing I/O operations on our DFS, which would fail when simply calling `os.open()` (which is what PyTorch's predefined datasets do).
class ImageNetDataset(torch.utils.data.Dataset):
    """Imagenette dataset read from HDFS via the Hops `hdfs` client.

    Expects `path` to be a directory containing `noisy_imagenette.csv`
    with (at least) the columns `path`, `noisy_labels_0` (the clean
    label) and `is_valid` (validation-split flag).
    """
    def __init__(self, path, transform=None, test_set=False):
        super().__init__()
        self.root = path
        self.df = pd.read_csv(path + "noisy_imagenette.csv")
        self.transform = transform
        # Keep either the validation split or the training split.
        if test_set:
            self.df = self.df[self.df.is_valid]
        else:
            self.df = self.df[self.df.is_valid == False]
        # Drop the artificially-noised label columns; only the clean
        # `noisy_labels_0` column is used below.
        self.df.drop(["noisy_labels_" + str(i) for i in [1, 5, 25,50]], axis=1, inplace=True)
        # WordNet synset id -> integer class index.
        self.labels = {"n01440764": 0, # "tench"
                       "n02102040": 1, # "English springer"
                       "n02979186": 2, # "cassette player"
                       "n03000684": 3, # "chain saw"
                       "n03028079": 4, # "church"
                       "n03394916": 5, # "French horn"
                       "n03417042": 6, # "garbage truck"
                       "n03425413": 7, # "gas pump"
                       "n03445777": 8, # "golf ball"
                       "n03888257": 9, # "parachute"
                      }
    def __len__(self):
        # Number of samples in the selected split.
        return len(self.df)
    def __getitem__(self, idx):
        """Return a {"image": image, "label": int} dict for sample `idx`."""
        row = self.df.iloc[idx]
        label = self.labels[row["noisy_labels_0"]]
        # Read via the HDFS client — a plain open() would fail on DFS paths.
        f = hdfs.open_file(self.root + row["path"])
        try:
            img = Image.open(f).convert("RGB")
        finally:
            f.close()
        if self.transform:
            img = self.transform(img)
        sample = {"image": img, "label": label}
        return sample
# Root of the Imagenette data inside the Hopsworks project filesystem.
path = hdfs.project_path() + "DataSets/ImageNet/imagenette/"
# ### Defining data transforms
# To increase the variety of our training samples, we employ data augmentation via torchvision's transforms API. For training images, in addition to resizing and randomly cropping, we also flip images horizontally. In the test set, we use a center crop and no flips to remove randomness. All images are normalized for numeric convenience.
# +
# Training-time augmentation: random crop + horizontal flip, then
# normalization with the standard ImageNet channel statistics.
train_transform = T.Compose(
    [T.Resize(256),
     T.RandomCrop(224),
     T.RandomHorizontalFlip(),
     T.ToTensor(),
     T.Normalize(mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225])
    ])
# Deterministic evaluation transform: center crop, no flips.
test_transform = T.Compose(
    [T.Resize(256),
     T.CenterCrop(224),
     T.ToTensor(),
     T.Normalize(mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225])
    ])
# -
train_ds = ImageNetDataset(path, transform=train_transform)
test_ds = ImageNetDataset(path, transform=test_transform, test_set=True)
# ### Defining the training function
# In order to use PyTorch with maggy, we need to define our training loop in a function. The function takes our module, its hyperparameters and both the train and test set as input. Note that the module should be a class that is instantiated in our training loop, since transferring the model weights at the beginning of the loop would result in a huge communicational overhead. Likewise, it is not advised to use datasets with large memory footprint over the function, but rather load it from the DFS when requested.
# Inside the training loop it is **mandatory for maggy** to use a torch DataLoader. Apart from these restrictions, you can freely implement your training loop as in normal PyTorch. Finally, we have to import all of the used libraries inside the function.
def train_fn(module, hparams, train_set, test_set):
    """Maggy-compatible training loop for a torchvision model class.

    Parameters:
        module: the model *class* (not an instance) — instantiated here so
            that only the class, not its weights, is shipped to workers.
        hparams (dict): keyword arguments forwarded to the model constructor.
        train_set / test_set: torch Datasets yielding {"image", "label"} dicts.
    Returns:
        float: test accuracy after the final epoch.
    """
    import torch
    import time
    from torch.utils.data import DataLoader
    model = module(**hparams)
    n_epochs = 3
    batch_size = 64
    # Linear learning-rate scaling with batch size; the extra factor of 2
    # presumably accounts for the number of workers — TODO confirm.
    lr = 0.1 * 2*batch_size/256
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_criterion = torch.nn.CrossEntropyLoss()
    # Maggy requires a torch DataLoader inside the training loop.
    train_loader = DataLoader(train_set, pin_memory=True, batch_size=batch_size)
    test_loader = DataLoader(test_set, pin_memory=True, batch_size=batch_size, drop_last=True)
    def eval_model(model, test_loader):
        # Top-1 accuracy over the test loader, computed without gradients.
        acc = 0
        model.eval()
        img_cnt = 0
        with torch.no_grad():
            for idx, data in enumerate(test_loader):
                img, label = data["image"], data["label"]
                prediction = model(img)
                acc += torch.sum(torch.argmax(prediction, dim=1) == label)
                img_cnt += len(label)
        acc = acc.detach()/float(img_cnt)
        print("Test accuracy: {:.3f}".format(acc))
        print("-"*20)
        return acc
    model.train()
    t_0 = time.time()
    for epoch in range(n_epochs):
        print("-"*20 + "\nStarting new epoch\n")
        t_start = time.time()
        for idx, data in enumerate(train_loader):
            if idx%10 == 0:
                print(f"Working on batch {idx}.")
            img, label = data["image"], data["label"]
            optimizer.zero_grad()
            prediction = model(img)
            output = loss_criterion(prediction, label.long())
            output.backward()
            optimizer.step()
        t_end = time.time()
        print("Epoch training took {:.0f}s.\n".format(t_end-t_start))
        # NOTE(review): eval_model switches to eval mode and train mode is
        # never restored, so epochs after the first train in eval mode
        # (affects dropout/batchnorm) — confirm this is intended.
        acc = eval_model(model, test_loader)
    t_1 = time.time()
    minutes, seconds = divmod(t_1 - t_0, 60)
    hours, minutes = divmod(minutes, 60)
    print("-"*20 + "\nTotal training time: {:.0f}h {:.0f}m {:.0f}s.".format(hours, minutes, seconds))
    return float(acc)
# ### Configuring maggy
# As a last step, we need to configure our maggy experiment. Here we pass our model class, our train and test dataset as well as the desired backend. Maggy supports either `torch` or `deepspeed`, with additional constraints on deepspeed. If using torch, you can employ the PyTorch version of the ZeRO optimizer and model sharding by changing the ZeRO levels in the config (either 1, 2 or 3).
# +
from maggy import experiment
from maggy.experiment_config import TorchDistributedConfig
# Distributed-training config: the model class (not an instance), both
# datasets, and the 'torch' backend ('deepspeed' is the alternative).
config = TorchDistributedConfig(name='ImageNet_training', module=models.resnet50, train_set=train_ds, test_set=test_ds, backend="torch")
# -
# ### Running the experiment
# Now that everything is configured, we are ready to run the experiment by calling the lagom function. You should be able to see the output of your workers in the notebook.
result = experiment.lagom(train_fn, config)
| examples/maggy-ImageNet-training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda]
# language: python
# name: conda-env-anaconda-py
# ---
# # Example: Model selection
#
# Perform model comparison and criteria based selection.
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
from dstools import model_selection
# %matplotlib inline
# -
# Load training data
X, y = make_regression()
# Specify parameter grid
# Two candidate pipelines: a PCA (keeping 99% of the variance) feeding
# either an elastic net or a random forest regressor.
elnet_pipe = make_pipeline(
    PCA(n_components=0.99, random_state=0),
    ElasticNet(random_state=0)
)
rf_pipe = make_pipeline(
    PCA(n_components=0.99, random_state=0),
    RandomForestRegressor(random_state=0)
)
# (pipeline, hyperparameter grid) pairs; the bare parameter names are
# presumably expanded to pipeline step names by parameter_grid below —
# dstools-specific, verify.
pipe_grid_specs = (
    (elnet_pipe, {'alpha': [0.01, 0.1, 1], 'l1_ratio': [0.3, 0.5, 0.7]}),
    (rf_pipe, {'max_depth': [200, 500, 700]})
)
# Format grid specifications
pipes_and_parameters = model_selection.parameter_grid(pipe_grid_specs, pipeline=True)
# Compare models and report the best alternative
pipe_results = model_selection.compare_estimators(
    X, y, pipes_and_parameters, test_size=0.3, folds=10, scoring='neg_mean_squared_error'
)
# Report the winner under each selection criterion.
model_selection.report_best_model(pipe_results, criteria='bias')
model_selection.report_best_model(pipe_results, criteria='variance')
| examples/model_selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Manually Adding Pores and Throats Using Extend
import openpnm as op
# %config InlineBackend.figure_formats = ['svg']
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(10)
ws = op.Workspace()
ws.settings['loglevel'] = 50  # only show log messages at/above this level
# We'll start with something simple, by adding a single pore and throat to a 2D network:
pn = op.network.Cubic(shape=[5, 5, 1], spacing=1.0)
# ## Add a Single Pore
#
# First add a pore and visualize it:
# NBVAL_IGNORE_OUTPUT
op.topotools.extend(network=pn, pore_coords=[[6, 6, 0.5]])
fig, ax = plt.subplots()
op.topotools.plot_coordinates(network=pn, ax=ax)
op.topotools.plot_connections(network=pn, ax=ax)
# Note that we've only added a pore, but not specified any connections to other pores. This requires quite a bit more thought than adding pore coords. The original network had 25 pores, numbered 0 to 24 (due to python's 0 indexing), so this new pore is number 25. Specifying connections requires explicitly stating which pores are connected to which according to the pore index.
# # Add a Single Throat
# Let's connect this new pore to the single pore in the top-left corner, which we know to be pore 4. Again we'll use ``extend`` but specify ``throat_conns`` instead:
# NBVAL_IGNORE_OUTPUT
op.topotools.extend(network=pn, throat_conns=[[4, 25]])
fig, ax = plt.subplots()
op.topotools.plot_coordinates(network=pn, ax=ax)
op.topotools.plot_connections(network=pn, ax=ax)
# ## Find Several Throats and Add Simultaneously
#
# We can also find the indices of pores that are physically close to pore 25, then make connections between those:
Ps = pn.find_nearby_pores(pores=25, r=3)
print(Ps)
# The above search yielded 3 pores that are within a radius of 3 units of pore 25. In order to connect these to pore 25 by a new throat, we need to create a pair of indices indicating the pores on each end of the new throat as shown below. This convention for defining network topology is based on the sparse adjacency matrix expressed in COO format, as described [here](.../tutorials/data_and_topology_storage.ipynb).
[[25, i] for i in Ps[0]]
# We can send this list to the ``extend`` function to add all three new throats with one call:
# NBVAL_IGNORE_OUTPUT
op.topotools.extend(network=pn, throat_conns=[[25, i] for i in Ps[0]])
fig, ax = plt.subplots()
op.topotools.plot_coordinates(network=pn, ax=ax)
op.topotools.plot_connections(network=pn, ax=ax)
# ## More Complex Additions
#
# Now let's do something more complex, by adding pores inside a for-loop. First create a simple 2D cubic network:
net = op.network.Cubic(shape=[5, 5, 1], spacing=1.0)
# We'll now scan through each pore in the network and add 4 new pores next to each one, at the 4 corners:
Ps = net.Ps
Ts = net.Ts
coords = net['pore.coords']
dist = 0.3  # offset of each corner pore from its parent pore
corners = [[-1, -1], [-1, 1], [1, 1], [1, -1]]
for xdir, ydir in corners:
    # Shift every existing pore towards this corner...
    adj = np.zeros_like(coords)
    adj[:, 0] = dist*xdir
    adj[:, 1] = dist*ydir
    new_coords = coords + adj
    op.topotools.extend(network=net, pore_coords=new_coords)
    # ...then connect each new pore back to its parent pore. The new pores
    # are the last len(Ps) entries of net.Ps after the extend call.
    new_Ps = net.Ps[-len(Ps):]
    new_conns = np.vstack((Ps, new_Ps)).T
    op.topotools.extend(network=net, throat_conns=new_conns)
# After any network manipulation operation, it's a good idea to check the health of the network, which checks for disconnected pores. All empty lists means nothing was found.
net.check_network_health()
fig, ax = plt.subplots(1, figsize=[6, 6])
# Original throats in blue, new corner throats in yellow; original pores
# large/red, new corner pores small/green.
op.topotools.plot_connections(network=net, throats=Ts, ax=ax, c='b')
op.topotools.plot_connections(network=net, throats=net.Ts[len(Ts):],
                              ax=ax, c='y')
op.topotools.plot_coordinates(network=net, pores=Ps, c='r', s=500, ax=ax)
op.topotools.plot_coordinates(network=net, pores=net.Ps[len(Ps):],
                              c='g', s=100, ax=ax)
plt.show()
| examples/notebooks/networks/manipulation/manually_adding_pores_and_throats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/hariseldon99/msph402b/blob/main/Computational_Linear_Algebra_all.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="VUW31D4ZhD-s"
# All Example Codes for Computational Linear Algebra
# ===================================================
# + [markdown] id="Xz3bgGKBhyUZ"
# **Examples for Gaussian Method**
# * From file [gauss_method_ex.py](03-Computational_Linear_Algebra/gauss_method_ex.py)
#
# #### The Problem:
#
# Use the Gaussian elimination method to solve the following sets of linear equations.
#
# \begin{align*}
# 25x + 5y + z &= 106.8 \\
# 64x + 8y + z &= 177.2 \\
# 144x + 12y + z &=279.2,
# \end{align*}
#
# and
#
# \begin{align*}
# 12x + 10y - 7z &= 15 \\
# 6x + 5y + 3z &= 4\\
# 5x - y + 5z &= 9
# \end{align*}
# + colab={"base_uri": "https://localhost:8080/"} id="8IBqxhyUh41c" outputId="6015f425-0592-4651-d4ae-108fc823826b"
import numpy as np
def GEPP(A, b, doPP = True):
    '''
    Gaussian elimination with partial pivoting.
    input: A is an n x n numpy matrix
           b is an n x 1 numpy array
           doPP: when True, apply partial pivoting (swap in the row with
           the largest-magnitude entry of the remaining column); when
           False, raise if a zero pivot is encountered.
    output: x is the solution of Ax=b
            with the entries permuted in
            accordance with the pivoting
            done by the algorithm
    post-condition: A and b have been modified: A is reduced in place to
            upper-triangular form (with any pivot row swaps applied) and
            b holds the correspondingly transformed right-hand side.
    '''
    n = len(A)
    if b.size != n:
        raise ValueError("Invalid argument: incompatible sizes between"+
                         "A & b.", b.size, n)
    # k represents the current pivot row. Since GE traverses the matrix in the
    # upper right triangle, we also use k for indicating the k-th diagonal
    # column index.
    # Elimination
    for k in range(n-1):
        if doPP:
            # Pivot: largest-magnitude entry at or below the diagonal in column k
            maxindex = abs(A[k:,k]).argmax() + k
            if A[maxindex, k] == 0:
                raise ValueError("Matrix is singular.")
            # Swap pivot row into place (rows of both A and b)
            if maxindex != k:
                A[[k,maxindex]] = A[[maxindex, k]]
                b[[k,maxindex]] = b[[maxindex, k]]
        else:
            if A[k, k] == 0:
                raise ValueError("Pivot element is zero. Try setting doPP to True.")
        #Eliminate: zero out column k below the pivot
        for row in range(k+1, n):
            multiplier = A[row,k]/A[k,k]
            A[row, k:] = A[row, k:] - multiplier * A[k, k:]
            b[row] = b[row] - multiplier*b[k]
    # Back Substitution on the upper-triangular system
    x = np.zeros(n)
    for k in range(n-1, -1, -1):
        x[k] = (b[k] - np.dot(A[k,k+1:],x[k+1:]))/A[k,k]
    return x
def det(A):
    """Return the determinant of square matrix A via Gaussian elimination.

    BUG FIX: the original reduced A with GEPP (partial pivoting) and took
    prod(diag(U)), but ignored the sign flips introduced by the pivot row
    swaps, so the result had the wrong sign whenever an odd number of
    swaps occurred. This version performs the elimination itself and
    tracks the swap parity. A is still modified in place (reduced to
    upper-triangular form), matching the previous post-condition. A
    singular matrix now yields 0.0 instead of raising.
    """
    n = len(A)
    sign = 1.0
    for k in range(n-1):
        maxindex = abs(A[k:, k]).argmax() + k
        if A[maxindex, k] == 0:
            return 0.0  # a singular matrix has zero determinant
        if maxindex != k:
            A[[k, maxindex]] = A[[maxindex, k]]
            sign = -sign  # each row swap negates the determinant
        for row in range(k+1, n):
            multiplier = A[row, k]/A[k, k]
            A[row, k:] = A[row, k:] - multiplier * A[k, k:]
    return sign * np.prod(np.diagonal(A))
print("Defined")
# + colab={"base_uri": "https://localhost:8080/"} id="dW40vUVTibMr" outputId="9ff12018-41a2-4748-d5eb-9875cdfd1b36"
# First system: solvable without pivoting.
A = np.array([[25., 5., 1.],
              [64., 8., 1.],
              [144., 12., 1.]])
b = np.array([106.8,
              177.2,
              279.2])
# Copies are passed because GEPP reduces A and b in place.
x = GEPP(np.copy(A),np.copy(b), doPP=False)
print("First solution is given by x =", x)
print("Error is ", np.linalg.norm(A@x - b) * 100/np.linalg.norm(b), "%")
print("Determinant of first matrix is ", det(np.copy(A)))
# Second system: elimination without pivoting hits a zero pivot
# (row 1 becomes a multiple of row 0 in column 1), so the ValueError is
# caught and the solve is retried with partial pivoting enabled.
A = np.array([[12., 10., -7.],
              [6., 5., 3.],
              [5., -1., 5.]])
b = np.array([15.,
              4.,
              9.])
try:
    x = GEPP(np.copy(A),np.copy(b), doPP=False)
except ValueError:
    x = GEPP(np.copy(A),np.copy(b))
print("Second solution is given by x =", x)
print("Error is ", np.linalg.norm(A@x - b) * 100/np.linalg.norm(b), "%")
# + [markdown] id="_RNxfzVIf_Nh"
# **Examples for LU Decomposition Method**
# * From file [lu_decomp_ex.py](03-Computational_Linear_Algebra/lu_decomp_ex.py)
#
# #### The Problem:
#
# Solve the first of two systems of linear equations in the previous problem using the LU decomposition method.
# + colab={"base_uri": "https://localhost:8080/"} id="rx1aJd6dyuPk" outputId="1ba5a54f-fe2a-4ae5-a8bf-3b4be8bde19b"
import numpy as np
from scipy.linalg import lu_factor, lu_solve
# Solve the first linear system with an LU decomposition.
A = np.array([[25., 5., 1.],
              [64., 8., 1.],
              [144., 12., 1.]])
b = np.array([106.8,
              177.2,
              279.2])
# lu_factor returns a single matrix holding U on and above the diagonal
# and the strictly-lower part of L (L's diagonal is implicitly 1), plus
# the pivot indices.
A_fact, piv = lu_factor(A.copy())
# BUG FIX: the original printed tril(A_fact, k=0) as L and
# triu(A_fact, k=1) as U, which put U's diagonal in L and dropped it
# from U. L has a unit diagonal; U keeps the stored diagonal.
n = A.shape[0]
print("Decomposed L Matrix:\n", np.tril(A_fact, k=-1) + np.eye(n))
print("\n\nDecomposed U Matrix:\n", np.triu(A_fact))
# Solve A x = b by forward/back substitution with the stored factors.
x = lu_solve((A_fact.copy(), piv),b)
print("\nSolution is x =", x)
print("Solution is close?", np.allclose(A @ x, b))
# Invert A column-by-column by solving A col_i = e_i.
id = np.eye(A.shape[0])
A_inv = np.zeros_like(A)
for i,row in enumerate(id):
    A_inv[:,i] = lu_solve((A_fact.copy(), piv),row)
print("\n\nInverse of matrix is:\n", A_inv)
print("Solution is close?", np.allclose(A @ A_inv, id))
# + [markdown] id="pKHw6RLgharg"
# **Examples for Gauss-Seidel Method**
# * From file [gauss_seidel_ex.py](03-Computational_Linear_Algebra/gauss_method_ex.py)
#
# #### The Problem:
#
# Solve the following system of linear equations using the Gauss-Seidel method, displaying the relative error at each iteration until the solution approaches a fixed point within a small tolerance.
#
# \begin{align*}
# 16x + 3y &= 11\\
# 7x - 11y &= 13.
# \end{align*}
#
# Repeat this method for the system of linear equations from the previous problem and show that the method fails.
# + colab={"base_uri": "https://localhost:8080/"} id="eOPIVufAomjR" outputId="e425721e-7bbb-4b17-f4e4-15dba881231a"
import numpy as np
def gauss_seidel(A, b, tolerance=1e-10, max_iterations=100000, verbose=False):
    """
    Solve the linear system A . x = b with the Gauss-Seidel iteration.

    Starting from x = 0, each sweep updates x[i] in place using the most
    recent values of the components before i and the previous sweep's
    values for the components after i. Iteration stops when the relative
    change between sweeps (infinity norm) drops below `tolerance`;
    StopIteration is raised if `max_iterations` sweeps are exhausted first.

    Parameters:
        A (numpy array): square matrix of coefficients
        b (numpy array): RHS vector of the linear system
    Returns:
        x (numpy array): approximate solution of A . x = b
    """
    x = np.zeros_like(b, dtype=np.double)
    if verbose:
        print("Iteration\t% soln: Relative err")
    for sweep in range(max_iterations):
        previous = x.copy()
        # One Gauss-Seidel sweep over the rows
        for i in range(A.shape[0]):
            behind = np.dot(A[i, :i], x[:i])
            ahead = np.dot(A[i, (i+1):], previous[(i+1):])
            x[i] = (b[i] - behind - ahead) / A[i, i]
        error = np.linalg.norm(x - previous, ord=np.inf) / np.linalg.norm(x, ord=np.inf)
        if verbose:
            print("%lf\t %1.4lf " % (sweep, error * 100))
        # Converged?
        if error < tolerance:
            break
        if sweep == max_iterations - 1:
            raise StopIteration("Error: Maximum iterations exceeded", sweep)
    return x
def isDDM(m, n):
    """
    Check whether the n x n numpy array m is (weakly) diagonally
    dominant, i.e. |m[i,i]| >= sum over j != i of |m[i,j]| for every row.

    BUG FIX: the original summed the raw row entries (np.sum(m[i])), so
    negative off-diagonal entries cancelled positive ones and matrices
    that are not diagonally dominant could pass the check. The
    off-diagonal entries must be summed in absolute value. (Also avoids
    shadowing the builtin `sum`.)
    """
    # for each row
    for i in range(0, n):
        # sum of absolute off-diagonal entries in row i
        off_diag = np.sum(np.abs(m[i])) - np.abs(m[i,i])
        if (abs(m[i,i]) < off_diag) :
            return False
    return True
print("Defined!")
# + colab={"base_uri": "https://localhost:8080/"} id="hOdkNnh3iFOx" outputId="0e86034a-70db-47f5-bb9e-adfb9b82c6a5"
# Diagonally dominant 2x2 system: Gauss-Seidel converges; the per-sweep
# relative error is printed because verbose=True.
mat = np.array([[16, 3],
                [7, -11]])
rhs = np.array([11,
                13])
print("x =", gauss_seidel(mat, rhs, verbose=True))
# + colab={"base_uri": "https://localhost:8080/", "height": 340} id="UGYTC3ERcgDL" outputId="01e61c25-c4fc-413b-b116-bdbd58d3757c"
# The 3x3 system from the Gaussian-elimination example is NOT diagonally
# dominant; the saved notebook output (height=340) captures the resulting
# failure of this cell, demonstrating the method's limitation.
mat = np.array([[25, 5, 1],
                [64, 8, 1],
                [144, 12, 1]])
rhs = np.array([106.8,
                177.2,
                279.2])
print("Is matrix diagonally dominant?", isDDM(mat, 3))
print("x =", gauss_seidel(mat, rhs))
# -
# #### The Problem:
#
# Apply the previous algorithm for the Gauss-Seidel method in order to solve the following system of linear equations.
# \begin{align*}
# 12x + 3y - 5z &=1\\
# x + 5y + 3z &=28\\
# 3x + 7y + 13z &=76
# \end{align*}
#
# Check for diagonal dominance before you run the solution algorithm. Repeat the same with the first two equations interchanged and observe how the loss of diagonal dominance leads to the failure of the Gauss-Seidel method, despite the fact that the system of equations have not fundamentally changed.
#
# + colab={"base_uri": "https://localhost:8080/"} id="280Jf6UYf9EF" outputId="85a310a9-adc0-402e-fcc6-378ba18f3276" tags=[]
mat = np.array([[12, 3, -5],
[1, 5, 3],
[3, 7, 13]])
rhs = np.array([1,
28,
76])
print("Is matrix diagonally dominant?", isDDM(mat, 3))
print("x =", gauss_seidel(mat, rhs))
# + colab={"base_uri": "https://localhost:8080/"} id="yUZANoi0iWT8" outputId="611c3267-f261-4ba5-8ee3-788dafb4da8e"
mat = np.array([[1, 5, 3],
[12, 3, -5],
[3, 7, 13]])
rhs = np.array([28,
1,
76])
print("Is matrix diagonally dominant?", isDDM(mat, 3))
try:
print("x =", gauss_seidel(mat, rhs))
except Exception:
print("The algorithm failed to converge")
# -
# ### Application: Polynomial Regression
#
# We will use a weighted least-squares polynomial fit (linear in its parameters) to determine how the length of a bluegill fish is related to its age. In the experiment, $n = 78$ bluegills were randomly sampled from a lake. The length $x$ (in mm) and age $t$ (in years) data that was gathered can be obtained at the link below
#
# [Dataset](03-Computational_Linear_Algebra/hotrod_dataset.csv)
#
# The data is in two columns. The first has the age $t_i$ and the second has the corresponding length readings $x_i$. The columns are separated by commas, making this file a **'csv'** or '*[comma separated values](https://datahub.io/docs/data-packages/csv)'* file. Import this data into numpy arrays and use matplotlib to plot the length as a function of age. We will now try to fit this data to a quadratic, given by $x(t;a,b) = a + bt + ct^2$. The parameters $a, b, c$ are to be chosen such that the chi-square is minimized, where
# \begin{equation*}
# \chi^2 \equiv \sum_i \bigg[\frac{x_i -x(t_i;a,b,c)}{\sigma_i}\bigg]^2,
# \end{equation*}
# and $\sigma_i$ is the error in $x_i$. Analytically, minimizing the $\chi^2$ with respect to $a, b, c$ leads to three simultaneous linear equations for $a,b,c$, given by
# \begin{equation*}
# \begin{pmatrix}
# S & S_t & S_{tt}\\
# S_t & S_{tt} & S_{ttt}\\
# S_{tt} & S_{ttt} & S_{tttt}
# \end{pmatrix}\begin{pmatrix} a \\ b \\ c\end{pmatrix}= \begin{pmatrix}S_x\\S_{tx}\\S_{ttx}\end{pmatrix}
# \end{equation*}
# To obtain the terms in the matrices above, the following formulae may be used.
# \begin{align*}
# S = \sum_i \frac{1}{\sigma^2_i},\; & S_t = \sum_i \frac{t_i}{\sigma^2_i},\; & S_x = \sum_i \frac{x_i}{\sigma^2_i},\\
# S_{tt} = \sum_i \frac{t^2_i}{\sigma^2_i},\; & S_{tx} = \sum_i \frac{t_ix_i}{\sigma^2_i},\;& S_{ttt} = \sum_i \frac{t^3_i}{\sigma^2_i},\\
# S_{tttt} = \sum_i \frac{t^4_i}{\sigma^2_i},\; & S_{ttx} = \sum_i \frac{t^2_ix_i}{\sigma^2_i}, & \\
# \end{align*}
#
# First, write and execute code to estimate the errors in the data for each age measured $t$. Do this by sorting the $x_i$-data corresponding to increasing values of the $t_i$-data, then estimating the length for each unique $t_i$ by averaging over the common data points. Now, you will have new values $t_j$ with average lengths $x_j$. The errors $\sigma_j$ can be the corresponding standard deviations.
#
# Then, write and execute additional code for obtaining the fitted values of $a,b,c$ according to the formulae given above and plot the resultant quadratic polynomial, comparing it with plots of the experimental data. Obtain and display the minimum value of $\chi^2$.
# + tags=[]
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import lu_factor, lu_solve
plt.rcParams['figure.figsize'] = (8,6)
plt.rcParams['font.size'] = 20
# NOTE(review): the text above links 'hotrod_dataset.csv' but this cell
# reads 'bluegills_dataset.csv' — confirm which file ships with the repo.
data = np.genfromtxt('03-Computational_Linear_Algebra/bluegills_dataset.csv', delimiter=',')
t = data[:,0]  # age (years)
x = data[:,1]  # length (mm)
plt.scatter(t,x, color='red', s=10, alpha=0.4)
plt.xlabel("t (yr)")
plt.ylabel("x (mm)", rotation=0)
# Take only unique values of t and make an array of them
# NOTE(review): set() does not guarantee sorted order, so tdata[0] and
# tdata[-1] may not span the domain below — consider np.unique instead.
tdata = np.array([tval for tval in set(t)])
# For each unique val of t, find the vals of x and avg/stdev them;
# the standard deviations serve as the per-point errors sigma_j.
xdata = np.array([np.average(x[np.where(t==tval)]) for tval in tdata])
xerr = np.array([np.std(x[np.where(t==tval)]) for tval in tdata])
plt.errorbar(tdata,xdata, yerr=xerr, color='blue',fmt="o")
# Now, do the Regression: build the weighted moment sums S, S_t, ...
# that form the 3x3 normal-equation system for the quadratic (a, b, c).
s = np.sum(1 / xerr**2)
st = np.sum(tdata / xerr**2)
stt = np.sum(tdata**2 / xerr**2)
sttt = np.sum(tdata**3 / xerr**2)
stttt = np.sum(tdata**4 / xerr**2)
sx = np.sum(xdata / xerr**2)
stx = np.sum(tdata * xdata / xerr**2)
sttx = np.sum(tdata**2 * xdata / xerr**2)
smat = np.array([[s, st, stt],
                 [st, stt, sttt],
                 [stt, sttt, stttt]])
svec = np.array([sx,
                 stx,
                 sttx])
# Solve the normal equations with the LU decomposition for (a, b, c).
s_fact, piv = lu_factor(smat)
avec = lu_solve((s_fact, piv),svec)
# Finally, plot the fitted function with avec coefficients
fit_func = np.polynomial.polynomial.Polynomial(avec)
t_fit, x_fit = fit_func.linspace(n=100, domain=[tdata[0],tdata[-1]])
# Chi-square of the fit against the averaged data points
chi_sq = np.sum(((xdata - fit_func(tdata))/xerr)**2)
plt.plot(t_fit, x_fit, label=f"Quad Fit: chisq = {chi_sq:1.4f}")
plt.legend()
plt.show()
# -
# #### Postscript
#
# Actually, all this effort was not strictly necessary for the problem above. SciPy already has a fitting routine that does this automatically. Look at the help documentation for 'scipy.optimize.curve_fit'. The problem was a simple exercise on solving linear equations numerically. In any case, make sure that your fit is correct by comparing it with the output of the 'curve_fit' routine. A part of the documentation for this fitting routine is reproduced below.
#
#
# Help on function curve_fit in module scipy.optimize.minpack:
#
# curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=True, bounds=(-inf, inf), method=None, jac=None, **kwargs)
# Use non-linear least squares to fit a function, f, to data.
#
# Assumes ``ydata = f(xdata, *params) + eps``.
#
# Parameters
# ----------
# f : callable
# The model function, f(x, ...). It must take the independent
# variable as the first argument and the parameters to fit as
# separate remaining arguments.
# xdata : array_like or object
# The independent variable where the data is measured.
# Should usually be an M-length sequence or an (k,M)-shaped array for
# functions with k predictors, but can actually be any object.
# ydata : array_like
# The dependent data, a length M array - nominally ``f(xdata, ...)``.
#
# ...
# ...
# ...
#
#
# Returns
# -------
# popt : array
# Optimal values for the parameters so that the sum of the squared
# residuals of ``f(xdata, *popt) - ydata`` is minimized.
# pcov : 2-D array
# The estimated covariance of popt. The diagonals provide the variance
# of the parameter estimate. To compute one standard deviation errors
# on the parameters use ``perr = np.sqrt(np.diag(pcov))``
#
# ...
# ...
# + tags=[]
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt

plt.rcParams['figure.figsize'] = (8,6)
plt.rcParams['font.size'] = 20

# Re-load the raw age/length data so this cell is self-contained.
data = np.genfromtxt('03-Computational_Linear_Algebra/bluegills_dataset.csv', delimiter=',')
t = data[:,0]
x = data[:,1]
plt.scatter(t,x, color='red', s=10, alpha=0.4)
plt.xlabel("t (yr)")
plt.ylabel("x (mm)", rotation=0)
# Cross-check the manual chi-square fit with SciPy's least-squares routine.
params, conv = curve_fit(lambda t,a,b,c: a + b*t + c*t**2, t, x)
fit_func = np.polynomial.polynomial.Polynomial(params)
# FIX: the plotting domain previously used tdata[0]/tdata[-1], a variable
# defined only in an earlier cell (NameError when this cell runs standalone,
# and its set()-derived ordering made the endpoints arbitrary anyway).
# Use the min/max of the ages loaded in this cell instead.
t_fit, x_fit = fit_func.linspace(n=100, domain=[t.min(), t.max()])
plt.plot(t_fit, x_fit)
plt.show()
| Computational_Linear_Algebra_all.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Add Matplotlib inline magic command
# %matplotlib inline
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
# Load and sanity-check the two PyBer input files (city metadata and rides).
# Files to load
city_data_to_load = "Resources/city_data.csv"
ride_data_to_load = "Resources/ride_data.csv"
# Read the city data file and store it in a pandas DataFrame.
city_data_df = pd.read_csv(city_data_to_load)
city_data_df.head(10)
# Read the ride data file and store it in a pandas DataFrame.
ride_data_df = pd.read_csv(ride_data_to_load)
ride_data_df.head(10)
# Get the columns and the rows that are not null.
city_data_df.count()
# Get the columns and the rows that are not null.
city_data_df.isnull().sum()
# Get the data types of each column.
city_data_df.dtypes
# Get the unique values of the type of city.
city_data_df["type"].unique()
# Get the number of data points from the Urban cities.
sum(city_data_df["type"]=="Urban")
# Get the number of data points from the Suburban cities.
sum(city_data_df["type"]=="Suburban")
# Get the number of data points from the Rural cities.
sum(city_data_df["type"]=="Rural")
# Get the columns and the rows that are not null.
ride_data_df.count()
## Get the columns and the rows that are not null.
ride_data_df.isnull().sum()
ride_data_df.dtypes
# + active=""
#
# +
# # Combine the data into a single dataset
# The next four cells try each merge strategy in turn (right/left/outer/inner)
# for comparison; only the final "inner" result is kept for the analysis below.
# NOTE(review): on=["city", "city"] repeats the same key; on="city" is the
# usual spelling -- verify the output is identical.
pyber_data_df = pd.merge(ride_data_df, city_data_df, how="right", on=["city", "city"])
# Display the DataFrame
pyber_data_df.head()
# +
# # Combine the data into a single dataset
pyber_data_df = pd.merge(ride_data_df, city_data_df, how="left", on=["city", "city"])
# Display the DataFrame
pyber_data_df.head()
# +
# # Combine the data into a single dataset
pyber_data_df = pd.merge(ride_data_df, city_data_df, how="outer", on=["city", "city"])
# Display the DataFrame
pyber_data_df.head()
# +
# # Combine the data into a single dataset
pyber_data_df = pd.merge(ride_data_df, city_data_df, how="inner", on=["city", "city"])
# Display the DataFrame
pyber_data_df.head()
# -
# Create the Urban city DataFrame.
urban_cities_df = pyber_data_df[pyber_data_df["type"] == "Urban"]
urban_cities_df.head()
# Create the Suburban and Rural city DataFrames.
suburban_cities_df = pyber_data_df[pyber_data_df["type"] == "Suburban"]
rural_cities_df = pyber_data_df[pyber_data_df["type"] == "Rural"]
rural_cities_df.head()
suburban_cities_df.head()
# # Get the number of rides for urban cities.
urban_ride_count = urban_cities_df.groupby(["city"]).count()["ride_id"]
urban_ride_count.head()
# +
# # Create the suburban and rural ride count.
suburban_ride_count = suburban_cities_df.groupby(["city"]).count()["ride_id"]
rural_ride_count = rural_cities_df.groupby(["city"]).count()["ride_id"]
# -
suburban_ride_count.head()
rural_ride_count.head()
# Get average fare for each city in the urban cities.
urban_avg_fare = urban_cities_df.groupby(["city"]).mean()["fare"]
urban_avg_fare.head()
# Get average fare for each city in the suburban and rural cities.
suburban_avg_fare = suburban_cities_df.groupby(["city"]).mean()["fare"]
rural_avg_fare = rural_cities_df.groupby(["city"]).mean()["fare"]
suburban_avg_fare.head()
rural_avg_fare.head()
# Get the average number of drivers for each urban city.
# (driver_count is constant per city, so the mean recovers that constant.)
urban_driver_count = urban_cities_df.groupby(["city"]).mean()["driver_count"]
urban_driver_count.head()
# Get the average number of drivers for each city for the suburban and rural cities.
suburban_driver_count = suburban_cities_df.groupby(["city"]).mean()["driver_count"]
rural_driver_count = rural_cities_df.groupby(["city"]).mean()["driver_count"]
suburban_driver_count.head()
rural_driver_count.head()
# Build the scatter plots for urban cities.
# These cells build up the bubble chart incrementally: bare scatter, then
# bubble sizes from driver counts, then full styling, per city type.
plt.scatter(urban_ride_count, urban_avg_fare)
# Build the scatter plots for urban cities.
plt.scatter(urban_ride_count,
      urban_avg_fare,
      s=urban_driver_count)
# Build the scatter plots for urban cities.
# Marker area is scaled 10x the driver count so differences are visible.
plt.scatter(urban_ride_count,
      urban_avg_fare,
      s=10*urban_driver_count, c="coral",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Urban")
plt.title("PyBer Ride-Sharing Data (2019)")
plt.ylabel("Average Fare ($)")
plt.xlabel("Total Number of Rides (Per City)")
plt.grid(True)
# Add the legend.
plt.legend()
# Build the scatter plots for suburban cities.
plt.scatter(suburban_ride_count,
      suburban_avg_fare,
      s=10*suburban_driver_count, c="skyblue",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Suburban")
plt.title("PyBer Ride-Sharing Data (2019)")
plt.ylabel("Average Fare ($)")
plt.xlabel("Total Number of Rides (Per City)")
plt.grid(True)
# Add the legend.
plt.legend()
# Build the scatter plots for rural cities.
plt.scatter(rural_ride_count,
      rural_avg_fare,
      s=10*rural_driver_count, c="gold",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Rural")
plt.title("PyBer Ride-Sharing Data (2019)")
plt.ylabel("Average Fare ($)")
plt.xlabel("Total Number of Rides (Per City)")
plt.grid(True)
# Add the legend.
plt.legend()
# +
# Add the scatter charts for each type of city (all three series, one axes).
plt.scatter(urban_ride_count,
      urban_avg_fare,
      s=10*urban_driver_count, c="coral",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Urban")
plt.scatter(suburban_ride_count,
      suburban_avg_fare,
      s=10*suburban_driver_count, c="skyblue",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Suburban")
plt.scatter(rural_ride_count,
      rural_avg_fare,
      s=10*rural_driver_count, c="gold",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Rural")
# Show the plot
plt.show()
# +
# Build the final styled bubble chart for all three city types.
plt.subplots(figsize=(10, 6))
plt.scatter(urban_ride_count,
      urban_avg_fare,
      s=10*urban_driver_count, c="coral",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Urban")
plt.scatter(suburban_ride_count,
      suburban_avg_fare,
      s=10*suburban_driver_count, c="skyblue",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Suburban")
plt.scatter(rural_ride_count,
      rural_avg_fare,
      s=10*rural_driver_count, c="gold",
      edgecolor="black", linewidths=1,
      alpha=0.8, label="Rural")
# Incorporate the other graph properties
plt.title("PyBer Ride-Sharing Data (2019)", fontsize=20)
plt.ylabel("Average Fare ($)", fontsize=12)
plt.xlabel("Total Number of Rides (Per City)", fontsize=12)
plt.grid(True)
# Create a legend with uniform marker sizes.
# FIX: Matplotlib's legend "mode" keyword only recognises "expand";
# the previous value "Expanded" was not a valid mode.
lgnd = plt.legend(fontsize="12", mode="expand",
                  scatterpoints=1, loc="best", title="City Types")
# NOTE(review): legendHandles is deprecated in newer Matplotlib in favour of
# legend_handles -- confirm against the pinned version.
lgnd.legendHandles[0]._sizes = [75]
lgnd.legendHandles[1]._sizes = [75]
lgnd.legendHandles[2]._sizes = [75]
lgnd.get_title().set_fontsize(12)
## Save the figure.
# FIX: save BEFORE plt.show() -- in a plain script, show() hands the figure to
# the GUI event loop and a later savefig() can write out a blank canvas.
plt.savefig("./Fig1.png")
# Show the plot
plt.show()
# -
# Incorporate a text label about circle size.
# NOTE(review): this runs in its own cell; it must execute while the bubble
# chart's figure is still current, or the text lands on a fresh figure.
plt.text(42, 35, "Note:\nCircle size correlates\nwith driver count per city.", fontsize="12")
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Chapter 5 stuff
# From Chapter 5
# Deutsch algorithm
# +
import numpy as np
import random
import cirq
from cirq import H, X, CNOT, measure
# Two line qubits: q0 is the query qubit, q1 the target the oracle writes to.
q0, q1 = cirq.LineQubit.range(2)
# Two random bits selecting which of the four one-bit oracles to build below.
secret_function = [random.randint(0,1) for _ in range(2)]
def make_oracle(a, b, c):
    """Yield the gates of the oracle selected by the two bits in c.

    c[0] set: apply CNOT(a, b) followed by X(b).
    c[1] set: apply CNOT(a, b).
    Together the two bits pick one of the four one-bit functions.
    """
    if c[0]:
        yield [CNOT(a,b), X(b)]
    if c[1]:
        yield CNOT(a,b)
def make_deutsch_circuit(d, e, f):
    """Build the Deutsch circuit: query qubit d, ancilla e, oracle ops f.

    FIX: the ancilla must be prepared in |-> via X then H.  The original
    appended H(e) twice (the identity), so the oracle's phase kickback was
    lost and the final measurement could not distinguish the oracles.
    """
    c = cirq.Circuit()
    # Prepare d in |+> and the ancilla e in |->.
    c.append([X(e), H(e), H(d)])
    # Query the oracle once.
    c.append(f)
    # Interfere and measure the query qubit.
    c.append([H(d), measure(d, key='result')])
    return c
# Build the randomly selected oracle, assemble the circuit, and run one shot.
oracle = make_oracle(q0, q1, secret_function)
circuit = make_deutsch_circuit(q0, q1, oracle)
print(circuit)
simulator = cirq.Simulator()
# Print the measured 'result' bit for this oracle.
result = simulator.run(circuit)
print(result)
# -
# Deutsch-Josza
# This section rebuilds the same circuits several times, adding one stage per
# cell purely so each intermediate circuit can be drawn; only the last build
# of each circuit is complete.
# +
'''
Code adopted from: https://qiskit.org/textbook/ch-algorithms/deutsch-jozsa.html#4.-Qiskit-Implementation-
'''
# initialization
import numpy as np
# importing Qiskit
from qiskit import BasicAer
#from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, execute
# import basic plot tools
from qiskit.visualization import plot_histogram
np.random.seed(42)
# set the length of the n-bit input string.
n = 3
# set the length of the n-bit input string.
n = 3
# %matplotlib inline
#Constant oracle
# A constant oracle either does nothing or always flips the output qubit n.
const_oracle = QuantumCircuit(n+1)
output = np.random.randint(2)
if output == 1:
    const_oracle.x(n)
#const_oracle.draw(output='mpl')
const_oracle.draw()
#Balanced oracle
balanced_oracle = QuantumCircuit(n+1)
b_str = "101"
balanced_oracle = QuantumCircuit(n+1)
b_str = "101"
# Place X-gates
for qubit in range(len(b_str)):
    if b_str[qubit] == '1':
        balanced_oracle.x(qubit)
balanced_oracle.draw()
balanced_oracle = QuantumCircuit(n+1)
b_str = "101"
# Place X-gates
for qubit in range(len(b_str)):
    if b_str[qubit] == '1':
        balanced_oracle.x(qubit)
# Use barrier as divider
balanced_oracle.barrier()
# Controlled-NOT gates
for qubit in range(n):
    balanced_oracle.cx(qubit, n)
balanced_oracle.barrier()
balanced_oracle.draw()
# Final (complete) build of the balanced oracle: X wrapper, CNOTs, X wrapper.
balanced_oracle = QuantumCircuit(n+1)
b_str = "101"
# Place X-gates
for qubit in range(len(b_str)):
    if b_str[qubit] == '1':
        balanced_oracle.x(qubit)
# Use barrier as divider
balanced_oracle.barrier()
# Controlled-NOT gates
for qubit in range(n):
    balanced_oracle.cx(qubit, n)
balanced_oracle.barrier()
# Place X-gates
for qubit in range(len(b_str)):
    if b_str[qubit] == '1':
        balanced_oracle.x(qubit)
# Show oracle
balanced_oracle.draw()
#implementation of the algo
dj_circuit = QuantumCircuit(n+1, n)
# Apply H-gates
for qubit in range(n):
    dj_circuit.h(qubit)
# Put qubit in state |->
dj_circuit.x(n)
dj_circuit.h(n)
dj_circuit.draw()
dj_circuit = QuantumCircuit(n+1, n)
# Apply H-gates
for qubit in range(n):
    dj_circuit.h(qubit)
# Put qubit in state |->
dj_circuit.x(n)
dj_circuit.h(n)
# Add oracle
# NOTE(review): circuit += circuit was removed in later qiskit releases in
# favour of QuantumCircuit.compose -- this code targets the pinned old qiskit.
dj_circuit += balanced_oracle
dj_circuit.draw()
# Final (complete) build: H layer, |-> ancilla, oracle, H layer, measurement.
dj_circuit = QuantumCircuit(n+1, n)
# Apply H-gates
for qubit in range(n):
    dj_circuit.h(qubit)
# Put qubit in state |->
dj_circuit.x(n)
dj_circuit.h(n)
# Add oracle
dj_circuit += balanced_oracle
# Repeat H-gates
for qubit in range(n):
    dj_circuit.h(qubit)
dj_circuit.barrier()
# Measure
for i in range(n):
    dj_circuit.measure(i, i)
# Display circuit
dj_circuit.draw()
# use local simulator
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(dj_circuit, backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
# -
# Bernstein-vazirani Algorithm
# Recovers a secret bit string s with a single oracle query.
# +
'''
This code is adopted from: https://github.com/qiskit-community/qiskit-community-tutorials/blob/master/Coding_With_Qiskit/ep6_Bernstein-Vazirani_Algorithm.ipynb
'''
# #!pip install qiskit
import numpy as np
from qiskit import *
from qiskit.visualization import plot_histogram
np.random.seed(42)
# Blocks waiting for the secret string on stdin; qubit n is the ancilla.
s = input("Enter the secret bit string:\n")
n = len(s)
circuit = QuantumCircuit(n+1,n)
# Step 0: flip the ancilla (qubit n) to |1>.
circuit.x(n)
circuit.barrier()
# Step 1: Hadamard every qubit (ancilla becomes |->).
circuit.h(range(n+1))
circuit.barrier()
# Step 2: oracle -- a CNOT into the ancilla for each '1' bit of s
# (reversed so bit 0 of s maps to qubit 0 in qiskit's ordering).
for ii, yesno in enumerate(reversed(s)):
    if yesno == '1':
        circuit.cx(ii, n)
circuit.barrier()
# Step 3: Hadamard again and measure the input register; it reads out s.
circuit.h(range(n+1))
circuit.barrier()
circuit.measure(range(n), range(n))
# %matplotlib inline
#circuit.draw(output='mpl')
circuit.draw()
#Running the algorithm
simulator = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend=simulator, shots=1024).result()
plot_histogram(result.get_counts(circuit))
# -
# -
# N-qubit QFT using Qiskit
# +
from qiskit.circuit.library import QFT

# Build and draw an n-qubit Quantum Fourier Transform circuit.
# FIX: input() returns a string; QFT's num_qubits must be an integer.
n = int(input(" Enter the number of qubits: \n"))
qft_circuit = QFT(num_qubits = n)
qft_circuit.draw()
# -
# Quantum Phase estimation using cirq
# +
"""Creates and simulates a phase estimator circuit.
=== EXAMPLE OUTPUT ===
Testing with 8 qubits.
target=0.0000, estimate=0.0000=0/256
target=0.1000, estimate=0.1016=26/256
target=0.2000, estimate=0.1992=51/256
target=0.3000, estimate=0.3008=77/256
target=0.4000, estimate=0.3984=102/256
target=0.5000, estimate=0.5000=128/256
target=0.6000, estimate=0.6016=154/256
target=0.7000, estimate=0.6992=179/256
target=0.8000, estimate=0.8008=205/256
target=0.9000, estimate=0.8984=230/256
RMS Error: 0.0011
"""
'''
This code is adopted from: https://github.com/quantumlib/Cirq/blob/master/examples/phase_estimator.py
'''
import numpy as np
import cirq
def run_estimate(unknown_gate, qnum, repetitions):
    """Run quantum phase estimation on unknown_gate with qnum register qubits.

    Returns the cirq sample result; the measured register is under key 'phase'.
    """
    ancilla = cirq.LineQubit(-1)
    qubits = cirq.LineQubit.range(qnum)
    # Controlled-U^(2^i) on the ancilla for each register qubit i
    # (phase kickback writes the eigenphase bits into the register).
    oracle_raised_to_power = [
        unknown_gate.on(ancilla).controlled_by(qubits[i])**(2**i)
        for i in range(qnum)
    ]
    # NOTE(review): cirq.QFT is the pre-1.0 spelling (newer cirq uses
    # cirq.qft) -- this targets the cirq version the example was written for.
    circuit = cirq.Circuit(cirq.H.on_each(*qubits), oracle_raised_to_power,
                           cirq.QFT(*qubits, without_reverse=True)**-1,
                           cirq.measure(*qubits, key='phase'))
    return cirq.sample(circuit, repetitions=repetitions)
def experiment(qnum, repetitions=100):
    """Estimate phases 0.0 .. 0.9 with qnum qubits and print the RMS error."""
    def example_gate(phi):
        # Single-qubit gate diag(e^{2*pi*i*phi}, 1): eigenphase phi on |0>.
        gate = cirq.MatrixGate(
            matrix=np.array([[np.exp(2 * np.pi * 1.0j * phi), 0], [0, 1]]))
        return gate
    print(f'Testing with {qnum} qubits.')
    errors = []
    for target in np.arange(0, 1, 0.1):
        result = run_estimate(example_gate(target), qnum, repetitions)
        # Most frequent measurement outcome, read as a qnum-bit fraction.
        mode = result.data['phase'].mode()[0]
        guess = mode / 2**qnum
        print(f'target={target:0.4f}, '
              f'estimate={guess:0.4f}={mode}/{2**qnum}')
        errors.append((target - guess)**2)
    rms = np.sqrt(sum(errors) / len(errors))
    print(f'RMS Error: {rms:0.4f}\n')
def main(qnums = (2, 4, 8), repetitions=100):
    """Run the phase-estimation experiment once per register size."""
    for register_size in qnums:
        experiment(register_size, repetitions=repetitions)
if __name__ == '__main__':
    main()
# -
# Simons algorithm using qiskit
# +
# #!pip install 'qiskit==0.19'
# #!pip install git+https://github.com/qiskit-community/qiskit-textbook.git#subdirectory=qiskit-textbook-src
'''
This is taken from: https://qiskit.org/textbook/ch-algorithms/simon.html
'''
# importing Qiskit
from qiskit import BasicAer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, execute
# import basic plot tools
from qiskit.visualization import plot_histogram
from qiskit_textbook.tools import simon_oracle
import matplotlib.pyplot as plt
# Secret period string b; the circuit uses n input qubits and n output qubits.
b = '110'
n = len(b)
simon_circuit = QuantumCircuit(n*2, n)
# Apply Hadamard gates before querying the oracle
simon_circuit.h(range(n))
# Apply barrier for visual separation
simon_circuit.barrier()
# NOTE(review): circuit += circuit was removed in later qiskit releases in
# favour of compose -- this matches the qiskit==0.19 pin above.
simon_circuit += simon_oracle(b)
# Apply barrier for visual separation
simon_circuit.barrier()
# Apply Hadamard gates to the input register
simon_circuit.h(range(n))
# Measure qubits
simon_circuit.measure(range(n), range(n))
simon_circuit.draw('mpl')
plt.show()
# use local simulator
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(simon_circuit, backend=backend, shots=shots).result()
counts = results.get_counts()
plot_histogram(counts)
# Calculate the dot product of the results
def bdotz(b, z):
    """Return the inner product of bit strings b and z, modulo 2."""
    return sum(int(b[i]) * int(z[i]) for i in range(len(b))) % 2
# Every measured z from Simon's algorithm should satisfy b.z = 0 (mod 2).
print('b = ' + b)
for z in counts:
    print( '{}.{} = {} (mod 2) ({:.1f}%)'.format(b, z, bdotz(b,z), counts[z]*100/shots))
# -
# Shor algorithm
# +
# #!pip install 'qiskit==0.19'
# #!pip install 'qiskit==0.19'
# #!pip install git+https://github.com/qiskit-community/qiskit-textbook.git#subdirectory=qiskit-textbook-src
# #!pip install sympy
from qiskit import BasicAer
# NOTE(review): qiskit.aqua was removed from later qiskit releases; this
# matches the qiskit==0.19 pin in the install comments above.
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import Shor
# Factor N=15 using base a=7 on the local QASM simulator.
number = Shor(N=15, a=7)
simulator = BasicAer.get_backend('qasm_simulator')
results_dictionary = number.run(QuantumInstance(backend=simulator, shots=5))
#result = results_dictionary['number']
print(results_dictionary)
# -
# Grover's algorithm
# +
# #!pip install 'qiskit==0.19'
'''
Grover's two-qubit search algorithm.
'''
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, BasicAer
from qiskit.visualization import plot_histogram
import matplotlib.pyplot as plt
c = ClassicalRegister(2, 'c')
q = QuantumRegister(2,'q')
qc = QuantumCircuit(q,c)
# Put both qubits in uniform superposition.
qc.h([q[0]])
qc.h([q[1]])
# Oracle: CZ conjugated by X on both qubits flips the phase of |00>.
qc.x([q[0]])
qc.x([q[1]])
qc.cz(0,1)
qc.x([q[0]])
qc.x([q[1]])
# Diffusion (inversion about the mean): H, Z, CZ, H.
qc.h([q[0]])
qc.h([q[1]])
qc.z(q[0])
qc.z(q[1])
qc.cz(0,1)
qc.h([q[0]])
qc.h([q[1]])
# Measure both qubits; the marked state should dominate the counts.
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
qc.draw('mpl')
plt.show()
simulator = BasicAer.get_backend('qasm_simulator')
job = execute(qc, simulator, shots=1024)
result = job.result()
count = result.get_counts(qc)
plot_histogram(count)
plt.show()
| ch-5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Update sklearn to prevent version mismatches
# !pip install sklearn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing
# !pip install joblib
# +
import pandas as pd
import numpy as np
import warnings
# Silence sklearn/pandas FutureWarnings for a cleaner notebook run.
warnings.simplefilter('ignore', FutureWarning)
# -
# # Read the CSV and Perform Basic Data Cleaning
# Load the Kepler exoplanet data and drop empty columns/rows with nulls.
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
# koi_disposition: y-values. Indicates a candidate planet of interest
# # Select your features (columns)
# Set features. This will also be used as your x values.
selected_features = df[['koi_fpflag_nt', 'koi_fpflag_ss', 'koi_fpflag_ec', 'koi_period', 'koi_period_err1', 'koi_period_err2', 'koi_time0bk', 'koi_time0bk_err1', 'koi_time0bk_err2', 'koi_impact', 'koi_impact_err1', 'koi_impact_err2','koi_duration','koi_duration_err1','koi_duration_err2','koi_depth','koi_depth_err1','koi_depth_err2','koi_prad','koi_prad_err1','koi_prad_err2','koi_teq','koi_insol','koi_insol_err1','koi_insol_err2','koi_model_snr','koi_tce_plnt_num',"koi_steff",'koi_steff_err1','koi_steff_err2','koi_slogg','koi_slogg_err1','koi_slogg_err2',"koi_srad","koi_srad_err1","koi_srad_err2",'ra','dec','koi_kepmag']]
# # Create a Train Test Split
#
# Use `koi_disposition` for the y values
from sklearn.model_selection import train_test_split
# assign data to X and y
X = selected_features
y = df["koi_disposition"]
# use train, test, split -- stratify keeps class proportions in both splits,
# and random_state=42 makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)
X_train.shape
X_train.head()
# # Pre-processing
#
# Scale the data using the MinMaxScaler and perform some feature selection
# Scale the data
from sklearn.preprocessing import MinMaxScaler
# Fit the scaler on the training split only, then apply it to both splits,
# so no information leaks from the test set.
X_scaler = MinMaxScaler().fit(X_train)
# Transform the training and testing data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# Create a Logistic Regression model
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFECV
model_LR1 = LogisticRegression(solver='newton-cg', multi_class='auto')
model_LR1.fit(X_train_scaled, y_train)
# # Train the Model
#
#
# Report train/test accuracy as percentages rounded to 3 decimals.
model_LR1_training_score = round(model_LR1.score(X_train_scaled, y_train)*100,3)
base_accuracy = round(model_LR1.score(X_test_scaled, y_test)*100,3)
print(f"Initial Model Training Data Score: {model_LR1_training_score} %")
print(f"Initial Model Testing Data Score: {base_accuracy} %")
# # Feature Selection
# Determine which features we should keep
feature_names = X.columns.tolist()
# Recursive feature elimination with 5-fold cross-validation, one feature
# removed per step; selector.ranking_ gives 1 for kept features.
selector = RFECV(estimator=model_LR1, cv=5, step=1)
_ = selector.fit(X_train_scaled, y_train)
# +
# Note that unlike the Random Forest Model, the Logistic Regression model does not have a most important features method
# Pair each feature with its RFECV rank and sort best (rank 1) first.
initial_features = sorted(zip(selector.ranking_, feature_names))
# Build a list of the features and rank them from most important to least
ranked_features = pd.DataFrame(initial_features, columns=['Ranking', 'Feature'])
ranked_features = ranked_features.set_index('Feature')
ranked_features
# -
# Keep features ranked 1-15; drop rank 16 and higher (an arbitrary cutoff).
selected_features = []
for r in initial_features:
    if r[0] < 16:
        selected_features.append(r[1])
# +
# Use new data for all subsequent models
## Assign new data to X -- restrict both splits to the RFECV-selected columns.
X_train_select = X_train[selected_features]
X_test_select = X_test[selected_features]
# Re-fit the scaler on the reduced feature set (train split only).
X_scaler = MinMaxScaler().fit(X_train_select)
X_train_scaled = X_scaler.transform(X_train_select)
X_test_scaled = X_scaler.transform(X_test_select)
## Train new model
model_LR2 = LogisticRegression(solver='newton-cg',multi_class='auto')
model_LR2.fit(X_train_scaled, y_train)
model_LR2_training_score = round(model_LR2.score(X_train_scaled, y_train)*100,3)
select_features_accuracy = round(model_LR2.score(X_test_scaled, y_test)*100,3)
print(f"Revised Model Training Data Score: {model_LR2_training_score} %")
print(f"Revised Model Testing Data Score: {select_features_accuracy} %")
# -
# # Hyperparameter Tuning
# Use `GridSearchCV` to tune the model's parameters
# Import the GridSearchCV model
from sklearn.model_selection import GridSearchCV
# +
model_LR3 = LogisticRegression(solver='newton-cg', multi_class='auto')
# Search 10 log-spaced C values in [1, 10^4] with an L2 penalty.
param_grid = {
    'C': np.logspace(0, 4, 10),
    'penalty': ['l2']
}
grid = GridSearchCV(model_LR3, param_grid, cv=5, verbose=0)
# Train the model with GridSearch
_ = grid.fit(X_train_scaled, y_train)
# +
# Tuned parameters -- best combination found by the 5-fold grid search.
C = grid.best_params_['C']
penalty = grid.best_params_['penalty']
# Tuned model, re-fit on the full training split with the best parameters.
tuned_model = LogisticRegression(solver='newton-cg', multi_class='auto',
                                C=C, penalty=penalty)
tuned_model.fit(X_train_scaled, y_train)
model_LR3_training_score = round(tuned_model.score(X_train_scaled, y_train)*100,3)
tuned_accuracy = round(tuned_model.score(X_test_scaled, y_test)*100,3)
print(f"Tuned Model Training Data Score: {model_LR3_training_score} %")
print(f"Tuned Model Testing Data Score: {tuned_accuracy} %")
# -
# -
# # Model Predictions
# +
predicted_model = tuned_model.predict(X_test_scaled)
classifications = y_test.unique().tolist()
prediction_actual = {
'Actual': y_test,
'Prediction': predicted_model
}
LR_df = pd.DataFrame(prediction_actual)
LR_df = LR_df.set_index('Actual').reset_index()
LR_df.head(15)
# -
# # Evaluate the Model
evaluated_model = {'Model Type': ['Base Model', 'Selected Features Model', 'Tuned Model'],
'Accuracy': [f"{base_accuracy}%", f"{select_features_accuracy}%", f"{tuned_accuracy}%"]}
evaluated_model_df = pd.DataFrame(evaluated_model)
evaluated_model_df = evaluated_model_df.set_index('Model Type')
evaluated_model_df.to_csv('Resources/LogisticRegression_eval.csv')
evaluated_model_df
# # Save the Model
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'ml_model_LR.sav'
joblib.dump(tuned_model, filename)
# +
# combine both evaluation tables together
# -
RFC = pd.read_csv('Resources/RandomForestClassifier_eval.csv')
# NOTE(review): 'Model Type' is a column in RFC but the index of
# evaluated_model_df; the merge relies on pandas matching the index level
# name on the right side -- verify against the installed pandas version.
merged_df = RFC.merge(evaluated_model_df,on="Model Type")
merged_df = merged_df.rename(columns={
    '': '',
    'Accuracy_x': 'RFC Accuracy',
    'Accuracy_y': 'LR Accuracy'
})
merged_df.set_index("Model Type")
| .ipynb_checkpoints/model_2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Part of this file can't be rendered in GitHub. Refer to the following link for a properly rendered version of this file: https://nbviewer.jupyter.org/github/sfu-db/dataprep/blob/develop/examples/DataConnector_Twitter.ipynb
# # Connector for Twitter
#
# In this example, we will be going over how to use Connector with Twitter.
# ## Prerequisites
#
# Connector is a component in the DataPrep library that aims to simplify data access by providing a standard API set. The goal is to help users skip the complex API configuration. In this tutorial, we demonstrate how to use the connector component with Twitter.
#
# If you haven't installed DataPrep, run command `!pip install dataprep` or execute the following cell.
# Run me if you'd like to install
# !pip install dataprep
# # Download and store the configuration files in DataPrep.
#
# The configuration files are used to configure the parameters and initial setup for the API. The available configuration files can be manually downloaded here: [Configuration Files](https://github.com/sfu-db/DataConnectorConfigs) or automatically downloaded at usage.
#
# Store the configuration file in the dataprep folder.
# # Obtaining access token from Twitter
#
# In order for Twitter API to work, you need four keys - the **Consumer**, **Consumer Secret**, **Access Token** and **Access Token Secret**. These keys are unique identifiers of an application requesting access to the Twitter API. The access token will be your credential when making an API request. These keys and tokens can be obtained with the following three simple steps.
#
# ##### 1. Create a Developer Account
# In order to access Twitter data, you will need to create a server-side application on Twitter and to do so, you will need to create a developer account in order to create an application. You can sign up for the developer account on the [Twitter Developer Account](https://developer.twitter.com/en). An email confirming your approval of the developer account will be sent to you shortly after signing up.
#
# ##### 2. Create an Application
# After gaining access to your developer account, log in and create an app in the [Apps section of your profile](https://developer.twitter.com/en/apps). Fill in the name, description and URL of your website. You can skip the callback URL and other URL sections.
#
# ##### 3. Generate Auth Keys and Tokens
#
# After you successfully create an application, you can find your access keys and token keys in the **Keys and Tokens** section of Application Details. Generate the Access Token and Access Token Secret. Store them in a secure location as they will provide you access to the Twitter's data.
# # Initialize connector
#
# Copy and paste the **Consumer Key** into the **client_id** variable. Copy and paste the **Consumer Secret Key** into the **client_secret** variable. Ensure the **API name** argument is correct. This establishes a connection with Twitter and returns an object. Once you run the code you can use the built in functions available from connector.
# +
from dataprep.connector import connect, info
# Placeholders: fill in the Consumer Key / Consumer Secret Key generated in
# the Twitter developer portal before running.
client_id = '<insert Consumer Key>'
client_secret = '<insert Consumer Secret Key>'
dc = connect('twitter', _auth={'client_id':client_id, 'client_secret':client_secret})
dc
# -
# # Functionalities
#
# Connector has several functions you can perform to gain insight on the data downloaded from Twitter.
# ### Connector.info
# The info method gives information and guidelines on using the connector. There are 4 sections in the response and they are table, parameters, example and schema.
#
# >1. Table - The table(s) being accessed.
# >2. Parameters - Identifies which parameters can be used to call the method.
# >3. Examples - Shows how you can call the methods in the Connector class.
# >4. Schema - Names and data types of attributes in the response.
info('twitter')
# ### Connector.query
# The query method downloads the website data and displays it in a Dataframe. The parameters must meet the requirements as indicated in connector.info for the operation to run. You can use the **_count** parameter to specify the number of tweets to be fetched. Each request can currently fetch a maximum of 100 requests.
#
# When the data is received from the server, it will either be in a JSON or XML format. The connector reformats the data in pandas Dataframe for the convenience of downstream operations.
#
# As an example, let's try to get 50 tweets related to COVID-19 from Twitter.
# #### Searching for tweets related to COVID-19
# Fetch up to 50 tweets matching "covid-19" as a DataFrame.
# NOTE(review): top-level await works in Jupyter; wrap in an async function
# when running as a plain script.
df = await dc.query("tweets", _q="covid-19", _count=50)
df
# # That's all for now.
# If you are interested in writing your own configuration file or modify an existing one, refer to the [Configuration Files](https://github.com/sfu-db/DataConnectorConfigs>).
| examples/DataConnector_Twitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ravi-kr/Universe/blob/main/Recursion.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="u24qiZ3BdaJp" outputId="be6b36c1-4902-4844-8e58-4519654fd3a1"
# Write a recursive function that takes a number and returns the sum of all the numbers from zero to that number.
def cumulative(num):
    """Recursively sum every integer from 0 up to and including num."""
    if num in (0, 1):
        return num
    return num + cumulative(num - 1)


cumulative(10)
# + colab={"base_uri": "https://localhost:8080/"} id="-E-GnTU4doN-" outputId="eb08d7f6-cb4c-4b28-f7f4-bf0882f76a1d"
# Write a recursive function that takes a number as an input and returns the factorial of that number.
def factorial(n):
    """Return n! (the factorial) of a non-negative whole number.

    Raises:
        ValueError: if n is negative or not a whole number.
    """
    # Use `raise` rather than `assert` for input validation: assertions are
    # stripped when Python runs with the -O flag, silently disabling the check.
    if n < 0 or int(n) != n:
        raise ValueError('The number must be a positive integer only!')
    if n in (0, 1):
        return 1
    return n * factorial(n - 1)


factorial(5)
# + colab={"base_uri": "https://localhost:8080/"} id="Or2TlnqBfpUf" outputId="80f622f5-2c11-42c3-9c67-8f8a782bed6f"
# Write a recursive function that takes a number ‘n’ and returns the nth number of the Fibonacci number.
def fibonacci(n):
    """Return the nth Fibonacci number (0-indexed), computed recursively."""
    if n in (0, 1):
        return n
    # F(n) = F(n-2) + F(n-1); addition order does not matter.
    return fibonacci(n - 2) + fibonacci(n - 1)


fibonacci(6)
# + colab={"base_uri": "https://localhost:8080/"} id="Y5i8vhSXgNuJ" outputId="acb10503-b792-4ecb-bf75-d9fbd7a1abb1"
# Write a recursive function that takes a list of numbers as an input and returns the product of all the numbers in the list.
def productOfArray(arr):
    """Recursively multiply all the numbers in arr.

    Returns 1 for an empty list — the multiplicative identity, matching the
    convention of math.prod. (The previous version returned 0, which made
    productOfArray(rest_of_list) unusable as a recursive building block.)
    """
    if not arr:
        return 1
    return arr[0] * productOfArray(arr[1:])


productOfArray([1,2,3,4,5])
# + colab={"base_uri": "https://localhost:8080/"} id="y68ZF8fdhB9P" outputId="b727f018-c992-4abb-adf5-1d15241977bd"
# Write a function that takes a string and returns if the string is a palindrome.
def isPalindrom(strng):
    """Recursively check whether strng reads the same forwards and backwards."""
    # Base case: empty (or fully-consumed) string is a palindrome.
    if not strng:
        return True
    # Compare the outermost pair, then recurse on the interior.
    if strng[0] == strng[-1]:
        return isPalindrom(strng[1:-1])
    return False


isPalindrom('madam')
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="NlWwDc7eh0hD" outputId="b7467b00-e447-4570-df75-caea14a8fa09"
# Write a recursive function that takes a string and reverse the string.
def reverse(st):
    """Recursively reverse the string st."""
    if len(st) <= 1:
        return st
    # Reverse the tail, then move the first character to the end.
    return reverse(st[1:]) + st[0]


reverse('amazing')
# + colab={"base_uri": "https://localhost:8080/"} id="1La-1xBTi59Z" outputId="668c62bf-e498-4766-dbb2-4a6dc019aa3a"
# Write a recursive function that takes an array that may contain more arrays in it and returns an array with all values flattened.
def flatten(arr):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Non-list elements are appended as-is; nested lists are flattened and
    spliced in, preserving left-to-right order.
    """
    res = []
    for item in arr:
        # isinstance (rather than `type(item) is list`) is the idiomatic type
        # check and also handles list subclasses.
        if isinstance(item, list):
            res.extend(flatten(item))
        else:
            res.append(item)
    return res


flatten([[1], [2, 3], [4], [3, [2, 4]]])
# + colab={"base_uri": "https://localhost:8080/"} id="SJDV-wmkjnJr" outputId="1df2b914-2adf-4c80-aa06-14f9f4d5a35b"
# Write a recursive function that takes an array of words and returns an array that contains all the words capitalized.
def capitalizeWords(arr):
    """Recursively upper-case every word in the list, returning a new list."""
    if not arr:
        return []
    # Split off the head, upper-case it, and recurse on the remainder.
    first, *rest = arr
    return [first.upper()] + capitalizeWords(rest)


capitalizeWords(['foo', 'bar', 'world', 'hello'])
# + colab={"base_uri": "https://localhost:8080/"} id="fSWBeA5AkNxd" outputId="4f917728-a66f-490d-d2f0-6b13de4d0faa"
# Write a recursive function that takes an array and a callback function and returns True if any value of that array returns True from that callback function otherwise returns False.
def isEven(num):
    """Return True if num is even, False otherwise."""
    # The comparison already evaluates to a bool; the original
    # if/return True/else/return False was redundant.
    return num % 2 == 0
def anyEven(arr, cb):
    """Return True if cb(...) is truthy for any element of arr, else False."""
    # Empty list: nothing can satisfy the predicate.
    if not arr:
        return False
    # Short-circuit on the head; otherwise recurse on the tail.
    return True if cb(arr[0]) else anyEven(arr[1:], cb)
# Example: does the list contain at least one even number?
anyEven([1, 2, 3, 5], isEven)
# + colab={"base_uri": "https://localhost:8080/"} id="NrQTAQB2keoU" outputId="3144119c-af3b-4f87-cd06-65670655fd14"
# Write a recursive function that will return the sum of all the positive even numbers in a dictionary which may contain more dictionaries nested in it.
# Sample nested dictionary mixing even/odd ints, strings, and sub-dicts.
obj = {
    "a": 2,
    "b": {"x": 2, "y": {"foo": 3, "z": {"bar": 2}}},
    "c": {"p": {"h": 2, "r": 5}, "q": 'ball', "r": 5},
    "d": 1,
    "e": {"nn": {"lil": 2}, "mm": 'car'}}


def evenSum(obj, sum=0):
    """Recursively sum all positive even integers in a (nested) dict.

    The `sum` parameter name shadows the builtin and is kept only for
    backward compatibility; internally a differently-named accumulator
    is used so the builtin stays available.
    """
    total = sum
    for value in obj.values():
        # `type(value) is int` deliberately excludes bools (True/False are
        # int subclasses) and any other numeric types.
        if type(value) is int and value % 2 == 0:
            total += value
        elif isinstance(value, dict):
            total += evenSum(value)
    return total


evenSum(obj)
# + id="bHYXNmbnln5N"
| Recursion.ipynb |
# ---
# published: true
# layout: post
# title: Python Data Essentials - Pandas
# tags: coding python
# featured: python coding
# mast: columns
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda3]
# language: python
# name: conda-env-anaconda3-py
# ---
# [Pandas] bring Python a data type equivalent to super-charged spreadsheets. Pandas add two highly expressive data structures to Python, [Series] and [DataFrame]. Pandas [Series] and [DataFrame]s provide a performant analysis and manipulation of “relational” or “labeled” data similar to relational database tables like MySQL or the rows and columns of Excel. [Pandas] are great for working with time series data as well as arbitrary matrix data, and unlabeled data.
#
# [Pandas] leverage [NumPy] and if you are not familiar with this fundamental library for working with numbers, then I suggest you take a look at [Python Data Essentials - NumPy][Numpy] to get a decent footing.
#
# [Series]: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html
# [DataFrame]: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
# [NumPy]: https://mk.imti.co/python-data-essentials-numpy/
# [Pandas]:https://pandas.pydata.org/
# **Quick reference to the examples below:**
#
# * Do not remove this line (for toc on a rendered blog)
# {:toc}
#
# If you want to go beyond this brief overview of [Pandas] I suggest the following resources:
#
# - [10 Minutes to pandas] - Official
# - [Official Tutorials]
# - [Data Analysis with Python and Pandas Tutorial Introduction] - Video
#
# [Data Analysis with Python and Pandas Tutorial Introduction]:https://www.youtube.com/watch?v=Iqjy9UqKKuo
# [Official Tutorials]:http://pandas.pydata.org/pandas-docs/stable/tutorials.html
# [10 Minutes to pandas]:http://pandas.pydata.org/pandas-docs/stable/10min.html
#
# ## Getting Started
#
# In this article, I'll be working with [Pandas] version 0.22.0. If you are running a newer version, there is a possibility of interfaces changing or functionality being deprecated or replaced. In these cases a quick review of the official documentation should suffice.
# !conda list pandas
# ## Series
#
# [Series] data structures support integer and label based indexing.
#
# > One-dimensional ndarray with axis labels (including time series).
#
# - [Series] official documentation
#
# [Series]: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html
# +
import pandas as pd
fields = ['Name','CPU', 'GHz', 'Cores','Ram','Own']
b1 = ['Orange Pi Plus','ARM Cortex-A7',1.536,4,1046,2]
# Build a labeled Series: `fields` become the index, `b1` the values.
board = pd.Series(data=b1, index = fields)
print(f'Series: \n{board}\n')
print(f' Shape: {board.shape}')
print(f'Dimensions: {board.ndim}')
print(f' Size: {board.size}')
# -
# check for label
# NOTE: `in` on a Series tests the *index labels*, not the values.
print(f'Do we have GPU data? {"GPU" in board}')
print(f'Do we have CPU data? {"CPU" in board}')
# ### Accessing and Deleting Elements
#
# - [pandas.DataFrame.iloc] - explicit selection by integer-index
# - [pandas.DataFrame.loc] - explicit selection by label
# - [pandas.Series.drop] - return Series with specified index labels removed.
# - [Indexing and Selecting Data] official documentation.
#
# [pandas.DataFrame.iloc]:http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.iloc.html
# [pandas.DataFrame.loc]: http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.loc.html
# [pandas.Series.drop]:https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.drop.html
# [Indexing and Selecting Data]:https://pandas.pydata.org/pandas-docs/stable/indexing.html
# Mix of label-based access (board["Name"]) and positional access (board[0], board[-1]).
print(f'The {board["Name"]} runs at {board["GHz"]} GHz.')
print(f'The {board[0]} has {board[3]} cores.')
print(f'The {board[0]} has {board[-1]:,} megabytes of ram.')
# +
# select specific columns
# Indexing with a list of labels returns a new sub-Series.
cc = board[["CPU","Cores"]]
print(f'Series: \n{cc}\n')
print(f' Shape: {cc.shape}')
print(f'Dimensions: {cc.ndim}')
print(f' Size: {cc.size}')
# +
# Remove an entry: drop() returns a new Series by default
# (pass inplace=True to mutate `board` instead).
nb = board.drop("Cores")
print(f'Series: \n{nb}\n')
print(f' Shape: {nb.shape}')
print(f'Dimensions: {nb.ndim}')
print(f' Size: {nb.size}')
# -
inventory = pd.Series([1,3,2],['Orange Pi Plus', 'Raspberry Pi 3', 'Asus Tinker Board'])
print(f'Series: \n{inventory}\n')
# add() broadcasts the scalar across every element, returning a new Series.
inventory = inventory.add(1)
print(f'Add 1 to all values: \n{inventory}\n')
# #### [NumPy] on [Series] data.
#
# [NumPy]: https://mk.imti.co/python-data-essentials-numpy/
# [Series]: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html
import numpy as np
# NumPy ufuncs apply element-wise to a Series and preserve its index.
print(f'Square root of each item: \n{np.sqrt(inventory)}\n')
print(f'Each item to the power of 2: \n{np.power(inventory,2)}\n')
# Orange Pi Plus and Asus Tinker Boards
inventory[['Orange Pi Plus', 'Asus Tinker Board']] * 2
# ### Arithmetic on Series Data
# +
containers = ['a','b','c']
items = [1,10,100]
item_containers = pd.Series(index=containers, data=items)
print(f'All: \n{item_containers}\n')
# Boolean indexing: keep only the elements where the condition is True.
print(f'Greater than 1: \n{item_containers[item_containers > 1]}\n')
# -
# add 10 items to a
# Element-wise add of a list: [10,0,0] aligns positionally with a, b, c.
item_containers = item_containers.add([10,0,0])
print(f'All: \n{item_containers}\n')
half_containers = item_containers / 2
print(f'Half: \n{half_containers}\n')
# ## [DataFrames]
#
# [DataFrames] are the central feature of [Pandas], a dictionary like data object.
#
# > Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure.
#
# - [pandas.DataFrame][DataFrames] official documentation.
#
# [Pandas]:https://pandas.pydata.org/
# [DataFrames]:https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
import pandas as pd
# ### Creating
# [Pandas] can create a [DataFrame] from a [NumPy] ndarray (structured or homogeneous), dict, or another DataFrame. A DataFrame with a Python [Dict] can contain [Series], arrays, constants, or list-like objects.
#
# In the example below I create a [dictionary][Dict] that maps three indexes to three varied data sets. The fictional AI machines 'hal', 'johnny 5' and 'bender' all have different attributes with some overlap. Each of the dictionary keys contains a Pandas Series object. However, they may contain any list-like objects.
#
# [Dict]:http://www.pythonforbeginners.com/dictionary/how-to-use-dictionaries-in-python
# +
# Dict of Series -> DataFrame; attributes missing for a bot become NaN
# when the indexes are aligned.
ai = {'hal': pd.Series(data = [100, 90], index = ['intellect', 'dangerous']),
      'johnny 5': pd.Series(data = [12, 5, 2], index = ['dangerous','humor','bending']),
      'bender': pd.Series(data = [20, 50, 50, 100], index = ['intellect', 'dangerous', 'humor', 'bending'])}
df_ai = pd.DataFrame(ai)
df_ai
# -
print(f' Shape: {df_ai.shape}')
print(f'Dimensions: {df_ai.ndim}')
print(f' Size: {df_ai.size}')
print(f'Total NaNs: {df_ai.isnull().sum().sum()}')
print(f'NaN Counts: \n{df_ai.isnull().sum()}\n')
print(f'DataFrame Values: \n{df_ai.values}\n')
# ### Selecting
#
# The methods [mask][pandas.DataFrame.mask] and [where][pandas.DataFrame.where] are provided by Panda's [Series] and [DataFrame] data types. See the examples below for some simple examples of value selection using basic arithmatic expressions.
#
# - [pandas.DataFrame.mask]
# - [pandas.DataFrame.where]
# - [pandas.Series.mask]
# - [pandas.Series.where]
#
# [pandas.DataFrame.mask]:http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.mask.html
# [pandas.Series.mask]:http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.mask.html
# [pandas.DataFrame.where]:http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.where.html
# [pandas.Series.where]:http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.where.html
# mask() and where() are complements: mask() replaces values where the
# condition is True, where() keeps them.
# mask out any data greater than 10
df_ai.mask(df_ai > 10)
# only return data greater than 10, otherwise NaN
df_ai.where(df_ai > 10)
# only return data greater than 10, otherwise 0 (second argument is the fill)
df_ai.where(df_ai > 10, 0)
# ### Modifying
#
# The AI bots 'hal', '<NAME>' and 'bender' share some common attributes, however where they do not, we get **nan** (not a number). Running the AI bot data through any math function would be problematic with the existence of non-numbers. Pandas give us quite a few options.
#
# There are many options for cleaning this data. I'll start with removing any rows that contain **nan** values. We can make these adjustments with the optional parameter **inplace=True** if we wanted to modify the DataFrame in place, however for the sake of examples it is better to keep the original intact.
# return a frame eliminating rows with NaN values (axis=0 drops rows)
df_ai_common_rows = df_ai.dropna(axis=0)
df_ai_common_rows
# return a frame eliminating COLUMNS with NaN values (axis=1 drops columns)
df_ai_common_cols = df_ai.dropna(axis=1)
df_ai_common_cols
# Depending on requirements, no data could mean zero in our scale of 0-100. While zero is not a reasonable assumption for our AI bots, it's an easy data fix:
# fill all NaNs with 0
df_ai.fillna(0)
# forward fill rows with previous column (axis=0) data
# NOTE(review): fillna(method=...) is deprecated in pandas >= 2.1;
# prefer .ffill()/.bfill() on modern versions (this notebook targets 0.22).
df_ai.fillna(method='ffill', axis=0)
# forward fill rows with previous column (axis=0) data
# then back fill
df_ai.fillna(method='ffill', axis=0).fillna(method='bfill', axis=0)
# Forward (ffill) and backfilling (bfill) have far better uses in time-series data. In this case, `hal` having a danger rating of 90 should not assume that his bending ability would be 90 as well, but this example clearly illustrates the forward and backfilling capabilities of [DataFrame]'s [fillna] method.
#
# [DataFrame]:https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
# [fillna]:https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.fillna.html
#
# If we needed to make assumptions regarding the ability of this team of AI bots we could assume unknown data could start as an average of known data.
# get the mean of data for each attribute row by column (axis=1)
# i.e. one mean per attribute, averaged across the three bots
df_ai.mean(axis=1)
# [pandas.DataFrame.apply] method applies the return value of a function along an axis of DataFrame, axis=1 in the example below. The function given to [pandas.DataFrame.apply] is passed the row or column depending on the axis specified; the function below receives rows (because axis=1 is specified) and assigns each row to the variable "x" in which the method "mean" is called and resulting data returned from the function.
#
# We could have defined a named function; however this small operation **x.fillna(x.mean())** is hardly worthy of such attention. Python's [lambda]s are one-line, anonymous functions, and when used responsibly, can make the code more compact and readable at the same time.
#
# [pandas.DataFrame.apply]:http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.apply.html
# [lambda]:https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions
# Row-wise (axis=1): each row's NaNs are replaced with that row's mean.
clean_df_ai = df_ai.apply(lambda x: x.fillna(x.mean()),axis=1)
clean_df_ai
# ### Sorting
# order the columns by ai bot with the highest intellect
# sort_values with axis=1 reorders the COLUMNS using the 'intellect' row, descending.
hii = clean_df_ai.sort_values(['intellect'], axis=1, ascending=False)
hii
print(f'The bot with the highest intelligence is {hii.columns[0]}.')
print(f'The bot with the lowest intelligence is {hii.columns[-1]}.')
# I doubt that `<NAME>` is more intelligent than `bender` but his data was unknown and therefore derived by using a mean, so the score is mathematically correct.
#
# I won't attempt even to scratch the surface of sorting functions and their parameters provided by [DataFrame]s. This article is only intended to give you a taste and get you going.
#
# ### Math
#
# Use [NumPy] to perform any number of arithmetic operations on the values of a [DataFrames]. I suggest you take a look at my article [Python Data Essentials - Pandas][Numpy] for an overview of this compelling data science library.
#
#
# ## Essential Python 3
#
# A lot of data science is done in [Jupyter Notbooks] and libraries like [NumPy] make developing reports or documenting numerical processes easy. However, if you are a software developer like me, this code needs to run in a script on a server, in Amazon's [Lambda Function Handler] or even [kubeless] in a custom [kubernetes] cluster.
#
# Check out my article on [Essential Python 3] for a clean boilerplate script template to get you going.
#
# [Essential Python 3]: https://mk.imti.co/essential-python3/
# [kubernetes]: https://mk.imti.co/hobby-cluster/
# [kubeless]:https://kubeless.io/
# [Jupyter Notbooks]:https://mk.imti.co/golang-to-jupyter/
# [Numpy]:https://mk.imti.co/python-data-essentials-numpy/
# [Lambda Function Handler]:https://docs.aws.amazon.com/lambda/latest/dg/python-programming-model-handler-types.html
# [DataFrame]:https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
| notebooks/2018-06-17-python-data-essentials-pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="MyETdB-dkBsX"
# ## **Fine-tuning BERT for named-entity recognition**
#
# In this notebook, we are going to use **BertForTokenClassification** which is included in the [Transformers library](https://github.com/huggingface/transformers) by HuggingFace. This model has BERT as its base architecture, with a token classification head on top, allowing it to make predictions at the token level, rather than the sequence level. Named entity recognition is typically treated as a token classification problem, so that's what we are going to use it for.
#
# This tutorial uses the idea of **transfer learning**, i.e. first pretraining a large neural network in an unsupervised way, and then fine-tuning that neural network on a task of interest. In this case, BERT is a neural network pretrained on 2 tasks: masked language modeling and next sentence prediction. Now, we are going to fine-tune this network on a NER dataset. Fine-tuning is supervised learning, so this means we will need a labeled dataset.
#
# If you want to know more about BERT, I suggest the following resources:
# * the original [paper](https://arxiv.org/abs/1810.04805)
# * <NAME>'s [blog post](http://jalammar.github.io/illustrated-bert/) as well as his [tutorial](http://jalammar.github.io/a-visual-guide-to-using-bert-for-the-first-time/)
# * <NAME>'s [Youtube channel](https://www.youtube.com/channel/UCoRX98PLOsaN8PtekB9kWrw)
# * <NAME>'s [Youtube channel](https://www.youtube.com/user/abhisheksvnit)
#
# The following notebook largely follows the same structure as the tutorials by <NAME>. For his tutorials on the Transformers library, see his [Github repository](https://github.com/abhimishra91/transformers-tutorials).
#
# NOTE: this notebook assumes basic knowledge about deep learning, BERT, and native PyTorch. If you want to learn more Python, deep learning and PyTorch, I highly recommend cs231n by Stanford University and the FastAI course by Jeremy Howard et al. Both are freely available on the web.
#
# Now, let's move on to the real stuff!
# + [markdown] colab_type="text" id="e7wfLWyYkvDi"
# #### **Importing Python Libraries and preparing the environment**
#
# This notebook assumes that you have the following libraries installed:
# * pandas
# * numpy
# * sklearn
# * pytorch
# * transformers
# * seqeval
#
# As we are running this in Google Colab, the only libraries we need to additionally install are transformers and seqeval (GPU version):
# + id="d4_YJqjR_Gjw" colab_type="code" colab={}
# #!pip install transformers seqeval[gpu]
# + colab_type="code" id="IEnlUbgm8z3B" colab={}
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertConfig, BertForTokenClassification
# + [markdown] colab_type="text" id="Jzq1w3L1K5M-"
# As deep learning can be accelerated a lot using a GPU instead of a CPU, make sure you can run this notebook in a GPU runtime (which Google Colab provides for free! - check "Runtime" - "Change runtime type" - and set the hardware accelerator to "GPU").
#
# We can set the default device to GPU using the following code (if it prints "cuda", it means the GPU has been recognized):
# + colab_type="code" id="Sm1krxJtKxpx" tags=[] colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c0cf11ba-17ff-4a35-da11-5b7b0c4cb11d"
from torch import cuda
# Use the GPU when one is available, otherwise fall back to CPU.
device = 'cuda' if cuda.is_available() else 'cpu'
print(device)
# + [markdown] colab_type="text" id="ahwMsmyG5ZPE"
# #### **Downloading and preprocessing the data**
# Named entity recognition (NER) uses a specific annotation scheme, which is defined (at least for European languages) at the *word* level. An annotation scheme that is widely used is called **[IOB-tagging](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging))**, which stands for Inside-Outside-Beginning. Each tag indicates whether the corresponding word is *inside*, *outside* or at the *beginning* of a specific named entity. The reason this is used is because named entities usually comprise more than 1 word.
#
# Let's have a look at an example. If you have a sentence like "<NAME> was born in Hawaï", then the corresponding tags would be [B-PERS, I-PERS, O, O, O, B-GEO]. B-PERS means that the word "Barack" is the beginning of a person, I-PERS means that the word "Obama" is inside a person, "O" means that the word "was" is outside a named entity, and so on. So one typically has as many tags as there are words in a sentence.
#
# So if you want to train a deep learning model for NER, it requires that you have your data in this IOB format (or similar formats such as [BILOU](https://stackoverflow.com/questions/17116446/what-do-the-bilou-tags-mean-in-named-entity-recognition)). There exist many annotation tools which let you create these kind of annotations automatically (such as Spacy's [Prodigy](https://prodi.gy/), [Tagtog](https://docs.tagtog.net/) or [Doccano](https://github.com/doccano/doccano)). You can also use Spacy's [biluo_tags_from_offsets](https://spacy.io/api/goldparse#biluo_tags_from_offsets) function to convert annotations at the character level to IOB format.
#
# Here, we will use a NER dataset from [Kaggle](https://www.kaggle.com/namanj27/ner-dataset) that is already in IOB format. One has to go to this web page, download the dataset, unzip it, and upload the csv file to this notebook. Let's print out the first few rows of this csv file:
# + colab_type="code" id="deLB9HVX5I6F" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="57531522-0650-4a3c-dc6a-b48a9348a17a"
# NOTE(review): encoding='unicode_escape' implies the CSV is not plain UTF-8 —
# verify against the downloaded Kaggle file.
data = pd.read_csv("ner_datasetreference.csv", encoding='unicode_escape')
data.head()
# + [markdown] colab_type="text" id="ucYjhq6uRAmY"
# Let's check how many sentences and words (and corresponding tags) there are in this dataset:
# + colab_type="code" id="6gMibEJXTKDw" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="a7039b75-8800-4a1e-8554-89bd41168683"
# Non-null entry counts per column.
data.count()
# + [markdown] id="sUGXZOfE_GkO" colab_type="text"
# As we can see, there are approximately 48,000 sentences in the dataset, comprising more than 1 million words and tags (quite huge!). This corresponds to approximately 20 words per sentence.
#
# Let's have a look at the different NER tags, and their frequency:
# + tags=[] id="s4Jn1fVT_GkO" colab_type="code" colab={} outputId="7ef53f45-8dfc-432f-9fcf-4b9754ee613b"
print("Number of tags: {}".format(len(data.Tag.unique())))
# value_counts() returns tag frequencies sorted most- to least-common.
frequencies = data.Tag.value_counts()
frequencies
# + [markdown] id="gsX0ebih_GkS" colab_type="text"
# There are 8 category tags, each with a "beginning" and "inside" variant, and the "outside" tag. It is not really clear what these tags mean - "geo" probably stands for geographical entity, "gpe" for geopolitical entity, and so on. They do not seem to correspond with what the publisher says on Kaggle. Some tags seem to be underrepresented. Let's print them by frequency (highest to lowest):
# + tags=[] id="CmjbHirJ_GkS" colab_type="code" colab={} outputId="405920d1-f009-4263-ae01-6bf0c2f03235"
# Aggregate counts per entity category (e.g. "B-geo" and "I-geo" both count
# toward "geo"), skipping the "O" (outside) tag.
tags = {}
for tag, count in zip(frequencies.index, frequencies):
    if tag != "O":
        category = tag[2:5]  # strip the "B-" / "I-" prefix
        # dict.get collapses the original if/else branches; the trailing
        # no-op `continue` and `.keys()` membership test are gone too.
        tags[category] = tags.get(category, 0) + count
print(sorted(tags.items(), key=lambda x: x[1], reverse=True))
# + [markdown] id="aGUQemBz_GkV" colab_type="text"
# Let's remove "art", "eve" and "nat" named entities, as performance on them will probably be not comparable to the other named entities.
# + id="8iorLrU4_GkW" colab_type="code" colab={} outputId="532a42ec-077a-41a2-fa18-e7987dc54a8c"
# Drop the three underrepresented entity categories (art, eve, nat).
entities_to_remove = ["B-art", "I-art", "B-eve", "I-eve", "B-nat", "I-nat"]
data = data[~data.Tag.isin(entities_to_remove)]
data.head()
# + [markdown] colab_type="text" id="mskU4h0oRKEF"
# Now, we have to ask ourself the question: what is a training example in the case of NER, which is provided in a single forward pass? A training example is typically a **sentence**, with corresponding IOB tags. Let's group the words and corresponding tags by sentence:
# + colab_type="code" id="zkW2vNcO-uMH" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="855c09f8-1345-4851-a060-1f3a8290d4fe"
# pandas has a very handy "forward fill" function to fill missing values based on the last upper non-nan value
# (here it propagates each "Sentence #" down to every word of that sentence)
data = data.fillna(method='ffill')
data.head()
# + colab_type="code" id="Hmd-ow389k6Y" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="51e3a794-9847-4462-fd79-e280c00d81ce"
# let's create a new column called "sentence" which groups the words by sentence
data['sentence'] = data[['Sentence #','Word','Tag']].groupby(['Sentence #'])['Word'].transform(lambda x: ' '.join(x))
# let's also create a new column called "word_labels" which groups the tags by sentence
# (comma-separated so they can be split back apart during tokenization)
data['word_labels'] = data[['Sentence #','Word','Tag']].groupby(['Sentence #'])['Tag'].transform(lambda x: ','.join(x))
data.head()
# + [markdown] colab_type="text" id="JsjhdQbE-Lve"
# Let's have a look at the different NER tags.
#
# We create 2 dictionaries: one that maps individual tags to indices, and one that maps indices to their individual tags. This is necessary in order to create the labels (as computers work with numbers = indices, rather than words = tags) - see further in this notebook.
# + colab_type="code" id="CFRDM8WsQXvL" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="1e9d7dcb-f93c-4d71-e342-557c1709206b"
# Bidirectional tag <-> integer-id maps; the model trains on ids, not strings.
labels_to_ids = {k: v for v, k in enumerate(data.Tag.unique())}
ids_to_labels = {v: k for v, k in enumerate(data.Tag.unique())}
labels_to_ids
# + [markdown] colab_type="text" id="J08Cvk_USgbM"
# As we can see, there are now only 10 different tags.
#
# Let's only keep the "sentence" and "word_labels" columns, and drop duplicates:
# + colab_type="code" id="SrEgd4PZUgmF" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="9411df51-39d8-4757-a98d-acdd3319aa08"
# Each sentence currently appears once per word; keep one row per sentence.
data = data[["sentence", "word_labels"]].drop_duplicates().reset_index(drop=True)
data.head()
# + colab_type="code" id="r3ArUiVRqw0C" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b78ec91c-88a4-4433-9bd4-c9f726dc53e3"
len(data)
# + [markdown] colab_type="text" id="U8obZumRTBrT"
# Let's verify that a random sentence and its corresponding tags are correct:
# + colab_type="code" id="eUvupomW_fbe" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="926daac8-b2a0-436d-f98c-cbe93d1b00ed"
# Spot-check one row: the sentence text and its comma-separated tags.
data.iloc[41].sentence
# + colab_type="code" id="0dLyY3Oi_lvp" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5d61d25a-6d01-4249-ba53-30d268f5a330"
data.iloc[41].word_labels
# + [markdown] colab_type="text" id="f5EHpuB78pIa"
# #### **Preparing the dataset and dataloader**
# + [markdown] colab_type="text" id="15x7zmZnTgFx"
# Now that our data is preprocessed, we can turn it into PyTorch tensors such that we can provide it to the model. Let's start by defining some key variables that will be used later on in the training/evaluation process:
# + colab_type="code" id="lgNSM8Xz79Mg" tags=[] colab={}
# Key training/evaluation hyperparameters.
MAX_LEN = 128            # max wordpieces per example (incl. [CLS]/[SEP])
TRAIN_BATCH_SIZE = 4
VALID_BATCH_SIZE = 2
EPOCHS = 1
LEARNING_RATE = 1e-05
MAX_GRAD_NORM = 10       # gradient clipping threshold
# Wordpiece tokenizer matching the pretrained checkpoint being fine-tuned.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# + [markdown] colab_type="text" id="wPYV2Ld6Tr5I"
# A tricky part of NER with BERT is that BERT relies on **wordpiece tokenization**, rather than word tokenization. This means that we should also define the labels at the wordpiece-level, rather than the word-level!
#
# For example, if you have word like "Washington" which is labeled as "b-gpe", but it gets tokenized to "Wash", "##ing", "##ton", then we will have to propagate the word’s original label to all of its wordpieces: "b-gpe", "b-gpe", "b-gpe". The model should be able to produce the correct labels for each individual wordpiece. The function below (taken from [here](https://github.com/chambliss/Multilingual_NER/blob/master/python/utils/main_utils.py#L118)) implements this.
#
#
#
#
#
# + colab_type="code" id="RNzSgZTfGUd8" colab={}
def tokenize_and_preserve_labels(sentence, text_labels, tokenizer):
    """Tokenize `sentence` word by word, copying each word's label onto
    every wordpiece produced for it.

    Tokenizing one word at a time keeps the word/label alignment intact
    (a whole-sentence tokenize would lose the word boundaries). Slightly
    slower, but it helps the model achieve higher accuracy.

    Returns a (wordpiece_list, label_list) pair of equal length.
    """
    tokenized_sentence = []
    labels = []

    words = sentence.strip().split()
    for word, label in zip(words, text_labels.split(",")):
        subwords = tokenizer.tokenize(word)
        tokenized_sentence.extend(subwords)
        # Repeat the word's label once per subword so both lists stay aligned.
        labels.extend([label] * len(subwords))

    return tokenized_sentence, labels
# + [markdown] colab_type="text" id="ez7qlFHl56ZW"
# Note that this is a **design decision**. You could also decide to only label the first wordpiece of each word and let the model only learn this (this is what was done in the original BERT paper, see Github discussion [here](https://github.com/huggingface/transformers/issues/64#issuecomment-443703063)). Another design decision could be to give the first wordpiece of each word the original word label, and then use the label “X” for all subsequent subwords of that word.
#
# All of them lead to good performance.
#
# Next, we define a regular PyTorch [dataset class](https://pytorch.org/docs/stable/data.html) (which transforms examples of a dataframe to PyTorch tensors). Here, each sentence gets tokenized, the special tokens that BERT expects are added, the tokens are padded or truncated based on the max length of the model, the attention mask is created and the labels are created based on the dictionary which we defined above.
#
# For more information about BERT's inputs, see [here](https://huggingface.co/transformers/glossary.html).
# + colab_type="code" id="aJty_Abw8_xK" colab={}
class dataset(Dataset):
    """Map rows of a (sentence, word_labels) dataframe to BERT-ready tensors.

    Each item is tokenized to wordpieces (word labels propagated to every
    wordpiece), wrapped in [CLS]/[SEP], padded or truncated to max_len, and
    returned as id / attention-mask / label-id tensors.
    """

    def __init__(self, dataframe, tokenizer, max_len):
        self.len = len(dataframe)
        self.data = dataframe
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __getitem__(self, index):
        # step 1: tokenize the sentence and adapt the corresponding labels
        sentence = self.data.sentence[index]
        word_labels = self.data.word_labels[index]
        tokenized_sentence, labels = tokenize_and_preserve_labels(sentence, word_labels, self.tokenizer)

        # step 2: add special tokens (and corresponding labels)
        tokenized_sentence = ["[CLS]"] + tokenized_sentence + ["[SEP]"]  # add special tokens
        labels.insert(0, "O")  # add outside label for [CLS] token
        # BUGFIX: this was labels.insert(-1, "O"), which slots the [SEP] label
        # in *before* the last word label and misaligns the final wordpiece.
        labels.append("O")  # add outside label for [SEP] token

        # step 3: truncating/padding to exactly max_len entries
        maxlen = self.max_len
        if len(tokenized_sentence) > maxlen:
            # truncate
            tokenized_sentence = tokenized_sentence[:maxlen]
            labels = labels[:maxlen]
        else:
            # pad
            tokenized_sentence = tokenized_sentence + ['[PAD]' for _ in range(maxlen - len(tokenized_sentence))]
            labels = labels + ["O" for _ in range(maxlen - len(labels))]

        # step 4: attention mask is 1 for real tokens, 0 for padding
        attn_mask = [1 if tok != '[PAD]' else 0 for tok in tokenized_sentence]

        # step 5: convert tokens to input ids
        ids = self.tokenizer.convert_tokens_to_ids(tokenized_sentence)
        label_ids = [labels_to_ids[label] for label in labels]
        # the following line is deprecated
        #label_ids = [label if label != 0 else -100 for label in label_ids]

        return {
            'ids': torch.tensor(ids, dtype=torch.long),
            'mask': torch.tensor(attn_mask, dtype=torch.long),
            #'token_type_ids': torch.tensor(token_ids, dtype=torch.long),
            'targets': torch.tensor(label_ids, dtype=torch.long)
        }

    def __len__(self):
        return self.len
# + [markdown] colab_type="text" id="hTP7zuWGWGUd"
# Now, based on the class we defined above, we can create 2 datasets, one for training and one for testing. Let's use a 80/20 split:
# + colab_type="code" id="jrkdZBLYHVcB" tags=[] colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="160536bb-9659-490e-db38-c5c7ff66b351"
# 80/20 train/test split of the dataframe (fixed random_state for reproducibility)
train_size = 0.8
train_dataset = data.sample(frac=train_size,random_state=200)
# the rows not sampled for training form the test set
test_dataset = data.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)

print("FULL Dataset: {}".format(data.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))

# wrap each split in the tensor-producing dataset defined above
training_set = dataset(train_dataset, tokenizer, MAX_LEN)
testing_set = dataset(test_dataset, tokenizer, MAX_LEN)
# + [markdown] colab_type="text" id="Ptv5AT_iTb7W"
# Let's have a look at the first training example:
# + colab_type="code" id="phmPylgAm8Xy" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="27a7cee8-c920-4602-c539-b117ce6236ec"
# display the first preprocessed training example (dict of tensors)
training_set[0]
# + [markdown] colab_type="text" id="VvU4nzL2W2Xo"
# Let's verify that the input ids and corresponding targets are correct:
# + colab_type="code" id="DWgnNJrYW2GP" tags=[] colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="2b5b5b04-b93c-4354-b104-10b96c7223ff"
# print each token next to its numeric label id for a visual sanity check
for token, label in zip(tokenizer.convert_ids_to_tokens(training_set[0]["ids"]), training_set[0]["targets"]):
    print('{0:10} {1}'.format(token, label))
# + [markdown] colab_type="text" id="Ky68FcTgWnfN"
# Now, let's define the corresponding PyTorch dataloaders:
# + colab_type="code" id="KIw793myWOmi" colab={}
# DataLoader settings; num_workers=0 keeps loading in the main process
train_params = {'batch_size': TRAIN_BATCH_SIZE,
                'shuffle': True,
                'num_workers': 0
                }

test_params = {'batch_size': VALID_BATCH_SIZE,
               'shuffle': True,
               'num_workers': 0
               }

training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# + [markdown] colab_type="text" id="73OzU7oXRxR8"
# #### **Defining the model**
# + [markdown] colab_type="text" id="T-iGhnhdLNdP"
# Here we define the model, BertForTokenClassification, and load it with the pretrained weights of "bert-base-uncased". The only thing we need to additionally specify is the number of labels (as this will determine the architecture of the classification head).
#
# Note that only the base layers are initialized with the pretrained weights. The token classification head of top has just randomly initialized weights, which we will train, together with the pretrained weights, using our labelled dataset. This is also printed as a warning when you run the code cell below.
#
# Then, we move the model to the GPU.
# + colab_type="code" id="cB9MR3KcWXUs" tags=[] colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c0d93a4f-cd40-4f6b-edab-6dd51f2ee33b"
# Load pretrained BERT with a randomly initialized token-classification head
# sized to the number of NER labels, then move it to the GPU/CPU `device`.
model = BertForTokenClassification.from_pretrained('bert-base-uncased', num_labels=len(labels_to_ids))
model.to(device)
# + [markdown] colab_type="text" id="Pp7Yl4JyWhDj"
# #### **Training the model**
#
# Before training the model, let's perform a sanity check, which I learned thanks to <NAME>'s wonderful [cs231n course](http://cs231n.stanford.edu/) at Stanford (see also his [blog post about debugging neural networks](http://karpathy.github.io/2019/04/25/recipe/)). The initial loss of your model should be close to -ln(1/number of classes) = -ln(1/17) = 2.83.
#
# Why? Because we are using cross entropy loss. The cross entropy loss is defined as -ln(probability score of the model for the correct class). In the beginning, the weights are random, so the probability distribution for all of the classes for a given token will be uniform, meaning that the probability for the correct class will be near 1/17. The loss for a given token will thus be -ln(1/17). As PyTorch's [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html) (which is used by `BertForTokenClassification`) uses *mean reduction* by default, it will compute the mean loss for each of the tokens in the sequence (in other words, for all of the 512 tokens). The mean of 512 times -log(1/17) is, you guessed it, -log(1/17).
#
# Let's verify this:
#
#
# + colab_type="code" id="eqAN7YVIjKTr" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0a6acb55-a46e-46b6-e562-e4205a9857cd"
# Sanity-check the initial loss on a single example: with random head weights
# it should be close to -ln(1 / num_labels).
ids = training_set[0]["ids"].unsqueeze(0)       # add batch dimension
mask = training_set[0]["mask"].unsqueeze(0)
targets = training_set[0]["targets"].unsqueeze(0)
ids = ids.to(device)
mask = mask.to(device)
targets = targets.to(device)
outputs = model(input_ids=ids, attention_mask=mask, labels=targets)
initial_loss = outputs[0]  # first element of the output tuple is the loss
initial_loss
# + [markdown] colab_type="text" id="yLdwsru9Mh7U"
# This looks good. Let's also verify that the logits of the neural network have a shape of (batch_size, sequence_length, num_labels):
# + colab_type="code" id="X-z6YCpGnvfj" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b873271e-b7f8-48b6-f4f2-b63c3ccccbe5"
# logits are the second output; expect shape (batch_size, seq_len, num_labels)
tr_logits = outputs[1]
tr_logits.shape
# + [markdown] colab_type="text" id="kwDLXxOVOCvD"
# Next, we define the optimizer. Here, we are just going to use Adam with a default learning rate. One can also decide to use more advanced ones such as AdamW (Adam with weight decay fix), which is [included](https://huggingface.co/transformers/main_classes/optimizer_schedules.html) in the Transformers repository, and a learning rate scheduler, but we are not going to do that here.
# + colab_type="code" id="kznSQfGIWdU4" colab={}
# plain Adam over all model parameters (no weight-decay fix, no scheduler)
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
# + [markdown] colab_type="text" id="vZQ8JMF0NOe1"
# Now let's define a regular PyTorch training function. It is partly based on [a really good repository about multilingual NER](https://github.com/chambliss/Multilingual_NER/blob/master/python/utils/main_utils.py#L344).
# + colab_type="code" id="GLFivpkwW1HY" colab={}
# Defining the training function on the 80% of the dataset for tuning the bert model
def train(epoch):
    """Run one training epoch over `training_loader`, updating `model` in place.

    Uses module-level globals: model, training_loader, optimizer, device,
    MAX_GRAD_NORM, accuracy_score. Prints the running loss every 100 steps and
    the epoch-level loss/accuracy at the end.
    """
    tr_loss, tr_accuracy = 0, 0
    nb_tr_examples, nb_tr_steps = 0, 0
    tr_preds, tr_labels = [], []
    # put model in training mode
    model.train()

    for idx, batch in enumerate(training_loader):
        ids = batch['ids'].to(device, dtype = torch.long)
        mask = batch['mask'].to(device, dtype = torch.long)
        targets = batch['targets'].to(device, dtype = torch.long)

        loss, tr_logits = model(input_ids=ids, attention_mask=mask, labels=targets)
        tr_loss += loss.item()

        nb_tr_steps += 1
        nb_tr_examples += targets.size(0)

        if idx % 100==0:
            loss_step = tr_loss/nb_tr_steps
            print(f"Training loss per 100 training steps: {loss_step}")

        # compute training accuracy
        flattened_targets = targets.view(-1) # shape (batch_size * seq_len,)
        active_logits = tr_logits.view(-1, model.num_labels) # shape (batch_size * seq_len, num_labels)
        flattened_predictions = torch.argmax(active_logits, axis=1) # shape (batch_size * seq_len,)

        # now, use mask to determine where we should compare predictions with targets (includes [CLS] and [SEP] token predictions)
        active_accuracy = mask.view(-1) == 1 # active accuracy is also of shape (batch_size * seq_len,)
        targets = torch.masked_select(flattened_targets, active_accuracy)
        predictions = torch.masked_select(flattened_predictions, active_accuracy)

        tr_preds.extend(predictions)
        tr_labels.extend(targets)

        tmp_tr_accuracy = accuracy_score(targets.cpu().numpy(), predictions.cpu().numpy())
        tr_accuracy += tmp_tr_accuracy

        # BUGFIX: the original called clip_grad_norm_ *before* zero_grad()/
        # backward(), so the freshly computed gradients were never clipped.
        # Correct order: zero gradients, backprop, clip, then step.
        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(
            parameters=model.parameters(), max_norm=MAX_GRAD_NORM
        )
        optimizer.step()

    epoch_loss = tr_loss / nb_tr_steps
    tr_accuracy = tr_accuracy / nb_tr_steps
    print(f"Training loss epoch: {epoch_loss}")
    print(f"Training accuracy epoch: {tr_accuracy}")
# + [markdown] colab_type="text" id="k2dsCyP7dcF3"
# And let's train the model!
# + colab_type="code" id="y07Ybw8rZeZ7" tags=[] colab={"base_uri": "https://localhost:8080/", "height": 521} outputId="25bec966-fa2c-461b-a1cf-647fb26b143f"
# run EPOCHS full passes over the training data
for epoch in range(EPOCHS):
    print(f"Training epoch: {epoch + 1}")
    train(epoch)
# + [markdown] colab_type="text" id="r4jcSOJr680a"
# #### **Evaluating the model**
# + [markdown] colab_type="text" id="rYUTuOEUdfFJ"
# Now that we've trained our model, we can evaluate its performance on the held-out test set (which is 20% of the data). Note that here, no gradient updates are performed, the model just outputs its logits.
# + colab_type="code" id="RIVVfFHi7Aw7" colab={}
def valid(model, testing_loader):
    """Evaluate `model` on `testing_loader` without gradient updates.

    Prints running/epoch validation loss and accuracy, and returns
    (labels, predictions) as tag strings for every non-padded token,
    ready for seqeval. Uses module-level globals: device, ids_to_labels,
    accuracy_score.
    """
    # put model in evaluation mode
    model.eval()

    eval_loss, eval_accuracy = 0, 0
    nb_eval_examples, nb_eval_steps = 0, 0
    eval_preds, eval_labels = [], []

    with torch.no_grad():
        for idx, batch in enumerate(testing_loader):
            ids = batch['ids'].to(device, dtype = torch.long)
            mask = batch['mask'].to(device, dtype = torch.long)
            targets = batch['targets'].to(device, dtype = torch.long)

            # forward pass; model returns (loss, logits) as a tuple here
            loss, eval_logits = model(input_ids=ids, attention_mask=mask, labels=targets)
            eval_loss += loss.item()

            nb_eval_steps += 1
            nb_eval_examples += targets.size(0)

            if idx % 100==0:
                loss_step = eval_loss/nb_eval_steps
                print(f"Validation loss per 100 evaluation steps: {loss_step}")

            # compute evaluation accuracy
            flattened_targets = targets.view(-1) # shape (batch_size * seq_len,)
            active_logits = eval_logits.view(-1, model.num_labels) # shape (batch_size * seq_len, num_labels)
            flattened_predictions = torch.argmax(active_logits, axis=1) # shape (batch_size * seq_len,)

            # now, use mask to determine where we should compare predictions with targets (includes [CLS] and [SEP] token predictions)
            active_accuracy = mask.view(-1) == 1 # active accuracy is also of shape (batch_size * seq_len,)
            targets = torch.masked_select(flattened_targets, active_accuracy)
            predictions = torch.masked_select(flattened_predictions, active_accuracy)

            eval_labels.extend(targets)
            eval_preds.extend(predictions)

            tmp_eval_accuracy = accuracy_score(targets.cpu().numpy(), predictions.cpu().numpy())
            eval_accuracy += tmp_eval_accuracy

    #print(eval_labels)
    #print(eval_preds)

    # map accumulated label ids back to their tag strings
    labels = [ids_to_labels[id.item()] for id in eval_labels]
    predictions = [ids_to_labels[id.item()] for id in eval_preds]

    #print(labels)
    #print(predictions)

    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_steps
    print(f"Validation Loss: {eval_loss}")
    print(f"Validation Accuracy: {eval_accuracy}")

    return labels, predictions
# + [markdown] colab_type="text" id="zJaONluRdq-e"
# As we can see below, performance is quite good! Accuracy on the test test is > 93%.
# + colab_type="code" id="2BrxRjvxApY8" tags=[] colab={} outputId="60f9abbe-8272-41f6-bcaf-03571ecf726b"
# evaluate on the held-out 20% split
labels, predictions = valid(model, testing_loader)
# + [markdown] colab_type="text" id="SAznLDwx_U2X"
# However, the accuracy metric is misleading, as a lot of labels are "outside" (O), even after omitting predictions on the [PAD] tokens. What is important is looking at the precision, recall and f1-score of the individual tags. For this, we use the seqeval Python library:
# + colab_type="code" id="0jDNXrjr-6BW" tags=[] colab={} outputId="782922e5-198f-4007-f658-c973b8dddff8"
from seqeval.metrics import classification_report
# entity-level precision/recall/F1 per tag category
print(classification_report(labels, predictions))
# + [markdown] colab_type="text" id="4Gz-wHAw3xMk"
# #### **Inference**
#
# The fun part is when we can quickly test the model on new, unseen sentences.
# Here, we use the prediction of the **first word piece of every word**. Note that the function we used to train our model (`tokenze_and_preserve_labels`) propagated the label to all subsequent word pieces (so you could for example also perform a majority vote on the predicted labels of all word pieces of a word).
#
# *In other words, the code below does not take into account when predictions of different word pieces that belong to the same word do not match.*
# + colab_type="code" id="zPDla1mmZiax" tags=[] colab={} outputId="f78e6a27-642c-40b8-cc05-7ed0f936fa3f"
# Quick inference demo on an unseen sentence: keep only the prediction of the
# first wordpiece of every word.
sentence = "India has a capital called Mumbai. On wednesday, the president will give a presentation"

inputs = tokenizer(sentence, padding='max_length', truncation=True, max_length=MAX_LEN, return_tensors="pt")

# move to gpu
ids = inputs["input_ids"].to(device)
mask = inputs["attention_mask"].to(device)

# forward pass
outputs = model(ids, mask)
logits = outputs[0]

active_logits = logits.view(-1, model.num_labels) # shape (batch_size * seq_len, num_labels)
flattened_predictions = torch.argmax(active_logits, axis=1) # shape (batch_size*seq_len,) - predictions at the token level

tokens = tokenizer.convert_ids_to_tokens(ids.squeeze().tolist())
token_predictions = [ids_to_labels[i] for i in flattened_predictions.cpu().numpy()]
wp_preds = list(zip(tokens, token_predictions)) # list of tuples. Each tuple = (wordpiece, prediction)

word_level_predictions = []
for pair in wp_preds:
    # BUGFIX: WordPiece continuation tokens start with "##" (no leading
    # space); the original tested startswith(" ##"), which never matched,
    # so sub-word pieces were wrongly kept in the word-level predictions.
    if (pair[0].startswith("##")) or (pair[0] in ['[CLS]', '[SEP]', '[PAD]']):
        # skip predictions of sub-word pieces and special tokens
        continue
    else:
        word_level_predictions.append(pair[1])

# we join tokens, if they are not special ones
str_rep = " ".join([t[0] for t in wp_preds if t[0] not in ['[CLS]', '[SEP]', '[PAD]']]).replace(" ##", "")
print(str_rep)
print(word_level_predictions)
# + [markdown] colab_type="text" id="sqDklprSqB5d"
# #### **Saving the model for future use**
# + [markdown] colab_type="text" id="VuUdX_fImswO"
# Finally, let's save the vocabulary, model weights and the model's configuration file to a directory, so that they can be re-loaded using the `from_pretrained()` class method.
#
#
# + colab_type="code" id="sDZtSsKKntuI" tags=[] colab={} outputId="79b2c502-fbd3-46b3-eb54-f6d53d9563d1"
import os

# Target directory for the serialized tokenizer vocab + model weights/config;
# exist_ok makes the call idempotent across notebook re-runs.
directory = "./model"
os.makedirs(directory, exist_ok=True)

# save vocabulary of the tokenizer
tokenizer.save_vocabulary(directory)
# save the model weights and its configuration file
model.save_pretrained(directory)
print('All files saved')
print('This tutorial is completed')
# + [markdown] id="hUTTASzl_Gla" colab_type="text"
# ## Legacy
# + id="YklvaYs2_Gla" colab_type="code" colab={}
def prepare_sentence(sentence, tokenizer, maxlen):
    """Turn a raw sentence into BERT inputs (input ids + attention mask).

    Adds [CLS]/[SEP], then pads with [PAD] (or truncates) so the sequence is
    exactly `maxlen` tokens long, and returns long tensors ready for the model.
    """
    # tokenize and wrap with the special tokens BERT expects
    tokens = ["[CLS]"] + tokenizer.tokenize(sentence) + ["[SEP]"]

    # force the sequence to exactly `maxlen` entries
    if len(tokens) > maxlen:
        tokens = tokens[:maxlen]
    else:
        tokens += ["[PAD]"] * (maxlen - len(tokens))

    # attend to every real token, ignore the padding
    attn_mask = [0 if tok == "[PAD]" else 1 for tok in tokens]

    return {
        'ids': torch.tensor(tokenizer.convert_tokens_to_ids(tokens), dtype=torch.long),
        'mask': torch.tensor(attn_mask, dtype=torch.long),
        #'token_type_ids': torch.tensor(token_ids, dtype=torch.long),
    }
| BERT/Custom_Named_Entity_Recognition_with_BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Temporal Difference: Off-policy Q-Learning, Stochastic
import numpy as np
# ## Create environment
def create_environment_states():
    """Set up the state space of the gridworld.

    Returns:
        num_states: int, total number of states.
        num_terminal_states: int, number of terminal states.
        num_non_terminal_states: int, number of non terminal states.
    """
    num_states = 16
    num_terminal_states = 2
    # everything that is not terminal is a regular, actionable state
    return num_states, num_terminal_states, num_states - num_terminal_states
def create_environment_actions(num_non_terminal_states):
    """Set up the action space.

    Args:
        num_non_terminal_states: int, number of non terminal states.
    Returns:
        max_num_actions: int, max number of actions possible.
        num_actions_per_non_terminal_state: array[int], number of actions
            available in each non terminal state.
    """
    max_num_actions = 4
    # every non-terminal state offers the same four actions
    num_actions_per_non_terminal_state = np.full(
        num_non_terminal_states, max_num_actions)
    return max_num_actions, num_actions_per_non_terminal_state
def create_environment_successor_counts(num_states, max_num_actions):
    """Build the per-(state, action) successor-state counts.

    Args:
        num_states: int, number of states.
        max_num_actions: int, max number of actions possible.
    Returns:
        num_state_action_successor_states: array[int], number of successor
            states s' reachable from state s by taking action a — all ones
            here, i.e. every transition is deterministic.
    """
    return np.ones((num_states, max_num_actions), dtype=np.int64)
def create_environment_successor_arrays(
        num_non_terminal_states, max_num_actions):
    """Build successor-state indices, transition probabilities and rewards.

    Args:
        num_non_terminal_states: int, number of non terminal states.
        max_num_actions: int, max number of actions possible.
    Returns:
        sp_idx: array[int], index of the state s' reached from s via action a.
        p: array[float], transition probability of (s, a) -> s'.
        r: array[float], reward received entering s' from s via action a.
    """
    shape = (num_non_terminal_states, max_num_actions, 1)
    # hand-coded successor table: one row of four action outcomes per state
    sp_idx = np.array(
        [1, 0, 14, 4,
         2, 1, 0, 5,
         2, 2, 1, 6,
         4, 14, 3, 7,
         5, 0, 3, 8,
         6, 1, 4, 9,
         6, 2, 5, 10,
         8, 3, 7, 11,
         9, 4, 7, 12,
         10, 5, 8, 13,
         10, 6, 9, 15,
         12, 7, 11, 11,
         13, 8, 11, 12,
         15, 9, 12, 13],
        dtype=np.int64).reshape(shape)
    # deterministic moves (probability 1) with a constant -1 step cost
    p = np.full(shape, 1.0)
    r = np.full(shape, -1.0)
    return sp_idx, p, r
def create_environment():
    """Assemble the full environment description.

    Returns a 9-tuple: (num_states, num_terminal_states,
    num_non_terminal_states, max_num_actions,
    num_actions_per_non_terminal_state, num_state_action_successor_states,
    sp_idx, p, r) — see the individual create_environment_* helpers for the
    meaning of each element.
    """
    num_states, num_terminal_states, num_non_terminal_states = (
        create_environment_states())
    max_num_actions, num_actions_per_non_terminal_state = (
        create_environment_actions(num_non_terminal_states))
    num_state_action_successor_states = create_environment_successor_counts(
        num_states, max_num_actions)
    sp_idx, p, r = create_environment_successor_arrays(
        num_non_terminal_states, max_num_actions)

    return (num_states,
            num_terminal_states,
            num_non_terminal_states,
            max_num_actions,
            num_actions_per_non_terminal_state,
            num_state_action_successor_states,
            sp_idx,
            p,
            r)
# ## Set hyperparameters
def set_hyperparameters():
    """Choose the training hyperparameters.

    Returns:
        num_episodes: int, number of episodes to train over.
        maximum_episode_length: int, max number of timesteps per episode.
        alpha: float, alpha > 0, learning rate.
        epsilon: float in [0, 1], exploration probability (higher explores more).
        gamma: float in [0, 1], discount factor for future reward.
    """
    #       episodes, max length, alpha, epsilon, gamma
    return 10000, 200, 0.001, 0.05, 1.0
# ## Create value function and policy arrays
def create_value_function_arrays(num_states, max_num_actions):
    """Allocate the Q(s, a) table, initialized to zero.

    Args:
        num_states: int, number of states.
        max_num_actions: int, max number of actions possible.
    Returns:
        q: array[float], estimated value of each state-action pair.
    """
    return np.zeros((num_states, max_num_actions))
def create_policy_arrays(num_non_terminal_states, max_num_actions):
    """Allocate the stochastic policy table, initialized uniform.

    Args:
        num_non_terminal_states: int, number of non terminal states.
        max_num_actions: int, max number of actions possible.
    Returns:
        policy: array[float], probability of taking action a in state s.
    """
    # start from a uniform distribution over actions in every state
    return np.full(
        (num_non_terminal_states, max_num_actions), 1.0 / max_num_actions)
# ## Create algorithm
# Set random seed so that everything is reproducible
np.random.seed(seed=0)
def initialize_epsiode(num_non_terminal_states):
    """Pick the starting state of an episode.

    (The public name keeps the original 'epsiode' spelling since callers
    reference it.)

    Args:
        num_non_terminal_states: int, number of non terminal states.
    Returns:
        init_s_idx: int, state index sampled uniformly at random from the
            set of non terminal states.
    """
    return np.random.randint(
        low=0, high=num_non_terminal_states, dtype=np.int64)
def epsilon_greedy_policy_from_state_action_function(
        max_num_actions, q, epsilon, s_idx, policy):
    """Create epsilon-greedy policy from state-action value function.

    Args:
        max_num_actions: int, max number of actions possible.
        q: array[float], estimated value of each state-action pair Q(s, a).
        epsilon: float in [0, 1], exploration probability.
        s_idx: int, current state index.
        policy: array[float], stochastic policy to update in place.
    Returns:
        policy: array[float], updated policy (row s_idx rewritten).
    """
    # Save max state-action value and find the number of actions that have the
    # same max state-action value
    max_action_value = np.max(a=q[s_idx, :])
    max_action_count = np.count_nonzero(a=q[s_idx, :] == max_action_value)

    # Apportion policy probability across ties equally for state-action pairs
    # that have the same value and zero otherwise
    if max_action_count == max_num_actions:
        max_policy_prob_per_action = 1.0 / max_action_count
        remain_prob_per_action = 0.0
    else:
        max_policy_prob_per_action = (1.0 - epsilon) / max_action_count
        remain_prob_per_action = epsilon / (max_num_actions - max_action_count)

    policy[s_idx, :] = np.where(
        q[s_idx, :] == max_action_value,
        max_policy_prob_per_action,
        remain_prob_per_action)

    return policy


def loop_through_episode(
        num_non_terminal_states,
        max_num_actions,
        num_state_action_successor_states,
        sp_idx,
        p,
        r,
        q,
        policy,
        alpha,
        epsilon,
        gamma,
        maximum_episode_length,
        s_idx):
    """Run one episode of off-policy Q-learning, updating q and policy.

    Args:
        num_non_terminal_states: int, number of non terminal states.
        max_num_actions: int, max number of actions possible.
        num_state_action_successor_states: array[int], number of successor
            states s' reachable from (s, a).
        sp_idx: array[int], successor state index for each (s, a).
        p: array[float], transition probability of (s, a) -> s'.
        r: array[float], reward for (s, a) -> s'.
        q: array[float], state-action value table, updated in place.
        policy: array[float], behavior policy, updated in place.
        alpha: float, alpha > 0, learning rate.
        epsilon: float in [0, 1], exploration probability.
        gamma: float in [0, 1], discount factor.
        maximum_episode_length: int, max number of timesteps for an episode.
        s_idx: int, starting state index.
    Returns:
        q: array[float], updated state-action value table.
        policy: array[float], updated stochastic policy.
    """
    # Loop through episode steps until termination
    for _ in range(0, maximum_episode_length):
        # Choose behavior policy for the current state by epsilon-greedy
        # selection from the state-action-value function
        policy = epsilon_greedy_policy_from_state_action_function(
            max_num_actions, q, epsilon, s_idx, policy)

        # Get epsilon-greedy action
        a_idx = np.random.choice(a=max_num_actions, p=policy[s_idx, :])

        # Get reward
        successor_state_transition_idx = np.random.choice(
            a=num_state_action_successor_states[s_idx, a_idx],
            p=p[s_idx, a_idx, :])
        reward = r[s_idx, a_idx, successor_state_transition_idx]

        # Get next state
        next_s_idx = sp_idx[s_idx, a_idx, successor_state_transition_idx]

        # Check to see if we actioned into a terminal state
        if next_s_idx >= num_non_terminal_states:
            # terminal states have value 0, so the target is just the reward
            q[s_idx, a_idx] += alpha * (reward - q[s_idx, a_idx])
            break  # episode terminated since we ended up in a terminal state
        else:
            # Q-learning target: greedy (max) action of the *next* state,
            # breaking ties at random.
            # BUGFIX: the original took the max over q[s_idx, :] (the current
            # state) instead of q[next_s_idx, :], so the bootstrap target was
            # q(s', argmax_a q(s, a)) — neither Q-learning nor SARSA.
            max_action_value = np.max(a=q[next_s_idx, :])
            max_action_stack = np.extract(
                condition=q[next_s_idx, :] == max_action_value,
                arr=np.arange(max_num_actions))
            next_a_idx = np.random.choice(a=max_action_stack)

            # TD(0) update towards reward + gamma * max_a' Q(s', a')
            delta = gamma * q[next_s_idx, next_a_idx] - q[s_idx, a_idx]
            q[s_idx, a_idx] += alpha * (reward + delta)

            # Update state and action to next state and action
            s_idx = next_s_idx
            a_idx = next_a_idx

    return q, policy
def off_policy_temporal_difference_q_learning(
        num_non_terminal_states,
        max_num_actions,
        num_state_action_successor_states,
        sp_idx,
        p,
        r,
        q,
        policy,
        alpha,
        epsilon,
        gamma,
        maximum_episode_length,
        num_episodes):
    """Train q and policy over many episodes of off-policy TD control.

    Args:
        num_non_terminal_states: int, number of non terminal states.
        max_num_actions: int, max number of actions possible.
        num_state_action_successor_states: array[int], number of successor
            states s' reachable from (s, a).
        sp_idx: array[int], successor state index for each (s, a).
        p: array[float], transition probability of (s, a) -> s'.
        r: array[float], reward for (s, a) -> s'.
        q: array[float], state-action value table, updated in place.
        policy: array[float], stochastic policy, updated in place.
        alpha: float, alpha > 0, learning rate.
        epsilon: float in [0, 1], exploration probability.
        gamma: float in [0, 1], discount factor.
        maximum_episode_length: int, max number of timesteps per episode.
        num_episodes: int, number of episodes to train over.
    Returns:
        q: array[float], learned state-action value table.
        policy: array[float], learned stochastic policy.
    """
    for _ in range(num_episodes):
        # sample a fresh starting state, then run one full episode of updates
        start_state = initialize_epsiode(num_non_terminal_states)
        q, policy = loop_through_episode(
            num_non_terminal_states,
            max_num_actions,
            num_state_action_successor_states,
            sp_idx,
            p,
            r,
            q,
            policy,
            alpha,
            epsilon,
            gamma,
            maximum_episode_length,
            start_state)

    return q, policy
# ## Run algorithm
def run_algorithm():
    """Runs the algorithm.

    Builds the gridworld environment, initializes the Q table and a uniform
    policy, runs off-policy TD Q-learning, and prints the arrays before and
    after training.
    """
    (num_states,
     num_terminal_states,
     num_non_terminal_states,
     max_num_actions,
     num_actions_per_non_terminal_state,
     num_state_action_successor_states,
     sp_idx,
     p,
     r) = create_environment()

    (num_episodes,
     maximum_episode_length,
     alpha,
     epsilon,
     gamma) = set_hyperparameters()

    q = create_value_function_arrays(num_states, max_num_actions)
    policy = create_policy_arrays(num_non_terminal_states, max_num_actions)

    # Print initial arrays
    print("\nInitial state-action value function")
    print(q)
    print("\nInitial policy")
    print(policy)

    # Run off policy temporal difference q learning
    q, policy = off_policy_temporal_difference_q_learning(
        num_non_terminal_states,
        max_num_actions,
        num_state_action_successor_states,
        sp_idx,
        p,
        r,
        q,
        policy,
        alpha,
        epsilon,
        gamma,
        maximum_episode_length,
        num_episodes)

    # Print final results
    print("\nFinal state-action value function")
    print(q)
    print("\nFinal policy")
    print(policy)
run_algorithm()
| machine_learning/reinforcement_learning/generalized_stochastic_policy_iteration/tabular/temporal_difference/np_temporal_difference/off_policy_stochastic_temporal_difference_q_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Global Imports
# Importing a bunch of globals from [this](./HNSCC_Imports.ipynb) notebook.
import NotebookImport
from HNSCC_Imports import *
# ##Calculate MATH Score
maf = FH.get_submaf(run.data_path, 'HNSC', fields='All')
maf = maf[maf.patient.map(lambda s: s[13:16] == '01A')]
maf.patient = maf.patient.map(lambda s: s[:12])
maf = maf.reset_index()
maf['t_alt_count'] = maf['t_alt_count'].replace('---', nan).astype(float)
maf['t_ref_count'] = maf['t_ref_count'].replace('---', nan).astype(float)
# +
frac = maf.t_alt_count / maf[['t_alt_count','t_ref_count']].sum(1)
frac = frac[frac > .075]
get_mad = lambda s: (s - s.median()).abs().median() * 1.826
med = frac.groupby(maf.patient).median()
mad = frac.groupby(maf.patient).mad()
math = (mad / med) * 100
math.name = 'MATH'
# -
# My numbers are a bit off... I should try and reconsile this.
math.describe()
math.hist()
# I'm going to tweak their threshold as my calculation is a bit miscalibrated.
(math > 32).value_counts()
# This is a bit closer but its hard to tell where the missing samples are.
(math > 31.5).value_counts()
math_t = (math > 31.5).map({True:'MATH High', False:'MATH Low'})
# # Survival Analysis
# Curves look a little off, maybe I'm using more recent data?
survival_and_stats(math_t, surv)
# Combination of HPV and MATH
# Not sure why they cut off survival at 4 years
violin_plot_pandas(hpv, math)
survival_and_stats(combine(math_t=='MATH High', hpv), surv)
# From another angle, I don't see the HPV effect in my data.
draw_survival_curves(math_t, surv, hpv)
# ### Combination Analysis in HPV-
# * TP53-3p (Our finding from [this paper](http://www.nature.com/ng/journal/v46/n9/full/ng.3051.html)).
# * I use a different working set with some old patients filtered
# 5-year survival stratified by 3p deletion + TP53 mutation, filtered cohort
survival_and_stats(combine(del_3p<0, mut.features.ix['TP53']>0).ix[keepers_o].dropna(),
                   clinical.survival.survival_5y)
# This is with the old patients back in.
survival_and_stats(combine(del_3p<0, mut.features.ix['TP53']>0).ix[ti(hpv==False)].dropna(),
                   clinical.survival.survival_5y)
# TP53-MATH (Figure 6b)
violin_plot_pandas(mut.df.ix['TP53'], math)
survival_and_stats(combine(math_t=='MATH High', mut.features.ix['TP53']>0).ix[ti(hpv==False)].dropna(),
                   clinical.survival.survival_5y)
# 3p Deletion-MATH
violin_plot_pandas(del_3p, math, order=[-2,-1,0,1])
survival_and_stats(combine(math_t=='MATH High', del_3p < 0).ix[ti(hpv==False)].dropna(),
                   clinical.survival.survival_5y)
# TP53-3p combination + MATH
combo = combine(mut.features.ix['TP53']>0, del_3p < 0)
violin_plot_pandas(combo, math, order=['neither','3p_deletion',
                                       'TP53','both'])
survival_and_stats(combine(math_t=='MATH High', combo=='both').ix[ti(hpv==False)].dropna(),
                   clinical.survival.survival_5y)
# TP53-3p combination in the context of MATH
draw_survival_curves(combo, surv, math_t)
# MATH in the context of TP53-3p
draw_survival_curves(math_t=='MATH High', surv, combo)
# #### Add third subtype from our paper
two_hit = combine(del_3p<0, mut_new.ix['TP53']>0) == 'both'
two_hit.name = 'two_hit'
# collapse (mir-548k high, two-hit) combinations into numeric subtypes 1/2/3
subtypes = combine(mirna.ix['hsa-mir-548k'][:,'01'] > -1, two_hit)
subtypes = subtypes.map({'hsa-mir-548k':1, 'neither':1, 'two_hit':2, 'both': 3})
subtypes.name = 'subtype'
violin_plot_pandas(subtypes, math)
draw_survival_curves(math_t=='MATH High', surv, subtypes)
draw_survival_curves(subtypes, surv, math_t=='MATH High')
| Notebooks/HNSCC_MATH.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import h5py
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('presentation')
from shabanipy.jj.plotting_general import (plot_fraunhofer, plot_extracted_switching_current,
plot_extracted_critical_current, plot_current_distribution)
#: Name of the sample that must appear in the measurement name usually of the form "{Wafer}-{Piece}_{Design}-{Iteration}_{Junction}_{Cooldown}"
SAMPLE_NAME = "{Wafer}-{Piece}_{Design}-{Iteration}"
SAMPLE_ID = "{Wafer}-{Piece}_{Design}-{Iteration}_{Junction}_{Cooldown}"
#: hdf5 file number
FILE_NUM = ''
#: Path to store generated files
PATH = (f"/Users/bh/Desktop/Code/Topological JJ/Samples/{SAMPLE_NAME}/{SAMPLE_ID}")
#: Name of generated processed data file
PROCESSED_DATA_NAME = (f"{PATH}/Data/{SAMPLE_ID}_processed-data-{FILE_NUM}.hdf5")
h = h5py.File(PROCESSED_DATA_NAME, 'r')
field_y = 'In-plane Field - Y::'
field_z = 'In-plane Field - Z::'
vg = 'Vg::'
f = h['Data'][f'{field_y}&{field_z}'][f'{vg}']
# -
out_field = np.array(f['vector magnet - Field X'])
v_drop = np.array(f["Voltage Drop"])
scaled_v_drop = np.array(f["ScaledVoltage"])
bias = np.array(f["Bias"])
# dR = np.array(f["dR"])
dVdI = np.diff(np.array(f["ScaledVoltage"]))/np.diff(np.array(f["Bias"]))
# + tags=[]
# plot_fraunhofer(out_field,bias,np.abs(dR),
plot_fraunhofer(out_field,bias,dVdI,
current_field_conversion = 35.705, #9th floor vector, 10th floor vector: 35.927
# savgol_windowl = 5, savgol_polyorder = 1,
# cvmax = , cvmin = ,
# bias_limits = ,
# out_field_limits = ,
# fig_size = ,
)
plt.savefig(f"Figs/Fraunhofer/fraun__{SAMPLE_ID}_field-y:{field_y[16:]}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
# +
"""Since there's a shift in the DMM the superconducting region isn't exactly around zero.
This value is not constant and needs to be adjusted.This threshold sets the voltage range around zero used
to determine the swicthing current. Usually the threshold is of the order of 1e-4 or 1e-6 depending on if correct_v_offset
is True or False."""
# BUG FIX: `threshold =` had no right-hand side, which is a SyntaxError and
# made this template cell unrunnable.  Default to 1e-4 (see the note above);
# tune per measurement — TODO confirm the right magnitude for each dataset.
threshold = 1e-4
sav_wind = 3
plot_extracted_switching_current(out_field, bias, scaled_v_drop,
                                 threshold = threshold,
                                 current_field_conversion = 35.705, #9th floor vector, 10th floor vector: 35.927
                                 # correct_v_offset = , # Correct voltage offset when extracting switching current or not. Default is True
                                 # symmetrize_fraun = , # Symmetrize the Fraunhofer or not(out-of-plane field axis). Symmetrizing is best when the Fraunhofer field range is uneven. Default is False
                                 # center_fraun = , #Center the Fraunhofer around 0mT. Default is True
                                 # savgol_windowl = sav_wind, savgol_polyorder = 1,
                                 # bias_limits = ,
                                 # out_field_limits = ,
                                 # fig_size = ,
                                 )
# -
plot_current_distribution(out_field, bias, scaled_v_drop,
threshold = threshold,
jj_length = 950e-9,
jj_width = 3e-6,
current_field_conversion = 35.705, #9th floor vector, 10th floor vector: 35.927
# correct_v_offset = , # Correct voltage offset when extracting switching current or not. Default is True
# symmetrize_fraun = , # Symmetrize the Fraunhofer or not(out-of-plane field axis). Symmetrizing is best when the Fraunhofer field range is uneven. Default is False
# center_fraun = , #Center the Fraunhofer around 0mT. Default is True
# savgol_windowl = sav_wind, savgol_polyorder = 1,
# x_limits = ,
# jx_limits = ,
# fig_size = ,
)
plt.savefig(f"Figs/Fraunhofer/current_dis__{SAMPLE_ID}_field-y:{field_y[16:]}_field-z:{field_z[16:]}_Vg:{vg[4:]}_{FILE_NUM}.pdf", dpi = 400, bbox_inches = 'tight')
| scripts/Analysis Templates/Analysis-out-field__Wafer-Piece_Design-Iteration_Junction_Cooldown.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GGnsMN3aU7KX"
# # 0 - Environment Setup
# + colab={"base_uri": "https://localhost:8080/"} id="_O7kaRJoN-fC" outputId="5380f696-0216-43ca-b502-d36906fad9a0"
# import package to load BERT model
# !pip install transformers
# mount google drive to load dataset
from google.colab import drive
drive.mount('/content/drive')
# for data handling
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
import string
# pytorch module for model implementation
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from transformers import BertModel, BertTokenizer
from tqdm import tqdm
# For saving model
from collections import OrderedDict
import urllib.request
import pickle
# + colab={"base_uri": "https://localhost:8080/"} id="AkLdut3NXm06" outputId="8b3462e8-9a4e-4dbd-d5c2-d7c03e25b811"
# Set up CUDA if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# + [markdown] id="Zxl4TWmfVKwH"
# # 1 - Load the Data
# + colab={"base_uri": "https://localhost:8080/", "height": 306} id="xfnvqOyQTFXF" outputId="566e2871-6d2d-44b5-cc36-30faf25625b5"
# Load dataset
train_songs = pd.read_csv('/content/drive/MyDrive/COMP89/train.csv')
# filter to only English songs
train_songs = train_songs.loc[train_songs.Language == 'en']
# convert to categorical to get numerical classes
train_songs['Genre'] = train_songs['Genre'].astype('category')
val_counts = train_songs['Genre'].value_counts()
new_songs = train_songs.loc[train_songs.Genre == val_counts.index[0]].sample(n=min(train_songs['Genre'].value_counts()), random_state=42)
for g in val_counts.index[1:]:
genre_df = train_songs.loc[train_songs.Genre == g]
genre_df = genre_df.sample(n=min(train_songs['Genre'].value_counts()), random_state=42)
new_songs = pd.concat([new_songs, genre_df])
new_songs.head()
# + [markdown] id="TY6tZT11WeYZ"
# # 2 - Model
# + [markdown] id="ac6RrgNSXxZp"
# ## 2.1 - Implementation
# + id="wGSsFB-CTKAw"
# bert word attention
class Word_RNN(nn.Module):
    """Word-level encoder: embeds each line of lyrics with a pretrained BERT
    model, then pools the per-token hidden states of each line into a single
    vector via additive attention (a learned context vector scores each token).

    NOTE(review): relies on the module-level ``device`` global; ``hidden_size``
    must match BERT's hidden width (768 for bert-base-uncased) — confirm.
    """

    def __init__(self, hidden_size):
        super(Word_RNN, self).__init__()
        self.hidden_size = hidden_size
        # Pretrained BERT tokenizer + encoder; downloaded on first use.
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.bert_model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states = False).to(device)
        # Additive attention: project token states, then score them with a
        # learned context vector (the Linear(hidden, 1) below).
        self.word_weight = nn.Linear(self.hidden_size, self.hidden_size)
        self.word_context_weight = nn.Linear(self.hidden_size, 1)

    def forward(self, raw_sents):
        # Output buffer: one pooled vector per line, shape (1, n_lines, hidden).
        sents = torch.zeros(1,len(raw_sents),self.hidden_size).to(device)
        # Tokenize all lines in one batch, padded/truncated to a common length.
        encoded_input = self.tokenizer(raw_sents, return_tensors='pt', padding = True, truncation = True).to(device)
        # h: BERT last hidden states, shape (n_lines, seq_len, hidden).
        h = self.bert_model(**encoded_input)[0]
        for i in range(len(raw_sents)):
            h_i = h[i, :, :].unsqueeze(0)               # (1, seq_len, hidden)
            u_i = torch.tanh(self.word_weight(h_i))      # projected token states
            u_iTw = self.word_context_weight(u_i).squeeze(2)  # (1, seq_len) scores
            attn_weights = F.softmax(u_iTw, dim=1)       # normalize over tokens
            # Attention-weighted sum over tokens -> (1, hidden).
            s_i = (attn_weights * h_i.permute(0,2,1)).sum(dim = 2)
            sents[:,i,:] = s_i
        return sents
# sentence attention
class Sent_RNN(nn.Module):
def __init__(self, word_num_hidden, sentence_num_hidden):
super(Sent_RNN, self).__init__()
self.sentence_num_hidden = sentence_num_hidden
self.lstm = nn.LSTM(word_num_hidden, sentence_num_hidden, bidirectional=True, batch_first = True)
self.sent_weight = nn.Linear(2*sentence_num_hidden, 2*sentence_num_hidden)
self.sent_context_weight = nn.Linear(2*sentence_num_hidden, 1)
def forward(self, x):
h_is, _ = self.lstm(x)
u_is = torch.tanh(self.sent_weight(h_is))
u_iTs = self.sent_context_weight(u_is).squeeze(2)
a_is = F.softmax(u_iTs, dim=1)
v = (a_is * h_is.permute(0,2,1)).sum(dim = 2)
return v
class HAN(nn.Module):
    """Hierarchical attention network for genre classification: word-level
    BERT attention per line, sentence-level LSTM attention over lines, then a
    linear classifier producing raw class scores (logits).

    Attribute names are part of the saved-checkpoint contract (state_dict keys)
    and of the 'bert'-substring parameter-freezing logic — do not rename.
    """

    def __init__(self, sentence_num_hidden, word_hidden_size, num_classes):
        super(HAN, self).__init__()
        self.word_attn_rnn = Word_RNN(word_hidden_size)
        self.sent_attn_rnn = Sent_RNN(word_hidden_size, sentence_num_hidden)
        self.linear = nn.Linear(2 * sentence_num_hidden, num_classes)

    def forward(self, raw_sents):
        line_vectors = self.word_attn_rnn(raw_sents)
        document_vector = self.sent_attn_rnn(line_vectors)
        return self.linear(document_vector)
# + colab={"base_uri": "https://localhost:8080/"} id="cOKf7cOoWutT" outputId="bf301cd6-b976-48f8-d344-a9ece7f85016"
# Define model hyperparamters
sentence_num_hidden = 256
word_hidden_size = 768
num_classes = new_songs['Genre'].nunique()
# Create instance of model
model = HAN(sentence_num_hidden, word_hidden_size, num_classes).to(device)
# Freeze all BERT layers from training
non_bert_params = []
for name, _param in model.named_parameters():
if 'bert' not in name:
non_bert_params.append(_param)
else:
_param.requires_grad = False
# Define training paramters
num_epochs = 5
lr = 0.01
# Define loss function and optimiser
criterion = nn.CrossEntropyLoss()
optimiser = torch.optim.SGD(non_bert_params, lr=lr)
# + [markdown] id="b4XPfrAGX3Gb"
# ## 2.2 - Training
# + id="7kQKailGhttu"
# Helper functions for accuracy
def predict(X):
    """Return a list of predicted class indices, one per song in ``X``.

    Each element of ``X`` is the raw lyrics string of one song; it is split
    into lines and fed through the (module-level) ``model``.
    """
    preds = []
    model.eval()
    for x in X:
        # NOTE(review): '/n' matches the training loop's split, but is likely
        # meant to be '\n' (newline) — confirm against the dataset format.
        bars = x.split('/n')
        out = model(bars)
        # BUG FIX: originally appended to undefined `train_preds`, raising a
        # NameError on the first call.  Also pass dim=1 explicitly (same dim
        # the deprecated implicit form picks for 2-D input).
        preds.append(torch.argmax(F.softmax(out, dim=1)).item())
    return preds
def Accuracy(preds, label):
    """Return the fraction of predictions equal to the corresponding labels."""
    predicted = np.asarray(preds)
    expected = np.asarray(label)
    return np.mean(predicted == expected)
# + [markdown] id="xpv-jDGgixto"
# <font color='red'>DO NOT RUN THIS CELL (WILL TAKE > 5 HOURS)</font>
# + colab={"base_uri": "https://localhost:8080/"} id="4To71dilXUbe" outputId="a866dee2-e96c-4473-b6ca-bf628c045e08"
X = new_songs.Lyrics.values.copy()
y = new_songs.Genre.cat.codes.values
model.train()
for epoch in range(num_epochs):
# shuffle dataset for each epoch
X, y = shuffle(X, y)
# SGD
for x, label in zip (X, y):
optimiser.zero_grad()
# split raw text input by each line in song
bars = x.split('/n')
out = model(bars)
label = torch.LongTensor([label]).to(device)
# find the loss
train_loss = criterion(out, label)
#backprop
train_loss.backward()
optimiser.step()
# Training Accuracy
print("Train acc: {}".format(Accuracy(predict(X), y)))
# + [markdown] id="QnCFR0cFdPN7"
# ## 2.3 - Save Model
# + id="oVjCbOyDYFmd"
partial_state_dict = OrderedDict()
for param_name in list(model.state_dict().keys()):
if 'bert' not in param_name:
partial_state_dict[param_name] = model.state_dict()[param_name]
# export
torch.save(partial_state_dict, '/content/drive/MyDrive/COMP89/partial_model_weights')
# + [markdown] id="nXzVGN36d58a"
# # 3 - Evaluation
# + id="70WjHvq_d7Br"
# Import model weights
url = 'https://github.com/salkhalil/Lyrics2Vec/raw/main/saved_embeddings/partial_model_weights'
urllib.request.urlretrieve(url, './partial_model')
# Load Model
model = HAN(sentence_num_hidden, word_hidden_size, num_classes).to(device)
model.load_state_dict(torch.load('./partial_model'), strict=False)
# Import test set
test_data = pd.read_csv('https://github.com/salkhalil/Lyrics2Vec/raw/main/datasets/cheeky.csv')
X = test_data.Lyrics.values.copy()
y = test_data.Genre.cat.codes.values
print("Test acc: {}".format(Accuracy(predict(X_test), y_test)))
# + id="A9JR8ptKeJYk"
# loss plot
# + id="sS4k3sQ5eJ-j"
# confusion matrix
| models/BERT_HAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Set up and run a simple real-time calibration pipeline, RCAL.
# +
# %matplotlib inline
import os
import sys
sys.path.append(os.path.join('..','..'))
from matplotlib import pylab
pylab.rcParams['figure.figsize'] = (8.0, 8.0)
pylab.rcParams['image.cmap'] = 'rainbow'
import matplotlib.pyplot as plt
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
from data_models.polarisation import PolarisationFrame
from processing_components.skycomponent.operations import create_skycomponent
from processing_components.simulation.testing_support import create_named_configuration, create_blockvisibility_iterator
from processing_components.functions.pipeline_functions import rcal
# -
# Define the data to be generated
# +
lowcore = create_named_configuration('LOWBD2', rmax=750.0)
times = numpy.linspace(-3.0, +3.0, 7) * numpy.pi / 12.0
frequency = numpy.linspace(1.0e8, 1.50e8, 3)
channel_bandwidth = numpy.array([5e7, 5e7, 5e7])
# Define the component and give it some polarisation and spectral behaviour
f = numpy.array([100.0, 20.0, -10.0, 1.0])
flux = numpy.array([f, 0.8 * f, 0.6 * f])
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
compdirection = SkyCoord(ra=17.0 * u.deg, dec=-36.5 * u.deg, frame='icrs', equinox='J2000')
comp = create_skycomponent(flux=flux, frequency=frequency, direction=compdirection)
# -
def plotgain(gt, title=''):
    """Scatter-plot the XX and YY complex gains of a gaintable ``gt`` in the
    complex plane (real part vs imaginary part)."""
    plt.clf()
    for pol in (0, 1):
        gains = gt.gain[..., pol, pol]
        plt.plot(numpy.real(gains).flat, numpy.imag(gains).flat, '.')
    plt.title(title)
    plt.xlabel('Real part of gain')
    plt.ylabel('Imaginary part of gain')
    plt.show()
# To do the simulation, we define a python generator that mimics an ingest. This generator creates, fills in visibilities, and applies gain errors. The generator only makes the data as needed. Hence the RCAL pipeline calls the generator repeatedly until all data have been constructed.
#
# To consume the data from the ingest, we define another generator, RCAL, that performs calibration and returns a gaintable.
#
# RCAL is itself a python generator so nothing happens until the pipeline is iterated.
#
# The simulation includes amplitude and phase errors of 0.01 and 0.1 radians. The plot shows the recovered gains.
# +
ingest = create_blockvisibility_iterator(lowcore, times=times,
frequency=frequency,
channel_bandwidth=channel_bandwidth, phasecentre=phasecentre,
weight=1, polarisation_frame=PolarisationFrame('linear'),
integration_time=1.0, number_integrations=1,
components=comp, phase_error=0.1, amplitude_error=0.01)
rcal_pipeline = rcal(vis=ingest, components=comp, phase_only=False)
print("Starting pipeline")
for igt, gt in enumerate(rcal_pipeline):
plotgain(gt, title="Chunk %d, time %s, residual %.3g (Jy)" % (igt, numpy.unique(gt.time),
numpy.average(gt.residual)))
print("Ingest and RCAL pipelines are empty, stopping")
# -
| processing_components/notebooks/rcal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import shutil
import os
import io
import json
import time
import datetime
import dateutil
import re
from IPython.display import clear_output
from pprint import pprint
# +
parent_folder='steam_data'
if not os.path.exists(parent_folder):
os.makedirs(parent_folder)
# -
with open(os.path.join(parent_folder,'steamspy_exearlyaccess.html'),'r') as file:
temphtml=file.read()
all_games=re.findall(r'<td data-order=.+<a href=/app/[0-9]+',temphtml)
with open(os.path.join(parent_folder,'steamspy_earlyaccess.html'),'r') as file:
temphtml=file.read()
all_games+=re.findall(r'<td data-order=.+<a href=/app/[0-9]+',temphtml)
with open(os.path.join(parent_folder,'steamspy_earlyaccess_tag.html'),'r') as file:
temphtml=file.read()
all_games+=re.findall(r'<td data-order=.+<a href=/app/[0-9]+',temphtml)
game_names={}
for game_str in all_games:
quotes=[game_str.find('"'),game_str.rfind('"')]
tempappid=int(game_str[game_str.index("/app/")+5:])
if tempappid not in game_names:
game_names[int(game_str[game_str.index("/app/")+5:])]=game_str[quotes[0]+1:quotes[1]]
game_names.pop(296750)
game_names.pop(622050)
game_names.pop(719950)
game_names.pop(575050) # blocked in US
game_names.pop(353370) # steam controller!!
all_appids=sorted(list(game_names.keys()))
print(len(all_appids))
#all_appids.remove(578080) #remove pubg...
print(len(all_appids))
#all_appids=[578080] #pubg only!
#all_appids=[433850] #error fixing...
print(len(all_appids))
# +
with open(os.path.join(parent_folder,'early_access_search','found_games'),'r') as file:
other_ids_1=set([int(x) for x in file.read().split('\n')[:-1]])
with open(os.path.join(parent_folder,'early_access_search','found_games_2'),'r') as file:
other_ids_2=set([int(x) for x in file.read().split('\n')[:-1]])
other_ids=sorted(list(other_ids_1 | other_ids_2))
all_appids+=other_ids
print(len(all_appids))
# +
review_language="all"
review_subfolder='r_complete'
#if review_language != 'english':
# review_subfolder=review_subfolder+'_'+review_language
with open(os.path.join(parent_folder,'errors'),'a') as error_file:
error_file.write("---\n")
for i in range(0,len(all_appids)):
appid=str(all_appids[i])
if not os.path.exists(os.path.join(parent_folder,appid)):
os.makedirs(os.path.join(parent_folder,appid))
if not os.path.exists(os.path.join(os.path.join(parent_folder,appid),review_subfolder)):
os.makedirs(os.path.join(os.path.join(parent_folder,appid),review_subfolder))
print(appid)
for start_offset in range(0,2000000,100):#12100,100):
#time.sleep(0.1)
not_successful=True
while not_successful:
try:
tempjson=requests.get("https://store.steampowered.com/appreviews/"+appid+"?json=1&filter=recent&start_offset="+str(start_offset)+"&num_per_page=100&language="+review_language+"&purchase_type=all&review_type=all").text
not_successful=False
except:
time.sleep(29)
if len(tempjson)>12 and tempjson[0:12]=='{"success":1':
#while len(tempjson)<12 or tempjson[0:12]!='{"success":1':
# print("ERROR ("+appid+","+str(start_offset)+"): "+tempjson)
# time.sleep(60)
# tempjson=requests.get("https://store.steampowered.com/appreviews/"+appid+"?json=1&filter=recent&start_offset="+str(start_offset)+"&num_per_page=100").text
if tempjson=='{"success":1,"query_summary":{"num_reviews":0},"reviews":[]}':
break
else:
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),str(start_offset)), 'w') as file:
file.write(tempjson)
else:
error_file.write(str(appid)+"-"+str(start_offset)+"-"+review_language+'\n')
#tempjsons.append(requests.get("https://store.steampowered.com/appreviews/"+appid+"?json=1&filter=recent&start_offset="+str(start_offset)+"&num_per_page=100").text)
#print(tempjsons[-1]=='{"success":1,"query_summary":{"num_reviews":0},"reviews":[]}')
print(str(i+1)+"/"+str(len(all_appids))+" (" +str(appid)+"): "+str(int(start_offset/100)))
#print(str(i+1)+"/"+str(len(all_appids))+" (" +str(game_names[int(appid)])+"): "+str(int(start_offset/100)))
print("Done!")
# -
appid="361420"#"366910"#"361420"
reviews=[]
for start_offset in range(0,11900,100):
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),'r'),str(start_offset)),'r') as file:
reviews+= json.load(file)['reviews']
# +
#print(reviews[0]['voted_up'])
#print(reviews[0]['timestamp_created'])
#print(reviews[0]['timestamp_updated'])
#print(reviews[0])
recids=set()
positive_dict={}
num_reviews_dict={}
created=[]
for review in reviews:
if review['timestamp_created']==review['timestamp_updated']:
created.append(review['timestamp_created'])
#updates=[]
min_year=int(datetime.datetime.utcfromtimestamp(min(created)).strftime('%Y'))
min_month=int(datetime.datetime.utcfromtimestamp(min(created)).strftime('%m'))
for review in reviews:
if review['recommendationid'] not in recids and review['timestamp_created']==review['timestamp_updated']:
unixdate=review['timestamp_created']
year=int(datetime.datetime.utcfromtimestamp(unixdate).strftime('%Y'))
month=int(datetime.datetime.utcfromtimestamp(unixdate).strftime('%m'))
mo=(year-min_year)*12+(month-min_month)
if mo not in num_reviews_dict:
positive_dict[mo]=0
num_reviews_dict[mo]=0
num_reviews_dict[mo]+=1
if review['voted_up']:
positive_dict[mo]+=1
ordered_mos=sorted(positive_dict.keys())
num_reviews=[num_reviews_dict[ordered_mos[x]] for x in ordered_mos]
pos_reviews=[positive_dict[ordered_mos[x]] for x in ordered_mos]
ratios=[float(positive_dict[ordered_mos[x]])/float(num_reviews_dict[ordered_mos[x]]) for x in ordered_mos]
cumulative_ratios=[float(sum(pos_reviews[:x+1]))/float(sum(num_reviews[:x+1])) for x in range(len(ordered_mos))]
#print(ratios)
#print((year-2016)*12+(month-12))
#print(min(created))
#year=int(datetime.datetime.utcfromtimestamp(1481868474).strftime('%Y'))
#month=int(datetime.datetime.utcfromtimestamp(1481868474).strftime('%m'))
#print((year-2016)*12+(month-12))
#print(datetime.datetime.utcfromtimestamp(1481868474).strftime('%Y'))
#print(datetime.datetime.utcfromtimestamp(1481868474).strftime('%m'))
#print(datetime.datetime.utcfromtimestamp(1481868474).strftime('%Y-%m-%d %H:%M:%S'))
#for review in reviews:
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.xlabel("Month since release")
plt.ylabel("Number of new reviews")
plt.plot(ordered_mos,num_reviews);
# -
plt.xlabel("Month since release")
plt.ylabel("% new reviews positive")
plt.ylim(0.5,1)
plt.plot(ordered_mos,ratios);
plt.xlabel("Month since release")
plt.ylabel("% reviews positive")
plt.ylim(0.5,1)
plt.plot(ordered_mos,cumulative_ratios);
pprint(review)
print("Hello")
# +
review_language="english"
review_subfolder='r'
if review_language != 'english':
review_subfolder=review_subfolder+'_'+review_language
for i in range(len(all_appids)):
too_few_reviews=0
too_few_reviews_offsets=[]
appid=str(all_appids[i])
for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
#if appid=='310380':
# pprint(json.load(file)['query_summary'])
if int(json.load(file)['query_summary']['num_reviews']) !=100:
too_few_reviews+=1
too_few_reviews_offsets.append(filename)
if too_few_reviews>1:
print("FAIL: "+appid)
print(too_few_reviews_offsets)
# Turns out sometimes the Steam API doesn't return every review in a
# certain range, even if there are more reviews after that one! I've tried re-downloading and
# it does not appear to be down to a network blip. Possibly Steam itself blocks some reviews
# from being shown, or they contain characters that cause problems for the request?
# It appears to be really uncommon in any case
# +
review_language="english"
review_subfolder='r'
if review_language != 'english':
review_subfolder=review_subfolder+'_'+review_language
for i in range(len(all_appids)):
output_json={}
output_reviews=[]
recids=set()
appid=str(all_appids[i])
for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
reviews=json.load(file)['reviews']
for review in reviews:
recid=review['recommendationid']
if recid not in recids:
recids.add(recid)
output_reviews.append(review)
output_json['num_reviews']=len(output_reviews)
output_json['reviews']=output_reviews
if(len(output_reviews)!=0):
if not os.path.exists(os.path.join(parent_folder,appid+"_bulk")):
os.makedirs(os.path.join(parent_folder,appid+"_bulk"))
if not os.path.exists(os.path.join(os.path.join(parent_folder,appid+"_bulk"),"json")):
os.makedirs(os.path.join(os.path.join(parent_folder,appid+"_bulk"),"json"))
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid+"_bulk"),"json"),appid+"_reviews_"+review_language),'w') as file:
json.dump(output_json, file)
print(str(i+1)+"/"+str(len(all_appids))+" (" +str(game_names[int(appid)])+"): "+str(output_json['num_reviews']))
print("Done!")
# +
review_language="english"
review_subfolder='r'
if review_language != 'english':
    review_subfolder = review_subfolder + '_' + review_language
# Sanity check: every bulk-exported app should contain at least one review
# written during early access (the apps were selected as early-access titles).
# Print any app for which that is not true.
num_games = 0
for temp_dir in os.listdir(parent_folder):
    if len(temp_dir) > 5 and temp_dir[-5:] == '_bulk':
        appid = temp_dir[:-5]
        num_games += 1
        if os.path.exists(os.path.join(os.path.join(os.path.join(parent_folder, temp_dir), "json"), appid + "_reviews_" + review_language)):
            with open(os.path.join(os.path.join(os.path.join(parent_folder, temp_dir), "json"), appid + "_reviews_" + review_language), 'r') as file:
                temp_json = json.load(file)
            reviews = temp_json['reviews']
            early_access_review_found = False
            for review in reviews:
                if(review['written_during_early_access']):
                    # BUG FIX: was `early_access_review_fond` (typo), so the
                    # flag never got set.  Harmless today because only the
                    # for/else branch below is consulted, but fixed for
                    # correctness.
                    early_access_review_found = True
                    #print(str(num_games)+" (" +str(game_names[int(appid)])+"): Verified")
                    break
            else:
                print(str(num_games)+" (" +str(game_names[int(appid)])+"): NO EARLY ACCESS REVIEWS!")
print("Done!")
# -
print(output_reviews)
from IPython.display import clear_output
a=3
b=4
for i in range(10):
print(a)
clear_output(wait=True)
time.sleep(1)
print(b)
# +
review_language="all"
for i in range(len(all_appids)):
output_json={}
output_reviews=[]
recids=set()
appid=str(all_appids[i])
review_subfolder='r'
if review_language != 'english':
review_subfolder=review_subfolder+'_'+review_language
for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
reviews=json.load(file)['reviews']
for review in reviews:
recid=review['recommendationid']
if recid not in recids:
recids.add(recid)
output_reviews.append(review)
review_subfolder='r_nonsteampurchase'
if review_language != 'english':
review_subfolder=review_subfolder+'_'+review_language
for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
reviews=json.load(file)['reviews']
for review in reviews:
recid=review['recommendationid']
if recid not in recids:
recids.add(recid)
output_reviews.append(review)
output_json['num_reviews']=len(output_reviews)
output_json['reviews']=output_reviews
if(len(output_reviews)!=0):
if not os.path.exists(os.path.join(parent_folder,"bulk")):
os.makedirs(os.path.join(parent_folder,"bulk"))
if not os.path.exists(os.path.join(os.path.join(parent_folder,"bulk"),"json")):
os.makedirs(os.path.join(os.path.join(parent_folder,"bulk"),"json"))
with open(os.path.join(os.path.join(os.path.join(parent_folder,"bulk"),"json"),appid+"_reviews_"+review_language),'w') as file:
json.dump(output_json, file)
clear_output(wait=True)
print(str(i+1)+"/"+str(len(all_appids))+" (" +str(game_names[int(appid)])+"): "+str(output_json['num_reviews']))
print("Done!")
# +
review_language="all"
### SAVE ONLY ENGLISH REVIEWS!
for i in range(len(all_appids)):
output_json={}
output_reviews=[]
recids=set()
appid=str(all_appids[i])
review_subfolder='r'
if review_language != 'english':
review_subfolder=review_subfolder+'_'+review_language
for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
reviews=json.load(file)['reviews']
for review in reviews:
recid=review['recommendationid']
if review['language']=='english' and recid not in recids:
recids.add(recid)
output_reviews.append(review)
review_subfolder='r_nonsteampurchase'
if review_language != 'english':
review_subfolder=review_subfolder+'_'+review_language
for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
reviews=json.load(file)['reviews']
for review in reviews:
recid=review['recommendationid']
if review['language']=='english' and recid not in recids:
recids.add(recid)
output_reviews.append(review)
output_json['num_reviews']=len(output_reviews)
output_json['reviews']=output_reviews
if(len(output_reviews)!=0):
if not os.path.exists(os.path.join(parent_folder,"bulk")):
os.makedirs(os.path.join(parent_folder,"bulk"))
if not os.path.exists(os.path.join(os.path.join(parent_folder,"bulk"),"json_english")):
os.makedirs(os.path.join(os.path.join(parent_folder,"bulk"),"json_english"))
with open(os.path.join(os.path.join(os.path.join(parent_folder,"bulk"),"json_english"),appid+"_reviews_english"),'w') as file:
json.dump(output_json, file)
clear_output(wait=True)
print(str(i+1)+"/"+str(len(all_appids))+" (" +str(game_names[int(appid)])+"): "+str(output_json['num_reviews']))
print("Done!")
# -
# Build the list of all official Steam app ids not already covered by the
# SteamSpy-derived `all_appids` set.
with open(os.path.join(parent_folder, "official_appid_json_list"), 'r') as file:
    temp_file = json.load(file)
official_app_list = sorted(list(set([x['appid'] for x in temp_file['applist']['apps']])))
# Drop the first three entries (presumably non-game system apps — TODO confirm).
official_app_list.pop(0)
official_app_list.pop(0)
official_app_list.pop(0)
print(len(official_app_list))
for appid in all_appids:
    # Narrow the except: list.remove raises ValueError when absent.
    try:
        official_app_list.remove(appid)
    except ValueError:
        pass
# BUG FIX: dict has no .remove() — the original `game_names.remove(...)` calls
# always raised AttributeError and were silently swallowed by the bare
# excepts.  Use .pop(key, None), which removes the key if present and is a
# no-op otherwise (these ids may already have been popped earlier).
for blocked_id in (296750, 622050, 719950, 575050):  # 575050 blocked in US
    game_names.pop(blocked_id, None)
print(len(official_app_list))
print("Done!")
# +
#scan for early access games
# Scan the remaining appids for early-access games: fetch the most recent
# reviews of each app and look for the 'written_during_early_access' flag.
review_language="all"
#10289
review_subfolder='r_all_purchases_all_languages'
testing_parent_folder=os.path.join(parent_folder,'early_access_search')
if not os.path.exists(testing_parent_folder):
    os.makedirs(testing_parent_folder)
with open(os.path.join(testing_parent_folder,'errors'),'a') as error_file:
    error_file.write("---\n")
    # Start index 45822 resumes a previously interrupted run.
    for i in range(45822,len(official_app_list)):
        appid=str(official_app_list[i])
        #if not os.path.exists(os.path.join(testing_parent_folder,appid)):
        #    os.makedirs(os.path.join(testing_parent_folder,appid))
        #if not os.path.exists(os.path.join(os.path.join(testing_parent_folder,appid),review_subfolder)):
        #    os.makedirs(os.path.join(os.path.join(testing_parent_folder,appid),review_subfolder))
        print(appid)
        all_reviews=[]
        start_offset=0
        not_successful=True
        # First request is only used to read query_summary.total_reviews;
        # retry forever on network errors, sleeping 29 s between attempts.
        while not_successful:
            try:
                tempurl="https://store.steampowered.com/appreviews/"+appid+"?json=1&filter=recent&start_offset="+str(start_offset)+"&num_per_page=100&language=all&purchase_type=all&review_type=all"
                tempjson=requests.get(tempurl).text
                not_successful=False
            except:
                time.sleep(29)
        if len(tempjson)>12 and tempjson[0:12]=='{"success":1':
            # Round the review count up to the next multiple of 100, then
            # fetch (at most) the last three pages of reviews.
            num_reviews_rounded=int(json.loads(tempjson)['query_summary']['total_reviews']/100+1)*100
            for start_offset in range(num_reviews_rounded,max([num_reviews_rounded-300,-1]),-100):
                not_successful=True
                while not_successful:
                    try:
                        tempurl="https://store.steampowered.com/appreviews/"+appid+"?json=1&filter=recent&start_offset="+str(start_offset)+"&num_per_page=100&language=all&purchase_type=all&review_type=all"
                        #print(tempurl)
                        tempjson=requests.get(tempurl).text
                        not_successful=False
                    except:
                        time.sleep(29)
                if len(tempjson)>12 and tempjson[0:12]=='{"success":1':
                    if tempjson!='{"success":1,"query_summary":{"num_reviews":0},"reviews":[]}':
                        all_reviews=all_reviews+json.loads(tempjson)['reviews']
                else:
                    error_file.write(str(appid)+"-"+str(start_offset)+"-"+review_language+'\n')
            print(len(all_reviews))
            # for/else: the else branch runs only when no review triggered
            # the break, i.e. no early-access review was found.
            for review in all_reviews:
                if review['written_during_early_access']:
                    with open(os.path.join(testing_parent_folder,'found_games'),'a') as games_file:
                        games_file.write(str(appid)+'\n')
                    clear_output(wait=True)
                    print(str(i+1)+"/"+str(len(official_app_list))+" (" +str(appid)+"): Early access found!")
                    break
            else:
                clear_output(wait=True)
                print(str(i+1)+"/"+str(len(official_app_list))+" (" +str(appid)+"): Normal")
        else:
            error_file.write(str(appid)+"-"+str(start_offset)+"-"+review_language+'\n')
print("Done!")
# -
# +
# all reviews for apps not found by steam spy
# Merge the per-page review files of every app (all languages) into one
# deduplicated JSON file per app under <parent_folder>/bulk/json/.
review_language="all"
for i in range(len(all_appids)):
    output_json={}
    output_reviews=[]
    recids=set()           # recommendationids seen so far (dedup key)
    appid=str(all_appids[i])
    review_subfolder='r_complete'
    # Page files are named as digits, optionally with a trailing 'b'.
    for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
        if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
            with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
                reviews=json.load(file)['reviews']
                for review in reviews:
                    recid=review['recommendationid']
                    if recid not in recids:
                        recids.add(recid)
                        output_reviews.append(review)
    output_json['num_reviews']=len(output_reviews)
    output_json['reviews']=output_reviews
    if(len(output_reviews)!=0):
        if not os.path.exists(os.path.join(parent_folder,"bulk")):
            os.makedirs(os.path.join(parent_folder,"bulk"))
        if not os.path.exists(os.path.join(os.path.join(parent_folder,"bulk"),"json")):
            os.makedirs(os.path.join(os.path.join(parent_folder,"bulk"),"json"))
        with open(os.path.join(os.path.join(os.path.join(parent_folder,"bulk"),"json"),appid+"_reviews_"+review_language),'w') as file:
            json.dump(output_json, file)
    clear_output(wait=True)
    print(str(i+1)+"/"+str(len(all_appids))+" (" +str(appid)+"): "+str(output_json['num_reviews']))
print("Done!")
# +
review_language="all"
### SAVE ONLY ENGLISH REVIEWS! (for apps not found by steam spy)
# Same merge as the previous cell, but keeping only reviews whose
# 'language' field is 'english'; output goes to bulk/json_english/.
for i in range(len(all_appids)):
    output_json={}
    output_reviews=[]
    recids=set()           # recommendationids seen so far (dedup key)
    appid=str(all_appids[i])
    review_subfolder='r_complete'
    for filename in os.listdir(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder))):
        if filename.isdigit() or (len(filename)>1 and filename[-1]=='b' and filename[:-1].isdigit()):
            with open(os.path.join(os.path.join(os.path.join(parent_folder,appid),review_subfolder),filename),'r') as file:
                reviews=json.load(file)['reviews']
                for review in reviews:
                    recid=review['recommendationid']
                    if review['language']=='english' and recid not in recids:
                        recids.add(recid)
                        output_reviews.append(review)
    output_json['num_reviews']=len(output_reviews)
    output_json['reviews']=output_reviews
    if(len(output_reviews)!=0):
        if not os.path.exists(os.path.join(parent_folder,"bulk")):
            os.makedirs(os.path.join(parent_folder,"bulk"))
        if not os.path.exists(os.path.join(os.path.join(parent_folder,"bulk"),"json_english")):
            os.makedirs(os.path.join(os.path.join(parent_folder,"bulk"),"json_english"))
        with open(os.path.join(os.path.join(os.path.join(parent_folder,"bulk"),"json_english"),appid+"_reviews_english"),'w') as file:
            json.dump(output_json, file)
    clear_output(wait=True)
    print(str(i+1)+"/"+str(len(all_appids))+" (" +str(appid)+"): "+str(output_json['num_reviews']))
print("Done!")
# +
# SAVE STORE PAGE HTML
# SAVE STORE PAGE HTML
# Download every app's Steam store page and save the raw HTML to disk;
# suspiciously short responses (<= 500 chars) are logged as errors.
review_language="all"
html_folder=os.path.join(parent_folder,'store_html','game_pages')
if not os.path.exists(html_folder):
    os.makedirs(html_folder)
with open(os.path.join(parent_folder,'errors'),'a') as error_file:
    error_file.write("---\n")
    for i in range(0,len(all_appids)):
        appid=str(all_appids[i])
        not_successful=True
        # Retry forever on network errors, sleeping 29 s between attempts.
        while not_successful:
            try:
                temphtml=requests.get("https://store.steampowered.com/app/"+appid+"/").text
                not_successful=False
            except:
                time.sleep(29)
        if(len(temphtml)>500):
            with open(os.path.join(html_folder,appid), 'w') as file:
                file.write(temphtml)
        else:
            error_file.write(str(appid)+'\n')
        #tempjsons.append(requests.get("https://store.steampowered.com/appreviews/"+appid+"?json=1&filter=recent&start_offset="+str(start_offset)+"&num_per_page=100").text)
        #print(tempjsons[-1]=='{"success":1,"query_summary":{"num_reviews":0},"reviews":[]}')
        print(str(i+1)+"/"+str(len(all_appids))+" (" +str(appid)+")")
        #print(str(i+1)+"/"+str(len(all_appids))+" (" +str(game_names[int(appid)])+"): "+str(int(start_offset/100)))
print("Done!")
# -
# +
# Start a Firefox browser via selenium so that login/age-gated store
# pages can be fetched with a real (cookie-carrying) session.
from selenium import webdriver
driver = webdriver.Firefox()
# Warm-up request (appid 0 is not a real app page).
driver.get("https://store.steampowered.com/app/0/")
# -
# +
# Re-fetch, through the logged-in selenium browser, every saved store
# page that only contains the "must login" error, and overwrite the
# saved HTML with the authenticated page source.
game_html_folder=os.path.join(parent_folder,"store_html","game_pages")
list_of_files=os.listdir(game_html_folder)
for i,game_html_file in enumerate(list_of_files,1):
    with open(os.path.join(game_html_folder,game_html_file),'r') as file:
        game_html=file.read()
    if game_html.find('<span class="error">You must login to see this content.</span>')!=-1:
        driver.get("https://store.steampowered.com/app/"+str(game_html_file)+"/")
        with open(os.path.join(game_html_folder,game_html_file),'w') as file:
            file.write(driver.page_source)
    print(str(i)+"/"+str(len(list_of_files)))
# +
# List the saved store pages that still show the content-warning
# interstitial, and open each one in the selenium browser.
# NOTE(review): the page source fetched here is not written back to
# disk -- presumably the browser visit is only meant to dismiss the
# warning (set the cookie); confirm that a re-save pass follows.
game_html_folder=os.path.join(parent_folder,"store_html","game_pages")
list_of_files=os.listdir(game_html_folder)
for i,game_html_file in enumerate(list_of_files,1):
    with open(os.path.join(game_html_folder,game_html_file),'r') as file:
        game_html=file.read()
    if game_html.find('<h2>Hey, would you like to hide this kind of warning in the future?</h2>')!=-1:
        print(game_html_file)
        driver.get("https://store.steampowered.com/app/"+str(game_html_file)+"/")
print("Done!")
# -
| JupyterNotebooks/steam_review_scraper_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Maps
#
# Generate maps from tweet data
# +
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import pandas as pd
import re
from IPython.display import clear_output
# -
# Column names, country codes, and data directories used throughout
# this notebook.
BE = "BE"
COORDINATESFILE = "coordinates.csv"
COUNTRY = "country"
DATADIRLOCATIONS = "../data/locations/"
DATADIRTEXT = "../data/text/"
IDSTR = "id_str"
LATITUDE = "latitude"
LOCATION = "location"
LOCATIONLOWER = "location_lower"
LONGITUDE = "longitude"
NL = "NL"
SCREENNAME = "screenname"
TEXT = "text"
UNKNOWN = "-"          # placeholder for an unresolved country
USER = "user"
# +
MINFIRSTLOCATIONPARTLEN = 8

def squeal(text=None):
    """Wipe the current cell output and optionally print *text* (used as
    a lightweight progress indicator)."""
    clear_output(wait=True)
    if text is not None:
        print(text)
def read_coordinates():
    """Load the place-coordinate table and keep only the Dutch entries."""
    coords = pd.read_csv(DATADIRLOCATIONS + COORDINATESFILE)
    dutch = coords[coords[COUNTRY] == NL]
    return dutch
def read_user_locations(user_location_file):
    """Load the per-user location table, indexed by screen name."""
    path = DATADIRLOCATIONS + user_location_file
    return pd.read_csv(path, index_col=SCREENNAME)
def add_clean_locations(user_locations):
    """Add a LOCATIONLOWER column with a normalized, lower-cased place name.

    Normalization: keep only the first part before '/', ' - ' and ',',
    strip trailing country names ('Nederland', 'Belgium', '(NL)', ...)
    and trailing punctuation.
    NOTE(review): the raw value is read from the COUNTRY column, not the
    LOCATION column -- verify that the country column of this table
    indeed holds the free-text location strings being cleaned here.
    """
    locations = []
    for i in range(0,len(user_locations)):
        first_field_slash = str(user_locations.iloc[i][COUNTRY]).split("/")[0].strip()
        first_field_hyphen = first_field_slash.split(" - ")[0].strip()
        first_field_comma = first_field_hyphen.split(",")[0].strip()
        # First whitespace-separated token; empty string if there is none.
        try: first_field_space = str(first_field_comma).split()[0]
        except: first_field_space = ""
        # A long first token is assumed to be the whole place name.
        if len(first_field_space) >= MINFIRSTLOCATIONPARTLEN: first_field_comma = first_field_space
        first_field_comma = re.sub(" *(nederland|the netherlands|netherlands|\(nl\)|belgië|belgie|belgium|\(be\))$","",\
                                   first_field_comma,flags=re.IGNORECASE)
        first_field_comma = re.sub("[.?!:;]*$","",first_field_comma)
        locations.append(first_field_comma.lower())
    user_locations[LOCATIONLOWER] = locations
    return(user_locations)
def read_tweets(file_pattern):
    """Concatenate all gzipped tweet CSVs whose file name matches
    *file_pattern* into one deduplicated DataFrame indexed by tweet id."""
    file_name_list = sorted(os.listdir(DATADIRTEXT))
    tweets_list = []
    for file_name in file_name_list:
        if re.search(file_pattern,file_name):
            squeal(file_name)  # progress indicator
            tweets_list.append(pd.read_csv(DATADIRTEXT+file_name,index_col=IDSTR,compression="gzip"))
    return(pd.concat(tweets_list).drop_duplicates())
# +
# Load one month of tweets plus the user-location and coordinate tables.
MONTH = "202007"
tweets = read_tweets(MONTH)
# +
LOCATIONSFILE = f"locations-{MONTH}.csv"
user_locations = read_user_locations(LOCATIONSFILE)
# Drop users whose country could not be resolved, then add the
# normalized location column.
user_locations = user_locations[user_locations[COUNTRY] != UNKNOWN]
user_locations = add_clean_locations(user_locations)
# -
# Index the coordinate table by lower-cased place name and keep only
# users whose cleaned location has known coordinates.
coordinates = read_coordinates()
coordinates[LOCATIONLOWER] = [location.lower() for location in coordinates[LOCATION]]
coordinates = coordinates.set_index(LOCATIONLOWER)
user_locations_with_coordinates = user_locations[user_locations[LOCATIONLOWER].isin(coordinates.index)]
# +
#QUERYTOPIC = "corona|covid|mondkapje|rivm|blijfthuis|houvol|huisarts|flattenthecurve"
#QUERYTOPIC = "mondkapje"
# Count tweets per location, either all tweets (empty QUERYTOPIC) or
# only those matching the topic regex (here: 1.5-metre distancing).
QUERYTOPIC = "1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter"
#QUERYTOPIC = ""
location_counts = {}
counter = 0
if QUERYTOPIC == "":
    # No topic filter: count every tweet of every geolocated user.
    selected_tweets = tweets[tweets[USER].isin(user_locations_with_coordinates.index)]
    selected_tweet_groups = selected_tweets.groupby([USER])
    for user in selected_tweet_groups.groups:
        location = user_locations_with_coordinates.loc[user][LOCATIONLOWER]
        if not location in location_counts: location_counts[location] = 0
        location_counts[location] += len(selected_tweet_groups.groups[user])
        counter += 1
        if counter % 1000 == 0: squeal(counter)
else:
    # Topic filter: scan each tweet's text against the regex.
    regex = re.compile(QUERYTOPIC,flags=re.IGNORECASE)
    df = tweets[tweets[USER].isin(user_locations_with_coordinates.index)]
    for i in range(0,len(df)):
        if regex.search(str(df.iloc[i][TEXT])):
            user = df.iloc[i][USER]
            location = user_locations_with_coordinates.loc[user][LOCATIONLOWER]
            if not location in location_counts: location_counts[location] = 1
            else: location_counts[location] += 1
        counter += 1
        if counter % 100 == 0: squeal(counter)
squeal(counter)
# Re-order the dict by descending count (insertion order is preserved).
location_counts = {location:location_counts[location]
                   for location in sorted(location_counts.keys(),key=lambda l:location_counts[l],reverse=True)}
# -
print(f"found: {sum(location_counts.values())} tweets; coverage: {round(sum(location_counts.values())/len(tweets)*100,1)}%")
def make_plot_data(location_counts,coordinates):
    """Build the longitude/latitude/count/label lists for the map plot.

    Locations sharing the same (longitude, latitude) pair are merged into
    one point whose count is the sum of their counts; locations with
    missing coordinates ("nan nan") are dropped.
    """
    xs, ys, counts, names = [], [], [], []
    point_index = {}  # "lon lat" key -> position in the output lists
    for place in location_counts:
        row = coordinates.loc[place]
        key = str(row[LONGITUDE]) + " " + str(row[LATITUDE])
        if key in point_index:
            # Duplicate coordinates: fold this count into the existing point.
            counts[point_index[key]] += location_counts[place]
        elif key != "nan nan":
            xs.append(row[LONGITUDE])
            ys.append(row[LATITUDE])
            counts.append(location_counts[place])
            names.append(place)
            point_index[key] = len(xs) - 1
    return (xs, ys, counts, names)
x,y,data_values,labels = make_plot_data(location_counts,coordinates)
len(data_values)
def find_missing_locations(x,y,coordinates):
    """Print every coordinate-table location whose (lon, lat) pair does
    not occur among the plotted points x/y (sanity check)."""
    keys = {}
    for i in range(0,len(x)):
        key = " ".join([str(x[i]),str(y[i])])
        keys[key] = True
    for location in coordinates.index:
        key = " ".join([str(coordinates.loc[location][LONGITUDE]),str(coordinates.loc[location][LATITUDE])])
        if not key in keys.keys(): print(location)
find_missing_locations(x,y,coordinates)
# Top-10 locations by tweet count, and the 20 least-tweeting locations
# together with their population size.
[(data_values[i],labels[i]) for i in sorted(range(0,len(data_values)),key=lambda j:data_values[j],reverse=True)][:10]
[(data_values[i],labels[i],int(coordinates.loc[labels[i]]["population_size"])) \
 for i in sorted(range(0,len(data_values)),key=lambda j:data_values[j],reverse=True)][-20:]
# +
# Draw the tweet counts as scaled blobs on a map of the Netherlands.
# Blob sizes are normalized against Amsterdam (population ~778888).
BLOBFACTOR = 0.01*778888/location_counts["amsterdam"]
FONTSIZE = 7
PLOTFILENAME = f"map-{MONTH}.png"
img_netherlands = mpimg.imread("nederland.png")
plt.figure(figsize=(12.5,15))
# extent = [lon_min, lon_max, lat_min, lat_max] of the background image.
plt.imshow(img_netherlands,alpha=0.4,extent=[3.3,7.2,50.75,53.55],aspect="auto")
plt.scatter(x,y,s=[data_value*BLOBFACTOR for data_value in data_values],alpha=0.6)
seen = {}  # NOTE(review): unused here -- leftover from make_plot_data?
for i in range(0,len(labels)): plt.annotate(labels[i],(x[i],y[i]),fontsize=FONTSIZE)
plt.savefig(PLOTFILENAME)
plt.show()
# +
import matplotlib
# Log-log scatter of population size vs. number of tweets per location.
PLOTFILENAME = f"correlation-{MONTH}.png"
font = {"size":14}
matplotlib.rc("font",**font)
x_correlation = []
y_correlation = []
labels_correlation = []
for i in range(0,len(data_values)):
    location = labels[i]
    nbr_of_tweets = data_values[i]
    raw_population = coordinates.loc[location]["population_size"]
    # Skip locations with an unknown population size. The NaN test
    # (x == x is False only for NaN) must run BEFORE the int()
    # conversion: int(nan) raises ValueError, so the original
    # post-conversion self-comparison guard could never filter anything.
    if raw_population == raw_population:
        x_correlation.append(int(raw_population))
        y_correlation.append(nbr_of_tweets)
        labels_correlation.append(location)
plt.figure(figsize=(6,4))
plt.scatter(x_correlation,y_correlation)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("population size")
plt.ylabel("number of tweets")
plt.tight_layout()
plt.savefig(PLOTFILENAME)
plt.show()
# -
# ## time-consuming...
def get_locations_without_coordinates():
    """Count tweets per cleaned location for users whose location has no
    entry in the coordinate table (to find places worth adding).

    Uses the module-level tweets, user_locations and coordinates tables.
    """
    user_locations_without_coordinates = user_locations[~user_locations[LOCATIONLOWER].isin(coordinates.index)]
    missing_location_counts = {}
    counter = 0
    selected_tweets = tweets[tweets[USER].isin(user_locations_without_coordinates.index)]
    selected_tweet_groups = selected_tweets.groupby([USER])
    for user in selected_tweet_groups.groups:
        location = user_locations_without_coordinates.loc[user][LOCATIONLOWER]
        if not location in missing_location_counts: missing_location_counts[location] = 0
        missing_location_counts[location] += len(selected_tweet_groups.groups[user])
        counter += 1
        if counter % 1000 == 0: squeal(counter)
    return(missing_location_counts)
# +
#missing_location_counts = get_locations_without_coordinates()
#{location:missing_location_counts[location] \
# for location in sorted(missing_location_counts.keys(),key=lambda l:missing_location_counts[l],reverse=True)}
# -
| maps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quickstart
# If you have a working version of Python 2 or 3 on your system
# (we recommend [Anaconda Python](https://www.continuum.io/downloads)),
# you can simply install the latest stable release of the *lightkurve* package using ``pip``:
# ```
# $ pip install lightkurve
# ```
# With *lightkurve* installed, it is easy to extract brightness time series data (astronomers call this a *lightcurve*)
# from the tiny images of stars collected by the Kepler spacecraft.
#
# For example, let's download and display the pixels of a famous star named [KIC 8462852](https://en.wikipedia.org/wiki/KIC_8462852), also known as *Tabby's Star* or *Boyajian's Star*.
#
# First, we start Python and import the `KeplerTargetPixelFile` class:
from lightkurve import KeplerTargetPixelFile
# Next, we obtain the Kepler pixel data for the star from the [data archive](https://archive.stsci.edu/kepler/):
# NOTE(review): from_archive is the API of the lightkurve version this
# tutorial was written against -- confirm it still exists in the pinned
# version before re-running.
tpf = KeplerTargetPixelFile.from_archive(8462852, quarter=16,
                                         quality_bitmask='hardest');
# Next, let's display the first image in this data set:
# %matplotlib inline
tpf.plot(frame=1);
# It looks like the star is an isolated object, so we can extract a lightcurve by simply summing up all the pixel values in each image:
lc = tpf.to_lightcurve(aperture_mask='all');
# The above method returned a `KeplerLightCurve` object which gives us access to the flux over time, which are both available as array objects. The time is in units of *days* and the flux is in units *electrons/second*.
lc.time, lc.flux
# We can plot these data using the `plot()` method:
lc.plot(linestyle='solid');
# The plot reveals a short-lived 20% dip in the brightness of the star. It looks like we re-discovered one of the [intriguing dips in Tabby's star](https://en.wikipedia.org/wiki/KIC_8462852#Luminosity)!
| docs/source/tutorials/quickstart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import pymc3 as pm
import numpy as np
import theano
import seaborn
import matplotlib.pyplot as plt
seaborn.set_context('poster')
seaborn.set_style('white')
seaborn.set(color_codes=True)
# %matplotlib inline
# +
# Bayesian two-sample comparison (BEST-style): draw two small normal
# samples, model each with its own mean and spread, and inspect the
# posterior of the differences and the effect size.
loc1 = 10
scale1 = 1
size1 = 4
loc2 = 12
scale2 = 3
size2 = 6
sample1 = np.random.normal(loc=loc1, scale=scale1, size=size1)
sample2 = np.random.normal(loc=loc2, scale=scale2, size=size2)
with pm.Model() as model:
    # Group 1: flat prior on the mean, exponential prior on the sd.
    mu1 = pm.Flat('mu1')
    sd1 = pm.Exponential('sd1', lam=1)
    data1 = pm.Normal('data1', mu=mu1, sd=sd1, observed=sample1)
    # Group 2: same structure.
    mu2 = pm.Flat('mu2')
    sd2 = pm.Exponential('sd2', lam=1)
    data2 = pm.Normal('data2', mu=mu2, sd=sd2, observed=sample2)
    # Derived quantities tracked in the trace.
    diff_mu = pm.Deterministic('diff_mu', mu2 - mu1)
    diff_sd = pm.Deterministic('diff_sd', sd2 - sd1)
    effect_size = pm.Deterministic('effect size',
                                   diff_mu / pm.sqrt((sd1**2 + sd2**2) / 2))
# -
sample1
sample2
# Fit with ADVI, then sample from the variational approximation.
# NOTE(review): pm.variational.advi / sample_vp (and pm.sqrt above) are
# the old PyMC3 (<3.1) API -- confirm the pinned pymc3 version.
with model:
    params = pm.variational.advi(n=100000)
    trace = pm.variational.sample_vp(params, draws=5000)
# Discard the first 200 draws as warm-up for the plots.
pm.traceplot(trace[200:])
pm.plot_posterior(trace[200:], varnames=['diff_mu', 'diff_sd', 'effect size'], color='#87ceeb')
pm.plot_posterior(trace[200:], varnames=['mu1', 'mu2', 'sd1', 'sd2'], color='#87ceeb',)
| notebooks/intercomparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import spacy
from wordcloud import WordCloud, ImageColorGenerator
from nltk.stem.porter import *
import matplotlib.pyplot as plt
# -
# Load the scraped (Portuguese) product reviews and drop empty rows.
df = pd.read_csv("data/reviews_final.csv")
df.head()
df.dropna(inplace=True)
df.shape
# Portuguese spaCy model (small news pipeline).
nlp = spacy.load('pt_core_news_sm')
# # Word cloud reviews
# +
# Create stopword set
# NOTE(review): the spaCy model is loaded a second time here -- redundant
# with the cell above, but harmless.
nlp = spacy.load('pt_core_news_sm')
stopwords = set(nlp.Defaults.stop_words)
# Domain stopwords: generic phone-review words that would dominate the cloud.
stopwords.update(['celular', 'aparelho', 'produto', 'dia', 'xiaomi', 'veio', 'telefone'])
# One big string of all review texts.
text = " ".join(str(review) for review in df['review'])
wc = WordCloud(stopwords=stopwords, background_color="white").generate(text)
# -
# Show
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show()
# Load the phone photo to use as the word-cloud mask shape.
# NOTE(review): PIL's Image is never imported in this notebook, so this
# line raises NameError as written -- add `from PIL import Image` to the
# imports cell.
readmi_mask = np.array(Image.open("images/redmi_8.jpg"))
readmi_mask
# +
# Create a word cloud image
# Masked variant: words are laid out inside the phone silhouette.
wc = WordCloud(background_color="white", max_words=100, mask=readmi_mask,
               stopwords=stopwords, contour_width=3, contour_color='firebrick')
# Generate a wordcloud
wc.generate(text)
# show
plt.figure(figsize=[20,10])
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show()
| word_cloud.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Classification, Regression and Other Prediction Model
# ## Dataset
#
# We‘ll use "201707-citibike-tripdata.csv.zip" (after preprocessed in HW0)
#
# ## Schema
#
# - Every station’s information
# - id, name, lat, lng
# - Every stations’ flow data
# - id, time, in-flow, out-flow
# ### Import packages
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import os
from time import time
from plotly.graph_objs import *
from mpl_toolkits.mplot3d import Axes3D
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR
from sklearn.tree import DecisionTreeRegressor, ExtraTreeClassifier
from sklearn.linear_model import BayesianRidge
from statsmodels.tsa.arima_model import ARIMA
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# %matplotlib inline
# ### Read csv to dataframe
# use pandas to read data
# preprocessed dataset
# Load the preprocessed trip data plus the per-station info and the
# half-hourly in/out flow tables (one column per station).
df = pd.read_csv('./201707-citibike-tripdata-preprocessed.csv')
df.head()
# every station's information
station_info = pd.read_csv('./station_info.csv')
station_info.head()
# every station's in-flow data
station_in_flow = pd.read_csv('./in_flow.csv')
station_in_flow.head()
# every station's out-flow data
station_out_flow = pd.read_csv('./out_flow.csv')
station_out_flow.head()
# ## Using historical (14 days) data to predict every station's outflow tomorrow (1 day)
# ### Extract following values
#
# - station_id
# - outflow(and this is we want to predict)
station_out_flow.head()
# ### Discretize outflow
# - discretize with divided by every station's outflow standard deviation and round to integer
# - process them so it can be solve as a classification problem
# By previous homework's results, we can find divided by 5 is a good way to discretize so apply it and round the value to integer.
# Integer class labels: outflow // ~5 (banker's rounding via round).
station_out_dis = (station_out_flow / 5).round(0)
station_out_dis.head()
# ### Use previous (14 days) data to estimate next days’ outflow
# - use a sliding window to increase our data (shift k days each time, and determine the k = 1 )
# +
def get_data(isdis, idx, st):
    """Return one 15-day window (15 * 48 half-hour slots) of outflow for
    station column *idx*, starting *st* days into the month, as a
    single-row DataFrame with columns 0..719.

    isdis -- True: read the discretized table, False: the raw outflows.
    """
    source = station_out_dis if isdis else station_out_flow
    window = pd.DataFrame(source.iloc[st * 48 : (15 + st) * 48, idx]).T
    window.columns = [i for i in range(window.shape[1])]
    return window

def get_station(isdis, idx):
    """Stack the 16 sliding windows (shift = 1 day) for station *idx*:
    row i covers days i..i+14."""
    # One pd.concat over a list is O(n) overall; the original rebuilt the
    # frame with DataFrame.append on every iteration (quadratic copying,
    # and .append is removed in modern pandas) and carried an unused
    # `res` accumulator, both dropped here.
    return pd.concat([get_data(isdis, idx, i) for i in range(16)])
# -
# - We can use ```get_station(is_discrete, index)``` to get the station's outflow data from 7/01 - 7/15 to 7/16 - 7/30 in each row
get_station(True, 1).head()
# ### Evaluate each model
#
# Calculate the mean accuracy, mean square error and using time
def eval_model(isdis, clf):
    """Fit *clf* per station and per timeslot and print the averaged
    test score over all 634 stations x 48 timeslots, plus wall time.

    isdis -- True: discretized targets (classification), False: raw
    outflow (regression).  Features: the first 14 days of each window;
    targets: the 48 slots of day 15.
    NOTE(review): for regressors, estimator.score() returns R^2, not a
    mean square error, so the second print label is misleading.
    """
    ans = 0
    t = time()
    # 634 = number of station columns in the flow tables (hard-coded).
    for idx in range(634):
        data = get_station(isdis, idx)
        train_x, test_x, train_y, test_y = train_test_split(data.iloc[:, :14 * 48], data.iloc[:, 14 * 48:], test_size = 0.3)
        # One model per target timeslot; scores are summed.
        for i in range(48):
            ans += clf.fit(train_x, train_y.iloc[:, i]).score(test_x, test_y.iloc[:, i])
    if isdis == True:
        print 'Average accuracy for 48 timeslot: {:.4f}'.format((ans / 634.0 / 48.0))
    else:
        print 'Mean square error for 48 timeslot: {:.4f}'.format((ans / 634.0 / 48.0))
    print 'Time: {:.2f} sec'.format(time() - t)
# ## Try following models (as classification problem)
#
# compare the computation time and result ( average accuracy for 48 timeslot )
# ### K-Nearest-Neighbor
#
# Classifier implementing the k-nearest neighbors vote.
#
# By previous homework, the results of Kmeans and PCA => Agglomerative Clustering suggest we can divide the data into 3 - 4 parts, so we choose k = 3 or 4.
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)
# k-NN classification, k=3 and k=4 (cluster analyses in earlier homework
# suggested 3-4 natural groups), one-vs-rest over the class labels.
clf = OneVsRestClassifier(KNeighborsClassifier(n_neighbors = 3))
eval_model(True, clf)
clf = OneVsRestClassifier(KNeighborsClassifier(n_neighbors = 4))
eval_model(True, clf)
# ### Naive Bayes
#
# Try multinomial and Gaussian to predict the data.
#
# - Naive Bayes classifier for multinomial models
#
# The multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
#
# - Gaussian Naive Bayes (GaussianNB)
#
# Can perform online updates to model parameters via partial_fit method. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
# http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html)
# Naive Bayes on the discretized counts: multinomial vs. Gaussian.
clf = OneVsRestClassifier(MultinomialNB())
eval_model(True, clf)
clf = OneVsRestClassifier(GaussianNB())
eval_model(True, clf)
# ### Random Forest
#
# That is a random forest classifier and setting max_depth to prevent the decision tree being too deep leads to overfitting and wasting time.
#
# A random forest is a meta estimator that fits a number of decision tree classifiers on various sub-samples of the dataset and use averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is always the same as the original input sample size but the samples are drawn with replacement if bootstrap=True (default).
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
# Random forest at two tree depths; max_depth caps growth to limit
# overfitting and runtime.
clf = OneVsRestClassifier(RandomForestClassifier(max_depth = 2))
eval_model(True, clf)
clf = OneVsRestClassifier(RandomForestClassifier(max_depth = 5))
eval_model(True, clf)
# ### Support vector machine(SVC)
#
# C-Support Vector Classification and the implementation is based on libsvm. The fit time complexity is more than quadratic with the number of samples which makes it hard to scale to dataset with more than a couple of 10000 samples.
#
# Try to use different kernels to see the accuracy and using time.
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)
# +
# SVC: sweep the four standard kernels and compare accuracy/time.
ker = ['linear', 'poly', 'rbf', 'sigmoid']
for item in ker:
    print 'kernel: {}'.format(item)
    clf = OneVsRestClassifier(SVC(kernel = item))
    eval_model(True, clf)
# -
# ### Other
#
# Use extremely randomized tree classifier to predict the data.
#
# Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the max_features randomly selected features and the best split among those is chosen. When max_features is set 1, this amounts to building a totally random decision tree.
#
# Also setting max_depth to prevent the decision tree being too deep leads to overfitting and wasting time.
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.tree.ExtraTreeClassifier.html)
# Extremely randomized tree at two depths (depth capped as above).
clf = OneVsRestClassifier(ExtraTreeClassifier(max_depth = 2))
eval_model(True, clf)
clf = OneVsRestClassifier(ExtraTreeClassifier(max_depth = 5))
eval_model(True, clf)
# ### Compare and Observation
# - K-Nearest-Neighbor
#
# n_neighbors is set to 4 is more accuracy but the time it takes is also more. They take about 105 seconds and the accuracy is almost 80%.
#
# - Naive Bayes
#
# Choose the multinomial naive bayes is better because the outflows are match the multinomial model rather than Gaussian during weekday and weekend. The time they take is less than KNN and Multinomial's accuracy is almost 80% too.
#
# - Random Forest
#
# Compared with the max_depth and I find the time is getting much without improving accuracy. The time is much more than other models because it builds many trees to decide the predicted results.
#
# - Support vector machine(SVC)
#
# Every kernel's results are almost the same but sometime we can find accuracy is a little bit higher and taking less time in linear kernel.
#
# - Extremely Randomized Tree
#
# Extremely randomized tree classifier's accuracy is less than other models and the using time is not decreasing. QQ
#
# ## Calculate the confusion matrix
# generate the label by collecting all the target values and construct the confusion matrix.
# +
# Collect every class label that occurs in the last 48 target columns,
# then accumulate a confusion matrix for the first timeslot of day 15
# across all stations, using multinomial NB.
# NOTE(review): station_out_dis.iloc[:, -i] with i == 0 selects column 0,
# not the last column -- verify the intended column range.
label = set()
for i in range(48):
    for item in pd.unique(station_out_dis.iloc[:, -i]):
        label.add(item)
label = list(label)
num_l = len(label)
clf = OneVsRestClassifier(MultinomialNB())
# NOTE(review): np.int is removed in NumPy >= 1.24; fine on the old
# NumPy this Python 2 notebook targets.
mat = np.zeros([num_l, num_l], dtype = np.int)
for idx in range(634):
    data = get_station(True, idx)
    train_x, test_x, train_y, test_y = train_test_split(data.iloc[:, :14 * 48], data.iloc[:, 14 * 48:], test_size = 0.3)
    mat += (confusion_matrix(clf.fit(train_x, train_y.iloc[:, 0]).predict(test_x), test_y.iloc[:, 0], labels = label))
# -
# Print the confusion matrix for predicting the first hour in one day
# for Naive Bayes.
mat
# ## Performance with different parameters in SVM
# Test following parameters
#
# - kernel
# - linear
# - poly
# - rbf
# - sigmoid
#
# They are almost the same performance in using time about 130 seconds and the random training data causing the results a little different.
#
# The linear and poly kernels differ in the power of the distribution, and their predicted results are sometimes higher than rbf because the testing data sometimes matches that distribution.
#
# We can find that the accuracy of the rbf kernel is sometimes higher than the others; I think the reason is that rbf can approximate any non-linear function with high precision. So it can fit the data better than the others, and from previous work we know the data is closer to a multinomial distribution.
#
# Sigmoid model is similar to the logistic regression model and the using time is more than others because it defines curves according to where the logistic value is greater than some value (modeling probability).
#
# According to above models, I consider the best model is rbf because it can match the training data in whatever linear, polynomial or something high dimensional distribution and takes not much time.
# ## Try following models (as regression problem)
#
# compare the computation time and result ( Mean square error )
# ### ARIMA
# +
# Fit an ARIMA(1,1,1) to the first station's half-hourly outflow series
# over the whole month and print the fit summary.
rng = list(pd.date_range("2017-07-01 00:00:00", "2017-07-31 23:30:00", freq = "30min"))
ts = pd.DataFrame(station_out_flow.iloc[:, 0].values, index = rng)
ts.columns = ['outflow']
mod = ARIMA(ts, order = (1,1,1))
res = mod.fit(disp = False)
print res.summary()
# -
# ### Bayesian regression
#
# Use Bayesian ridge regression and try different n_iter settings to see how much time it takes and how much the accuracy improves.
#
# Fit a Bayesian ridge model and optimize the regularization parameters lambda (precision of the weights) and alpha (precision of the noise).
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.BayesianRidge.html)
# Bayesian ridge regression at two iteration budgets.
# NOTE(review): wrapping a regressor in OneVsRestClassifier is unusual --
# confirm this combination behaves as intended.
clf = OneVsRestClassifier(BayesianRidge(n_iter = 300))
eval_model(False, clf)
clf = OneVsRestClassifier(BayesianRidge(n_iter = 500))
eval_model(False, clf)
# ### Decision tree regression
#
# A decision tree, but using a regressor. We also set max_depth to prevent the tree from growing too deep, which leads to overfitting and wasted time. Again we check how much time it takes and how much the accuracy improves.
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)
# Decision tree regression at two depths (depth capped as above).
clf = OneVsRestClassifier(DecisionTreeRegressor(max_depth = 2))
eval_model(False, clf)
clf = OneVsRestClassifier(DecisionTreeRegressor(max_depth = 5))
eval_model(False, clf)
# ### Support vector machine(SVR)
#
# Use Epsilon-Support Vector Regression and the implementation is based on libsvm.
#
# Also try some kernel to see the results and time it takes.
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html)
# +
# SVR: sweep the four standard kernels on the raw (regression) targets.
ker = ['linear', 'poly', 'rbf', 'sigmoid']
for item in ker:
    print 'kernel: {}'.format(item)
    clf = OneVsRestClassifier(SVR(kernel = item))
    eval_model(False, clf)
# -
# ### Other
#
#
# Regression based on k-nearest neighbors.
#
# The target is predicted by local interpolation of the targets associated of the nearest neighbors in the training set.
# k-NN regression with k=3 and k=4 (same k choices as the classifier).
clf = OneVsRestClassifier(KNeighborsRegressor(n_neighbors = 3))
eval_model(False, clf)
clf = OneVsRestClassifier(KNeighborsRegressor(n_neighbors = 4))
eval_model(False, clf)
# ### Compare and Observation
# - ARIMA
#
# Autoregressive Integrated Moving Average ARIMA(p,d,q) Model and the order is (p,d,q) order of the model for the number of AR parameters, differences, and MA parameters to use. And the model's error rate is less than others sometimes.
#
# - Bayesian regression
#
# Using Bayesian ridge regression, fitting a Bayesian ridge model and optimizing the regularization parameters lambda (precision of the weights) and alpha (precision of the noise). So the using time is much more.
#
# - Decision tree regression
#
# The strategy used to choose the split at each node, supported strategies are “best” to choose the best split. And criteria are “mse” for the mean squared error, which is equal to variance reduction as feature selection criterion and minimizes the L2 loss using the mean of each terminal node. So the mean square error is low.
#
# - Support vector machine(SVR)
#
# Compared to SVC, the results are more less error rate. And also taking more time, to other regression models are more high error rate.
#
# - Regression k-nearest neighbors
#
# The regression knn weighted every node and than do knn, so the using time is more than SVR, decision tree. More fitable than knn but using time is more much too.
#
# The using time of regression models is more than classification models but the results seens more accuracy and low error rate.
# ## Other
# Try other method to solve this prediction problem,and give a result and some explanation.
#
# Using linear SVC and linear SVR.
#
# Linear Support Vector Classification.
#
# Similar to SVC with parameter kernel=’linear’, but implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples.
#
# This class supports both dense and sparse input and the multiclass support is handled according to a one-vs-the-rest scheme.
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html)
#
# Linear Support Vector Regression.
#
# Similar to SVR with parameter kernel=’linear’, but implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples.
#
# This class supports both dense and sparse input.
#
# [package](http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVR.html)
# liblinear-based SVM variants: classification then regression.
clf = OneVsRestClassifier(LinearSVC())
eval_model(True, clf)
clf = OneVsRestClassifier(LinearSVR())
eval_model(False, clf)
# ### Compare and Observation
# Linear SVC model's accuracy is less than others sometime but the using time is less. Could cause it implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples.
#
# The outcomes of the Linear SVR are much the same of the SVR model in linear kernel but the mean square error is less than the just SVR model. I think it could be good at the choice of penalties and loss functions and should scale better to large numbers of samples.
#
# And we can find that in large dataset, using linear SVC or SVR would predict much accuracy.
| hw3/Group3 hw3 0416077.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + deletable=true editable=true
from mvpa2.suite import *
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from itertools import product, combinations
from numpy.testing import assert_array_equal
# + deletable=true editable=true
# %matplotlib inline
# + [markdown] deletable=true editable=true
# # Some functions to filter matrices and to compute averages of that matrices
# + deletable=true editable=true
def compute_within_between_mean(mat, wanted_labels, labels):
    """Return ``(within_mean, between_mean)`` for the ROIs in *wanted_labels*.

    *mat* is a square matrix whose rows/columns are ordered according to
    *labels*.  The "within" mean averages every off-diagonal entry whose row
    and column both belong to *wanted_labels*; the "between" mean averages
    the entries connecting that subset to the remaining ROIs, in both
    directions, so asymmetric matrices are handled correctly (for symmetric
    matrices the average is unaffected).
    """
    assert(mat.shape[0] == mat.shape[1])
    assert(len(labels) == mat.shape[0])
    selected = np.asarray([lbl in wanted_labels for lbl in labels])
    others = np.logical_not(selected)
    # Within-subset entries, diagonal excluded.
    within = np.zeros(mat.shape, dtype=bool)
    within[np.ix_(selected, selected)] = True
    np.fill_diagonal(within, False)
    # Between-subset entries in both directions.
    between = np.zeros(mat.shape, dtype=bool)
    between[np.ix_(selected, others)] = True
    between[np.ix_(others, selected)] = True
    return (mat[within].mean(), mat[between].mean())
# + deletable=true editable=true
def filter_matrix(mat, wanted_labels, labels):
    """Return the sub-matrix of *mat* restricted to *wanted_labels*.

    Rows/columns of *mat* are ordered according to *labels*; the result is a
    labelled DataFrame whose index and columns are the selected labels, kept
    in their original order.

    Fix: the original built the index with ``filter(lambda ...)``, which
    returns a lazy iterator under Python 3 and cannot be used as a pandas
    index; a list comprehension is identical under Python 2 and correct
    under Python 3.
    """
    assert(mat.shape[0] == mat.shape[1])
    assert(len(labels) == mat.shape[0])
    mask_labels = [lbl in wanted_labels for lbl in labels]
    subset_labels = [lbl for lbl in labels if lbl in wanted_labels]
    return pd.DataFrame(mat[np.ix_(mask_labels, mask_labels)], index=subset_labels, columns=subset_labels)
# + deletable=true editable=true
# save correlations within/between across core and extended systems
# save correlations within/between across core and extended systems
def compute_correlations(mean_rdms, systems_to_compute, labels):
    """For each requested system, tabulate within/between correlation means.

    For every matrix in *mean_rdms* (DataFrames ordered per *labels*) the
    within-system and between-system means are computed via
    ``compute_within_between_mean`` and collected in a DataFrame with
    columns ``['within', 'between']``, one row per input matrix.
    Returns a dict mapping system name -> DataFrame.

    NOTE(review): ROI membership is looked up in the module-level ``systems``
    dict rather than passed in -- confirm that is intentional.
    """
    systems_correlations = dict()
    for name in systems_to_compute:
        roi_subset = systems[name]
        rows = [compute_within_between_mean(rdm.values, roi_subset, labels)
                for rdm in mean_rdms]
        systems_correlations[name] = pd.DataFrame(np.asarray(rows),
                                                  columns=['within', 'between'])
    return systems_correlations
# + deletable=true editable=true
# let's make a dictionary containing infos on the different systems
# ROI label lists for each face-processing system.  Labels follow the
# 'Region - Hemisphere' naming used in roi_coord.csv; the aggregate entries
# 'core', 'extended' and 'core+extended' are derived below.
systems = {
    'early_visual': ['EV1 - L', 'EV1 - R', 'EV2 - L', 'EV2 - R'],
    'precuneus': ['dPreCun - L', 'dPreCun - R',
                  'mPreCun - L', 'mPreCun - R',
                  'vPreCun - L', 'vPreCun - R'],
    'dorsal_core': ['pMTG - L', 'pMTG - R',
                    'mMTG - L', 'mMTG - R',
                    'aMTG - L', 'aMTG - R'],
    # NOTE(review): only 'aFus - R' is listed -- confirm the left anterior
    # fusiform is intentionally absent (e.g. not in the parcellation).
    'ventral_core': ['OccFus - L', 'OccFus - R',
                     'pFus - L', 'pFus - R',
                     'mFus - L', 'mFus - R',
                     'aFus - R'],
    'anterior_core': ['IFG1 - L', 'IFG2 - L', 'IFG2 - R'],
    'theory_of_mind': [ 'TPJ - L', 'TPJ - R', 'MPFC - L', 'MPFC - R']
}
# Derived aggregates used for the within/between analyses.
systems['core'] = systems['dorsal_core'] + systems['ventral_core'] + systems['anterior_core']
systems['extended'] = systems['precuneus'] + systems['theory_of_mind']
systems['core+extended'] = systems['core'] + systems['extended']
# + [markdown] deletable=true editable=true
# # First using Raiders
# + deletable=true editable=true
part1 = h5load('raidershpal_rdm_level2_part1.hdf5')
part2 = h5load('raidershpal_rdm_level2_part2.hdf5')
# + deletable=true editable=true
labels = pd.read_csv('roi_coord.csv')['Abbreviation'].tolist()
# + deletable=true editable=true
len(part1) == len(part2)
# + deletable=true editable=true
# they are in order, so we can average them right away
mean_rdms = [(np.arctanh(p1) + np.arctanh(p2))/2 for p1, p2 in zip(part1, part2)]
# + deletable=true editable=true
# make them symmetric and also normalize them by the diagonal
mean_rdms_norm = []
for rdm in mean_rdms:
tmp = rdm.copy()
tmp += rdm.T
tmp /= 2
# go back to correlation
tmp = np.tanh(tmp)
# normalize by noise values, as in Guntupalli et al., 2016
diag = np.diag(tmp).reshape((1, -1))
sqrt_diag = np.sqrt((diag * diag.T))
tmp /= sqrt_diag
mean_rdms_norm.append(tmp)
# + deletable=true editable=true
# save a pandas dataframe with all the data in there
# since we're taking only the upper triangular matrix, let's make sure we're assigning
# the right labels
pairs = list(product(labels, labels))
pairs_mat = np.array([' + '.join(p) for p in pairs]).reshape((len(labels), -1))
pairs_mat_triu = pairs_mat[np.triu_indices_from(pairs_mat, k=1)]
mean_rdms_triu = [r[np.triu_indices_from(r, k=1)] for r in mean_rdms_norm]
# make also a "system" label
label2system = dict()
for key, value in systems.iteritems():
if key in ['core', 'extended', 'core+extended']:
continue
else:
for lbl in value:
label2system[lbl] = key
pairs_system = [(label2system[p1], label2system[p2]) for p1, p2 in pairs]
pairs_system_mat = np.array(['+'.join(p) for p in pairs_system]).reshape((len(labels), -1))
pairs_system_mat_triu = pairs_system_mat[np.triu_indices_from(pairs_system_mat, k=1)]
npairs = len(pairs_mat_triu)
subj_pairs = ['+'.join(c) for c in combinations(['sub{0:02}'.format(i) for i in range(1, 12)], 2)]
nsubj_pairs = len(subj_pairs)
data = {
'corr': np.hstack(mean_rdms_triu),
'subj': np.repeat(subj_pairs, npairs),
'rois': np.tile(pairs_mat_triu, nsubj_pairs),
'systems': np.tile(pairs_system_mat_triu, nsubj_pairs)
}
df_hpal = pd.DataFrame(data, columns=['subj', 'rois', 'systems', 'corr'])
# + deletable=true editable=true
df_hpal.head()
# + deletable=true editable=true
df_hpal.to_csv('hpal_pairwise_corr.csv', index=False)
# + deletable=true editable=true
# remove early visual
mean_rdms_norm_noev = [filter_matrix(rdm, systems['core+extended'], labels) for rdm in mean_rdms_norm]
# + deletable=true editable=true
labels_noev = mean_rdms_norm_noev[0].columns.tolist()
# + deletable=true editable=true
systems_to_compute = ['core', 'extended']
# + deletable=true editable=true
correlations_all = compute_correlations(mean_rdms_norm_noev, systems_to_compute, labels_noev)
# + deletable=true editable=true
for key, df in correlations_all.iteritems():
df.to_csv('{0}_hpal_withinbetween_correlations.csv'.format(key), index=False)
# + [markdown] deletable=true editable=true
# ## Now do the same within each main system
# + deletable=true editable=true
mean_rdms_norm_core = [filter_matrix(rdm.values, systems['core'], labels_noev) for rdm in mean_rdms_norm_noev]
systems_to_compute = ['dorsal_core', 'ventral_core', 'anterior_core']
correlations_core = compute_correlations(mean_rdms_norm_core, systems_to_compute, mean_rdms_norm_core[0].columns.tolist())
for key, df in correlations_core.iteritems():
df.to_csv('{0}-withincore_hpal_withinbetween_correlations.csv'.format(key), index=False)
# + deletable=true editable=true
mean_rdms_norm_ext = [filter_matrix(rdm.values, systems['extended'], labels_noev) for rdm in mean_rdms_norm_noev]
systems_to_compute = ['theory_of_mind', 'precuneus']
correlations_ext = compute_correlations(mean_rdms_norm_ext, systems_to_compute, mean_rdms_norm_ext[0].columns.tolist())
for key, df in correlations_ext.iteritems():
df.to_csv('{0}-withinext_hpal_withinbetween_correlations.csv'.format(key), index=False)
# + [markdown] deletable=true editable=true
# # Do the same for the task data
# + deletable=true editable=true
task_data = h5load('taskdata_rdm_level2.hdf5')
# + deletable=true editable=true
assert_array_equal(task_data[0].sa.targets, task_data[0].fa.roi)
# + deletable=true editable=true
labels_task = task_data[0].sa.targets
# + [markdown] deletable=true editable=true
# These are per subject distance matrices
# + deletable=true editable=true
len(task_data), task_data[0].shape, task_data[0]
# + deletable=true editable=true
# these are distances, not correlations, so go back to correlation
task_data = [1. - rdm.samples for rdm in task_data]
# + deletable=true editable=true
# save a pandas dataframe with all the data in there
# since we're taking only the upper triangular matrix, let's make sure we're assigning
# the right labels
pairs = list(product(labels_task, labels_task))
pairs_mat = np.array([' + '.join(p) for p in pairs]).reshape((len(labels_task), -1))
pairs_mat_triu = pairs_mat[np.triu_indices_from(pairs_mat, k=1)]
task_data_triu = [r[np.triu_indices_from(r, k=1)] for r in task_data]
# make also a "system" label for every ROI (skip the derived aggregates)
label2system = dict()
for key, value in systems.iteritems():
    if key in ['core', 'extended', 'core+extended']:
        continue
    else:
        for lbl in value:
            label2system[lbl] = key
pairs_system = [(label2system[p1], label2system[p2]) for p1, p2 in pairs]
# Fix: reshape by len(labels_task), not len(labels) -- this section works on
# the task-data labels, and using the raiders `labels` list only happened to
# work if the two lists had the same length.
pairs_system_mat = np.array(['+'.join(p) for p in pairs_system]).reshape((len(labels_task), -1))
pairs_system_mat_triu = pairs_system_mat[np.triu_indices_from(pairs_system_mat, k=1)]
npairs = len(pairs_mat_triu)
subj = ['sub{0:02}'.format(i) for i in range(1, 34)]
nsubj = len(subj)
# Long-format table: one row per (subject, ROI pair), upper triangle only so
# each pair appears once.
data_task = {
    'corr': np.hstack(task_data_triu),
    'subj': np.repeat(subj, npairs),
    'rois': np.tile(pairs_mat_triu, nsubj),
    'systems': np.tile(pairs_system_mat_triu, nsubj)
}
df_task = pd.DataFrame(data_task, columns=['subj', 'rois', 'systems', 'corr'])
# + deletable=true editable=true
df_task.head()
# + deletable=true editable=true
df_task.to_csv('task_pairwise_corr.csv', index=False)
# + deletable=true editable=true
# remove early visual
task_data_noev = [filter_matrix(rdm, systems['core+extended'], labels_task) for rdm in task_data]
# + deletable=true editable=true
labels_task_noev = task_data_noev[0].columns.tolist()
# + deletable=true editable=true
systems_to_compute = ['core', 'extended']
# + deletable=true editable=true
correlations_all_task = compute_correlations(task_data_noev, systems_to_compute, labels_task_noev)
# + deletable=true editable=true
for key, df in correlations_all_task.iteritems():
df.to_csv('{0}_task_withinbetween_correlations.csv'.format(key), index=False)
# + [markdown] deletable=true editable=true
# ## Now do the same within each main system
# + deletable=true editable=true
# Restrict the task matrices to the core system and compute within/between
# means for each of its sub-systems.
# Fix: index the task matrices with the task-data label order
# (labels_task_noev), not the raiders `labels_noev` -- the two lists come
# from different sources and are not guaranteed to agree.
task_data_core = [filter_matrix(rdm.values, systems['core'], labels_task_noev) for rdm in task_data_noev]
systems_to_compute = ['dorsal_core', 'ventral_core', 'anterior_core']
correlations_core_task = compute_correlations(task_data_core, systems_to_compute, task_data_core[0].columns.tolist())
for key, df in correlations_core_task.iteritems():
    df.to_csv('{0}-withincore_task_withinbetween_correlations.csv'.format(key), index=False)
# + deletable=true editable=true
# Same for the extended system.
task_data_ext = [filter_matrix(rdm.values, systems['extended'], labels_task_noev) for rdm in task_data_noev]
systems_to_compute = ['theory_of_mind', 'precuneus']
correlations_ext_task = compute_correlations(task_data_ext, systems_to_compute, task_data_ext[0].columns.tolist())
for key, df in correlations_ext_task.iteritems():
    df.to_csv('{0}-withinext_task_withinbetween_correlations.csv'.format(key), index=False)
# + [markdown] deletable=true editable=true
# # Tests
# + deletable=true editable=true
from numpy.testing import assert_array_equal
# + deletable=true editable=true
def test_filter_matrix():
    """filter_matrix must return exactly the requested rows/columns."""
    mat = np.ones((6, 6))
    mat[np.ix_([3, 4, 5], [3, 4, 5])] = 3
    labels = range(6)
    for wanted_labels in (range(3), [3, 4, 5], [0, 1, 3, 5]):
        filtered = filter_matrix(mat, wanted_labels, labels)
        expected = mat[np.ix_(wanted_labels, wanted_labels)]
        assert_array_equal(filtered, expected)
test_filter_matrix()
# + deletable=true editable=true
def test_compute_within_between_mean():
    """Two disjoint blocks: within means equal the block values, between is 0."""
    mat = np.zeros((10, 10))
    mat[:5, :5] = 5
    mat[5:, 5:] = 20
    everything = range(10)
    assert_array_equal(compute_within_between_mean(mat, range(5), everything), [5.0, 0.0])
    assert_array_equal(compute_within_between_mean(mat, range(5, 10), everything), [20.0, 0.0])
test_compute_within_between_mean()
| notebooks/get_between-within_correlations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import os
import sys
import time
import numpy as np
import tensorflow as tf
import direct_sparse_layer_definition as ld
import direct_sparse_regularizers as reg
from direct_sparse_module import sparse_nn_ops as sc_module
from load_mnist_dataset import load_dataset
# +
def model_mnist(
    sparse_data,
    tensor_in_sizes,
    train_labels=None,
    num_classes=10,
    scope='mn256-',
    initializer=None,
    d1=0.1,
    d2=0.3,
    d3=0.4,
    rscale=0.01,
    max_bias=0.1
):
    """Build a sparse-convolutional MNIST classifier graph (TF1 style).

    Architecture: three 3x3 sparse conv layers (8 channels), a 2x2 spatial
    pooling layer, three more 3x3 sparse conv layers (16 channels), then a
    dense 512-unit layer and a dense logits layer of *num_classes* units.

    Parameters:
        sparse_data: tf.SparseTensor placeholder holding the input batch.
        tensor_in_sizes: [batch, depth, height, width, channels] input shape.
        train_labels: one-hot labels; used to build the loss/accuracy ops.
        num_classes: size of the final logits layer.
        scope: name prefix applied to each sparse conv layer.
        d1, d2, d3: per-stage parameters forwarded to the sparse layer
            builders -- presumably density bounds for conv block 1, conv
            block 2 and pooling respectively; TODO confirm against
            direct_sparse_layer_definition.
        initializer, rscale, max_bias: forwarded to every sparse conv layer
            (weight init, regularizer scale, bias offset).

    Returns:
        (loss, predictions, accuracy, net, ops) where `net` maps layer names
        to tensors, `predictions` has 'classes' and 'probabilities', and
        `ops` collects the extra per-layer ops returned by
        create_sparse_conv_layer_reg (run by the caller each step).
    """
    dim = 5
    strides = [1,1,1,1,1]
    padding = 'SAME'
    pooling_sizes = [1,1,2,2,1]  # pool 2x2 over height/width only
    batch_size = tensor_in_sizes[0]
    total_size = np.prod(tensor_in_sizes)
    net = {}
    ops = [None]*6  # one auxiliary op per sparse conv layer
    tmp_tin = tensor_in_sizes  # running shape, updated layer by layer
    net['sd_converted'] = ld.create_sparse_data_to_direct_sparse(sparse_data, dim)
    # --- conv block 1: 1 -> 8 -> 8 -> 8 channels ---
    net['conv1_1'], tmp_tin, ops[0] = ld.create_sparse_conv_layer_reg(
        net['sd_converted'],
        [1,3,3,1,8],
        tmp_tin,
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc1',
        initializer=initializer,
        scale = rscale,
        bias_offset = max_bias
    )
    net['conv1_2'], tmp_tin, ops[1] = ld.create_sparse_conv_layer_reg(
        net['conv1_1'],
        [1,3,3,8,8],
        tmp_tin,
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc2',
        initializer=initializer,
        scale = rscale,
        bias_offset = max_bias
    )
    net['conv1_3'], tmp_tin, ops[2] = ld.create_sparse_conv_layer_reg(
        net['conv1_2'],
        [1,3,3,8,8],
        tmp_tin,
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc3',
        initializer=initializer,
        scale = rscale,
        bias_offset = max_bias
    )
    net['pool1'], tmp_tin = ld.create_sparse_pooling_layer(net['conv1_3'], pooling_sizes, tmp_tin, dim, d3)
    # --- conv block 2: 8 -> 16 -> 16 -> 16 channels ---
    net['conv2_1'], tmp_tin, ops[3] = ld.create_sparse_conv_layer_reg(
        net['pool1'],
        [1,3,3,8,16],
        tmp_tin,
        strides,
        padding,
        dim,
        d2,
        'K-RELU',
        name=scope + 'sc4',
        initializer=initializer,
        scale = rscale,
        bias_offset = max_bias
    )
    net['conv2_2'], tmp_tin, ops[4] = ld.create_sparse_conv_layer_reg(
        net['conv2_1'],
        [1,3,3,16,16],
        tmp_tin,
        strides,
        padding,
        dim,
        d2,
        'K-RELU',
        name=scope + 'sc5',
        initializer=initializer,
        scale = rscale,
        bias_offset = max_bias
    )
    # last conv uses K-ABS instead of K-RELU -- presumably to keep a dense
    # nonnegative activation before densification; TODO confirm.
    net['conv2_3'], tmp_tin, ops[5] = ld.create_sparse_conv_layer_reg(
        net['conv2_2'],
        [1,3,3,16,16],
        tmp_tin,
        strides,
        padding,
        dim,
        d2,
        'K-ABS',
        name=scope + 'sc6',
        initializer=initializer,
        scale = rscale,
        bias_offset = max_bias
    )
    # Densify and classify: after one 2x2 pooling, 28x28 -> 14x14 spatial size.
    net['sparse_to_dense'] = ld.create_direct_sparse_to_dense(net['conv2_3'], dim)
    net['dense_reshaped1'] = tf.reshape(net['sparse_to_dense'], [batch_size, 1, 14, 14, 16])
    net['dense_reshaped2'] = tf.reshape(net['dense_reshaped1'], [batch_size, -1])
    net['dense1'] = tf.layers.dense(net['dense_reshaped2'], 512)
    net['dense2'] = tf.layers.dense(net['dense1'], num_classes)
    predictions = {
        'classes': tf.argmax(net['dense2'], axis=1),
        'probabilities': tf.nn.softmax(net['dense2'])
    }
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=train_labels,
        logits=net['dense2']
    )
    # add the regularization terms contributed by the sparse conv layers
    loss += tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    accuracy = tf.metrics.accuracy(tf.argmax(train_labels, axis=1), predictions['classes'])
    return loss, predictions, accuracy, net, ops
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield successive ``(inputs, targets)`` mini-batches of *batchsize*.

    A trailing partial batch is dropped.  With ``shuffle=True`` the order is
    randomised (via np.random) while the pairing of inputs and targets is
    preserved.
    """
    assert len(inputs) == len(targets)
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_start = len(inputs) - batchsize
    for start in range(0, last_start + 1, batchsize):
        if shuffle:
            selection = order[start:start + batchsize]
        else:
            selection = slice(start, start + batchsize)
        yield inputs[selection], targets[selection]
# +
# Load MNIST and sparsify it: rescale to uint8 [0, 255] and zero out faint
# pixels (< 50) so the sparse conv layers receive genuinely sparse input.
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
X_train = (X_train * 255).astype(np.uint8)
X_train[X_train<50] = 0
X_val = (X_val * 255).astype(np.uint8)
X_val[X_val<50] = 0
X_test = (X_test * 255).astype(np.uint8)
X_test[X_test<50] = 0
# One-hot encode the labels for the softmax cross-entropy loss.
y_train_softmax = np.zeros((y_train.shape[0], 10))
y_train_softmax[np.arange(y_train.shape[0]), y_train] = 1
y_val_softmax = np.zeros((y_val.shape[0], 10))
y_val_softmax[np.arange(y_val.shape[0]), y_val] = 1
y_test_softmax = np.zeros((y_test.shape[0], 10))
y_test_softmax[np.arange(y_test.shape[0]), y_test] = 1
dim = 5
batch_size = 32
tensor_in_sizes_=[batch_size, 1, 28, 28, 1] #[batch, depth, height, width, in_channels]
num_classes = 10
batch_label_sizes = [batch_size, num_classes]
tensor_in_sizes = np.array(tensor_in_sizes_, dtype=np.int64)
# Graph inputs: a sparse image batch and dense one-hot labels.
sparse_data = tf.sparse_placeholder(tf.float32, shape=tensor_in_sizes, name='sparse_placeholder')
dense_labels = tf.placeholder(tf.float32, shape=batch_label_sizes, name='labels_placeholder')
# -
with open('thr_experiment2.log', 'wb') as f:
for pruning_thr in [5e-3]:
print('===============================================================')
print('===============================================================')
print(pruning_thr)
f.write(str(pruning_thr)+'\n')
print('===============================================================')
print('===============================================================')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
with tf.Session(config=config) as sess:
print('initializing model')
loss, predictions, accuracy, net, ops = model_mnist(
sparse_data,
tensor_in_sizes,
dense_labels,
num_classes,
scope='mn256_thr{}-'.format(pruning_thr),
d1 = 0.1,
d2 = 0.2,
d3 = 0.4
)
optimizer = tf.train.AdagradOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
print('data and model are initialized')
f.write('data and model are initialized'+'\n')
num_epochs = 30
kernels = {}
to_remove = {}
for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
if 'filter' in var.name and 'Adagrad' not in var.name:
parts = var.name.split('/')
if parts[0] not in kernels:
kernels[parts[0]] = {}
if 'filter_indices' in parts[1]:
kernels[parts[0]]['filter_indices'] = var
if 'filter_shape' in parts[1]:
kernels[parts[0]]['filter_shape'] = var
if 'filter_channel_mapping' in parts[1]:
kernels[parts[0]]['filter_channel_mapping'] = var
if 'filter_values' in parts[1]:
kernels[parts[0]]['filter_values'] = var
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_acc = 0
train_batches = 0
density_layer3 = 0
start_time = time.time()
for batch in iterate_minibatches(X_train.reshape(-1, 1, 28, 28, 1), y_train_softmax, batch_size):
feed_dict = {
sparse_data: tf.SparseTensorValue(
[cl for cl in zip(*[arr.astype(np.int64) for arr in batch[0].nonzero()])],
batch[0][batch[0].nonzero()].astype(np.float32),
batch[0].shape
),
dense_labels: batch[1]
}
_, train_err_batch, train_acc_batch, n1 = sess.run([train_op, loss, accuracy, net['conv1_3']], feed_dict=feed_dict)
sess.run(ops, feed_dict=feed_dict)
density_layer3 += np.mean(n1.out_channel_densities)
#print(np.mean(n1.out_channel_densities), n1.out_channel_densities)
train_err += train_err_batch
train_acc += train_acc_batch[0]
train_batches += 1
training_time = time.time()
print('Epoch {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, training_time - start_time))
print(' train loss :\t\t{:.6f}'.format(train_err / train_batches))
print(' train accuracy:\t\t{:.2f} %'.format(train_acc / train_batches * 100))
print(' average density at conv layer 3:\t\t{:.2f} %'.format(density_layer3 / train_batches * 100))
f.write('Epoch {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, training_time - start_time)+'\n')
f.write(' train loss :\t\t{:.6f}'.format(train_err / train_batches)+'\n')
f.write(' train accuracy:\t\t{:.2f} %'.format(train_acc / train_batches * 100)+'\n')
f.write(' density:\t\t{:.2f} %'.format(density_layer3 / train_batches * 100)+'\n')
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val.reshape(-1, 1, 28, 28, 1), y_val_softmax, batch_size):
feed_dict = {
sparse_data: tf.SparseTensorValue(
[cl for cl in zip(*[arr.astype(np.int64) for arr in batch[0].nonzero()])],
batch[0][batch[0].nonzero()].astype(np.float32),
batch[0].shape
),
dense_labels: batch[1]
}
val_err_batch, val_acc_batch = sess.run([loss, accuracy], feed_dict=feed_dict)
val_err += val_err_batch
val_acc += val_acc_batch[0]
val_batches += 1
val_time = time.time()
print('Val {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, val_time - training_time))
print(' val loss :\t\t{:.6f}'.format(val_err / val_batches))
print(' val accuracy:\t\t{:.2f} %'.format(val_acc / val_batches * 100))
f.write('Val {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, val_time - training_time)+'\n')
f.write(' val loss :\t\t{:.6f}'.format(val_err / val_batches)+'\n')
f.write(' val accuracy:\t\t{:.2f} %'.format(val_acc / val_batches * 100)+'\n')
# Weights removal
removed_weights = 0
weights_total = 0
for layer, values in kernels.items():
num_filter_values = sess.run(values['filter_values'])
current_small = np.abs(num_filter_values) < pruning_thr
weights_total += (num_filter_values != -1).sum()
if layer in to_remove:
prev_small = to_remove[layer]
num_filter_indices = sess.run(values['filter_indices'])
num_filter_channel_mapping = sess.run(values['filter_channel_mapping'])
indices_to_remove = prev_small & current_small
removed_weights += indices_to_remove.sum()
new_filter_values = num_filter_values[~indices_to_remove]
new_filter_indices = num_filter_indices[~indices_to_remove]
fill_values = -1 * np.ones(num_filter_values.shape[0] - new_filter_values.shape[0])
fill_indices = -1 * np.ones(num_filter_indices.shape[0] - new_filter_indices.shape[0])
to_subtract = np.zeros_like(num_filter_channel_mapping)
for i in range(1, len(num_filter_channel_mapping)):
to_subtract[i] = indices_to_remove[:num_filter_channel_mapping[i]].sum()
sess.run([
values['filter_channel_mapping'].assign(num_filter_channel_mapping - to_subtract),
values['filter_values'].assign(np.hstack([new_filter_values, fill_values])),
values['filter_indices'].assign(np.hstack([new_filter_indices, fill_indices]))
])
to_remove[layer] = current_small
print('Pruning {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, time.time() - val_time))
print(' removed {} out of {} weights'.format(removed_weights, weights_total))
f.write('Pruning {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, time.time() - val_time)+'\n')
f.write(' removed {} out of {} weights'.format(removed_weights, weights_total)+'\n')
saver = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=1)
saver.save(sess, './' + '_thr_' + str(pruning_thr))
| tensorflow/core/user_ops/direct_sparse_experiments/mnist/MNIST_thr.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.2
# language: julia
# name: julia-1.4
# ---
using DrWatson
@quickactivate "MEngProject"
using MEngProject
using CUDA, DifferentialEquations, PyPlot, NNlib, ImageFiltering, Images, MEngProject, MEngProject.LamKernels, MEngProject.Laminart, MEngProject.Utils, BenchmarkTools, Test
img = convert(CuArray{Float32,2}, load(datadir("Iine_100_100_gs.png")));
p = LaminartGPU.kernels(img, Parameters.parameters);
p = LaminartGPU.add_I_u_p(img, p);
tspan = (0.0f0, 100.0f0)
u0 = reshape(CUDA.zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2,1);
# +
x_lgn = reshape(CuArray{Float32}(undef, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1)
C = reshape(CuArray{Float32}(undef, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1)
H_z = reshape(CuArray{Float32}(undef, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1)
f = LaminartGPU.MyFunction(x_lgn, C, H_z)
# -
prob = ODEProblem(f, u0, tspan, p)
sol = solve(prob)
x .= u0[:, :, 1:p.K,:]
NNlib.conv(x, p.k_x_lgn, pad=0,flipped=true)
p.k_x_lgn
x
NNlib.conv(x, p.k_x_lgn, pad=2,flipped=true)
x_lgn .= 0
C .= 0
H_z .=0
u0 .=0;
x_lgn
u0;
x = u0[:, :, 1:p.K,:]
NNlib.conv(x, p.k_x_lgn, pad=0, flipped=true)
x = u0
x_lgn = NNlib.conv(x, p.k_x_lgn, pad=0,flipped=true)
# Callable wrapper (subtype of Function) bundling pre-allocated scratch
# buffers (x_lgn, C, H_z) so the ODE right-hand side can reuse them across
# calls instead of allocating each time.
# NOTE(review): this duplicates LaminartGPU.MyFunction used earlier in the
# notebook -- confirm which definition is actually exercised.
struct MyFunction{T} <: Function
    x_lgn::T
    C::T
    H_z::T
end
MyFunction()
# +
u = u0
du = u0
@inbounds begin
x = @view u[:, :, 1:p.K,:]
y = @view u[:, :, p.K+1:2*p.K,:]
m = @view u[:, :, 2*p.K+1:3*p.K,:]
z = @view u[:, :, 3*p.K+1:4*p.K,:]
s = @view u[:, :, 4*p.K+1:5*p.K,:]
# C = @view u[:, :, 5*p.K+1:6*p.K]
# H_z = @view u[:, :, 6*p.K+1:7*p.K]
v_p = @view u[:, :, 5*p.K+1,:]
v_m = @view u[:, :, 5*p.K+2,:]
# x_lgn = @view u[:, :, 7*p.K+3]
dx = @view du[:, :, 1:p.K,:]
dy = @view du[:, :, p.K+1:2*p.K,:]
dm = @view du[:, :, 2*p.K+1:3*p.K,:]
dz = @view du[:, :, 3*p.K+1:4*p.K,:]
ds = @view du[:, :, 4*p.K+1:5*p.K,:]
dv_p = @view du[:, :, 5*p.K+1,:]
dv_m = @view du[:, :, 5*p.K+2,:]
# x_lgn = @view ff.x_lgn[:,:,1,:]
# x_lgn = similar(v_p)
# C = similar(x)
# H_z = similar(x)
# x_lgn = Array{eltype(u)}(undef, p.dim_i, p.dim_j)
# C = reshape(Array{eltype(u)}(undef, p.dim_i, p.dim_j*p.K),p.dim_i,p.dim_j, p.K)
# C = reshape(zeros(p.dim_i, p.dim_j*p.K),p.dim_i,p.dim_j, p.K)
# C = copy(u[:, :, 1:p.K])
# H_z = copy(u[:, :, 1:p.K])
# LaminartGPU.fun_x_lgn!(x_lgn, x, p)
# LaminartGPU.fun_v_C!(C, v_p, v_m, p)
# LaminartGPU.fun_H_z!(H_z, z, p)
LaminartGPU.fun_dv!(dv_p, v_p, p.r, x_lgn, p)
# LaminartGPU.fun_dv!(dv_m, v_m, .-p.r, x_lgn, p)
# LaminartGPU.fun_dx_v1!(dx, x, C, z, p.x_V2, p)
# LaminartGPU.fun_dy!(dy, y, C, x, m, p)
# LaminartGPU.fun_dm!(dm, m, x, p)
# LaminartGPU.fun_dz!(dz, z, y, H_z, s, p)
# LaminartGPU.fun_ds!(ds, s, H_z, p)
end
# -
dv = similar(x_lgn)
LaminartGPU.conv!(dv, x_lgn, p.k_gauss_1, p)
size(x_lgn)
# +
p.k_gauss_1
# -
conv(x_lgn, p.k_gauss_1)
depthwiseconv(x_lgn, p.k_gauss_1)
u0
@view u[:, :, 5*p.K+1:5*p.K+1,:]
# m = @view u[:, :, 2*p.K+1:3*p.K,:]
x = @view u[:, :, 1:p.K,:]
y = @view u[:, :, p.K+1:2*p.K,:]
m = @view u[:, :, 2*p.K+1:3*p.K,:]
z = @view u[:, :, 3*p.K+1:4*p.K,:]
s = @view u[:, :, 4*p.K+1:5*p.K,:]
# C = @view u[:, :, 5*p.K+1:6*p.K]
# H_z = @view u[:, :, 6*p.K+1:7*p.K]
v_p = @view u[:, :, 5*p.K+1:5*p.K+1,:]
v_m = @view u[:, :, 5*p.K+2:5*p.K+2,:]
# x_lgn = @view u[:, :, 7*p.K+3]
dx = @view du[:, :, 1:p.K,:]
dy = @view du[:, :, p.K+1:2*p.K,:]
dm = @view du[:, :, 2*p.K+1:3*p.K,:]
dz = @view du[:, :, 3*p.K+1:4*p.K,:]
ds = @view du[:, :, 4*p.K+1:5*p.K,:]
dv_p = @view du[:, :, 5*p.K+1:5*p.K+1,:]
dv_m = @view du[:, :, 5*p.K+2:5*p.K+2,:]
NNlib.conv(x, p.k_x_lgn, pad=0,flipped=true)
x_ = reshape(CUDA.ones(100,200),100,100,2,1)
NNlib.conv(x_, p.k_x_lgn, pad=0,flipped=true)
# - not using @view correctly
# - want ijk1 array, ie 100×100×1×1 CuArray{Float32,4,Nothing}:
# - returns 100×100×2×1 CuArray{Float32,4,CuArray{Float32,2,Nothing}}
x = @view u[:, :, 1:p.K,:]
NNlib.conv(x, p.k_x_lgn, pad=0,flipped=true)
x__ = u[:, :, 1:p.K,:]
NNlib.conv(x__, p.k_x_lgn, pad=0,flipped=true)
size(x)
size(x__)
x
x[:,:,:,:]
x[:,:,:,:] == x__
x == x__
x
# - if put [:,:,:,:] after variable declared as @view something,
# indexing correct
# - but that is then not using @view? which is needed for gpu?/ to write to slice being viewed?
@view u[:,:,1:2,:]
@views u[:,:,1:2,:]
u_ = Array(u)
@view u_[:,:,1:2,:]
@view u[:,:,1:2,:]
t = reshape(CUDA.ones(5,20),5,5,2,2)
t_ = CuArray(reshape(ones(5,20),5,5,2,2))
# - CuArray doesnt like to be reshaped
# - Workaround is to make
# - array first
# - reshape
# - convert to cuarray
# Lift a 2-D image to a 4-D (height, width, channels=1, batch=1) array --
# the layout expected by NNlib convolutions.  Workaround for CuArray
# reshaping: build/reshape on the host first, then convert (see note above).
function reshape2d_4d(img::AbstractArray)
    h = size(img, 1)
    w = size(img, 2)
    return reshape(img, h, w, 1, 1)
end
# +
img = convert(Array{Float32,2}, load(datadir("Iine_100_100_gs.png")));
img = reshape2d_4d(img)
img = CuArray(img)
r = similar(img)
LaminartGPU.I_u!(r, img, p)
temp_out = (I = img, r = r)
p = merge(p, temp_out);
# -
tspan = (0.0f0, 100.0f0)
u0 = CuArray(reshape(zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2,1));
# +
x_lgn = CuArray(reshape(Array{Float32}(undef, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1))
C = CuArray(reshape(Array{Float32}(undef, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1))
H_z = CuArray(reshape(Array{Float32}(undef, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1))
f = LaminartGPU.MyFunction(x_lgn, C, H_z)
# -
prob = ODEProblem(f, u0, tspan, p)
sol = solve(prob)
u = u0;
x = @view u[:, :, 1:p.K,:]
y = @view u[:, :, p.K+1:2*p.K,:]
m = @view u[:, :, 2*p.K+1:3*p.K,:]
z = @view u[:, :, 3*p.K+1:4*p.K,:]
s = @view u[:, :, 4*p.K+1:5*p.K,:];
# C = @view u[:, :, 5*p.K+1:6*p.K]
# H_z = @view u[:, :, 6*p.K+1:7*p.K]
v_p = @view u[:, :, 5*p.K+1:5*p.K+1,:]
v_m = @view u[:, :, 5*p.K+2:5*p.K+2,:];
# x_lgn = @view u[:, :, 7*p.K+3]
dx = @view du[:, :, 1:p.K,:]
dy = @view du[:, :, p.K+1:2*p.K,:]
dm = @view du[:, :, 2*p.K+1:3*p.K,:]
dz = @view du[:, :, 3*p.K+1:4*p.K,:]
ds = @view du[:, :, 4*p.K+1:5*p.K,:]
dv_p = @view du[:, :, 5*p.K+1:5*p.K+1,:]
dv_m = @view du[:, :, 5*p.K+2:5*p.K+2,:];
LaminartGPU.fun_x_lgn!(x_lgn, x, p)
x_lgn
size(x)
eltype(x)
typeof(x)
x_ = Array(x)
k_ = Array(p.k_x_lgn);
NNlib.conv(x_, k_, pad=0,flipped=true)
u_ = Array(u);
x__ = @view u_[:,:,1:p.K,:]
NNlib.conv(x, p.k_x_lgn, pad=0,flipped=true)
typeof(x)
typeof(x__)
NNlib.conv(x__, k_, pad=0,flipped=true)
p.k_x_lgn
x
NNlib.conv(x, p.k_x_lgn, pad=0,flipped=true)
LaminartGPU.fun_v_C!(C, v_p, v_m, p)
LaminartGPU.fun_H_z!(H_z, z, p)
LaminartGPU.fun_dv!(dv_p, v_p, p.r, x_lgn, p)
LaminartGPU.fun_dv!(dv_m, v_m, .-p.r, x_lgn, p)
LaminartGPU.fun_dx_v1!(dx, x, C, z, p.x_V2, p)
LaminartGPU.fun_dy!(dy, y,C, x, m, p)
LaminartGPU.fun_dm!(dm, m, x, p)
LaminartGPU.fun_dz!(dz, z, y,H_z, s, p)
LaminartGPU.fun_ds!(ds, s, H_z, p)
# ## Diff between @view of ::CuArray and Array(::CuArray)
# - SubArray{Float32,4,Array{Float32,4},
# - SubArray{Float32,4,CuArray{Float32,4,Nothing},
#
# What does Nothing mean in CuArray type??
#
# What does direct ie no@view of CuArray return?
# - still has nothing
u[:, :, 1:p.K,:]
conv(u[:, :, 1:p.K,:], p.k_x_lgn, pad=0)
@benchmark conv(u[:, :, 1:p.K,:], p.k_x_lgn, pad=0)
@benchmark conv(u_[:, :, 1:p.K,:], k_, pad=0)
@benchmark @. xconv(u[:, :, 1:p.K,:], p.k_x_lgn, pad=0)
t_1 = CuArray(reshape(zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2,1));
# +
t_2 = CuArray(reshape(zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2,1));
# -
typeof(t_2)
typeof(t_1)
t_3 = CuArray(collect(reshape(zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2,1)));
typeof(t_3)
t_4 = CuArray(zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)))
# +
t_5 = CUDA.zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2))
# -
typeof(t_5)
xx = view(u[:, :, 1:p.K,:])
xx = view(u[1:10, 1:1, 1:1,1:1])
u=u0
x = CuArray(@view u[:, :, 1:p.K,:])
NNlib.conv(x, p.k_x_lgn, pad=0,flipped=true)
| notebooks/dev/GPU_test1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py37-email
# language: python
# name: py37-email
# ---
import sys
sys.path.insert(0, "..")
from brforest import BrForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_fscore_support
from sklearn import preprocessing
import sklearn.datasets
import urllib.request
import numpy as np
# +
train_url = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/dna.scale.tr"
test_url = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/dna.scale.t"
n_features = 180
X_tr, y_tr = sklearn.datasets.load_svmlight_file(urllib.request.urlopen(train_url), n_features=n_features)
X_t, y_t = sklearn.datasets.load_svmlight_file(urllib.request.urlopen(test_url), n_features=n_features)
X_tr, X_t = X_tr.toarray(), X_t.toarray()
le = preprocessing.LabelEncoder()
le.fit(y_tr)
y_tr, y_t = le.transform(y_tr), le.transform(y_t)
print(X_tr.shape)
print(y_tr.shape)
print(X_t.shape)
print(y_t.shape)
# +
def precision_recall_f1(y_true, y_pred):
    """Return macro-averaged (precision, recall, f1) over the minority classes.

    The class with the largest support is treated as the majority class and
    is excluded from the macro average, so the score reflects minority-class
    performance only.

    Fix: the original called precision_recall_fscore_support with the global
    ``y_t`` instead of the ``y_true`` parameter, silently ignoring the
    function's own argument.
    """
    precisions, recalls, f1s, supports = precision_recall_fscore_support(y_true, y_pred)
    majority_index = np.argmax(supports)

    def _macro(vals):
        # macro-average with the majority class removed
        return np.mean(np.delete(vals, majority_index))

    return _macro(precisions), _macro(recalls), _macro(f1s)
def eval_clf(clf, X_tr, y_tr, X_t, y_t):
    """Fit *clf* on the training split and print macro P/R/F1 on the test split."""
    clf.fit(X_tr, y_tr)
    predictions = clf.predict(X_t)
    p, r, f = precision_recall_f1(y_t, predictions)
    print(f"Precision: {p}; Recall: {r}; F1: {f}")
# Compare a stock random forest against BrForest under identical hyper-parameters.
eval_clf(RandomForestClassifier(n_estimators=100, criterion="entropy", max_features="sqrt"), X_tr, y_tr, X_t, y_t)
eval_clf(BrForestClassifier(n_estimators=100, criterion="entropy", max_features="sqrt"), X_tr, y_tr, X_t, y_t)
| notebooks/experiment_on_public_datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # <NAME>'s Global FPE Calibration
# by <NAME> and <NAME>
# Remember that whenever you power-cycle the Observatory Simulator, you should set `preload=True` below.
#
# When you are running this notebook and it has *not* been power cycled, you should set `preload=False`.
from tessfpe.dhu.fpe import FPE
from tessfpe.dhu.unit_tests import check_house_keeping_voltages
# Connect to FPE #1 with wrapper version 6.1.1; per the note above, preload
# must be True right after a power cycle of the Observatory Simulator.
fpe1 = FPE(1, debug=False, preload=False, FPE_Wrapper_version='6.1.1')
print fpe1.version
# Sanity check: only report success when the interface voltages look OK.
if check_house_keeping_voltages(fpe1):
    print "Wrapper load complete. Interface voltages OK."
# We assume that there is a slight global error in the housekeeping that may be corrected by a linear transformation:
#
# $$ f(x) := m \cdot x + c $$
#
# To calculate $c$, we can average the biases in the housekeeping, but we must first convert them to ADUs from uA by using the `unscale_value` function.
#
# Note that because of statistical variation, we collect 50 samples:
def estimate_c_param(fpe, samples=50):
    """Estimate the additive housekeeping correction term c.

    Averages `samples` readings of every bias channel, converting each
    reading from uA back to raw ADUs with `unscale_value`.
    """
    from tessfpe.dhu.house_keeping import unscale_value
    from tessfpe.data.housekeeping_channels import housekeeping_channels
    sample_data = []
    for _ in range(samples):
        analogue_house_keeping = fpe.house_keeping["analogue"]
        # BUG FIX: iterate over the readings just taken from the `fpe`
        # argument, not the global `fpe1`, so the estimate works for any
        # FPE instance (and does not query the hardware a second time).
        biases = [unscale_value(analogue_house_keeping[k],
                                16,
                                housekeeping_channels[k]["low"],
                                housekeeping_channels[k]["high"])
                  for k in analogue_house_keeping
                  if 'bias' in k]
        for b in biases:
            sample_data.append(b)
    return sum(sample_data) / len(sample_data)
# Below run the above estimation; this should be approximately $-4.83$ based on previous calculations
estimate_c_param(fpe1)
# Next we can use the known voltages from the power supply to recover the slope $m$, which is done as follows. Once again, 50 samples are collected because we are scientists at MIT:
# +
def unscale_1_8_f(x):
    """Convert a +1.8f housekeeping voltage back to raw 16-bit ADUs."""
    from tessfpe.data.housekeeping_channels import housekeeping_channels
    from tessfpe.dhu.house_keeping import unscale_value
    channel = housekeeping_channels['+1.8f']
    return unscale_value(x, 16, channel['low'], channel['high'])
def unscale_1_f(x):
    """Convert a +1f housekeeping voltage back to raw 16-bit ADUs."""
    from tessfpe.data.housekeeping_channels import housekeeping_channels
    from tessfpe.dhu.house_keeping import unscale_value
    channel = housekeeping_channels['+1f']
    return unscale_value(x, 16, channel['low'], channel['high'])
def estimate_m_param(fpe, samples=50):
    """Estimate the multiplicative housekeeping correction term m.

    Compares the known supply voltages (+1.8f = 1.807 V, +1f = 0.998 V)
    against the measured housekeeping readings; the slope is averaged over
    `samples` readings.
    """
    # The reference ADU values are constant — compute them once, not per sample.
    a_ = unscale_1_8_f(1.807)
    b_ = unscale_1_f(0.998)
    slope_samples = []
    for _ in range(samples):
        # BUG FIX: read from the `fpe` argument rather than the global `fpe1`.
        a = unscale_1_8_f(fpe.house_keeping['analogue']['+1.8f'])
        b = unscale_1_f(fpe.house_keeping['analogue']['+1f'])
        slope_samples.append((a_ - b_) / (a - b))
    return sum(slope_samples) / len(slope_samples)
# -
# Once again, we run the calculation; this should be $\approx 1$
estimate_m_param(fpe1)
| John Doty's Global Calibration of the Housekeeping Data Collection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# First install the repo and requirements.
# %pip --quiet install git+https://github.com/mfinzi/equivariant-MLP.git
# # Implementing New Representations
# As our solver treats objects very generally, implementing new representations is surprisingly easy. To implement a new [Representation](https://emlp.readthedocs.io/en/latest/package/emlp.solver.reps.html#emlp.reps.Rep) you need to implement `size()` which is the dimension of the representation, `rho(M)` which is a mapping from the group elements to the representation matrix, as well `__eq__` and `__hash__` to distinguish different representations. It's also a good idea to implement a `__str__` function to improve readability. All representations implemented this way should have the `.G` attribute specifying the symmetry group.
#
# The implementation also requires you to specify whether the representation is regular (whether `rho(M)` outputs a permutation matrix) with the `is_regular` attribute, and also the `.T` property that returns the dual of the representation. We plan on removing these two requirements in a later release.
# ## Example 1: Irreducible Representations of SO(2)
# As a first example, we show one can implement the real irreducible representations of the group SO(2). All of irreducible representations $\psi_n$ of SO(2) are $2$-dimensional (except for $\psi_0$ which is the same as [Scalar](https://emlp.readthedocs.io/en/latest/package/emlp.reps.html#emlp.reps.Scalar) $= \mathbb{R} = \psi_0$). These representations can be written $\psi_n(R_\theta) = \begin{bmatrix}\cos(n\theta) &\sin(n\theta)\\-\sin(n\theta) & \cos(n\theta) \end{bmatrix}$ or simply: $\psi_n(R) = R^n$.
# +
import jax.numpy as jnp
from emlp.reps import Rep,vis,V,equivariance_error
from emlp.groups import SO,S
class SO2Irreps(Rep):
    """ (Real) Irreducible representations of SO2 """
    is_regular = False

    def __init__(self, order):
        assert order>0, "Use Scalar for 𝜓₀"
        self.G = SO(2)
        self.order = order

    def size(self):
        # Every real SO(2) irrep with order n > 0 is two dimensional.
        return 2

    def rho(self, M):
        # psi_n(R) = R^n for a rotation matrix R.
        return jnp.linalg.matrix_power(M, self.order)

    def __str__(self):
        subscripts = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
        return f"𝜓{self.order}".translate(subscripts)

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.G == other.G and self.order == other.order

    def __hash__(self):
        return hash((type(self), self.G, self.order))

    @property
    def T(self):
        # Real SO(2) irreps are self-dual.
        return self
# -
# That's it! Now we can use the SO(2) irreps in the type system, and solve for equivariant bases that contain them.
# Instantiate a few irreps and combine them with the representation algebra.
psi1 = SO2Irreps(1)
psi2 = SO2Irreps(2)
psi3 = SO2Irreps(3)
psi1*psi2+psi3
# We can verify [schur's lemma](https://en.wikipedia.org/wiki/Schur%27s_lemma), that there are no nontrivial equivariant linear maps from one irrep to another:
print((psi1>>psi2).equivariant_basis(),(psi2>>psi3).equivariant_basis(),(psi1>>psi3).equivariant_basis())
# And we can include non irreducibles in our representation too. For example computing equivariant maps from $T_4 \rightarrow \psi_2$.
vis(V(SO(2))**4,psi2,False)
Wrep = V(SO(2))**4>>psi2
Q = Wrep.equivariant_basis()
print("{} equivariant maps with r={} basis elements".format(Wrep,Q.shape[-1]))
import numpy as np
# Sample a random equivariant map from the basis and measure its error.
W = Q@np.random.randn(Q.shape[-1])
print("With equivariance error {:.2e}".format(equivariance_error(W,V(SO(2))**4,psi2,SO(2))))
# ## Example 2: PseudoScalars, PseudoVectors, and PseudoTensors
# With a slightly more sophisticated example, we'll now implement the representations known as [PseudoScalars](https://en.wikipedia.org/wiki/Pseudoscalar), [PseudoVectors](https://en.wikipedia.org/wiki/Pseudovector), and other PseudoTensor representations. These representations commonly occur in physics when working with cross products or the Hodge star, and also describe the Fermi statistics of spin 1/2 particles that are antisymmetric under exchange.
#
# A pseudoscalar is like a scalar `Scalar` $=\mathbb{R}$, but incurs a $-1$ under orientation reversing transformations: $\rho(M) = \mathrm{sign}(\mathrm{det}(M))$. Similarly, pseudovectors are like ordinary vectors but can pick up this additional $-1$ factor. In fact, we can convert any representation into a pseudorepresentation by multiplying by a pseudoscalar.
#
# <!-- For when the group $G$ is the symmetric group $S_n$, the sign of the determinant of the permutation matrices $\sigma\in G$ is nothing but the parity of the permutation $(-1)^{N(\sigma)}$ -->
from emlp.reps import Rep,V,T,vis,Scalar
class PseudoScalar(Rep):
    """One-dimensional rep that flips sign under orientation-reversing elements."""
    is_regular = False

    def __init__(self, G=None):
        self.G = G

    def __call__(self, G):
        # Allow the bare rep to be specialized to a group: P(G).
        return PseudoScalar(G)

    def size(self):
        return 1

    def __str__(self):
        return "P"

    def rho(self, M):
        # rho(M) = sign(det(M)); multiplying by the identity presumably
        # promotes integer/boolean permutation matrices to float — confirm.
        sign = jnp.linalg.slogdet(M @ jnp.eye(M.shape[0]))[0]
        return sign * jnp.eye(1)

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.G == other.G

    def __hash__(self):
        return hash((type(self), self.G))

    @property
    def T(self):
        return self
G = S(4)
P = PseudoScalar(G)
W = V(G)
# We can then build up pseudotensors with multiplication. As expected pseudovectors incur a -1 for odd permutations.
pseudovector = P*W
g = G.sample()
print(f"Sample g = \n{g}")
print(f"Pseudovector 𝜌 = \n{pseudovector.rho_dense(g)}")
# Again, we can freely mix and match these new representations with existing ones.
P*(W**2 +P)+W.T
# Equivariant maps from matrices to pseudovectors yield a different set of solutions from maps from matrices to vectors.
vis(W**2,pseudovector,cluster=False)
vis(W**2,W,cluster=False)
vis(P*W**2,W**2,cluster=False)
# And of course we can verify the equivariance:
rin = P*W**2
rout = W**2
Q = (rin>>rout).equivariant_basis()
print(f"With equivariance error {equivariance_error(Q,rin,rout,G):.2e}")
# We can even mix and match with the irreducible representations above.
P = PseudoScalar(SO(2))
W = V(SO(2))
rep = psi2>>P*W**2
print(rep)
print(rep.equivariant_basis().shape)
# ## Additional Information
# Several other functions may be optionally implemented to improve performance such as the Lie Algebra representation `drho(A)` which by default is calculated automatically from `rho` as $d\rho(A) := d\rho(M)|_{M=I}(A) = \frac{d}{dt} \rho(e^{tA})|_{t=0}$, the dual representation `.T`. However, these functions are optional and the representation can function fine without them.
| docs/notebooks/colabs/4new_representations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="vjAC2mZnb4nz"
# # Image transformations
#
# This notebook shows new features of torchvision image transformations.
#
# Prior to v0.8.0, transforms in torchvision have traditionally been PIL-centric and presented multiple limitations due to that. Now, since v0.8.0, transforms implementations are Tensor and PIL compatible and we can achieve the following new
# features:
# - transform multi-band torch tensor images (with more than 3-4 channels)
# - torchscript transforms together with your model for deployment
# - support for GPU acceleration
# - batched transformation such as for videos
# - read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="btaDWPDbgIyW" outputId="8a83d408-f643-42da-d247-faf3a1bd3ae0"
import torch, torchvision
torch.__version__, torchvision.__version__
# + [markdown] id="9Vj9draNb4oA"
# ## Transforms on CPU/CUDA tensor images
#
# Let's show how to apply transformations on images opened directly as a torch tensors.
# Now, torchvision provides image reading functions for PNG and JPG images with torchscript support.
# + id="Epp3hCy0b4oD"
from torchvision.datasets.utils import download_url
download_url("https://farm1.static.flickr.com/152/434505223_8d1890e1e2.jpg", ".", "test-image.jpg")
download_url("https://farm3.static.flickr.com/2142/1896267403_24939864ba.jpg", ".", "test-image2.jpg")
# + id="Y-m7lYDPb4oK"
import matplotlib.pylab as plt
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/", "height": 303} id="5bi8Q7L3b4oc" outputId="e5de5c73-e16d-4992-ebee-94c7ddf0bf54"
from torchvision.io.image import read_image
# read_image decodes the JPEG directly into a CHW uint8 tensor (no PIL).
tensor_image = read_image("test-image.jpg")
print("tensor image info: ", tensor_image.shape, tensor_image.dtype)
# matplotlib expects HWC ordering, hence the transpose.
plt.imshow(tensor_image.numpy().transpose((1, 2, 0)))
# -
def to_rgb_image(tensor):
    """Min-max scale a CHW tensor per channel to 0-255 and return an HWC uint8 array."""
    img = tensor.cpu().numpy().transpose((1, 2, 0))
    lo = img.min(axis=(0, 1))
    hi = img.max(axis=(0, 1))
    scaled = 255.0 * (img - lo) / (hi - lo)
    return scaled.astype("uint8")
# + colab={"base_uri": "https://localhost:8080/", "height": 322} id="PgWpjxQ3b4pF" outputId="e9a138e8-b45c-4f75-d849-3b41de0e5472"
import torchvision.transforms as T
# Fixing the random seed is now done through torch's RNG:
torch.manual_seed(12)
# Tensor-compatible transform pipeline: crop, flip, convert to float, normalize.
transforms = T.Compose([
    T.RandomCrop(224),
    T.RandomHorizontalFlip(p=0.3),
    T.ConvertImageDtype(torch.float),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
out_image = transforms(tensor_image)
print("output tensor image info: ", out_image.shape, out_image.dtype)
plt.imshow(to_rgb_image(out_image))
# + [markdown] id="LmYQB4cxb4pI"
# Tensor images can be on GPU
# + colab={"base_uri": "https://localhost:8080/", "height": 322} id="S6syYJGEb4pN" outputId="86bddb64-e648-45f2-c216-790d43cfc26d"
out_image = transforms(tensor_image.to("cuda"))
print("output tensor image info: ", out_image.shape, out_image.dtype, out_image.device)
plt.imshow(to_rgb_image(out_image))
# + [markdown] id="jg9TQd7ajfyn"
# ## Scriptable transforms for easier deployment via torchscript
#
# Next, we show how to combine input transformations and model's forward pass and use `torch.jit.script` to obtain a single scripted module.
#
# **Note:** we have to use only scriptable transformations that should be derived from `torch.nn.Module`.
# Since v0.8.0, all transformations are scriptable except `Compose`, `RandomChoice`, `RandomOrder`, `Lambda` and those applied on PIL images.
# The transformations like `Compose` are kept for backward compatibility and can be easily replaced by existing torch modules, like `nn.Sequential`.
#
# Let's define a module `Predictor` that transforms input tensor and applies ImageNet pretrained resnet18 model on it.
# + id="NSDOJ3RajfvO"
import torch
import torch.nn as nn
import torchvision.transforms as T
from torchvision.io.image import read_image
from torchvision.models import resnet18
class Predictor(nn.Module):
    """ImageNet classifier with its preprocessing fused in, so the whole
    pipeline (transforms + resnet18) can be scripted with ``torch.jit.script``."""
    def __init__(self):
        super().__init__()
        # Pretrained backbone, kept in eval mode.
        self.resnet18 = resnet18(pretrained=True).eval()
        # nn.Sequential rather than T.Compose: Compose is not scriptable (see note above).
        self.transforms = nn.Sequential(
            T.Resize(256),
            T.CenterCrop(224),
            T.ConvertImageDtype(torch.float),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the predicted class index for each image in the batch."""
        with torch.no_grad():
            x = self.transforms(x)
            y_pred = self.resnet18(x)
        return y_pred.argmax(dim=1)
# + [markdown] id="ZZKDovqej5vA"
# Now, let's define scripted and non-scripted instances of `Predictor` and apply on multiple tensor images of the same size
# + id="GBBMSo7vjfr0"
from torchvision.io.image import read_image
predictor = Predictor().to("cuda")
scripted_predictor = torch.jit.script(predictor).to("cuda")
tensor_image1 = read_image("test-image.jpg")
tensor_image2 = read_image("test-image2.jpg")
# Crop both images to the same 320-row height so they stack into one batch.
batch = torch.stack([tensor_image1[:, -320:, :], tensor_image2[:, -320:, :]]).to("cuda")
res1 = scripted_predictor(batch)
res2 = predictor(batch)
# + colab={"base_uri": "https://localhost:8080/", "height": 501} id="Dmi9r_p-oKsk" outputId="b9c55e7d-5db1-4975-c485-fecc4075bf47"
import json
from torchvision.datasets.utils import download_url
download_url("https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json", ".", "imagenet_class_index.json")
with open("imagenet_class_index.json", "r") as h:
labels = json.load(h)
plt.figure(figsize=(12, 7))
for i, p in enumerate(res1):
plt.subplot(1, 2, i + 1)
plt.title("Scripted predictor:\n{label})".format(label=labels[str(p.item())]))
plt.imshow(batch[i, ...].cpu().numpy().transpose((1, 2, 0)))
plt.figure(figsize=(12, 7))
for i, p in enumerate(res2):
plt.subplot(1, 2, i + 1)
plt.title("Original predictor:\n{label})".format(label=labels[str(p.item())]))
plt.imshow(batch[i, ...].cpu().numpy().transpose((1, 2, 0)))
# + [markdown] id="7IYsjzpFqcK8"
# We save and reload scripted predictor in Python or C++ and use it for inference:
# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="0kk9LLw5jfol" outputId="05ea6db7-7fcf-4b74-a763-5f117c14cc00"
scripted_predictor.save("scripted_predictor.pt")
scripted_predictor = torch.jit.load("scripted_predictor.pt")
res1 = scripted_predictor(batch)
for i, p in enumerate(res1):
print("Scripted predictor: {label})".format(label=labels[str(p.item())]))
# -
# Data reading and decoding functions also support torch script and therefore can be part of the model as well:
class AnotherPredictor(Predictor):
    # Variant whose forward takes an image *path*: read_image supports
    # torchscript, so file decoding becomes part of the scripted model.
    def forward(self, path: str) -> int:
        with torch.no_grad():
            x = read_image(path).unsqueeze(0)  # add a batch dimension -> (1, C, H, W)
            x = self.transforms(x)
            y_pred = self.resnet18(x)
        return int(y_pred.argmax(dim=1).item())
# + id="-cMwTs3Yjffy"
scripted_predictor2 = torch.jit.script(AnotherPredictor())
res = scripted_predictor2("test-image.jpg")
print("Scripted another predictor: {label})".format(label=labels[str(res)]))
| examples/python/tensor_transforms.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// # Using the piTop[4] display
#r "nuget:piTop"
#r "nuget:piTop.FoundationKit"
#r "nuget:SixLabors.Fonts,1.0.0-beta0010"
using PiTop;
using PiTop.Abstractions;
using PiTopMakerArchitecture.Foundation;
using PiTopMakerArchitecture.Foundation.Components;
using PiTopMakerArchitecture.Foundation.Sensors;
var module = new PiTopModule();
var builtinDisplay = module.Display;
// +
using SixLabors.ImageSharp;
using SixLabors.ImageSharp.Drawing;
using SixLabors.ImageSharp.Drawing.Processing;
builtinDisplay.Draw((d,cr) =>
{
var square = new RectangularPolygon(builtinDisplay.Width / 4, builtinDisplay.Height / 4, builtinDisplay.Width/2, builtinDisplay.Height/2);
d.Clear(Color.Black);
d.Fill(Color.White, square);
});
// -
display(builtinDisplay, "text/html")
// +
using System;
using SixLabors.Fonts;
using SixLabors.ImageSharp;
using SixLabors.ImageSharp.Drawing;
using SixLabors.ImageSharp.Drawing.Processing;

var font = SystemFonts.Collection.Find("Roboto").CreateFont(14);
var text = "Diego was here";
module.Display.Draw((context,cr) => {
    context.Clear(Color.Black);
    // Measure the rendered string so it can be centred in the display area.
    var rect = TextMeasurer.Measure(text, new RendererOptions(font));
    var x = (cr.Width - rect.Width) / 2;
    // BUG FIX: the top-left corner of a vertically centred string is at
    // (Height - rect.Height) / 2; the original '+' placed it below centre.
    var y = (cr.Height - rect.Height) / 2;
    // BUG FIX: draw at the computed centred position — the original passed
    // PointF(0, 0), leaving x and y unused and the text pinned top-left.
    context.DrawText(text, font, Color.White, new PointF(x, y));
});
// -
display(builtinDisplay, "text/html")
| examples/notebooks/piTopDisplay.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression
# LR model prediction:
#
# $$\hat{y} = \theta_{0} + \theta_{1}x_{1} + \theta_{2}x_{2} + \cdots + \theta_{n}x_n = h_{\theta}(x) = \theta^T \cdot x$$
#
# Goal:
# $$minimize_{\theta} MSE(X, h_{\theta}) = \frac{1}{m} \sum_{i=1}^{m}\left( \theta^T \cdot x^\left(i\right) - y^\left(i\right) \right)^2$$
# ## Solution 1: Normal Equations
# $$ \hat{\theta} = \left(X^T \cdot X\right)^{-1} \cdot X^T \cdot y$$
# +
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures; raw string so the backslashes are literal and
# "\J", "\S", "\I" cannot be misread as escape sequences.
IMAGES_PATH = r"D:\Jupyter\Scikit-learn\Image"
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<fig_extension>."""
    target = os.path.join(IMAGES_PATH, "{}.{}".format(fig_id, fig_extension))
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
# +
import numpy as np
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)
# -
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([0, 2, 0, 15])
save_fig("generated_data_plot")
plt.show()
X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
theta_best
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance
y_predict = X_new_b.dot(theta_best)
y_predict
plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 2, 0, 15])
save_fig("linear_model_predictions")
plt.show()
# ### Normal Equation的Sklearn实现
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
# ## Solution 2: Gradient Descent
# * 假设你在山上迷路了,且只能感受到地的坡度
# * 下山的最好策略: 走最陡的坡(导数)
#
# **GD的主要问题:**
# 1. 如果Cost Function不是碗状函数,那么很难找到全局最优解
#
# **GD的注意事项**
# 1. 保证所有的feature都有相似的Scale
# ### Batch GD
#
# * 计算Cost function关于不同theta的导数
# * 好比这个问题: 面向东的时候,坡度多少,面向西的时候,坡度,选择一个坡度最大的作为下一步的方向
#
# $$\frac{\partial}{\partial \theta_{j}}MSE(\theta) = \frac{2}{m}\sum_{i=1}^m\left(\theta^T \cdot x^\left(i\right) - y^\left(i\right)\right)x_{j}^\left(i\right) \tag{1}$$
#
# $$\nabla_{\theta}MSE(\theta) = \begin{pmatrix} \frac{\partial}{\partial \theta_{0}}MSE(\theta) \\ \frac{\partial}{\partial \theta_{1}}MSE(\theta) \\ \vdots \\ \frac{\partial}{\partial \theta_{n}}MSE(\theta) \end{pmatrix}= \frac{2}{m}X^T \cdot (X \cdot \theta - y) \tag{2} $$
#
# $$ \theta^\left(next step\right) = \theta -\eta \nabla_{\theta}MSE(\theta) \tag{3}$$
#
# 公式(1)是实际算数公式,(2)是矢量形式的
#
# **公式(3)是最终的GD 算法**
# +
eta = 0.1
n_iterations = 1000
m = 100
theta = np.random.randn(2,1)
for iteration in range(n_iterations):
gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
theta = theta - eta * gradients
# -
theta
# +
theta_path_bgd = []  # records the batch-GD parameter trajectory for later plotting
def plot_gradient_descent(theta, eta, theta_path=None):
    """Run 1000 batch-GD steps with step size eta, plotting the first 10 fits.

    Reads the module-level data X, y, X_b, X_new, X_new_b. If theta_path is
    given, every iterate of theta is appended to it.
    """
    m = len(X_b)
    plt.plot(X, y, "b.")
    n_iterations = 1000
    for iteration in range(n_iterations):
        if iteration < 10:
            # Show the model's fit for the first ten iterations (red dashes first).
            y_predict = X_new_b.dot(theta)
            style = "b-" if iteration > 0 else "r--"
            plt.plot(X_new, y_predict, style)
        # Batch gradient of MSE: (2/m) * X^T (X.theta - y).
        gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
        theta = theta - eta * gradients
        if theta_path is not None:
            theta_path.append(theta)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 2, 0, 15])
    plt.title(r"$\eta = {}$".format(eta), fontsize=16)
# +
np.random.seed(42)
theta = np.random.randn(2,1) # random initialization
plt.figure(figsize=(10,4))
plt.subplot(131); plot_gradient_descent(theta, eta=0.02)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)
plt.subplot(133); plot_gradient_descent(theta, eta=0.5)
save_fig("gradient_descent_plot")
plt.show()
# -
# ### Stochastic GD
# Drawback of Batch GD:
#
# * Take whole dataset to compute gradient, which is super slow
#
# **Stochastic GD:**
# * Pick a random instance in the training set every step and compute gradient only on single instance---much faster
# * cost function会上下跳动,只是从趋势上看是下降的,最终会降低到最低点,但是之后又会反弹,并且不会再次到最低点
# * 解非最优解
# * 优化版SGD: 在每次迭代后重新设置learning rate
#
# +
theta_path_sgd = []
m = len(X_b)
np.random.seed(42)
n_epochs = 50
t0, t1 = 5, 50 # learning schedule hyperparameters
def learning_schedule(t):
    # Decaying step size eta = t0 / (t + t1): starts at 5/50 = 0.1 and shrinks.
    return t0 / (t + t1)
theta = np.random.randn(2,1) # random initialization
for epoch in range(n_epochs):
for i in range(m):
if epoch == 0 and i < 20: # not shown in the book
y_predict = X_new_b.dot(theta) # not shown
style = "b-" if i > 0 else "r--" # not shown
plt.plot(X_new, y_predict, style) # not shown
random_index = np.random.randint(m)
xi = X_b[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(epoch * m + i)
theta = theta - eta * gradients
theta_path_sgd.append(theta) # not shown
plt.plot(X, y, "b.") # not shown
plt.xlabel("$x_1$", fontsize=18) # not shown
plt.ylabel("$y$", rotation=0, fontsize=18) # not shown
plt.axis([0, 2, 0, 15]) # not shown
save_fig("sgd_plot") # not shown
plt.show()
# -
# **BGD用了1000次循环找到最优解,SGD只用了50次**
# #### SGD的python实现
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=0.1, random_state=42)
sgd_reg.fit(X, y.ravel())
sgd_reg.intercept_, sgd_reg.coef_
# ### Mini-batch GD
#
# * 只取训练集的部分子集来训练
# +
theta_path_mgd = []
n_iterations = 50
minibatch_size = 20
np.random.seed(42)
theta = np.random.randn(2,1) # random initialization
t0, t1 = 200, 1000
def learning_schedule(t):
    # Decaying step size eta = t0 / (t + t1), here with t0=200, t1=1000.
    return t0 / (t + t1)
t = 0
for epoch in range(n_iterations):
shuffled_indices = np.random.permutation(m)
X_b_shuffled = X_b[shuffled_indices]
y_shuffled = y[shuffled_indices]
for i in range(0, m, minibatch_size):
t += 1
xi = X_b_shuffled[i:i+minibatch_size]
yi = y_shuffled[i:i+minibatch_size]
gradients = 2/minibatch_size * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(t)
theta = theta - eta * gradients
theta_path_mgd.append(theta)
# -
theta
# ### Compare different GD method
theta_path_bgd = np.array(theta_path_bgd)
theta_path_sgd = np.array(theta_path_sgd)
theta_path_mgd = np.array(theta_path_mgd)
plt.figure(figsize=(7,4))
plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], "r-s", linewidth=1, label="Stochastic")
plt.plot(theta_path_mgd[:, 0], theta_path_mgd[:, 1], "g-+", linewidth=2, label="Mini-batch")
plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], "b-o", linewidth=3, label="Batch")
plt.legend(loc="upper left", fontsize=16)
plt.xlabel(r"$\theta_0$", fontsize=20)
plt.ylabel(r"$\theta_1$ ", fontsize=20, rotation=0)
plt.axis([2.5, 4.5, 2.3, 3.9])
save_fig("gradient_descent_paths_plot")
plt.show()
# # Polynomial Regression
# * 两度
# 用于拟合非线性的函数
# +
import numpy as np
import numpy.random as rnd
np.random.seed(42)
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([-3, 3, 0, 10])
save_fig("quadratic_data_plot")
plt.show()
# -
# ## 通过PolynomialFeature构造特征值
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
X[0]
X_poly[0]
# * `X_poly`中保存着原特征值,和特征值的平方
#
# * 现在我们可以用线性模型来拟合
# +
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_
# -
# 模拟结果是:
#
# $$\hat{y} = 0.56x_{1}^2 + 0.93x_{1} + 1.78$$
# # Learning Curves
# ## 为什么用Learning Curve?
# **评估模型的好坏**
# 对于Polynomial的维度选择,是一个问题,你可以使用线性的,二维的,甚至多维度的来拟合曲线,下面我们用不同的度数来拟合曲线
# +
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)):
polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
std_scaler = StandardScaler()
lin_reg = LinearRegression()
polynomial_regression = Pipeline([
("poly_features", polybig_features),
("std_scaler", std_scaler),
("lin_reg", lin_reg),
])
polynomial_regression.fit(X, y)
y_newbig = polynomial_regression.predict(X_new)
plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)
plt.plot(X, y, "b.", linewidth=3)
plt.legend(loc="upper left")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([-3, 3, 0, 10])
save_fig("high_degree_polynomials_plot")
plt.show()
# -
# * 我们可以看出,300度的明显比其他的好,但是容易出现过拟合
# * 线性模型,相反容易出现欠拟合
# * **那么该如何选择度数呢? 最好有个像Regression中那种CV评价机制就好了**
# ## 定义Learning Curves
# ### 线性模型的学习曲线
# +
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
    """Plot train/validation RMSE as a function of training-set size.

    The model is refit from scratch on the first m training examples for
    every m, so this costs O(len(X)) fits — fine for this small demo data.
    """
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
    train_errors, val_errors = [], []
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        # Store squared errors; the square root is taken at plot time.
        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
        val_errors.append(mean_squared_error(y_val, y_val_predict))
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
    plt.legend(loc="upper right", fontsize=14)   # not shown in the book
    plt.xlabel("Training set size", fontsize=14) # not shown
    plt.ylabel("RMSE", fontsize=14)              # not shown
# -
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)
plt.axis([0, 80, 0, 3]) # not shown in the book
save_fig("underfitting_learning_curves_plot") # not shown
plt.show() # not shown
# * 当只有1-2个trainging set时,RMSE->0, 说明现在模型很好
# * 当更多训练样本时,模型误差越来越大了
# * 然后就一直在一个地方,不怎么动了
# * **这是一个典型的过拟合模型,两条曲线都在某个值处停止波动,且这个值还很大**
# ### 非线性模型的学习曲线
# +
from sklearn.pipeline import Pipeline
polynomial_regression = Pipeline([
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("lin_reg", LinearRegression()),
])
plot_learning_curves(polynomial_regression, X, y)
plt.axis([0, 80, 0, 3]) # not shown
save_fig("learning_curves_plot") # not shown
plt.show() # not shown
# -
# * 训练集的误差比test集的要小得多
# * 两条线的gap说明, 模型在训练集的表现远远好于测试集, **过拟合**
# # Regularized Linear Models
# ## Ridge Regression
# 为了防止Overfitting的问题出现---》我们可以设计更加复杂的多维函数,只要让他们的权重越小就好了
#
# $$J(\theta) = MSE(\theta) + \alpha \frac{1}{2}\sum_{i=1}^n \theta_{i}^2 \tag{4}$$
#
# $$\hat{\theta} = (X^T \cdot X + \alpha A)^\left(-1\right) \cdot X^T \cdot y \tag{5}$$
#
# 公式(4)是添加惩罚项的Cost function, 公式(5)是它的向量形式
# +
from sklearn.linear_model import Ridge
np.random.seed(42)
m = 20
X = 3 * np.random.rand(m, 1)
y = 1 + 0.5 * X + np.random.randn(m, 1) / 1.5
X_new = np.linspace(0, 3, 100).reshape(100, 1)
def plot_model(model_class, polynomial, alphas, **model_kargs):
    """Fit model_class for each alpha and plot its predictions over X_new.

    alpha == 0 falls back to plain LinearRegression; with polynomial=True the
    model is wrapped in a degree-10 polynomial + scaling pipeline.
    """
    for alpha, style in zip(alphas, ("b-", "g--", "r:")):
        model = model_class(alpha, **model_kargs) if alpha > 0 else LinearRegression()
        if polynomial:
            model = Pipeline([
                    ("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
                    ("std_scaler", StandardScaler()),
                    ("regul_reg", model),
                ])
        model.fit(X, y)
        y_new_regul = model.predict(X_new)
        # Regularized fits get a thicker line than the unregularized baseline.
        lw = 2 if alpha > 0 else 1
        plt.plot(X_new, y_new_regul, style, linewidth=lw, label=r"$\alpha = {}$".format(alpha))
    plt.plot(X, y, "b.", linewidth=3)
    plt.legend(loc="upper left", fontsize=15)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 3, 0, 4])
plt.figure(figsize=(8,4))
plt.subplot(121)
plot_model(Ridge, polynomial=False, alphas=(0, 10, 100), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(122)
plot_model(Ridge, polynomial=True, alphas=(0, 10**-5, 1), random_state=42)
save_fig("ridge_regression_plot")
plt.show()
# -
# ### Ridge Regression in sklearn
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver="cholesky", random_state=42)
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
# **上面的代码也等价于下面的代码**
sgd_reg = SGDRegressor(max_iter=5, penalty="l2", random_state=42)
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])
# ## Lasso Regression
# $$J(\theta) = MSE(\theta) + \alpha \sum_{i=1}^n \left| \theta_{i} \right| \tag{6}$$
# * Lasso Regression倾向于将不重要的特征值权重都设置为0
# * Lasso Regression可以同时完成特征选择,并输入一个稀疏模型(非零权重很少)
from sklearn.linear_model import Lasso
# L1-penalized regression; drives unimportant feature weights to exactly 0.
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
# **上面的代码也等价于`SGDRegressor(max_iter=5, penalty="l1", random_state=42)`**
# ## Elastic Net
# 介于Ridge和Lasso之间的
#
# $$J(\theta) = MSE(\theta) + r \alpha \sum_{i=1}^n \left| \theta_{i} \right| + \frac{1-r}{2} \alpha \sum_{i=1}^n \theta_{i}^2 \tag{7}$$
# ## Early Stopping
# 当Cost function不再减少,并开始增加的时候,说明模型开始过拟合了,我们可以探测这个点
# +
# Generate a noisy quadratic dataset; only the first 50 points are split
# into equal train/validation halves for the early-stopping demo.
np.random.seed(42)
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 2 + X + 0.5 * X**2 + np.random.randn(m, 1)
X_train, X_val, y_train, y_val = train_test_split(X[:50], y[:50].ravel(), test_size=0.5, random_state=10)
# Degree-90 polynomial expansion + scaling: deliberately overparameterized so
# the validation error eventually turns upward (overfitting).
poly_scaler = Pipeline([
        ("poly_features", PolynomialFeatures(degree=90, include_bias=False)),
        ("std_scaler", StandardScaler()),
    ])
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
X_val_poly_scaled = poly_scaler.transform(X_val)
# warm_start=True makes each .fit() continue from the previous weights, so
# the loop below performs exactly one epoch per iteration.
sgd_reg = SGDRegressor(max_iter=1,
                       penalty=None,
                       eta0=0.0005,
                       warm_start=True,
                       learning_rate="constant",
                       random_state=42)
n_epochs = 500
train_errors, val_errors = [], []
for epoch in range(n_epochs):
    sgd_reg.fit(X_train_poly_scaled, y_train)
    y_train_predict = sgd_reg.predict(X_train_poly_scaled)
    y_val_predict = sgd_reg.predict(X_val_poly_scaled)
    train_errors.append(mean_squared_error(y_train, y_train_predict))
    val_errors.append(mean_squared_error(y_val, y_val_predict))
# Epoch with the lowest validation error = the early-stopping point.
best_epoch = np.argmin(val_errors)
best_val_rmse = np.sqrt(val_errors[best_epoch])
plt.annotate('Best model',
             xy=(best_epoch, best_val_rmse),
             xytext=(best_epoch, best_val_rmse + 1),
             ha="center",
             arrowprops=dict(facecolor='black', shrink=0.05),
             fontsize=16,
            )
best_val_rmse -= 0.03 # just to make the graph look better
plt.plot([0, n_epochs], [best_val_rmse, best_val_rmse], "k:", linewidth=2)
plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="Validation set")
plt.plot(np.sqrt(train_errors), "r--", linewidth=2, label="Training set")
plt.legend(loc="upper right", fontsize=14)
plt.xlabel("Epoch", fontsize=14)
plt.ylabel("RMSE", fontsize=14)
save_fig("early_stopping_plot")
plt.show()
# -
# ## 使用场合
# * 建议: Linear Regression + Ridge Regression
# * 如果只有少量的feature有用: Elastic Net > Lasso
# # Logistic Regression
# 目标函数:
#
# $$\hat{p} = h_{\theta}(x) = \sigma \left( \theta^T \cdot x\right)$$
#
# Cost Functions:
#
# $$J(\theta) = - \frac{1}{m} \sum_{i=1}^m \left[ y^\left(i\right) \log\left(\hat{p}^\left(i\right)\right) + (1-y^\left(i\right))\log\left(1- \hat{p}^\left(i\right)\right) \right]$$
#
# Cost Function的导数
#
# $$\frac{\partial}{\partial{\theta_{j}}} = \frac{1}{m} \sum_{i=1}^{m}\left(\sigma \left(\theta^T \cdot x^\left(i\right)\right) - y^\left(i\right)\right)x_{j}^\left(i\right)$$
# ## Use iris as examples to LR
# Load the iris data and keep only the petal-width feature.
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())
X = iris["data"][:, 3:] # petal width
# np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin int is
# what the alias pointed to, so behavior is unchanged.
y = (iris["target"] == 2).astype(int) # 1 if Iris-Virginica, else 0
# * Create LogisticRegression to train model
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(random_state=42)
log_reg.fit(X, y)
# ### LR with one variables
# +
# Probability of Iris-Virginica as a function of petal width, with the 50 %
# decision boundary marked.
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
# first petal width whose predicted P(virginica) reaches 0.5
decision_boundary = X_new[y_proba[:, 1] >= 0.5][0]
plt.figure(figsize=(8, 3))
plt.plot(X[y==0], y[y==0], "bs")
plt.plot(X[y==1], y[y==1], "g^")
plt.plot([decision_boundary, decision_boundary], [-1, 2], "k:", linewidth=2)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris-Virginica")
plt.text(decision_boundary+0.02, 0.15, "Decision boundary", fontsize=14, color="k", ha="center")
plt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b')
plt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g')
plt.xlabel("Petal width (cm)", fontsize=14)
plt.ylabel("Probability", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 3, -0.02, 1.02])
save_fig("logistic_regression_plot")
plt.show()
# -
decision_boundary
# ### LR with two variables
# +
from sklearn.linear_model import LogisticRegression
# Two-feature logistic regression; a huge C (weak regularization) gives a
# near-unregularized fit.
X = iris["data"][:, (2, 3)] # petal length, petal width
# np.int was removed in NumPy 1.24; use the builtin int it aliased.
y = (iris["target"] == 2).astype(int)
log_reg = LogisticRegression(C=10**10, random_state=42)
log_reg.fit(X, y)
# evaluation grid over the plotted petal-length/width range
x0, x1 = np.meshgrid(
        np.linspace(2.9, 7, 500).reshape(-1, 1),
        np.linspace(0.8, 2.7, 200).reshape(-1, 1),
    )
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = log_reg.predict_proba(X_new)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs")
plt.plot(X[y==1, 0], X[y==1, 1], "g^")
zz = y_proba[:, 1].reshape(x0.shape)
contour = plt.contour(x0, x1, zz, cmap=plt.cm.brg)
# The linear decision boundary: theta0 + theta1*x0 + theta2*x1 = 0,
# solved for x1 at the two plotted x-limits.
left_right = np.array([2.9, 7])
boundary = -(log_reg.coef_[0][0] * left_right + log_reg.intercept_[0]) / log_reg.coef_[0][1]
plt.clabel(contour, inline=1, fontsize=12)
plt.plot(left_right, boundary, "k--", linewidth=3)
plt.text(3.5, 1.5, "Not Iris-Virginica", fontsize=14, color="b", ha="center")
plt.text(6.5, 2.3, "Iris-Virginica", fontsize=14, color="g", ha="center")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.axis([2.9, 7, 0.8, 2.7])
save_fig("logistic_regression_contour_plot")
plt.show()
# -
| Scikit-learn/Scikit-learn-Linear_polyomial_logic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# importing tools
import s3fs
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import cmocean
import math
from matplotlib.patches import Ellipse
import gsw
#for removing warnings
import warnings
# # Mooring data
# +
# open the data with s3fs from the databucket
# open the data with s3fs from the databucket
# (authenticated access against the Sigma2 "karen" object store endpoint)
fsg = s3fs.S3FileSystem(anon=False,
                        client_kwargs={
                            'endpoint_url': 'https://karen.uiogeo-apps.sigma2.no'
                        })
#data_path2 = 's3://data/vel_dataIII.zarr/vel_dataIII.zarr'
# zarr stores: model u/v velocity, model vertical grid, mooring variance
data_path1 = 's3://velocity-u.zarr'
data_path2 = 's3://velocity-v.zarr'
data_path3 = 's3://data/CREG12.L75-REF08_mesh_zgr.zarr'
data_path4 = 's3://data/modmoor_varianceII.zarr'
remote_files1 = fsg.glob(data_path1)
remote_files2 = fsg.glob(data_path2)
remote_files3 = fsg.glob(data_path3)
remote_files4 = fsg.glob(data_path4)
# map each zarr store and open it lazily with xarray
store1 = s3fs.S3Map(root=data_path1, s3=fsg, check=False)
store2 = s3fs.S3Map(root=data_path2, s3=fsg, check=False)
store3 = s3fs.S3Map(root=data_path3, s3=fsg, check=False)
store4 = s3fs.S3Map(root=data_path4, s3=fsg, check=False)
du = xr.open_zarr(store=store1, consolidated=True)
dv = xr.open_zarr(store=store2, consolidated=True)
dzz = xr.open_zarr(store=store3, consolidated=True)
dvv = xr.open_zarr(store=store4, consolidated=True)
# -
# model bathymetry index (mbathy) cropped to the analysis region
bathym = dzz.mbathy.isel(x=slice(200,1400),y=slice(650,1800)).squeeze(axis=0)
def open_s3fs(path):
    """Open all objects matching ``s3://data/<path>`` on the Sigma2 bucket
    as one xarray dataset.

    Every matching object is opened as a file handle and the handles are
    combined with ``xr.open_mfdataset`` using ``compat='override'``.
    """
    fs = s3fs.S3FileSystem(
        anon=False,
        client_kwargs={'endpoint_url': 'https://karen.uiogeo-apps.sigma2.no'},
    )
    matches = fs.glob(f's3://data/{path}')
    handles = [fs.open(match) for match in matches]
    return xr.open_mfdataset(handles, compat='override')
# +
# Load the observational mooring records from the data bucket: Barents Sea
# A-TWAIN ADCPs, Yermak Plateau, Lofoten PROVOLO, AON Beaufort, and the
# north-of-Svalbard E/W lines, plus a smoothed model grid.
d15 = open_s3fs('mooring_barents/A-TWAIN_2015-2017_AT200-3_22-118m_ACDP_16640.nc')
d13 = open_s3fs('mooring_barents/A-TWAIN_2013-2015_AT200-2_14-121m_ACDP_16640.nc')
d17 = open_s3fs('mooring_barents/A-TWAIN_2017-2019_AT200-4_20-167m_ACDP_16640.nc')
y1 = open_s3fs('mooring_yermark/Y1.nc') # have information on temp and salinity
y2 = open_s3fs('mooring_yermark/Y2.nc')
y3 = open_s3fs('mooring_yermark/Y3.nc')
MB = open_s3fs('mooring_lofoten/PROVOLO_Mooring_MB.nc') # have information on temp and salinity
MW = open_s3fs('mooring_lofoten/PROVOLO_Mooring_MW.nc')
MN = open_s3fs('mooring_lofoten/PROVOLO_Mooring_MN.nc')
MS = open_s3fs('mooring_lofoten/PROVOLO_Mooring_MS.nc')
ANO18 = open_s3fs('moorings_/AON_Sig_ADCP_BS3_2016_2018.nc')
ANO16 = open_s3fs('moorings_/AON_LR_ADCP_BS3_2014_2016.nc')
ANO14 = open_s3fs('moorings_/AON_LR_ADCP_BS3_2013_2014.nc')
E3 = open_s3fs('mooring_nord_svalbard/NL_E3.nc') # have information on temp and salinity
W3 = open_s3fs('mooring_nord_svalbard/NL_W3.nc')
E2 = open_s3fs('mooring_nord_svalbard/NL_E2.nc')
W2 = open_s3fs('mooring_nord_svalbard/NL_W2.nc')
E1 = open_s3fs('mooring_nord_svalbard/NL_E1.nc')
W1 = open_s3fs('mooring_nord_svalbard/NL_W1.nc')
grid = open_s3fs('smooth_grid_karen.nc')
# -
# (longitude, latitude) positions used by dens() via index i.
# NOTE(review): the three plotting loops below all pass i starting at 0, so
# the Yermak, Lofoten and north-of-Svalbard profiles all pick positions from
# the top of this list — confirm the intended mooring-to-position mapping.
p_list = [(31.13247,81.24202),
          (31.13533,81.24255),
          (31.14506,81.24587)
          ,(011.1189, 69.5289),
          (013.16845,68.58759),
          (013.19866,68.56109),
          (012.45082,68.50128),
          (5.48733,80.03876),
          (5.56333,79.44093),
          (24.00000,81.24925),
          (18.29052,81.10979),
          (23.59853,81.30813),
          (18.23789,81.22686),
          (23.59982,81.35453),
          (18.23730,81.27356)]
def mean_DEPTH(variabel):
    """NaN-ignoring mean of `variabel` at each DEPTH level.

    Parameters
    ----------
    variabel : xarray.DataArray
        Array with a DEPTH dimension; every level is averaged over all
        remaining elements (e.g. time), skipping NaN gaps in the record.

    Returns
    -------
    numpy.ndarray
        One mean value per DEPTH level (NaN if a level is all-NaN).
    """
    mean_ = np.zeros(len(variabel.DEPTH))
    for i in range(len(variabel.DEPTH)):
        level = variabel.isel(DEPTH=i)
        # np.nanmean replaces the original's manual isnan-mask-then-mean;
        # same result, one vectorized call.
        mean_[i] = np.nanmean(level.values)
    return mean_
# +
warnings.simplefilter("ignore")
# Depth-resolved time means of salinity (_s) and temperature (_t) for each
# mooring that records them; _p keeps the raw pressure variable unaveraged.
y1_s = mean_DEPTH(y1.PSAL)
y1_t = mean_DEPTH(y1.TEMP)
y1_p = y1.PRES
y2_s = mean_DEPTH(y2.PSAL)
y2_t = mean_DEPTH(y2.TEMP)
y2_p = y2.PRES
y3_s = mean_DEPTH(y3.PSAL)
y3_t = mean_DEPTH(y3.TEMP)
y3_p = y3.PRES
MB_s = mean_DEPTH(MB.PSAL)
MB_t = mean_DEPTH(MB.TEMP)
MB_p = MB.PRES_ins
MW_s = mean_DEPTH(MW.PSAL)
MW_t = mean_DEPTH(MW.TEMP)
MW_p = MW.PRES_ins
MN_s = mean_DEPTH(MN.PSAL)
MN_t = mean_DEPTH(MN.TEMP)
MN_p = MN.PRES_ins
#MS_s = MS.PSAL
E3_s = mean_DEPTH(E3.PSAL)
E3_t = mean_DEPTH(E3.TEMP)
E3_p = E3.PRES_ins
W3_s = mean_DEPTH(W3.PSAL)
W3_t = mean_DEPTH(W3.TEMP)
W3_p = W3.PRES_ins
E2_s = mean_DEPTH(E2.PSAL)
E2_t = mean_DEPTH(E2.TEMP)
E2_p = E2.PRES_ins
W2_s = mean_DEPTH(W2.PSAL)
W2_t = mean_DEPTH(W2.TEMP)
W2_p = W2.PRES_ins
E1_s = mean_DEPTH(E1.PSAL)
E1_t = mean_DEPTH(E1.TEMP)
E1_p = E1.PRES_ins
#W1_s = W1.PSAL
# -
# NOTE(review): the two lines below were scratch notes — SA, CT, p, depth and
# lat are undefined at this point, so executing them raised NameError. Kept
# commented out for reference.
# gsw.density.rho(SA, CT, p)
# p = gsw.p_from_z(depth, lat)  # depth must be positive upward, handled with *(-1)
y1_s
E1
# function to get the density with depth fro the relevant moorings
def dens(sal, temp, depth, i):
    """Potential density anomaly (sigma0) profile for mooring i.

    The mooring position comes from the module-level ``p_list`` (indexed by
    ``i`` as ``(lon, lat)``); ``depth`` is height, positive upward, as
    required by ``gsw.p_from_z``.
    """
    lon, lat = p_list[i]
    pressure = gsw.p_from_z(depth, lat)
    absolute_salinity = gsw.SA_from_SP(sal, pressure, lon, lat)
    return gsw.density.sigma0(absolute_salinity, temp)
# +
# SALINITY PROFILE YERMARK
warnings.simplefilter("ignore")
fig, axes = plt.subplots(1,3,figsize=(16,10))
sl = [y1_s, y2_s, y3_s]
tl = [y1_t, y2_t, y3_t]
dl = [y1,y2,y3]
for axs, i, b, t, m in zip(axes.flat, range(3),[1535,1209,850],[0,0,0], ['Y1', 'Y2', 'Y3']):
depth = -dl[i]['DEPTH']
rho = dens(sl[i],tl[i],depth,i)
dyp_mr = np.linspace(t,b,len(depth))
axs.plot(sl[i],dyp_mr, color = 'steelblue', label='Practical salinity')
ax2 = axs.twiny()
ax2.plot(rho,dyp_mr, color = 'b', label ='Potential density')
axs.invert_yaxis()
axs.set_xlabel('psu', fontsize=14)
ax2.set_xlabel('kg/m³', fontsize=14)
axs.legend(frameon=False, fontsize=12, loc="lower left")
ax2.legend(frameon=False, fontsize=12, loc="lower right")
axs.text(0.50, 0.98, f'{m}', color='k', fontsize=14, transform=axs.transAxes, verticalalignment='top')
fig.tight_layout()
plt.savefig('haloclin_dens_yermark.pdf', dpi = 300)
# +
#SALINITY PROFILES LOFOTEN
warnings.simplefilter("ignore")
fig, axes = plt.subplots(1,3,figsize=(16,10))
axes[0].set_ylabel('Depth [m]', fontsize=14)
sl = [MB_s, MW_s, MN_s]
tl = [MB_t, MW_t, MN_t]
dl = [MB,MW,MN]
# b/t pairs are the bottom/top depths (m) of the MB, MW and MN records.
for axs, i, b, t, m in zip(axes.flat, range(3), [2935,1500,640], [70,80,80], ['MB', 'MW', 'MN']):
    depth = -dl[i]['DEPTH']
    rho = dens(sl[i],tl[i],depth,i)
    dyp_mr = np.linspace(t,b,len(depth))
    axs.plot(sl[i],dyp_mr, color = 'steelblue', label='Practical salinity')
    ax2 = axs.twiny()
    ax2.plot(rho,dyp_mr, color = 'b', label ='Potential density')
    axs.invert_yaxis()
    axs.set_xlabel('psu', fontsize=14)
    ax2.set_xlabel('kg/m³', fontsize=14)
    axs.legend(frameon=False, fontsize=12, loc="lower left")
    ax2.legend(frameon=False, fontsize=12, loc="lower right")
    axs.text(0.50, 0.98, f'{m}', color='k', fontsize=14, transform=axs.transAxes, verticalalignment='top')
fig.tight_layout()
plt.savefig('haloclin_dens_lofoten.pdf', dpi = 300)
# +
# SALINITY PROFILES NORTH OF SVALBARD
warnings.simplefilter("ignore")
sl = [E3_s, W3_s, E2_s, W2_s, E1_s]
tl = [E3_t, W3_t, E2_t, W2_t, E1_t]
dl = [E3,W3,E2,W2,E1]
fig, axes = plt.subplots(1,5,figsize=(18,10))
axes[0].set_ylabel('Depth [m]', fontsize=14)
# b/t pairs are the bottom/top depths (m) of the five E/W line records.
for axs, i, b, t, m in zip(axes.flat,range(5), [1202,1222,727,706,401],[110,50,60,30,50], ['E3','W3','E2','W2','E1']):
    depth = -dl[i]['DEPTH']
    rho = dens(sl[i],tl[i],depth,i)
    dyp_mr = np.linspace(t,b,len(depth))
    axs.plot(sl[i],dyp_mr, color = 'steelblue', label='Practical salinity')
    ax2 = axs.twiny()
    ax2.plot(rho,dyp_mr, color = 'b', label ='Potential density')
    axs.invert_yaxis()
    axs.set_xlabel('psu', fontsize=14)
    ax2.set_xlabel('kg/m³', fontsize=14)
    axs.legend(frameon=False, fontsize=12, loc="lower left")
    ax2.legend(frameon=False, fontsize=12, loc="upper right")
    axs.text(0.70, 0.95, f'{m}', color='k', fontsize=14, transform=axs.transAxes, verticalalignment='top')
fig.tight_layout()
plt.savefig('haloclin_dens_Nsvalbard.pdf', dpi = 300)
# -
| Moor_sal_temp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os
import json
from hana_ml.dataframe import ConnectionContext
# +
# Load HANA connection settings (host, port, credentials) from env_cloud.json.
with open(os.path.join(os.getcwd(), './env_cloud.json')) as f:
    hana_env_c = json.load(f)
    port_c = hana_env_c['port']
    user_c = hana_env_c['user']
    url_c = hana_env_c['url']
    pwd_c = hana_env_c['pwd']
cc = ConnectionContext(url_c, port_c, user_c, pwd_c)
# sanity check: show the server version and the logon schema
print(cc.hana_version())
print(cc.get_current_schema())
# -
# ## Create Graph from Vertices and Edges
# +
from hana_ml.dataframe import create_dataframe_from_pandas
import pandas as pd
# Upload the vertex and edge CSVs as geo-enabled HANA tables. SRID 2193 is
# the New Zealand Transverse Mercator projection; force=True recreates an
# existing table.
# example SHAPE column: POINT (1752440.6821975708 5439964.327102661)
# NOTE(review): "LM_STROM_WATER_VERTICES" looks like a typo for "STORM", but
# it is only an internal table name and is used consistently nowhere else.
v_hdf = create_dataframe_from_pandas(
    connection_context=cc,
    pandas_df=pd.read_csv("./datasets/wwc_stormwater_vertices.csv"),
    table_name="LM_STROM_WATER_VERTICES",
    primary_key="ID",
    geo_cols=["SHAPE"],
    srid=2193,
    force=True)
# example SHAPE column: LINESTRING (1749169.286201477 5422260.568099976, 1749162.987197876 5422242.643096924)
e_hdf = create_dataframe_from_pandas(
    connection_context=cc,
    pandas_df=pd.read_csv("./datasets/wwc_stormwater_edges.csv"),
    table_name="LM_STORM_WATER_EDGES",
    primary_key="ID",
    not_nulls=["SOURCE", "TARGET"],
    geo_cols=["SHAPE"],
    srid=2193,
    force=True)
# +
import hana_ml.graph as hg
# Create the LM_STORM_WATER graph workspace from the uploaded vertex and
# edge tables (edges reference vertices via SOURCE/TARGET keys).
g_storm = hg.create_graph_from_dataframes(
    connection_context=cc,
    vertices_df=v_hdf,
    vertex_key_column="ID",
    edges_df=e_hdf,
    edge_source_column="SOURCE",
    edge_target_column="TARGET",
    edge_key_column="ID",
    workspace_name="LM_STORM_WATER",
)
# -
# Let's check
hgw = hg.discover_graph_workspaces(cc)
hgw[hgw.WORKSPACE_NAME == 'LM_STORM_WATER']
# Instantiate existing graph
# here only for demo purposes, since we already instantiated it during creation
g_storm = hg.Graph(
connection_context=cc,
workspace_name='LM_STORM_WATER',
schema="GRAPH_USER" # Optional, only needed if the schema differs from your logon schema
)
print(g_storm)
# ## Let's plot the whole graph on a map
# +
from keplergl import KeplerGl
# Collect edge geometries reprojected to WGS84 GeoJSON for map display.
pdf_storm_edges = g_storm.edges_hdf.select('ID', ('SHAPE_GEO.ST_TRANSFORM(4326).ST_ASGEOJSON()', 'GJ')).collect()
# NOTE(review): `map` shadows the builtin of the same name within the notebook.
map = KeplerGl(height=600, width=800)
map.add_data(pdf_storm_edges, 'Stormwater')
map
# -
# ## Inspect some basic graph stats
g_storm.describe()
# ### The Graph is not connected. Let's get the weakly connected components.
# `WeaklyConnectedComponents` is one of the available `Graph` algorithms.
#
# The usage pattern always is:
# ```Python
# result = hana_ml.graph.algorithms.<algorithm_name>(graph=<graph_instance>).execute(<parameters>)
# ```
# This returns an instance of the algorithm class, that contains the results as properties (e.g. `result.vertices`). Note: Tabular data are always returned as Pandas `DataFrame`s
# +
import hana_ml.graph.algorithms as hga
# Components are computed server-side; counts and vertex IDs come back.
wcc = hga.WeaklyConnectedComponents(graph=g_storm).execute()
print(f'There are {wcc.components_count} components in the Graph.')
# -
# Which are the largest components (i.e. sub networks)?
wcc.components.sort_values(by='NUMBER_OF_VERTICES', ascending=False).head(2)
wcc.vertices.head(10)
# ### Store the component number for each vertex in a HANA Table so we can use it as a filter.
# Due to a limitation in GraphQL, `WeaklyConnectedComponents.vertices` can only return the IDs.
# If we want to havve the full records, we need to load the information separately.
#
# Options:
# - Create Filter List on the client
# - Store results to HANA Tale and filter via an select statement
# Persist the vertex->component mapping to a HANA table so the component
# number can be used as a SQL filter when building subgraphs.
hdf_wcc = create_dataframe_from_pandas(
    connection_context=cc,
    pandas_df=wcc.vertices,
    drop_exist_tab=True,
    table_name='LM_STORMWATER_WCC',
    force=True,
    allow_bigint=True,
    primary_key='ID')
# Materialize the two components of interest as their own workspaces.
g_storm_comp1 = g_storm.subgraph(
    workspace_name = "LM_STORMWATER_COMP1",
    vertices_filter='ID IN (SELECT ID FROM LM_STORMWATER_WCC WHERE COMPONENT = 25)',
    force = True
)
g_storm_comp2 = g_storm.subgraph(
    workspace_name = "LM_STORMWATER_COMP2",
    vertices_filter='ID IN (SELECT ID FROM LM_STORMWATER_WCC WHERE COMPONENT = 5)',
    force = True
)
# ## Let's plot the two weakly connected components on a map
# +
# Collect WGS84 GeoJSON edge geometries of both components and map them.
pdf_storm_comp1_edges = g_storm_comp1.edges_hdf \
    .select('ID', 'SOURCE', 'TARGET', ('SHAPE_GEO.ST_TRANSFORM(4326).ST_ASGEOJSON()', 'GJ')).collect()
pdf_storm_comp2_edges = g_storm_comp2.edges_hdf \
    .select('ID', 'SOURCE', 'TARGET', ('SHAPE_GEO.ST_TRANSFORM(4326).ST_ASGEOJSON()', 'GJ')).collect()
map = KeplerGl(height=600, width=800)
map.add_data(pdf_storm_comp1_edges, 'Stormwater Component 1')
map.add_data(pdf_storm_comp2_edges, 'Stormwater Component 2')
map
# -
# ## Let's look Upstream and Downstream
#
# Let's assume somebody reported a problem with the node WCC_SW002719. We want to analyze that further.
#
# +
# Vertex reported as problematic; analyzed below.
start_vertex_id = 'WCC_SW002719'
# Get the details of that vertex
start_vertex = g_storm_comp2.vertices_hdf \
    .filter(f"ID = '{start_vertex_id}'") \
    .select('ID', ('SHAPE_GEO.ST_TRANSFORM(4326).ST_ASGEOJSON()', 'GJ')).collect()
start_vertex
# +
# All vertices within 1..5 hops of the start vertex, ignoring edge direction.
neighbors = hga.Neighbors(graph=g_storm_comp2).execute(
    start_vertex=start_vertex_id,
    direction='ANY',
    lower_bound=1,
    upper_bound=5)
neighbors.vertices.head(5)
# +
vkc=g_storm_comp2.vertex_key_column
in_list = neighbors.vertices.ID.str.cat(sep="','")
# NOTE(review): `filter` shadows the builtin within this cell.
filter = f"{vkc} IN ('{in_list}')" # Dynamically build the filter condition as SQL WHERE
print(filter)
pdf_storm_comp2_neighbors = g_storm_comp2.vertices_hdf \
    .filter(filter) \
    .select('ID', ('SHAPE_GEO.ST_TRANSFORM(4326).ST_ASGEOJSON()', 'GJ')).collect()
# -
map = KeplerGl(height=600, width=800)
map.add_data(pdf_storm_comp2_neighbors, '5-hop neighbors')
map.add_data(start_vertex, 'Start Vertex')
map
# ## Upstream and Downstream with NeighborsSubgraphs
# Upstream = everything that can reach the start vertex (INCOMING edges);
# downstream = everything reachable from it (OUTGOING edges).
g_neighbors_upstream = hga.NeighborsSubgraph(graph=g_storm_comp2).execute(
    start_vertex=start_vertex_id, direction='INCOMING',
    lower_bound=0, upper_bound=10000)
g_neighbors_downstream = hga.NeighborsSubgraph(graph=g_storm_comp2).execute(
    start_vertex=start_vertex_id, direction='OUTGOING',
    lower_bound=0, upper_bound=10000)
# +
# Fetch GeoJSON geometries for the edges of each subgraph via an ID filter.
ekc = g_storm_comp2.edge_key_column
in_list = g_neighbors_upstream.edges.ID.astype(str).str.cat(sep=',' )
pdf_storm_comp2_neighbors_upstream_edges = g_storm_comp2.edges_hdf \
    .filter(f"{ekc} IN ({in_list})") \
    .select('ID', ('SHAPE_GEO.ST_TRANSFORM(4326).ST_ASGEOJSON()', 'GJ')).collect()
in_list = g_neighbors_downstream.edges.ID.astype(str).str.cat(sep=',' )
pdf_storm_comp2_neighbors_downstream_edges = g_storm_comp2.edges_hdf \
    .filter(f"{ekc} IN ({in_list})") \
    .select('ID', ('SHAPE_GEO.ST_TRANSFORM(4326).ST_ASGEOJSON()', 'GJ')).collect()
# -
map = KeplerGl(height=600, width=800)
map.add_data(start_vertex, 'Start Vertex')
map.add_data(pdf_storm_comp2_neighbors_upstream_edges, 'Upstream')
map.add_data(pdf_storm_comp2_neighbors_downstream_edges, 'Downstream')
map
# +
# The Shortest Path One to All, could give an indication about what to check first
spoa = hga.ShortestPathsOneToAll(graph=g_storm_comp2).execute(source=start_vertex_id, direction='INCOMING', weight='LENGTH_M')
spoa.vertices.sort_values('DISTANCE')
# -
| NOTEBOOKS/WELLINGTON_STORMWATER/Wellington Stormwater Network Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, IdentityTransform
from matplotlib.collections import PathCollection, PatchCollection
from matplotlib.patheffects import withStroke, SimpleLineShadow, Normal, Stroke
from matplotlib.patches import PathPatch
import numpy as np
# -
# %matplotlib widget
# **问题:**
# collection.set_path_effects 方法如何使用?
# patheffects.withStroke 如何使用
# Ten random points; column_stack gives an (N, 2) array used as marker offsets.
px = np.random.rand(10)
py = np.random.rand(10)
offsets = np.ma.column_stack([px, py])
fig, ax = plt.subplots()
ax.plot(px,py)
# unit-circle Path scaled by 0.5 * reduction (= 25), used as the marker shape
reduction = 50
c = Path.unit_circle()
c = c.transformed(Affine2D().scale(0.5 * reduction))
# +
# One circle Path drawn at every offset; IdentityTransform keeps the marker
# size in display (pixel) units while the offsets follow data coordinates.
# NOTE(review): `transOffset` was renamed `offset_transform` in Matplotlib
# 3.6 — confirm the pinned matplotlib version before upgrading.
collection = PathCollection(
    (c,), offsets=offsets,
    transOffset = ax.transData,
    edgecolor='black',
    facecolor=(0, 0, 0, .0125),
    linewidth=1
)
collection.set_transform(IdentityTransform())
ax.add_collection( collection )
# -
# withStroke re-draws the path with a wide red stroke underneath itself.
collection.set_path_effects([withStroke(linewidth=5, foreground='r')])
# an empty list removes all path effects again
collection.set_path_effects([])
collection.get_path_effects()
# NOTE(review): reference snippet copied from the Matplotlib docs; Circle is
# never imported here and x, y, radius are undefined, so executing it raised
# NameError. Kept commented out for reference.
# circle = Circle((x, y), radius, clip_on=False, zorder=10, linewidth=1,
#                 edgecolor='black', facecolor=(0, 0, 0, .0125),
#                 path_effects=[withStroke(linewidth=5, foreground='w')])
| 2020070303.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# ## a)
# a) replace every missing value with the sentinel 9999.
df_a = pd.read_csv('correctedDiabetes.csv')
df_a
# fillna(9999) is the idiomatic equivalent of replace(np.NaN, 9999) and does
# not depend on the np.NaN alias, which was removed in NumPy 2.0.
df_a.fillna(9999, inplace=True)
df_a
# ## b)
# b) fill each column with a hand-picked default value.
df_b = pd.read_csv('correctedDiabetes.csv')
df_b
# A single fillna with a per-column mapping replaces the chained
# df_b['col'].replace(np.NaN, ..., inplace=True) calls, which relied on the
# np.NaN alias (removed in NumPy 2.0) and on chained in-place assignment
# (unreliable under pandas Copy-on-Write).
df_b.fillna({'Pregnancies': 0, 'Glucose': 800.0, 'BloodPressure': 72,
             'SkinThickness': 35, 'Insulin': 94, 'BMI': 33.6,
             'DiabetesPedigreeFunction': 0.627, 'Age': 50, 'Outcome': 0},
            inplace=True)
df_b
# ## c)
# c) impute every column with its own mean.
df_c = pd.read_csv('correctedDiabetes.csv')
df_c
# DataFrame.fillna broadcasts the per-column means returned by df_c.mean(),
# replacing the nine chained per-column replace(np.NaN, mean) calls (np.NaN
# was removed in NumPy 2.0; chained inplace is unreliable under Copy-on-Write).
df_c.fillna(df_c.mean(), inplace=True)
df_c
# ## d)
# d) impute every column with its most frequent value (mode).
df_d = pd.read_csv('correctedDiabetes.csv')
df_d
# mode() returns one row per modal value; iloc[0] selects the first mode of
# every column, matching the original per-column .mode()[0] lookups while
# avoiding the removed np.NaN alias and chained in-place replacement.
df_d.fillna(df_d.mode().iloc[0], inplace=True)
df_d
# ## e)
# e) compare the Glucose distributions produced by the four imputation
# strategies: sentinel (a), fixed values (b), mean (c), mode (d).
df_a['Glucose'].hist()
df_b['Glucose'].hist()
df_c['Glucose'].hist()
df_d['Glucose'].hist()
# ## g)
from sklearn.impute import SimpleImputer
df_g = pd.read_csv('correctedDiabetes.csv');
# g) mean imputation via scikit-learn's SimpleImputer, fitted on all columns
imp = SimpleImputer(strategy='mean')
imp.fit(df_g)
df_g
# +
# transform() returns a bare ndarray, so rebuild a DataFrame with the
# original column names and fold the imputed values back into df_g.
x=pd.DataFrame(imp.transform(df_g),columns=['Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI',
                                            'DiabetesPedigreeFunction','Age','Outcome'])
df_g.update(x)
df_g.head(50)
# -
| 2/Q3/Q3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Iryna-Lytvynchuk/Data_Science/blob/main/Hw2_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 554} id="cEG5AUk5moyq" outputId="053e593f-b866-466b-8f47-b130126f641f"
import pandas as pd
# NOTE(review): the original ".../file/d/<id>/view" sharing URL returns an
# HTML page rather than the CSV, so read_csv fails; the uc?id= endpoint
# serves the raw file instead.
df = pd.read_csv("https://drive.google.com/uc?id=1eTEbn59Q6ZwVbSIeqtZwYgOeJkta1ipi")
print(df)
# + colab={"base_uri": "https://localhost:8080/"} id="jVlHG8h5pPn-" outputId="26a937e9-fe68-4f5b-a182-213a44c9ee78"
print(df.head(5))
# + colab={"base_uri": "https://localhost:8080/"} id="TxvH3Yh8pU-y" outputId="9f66deeb-815f-4266-e688-40adb644b4a6"
df.shape
# + id="9JahRo7qqByC"
# How many books does the dataset describe? 550
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="wbIgc6rvqxPZ" outputId="2ebf0150-b1eb-4251-8d24-40bec90cfcc5"
# normalize the column names to snake_case
df.columns = ['name', 'author', 'user_rating', 'reviews', 'price', 'year', 'genre']
df
# + colab={"base_uri": "https://localhost:8080/"} id="t1UA9GJ0rGl6" outputId="49166ed4-42ca-45a2-f200-304869554993"
print(df
.isnull()
.sum()
)
# + id="wXU6y1SLrNCR"
# Are there missing values in any column? No
# + colab={"base_uri": "https://localhost:8080/"} id="scFYNJWTrSza" outputId="65228710-37de-4725-8882-41d873a674cb"
df["genre"].unique()
# + id="DPj_sl5Jre9u"
# Which unique genres are there? Non Fiction, Fiction
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="fjN3M4sktqy6" outputId="1242549f-b5dd-440d-b31b-d7c587a6b56a"
import matplotlib.pyplot as plt
df["price"].plot.hist()
plt.show()
# + id="Gc2xIYjGbMKg"
# NOTE(review): `sns` is never imported in this notebook — both cells below
# raise NameError unless `import seaborn as sns` is added first.
sns.barplot(x="year", y="price", data=df)
# + id="awih_baBegaf"
sns.heatmap(df.corr())
# + id="viQnNkxgfuop"
from mpl_toolkits.mplot3d import Axes3D
# 3-D scatter of year vs price vs review count.
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
x = df['year']
y = df['price']
z = df['reviews']
ax.set_xlabel("year")
ax.set_ylabel("price")
ax.set_zlabel("reviews")
ax.scatter(x, y, z)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="mCK5HBy0uWIC" outputId="3a9f01e1-a701-48d9-8d0a-3ab102ba417f"
df.agg({'price' : ['min', 'max', 'mean', 'median']})
# (original comments had the min/max answers swapped)
# Minimum price? 0
# Maximum price? 105
# Mean price? 13.1
# Median price? 11
# + colab={"base_uri": "https://localhost:8080/"} id="-c6F1cv-u8LT" outputId="e6d45f24-300a-43e1-9bf7-8a502d54b0e5"
# What is the highest rating in the dataset? 4.9
df["user_rating"].max()
# + colab={"base_uri": "https://localhost:8080/"} id="DMn09UGpvUbH" outputId="8601d45c-ff96-4cbf-a296-118ad02b6928"
# How many books have that rating? 52
df['user_rating'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 130} id="Jj5pH7b_xFhQ" outputId="7378243d-58ce-4f7e-b0b3-9a8b9f2d4249"
# Which book has the most reviews? You Are a Badass: How to Stop Doubting Your Gr...
# Fixed `df.[...]` — a SyntaxError; boolean-mask indexing is written df[...].
df[df.reviews == df.reviews.max()]
# + colab={"base_uri": "https://localhost:8080/", "height": 200} id="vCKhOxuZyzAW" outputId="588ff13d-e121-45d5-bea0-47f4050a1441"
# Of the books in the 2015 Top-50, which is the most expensive (an
# intermediate dataframe may be used)? Wonder
a = df[df.year==2015]
a[a.price == a.price.max()]
# + colab={"base_uri": "https://localhost:8080/"} id="460rneX75kzz" outputId="52f406b4-cea6-4fc9-f202-44d2d10ee801"
# How many Fiction books made the Top-50 in 2010 (use &)? 20
b = df[(df.year==2010) & (df.genre=="Fiction")]
b.shape
# + colab={"base_uri": "https://localhost:8080/"} id="67dNtYYY6ByC" outputId="61dddfe8-0213-4787-f7e2-f17fe90d1528"
# How many books rated 4.9 made the chart in 2010 or 2011 (use | or isin)? 1
df.name[((df.year==2010) | (df.year==2011)) & (df.user_rating==4.9)].count()
# + colab={"base_uri": "https://localhost:8080/", "height": 661} id="kGxIhcHB-BN7" outputId="bb49c81f-fcbc-4c17-addd-c83460a29e0d"
# Which book is last in the sorted list? Old School (Diary of a Wimpy Kid #10)
c = df[(df.year==2015) & (df.price < 8)]
# Reassign instead of sorting the filtered copy in place: inplace=True on a
# slice raises SettingWithCopyWarning and is unreliable under Copy-on-Write.
c = c.sort_values("price", ascending=True, na_position='last')
c
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="S5HPtcX0SYcx" outputId="88b6811e-a549-45a4-e044-4b7918157054"
# Min/max price per genre.
df.groupby(["genre"]).agg({'price': ['min','max']}).reset_index()
# Maximum price for the Fiction genre: 82
# Minimum price for the Fiction genre: 0
# Maximum price for the Non Fiction genre: 105
# Minimum price for the Non Fiction genre: 0
# + colab={"base_uri": "https://localhost:8080/"} id="Bxm36VMdT0iQ" outputId="de4a54d4-aa4c-4a3c-e27b-1aa433ceba33"
# Books per author, most prolific first.
author_count = df.groupby(["author"])['author'].count().sort_values(ascending=False)
print(author_count)
# What shape did the table come out? [248 rows x 2 columns]
# Which author has the most books? <NAME>
# How many books does that author have? 12
# + colab={"base_uri": "https://localhost:8080/"} id="0weA2KwkV93L" outputId="0c26ab37-2e7a-463c-a2cd-ce7207d1703e"
# Mean user rating per author, best first.
author_rating = df.groupby(["author"])['user_rating'].mean().sort_values(ascending=False)
print(author_rating)
# Which author has the lowest mean rating? <NAME>
# What is that author's mean rating? 3.9
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="IO16oG5iW51p" outputId="4a6354b1-a368-499d-9a64-fa37e448cd0a"
# Join the two per-author series side by side, both ascending.
pd.concat([author_count.sort_values(ascending = True), author_rating.sort_values(ascending = True)], axis=1)
# + id="bPOni9mOXyql"
# Which author is first in the list? <NAME>
| Hw2_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="-eV_p1ZY32gN"
# # Initialization
#
# + colab={} colab_type="code" id="a5AQsn9532gR"
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import StratifiedKFold
import warnings
from sklearn.exceptions import DataConversionWarning
import itertools
# -
# Hiding data conversion warnings generated by standard scaler.
# Standard scaler expects float as input and converts input to float throwing a warning message.
# StandardScaler casts the integer pixel columns to float and emits a
# DataConversionWarning on every fit/transform; the conversion is intended,
# so silence just that warning category.
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
# Running MLP on the entire dataset of 60K records.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="QwNrHiEy32gu" outputId="677cdc9a-38b7-46ee-c2f0-d27124313615"
# MNIST in CSV form (one 'label' column + 784 pixel columns), fetched as
# zipped CSVs directly from GitHub (?raw=true serves the raw file).
train_url='https://github.com/shobhitshah/MNIST-digit-recognizer/blob/develop/data/mnist-in-csv/mnist_train.csv.zip?raw=true'
test_url='https://github.com/shobhitshah/MNIST-digit-recognizer/blob/develop/data/mnist-in-csv/mnist_test.csv.zip?raw=true'
train = pd.read_csv(train_url, compression='zip')
test = pd.read_csv(test_url, compression='zip')
train.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 233} colab_type="code" id="1NpkMPQC32g5" outputId="72b746c0-2e24-42d9-e116-5dc4593f282a"
# Peek at a few random rows to sanity-check the load.
train.sample(5)
# + [markdown] colab_type="text" id="ytgQnyF132hF"
# # Feature selection
# + colab={} colab_type="code" id="r0-ab7Ev32hG"
# Separate the target ('label') from the 784 pixel features; .values yields
# plain numpy arrays so they can be indexed by the KFold split indices below.
y_train=train.label.values
X_train=train.drop('label', axis=1).values
X_test = test.drop('label', axis=1).values
y_test = test.label.values
# -
# Display names for the ten digit classes (list index == class id).
class_names = ['0', '1', '2', '3', '4',
               '5', '6', '7', '8', '9']
# + [markdown] colab_type="text" id="G_rz-kbE32hJ"
# # Define model
# + colab={} colab_type="code" id="VbUxMwoi32hL"
# Two hidden layers of 128 units, adam optimizer, up to 200 epochs,
# fixed random_state for reproducibility.
mlp = MLPClassifier(hidden_layer_sizes=(128,128), max_iter=200, alpha=1e-4,
                    solver='adam', verbose=False, tol=1e-4, random_state=1,
                    learning_rate_init=.001)
# -
# MLP is highly sensitive to scaling. Therefore, I am using standard scaler to scale the data. Use a pipeline to scale and fit the model.
#
# https://scikit-learn.org/stable/auto_examples/preprocessing/plot_scaling_importance.html
# NOTE: make_pipeline does not clone its steps, so fitting `clf` fits this
# very `mlp` instance in place (relied on by the weight visualization below).
clf = make_pipeline(StandardScaler(), mlp)
# + [markdown] colab_type="text" id="JS1f3lbI32hN"
# # Model fit and validate using Stratified KFold
# + colab={} colab_type="code" id="fjWX1HiC32hO"
def model_score(model, X, y):
    """Return the mean accuracy of `model` evaluated on features X and labels y.

    Thin wrapper around the estimator's own ``score`` method so the scoring
    call site can be swapped out in one place.
    """
    return model.score(X, y)
# -
def model_fit(model, X_train, y_train):
    """Fit `model` with 3-fold stratified cross-validation.

    For each fold the model is re-fit on the fold's training split and its
    accuracy on the validation split is printed and collected. Returns the
    list of per-fold validation scores; the model is left fitted on the
    last fold's training split.
    """
    fold_scores = []
    splitter = StratifiedKFold(n_splits=3, shuffle=True)
    for fit_idx, val_idx in splitter.split(X_train, y_train):
        model.fit(X_train[fit_idx], y_train[fit_idx])
        fold_score = model_score(model, X_train[val_idx], y_train[val_idx])
        print("Validation score for classifier... %.4f" % fold_score)
        fold_scores.append(fold_score)
    print("Done")
    return fold_scores
# + colab={} colab_type="code" id="IR2TfvGx32hR"
# Cross-validate the scaler+MLP pipeline on the full training set.
cv_scores = model_fit(clf, X_train, y_train)
# -
# # Evaluate the model
# +
print("Generating test scores for classifier... ")
# clf is left fitted on the last CV fold's training split (see model_fit).
score = model_score(clf, X_test, y_test)
print("Test score for classifier... %.4f" % score)
# -
# # Make predictions
# Predicted vs. true label for the first test sample.
predictions = clf.predict(X_test)
predictions[0]
y_test[0]
def plot_image(i, predictions_array, true_label, img):
    """Draw test digit `i` with its predicted and true class labels.

    The x-axis label reads "predicted (true)" and is colored blue when the
    prediction is correct, red otherwise. Images are 784-element rows that
    are reshaped back to 28x28 for display.
    """
    predicted = predictions_array[i]
    actual = true_label[i]
    pixels = img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(pixels.reshape(28, 28), cmap=plt.cm.binary)
    color = 'blue' if predicted == actual else 'red'
    plt.xlabel("{} ({})".format(class_names[predicted],
                                class_names[actual]),
               color=color)
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
# 4x6 grid at roughly 2x2 inches per image.
num_rows = 4
num_cols = 6
num_images = num_rows*num_cols
plt.figure(figsize=(2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, num_cols, i+1)
    plot_image(i, predictions, y_test, X_test)
plt.show()
# + [markdown] colab_type="text" id="4AVTyI6U32hZ"
# # Visualization of MLP weights
# Acknowledgments - https://scikit-learn.org/stable/auto_examples/neural_networks/plot_mnist_filters.html#sphx-glr-auto-examples-neural-networks-plot-mnist-filters-py
# +
# Visualize first-layer weights: each hidden unit has 784 input weights that
# reshape back to a 28x28 image. Only the first 25 of the 128 hidden units
# are shown — zip stops at the shorter sequence (the 5x5 grid of axes).
fig, axes = plt.subplots(5, 5, figsize=(13, 8))
# use global min / max to ensure all weights are shown on the same scale
# NOTE(review): relies on `mlp` having been fitted in place as a step of the
# `clf` pipeline above — confirm the fit cell ran before this one.
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
    ax.matshow(coef.reshape(28, 28), vmin=.5 * vmin,
               vmax=.5 * vmax)
    ax.grid(False)
    ax.set_xticks(())
    ax.set_yticks(())
plt.show()
| scripts/mnist-digit-recognizer-MLP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''py38_rdkit_blog'': conda)'
# metadata:
# interpreter:
# hash: b732ac2e9e8541d594a55bb9899b9aa030b36b870c696cf9f423bdd695da61af
# name: python3
# ---
# # "fpocket 4.0 - Towards biologics"
# > "A quick demo of all the new fpocket features with special focus on protein-peptide interfaces"
#
# - toc: true
# - branch: master
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [fpocket, pockets, cavity, PPI, protein, epitope, DNA, RNA, antibody]
# - image: images/fpocket-logo.png
# I just released the last version of fpocket (4.0) which contains quite a few new features. Most of the work was done by <NAME>, an eager summer intern we took in at Discngine during last year's COVID-mess summer. He did a great job under these circumstances (remote work in a new company etc). So kudos to him for these new additions.
#
# ## MMCIF support
#
# First of all, fpocket now supports mmCIF. Yay ... yet another very painful task to integrate for yet another painfully crappy molecular file format (I admire the openbabel folks ... ;) ). Same as for the PDB format we forked essentially an integration from the VMD molfile plugin and extended that a bit further. Mael also integrated mmCIF writers so all output files can be written in this new crappy format as well. Essentially you now have the choice to write either only pdb or cif or both. So everybody should be happy (nah ... that's never going to be case anyway).
#
# ## New command line flags
#
# The most interesting part (for me) of this release are additional command line flags and behind these several use cases that can be addressed now. These command line arguments enable mainly to work in a more controlled way with multi chain protein structures. This can be particularly useful if:
# 1. you want to study only one domain of one of these novel gigantic cryoEM structures
# 2. you want to study in detail a protein - biomolecule binding site (so other than small molecule)
# 3. you want to learn key characteristics of peptide/protein binding sites vs RNA/DNA binding sites vs drug binding sites
#
# ```
# -c char : (Default is none): Use this flag to choose which chains you want to delete before running fpocket. The selected chains can be specified with ',' or ':' delimiters, for example you can use it '-c B,D' or '-c B:D'. You can delete up to 20 different chains.
# ```
# This flag essentially allows you to explicitly delete particular chain(s) before doing the fpocket run. This allows you to identify (without any bias) clefts that might occur on the PPI that were inaccessible before for pocket prediction.
#
# ```
# -k char : (Default is none): Use this flag to choose which chains you want to keep before running fpocket. The selected chains can be specified with ',' or ':' delimiters, for example you can use it '-k A,B,E' or '-k A:B:E'. You can keep up to 20 different chains.
# ```
# This is essentially the inverse operator of the previous flag. If you do not want to list n chains, you can decide to keep only the relevant ones here.
#
# ```
# -a char : (Default is none): With this flag you can select a chain you want to be considered as a ligand. Works the same way as the "-r" flag but with a whole chain. Only a single chain can be chosen, for example '-a D'.
# ```
#
# This flag essentially allows you to target only one particular binding epitope. If you want to extract descriptors for only the binding site where the selected chain is located, you can use the `-a` flag. Fpocket will hide the chain specified via the flag, run vertex detection, and then choose all vertices overlapping with the chain you specified in `-a`. These will then be clustered together into a final "pocket". This is rather generic and can be applied to protein chains, RNA, DNA ... whatever you like, given that it is defined in a separate chain. It is so generic that in theory you can do even very nasty stuff, but let's see if you can find that out yourselves.
#
# This argument is particularly interesting when you want to extract examples (3D patches) or numerical descriptors (polar/apolar ASA, curvature, buriedness ...) for other binding sites than classical small molecule binding sites. It essentially lies out the basics for training & learning what such other binding epitopes look like. You can use that for a fancy model, or to characterize binding epitopes ultimately.
#
# ## Miscellaneous
#
# Separately, several things were added to the [github repo of fpocket](https://github.com/Discngine/fpocket), namely the old documentation was ported to markdown (that was painful).
# There's also a tiny testing environment, CI/CD pipes finally set up to check compilation and unit tests upon PR's.
#
# Last, an official fpocket docker image is also available on [dockerhub](https://hub.docker.com/r/fpocket/fpocket/).
# # Detecting binding epitopes
#
# Enough text now. Let's check what you can do with that now. Here we have a nice example of a [TEAD4 / YAP complex](https://www.rcsb.org/structure/6HIK). I'm interested in studying the clefts covered by YAP on the TEAD4 structure. Before I essentially had to drop YAP from the PDB run fpocket and put back in YAP to see where it is located. Here YAP corresponds to chain L (the red one) and TEAD4 (the grey one).
#
#
# +
#collapse-hide
import py3Dmol
viewer = py3Dmol.view(query='pdb:6hik',width=300, height=300)
viewer.setStyle({'chain':'A'},{'cartoon':{'color':'grey'}})
viewer.setStyle({'chain':'L'},{'cartoon':{'color':'red'}})
viewer.zoomTo()
# -
# We can now for instance launch fpocket on TEAD4 by either dropping YAP or by keeping TEAD4 explicitly. Whatever mindset you prefer ;) Let's be in a positive mood and keep TEAD4:
# ```
# fpocket -f 6hik.pdb -k A
# ```
#
# +
#collapse-hide
import py3Dmol
viewer = py3Dmol.view(query='pdb:6hik',width=500, height=500)
pdbdata=open('../data/6hik_out.pdb', 'r').read()
viewer.addModel(pdbdata,'pdb')
viewer.setStyle({'chain':'A'},{'cartoon':{'color':'grey'}})
viewer.setStyle({'chain':'L'},{'cartoon':{'color':'red'},'line':{}})
viewer.setStyle({'chain':'C'},{'sphere':{'colorscheme':{'prop':'resi','gradient':'roygb','min':1,'max':15}}})
#viewer.setStyle({'chain':'C'},{'line':{'colorfunc':colorAsSnake}})
viewer.zoomTo()
# -
# When doing this in an automatic way, so with classical fpocket parameters, you'll now be able to identify pockets that overlap with YAP. That's kind of nice, but what if I want to study really in detail all the binding interface itself between YAP and TEAD4?
# In order to instruct fpocket to define a pocket only on the interface itself you can now do something like that:
#
# ```bash
# fpocket -f 6hik.pdb -a L
# ```
#
#
# +
#collapse-hide
import py3Dmol
viewer = py3Dmol.view(query='pdb:6hik',width=500, height=500)
pdbdata=open('../data/6hik_out_explicit.pdb', 'r').read()
viewer.addModel(pdbdata,'pdb')
viewer.setStyle({'chain':'A'},{'cartoon':{'color':'grey'}})
viewer.setStyle({'chain':'L'},{'cartoon':{'color':'red'},'line':{}})
viewer.setStyle({'chain':'C'},{'sphere':{'color':'orange'}})
# -
# You can now get the full orange blob as the interacting epitope. Together with that you get the usual statistics etc. The cool thing is, this can be very well used on protein RNA interfaces as well.
#
# ### A word on druggability
#
# If you want to assess druggability, please use the default fpocket parameters and not a guided pocket detection or other pocket detection parameters. The druggability assessment intends to estimate a pockets tractability for small molecule binding sites. Applying this to such larger surfaces is out of the applicability domain here in my opinion.
#
#
# ## Extracting descriptors with fpocket
#
# Ok running these things on individual examples is nice. You can automate things with fpocket quite easily and extract descriptors on a larger scale using the fancy `-d` flag (oh yes, yet another flag) if the only thing you're after is descriptor extraction.
#
# ### Why would I ever want to extract descriptors?
#
# Calculating descriptors on binding epitopes is generating the basis for using these to derive several potential applications afterwards. For instance, to derive the druggability prediction in fpocket, first we extracted descriptors of known druggable and supposed non-druggable (no religious debate here) pockets, second we determined the most relevant descriptors and last we trained a scoring function based on these descriptors.
#
# You can very well imagine to do the same on other types of binding epitopes (there are a few papers out there on that already I guess), like protein protein interfaces, antibody / antigene interfaces more specifically, crystal contacts, protein DNA/RNA interfaces etc...
#
# Ultimately these characterisations will allow you to train functions or super fancy __deep__ learning models (if you really need that).
#
# In order to do that on a larger scale you could use dpocket (a less well-known sibling of fpocket), but it currently still doesn't fully integrate the logic with selecting chains as ligands (still a bit of work needed here). So let's stick to fpocket on a larger scale example for descriptor extraction which will work just fine:
#
#
# ```
# data/peptide_data/3uqp.pdb B
# data/peptide_data/3uri.pdb B
# data/peptide_data/4rcp.pdb B
# data/peptide_data/4tpg.pdb E
# data/peptide_data/5jxh.pdb H
# ```
#
# Let's suppose we have a csv file like this one above. It defines a set of PDB files and the chain we want to consider as a ligand explicitly during the fpocket run to assess the binding epitope this chain is making with the "receptor". You could run this in a very geeky way using this here:
#
# `awk '{ print "fpocket -f " $1 " -a "$2 " -d"}' list.txt | sh`
#
# This will output a messy thing like this:
# ```csv
# cav_id drug_score volume nb_asph inter_chain apol_asph_proportion mean_asph_radius as_density mean_asph_solv_acc mean_loc_hyd_dens flex hydrophobicity_score volume_score charge_score polarity_score a0_apol a0_pol af_apol af_pol n_abpa ala cys asp glu phe gly his ile lys leu met asn pro gln arg ser thr val trp tyr chain_1_type chain_2_type num_res_chain_1 num_res_chain_2 lig_het_tag name_chain_1 name_chain_2
# 1 0.0021 4091.2471 213 0 0.1831 4.3624 10.8343 0.5065 10.3590 0.0000 17.6957 4.1304 3 31 176.3137 207.7781 91.7446 196.2307 21 1 4 2 3 0 3 1 4 0 4 1 3 0 1 1 6 4 2 3 3 0 0 457 457 PSA A A
# cav_id drug_score volume nb_asph inter_chain apol_asph_proportion mean_asph_radius as_density mean_asph_solv_acc mean_loc_hyd_dens flex hydrophobicity_score volume_score charge_score polarity_score a0_apol a0_pol af_apol af_pol n_abpa ala cys asp glu phe gly his ile lys leu met asn pro gln arg ser thr val trp tyr chain_1_type chain_2_type num_res_chain_1 num_res_chain_2 lig_het_tag name_chain_1 name_chain_2
# 1 0.1847 4801.9624 204 0 0.3529 4.4814 10.8531 0.5450 22.0000 0.0000 27.5882 3.4706 -8 26 183.5595 226.1442 124.2374 226.0680 11 1 0 0 7 0 2 1 7 0 8 4 0 0 4 0 9 5 0 2 1 0 0 328 328 NULL A A
# cav_id drug_score volume nb_asph inter_chain apol_asph_proportion mean_asph_radius as_density mean_asph_solv_acc mean_loc_hyd_dens flex hydrophobicity_score volume_score charge_score polarity_score a0_apol a0_pol af_apol af_pol n_abpa ala cys asp glu phe gly his ile lys leu met asn pro gln arg ser thr val trp tyr chain_1_type chain_2_type num_res_chain_1 num_res_chain_2 lig_het_tag name_chain_1 name_chain_2
# 1 1.0000 2998.7053 187 0 0.5241 4.3207 11.5341 0.5341 48.6735 0.0000 34.3462 5.0769 3 16 107.4789 84.2598 47.7836 96.3200 8 1 2 1 2 0 0 1 1 1 1 3 3 0 3 0 1 0 1 4 1 0 0 238 238 NULL A A
# cav_id drug_score volume nb_asph inter_chain apol_asph_proportion mean_asph_radius as_density mean_asph_solv_acc mean_loc_hyd_dens flex hydrophobicity_score volume_score charge_score polarity_score a0_apol a0_pol af_apol af_pol n_abpa ala cys asp glu phe gly his ile lys leu met asn pro gln arg ser thr val trp tyr chain_1_type chain_2_type num_res_chain_1 num_res_chain_2 lig_het_tag name_chain_1 name_chain_2
# 1 0.9998 1552.9973 110 0 0.4182 4.5201 6.7627 0.4934 36.8261 0.0000 34.2593 4.8519 0 17 105.0637 48.2599 82.1878 27.8238 12 1 1 2 0 0 3 2 0 0 2 1 1 0 3 1 2 0 0 6 2 0 0 594 594 NULL A A
# cav_id drug_score volume nb_asph inter_chain apol_asph_proportion mean_asph_radius as_density mean_asph_solv_acc mean_loc_hyd_dens flex hydrophobicity_score volume_score charge_score polarity_score a0_apol a0_pol af_apol af_pol n_abpa ala cys asp glu phe gly his ile lys leu met asn pro gln arg ser thr val trp tyr chain_1_type chain_2_type num_res_chain_1 num_res_chain_2 lig_het_tag name_chain_1 name_chain_2
# 1 0.0044 3875.7029 152 0 0.1513 4.4958 9.8519 0.5972 9.5652 0.0000 -7.5946 3.4865 -7 27 114.7247 187.8280 70.7198 141.5900 15 2 3 2 8 0 0 4 4 2 0 1 0 0 0 2 2 4 1 1 1 0 0 499 499 NULL A A
#
#
# ```
#
#
# Let's clean this up the geeky way:
#
# `awk '{ print "fpocket -f " $1 " -a "$2 " -d"}' list.txt | sh | awk '{if(NR%2==0)print}'`
#
# This should only give the descriptors:
#
# ```
# 1 0.0021 3972.3301 213 0 0.1831 4.3624 10.8343 0.5065 10.3590 0.0000 17.6957 4.1304 3 31 176.3137 207.7781 91.7446 196.2307 21 1 4 2 3 0 3 1 4 0 4 1 3 0 1 1 6 4 2 3 3 0 0 457 457 PSA A A
# 1 0.1847 4604.5068 204 0 0.3529 4.4814 10.8531 0.5450 22.0000 0.0000 27.5882 3.4706 -8 26 183.5595 226.1442 124.2374 226.0680 11 1 0 0 7 0 2 1 7 0 8 4 0 0 4 0 9 5 0 2 1 0 0 328 328 NULL A A
# 1 1.0000 2895.7646 187 0 0.5241 4.3207 11.5341 0.5341 48.6735 0.0000 34.3462 5.0769 3 16 107.4789 84.2598 47.7836 96.3200 8 1 2 1 2 0 0 1 1 1 1 3 3 0 3 0 1 0 1 4 1 0 0 238 238 NULL A A
# 1 0.9998 1574.4559 110 0 0.4182 4.5201 6.7627 0.4934 36.8261 0.0000 34.2593 4.8519 0 17 105.0637 48.2599 82.1878 27.8238 12 1 1 2 0 0 3 2 0 0 2 1 1 0 3 1 2 0 0 6 2 0 0 594 594 NULL A A
# 1 0.0044 3794.7029 152 0 0.1513 4.4958 9.8519 0.5972 9.5652 0.0000 -7.5946 3.4865 -7 27 114.7247 187.8280 70.7198 141.5900 15 2 3 2 8 0 0 4 4 2 0 1 0 0 0 2 2 4 1 1 1 0 0 499 499 NULL A A
# ```
# I did this on a larger list of peptide binding structures and this is typically a good way to start studying some of the properties of these epitopes compared to drug binding sites or others.
# +
import pandas as pd
import altair as alt
d=pd.read_table('../data/peptide_out.txt',delim_whitespace=True,header=None,names=["cav_id","drug_score","volume","nb_asph","inter_chain","apol_asph_proportion","mean_asph_radius","as_density","mean_asph_solv_acc","mean_loc_hyd_dens","flex","hydrophobicity_score","volume_score","charge_score","polarity_score","a0_apol","a0_pol","af_apol","af_pol","n_abpa","ala","cys","asp","glu","phe","gly","his","ile","lys","leu","met","asn","pro","gln","arg","ser","thr","val","trp","tyr","chain_1_type","chain_2_type","num_res_chain_1","num_res_chain_2","lig_het_tag","name_chain_1","name_chain_2"])
alt.Chart(d).mark_bar().encode(
alt.X("volume", bin=True),
y='count()',
)
alt.Chart(d).transform_fold(
['a0_pol', 'a0_apol'],
as_=['Columns', 'Values']
).mark_area(
opacity=0.5,
interpolate='step'
).encode(
alt.X('Values:Q', bin=alt.Bin(maxbins=20)),
alt.Y('count()', stack=None),
alt.Color('Columns:N')
)
# -
# Here we can see the distribution of polar versus apolar surface areas of these peptide binding sites under consideration. What is interesting already is that we can clearly distinguish this epitope with these descriptors alone from typical drug binding sites (more hydrophobic in general).
alt.Chart(d).mark_circle(size=60).encode(
x='n_abpa',
y='a0_apol',
color='volume',
tooltip=['drug_score', 'mean_asph_solv_acc', 'a0_apol','a0_pol',"ala","cys","phe","gly","ile","leu","met","pro","val","trp","tyr"]
).interactive()
# The plot above is mainly to play around with altair and plotting in this blogging environment ;). However, it shows the relationship between the number of ABPA's (almost buried polar atoms - if you don't know what these are, you should read [this paper on shielded hydrogen bonds](https://pubs.acs.org/doi/10.1021/ja207494u)) and the apolar surface area. Coloring is by volume of the "pocket".
#
#
# ## Perspectives
#
# All of these new functionalities (apart from the support of yet another "useless" file format) set the grounds for characterization of various types of binding epitopes using fpocket. This can therefore be used for functional characterization of protein structures, ultimately indicating what might bind where on a protein structure.
#
# But still a few sleepless nights to go until we can achieve that. Fortunately everything is opensource, so you can do that before!!! ;)
| _notebooks/2021-02-02-fpocket-towards-biologics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
def visualize(info, show_limit=1000):
    """Plot the last `show_limit` closing prices with buy/sell markers.

    Each record in info['history'] carries (action, tick, close, ...);
    records with action code 0 are drawn as green up-triangles slightly
    below the price line, action code 1 as red down-triangles slightly
    above it. Blocks until the window is closed, then sleeps 3 seconds.
    """
    recent = info['history'][-show_limit:]
    close_prices = [row[2] for row in recent]
    close_ticks = [row[1] for row in recent]
    # Partition tick/price pairs by action code (0 = buy, 1 = sell).
    buy_tick = np.array([row[1] for row in recent if row[0] == 0])
    buy_price = np.array([row[2] for row in recent if row[0] == 0])
    sell_tick = np.array([row[1] for row in recent if row[0] == 1])
    sell_price = np.array([row[2] for row in recent if row[0] == 1])
    plt.figure(figsize=(14, 10))  # enlarge the figure
    plt.plot(close_ticks, close_prices)
    # Offset markers vertically so they do not hide the price curve.
    plt.scatter(buy_tick, buy_price - 3, c='g', marker="^", s=25)
    plt.scatter(sell_tick, sell_price + 3, c='r', marker="v", s=25)
    plt.show(block=True)
    time.sleep(3)
# + jupyter={"outputs_hidden": false}
FILENAME = "./info/duel_dqn_OHLCV-v0_weights_1688662LS_0_7_0.6181420165052214.info"
# The .info file stores a pickled dict wrapped in a 0-d numpy object array.
# Since numpy 1.16.3 np.load defaults to allow_pickle=False and would raise
# ValueError on this file, so pass allow_pickle=True explicitly; .item() is
# the documented way to unwrap the dict from the 0-d array.
info = np.load(FILENAME, allow_pickle=True).item()
visualize(info, show_limit=15000)
visualize(info, show_limit=1000)
# + jupyter={"outputs_hidden": false}
# plot cumulative return and max drawdown
def show_mdd(info):  # xs is the cumulative return curve
    """Plot cumulative reward and mark the maximum-drawdown interval.

    The trough index `i` is where the gap between the running maximum and
    the curve is largest; the peak index `j` is the highest point before
    it. Both are marked with red dots on the curve.

    Returns the final cumulative reward (last point of the curve).
    """
    reward_history = [data[4] for data in info['history']]
    df = pd.DataFrame()
    df['reward'] = reward_history
    df['cum_reward'] = df['reward'].cumsum()
    xs = df['cum_reward'].values
    i = np.argmax(np.maximum.accumulate(xs) - xs)  # end (trough) of the drawdown period
    # Guard the edge case of a curve that never dips below its running
    # maximum: argmax then returns 0 and np.argmax(xs[:0]) would raise
    # ValueError on the empty slice.
    j = np.argmax(xs[:i]) if i > 0 else 0  # start (peak) of the period
    plt.figure(figsize=(16, 10))
    plt.plot(xs)
    plt.plot([i, j], [xs[i], xs[j]], 'o', color='Red', markersize=10)
    plt.show()
    return xs[-1]
# Report the cumulative return and the change in portfolio value
# (initial capital was 100 * 10000 = 1,000,000).
total_return = show_mdd(info)
print("total cumulative return : ",total_return)
print("portfolio value {0} -> {1}" .format(100*10000, info['portfolio']))
| visualize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 ana-current (new)
# language: python
# name: ana1-current-py3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import sys
import os
from pathlib import Path
from importlib import reload
sys.path.append('../')
import svd_waveform_processing as proc
reload(proc)
# -
plt.style.use('dark_background')
#matplotlib.rcParams.keys()
plt.rcParams['figure.figsize'] = (10, 6)
plt.rc('font', size=12)
length = 8
width = 1.5
plt.rcParams['xtick.major.size'] = length
plt.rcParams['ytick.major.size'] = length
plt.rcParams['xtick.major.width'] = width
plt.rcParams['ytick.major.width'] = width
# +
expname='xcsx35017'
dirname=Path('/reg/d/psdm/%s/%s/hdf5/smalldata/'%(expname[0:3],expname))
def fileName_forTraces(expname, run):
    """Return the smalldata HDF5 file name for experiment `expname`, run `run`.

    The run number is zero-padded to at least three digits,
    e.g. ('xcsx35017', 112) -> 'xcsx35017_Run112.h5'.
    """
    return '{}_Run{:03d}.h5'.format(expname, run)
# +
run = 112 # single pulse traces
fname = dirname / fileName_forTraces(expname, run)
with h5.File(fname, 'r') as f:
print(f.keys())
# d_icarus = np.asarray(f['icarus_pink']['full'])
dat_1pulse = -np.asarray(f['acq01']['full'])[:500]
runs = np.arange(70,90) # 4 pulse traces
dat = []
for run in runs:
fname = dirname / fileName_forTraces(expname, run)
with h5.File(fname, 'r') as f:
# print(f.keys())
# d_icarus = np.asarray(f['icarus_pink']['full'])
d_acq = np.asarray(f['acq01']['full'])
print(d_acq.shape)
dat.append(-d_acq)
dat = np.concatenate(dat)
print(dat.shape)
# -
fig, ax = plt.subplots(1,2, figsize=(12,6))
ax[0].plot(dat[14:17,1350:1800].T)
ax[1].plot(dat_1pulse[10:13,1350:1800].T)
# plt.xlim(50,150)
plt.show()
dat = dat[:,1350:1800]
dat_1pulse = dat_1pulse[:,1350:1800]
# +
# plt.plot(dat[1])
# plt.xlim(115,135)
# -
# Data need to be aligned before building the basis.
#
# # (i) Align pulses
#
# Test between correlation and convolution.
# +
# Compare convolution vs. correlation against the first waveform as a way
# to estimate per-shot shifts: the argmax of the 'full' product with a
# reference pulse tracks the relative alignment of each shot.
conv = []
corr = []
for wave in dat_1pulse[:200]:
    conv.append( np.argmax(np.convolve(dat_1pulse[0], wave, 'full')) )
    corr.append( np.argmax(np.correlate(dat_1pulse[0], wave, 'full')) )
# Center both shift estimates on their median (list - numpy scalar
# broadcasts, converting the lists to arrays).
conv = conv-np.median(conv)
corr = corr-np.median(corr)
fig, ax = plt.subplots(1,3, figsize=(14,5))
ax[0].plot(conv)
ax[0].set_title('convolution')
ax[1].plot(corr)
ax[1].set_title('correlation')
# If the two estimates agree (up to sign), their sum should be ~0.
ax[2].plot(corr+conv)
ax[2].set_title('sum')
plt.show()
# -
# Convolution and correlation are consistent in calculating the shift. There is just a sign difference, as expected.
# +
# Align every shot to the first waveform in-place: the shift is read off
# the argmax of the convolution with the reference, then undone via np.roll.
# single pulse
ref = np.argmax(np.convolve(dat_1pulse[0],dat_1pulse[0]))
for ii, wave in enumerate(dat_1pulse):
    shift = np.argmax(np.convolve(dat_1pulse[0], wave, 'full')) - ref
    dat_1pulse[ii] = np.roll(wave, -shift)
# 4 pulse
# For the 4-pulse traces only the window around the first peak is used for
# the alignment, so later peaks cannot bias the shift estimate.
conv_range = (120,135)
ref = np.argmax(np.convolve(dat[1,conv_range[0]:conv_range[1]],dat[1,conv_range[0]:conv_range[1]]))
for ii, wave in enumerate(dat):
    shift = np.argmax(np.convolve(dat[1,conv_range[0]:conv_range[1]], wave[conv_range[0]:conv_range[1]], 'full')) - ref
    dat[ii] = np.roll(wave, -shift)
# +
fig, ax = plt.subplots(1,2, figsize=(14,6))
ax[0].plot(dat_1pulse[10:13,:].T)
ax[0].set_title('Aligned pulses')
# ax[1].plot(dat[35:39,:].T)
ax[1].plot(dat[20:25,:].T)
ax[1].set_title('Aligned pulses')
ax[1].set_xlim(110,210)
plt.show()
# -
# # (ii) Get single pulse basis
# +
n_c = 2
A, proj, svd = proc.get_basis_and_projector(dat_1pulse[:50], n_components=n_c)
fig, ax = plt.subplots(1,2, figsize=(12,5))
ax[0].set_title('Eigenvalues')
ax[0].plot(svd.singular_values_,'o')
ax[0].set_yscale('log')
ax[1].set_title('Eigen waveforms')
ax[1].plot(svd.components_[0])
ax[1].plot(0.5*svd.components_[1])
ax[1].set_xlim(110,210)
plt.show()
# -
# The second component is still important here. As seen later, there is some jitter in the data, which can be accounted for by the second component (~1st derivative).
A.shape
# +
# conv = np.convolve(A.squeeze(), dat[1], 'full')
p1 = np.argmax(dat[1])
p2 = p1+3 + np.argmax(dat[1,p1+3:])+1
p4 = p2+3 + np.argmax(dat[1,p2+3:])
p3 = p2+3 + np.argmax(dat[1,p2+3:p4])
plt.plot(dat[1])
plt.axvline(x=p1)
plt.axvline(x=p2)
plt.axvline(x=p3)
plt.axvline(x=p4)
plt.plot(np.roll(A[:,0],-1))
# plt.plot(0.5*np.roll(A[:,1],2))
plt.xlim(110,210)
plt.show()
# -
# ## Build basis by shifting the single pulse basis accordingly
# +
dl = [-1, p2-p1, p3-p1, p4-p1]
A0 = np.roll(A, dl[0], axis=0)
A1 = np.roll(A0, dl[1], axis=0)
A2 = np.roll(A0, dl[2], axis=0)
A3 = np.roll(A0, dl[3], axis=0)
A_tot = np.concatenate([A0,A1,A2,A3], axis=1)
plt.plot(dat[1])
plt.plot(np.sum(A_tot[:,0::n_c], axis=1))
plt.xlim(120,220)
# -
# # Test the 4-pulse fit
proj = np.linalg.pinv(A_tot)
regr = proc.WaveformRegressor(A=A_tot, projector=proj)
reconstructed = regr.fit_reconstruct(dat)
fig, ax = plt.subplots(2,3, figsize=(20,14))
ax = np.ravel(ax)
for ii in range(6):
idx = np.random.randint(dat.shape[0])
ax[ii].plot(dat[idx])
ax[ii].plot(reconstructed[idx])
ax[ii].set_xlim(110,220)
ax[ii].set_title(str(idx))
# There is a small jitter in the different peak positions. Because the sampling is of the order of the feature time scale, a small shift can have a big impact. Interestingly, peaks sometimes seem to shift with respect to one another as well; it is not a shift of the whole waveform. In order to account for that, it is best to fit the data with two components (n_c=2), since the second component can be seen as the derivative of the curves and encodes information about small shifts.
#
# By rerunning the analysis with only one component, the effects of those small shifts become evident.
#
# The intensity of each pulse is given by the norm of the corresponding coefficient in the basis: np.linalg.norm(coeff_pulse_ii)
# # Test new code (multi-pulse)
#
# Test implementation of the multi-pulse fit in the waveform_processing package. It should produce the same results as above.
reload(proc)
dl = [-1, p2-p1-1, p3-p1, p4-p1]
n_c = 3
regr2 = proc.construct_waveformRegressor(dat_1pulse[:50], n_components=n_c, n_pulse=4, delay=dl)
plt.plot(dat[1])
plt.axvline(x=dl[0])
plt.axvline(x=dl[1])
plt.axvline(x=dl[2])
plt.axvline(x=dl[3])
plt.plot(regr2.A[0])
plt.xlim(110,210)
plt.plot(dat[1])
plt.plot(np.sum(regr2.A[0::n_c,:], axis=0))
plt.xlim(120,220)
reconstructed = regr2.fit_reconstruct(dat)
intensities = regr2.get_pulse_intensity(dat)
intensities_max = regr2.get_pulse_intensity(dat, mode='max')
score = regr2.score(dat)
fig, ax = plt.subplots(2,3, figsize=(18,11))
ax = np.ravel(ax)
for ii in range(6):
idx = np.random.randint(dat.shape[0])
ax[ii].plot(dat[idx])
ax[ii].plot(reconstructed[idx])
ax[ii].set_xlim(110,220)
ax[ii].set_title('{}\nscore: {:.3}\n'.format(str(idx), score[idx])+str(intensities_max[idx]))
plt.tight_layout()
plt.show()
# ### Intensity ratio: compare the two methods to calculate intensity
# +
ii = 15
print(intensities[ii,0]/intensities[ii,1])
print(intensities_max[ii,0]/intensities_max[ii,1])
print('\n')
print(intensities[ii,1]/intensities[ii,2])
print(intensities_max[ii,1]/intensities_max[ii,2])
print('\n')
print(intensities[ii,2]/intensities[ii,3])
print(intensities_max[ii,2]/intensities_max[ii,3])
# -
# The ratios are close, but not the same. This is a problem. The norm of the coefficient might not be the right approach if the basis is not orthonormal. Let's check.
# ## Check orthonormality
A = regr2.A
print(A[0].dot(A[0]))
print(A[0].dot(A[n_c-1]))
print(A[0].dot(A[n_c]))
# The basis is clearly not orthogonal. This explains why the norm of the coefficient should not be used to calculate the intensity for multi-pulse waveforms. It works well for single pulse waveforms though.
#
# While an orthogonal basis can be constructed, it will not be straightforward to extract the pulse intensities from it.
#
# #### Recommendation: use mode='max' to calculate the intensities of multi-pulse waveforms.
intensities = regr2.get_pulse_intensity(dat[0])
regr2.coeffs_
| waveform_analysis/multi_pulse/4pulse_xcs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import gif2numpy
import os
from collections import Counter
from pathlib import Path
from skimage.color import rgb2lab, deltaE_cie76
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
# -
image_path = Path('images', 'f2', 'wy', 'WyattWxl.gif')
if image_path.is_file():
np_images, extensions, image_specs = gif2numpy.convert(image_path)
image = np_images[0]
print("The type of this input is {}".format(type(image)))
print("Shape: {}".format(image.shape))
plt.imshow(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(gray_image, cmap='gray')
def RGB2HEX(color):
    """Format an RGB triple (channel values 0-255, possibly float) as '#rrggbb'."""
    r, g, b = (int(channel) for channel in color[:3])
    return "#{:02x}{:02x}{:02x}".format(r, g, b)
def get_image(image_path):
    """Load the first frame of the GIF at `image_path` as an RGB numpy array."""
    frames, _extensions, _specs = gif2numpy.convert(image_path)
    # Flip the decoded frame from BGR channel order to RGB for display.
    return cv2.cvtColor(frames[0], cv2.COLOR_BGR2RGB)
def get_colors(image, number_of_colors, show_chart):
    """Cluster the image's pixels with KMeans and return its dominant RGB colors.

    Optionally draws a pie chart of the color proportions, labelled with the
    hex code of each color. Returns a list of RGB cluster centers.
    """
    # Downscale first so clustering stays fast, then flatten to a pixel list.
    modified_image = cv2.resize(image, (87, 154), interpolation = cv2.INTER_AREA)
    modified_image = modified_image.reshape(modified_image.shape[0]*modified_image.shape[1], 3)
    clf = KMeans(n_clusters = number_of_colors)
    labels = clf.fit_predict(modified_image)
    counts = Counter(labels)
    # Bug fix: sort by label so that list position i corresponds to cluster i.
    # The double indexing below (ordered_colors[i] for i in counts.keys())
    # mislabelled colors whenever Counter's insertion order was not 0..k-1.
    counts = dict(sorted(counts.items()))
    center_colors = clf.cluster_centers_
    # We get ordered colors by iterating through the keys
    ordered_colors = [center_colors[i] for i in counts.keys()]
    hex_colors = [RGB2HEX(ordered_colors[i]) for i in counts.keys()]
    rgb_colors = [ordered_colors[i] for i in counts.keys()]
    if (show_chart):
        plt.figure(figsize = (8, 6))
        plt.pie(counts.values(), labels = hex_colors, colors = hex_colors)
    return rgb_colors
# Use the full path defined above — a bare 'WyattWxl.gif' only resolves if the
# file happens to sit in the current working directory.
get_colors(get_image(image_path), 8, True)

# Collect every GIF under images/ (recursively).
p = Path('images')
images = list(p.glob('**/*.gif'))
print(len(images))

# Reference colors (RGB) used for similarity matching below.
COLORS = {
    'GREEN': [0, 128, 0],
    'BLUE': [0, 0, 128],
    'YELLOW': [255, 255, 0]
}
def match_image_by_color(image, color, threshold = 60, number_of_colors = 10):
    """Return True if any dominant color of *image* lies within *threshold*
    (CIE76 delta-E in Lab space) of the reference RGB *color*."""
    image_colors = get_colors(image, number_of_colors, False)
    selected_color = rgb2lab(np.uint8(np.asarray([[color]])))
    # Iterate over the colors actually returned: KMeans can yield fewer
    # clusters than requested, so indexing range(number_of_colors) risked
    # an IndexError.
    for image_color in image_colors:
        curr_color = rgb2lab(np.uint8(np.asarray([[image_color]])))
        diff = deltaE_cie76(selected_color, curr_color)
        if diff < threshold:
            return True  # one sufficiently close color is enough
    return False
def show_selected_images(images, color, threshold, colors_to_match):
    """Plot up to five images whose dominant colors match *color*.

    *colors_to_match* is forwarded to match_image_by_color as the number of
    KMeans clusters extracted per image.
    NOTE(review): the caller at module level passes Path objects collected by
    glob; plt.imshow expects image arrays — confirm images are loaded first.
    """
    index = 1
    for candidate in images:
        selected = match_image_by_color(candidate, color, threshold, colors_to_match)
        if selected:
            if index > 5:
                # The 1x5 subplot grid is full; a sixth subplot would raise.
                break
            plt.subplot(1, 5, index)
            plt.imshow(candidate)
            index += 1
plt.figure(figsize = (20, 10))
show_selected_images(images, COLORS['GREEN'], 60, 5)
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
import os
import numpy as np
import sklearn.metrics as skm
# +
# Load per-sample correctness flags and confidence scores for the MCP
# (maximum class probability) baseline.
PATH = '/Users/christophberger/Downloads'
method = 'mcp'
correct = os.path.join(PATH, f'{method}_correct.txt')
confidence = os.path.join(PATH, f'{method}_confidence.txt')
# correct_list = np.loadtxt(correct, delimiter='\n', converters = {0: lambda s: int(s == 'True')})
correct_list = np.genfromtxt(correct, dtype=bool)
confidence_list = np.loadtxt(confidence, delimiter='\n')
# -
correct_list
# ROC of "was the prediction correct" against the confidence score.
fpr, tpr, thresh = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0
auroc_classification = skm.auc(fpr, tpr)
plt.plot(fpr, tpr)
plt.plot(thresh, tpr)
plt.plot(correct_list, confidence_list)
thresholds = np.arange(0,1,0.01)
thresholds
# Accuracy of MCP predictions restricted to samples whose confidence exceeds
# each threshold (selective accuracy).
acc = []
for thresh in thresholds:
    conf_t = []
    pred_t = []
    for conf, pred in zip(confidence_list, correct_list):
        if conf > thresh:
            conf_t.append(conf)
            pred_t.append(pred)
    if not len(pred_t):
        # No samples above this threshold: define accuracy as 1, matching the
        # guard used in the DE and ODIN blocks below (previously this block
        # alone could raise ZeroDivisionError).
        acc.append(1)
        continue
    acc.append(sum(pred_t)/len(pred_t))
# +
# Same selective-accuracy sweep for the Deep Ensemble scores.
method = 'de'
correct = os.path.join(PATH, f'{method}_correct.txt')
confidence = os.path.join(PATH, f'{method}_confidence.txt')
# correct_list = np.loadtxt(correct, delimiter='\n', converters = {0: lambda s: int(s == 'True')})
correct_list = np.genfromtxt(correct, dtype=bool)
confidence_list = np.loadtxt(confidence, delimiter='\n')
acc_de = []
for thresh in thresholds:
    conf_t = []
    pred_t = []
    for conf, pred in zip(confidence_list, correct_list):
        if conf > thresh:
            conf_t.append(conf)
            pred_t.append(pred)
    if not len(pred_t):
        # Bug fix: this previously appended to acc_odin, which is not defined
        # until the next cell (NameError) and would in any case leave acc_de
        # shorter than thresholds, breaking the final plot.
        acc_de.append(1)
        continue
    acc_de.append(sum(pred_t)/len(pred_t))
# +
# Same selective-accuracy sweep for the ODIN scores.
method = 'odin'
correct = os.path.join(PATH, f'{method}_correct.txt')
confidence = os.path.join(PATH, f'{method}_confidence.txt')
# correct_list = np.loadtxt(correct, delimiter='\n', converters = {0: lambda s: int(s == 'True')})
correct_list = np.genfromtxt(correct, dtype=bool)
confidence_list = np.loadtxt(confidence, delimiter='\n')
acc_odin = []
for thresh in thresholds:
    conf_t = []
    pred_t = []
    for conf, pred in zip(confidence_list, correct_list):
        if conf > thresh:
            conf_t.append(conf)
            pred_t.append(pred)
    if not len(pred_t):
        # No predictions above this threshold; define accuracy as 1.
        acc_odin.append(1)
        continue
    acc_odin.append(sum(pred_t)/len(pred_t))
# -
# Compare selective accuracy of the three uncertainty methods on one plot.
plt.plot(thresholds, acc_odin, label='ODIN')
plt.plot(thresholds, acc, label='MCP')
plt.plot(thresholds, acc_de, label='Deep Ensemble (3)')
plt.xlabel('Softmax Confidence')
plt.ylabel('Accuracy')
plt.legend()
| notebooks/Classification_Plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions. Creating a custom transfer function is a process that usually involves some trial-and-error. TransferFunctionHelper is a utility class designed to help you visualize the probability density functions of yt fields that you might want to volume render. This makes it easier to choose a nice transfer function that highlights interesting physical regimes.
#
# First, we set up our namespace and define a convenience function to display volume renderings inline in the notebook. Using `%matplotlib inline` makes it so matplotlib plots display inline in the notebook.
# +
import yt
import numpy as np
from IPython.core.display import Image
from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper
from yt.visualization.volume_rendering.render_source import VolumeSource
def showme(im):
    """Display a rendered image inline: zero out NaNs, then show as a bitmap."""
    # NaN is the only value unequal to itself; np.isnan states the intent
    # of the original `im != im` screening explicitly (in-place, as before).
    im[np.isnan(im)] = 0.0
    bitmap = yt.write_bitmap(im, None)
    return Image(bitmap)
# -
# Next, we load up a low resolution Enzo cosmological simulation.
ds = yt.load('Enzo_64/DD0043/data0043')
# Now that we have the dataset loaded, let's create a `TransferFunctionHelper` to visualize the dataset and transfer function we'd like to use.
tfh = TransferFunctionHelper(ds)
# `TransferFunctionHelper` will intelligently choose transfer function bounds based on the data values. Use the `plot()` method to take a look at the transfer function.
# Build a transfer function that is a multivariate gaussian in temperature
tfh = TransferFunctionHelper(ds)
tfh.set_field('temperature')
tfh.set_log(True)
tfh.set_bounds()
tfh.build_transfer_function()
tfh.tf.add_layers(5)
tfh.plot()
# Let's also look at the probability density function of the `cell_mass` field as a function of `temperature`. This might give us an idea where there is a lot of structure.
tfh.plot(profile_field='cell_mass')
# It looks like most of the gas is hot but there is still a lot of low-density cool gas. Let's construct a transfer function that highlights both the rarefied hot gas and the dense cool gas simultaneously.
# +
tfh = TransferFunctionHelper(ds)
tfh.set_field('temperature')
tfh.set_bounds()
tfh.set_log(True)
tfh.build_transfer_function()
tfh.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')
tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds')
tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r')
tfh.plot(profile_field='cell_mass')
# -
# Let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created.
# +
im, sc = yt.volume_render(ds, ['temperature'])
source = sc.get_source(0)
source.set_transfer_function(tfh.tf)
im2 = sc.render()
showme(im2[:,:,:3])
# -
# That looks okay, but the red gas (associated with temperatures between 1e6 and 1e8 K) is a bit hard to see in the image. To fix this, we can make that gas contribute a larger alpha value to the image by using the ``scale`` keyword argument in ``map_to_colormap``.
# +
tfh2 = TransferFunctionHelper(ds)
tfh2.set_field('temperature')
tfh2.set_bounds()
tfh2.set_log(True)
tfh2.build_transfer_function()
tfh2.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')
tfh2.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=5.0)
tfh2.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.0)
tfh2.plot(profile_field='cell_mass')
# -
# Note that the height of the red portion of the transfer function has increased by a factor of 5.0. If we use this transfer function to make the final image:
# +
source.set_transfer_function(tfh2.tf)
im3 = sc.render()
showme(im3[:,:,:3])
# -
# The red gas is now much more prominent in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids.
| doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Basic global settings trials including ncluster, method and data used
import os
import json
import itertools
import pandas as pd
from functions import *
# %set_env GUROBI_HOME=/usr/local/gurobi/linux64
# +
with open('config.json', 'r') as f:
config_all = json.loads(f.read())
config = config_all['Texas_7_years_1']
inputs = config['inputs']
settings = config['settings']
print(inputs)
pv_all = np.genfromtxt(
os.path.join(config['root'], config['pv_path']), delimiter=',', filling_values=0)
wind_all = np.genfromtxt(
os.path.join(config['root'], config['wind_path']), delimiter=',', filling_values=0)
load_all = np.genfromtxt(
os.path.join(config['root'], config['load_path']), delimiter=',', filling_values=0) # Data load
# -
# Build the simulation horizon and slice out the renewable/load profiles.
day_num = settings['day_num']
time_set = np.arange(24*day_num) # Time horizon
if 'profile_id' in settings:
    profile_id = settings['profile_id']
    renewable = [wind_all[time_set, profile_id],pv_all[time_set, profile_id]]
else:
    # Denote that first profile is used and no other profile exists.
    profile_id = -1
    renewable = [wind_all[time_set], pv_all[time_set]]
load = load_all[time_set]*100 # Data extract
# Cartesian product of candidate cost assumptions (battery energy, battery
# power, PV, wind) — one run_trials sweep per combination.
list_temp = itertools.product([100000,150000],[50000,30000],[500000,750000],[1000000,1250000])
backups = pd.DataFrame(columns=['c_bat','c_bat_power','c_pv','c_wind'],data=list_temp)
for backup in backups.index:
    print("Index :" + str(backup))
    update = backups.loc[backup,:].to_dict()
    for item in ['c_bat','c_bat_power','c_pv','c_wind']:
        inputs[item] = update[item]
    config['inputs'] = inputs
    # Pre-computed benchmark results and features for this cost combination.
    # NOTE(review): paths are hard-coded to one user's home directory.
    expected = pd.read_csv('/home/jupyter-zyh/Gnhe/benchmark/Linear/Texas/profile-{}/expected/expected_2555_{}_1e-4.csv'.format(profile_id,backup), index_col=0).loc[0,:].to_dict()
    sim_features_df = pd.read_csv('/home/jupyter-zyh/Gnhe/benchmark/Linear/Texas/profile-{}/features/features_2555_{}_1e-4.csv'.format(profile_id,backup), index_col=0)
    df = run_trials(config, wind_all, pv_all, load_all, expected, sim_features_df[['renewable_cap_0','renewable_cap_1','max_energy','max_power']])
    df.set_index('description').to_csv(
        config['save_root']+settings['data']+('_Int_' if inputs['gen_cap'] else '_')+('Renewable'+str(profile_id)+'_' if 'profile_id' in settings else '_')+str(settings['day_num'])+'Days_'+str(settings['period'])+'HrsPerPeriod_{}.csv'.format(backup))
print("All Done!")
| analysis_setting/centroid/centroid_linear.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# # Arrays
#
# This notebook illustrates how to create and reshuffle arrays. Other notebooks focus on matrix algebra and other functions applied to matrices.
# ## Load Packages and Extra Functions
# +
using Printf, DelimitedFiles
include("jlFiles/printmat.jl") #a function for prettier matrix printing
# -
# # Scalars, Vectors and Multi-dimensional Arrays
#
# *are different things*, even if they happen to "look" similar. For instance, a 1x1 array is not a scalar and an nx1 array is not a vector. This is discussed in some detail further down.
#
# However, we first present some common features of all arrays (vectors or multi-dimensional arrays).
# # Creating Arrays
#
# The typical ways of getting an array are
#
# * hard coding the contents
# * reading in data from a file
# * as a result from computations
# * allocating the array and then changing the elements
# * (often not so smart) growing the array by adding rows (or columns,..)
# * by list comprehension
#
# The next few cells give simple examples.
# ## 1. Hard Coding the Contents or Reading from a File
# +
z = [11 12; #typing in your matrix
21 22]
println("A matrix that we typed in:")
printmat(z)
x = readdlm("Data/MyData.csv",',',skipstart=1) #read matrix from file
println("First four lines of x from csv file:")
printmat(x[1:4,:])
# -
# ## 2a. Allocating an Array and Then Changing the Elements: Fill
#
# An easy way to create an array is to use the `fill()` function.
#
# ```
# A = fill(0,(10,2)) #10x2, integers (0)
# B = fill(0.0,10) #vector with 10 elements, floats (0.0)
# C = fill(NaN,(10,2)) #10x2, floats (NaN)
# D = fill("",3) #vector with 3 elements, strings ("")
# E = fill(Date(1),3) #vector with 3 elements, dates (0001-01-01)
# ```
# +
x = fill(0.0,(3,2)) #creates a 3x2 matrix filled with 0.0
println("so far, x is filled with 0.0. For instance, x[1,1] is $(x[1,1])")
for i = 1:size(x,1), j = 1:size(x,2)
x[i,j] = i/j
end
println("\nx after some computations")
printmat(x)
# -
# ## 2b. Allocating an Array and Then Changing the Elements: A More General Approach (extra)
#
# You can also create an array by
#
# ```
# A = Array{Int}(undef,10,2) #10x2, integers
# F = Array{Any}(undef,3) #vector with 3 elements, can include anything
# ```
#
# The ```undef``` signals that the matrix is yet not initialized. This is more cumbersome than `fill()`, but sometimes more flexible.
# +
F = Array{Any}(undef,3)
F[1] = [1;2;3;4] #F[1] contains a vector
F[2] = "<NAME>" #F[2] a string
F[3] = 1978 #F[3] an integer
printmat(F)
# -
# ## 3. Growing an Array
#
# Growing a matrix is done by `[A;B]` and/or `[A B]` (or by `vcat`, `hcat` and `cat`). This is somewhat slow, so do not use it for appending to a matrix in a long loop. Instead, pre-allocate the matrix and then fill it (see above).
#
# However, growing a *vector* is not that slow. It can be done by
# ```
# push!(old vector,new element 1,new element 2)
# ```
#
# If you instead want to append all elements of a vector, then do
# ```
# append!(old vector,vector to append) #in Julia 1.6, you can append several vectors
# ```
# +
A = [11 12;
21 22]
B = [1 2;
0 10]
z = [A;B] #same as vcat(A,B)
println("\n","stacking A and B vertically")
printmat(z)
z2 = [A B] #same as hcat(A,B)
println("\n","stacking A and B horizontally")
printmat(z2)
# -
B = Float64[] #empty vector, to include floats
for i = 1:3
x_i = 2.0 + 10^i
push!(B,x_i) #adding an element at the end
end
println("a vector with 3 elements:")
printmat(B)
# ## 4. List Comprehension and map (extra)
#
# List comprehension sounds fancy, but it is just a simple way to create arrays from repeated calculations. Similar to a "for loop."
#
# You can achieve the same thing with ```map``` (for instance, by ```map(i->collect(1:i),1:3)```).
# +
A = [collect(1:i) for i=1:3] #this creates a vector of vectors
println("A[1] is vector with 1 element, A[2] a vector with 2 elements,...")
printmat(A)
B = map(i->collect(1:i),1:3)
printmat(B)
# -
# # Using Parts of a Matrix 1
#
# The most common way to use parts of an array is by indexing. For instance, to use the second column of `A`, do `A[:,2]`.
#
# Notice that `A[1,:]` gives a (column) vector (yes, it does), while `A[1:1,:]` gives a 1xk matrix. (It looks like a row vector, but is really a matrix with just one row.)
#
# Also notice that `z = A[1,:]` creates an independent copy, so changing `z` will *not* change `A`.
# +
A = [11 12;
21 22]
println("A:")
printmat(A)
println("\nsecond column of A:")
printmat(A[:,2])
println("\n","first row of A (as a vector): ")
printmat(A[1,:]) #notice 1 makes it a vector
println("\n","first row of A: ")
printmat(A[1:1,:]) #use 1:1 to keep it as a 1x2 matrix
# -
# # Using Parts of a Matrix 2 (extra)
#
# In case you do not need an independent copy, then `y = view(A,1,:)` creates a *view* of the first row of `A`. This saves memory and is sometimes faster. Notice, however, that changing `y` by `y .= [1,2]` will now change the first row of `A`. Notice that the dot `.` is needed.
#
# A shortcut to loop over all rows of `A` is `for i in eachrow(A)`. There is also `eachcol()`.
#
# To make a *copy or a view?* If you need to save memory: a view. Instead, if you need speed: try both. (Copies are often quicker when you need to do lots of computations on the matrix, for instance, in a linear regression.)
# +
println("\n","view of first row of A (although it prints like a column vector): ")
y = view(A,1,:)
printmat(y)
y .= [1,2] #changing y and thus the first row of A
println("A after changing y")
printmat(A)
for i in eachrow(A) #looping over all rows
println("another row: ")
printmat(i)
end
# -
# # Splitting up an Array (extra)
#
# Sometimes you want to assign separate names to the columns (or rows) of a matrix. The next cell shows an example.
# +
println("A simple way...which works well when you want to create a few variables")
x1 = A[:,1]
x2 = A[:,2]
printmat(x2)
println("Another, prettier way")
(z1,z2) = [A[:,i] for i = 1:2]
printmat(z2)
# -
# # Arrays vs. Vectors vs. Scalars
#
# Matrices, vectors and scalars are different things, even if they contain the same number of elements. In particular,
#
# (a) an nx1 matrix is not the same thing as an n-vector
#
# (b) a 1x1 matrix or a 1-element vector are not the same thing as a scalar.
#
# As you will see further on, vectors are often more convenient than nx1 matrices.
#
# To convert a 1-element vector or 1x1 matrix `C` to a scalar, just do `myScalar = C[]`.
# +
A = ones(3,1) #this is a 3x1 matrix
B = ones(3) #a vector with 3 elements
println("The sizes of matrix A and vector B: $(size(A)) $(size(B))")
println("\nTesting if A==B: ",isequal(A,B))
println("\nThe nx1 matrix A and n-element vector B can often be used together, for instance, as in A+B, whose size is ",size(A+B))
printmat(A+B)
# +
C = ones(1,1) #a 1x1 matrix
c = 1 #a scalar
println("\nc/C would give an error since C is a (1x1) matrix")
println("\nInstead, do c/C[]: ",c/C[])
if length(C) == 1 && !isa(C,Number)
C = C[]
end
println("\nAfter conversion of C, do c/C: ",c/C)
# -
# # Vectors: x'x and x'A*x Create Scalars (if x is a vector)
#
# If `x` is a vector and `A` a matrix, then `x'x` and `x'A*x` are scalars. This is what a linear algebra text book would teach you, so vectors are very useful.
#
# This is *not* true if `x` is a matrix of size nx1. In that case the result is a 1x1 matrix.
#
# Recommendation: use vectors (instead of nx1 matrices) when you can.
# +
x = [1;2] #this is a vector
A = [11 12;
21 22]
println("\nx'x and x'A*x when x is a 2 element vector: ",x'x," ",x'A*x)
x = zeros(Int,2,1) #this is a 2x1 matrix (array)
x[1] = 1
x[2] = 2
println("\nx'x and x'A*x when x is a 2x1 array: ",x'x," ",x'A*x)
# -
# # An Array of Arrays (extra)
#
# If `x1` and `x2` are two arrays, then `y=[x1,x2]` is a vector (of arrays) where `y[1] = x1` and `y[2] = x2`. (If you instead want to stack `x1` and `x2` into a single matrix, use `[x1 x2]`, `[x1;x2]` or one of the `cat` functions discussed above.)
#
# In this case `y[1]` is actually a view of `x1` so changing elements of one changes the other.
# +
x1 = ones(3,2)
x2 = [1;2]
y = [x1,x2] #a vector of arrays
println(size(y))
printmat(y[1])
printmat(y[2])
# -
# # Arrays are Different...
#
# Vectors and matrices (arrays) can take lots of memory space, so **Julia is designed to avoid unnecessary copies of arrays**. In short, notice the following
#
# * ```B = A``` creates two names of the *same* array (changing one changes the other)
# * ```B = reshape(A,n,m)```, ```B = vec(A)```, and ```B = A'``` and create *another view* of the same array (changing one changes the other)
# * When you input an array to a function, then this array is shared between the function and the calling program (scope). Changing *elements* of the array (inside the function) will then change the array outside the function. The next cell provides some details.
#
# If you do not like this behaviour, then use `copy(A)` to create an independent copy of the array.
# +
# Demo: arrays are passed by reference, so mutating an *element* inside a
# function is visible to the caller, while rebinding the whole name is not.
function f1(A)
    A[1] = A[1]/2 #changing ELEMENTS of A, affects outside value
    #A = A/2 #this would NOT affect the outside value
    return A
end
x = [1.0 2.0]
printlnPs("original x: ",x)
y1 = f1(x)
printlnPs("x after calling f1(x): ",x)
# -
| Tutorial_06a_Arrays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="4oXHxB3s-IRf"
# # Tesla Stock Price Prediction using stacked LSTM
#
# The data is collected in real time using Tiingo API and loaded using pandas_datareader
# + colab={"base_uri": "https://localhost:8080/"} id="eIiPH2kir9vI" executionInfo={"status": "ok", "timestamp": 1635572276792, "user_tz": -330, "elapsed": 4495, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="bfa199ff-bb3e-43fc-f8a7-abb23d074436"
# !pip install pandas_datareader
# + id="s1opBMWAtJVc" executionInfo={"status": "ok", "timestamp": 1635574582311, "user_tz": -330, "elapsed": 388, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
# this is the data loader which will load the stock prices using Tiingo API
import pandas_datareader as pdr
api_key = "<YOUR API KEY>"
# go to the below URL and sign up to create your API key
# https://api.tiingo.com/documentation/general/overview
# + colab={"base_uri": "https://localhost:8080/"} id="sHz1rHJmuaJP" executionInfo={"status": "ok", "timestamp": 1635574586943, "user_tz": -330, "elapsed": 2749, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="92123c26-7447-49c7-e5d2-7f5584f57422"
# import the necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
# we need to ensure that tensorflow version > 2.x
import tensorflow as tf
print(tf.__version__)
# + id="YlH3VJnBtikb" executionInfo={"status": "ok", "timestamp": 1635574594955, "user_tz": -330, "elapsed": 1049, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
df = pdr.get_data_tiingo('TSLA', api_key=api_key)
# + id="IvK6SmU9t1gb" executionInfo={"status": "ok", "timestamp": 1635574602050, "user_tz": -330, "elapsed": 872, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
# save the dataset for future use
df.to_csv('data/Tesla.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="GJYsCOnIu2qi" executionInfo={"status": "ok", "timestamp": 1635574604139, "user_tz": -330, "elapsed": 422, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="8a9429c4-5d9c-4108-e121-ff7cf50ce112"
# read saved dataset
df = pd.read_csv('data/Tesla.csv')
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="Nt0FW-SAww3T" executionInfo={"status": "ok", "timestamp": 1635574608275, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="35ff2179-0171-479d-b075-b327f89e9318"
# we shall use the closing price of each day stocks as final labels
df_close = df.reset_index()['close']
df_close
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="dwghBBjpxFSa" executionInfo={"status": "ok", "timestamp": 1635574611072, "user_tz": -330, "elapsed": 634, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="94ff3258-ad95-451d-aa27-c4388b1d20cf"
plt.plot(df_close)
# + colab={"base_uri": "https://localhost:8080/"} id="yPkeeWCMxRoa" executionInfo={"status": "ok", "timestamp": 1635574615511, "user_tz": -330, "elapsed": 421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="d2ada2b6-f8cf-4f61-fdf9-4d81e10cab3a"
# since LSTM is very sensitive to scaling we need to scale the data
scaler=MinMaxScaler(feature_range=(0,1))
df_c = scaler.fit_transform(np.array(df_close).reshape(-1,1))
print(df_c)
# + colab={"base_uri": "https://localhost:8080/"} id="zyB5eHRByNo5" executionInfo={"status": "ok", "timestamp": 1635574617287, "user_tz": -330, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="ab2e7fbb-1eb6-46d1-af14-a566acbf688e"
# train and test split time series data
training_size=int(len(df_c)*0.80)
test_size=len(df_c)-training_size
train_data,test_data=df_c[0:training_size,:],df_c[training_size:len(df_c),:1]
print(train_data.shape, test_data.shape)
# + id="dCHep4ViyozO" executionInfo={"status": "ok", "timestamp": 1635574619118, "user_tz": -330, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
# create data matrix for lstm
def create_dataset(dataset, time_step=1):
    """Slice an (n, 1) series into (window, next-value) supervised pairs.

    Returns X of shape (n - time_step - 1, time_step) and y of shape
    (n - time_step - 1,). NOTE(review): the bound drops one usable sample
    (range(len - time_step) would keep it); kept as-is to preserve behavior.
    """
    n_samples = len(dataset) - time_step - 1
    windows = [dataset[i:i + time_step, 0] for i in range(n_samples)]
    targets = [dataset[i + time_step, 0] for i in range(n_samples)]
    return np.array(windows), np.array(targets)
# + colab={"base_uri": "https://localhost:8080/"} id="1nx8H53OzXWQ" executionInfo={"status": "ok", "timestamp": 1635574620592, "user_tz": -330, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="7f74b107-9729-4891-cbed-8d46ac5965da"
# we shall use previous 80 days data for each LSTM learning
time_step = 80
x_train, y_train = create_dataset(train_data, time_step)
x_test, y_test = create_dataset(test_data, time_step)
print(x_train.shape, y_train.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="As3WZS2UzvxC" executionInfo={"status": "ok", "timestamp": 1635574622058, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="ccc4a0d6-9756-4f12-a024-aa8a5cf86f56"
# reshape features from 2D to 3D as required by LSTM
n_features = 1
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1] , n_features)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1] , n_features)
print(x_train.shape, x_test.shape)
# + [markdown] id="OhGyxwjy0k_8"
# # LSTM Model
# + id="AO3fUSFD0RSw" executionInfo={"status": "ok", "timestamp": 1635574625588, "user_tz": -330, "elapsed": 661, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
# + colab={"base_uri": "https://localhost:8080/"} id="_CXZcQ3h0qUL" executionInfo={"status": "ok", "timestamp": 1635574638490, "user_tz": -330, "elapsed": 7143, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="3bfb6fc3-2033-4b6c-b6ca-a076a3f86416"
# create LSTM model
model=Sequential()
model.add(LSTM(64,return_sequences=True,input_shape=(time_step,1)))
model.add(LSTM(64,return_sequences=True))
model.add(LSTM(64))
model.add(Dense(1))
model.compile(loss='mean_squared_error',optimizer='adam')
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="KgxI-Tif1D2a" executionInfo={"status": "ok", "timestamp": 1635574915761, "user_tz": -330, "elapsed": 267358, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="26cd122c-8039-4f28-dbf7-3d42ce2df020"
# model training
model.fit(x_train,y_train,validation_data=(x_test,y_test),epochs=200,batch_size=64,verbose=1)
# + [markdown] id="sZlVnMoS_m7T"
# ## Predictions
# + id="ET0QHm4C1pYq" executionInfo={"status": "ok", "timestamp": 1635574945330, "user_tz": -330, "elapsed": 2220, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
train_predict=model.predict(x_train)
test_predict=model.predict(x_test)
# + id="bWuHG7wm3QsY" executionInfo={"status": "ok", "timestamp": 1635574982028, "user_tz": -330, "elapsed": 432, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
# Transform back to original scale
train_predict=scaler.inverse_transform(train_predict)
test_predict=scaler.inverse_transform(test_predict)
# + [markdown] id="2YjUgGx13e0x"
# ## Metrics
# + id="BnfVvZ-D3aF6" executionInfo={"status": "ok", "timestamp": 1635575037642, "user_tz": -330, "elapsed": 400, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
import math
from sklearn.metrics import mean_squared_error
# + colab={"base_uri": "https://localhost:8080/"} id="nBAR1aco3nrU" executionInfo={"status": "ok", "timestamp": 1635575161647, "user_tz": -330, "elapsed": 425, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="68a9dd60-c0c3-47f3-96a9-072c9afa3aae"
# we will use RMSE error metric
# Bug fix: train_predict/test_predict were inverse-transformed back to price
# units above, but y_train/y_test were still min-max scaled, so the two RMSE
# arguments were on different scales. Bring the targets back to price units
# before comparing.
y_train_actual = scaler.inverse_transform(y_train.reshape(-1, 1))
y_test_actual = scaler.inverse_transform(y_test.reshape(-1, 1))
rmse_train = math.sqrt(mean_squared_error(y_train_actual, train_predict))
rmse_test = math.sqrt(mean_squared_error(y_test_actual, test_predict))
print(f"RMSE on train set: {round(rmse_train, 4)}")
print(f"RMSE on test set: {round(rmse_test, 4)}")
# + [markdown] id="z9U6mjxQ_0Bq"
# As from the RMSE error, the model might be a little underfitting. We can deal with this issue by: \
# 1. Getting more training data. \
# 2. Increasing the size or number of parameters or layers in the model. \
# 3. Increasing the complexity of the model by using a bigger model. \
# 4. Increasing the training time, until cost function is minimised.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="BuIVtciL4Dsy" executionInfo={"status": "ok", "timestamp": 1635575276170, "user_tz": -330, "elapsed": 504, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="21771d51-0b7f-42c7-cd24-14190a536285"
### Plotting
# shift train predictions for plotting: the model consumes `look_back` points
# before producing its first prediction, so predictions are offset to align
# with the original series; unpredicted positions stay NaN and are not drawn.
look_back=80
trainPredictPlot = np.empty_like(df_c)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict)+look_back, :] = train_predict
# shift test predictions for plotting (test segment starts after the train
# segment plus a second look_back window used to seed the test sequences)
testPredictPlot = np.empty_like(df_c)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(train_predict)+(look_back*2)+1:len(df_c)-1, :] = test_predict
# plot baseline (actual prices, back in original scale) and both prediction segments
plt.plot(scaler.inverse_transform(df_c))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
# + [markdown] id="isQiQAF_5Ehz"
# ## Future Predictions
# + colab={"base_uri": "https://localhost:8080/"} id="TwyUS8Oo4h4Z" executionInfo={"status": "ok", "timestamp": 1635575597405, "user_tz": -330, "elapsed": 421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="b6ed81fd-0480-4cfc-83ed-a914c7a622e0"
x_input=list(test_data[(len(test_data)-look_back):].reshape(1,-1))
temp_input = x_input[0].tolist()
print(temp_input)
# + colab={"base_uri": "https://localhost:8080/"} id="To0to_RF5wHa" executionInfo={"status": "ok", "timestamp": 1635576207007, "user_tz": -330, "elapsed": 1842, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="f92eece3-46ae-4ea7-91e2-dd283d17d4a9"
# Autoregressive forecast for the next 30 days (the original comment said 10,
# but the loop runs 30 iterations). Each predicted value is appended to
# `temp_input` and fed back as part of the next model input.
lstm_output=[]
n_steps=80
i=0
while(i<30):
    if(len(temp_input)>n_steps):
        # Window has grown past n_steps: slide it forward by one day.
        x_input = np.array(temp_input[1:])
        print("{} day input {}".format(i,x_input))
        x_input = x_input.reshape((1, n_steps, 1))
        yhat = model.predict(x_input, verbose=0)
        print("{} day output {}".format(i,yhat))
        temp_input.extend(yhat[0].tolist())
        # Drop the oldest value so the window stays at n_steps entries.
        temp_input=temp_input[1:]
        lstm_output.extend(yhat.tolist())
    else:
        # First iteration only: temp_input still holds exactly n_steps values,
        # so reuse x_input as seeded from the last test-data window above.
        print("{} day input {}".format(i,x_input))
        x_input = np.array(x_input).reshape((1, n_steps,1))
        yhat = model.predict(x_input, verbose=0)
        print("{} day output {}".format(i,yhat))
        temp_input.extend(yhat[0].tolist())
        print(len(temp_input))
        lstm_output.extend(yhat.tolist())
    # Increment runs once per loop iteration, regardless of branch.
    i += 1
print(lstm_output)
# + [markdown] id="Q3-ef9x29t69"
# ## Plotting Predictions
# + id="YdvoddOs7nz4" executionInfo={"status": "ok", "timestamp": 1635576348487, "user_tz": -330, "elapsed": 395, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}}
og_day=np.arange(1,n_steps+1)
pred_day=np.arange(n_steps+1,n_steps+31)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="OISQrCBg8ntT" executionInfo={"status": "ok", "timestamp": 1635576425967, "user_tz": -330, "elapsed": 502, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="f2347dea-3de7-45bf-f93e-e2f82cb5e6eb"
plt.plot(og_day,scaler.inverse_transform(df_c[(len(df_c)-n_steps):]))
plt.plot(pred_day,scaler.inverse_transform(lstm_output))
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="QOLEOloe842Q" executionInfo={"status": "ok", "timestamp": 1635576560265, "user_tz": -330, "elapsed": 464, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="f73128d5-a31b-4cad-bddb-bd9989da65a9"
# plot last 20 days and next 30 days only
df2=df_c.tolist()
df2.extend(lstm_output)
plt.plot(df2[(len(df_c)-20):])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="mkplZ6aX9bZA" executionInfo={"status": "ok", "timestamp": 1635576608075, "user_tz": -330, "elapsed": 473, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlSJDlZ0s-MgWUH_Pwct_zj3X1Lfh7O8awMGmCUg=s64", "userId": "04140676807795382520"}} outputId="36c5f25b-5872-4c08-e328-ba8b9f654390"
df3=scaler.inverse_transform(df2).tolist()
plt.plot(df3)
| DL/Stock Price Prediction - LSTM/Stock Price Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Identify Missing Time Blocks
import pandas as pd
import sqlite3
# Import full Census Block Group data, generate one row per 3-hour block group per day.
df_census = pd.read_csv("../data/census-data/BlockGroup/FinalBlockGroupData.csv")
# +
start_year = '2013'
end_year = '2017'
# One row per 3-hour slot from 2013-01-01 00:00 through 2017-12-31 21:00.
# NOTE(review): `closed='left'` was renamed to `inclusive='left'` in pandas 1.4
# and removed in 2.0 -- this cell requires an older pandas.
df_time = pd.DataFrame({
    'date': pd.date_range(
        start = pd.Timestamp(start_year),
        end = pd.Timestamp(end_year) + pd.offsets.YearEnd(0) + pd.Timedelta(days=1),
        freq = '3h',
        closed = 'left'
    )
})
# -
# Add columns for month/day/year/3-hr time block category column.
df_time['date'] = pd.to_datetime(df_time['date'])
df_time['year'] = df_time['date'].dt.year
df_time['month'] = df_time['date'].dt.month
df_time['day'] = df_time['date'].dt.day
# Bucket the hour into the 8 named 3-hour blocks used throughout the project.
df_time['time_block'] = pd.cut(df_time['date'].dt.hour,8,labels=['12am-3am','3am-6am','6am-9am','9am-12pm','12pm-3pm','3pm-6pm','6pm-9pm','9pm-12am'],include_lowest=True)
df_time.head(10)
# Merge with census on Year: cross-joins every block group onto every 3-hour
# slot of the matching year (inner merge on year == Year).
full_date_time_census = df_time.merge(df_census[['Tract','BlockGroup','Year']], left_on='year', right_on='Year')
full_date_time_census.head(10)
full_date_time_census.count()
# Import Census/Crime/Weather to figure out how to merge
conn = sqlite3.connect('DC-Criminalistics_06012019/data/crime_census_weather.db')
c = conn.cursor()
df = pd.read_sql('''select * from crime_census_weather where year >=2013 and year <= 2017''', conn)
df.head()
list(df)
# Add date/time/3-hr time fields to crime/census/weather df
# +
df['START_DATE'] = pd.to_datetime(df['START_DATE'])
df['START_YEAR'] = df['START_DATE'].dt.year
df['START_MONTH'] = df['START_DATE'].dt.month
df['START_DAY'] = df['START_DATE'].dt.day
df['START_TIME_CATEGORY'] = pd.cut(df['START_DATE'].dt.hour,8,labels=['12am-3am','3am-6am','6am-9am','9am-12pm','12pm-3pm','3pm-6pm','6pm-9pm','9pm-12am'],include_lowest=True)
# -
df.head()
# Need to update Block Group field in census/date/time df so it will join correctly to crime/weather/census
# +
# Zero-pad the tract code to the standard 6-character census format.
full_date_time_census['Tract'] = full_date_time_census['Tract'].apply(str)
full_date_time_census['Tract'] = full_date_time_census['Tract'].apply(lambda x: x.zfill(6))
# -
# Bug fix: also cast BlockGroup to str -- concatenating a string Series with a
# numeric Series raises a TypeError (a no-op if the column is already str).
full_date_time_census['BlockGroup_clean'] = full_date_time_census['Tract'] + " " + full_date_time_census['BlockGroup'].astype(str)
full_date_time_census.head()
# Joining crime/weather to full date/timeblock data. A left merge keeps every
# (date, 3-hour block, block group) slot; slots with no matching crime record
# get NaN in the crime columns.
time_blocks_and_crime = full_date_time_census.merge(df, how='left', left_on=['year','month','day','time_block','BlockGroup_clean'], right_on=['START_YEAR','START_MONTH','START_DAY','START_TIME_CATEGORY','BLOCK_GROUP'])
time_blocks_and_crime.head()
# Rows whose crime id is NaN are the time blocks with no recorded crime.
no_crime_time_census = time_blocks_and_crime[time_blocks_and_crime['OCTO_RECORD_ID'].isna()]
no_crime_time_census.count()
| notebooks/.ipynb_checkpoints/Missing Census and Time Blocks_06022019-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import shapely.geometry
import pandas as pd
import geopandas as gpd
import asf_search as asf
# Search area: a polygon over southeast Alaska (WKT, lon/lat order).
wkt = 'POLYGON((-135.7 58.2,-136.6 58.1,-135.8 56.9,-134.6 56.1,-134.9 58.0,-135.7 58.2))'
#max_temporal=
#time= +- max_temporal
start='2022-01-01'
end='2022-02-01'
# Query the ASF archive for Sentinel-1 scenes intersecting the polygon in January 2022.
results = asf.geo_search(platform=[asf.PLATFORM.SENTINEL1], intersectsWith=wkt, start=start,end=end)
# +
#print(results)
# -
# Convert the GeoJSON results to a GeoDataFrame and keep only high-res GRD products.
a=gpd.GeoDataFrame.from_features(results.geojson())
a[a.processingLevel=='GRD_HD']
| notebooks/sentinel1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculate the number of observations that went into detecting each waterbody <img align="right" src="../../../Supplementary_data/dea_logo.jpg">
#
# * **Compatability:** Notebook currently compatible with the `NCI` environment only. You can make this notebook `Sandbox` compatible by pointing it to the DEA Waterbodies timeseries located in AWS.
# * **Products used:**
# None.
# * **Prerequisites:** This notebook explores the individual waterbody timeseries csvs contained within the DEA Waterbodies dataset. It has been designed with that very specific purpose in mind, and is not intended as a general analysis notebook.
# ## Description
# This notebook loops through all of the individal waterbodies timeseries produced within DEA Waterbodies, and generates statistics on the number of observations within each of the individual records.
#
# 1. Load the required python modules
# 2. Set up the directory where the timeseries data are all located
# 3. Glob through that directory to get a list of all the files to loop through
# 4. Loop through each file and make a note of its length
# 5. Calculate length statistics
#
# ***
# ## Getting started
#
# To run this analysis, run all the cells in the notebook, starting with the "Load packages" cell.
# ### Load packages
# Import Python packages that are used for the analysis.
# +
# %matplotlib inline
import glob

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# -
# ### Analysis parameters
#
# * `TimeseriesDir`: Folder where the DEA Waterbodies timeseries are saved
# * `AnalysisStartDate`: e.g. `'1985-01-01'`. Date to start counting observations from. The dataset begins in 1987. If you want to select a shorter date range over which to count observations, set this data to your custom range.
# * `AnalysisEndDate`: e.g. `'2019-01-01'`. Final date to finish counting observations. The dataset is being continually updated. If you want to select a shorter date range over which to count observations, set this data to your custom range.
# +
TimeseriesDir = '/g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid'
AnalysisStartDate = '1985-01-01'
AnalysisEndDate = '2019-01-01'
# -
# ## Get a list of all of the csv files
CSVFiles = glob.glob(f'{TimeseriesDir}/**/*.csv', recursive=True)
# ## Open each file, then work out how many observations occur between the observation period
# Count, for every waterbody csv, how many observations fall inside the
# analysis window; files with fewer than 50 observations are printed for review.
AllObs = []
for FileName in CSVFiles:
    try:
        TimeHistory = pd.read_csv(FileName)
        TimeHistory['Observation Date'] = pd.to_datetime(TimeHistory['Observation Date'])
        NumObs = len(TimeHistory[(TimeHistory['Observation Date'] > AnalysisStartDate) &
                                 (TimeHistory['Observation Date'] < AnalysisEndDate)])
        if NumObs < 50:
            print(FileName)
        AllObs.append(NumObs)
    except Exception as e:
        # Bug fix: the original bare `except:` also swallowed SystemExit /
        # KeyboardInterrupt and hid the failure reason; catch Exception and
        # report why the file could not be processed.
        print(f'{FileName} did not work: {e}')
# ## Calculate some statistics on observation length
#
# You can edit these cells to generate different length statistics.
AllObs.sort()
# Histogram of per-waterbody observation counts over the analysis window.
# NOTE(review): `np` requires `import numpy as np` in the imports cell above.
AllObsNP = np.array(AllObs)
plt.hist(AllObsNP, bins=20)
plt.xlabel(f'Number of Observations')
plt.title(f'Number of Observations between {AnalysisStartDate} and {AnalysisEndDate} \n'
          'for individual DEA Waterbodies')
# ### Interrogate the length some more
# You can change the statistic here depending on what you're interested in.
AllObsNP.min()
# ***
#
# ## Additional information
#
# **License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0).
# Digital Earth Australia data is licensed under the [Creative Commons by Attribution 4.0](https://creativecommons.org/licenses/by/4.0/) license.
#
# **Contact:** If you need assistance, please post a question on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)).
# If you would like to report an issue with this notebook, you can file one on [Github](https://github.com/GeoscienceAustralia/dea-notebooks).
#
# **Last modified:** January 2020
#
# **Compatible datacube version:** N/A
# ## Tags
# Browse all available tags on the DEA User Guide's [Tags Index](https://docs.dea.ga.gov.au/genindex.html)
# + raw_mimetype="text/restructuredtext" active=""
# **Tags**: :index:`DEA Waterbodies`
| Scientific_workflows/DEAWaterbodies/DEAWaterbodiesToolkit/CalculateObservationsThatWentIntoWaterBodyIdentification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import torch
import sys
from research_tools.store import reader
import os
import matplotlib.pyplot as plt
from IPython.display import display
# (slurm job id, run id) pairs identifying which training runs to load.
paths = [('22_33_50_778649-30_09_20', 1)]
run_dirs = [f'/tiger/u/kshen6/byol-pytorch/{slurmid}/run_{runid}' for slurmid, runid in paths]
log_reader = reader.ExperimentLogReader(None, run_dirs)
# log_reader.run_readers[0].load_data_dict('ckpt-699')
# Checkpoint tag to plot; presumably matches a saved data dict -- TODO confirm.
dict_id = ['ckpt8-499']
log_reader.make_plots('epoch', 'train_acc', dict_id=dict_id)
log_reader.make_plots('epoch', 'val_acc', dict_id=dict_id)
| evaluate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import time
import geopy
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
import ipywidgets as widgets
import geopandas as gpd
import matplotlib.pyplot as plt
import plotly_express as px
from geopy.geocoders import Nominatim
from tabulate import tabulate
from datetime import datetime as dt
from matplotlib import gridspec
from IPython.core.display import HTML
from ipywidgets import fixed
from multiprocessing import Pool
from geopy.extra.rate_limiter import RateLimiter
# -
def jupyter_settings():
    """Apply notebook-wide plotting and pandas display defaults."""
    # %matplotlib inline
    # %pylab inline
    # Matplotlib: bmh style, large figures and fonts for wide-screen EDA.
    plt.style.use( 'bmh' )
    plt.rcParams['figure.figsize'] = [20,12]
    plt.rcParams['font.size'] = 15
    # Stretch notebook cells to the full browser width.
    display( HTML( '<style>.container { width:100% !important; }</style>') )
    # Pandas: 3-decimal floats, show every row/column, no frame wrapping.
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pd.set_option( 'display.expand_frame_repr', False )
    # Seaborn default theme on top of the matplotlib settings.
    sns.set()
jupyter_settings()
# # 0.0 Imports
# ## 0.1 Loanding Data
# Load the King County house-sales dataset from the working directory.
df_kc_sales = pd.read_csv("kc_house_data.csv", index_col = False, low_memory = False)
df_kc_sales.sample(5)
# # 1.0 Data Descriptive
#
#
# Work on a copy so the raw load stays untouched.
df1 = df_kc_sales.copy()
# # 1.1 Data Dimensions
print ('Number of Rows: {}'.format( df1.shape[0]))
print ('Number of Cols: {}'.format (df1.shape[1]))
df1.columns
# # 1.2 Data Types
df1.dtypes
# # 1.3 API (Adicionar Endereço Completo)
# +
#df1['query'] = df1[['lat', 'long']].apply( lambda x: str(x['lat']) + ',' + str(x['long']), axis = 1)
#
#df_query = df1.loc[:,('id', 'query')]
# +
#Create Empty Columns
#df1.loc[:,'place_id'] = 'NA'
#df1.loc[:,'osm_type'] = 'NA'
#df1.loc[:,'house_number'] = 'NA'
#df1.loc[:,'road'] = 'NA'
#df1.loc[:,'neighbourhood'] = 'NA'
#df1.loc[:,'city'] = 'NA'
#df1.loc[:,'county'] = 'NA'
#df1.loc[:,'state'] = 'NA'
#df1.loc[:,'country'] = 'NA'
#df1.loc[:,'country_code'] = 'NA'
# +
#p = Pool(4)
#
#
#start = time.process_time()
#
##Arquivo 'defs' em .py
#
#import defs
#
#df_query[['house_number','road', 'neighbourhood', 'city', 'state', 'country']] = p.map ( defs.get_data, df_query.iterrows())
#
#end = time.process_time()
#
#print ('Time Elapsed: {}', end - start)
#df_query = df_query.to_csv("df_query.csv")
# -
# Load the geocoded addresses produced earlier by the (commented-out) Nominatim run.
df_query = pd.read_csv('df_query.csv')
df_query.head()
# # 1.4 Check NA
df1.isnull().sum()#/df1.shape[0] * 100
# # 1.4 Descriptive Statistical
num_att = df1.select_dtypes( include = ['int64', 'float64'])
# # 1.4.1 Numerical Attributes
# +
# Central Tendency - Mean, Median
ct1 = pd.DataFrame (num_att.apply (np.mean)).T
ct2 = pd.DataFrame (num_att.apply (np.median)).T
#Dispersion - std, min, max, range, skew, kurtosis
d1 = pd.DataFrame (num_att.apply (np.std)).T
d2 = pd.DataFrame (num_att.apply (min)).T
d3 = pd.DataFrame (num_att.apply (max)).T
d4 = pd.DataFrame (num_att.apply (lambda x: x.max() - x.min())).T
d5 = pd.DataFrame (num_att.apply (lambda x: x.skew())).T
d6 = pd.DataFrame (num_att.apply (lambda x: x.kurtosis())).T
# Concatenate -- the concat order must match the column names assigned below.
t = pd.concat ((d2, d3, d4, ct1, ct2, d1, d5, d6)).T.reset_index()
t.columns =['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']
t
# -
# -
# # 1.5 Modificações Para Alterações de Variáveis (Usabilidade)
# +
##Considerações de Old_House e New_House
#Imóvel com data a partir de 01-01-2015: 'new_house'
#Imóvel com data anterior à 01-01-2015: 'old_house'
df1['date'] = pd.to_datetime(df1['date'], format = '%Y-%m-%d')
# Sales on/after 2015-01-01 are tagged 'new_house', earlier ones 'old_house'.
df1['house_age'] = df1['date'].apply( lambda x: 'new_house' if x > pd.to_datetime('2015-01-01',format = '%Y-%m-%d') else 'old_house')
# +
##Considerações sobre tipo de Quarto
#Imóvel com um quarto: 'Studio'
#Imóvel com dois quartos: 'Apartament'
#Imóvel com mais de dois quartos: 'House'
#Imóvel com valor '0': 'No Bedroom'
df1['dormitory_type'] = df1['bedrooms'].apply(lambda x: 'studio' if x == 1 else
                                            'apartament' if x == 2 else
                                            'house' if x > 2 else "no bedroom" )
# +
##Considerações sobre Conservação do Apartamento
#Imóvel com condição 1: 'bad'
#Imóvel com condição 2: 'bad'
#Imóvel com condição 3: 'regular'
#Imóvel com condição 4: 'regular'
#Imóvel com condição 5: 'good'
df1['condition_type'] = df1['condition'].apply(lambda x: 'bad' if x <= 2 else
                                               'regular' if (x == 3) | (x == 4) else 'good')
# Keep the raw condition code as a categorical string after bucketing.
df1['condition'] = df1['condition'].astype('str')
# +
#Modificação de Colunas: drop the neighbourhood-average size columns (unused).
df1 = df1.drop(['sqft_living15', 'sqft_lot15'], axis = 1)
# +
#Transformação para Datas: yr_renovated == 0 means "never renovated" and is
#mapped to the sentinel date 1900-01-01.
df1['yr_built'] = pd.to_datetime(df1['yr_built'], format = '%Y').dt.year
df1['yr_renovated'] = df1['yr_renovated'].apply (lambda x: pd.to_datetime ('1900-01-01', format = '%Y-%m-%d') if x == 0 else pd.to_datetime (x, format ='%Y'))
# +
##Considerações sobre Level de Valores (price quartiles)
#Imóvel até 321.950: 0
#Imóvel de 321.950 até 450.000: 1
#Imóvel de 450.000 até 645.000: 2
#Imóvel acima de 645.000: 3
# Bug fix: the original used strict '<' and '>' on both sides of every bucket,
# so prices exactly equal to 321950, 450000 or 645000 fell through to level 3.
# The chained '<=' comparisons below cover every price exactly once. The dead
# `df1['level'] = 'standard'` initializer (immediately overwritten) was removed.
df1['level'] = df1['price'].apply ( lambda x: 0 if x <= 321950 else
                                              1 if x <= 450000 else
                                              2 if x <= 645000 else 3)
df1['level'] = df1['level'].astype(int)
# +
# Human-readable Yes/No flag mirroring the 0/1 'waterfront' column.
df1['is_waterfront'] = df1['waterfront'].apply( lambda x: 'Yes' if x == 1 else 'No')
# +
## Size das Salas de Estar (sqft_living quartiles)
#Imóvel 0 até 1.427: 0
#Imóvel 1.427 até 1.910: 1
#Imóvel 1.910 até 2.550: 2
#Imóvel acima de 2.550: 3
df1['size'] = 'standard'
df1['size'] = df1['sqft_living'].apply ( lambda x: 's00' if (x >= 0) & (x <= 1427) else
                                        's01' if (x > 1427) & (x <= 1910) else
                                        's02' if (x > 1910) & (x <= 2550) else 's03')
# +
##Inconformidade de Valores: inspect outliers before cleaning them below.
print(df1['bedrooms'].unique())
print(df1['floors'].value_counts())
# -
# Drop the implausible 33-bedroom outlier row; round half-floors/bathrooms up.
df1 = df1.drop(df1[df1['bedrooms'] > 11].index)
df1['floors'] = df1['floors'].apply(np.ceil)
df1['bathrooms'] = df1['bathrooms'].apply(np.ceil)
# Persist and reload the cleaned frame (note: to_csv returns None, so
# df_clean is only meaningful after the read_csv on the next line).
df_clean = df1.to_csv('df_clean')
df_clean = pd.read_csv('df_clean')
# # 3.0 Perguntas CEO
# ## 1. Quantas casas estão disponíveis para compra?
# Unique ids, since the same house can appear more than once (resales).
houses = len( df1['id'].unique())
print ('O número de casas disponíveis é de {}.'.format (houses))
# ## 2. Quantos atributos as casas possuem?
# +
# 'id' and 'date' are identifiers, not attributes of the house itself.
attribut = df1.drop(['id', 'date'], axis = 1)
a = len(attribut.columns)
print ('O número de atributos é: {}.'.format (a))
# -
# ## 3. Quais são os atributos das casas?
a = attribut.columns
a
# ## 4. Qual a casa mais cara?
# +
a = df1[['id', 'price']].sort_values ( 'price', ascending = False).reset_index(drop = True)['id'][0]
b = df1[['id', 'price']].sort_values ( 'price', ascending = False).reset_index(drop = True)['price'][0]
print ('A casa mais cara é a de ID: {} e valor: {}.'.format (a,b))
# -
# ## 5. Qual a casa com o maior número de quartos?
# +
a = df1[['id', 'bedrooms']].sort_values ( 'bedrooms', ascending = False).reset_index(drop = True)['id'][0]
b = df1[['id', 'bedrooms']].sort_values ( 'bedrooms', ascending = False).reset_index(drop = True)['bedrooms'][0]
print ('A casa com o maior número de quartos é a de ID: {} e possui: {} quartos.'.format (a,b))
# -
# ## 6. Quantas casas possuem 2 banheiros? E qual o valor médio destas?
# +
a = len(df1.loc[df1['bathrooms'] == 2, ['id', 'bathrooms']])
b = np.round(df1.loc[df1['bathrooms'] == 2, 'price'].mean(), 2)
print( 'O número de casas que possuem 2 banheiros é de {} e o valor médio destas estão por volta de {}.'. format(a,b))
# -
# ## 7. Qual o preço mínimo entre as casas com 3 quartos?
# +
a = df1.loc[df1['bedrooms'] == 3, 'price'].min()
print( 'O preço mínimo de casas que possuem 3 quartos é de {}.'. format(a))
# ## 8. Quantas casas possuem mais de 300 metros quadrados na sala de estar e têm mais de 2 banheiros?
# +
# 1 sqft = 0.092903 m2; the notebook uses 0.093 as the conversion factor.
df1['m2'] = df1['sqft_living'] *0.093
# Bug fix: the question asks for houses with more than 2 *bathrooms*
# ("banheiros"), but the original filtered on 'bedrooms'.
a = len(df1.loc[(df1['m2'] > 300) & (df1['bathrooms'] > 2), 'id'])
print( 'O número de casas que possuem 300 metros quadrados na sala de estar e possuem 2 banheiros é de {}.'. format(a))
# -
# ## 9. Quantas casas possuem mais de 2 andares? E quantos possuem dois andares?
# +
a = len (df1.loc[df1['floors']>2, 'id'])
b = len (df1.loc[df1['floors'] == 2, 'id'])
print( 'O número de casas que possuem mais de 2 andares é de {} e das que possuem dois andares é de {}.'. format(a,b))
# -
# ## 10. Quantas casas tem vista para o mar/lago?
# waterfront == 1 flags water-view properties.
a = len (df1.loc[df1['waterfront'] == 1, 'id'])
print( 'O número de casas que possuem vista para o mar/lago é de {}.'. format(a))
# ## 11. Das casas com vista para o mar/lago, quantas tem 3 quartos?
# +
a = len(df1.loc[(df1['waterfront'] == 1) & (df1['bedrooms'] == 3), 'id'])
print( 'O número de casas que possuem vista para o mar/lago e possuem 3 quartos é de {}.'. format(a))
# -
# ## 12. Qual a data do imóvel mais antigo? E qual a data mais antiga de renovação?
# +
# Exclude the 1900-01-01 sentinel (= never renovated) when taking the minimum.
a = df1['yr_built'].min()
b = df1.loc[df1['yr_renovated'] > pd.to_datetime('1900-01-01',format = '%Y-%m-%d'), 'yr_renovated'].dt.year.min()
print( 'A data do imóvel mais antigo é de {} e data do imóvel que tem a reforma mais antiga é de {}.'. format(a,b))
# -
# ## 13. Qual o número máximo de andares e quantos casas possuem este número?
# +
a = df1['floors'].unique().max()
# Bug fix: 'floors' was rounded up with np.ceil during cleaning, so no house
# has 3.5 floors any more and the original hard-coded `== 3.5` counted zero
# rows; count the houses at the actual maximum instead.
b = len(df1.loc[df1['floors'] == a, 'id'])
print( 'O número máximo de andares é de {} e {} imóveis possuem este número.'. format(a,b))
# -
# ## 14. Quantos imóveis estão com a condição regular?
# +
# Count of properties bucketed as 'regular' (raw condition 3 or 4).
a = df1.loc[df1['condition_type'] == 'regular', 'id'].size
print( 'O número imóveis que estão com a condição regular é de {}.'. format(a))
# -
# ## 15. Quantos imóveis estão com a condição 'bad' e possuem vista para mar/lago?
# +
a = df1.loc[(df1['condition_type'] == 'bad')& (df1['waterfront'] == 1), 'id'].size
print( 'O número imóveis que estão com a condição "bad" e possuem vista para mar/lago é de {}.'. format(a))
# -
# ## 16. Quantos imóveis estão com a condição 'good' e são consideradas 'new_house'?
# +
a = df1.loc[(df1['condition_type'] == 'good')& (df1['house_age'] == 'new_house'), 'id'].size
print( 'O número imóveis que estão com a condição "good" e são consideradas "new_house" é de {}.'. format(a))
# -
# ## 17. Qual o valor do imóvel mais caro do tipo 'studio'?
# +
# Studios are one-bedroom properties per the dormitory_type bucketing above.
a = df1.loc[df1['dormitory_type'] == 'studio', 'price'].max()
print( 'O valor do imóvel mais caro do tipo "studio" é de {}.'. format(a))
# -
# ## 18. Quantos imóveis do tipo "apartment" foram reformados em 2015?
# +
# Bug fix: the question asks specifically for 'apartment'-type properties,
# but the original counted every 2015 renovation regardless of dormitory_type.
# ('apartament' is the label spelling used when the column was created.)
a = df1.loc[(df1['dormitory_type'] == 'apartament') &
            (df1['yr_renovated'] == pd.to_datetime ('2015-01-01')), 'id'].size
print( 'A quantidade de imóveis do tipo "apartment" que foram reformados no ano de 2015 é de {}.'. format(a))
# -
# ## 19. Qual o maior número de quartos que um imóvel do tipo "house" possui?
# +
# Maximum bedroom count among 'house'-type (3+ bedroom) properties.
a = df1.loc[df1['dormitory_type'] == 'house', 'bedrooms'].max()
print( 'O maior número de quartos de um imóvel do tipo "house" é de {}.'. format(a))
# -
# ## 20. Quantos imóveis "new_house" foram reformados no ano de 2014?
# +
a = df1.loc[( df1['house_age'] == 'new_house') &
            ( df1['yr_renovated'] == pd.to_datetime ('2014-01-01', format = '%Y-%m-%d')), 'id'].size
print ( 'A quantidade de imóveis considerados "new_house" que foram reformados no ano de 2014 é de {}.'. format(a))
# -
# # <center> TABELAS
# ## <center> Imóveis x Quartos
df_grouped = df1[['id', 'bedrooms']].groupby('bedrooms').size()
df_grouped
# ## <center> Imóveis x Ano de Construção
#
df_grouped = df1[['id', 'yr_built']].groupby('yr_built').count().reset_index()
df_grouped
# ## <center> Menor Número de Quartos x Ano de Construção de Imóveis
df_grouped = df1[['bedrooms', 'yr_built']].groupby('yr_built').min()
df_grouped
# ## <center> Preço de Compra Mais Alto x Número de Quarto
df_grouped = df1[['price', 'bedrooms']].groupby('bedrooms').max().reset_index()
df_grouped
# ## <center> Tamanho Médio das Salas dos Imóveis x Ano de Construção
df_grouped = df1[['sqft_living', 'yr_built']].groupby('yr_built').mean().reset_index()
df_grouped
# ## <center> Média dos Preços dos Imóveis por Tipo de Dormitório x Ano de Construção
df1[['price', 'dormitory_type']].groupby('dormitory_type').mean().reset_index()
# ## <center> Média dos Preços dos Imóveis x Level
df1[['price', 'level']].groupby('level').mean().reset_index()
# ## <center> Média dos Preços dos Imóveis x Tamanho da Sala de Estar
df1[['price', 'size']].groupby('size').mean().reset_index()
# # <center> GRÁFICOS
# ## 1. Gráfico que represente a soma dos preços pelo números de quartos
# +
# Total price per bedroom count.
aux1 = df1[['price', 'bedrooms']].groupby('bedrooms').sum().reset_index()
sns.barplot( x = 'bedrooms', y= 'price', data = aux1)
# -
# ## 2. Gráfico de linhas que represente a média dos preços pelo ano de construção dos imóveis.
# +
aux1 = df1[['price', 'yr_built']].groupby('yr_built').mean().reset_index()
sns.lineplot( x = 'yr_built', y= 'price', data = aux1)
# -
# ## 3. Gráfico que represente a média dos preços pelos tipos de dormitórios
# +
aux1 = df1[['price', 'dormitory_type']].groupby('dormitory_type').mean().reset_index()
sns.barplot( x = 'dormitory_type', y= 'price', data = aux1)
# -
# ## 4. Gráfico que represente a evolução da média dos preços pelo ano da reforma dos imóveis a partir do ano de 1930.
# Restrict to renovations after 1930 to drop the 1900-01-01 "never renovated" sentinel.
aux1 = df1.loc[df1['yr_renovated'] > pd.to_datetime ('1930'), ['price','yr_renovated']].groupby ('yr_renovated').mean().reset_index()
sns.lineplot( x = 'yr_renovated', y = 'price', data = aux1)
# # <center> DASHBOARDS
# +
# Dashboard 1: two full-width line plots on top, two half-width bar plots below.
fig = plt.figure(figsize = (20,12))
specs = gridspec.GridSpec(ncols = 2, nrows = 3, figure=fig)
ax1 = fig.add_subplot(specs[0,:])
ax2 = fig.add_subplot(specs[1,:])
ax3 = fig.add_subplot(specs[2,0])
ax4 = fig.add_subplot(specs[2,1])
# 1: mean price by year built
df = df1[['price', 'yr_built']].groupby('yr_built').mean().reset_index()
sns.lineplot( x = 'yr_built', y = 'price', data = df, ax = ax1);
ax1.set_title( "Price vs Year Built" , size = 20 )
ax1.set_xlabel("")
ax1.set_ylabel("")
# 2: mean price by renovation year (post-1930 only, excludes the sentinel)
df = df1.loc[df1['yr_renovated'] > pd.to_datetime ('1930'), ['price','yr_renovated']].groupby ('yr_renovated').mean().reset_index()
sns.lineplot( x = 'yr_renovated', y = 'price', data = df, ax = ax2);
ax2.set_title( "Price vs Year Renovated" , size = 20 )
ax2.set_xlabel("")
ax2.set_ylabel("")
# 3: total price by bedroom count
df = df1[['price', 'bedrooms']].groupby('bedrooms').sum().reset_index()
sns.barplot( x = 'bedrooms', y = 'price', data = df, ax = ax3);
ax3.set_title( "Price vs Bedrooms" , size = 20 )
ax3.set_xlabel("")
ax3.set_ylabel("")
# 4: mean price by dormitory type
df = df1[['price', 'dormitory_type']].groupby('dormitory_type').mean().reset_index()
sns.barplot(x = 'dormitory_type', y = 'price', data =df, ax= ax4);
ax4.set_title( "Price vs Dormitory Type" , size = 20 )
ax4.set_xlabel("")
ax4.set_ylabel("")
# +
# Dashboard 2: sale-date aggregations (yearly totals, daily and weekly means).
fig = plt.figure(figsize = (20,12))
specs = gridspec.GridSpec(ncols = 2, nrows = 2, figure=fig)
ax1 = fig.add_subplot(specs[0,:])
ax2 = fig.add_subplot(specs[1,0])
ax3 = fig.add_subplot(specs[1,1])
# Gráfico 1: total sales value per calendar year
df1['year'] = pd.to_datetime (df1['date']).dt.year
by_year = df1[['price', 'year']].groupby('year').sum().reset_index()
sns.barplot(x = 'year',y = 'price', data = by_year, ax = ax1)
ax1.set_title( "Price vs Year" , size = 20 )
ax1.set_xlabel("")
ax1.set_ylabel("")
# Gráfico 2: mean sale price per day
df1['day'] = pd.to_datetime (df1['date'])
by_day = df1[['price', 'day']].groupby('day').mean().reset_index()
sns.lineplot(x = 'day', y = 'price', data = by_day, ax = ax2)
ax2.set_title( "Price vs Day" , size = 20 )
ax2.set_xlabel("")
ax2.set_ylabel("")
# Gráfico 3: mean sale price per ISO year-week
df1['week'] = pd.to_datetime (df1['date']).dt.strftime('%Y-%U')
by_week = df1[['price', 'week']].groupby('week').mean().reset_index()
sns.lineplot(x = 'week', y = 'price', data = by_week, ax = ax3)
ax3.set_title( "Price vs Week" , size = 20 )
ax3.set_xlabel("")
ax3.set_ylabel("")
plt.xticks (rotation = 90 );
# -
# # <center> FILTROS ITERATIVOS
df2 = df1.copy()
# 'initial' lets long widget descriptions render without truncation.
style = {'description_width' : 'initial'}
# +
#Iterative Buttons - Price (upper bound used by the map filter)
# Bug fix throughout this cell group: the ipywidgets keyword is 'disabled',
# not 'disable' -- newer traitlets rejects unexpected keyword arguments.
price_limit = widgets.IntSlider(
    value = int(df2['price'].mean() ),
    min = df2['price'].min(),
    max = df2['price'].max(),
    step = 1,
    description = 'Maximum Price',
    disabled = False,
    style = style
)
# +
#Iterative Buttons - WaterFront ('Yes'/'No' dropdown)
waterfront_bar = widgets.Dropdown(
    options = df2['is_waterfront'].unique().tolist(),
    value = 'Yes',
    description = 'Water View',
    disabled = False
)
# +
#Iterative Buttons - LivingRoom (lower bound)
livingroom_limit = widgets.IntSlider(
    value = int(df2['sqft_living'].mean() ),
    min = df2['sqft_living'].min(),
    max = df2['sqft_living'].max(),
    step = 1,
    description = 'Minimum Living Room Size',
    disabled = False,
    style = style
)
# +
#Iterative Buttons - Bathrooms (lower bound)
bathrooms_limit = widgets.IntSlider(
    value = df2['bathrooms'].mean(),
    min = df2['bathrooms'].min(),
    max = df2['bathrooms'].max(),
    step = 1,
    description = 'Minimum Bathrooms',
    disabled = False,
    style = style
)
# +
#Iterative Buttons - Year Built (lower bound)
yr_built_limit = widgets.IntSlider(
    value = df2['yr_built'].mean(),
    min = df2['yr_built'].min(),
    max = df2['yr_built'].max(),
    step = 1,
    description = 'Year Built',
    disabled = False,
    style = style
)
# -
# ## <center> MAPA INTERATIVO
def update_map(df2, price_limit, livingroom_limit, bathrooms_limit, yr_built_limit, waterfront_bar ):
    """Filter df2 by the widget values and redraw the interactive price map.

    Parameters mirror the widgets: max price, min living area, min bathrooms,
    min build year, and the waterfront dropdown ('Yes'/'No').
    """
    # BUG FIX: the last condition was the bare string column
    # `df2['is_waterfront']`, which never used the dropdown value (and `&`
    # with a string Series is invalid). It must compare against waterfront_bar.
    filters = df2[(df2['price'] < price_limit) &
                  (df2['sqft_living'] > livingroom_limit) &
                  (df2['bathrooms'] > bathrooms_limit) &
                  (df2['yr_built'] > yr_built_limit) &
                  (df2['is_waterfront'] == waterfront_bar)][['id', 'lat', 'long', 'price', 'level']].copy()
    # Bubble size encodes price, color encodes the 'level' segment.
    mapa = px.scatter_mapbox ( filters,
                               lat='lat',
                               lon='long',
                               size = 'price',
                               color = 'level',
                               hover_name='id',
                               color_continuous_scale = px.colors.cyclical.IceFire,
                               size_max = 15,
                               zoom = 10 )
    mapa.update_layout (mapbox_style = 'open-street-map', height = 600, margin = {'r':0, 't':0, 'l':0, 'b':0})
    mapa.show()
# Bind the widgets to update_map: any slider/dropdown change re-renders the map.
# fixed(df2) keeps the dataframe out of the widget controls.
widgets.interactive (update_map, df2 = fixed (df2), price_limit = price_limit,
                     livingroom_limit = livingroom_limit,
                     bathrooms_limit = bathrooms_limit,
                     yr_built_limit = yr_built_limit,
                     waterfront_bar = waterfront_bar)
# # <center> ITERATIVIDADE COM O DASHBOARD
df3 = df2.copy()
# +
# Derive string date columns used for grouping in the dashboard.
df3['year'] = pd.to_datetime (df3['date']).dt.strftime( '%Y' )
df3['date'] = pd.to_datetime (df3['date']).dt.strftime( '%Y-%m-%d')
# BUG FIX: the format string was the malformed 'Y%-%U' (literal "Y" followed
# by an invalid %- directive); '%Y-%U' produces the intended "year-week".
df3['year_week'] = pd.to_datetime (df3['date']).dt.strftime( '%Y-%U')
# Widgets controlling the dashboard.
# BUG FIX: keyword is `disabled`, not `disable` (see the map widgets above).
price_limit = widgets.IntSlider(
    value = int(df2['price'].mean() ),
    min = df2['price'].min(),
    max = df2['price'].max(),
    step = 1,
    description = 'Maximum Price',
    disabled = False,
    style = style
)
waterfront_limit = widgets.Checkbox(
    value = False,
    description = 'Is Waterfront?',
    disabled = False,
    indent = False)
# -
def update_dash (df3, price_limit, waterfront_limit ):
    """Filter df3 by max price and waterfront flag, then draw two charts:
    sales count per year (bar) and mean price per day (line).

    NOTE(review): this compares the numeric `waterfront` column against a
    boolean checkbox — presumably the column is 0/1 so False matches 0;
    confirm against the data.
    """
    filters_2 = df3[(df3['price'] < price_limit)&
                    (df3['waterfront'] == waterfront_limit)].copy()
    fig = plt.figure (figsize = (21,12))
    # Two stacked full-width panels.
    specs = gridspec.GridSpec ( ncols = 2, nrows = 2, figure = fig )
    ax1 = fig.add_subplot (specs[0,:])
    ax2 = fig.add_subplot (specs[1,:])
    # count() -> number of sales per year.
    by_year = filters_2[['price','year']].groupby ('year').count().reset_index()
    sns.barplot (x = 'year', y = 'price', data = by_year, ax = ax1)
    plt.xticks (rotation = 90 );
    # mean() -> average sale price per day.
    by_day = filters_2[['price','date']].groupby ('date').mean().reset_index()
    sns.lineplot (x = 'date', y = 'price', data = by_day, ax = ax2)
    plt.xticks (rotation = 90 );
# Bind the two dashboard widgets to update_dash.
widgets.interactive ( update_dash,
                      df3 = fixed (df3),
                      price_limit = price_limit,
                      waterfront_limit = waterfront_limit )
| KC_House_Sales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="mKqgKTs3FZCI"
# <center>
# <h4>Universidad Nacional de Córdoba - Facultad de Matemática, Astronomía, Física y Computación</h4>
# <h3>Diplomatura en Ciencia de Datos, Aprendizaje Automático y sus Aplicaciones</h3>
# </center>
# + [markdown] colab_type="text" id="b956SHDlFZCJ"
# <h1> Práctico -> Grupo 7_2</h1>
# <h3> Análisis y Visualización de Datos - 2020 </h3>
#
# Durante este práctico vamos a trabajar sobre el dataset [Human Freedom Index 2018](https://www.cato.org/human-freedom-index-new) de el instituto Cato. Este índice mide en detalle lo que entendemos como libertad, utilizando 79 indicadores de libertad personal y económica en distintos aspectos, hasta obtener un hermoso numerito del 1 al 10.
#
# Las variables más importantes sobre las que trabaja el dataset son:
#
# * Rule of Law
# * Security and Safety
# * Movement
# * Religion
# * Association, Assembly, and Civil Society
# * Expression and Information
# * Identity and Relationships
# * Size of Government
# * Legal System and Property Rights
# * Access to Sound Money
# * Freedom to Trade Internationally
# * Regulation of Credit, Labor, and Business
#
# Nosotros centraremos nuestro análisis en variables relacionadas a *Identity and Relationships* en países de Latinoamérica, y las compararemos con las estadísticas globales. La pregunta a responder es simple: **¿Qué niveles de libertad se viven en Latinoamérica, específicamente en cuanto a libertades de identidad?**. Sin embargo, para hacer un análisis de los datos tenemos que plantear también estas subpreguntas:
#
# 1. ¿Qué significa tener un puntaje de 4.5? Hay que poner los puntajes de la región en contexto con los datos del resto del mundo.
# 2. ¿Cuál es la tendencia a lo largo de los años? ¿Estamos mejorando, empeorando?
# 3. En este estudio, la libertad se mide con un estimadores principal: *hf_score* que hace referencia a Human Freedom, que a su vez está calculado en base a dos otros estimadores *ef_score*, para Economic Freedom y *pf_score*, para Personal Freedom. Estos tres estimadores, ¿se relacionan de la misma manera con la libertad de identidad?
#
# Inicialmente, en toda exploración de datos tenemos muy poca información a priori sobre el significado de los datos y tenemos que empezar por comprenderlos. Les proponemos los siguientes ejercicios como guía para comenzar esta exploración.
# + colab={"base_uri": "https://localhost:8080/", "height": 73} colab_type="code" id="xZoxu5phFZCK" outputId="316448bd-61a0-4fc4-e2d0-7abfce040abd"
import matplotlib.pyplot as plt
import numpy
import pandas
import seaborn
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="-N3A9N2LFZCP" outputId="cfada158-aea9-4ed2-cd1a-d24db74f14f4"
seaborn.__version__
# + [markdown] colab_type="text" id="Gg_MqLmTTi-h"
# Si un archivo está disponible en la web, podemos leerlo con pandas utilizando su URL sin necesidad de descargarlo.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="-QOtVVUOFZCU" outputId="9ab2645c-ce83-4ffc-c61b-2477a50f068c"
# Load the 2019 Human Freedom Index directly from the Cato Institute URL.
dataset = pandas.read_csv(
    'https://object.cato.org/sites/cato.org/files/human-freedom-index-files/human-freedom-index-2019.csv')
dataset.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 160} colab_type="code" id="S_n8Fh3EFZCY" outputId="9335dbb1-b639-42e7-8183-a3974b1e9fc6"
dataset.columns # Way too many columns!
# + [markdown] colab_type="text" id="05keUcCbFZCc"
# Por suerte las columnas tienen un prefijo que nos ayuda a identificar a qué sección pertenecen. Nos quedamos sólo con las que comienzan con *pf_indentity*, junto con otras columnas más generales
# + colab={} colab_type="code" id="EQCW-sJSFZCd"
# Keep every pf_identity sub-indicator plus the three aggregate score/rank pairs.
score_cols = [col for col in dataset.columns if 'pf_identity' in col] + [
    'pf_score',   # Personal Freedom (score)
    'pf_rank',    # Personal Freedom (rank)
    'ef_score',   # Economic Freedom (score)
    'ef_rank',    # Economic Freedom (rank)
    'hf_score',   # Human Freedom (score)
    'hf_rank',    # Human Freedom (rank)
]
# Identifier/context columns kept alongside the scores.
important_cols = ['year', 'ISO_code', 'countries', 'region'] + score_cols
# + colab={"base_uri": "https://localhost:8080/", "height": 677} colab_type="code" id="ZXYn1yL5FZCh" outputId="9ff73231-1c11-407d-f47e-1fea3215e48b"
dataset[important_cols][:5]
# + [markdown] colab_type="text" id="HPC6X48OVqgN"
# Si analizamos los tipos, vemos que casi todas las columnas se leen como objetos debido a que los valores nulos están marcados con `-`. Los vamos a eliminar y volveremos a convertir las columnas con puntajes a tipo numérico.
# + colab={"base_uri": "https://localhost:8080/", "height": 231} colab_type="code" id="An0A40SKVjxW" outputId="8cbbf6e9-c100-407a-ad50-a07b458e9511"
dataset.dtypes
# + colab={"base_uri": "https://localhost:8080/", "height": 677} colab_type="code" id="cNSfBVH2WDVu" outputId="2bcdcc79-d263-4391-8272-4f4ac3ad04b1"
# Subset to the columns of interest and turn the '-' placeholders into NaN,
# then coerce every score column from object to numeric dtype.
dataset = dataset[important_cols].replace('-', numpy.nan)
for score_col in score_cols:
    dataset[score_col] = pandas.to_numeric(dataset[score_col])
dataset[:5]
# -
dataset.dtypes
# + [markdown] colab_type="text" id="jD3nFLfXSgp7"
# # **Parte 1**
#
# Luego de las clases del primer fin de semana, ya tenemos las herramientas para responder las siguientes preguntas:
# + [markdown] colab_type="text" id="DHwEp61uFZCl"
# ## **1 Estadísticos descriptivos**
# -
# ### **1.1** Para comenzar con un pantallazo de los datos, calcular el rango de las variables.
# +
import numpy as np

def rango(var):
    """Print "<name> -> max - min = range" for a numeric pandas Series.

    BUG FIX: the original printed the *global* loop variable ``score_col`` as
    the label, which only happened to be right inside the loop below and
    mislabelled (or crashed on) any other call. Use the Series' own name.
    """
    print(f"{var.name} -> {var.max()} - {var.min()} = {var.max() - var.min()}")
# Report the range of every score column.
print("Rango de las variables")
print("Variable -> max - min = rango")
for score_col in score_cols:
    rango(dataset[score_col])
# -
# **NOTA:**
# **a)** La variable **pf_identity** en todas sus variantes posee el mismo rango.
# **b)** La variable **hf_score** posee un rango mucho menor que **pf_identity**, por lo tanto los valores de los registros estarán menos dispersos.
# ### **1.2** Para cada país, tenemos varias realizaciones para cada variable *pf_identity* y *hf_score*. Si queremos comparar un país con otro, ¿cuál es la manera adecuada de hacerlo? Por ejemplo, ¿nos quedamos con un único valor? ¿o comparamos todos los valores? ¿usamos el promedio? ¿usamos la mediana?
# **NOTA:**
# **a)** Una forma práctica de comparar un país con otro es mediante gráficos. Presentaremos la comparación de paises correspondientes a Latinoamérica y el Caribe (latam).
# **b)** Compararíamos los paises teniendo en cuenta la **mediana**, ya que es un valor que no se ve influenciado por valores extremos. Una vez ordenados los valores se tiene la misma cantidad de elementos por debajo de la mediana que por encima, por lo tanto me permitiría hacer una comparación sin el problema de los outliers.
dataset.region.unique()
# +
import pandas as pd
# `latam` is the Latin America & Caribbean slice of the data;
# `dataset` keeps referring to the whole world.
latam = dataset[(dataset.region == "Latin America & the Caribbean")]
latam[:3]
# +
import numpy as np
import matplotlib.pyplot as plt

# Median pf_identity (left) and hf_score (right) per Latam country.
plt.figure (figsize=(16,8))
plt.subplot(121)
# NOTE(review): plt1/plt2 sort the *world* dataset but are never used — the
# barplots below draw `latam` unsorted.
plt1= dataset.sort_values(['pf_identity']).reset_index(drop=True)
seaborn.barplot(data=latam, x='pf_identity', y='countries', color='blue', alpha=0.5, estimator=np.median)
plt.ylabel('')
plt.xlabel('pf_identity')
plt.title('Comparación del índice de Libertad de Identidad (pf_identity) por país')
seaborn.despine(left=True)
plt.subplot(122)
plt2= dataset.sort_values(['hf_score']).reset_index(drop=True)
seaborn.barplot(data=latam, x='hf_score', y='countries', color='blue', alpha=0.5, estimator=np.median)
plt.ylabel('')
plt.xlabel('hf_score')
plt.title('Comparación del índice de Libertad Humana (hf_score) por país')
seaborn.despine(left=True)
# -
# **NOTA:**
# **a)** Los paises con menor **pf_identity** (índice de libertad de identidad) son Barbados, Guyana y Trinidad y Tobago. Argentina esta ubicado dentro de los paises con mejor Indice.
# **b)** El país con menor **hf_score** (índice de libertad humana) es Venezuela. Por el contrario, Chile es el país con el índice mas alto. Argentina se encuentra entre los cinco países con menor índice de libertad humana de Latinoamerica.
# ### **1.3** Obtener media, mediana y desviación estándar de las variables *pf_identity* y *hf_score* en el mundo y compararla con la de Latinoamérica y el caribe. Usar la respuesta del punto anterior para justificar si la comparación es válida o no.
# +
import numpy as np
import pandas as pd

# Mean, median (NaNs dropped) and standard deviation of both indicators,
# for Latam and for the whole world — same sixteen lines of output as
# before, produced by a data-driven loop instead of hand-written prints.
for etiqueta, datos in (('Latam', latam), ('Mundo', dataset)):
    print('ESTADÍSTICOS DESCRIPTIVOS PARA ' + etiqueta.upper())
    for columna in ('pf_identity', 'hf_score'):
        serie = datos[columna]
        print(etiqueta + ', ' + columna + ', Media = ' + str(np.mean(serie)))
        print(etiqueta + ', ' + columna + ', Mediana = ' + str(np.median(serie.dropna())))
        print(etiqueta + ', ' + columna + ', Dev.Est. = ' + str(np.std(serie)))
# -
# **NOTA:**
# **a)** Se observa para ambas variables, que los datos están menos dispersos en **latam** que en el **mundo**, teniendo en cuenta el desvio estandar y el rango calculado anteriormente.
# **b)** Por lo tanto una menor dispersion genera un valor de media mas representativo en el set de datos para latam que los pertenecientes al mundo.
# **c)** Para la variable **pf_identity**, se observa que en ambos casos **latam** y **mundo** el valor de la media es menor que el de la mediana , esto puede deberse al efecto de los datos extremos que traccionan el valor de la media. Al no coincidir los valores de media y mediana (media < mediana), se detecta una **asimetría negativa**, vemos que a priori podría ser una **distribución hipergeométrica** la que explica a este conjunto de datos.
# **d)** Para la variable **hf_score**, se observa para **latam** y **mundo** que el valor de la media es practicamente igual al de la mediana, por lo que a priori estos datos parecen tener una **distribución normal**.
# ### **1.4** ¿Tiene sentido calcular la moda?
# **NOTA:**
# **a)** No tiene sentido calcular la **moda** ya que tenemos demasiados valores posibles en ambas variables, y esta metrica es mejor utilizarla para caracterizar variables categoricas.
# ### **1.5** ¿Cómo pueden sanearse los valores faltantes?
# Check for nulls in pf_identity (world) and count them.
print(dataset.pf_identity.isnull().sum())
# Check for nulls in hf_score (world) and count them.
print(dataset.hf_score.isnull().sum())
# Check for nulls in pf_identity (Latam) and count them.
print(latam.pf_identity.isnull().sum())
# Check for nulls in hf_score (Latam) and count them.
print(latam.hf_score.isnull().sum())
# **NOTA:**
# **a)** Como tengo suficientes grados de libertad puedo quitar del análisis las filas que posean datos faltantes (nan) ya que aunque la base de datos se reduzca, me alcanzan los grados de libertad para los futuros análisis.
# **b)** Otra opción puede ser remplazar el nan por el valor de la media de esa variable para que no tire la distribución hacia abajo al usar 0.
# **c)** De todos modos como las filas con datos faltantes son las mismas en las dos variables con las que estamos trabajando elejimos la opcion de no tenerlas en cuenta, **no las eliminamos** por las dudas las necesitemos luego. Vamos a obviar los valores nan por el momento.
# +
# Drop the NaN rows of pf_identity for the world dataset. If hf_score then
# reports zero nulls too, both columns were null on exactly the same rows.
import pandas as pd
dataset_notna = dataset[pd.notnull(dataset['pf_identity'])]
print(dataset_notna.pf_identity.isnull().sum())
print(dataset_notna.hf_score.isnull().sum())
# -
# **NOTA:**
# **a)** Si al dejar de lado los *registros nulos* para la variable **pf_identity**, también se dejan de lado todos los *registros nulos* para la variable **hf_score**, nos aseguramos de que los registros nulos para ambas variables eran los mismos.
# +
# Same NaN cleanup for the Latam subset.
import pandas as pd
latam_notna = latam[pd.notnull(latam['pf_identity'])]
print(latam_notna.pf_identity.isnull().sum())
print(latam_notna.hf_score.isnull().sum())
# -
# **NOTA:**
# **a)** Si al dejar de lado los *registros nulos* para la variable **pf_identity**, también se dejan de lado todos los *registros nulos* para la variable **hf_score**, nos aseguramos de que los registros nulos para ambas variables eran los mismos.
# ### **1.6** ¿Encuentra outliers en estas dos variables? ¿Qué método utiliza para detectarlos? Los outliers, ¿son globales o por grupo? ¿Los eliminaría del conjunto de datos?
# **NOTA:**
# **a)** Utilizando un **boxplot** es posible graficar distribuciones por grupos y ver si quedan valores sueltos (outliers) y si estos valores siguen algún ordenamiento.
# **b)** Mediante un gráfico de cajas se pueden observar los outliers que en un grafico de barras quedarian dentro de la barra de desviación estandar.
# **c)** En este caso sí se observan los outliers agrupados dentro de las regiones. Un caso muy llamativo es el de *Oceanía*, que parece tener prácticamente todos sus registros como outliers.
# **d)** Decidimos filtrar los outliers utilizando la fórmula que deja solo aquellos valores que se encuentran a menos de 2.5 desviaciones estándares de la media.
# Boxplot of pf_identity per region; points beyond the whiskers are the
# per-region outliers discussed above.
plt.figure(figsize=(8,5))
p1=seaborn.boxplot(data=dataset_notna,
                   x='region', y='pf_identity')
plt.ylabel('Distribución de estimación')
plt.xlabel('region')
plt.xticks(rotation=45)
seaborn.despine()
# Outlier filtering
def clean_outliers(dataset, column_name):
    """Return `dataset` without the outlier rows of column `column_name`.

    A row is kept when its value lies within 2.5 standard deviations of
    the column mean.
    """
    values = dataset[column_name]
    threshold = 2.5 * values.std()
    keep_mask = numpy.abs(values - values.mean()) <= threshold
    return dataset[keep_mask]
# NOTE(review): each assignment restarts from *_notna, so the second call
# discards the pf_identity filtering — the final dataset_clean/latam_clean
# only have hf_score outliers removed. For the world data pf_identity removes
# 0 rows (see the cell below), so the result is the same; confirm for latam.
dataset_clean = clean_outliers(dataset_notna, "pf_identity")
print("Filas removidas", len(dataset_notna) - len(dataset_clean))
dataset_clean = clean_outliers(dataset_notna, "hf_score")
print("Filas removidas", len(dataset_notna) - len(dataset_clean))
latam_clean = clean_outliers(latam_notna, "pf_identity")
print("Filas removidas", len(latam_notna) - len(latam_clean))
latam_clean = clean_outliers(latam_notna, "hf_score")
print("Filas removidas", len(latam_notna) - len(latam_clean))
# +
# After the null and outlier cleanup, report total rows filtered per dataset.
print("Total de filas filtradas")
print("Base de datos -> registros iniciales - registros finales = registros filtrados")
print("mundo -> ", str(len(dataset)), " - ", str(len(dataset_clean))," = ", len(dataset) - len(dataset_clean))
print("latam -> ", str(len(latam)), " - ", str(len(latam_clean))," = ", len(latam) - len(latam_clean))
# +
# Total outliers para pf_identity en el mundo = 0
# Total outliers para hf_score en el mundo = 13
# Implementamos la siguiente función para:
# Conocer cuáles fueron los 13 registros filtrados para hf_score en el mundo
# Conocer cuáles fueron los 10 registros filtrados para hf_score en latam
import numpy as np

def complement_outliers(dataset, column_name):
    """Return only the outlier rows of column `column_name` — the exact
    complement of clean_outliers: values farther than 2.5 standard
    deviations from the column mean.
    """
    serie = dataset[column_name]
    outlier_mask = np.abs(serie - serie.mean()) > (2.5 * serie.std())
    return dataset[outlier_mask]
# -
# Collect the hf_score outlier rows for inspection (world and Latam).
dataset_removed = complement_outliers(dataset_notna, "hf_score")
latam_removed = complement_outliers(latam_notna, "hf_score")
# +
import seaborn
import matplotlib.pyplot as plt

# Bar charts of the removed hf_score outlier rows, grouped by country.
# FIXES: title typo "Outlaiers" -> "Outliers"; the y-axis plots hf_score
# values, not a frequency, so the label is corrected; removed the unused
# value_counts() assignments (plt1/plt2 were never read).
plt.figure (figsize=(12,3))
plt.subplot(121)
seaborn.barplot (x=dataset_removed.countries,y= dataset_removed.hf_score, ci=None)
plt.xticks(rotation=90)
plt.title("Outliers para hf_score por paises en el Mundo")
plt.ylabel('hf_score')
plt.xlabel('')
plt.ylim(0.1)
plt.subplot(122)
seaborn.barplot (x=latam_removed.countries,y= latam_removed.hf_score, ci=None)
plt.xticks(rotation=90)
plt.title("Outliers para hf_score por paises en Latinoamérica")
plt.ylabel('hf_score')
plt.xlabel('')
plt.ylim(0.1)
# -
# **NOTA:**
# **a)** Los *outliers filtrados* para el **mundo** corresponden a registros de 5 países.
# **b)** Podemos observar que los *outliers filtrados* para **latam** corresponden todos a Venezuela.
# + [markdown] colab_type="text" id="kwzDqGtfFZCm"
# ## **2. Agregación de datos**
# ### **2.1** Grafiquen la media de la variable *pf_identity* y *hf_score* a través de los años.
#
# +
import numpy as np
import matplotlib.pyplot as plt

# Yearly means of pf_identity (blue) and hf_score (red): world vs Latam.
plt.figure (figsize=(12,6))
plt.subplot(121)
p1= seaborn.lineplot(data=dataset_clean, x='year', y='pf_identity', estimator=np.mean, color='blue', label='pf_identity')
seaborn.lineplot(data=dataset_clean, x='year', y='hf_score', estimator=numpy.mean, color='red', label='hf_score')
plt.xticks(rotation=90);
plt.xlabel('Años')
plt.ylabel('Frecuencias')
plt.title('Evolución de pf_identity y hf_score en el Mundo')
plt.subplot(122)
p2= seaborn.lineplot(data=latam_clean, x='year', y='pf_identity', estimator=np.mean, color='blue', label='pf_identity')
seaborn.lineplot(data=latam_clean, x='year', y='hf_score', estimator=numpy.mean, color='red', label='hf_score')
plt.xticks(rotation=90);
plt.xlabel('Años')
plt.ylabel('Frecuencias')
plt.title('Evolución de pf_identity y hf_score en Latinoamérica')
# -
# **NOTA:**
# **a)** En los graficos es posible observar que la variable **pf_idendity** muestra una caida importante a nivel mundial, y aún más en latam en los últimos años.
# **b)** A pesar de esta caída, es importante observar que actualmente es más alto el valor de la **media** de **pf_identity** en latam que en el mundo.
# **c)** En cuando al comportameniendo de la variable **hf_score** se observa un patrón muy similar en ambos gráficos, siendo el valor de **media** para latam y el mundo aproximadamente el mismo, manteniendose durante los años.
# **d)** Nos llama la atención el comportamiento de **pf_identity** ya que hubieramos esperado que aumentara a lo largo del tiempo en vez de disminuir de este modo.
# ### **2.2** Realicen los mismos gráficos, pero separando por regiones (Cada variable en un gráfico distinto, sino no se ve nada). ¿La tendencia observada, es la misma que si no dividimos por regiones?
#
# +
import numpy as np
import matplotlib.pyplot as plt

# Same yearly means, now one colored line per region (hue='region').
plt.figure (figsize=(15,10))
plt.subplot(121)
p2= seaborn.lineplot(data=dataset_clean, x='year', y='pf_identity', hue='region', estimator=np.mean)
plt.xticks(rotation=90);
plt.xlabel('Años')
plt.ylabel('Frecuencias')
plt.title('pf_identity por Regiones')
plt.subplot(122)
p2= seaborn.lineplot(data=dataset_clean, x='year', y='hf_score', hue='region',estimator=np.mean)
plt.xticks(rotation=90);
plt.xlabel('Años')
plt.ylabel('Frecuencias')
plt.title('hf_score por Regiones')
# -
# **NOTA:**
# **a)** Para **hf_score** la tendencia parece ser la misma dividiendo por regiones o analizando a nivel global.
# **b)** Para **pf_identity** se observan algunas regiones con caidas más marcadas que otras. Incluso para Norteamérica se aprecia que el índice ha subido en los últimos años.
# ### **2.3** Si lo consideran necesario, grafiquen algunos países de Latinoamerica para tratar de explicar la tendencia de la variable *pf_identity* en la región. ¿Cómo seleccionarion los países relevantes a esa tendencia?
#
# #### Pista: hay gráficos de seaborn que permiten generar visualizaciones para cada valor de una variable categórica, en este caso, las distintas regiones.
#
# #### Sólo por curiosidad, graficar la tendencia de *hf_score* y *ef_score* a través de los años. ¿Tienen alguna hipótesis para este comportamiento?
# +
import numpy as np
import matplotlib.pyplot as plt

# One small bar chart of pf_identity per year for every Latam country
# (catplot facets on 'countries', 3 panels per row).
plt.figure(figsize=(10,6))
g = seaborn.catplot(x="year", y="pf_identity", col="countries", col_wrap=3,
                    data=latam_clean, kind="bar", height=2.5, aspect=1.5, color='purple', margin_titles=True)
# -
# **NOTA:**
# **a)** Gracias a estos gráficos podemos ver que la caída del índice **pf_identity** que habíamos detectado en el punto anterior no se da en todos los países de latam del mismo modo.
# **b)** La mayoría de los países se mantienen constantes a lo largo del tiempo y muy pocos tienen un leve aumento.
# **c)** Suponemos que la caída general del índice se debe a la suma del efecto producido por la caída de algunos países y por el hecho de que varios países, si bien son constantes, lo son en un valor bajo del índice. Esto lleva a arrastrar la tendencia general del índice hacia abajo.
# +
# Pick 8 countries of interest to chart the evolution of hf_score / ef_score.
import pandas as pd

paises_interes = ['Argentina', 'Bolivia', 'Venezuela', 'Chile',
                  'Mexico', 'Uruguay', 'Panama', 'Brazil']
# One concat of the per-country slices, in the same order as before.
latam_select = pd.concat([latam_clean[latam_clean.countries == pais]
                          for pais in paises_interes])
# +
import numpy as np
import matplotlib.pyplot as plt

# hf_score (left) and ef_score (right) over time for the selected countries.
plt.figure (figsize=(15,6))
plt.subplot(121)
plt1= seaborn.lineplot(data=latam_select, x='year', y='hf_score', hue='countries', estimator=np.mean)
plt.xticks(rotation=90);
plt.xlabel('Años')
plt.ylabel('Frecuencias')
plt.title('hf_score por país de Latam')
plt.subplot(122)
plt2= seaborn.lineplot(data=latam_select, x='year', y='ef_score', hue='countries', estimator=np.mean)
plt.xticks(rotation=90);
plt.xlabel('Años')
plt.ylabel('Frecuencias')
plt.title('ef_score por país de Latam')
# -
# **NOTA:**
# **a)** Observamos que la tendencia de **hf_score** se mantiene bastante constante en algunos paises a lo largo de los años. Excepto para Argentina y Brasil.
# **b)** En cuanto al índice **ef_score** (índice de libertad económica) observamos que mientra la mayoría de los paises mantiene una tendencia similar de comportamiento, Argentina muestra una caída muy notable del mismo, llegando a su punto más bajo en 2014. Esta caida del índice de libertad economica se debe a las políticas económicas restrictivas de gobierno tomadas en esos años como: el control de capitales, la fijacion de precios, la restriccion de importaciones,la nacionalizaciones de empresas, etc.
# + [markdown] colab_type="text" id="z7cBpSPrFZCn"
# ## **3. Distribuciones**
# ### **3.1** Graficar en un mismo histograma la distribución de la variable *pf_identity* en global, y en Latinoamérica y el caribe. Repetir para la variable *hf_score*. ¿Visualmente, a qué tipo de distribución corresponde cada variable? ¿Es correcto utilizar todos el conjunto de valores disponibles para esa region en estos gráficos?
#
# +
import seaborn
import matplotlib.pyplot as plt

# Overlaid normalized histograms: world (blue) vs Latam (red).
# NOTE(review): seaborn.distplot is deprecated in recent seaborn releases
# (histplot is the replacement) — left as-is to preserve behavior.
plt.figure (figsize=(15,6))
plt.subplot(121)
seaborn.distplot(dataset_clean.pf_identity, color='blue', label='Mundial', bins=15, kde=False, norm_hist=True)
seaborn.distplot(latam_clean.pf_identity, color='red', label='Latam', bins=15, kde=False, norm_hist=True)
plt.xlabel('pf_identity')
plt.title('Histograma de distribución de pf_identity')
plt.subplot(122)
seaborn.distplot(dataset_clean.hf_score, color='blue', label='Mundial', bins=15, kde=False, norm_hist=True)
seaborn.distplot(latam_clean.hf_score, color='red', label='Latam', bins=15, kde=False, norm_hist=True)
plt.xlabel('hf_score')
plt.title('Histograma de distribución de hf_score')
# -
# **NOTA:**
# **a)** **pf_identity**, variable discreta (es un promedio), dist. asimétrica negativa -> distribución hipergeométrica.
# **b)** **hf_score**, variable continua, dist. asimétrica negativa-> distribución logística.
# + [markdown] colab_type="text" id="lEwQJIulFZCo"
# ## **4. Correlaciones y Relaciones**
#
# En este ejercicio queremos responder a las preguntas
#
# * Las libertades personales y económicas, ¿van siempre de la mano?
# * ¿Cómo se relacionan ambas con las libertades respectivas a las relaciones personales?
#
# Para ello, analizaremos las correlaciones entre las variables pf_identity, pf_score y ef_score.
#
# Como pf_indentity contribuye al cálculo de pf_score esperamos hallar algún grado de correlación. Lo contrario podría ocurrir con ef_score.
# -
# ### **4.1** ¿Qué conclusiones puede sacar de un gráfico pairplot de estas tres variables? ¿Es adecuado para los valores de pf_identity? ¿Por qué?
# +
import seaborn
# Pairwise scatter plots + marginal histograms of the three score variables.
seaborn.pairplot(data=dataset_clean, vars=['pf_identity', 'pf_score', 'ef_score'], height=5)
# -
# **NOTA:**
# **a)** Los histogramas de la diagonal principal nos muestran las distribuciones de cada una de las variables analizadas, observandosé que estas están muy sesgadas hacia la izquierda.
# **b)** Los diagramas de dispersión por sobre y debajo de la diagonal nos muestran las relaciones entre dos de las variables. Podemos ver que el **ef_score** y el **pf_score** poseen una *relacion positiva* entre ellos.
# **c)** Este tipo de grafico no sería del todo adecuado para **pf_identity** ya que es una variable discreta(promedio) y este grafico es mejor para variables continuas.
# ### **4.2** Graficar la correlación (visual) entre pf_identity y pf_score; y entre pf_identity y ef_score. Analizar el resultado, ¿se pueden sacar conclusiones? Tengan en cuenta que como pf_identity es el resultado de un promedio, sólo toma algunos valores. Es, en la práctica, discreta, y eso afecta al tipo de gráfico que podemos usar.
# +
import seaborn
import matplotlib.pyplot as plt

# Mean pf_score / ef_score per discrete pf_identity value.
# NOTE(review): plt1/plt2 (value_counts) are computed but never used.
plt.figure (figsize=(12,6))
plt.subplot(121)
plt1= dataset_clean.pf_identity.value_counts(normalize=True)
seaborn.barplot (x=dataset_clean.pf_identity, y=dataset_clean.pf_score, ci=None)
plt.xticks(rotation=90)
plt.title("pf_identity Vs. pf_score")
plt.ylim(0.1)
plt.subplot(122)
plt2= dataset_clean.pf_identity.value_counts(normalize=True)
seaborn.barplot (x=dataset_clean.pf_identity, y=dataset_clean.ef_score, ci=None)
plt.xticks(rotation=90)
plt.title("pf_identity Vs. ef_score")
plt.ylim(0.1)
# -
# **NOTA:**
# **a)** Se utilizaron **Gráficos de barra** ya que son óptimos para variables numéricas discretas como lo es **pf_identity**
# **b)** En estos gráficos se aprecia una tendencia a un *correlación positiva* entre **pf_identity** y **pf_score**, lo que indica que a medida que aumenta una variable también lo hace la otra.
# **c)** En cuanto a **pf_identity** Vs **ef_score** no se observa una *correlación* entre ambas.
# ### **4.3** Convertir estas variables en categóricas, es decir, a partir de pf_indentity generar otra variable pf_identity_segment que tome los valores `high`, `medium` y `low`. Pueden hacerlo con una función escrita por ustedes, o usando alguna función de pandas como `pandas.cut` o `pandas.dcut`. Repetir para ef_score y pf_score. El criterio para decidir qué intervalos de valores corresponden a cada categoría tienen que decidirlo ustedes, pueden usar los estadísticos mediana y los cuartiles.
# +
# Discretize each score into three equal-width categorical bins.
# BUG FIX 1: pandas.cut assigns `labels` to bins in ascending order, so the
# lowest bin must be "low" and the highest "high" — the original
# ["high", "medium", "low"] tagged the *lowest* scores as "high".
# BUG FIX 2: use .copy() so the new columns are not silently added to
# dataset_clean through the alias.
dataset_cut = dataset_clean.copy()
dataset_cut['pf_identity_segment'] = pandas.cut(dataset_cut.pf_identity, 3, labels=["low", "medium", "high"], include_lowest=True)
dataset_cut['pf_score_segment'] = pandas.cut(dataset_cut.pf_score, 3, labels=["low", "medium", "high"], include_lowest=True)
dataset_cut['ef_score_segment'] = pandas.cut(dataset_cut.ef_score, 3, labels=["low", "medium", "high"], include_lowest=True)
important_cols_cut = ['region', 'countries', 'pf_identity_segment','pf_score_segment','ef_score_segment']
dataset_cut[important_cols_cut][:10]
# -
# ### **4.4** Graficar la correlación (visual) entre estas tres variables categoricas usando gráficos de calor (heatmaps). Note: van a necesitar 3 gráficos distintos, porque en cada uno podemos incluir sólo 2 variables.
# +
import seaborn
import matplotlib.pyplot as plt

# Contingency tables (crosstab counts) of each pair of segmented variables,
# rendered as heatmaps.
plt.figure (figsize=(15,5))
plt.subplot(131)
plt1 = pandas.crosstab(dataset_cut.pf_identity_segment, dataset_cut.pf_score_segment)
seaborn.heatmap(plt1, annot=True, fmt='g',cmap='Blues')
plt.ylabel('pf_identity_segment')
plt.xlabel('pf_score_segment')
plt.title('pf_identity Vs pf_score')
plt.subplot(132)
plt2 = pandas.crosstab(dataset_cut.pf_identity_segment, dataset_cut.ef_score_segment)
seaborn.heatmap(plt2, annot=True, fmt='g',cmap='Blues')
plt.ylabel('pf_identity_segment')
plt.xlabel('ef_score_segment')
plt.title('pf_identity Vs ef_score')
plt.subplot(133)
plt3 = pandas.crosstab(dataset_cut.pf_score_segment, dataset_cut.ef_score_segment)
seaborn.heatmap(plt3, annot=True, fmt='g',cmap='Blues')
plt.ylabel('pf_score_segment')
plt.xlabel('ef_score_segment')
plt.title('pf_score Vs ef_score')
# -
# **NOTA:**
# **a)** Los **heatmaps** son muy útiles para graficar la relación entre **variables categóricas**
# **b)** En estos gráficos podemos observar que mientras una variable disminuye, la otra también lo hace, pero no llega a ver del todo una correlación positiva.
| practico.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
# %matplotlib inline
from matplotlib.pyplot import imshow
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from torchvision import transforms, utils
# -
# # Deep Neural Network Model (AlexNet)
class KitModel(nn.Module):
    """AlexNet-style CNN with a single scalar regression output.

    NOTE(review): the layer layout (grouped conv2/4/5, LRN after the first two
    stages, 9216-wide fc6) matches Caffe's original AlexNet, and the class name
    suggests it was produced by a Caffe->PyTorch converter (e.g. MMdnn) -
    confirm before changing any numerics.
    """
    def __init__(self):
        super(KitModel, self).__init__()
        # Feature extractor: conv2/4/5 use groups=2, mirroring the original
        # two-GPU AlexNet channel split.
        self.conv1 = nn.Conv2d(3, 96, (11, 11), stride=4, padding=0)
        self.conv2 = nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5, stride=1, groups=2, padding=2)
        self.conv3 = nn.Conv2d(in_channels=256, out_channels=384, kernel_size=3, stride=1, groups=1, padding=1)
        self.conv4 = nn.Conv2d(in_channels=384, out_channels=384, kernel_size=3, stride=1, groups=2, padding=1)
        self.conv5 = nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, stride=1, groups=2, padding=1)
        # Head: 9216 = 256 channels x 6 x 6 spatial positions after pool5
        # (which pins the expected input size to 227x227).
        self.fc6_1 = nn.Linear(in_features = 9216, out_features = 4096)
        self.fc7_1 = nn.Linear(in_features = 4096, out_features = 4096)
        self.ip_1 = nn.Linear(in_features = 4096, out_features = 1)
        self.relu = nn.ReLU()
        self.drop = nn.Dropout()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
    def forward(self, x):
        # Stage 1: conv -> relu -> pool -> local response normalisation.
        # NOTE: a fresh LRN module is built on every forward pass; it is
        # parameter-free, so this is wasteful but functionally harmless.
        conv1 = self.conv1(x)
        relu1 = self.relu(conv1)
        pool1 = self.maxpool(relu1)
        norm1 = self.LRN(size = 5, alpha=0.0001, beta=0.75)(pool1)
        # Stage 2: same pattern as stage 1.
        conv2 = self.conv2(norm1)
        relu2 = self.relu(conv2)
        pool2 = self.maxpool(relu2)
        norm2 = self.LRN(size = 5, alpha=0.0001, beta=0.75)(pool2)
        # Stages 3-5: three convolutions, pooled once at the end.
        conv3 = self.conv3(norm2)
        relu3 = self.relu(conv3)
        conv4 = self.conv4(relu3)
        relu4 = self.relu(conv4)
        conv5 = self.conv5(relu4)
        relu5 = self.relu(conv5)
        pool5 = self.maxpool(relu5)
        # Flatten to (N, 9216) and run the fully connected head with dropout.
        fc6_0 = pool5.view(pool5.size(0), -1)
        fc6_1 = self.fc6_1(fc6_0)
        relu6 = self.relu(fc6_1)
        drop6 = self.drop(relu6)
        fc7_1 = self.fc7_1(drop6)
        relu7 = self.relu(fc7_1)
        ip_0 = self.drop(relu7)
        ip_1 = self.ip_1(ip_0)
        # Returns a (N, 1) tensor of raw (unbounded) regression scores.
        return ip_1
    class LRN(nn.Module):
        """Local Response Normalisation implemented via average pooling.

        ACROSS_CHANNELS=True normalises each activation by its `size`
        channel neighbours; False normalises over a spatial window.
        """
        def __init__(self, size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True):
            super(KitModel.LRN, self).__init__()
            self.ACROSS_CHANNELS = ACROSS_CHANNELS
            if self.ACROSS_CHANNELS:
                # 3-D pooling over (channel, 1, 1) windows averages squared
                # activations across neighbouring channels.
                self.average=nn.AvgPool3d(kernel_size=(size, 1, 1),
                        stride=1,
                        padding=(int((size-1.0)/2), 0, 0))
            else:
                self.average=nn.AvgPool2d(kernel_size=size,
                        stride=1,
                        padding=int((size-1.0)/2))
            self.alpha = alpha
            self.beta = beta
        def forward(self, x):
            # denominator = (1 + alpha * avg(x^2)) ** beta
            if self.ACROSS_CHANNELS:
                div = x.pow(2).unsqueeze(1)
                div = self.average(div).squeeze(1)
                div = div.mul(self.alpha).add(1.0).pow(self.beta)
            else:
                div = x.pow(2)
                div = self.average(div)
                div = div.mul(self.alpha).add(1.0).pow(self.beta)
            x = x.div(div)
            return x
class PandasDataset(Dataset):
    """Map-style dataset yielding (image_array, target) pairs.

    Images are loaded from file paths, resized to 227x227, converted to
    float32 BGR arrays, and optionally passed through ``transform``.
    """

    def __init__(self, list_images, list_targets, transform=None):
        self.list_images = list_images
        self.list_targets = list_targets
        self.transform = transform

    def __len__(self):
        return len(self.list_images)

    def __getitem__(self, idx):
        pil_img = Image.open(self.list_images[idx]).convert('RGB')
        pil_img = pil_img.resize((227, 227), Image.BILINEAR)
        pixels = np.array(pil_img, dtype='f4')
        # Caffe-derived models expect BGR channel order, so reverse RGB.
        pixels = pixels[:, :, ::-1]
        # astype() also makes the channel-reversed view contiguous again.
        pixels = pixels.astype('float32')
        if self.transform:
            pixels = self.transform(pixels)
        return pixels, self.list_targets[idx]
# +
model = KitModel()
# NOTE(review): despite the .npy extension this file is read with torch.load,
# so it is presumably a torch-serialised state_dict - confirm.
model.load_state_dict(torch.load('generated_files/pytorch_state.npy'))
model.train(False)  # redundant with eval() below; both disable dropout
model.eval()
# +
batch_size = 30
# Image file(s) to score; extend this list to run more predictions at once.
file_list = [
    'streetview_image.jpg',
]
# I'm interested only in testing the predictions, so label=0
labels = [
    0
]
# -
# ## Example of image
# Preview the input image inline.
image = Image.open(file_list[0]).convert('RGB')
imshow(np.array(image))
# +
# Per-pixel mean image used for mean-subtraction (Places205-CNN training means).
means = np.load('generated_files/places205CNN_mean_filtered.npy')
# NOTE(review): ToTensor only rescales uint8 inputs; for the float32 arrays
# produced here it should just transpose HWC->CHW, making the final *255
# a deliberate restoration of the 0-255 range - confirm against the
# torchvision version in use.
transformations = transforms.Compose([lambda x: x - means, # Subtracts image means
                                      transforms.ToTensor(),
                                      lambda x: x*255] # Restore the input range to [0, 255]
                                     )
dataset = PandasDataset(file_list, labels, transformations)
# num_workers=10 is generous for a single image but harmless.
load = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=10)
preds = np.zeros((len(file_list), 1))
for i, data in enumerate(load):
    inputs, labels = data
    n = len(inputs)
    # Slice of `preds` covered by this batch.
    ifrom = i*batch_size
    ito = i*batch_size+n
    inputs, labels = Variable(inputs), Variable(labels)
    outputs = model(inputs)
    preds[ifrom:ito] = outputs.data.numpy()
print("Predicted:", preds)
# -
| ACMMM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from qiskit import *
from qiskit.visualization import *
from math import pi
# ## For 1-qubit Hamiltonian
#
# H = a.I + b.X + c.Y + d.Z
# ### Quantum Part
# +
# Ansatz must represent different states in Hilbert Space.
# These Ry and Rx gates allow representation of any state on a bloch sphere.
# Rotations are performed around both the x-axis and the y-axis.
# We could choose a different ansatz here as all the states on a bloch sphere are not needed to be represented for generating a trial wavefinction in this case.
def ansatz(circuit, parameter):
    """Apply the variational ansatz: an Ry then an Rx rotation on qubit 0.

    Together the two rotations can reach any point on the Bloch sphere,
    which is more expressive than this single-qubit problem strictly needs.
    """
    for rotation in (circuit.ry, circuit.rx):
        rotation(parameter, 0)
    return circuit
# -
def change_basis(circuit, hamiltonian_term):
    """Rotate qubit 0 so an X or Y observable can be read out in the Z basis."""
    if hamiltonian_term == 'X':
        # H maps the X eigenbasis onto the computational basis
        # (equivalently: circuit.u2(0, pi, 0)).
        circuit.h(0)
    elif hamiltonian_term == 'Y':
        # u2(0, pi/2) plays the same role for the Y eigenbasis.
        circuit.u2(0, pi/2, 0)
    return circuit
def vqe(n, circuit, hamiltonian_term):
    """Attach the measurement for one Pauli term of the Hamiltonian.

    The identity term needs no circuit: its expectation value is always 1,
    so the integer 1 is returned instead of a circuit.
    """
    if hamiltonian_term == 'I':
        return 1
    # X and Y first need a basis rotation; Z is measured directly.
    if hamiltonian_term in ('X', 'Y'):
        change_basis(circuit, hamiltonian_term)
    if hamiltonian_term in ('X', 'Y', 'Z'):
        circuit.measure(range(n), range(n))
    return circuit
def main(n, parameter, hamiltonian_term):
    """Build the full VQE circuit for one Pauli term of the Hamiltonian."""
    qc = QuantumCircuit(n, n)
    ansatz(qc, parameter)          # trial-state preparation
    qc.barrier()                   # visual split between ansatz and readout
    vqe(n, qc, hamiltonian_term)   # basis change + measurement
    return qc
# #### Hamiltonian Term Y
# Build and run the circuit for the Y term at parameter = pi.
n = 1
parameter = pi
hamiltonian_term = 'Y'
circuit = main(n, parameter, hamiltonian_term)
circuit.draw('mpl')
# 1024 shots on the QASM simulator; show the raw outcome counts.
backend = Aer.get_backend('qasm_simulator')
results = execute(circuit, backend, shots = 1024).result()
counts = results.get_counts()
print(counts)
plot_histogram(counts)
# +
# Expectation value from the counts: outcome '0' contributes +1 and
# outcome '1' contributes -1 (the Z eigenvalues), normalised by 1024 shots.
expectation_val = 0
for i in counts:
    sign = 1
    if i == '1':
        sign = -1
    expectation_val += (sign * counts[i])
expectation_val = expectation_val/1024
y_expectation_val = expectation_val
print(y_expectation_val)
# -
# #### Hamiltonian Term X
# Build and run the circuit for the X term at parameter = pi.
n = 1
parameter = pi
hamiltonian_term = 'X'
circuit = main(n, parameter, hamiltonian_term)
circuit.draw('mpl')
# 1024 shots on the QASM simulator; show the raw outcome counts.
backend = Aer.get_backend('qasm_simulator')
results = execute(circuit, backend, shots = 1024).result()
counts = results.get_counts()
print(counts)
plot_histogram(counts)
# +
# Expectation value from the X-basis counts: outcome '0' contributes +1 and
# outcome '1' contributes -1, normalised by the 1024 shots.
expectation_val = 0
for outcome in counts:
    sign = -1 if outcome == '1' else 1
    expectation_val += sign * counts[outcome]
expectation_val = expectation_val/1024
x_expectation_val = expectation_val
# Bug fix: this cell computes the X term but previously printed
# y_expectation_val (copy-paste from the Y cell).
print(x_expectation_val)
# -
# #### Hamiltonian Term Z
# Build and run the circuit for the Z term at parameter = pi.
n = 1
parameter = pi
hamiltonian_term = 'Z'
circuit = main(n, parameter, hamiltonian_term)
circuit.draw('mpl')
# 1024 shots on the QASM simulator; show the raw outcome counts.
backend = Aer.get_backend('qasm_simulator')
results = execute(circuit, backend, shots = 1024).result()
counts = results.get_counts()
print(counts)
plot_histogram(counts)
# +
# Expectation value from the Z-basis counts: outcome '0' contributes +1 and
# outcome '1' contributes -1, normalised by the 1024 shots.
expectation_val = 0
for outcome in counts:
    sign = -1 if outcome == '1' else 1
    expectation_val += sign * counts[outcome]
expectation_val = expectation_val/1024
z_expectation_val = expectation_val
# Bug fix: this cell computes the Z term but previously printed
# y_expectation_val (copy-paste from the Y cell).
print(z_expectation_val)
# -
# Hamiltonian
# Combine the measured terms: H = a<I> + b<X> + c<Y> + d<Z> with
# (a, b, c, d) = (0.7, 0.6, 0.5, 0.1); <I> is always 1.
H = 0.7 + (0.6 * x_expectation_val) + (0.5 * y_expectation_val) + (0.1 * z_expectation_val)
print(H)
# ##### Refer: https://www.mustythoughts.com/variational-quantum-eigensolver-explained
# ##### Refer: https://github.com/DavitKhach/quantum-algorithms-tutorials/blob/master/variational_quantum_eigensolver.ipynb
| qiskit-ibm/algorithms/.ipynb_checkpoints/vqe_quantum_modules-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Finding Conserved Patterns Across Two Time Series
#
# ## AB-Joins
#
# This tutorial is adapted from the [Matrix Profile I](https://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf) paper and replicates Figures 9 and 10.
#
# Previously, we had introduced a concept called [time series motifs](https://stumpy.readthedocs.io/en/latest/Tutorial_STUMPY_Basics.html), which are conserved patterns found within a single time series, $T$, that can be discovered by computing its [matrix profile](https://stumpy.readthedocs.io/en/latest/Tutorial_The_Matrix_Profile.html) using STUMPY. This process of computing a matrix profile with one time series is commonly known as a "self-join" since the subsequences within time series $T$ are only being compared with itself. However, what do you do if you have two time series, $T_{A}$ and $T_{B}$, and you want to know if there are any subsequences in $T_{A}$ that can also be found in $T_{B}$? By extension, a motif discovery process involving two time series is often referred to as an "AB-join" since all of the subsequences within time series $T_{A}$ are compared to all of the subsequences in $T_{B}$.
#
# It turns out that "self-joins" can be trivially generalized to "AB-joins" and the resulting matrix profile, which annotates every subsequence in $T_{A}$ with its nearest subsequence neighbor in $T_{B}$, can be used to identify similar (or unique) subsequences across any two time series. Additionally, as long as $T_{A}$ and $T_{B}$ both have lengths that are greater than or equal to the subsequence length, $m$, there is no requirement that the two time series must be the same length.
#
# In this short tutorial we will demonstrate how to find a conserved pattern across two independent time series using STUMPY.
#
# ## Getting Started
#
# Let's import the packages that we'll need to load, analyze, and plot the data.
# +
# %matplotlib inline
import stumpy
import pandas as pd
import numpy as np
from IPython.display import IFrame
import matplotlib.pyplot as plt
plt.style.use('stumpy.mplstyle')
# -
# ## Finding Similarities in Music Using STUMPY
#
# In this tutorial we are going to analyze two songs, “Under Pressure” by Queen and David Bowie as well as “Ice Ice Baby” by Vanilla Ice. For those who are unfamiliar, in 1990, Vanilla Ice was alleged to have sampled the bass line from "Under Pressure" without crediting the original creators and the copyright claim was later settled out of court. Have a look at this short video and see if you can hear the similarities between the two songs:
IFrame(width="560", height="315", src="https://www.youtube.com/embed/HAA__AW3I1M")
# The two songs certainly share some similarities! But, before we move forward, imagine if you were the judge presiding over this court case. What analysis result would you need to see in order to be convinced, beyond a shadow of a doubt, that there was wrongdoing?
# ## Loading the Music Data
#
# To make things easier, instead of using the raw music audio from each song, we're only going to use audio that has been pre-converted to a single frequency channel (i.e., the 2nd MFCC channel sampled at 100Hz).
# +
# Pre-extracted audio features: the 2nd MFCC channel of each song at 100 Hz.
queen_df = pd.read_csv("https://zenodo.org/record/4294912/files/queen.csv?download=1")
vanilla_ice_df = pd.read_csv("https://zenodo.org/record/4294912/files/vanilla_ice.csv?download=1")
print("Length of Queen dataset : " , queen_df.size)
print("Length of Vanilla ice dataset : " , vanilla_ice_df.size)
# -
# ## Visualizing the Audio Frequencies
#
# It was very clear in the earlier video that there are strong similarities between the two songs. However, even with this prior knowledge, it's incredibly difficult to spot the similarities (below) due to the sheer volume of the data:
# +
# Plot both feature series stacked with a shared x-axis and identical y-limits
# so the two songs are visually comparable.
fig, axs = plt.subplots(2, sharex=True, gridspec_kw={'hspace': 0})
plt.suptitle('Can You Spot The Pattern?', fontsize='30')
axs[0].set_title('Under Pressure', fontsize=20, y=0.8)
axs[1].set_title('Ice Ice Baby', fontsize=20, y=0)
axs[1].set_xlabel('Time')
axs[0].set_ylabel('Frequency')
axs[1].set_ylabel('Frequency')
ylim_lower = -25
ylim_upper = 25
axs[0].set_ylim(ylim_lower, ylim_upper)
axs[1].set_ylim(ylim_lower, ylim_upper)
axs[0].plot(queen_df['under_pressure'])
axs[1].plot(vanilla_ice_df['ice_ice_baby'], c='orange')
plt.show()
# -
# ## Performing an AB-Join with STUMPY
#
# Fortunately, using the `stumpy.stump` function, we can quickly compute the matrix profile by performing an AB-join and this will help us easily identify and locate the similar subsequence(s) between these two songs:
# AB-join: for every length-500 subsequence of "Under Pressure", find its
# nearest-neighbour subsequence in "Ice Ice Baby".
# ignore_trivial=False because this is a join of two series, not a self-join.
m = 500
queen_mp = stumpy.stump(T_A = queen_df['under_pressure'],
                        m = m,
                        T_B = vanilla_ice_df['ice_ice_baby'],
                        ignore_trivial = False)
# Above, we call `stumpy.stump` by specifying our two time series `T_A = queen_df['under_pressure']` and `T_B = vanilla_ice_df['ice_ice_baby']`. Following the original published work, we use a subsequence window length of `m = 500` and, since this is not a self-join, we set `ignore_trivial = False`. The resulting matrix profile, `queen_mp`, essentially serves as an annotation for `T_A` so, for every subsequence in `T_A`, we find its closest subsequence in `T_B`.
#
# As a brief reminder of the matrix profile data structure, each row of `queen_mp` corresponds to each subsequence within `T_A`, the first column in `queen_mp` records the matrix profile value for each subsequence in `T_A` (i.e., the distance to its nearest neighbor in `T_B`), and the second column in `queen_mp` keeps track of the index location of the nearest neighbor subsequence in `T_B`.
#
# One additional side note is that AB-joins are not symmetrical in general. That is, unlike a self-join, the order of the input time series matter. So, an AB-join will produce a different matrix profile than a BA-join (i.e., for every subsequence in `T_B`, we find its closest subsequence in `T_A`).
#
# ## Visualizing the Matrix Profile
#
# Just as we've done [in the past](https://stumpy.readthedocs.io/en/latest/Tutorial_STUMPY_Basics.html), we can now look at the matrix profile, `queen_mp`, computed from our AB-join:
# +
# Column 0 of the matrix profile holds nearest-neighbour distances; its
# argmin marks the most conserved subsequence (highlighted in red).
queen_motif_index = queen_mp[:, 0].argmin()
plt.xlabel('Subsequence')
plt.ylabel('Matrix Profile')
plt.scatter(queen_motif_index,
            queen_mp[queen_motif_index, 0],
            c='red',
            s=100)
plt.plot(queen_mp[:,0])
plt.show()
# -
# Now, to discover the global motif (i.e., the most conserved pattern), `queen_motif_index`, all we need to do is identify the index location of the lowest distance value in the `queen_mp` matrix profile (see red circle above).
# Global motif = location of the smallest matrix-profile distance.
queen_motif_index = queen_mp[:, 0].argmin()
print(f'The motif is located at index {queen_motif_index} of "Under Pressure"')
# In fact, the index location of its nearest neighbor in "Ice Ice Baby" is stored in `queen_mp[queen_motif_index, 1]`:
# Column 1 stores the index of the matching subsequence in T_B.
vanilla_ice_motif_index = queen_mp[queen_motif_index, 1]
print(f'The motif is located at index {vanilla_ice_motif_index} of "Ice Ice Baby"')
# ## Overlaying The Best Matching Motif
#
# After identifying the motif and retrieving the index location from each song, let's overlay both of these subsequences and see how similar they are to each other:
# +
# Overlay the two length-m motif subsequences to compare them visually.
plt.plot(queen_df.iloc[queen_motif_index : queen_motif_index + m].values, label='Under Pressure')
plt.plot(vanilla_ice_df.iloc[vanilla_ice_motif_index:vanilla_ice_motif_index+m].values, label='Ice Ice Baby')
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.legend()
plt.show()
# -
# Wow, the resulting overlay shows really strong correlation between the two subsequences! Are you convinced?
#
# ## Summary
#
# And that's it! In just a few lines of code, you learned how to compute a matrix profile for two time series using STUMPY and identified the top-most conserved behavior between them. While this tutorial has focused on audio data, there are many further applications such as detecting imminent mechanical issues in sensor data by comparing to known experimental or historical failure datasets or finding matching movements in commodities or stock prices, just to name a few.
#
# You can now import this package and use it in your own projects. Happy coding!
#
# ## Resources
#
# [Matrix Profile I](https://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf)
#
# [STUMPY Documentation](https://stumpy.readthedocs.io/en/latest/)
#
# [STUMPY Matrix Profile Github Code Repository](https://github.com/TDAmeritrade/stumpy)
| docs/Tutorial_AB_Joins.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ShreyasJothish/taarifa_water_pumps/blob/master/DS1_Predictive_Modeling_Challenge_RandomForest.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="EFmO0jeY8dFj" colab_type="code" outputId="7379cc67-8b67-4502-a4b2-74d70f121f4b" colab={"base_uri": "https://localhost:8080/", "height": 609}
# Load the data from Kaggle
# !pip install kaggle
# Upgrade the version of Seaborn
# !pip install -U seaborn
# Install category_encoders
# !pip install category_encoders
# + id="IkQErr7h8_aw" colab_type="code" outputId="23d76ff4-4870-4d25-b4b9-12972fcfcbe9" colab={"base_uri": "https://localhost:8080/", "height": 349}
# Mount the drive to download the data from Kaggle
# (the Kaggle API reads its credentials from KAGGLE_CONFIG_DIR).
from google.colab import drive
drive.mount('/content/drive')
# %env KAGGLE_CONFIG_DIR=/content/drive/My Drive
# !kaggle competitions download -c ds1-predictive-modeling-challenge
# + id="8_qQhIIXNNzR" colab_type="code" outputId="3d3b42d6-e1a2-4f63-d107-27d9c6d107e4" colab={"base_uri": "https://localhost:8080/", "height": 121}
# Extract the csv files
# !unzip train_features.csv.zip
# !unzip train_labels.csv.zip
# !unzip test_features.csv.zip
# + id="A6yHzH9E9J8d" colab_type="code" colab={}
# Generic imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + id="hyAcKkIi9TLE" colab_type="code" colab={}
# Loading the independent features as X and
# dependent variable as y
# Free-text placeholders that should be treated as missing values (NaN).
nan_values_list = ['Not Known', 'Unknown', 'None', 'Not known', 'not known',
                   '-', 'unknown', 'Unknown Installer', '##', 'none']
train_features_df = pd.read_csv('train_features.csv', na_values=nan_values_list)
train_labels_df = pd.read_csv('train_labels.csv')
# + id="xJkIZMC898DR" colab_type="code" colab={}
def atleast(row, value_count_series, count=5):
    """Flag whether ``row`` occurs at least ``count`` times.

    Returns np.nan for missing values, 1 when the value's frequency in
    ``value_count_series`` is >= count, otherwise 0.
    """
    if str(row) == "nan":
        return np.nan
    # Bug fix: Series.get(row) returns None when the value is absent from the
    # value-counts series, which crashed the `<` comparison; default to 0.
    value_count = value_count_series.get(row, 0)
    if value_count < count:
        return 0
    else:
        return 1
def character_grouping(row):
    """Reduce a string to its lower-cased first character.

    Returns '*' when the first character is not a letter and NaN for
    missing values.
    """
    if str(row) == "nan":
        return np.nan
    first = row[0]
    return first.lower() if first.isalpha() else "*"
def classify_lga(row):
    """Classify an ``lga`` name as 'rural', 'urban' or 'other' (NaN passes through)."""
    if str(row) == "nan":
        return np.nan
    name = row.lower()
    # Bug fix: str.find returns -1 (truthy) when the substring is ABSENT and
    # 0 (falsy) when it is at position 0, so `if name.find('rural'):` had the
    # logic inverted; substring membership is what was intended.
    if 'rural' in name:
        return "rural"
    elif 'urban' in name:
        return "urban"
    else:
        return "other"
def prefix_grouping(row, prefix_count=3):
    """Reduce a string to its lower-cased first ``prefix_count`` letters.

    Returns '#' when the string is shorter than the prefix, '*' when the
    prefix is not purely alphabetic, and NaN for missing values.
    """
    if str(row) == "nan":
        return np.nan
    if len(row) < prefix_count:
        return "#"
    prefix = row[:prefix_count]
    return prefix.lower() if prefix.isalpha() else "*"
def map_ward_construction_year(input_df):
    """Build a {ward: construction_year} lookup from median years per ward.

    A ward whose median construction year is 0 (the dataset's "unknown"
    marker) falls back to the median year of the most frequent ward.
    Works on a copy; ``input_df`` is not modified.
    """
    df = input_df.copy()
    ward_construction_year_dict = {}
    ward_list = df['ward'].unique()
    # The most frequent ward's median year is the global fallback.
    top_ward = df['ward'].describe().top
    top_ward_construction_year = \
        int(df[df['ward'] == top_ward]['construction_year'].median())
    for ward in ward_list:
        ward_construction_year = \
            int(df[df['ward'] == ward]['construction_year'].median())
        # Bug fix: the original had a stray line-continuation backslash after
        # `if ward not in ...:` which spliced the next `if` onto the same
        # line, producing a syntax error; the conditions are properly nested here.
        if ward not in ward_construction_year_dict:
            if ward_construction_year == 0:
                ward_construction_year_dict[ward] = top_ward_construction_year
            else:
                ward_construction_year_dict[ward] = ward_construction_year
    return ward_construction_year_dict
def compute_construction_year(row, ward_construction_year_dict,
                              top_ward_construction_year):
    """Fill in a missing (zero) construction year from the ward lookup.

    Non-zero years are returned unchanged; a 0 year is replaced by the
    ward's median year, or the global fallback when the ward is unknown.
    """
    year = row['construction_year']
    if year != 0:
        return year
    # 0 marks "unknown" in this dataset.
    return ward_construction_year_dict.get(row['ward'],
                                           top_ward_construction_year)
def compute_age(row):
    """Age of the well: year the record was taken minus the construction year."""
    recorded_year = int(row['date_recorded'].split('-')[0])
    return recorded_year - row['construction_year']
def compute_year_recorded(row):
    """Extract the year from a 'YYYY-MM-DD' date string."""
    year_text, _, _ = row.partition('-')
    return int(year_text)
def compute_month_recorded(row):
    """Extract the month from a 'YYYY-MM-DD' date string."""
    return int(row.split('-', 2)[1])
# + id="yWE7Uq5H-F7Z" colab_type="code" colab={}
def feature_engineering(df):
    """Add engineered columns to ``df`` in place.

    Relies on the helper functions defined above (atleast, character_grouping,
    classify_lga, prefix_grouping, map_ward_construction_year, compute_*).
    """
    # Create a column to indicate funder with atleast 5 pumps maintained.
    value_count_funder = df.funder.value_counts()
    df['funder_aleast_5'] = df['funder'].apply(atleast,
                                               args=(value_count_funder,))
    # Create a column to indicate installer with atleast 5 pumps maintained.
    value_count_installer = df.installer.value_counts()
    df['installer_aleast_5'] = df['installer'].apply(atleast,
                                                     args=(value_count_installer,))
    # Apply mean for missing values of latitude and longitude
    # (coordinates of ~0.0 are treated as missing).
    mean_longitude = df['longitude'].mean()
    df['longitude'] = df['longitude'].apply(lambda x: mean_longitude if round(x, 2) == 0 else x)
    mean_latitude = df['latitude'].mean()
    df['latitude'] = df['latitude'].apply(lambda x: mean_latitude if round(x, 2) == 0 else x)
    # Grouping wpt_name, subvillage based on 1st alphabet
    df['wpt_name_character_grouping'] = df['wpt_name'].apply(character_grouping)
    df['subvillage_character_grouping'] = df['subvillage'].apply(character_grouping)
    # Classify lga based on Rural, Urban and others
    df['lga_engineered'] = df['lga'].apply(classify_lga)
    # Grouping ward, scheme_name based on 1st alphabet
    df['ward_character_grouping'] = df['ward'].apply(character_grouping)
    df['scheme_name_character_grouping'] = df['scheme_name'].apply(character_grouping)
    # Grouping based on prefix
    df['funder_prefix_grouping'] = df['funder'].apply(prefix_grouping)
    df['installer_prefix_grouping'] = df['installer'].apply(prefix_grouping)
    df['wpt_name_prefix_grouping'] = df['wpt_name'].apply(prefix_grouping)
    df['subvillage_prefix_grouping'] = df['subvillage'].apply(prefix_grouping)
    df['lga_prefix_grouping'] = df['lga'].apply(prefix_grouping)
    df['ward_prefix_grouping'] = df['ward'].apply(prefix_grouping)
    df['scheme_name_prefix_grouping'] = df['scheme_name'].apply(prefix_grouping)
    # Compute missing construction year
    ward_construction_year_dict = map_ward_construction_year(df)
    top_ward = df['ward'].describe().top
    top_ward_construction_year = \
        int(df[df['ward'] == top_ward]['construction_year'].median())
    df['construction_year'] = df.apply(compute_construction_year, axis=1,
                                       args=(ward_construction_year_dict,
                                             top_ward_construction_year,))
    # Compute age of well
    df['age'] = df.apply(compute_age, axis=1)
    # Fetch Year and Month of date recorded
    df['year_recorded'] = df['date_recorded'].apply(compute_year_recorded)
    df['month_recorded'] = df['date_recorded'].apply(compute_month_recorded)
    # NOTE(review): the next line computes null counts but discards the
    # result - it is a no-op and can be removed.
    df.select_dtypes(include=np.number).isnull().sum()
    # Mean-impute any remaining numeric missing values.
    for col in df.select_dtypes(include=np.number).columns:
        if df[col].isnull().sum() > 0:
            df[col].fillna(df[col].mean(), inplace=True)
# Engineer features on the training set in place.
feature_engineering(train_features_df)
# + id="mG4VyQIT9gIe" colab_type="code" colab={}
# Selecting independent and dependent variables.
# The raw high-cardinality text columns are dropped; their engineered
# groupings (added by feature_engineering) remain.
X = train_features_df.drop(columns=['id', 'funder', 'installer', 'wpt_name',
                                    'subvillage', 'lga','ward','scheme_name'])
y = train_labels_df.status_group
# + id="MN2TBHsWvGgT" colab_type="code" outputId="ac16f2b3-5c77-4a04-fbcb-dd2c138f8e39" colab={"base_uri": "https://localhost:8080/", "height": 413}
pd.set_option('display.max_columns', None)
train_features_df.head()
# + id="UTozD9Pf_8ZH" colab_type="code" colab={}
# Split data into train and test using k-fold cross-validation
# with independent test data set.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.25,
                                                    shuffle=True,
                                                    random_state=42
                                                    )
# + id="Ikns6JusrwOW" colab_type="code" outputId="262d119a-5d26-4120-a32d-ac21d585401c" colab={"base_uri": "https://localhost:8080/", "height": 104}
# Get quick initial metrics estimate.
# Using sklearn accuracy_score
# Majority-class baseline: any model must beat this accuracy to be useful.
import numpy as np
from sklearn.metrics import accuracy_score
majority_class = y_train.mode()[0]
prediction = np.full(shape=y_train.shape,
                     fill_value=majority_class)
print(f'accuracy score {accuracy_score(y_train, prediction)}')
# Using simple pandas value counts method
print(y_train.value_counts(normalize=True))
# + id="8HifL8OexTSs" colab_type="code" colab={}
# Data pre-processing, Feature selection and Model selection.
# Imports for pipeline
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.preprocessing import RobustScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# + id="_FUTW1lf94_m" colab_type="code" colab={}
# Create pipeline
# Encode categoricals -> robust-scale numerics -> random forest.
# NOTE(review): the backslash after make_pipeline( is unnecessary - the open
# parenthesis already continues the line.
pipeline = make_pipeline(\
    ce.BinaryEncoder(),
    RobustScaler(),
    RandomForestClassifier(n_estimators=200,
                           class_weight=None,
                           n_jobs=-1))
# + id="LOhMkTAy99Ge" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 5601} outputId="fa290cda-2940-4514-ee3d-4c7978a7046f"
# Model validation.
# Hyper-parameter names are prefixed with the pipeline step name, as
# required by GridSearchCV over a make_pipeline object.
param_grid = {
    'randomforestclassifier__max_depth': [5, 6, 7],
    'randomforestclassifier__min_samples_split': [2, 3, 4],
    'randomforestclassifier__min_samples_leaf': [1, 2, 3, 4],
    'randomforestclassifier__max_features': ['auto', 'log2']
}
# cv=2 keeps the search cheap; verbose=20 prints progress for every fit.
gridsearch = GridSearchCV(pipeline, param_grid=param_grid, cv=2,
                          scoring='accuracy', verbose=20)
gridsearch.fit(X_train, y_train)
# + id="426lM6bh-Ady" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="c085fc8b-8838-4abd-ae32-dfdd9b519baf"
# Interpret the results.
# Best cross validation score
print('Cross Validation Score:', gridsearch.best_score_)
# Best parameters which resulted in the best score
print('Best Parameters:', gridsearch.best_params_)
# + id="Z21cnpwJ-Dau" colab_type="code" outputId="6912ab9f-f3af-4467-dfa2-99716ef5cff4" colab={"base_uri": "https://localhost:8080/", "height": 35}
#Get the best model and check it against test data set.
# Predict with X_test features
# (gridsearch.predict delegates to the refit best estimator).
y_pred = gridsearch.predict(X_test)
# Compare predictions to y_test labels
test_score = accuracy_score(y_test, y_pred)
print('Accuracy Score on test data set:', test_score)
# + id="kGmGyZAi-HQv" colab_type="code" colab={}
# Apply the identical preprocessing to the competition test set.
test_features_df = pd.read_csv('test_features.csv', na_values=nan_values_list)
feature_engineering(test_features_df)
X_submission = test_features_df.drop(columns =['id', 'funder', 'installer', 'wpt_name', 'subvillage',
                                               'lga','ward','scheme_name'])
# Predict with X_submission features
y_submission = gridsearch.predict(X_submission)
# Pair each prediction back with its row id for the submission file.
y_submission_df = pd.DataFrame(y_submission, columns=['status_group'])
output_for_submission = test_features_df.join(y_submission_df).loc[:, ['id','status_group']]
# + id="ahroY1is-LRR" colab_type="code" outputId="24b9edc9-7c1b-437f-d70e-09f121dc7584" colab={"base_uri": "https://localhost:8080/", "height": 202}
output_for_submission.head()
# + id="gaXDKW3w-Nn9" colab_type="code" outputId="5e677898-7ffc-41f3-eba3-d210b51c9fa4" colab={"base_uri": "https://localhost:8080/", "height": 87}
print(output_for_submission.status_group.value_counts())
print(output_for_submission.shape)
# + id="ZT4yRwcE-cLr" colab_type="code" colab={}
print(output_for_submission.to_csv(index=False))
| DS1_Predictive_Modeling_Challenge_RandomForest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="XJpXCHoK1lZQ"
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from keras.preprocessing import image
print(tf.__version__)
# +
# Augmented training input: rescale pixels to [0, 1] and apply random
# shear / zoom / horizontal-flip augmentation.
traindatagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
# Stream 64x64 images in batches of 32; labels are one-hot (categorical).
training_set = traindatagen.flow_from_directory(
    "../dataset",
    target_size=(64, 64),
    batch_size=32,
    class_mode="categorical"
)
# -
# Sequential CNN: four conv(32, 3x3, relu) + 2x2 max-pool stages.
# NOTE(review): no Flatten/Dense/output layers are added in the visible
# cells - presumably they follow later in the notebook; confirm.
cnn = tf.keras.models.Sequential()
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation="relu", input_shape=[64, 64, 3]))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation="relu"))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# +
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation="relu"))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation="relu"))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# -
| .ipynb_checkpoints/MaskPredictor-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DBSCAN
import random
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# +
def normalization(X, degree):
    """Min-max scale the first ``degree`` columns of X in place to [0, 1].

    A scaling technique in which values are shifted and rescaled so that
    they end up ranging between 0 and 1 (also known as Min-Max scaling).
    ----------------------------------------
    degree: polynomial regression degree, or attribute/feature number
    """
    cols = X[:, :degree]
    col_min = np.amin(cols, axis=0)
    col_max = np.amax(cols, axis=0)
    X[:, :degree] = (cols - col_min) / (col_max - col_min)
    return X
def readin_csv_data_clustering(path):
    """Load every data cell of a CSV file into a NumPy array (header excluded)."""
    return pd.read_csv(path).values
class DBSCAN():
    '''DBSCAN: Density-Bases Spatial Clustering of Applications with Noise
    eps: a float parameter used to specify the radius of neighborhood w.r.t.
    some point
    minpt:the least number of points around a core point'''
    def __init__(self, eps, minpt):
        self.e = eps
        self.m = minpt
        # Sentinel labels used during fitting; final cluster ids start at 1.
        self.outlier_label = 0
        self.core_label = -1
        self.border_label = -2
        self.fw = 8 # figure width
        self.fl = 8 # figure length
    def get_neighbour_points(self, X, idx_c):
        # Indices of all samples within eps (Euclidean distance) of X[idx_c];
        # the point itself is always included (distance 0).
        tmp = []
        for i in range(len(X)):
            if np.linalg.norm([x1 - x2 for x1, x2 in zip(X[i], X[idx_c])]) <= self.e:
                tmp.append(i)
        return tmp
    def intersection(self, l1, l2):
        # Order-preserving intersection of two index lists.
        return [value for value in l1 if value in l2]
    def fit(self, X):
        #find all core points, core label: -1, border label: -2, outlier label:0
        point_label = [self.outlier_label]*len(X)
        core_label = []
        border_label = []
        neighbor_points_set = []
        for i in range(len(X)):
            # A point is a core point when it has at least m neighbours
            # (itself included) inside eps.
            neighbor_points_set.append(self.get_neighbour_points(X, i))
            if(len(neighbor_points_set[i])>=self.m):
                core_label.append(i)
                point_label[i] = self.core_label
            else:
                border_label.append(i)
        # A non-core point adjacent to at least one core point is a border point.
        for i in border_label:
            for j in neighbor_points_set[i]:
                if j in core_label:
                    point_label[i] = self.border_label
                    break
        #find cluster
        # Grow clusters by breadth-first expansion from a randomly chosen
        # unclustered core point. NOTE: random.choice makes cluster
        # numbering non-deterministic across runs (membership is not seeded).
        cluster = 1
        unvisited_set = [i for i in range(len(X))]
        queue = []
        cluster_set = []
        while not len(core_label)==0:
            unvisited_set_old = unvisited_set
            queue = random.choice(core_label)
            # Mark the seed's whole neighbourhood as visited.
            unvisited_set = [i for i in unvisited_set if i not in neighbor_points_set[queue]]
            while not len(neighbor_points_set[queue])==0:
                first_sample = neighbor_points_set[queue].pop(0)
                # Only core points propagate the cluster to their neighbours.
                if(point_label[first_sample] == self.core_label):
                    dtmp = self.intersection(neighbor_points_set[first_sample], unvisited_set)
                    neighbor_points_set[queue].extend(dtmp)
                    unvisited_set = [i for i in unvisited_set if i not in dtmp]
            # Everything visited during this expansion forms one cluster.
            cluster_set = [i for i in unvisited_set_old if i not in unvisited_set]
            for i in cluster_set:
                point_label[i] = cluster
            cluster = cluster + 1
            core_label = [i for i in core_label if i not in cluster_set]
        # Returns per-point labels and (number of clusters + 1).
        return point_label, cluster
    def DBSCAN_visualize(self, X, point_label, n):
        # Scatter-plot each cluster id in 0..n-1; id 0 (outliers) is drawn
        # as red crosses.
        plt.figure(figsize = (self.fl, self.fw))
        for i in range(n):
            x, y = [], []
            for j in range(len(X)):
                if point_label[j] == i:
                    x.append(X[j, 0])
                    y.append(X[j, 1])
            if(i == self.outlier_label):
                plt.scatter(x, y, c='r', alpha=1, marker='x', s=100, label = 'Outliers')
            else:
                plt.scatter(x, y, alpha=0.8, marker='.', cmap=plt.cm.jet, s=100, \
                            label = 'cluster '+str(i))
        plt.grid(True)
        plt.legend()
        plt.show()
# +
def main():
    """Run the DBSCAN demo on the concentric-circles data set and plot it."""
    data = normalization(readin_csv_data_clustering("concentric_circles.csv"), 2)
    dbscan = DBSCAN(0.06, 3)
    labels, n_groups = dbscan.fit(data)
    dbscan.DBSCAN_visualize(data, labels, n_groups)


if __name__ == "__main__":
    main()
# -
| DBSCAN/DBSCAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (gameAI)
# language: python
# name: gameai
# ---
# +
#implementation of a convolutional neural net for dog breed identification
import IPython
from utils.general_utils import *
import numpy as np
import pandas as pd #used for importing data from csv to usable dataset
import tensorflow as tf
# +
BREED_LIST = "../data/included/breed_list.csv"
#ids with image matrix already converted
PRE_PROCESSED_DATA = "/home/ben/Documents/github/kaggle_dog_breed/data/preprocessed_data/"
# +
#input function for a tensorflow estimator
def train_input_fn(dataset):
    """Input function assembling (features, labels) for a tf.estimator model.

    NOTE(review): ``np.ndarray((7,6))`` allocates an UNINITIALIZED 7x6 float
    array -- it is a placeholder, not real image data. Presumably it should be
    replaced by the decoded image for ``dataset.id``; confirm.
    """
    # Look up the breed label for this sample's id (helper from utils.general_utils).
    breed = get_breed_from_id(dataset.id)
    image_matrix = np.ndarray((7,6),dtype=float)
    feature_dict = {'image_matrix':image_matrix, 'breed':breed}
    # Full list of possible breeds read from the CSV at BREED_LIST.
    labels = populate_breeds(BREED_LIST)
    return feature_dict, labels
#a wrapper function to invoke the Train dataset
def training_set_input_fn():
    """Zero-argument wrapper around train_input_fn for Estimator training.

    Fixes: the original line read ``def training_set_input_fn():p`` -- the
    stray ``p`` became the function body, making the indented call on the
    next line a SyntaxError; the wrapper also discarded the result.

    NOTE(review): train_input_fn requires a ``dataset`` argument that is not
    supplied here (the original call had the same problem) -- TODO: pass the
    training dataset once it is defined.
    """
    return train_input_fn()
# -
def model_fn():
    """Model function intended for tf.estimator.Estimator.

    NOTE(review): unfinished stub -- ``cnn`` is not defined anywhere in this
    file, so calling this raises NameError. A real Estimator model_fn must
    also accept (features, labels, mode) -- TODO: implement.
    """
    return cnn
# Build the Estimator
# NOTE(review): tf.estimator.Estimator will invoke model_fn with
# (features, labels, mode); the stub above takes no arguments, so
# training will fail until model_fn is implemented.
model = tf.estimator.Estimator(model_fn)
| src/CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: statmath381
# language: python
# name: statmath381
# ---
# + [markdown] id="ULBQbid4uiU9"
# # Toy Regression
# -
# <div class="btn btn-notebook" role="button">
# <img src="../_static/images/colab_logo_32px.png"> [Run in Google Colab](https://colab.research.google.com/drive/1adhqoV6b0uEavLDmMfkiwtRjam0DrXux?usp=sharing)
# </div>
# <div class="btn btn-notebook" role="button">
# <img src="../_static/images/github_logo_32px.png"> [View on GitHub](https://github.com/adapt-python/notebooks/blob/d0364973c642ea4880756cef4e9f2ee8bb5e8495/Regression.ipynb)
# </div>
# + [markdown] id="dStPUYoxuiVC"
# You will find here the application of DA methods from the ADAPT package on a simple one
# dimensional DA regression problem.
#
# First we import packages needed in the following. We will use ``matplotlib Animation`` tools in order to
# get a visual understanding of the selected methods:
# + id="UuXrj6aLuiVD"
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import rc
rc('animation', html='jshtml')
# + [markdown] id="UGUDIWX7uiVD"
# ## Experimental Setup
# + [markdown] id="HC98SY8YuiVE"
# We now set the synthetic regression DA problem using the [make_regression_da](https://adapt-python.github.io/adapt/generated/adapt.utils.make_regression_da.html)
# function from ``adapt.utils``.
# + id="vMDaBVqGuiVE"
from adapt.utils import make_regression_da
# Synthetic 1-D domain-adaptation regression task: source (Xs, ys), target (Xt, yt).
Xs, ys, Xt, yt = make_regression_da()
# Pick 3 target indices whose labels we are allowed to use during training.
# NOTE(review): np.random.choice samples WITH replacement by default, so the
# three indices may repeat -- confirm this is acceptable.
tgt_index_lab_ = np.random.choice(100,3)
Xt_lab = Xt[tgt_index_lab_]; yt_lab = yt[tgt_index_lab_]
# + [markdown] id="t7o6GA3guiVE"
# We define here a ``show`` function which we will use in the following to visualize the algorithms performances
# on the toy problem.
# + id="YUwGy5TauiVF"
def show(ax, y_pred=None, X_src=Xs, weights_src=50, weights_tgt=100):
    """Plot source/target data (and optional predictions) on `ax`; return `ax`.

    NOTE(review): the default X_src=Xs is bound once at definition time, and
    the body reads the module-level globals ys, Xt, yt, Xt_lab, yt_lab.
    """
    ax.scatter(X_src, ys, s=weights_src, label="source", edgecolor="black")
    ax.scatter(Xt, yt, s=50, alpha=0.5, label="target", edgecolor="black")
    ax.scatter(Xt_lab, yt_lab, s=weights_tgt,
               c="black", marker="s", alpha=0.7, label="target labeled")
    if y_pred is not None:
        # y_pred is assumed to be evaluated on np.linspace(-0.7, 0.6, 100).
        ax.plot(np.linspace(-0.7, 0.6, 100), y_pred, c="red", lw=3, label="predictions")
        # For each target point, take the prediction at the nearest grid x and
        # report the mean absolute error in the plot title.
        index_ = np.abs(Xt - np.linspace(-0.7, 0.6, 100)).argmin(1)
        score = np.mean(np.abs(yt - y_pred[index_]))
        score = " -- Tgt MAE = %.2f"%score
    else:
        score = ""
    ax.set_xlim((-0.7,0.6))
    ax.set_ylim((-1.3, 2.2))
    ax.legend(fontsize=16)
    ax.set_xlabel("X", fontsize=16)
    ax.set_ylabel("y = f(X)", fontsize=16)
    ax.set_title("Toy regression DA issue"+score, fontsize=18)
    return ax
# + colab={"base_uri": "https://localhost:8080/", "height": 359} executionInfo={"elapsed": 347, "status": "ok", "timestamp": 1631026450850, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="Z0at42mIuiVG" outputId="1<PASSWORD>" tags=["nbsphinx-thumbnail"]
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
show(ax=ax)
plt.show()
# + [markdown] id="3TSvcD56uiVH"
# As we can see in the figure above (plotting the output data ``y`` with respect to the inputs ``X``),
# source and target data define two distinct domains. We have modeled here a classical supervised
# DA issue where the goal is to build a good model on orange data knowing only the labels (``y``) of the blue
# and black points.
#
# We now define the base model used to learn the task. We use here a neural network with two hidden layer.
# We also define a ``SavePrediction`` callback in order to save the prediction of the neural network at
# each epoch.
# + id="1P1743AOuiVH"
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Input, Dense, Reshape
from tensorflow.keras.optimizers import Adam
def get_model():
    """Build and compile the small 1-input regression network used throughout."""
    net = Sequential([
        Dense(100, activation='elu', input_shape=(1,)),
        Dense(100, activation='relu'),
        Dense(1),
    ])
    net.compile(optimizer=Adam(0.01), loss='mean_squared_error')
    return net
# + id="sdnIAcFluiVH"
from tensorflow.keras.callbacks import Callback
class SavePrediction(Callback):
    """
    Keras callback that records the model's predictions on a fixed 1-D grid
    (100 points in [-0.7, 0.6]) at the end of every epoch, appending them to
    ``self.custom_history_``.
    """
    def __init__(self):
        # Fixed evaluation grid, shaped (100, 1) for the Keras model.
        self.X = np.linspace(-0.7, 0.6, 100).reshape(-1, 1)
        # One entry per epoch: the flattened predictions on self.X.
        self.custom_history_ = []
        super().__init__()
    def on_epoch_end(self, batch, logs={}):
        """Applied at the end of each epoch"""
        # NOTE(review): Keras passes the epoch index as the first argument
        # here; it is unused, so the misleading name `batch` is harmless.
        # The mutable default logs={} is also unused.
        predictions = self.model.predict_on_batch(self.X).ravel()
        self.custom_history_.append(predictions)
# + [markdown] id="dwwKZS4duiVI"
# ## TGT Only
# + [markdown] id="LD9EEQMduiVI"
# First, let's fit a network only on the three labeled target data. As we could have guessed,
# this is not sufficient to build an efficient model on the whole target domain.
# + id="idpXQj73uiVI"
# Fix both numpy and TF seeds so the run is reproducible.
np.random.seed(0)
tf.random.set_seed(0)
model = get_model()
save_preds = SavePrediction()
# Train on the 3 labeled target points only; per-epoch predictions are
# recorded by the callback for the animation below.
model.fit(Xt_lab, yt_lab, callbacks=[save_preds], epochs=100, batch_size=64, verbose=0);
# + id="dxu80mcMuiVJ"
def animate(i, *fargs):
    """Redraw frame `i` from the predictions saved at each training epoch.

    Any extra positional argument (FuncAnimation's `fargs`) is treated as a
    single dict of keyword overrides forwarded to `show`.
    """
    ax.clear()
    predictions = save_preds.custom_history_[i].ravel()
    overrides = fargs[0] if fargs else {}
    show(ax, predictions, **overrides)
# + colab={"base_uri": "https://localhost:8080/", "height": 359} executionInfo={"elapsed": 736, "status": "ok", "timestamp": 1631026460551, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="wyyzuYUPuiVJ" outputId="adecfbce-3280-4089-e8ff-de71d133deef"
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ani = animation.FuncAnimation(fig, animate, frames=100, interval=60, blit=False, repeat=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 458, "output_embedded_package_id": "1uV_0KOcXYGCP1mLp7wKVpAFM9aEuJFwd"} executionInfo={"elapsed": 21912, "status": "ok", "timestamp": 1631026485394, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="MbBq9MswvNTR" outputId="0573e0dd-cf7d-4cd5-fbcb-c76956835b5a"
ani
# -
# 
# + [markdown] id="veAq61JEuiVK"
# ## Src Only
# + [markdown] id="pmDxTQR2uiVK"
# We would like to use the large amount of labeled source data to improve
# the training of the neural network on the target domain. However,
# as we can see on the figure below, using only the source
# dataset fails to provide an efficient model.
# + id="9mDLkRi_uiVK"
# Fix both numpy and TF seeds so the run is reproducible.
np.random.seed(0)
tf.random.set_seed(0)
model = get_model()
save_preds = SavePrediction()
# Train on source data only (batch_size=100 = full source sample, so each
# epoch is a single full-batch update).
model.fit(Xs, ys, callbacks=[save_preds], epochs=100, batch_size=100, verbose=0);
# + colab={"base_uri": "https://localhost:8080/", "height": 359} executionInfo={"elapsed": 901, "status": "ok", "timestamp": 1631026849825, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="mZGBurN7uiVK" outputId="f2e5111c-e8c9-421f-f944-873c7fb221c9"
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ani = animation.FuncAnimation(fig, animate, frames=100, blit=False, repeat=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 458, "output_embedded_package_id": "1P9v04RrDOIyCpKzIJv1FUqHLsJiRN12n"} executionInfo={"elapsed": 22533, "status": "ok", "timestamp": 1631026877515, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="q6_qGedqxrSC" outputId="05ce6bc2-5122-4261-99c0-5a92b6be2683"
ani
# -
# 
# + [markdown] id="pjYRlsJFuiVL"
# ## All
# + [markdown] id="qsTze2EWuiVL"
# The same thing happens when using both source and target labeled data. As the source sample overwhelms the target one,
# the model is not fitted enough on the target domain.
# + id="8gDtFcrCuiVL"
# Fix both numpy and TF seeds so the run is reproducible.
np.random.seed(0)
tf.random.set_seed(0)
model = get_model()
save_preds = SavePrediction()
# Train on source + labeled-target data pooled together.
# NOTE(review): batch_size=110 exceeds the pooled sample size, so each epoch
# is one full-batch update -- presumably intended; confirm.
model.fit(np.concatenate((Xs, Xt_lab)),
          np.concatenate((ys, yt_lab)),
          callbacks=[save_preds],
          epochs=100, batch_size=110, verbose=0);
# + colab={"base_uri": "https://localhost:8080/", "height": 359} executionInfo={"elapsed": 605, "status": "ok", "timestamp": 1631026879643, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="whtgcbNxuiVL" outputId="7728baa7-4e15-47ca-cac7-90eb5eb490f9"
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ani = animation.FuncAnimation(fig, animate, frames=100, blit=False, repeat=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 458, "output_embedded_package_id": "1FmtcgTofWJm-tGoQ62E09cZNurEAXjgz"} executionInfo={"elapsed": 22492, "status": "ok", "timestamp": 1631026902132, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="QKKKGxB6xwUK" outputId="ddab15e7-ed28-4458-dd6a-99bd03d17abd"
ani
# -
# 
# + [markdown] id="icjgazMKuiVM"
# ## CORAL
# + [markdown] id="8kkm_0YTuiVM"
# Let's now consider the domain adaptation method [CORAL](https://adapt-python.github.io/adapt/generated/adapt.feature_based.CORAL.html)
# This "two-stage" method first performs a feature alignment on source data and then fits
# an estimator on the new feature space.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1543, "status": "ok", "timestamp": 1631028094414, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="vT59E0PjuiVM" outputId="8ca38a41-8acc-453a-e87b-8e7023245932"
from adapt.feature_based import CORAL
save_preds = SavePrediction()
# CORAL: align the source feature distribution to the target one, then fit
# the network on the aligned source data (lambda_ controls the alignment
# regularization).
model = CORAL(get_model(), lambda_=1e-3, random_state=0)
model.fit(Xs.reshape(-1, 1), ys, Xt,
          callbacks=[save_preds], epochs=100, batch_size=110, verbose=0);
# + colab={"base_uri": "https://localhost:8080/", "height": 359} executionInfo={"elapsed": 685, "status": "ok", "timestamp": 1631028113364, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="yqTa2OFDuiVM" outputId="0e0aee70-3026-477b-ee01-749266d317af"
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
X_transformed = model.transform(Xs.reshape(-1, 1), domain="src").ravel()
ani = animation.FuncAnimation(fig, animate, frames=100, blit=False, repeat=True,
fargs=(dict(X_src=X_transformed),))
# + colab={"base_uri": "https://localhost:8080/", "height": 458, "output_embedded_package_id": "1Ttyom_6bSd3EC9gfwJAAbjEq5BsY6Btj"} executionInfo={"elapsed": 22037, "status": "ok", "timestamp": 1631028138605, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="TIgMmcB_yZIC" outputId="4c5602f0-7609-4eb4-9d2f-d31b1da901f0"
ani
# -
# 
# + [markdown] id="6YU3FI1ZuiVN"
# As we can see, when using the CORAL method, source input data are translated closer to
# target data. However, for this example, this is not enough to obtain a good model
# on the target domain.
# + [markdown] id="a4GpS8oPuiVN"
# ## TrAdaBoostR2
# + [markdown] id="I64SAjZwuiVN"
# We now consider an instance-based method: [TrAdaBoostR2](https://adapt-python.github.io/adapt/generated/adapt.instance_based.TrAdaBoostR2.html).
# This method consists in a reverse boosting algorithm decreasing the weights of source data poorly predicted
# at each boosting iteration.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 35098, "status": "ok", "timestamp": 1631027101298, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="yiZb0SLfuiVN" outputId="889fd295-6ff1-459a-9081-34d6e02845bc"
from adapt.instance_based import TrAdaBoostR2
# Reverse boosting: 30 estimators, down-weighting poorly-predicted source
# points at each boosting iteration.
model = TrAdaBoostR2(get_model(), n_estimators=30, random_state=0)
save_preds = SavePrediction()
# All inputs reshaped to column vectors; the callback records predictions
# across every epoch of every boosting iteration.
model.fit(Xs.reshape(-1, 1), ys.reshape(-1, 1), Xt_lab.reshape(-1, 1), yt_lab.reshape(-1, 1),
          callbacks=[save_preds], epochs=100, batch_size=110, verbose=0);
# + id="U2ZlowKAuiVN"
def animate_tradaboost(i):
    """Redraw one TrAdaBoost animation frame.

    Frame `i` shows every 10th recorded epoch; the corresponding boosting
    iteration is epoch // 100. Sample weights (scaled for visibility) set the
    marker sizes so the re-weighting is visible over time.
    """
    ax.clear()
    epoch = i * 10
    boost_step = epoch // 100
    preds = save_preds.custom_history_[epoch].ravel()
    show(ax, preds,
         weights_src=10000 * model.sample_weights_src_[boost_step],
         weights_tgt=10000 * model.sample_weights_tgt_[boost_step])
# + colab={"base_uri": "https://localhost:8080/", "height": 359} executionInfo={"elapsed": 1393, "status": "ok", "timestamp": 1631027142058, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="sz-lEId3uiVN" outputId="f57adda3-1f97-444e-fb89-c8e8fbadb4dd"
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ani = animation.FuncAnimation(fig, animate_tradaboost, frames=299, interval=120, blit=False, repeat=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 458, "output_embedded_package_id": "1e7E37NAi-SwTfzoDR6r-YJOiQRH2l8ib"} executionInfo={"elapsed": 76866, "status": "ok", "timestamp": 1631027224437, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="1Y1F1GtYyyk6" outputId="8a525926-2e30-4dd1-8363-1cac93f6dc2a"
ani
# -
# 
ani.save('tradaboost.gif', writer="imagemagick")
# + [markdown] id="nQe5Xs0uuiVO"
# As we can see on the figure above, [TrAdaBoostR2](https://adapt-python.github.io/adapt/generated/adapt.instance_based.TrAdaBoostR2.html) performs very well
# on this toy DA issue! The importance weights are described by the size of data points.
# We observe that the weights of source instances close to 0 are decreased as the weights of target instances increase.
# This source instances indeed misleaded the fitting of the network on the target domain. Decreasing their weights helps
# then a lot to obtain a good target model.
# + [markdown] id="Vb-KDV0huiVO"
# ## RegularTransferNN
# + [markdown] id="kXdy06isuiVO"
# Finally, we consider here the parameter-based method [RegularTransferNN](https://adapt-python.github.io/adapt/generated/adapt.parameter_based.RegularTransferNN.html).
# This method fits the target labeled data with a regularized loss. During training, the mean squared error on target data is
# regularized with the euclidean distance between the target model parameters and the ones of a pre-trained source model.
# + id="c3Jj2LvUuiVO"
from adapt.parameter_based import RegularTransferNN
# Fix both numpy and TF seeds so the run is reproducible.
np.random.seed(0)
tf.random.set_seed(0)
save_preds = SavePrediction()
# Step 1: pre-train a source-only model.
model_0 = get_model()
model_0.fit(Xs.reshape(-1, 1), ys, callbacks=[save_preds], epochs=100, batch_size=110, verbose=0);
# Step 2: fine-tune on the labeled target points, with an L2 penalty pulling
# the weights toward model_0's (lambdas controls its strength). The same
# callback is reused, so custom_history_ spans both phases (200 frames).
model = RegularTransferNN(model_0, lambdas=1.0, random_state=0)
model.fit(Xt_lab, yt_lab, callbacks=[save_preds], epochs=100, batch_size=110, verbose=0);
# + colab={"base_uri": "https://localhost:8080/", "height": 359} executionInfo={"elapsed": 1024, "status": "ok", "timestamp": 1631027315313, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="S3HpW80nuiVO" outputId="47bab4f6-a3e9-43f8-ddd4-1020d51fa560"
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ani = animation.FuncAnimation(fig, animate, frames=200, interval=60, blit=False, repeat=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 458, "output_embedded_package_id": "1oaecmJApJ98h8AVUsJqbkQ4LppOO8rld"} executionInfo={"elapsed": 45025, "status": "ok", "timestamp": 1631027364812, "user": {"displayName": "adapt python", "photoUrl": "", "userId": "02175971479792024881"}, "user_tz": -120} id="WA4Dh4rOzcwy" outputId="13308a9c-b56e-4c9e-8a42-ef974126b73d"
ani
# -
# 
| src_docs/examples/Regression.ipynb |
# ---
# layout: single
# title: "Understand uncertainty"
# excerpt: ". "
# authors: ['<NAME>']
# modified: '{:%Y-%m-%d}'.format(datetime.now())
# category: [course-materials]
# class-lesson: ['class-intro-spatial-r']
# permalink: /course-materials/earth-analytics/week-5/understand-uncertainty-lidar/
# nav-title: 'Remote sensing uncertainty'
# week: 5
# sidebar:
# nav:
# author_profile: false
# comments: true
# order: 5
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
#
# {% include toc title="In This Lesson" icon="file-text" %}
#
#
#
# <div class='notice--success' markdown="1">
#
#
#
# ## <i class="fa fa-graduation-cap" aria-hidden="true"></i> Learning Objectives
#
#
#
# After completing this tutorial, you will be able to:
#
#
#
# * Be able to list at least 3 sources of uncertainty / error associated with remote sensing data.
#
# * Be able to interpret a scatter plot that compares remote sensing values with field measured values to determine how "well" the two metrics compare.
#
# * Be able to describe 1-3 ways to better understand sources of error associated with a comparison between remote sensing values with field measured values.
#
#
#
# ## <i class="fa fa-check-square-o fa-2" aria-hidden="true"></i> What you need
#
#
#
# You will need a computer with internet access to complete this lesson and the data for week 5 of the course.
#
#
#
# </div>
#
#
#
# ## Understanding uncertainty and error.
#
#
#
# It is important to consider error and uncertainty when presenting scientific
#
# results. Most measurements that we make - be they from instruments or humans -
#
# have uncertainty associated with them. We will discuss what
#
# that means, below.
#
#
#
# ## Uncertainty
#
#
#
# **Uncertainty:** Uncertainty quantifies the range of values within which the
#
# value of the measure falls within - within a specified level of confidence. The
#
# uncertainty quantitatively indicates the "quality" of your measurement. It
#
# answers the question: "how well does the result represent the value of the
#
# quantity being measured?"
#
#
#
# ### Tree height measurement example
#
#
#
# So for example let's pretend that we measured the height of a tree 10 times. Each
#
# time our tree height measurement may be slightly different. Why? Because maybe
#
# each time we visually determined the top of the tree to be in a slightly different
#
# place. Or maybe there was wind that day during measurements that
#
# caused the tree to shift as we measured it yielding a slightly different height each time. or... what other reasons can you think of that might impact tree height
#
# measurements?
#
#
#
# <figure>
#
# <a href="{{ site.url }}/images/course-materials/earth-analytics/week-5/measuring-tree-height.jpg">
#
# <img src="{{ site.url }}/images/course-materials/earth-analytics/week-5/measuring-tree-height.jpg" alt="national geographic scaling trees graphic"></a>
#
# <figcaption>When we measure tree height by hand, many different variables may impact the accuracy and precision of our results. Source: http://www.haddenham.net/newsroom/guess-tree-height.html
#
# </figcaption>
#
# </figure>
#
#
#
# ## What is the true value?
#
#
#
# So you may be wondering, what is the true height of our tree?
#
# In the case of a tree in a forest, it's very difficult to determine the
#
# true height. So we accept that there will be some variation in our measurements
#
# and we measure the tree over and over again until we understand the range of
#
# heights that we are likely to get when we measure the tree.
#
#
#
# +
```{r standard-error, fig.cap="Distribution of tree heights." }
# create data frame containing made up tree heights
tree_heights <- data.frame(heights=c(10, 10.1, 9.9, 9.5, 9.7, 9.8,
9.6, 10.5, 10.7, 10.3, 10.6))
# what is the average tree height
mean(tree_heights$heights)
# what is the standard deviation of measurements?
sd(tree_heights$heights)
boxplot(tree_heights$heights,
main="Distribution of tree height measurements (m)",
ylab="Height (m)",
col="springgreen")
```
# -
#
#
# In the example above, our mean tree height value is towards the center of
#
# our distribution of measured heights. We might expect that the sample mean of
#
# our observations provides a reasonable estimate of the true value. The
#
# variation among our measured values may also provide some information about the
#
# precision (or lack thereof) of the measurement process.
#
#
#
# <a href="http://www.physics.csbsju.edu/stats/box2.html" target="_blank">Read more about the basics of a box plot</a>
#
#
#
# +
```{r hist-tree-height, fig.cap="Tree height distribution"}
# view distribution of tree height values
hist(tree_heights$heights, breaks=c(9,9.6,10.4,11),
main="Distribution of measured tree height values",
xlab="Height (m)", col="purple")
```
# -
#
#
# ## Measurement accuracy
#
#
#
# Measurement **accuracy** is a concept that relates to whether there is bias in
#
# measurements, i.e. whether the expected value of our observations is close to
#
# the true value. For low accuracy measurements, we may collect many observations,
#
# and the mean of those observations may not provide a good measure of the truth
#
# (e.g., the height of the tree). For high accuracy measurements, the mean of
#
# many observations would provide a good measure of the true value. This is
#
# different from **precision**, which typically refers to the variation among
#
# observations. Accuracy and precision are not always tightly coupled. It is
#
# possible to have measurements that are very precise but inaccurate, very
#
# imprecise but accurate, etc.
#
#
#
# ## Systematic vs Random error
#
#
#
# **Systematic error:** a systematic error is one that tends to shift all measurements
#
# in a systematic way. This means that the mean value of a set of measurements is
#
# consistently displaced or varied in a predictable way, leading to inaccurate observations.
#
# Causes of systematic errors may be known or unknown but should always be corrected for when present.
#
# For instance, no instrument can ever be calibrated perfectly, so when a group of measurements systematically differ from the value of a standard reference specimen, an adjustment in the values should be made.
#
# Systematic error can be corrected for only when the "true value" (such as the value assigned to a calibration or reference specimen) is known.
#
#
#
# *Example:* Remote sensing instruments need to be calibrated. For example a laser in
#
# a lidar system may be tested in a lab to ensure that the distribution of output light energy
#
# is consistent every time the laser "fires".
#
#
#
# **Random error:** is a component of the total error which, in the course of a number of measurements, varies in an unpredictable way. It is not possible to correct for random error. Random errors can occur for a variety of reasons such as:
#
#
#
# * Lack of equipment sensitivity. An instrument may not be able to respond to or indicate a change in some quantity that is too small or the observer may not be able to discern the change.
#
# * Noise in the measurement. Noise is extraneous disturbances that are unpredictable or random and cannot be completely accounted for.
#
# * Imprecise definition. It is difficult to exactly define the dimensions of a object. For example, it is difficult to determine the ends of a crack with measuring its length. Two people may likely pick two different starting and ending points.
#
#
#
# *Example:* random error may be introduced when we measure tree heights as discussed above.
#
#
#
# - <a href="https://www.nde-ed.org/GeneralResources/ErrorAnalysis/UncertaintyTerms.htm">Source: nde-ed.org</a>
#
#
#
#
#
# <figure>
#
# <a href="{{ site.url }}/images/course-materials/earth-analytics/week-5/accuracy_precision.png">
#
# <img src="{{ site.url }}/images/course-materials/earth-analytics/week-5/accuracy_precision.png" alt="national geographic scaling trees graphic"></a>
#
# <figcaption>Accuracy vs precision. Accuracy quantifies how close a measured value is to the true value. Precision quantifies how close two or more measurements agree with each other (how quantitatively repeatable are the results) Source: http://www.ece.rochester.edu/courses/ECE111/error_uncertainty.pdf
#
# </figcaption>
#
# </figure>
#
#
#
# ## Using lidar to estimate tree height
#
#
#
# We use lidar data to estimate tree height because it is an efficient way to measure
#
# large areas of trees (forests) quantitatively. However, we can process the lidar
#
# data in many different ways to estimate height. Which method most closely represents
#
# the actual heights of the trees on the ground?
#
#
#
# <figure>
#
# <a href="{{ site.url }}/images/course-materials/earth-analytics/week-3/scaling-trees-nat-geo.jpg">
#
# <img src="{{ site.url }}/images/course-materials/earth-analytics/week-3/scaling-trees-nat-geo.jpg" alt="national geographic scaling trees graphic"></a>
#
# <figcaption>It can be difficult to measure the true height of trees! Often times "seeing" the very top of the tree where it is tallest is not possible from the ground - especially in dense, tall forests. One can imagine the amount of uncertainty that is thus introduced when we try to estimate the true height of trees! Image Source:
#
# National Geographic
#
# </figcaption>
#
# </figure>
#
#
#
# <figure>
#
# <a href="{{ site.url }}/images/course-materials/earth-analytics/week-3/waveform.png" target="_blank">
#
# <img src="{{ site.url }}/images/course-materials/earth-analytics/week-3/waveform.png" alt="Example of a lidar waveform"></a>
#
# <figcaption>An example LiDAR waveform. Source: NEON, Boulder, CO.
#
# </figcaption>
#
# </figure>
#
#
#
#
#
# <figure>
#
# <a href="{{ site.url }}/images/course-materials/earth-analytics/week-3/Treeline_ScannedPoints.png">
#
# <img src="{{ site.url }}/images/course-materials/earth-analytics/week-3/Treeline_ScannedPoints.png" alt="example of a tree profile after a lidar scan."></a>
#
# <figcaption>Cross section showing LiDAR point cloud data (above) and the
#
# corresponding landscape profile (below). Graphic: <NAME>
#
# </figcaption>
#
# </figure>
#
#
#
#
#
#
#
# +
```{r uncertainty-lidar, echo=F, warning=FALSE, message=FALSE, results = "hide" }
# load libraries
library(raster)
library(rgdal)
library(rgeos)
library(ggplot2)
library(dplyr)
options(stringsAsFactors = FALSE)
SJER_chm <- raster("data/week5/california/SJER/2013/lidar/SJER_lidarCHM.tif")
SJER_chm[SJER_chm==0] <- NA
SJER_plots <- readOGR("data/week5/california/SJER/vector_data",
"SJER_plot_centroids")
# extract max height
SJER_height <- extract(SJER_chm,
SJER_plots,
buffer = 20, # specify a 20 m radius
                       fun=max, # extract the MAX value from each plot
sp=TRUE, # create spatial object
stringsAsFactors=FALSE)
# import the centroid data and the vegetation structure data
SJER_insitu <- read.csv("data/week5/california/SJER/2013/insitu/veg_structure/D17_2013_SJER_vegStr.csv",
stringsAsFactors = FALSE)
# find the max and mean stem height for each plot
insitu_stem_height <- SJER_insitu %>%
group_by(plotid) %>%
summarise(insitu_max = max(stemheight))
# merge the insitu data into the centroids data.frame
SJER_height <- merge(SJER_height,
insitu_stem_height,
by.x = 'Plot_ID',
by.y = 'plotid')
```
# -
#
#
# ## Study site location
#
#
#
# To answer the question above, let's look at some data from a study site location
#
# in California - the San Joaquin Experimental range field site. You can see the field
#
# site location on the map below.
#
#
#
# +
```{r ggmap, echo=F, warning=F, message=F, results = "hide"}
library(ggmap)
cali_map <- get_map(location = "California",
source="google",
maptype="terrain", crop=FALSE,
zoom=6)
# creating a sample data.frame with your lat/lon points
lon <- c(SJER_chm@extent@xmin)
lat <- c(SJER_chm@extent@ymin)
# import us data
state_boundary_us <- readOGR("data/week5/usa-boundary-layers",
"US-State-Boundaries-Census-2014")
df <- as.data.frame(cbind(lon,lat))
site_location <- SpatialPoints(df, proj4string = crs(SJER_chm))
site_location_wgs84 <- spTransform(site_location, CRSobj = crs(state_boundary_us))
site_locat_points <- as.data.frame(coordinates(site_location_wgs84))
```
# -
#
#
# +
```{r ggmap-plot, echo=F, warning=F, message=F, results = "hide", fig.cap="ggmap of study area."}
# create a map with a point location for boulder.
ggmap(cali_map) + labs(x = "", y = "") +
geom_point(data = site_locat_points, aes(x = lon, y = lat, fill = "red", alpha = 0.2), size = 5, shape = 19) +
guides(fill=FALSE, alpha=FALSE, size=FALSE)
```
# -
#
#
# ## Study area plots
#
#
#
# At this study site, we have both lidar data - specifically a canopy height model
#
# that was processed by NEON (National Ecological Observatory Network). We also
#
# have some "ground truth" data. That is we have measured tree height values collected at a set
#
# of field site plots by technicians at NEON. We will call these measured values
#
# *in situ* measurements.
#
#
#
# A map of our study plots is below, overlaid on top of the canopy height model.
#
#
#
# +
```{r plot-plots, fig.cap="plots", echo=F}
# Overlay the centroid points and the stem locations on the CHM plot
plot(SJER_chm,
main="Study area plot locations",
col=gray.colors(100, start=.3, end=.9),
legend=F,
box=F, # turn off black border
axes=F) # turn off axis labels and ticks
# pch 0 = square
plot(SJER_plots,
pch = 15,
cex = 2,
col = "magenta",
add=TRUE)
par(xpd=T)
legend(SJER_chm@extent@xmax+100, SJER_chm@extent@ymax,
legend="Plot \nlocations",
pch = 15,
col = "magenta",
bty="n")
```
# -
#
#
# ### Compare lidar derived height to in situ measurements
#
#
#
# We can compare maximum tree height values at each plot to the maximum pixel value
#
# in our CHM for each plot. To do this, we define the geographic boundary of our plot
#
# using a polygon - in the case below we use a circle as the boundary. We then extract
#
# the raster cell values for each circle and calculate the max value for all of the
#
# pixels that fall within the plot area.
#
#
#
# Then, we calculate the max height of our measured plot tree height data.
#
#
#
# Finally we compare the two using a scatter plot to see how closely the data relate.
#
# Do they follow a 1:1 line? Do the data diverge from a 1:1 relationship?
#
#
#
# <figure>
#
# <img src="{{ site.url }}/images/course-materials/earth-analytics/week-5/buffer-circular.png" alt="buffer circular">
#
# <figcaption>The extract function in R allows you to specify a circular buffer
#
# radius around an x,y point location. Values for all pixels in the specified
#
# raster that fall within the circular buffer are extracted. In this case, we
#
# will tell R to extract the maximum value of all pixels using the fun=max
#
# command. Source: <NAME>, NEON
#
# </figcaption>
#
# </figure>
#
#
#
# +
```{r plot-data, fig.cap="final plot", echo=F, warning=F, message=F}
# create plot
p <-ggplot(SJER_height@data, aes(x = insitu_max, y=SJER_lidarCHM)) +
geom_point() +
theme_bw() +
xlab("Mean measured height (m)") +
ylab("Mean LiDAR pixel (m)") +
ggtitle("Lidar Derived Max Tree Height \nvs. InSitu Measured Max Tree Height") +
geom_abline(intercept = 0, slope=1) +
geom_smooth(method=lm)
p
```
# -
#
#
# ### How different are the data?
#
#
#
# +
```{r view-diff, echo=F, fig.cap="box plot showing differences between chm and measured heights."}
# Calculate difference
SJER_height@data$ht_diff <- (SJER_height@data$SJER_lidarCHM - SJER_height@data$insitu_max)
SJER_height@data$Plot_ID <- gsub("SJER", "", SJER_height@data$Plot_ID)
# create bar plot using ggplot()
ggplot(data=SJER_height@data,
aes(x=Plot_ID, y=ht_diff, fill=Plot_ID)) +
geom_bar(stat="identity") +
xlab("Plot Name") + ylab("Height difference (m)") +
ggtitle("Difference: \nLidar Max height - in situ Max height (m)")
```
# -
#
#
# ## View interactive scatterplot
#
#
#
# <a href="https://plot.ly/~leahawasser/170/" target="_blank">View scatterplot plotly</a>
#
#
#
#
#
# ## View interactive difference barplot
#
#
#
# <a href="https://plot.ly/~leahawasser/158/chm-minus-insitu-differences/
#
# " target="_blank">View scatterplot differences</a>
#
#
#
# +
```{r ggplotly, echo=F, eval=F}
library(plotly)
Sys.setenv("plotly_username"="leahawasser")
Sys.setenv("plotly_api_key"="#")
plotly_POST(p)
```
| _posts/course-materials/earth-analytics/python/live/week-5/in-class/2016-12-06-spatial05-understand-uncertainty.ipynb |