code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1-D nonlinear convection (Burgers-type advection term u * du/dx), solved with
# a forward-in-time / backward-in-space (upwind) finite-difference scheme.
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
nx=61 #number of points in space
nt=50 #number of points in time
dt=0.01 #time step
dx=2/(nx-1) #space step (domain is x in [0, 2])
print(dx)
# initial condition: u = 1 everywhere, with a "hat" of u = 2 on 0.5 <= x <= 1
u=np.ones(nx)
u[int(0.5/dx):int(1/dx+1)]=2 #initial conditions
print(u)
plt.plot(np.linspace(0,2,nx),u) #plotting the initial values at t=0
plt.title("Initial Plot")
plt.xlabel("x")
plt.ylabel("u")
plt.show()
# time-march: u_i^{n+1} = u_i^n - u_i^n * dt/dx * (u_i^n - u_{i-1}^n)
# (index 0 is the left boundary and is never updated)
for it in range(nt): #using the discrete convection equation
    un=u.copy()  # snapshot of the previous time level
    #print(un)
    for ix in range(1,nx):
        u[ix]=un[ix]-un[ix]*dt/dx*(un[ix]-un[ix-1])
print(u)
plt.plot(np.linspace(0,2,nx),u)
plt.title("Final Plot")
plt.xlabel("x")
plt.ylabel("u")
plt.show()
|
12 Steps to Navier-Stokes/Step 2 - Adding Non-Linearity (Burger's Inviscid Equation).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Particionado
# ## Creamos una sesión de spark
# Partitioning demo: create a Spark session and inspect/control partition counts.
from pyspark.sql import SparkSession
# Spark lets us choose the number of partitions when the session/context is created.
#
# The total number of partitions is given inside '[ ]' in the master URL.
spark = SparkSession.builder.appName("Particionado").master( "local[5]").getOrCreate()
df = spark.range(0,20)
df.rdd.getNumPartitions()
# The 'parallelize' method allows assigning the number of partitions manually.
# NOTE(review): (0,20) is a two-element tuple, so this RDD holds only the values
# 0 and 20 spread over 6 partitions — range(0, 20) may have been intended; verify.
rdd1 = spark.sparkContext.parallelize((0,20),6)
rdd1.getNumPartitions()
# Likewise, we can set this when creating an RDD or DataFrame.
#
# For RDDs it is done as follows (second argument = minimum number of partitions).
rddDesdeArchivo = spark \
.sparkContext \
.textFile("/home/spark/Downloads/curso-apache-spark-platzi-master/files/deporte.csv",10)
rddDesdeArchivo.getNumPartitions()
# It is good practice to keep data files partitioned, for faster loading and easier management.
#
# The 'saveAsTextFile' method stores the files, partitioned or not, at a path.
rddDesdeArchivo.saveAsTextFile("/home/spark/Downloads/salidastexto")
# !ls /home/spark/Downloads/salidastexto/
# Next, load the multiple part-files back into a single RDD.
#
# The same operation can also be done for DataFrames.
rdd = spark.sparkContext.wholeTextFiles("/home/spark/Downloads/salidastexto/*")
lista = rdd.mapValues(lambda x : x.split()).collect()
l = [l[0] for l in lista]  # keep only the part-file paths
l.sort()
rddDesdeArchivo = spark \
.sparkContext \
.textFile(','.join(l),
10).map(lambda l : l.split(","))
rddDesdeArchivo.take(7)
# !pwd
|
code/curso-apache-spark-platzi/5. Particionado.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Query Gallica for all images matching the search and cache the raw records locally.
from fdh_gallica import Periodical, Search, Document
from fdh_gallica.parallel_process import iiif_urls_for_documents
from tqdm.autonotebook import tqdm
import json
import pandas as pd
from pandas.io.json import json_normalize
# -
# %matplotlib inline
# get all images with relevant query
re_execute = False  # flip to True to re-run the (slow) Gallica search
if re_execute:
    obj = Search(all_fields='<NAME>', dc_type='image', dc_creator='<NAME>')
    obj.execute()
    print(f'number of fetched documents: {len(obj.documents)}')
    print(f'total records {obj.total_records}')
    print('retry fetching ...')
    obj.retry()
    print(f'number of fetched documents: {len(obj.documents)}')
    # round-trip through JSON flattens the record objects into columns
    df = json_normalize(json.loads(json.dumps(obj.records)))
    print('save documents to local')
    # list handling becomes a pain if we save as csv
    df.to_pickle('raw_df.pkl')
df = pd.read_pickle('raw_df.pkl')
# parse identifier for easier access:
# +
# take the first identifier when there are several; store it as column 'id'
image_dataframe = pd.DataFrame(df['dc:identifier']\
    .map(lambda x: x[0] if type(x) == list else x))\
    .rename(columns={'dc:identifier':'identifier'})
df['id'] = image_dataframe
# -
df.head()
# ### useless columns
# Useless because identical for all images,
# remove them for ease of use, next ones contain all the same content
df['dc:rights'].value_counts()
df['dc:type'].value_counts()
df['dc:language'].value_counts()
df['dc:format'].value_counts()
df['dc:format'].map(lambda x: 'photo' in x[0]).value_counts()
# Almost all are paper, except for a few:
df['dc:format'].map(lambda x: 'papier' in x[0]).value_counts()
df[~df['dc:format'].map(lambda x: 'papier' in x[0])].id
# we don't care about those, so remove:
df = df[df['dc:format'].map(lambda x: 'papier' in x[0])]
df['dc:identifier'].head()
df['dc:identifier'].map(lambda x: x[0]).duplicated().value_counts()
df['dc:publisher'].value_counts()
# ### removing them
del df['dc:type']
del df['dc:language']
del df['dc:format']
del df['dc:identifier']
del df['dc:rights']
del df['dc:publisher']
# # getting information about subjects
# some subjects are stored as lists, others aren't; use if/else to handle both
df['subject_is_list'] = df['dc:subject'].map(lambda x: type(x) == list)
df['len_list'] = df[df.subject_is_list]['dc:subject'].map(len)
# length is non uniform ... we will have to unnest
df.len_list.value_counts()
df[df.subject_is_list]['dc:subject'].map(lambda x: x).value_counts().head()
# unnest list to get better view
to_unnest = df[df.subject_is_list]['dc:subject'].tolist()
# pair each subject list with its image id so the unnested rows stay traceable
to_unnest = df[df.subject_is_list][['dc:subject', 'id']].apply(lambda x: (x['dc:subject'], x.id), axis=1).tolist()
unnested = [(i, id_) for (list_, id_) in to_unnest for i in list_]
# we can also use the **title**, more about this later
df[~df.subject_is_list][['dc:subject', 'id', 'dc:title']].head()
to_append = df[~df.subject_is_list][['dc:subject', 'id']].rename(columns={'dc:subject':'descriptor'})
unnested.__len__()
subject_frame = pd.DataFrame(unnested, columns=['descriptor', 'id'])
subject_frame = pd.concat([subject_frame, to_append], sort=False)
subject_frame.count()
# associate each descriptor of one object (given by id)
subject_frame.head()
# do some keyword searching
subject_frame['19e'] = subject_frame.descriptor.map(lambda x: x[-len('-- 19e siècle'):] == '-- 19e siècle')
subject_frame['Portraits'] = subject_frame.descriptor.map(lambda x: 'portraits' in x.lower())
is_19e = subject_frame[subject_frame['19e']].id.unique()
subject_frame[subject_frame.Portraits].id.unique().__len__()
portrait_ids = subject_frame[subject_frame.Portraits].id.unique()
# dessins humoristiques & caricatures seems important
subject_frame[~subject_frame.id.isin(portrait_ids)].head()
# we also get sculptures!
subject_frame[~subject_frame.id.isin(portrait_ids)].descriptor.value_counts().head()
subject_frame['caricature'] = subject_frame.descriptor.map(lambda x: 'Caricatures et dessins humoristiques' in x)
subject_frame[~subject_frame.id.isin(portrait_ids) & ~subject_frame.caricature].descriptor.value_counts().head()
# ## Extracting individual names:
import re
name = "<NAME> (1827-1892)"
name_2 = "Gobin (1982"
name_3 = '<NAME> -- Portraits'
# +
def find_names(name):
    """Extract person-name candidates that are followed by a lifespan "(18..".

    Handles "First Last", "Last, First" and single-token names; returns a
    (possibly empty) list of the non-empty candidates.
    """
    dated = re.compile('([\w\'.-]* [\w\'.-]* [\w\'.]*|[\w\'.-]* [\w\'.]*|[\w\'.-]*|[\w\'.]*\, [\w\'.-]*[ \w\'.]+) \([\d]{2}')
    return [candidate for candidate in dated.findall(name) if candidate]
def find_names_undated(name):
    """Extract person-name candidates followed by the literal ' -- Portraits'."""
    undated = re.compile('([\w\'.-]*|[\w\'.]*\, [\w\'.-]*[ \w\'.]+) -- Portraits')
    return [candidate for candidate in undated.findall(name) if candidate]
# -
# sanity-check the extractors on the sample strings
find_names(name), find_names(name_2), find_names_undated(name_3)
# NOTE(review): 'named_subject' is only created further down (from subject_name),
# so this line fails if the notebook is executed strictly top-to-bottom.
subject_frame.dropna()[subject_frame.dropna().named_subject.map(lambda x: 'Balthasar' in x)]
# unicode normalize first
import unicodedata
subject_frame['descriptor'] = subject_frame.descriptor.map(lambda x: unicodedata.normalize('NFC', x))
subject_frame['subject_name'] = subject_frame.descriptor.map(find_names)
# rows where the dated pattern found nothing: retry with the undated pattern
no_name = subject_frame.subject_name.map(len) == 0
subject_frame.loc[ no_name, 'subject_name'] = subject_frame[no_name].descriptor.map(find_names_undated)
no_name = subject_frame.subject_name.map(len) == 0
subject_frame[no_name]['descriptor'].drop_duplicates()
named_subjects = subject_frame[subject_frame.subject_name.map(len) > 0].id.unique()
# number of images we can find a name to:
len(named_subjects)
# # at most 1837
subject_frame[subject_frame.subject_name.map(len) > 0].subject_name.map(str).unique().__len__()
subject_frame[subject_frame.subject_name.map(len) > 0].id.unique().__len__()
# keep only the first extracted name of each descriptor
subject_frame.loc[~no_name, 'named_subject'] = subject_frame[~no_name].subject_name.map(lambda x: x[0])
subject_frame.named_subject.value_counts().head()
# +
#explore_names[explore_names.index.map(len) < 10]
# -
subject_frame.named_subject.unique().__len__()
(subject_frame.named_subject.value_counts() > 1).value_counts()
explore_names = subject_frame.named_subject.value_counts()
explore_names.hist(bins=50)
# number of names we have:
subject_frame.named_subject.value_counts().count()
# # Create named_subject.pkl
subject_frame[subject_frame.named_subject.notna()].to_pickle('named_subject.pkl')
subject_frame[subject_frame.named_subject.notna()].named_subject.map(len).value_counts()
# how many resources does that correspond to:
id_identified = subject_frame[subject_frame.named_subject.notna()].id.unique()
# # about half of the images belong to known person
subject_frame[subject_frame.named_subject.notna()].id.unique().__len__()
len(set(subject_frame.id.unique())) # nbr unique ids
# a lot of them don't tell us the name in the source, but it can be found in the title
len(set(subject_frame.id.unique()).difference(set(id_identified)))
subject_frame[~subject_frame.id.isin(named_subjects)].descriptor.value_counts().head(10)
subject_frame[~subject_frame.id.isin(portrait_ids) & ~subject_frame.id.isin(named_subjects)].descriptor.value_counts().head()
subject_frame[~subject_frame.id.isin(portrait_ids)].head()
# count titles per image (titles may be a list or a scalar)
df['dc:title'].map(lambda x: len(x) if type(x) == list else 1).value_counts()
df['nbr_title'] = df['dc:title'].map(lambda x: len(x) if type(x) == list else 1)
df[df.nbr_title == 1]['dc:title'].sample(10).tolist()
def find_names_title(name):
    """Return the non-empty, stripped segments of *name* that precede ' : '."""
    hits = re.findall('([\S\s ^(\:)]*) \: ', name)
    stripped = [hit.strip() for hit in hits]
    return [segment for segment in stripped if segment]
def extract_from_brackets(name):
    """Return the contents of every [...] group found in *name*."""
    bracketed = re.compile('\[([\w \-\,\.]*)\]')
    return bracketed.findall(name)
def fix_bad_naming(name):
    """Strip all square brackets from *name*."""
    brackets = '[\[\]]*'
    return re.sub(brackets, '', name)
# sample titles to exercise the helpers above
text_1 = '<NAME>-Dramatiques. La petite fronde : [photographie, tirage de démonstration] / [<NAME>]'
text_2 = 'Armand.[lol] Châtelet : [photographie :,'
fix_bad_naming(text_2)
extract_from_brackets(text_1)
df[df.nbr_title == 1]['dc:title'].map(extract_from_brackets).value_counts().head()
# the part of the title before ' : ' is usually the subject's name/caption
df.loc[df.nbr_title == 1, 'first_title'] = df[df.nbr_title == 1]['dc:title'].map(find_names_title)
df.first_title.dropna().map(len).value_counts()
df[df.first_title.map(lambda x: len(x)==0 if type(x) ==list else False)]['dc:title'].tolist()
# unwrap the single-element lists and strip stray brackets
df.loc[df.first_title.notna(), 'first_title'] = df[
    df.first_title.notna()].first_title.map(
    lambda x: fix_bad_naming(x[0]) if len(x)>0 else x)#.value_counts()
df.count()
df = pd.merge(df, subject_frame[['id', 'named_subject']].dropna(), on='id', how='outer')
df.count()
# fill first_title for rows that still miss it after the outer merge
df.loc[df.first_title.isna() & df['dc:title'].notna(), 'first_title'] = df[ df.first_title.isna( )& df['dc:title'].notna()]['dc:title'].map(lambda x: fix_bad_naming(find_names_title(x[0])[0])).tolist()
df[df.named_subject.isna()]['dc:subject'].value_counts().index.tolist().__len__()
df.named_subject.dropna().map(lambda x: 'Charlaviski' in x).value_counts()
df[df.first_title.notna() & df.named_subject.isna() & df['dc:subject'].map(lambda x: 'collectifs' in x)].first_title
# ## now that we have extracted names for most people, get years lived where available (from subject)
# +
def get_years(string):
    """Return the first "(YYYY-YYYY)"-style span found in *string*, else None."""
    spans = re.findall('\(([\d{4} -]+)\)', string)
    return spans[0] if spans else None
def get_years_special(string):
    """Lenient variant of get_years: also matches partial dates like "(18..-1928)"."""
    spans = re.findall('\(([\d{4}\d{2} -\.\?]+)[\S \;]*\)', string)
    return spans[0] if spans else None
# -
# Attach a "years" (lifespan) column to subject_frame and merge it into df.
subject_frame['years'] = subject_frame.descriptor.map(get_years)
get_years_special('Febvre, Paul (18..-1928)')
# 41 for which we can't find perfect match
unmatched = subject_frame.years.isna() & subject_frame.descriptor.map(lambda x: '(1' in x)
subject_frame[unmatched].count()
subject_frame[unmatched].descriptor
# fall back to the lenient pattern (handles '18..'-style partial dates)
subject_frame.loc[unmatched, 'years'] = subject_frame[unmatched].descriptor.map(get_years_special)
subject_frame[subject_frame.years.notna()].id.unique().__len__()
id_years = subject_frame[subject_frame.years.notna()].id.unique()
df = pd.merge(df, subject_frame[['id', 'years']].dropna(), on='id', how='outer')
# we never have more than one named individual ... it seems
# BUG FIX: the original called df.groupby(id) with the *builtin* function `id`,
# which groups by object identity of the index values (one group per row) and
# makes the check meaningless; group by the 'id' column instead.
(df.groupby('id').named_subject.count() > 1).value_counts()
# # we can't get year or name of subject, who are we looking at
df[df.named_subject.isna() | df.years.isna()].count()
# NOTE(review): '&' binds tighter than '|', so this reads as
# isna(named) OR (isna(years) AND studio AND not-theatre) — confirm intended.
df[
    df.named_subject.isna() |\
    df.years.isna() &\
    df['dc:subject'].map(lambda x: 'Portraits de studio -- 19e siècle' in x) &\
    ~df['dc:subject'].map(lambda x: 'Théâtre' in x)
]['dc:title'].value_counts().sample(10)
# strip everything after ' :' from titles to get a rough name/caption
matches = df['dc:title'].map(lambda x: re.match('(?!\:)([\s\S]*) :', str(x)))
df[matches.isna()]['dc:title']
matches.dropna().map(lambda x: re.sub('\[|\]|\:', '', x.group(0))).value_counts().head(100).index.tolist()
def find_keywords(x):
    """Split *x* into a de-duplicated list of word tokens, skipping tokens
    that start with a digit (e.g. years).

    Falsy inputs (None, empty string, ...) are returned unchanged.
    """
    if not x:
        return x
    tokens = re.findall('(?!\d)[\w]*', str(x))
    return list({token for token in tokens if token})
# frequency tables of all keywords found in subjects and in titles
tag_list = pd.Series([x for i in df['dc:subject'].map(find_keywords).tolist() for x in i]).value_counts().reset_index()
tag_list_title = pd.Series([x for i in df['dc:title'].map(find_keywords).tolist() for x in i]).value_counts().reset_index()
# peek at the lower-case keywords of length > 2
tag_list_title[tag_list_title['index'].map(lambda x: x.islower() and len(x) > 2)]['index'].head(30).tolist()
tag_list[tag_list['index'].map(lambda x: x.islower() and len(x) > 2)]['index'].tolist()
# ### associate tags and ids in new dataframe. Remove useless tags. save tag-id association as json file
# +
# clear useless tags
from itertools import compress
def tag_exclude(tag):
    """Return True when *tag* should be kept, False when it is noise.

    Rules: tags shorter than 3 chars are dropped; 3-char tags are kept only
    if upper-cased or exactly 'roi'; longer tags are kept unless they appear
    in the stop list of generic catalogue words.
    """
    size = len(tag)
    if size < 3:
        return False
    if size == 3:
        return not (tag.islower() and tag != 'roi')
    # this list may change
    stop_words = ['siècle', 'Portraits', 'studio', 'Scènes', 'collectifs', 'photographie', 'Nadar']
    return tag not in stop_words
def tag_list_exclude(tag_list):
    """Filter a list of tags down to the ones tag_exclude keeps."""
    return [tag for tag in tag_list if tag_exclude(tag)]
# -
# Build a per-image tag list from subject + title keywords, drop noise tags,
# explode to (tag, id) pairs, and save the tag -> [ids] mapping as JSON.
tag_frame = pd.DataFrame(df.id)
tag_frame['tags'] = df['dc:subject'].map(find_keywords) + df['dc:title'].map(find_keywords)
tag_frame['tags'] = tag_frame['tags'].apply(lambda x: list(set(x)))  # de-dupe per image
tag_frame['tags'] = tag_frame.tags.apply(tag_list_exclude)           # drop noise tags
# BUG FIX: the original chain ended with a stray trailing backslash after the
# final .drop(...), which turned the following assignment into an invalid
# continuation of this expression and raised a SyntaxError.
tag_explode = tag_frame.tags.apply(pd.Series) \
    .merge(tag_frame, right_index=True, left_index=True) \
    .drop(['tags'], axis=1) \
    .melt(id_vars=['id'], value_name='tag') \
    .drop('variable', axis=1)
tag_ids = tag_explode.groupby(tag_explode.tag)['id'].unique().apply(list)
tag_ids.to_json('tag_ids.json')
# ### there really is no associated year
# descriptors of images with no year, no named subject and no portrait tag
subject_frame[~subject_frame.id.isin(id_years) &\
    subject_frame.named_subject.isna() &\
    ~subject_frame.Portraits].descriptor.value_counts().index
# compare the title-derived name with the subject-derived name
df[df.first_title.notna() & df.named_subject.notna()][['first_title', 'named_subject']]
df[df.first_title.notna() & df.named_subject.notna()].named_subject.value_counts()
df[df.first_title.notna() & df.named_subject.notna()].named_subject.count()
df[df.first_title.notna() & df.named_subject.isna()].first_title.value_counts().head(30).index.tolist()
|
notebooks/Cleaning the Metadata.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""Batch-transform OxGarage TEI exports with an XSL stylesheet, then post-process.

Step 1: run oxgarageToMyTEI.xsl over every *.xml file and write the results
into the oxgarageToMyTei/ subdirectory.
Step 2: rewrite each transformed file in place: inject the file name as xml:id
on the TEI root element, lower-case <rs> type/ref attribute values, and add
table headers from the preceding paragraph.
"""
import os, re
import lxml.etree as ET
# change the working directory to directory holding the xml files
os.chdir(r"C:\Users\pandorfer\ownCloud\Documents\own\buchbesitz\data\fromOxgarage")
path = os.getcwd()
# load the xsl-file and create a xsl-transform object
xslt = ET.parse("oxgarageToMyTEI.xsl")
transform = ET.XSLT(xslt)
# iterate over the directory, transform every XML-File and save the result in another directory
for file in os.listdir(path):
    if file.endswith(".xml"):
        dom = ET.parse(file)
        newdom = transform(dom)
        newdom.write(str("oxgarageToMyTei/" + file))
os.chdir(r"C:\Users\pandorfer\ownCloud\Documents\own\buchbesitz\data\fromOxgarage\oxgarageToMyTei")
path = os.getcwd()
for file in os.listdir(os.getcwd()):  # iterate over files in the directory
    # context managers guarantee the handles are closed even on error
    with open(file, "r", encoding="utf-8") as data:
        text = data.read()
    # add the file name as xml:id in the root element
    replace = str('TEI xmlns="http://www.tei-c.org/ns/1.0"' + ' xml:id="' + file + '"')
    text = re.sub(r'TEI xmlns="http://www.tei-c.org/ns/1.0"', replace, text)
    # lower-case the type/ref attribute values of <rs> elements;
    # str.replace instead of re.sub because `match` is literal text, not a pattern
    matches = re.findall(r'<rs type="\w+" ref="#\w+_\w+">', text)
    for match in matches:
        text = text.replace(match, match.lower())
    matches = re.findall(r'<rs type="\w+" ref="#\w+">', text)
    for match in matches:
        text = text.replace(match, match.lower())
    text = re.sub(r'(?P<all><p>(?P<match>\w*\s*\w*)</p><table rend="rules">)',
                  '\g<all><head type="added">\g<match></head>', text)  # add table headers
    # BUG FIX: write with explicit UTF-8; the platform-default encoding (cp1252
    # on Windows) would mangle or reject the non-ASCII text read as UTF-8 above.
    with open(file, "w", encoding="utf-8") as output_text:
        print(text, file=output_text)
|
workflow/XSLT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Path hack so the project-local packages (core, oned) resolve when this
# example is run from its own directory.
import sys
sys.path.append("../../")
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF, ConstantKernel as C
import matplotlib.pyplot as plt
from core.vhgpr import VHGPR
from oned import S, f, r
from core.inputs import GaussianInputs
import time
plt.rcParams.update({'font.size': 16})  # larger fonts for the figures below
def plot_prediction(vhgp):
    """Plot a fitted VHGPR model against the ground truth on [0, 10].

    Produces two figures: the predicted mean vs. the true function f (with
    the training points overlaid), and the predicted noise std vs. r.

    Parameters: vhgp -- a fitted VHGPR model exposing predict / X_train_ /
    y_train_. Returns None; showing the figures is the side effect.
    """
    x = np.linspace(0, 10, 100).reshape(-1, 1)
    truefuncf = f(x)
    truefuncg = r(x)
    predresults = vhgp.predict(x)
    predfuncf = predresults[0]
    # predresults[2] is the predicted log-variance; convert to a std dev
    predfuncg = np.sqrt(np.exp(predresults[2]))
    # mean plot
    plt.figure(figsize=(4, 4))
    plt.plot(x, truefuncf, color='black')
    plt.plot(x, predfuncf, color='tab:red')
    plt.plot(vhgp.X_train_, vhgp.y_train_, 'o', color='tab:orange')
    plt.xlabel('$x$')
    plt.ylabel('mean')
    plt.show()
    # noise-std plot
    plt.figure(figsize=(4, 4))
    plt.plot(x, truefuncg, color='black')
    plt.plot(x, predfuncg, color='tab:red')
    plt.xlabel('$x$')
    plt.ylabel('std')
    plt.show()
    # (a duplicated trailing plt.show() was removed; it was a no-op)
# Problem setup: 1-D Gaussian input distribution on the domain [0, 10].
dim = 1
mean, cov = 5 * np.ones(dim), np.eye(dim)
domain = np.array([[0,10]]*dim)
inputs = GaussianInputs(mean, cov, domain, dim)
np.random.seed(0)  # reproducible training sample
DX = inputs.sampling(100)   # 100 training inputs
DY = S(DX).reshape(-1)      # observations of the target S
# + tags=[]
# kernels for the mean GP (f) and the log-noise GP (g): constant * RBF
kernelf = C(100.0, (1e-1, 1e3)) * RBF(5, (1e-1, 1e1))
kernelg = C(2.0, (1e-1, 2*1e1)) * RBF(5, (1e-1, 1e2))
vhgpr = VHGPR(kernelf, kernelg)
vhgpr.fit(DX, DY)
# -
plot_prediction(vhgpr)
|
HGPextreme/examples/oned/func_prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Pin the job to GPU 0 before TensorFlow is imported.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# -
# start from a clean output directory for the extracted alignments
os.system('rm -rf tacotron2-husein-alignment')
os.system('mkdir tacotron2-husein-alignment')
import tensorflow as tf
import numpy as np
from glob import glob
import tensorflow as tf
import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import tacotron2_nvidia as tacotron2
import malaya_speech.config
import numpy as np
import json
import malaya_speech.train as train
# +
def norm_mean_std(x, mean, std):
    """Standardize *x* with the given mean/std, keeping exact zeros at zero.

    Zeros mark padding in these feature arrays, so they must stay zero after
    normalization. Returns a new array; *x* itself is not modified.
    """
    padding_idx = np.where(x == 0.0)[0]
    scaled = (x - mean) / std
    scaled[padding_idx] = 0.0
    return scaled
def average_by_duration(x, durs):
    """Average a frame-level feature *x* over per-character durations *durs*.

    Zero-valued frames inside a span are ignored; a span with no non-zero
    frames averages to 0. Returns a float32 array with one mean per duration.
    """
    mel_len = durs.sum()
    boundaries = np.cumsum(np.pad(durs, (1, 0)))
    means = np.zeros((durs.shape[0],), dtype=np.float32)
    for pos, lo, hi in zip(range(mel_len), boundaries[:-1], boundaries[1:]):
        span = x[lo:hi]
        nonzero = span[np.where(span != 0.0)[0]]
        means[pos] = np.mean(nonzero) if len(nonzero) > 0 else 0.0
    return means.astype(np.float32)
# -
# Pre-computed normalization statistics (indexed [0]/[1] below as mean/std —
# presumed from usage; verify) and the list of mel spectrogram files.
f0_stat = np.load('../speech-bahasa/husein-stats/stats_f0.npy')
energy_stat = np.load('../speech-bahasa/husein-stats/stats_energy.npy')
files = glob('../speech-bahasa/output-husein/mels/*.npy')
# +
reduction_factor = 1  # decoder reduction factor used for guided attention
maxlen = 904          # skip utterances with more mel frames than this
minlen = 32           # ... or fewer than this
pad_to = 8            # pad sequence lengths up to a multiple of this
data_min = 1e-2       # mel floor; log(data_min) is the mel padding value
_pad = 'pad'
_start = 'start'
_eos = 'eos'
_punctuation = "!'(),.:;? "
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# id -> symbol table used to encode the transcripts (index 1 = start, 2 = eos)
MALAYA_SPEECH_SYMBOLS = (
    [_pad, _start, _eos] + list(_special) + list(_punctuation) + list(_letters)
)
def generate(files):
    """Yield one padded training example per mel file in *files*.

    Skips utterances outside [minlen, maxlen] mel frames. Text is encoded
    with MALAYA_SPEECH_SYMBOLS (ids 1/2 = start/eos added around the text),
    and mel / f0 / energy / stop-token arrays are right-padded so lengths
    line up with pad_to. File names arrive as bytes from tf.data.
    """
    for f in files:
        f = f.decode()  # tf.data passes filenames as bytes
        mel = np.load(f)
        mel_length = len(mel)
        if mel_length > maxlen or mel_length < minlen:
            continue
        stop_token_target = np.zeros([len(mel)], dtype = np.float32)
        text_ids = np.load(f.replace('mels', 'text_ids'), allow_pickle = True)[
            0
        ]
        # encode characters, silently dropping any outside the symbol table
        text_input = np.array(
            [
                MALAYA_SPEECH_SYMBOLS.index(c)
                for c in text_ids
                if c in MALAYA_SPEECH_SYMBOLS
            ]
        )
        num_pad = pad_to - ((len(text_input) + 2) % pad_to)
        # surround with start (1) / eos (2) ids, then right-pad with 0
        text_input = np.pad(
            text_input, ((1, 1)), 'constant', constant_values = ((1, 2))
        )
        text_input = np.pad(
            text_input, ((0, num_pad)), 'constant', constant_values = 0
        )
        num_pad = pad_to - ((len(mel) + 1) % pad_to) + 1
        pad_value_mel = np.log(data_min)  # log-mel floor used as padding value
        mel = np.pad(
            mel,
            ((0, num_pad), (0, 0)),
            'constant',
            constant_values = pad_value_mel,
        )
        # stop-token is 1 over the padded tail ("stop here")
        stop_token_target = np.pad(
            stop_token_target, ((0, num_pad)), 'constant', constant_values = 1
        )
        len_mel = [len(mel)]
        len_text_ids = [len(text_input)]
        f0 = np.load(f.replace('mels', 'f0s'))
        num_pad = pad_to - ((len(f0) + 1) % pad_to) + 1
        f0 = np.pad(
            f0,
            ((0, num_pad)),
            'constant',
        )
        f0 = norm_mean_std(f0, f0_stat[0], f0_stat[1])
        len_f0 = [len(f0)]
        energy = np.load(f.replace('mels', 'energies'))
        num_pad = pad_to - ((len(energy) + 1) % pad_to) + 1
        energy = np.pad(
            energy,
            ((0, num_pad)),
            'constant',
        )
        energy = norm_mean_std(energy, energy_stat[0], energy_stat[1])
        len_energy = [len(energy)]
        yield {
            'mel': mel,
            'text_ids': text_input,
            'len_mel': len_mel,
            'len_text_ids': len_text_ids,
            'stop_token_target': stop_token_target,
            'f0': f0,
            'len_f0': len_f0,
            'energy': energy,
            'len_energy': len_energy,
            'f': [f]
        }
def parse(example):
    """Attach the guided-attention matrix 'g' (mel frames x text ids) to one example."""
    mel_len = example['len_mel'][0]
    input_len = example['len_text_ids'][0]
    g = tacotron2.generate_guided_attention(mel_len, input_len, reduction_factor = reduction_factor)
    example['g'] = g
    return example
def get_dataset(files, batch_size = 2, shuffle_size = 2, thread_count = 24):
    """Return a factory building the padded tf.data pipeline over *files*.

    NOTE(review): padded_batch below is given `shuffle_size`, so `batch_size`
    is unused; both default to 2, so behavior is identical at the defaults —
    confirm which argument was intended before relying on either.
    """
    def get():
        dataset = tf.data.Dataset.from_generator(
            generate,
            {
                'mel': tf.float32,
                'text_ids': tf.int32,
                'len_mel': tf.int32,
                'len_text_ids': tf.int32,
                'stop_token_target': tf.float32,
                'f0': tf.float32,
                'len_f0': tf.int32,
                'energy': tf.float32,
                'len_energy': tf.int32,
                'f': tf.string
            },
            output_shapes = {
                'mel': tf.TensorShape([None, 80]),
                'text_ids': tf.TensorShape([None]),
                'len_mel': tf.TensorShape([1]),
                'len_text_ids': tf.TensorShape([1]),
                'stop_token_target': tf.TensorShape([None]),
                'f0': tf.TensorShape([None]),
                'len_f0': tf.TensorShape([1]),
                'energy': tf.TensorShape([None]),
                'len_energy': tf.TensorShape([1]),
                'f': tf.TensorShape([1]),
            },
            args = (files,),
        )
        # attach the per-example guided-attention matrix
        dataset = dataset.map(parse, num_parallel_calls = thread_count)
        dataset = dataset.padded_batch(
            shuffle_size,
            padded_shapes = {
                'mel': tf.TensorShape([None, 80]),
                'text_ids': tf.TensorShape([None]),
                'len_mel': tf.TensorShape([1]),
                'len_text_ids': tf.TensorShape([1]),
                'g': tf.TensorShape([None, None]),
                'stop_token_target': tf.TensorShape([None]),
                'f0': tf.TensorShape([None]),
                'len_f0': tf.TensorShape([1]),
                'energy': tf.TensorShape([None]),
                'len_energy': tf.TensorShape([1]),
                'f': tf.TensorShape([1]),
            },
            padding_values = {
                'mel': tf.constant(0, dtype = tf.float32),
                'text_ids': tf.constant(0, dtype = tf.int32),
                'len_mel': tf.constant(0, dtype = tf.int32),
                'len_text_ids': tf.constant(0, dtype = tf.int32),
                'g': tf.constant(-1.0, dtype = tf.float32),
                'stop_token_target': tf.constant(0, dtype = tf.float32),
                'f0': tf.constant(0, dtype = tf.float32),
                'len_f0': tf.constant(0, dtype = tf.int32),
                'energy': tf.constant(0, dtype = tf.float32),
                'len_energy': tf.constant(0, dtype = tf.int32),
                'f': tf.constant('', dtype = tf.string),
            },
        )
        return dataset
    return get
# -
# Build the TF1 graph: one-shot iterator over the dataset, the Tacotron-2
# model on top of it, then restore the trained weights into a session.
features = get_dataset(files)()
features = features.make_one_shot_iterator().get_next()
input_ids = features['text_ids']
input_lengths = features['len_text_ids'][:, 0]
speaker_ids = tf.constant([0], dtype = tf.int32)
mel_outputs = features['mel']
mel_lengths = features['len_mel'][:, 0]
guided = features['g']
stop_token_target = features['stop_token_target']
batch_size = tf.shape(guided)[0]
model = tacotron2.Model(
    [input_ids, input_lengths],
    [mel_outputs, mel_lengths],
    len(MALAYA_SPEECH_SYMBOLS),
)
r = model.decoder_logits['outputs']
decoder_output, post_mel_outputs, alignment_histories, _, _, _ = r
stop_token_predictions = model.decoder_logits['stop_token_prediction']
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, 'tacotron2-husein/model.ckpt-42000')
import matplotlib.pyplot as plt
def decode(x):
    """Map a sequence of symbol ids back to the transcript string."""
    characters = [MALAYA_SPEECH_SYMBOLS[i] for i in x]
    return ''.join(characters)
def get_duration_from_alignment(alignment):
    """Convert an attention alignment (tokens x frames) into per-token durations.

    Each output frame is assigned to the input token it attends to most
    (first maximum on ties, matching list.index semantics); a token's
    duration is the number of frames assigned to it.
    """
    n_tokens = np.shape(alignment)[0]
    n_frames = np.shape(alignment)[1]
    durations = np.array([0 for _ in range(n_tokens)])
    for frame in range(n_frames):
        winner = int(np.argmax(alignment[:, frame]))
        durations[winner] += 1
    return durations
# Run the restored model over the whole dataset and save one duration array
# per utterance (named after its mel file) into tacotron2-husein-alignment/.
count = 0
while True:
    try:
        o = sess.run([decoder_output, post_mel_outputs, stop_token_predictions, alignment_histories, features])
        f = o[-1]
        for i in range(len(f['f'])):
            file = f['f'][i,0].decode().split('/')[-1]
            file = f'tacotron2-husein-alignment/{file}'
            len_mel = f['len_mel'][i, 0]
            len_text_ids = f['len_text_ids'][i, 0]
            # o[3] = alignment histories; crop away the padded rows/columns
            d = get_duration_from_alignment(o[3][i, :len_text_ids, :len_mel])
            assert d.sum() == len_mel
            np.save(file, d)
        print('done', count)
        count += 1
    # NOTE(review): bare except — presumably meant to stop on the end-of-data
    # OutOfRangeError, but it also hides real failures (including the assert
    # above); narrow it when possible.
    except:
        break
1195 * 2
len(files)
# +
# import pickle
# with open('dataset-mel.pkl', 'wb') as fopen:
#     pickle.dump([o[-1], d], fopen)
# +
# import pickle
# with open('a.pkl', 'wb') as fopen:
#     pickle.dump([np.reshape(o[0][0], [-1, 80]), np.reshape(o[1][0], [-1, 80]), o[-1]['mel'][0]], fopen)
|
pretrained-model/tts/fastspeech2/calculate-alignment-tacotron2-husein.ipynb
|
;; -*- coding: utf-8 -*-
;; ---
;; jupyter:
;; jupytext:
;; text_representation:
;; extension: .scm
;; format_name: light
;; format_version: '1.5'
;; jupytext_version: 1.14.4
;; kernelspec:
;; display_name: Calysto Scheme 3
;; language: scheme
;; name: calysto_scheme
;; ---
;; ### 練習問題2.69
;; 以下の手続きは、引数として記号・頻度ペアのリ スト(同じ記号が⼆つ以上のペアに出てくることはない)を取り、
;; ハフマンアルゴリズムに従ってハフマン符号化木を⽣成する。
;;
;; (define (generate-huffman-tree pairs)
;; (successive-merge (make-leaf-set pairs)))
;;
;; make-leaf-setは、上で記述した、ペアのリストを葉の順序つき集合に変換する⼿続きである。
;; successive-mergeは、集合の中で重みが最小の要素をmake-code-treeを使って順番にくっつけていき、
;; 最後に要素がひとつだけ残るようにするというものである。
;; その要素が求めるハフマン木となる。
;; この手続きを書け
;; (この手続きにはちょっと厄介なところがあるが、
;; そこまで複雑ではない。もし⼿続きの設計が複雑になったとしたら、
;; ほぼ確実に何かを間違えている。
;; 順序つきの集合表現を使っているということが大きな助けになる)
;; +
; Leaf and tree representation for Huffman coding (SICP 2.3.4).
; A leaf is (leaf symbol weight); an internal node is
; (left right symbol-list total-weight).
(define (make-leaf symbol weight) (list 'leaf symbol weight))
(define (leaf? object) (eq? (car object) 'leaf))
(define (symbol-leaf x) (cadr x))
(define (weight-leaf x) (caddr x))
; constructor: an internal node carries both branches, the merged symbol
; set, and the combined weight
(define (make-code-tree left right)
(list left right
(append (symbols left) (symbols right))
(+ (weight left) (weight right))
)
)
; selectors
(define (left-branch tree) (car tree))
(define (right-branch tree) (cadr tree))
(define (symbols tree)
(if (leaf? tree) (list (symbol-leaf tree))
(caddr tree)
)
)
(define (weight tree)
(if (leaf? tree) (weight-leaf tree)
(cadddr tree)
)
)
; Decode a bit list by walking the tree from the root: 0 goes left, 1 goes
; right; at a leaf, emit its symbol and restart from the root.
(define (decode bits tree)
(define (decode-1 bits current-branch)
(if (null? bits) '()
(let ((next-branch (choose-branch (car bits) current-branch)))
(if (leaf? next-branch) (cons (symbol-leaf next-branch) (decode-1 (cdr bits) tree))
(decode-1 (cdr bits) next-branch))
)
)
)
(decode-1 bits tree)
)
; Select the branch for one bit; any value other than 0/1 is an error.
(define (choose-branch bit branch)
(cond ((= bit 0) (left-branch branch))
((= bit 1) (right-branch branch))
(else (error "bad bit: CHOOSE-BRANCH" bit))
)
)
; Insert x into the set keeping it ordered by ascending weight.
(define (adjoin-set x set)
(cond ((null? set) (list x))
((< (weight x) (weight (car set))) (cons x set))
(else (cons (car set) (adjoin-set x (cdr set))))))
; Turn a symbol/frequency pair list into an ordered set of leaves.
(define (make-leaf-set pairs)
(if (null? pairs) '()
(let ((pair (car pairs)))
(adjoin-set (make-leaf (car pair) ; symbol
(cadr pair)) ; weight
(make-leaf-set (cdr pairs)))
)
)
)
;; +
; encoding: find the bit path to one symbol by depth-first search;
; returns '() from iter when the symbol is not under the current subtree
(define (encode-symbol symbol tree)
(define (iter sub result)
(if (leaf? sub)
(if (eq? (symbol-leaf sub) symbol) result
'())
(let ((l (left-branch sub))
(r (right-branch sub)))
(let ((l-result (iter l (append result '(0)))))
(if (not (null? l-result)) l-result
(iter r (append result '(1)))
)
)
)
)
)
(let ((result (iter tree '())))
(if (null? result) (error "bad symbol: ENCODE" symbol)
result
)
)
)
; encode a whole message by concatenating the code of each symbol
(define (encode message tree)
(if (null? message) '()
(append
(encode-symbol (car message) tree)
(encode (cdr message) tree))
)
)
;; -
; note: the leaves come back ordered by ascending weight,
; i.e. reversed relative to the input order here
(make-leaf-set '((A 4) (B 2) (D 1) (C 1)))
;; +
; wrong implementation: merges the leaves in (reversed) list order without
; re-sorting by weight after each merge
(define (successive-merge-ng set)
(define (iter sub)
(cond ((null? sub) '())
((= (length sub) 1) sub)
((= (length sub) 2) (make-code-tree (car sub) (cadr sub)))
(else (make-code-tree (car sub) (iter (cdr sub))))
)
)
(iter (reverse set))
)
(define (generate-huffman-tree-ng pairs)
(successive-merge-ng (make-leaf-set pairs)))
;; -
; with examples that are not very nested, the buggy version appears to work
(define sample-tree1 (generate-huffman-tree-ng '((A 4) (B 2) (D 1) (C 1))))
sample-tree1
(define sample-tree2 (generate-huffman-tree-ng '((A 10) (B 4) (C 1) (D 1) (E 1) (F 1) (G 1) (H 1))))
sample-tree2
; print the code of each symbol under both sample trees
(display (encode '(A) sample-tree1))
(newline)
(display (encode '(B) sample-tree1))
(newline)
(display (encode '(C) sample-tree1))
(newline)
(display (encode '(D) sample-tree1))
(newline)
(display (encode '(A) sample-tree2))
(newline)
(display (encode '(B) sample-tree2))
(newline)
(display (encode '(C) sample-tree2))
(newline)
(display (encode '(D) sample-tree2))
(newline)
(display (encode '(E) sample-tree2))
(newline)
(display (encode '(F) sample-tree2))
(newline)
(display (encode '(G) sample-tree2))
(newline)
(display (encode '(H) sample-tree2))
(newline)
;; +
; Reconsidered answer.
; Check whether a pair with weight w occurs in the list `set`.
; Returns #f when no such pair is present.
; When present, returns a two-element list of the first matching pair
; and the pair immediately after it (these two get merged into a tree).
; NOTE(review): assumes a matching pair is never the last element of
; `set` — (cadr set) would fail otherwise; confirm callers guarantee this.
(define (have-weight? w set)
  ;(display "have-weight? ")
  ;(display w)
  ;(display " ")
  ;(display set)
  ;(display " ")
  ;(newline)
  (cond ((null? set) #f)
        ((not (pair? set))
          ;(if (= (weight set) w) (list set)
          ;    #f
          ;    )
          #f
        )
        (else
          (if (= (weight (car set)) w) (list (car set) (cadr set))
              (have-weight? w (cdr set))
          )
        )
  )
)
; Return a copy of the leaf/tree list `set` with every element equal?
; to `item` removed.
(define (remove-info item set)
  (cond ((null? set) '())
        ((equal? item (car set)) (remove-info item (cdr set)))
        (else (cons (car set) (remove-info item (cdr set))))))
; Answer.
; Build the Huffman tree by repeatedly merging the two lightest
; subtrees: search the working list for the smallest weight w present,
; merge the first pair of that weight with its neighbour, and re-insert
; the merged tree keeping the list roughly weight-ordered.
(define (successive-merge set)
  ; w      - candidate smallest weight currently being searched for
  ; result - remaining list of leaves/subtrees (ascending weight)
  (define (iter w result)
    ;(display result)
    ;(newline)
    (cond ((null? result) '())
          ((= (length result) 1) (car result))
          ;((not (pair? result)) result) ; this does not work
          ;((= (length result) 2) (make-code-tree (cadr result) (car result)))
          (else
            (let ((ll (have-weight? w result)))
              ; No element of weight w remains: retry with the next weight.
              (if (equal? ll #f) (iter (+ w 1) result)
                  (let ((top (car ll))
                        (next (cadr ll)))
                    (let ((new-result (remove-info next (remove-info top result))))
                      (if (null? new-result) (iter w (list (make-code-tree next top)))
                          ;(if (null? new-result) (iter w (list (make-code-tree top next)))
                          ;(if (= (length new-result) 1) (iter w (append (list (make-code-tree next top)) new-result))
                          (let (
                                (new-item (make-code-tree next top))
                                ;(new-item (if (>= (weight next) (weight top)) (make-code-tree next top) (make-code-tree top next)))
                                )
                            ; Insert the merged tree at whichever end keeps
                            ; the working list ordered by weight.
                            (if (<= (weight new-item) (weight (car new-result)))
                                (iter w (append (list new-item) new-result))
                                (iter w (append new-result (list new-item)))
                                ;(iter w (append (list new-item) new-result))
                                )
                            )
                          )
                      ;)
                      )
                  )
              )
          )
    )
  )
  (iter 1 set)
  )
;; Build a Huffman tree from symbol/frequency pairs via successive-merge.
(define (generate-huffman-tree pairs)
  (let ((leaves (make-leaf-set pairs)))
    (successive-merge leaves)))
;; -
(generate-huffman-tree '((A 4) (B 2) (D 1) (C 1)))
; The symbol/frequency pair list from the text for generating the Huffman tree.
; This one does not work correctly:
; the branch weights come out as left < right, so (A 8) ends up in an
; unexpected position.
; -> After fixing, it worked.
; -> Forcing support for this case sometimes crashed the program.
; Robust, non-crashing behavior was treated as correct, and it was accepted
; that the textbook's expected Huffman tree is not produced for this case.
(define sample-tree3 (generate-huffman-tree '((A 8) (B 3) (C 1) (D 1) (E 1) (F 1) (G 1) (H 1))))
sample-tree3
; Frequencies adjusted so that left branch >= right branch.
; -> Now the expected Huffman tree is generated.
(define sample-tree4 (generate-huffman-tree '((A 8) (B 2) (C 1) (D 1) (E 1) (F 1) (G 1) (H 1))))
sample-tree4
; Check behavior with single symbols.
(display (encode '(A) sample-tree4))
(newline)
(display (encode '(B) sample-tree4))
(newline)
(display (encode '(C) sample-tree4))
(newline)
(display (encode '(D) sample-tree4))
(newline)
(display (encode '(E) sample-tree4))
(newline)
(display (encode '(F) sample-tree4))
(newline)
(display (encode '(G) sample-tree4))
(newline)
(display (encode '(H) sample-tree4))
(newline)
(encode '(B A C A D A E A F A B B A A A G A H) sample-tree4)
(decode '(1 0 0 0 1 0 1 0 0 1 0 1 1 0 1 1 0 0 0 1 1 0 1 0 1 0 0 1 0 0 0 0 0 1 1 1 0 0 1 1 1 1) sample-tree4)
|
exercises/2.69.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# ## Exploring COVID Papers
#
# This notebook uses terms data from CORD dataset, processed by Text Analytics for Health and stored in raw JSON format.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import io
# First, let's read all data on papers and corresponding mentions of medications:
df = pd.read_json('../data/PaperMedicationTerms.json.gz',compression='gzip')
df
# Now let's remove those medications that do not have corresponding ontology id, and remove duplicate mentions of each medication in one paper. We will also limit the papers to the time period starting on January 1, 2020, and ending February 1, 2021.
clean_papers = df[(~df['umls_id'].isnull()) & (df['publish_time']>'2020-01-01') & (df['publish_time']<'2021-02-01')]\
.drop_duplicates(subset=['title','umls_id'],keep='first')
clean_papers
# Followed by the list of medications:
medindex = pd.read_json('../data/Medications.json')
medindex = medindex[~medindex['umls_id'].isnull()].drop_duplicates(subset='umls_id',keep='first').set_index('umls_id')
medindex
# Let's find out top medications by number of mentions:
meds = clean_papers.groupby('umls_id').agg({ 'title' : 'count', 'isNegated' : sum })\
.rename(columns={'title' : 'mentions', 'isNegated' : 'negative'})\
.sort_values(by='mentions',ascending=False)\
.merge(medindex,on='umls_id').rename(columns={ 'text' : 'name' })
meds.head(15)
meds['positive'] = meds['mentions']-meds['negative']
meds.iloc[:15].set_index('name')[['positive','negative']].plot(kind='bar',rot=90,stacked=True)
plt.show()
topmeds = {
'C0020336' : 'hydroxychloroquine',
'C4726677' : 'remdesivir',
'C0008269' : 'chloroquine',
'C1609165' : 'tocilizumab',
'C0674432' : 'lopinavir',
'C0292818' : 'ritonavir',
'C0052796' : 'azithromycin',
'C0011777' : 'dexamethasone',
'C0042866' : 'vitamin D',
'C1138226' : 'favipiravir',
'C0021641' : 'insulin',
'C0019134' : 'heparin'
}
# +
clean_papers['month'] = clean_papers['publish_time'].astype('datetime64[M]')
imeds = clean_papers[clean_papers['umls_id'].apply(lambda x: x in topmeds.keys())].copy()
imeds['name'] = imeds['umls_id'].apply(lambda x: topmeds[x])
def positive_count(x):
    # NOTE(review): returns the full value_counts() Series rather than a
    # scalar count of positive (non-negated) mentions, and neither this nor
    # negative_count is used by the .agg() call below — looks unfinished;
    # confirm intent before relying on it.
    return x.value_counts()
def negative_count(x):
    """Count negated mentions in *x*, returned as a negative number.

    *x* is expected to be a Series of 0/1 ``isNegated`` flags.  Returns 0
    when fewer than two distinct values occur (i.e. no negated mentions),
    otherwise minus the number of entries labelled ``1``.
    """
    # Bug fix: the original body started with an unconditional
    # `return x.value_counts()`, which made the intended computation below
    # unreachable; that stray early return has been removed.
    c = x.value_counts()
    return 0 if len(c) < 2 else -c[1]
medhist = imeds.groupby(['month','name']).agg({'text' : 'count', 'isNegated' : 'sum' }).rename(columns={'text' : 'mentions', 'isNegated' : 'negative' })
medhist['positive'] = medhist['mentions']-medhist['negative']
medhist
# -
mnthlymentions = clean_papers.groupby('month').agg({'text' : 'count' }).rename(columns={'text' : 'mcount'})
mnthlymentions
# Now let's compute the frequency of overall publications by month. To do this, we need to load original publication dataset:
pubs = pd.read_csv('../data/metadata.csv')
pubs['publish_time'] = pd.to_datetime(pubs['publish_time'])
pubs['month'] = pubs['publish_time'].astype('datetime64[M]')
pubs = pubs[(pubs['publish_time']>'2020-01-01') & (pubs['publish_time']<'2021-02-01')]
mnthlypubs = pubs.groupby('month').agg({'title' : 'count' }).rename(columns={'title' : 'mcount'})
mnthlypubs
# Relative mentions: for each month, normalize that month's raw mention
# counts by the total number of publications in the same month.
medhist['relmentions'] = 0.0
for d in mnthlymentions.index:
    # medhist.loc[d, 'mentions'] / mnthlymentions.loc[d, 'mcount'] - normalize by mentions
    # Bug fix: the original used chained indexing
    # (medhist.loc[d]['relmentions'] = ...), which assigns into a temporary
    # copy (SettingWithCopy) and may silently leave the frame unchanged;
    # a single .loc[row, column] assignment updates medhist in place.
    medhist.loc[d, 'relmentions'] = medhist.loc[d, 'mentions'] / mnthlypubs.loc[d, 'mcount']  # - normalize by # of pubs
medhist
medh = medhist.reset_index()
medh['relmentions_pct'] = medh['relmentions'] * 100
fig,ax = plt.subplots(3,4,figsize=(15,10))
for i,n in enumerate(topmeds.keys()):
medh[medh['name']==topmeds[n]].set_index('month')['relmentions_pct'].plot(title=topmeds[n],ax=ax[i//4,i%4],rot=90)
fig.tight_layout()
plt.show()
df = pd.read_json('../data/PaperEntities.json.gz',compression='gzip')
df
|
notebooks/ExplorePaperTerms.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="_d4uCpilVg_i"
# # Pneumonia Detection in chest X-ray images using CNNs and Transfer Learning
# + [markdown] id="gkUe_YtcQRSK"
# [](https://colab.research.google.com/github/mariostrbac/pneumonia-detection-in-chest-xrays/blob/main/notebooks/xray_pneumonia_detection.ipynb)
# + [markdown] id="qXbS4taCMdFd"
# * [1. Download and extract the dataset file](#first-bullet)
# * [2. Data preparation ](#second-bullet)
# * [3. Visualize the dataset](#third-bullet)
# * [4. CNN model - VGG19 ](#fourth-bullet)
# * [5. Train the model](#fifth-bullet)
# * [6. Results](#sixth-bullet)
#
# + [markdown] id="vjLLIfqBVx7Z"
# > References:
# > * [Pneumonia Detection in chest X-ray images using Convolutional Neural Networks and Transfer Learning](https://www.sciencedirect.com/science/article/abs/pii/S0263224120305844)
# > * [Transfer learning & fine-tuning - Keras](https://keras.io/guides/transfer_learning/)
# > * [Classification on imbalanced data - TensorFlow](https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#class_weights)
# + id="j5tAiQqJVdGp"
import os
import sys
import itertools
import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
from PIL import Image
import tensorflow as tf
import keras
from keras import optimizers
from keras.applications import VGG19
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/"} id="gsyEghBoPQiQ" outputId="62724b00-eb03-4980-951c-22f0940b13e7"
ROOT_DIR = "chest-xray-pneumonia-classification"
IN_GOOGLE_COLAB = True
if IN_GOOGLE_COLAB:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
# change the current working directory
# %cd gdrive/'My Drive'
# create a root directory if there's none
if not os.path.isdir(ROOT_DIR):
# %mkdir $ROOT_DIR
# change the current working directory
# %cd $ROOT_DIR
# + [markdown] id="JNqwhfNxEt9q"
# ---
# + [markdown] id="NqKRP5eWEn14"
# # 1. Download and extract the dataset file <a name="first-bullet"></a>
# + [markdown] id="2Rh7T0bsEx-N"
# Get the dataset from [Mendeley](https://data.mendeley.com/datasets/rscbjbr9sj/2) or [Kaggle](https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia).
# + id="MOB-NztK_YUO"
DATASET_URL = "https://data.mendeley.com/public-files/datasets/rscbjbr9sj/files/f12eaf6d-6023-432f-acc9-80c9d7393433/file_downloaded"
DOWNLOAD_DATASET = False
EXTRACT_DATASET = False
if DOWNLOAD_DATASET:
# !wget $DATASET_URL
if EXTRACT_DATASET:
# !unzip file_downloaded
# + [markdown] id="QSsRFJYFAYyU"
# ---
# + [markdown] id="jdtnWXhB7SOC"
# # 2. Data preparation <a name="second-bullet"></a>
# + [markdown] id="HgrP9hSU8X13"
# ## Build a Pandas DataFrame from the dataset directory
# + [markdown] id="nfeUYM_r7WOp"
# The relatively small size of the dataset makes it possible to preload the entire data into memory. In this case, the benefit of fast access to the data during training is much more significant than the expense of increased memory usage.
# + colab={"base_uri": "https://localhost:8080/"} id="aFQV_ilUzOV4" outputId="b4c90d1a-bbd5-4f29-b630-92cb14a2171c"
# input image shape
IMAGE_SIZE = (64, 64)
DATASET_PATH = os.path.abspath('chest_xray/')
# dict for encoding the dataset labels
classID = {'normal': 0, 'pneumonia': 1}
dataset = []
# iterate over the test and train sets
for fold_dir in glob(os.path.join(DATASET_PATH, '*')):
fold = fold_dir.split('/')[-1].lower()
print('\n> ' + fold)
# iterate over classes in a set
for class_dir in glob(os.path.join(fold_dir, '*')):
label = class_dir.split('/')[-1].lower()
print(label)
# iterate over images in a class
for filename in tqdm(os.listdir(class_dir), file=sys.stdout):
if filename.endswith(".jpeg"):
# get additional info about the type of pneumonia
if label == 'pneumonia':
info = filename.split("_")[1] # bacterial or virus
else:
info = 'n.a.d.' # no abnormalities detected
img_file_path = os.path.join(class_dir, filename)
# resize and convert the image in advance to a pre-defined shape and the RGB color space
image = np.array(Image.open(img_file_path).resize(IMAGE_SIZE).convert('RGB'))
dataset.append([image, info, label, classID[label], fold])
# convert to a Pandas DataFrame
cxr_df = pd.DataFrame(dataset, columns=['image', 'info', 'label', 'classID', 'fold']) # chest x-ray dataframe
cxr_df = cxr_df.astype({'info': 'category', 'label': 'category', 'classID': 'uint8', 'fold': 'category'})
# + [markdown] id="6capN5kXPhkY"
# ## Store the data
# + id="eWTlpz1MlZtz"
STORE_DATA = False
if STORE_DATA:
cxr_df.to_pickle('cxr_df.pkl')
# + [markdown] id="0KmcEzkOPkOG"
# ## Load the data
# + id="jiYVDkacCQiT"
LOAD_DATA = True
if LOAD_DATA:
cxr_df = pd.read_pickle('cxr_df.pkl')
# + [markdown] id="U14HL-qo9K4K"
# # 3. Visualize the dataset <a name="third-bullet"></a>
# + [markdown] id="-z0UfW97M5iM"
# ## Show the DataFrame object
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="sX2pSbq4rh_G" outputId="a2c48af9-8fc9-4ac4-953d-554338c1078b"
cxr_df
# + colab={"base_uri": "https://localhost:8080/"} id="TgpYwGny6f4z" outputId="f446fe34-bf67-4256-9559-0ca8346ed41f"
cxr_df.info(memory_usage='deep')
# + [markdown] id="Is2nCIFE_waC"
# ## Data distribution
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="z3qeZK4erI5t" outputId="36fc3b7a-d170-4247-c2aa-ab6a28d74b34"
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
sns.countplot(x = cxr_df[cxr_df['fold'] == 'train']['label'])
plt.title('Train data')
plt.subplot(1,2,2)
sns.countplot(x = cxr_df[cxr_df['fold'] == 'test']['label'])
plt.title('Test data')
plt.savefig('class_distribution_sns.png')
plt.show()
# + [markdown] id="srpYRXCFAHEV"
# ## Type distribution
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="7D7HWmXfucCz" outputId="05a420e6-3157-43d6-b29e-436a59e98447"
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
sns.countplot(x = cxr_df[(cxr_df.fold == 'train')]['info'])
plt.title('Train data')
plt.subplot(1,2,2)
sns.countplot(x = cxr_df[(cxr_df.fold == 'test')]['info'])
plt.title('Test data')
plt.savefig('type_distribution_.png')
plt.show()
# + [markdown] id="IeprKk4qA7dq"
# ## Show images in the dataset
# + [markdown] id="5fAF0ZxLBYRB"
# The helper function for showing a batch of X-ray images from the dataset.
# + id="a-TrmiuIJ40h"
def show_batch(batch, size, hspace=0.28, wspace=0.2, title=None):
    """Display a grid of (image, label) pairs from `batch`.

    `size` is a (rows, cols) tuple; the first rows*cols entries of `batch`
    are drawn, each entry being an (image, title) pair.  `hspace`/`wspace`
    control subplot spacing and `title` adds an optional bold suptitle.
    """
    rows, cols = size
    plt.figure(figsize=(10, 10))
    if title is not None:
        plt.suptitle(title, weight='bold', y=0.96)
    for idx, (image, label) in enumerate(batch[:rows * cols]):
        plt.subplot(rows, cols, idx + 1)
        plt.imshow(image)
        plt.title(label)
        plt.subplots_adjust(wspace=wspace, hspace=hspace)
        plt.axis("off")
# + colab={"base_uri": "https://localhost:8080/", "height": 591} id="5qawae8DLtvD" outputId="9c2d2dfd-85a8-4790-beb5-337185b9af47"
# random batch from the dataset
sample_batch = cxr_df[['image', 'label']].sample(frac=1).to_numpy()
show_batch(sample_batch, size=(4, 4), hspace=0.2, wspace=0.1)
plt.savefig('sample_batch.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 227} id="5yb-IqWFtev_" outputId="dec81ba3-67f9-467f-b862-d9923198a7d2"
# get different types of images from the dataset (virus, bacterial, n.a.d.)
pneumonia_virus_xray = cxr_df[(cxr_df['label']=='pneumonia') & (cxr_df['info']=='virus')].iloc[0][['image', 'info']]
pneumonia_bacteria_xray = cxr_df[(cxr_df['label']=='pneumonia') & (cxr_df['info']=='bacteria')].iloc[0][['image', 'info']]
normal_xray = cxr_df[(cxr_df['label']=='normal')].iloc[0][['image', 'info']]
# show images
sample_batch = [pneumonia_virus_xray, pneumonia_bacteria_xray, normal_xray]
show_batch(sample_batch, size=(1,3), wspace=0.05)
plt.savefig('different_types.png')
# + [markdown] id="kENiIMhvVNs7"
# ---
# + [markdown] id="6TH6DFmrVPan"
# # 4. CNN model - VGG19 <a name="fourth-bullet"></a>
# + [markdown] id="EmwZPn_XKeZl"
# ## Initialize the model
# + id="fE5ZFZqXVOVh"
# input image size
IMAGE_SIZE = (64, 64, 3)
# instantiate a base model with pre-trained weights
vgg19 = VGG19(weights='imagenet', # load weights pre-trained on ImageNet
include_top=False, # do not include the ImageNet classifier at the top
input_shape=IMAGE_SIZE)
# freeze the base model
vgg19.trainable = False
# create a new model on top
inputs = keras.Input(shape=IMAGE_SIZE)
x = vgg19(inputs, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
# + [markdown] id="WsrxMBF2KzyD"
# ## Architecture of the base model
# + colab={"base_uri": "https://localhost:8080/"} id="9qH6BRSGKvIq" outputId="3fd0d04e-1c5d-48f3-fd6f-e46e8d9bea78"
vgg19.summary()
# + [markdown] id="VqHJ_MJsLTnE"
# ## Architecture of the model
# + colab={"base_uri": "https://localhost:8080/"} id="C7Tq7d8KKqKF" outputId="65166056-4597-4e9c-a71c-d0a320f83517"
model.summary()
# + [markdown] id="URZbOTVzLg3r"
# ---
# + [markdown] id="hcOynv-dKF5n"
# # 5. Train the model <a name="fifth-bullet"></a>
# + [markdown] id="6s2YRIYzSzpe"
# ## Split the data
# + id="mdNGaWkARTvp"
# split the data into train and test sets (predefined)
train_cxr_df = cxr_df[cxr_df['fold']=='train'][['image', 'classID']]
test_cxr_df = cxr_df[cxr_df['fold']=='test'][['image', 'classID']]
# split the train data into train and validation sets
train_cxr_df, val_cxr_df = train_test_split(train_cxr_df, test_size=0.2, shuffle=True)
# split the sets into examples and labels
X_train = np.stack(train_cxr_df['image'].to_numpy())
y_train = train_cxr_df['classID'].to_numpy()
X_val = np.stack(val_cxr_df['image'].to_numpy())
y_val = val_cxr_df['classID'].to_numpy()
X_test = np.stack(test_cxr_df['image'].to_numpy())
y_test = test_cxr_df['classID'].to_numpy()
# + [markdown] id="9bdbZJLzvBZ5"
# ## Class weights
# + colab={"base_uri": "https://localhost:8080/"} id="syrZg1fieaPj" outputId="17c8eec3-7513-49da-dfd8-ccc09ac6ac54"
pos, neg = train_cxr_df.classID.value_counts().to_numpy()
weight_for_0 = (1 / neg)*(pos+neg)/2.0
weight_for_1 = (1 / pos)*(pos+neg)/2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print('Weight for class 0: {:.2f}'.format(weight_for_0))
print('Weight for class 1: {:.2f}'.format(weight_for_1))
# + [markdown] id="TwkajXkXS3tT"
# ## Image data preprocessing
# + id="yvJwuJAsLqvJ"
BATCH_SIZE = 32
datagen = ImageDataGenerator(rescale=1.0/255.0)
train_it = datagen.flow(X_train,
y=y_train,
batch_size=BATCH_SIZE,
shuffle=True)
val_it = datagen.flow(X_val,
y=y_val,
batch_size=BATCH_SIZE,
shuffle=False)
test_it = datagen.flow(X_test,
y=y_test,
batch_size=BATCH_SIZE,
shuffle=False)
# + [markdown] id="rkUfjPZITCqX"
# ## Model training
# + [markdown] id="1Ul_mjk4Ugqf"
# ### a) Train only the trainable layers on top
# + colab={"base_uri": "https://localhost:8080/"} id="ea5oVKcWsB_j" outputId="92b3a1d8-ebe3-44ab-d502-ee406d98c30f"
EPOCHS = 25
# configure the model for training (Adam, Binary Cross-Entropy, Accuracy)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-4),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy()])
# train only the trainable layers
history1 = model.fit(train_it, epochs=EPOCHS, validation_data=val_it, class_weight=class_weight)
# + [markdown] id="vyzUmgawwWiE"
# #### Evaluate the model on the test set
# + colab={"base_uri": "https://localhost:8080/"} id="Sh4eMFpUtnOv" outputId="da6a8f70-4a66-49b9-e035-1e23f6d4b714"
model.evaluate(test_it, return_dict=True)
# + [markdown] id="m6oOhJA4UCc-"
# ### b) Fine-tune the model
# + colab={"base_uri": "https://localhost:8080/"} id="sVO2XzODtyIk" outputId="c70030ab-8e5d-48d3-e964-a8bce9492d85"
EPOCHS_FT = 1
# unfreeze the base model
vgg19.trainable = True
# recompile the model with a smaller learning rate value
model.compile(optimizer=keras.optimizers.Adam(1e-5),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy()])
# train end-to-end
history2 = model.fit(train_it, epochs=EPOCHS_FT, batch_size=BATCH_SIZE, validation_data=val_it, class_weight=class_weight)
# + [markdown] id="EOU3VHw6yIYH"
# #### Evaluate the fine-tuned model on the test set
# + colab={"base_uri": "https://localhost:8080/"} id="427MIBgDuxzQ" outputId="ffab9536-2b13-4528-cc01-435258c7a7b6"
model.evaluate(test_it, return_dict=True)
# + [markdown] id="8OZbTj0WMAmk"
# ---
# + [markdown] id="4_D8AD87L65H"
# # 6. Results <a name="sixth-bullet"></a>
# + [markdown] id="UAougZ5_8I4l"
# ## Helper functions
# + [markdown] id="1ptFjNx98QR1"
# The function for plotting accuracy and loss graphs for train and validation sets.
# + id="qo8NrPxS201_"
def show_history(history, figsize=(15,5), save_fig=True, print_results=True):
    """Show accuracy and loss graphs for the train and validation sets.

    :param history: dict of per-epoch metric lists, as in `model.fit(...).history`
                    (keys: binary_accuracy, val_binary_accuracy, loss, val_loss)
    :param figsize: overall figure size passed to plt.figure
    :param save_fig: if True, also save the figure as 'acc_loss_graphs.png'
    :param print_results: if True, print best train/validation accuracy and loss
    """
    df = pd.DataFrame(history)
    sns.set()
    plt.figure(figsize=figsize)
    # accuracy graph (train vs. validation, one line each)
    plt.subplot(1,2,1)
    g = sns.lineplot(data=df[['binary_accuracy', 'val_binary_accuracy']], dashes=False)
    g.set(title='Model Accuracy', xlabel='Epoch', ylabel='Accuracy')
    g.legend(labels=['train', 'validation'])
    # loss graph (train vs. validation, one line each)
    plt.subplot(1,2,2)
    g = sns.lineplot(data=df[['loss', 'val_loss']], dashes=False)
    g.set(title='Model loss', xlabel='Epoch', ylabel='Loss')
    g.legend(labels=['train', 'validation'])
    plt.subplots_adjust(wspace=0.23)
    if save_fig:
        plt.savefig('acc_loss_graphs.png')
    plt.show()
    if print_results:
        # Tabs align the two columns; \033[0m resets terminal styling.
        print('\n\tMax train accuracy: %.4f %%\033[0m\t\t\t\tMin train loss: %.5f' % (df.binary_accuracy.max() * 100,
                                                                                      df.loss.min()))
        print('\tMax validation accuracy: %.4f %%\t\t\tMin validation loss: %.5f' % (df.val_binary_accuracy.max() * 100,
                                                                                     df.val_loss.min()))
# + [markdown] id="gCZeh4EMeVhb"
# ---
# + [markdown] id="v95TruWyeOep"
# ## a) Accuracy and loss graphs
# + colab={"base_uri": "https://localhost:8080/", "height": 407} id="-wy99i51vAbP" outputId="080f9ddf-481c-4ea8-e794-a7942fa2755f"
# append the fine-tuning history to the previous history
history1.history['binary_accuracy'] += history2.history['binary_accuracy']
history1.history['val_binary_accuracy'] += history2.history['val_binary_accuracy']
history1.history['loss'] += history2.history['loss']
history1.history['val_loss'] += history2.history['val_loss']
show_history(history1.history)
# + [markdown] id="GjkluQ5B9bLa"
# ## b) Confusion matrix
# + id="HDZy60n6PJaV"
labels = ['normal', 'pneumonia']
y_true = y_test
y_pred = tf.round(model.predict(x=test_it, batch_size=BATCH_SIZE)).numpy().T[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 410} id="QkcqW5PMzeod" outputId="2d3091d0-4dad-4e1f-d12d-6a38eef7f6ab"
# compute confusion matrices
cm = confusion_matrix(y_true=y_test, y_pred=y_pred)
cm_normalized = confusion_matrix(y_true=y_test, y_pred=y_pred, normalize='true')
# plot matrices and save the fig
plt.figure(figsize=(15,6))
for i, cmx in enumerate([cm_normalized, cm]):
plt.subplot(1,2,i+1)
cmx_df = pd.DataFrame(cmx, columns=labels, index=labels).round(2)
g = sns.heatmap(cmx_df, annot=True, fmt='g', cmap='Blues')
g.set(title='Confusion Matrix', xlabel='Predicted label', ylabel='True label')
plt.subplots_adjust(wspace=0.25)
plt.savefig('confusion_matrices.png')
plt.show()
# + [markdown] id="PDWWSkBN9mr4"
# ## c) Precision, Recall and F1-score
# + colab={"base_uri": "https://localhost:8080/", "height": 485} id="4PpKM1ZP0Cbo" outputId="cc2e6e30-6fec-4566-a491-478971b72903"
# compute precision, recall and f1-score
report = classification_report(y_true, y_pred, target_names=['normal', 'pneumonia'], output_dict=True)
report = pd.DataFrame(report).iloc[:-1, :].T
# plot the classification report
plt.figure(figsize=(10,8))
sns.heatmap(report, annot=True, cmap='YlOrRd')
plt.yticks(va='center')
plt.savefig('classifcation_report.png')
plt.show()
# + [markdown] id="hrsa2SM61yIf"
# ## d) Plot top losses
#
# + [markdown] id="ORvG7DfL04hn"
# The function for plotting the samples with the highest loss in the dataset.
# + id="WD48eN8uGCoJ"
def plot_top_losses(top_k, X, y_true, predict, loss, labels, **kwargs):
    """Show the `top_k` examples with the highest loss in the dataset.

    :param top_k: number of worst examples to display
    :param X: input examples (already preprocessed/rescaled)
    :param y_true: true labels, shape (n, 1)
    :param predict: model prediction function (e.g. model.predict)
    :param loss: per-example loss function (reduction=NONE)
    :param labels: class-index -> class-name list
    :param kwargs: extra keyword arguments forwarded to show_batch
    """
    y_pred = predict(x=X, batch_size=BATCH_SIZE)
    loss_values = np.array(loss(y_true, y_pred))
    # Indices of the top_k largest losses, worst first.
    top_k_idxs = loss_values.argsort()[-top_k:][::-1]
    worst_batch = []
    for idx in top_k_idxs:
        # Probability the model assigned to this example's TRUE class.
        # Bug fix: the original read the global `y_test` here instead of the
        # `y_true` parameter, which broke the function for any other dataset.
        yt = float(y_true[idx])
        true_prob = yt * y_pred[idx] + (1 - yt) * (1 - y_pred[idx])
        classID = int(yt)
        title = '%s / %.3f / %.4f' % (labels[classID], loss_values[idx], true_prob)
        worst_batch.append((X[idx], title))
    show_batch(worst_batch, title='Actual/Loss/Prediction_Probability', **kwargs)
# + colab={"base_uri": "https://localhost:8080/", "height": 632} id="Ub0dO5Qu0dlL" outputId="3f14cac6-45e5-40ce-ff9f-1dbd0bbf3380"
loss_f = keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
plot_top_losses(top_k=9,
X=X_test/255.0,
y_true=y_test.reshape(-1, 1),
predict=model.predict,
loss=loss_f,
labels=labels,
size=(3,3))
plt.savefig('top_losses.png', dpi=350)
|
xray_pneumonia_detection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 5: Logistic Classification
# Author: <NAME> (이승재)
# <div class="alert alert-warning">
# We use elemental PyTorch to implement linear regression here. However, in most actual applications, abstractions such as <code>nn.Module</code> or <code>nn.Linear</code> are used. You can see those implementations near the end of this notebook.
# </div>
# ## Reminder: Logistic Regression
# ### Hypothesis
# $$ H(X) = \frac{1}{1+e^{-W^T X}} $$
# ### Cost
# $$ cost(W) = -\frac{1}{m} \sum y \log\left(H(x)\right) + (1-y) \left( \log(1-H(x) \right) $$
# - If $y \simeq H(x)$, cost is near 0.
# - If $y \neq H(x)$, cost is high.
# ### Weight Update via Gradient Descent
# $$ W := W - \alpha \frac{\partial}{\partial W} cost(W) $$
# - $\alpha$: Learning rate
# ## Imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# For reproducibility
torch.manual_seed(1)
# ## Training Data
x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]
# Consider the following classification problem: given the number of hours each student spent watching the lecture and working in the code lab, predict whether the student passed or failed a course. For example, the first (index 0) student watched the lecture for 1 hour and spent 2 hours in the lab session ([1, 2]), and ended up failing the course ([0]).
x_train = torch.FloatTensor(x_data)
y_train = torch.FloatTensor(y_data)
# As always, we need these data to be in `torch.Tensor` format, so we convert them.
print(x_train.shape)
print(y_train.shape)
# ## Computing the Hypothesis
# $$ H(X) = \frac{1}{1+e^{-W^T X}} $$
# PyTorch has a `torch.exp()` function that resembles the exponential function.
print('e^1 equals: ', torch.exp(torch.FloatTensor([1])))
# We can use it to compute the hypothesis function conveniently.
W = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
hypothesis = 1 / (1 + torch.exp(-(x_train.matmul(W) + b)))
print(hypothesis)
print(hypothesis.shape)
# Or, we could use `torch.sigmoid()` function! This resembles the sigmoid function:
print('1/(1+e^{-1}) equals: ', torch.sigmoid(torch.FloatTensor([1])))
# Now, the code for hypothesis function is cleaner.
hypothesis = torch.sigmoid(x_train.matmul(W) + b)
print(hypothesis)
print(hypothesis.shape)
# ## Computing the Cost Function (Low-level)
# $$ cost(W) = -\frac{1}{m} \sum y \log\left(H(x)\right) + (1-y) \left( \log(1-H(x) \right) $$
# We want to measure the difference between `hypothesis` and `y_train`.
print(hypothesis)
print(y_train)
# For one element, the loss can be computed as follows:
-(y_train[0] * torch.log(hypothesis[0]) +
(1 - y_train[0]) * torch.log(1 - hypothesis[0]))
# To compute the losses for the entire batch, we can simply input the entire vector.
losses = -(y_train * torch.log(hypothesis) +
(1 - y_train) * torch.log(1 - hypothesis))
print(losses)
# Then, we just `.mean()` to take the mean of these individual losses.
cost = losses.mean()
print(cost)
# ## Computing the Cost Function with `F.binary_cross_entropy`
# In reality, binary classification is used so often that PyTorch has a simple function called `F.binary_cross_entropy` implemented to lighten the burden.
F.binary_cross_entropy(hypothesis, y_train)
# ## Training with Low-level Binary Cross Entropy Loss
x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]
x_train = torch.FloatTensor(x_data)
y_train = torch.FloatTensor(y_data)
# +
# 모델 초기화
W = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# optimizer 설정
optimizer = optim.SGD([W, b], lr=1)
nb_epochs = 1000
for epoch in range(nb_epochs + 1):
# Cost 계산
hypothesis = torch.sigmoid(x_train.matmul(W) + b) # or .mm or @
cost = -(y_train * torch.log(hypothesis) +
(1 - y_train) * torch.log(1 - hypothesis)).mean()
# cost로 H(x) 개선
optimizer.zero_grad()
cost.backward()
optimizer.step()
# 100번마다 로그 출력
if epoch % 100 == 0:
print('Epoch {:4d}/{} Cost: {:.6f}'.format(
epoch, nb_epochs, cost.item()
))
# -
# ## Training with `F.binary_cross_entropy`
# +
# 모델 초기화
W = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# optimizer 설정
optimizer = optim.SGD([W, b], lr=1)
nb_epochs = 1000
for epoch in range(nb_epochs + 1):
# Cost 계산
hypothesis = torch.sigmoid(x_train.matmul(W) + b) # or .mm or @
cost = F.binary_cross_entropy(hypothesis, y_train)
# cost로 H(x) 개선
optimizer.zero_grad()
cost.backward()
optimizer.step()
# 100번마다 로그 출력
if epoch % 100 == 0:
print('Epoch {:4d}/{} Cost: {:.6f}'.format(
epoch, nb_epochs, cost.item()
))
# -
# ## Loading Real Data
import numpy as np
xy = np.loadtxt('data-03-diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
x_train = torch.FloatTensor(x_data)
y_train = torch.FloatTensor(y_data)
print(x_train[0:5])
print(y_train[0:5])
# ## Training with Real Data using low-level Binary Cross Entropy Loss
# +
# 모델 초기화
W = torch.zeros((8, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# optimizer 설정
optimizer = optim.SGD([W, b], lr=1)
nb_epochs = 100
for epoch in range(nb_epochs + 1):
# Cost 계산
hypothesis = torch.sigmoid(x_train.matmul(W) + b) # or .mm or @
cost = -(y_train * torch.log(hypothesis) + (1 - y_train) * torch.log(1 - hypothesis)).mean()
# cost로 H(x) 개선
optimizer.zero_grad()
cost.backward()
optimizer.step()
# 10번마다 로그 출력
if epoch % 10 == 0:
print('Epoch {:4d}/{} Cost: {:.6f}'.format(
epoch, nb_epochs, cost.item()
))
# -
# ## Training with Real Data using `F.binary_cross_entropy`
# +
# 모델 초기화
W = torch.zeros((8, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# optimizer 설정
optimizer = optim.SGD([W, b], lr=1)
nb_epochs = 100
for epoch in range(nb_epochs + 1):
# Cost 계산
hypothesis = torch.sigmoid(x_train.matmul(W) + b) # or .mm or @
cost = F.binary_cross_entropy(hypothesis, y_train)
# cost로 H(x) 개선
optimizer.zero_grad()
cost.backward()
optimizer.step()
# 10번마다 로그 출력
if epoch % 10 == 0:
print('Epoch {:4d}/{} Cost: {:.6f}'.format(
epoch, nb_epochs, cost.item()
))
# -
# ## Checking the Accuracy of our Model
# After we finish training the model, we want to check how well our model fits the training set.
hypothesis = torch.sigmoid(x_train.matmul(W) + b)
print(hypothesis[:5])
# We can change **hypothesis** (real number from 0 to 1) to **binary predictions** (either 0 or 1) by comparing them to 0.5.
prediction = hypothesis >= torch.FloatTensor([0.5])
print(prediction[:5])
# Then, we compare it with the correct labels `y_train`.
print(prediction[:5])
print(y_train[:5])
correct_prediction = prediction.float() == y_train
print(correct_prediction[:5])
# Finally, we can calculate the accuracy by counting the number of correct predictions and dividing by total number of predictions.
accuracy = correct_prediction.sum().item() / len(correct_prediction)
print('The model has an accuracy of {:2.2f}% for the training set.'.format(accuracy * 100))
# ## Optional: High-level Implementation with `nn.Module`
class BinaryClassifier(nn.Module):
    """Logistic-regression classifier: a single linear layer (8 -> 1)
    followed by a sigmoid, producing P(y = 1 | x)."""

    def __init__(self):
        super().__init__()
        # Attribute names kept as `linear`/`sigmoid` so state_dict keys
        # match the original implementation.
        self.linear = nn.Linear(8, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        logits = self.linear(x)
        return self.sigmoid(logits)
model = BinaryClassifier()
# +
# optimizer 설정
optimizer = optim.SGD(model.parameters(), lr=1)
nb_epochs = 100
for epoch in range(nb_epochs + 1):
# H(x) 계산
hypothesis = model(x_train)
# cost 계산
cost = F.binary_cross_entropy(hypothesis, y_train)
# cost로 H(x) 개선
optimizer.zero_grad()
cost.backward()
optimizer.step()
# 20번마다 로그 출력
if epoch % 10 == 0:
prediction = hypothesis >= torch.FloatTensor([0.5])
correct_prediction = prediction.float() == y_train
accuracy = correct_prediction.sum().item() / len(correct_prediction)
print('Epoch {:4d}/{} Cost: {:.6f} Accuracy {:2.2f}%'.format(
epoch, nb_epochs, cost.item(), accuracy * 100,
))
|
Lesson-05_Logistic_Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from six.moves import cPickle as pickle
# from six.moves import range
# Load the preprocessed SVHN test set produced earlier by the training notebook.
pickle_file = './model/SVHN_multi.pickle'
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
    test_dataset = save['test_dataset']
    test_labels = save['test_labels']
    del save # hint to help gc free up memory
print('Test set', test_dataset.shape, test_labels.shape)
# +
def LecunLCN(X, image_shape, threshold=1e-4, radius=7, use_divisor=True):
    """
    Local Contrast Normalization (LeCun-style).

    Subtracts a Gaussian-weighted local mean from every pixel and optionally
    divides by the Gaussian-weighted local standard deviation.

    :param X: input batch of images
    :param image_shape: [batch_size, image_size, image_size, num_channels];
        only image_shape[3] (the channel count) is read here
    :param threshold: lower bound on the divisor, avoids division by ~0
    :param radius: side length of the square Gaussian kernel
    :param use_divisor: if True, also perform the divisive normalization step
    :return: normalized tensor with the same shape as X
    """
    # Build one normalized Gaussian averaging filter per input channel.
    filter_shape = (radius, radius, image_shape[3], 1)
    filters, filters_asarray = gaussian_filter(filter_shape)
    X = tf.convert_to_tensor(X, dtype=tf.float32)
    # Gaussian-weighted local average by means of convolution.
    convout = tf.nn.conv2d(X, filters, [1, 1, 1, 1], 'SAME')
    # Subtractive step: remove the local mean.
    centered_X = tf.subtract(X, convout)
    if use_divisor:
        # Local variance is the Gaussian-weighted average of the squared,
        # centered values; its square root is the local standard deviation.
        sum_sqr_XX = tf.nn.conv2d(tf.square(centered_X), filters, [1, 1, 1, 1], 'SAME')
        denom = tf.sqrt(sum_sqr_XX)
        # Cap the divisor from below by the per-image mean deviation (so flat
        # regions are not blown up) and by `threshold` (no division by zero).
        per_img_mean = tf.reduce_mean(denom)
        divisor = tf.maximum(per_img_mean, denom)
        # Divisive step
        new_X = tf.truediv(centered_X, tf.maximum(divisor, threshold))
    else:
        new_X = centered_X
    return new_X
def gaussian_filter(kernel_shape, ax=None):
    """
    Build a normalized 2-D Gaussian filter bank for per-channel convolution.

    :param kernel_shape: (height, width, in_channels, 1) filter shape
    :param ax: unused; now defaults to None because the only call site
        (LecunLCN) passes a single argument -- the previously required
        parameter made that call raise TypeError
    :return: (TF tensor of the filter, the same filter as a numpy array)
    """
    # The Gaussian filter of the desired size initialized to zero
    filter_ = np.zeros(kernel_shape, dtype=float)
    mid = np.floor(kernel_shape[0] / 2.)  # middle of the Gaussian kernel window
    for kernel_idx in range(0, kernel_shape[2]):  # one copy per input channel
        for i in range(0, kernel_shape[0]):  # rows of the weighting window
            for j in range(0, kernel_shape[1]):  # columns of the weighting window
                filter_[i, j, kernel_idx, 0] = gauss(i - mid, j - mid)
    # Normalize so the weights sum to 1 (a weighted average).
    filter_ = filter_ / np.sum(filter_)
    return tf.convert_to_tensor(filter_, dtype=tf.float32), filter_
def gauss(x, y, sigma=3.0):
    """Value of the isotropic 2-D Gaussian density at offset (x, y)."""
    variance = sigma ** 2
    norm_const = 2 * np.pi * variance
    return np.exp(-(x ** 2 + y ** 2) / (2. * variance)) / norm_const
# -
# Pick one test image by filename; the numeric filename doubles as the
# 1-based index into the pickled test set loaded above.
filename = "1.png"
fullname = os.path.join('./data/test', filename)
im = Image.open(fullname)
house_num = ''
image_index, _ = filename.split(".")
image_index = int(image_index) - 1  # convert 1-based filename to 0-based index
# +
# Network hyperparameters -- these must match the values used when the
# checkpoint ./model/CNN_multi2.ckpt was trained.
image_size = 32
num_labels = 11 # 0-9, + blank
num_channels = 1 # grayscale
patch_size = 5
depth1 = 16
depth2 = 32
depth3 = 64  # NOTE(review): depth3 is never referenced below -- confirm it is dead
num_hidden1 = 64
graph = tf.Graph()
with graph.as_default():
    # Input data: a single 32x32 grayscale image.
    tf_test_dataset = tf.placeholder(tf.float32, shape=(1, 32, 32, 1))
    # Variables: three 5x5 conv layers, Xavier-initialized.
    layer1_weights = tf.get_variable("W1",
                                     shape=[patch_size, patch_size, num_channels, depth1],
                                     initializer=tf.contrib.layers.xavier_initializer_conv2d())
    layer1_biases = tf.Variable(tf.constant(1.0, shape=[depth1]), name='B1')
    layer2_weights = tf.get_variable("W2",
                                     shape=[patch_size, patch_size, depth1, depth2],
                                     initializer=tf.contrib.layers.xavier_initializer_conv2d())
    layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth2]), name='B2')
    layer3_weights = tf.get_variable("W3",
                                     shape=[patch_size, patch_size, depth2, num_hidden1],
                                     initializer=tf.contrib.layers.xavier_initializer_conv2d())
    layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden1]), name='B3')
    # Five parallel classifier heads, one per digit position (up to 5 digits).
    s1_w = tf.get_variable("WS1", shape=[num_hidden1, num_labels],
                           initializer=tf.contrib.layers.xavier_initializer())
    s1_b = tf.Variable(tf.constant(1.0, shape=[num_labels]), name='BS1')
    s2_w = tf.get_variable("WS2", shape=[num_hidden1, num_labels],
                           initializer=tf.contrib.layers.xavier_initializer())
    s2_b = tf.Variable(tf.constant(1.0, shape=[num_labels]), name='BS2')
    s3_w = tf.get_variable("WS3", shape=[num_hidden1, num_labels],
                           initializer=tf.contrib.layers.xavier_initializer())
    s3_b = tf.Variable(tf.constant(1.0, shape=[num_labels]), name='BS3')
    s4_w = tf.get_variable("WS4", shape=[num_hidden1, num_labels],
                           initializer=tf.contrib.layers.xavier_initializer())
    s4_b = tf.Variable(tf.constant(1.0, shape=[num_labels]), name='BS4')
    s5_w = tf.get_variable("WS5", shape=[num_hidden1, num_labels],
                           initializer=tf.contrib.layers.xavier_initializer())
    s5_b = tf.Variable(tf.constant(1.0, shape=[num_labels]), name='BS5')
    # Model: LCN -> (conv+relu+LRN+maxpool) x2 -> conv+relu -> dropout ->
    # five per-digit logit heads. Layer names follow LeNet (C1/S2/C3/S4/C5).
    def model(data, keep_prob, shape):
        LCN = LecunLCN(data, shape)
        conv = tf.nn.conv2d(LCN, layer1_weights, [1,1,1,1], 'VALID', name='C1')
        hidden = tf.nn.relu(conv + layer1_biases)
        lrn = tf.nn.local_response_normalization(hidden)
        sub = tf.nn.max_pool(lrn, [1,2,2,1], [1,2,2,1], 'SAME', name='S2')
        conv = tf.nn.conv2d(sub, layer2_weights, [1,1,1,1], padding='VALID', name='C3')
        hidden = tf.nn.relu(conv + layer2_biases)
        lrn = tf.nn.local_response_normalization(hidden)
        sub = tf.nn.max_pool(lrn, [1,2,2,1], [1,2,2,1], 'SAME', name='S4')
        conv = tf.nn.conv2d(sub, layer3_weights, [1,1,1,1], padding='VALID', name='C5')
        hidden = tf.nn.relu(conv + layer3_biases)
        hidden = tf.nn.dropout(hidden, keep_prob)
        # Flatten spatial dims before the fully connected heads.
        shape = hidden.get_shape().as_list()
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        logits1 = tf.matmul(reshape, s1_w) + s1_b
        logits2 = tf.matmul(reshape, s2_w) + s2_b
        logits3 = tf.matmul(reshape, s3_w) + s3_b
        logits4 = tf.matmul(reshape, s4_w) + s4_b
        logits5 = tf.matmul(reshape, s5_w) + s5_b
        return [logits1, logits2, logits3, logits4, logits5]
    # Inference graph: keep_prob=1 (no dropout at eval time).
    # NOTE(review): the shape argument claims batch 10 but tf_test_dataset is
    # batch 1; LecunLCN only reads shape[3], so this appears harmless -- confirm.
    [logits1, logits2, logits3, logits4, logits5] = model(tf_test_dataset, 1, [10, 32, 32, 1])
    # Per-digit softmax, then argmax over classes -> one predicted digit per head.
    predict = tf.stack([tf.nn.softmax(logits1), tf.nn.softmax(logits2),
                        tf.nn.softmax(logits3), tf.nn.softmax(logits4),
                        tf.nn.softmax(logits5)])
    test_prediction = tf.transpose(tf.argmax(predict, 2))
    saver = tf.train.Saver()
# Select the single test image as a batch of one.
input_image_array = np.expand_dims(test_dataset[image_index, :, :, :], axis=0)
with tf.Session(graph=graph) as session:
    # Restore trained weights and run inference on the chosen image.
    saver.restore(session, "./model/CNN_multi2.ckpt")
    print("Model restored.")
    print('Initialized')
    test_prediction = session.run(test_prediction,
                                  feed_dict={tf_test_dataset : input_image_array,})
    print(test_prediction)
# -
# Drop blank digits (label 10) and join the rest into the house-number string.
number_house = "".join([str(digit) for digit in test_prediction[0,:] if digit != 10])
number_house
|
CNN_eval.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # List Basics
# ---
#
# __Lists__ are ordered collections of data values.
# - List is a built-in data type in Python 3
# - Each item can be same or different data type (Int, Float, String, Boolean, List, and etc)
# - Items are separated by commas (no comma is needed after the last item)
# - A list is denoted by square brackets: ```[ ]```
# - A list is iterable; therefore, we can traverse through it with a for loop
# - Lists are compatible with the following built-in functions:
# - ```str() and tuple()``` :: can be converted to these data-types easily
# - ```len()``` :: returns the size of your list
# - ```enumerate()``` :: to help you pair index and items
# - ```reversed()``` :: will create a flipped iterator object
# - ```sorted()``` :: will help you return a sorted version of the list
# - ```min() and max()``` :: will help you determine the least and greatest value within a list, and compare lists
# - ```sum()``` :: in a list composed of numeric values, sum() will add up all the values
# - _many more, but they are deemed advanced and requires their own lessons_
#
# __Examples:__
# +
# Examples of Lists in Python: items may be of any type, and lists can nest.
a_list = [1,2,3,4,5,6]
b_list = ['h', 'e', 'l', 'l', 'o']
c_list = [
    [1,2,3,4,5,6],
    ['h', 'e', 'l', 'l', 'o'],
    True
]
print('c_list:', c_list)
# -
# ### Generating Lists from Sequences
#
# __```list()``` function:__
#
# - Converts the argument into a list
# - The argument should be either a sequence-like data (example: strings)
# +
# Using list() to convert sequence-like data (strings, ranges) into lists.
result1 = list("hello")
result2 = list(range(10,20,3))
result3 = list(str(1234))
print('result1:', result1)
print('result2:', result2)
print('result3:', result3)
# -
# ### Traversal & Accessing a List
#
# __To Traverse:__ _to travel across_; helps us get through our data.
# +
# For Loop & Lists: a list is iterable, so a for loop visits each item in order.
for item in [1,2,3,4, 'hello!']:
    print('Current item:', item)
# -
# __Lists are indexable__
#
# Similar to strings, we can look at an individual value at an index location; returns a value.
# +
# List Indexing
# Format: list[location] gives us a value; negative indices count from the end.
a_list = [10, 13, 16, 19]
print('a_list[0]:', a_list[0])
print('a_list[1]:', a_list[1])
print('a_list[3]:', a_list[3])
print('a_list[-1]:', a_list[-1])
# -
# We can also traverse by index:
# +
# List Traverse with index: enumerate() pairs each index with its item,
# replacing the unidiomatic range(len(...)) pattern; the output is unchanged.
a_list = list('hello')
for i, item in enumerate(a_list):
    print('Item at index %d is %s.' % (i, item))
# -
# __Lists are Sliceable__
#
# We can look at portions of a list by slicing; slicing returns a sample of the list back.
#
# ```Slicing = [start:end:step] step is 1 if not defined```
# +
# List Slicing
# Format: list[start:end:step]; step defaults to 1, a negative step reverses.
a_list = list(range(10,20))
print('a_list[0:len(a_list)]:', a_list[0:len(a_list)])
print('a_list[1:2]:', a_list[1:2])
print('a_list[:3]:', a_list[:3])
print('a_list[::-1]:', a_list[::-1])
print('a_list[-2:-4:-1]:', a_list[-2:-4:-1])
|
.ipynb_checkpoints/01 List Basics-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# ==============================================================================
# Copyright 2021 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
#
# Author: <NAME>
# Email: <EMAIL>
#
# Filename image_tf.ipynb
# Classify numbers 0-9
#
# +
### Import packages
# -
# !cd /home/jupyter/vapit/ai-platform-tf/Vertex
# !python3 -m pip install -r ./requirements.txt -U -q --user
# !python3 -m pip install -U -q google-cloud-aiplatform
# !python3 -m pip install -U -q google-cloud-storage==1.32
# !gcloud components update --quiet
# !python3 -m pip install -U -q build
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
# +
# Import packages
import json
import logging
import pandas as pd
import numpy as np
from datetime import datetime
from pytz import timezone
from googleapiclient import discovery
from google.cloud import aiplatform
# -
# ### Configure Global Variables
# List your current GCP project name
# project_id = !gcloud config list --format 'value(core.project)' 2>/dev/null
project_id
# Configure your system variables
# +
# Configure your global variables
PROJECT = project_id[0] # Replace with your project ID
USER = 'cchatterjee' # Replace with your user name
BUCKET_NAME = 'vapit_data' # Replace with your gcs bucket name - gloablly unique
FOLDER_NAME = 'tf_models'
TIMEZONE = 'US/Pacific'
REGION = 'us-central1'
PACKAGE_URIS = f"gs://{BUCKET_NAME}/trainer/tensorflow/trainer-0.1.tar.gz"
JOB_DIR = 'gs://{}/{}/jobdir'.format(
BUCKET_NAME,
FOLDER_NAME
)
MODEL_DIR = 'gs://{}/{}/models'.format(
BUCKET_NAME,
FOLDER_NAME
)
print("JOB_DIR = ", JOB_DIR)
print("MODEL_DIR = ", MODEL_DIR)
# -
# ### Authenticate your GCP account
#
# This is required if you run the notebook in Colab
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except: pass
# Create your bucket
# !gsutil mb -l $REGION gs://$BUCKET_NAME
# Clean old job logs, job packages and models
# !gsutil -m -q rm $JOB_DIR/packages/**
# !gsutil -m -q rm $MODEL_DIR/model**
# -----------
# ### Special functions
#
#------
def find_best_model_dir(model_dir, offset=1, maxFlag=1):
    """Return the GCS path of the best checkpoint directory under model_dir.

    Checkpoint directory names embed the metric value near the end
    (e.g. .../cp-<HHMMSS>-<trial>-<val_accuracy>/). Relies on the IPython
    `!gsutil` magic, so this function only works inside a notebook kernel.

    :param model_dir: GCS folder containing one subfolder per checkpoint
    :param offset: which float, counting from the end of the name, is the metric
    :param maxFlag: 1 if larger metric values are better, 0 if smaller are better
    :return: the best checkpoint path, or "" when none exist
    """
    # Get a list of model directories
    all_models = ! gsutil ls $model_dir
    print("")
    print("All Models = ")
    print(*all_models, sep='\n')
    # Check if model dirs exist (gsutil reports errors in its output here)
    if (("CommandException" in all_models[0]) or (len(all_models) <= 1)):
        print("Create the models first.")
        return ""
    # Find the best model from checkpoints
    import re
    best_acc = -np.Inf
    if (maxFlag != 1):
        best_acc = np.Inf
    best_model_dir = ""
    tup_list = []
    for i in range(1,len(all_models)):
        all_floats = re.findall(r"[-+]?\d*\.\d+|\d+", all_models[i]) #Find the floats in the string
        # NOTE(review): the unary minus flips the metric's sign before the
        # comparison below -- confirm this matches how checkpoint names encode
        # val_accuracy, otherwise best/worst selection is inverted.
        cur_acc = -float(all_floats[-offset]) #which item is the model optimization metric
        tup_list.append([all_models[i],cur_acc])
        if (maxFlag*(cur_acc > best_acc) or (1-maxFlag)*(cur_acc < best_acc)):
            best_acc = cur_acc
            best_model_dir = all_models[i]
    if maxFlag:
        tup_list.sort(key=lambda tup: tup[1], reverse=False)
    else:
        tup_list.sort(key=lambda tup: tup[1], reverse=True)
    #for i in range(len(tup_list)):
    #    print(tup_list[i][0])
    print("Best Accuracy from Checkpoints = ", best_acc)
    print("Best Model Dir from Checkpoints = ", best_model_dir)
    return best_model_dir
# +
# Plot original images
# image_array is np.array(num_images, x_size, y_size)
import matplotlib.pyplot as plt
def plot_images(image_array):
    """Show the first 16 images of image_array (num_images, x, y) in a 4x4 grid."""
    fig, axes = plt.subplots(4, 4, figsize=(8, 8))
    for row in range(4):
        for col in range(4):
            axes[row, col].imshow(image_array[4 * row + col, :, :], cmap='gray')
# -
# ## Build python package and upload to your bucket
# +
# %%writefile ./setup.py
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
# https://cloud.google.com/ai-platform/training/docs/runtime-version-list
from setuptools import find_packages
from setuptools import setup
#Runtime 2.1
# Dependency pins matching AI Platform runtime version 2.1
# (see the runtime version list linked above).
REQUIRED_PACKAGES = ['tensorflow==2.1.0',
                     'numpy==1.18.0',
                     'pandas==0.25.3',
                     'scikit-learn==0.22',
                     'google-cloud-storage==1.23.0',
                     'gcsfs==0.6.1',
                     'cloudml-hypertune',
                     ]
# Package the trainer/ directory so it can be submitted as a training job.
setup(
    name='trainer',
    version='0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    include_package_data=True,
    description='Trainer package for Tensorflow Task'
)
# -
# Create the tf_trainer directory and load the trainer files in it
# !mkdir -p trainer
# +
# %%writefile ./trainer/__init__.py
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
# +
# %%writefile ./trainer/inputs.py
# Create the train and label lists
import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
#------
def load_data():
    """Creates train and test data set.

    Downloads MNIST via Keras, scales pixel values into [0, 1], and
    returns [X_train, X_test, y_train, y_test].
    """
    mnist = tf.keras.datasets.mnist
    (X_train, y_train),(X_test, y_test) = mnist.load_data()
    # Scale pixel values from [0, 255] to [0, 1].
    X_train, X_test = X_train / 255.0, X_test / 255.0
    # Check the shape
    print("X_train shape = ", X_train.shape)
    print("X_test shape = ", X_test.shape)
    print("y_train shape = ", y_train.shape)
    print("y_test shape = ", y_test.shape)
    return [X_train, X_test, y_train, y_test]
# +
# %%writefile ./trainer/model.py
import tensorflow as tf
import numpy as np
from tensorflow.keras import backend as K
def tf_model(input_shape, model_depth: int = 1, dropout_rate: float = 0.02):
    """Creates the keras model used by task to train the model.

    :param input_shape: shape of one input sample (e.g. (28, 28) for MNIST)
    :param model_depth: number of Dense+Dropout stages
    :param dropout_rate: dropout probability after each Dense layer
    :return: an uncompiled tf.keras Sequential model
    """
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Flatten, Dropout
    x_dim = input_shape[0]
    y_dim = input_shape[1]
    model = Sequential()
    # Flatten the 2-D image into a vector before the dense stack.
    model.add(Flatten(input_shape=input_shape))
    # Dense layers whose widths shrink linearly from x_dim*y_dim toward 128.
    for i in range(0,model_depth):
        nUnits = x_dim*y_dim-(i+1)*((x_dim*y_dim-128)//model_depth)
        model.add(Dense(nUnits, activation='relu'))
        model.add(Dropout(dropout_rate))
    # 10-way softmax output for digit classes 0-9.
    model.add(Dense(10, activation='softmax'))
    # NOTE(review): summary() prints the table itself and returns None,
    # so this line also prints "None" -- confirm that is intended.
    print(model.summary())
    return model
def custom_loss(y_true, y_pred):
    """Sparse categorical cross-entropy, wrapped for use as a Keras loss."""
    return tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)


def custom_metric(y_true, y_pred):
    """Mean squared difference between the flattened labels and predictions.

    Fix: the previous implementation used np.mean on a TF tensor, which
    fails on symbolic tensors in graph mode; tf.reduce_mean works in both
    eager and graph execution and yields the same scalar value.
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    return tf.reduce_mean(tf.math.squared_difference(y_true, y_pred))
# +
# %%writefile ./trainer/train.py
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
import os
import json
import tensorflow as tf
import numpy as np
import datetime as datetime
from pytz import timezone
import hypertune
import argparse
from trainer import model
from trainer import inputs
import warnings
warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#0 = all messages are logged (default behavior)
#1 = INFO messages are not printed
#2 = INFO and WARNING messages are not printed
#3 = INFO, WARNING, and ERROR messages are not printed
def parse_arguments():
    """Parse the known hyperparameter command-line flags.

    Returns:
        argparse.Namespace of parsed arguments; unrecognized flags are ignored.
    """
    parser = argparse.ArgumentParser()
    options = (
        ('--model_depth', {'default': 1, 'type': int,
                           'help': 'Hyperparameter: depth of the model'}),
        ('--dropout_rate', {'default': 0.02, 'type': float,
                            'help': 'Hyperparameter: Drop out rate'}),
        ('--learning_rate', {'default': 0.0001, 'type': float,
                             'help': 'Hyperparameter: initial learning rate'}),
        ('--epochs', {'default': 2, 'type': int,
                      'help': 'Hyperparameter: epoch.'}),
        ('--batch_size', {'default': 4, 'type': int,
                          'help': 'batch size of the deep network'}),
        ('--model_dir', {'default': '',
                         'help': 'Directory to store model checkpoints and logs.'}),
        ('--verbosity', {'choices': ['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'],
                         'default': 'FATAL'}),
    )
    for flag, spec in options:
        parser.add_argument(flag, **spec)
    parsed, _ = parser.parse_known_args()
    return parsed
def get_callbacks(args, early_stop_patience: int = 3):
    """Creates Keras callbacks for model training.

    :param args: parsed arguments; args.model_dir is the checkpoint/log root
    :param early_stop_patience: epochs without val_accuracy improvement before
        stopping. Fix: this argument was previously accepted but ignored
        (patience was hard-coded to 3); it is now actually applied.
    :return: [checkpoint_cb, tensorboard_cb, earlystop_cb]
    """
    # Hyperparameter-tuning trials run in separate containers; the trial id
    # arrives via the TF_CONFIG environment variable ("" outside tuning).
    trialId = json.loads(os.environ.get("TF_CONFIG", "{}")).get("task", {}).get("trial", "")
    if trialId == '':
        trialId = '0'
    print("trialId=", trialId)
    curTime = datetime.datetime.now(timezone('US/Pacific')).strftime('%H%M%S')
    # Embed time, trial id, and val_accuracy in the checkpoint path so each
    # trial's best checkpoint is distinguishable.
    model_dir = args.model_dir + "/checkpoints/cp-"+curTime+"-"+trialId+"-{val_accuracy:.4f}"
    log_dir = args.model_dir + "/log_dir"
    tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir, histogram_freq=1)
    checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(model_dir, monitor='val_accuracy', mode='max',
                                                       verbose=0, save_best_only=True,
                                                       save_weights_only=False)
    earlystop_cb = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                                    patience=early_stop_patience)
    return [checkpoint_cb, tensorboard_cb, earlystop_cb]
if __name__ == "__main__":
    # ---------------------------------------
    # Parse Arguments
    # ---------------------------------------
    args = parse_arguments()
    #args.model_dir = MODEL_DIR + datetime.datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')
    print(args)
    # ---------------------------------------
    # Input Data & Preprocessing
    # ---------------------------------------
    print("Input and pre-process data ...")
    # Load MNIST as [X_train, X_test, y_train, y_test].
    train_test_data = inputs.load_data()
    X_train = train_test_data[0]
    X_test = train_test_data[1]
    y_train = train_test_data[2]
    y_test = train_test_data[3]
    # ---------------------------------------
    # Train model
    # ---------------------------------------
    print("Creating model ...")
    # Shape of a single sample (drop the batch dimension).
    input_shape = (X_train.shape)[1:]
    tf_model = model.tf_model(input_shape, model_depth=args.model_depth,
                              dropout_rate=args.dropout_rate)
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
    tf_model.compile(optimizer=optimizer,
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy'])
    print("Fitting model ...")
    # Checkpointing, TensorBoard logging, and early stopping (patience 3).
    callbacks = get_callbacks(args, 3)
    histy = tf_model.fit(x=X_train, y=y_train,
                         epochs=args.epochs,
                         batch_size=args.batch_size,
                         validation_data=(X_test, y_test),
                         callbacks=callbacks)
    # TBD save history for visualization
    # Report the last epoch's training accuracy (consumed by train_hpt.py,
    # which appends a hypertune metric report after this block).
    final_epoch_accuracy = histy.history['accuracy'][-1]
    final_epoch_count = len(histy.history['accuracy'])
    print('final_epoch_accuracy = %.6f' % final_epoch_accuracy)
    print('final_epoch_count = %2d' % final_epoch_count)
# -
# Create the tf directory and load the trainer files in it
# !cp ./trainer/train.py ./trainer/train_hpt.py
# %%writefile -a ./trainer/train_hpt.py
"""This method updates a CAIP HPTuning Job with a final metric for the job.
In TF2.X the user must either use hypertune or a custom callback with
tf.summary.scalar to update CAIP HP Tuning jobs. This function uses
hypertune, which appears to be the preferred solution. Hypertune also works
with containers, without code change.
Args:
metric_tag: The metric being optimized. This MUST MATCH the
hyperparameterMetricTag specificed in the hyperparameter tuning yaml.
metric_value: The value to report at the end of model training.
global_step: An int value to specify the number of trainin steps completed
at the time the metric was reported.
"""
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=final_epoch_accuracy,
global_step=final_epoch_count
)
# !cd /home/jupyter/vapit/ai-platform-tf/Vertex
# !python3 -m build
# !gsutil cp ./dist/trainer-0.1.tar.gz $PACKAGE_URIS
# ------
# ### Training with Google Vertex AI
# For the full article, please visit: https://cloud.google.com/vertex-ai/docs
#
# Where Vertex AI fits in the ML workflow \
# The diagram below gives a high-level overview of the stages in an ML workflow. The blue-filled boxes indicate where Vertex AI provides managed services and APIs:
#
# <img src="img/ml-workflow.svg" alt="Drawing">
#
# As the diagram indicates, you can use Vertex AI to manage the following stages in the ML workflow:
#
# - Train an ML model on your data:
# - Train model
# - Evaluate model accuracy
# - Tune hyperparameters
#
#
# - Deploy your trained model.
#
# - Send prediction requests to your model:
# - Online prediction
# - Batch prediction (for TensorFlow only)
#
#
# - Monitor the predictions on an ongoing basis.
#
# - Manage your models and model versions.
#
# - For the latest list, see
# - Pre-built containers for training: https://cloud.google.com/vertex-ai/docs/training/pre-built-containers
# and
# - Pre-built containers for prediction: https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
#
# #### Train at local
#
# Before submitting training jobs to Cloud AI Platform, you can test your train.py code in the local environment. You can test by running your Python script on the command line, but another (and often better) choice is to use the `gcloud ai-platform local train` command. The latter method makes sure your entire Python package is ready to be submitted to the remote VMs.
from trainer import inputs
train_test_data = inputs.load_data()
X_test = train_test_data[1]
plot_images(X_test)
# +
# %%time
# Run the training manually
# Training parameters
from datetime import datetime
from pytz import timezone
MODEL_DEPTH = 2
DROPOUT_RATE = 0.01
LEARNING_RATE = 0.00005
EPOCHS = 1
BATCH_SIZE = 32
MODEL_DIR_PYTH = 'gs://{}/{}/models/{}'.format(
BUCKET_NAME,
FOLDER_NAME,
datetime.now(timezone('US/Pacific')).strftime('model_%m%d%Y_%H%M')
)
print('MODEL_DEPTH = %2d' % MODEL_DEPTH)
print('DROPOUT_RATE = %.4f' % DROPOUT_RATE)
print('LEARNING_RATE = %.6f' % LEARNING_RATE)
print('EPOCHS = %2d' % EPOCHS)
print('BATCH_SIZE = %2d' % BATCH_SIZE)
print("MODEL_DIR =", MODEL_DIR_PYTH)
# Run training
# ! python3 -m trainer.train \
# --model_depth=$MODEL_DEPTH \
# --dropout_rate=$DROPOUT_RATE \
# --learning_rate=$LEARNING_RATE \
# --epochs=$EPOCHS \
# --batch_size=$BATCH_SIZE \
# --model_dir=$MODEL_DIR_PYTH
# -
# ------
# ### Hyperparameter Tuning
#
# To use hyperparameter tuning in your training job you must perform the following steps:
#
# - Specify the hyperparameter tuning configuration for your training job by including a HyperparameterSpec in your TrainingInput object.
#
# - Include the following code in your training application:
#
# - Parse the command-line arguments representing the hyperparameters you want to tune, and use the values to set the hyperparameters for your training trial.
# - Add your hyperparameter metric to the summary for your graph.
#
# +
# Google Vertex AI requires each job to have unique name,
# Therefore, we use prefix + timestamp to form job names.
JOBNAME_HPT = 'tensorflow_train_{}_{}_hpt'.format(
USER,
datetime.now(timezone(TIMEZONE)).strftime("%m%d%y_%H%M")
) # define unique job name
# We use the job names as folder names to store outputs.
MODEL_DIR_HPT = 'gs://{}/{}/models/{}'.format(
BUCKET_NAME,
FOLDER_NAME,
datetime.now(timezone('US/Pacific')).strftime('model_%m%d%Y_%H%M')
)
JOB_DIR = 'gs://{}/{}/jobdir'.format(
BUCKET_NAME,
FOLDER_NAME
)
print("JOB_NAME_HPT = ", JOBNAME_HPT)
print("JOB_DIR = ", JOB_DIR)
print("MODEL_DIR_HPT = ", MODEL_DIR_HPT)
# -
# ### Submit the hyperparameter job to vertex AI
# +
executor_image_uri = 'us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-2:latest'
python_module = "trainer.train_hpt"
api_endpoint = "{}-aiplatform.googleapis.com".format(REGION)
machine_type = "n1-standard-4"
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
# study_spec
metric = {
"metric_id": "accuracy",
"goal": aiplatform.gapic.StudySpec.MetricSpec.GoalType.MAXIMIZE,
}
depth = {
"parameter_id": "model_depth",
"integer_value_spec": {"min_value": 1, "max_value": 5},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
}
# dropout_rate = {
# "parameter_id": "dropout_rate",
# "double_value_spec": {"min_value": 0.001, "max_value": 0.1},
# "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
# }
# learning_rate = {
# "parameter_id": "learning_rate",
# "double_value_spec": {"min_value": 0.00001, "max_value": 0.01},
# "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
# }
# batch_size = {
# "parameter_id": "batch_size",
# "integer_value_spec": {"min_value": 1, "max_value": 16},
# "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
# }
epochs = {
"parameter_id": "epochs",
"integer_value_spec": {"min_value": 1, "max_value": 4},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
}
# trial_job_spec
machine_spec = {
"machine_type": machine_type,
}
worker_pool_spec = {
"machine_spec": machine_spec,
"replica_count": 1,
"python_package_spec": {
"executor_image_uri": executor_image_uri,
"package_uris": [PACKAGE_URIS],
"python_module": python_module,
"args": [
'--job-dir',
JOB_DIR,
'--model_dir',
MODEL_DIR_HPT,
'--dropout_rate',
str(DROPOUT_RATE),
'--learning_rate',
str(LEARNING_RATE),
'--batch_size',
str(BATCH_SIZE),
],
},
}
# hyperparameter_tuning_job
hyperparameter_tuning_job = {
"display_name": JOBNAME_HPT,
"max_trial_count": 4,
"parallel_trial_count": 2,
"study_spec": {
"metrics": [metric],
"parameters": [depth, epochs],
# "algorithm": aiplatform.gapic.StudySpec.Algorithm.RANDOM_SEARCH,
},
"trial_job_spec": {"worker_pool_specs": [worker_pool_spec]},
}
parent = f"projects/{PROJECT}/locations/{REGION}"
response = client.create_hyperparameter_tuning_job(
parent=parent, hyperparameter_tuning_job=hyperparameter_tuning_job
)
print("response:", response)
job_name_hpt = response.name.split('/')[-1]
# -
# #### Check the status of Long Running Operation (LRO) with Google API Client
#
# Send an API request to Vertex AI to get the detailed information. The most interesting piece of information is the hyperparameter values in the trial with best performance metric.
client_options = {"api_endpoint": api_endpoint}
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
name = client.hyperparameter_tuning_job_path(
project=PROJECT,
location=REGION,
hyperparameter_tuning_job=job_name_hpt,
)
response = client.get_hyperparameter_tuning_job(name=name)
print("Job status = ", response.state)
print("response:", response)
# print("response state: ", str(response.state))
if "JobState.JOB_STATE_SUCCEEDED" == str(response.state):
print("Job state succeeded.")
# #### Get the hyperparameters associated with the best metrics
# +
max_ind = 0
max_val = 0
for ind, trials in enumerate(response.trials):
value = trials.final_measurement.metrics[0].value
print("Metrics Value (larger is better):", value)
if value > max_val:
max_val = value
max_ind = ind
param_dict = {}
for params in response.trials[max_ind].parameters:
param_dict[params.parameter_id] = params.value
print(param_dict)
depth=str(int(param_dict['model_depth']))
# dropout_rate=str(param_dict['dropout_rate'])
# learning_rate=str(param_dict['learning_rate'])
# batch_size=str(int(param_dict['batch_size']))
epochs=str(int(param_dict['epochs']))
# -
# #### Get the best model
best_model_dir_hpt = find_best_model_dir(MODEL_DIR_HPT+'/checkpoints', offset=1, maxFlag=1)
# ------
# ### Training with Tuned Parameters
#
# Once your hyperparameter training jobs are done. You can use the optimized combination of hyperparameters from your trials and start a single training job on Cloud AI Platform to train your final model.
# +
# Google Cloud AI Platform requires each job to have unique name,
# Therefore, we use prefix + timestamp to form job names.
JOBNAME_TRN = 'tensorflow_train_{}_{}'.format(
USER,
datetime.now(timezone(TIMEZONE)).strftime("%m%d%y_%H%M")
)
# We use the job names as folder names to store outputs.
MODEL_DIR_TRN = 'gs://{}/{}/models/{}'.format(
BUCKET_NAME,
FOLDER_NAME,
datetime.now(timezone('US/Pacific')).strftime('model_%m%d%Y_%H%M')
)
JOB_DIR = 'gs://{}/{}/jobdir'.format(
BUCKET_NAME,
FOLDER_NAME
)
# Training parameters
MODEL_DEPTH = 3
DROPOUT_RATE = 0.02
LEARNING_RATE = 0.0001
BATCH_SIZE = 32
EPOCHS = 2
print("JOB_NAME_TRN = ", JOBNAME_TRN)
print("JOB_DIR = ", JOB_DIR)
print("MODEL_DIR_TRN = ", MODEL_DIR_TRN)
# +
executor_image_uri = 'us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-2:latest'
python_module = "trainer.train"
api_endpoint = "{}-aiplatform.googleapis.com".format(REGION)
machine_type = "n1-standard-4"
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
custom_job = {
"display_name": JOBNAME_TRN,
"job_spec": {
"worker_pool_specs": [
{
"machine_spec": {
"machine_type": machine_type,
},
"replica_count": 1,
"python_package_spec": {
"executor_image_uri": executor_image_uri,
"package_uris": [PACKAGE_URIS],
"python_module": python_module,
"args": [
'--job-dir',
JOB_DIR,
'--model_dir',
MODEL_DIR_TRN,
'--model_depth',
str(MODEL_DEPTH),
'--dropout_rate',
str(DROPOUT_RATE),
'--learning_rate',
str(LEARNING_RATE),
'--batch_size',
str(BATCH_SIZE),
'--epochs',
str(EPOCHS),
],
},
}
]
},
}
parent = f"projects/{PROJECT}/locations/{REGION}"
response = client.create_custom_job(parent=parent, custom_job=custom_job)
print("response:", response)
job_id_trn = response.name.split('/')[-1]
# -
# Check the training job status
# check the training job status
client_options = {"api_endpoint": api_endpoint}
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
name = client.custom_job_path(
project=PROJECT,
location=REGION,
custom_job=job_id_trn,
)
response = client.get_custom_job(name=name)
print(response.state)
# #### Get the best model
best_model_dir_trn = find_best_model_dir(MODEL_DIR_TRN+'/checkpoints', offset=1, maxFlag=1)
# --------
# ### Deploy the Model
#
# Vertex AI provides tools to upload your trained ML model to the cloud, so that you can send prediction requests to the model.
#
# In order to deploy your trained model on Vertex AI, you must save your trained model using the tools provided by your machine learning framework. This involves serializing the information that represents your trained model into a file which you can deploy for prediction in the cloud.
#
# Then you upload the saved model to a Cloud Storage bucket, and create a model resource on Vertex AI, specifying the Cloud Storage path to your saved model.
#
# When you deploy your model, you can also provide custom code (beta) to customize how it handles prediction requests.
#
#
# #### Import model artifacts to Vertex AI
#
# When you import a model, you associate it with a container for Vertex AI to run prediction requests. You can use pre-built containers provided by Vertex AI, or use your own custom containers that you build and push to Container Registry or Artifact Registry.
#
# You can use a pre-built container if your model meets the following requirements:
#
# - Trained in Python 3.7 or later
# - Trained using TensorFlow, scikit-learn, or XGBoost
# - Exported to meet framework-specific requirements for one of the pre-built prediction containers
#
# The link to the list of pre-built predict container images:
#
# https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers?_ga=2.125143370.-1302053296.1620920844&_gac=1.221340266.1622086653.CjwKCAjw47eFBhA9EiwAy8kzNOkCqVAmokRvQaxBDOoa8AhGOpzzW69x64rRzfgWxogIn3m6moQoBRoCuOsQAvD_BwE
# +
MODEL_NAME = "image_tensorflow_model"
response = aiplatform.Model.upload(
display_name = MODEL_NAME,
serving_container_image_uri = 'us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-2:latest',
artifact_uri = best_model_dir_hpt, #best_model_dir_trn,
)
model_id = response.name.split('/')[-1]
print("model_id = ", model_id)
# -
# #### Create Endpoint
#
# You need the endpoint ID to deploy the model.
# +
# Create an (initially empty) Vertex AI endpoint that the model will be
# deployed to in the next step.
MODEL_ENDPOINT_DISPLAY_NAME = "image_tensorflow_model_endpoint"
aiplatform.init(project=PROJECT, location=REGION)
endpoint = aiplatform.Endpoint.create(
    display_name=MODEL_ENDPOINT_DISPLAY_NAME, project=PROJECT, location=REGION,
)
# resource_name is projects/<p>/locations/<r>/endpoints/<id>;
# keep only the trailing id, which the prediction calls below need.
endpoint_id = endpoint.resource_name.split('/')[-1]
print("endpoint.display_name = ", endpoint.display_name)
print("endpoint.resource_name = ", endpoint.resource_name)
#print(endpoint.uri)
print("endpoint_id = ", endpoint_id)
# -
# #### Deploy Model to the endpoint
#
# You must deploy a model to an endpoint before that model can be used to serve online predictions; deploying a model associates physical resources with the model so it can serve online predictions with low latency. An undeployed model can serve batch predictions, which do not have the same low latency requirements.
# +
# Deploy the uploaded model to the endpoint so it can serve online
# predictions with dedicated compute.
MODEL_NAME = "image_tensorflow_model"
DEPLOYED_MODEL_DISPLAY_NAME = "image_tensorflow_model_deployed"
aiplatform.init(project=PROJECT, location=REGION)
model = aiplatform.Model(model_name=model_id)
# The explanation_metadata and explanation_parameters should only be
# provided for a custom trained model and not an AutoML model.
model.deploy(
    endpoint=endpoint,
    deployed_model_display_name=DEPLOYED_MODEL_DISPLAY_NAME,
    machine_type = "n1-standard-4",
    # Block until the deployment completes (can take several minutes).
    sync=True
)
print(model.display_name)
print(model.resource_name)
# -
# ### Explore models and endpoints
# +
# List every Vertex AI model registered in the project/region.
from google.cloud.aiplatform import gapic as aip


def list_models():
    """Return a summary dict per model in the configured project/region.

    Each summary carries the resource name, display name, creation time,
    serving container image and the GCS artifact location.
    """
    parent = "projects/" + PROJECT + "/locations/" + REGION
    api_endpoint = "{}-aiplatform.googleapis.com".format(REGION)
    client = aip.ModelServiceClient(client_options={"api_endpoint": api_endpoint})
    return [
        {
            "name": m.name,
            "display_name": m.display_name,
            "create_time": m.create_time,
            "container": m.container_spec.image_uri,
            "artifact_uri": m.artifact_uri,
        }
        for m in client.list_models(parent=parent)
    ]


model_list = list_models()
model_list
# +
# List every Vertex AI endpoint in the project/region.
from google.cloud.aiplatform import gapic as aip


def list_endpoints():
    """Return a summary dict per endpoint in the configured project/region.

    Each summary carries the resource name, display name, creation time and
    the first deployed model (empty string when nothing is deployed yet).
    """
    parent = "projects/" + PROJECT + "/locations/" + REGION
    api_endpoint = "{}-aiplatform.googleapis.com".format(REGION)
    client = aip.EndpointServiceClient(client_options={"api_endpoint": api_endpoint})
    summaries = []
    for ep in client.list_endpoints(parent=parent):
        deployed = ep.deployed_models[0].model if ep.deployed_models else ''
        summaries.append(
            {
                "name": ep.name,
                "display_name": ep.display_name,
                "create_time": ep.create_time,
                "deployed_models": deployed,
            }
        )
    return summaries


endpoint_list = list_endpoints()
endpoint_list
# +
# deployed_model_id = endpoint.list_models()[0].id
# print(deployed_model_id)
# endpoint.undeploy(deployed_model_id=deployed_model_id)
# +
# print(endpoint.list_models())
# print(endpoint.resource_name)
# -
# ------
# ### Send inference requests to your model
#
# Vertex AI provides the services you need to request predictions from your model in the cloud.
#
# There are two ways to get predictions from trained models: online prediction (sometimes called HTTP prediction) and batch prediction. In both cases, you pass input data to a cloud-hosted machine-learning model and get inferences for each data instance.
#
# Vertex AI online prediction is a service optimized to run your data through hosted models with as little latency as possible. You send small batches of data to the service and it returns your predictions in the response.
# #### Call Google API for online inference
# +
from googleapiclient import errors
from trainer import inputs

# Load the held-out split; load_data() returns a tuple and index 1 is x_test.
train_test_data = inputs.load_data()
x_test = train_test_data[1]
#y_test = train_test_data[3]

# Score up to 160 test samples against the online endpoint in mini-batches
# (online prediction requests have a payload-size limit, hence the batching).
pprobas = []
batch_size = 16
n_samples = min(160,x_test.shape[0])
print("batch_size=", batch_size)
print("n_samples=", n_samples)
aiplatform.init(project=PROJECT, location=REGION)
for i in range(0, n_samples, batch_size):
    # j is the exclusive end of this batch, clamped to n_samples.
    j = min(i+batch_size, n_samples)
    print("Processing samples", i, j)
    response = aiplatform.Endpoint(endpoint_id).predict(instances=x_test[i:j].tolist())
    # NOTE(review): errors.HttpError would be raised by the predict() call
    # above, not by iterating response.predictions — confirm try placement.
    try:
        for prediction_ in response.predictions:
            pprobas.append(prediction_)
    except errors.HttpError as err:
        # Something went wrong, print out some information.
        tf.compat.v1.logging.error('There was an error getting the job info, Check the details:')
        tf.compat.v1.logging.error(err._get_reason())
        break
# -
np.array(pprobas)
# #### Call Google GCLOUD API for online inference
# +
from trainer import inputs

# Load the held-out split; load_data() returns a tuple and index 1 is x_test.
train_test_data = inputs.load_data()
x_test = train_test_data[1]

# Write one batch of test records to a temporary JSON file in the request
# body format expected by `gcloud ai endpoints predict`.
JSON_TEMP = 'tf_test_data.json' # temp json file name to hold the inference data
batch_size = 100 # data batch size
start = 0
# BUG FIX: the original computed `min(ind+batch_size, ...)` but `ind` was
# never defined (NameError); the intended base index is `start`.
end = min(start + batch_size, len(x_test))
body = {'instances': x_test[start:end].tolist()}
with open(JSON_TEMP, 'w') as fp:
    fp.write(json.dumps(body))
# -
# !gcloud beta ai endpoints predict $endpoint_id \
# --region=$REGION \
# --json-request=$JSON_TEMP
# #### Call Google API for batch inference
# +
# Stage batch-prediction inputs locally before copying them to GCS.
import shutil
import os

# Start from an empty local staging directory.
DATA_DIR = './batch_data'
shutil.rmtree(DATA_DIR, ignore_errors=True)
os.makedirs(DATA_DIR)

n_samples = min(1000, x_test.shape[0])
nFiles = 10
nRecsPerFile = min(1000, n_samples // nFiles)
print("n_samples =", n_samples)
print("nFiles =", nFiles)
print("nRecsPerFile =", nRecsPerFile)

# Shard the samples into nFiles JSONL files, one instance per line, keyed by
# the model's input layer name ("dense_input").
features = np.array(x_test)
for file_idx in range(nFiles):
    with open(f'{DATA_DIR}/unkeyed_batch_{file_idx}.json', "w") as out:
        base = file_idx * nRecsPerFile
        for offset in range(nRecsPerFile):
            out.write(f'{{"dense_input": {features[base + offset].tolist()}}}\n')
# Write batch data to gcs file
# !gsutil -m cp -r ./batch_data gs://$BUCKET_NAME/$FOLDER_NAME/
# Remove old batch prediction results
# !gsutil -m rm -r gs://$BUCKET_NAME/$FOLDER_NAME/batch_predictions
# +
# Build a unique, timestamped name for the batch prediction job and derive
# the GCS paths it reads from and writes to.
JOBNAME_BATCH = 'tensorflow_batch_{}_{}'.format(
    USER,
    datetime.now(timezone(TIMEZONE)).strftime("%m%d%y_%H%M")
)
# We use the job names as folder names to store outputs.
JOB_DIR_BATCH = 'gs://{}/{}/{}'.format(
    BUCKET_NAME,
    FOLDER_NAME,
    JOBNAME_BATCH,
)
# Input shards were written by the previous cell; predictions land under
# OUTPUT_PATH.
INPUT_PATH='gs://' + BUCKET_NAME + '/' + FOLDER_NAME + '/batch_data/*'
OUTPUT_PATH='gs://' + BUCKET_NAME + '/' + FOLDER_NAME + '/batch_predictions'
print("JOB_NAME_BATCH = ", JOBNAME_BATCH)
print("JOB_DIR_BATCH = ", JOB_DIR_BATCH)
# +
# Run a Vertex AI batch prediction job over the JSONL shards in INPUT_PATH;
# results are written under OUTPUT_PATH.
aiplatform.init(project=PROJECT, location=REGION)
my_model = aiplatform.Model(model_name=model_id)
# Make SDK batch_predict method call
batch_prediction_job = my_model.batch_predict(
    instances_format="jsonl",
    predictions_format="jsonl",
    job_display_name=JOBNAME_BATCH,
    gcs_source=INPUT_PATH,
    gcs_destination_prefix=OUTPUT_PATH,
    model_parameters=None,
    machine_type="n1-standard-4",
    # Single worker; raise max_replica_count to parallelise larger jobs.
    starting_replica_count=1,
    max_replica_count=1,
    # Block until the batch job finishes.
    sync=True,
)
print(batch_prediction_job.display_name)
print(batch_prediction_job.resource_name)
print(batch_prediction_job.state)
# -
print("errors")
# !gsutil cat $OUTPUT_PATH/prediction.errors_stats-00000-of-00001
print("batch prediction results")
# !gsutil cat $OUTPUT_PATH/prediction.results-00000-of-00010
|
ai-platform-tf/Vertex/image_tf.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
Created on Wed Nov 4 2020
@author: <NAME>
"""
import pandas as pd
import matplotlib.pyplot as plt
# -
# **We use a dataset which includes salaries and compensation for all employees of the County of San Mateo. Data can be found on https://data.smcgov.org/.**
# +
''' Load and read data '''
df = pd.read_json("https://data.smcgov.org/resource/e4sp-qk5h.json")
df.head(5)
# -
# **We can also load data as a csv file.**
df1 = pd.read_csv("https://data.smcgov.org/resource/e4sp-qk5h.csv")
df1.head(5)
# **.describe( ) returns statistics such as mean and standard deviation of the numerical columns in a dataset.**
df.describe()
# **We can also find the statistics of non-numeric columns if we include all columns.**
df.describe(datetime_is_numeric=True, include="all")
df.plot(subplots=True, figsize=(8,20), color="blue", grid=True)
plt.legend(fontsize=10)
plt.tight_layout()
plt.show()
df.hist(figsize=(15,10), color='blue', bins=100)
plt.tight_layout()
plt.show()
# **.drop( ) deletes rows or columns in a dataframe.**
df.drop("other_employer_cost", axis=1)
# BUG FIX: the original passed the undefined name `rue` (NameError); the
# intended keyword value is True, matching the earlier describe() call.
df.describe(datetime_is_numeric=True, include="all")
# **If we drop a column, it doesn't change the original data frame by default. If we want to change the original data frame we need to use inplace=True**
df.drop("other_employer_cost", axis=1, inplace=True)
df
df.dtypes
print(' Average salary is', df.regular_pay.mean())
print('total number of employees is',df.name.count())
# **We can also count the unique values of a columns.**
print('Departments: \n', df.department_name.value_counts())
# **We can do the same by using a function and then apply it to the dataframe**
# +
def map_department(x):
    """Map the "Health System" department name to 1.

    Any other value maps to None (which pandas renders as NaN after
    ``Series.apply``), so a later ``value_counts()[1]`` counts exactly the
    Health System rows.
    """
    return 1 if x == "Health System" else None
df['department_mapped_value'] = df.department_name.apply(map_department)
# -
print('Total number of employees working for health system is',
df.department_mapped_value.value_counts()[1])
# **We can also find the total number of employees working for departments using groupby**
df['id'] = [i for i in range(1000)]
df.groupby('department_name', sort=False)["id"].count().reset_index(name ='Total number of employees')
df.groupby('job_title', sort=False)["id"].count().reset_index(name ='Total number of employees')
# **Next we show how to join two data frame in pandas.**
# +
# Build two small example frames that share an index and the
# (first_name, last_name) join keys, for the merge demos below.
ind = (0, 1, 2, 3)

first_name = ('Danny', 'Daniel', 'Don', 'Ali')
last_name = ('Reed', 'Coto', 'Gomez', 'Sori')
job_title = ('data analyst', 'software developer', 'computer scientist', 'programmer')
language = ('Python', 'C++', 'Java', 'Python')
df1 = pd.DataFrame(
    {'first_name': first_name, 'last_name': last_name,
     'job_title': job_title, 'language': language},
    index=ind,
)

first_name = ('Danny', 'Daniel', 'Don', 'Moh')
last_name = ('Reed', 'Coto', 'Gomez', 'Danesh')
salary = (150000, 160000, 120000, 120000)
age = (24, 32, 29, 33)
df2 = pd.DataFrame(
    {'first_name': first_name, 'last_name': last_name,
     'salary': salary, 'age': age},
    index=ind,
)
# -
df1
df2
# **Inner join: An inner join combines two DataFrames based on a join key and returns a new DataFrame that contains only those rows that have matching values in both of the original DataFrames.**
inner_join = pd.merge(left=df1, right=df2)
inner_join
# **Left join: Like an inner join, a left join uses join keys to combine two DataFrames. But it also returns all of the rows from the left DataFrame, even those rows whose join key(s) do not have values in the right DataFrame**.
left_join = pd.merge(left=df1, right=df2, how='left')
left_join
right_join = pd.merge(left=df1, right=df2, how='right')
right_join
# **We can also concatanate two data frames:**
df3 = pd.concat( [df1, df2],axis=0,ignore_index=True)
df3
df3.groupby('language', sort=False)['job_title'].count().reset_index(name='total')
|
Pandas_Python/pandas - data analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %cd /home/jovyan/work
# +
from chordifier.algorithm.Pipeline import Pipeline
from chordifier.experiments.KeyboardFactory import make
from chordifier.utils import vector
# Configuration for the full chordifier optimisation pipeline, grouped by
# the pipeline stage that consumes each setting.
PARAMETERS = {
    # general
    # 2 keys per finger on one hand, none on the other — TODO confirm layout
    'keyboard': make([2, 2, 2, 2, 2] + [0] * 5),
    'characters': 15,  # number of characters to assign chords to
    # sequencer
    'filename': "dataset/ngram.json",  # n-gram frequency data source
    'length': 3,    # n-gram length
    'samples': 500, # number of n-gram samples drawn
    # dynamics
    'x_y_ratio': 1.5,
    # per-finger weights (10 values, one per finger) — presumably effort
    # costs; verify against the dynamics implementation
    'stiffness': vector(1.8, 1.3, 1.2, 1.1, 2, 2, 1.1, 1.2, 1.3, 1.8),
    # pruner
    'priority': vector(18000, 13000, 12000, 11000, 14500,
                       14516, 11008, 12004, 13002, 18001),
    # relative weights of the pruner's scoring metrics
    'finger_priorities': 0.1,
    'average_offsets': 0.05,
    'deviation_of_offsets': 0.2,
    # evaluator
    'distances_travelled': 1,
    'chord_difficulties': 1,
    # optimizer (genetic-algorithm settings)
    'best': False,
    'generations': 5,
    'population_size': 100,
    'mate_probability': 0.5,
    'mutate_probability': 0.20,
    'mutate_independent_probability': 0.10,
    'select_tournament_size': 10,
}

# Run the pipeline end to end: build stages, then optimise the layout.
pipeline = Pipeline(PARAMETERS)
pipeline.prepare()
result = pipeline.optimize()
# +
from chordifier.algorithm.Sequencer import Sequencer
s = Sequencer("dataset/ngram.json", 4, 3, 9)
print("=== Sequencer test ===")
print("--- sequences ---")
print(s.sequences)
print("--- occurrences ---")
print(s.occurrences)
print("--- uniques ---")
print(s.uniques)
print("--- indices ---")
print(s.indices)
# -
print("=== Sequencer ===")
print("--- sequences ---")
print(pipeline.sequencer.sequences[:10])
print("--- occurrences ---")
print(pipeline.sequencer.occurrences[:10])
print("--- uniques ---")
print(pipeline.sequencer.uniques)
print("--- indices ---")
print(pipeline.sequencer.indices[:10])
# +
from bokeh.models import HoverTool
from chordifier.KeyboardRenderer import KeyboardRenderer
renderer = KeyboardRenderer(pipeline.keyboard)
quad = renderer.plot.quad(top=[15], bottom=[-16], left=[-25],
right=[9], color="#000000", alpha=0)
hover = HoverTool()
hover.tooltips = [
("x", "$x"),
("y", "$y"),
]
hover.renderers = [quad]
hover.point_policy = "follow_mouse"
hover.attachment = "right"
renderer.plot.add_tools(hover)
print("=== Keyboard ===")
renderer.present()
# -
print("=== Preprocessor ===")
print("--- zones ---")
for zone in pipeline.preprocessor.zones:
print(zone)
print("--- chords ---")
print(pipeline.preprocessor.chords[222:999:666])
print("--- positions ---")
print(pipeline.preprocessor.positions[222:999:666])
print("--- origins ---")
print(pipeline.preprocessor.origins)
print("=== Pruner intact ===")
print("--- chords ---")
print(pipeline.pruner_intact.chords[222:999:666])
print("--- positions ---")
print(pipeline.pruner_intact.positions[222:999:666])
print("--- origins ---")
print(pipeline.pruner_intact.origins)
print("--- metrics ---")
print(pipeline.pruner_intact.metrics[222:999:666])
print("--- scores ---")
print(pipeline.pruner_intact.scores[222:999:666])
print("--- totals ---")
print(pipeline.pruner_intact.totals[222:999:666])
print("=== Pruner ===")
print("--- chords ---")
print(pipeline.pruner.chords)
print("--- scores ---")
print(pipeline.pruner.scores)
print("--- totals ---")
print(pipeline.pruner.totals)
# +
import numpy as np
import pprint
pp = pprint.PrettyPrinter(indent=4)
permutation = np.arange(0, 15)[::-1]
total = pipeline.evaluator.evaluate(permutation)
print("=== Evaluator ===")
print("--- pruner_permuted.chords ---")
print(pipeline.evaluator.pruner_permuted.chords)
print("--- pruner_permuted.positions ---")
#print(pipeline.evaluator.pruner_permuted.positions)
print("--- pruner_permuted.scores ---")
print(pipeline.evaluator.pruner_permuted.scores)
print("--- chords ---")
print(pipeline.evaluator.chords[:2])
print("--- positions ---")
#print(pipeline.evaluator.positions[:2])
print("--- metrics ---")
print(pipeline.evaluator.metrics[:2])
print("--- scores ---")
print(pipeline.evaluator.scores[:2])
print("--- totals ---")
print(pipeline.evaluator.totals[:2])
print("--- occurrence_weighted ---")
print(pipeline.evaluator.occurrence_weighted[:2])
print("--- total ---")
print(total[0])
print("--- retrieve_mapping ---")
pp.pprint(pipeline.evaluator.retrieve_mapping(permutation))
# +
from chordifier.algorithm.Evaluator import determine_source_positions
positions = np.arange(1 * 5 * 10 * 2).reshape(1, 5, 10, 2)
chords = np.array([[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 3, 7, 0, 0, 0, 0, 0],
[0, 2, 1, 3, 5, 0, 0, 0, 0, 0],
[0, 0, 2, 3, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 7, 0, 0, 0, 0, 0],
]])
source_positions = determine_source_positions(chords, positions)
print("=== determine_source_positions ===")
print("--- positions ---")
print(positions)
print("--- chords ---")
print(chords)
print("--- source_positions ---")
print(source_positions)
# -
|
lab/test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8
# language: python
# name: python3
# ---
# <img src="https://github.com/pmservice/ai-openscale-tutorials/raw/master/notebooks/images/banner.png" align="left" alt="banner">
# # Credit risk using SageMaker linear-learner
# Contents
# - Setup
# - Introduction
# - Load and explore data
# - Create logistic regression model using SageMaker linear-learner algorithm
# - Deploy the SageMaker model in the AWS Cloud
# - Score the model
# **Note:** This notebook works correctly with kernel `Python 3.7.x`.
# ## 1. Setup
# Before you use the sample code in this notebook, you must perform the following setup tasks:
#
# - Create a SageMaker Service, setting up steps described here: https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html
# - Install required Python packages from the PyPI repository
# ### Package installation
# !pip install -U boto3 | tail -n 1
# !pip install -U sagemaker | tail -n 1
# !pip install -U pandas==1.2.5 | tail -n 1
# !pip install -U scikit_learn==0.20.3 | tail -n 1
# !pip install -U category_encoders | tail -n 1
# ## 2. Introduction
#
# This notebook defines, trains and deploys the model predicting risk for credit.
# ## 3. Load and explore data
# In this section you will prepare your data for training using SageMaker linear-learner algorithm.
#
# - Load data from github repository
# - Explore data
# - Store training data in S3 Object Storage
# ### Load data from github repository
import numpy as np
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/IBM/watson-openscale-samples/main/IBM%20Cloud/WML/assets/data/credit_risk/credit_risk_training.csv')
# ### Explore data
# +
print("Sample records:")
display(data.head())
print("Label column summary:")
display(data.Risk.value_counts())
# -
# ### Store training data in S3 Object Storage
# You will use the SageMaker built-in linear-learner algorithm. This algorithm expects the first column to be the label when training data is in `text/csv` format.
#
# Moreover, the label column has to be numeric, so you will recode it.
# #### Save prepared data to local filesystem
target = 'Risk'

# Partition feature columns by dtype. Use ``==``/``!=`` rather than ``is``
# for the dtype comparison: numpy defines dtype equality, while identity of
# dtype objects is an implementation detail and not guaranteed.
string_features = [nm for nm, ty in zip(data.dtypes.index, data.dtypes.values)
                   if nm != target and ty == np.dtype('O')]
numeric_features = [nm for nm, ty in zip(data.dtypes.index, data.dtypes.values)
                    if nm != target and ty != np.dtype('O')]

# linear-learner expects the label in the first column and numeric features
# only, so one-hot encode the categorical columns and recode the label to 1/0.
data_recoded = pd.concat([data[[target]], pd.get_dummies(data[string_features]), data[numeric_features]], axis=1)
data_recoded.replace({target: {'Risk': 1, 'No Risk': 0}}, inplace = True)

train_data_filename = 'credit_risk_training_recoded.csv'
# Header omitted and label first, as required for text/csv training input.
data_recoded.to_csv(path_or_buf = train_data_filename, index = False, header = False)
# **Note:** The header row has to be omitted, and the first column has to be the target.
print(data_recoded.columns.tolist())
# #### Upload data to S3 Object Storage
import time
import json
import boto3
import sagemaker
aws_credentials = {'access_key': '***',
'secret_key': '***',
'region_name': '***'}
# **Note:** You have to provide credentials from your Amazon account.
# +
import boto3
import sagemaker
session = boto3.session.Session(
aws_access_key_id = aws_credentials['access_key'],
aws_secret_access_key = aws_credentials['secret_key'],
region_name = aws_credentials['region_name']
)
region = session.region_name
sagemaker_session = sagemaker.Session(session)
bucket = sagemaker_session.default_bucket()
s3 = session.resource('s3')
# -
print('Default bucket: {}'.format(bucket))
#
# **Tip:** You can run following code `[bkt.name for bkt in s3.buckets.all()]` to list all your buckets.
[bkt.name for bkt in s3.buckets.all()]
# **Note:** You have to replace `bucket_name` with name of bucket in your S3 Object Storage and path where training data will be stored.
#
bucket_name = '*******'
train_data_path = 'credit_risk'
output_data_path = 's3://{}/credit-risk/output'.format(bucket_name)
time_suffix = time.strftime("%Y-%m-%d-%H-%M", time.gmtime())
s3_bucket = s3.Bucket(bucket_name)
s3_bucket.upload_file(Filename = train_data_filename, Key = '{}/{}'.format(train_data_path, train_data_filename))
# Let's check if your data have been uploaded successfully.
for s3_obj in s3_bucket.objects.all():
if (s3_obj.bucket_name == bucket_name) and (train_data_path in s3_obj.key):
train_data_uri = 's3://{}/{}'.format(s3_obj.bucket_name, s3_obj.key)
print(train_data_uri)
# <a id="model"></a>
# ## 3. Create logistic regression model using SageMaker linear-learner algorithm
#
# In this section you will learn how to:
#
# - Setup training parameters
# - Start training job
# ### Setup training parameters
# +
from sagemaker.amazon.amazon_estimator import get_image_uri
sm_client = session.client('sagemaker')
# +
training_image = get_image_uri(session.region_name, 'linear-learner')
iam_client = session.client('iam')
[role_arn, *_] = [role['Arn'] for role in iam_client.list_roles()['Roles'] if 'AmazonSageMaker-ExecutionRole' in role['RoleName'] or 'SagemakerFull' in role['RoleName']]
linear_job_name = 'Credit-risk-linear-learner-' + time_suffix
# +
linear_training_params = {
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"HyperParameters": {
"feature_dim": str(data_recoded.shape[1] - 1),
"mini_batch_size": "100",
"predictor_type": "binary_classifier",
"epochs": "10",
"num_models": "32",
"loss": "auto"
},
"InputDataConfig": [{
"ChannelName": "train",
"ContentType": "text/csv",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": train_data_uri,
"S3DataDistributionType": "ShardedByS3Key"
}
}
}],
"OutputDataConfig": {"S3OutputPath": output_data_path},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.c4.xlarge",
"VolumeSizeInGB": 2
},
"RoleArn": role_arn,
"StoppingCondition": {
"MaxRuntimeInSeconds": 6 * 60
},
"TrainingJobName": linear_job_name
}
# -
# ### Start training job
sm_client.create_training_job(**linear_training_params)
# +
try:
    # Block until the training job reaches a terminal state.
    sm_client.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName = linear_job_name)
except Exception:
    # Typo fixed: message previously read 'Traing job error.'
    print('Training job error.')

# Inspect the final job status; on failure surface the reason, otherwise
# record the job ARN and the S3 location of the trained model artifacts.
train_job_details = sm_client.describe_training_job(TrainingJobName = linear_job_name)
train_job_status = train_job_details['TrainingJobStatus']
if train_job_status == 'Failed':
    print(train_job_details['FailureReason'])
else:
    train_job_arn = train_job_details['TrainingJobArn']
    print(train_job_arn)

trained_model_uri = train_job_details['ModelArtifacts']['S3ModelArtifacts']
print(trained_model_uri)
# -
# ## 5. Deploy the SageMaker model in the AWS Cloud
#
# In this section you will learn how to:
#
# - Setup deployment parameters
# - Create deployment configuration endpoint
# - Create online scoring endpoint
# ### Setup deployment parameters
# +
linear_hosting_container = {'Image': training_image, 'ModelDataUrl': trained_model_uri}
create_model_details = sm_client.create_model(
ModelName = linear_job_name,
ExecutionRoleArn = role_arn,
PrimaryContainer = linear_hosting_container)
print(create_model_details['ModelArn'])
# -
# ### Create deployment configuration endpoint
# +
endpoint_config = 'Credit-risk-linear-endpoint-config-' + time_suffix
print(endpoint_config)
create_endpoint_config_details = sm_client.create_endpoint_config(
EndpointConfigName = endpoint_config,
ProductionVariants = [{
'InstanceType': 'ml.m4.xlarge',
'InitialInstanceCount': 1,
'ModelName': linear_job_name,
'VariantName': 'AllTraffic'}])
endpoint_config_details = sm_client.describe_endpoint_config(EndpointConfigName = endpoint_config)
print(endpoint_config_details)
# -
# ### Create online scoring endpoint
# +
scoring_endpoint = 'Credit-risk-endpoint-scoring-' + time_suffix
create_endpoint_details = sm_client.create_endpoint(
EndpointName = scoring_endpoint,
EndpointConfigName = endpoint_config)
# +
try:
sm_client.get_waiter('endpoint_in_service').wait(EndpointName = scoring_endpoint)
except Exception:
print('Create scoring endpoint error')
scoring_endpoint_details = sm_client.describe_endpoint(EndpointName = scoring_endpoint)
scoring_enpoint_config_status = scoring_endpoint_details['EndpointStatus']
if scoring_enpoint_config_status != 'InService':
print(scoring_endpoint_details['FailureReason'])
else:
print(scoring_endpoint_details['EndpointArn'])
# -
# ## 5. Score the model
#
# In this section you will learn how to score the deployed model.
#
# - Prepare sample data for scoring
# - Send payload for scoring
# ### Prepare sample data for scoring
# You will use data in `csv` format as the scoring payload. The first column (label) is removed from the data, and the last 10 training records are selected as the scoring payload (matching the `[-10:]` slice below).
scoring_data_filename = 'credit_risk_scoring_recoded.csv'
with open(train_data_filename) as f_train:
with open(scoring_data_filename, 'w') as f_score:
f_score.writelines([','.join(line.split(',')[1:]) for line in f_train.readlines()[-10:]])
# ### Send payload for scoring
# +
sm_runtime = session.client('runtime.sagemaker')
with open(scoring_data_filename) as f_payload:
scoring_response = sm_runtime.invoke_endpoint(EndpointName = scoring_endpoint,
ContentType = 'text/csv',
Body = f_payload.read().encode())
scored_records = scoring_response['Body'].read().decode()
print(json.loads(scored_records))
|
notebooks/with-outputs/CreditModelSagemakerLinearLearner-with-outputs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook implements a pre-trained sentiment analysis pipeline including a regex pre-processing step, tokenization, n-gram computation, and logistic regreression model as a RESTful API.
# +
import cPickle
import json
import pandas as pd
import sklearn
import requests
# -
# ### Import the trained model
resp = requests.get("https://raw.githubusercontent.com/crawles/gpdb_sentiment_analysis_twitter_model/master/twitter_sentiment_model.pkl")
resp.raise_for_status()
cl = cPickle.loads(resp.content)
# ### Import data pre-processing function
def regex_preprocess(raw_tweets):
    """Normalise raw tweet text for the sentiment model.

    Replaces @mentions with the token USERNAME, URLs with the token URL, and
    collapses characters repeated three or more times down to two
    (e.g. "soooo" -> "soo").

    Args:
        raw_tweets: iterable of tweet strings (or a pandas Series).

    Returns:
        pandas.Series of cleaned strings.
    """
    pp_text = pd.Series(raw_tweets)
    user_pat = '(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)'
    http_pat = '(https?:\/\/(?:www\.|(?!www))[^\s\.]+\.[^\s]{2,}|www\.[^\s]+\.[^\s]{2,})'
    repeat_pat, repeat_repl = "(.)\\1\\1+", '\\1\\1'
    # regex=True is explicit: newer pandas defaults Series.str.replace to
    # literal matching, which would silently break these patterns.
    pp_text = pp_text.str.replace(pat=user_pat, repl='USERNAME', regex=True)
    pp_text = pp_text.str.replace(pat=http_pat, repl='URL', regex=True)
    # BUG FIX: the original discarded the result of this replace (no
    # assignment), so repeated characters were never actually collapsed.
    pp_text = pp_text.str.replace(pat=repeat_pat, repl=repeat_repl, regex=True)
    return pp_text
# # Setup the API
# Jupyter Kernel Gateway utilizes a global REQUEST JSON string that will be replaced on each invocation of the API.
REQUEST = json.dumps({
'path' : {},
'args' : {}
})
# ### Compute sentiment using trained model and serve using POST
# Using the kernel gateway, a cell is created as an HTTP handler using a single line comment. The handler supports common HTTP verbs (GET, POST, DELETE, etc). For more information, view the <a href="https://jupyter-kernel-gateway.readthedocs.io/en/latest/http-mode.html">docs</a>.
# POST /polarity_compute
req = json.loads(REQUEST)
tweets = req['body']['data']
print(cl.predict_proba(regex_preprocess(tweets))[:][:,1])
# # Predict timepoint API
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam
from keras.layers.advanced_activations import PReLU
# +
from keras.models import model_from_json
import pickle
# Load json and create model
json_file = open("model.json", 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# Load weights into new model
model.load_weights("model.h5")
# Compile model
model.compile(loss='mse', optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8))
# Load scaler
scaler = pickle.load(open('scaler.sav', 'rb'))
# +
# POST /predict_timepoint
req = json.loads(REQUEST)
data = req['body']['data']
x = np.array([data], dtype=float)
prediction_log_minmaxscaled = model.predict(x, batch_size=1)
prediction_log = scaler.inverse_transform(prediction_log_minmaxscaled)
prediction = np.exp(prediction_log) - 1
print(prediction)
|
jupyter_gateway_deployment/nn-analytics-service-pcf.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('base')
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.model_selection import train_test_split
from numpy import sqrt, array, random, argsort
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
#from google.colab import drive
#drive.mount('/content/gdrive')
# -
df = pd.read_csv("https://raw.githubusercontent.com/AIML-Makgeolli/CpE-AIDL/main/thesis_database/Crop_recommendation.csv")
df_train = df.drop(['label','rainfall'], axis = 1)
df_train.head()
X_N = df_train[['N']] #.iloc[:100]
X_P = df_train[['P']]
X_K = df_train[['K']]
X_temp = df_train[['temperature']]
X_moist = df_train[['humidity']]
y = df_train[['ph']] #.iloc[:100]
# +
class DBscan():
    """Wrapper around sklearn DBSCAN for one feature column paired with pH.

    Usage: input_train() to store a train split, dbscan_test() to fit and
    print validation metrics, dbscan_outlier() to plot clusters and noise.
    """
    def __init__(self):
        # No state at construction; everything is set by input_train().
        return
    def input_train(self, X_in, y_in):
        """Store inputs, take a 70/30 split and cache the training pairs.

        self.data becomes a 2-column numpy array (feature, pH) of the
        training portion; the test split is computed but unused.
        """
        self.X = X_in
        self.y = y_in
        X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=42)
        self.data = pd.concat([X_train, y_train], axis=1).to_numpy()
        return self.data
    def dbscan_test(self):
        """Standardise self.data, fit DBSCAN and print cluster diagnostics.

        Prints the estimated cluster count, noise-point count and three
        internal validation scores (Calinski-Harabasz, Davies-Bouldin,
        Silhouette).
        """
        self.X_trn = StandardScaler().fit_transform(self.data)
        y_pred = DBSCAN(eps=0.3, min_samples=10).fit(self.X_trn)
        # Boolean mask marking DBSCAN's core samples.
        self.core_samples_mask = np.zeros_like(y_pred.labels_, dtype=bool)
        self.core_samples_mask[y_pred.core_sample_indices_] = True
        self.labels = y_pred.labels_
        # Label -1 denotes noise, so it is excluded from the cluster count.
        self.n_clusters_ = len(set(self.labels)) - (1 if -1 in self.labels else 0)
        n_noise_ = list(self.labels).count(-1)
        print("Estimated number of clusters: %d" % self.n_clusters_)
        print("Estimated number of noise points: %d" % n_noise_)
        print("Calinski-Harabasz Index: %0.3f" % metrics.calinski_harabasz_score(self.X_trn, self.labels))
        print("Davies-Bouldin Index: %0.3f" % metrics.davies_bouldin_score(self.X_trn, self.labels))
        print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(self.X_trn, self.labels))
        return
    def dbscan_outlier(self):
        """Scatter-plot the (unscaled) training pairs, one colour per cluster.

        Noise points are drawn in black; core points get larger markers than
        border points.
        """
        unique_labels = set(self.labels)
        colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
        for k, col in zip(unique_labels, colors):
            if k == -1:
                # Black used for noise.
                col = [0, 0, 0, 1]
            class_member_mask = self.labels == k
            # Core points of this cluster (larger markers).
            xy = self.data[class_member_mask & self.core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=8,)
            # Border (non-core) points of this cluster (smaller markers).
            xy = self.data[class_member_mask & ~self.core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=6,)
        plt.title("Estimated number of clusters: %d" % self.n_clusters_)
        plt.show()
dbscantest = DBscan()
# -
# Nitrogen and pH
dbscantest.input_train(X_N,y)
dbscantest.dbscan_test()
dbscantest.dbscan_outlier()
# Phosphorus and pH
dbscantest.input_train(X_P,y)
dbscantest.dbscan_test()
dbscantest.dbscan_outlier()
# Potassium and pH
dbscantest.input_train(X_K,y)
dbscantest.dbscan_test()
dbscantest.dbscan_outlier()
# Temperature and pH
dbscantest.input_train(X_temp,y)
dbscantest.dbscan_test()
dbscantest.dbscan_outlier()
# Moisture and pH
dbscantest.input_train(X_moist,y)
dbscantest.dbscan_test()
dbscantest.dbscan_outlier()
|
SMARF/dbscan.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# <h1> Feature Engineering </h1>
#
# In this notebook, you will learn how to incorporate feature engineering into your pipeline.
# <ul>
# <li> Working with feature columns </li>
# <li> Adding feature crosses in TensorFlow </li>
# <li> Reading data from BigQuery </li>
# <li> Creating datasets using Dataflow </li>
# <li> Using a wide-and-deep model </li>
# </ul>
# + deletable=true editable=true
import tensorflow as tf
import apache_beam as beam
import shutil
print tf.__version__
# + [markdown] deletable=true editable=true
# <h2> 1. Environment variables for project and bucket </h2>
#
# <li> Your project id is the *unique* string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos </li>
# <li> Cloud training often involves saving and restoring model files. Therefore, we should <b>create a single-region bucket</b>. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available) </li>
# </ol>
# <b>Change the cell below</b> to reflect your Project ID and bucket name.
#
# + deletable=true editable=true
import os
REGION = 'us-central1' # Choose an available region for Cloud MLE from https://cloud.google.com/ml-engine/docs/regions.
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME. Use a regional bucket in the region you selected.
PROJECT = 'cloud-training-demos' # CHANGE THIS
# + deletable=true editable=true
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
# + deletable=true editable=true
# %bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
# + [markdown] deletable=true editable=true
# <h2> 2. Specifying query to pull the data </h2>
#
# Let's pull out a few extra columns from the timestamp.
# + deletable=true editable=true
def create_query(phase, EVERY_N):
    """Build the BigQuery legacy-SQL query for one dataset split.

    Args:
      phase: 1 = train, 2 = valid.
      EVERY_N: sampling modulus. When given, rows satisfying
        hash(pickup_datetime) % EVERY_N == phase are selected; when None,
        the full dataset is split by hash % 4 (buckets 0-1 for training,
        bucket `phase` for validation).

    Returns:
      The query string, with obviously-bad trips (zero distance, tiny fares,
      coordinates outside the NYC bounding box) filtered out.
    """
    base_query = """
SELECT
  (tolls_amount + fare_amount) AS fare_amount,
  DAYOFWEEK(pickup_datetime) AS dayofweek,
  HOUR(pickup_datetime) AS hourofday,
  pickup_longitude AS pickuplon,
  pickup_latitude AS pickuplat,
  dropoff_longitude AS dropofflon,
  dropoff_latitude AS dropofflat,
  passenger_count*1.0 AS passengers,
  CONCAT(STRING(pickup_datetime), STRING(pickup_longitude), STRING(pickup_latitude), STRING(dropoff_latitude), STRING(dropoff_longitude)) AS key
FROM
  [nyc-tlc:yellow.trips]
WHERE
  trip_distance > 0
  AND fare_amount >= 2.5
  AND pickup_longitude > -78
  AND pickup_longitude < -70
  AND dropoff_longitude > -78
  AND dropoff_longitude < -70
  AND pickup_latitude > 37
  AND pickup_latitude < 45
  AND dropoff_latitude > 37
  AND dropoff_latitude < 45
  AND passenger_count > 0
  """
    # Use identity comparison for None (PEP 8); `== None` relies on __eq__.
    if EVERY_N is None:
        if phase < 2:
            # Training: hash buckets 0 and 1 (~half the data)
            query = "{0} AND ABS(HASH(pickup_datetime)) % 4 < 2".format(base_query)
        else:
            # Validation: the single hash bucket matching `phase`
            query = "{0} AND ABS(HASH(pickup_datetime)) % 4 == {1}".format(base_query, phase)
    else:
        # Subsampled split: one bucket out of EVERY_N
        query = "{0} AND ABS(HASH(pickup_datetime)) % {1} == {2}".format(base_query, EVERY_N, phase)
    return query
print create_query(2, 100000)
# + [markdown] deletable=true editable=true
# Try the query above in https://bigquery.cloud.google.com/table/nyc-tlc:yellow.trips if you want to see what it does (ADD LIMIT 10 to the query!)
# + [markdown] deletable=true editable=true
# <h2> 3. Preprocessing Dataflow job from BigQuery </h2>
#
# This code reads from BigQuery and saves the data as-is on Google Cloud Storage. We can do additional preprocessing and cleanup inside Dataflow, but then we'll have to remember to repeat that preprocessing during inference. It is better to use tf.transform which will do this book-keeping for you, or to do preprocessing within your TensorFlow model. We will look at this in future notebooks. For now, we are simply moving data from BigQuery to CSV using Dataflow.
#
# While we could read from BQ directly from TensorFlow (See: https://www.tensorflow.org/api_docs/python/tf/contrib/cloud/BigQueryReader), it is quite convenient to export to CSV and do the training off CSV. Let's use Dataflow to do this at scale.
#
# Because we are running this on the Cloud, you should go to the GCP Console (https://console.cloud.google.com/dataflow) to look at the status of the job. It will take several minutes for the preprocessing job to launch.
# + deletable=true editable=true
# %bash
gsutil -m rm -rf gs://$BUCKET/taxifare/ch4/taxi_preproc/
# + deletable=true editable=true
import datetime
def to_csv(rowdict):
    """Serialise one BigQuery result row (a dict) into a CSV line.

    The numeric BigQuery DAYOFWEEK (1 = Sunday ... 7 = Saturday) is replaced
    by its three-letter name; the input dict is not mutated.
    """
    import copy
    days = ['null', 'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    CSV_COLUMNS = 'fare_amount,dayofweek,hourofday,pickuplon,pickuplat,dropofflon,dropofflat,passengers,key'.split(',')
    # Work on a copy so the caller's row is left untouched.
    row = copy.deepcopy(rowdict)
    row['dayofweek'] = days[row['dayofweek']]
    fields = [str(row[column]) for column in CSV_COLUMNS]
    return ','.join(fields)
def preprocess(EVERY_N, RUNNER):
    """Launch a Beam pipeline exporting taxi data from BigQuery to CSV on GCS.

    Args:
      EVERY_N: sampling modulus passed through to create_query (None = full data).
      RUNNER: 'DirectRunner' to run locally or 'DataflowRunner' to run on Cloud
        Dataflow.

    Writes sharded train.csv-*/valid.csv-* files under
    gs://<BUCKET>/taxifare/ch4/taxi_preproc/.
    """
    job_name = 'preprocess-taxifeatures' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
    print('Launching Dataflow job {} ... hang on'.format(job_name))
    OUTPUT_DIR = 'gs://{0}/taxifare/ch4/taxi_preproc/'.format(BUCKET)
    options = {
        'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
        'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
        # Reuse the job_name computed above: the original recomputed the
        # timestamp here, so the printed name could differ from the name of
        # the job actually submitted.
        'job_name': job_name,
        'project': PROJECT,
        'teardown_policy': 'TEARDOWN_ALWAYS',
        'no_save_main_session': True
    }
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    p = beam.Pipeline(RUNNER, options=opts)
    # One read->to_csv->write branch per split; phase 1 = train, 2 = valid.
    for n, step in enumerate(['train', 'valid']):
        query = create_query(n + 1, EVERY_N)
        outfile = os.path.join(OUTPUT_DIR, '{}.csv'.format(step))
        (
            p | 'read_{}'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query=query))
              | 'tocsv_{}'.format(step) >> beam.Map(to_csv)
              | 'write_{}'.format(step) >> beam.io.Write(beam.io.WriteToText(outfile))
        )
    p.run()
# + deletable=true editable=true
# Change as needed
preprocess(50*100000, 'DirectRunner') # runs locally
#preprocess(50*1000, 'DataflowRunner') # runs on cloud; change first arg to None to preprocess full dataset
# + deletable=true editable=true
# %bash
gsutil ls -l gs://$BUCKET/taxifare/ch4/taxi_preproc/
# + deletable=true editable=true
# %bash
gsutil cat "gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*" | head
# + [markdown] deletable=true editable=true
# <h2> 4. Develop model with new inputs </h2>
#
# Download a fraction of the preprocessed data to enable local development.
# + deletable=true editable=true
# %bash
# mkdir sample
gsutil cp "gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*" sample/train.csv
gsutil cp "gs://$BUCKET/taxifare/ch4/taxi_preproc/valid.csv-00000-of-*" sample/valid.csv
# + [markdown] deletable=true editable=true
# We have two new inputs in the INPUT_COLUMNS, three engineered features, and the estimator involves bucketization and feature crosses.
# + deletable=true editable=true
# !grep -A 20 "INPUT_COLUMNS =" taxifare/trainer/model.py
# + deletable=true editable=true
# !grep -A 50 "build_estimator" taxifare/trainer/model.py
# + deletable=true editable=true
# !grep -A 15 "add_engineered(" taxifare/trainer/model.py
# + [markdown] deletable=true editable=true
# Try out the new model on the local sample to make sure it works fine.
# + deletable=true editable=true
# %bash
# rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths="${PWD}/sample/train*" \
--eval_data_paths=${PWD}/sample/valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=1000 \
--job-dir=/tmp
# + deletable=true editable=true
# !ls taxi_trained/export/exporter/
# + deletable=true editable=true
# %writefile /tmp/test.json
{"dayofweek": "Sun", "hourofday": 17, "pickuplon": -73.885262, "pickuplat": 40.773008, "dropofflon": -73.987232, "dropofflat": 40.732403, "passengers": 2}
# + deletable=true editable=true
# %bash
model_dir=$(ls ${PWD}/taxi_trained/export/exporter)
gcloud ml-engine local predict \
--model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \
--json-instances=/tmp/test.json
# + [markdown] deletable=true editable=true
# <h2> 5. Train on cloud </h2>
#
# + deletable=true editable=true
# might need this ...
# # !gcloud --quiet components update
# + deletable=true editable=true language="bash"
# OUTDIR=gs://${BUCKET}/taxifare/ch4/taxi_trained
# JOBNAME=lab4a_$(date -u +%y%m%d_%H%M%S)
# echo $OUTDIR $REGION $JOBNAME
# gsutil -m rm -rf $OUTDIR
# gcloud ml-engine jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=${PWD}/taxifare/trainer \
# --job-dir=$OUTDIR \
# --staging-bucket=gs://$BUCKET \
# --scale-tier=BASIC \
# --runtime-version=1.4 \
# -- \
# --train_data_paths="gs://$BUCKET/taxifare/ch4/taxi_preproc/train*" \
# --eval_data_paths="gs://${BUCKET}/taxifare/ch4/taxi_preproc/valid*" \
# --train_steps=5000 \
# --output_dir=$OUTDIR
# + [markdown] deletable=true editable=true
# The RMSE is now 8.33249, an improvement over the 9.3 that we were getting ... of course, we won't know until we train/validate on a larger dataset. Still, this is promising. But before we do that, let's do hyper-parameter tuning.
# + [markdown] deletable=true editable=true
# <h2> 6. Hyper-parameter tune </h2>
#
# Look at <a href="hyperparam.ipynb">hyper-parameter tuning notebook</a> to decide what parameters to use for model. Based on that run, I ended up choosing:
# <ol>
# <li> train_batch_size: 512 </li>
# <li> nbuckets: 16 </li>
# <li> hidden_units: "64 64 64 8" </li>
# </ol>
#
# This gives an RMSE of 5, a considerable improvement from the 8.3 we were getting earlier ... Let's try this over a larger dataset.
# + [markdown] deletable=true editable=true
# <h1> Run Cloud training on 2 million row dataset </h1>
#
# This run uses as input 2 million rows and takes ~20 minutes with 10 workers (STANDARD_1 pricing tier). The model is exactly the same as above. The only changes are to the input (to use the larger dataset) and to the Cloud MLE tier (to use STANDARD_1 instead of BASIC -- STANDARD_1 is approximately 10x more powerful than BASIC). Because the Dataflow preprocessing takes about 15 minutes, we train here using CSV files in a public bucket.
#
# When doing distributed training, use train_steps instead of num_epochs. The distributed workers don't know how many rows there are, but we can calculate train_steps = num_rows \* num_epochs / train_batch_size. In this case, we have 2141023 * 100 / 512 = 418168 train steps.
# + deletable=true editable=true language="bash"
#
# WARNING -- this uses significant resources and is optional. Remove this line to run the block.
#
# OUTDIR=gs://${BUCKET}/taxifare/feateng2m
# JOBNAME=lab4a_$(date -u +%y%m%d_%H%M%S)
# TIER=STANDARD_1
# echo $OUTDIR $REGION $JOBNAME
# gsutil -m rm -rf $OUTDIR
# gcloud ml-engine jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=${PWD}/taxifare/trainer \
# --job-dir=$OUTDIR \
# --staging-bucket=gs://$BUCKET \
# --scale-tier=$TIER \
# --runtime-version=1.4 \
# -- \
# --train_data_paths="gs://cloud-training-demos/taxifare/train*" \
# --eval_data_paths="gs://cloud-training-demos/taxifare/valid*" \
# --output_dir=$OUTDIR \
# --train_steps=418168 \
# --train_batch_size=512 --nbuckets=16 --hidden_units="64 64 64 8"
# + deletable=true editable=true
from google.datalab.ml import TensorBoard
OUTDIR='gs://{0}/taxifare/feateng2m'.format(BUCKET)
print OUTDIR
TensorBoard().start(OUTDIR)
# + [markdown] deletable=true editable=true
# The RMSE after training on the 2-million-row dataset is \$3.03. This graph shows the improvements so far ...
# + deletable=true editable=true
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# Summary bar chart: RMSE achieved by each lab/method in this series so far.
df = pd.DataFrame({'Lab' : pd.Series(['1a', '2-3', '4a', '4b', '4c']),
              'Method' : pd.Series(['Heuristic Benchmark', 'tf.learn', '+Feature Eng.', '+ Hyperparam', '+ 2m rows']),
              'RMSE': pd.Series([8.026, 9.4, 8.3, 5.0, 3.03]) })
ax = sns.barplot(data = df, x = 'Method', y = 'RMSE')
ax.set_ylabel('RMSE (dollars)')
ax.set_xlabel('Labs/Methods')
# Horizontal reference line at RMSE = $5 for visual comparison.
plt.plot(np.linspace(-20, 120, 1000), [5] * 1000, 'b');
# + deletable=true editable=true
# %bash
gsutil -m mv gs://${BUCKET}/taxifare/ch4/ gs://${BUCKET}/taxifare/ch4_1m/
# + [markdown] deletable=true editable=true
# Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
courses/machine_learning/feateng/feateng.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/HKang42/DS-Unit-2-Kaggle-Challenge/blob/master/module3-cross-validation/Harrison_Kang_LS_DS_223_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="k1rzIhL0UEtN" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 3*
#
# ---
# + [markdown] id="jbw6zTBgUEtS" colab_type="text"
# # Cross-Validation
#
#
# ## Assignment
# - [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
# - [ ] Continue to participate in our Kaggle challenge.
# - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.
# - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
# - [ ] Commit your notebook to your fork of the GitHub repo.
#
#
# You won't be able to just copy from the lesson notebook to this assignment.
#
# - Because the lesson was ***regression***, but the assignment is ***classification.***
# - Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.
#
# So you will have to adapt the example, which is good real-world practice.
#
# 1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
# 2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`
# 3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values)
# 4. If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html))
#
#
#
# ## Stretch Goals
#
# ### Reading
# - <NAME>, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation
# - <NAME>, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)
# - <NAME>, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation
# - <NAME>, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)
# - <NAME>, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)
#
# ### Doing
# - Add your own stretch goals!
# - Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.
# - In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.
# - _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:
#
# > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...
#
# The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?
#
# + [markdown] id="xuBxeefeUEtT" colab_type="text"
# ### BONUS: Stacking!
#
# Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
#
# ```python
# import pandas as pd
#
# # Filenames of your submissions you want to ensemble
# files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
#
# target = 'status_group'
# submissions = (pd.read_csv(file)[[target]] for file in files)
# ensemble = pd.concat(submissions, axis='columns')
# majority_vote = ensemble.mode(axis='columns')[0]
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission[target] = majority_vote
# submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
# ```
# + id="2AC9g9_jUEtU" colab_type="code" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
# !pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# + id="KHjl_MQvUEtZ" colab_type="code" colab={}
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# + [markdown] id="SWq_B0ZxXbq3" colab_type="text"
# # Code from previous assignment (clean data, split, build model)
# + id="ZhP8L7zRXjGO" colab_type="code" colab={}
import numpy as np
import datetime as dt
def wrangler(df):
    """Clean a waterpump features frame.

    Normalises placeholder values to NaN, drops redundant/useless columns,
    and converts date_recorded into an ordinal integer feature the random
    forest can consume. The input frame is not mutated.

    Args:
      df: raw features DataFrame (train or test).

    Returns:
      A cleaned copy of df.
    """
    df = df.copy()
    # fix latitude: -2e-08 is a placeholder for "missing", normalise it to 0
    # so the zero-handling below catches it
    df['latitude'] = df['latitude'].replace(-2e-08, 0)
    # replace zeroes with NaN's for columns where 0 is physically meaningless
    zero_cols = ['longitude', 'latitude', 'gps_height', 'population', 'construction_year']
    for col in zero_cols:
        # np.nan: np.NaN was removed in NumPy 2.0
        df[col] = df[col].replace(0, np.nan)
    # drop duplicate columns
    df = df.drop(columns=['quantity_group', 'payment_type'])
    # drop columns with useless information
    df = df.drop(columns=['id', 'recorded_by'])
    # convert date_recorded to datetime; pandas infers the ISO format by
    # default, so the deprecated infer_datetime_format flag is not needed
    df['date_recorded'] = pd.to_datetime(df['date_recorded'])
    # replace date_recorded with an ordinal version: feeding raw datetime64
    # values to the random forest classifier raises an invalid promotion error
    df['date_recorded_ord'] = df['date_recorded'].apply(dt.datetime.toordinal)
    df = df.drop(columns=['date_recorded'])
    return df
# + id="1iJDNC4Lev_Y" colab_type="code" colab={}
# re-initialize the train and test data sets so we don't get errors if we need to re-run
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
# apply function to our data sets
train = wrangler(train)
test = wrangler(test)
# + [markdown] id="zuEsXx0YXRX0" colab_type="text"
# # Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.
# + id="GEBrWgAam3Qv" colab_type="code" colab={}
# set target and features
target = 'status_group'
features = train.columns.drop([target])
xtrain = train[features]
ytrain = train[target]
# + id="a5BfQe1ZUEtd" colab_type="code" colab={}
# Build model
import category_encoders as ce
import numpy as np
#from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
# Encode categoricals ordinally (TargetEncoder doesn't support multi-class),
# impute missing values, then fit a random forest on all cores.
pipeline = make_pipeline(ce.OrdinalEncoder(), \
                         SimpleImputer(), \
                         RandomForestClassifier( random_state=42, n_jobs=-1) )
# + id="ZEI-GI8vo5aH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4483fc34-301d-49b8-a68a-f6127f36259d"
from sklearn.model_selection import cross_val_score
# k-fold cross-validated accuracy of the untuned pipeline (baseline before
# the randomized search below)
k = 3
scores = cross_val_score(pipeline, xtrain, ytrain, cv=k, scoring = 'accuracy')
'''
import sklearn
sorted(sklearn.metrics.SCORERS.keys())
'''
print(f'Accuracy for {k} folds:', scores)
# + id="tz-znLWapQoO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="c09767f7-144a-4313-b291-3b1d0d042a3e"
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from scipy.stats import randint, uniform
# Hyperparameter search space for the pipeline built above.
param_distributions = {
    # Remove this hyperparameter, because of an issue:
    # https://github.com/scikit-learn-contrib/categorical-encoding/issues/184
    # 'targetencoder__smoothing': uniform(1, 1000),
    'simpleimputer__strategy': ['mean', 'median'],
    'randomforestclassifier__max_leaf_nodes': randint(5, 20),
    # min_samples_leaf must be an int (or a float fraction); None was invalid
    # and made any candidate that sampled it raise a ValueError during fit.
    'randomforestclassifier__min_samples_leaf': [5, 10, 15, 20],
    # min_samples_split must be >= 2 (or a float in (0, 1]); 1 was invalid.
    'randomforestclassifier__min_samples_split': [2, 3, 4, 5],
}
# If you're on Colab, decrease n_iter & cv parameters
search = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=10,
    cv=3,
    scoring='accuracy',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
search.fit(xtrain, ytrain);
# + id="IvlRKqoC4f5h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="e57ac74f-3a60-4265-ad6e-a2f0beba8274"
print('Best hyperparameters', search.best_params_)
print('Cross-validation Accuracy', search.best_score_)
|
module3-cross-validation/Harrison_Kang_LS_DS_223_assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
from __future__ import division
import logging
import os
import xml.etree.ElementTree as ET
from senpy.plugins import EmotionPlugin, SenpyPlugin
from senpy.models import Results, EmotionSet, Entry, Emotion
logger = logging.getLogger(__name__)
# my packages
import codecs, csv, re, nltk
import numpy as np
import math, itertools
from drevicko.twitter_regexes import cleanString, setupRegexes, tweetPreprocessor
import preprocess_twitter
from collections import defaultdict
from stop_words import get_stop_words
from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.externals import joblib
# from sklearn.svm import SVC, SVR
from nltk.tokenize import TweetTokenizer
import nltk.tokenize.casual as casual
import gzip
from datetime import datetime
import random
os.environ['KERAS_BACKEND']='theano'
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.models import load_model, model_from_json
class fivePointRegression(EmotionPlugin):
    """Senpy emotion plugin backed by a Keras LSTM regressor.

    Predicts scores for four emotions (confident, excited, happy, surprised)
    from tweet-like text and exposes them as VAD(+surprise) dimensions on a
    senpy EmotionSet. Model weights and GloVe twitter embeddings are loaded
    lazily in activate().
    """
    def __init__(self, info, *args, **kwargs):
        # `info` is the senpy plugin descriptor (name, module, ...)
        super(fivePointRegression, self).__init__(info, *args, **kwargs)
        self.name = info['name']
        self.id = info['module']
        self._info = info
        local_path = os.path.dirname(os.path.abspath(__file__))
        # Ekman-style category placeholders (not populated anywhere in this class)
        self._categories = {'sadness':[],
                            'disgust':[],
                            'surprise':[],
                            'anger':[],
                            'fear':[],
                            'joy':[]}
        self._wnaffect_mappings = {'sadness':'sadness',
                                   'disgust':'disgust',
                                   'surprise':'surprise',
                                   'anger':'anger',
                                   'fear':'fear',
                                   'joy':'joy'}
        # Maps each predicted emotion name to a VAD/surprise dimension letter,
        # used by _extract_features
        self._vad_mappings = {'confident':'D',
                              'excited':'A',
                              'happy':'V',
                              'surprised':'S'}
        # Maximum token-sequence length fed to the LSTM (shorter inputs are padded)
        self._maxlen = 65
        self._paths = {
            "word_emb": "glove.twitter.27B.100d.txt",
            "word_freq": 'wordFrequencies.dump',
            "classifiers" : 'classifiers',
            "ngramizers": 'ngramizers'
            }
        self._savedModelPath = local_path + "/classifiers/LSTM/fivePointRegression"
        self._path_wordembeddings = os.path.dirname(local_path) + '/glove.twitter.27B.100d.txt.gz'
        # Order must match the output order of the trained regression model
        self._emoNames = ['confident','excited','happy', 'surprised']
        # self._emoNames = ['sadness', 'disgust', 'surprise', 'anger', 'fear', 'joy']
        # self._emoNames = ['anger','fear','joy','sadness']
        # VAD centroids of five Ekman categories, used by _backwards_conversion
        # and the commented-out semeval block in analyse()
        self.centroids= {
            "anger": {
                "A": 6.95,
                "D": 5.1,
                "V": 2.7},
            "disgust": {
                "A": 5.3,
                "D": 8.05,
                "V": 2.7},
            "fear": {
                "A": 6.5,
                "D": 3.6,
                "V": 3.2},
            "joy": {
                "A": 7.22,
                "D": 6.28,
                "V": 8.6},
            "sadness": {
                "A": 5.21,
                "D": 2.82,
                "V": 2.21}
            }
        # WordNet-Affect ontology URIs for each category
        self.emotions_ontology = {
            "anger": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#anger",
            "disgust": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#disgust",
            "fear": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#negative-fear",
            "joy": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#joy",
            "neutral": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#neutral-emotion",
            "sadness": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#sadness"
            }
        # ANEW ontology URIs for the output dimensions
        self._centroid_mappings = {
            "V": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence",
            "A": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal",
            "D": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance",
            "S": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#surprise"
            }
    def activate(self, *args, **kwargs):
        """Senpy lifecycle hook: load the Keras model and the word embeddings."""
        np.random.seed(1337)
        st = datetime.now()
        self._fivePointRegressionModel = self._load_model_and_weights(self._savedModelPath)
        logger.info("{} {}".format(datetime.now() - st, "loaded _fivePointRegressionModel"))
        st = datetime.now()
        # Loading the full GloVe file can take a while; timed via logger
        self._Dictionary, self._Indices = self._load_original_vectors(
            filename = self._path_wordembeddings,
            sep = ' ',
            wordFrequencies = None,
            zipped = True) # leave wordFrequencies=None for loading the entire WE file
        logger.info("{} {}".format(datetime.now() - st, "loaded _wordEmbeddings"))
        logger.info("fivePointRegression plugin is ready to go!")
    def deactivate(self, *args, **kwargs):
        """Senpy lifecycle hook: log shutdown (no resources need releasing)."""
        try:
            logger.info("fivePointRegression plugin is being deactivated...")
        except Exception:
            print("Exception in logger while reporting deactivation of fivePointRegression")
    #MY FUNCTIONS
    def _load_model_and_weights(self, filename):
        """Load a Keras model from <filename>.json (architecture) and <filename>.h5 (weights)."""
        with open(filename+'.json', 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
        loaded_model.load_weights(filename+'.h5')
        return loaded_model
    def _lists_to_vectors(self, text):
        """Convert one preprocessed text into a padded index matrix of shape (1, _maxlen)."""
        train_sequences = [self._text_to_sequence(text)]
        X = sequence.pad_sequences(train_sequences, maxlen=self._maxlen)
        return X
    def _text_to_sequence(self,text):
        """Map whitespace-split tokens to embedding indices, right-padded with 0."""
        train_sequence = []
        for token in text.split():
            try:
                train_sequence.append(self._Indices[token])
            except:
                # NOTE(review): bare except — out-of-vocabulary tokens silently
                # fall back to index 0 (same as padding); confirm intended.
                train_sequence.append(0)
        train_sequence.extend([0]*( self._maxlen-len(train_sequence)) )
        return np.array(train_sequence)
    def _text_preprocessor(self, text):
        """Normalise tweet text: tokenize, de-lengthen, strip entities, map emoticon tags."""
        text = preprocess_twitter.tokenize(text)
        text = casual.reduce_lengthening(text)
        text = cleanString(setupRegexes('twitterProAna'),text)
        # drop URLs, user mentions and list references, keep everything else
        text = ' '.join([span for notentity,span in tweetPreprocessor(text, ("urls", "users", "lists")) if notentity])
        text = text.replace('\t','')
        text = text.replace('< ','<').replace(' >','>')
        text = text.replace('):', '<sadface>').replace('(:', '<smile>')
        text = text.replace(" 't", "t")#.replace("#", "")
        return ' '.join(text.split())
    def tokenise_tweet(text):
        # NOTE(review): defined without `self` and calls preprocess_tweet, which
        # is not defined in this module — appears dead/broken; confirm before use.
        text = preprocess_twitter.tokenize(text)
        text = preprocess_tweet(text)
        return ' '.join(text.split())
    def _load_original_vectors(self, filename = 'glove.27B.100d.txt', sep = ' ', wordFrequencies = None, zipped = False):
        """Read a GloVe-style embeddings file.

        Returns (Dictionary, Indices): token -> vector, and token -> 1-based
        row index. If wordFrequencies is given, only tokens present in it are
        kept; index 0 is implicitly reserved for padding/unknown tokens.
        """
        def __read_file(f):
            Dictionary, Indices = {},{}
            i = 1
            for line in f:
                line_d = line.decode('utf-8').split(sep)
                token = line_d[0]
                token_vector = np.array(line_d[1:], dtype = 'float32')
                if(wordFrequencies):
                    if(token in wordFrequencies):
                        Dictionary[token] = token_vector
                        Indices.update({token:i})
                        i+=1
                else:
                    Dictionary[token] = token_vector
                    Indices.update({token:i})
                    i+=1
            return(Dictionary, Indices)
        if zipped:
            with gzip.open(filename, 'rb') as f:
                return(__read_file(f))
        else:
            with open(filename, 'rb') as f:
                return(__read_file(f))
    def _extract_features(self, X):
        """Run the LSTM on the padded index matrix X and return {dimension: score}."""
        # if self._ESTIMATION == 'Probabilities':
        #     y_predict = np.array(self._fivePointRegressionModel.predict(X))[0]
        # else:
        #     y_predict = np.array([self._blank[y_] for y_ in self._fivePointRegressionModel.predict_classes(X)][0])
        y_predict = np.array(self._fivePointRegressionModel.predict(X))[0]
        # map each emotion's score onto its V/A/D/S dimension letter
        feature_set = {self._vad_mappings[emo]:float(y_) for emo, y_ in zip(self._emoNames, y_predict)}
        return feature_set
    # CONVERSION EKMAN TO VAD
    def _backwards_conversion(self, original):
        """Find the closest category"""
        dimensions = list(self.centroids.values())[0]
        def distance(e1, e2):
            # NOTE(review): signed sum of differences, not a metric (no abs or
            # square) — confirm this ordering is intended.
            return sum((e1[k] - e2.get(k, 0)) for k in dimensions)
        distances = { state:distance(self.centroids[state], original) for state in self.centroids }
        # seed with the largest distance so the loop below finds the minimum
        mindistance = max(distances.values())
        for state in distances:
            if distances[state] < mindistance:
                mindistance = distances[state]
                emotion = state
        result = Emotion(onyx__hasEmotionCategory=emotion)
        return result
    def analyse(self, **params):
        """Senpy entry point: score `params['input']` and return a Results object
        containing one Entry with V/A/D/S emotion dimensions (scaled by 10)."""
        logger.debug("fivePointRegression LSTM Analysing with params {}".format(params))
        st = datetime.now()
        text_input = params.get("input", None)
        # preprocess -> index vectors -> model -> {dimension: score}
        text = self._text_preprocessor(text_input)
        X = self._lists_to_vectors(text = text)
        feature_text = self._extract_features(X = X)
        response = Results()
        entry = Entry()
        entry.nif__isString = text_input
        emotionSet = EmotionSet()
        emotionSet.id = "Emotions"
        emotion = Emotion()
        for dimension in ["V","A","D","S"]:
            # emotion[self._centroid_mappings[dimension]] = float((2+feature_text[dimension])*2.5)
            emotion[dimension] = float(feature_text[dimension]*10)
        emotionSet.onyx__hasEmotion.append(emotion)
        # emotionSet.onyx__hasEmotion.append(self._backwards_conversion(emotion))
        """
        for semeval
        dimensions = list(self.centroids.values())[0]
        def distance(e1, e2):
            return sum((e1[k] - e2.get(k, 0)) for k in dimensions)
        distances = { state:distance(self.centroids[state], emotion) for state in self.centroids }
        mindistance = max(distances.values())
        dummyfix = sorted(distances.values(),reverse=True)
        for state in distances:
            if state != 'joy':
                if distances[state] in dummyfix[0:3]:
                    emotionSet.onyx__hasEmotion.append(
                        Emotion(
                            onyx__hasEmotionCategory = state,
                            onyx__hasEmotionIntensity = int(1)))
                else:
                    emotionSet.onyx__hasEmotion.append(
                        Emotion(
                            onyx__hasEmotionCategory = state,
                            onyx__hasEmotionIntensity = int(0)))
        emotionSet.onyx__hasEmotion.append(
            Emotion(
                onyx__hasEmotionCategory = 'surprise',
                onyx__hasEmotionIntensity = float((2+feature_text['S'])/4)))
        emotionSet.onyx__hasEmotion.append(
            Emotion(
                onyx__hasEmotionCategory = 'joy',
                onyx__hasEmotionIntensity = float((2+feature_text['V'])/4)))
        emotionSet.prov__wasGeneratedBy = self.id
        for semeval
        """
        entry.emotions = [emotionSet,]
        response.entries.append(entry)
        return response
# +
# centroids= {
# "anger": {
# "A": 6.95,
# "D": 5.1,
# "V": 2.7},
# "disgust": {
# "A": 5.3,
# "D": 8.05,
# "V": 2.7},
# "fear": {
# "A": 6.5,
# "D": 3.6,
# "V": 3.2},
# "joy": {
# "A": 7.22,
# "D": 6.28,
# "V": 8.6},
# "sadness": {
# "A": 5.21,
# "D": 2.82,
# "V": 2.21}
# }
# +
# def _backwards_conversion(original):
# """Find the closest category"""
# dimensions = list(centroids.values())[0]
# def distance(e1, e2):
# return sum((e1[k] - e2.get(k, 0)) for k in dimensions)
# def _vectors_similarity(v1 , v2):
# return( 1 - spatial.distance.cosine(v1,v2) )
# distances = { state:abs(distance(centroids[state], original)) for state in centroids }
# print(np.array(centroids['anger'].values()))
# distances2 = {state:_vectors_similarity(centroids[state].values() , feature_text.values()) for state in centroids}
# mindistance = max(distances.values())
# print(distances)
# print(distances2)
# for state in distances:
# if distances[state] < mindistance:
# mindistance = distances[state]
# emotion = state
# result = Emotion(onyx__hasEmotionCategory=emotion, onyx__hasEmotionIntensity=emotion)
# return result
# feature_text = {
# "A":5.9574053436517715,
# "D":6.3352929055690765,
# "V":2.9072564840316772
# }
# import numpy as np
# from senpy.models import Emotion
# from scipy import spatial
# emotion = Emotion()
# for dimension in ["V","A","D"]:
# emotion[dimension] = float((feature_text[dimension]))
# _backwards_conversion(emotion)
# +
# for state in centroids:
# # print(centroids[state])
# # print([i for i in feature_text.values()])
# # print(([i for i in centroids[state].values()]))
# print(state)
# print(_vectors_similarity(
# [i for i in feature_text.values()],
# [i for i in centroids[state].values()]))
|
fivePointRegression/fivePointRegression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# %matplotlib inline
import numpy as np
import bettermoments as bm
import matplotlib.pyplot as plt
from imgcube.cube import imagecube
from mpl_toolkits.axes_grid1 import AxesGrid
from matplotlib.ticker import MultipleLocator
import matplotlib.cm as cm
# read in the data and build the velocity axis
# NOTE(review): `data` appears to be a (velocity, y, x) cube — axis 0 is
# collapsed below and axis 1 sizes the spatial axis — confirm against the
# bettermoments `data_from_fits` documentation.
data, x0, dx, pix, uncertainty = bm.data_from_fits('HD135344B_13CO.fits', nchan=3)
axis = (np.arange(-data.shape[1] // 2, data.shape[1] // 2) + 0.5) * pix
velax = np.arange(data.shape[0]) * dx + x0
# calculate contour of 3% of peak integrated intensity for plotting
# using a 3 sigma clip.
# RMS is estimated from the first and last 3 (assumed line-free) channels.
rms = np.nanstd([data[:3], data[-3:]])
clip = 3. * rms
zeroth = np.sum(np.where(np.abs(data) >= clip, data, 0.0), axis=0)
zeroth /= np.max(zeroth)  # normalise so the peak integrated intensity is 1
# calculate the intensity weighted average, clipping at 2 sigma.
# include some smaller scatter as weights can't sum to zero.
clip = 2. * rms
weights = np.where(np.abs(data) >= clip, data, 1e-10 * np.random.randn(data.size).reshape(data.shape))
velocities = velax[:, None, None] * np.ones(data.shape)
v_first = np.average(velocities, weights=weights, axis=0)
# calculate the velocity of the peak pixels.
v_ninth = np.take(velax, np.argmax(data, axis=0))
# calculate the velocity with better moments (quadratic fit around the peak).
v_quadratic = bm.quadratic(data, axis=0, x0=velax[0], dx=np.diff(velax).mean())[0]
# +
# make the plot.
fig = plt.figure(figsize=(6.0, 2.5))
axs = AxesGrid(fig, 111, nrows_ncols=(1, 3), axes_pad=0.1, share_all=True,
label_mode="L", cbar_location="right", cbar_mode="single", cbar_pad=0.1)
labels = [r'${\rm First \,\, Moment \,\, (2\sigma \,\, Clip)}$',
r'${\rm Ninth \,\, Moment}$',
'Quadratic']
for ax, mom, label in zip(axs, [v_first, v_ninth, v_quadratic], labels):
im = ax.contourf(axis[::-1] - 0.1, axis - 0.1, np.where(zeroth >= 0.05, mom / 1e3, np.nan),
levels=np.linspace(4.8, 9.2, 50), cmap=cm.RdBu)
for c in im.collections:
c.set_edgecolor("face")
ax.set_aspect(1)
ax.set_xlim(1.2, -1.2)
ax.set_ylim(-1.2, 1.2)
ax.xaxis.set_major_locator(MultipleLocator(1.0))
ax.yaxis.set_major_locator(MultipleLocator(1.0))
ax.set_xlabel(r'Offset (arcsec)')
ax.set_ylabel(r'Offset (arcsec)')
ax.set_title(label, fontsize=7)
cb = axs.cbar_axes[0].colorbar(im)
cb.solids.set_edgecolor("face")
axs.cbar_axes[0].set_ylabel(r'${\rm Velocity \quad (km\,s^{-1})}$', rotation=270., labelpad=13)
plt.tight_layout()
#plt.savefig('moment_comparison.pdf', bbox_inches='tight')
# -
|
docs/_static/notebooks/moment-comparison.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reset -f
# +
import os
import warnings
import pickle as pkl
from joblib import dump, load
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LogisticRegression,Lasso
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB,GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import *
from sklearn.model_selection import (KFold, ParameterGrid, RandomizedSearchCV,
cross_val_score, train_test_split)
from sklearn.preprocessing import Imputer, StandardScaler
from xgboost import XGBClassifier
# %matplotlib inline
warnings.filterwarnings('ignore')
pd.options.display.float_format = '{:,.2f}'.format
# -
cwd = os.getcwd()
data_path = cwd + "../data/raw/"
models_path = cwd + "../models/"
results_path = cwd + "../results/"
os.chdir(data_path)
# +
na_vals = ["", "?", "-", "None", "NA"]
train_vals = pd.read_csv("TrainingSetValues.csv", sep=",", na_values=na_vals)
train_labels = pd.read_csv("TrainingSetLabels.csv", sep=",", na_values=na_vals)
test = pd.read_csv("TestSetValues.csv", sep=",", na_values=na_vals)
# -
test_id = test['id']
train_vals.head(3)
train_labels.head(3)
test.head(3)
train_vals.info()
train_labels.info()
train = pd.merge(train_vals,train_labels,left_on="id",right_on="id")
train.info()
del train_labels
del train_vals
train.drop('id',axis=1,inplace=True)
def dropCols(data,cols,inplace_param = True):
    """Drop from *data* the columns listed in *cols*, ignoring missing names.

    With inplace_param=True (the default) *data* is mutated and None is
    returned, mirroring pandas' own inplace convention; otherwise a new
    DataFrame without those columns is returned.
    """
    present = [col for col in cols if col in data.columns]
    return data.drop(columns=present, inplace=inplace_param)
dropCols(train,['id'])
dropCols(test,['id'])
train.columns
cat_var = train.dtypes[train.dtypes == 'object'].index
cat_var = cat_var.union(['district_code','region_code','construction_year'])
print("Categorical variables: %d"%len(cat_var),"\n",cat_var)
num_var = train.columns.difference(cat_var)
print("Numerical variables:%d"%len(num_var),"\n",num_var)
train[cat_var] = train[cat_var].apply(lambda x: x.astype('category'))
train.dtypes
# +
#water_quality can be ordinal
#construction_year, date_recorded datetime type
# -
# ### Exploratory Data Analysis
# + run_control={"marked": false}
#train_bkp = train.copy(deep=True)
#test_bkp = test.copy(deep=True)
# -
train.describe(include='category').T.sort_values(by='unique',ascending=False)
np.unique(train[['extraction_type','extraction_type_group']].values)
pd.unique(train[['extraction_type','extraction_type_group']].values.ravel())
train.columns
train.groupby(["status_group"]).mean()
train.groupby(["status_group"]).agg(['min','max','mean','count'])
# +
#same as above. kept just for reference of syntax
#train.groupby(["status_group"])['amount_tsh','gps_height','num_private','population'].agg(['min','max','mean','count'])
# -
pd.pivot_table(data=train,index='status_group',columns=['source'],values=['amount_tsh'])
# #### dams,river,shallowwell,spring which have more water are functional
# #### handdtw,machinedbh,unknown source have more water but most of them need to be repaired
# #### lakes have more water but are not functional
pd.pivot_table(data=train,index='status_group',columns=['source'],values=['population'],aggfunc=np.sum)
# #### dams,machine dbh,rainwater harvesting, river,shallow well,spring which have more population are functional
# #### lake,unknown sources which have more population are non-functional
# ### Preprocessing
rand_num = 3423
target_var = 'status_group'
y = train[target_var]
X = train.drop(target_var, axis=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=rand_num)
del train
del X
del y
print(X_train.shape)
print(X_test.shape)
# +
cols_to_drop = ['longitude','latitude','recorded_by','scheme_name','ward',
'subvillage','wpt_name','date_recorded','funder','installer',
'lga','construction_year','region_code','district_code',
'management','scheme_management','extraction_type',
'extraction_type_group','region',
'quantity_group','source_type','source_class','payment_type',
'quantity_group','waterpoint_type','water_quality']
dropCols(X_train,cols_to_drop)
dropCols(X_test,cols_to_drop)
dropCols(test,cols_to_drop)
# -
print(X_train.describe().T)
print(X_train.describe(include=['category']).T)
X_train['source'].dtype
X_train['source'] = X_train['source'].str.replace('unknown','other')
X_test['source'] = X_test['source'].str.replace('unknown','other')
test['source'] = test['source'].str.replace('unknown','other')
print(X_train.source.unique())
X_train['management_group'] = X_train['management_group'].str.replace('unknown','other')
X_test['management_group'] = X_test['management_group'].str.replace('unknown','other')
test['management_group'] = test['management_group'].str.replace('unknown','other')
print(X_train.management_group.unique())
X_train['payment'] = X_train['payment'].str.replace('unknown','other')
X_test['payment'] = X_test['payment'].str.replace('unknown','other')
test['payment'] = test['payment'].str.replace('unknown','other')
print(X_train.payment.unique())
new_cat_var = X_train.dtypes[X_train.dtypes == 'object'].index
print(new_cat_var)
print(X_train.info())
X_train[new_cat_var] = X_train[new_cat_var].apply(lambda x: x.astype('category'))
X_test[new_cat_var] = X_test[new_cat_var].apply(lambda x: x.astype('category'))
test[new_cat_var] = test[new_cat_var].apply(lambda x: x.astype('category'))
# #### Imputation
X_train[['public_meeting','permit']].info()
X_train['public_meeting'].fillna(X_train['public_meeting'].value_counts().index[0],inplace=True)
X_test['public_meeting'].fillna(X_test['public_meeting'].value_counts().index[0],inplace=True)
test['public_meeting'].fillna(test['public_meeting'].value_counts().index[0],inplace=True)
X_train['permit'].fillna(X_train['permit'].value_counts().index[0],inplace=True)
X_test['permit'].fillna(X_test['permit'].value_counts().index[0],inplace=True)
test['permit'].fillna(test['permit'].value_counts().index[0],inplace=True)
print(X_train.isna().sum())
# #### Standardize the numerical variables
new_num_var = X_train.dtypes[X_train.dtypes != 'category'].index
new_cat_var = X_train.dtypes[X_train.dtypes == 'category'].index
print(X_train[new_num_var].info())
scaler = StandardScaler()
X_train[new_num_var] = scaler.fit_transform(X_train[new_num_var])
X_test[new_num_var] = scaler.transform(X_test[new_num_var])
test[new_num_var] = scaler.transform(test[new_num_var])
print(X_train[new_num_var].describe())
# #### Dummy code Categorical variables
#X_train = pd.get_dummies(X_train,drop_first=True,prefix='d',prefix_sep='_')
X_train = pd.get_dummies(X_train,drop_first=True)
X_test = pd.get_dummies(X_test,drop_first=True)
test = pd.get_dummies(test,drop_first=True)
X_train.info()
# #### Build Models
metric = 'accuracy'
models = []
model_names = []
scores = []
train_preds = None
val_preds = None
# +
#main method to fit a model,do predictions and print metrics
def predictModelMetrics(model,model_name):
    """Fit *model*, predict on the notebook's train/test splits, print metrics.

    Relies on notebook-level globals: X_train, X_test, y_train, y_test and the
    `models`/`scores` accumulator lists (see doPredict / printMetrics).
    """
    train_preds,val_preds = resetVar()
    # NOTE(review): the values returned by resetVar() are immediately
    # overwritten by doPredict(); the call only documents the intent of
    # starting each model run fresh.
    train_preds,val_preds = doPredict(model,model_name)
    printMetrics(model,train_preds,val_preds)
#reset prediction variables before running next model
def resetVar():
    """Return a (None, None) pair used to clear the prediction variables."""
    train_preds = None
    val_preds = None
    return train_preds,val_preds
#make predictions
def doPredict(model,model_name):
    """Fit *model* on X_train/y_train and return (train_preds, val_preds).

    Side effect: appends *model_name* to the global `models` list so that
    summarizeResults() can pair names with scores.
    """
    model.fit(X_train,y_train)
    train_preds = model.predict(X_train)
    val_preds = model.predict(X_test)
    models.append(model_name)
    return train_preds,val_preds
#prints and captures metrics
def printMetrics(model,train_preds,val_preds):
    """Print train accuracy, 5-fold CV accuracy and a confusion matrix.

    Side effect: appends the rounded CV score to the global `scores` list.
    """
    print("Train Accuracy: %.2f"%accuracy_score(y_train,train_preds))
    #k-fold with k=5, scored on the held-out split
    val_scores = cross_val_score(model,X_test,y_test, cv=5, scoring=metric)
    cv_score = val_scores.mean()
    print("Cross Validation Accuracy: %.2f" %cv_score)
    print("-------------------------------------")
    print("Confusion Matrix:")
    print(pd.crosstab(y_test,val_preds,rownames=['Actual'],colnames=['Predicted'],margins=True))
    #print(classification_report(y_val,val_preds))
    scores.append(round(cv_score,2))
#print summary of model accuracies
def summarizeResults():
    """Print a DataFrame pairing each recorded model name with its CV score."""
    results = pd.DataFrame({"Model":models, "Score":scores})
    print(results)
def loadModel(model_name):
    """Return the model saved as <models_path>/<model_name>.joblib, or None.

    Returning None signals the caller to train (and then save) a fresh model.
    """
    try:
        return load(models_path + model_name + ".joblib")
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. A missing/corrupt file is the expected failure here.
    except Exception:
        return None
def saveModel(model,model_name):
    """Persist *model* to <models_path>/<model_name>.joblib."""
    # Bug fix: previously wrote to the *global* `modelName` instead of the
    # `model_name` parameter, so the argument was silently ignored and a model
    # could be saved under whatever name happened to be in the global.
    dump(model, models_path + model_name + ".joblib")
# -
modelName = "LogisticRegression"
lr = LogisticRegression(multi_class='ovr')
predictModelMetrics(lr,modelName)
modelName = "LinearSVC"
svc = LinearSVC(C=0.01)
predictModelMetrics(svc,modelName)
acc_scores = []
rng = range(2,20)
for i in rng:
knn = KNeighborsClassifier(i)
knn.fit(X_train,y_train)
t_preds = knn.predict(X_train)
acc_scores.append(accuracy_score(y_train,t_preds))
plt.figure(figsize=(12,6))
plt.plot(rng,acc_scores,marker="o")
plt.xlabel("N")
plt.ylabel("Accuracy Score")
plt.suptitle("N vs Accuracy Score")
plt.show()
modelName = "KNN5"
knn = KNeighborsClassifier(n_neighbors=2)
predictModelMetrics(knn,modelName)
# Train-or-load the two tree ensembles; only retrain, evaluate and persist
# when no cached model is found on disk.
modelName = "RandomForestClassifier"
rfc = loadModel(modelName)
# `x == None` relies on the identity fallback and breaks for objects that
# overload __eq__ (e.g. numpy arrays); `is None` is the correct test (PEP 8).
if rfc is None:
    rfc = RandomForestClassifier(n_estimators=500, max_depth=15, random_state=rand_num)
    predictModelMetrics(rfc, modelName)
    saveModel(rfc, modelName)
modelName = "XGBoost_Classifier"
xgbc = loadModel(modelName)
if xgbc is None:
    xgbc = XGBClassifier(max_depth=10, learning_rate=0.01, n_estimators=300, random_state=rand_num)
    predictModelMetrics(xgbc, modelName)
    saveModel(xgbc, modelName)
test_preds = rfc.predict(test)
result = pd.DataFrame({'id':test_id,'status_group':test_preds})
result.to_csv(results_path + "Submission.csv",index=False)
|
notebooks/DrivenData_PumpItUp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# **420-A58-SF - Algorithmes d'apprentissage non supervisé - Été 2021 - Spécialisation technique en Intelligence Artificielle**<br/>
# MIT License - Copyright (c) 2021 <NAME>
# <br/>
# 
# <br/>
# **Objectif: Séance d'exercices sur les itemsets fréquents et règles d'association**
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# ## Exercice 1
# Pour cet exercice, nous considérons 100 articles (items), numérotés de 1 à 100, ainsi que 100 paniers (baskets), également numérotés de 1 à 100. L'article *i* est placé dans le panier *b* si et seulement si *i* divise *b* sans reste.
# Ainsi, l'article 1 est dans tous les paniers, l'article 2 est dans les 50 paniers ayant un numéro pair, etc .... Le panier 12 contient les articles { 1, 2, 3, 4, 6, 12 }, puisqu'il s'agit de tous les entiers divisant 12.
# ### Q1.1: Si s = 5, quels articles sont fréquents ?
# +
# --- Votre réponse ---
# -
# ### Q1.2: Si s = 5, quelles paires d'articles sont fréquentes ?
# +
# --- Votre réponse ---
# -
# ### Q1.3: Quelle est la somme des tailles de tous les paniers ?
# +
# --- Votre réponse ---
# -
# ### Q1.4: Quel(s) panier(s) est/sont le plus grand ?
# +
# --- Votre réponse ---
# -
# ### Q1.5: Quelle sont les confiances des règles d'association {5,7}-> 2 et {2,3,4} -> 5 ?
# +
# --- Votre réponse ---
# -
# ## Exercice 2 (optionnel)
# Nous considérons 100 éléments (items), numérotés de 1 à 100, ainsi que 100 paniers (baskets), également numérotés de 1 à 100. L'item *i* est placé dans le basket *b* **si et seulement si *b* divise *i* sans reste.**
# par exemple, le basket 12 contient les items {12,24,36,48,60,72,84,96}.
# ### Q2.1: Si s = 5, quels items sont fréquents ?
# +
# --- Votre réponse ---
# -
# ### Q2.2: Si s = 5, quelles paires d'items sont fréquentes ?
# +
# --- Votre réponse ---
# -
# ### Q2.3: Quelle est la somme des tailles de tous les baskets ?
# +
# --- Votre réponse ---
# -
# ### Q2.4: Quelles sont les confiances des règles d'association {24,60}-> 8 et {2,3,4} -> 5 ?
# +
# --- Votre réponse ---
# -
# ## Exercice 3 (optionnel)
# Considérons un jeu de données pour lequel rien d'intéressant ne peut être appris à propos des itemsets fréquents, car il n'existe aucun ensemble d'items corrélés. Supposons que les items soient numérotés de 1 à 10 et que chaque basket soit **indépendamment** construit en incluant l'item *i* avec une probabilité *1/i*.
# Ainsi, tous les baskets contiennent l'item 1, la moitié des baskets contiennent l'item 2, etc...
# Soit s = 1% du nombre de baskets. Quels sont les itemsets fréquents ?
# +
# --- Votre réponse ---
# -
# ## Fin du TP
|
nbs/02-04-regles-association/02-04-tp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Assignment
#
# #### Data Description
# - Covid data of daily cumulative cases of India as reported from January 2020 to 8th August 2020
# - Source: https://www.kaggle.com/sudalairajkumar/covid19-in-india
#
# #### Conduct below Insight investigation
# 1. Find which state has highest mean of cummulative confirmed cases since reported from Jan 2020
# <br>- Plot line graph plotting means of top 10 States having highest daily confirmed cases
# 2. Which state has highest Death Rate for the month of June, July & Aug
# <br> - Plot bar graph of Death Rates for the top 5 states
# 3. Explore Trend in Confirmed Cases for the state of Maharashtra
# <br> - Plot line graph with x axis as Date column and y axis as daily confirmed cases. - such a graph is also called
# <br> as Time Series Plot
#
# #### Below key steps to be adopted to solve above Questions
# - Load Data --> Clean data / Data munging --> Grouping of Data by State --> Exploration using plots
# - update required code where everywhere '?' is mentioned in the code block
# #### Load Packages
import pandas as pd # for cleaning and loading data from csv file
import numpy as np
from matplotlib import pyplot as plt # package for plotting graphs
import datetime
import seaborn as sns; sns.set(color_codes=True)
# %matplotlib inline
# #### Load data
df = pd.read_csv("covid_19_india.csv")
df.head() # Preview first 5 rows of dataframe
# Convert Date column which is a string into datetime object
df["Date"] = pd.to_datetime(df["Date"], format = "%d/%m/%y")
df.head()
# Explore more about datetime conversion of column in pandas here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html?highlight=datetime#pandas.to_datetime
# #### Cleaning of data
# - The dataset consists of cummulative values, aim is to create columns with daily reported deaths and confirmed cases.
# - Below method is helper function to create column consisting of daily cases reported from Cummulative freq column
ex = np.unique(df['State/UnionTerritory'])
ex
# From above unique values of states it is clear that Telangana is represented in multiple ways. We will change each occurrence of Telangna state with standard spelling
def clean_stateName(stateName):
    """Normalize variant/misspelled state names to a single canonical form.

    Maps the Telangana spelling variants to 'Telangana' and the pre-merger
    union-territory names to 'Dadra and Nagar Haveli and Daman and Diu'.
    Any other name is returned unchanged.
    """
    # Bug fix: this function was previously defined twice; the second
    # definition shadowed the first, so the Telangana fixes never applied.
    # Both mappings are merged into one function here.
    if stateName in ('Telangana***', 'Telengana', 'Telengana***'):
        return 'Telangana'
    if stateName in ('Dadar Nagar Haveli', 'Daman & Diu'):
        return 'Dadra and Nagar Haveli and Daman and Diu'
    return stateName
# - Apply method is used to apply either user defined or builtin function across every cell of dataframe
# - Commonly lambda function is used to apply method across each cell
# - A lambda function is a small anonymous function.
# - A lambda function can take any number of arguments, but can only have one expression.
df["State/UnionTerritory"] = df["State/UnionTerritory"].apply(lambda x: clean_stateName(x))
np.unique(df["State/UnionTerritory"]) # to identify all unique values in a column of dataframe or array
# Helper function to find daily confirmed cases from cumulative values
def daily_cases(dframe, stateColumn, dateColumn, cummColumn):
    """Add a 'daily_<cummColumn>' column of per-day counts to *dframe* in place.

    The input holds cumulative counts per state; the daily value is the diff
    between consecutive rows of the same state. The first row of each state
    has no previous day, so it is set to NaN, and negative diffs (upstream
    data corrections) are clipped to 0. Returns the mutated *dframe*.
    """
    # Sort by state then date so diff() compares consecutive days of one state.
    dframe.sort_values(by=[stateColumn, dateColumn], inplace=True)
    newColName = 'daily_' + cummColumn
    dframe[newColName] = dframe[cummColumn].diff()
    # True wherever the state changes between consecutive rows: those diffs
    # span two different states and are meaningless.
    mask = dframe[stateColumn] != dframe[stateColumn].shift(1)
    # Bug fix: use .loc instead of chained indexing (dframe[col][mask] = ...),
    # which raises SettingWithCopyWarning and can silently fail to write under
    # pandas copy-on-write.
    dframe.loc[mask, newColName] = np.nan
    # NaN < 0 is False, so the per-state NaN markers are preserved here.
    dframe[newColName] = dframe[newColName].apply(lambda x: 0 if x < 0 else x)
    return dframe
df_new = daily_cases(dframe= df, stateColumn= 'State/UnionTerritory',dateColumn= 'Date', cummColumn= 'Confirmed')
# Preview data
df_new[df_new["State/UnionTerritory"]=="Maharashtra"].tail(n=5)
# #### Q1. Find which state has highest mean of cummulative confirmed cases since reported from Jan 2020
# Hint : Groupby state names to find their means for confirmed cases
df_group = ?
df_group = df_group.sort_values(ascending= False)[0:10]
df_group
df_group.index
ax = sns.lineplot(x= ? , y= df_group.values) # df_group.values - extract only the values of a column
plt.scatter(x= ? , y= ? , c = 'r')
ax.figure.set_figwidth(12)
ax.figure.set_figheight(4)
ax.set_ylabel("Mean of Daily Confirmed Cases")
# #### Q2. Which state has highest Death Rate for the month of June, July & Aug
# Hint - explore how a datetime column of dataframe can be filtered using specific months as a list
df_months = df_new['Date'].apply(lambda x: x.month in ?) # this will create boolean basis comparison of months from a list
df_final = df_new[df_months] # Filtered dataframe consisting of data from June, July & Aug
df_final.tail()
df_final['death_rate'] = df_final['Deaths'] / df_final['Confirmed'] *100
df_final.tail()
df_groups_deaths = ?
top_10_deathrates = ?
fig, ax = plt.subplots()
fig.set_figwidth(15)
fig.set_figheight(6)
ax.bar(x = ?, height = ?)
ax.set_xlabel('States')
ax.set_ylabel('Death Rates %')
ax.set_title('Top 10 States with Highest Death Rate since June 2020')
for i, v in enumerate(top_10_deathrates.values):
ax.text(i, v, s = ("%.2f" % v), color='blue', fontweight='bold', fontsize = 12) # %.2f will print decimals upto 2 places
plt.xticks(rotation=45) # this line will rotate the x axis label in 45 degrees to make it more readable
# ### Q3. Explore Trend in Confirmed Cases for the state of Maharashtra
# - Plot line graph with x axis as Date column and y axis as daily confirmed cases. - such a graph is also called
# as Time Series Plot
# #### Hint - explore on google or in matplotlib for Time series graph from a dataframe
|
1.Python/covid_data_analysis-Assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="b7noD9NjFRL-"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/xla/tutorials/autoclustering_xla"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/g3doc/tutorials/autoclustering_xla.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/g3doc/tutorials/autoclustering_xla.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="mz65veHXsmnS"
# # Classifying CIFAR-10 with XLA
#
# This tutorial trains a TensorFlow model to classify the [CIFAR-10](https://en.wikipedia.org/wiki/CIFAR-10) dataset, and we compile it using XLA.
#
# Load and normalize the dataset using the Keras API:
# + colab={} colab_type="code" id="7vm2QsMisCxI"
import tensorflow as tf
# Check that GPU is available: cf. https://colab.research.google.com/notebooks/gpu.ipynb
assert(tf.test.is_gpu_available())
tf.keras.backend.clear_session()
tf.config.optimizer.set_jit(False) # Start with XLA disabled.
def load_data():
    """Fetch CIFAR-10, scale images into [0, 1), and one-hot encode labels."""
    def _prep(x, y):
        # uint8 pixels in [0, 255] -> float32 in [0, 1);
        # integer class ids -> one-hot vectors of length 10.
        return x.astype('float32') / 256, tf.keras.utils.to_categorical(y, num_classes=10)
    train, test = tf.keras.datasets.cifar10.load_data()
    return (_prep(*train), _prep(*test))
(x_train, y_train), (x_test, y_test) = load_data()
# + [markdown] colab_type="text" id="MgNM2tbgtScx"
# We define the model, adapted from the Keras [CIFAR-10 example](https://keras.io/examples/cifar10_cnn/):
# + colab={} colab_type="code" id="3ZRQSwoRsKM_"
def generate_model():
    """Build the CIFAR-10 CNN from the Keras example: two conv blocks
    (Conv-ReLU-Conv-ReLU-Pool-Dropout) followed by a dense softmax head."""
    layers = tf.keras.layers
    model = tf.keras.models.Sequential()
    # Block 1: 32 filters.
    model.add(layers.Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
    model.add(layers.Activation('relu'))
    model.add(layers.Conv2D(32, (3, 3)))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))
    # Block 2: 64 filters.
    model.add(layers.Conv2D(64, (3, 3), padding='same'))
    model.add(layers.Activation('relu'))
    model.add(layers.Conv2D(64, (3, 3)))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))
    # Classification head.
    model.add(layers.Flatten())
    model.add(layers.Dense(512))
    model.add(layers.Activation('relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10))
    model.add(layers.Activation('softmax'))
    return model
model = generate_model()
# + [markdown] colab_type="text" id="-M4GtGDZtb8a"
# We train the model using the
# [RMSprop](https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer)
# optimizer:
#
# + colab={} colab_type="code" id="UKCmrhF0tiMa"
def compile_model(model):
    """Compile *model* in place with RMSprop and categorical cross-entropy,
    tracking accuracy; returns the same model for chaining."""
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.keras.optimizers.RMSprop(lr=0.0001, decay=1e-6),
        metrics=['accuracy'],
    )
    return model
model = compile_model(model)
def train_model(model, x_train, y_train, x_test, y_test, epochs=25):
    """Fit *model* for *epochs* epochs (batch 256), validating on the test split."""
    model.fit(x_train, y_train, batch_size=256, epochs=epochs, validation_data=(x_test, y_test), shuffle=True)
def warmup(model, x_train, y_train, x_test, y_test):
    # Warm up the JIT, we do not wish to measure the compilation time.
    """Run one throwaway epoch, then restore the initial weights, so XLA
    compilation cost is excluded from the subsequent timed training run."""
    initial_weights = model.get_weights()
    train_model(model, x_train, y_train, x_test, y_test, epochs=1)
    model.set_weights(initial_weights)
warmup(model, x_train, y_train, x_test, y_test)
# %time train_model(model, x_train, y_train, x_test, y_test)
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# + [markdown] colab_type="text" id="SLpfQ0StRgsu"
# Now let's train the model again, using the XLA compiler.
# To enable the compiler in the middle of the application, we need to reset the Keras session.
# + colab={} colab_type="code" id="jxU-Tzy4SX7p"
# We need to clear the session to enable JIT in the middle of the program.
tf.keras.backend.clear_session()
tf.config.optimizer.set_jit(True) # Enable XLA.
model = compile_model(generate_model())
(x_train, y_train), (x_test, y_test) = load_data()
warmup(model, x_train, y_train, x_test, y_test)
# %time train_model(model, x_train, y_train, x_test, y_test)
# + [markdown] colab_type="text" id="iWHz6P1se92F"
# On a machine with a Titan V GPU and an Intel Xeon E5-2690 CPU the speed up is ~1.17x.
|
tensorflow/compiler/xla/g3doc/tutorials/autoclustering_xla.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:Python_Requirements_Windows] *
# language: python
# name: conda-env-Python_Requirements_Windows-py
# ---
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
from matplotlib import pyplot as plt
train_df = pd.read_csv(Path('Resources/2019loans.csv'))
test_df = pd.read_csv(Path('Resources/2020Q1loans.csv'))
# Convert categorical data to numeric and separate target feature for training data
y_train = train_df['target']
y_train_label = LabelEncoder().fit_transform(y_train)
train_dummies_data = pd.get_dummies(train_df.drop(['target'], axis = 1))
print(train_dummies_data.columns)
train_dummies_data.head()
# Convert categorical data to numeric and separate target feature for testing data
y_test = test_df['target']
y_test_label = LabelEncoder().fit_transform(y_test)
print(y_test_label)
test_dummies_data = pd.get_dummies(test_df.drop(['target'], axis = 1))
print(test_dummies_data.columns)
# add missing dummy variables to testing set
test_dummies_data['debt_settlement_flag_Y'] = 0
test_dummies_data.head()
# # Before you create, fit, and score the models, make a prediction as to which model you think will perform better.
#
#
# I think the random forest will perform better because i think it will be able to better drill down which features are more predictive out of the large number of features. It will be able to ignore the "noise" variables better.
# +
# Train the Logistic Regression model on the unscaled data and print the model score
classifier = LogisticRegression()
classifier.fit(train_dummies_data, y_train_label)
print(f"Training Data Score: {classifier.score(train_dummies_data, y_train_label)}")
print(f'Testing Score: {classifier.score(test_dummies_data, y_test_label )}')
# -
# Train a Random Forest Classifier model and print the model score
classifier = RandomForestClassifier(random_state=1, n_estimators=500).fit(train_dummies_data, y_train_label)
print(f"Training Data Score: {classifier.score(train_dummies_data, y_train_label)}")
print(f'Testing Score: {classifier.score(test_dummies_data, y_test_label )}')
# # The data going into these models was never scaled, an important step in preprocessing. Use StandardScaler to scale the training and testing sets. Before re-fitting the LogisticRegression and RandomForestClassifier models on the scaled data, make another prediction about how you think scaling will affect the accuracy of the models. Write your predictions down and provide justification.
# I think scaling the data will help the models perform better, as several of the features are on much larger or smaller scales, which would affect the weighting of those features more during the training process
# Scale the data
scaler = StandardScaler().fit(train_dummies_data)
X_train_scaled = scaler.transform(train_dummies_data)
X_test_scaled = scaler.transform(test_dummies_data)
# Train the Logistic Regression model on the scaled data and print the model score
classifier = LogisticRegression()
classifier.fit(X_train_scaled, y_train_label)
print(f"Training Data Score: {classifier.score(X_train_scaled, y_train_label)}")
print(f'Testing Score: {classifier.score(X_test_scaled, y_test_label)}')
y_pred = classifier.predict(X_test_scaled)
print(confusion_matrix(y_test_label, y_pred))
print(classification_report(y_test_label, y_pred))
# Train a Random Forest Classifier model on the scaled data and print the model score
Rclassifier = RandomForestClassifier(random_state=1, n_estimators=500).fit(X_train_scaled, y_train)
print(f"Training Data Score: {Rclassifier.score(X_train_scaled, y_train)}")
print(f'Testing Score: {Rclassifier.score(X_test_scaled, y_test)}')
y_pred = Rclassifier.predict(X_test_scaled)
print(confusion_matrix(y_test_label, y_pred))
print(classification_report(y_test_label, y_pred))
# Bug fix: `classifier` was rebound to the LogisticRegression above, which has
# no `feature_importances_` attribute, so this line raised AttributeError.
# The narrative below refers to the random forest's feature importances, so
# read them from `Rclassifier` (the fitted RandomForestClassifier).
features = Rclassifier.feature_importances_
print(features)
plt.bar(x=range(len(features)), height=features)
plt.show()
# # How do the model scores compare to each other, and to the previous results on unscaled data? How does this compare to your prediction? Write down your results and thoughts.
# Scaling the data helped the regression model perform better, but did not affect the accuracy of the Random Forest classifier. After scaling the data the logistic regression classifier seems to outperform the random forest classifier. As seen in the feature importance, there does not seem to be many individually highly predictive features in the dataset according to the random forest. Contrary to prediction, The logistic regression appears to be doing a better job weighting the varous features to predict the outcome.
|
Credit Risk Evaluator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import glob

from pydub import AudioSegment

# List the mp3 files in the working directory. FIX: the original referenced
# glob while its import was commented out, which raises NameError.
mp3 = glob.glob('./*.mp3')

# Convert one mp3 to wav. FIX: the original exported the wav data to the
# extensionless path "./fff"; write a proper .wav filename instead.
sound = AudioSegment.from_mp3('./BMWDRIVEBY.mp3')
sound.export("./BMWDRIVEBY.wav", format="wav")
# -
import librosa
import os
import pydub
import glob
def downsample():
    """Downsample every .wav file in the current directory to 8 kHz, in place."""
    # FIX: the variable was named `mp3` although the glob matches .wav files.
    wav_files = glob.glob('./*.wav')
    for wav_file in wav_files:
        y, sr = librosa.load(wav_file, sr=8000)  # resampled to 8 kHz on load
        # NOTE(review): librosa.output.write_wav was deprecated in 0.7 and
        # removed in librosa 0.8; on modern librosa this must become
        # soundfile.write(wav_file, y, sr) — confirm the installed version.
        librosa.output.write_wav(wav_file, y, sr)

downsample()
|
pythoncode/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.2 64-bit (windows store)
# language: python
# name: python3
# ---
# # Primer programa
# +
from datetime import date

date.today()  # bare expression: a notebook cell would display it; not needed for the print below
print(f"La fecha de hoy es: {date.today()}")
# -
# ## Convertidor de unidades
# +
# Convert a distance in parsecs to light-years (1 pc = 3.26156 ly).
parsec = 11
lightyears = parsec * 3.26156
print(f"{parsec} parsec, is {lightyears} lightyears")
|
Kata 1/kata1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''.env'': venv)'
# name: python385jvsc74a57bd01abf8de982abe96e657c85b87b33ad381284ca2f54fc0f95d7e60df1292fbfaf
# ---
# ### Grab data
# Commentary:
#
# The popular [Abalone](https://archive.ics.uci.edu/ml/datasets/Abalone) data set originally from the UCI data repository \[1\] will be used.
#
# > \[1\] <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
# +
# Create the local working directories and download the Abalone LIBSVM
# dataset from the public SageMaker sample-files bucket.
from pathlib import Path
import boto3
for p in ['raw_data', 'training_data', 'validation_data']:
    Path(p).mkdir(exist_ok=True)
s3 = boto3.client('s3')
s3.download_file('sagemaker-sample-files', 'datasets/tabular/uci_abalone/abalone.libsvm', 'raw_data/abalone')
# -
# ### Prepare training and validation data
# +
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
from sklearn.model_selection import train_test_split
# 80/20 train/validation split with a fixed seed, written back out in
# libsvm format for the XGBoost training job.
X, y = load_svmlight_file('raw_data/abalone')
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1984, shuffle=True)
dump_svmlight_file(x_train, y_train, 'training_data/abalone.train')
dump_svmlight_file(x_test, y_test, 'validation_data/abalone.test')
# -
# ### Train model
# Commentary:
#
# Notice that the [SageMaker XGBoost container](https://github.com/aws/sagemaker-xgboost-container) framework version is set to be `1.2-1`. This is extremely important – the older `0.90-2` version will NOT work with SageMaker Neo out of the box. This is because in February of 2021, the SageMaker Neo team updated their XGBoost library version to `1.2` and backwards compatibility was not kept.
#
# Moreover, notice that we are using the open source XGBoost algorithm version, so we must provide our own training script and model loading function. These two required components are defined in `entrypoint.py`, which is part of the `neo-blog` repository. The training script is very basic, and the inspiration was taken from another sample notebook [here](https://github.com/aws/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_abalone/xgboost_abalone_dist_script_mode.ipynb). Please note also that for `instance_count` and `instance_type`, the values are `1` and `local`, respectively, which means that the training job will run locally on our notebook instance. This is beneficial because it eliminates the startup time of training instances when a job runs remotely instead.
#
# Finally, notice that the number of boosting rounds has been set to 10,000. This means that the model will consist of 10,000 individual trees and will be computationally expensive to run, which we want for load testing purposes. A side effect will be that the model will severely overfit on the training data, but that is okay since accuracy is not a priority here. A computationally expensive model could have also been achieved by increasing the `max_depth` parameter as well.
#
# +
import sagemaker
from sagemaker.xgboost.estimator import XGBoost
from sagemaker.session import Session
from sagemaker.inputs import TrainingInput
bucket = Session().default_bucket()
role = sagemaker.get_execution_role()
# initialize hyperparameters
# NOTE: num_round=10000 is deliberately huge so inference is expensive for
# the load test; the model will badly overfit, which is acceptable here.
hyperparameters = {
    "max_depth":"5",
    "eta":"0.2",
    "gamma":"4",
    "min_child_weight":"6",
    "subsample":"0.7",
    "verbosity":"1",
    "objective":"reg:squarederror",
    "num_round":"10000"
}
# construct a SageMaker XGBoost estimator
# specify the entry_point to your xgboost training script
# instance_type='local' runs the training job on this notebook instance,
# avoiding remote-instance startup time.
estimator = XGBoost(entry_point = "entrypoint.py",
                    framework_version='1.2-1', # 1.x MUST be used
                    hyperparameters=hyperparameters,
                    role=role,
                    instance_count=1,
                    instance_type='local',
                    output_path=f's3://{bucket}/neo-demo') # gets saved in bucket/neo-demo/job_name/model.tar.gz
# define the data type and paths to the training and validation datasets
content_type = "libsvm"
train_input = TrainingInput('file://training_data', content_type=content_type)
validation_input = TrainingInput('file://validation_data', content_type=content_type)
# execute the XGBoost training job
estimator.fit({'train': train_input, 'validation': validation_input}, logs=['Training'])
# -
# ### Deploy unoptimized model
# Commentary:
#
# There are two interesting things to note here. The first of which is that although the training job was local, the model artifact was still set up to be stored in [Amazon S3](https://aws.amazon.com/s3/) upon job completion. The other peculiarity here is that we must create an `XGBoostModel` object and use its `deploy` method, rather than calling the `deploy` method of the estimator itself. This is due to the fact that we ran the training job in local mode, so the estimator is not aware of any “official” training job that is viewable in the SageMaker console and associable with the model artifact. Because of this, the estimator will error out if its own `deploy` method is used, and the `XGBoostModel` object must be constructed first instead.
#
# Notice also that we will be hosting the model on a c5 (compute-optimized) instance type. This instance will be particularly well suited for hosting the XGBoost model, since XGBoost by default runs on CPU and it’s a CPU-bound algorithm for inference (on the other hand, during training XGBoost is a memory bound algorithm). The c5.large instance type is also marginally cheaper to run in the us-east-1 region at $0.119 per hour compared to a t2.large at $0.1299 per hour.
#
# +
from sagemaker.xgboost.model import XGBoostModel
# grab the model artifact that was written out by the local training job
s3_model_artifact = estimator.latest_training_job.describe()['ModelArtifacts']['S3ModelArtifacts']
# we have to switch from local mode to remote mode: build an XGBoostModel
# around the S3 artifact because the local-mode estimator cannot deploy.
xgboost_model = XGBoostModel(
    model_data=s3_model_artifact,
    role=role,
    entry_point="entrypoint.py",
    framework_version='1.2-1',
)
unoptimized_endpoint_name = 'unoptimized-c5'
# Host the uncompiled model on a compute-optimized instance for comparison
# against the Neo-compiled endpoint below.
xgboost_model.deploy(
    initial_instance_count = 1,
    instance_type='ml.c5.large',
    endpoint_name=unoptimized_endpoint_name
)
# -
# ### Optimize model with SageMaker Neo
# Compile the trained model for the ml_c5 target with SageMaker Neo.
job_name = s3_model_artifact.split("/")[-2]  # reuse the training job's S3 folder name
neo_model = xgboost_model.compile(
    target_instance_family="ml_c5",
    role=role,
    input_shape =f'{{"data": [1, {X.shape[1]}]}}',  # one row by n feature columns
    output_path =f's3://{bucket}/neo-demo/{job_name}', # gets saved in bucket/neo-demo/model-ml_c5.tar.gz
    framework = "xgboost",
    job_name=job_name # what it shows up as in console
)
# ### Deploy Neo model
# +
optimized_endpoint_name = 'neo-optimized-c5'
# Same instance type as the unoptimized endpoint so the load-test
# comparison is apples-to-apples.
neo_model.deploy(
    initial_instance_count = 1,
    instance_type='ml.c5.large',
    endpoint_name=optimized_endpoint_name
)
# -
# ### Validate that endpoints are working
# +
import boto3
smr = boto3.client('sagemaker-runtime')
# Smoke-test both endpoints with one CSV row. FIX: use the endpoint-name
# variables defined above instead of repeating the string literals, so a
# rename cannot silently desynchronize this check.
resp = smr.invoke_endpoint(EndpointName=optimized_endpoint_name, Body=b'2,0.675,0.55,0.175,1.689,0.694,0.371,0.474', ContentType='text/csv')
print('neo-optimized model response: ', resp['Body'].read())
resp = smr.invoke_endpoint(EndpointName=unoptimized_endpoint_name, Body=b'2,0.675,0.55,0.175,1.689,0.694,0.371,0.474', ContentType='text/csv')
print('unoptimized model response: ', resp['Body'].read())
# -
# ### Create CloudWatch dashboard for monitoring performance
# +
import json
cw = boto3.client('cloudwatch')
dashboard_name = 'NeoDemo'
region = Session().boto_region_name # get region we're currently in
# One time-series widget comparing the two endpoints. The "..." and "."
# entries are CloudWatch's shorthand for repeating the previous metric's
# namespace/dimension fields.
body = {
    "widgets": [
        {
            "type": "metric",
            "x": 0,
            "y": 0,
            "width": 24,
            "height": 12,
            "properties": {
                "metrics": [
                    [ "AWS/SageMaker", "Invocations", "EndpointName", optimized_endpoint_name, "VariantName", "AllTraffic", { "stat": "Sum", "yAxis": "left" } ],
                    [ "...", unoptimized_endpoint_name, ".", ".", { "stat": "Sum", "yAxis": "left" } ],
                    [ ".", "ModelLatency", ".", ".", ".", "." ],
                    [ "...", optimized_endpoint_name, ".", "." ],
                    [ "/aws/sagemaker/Endpoints", "CPUUtilization", ".", ".", ".", ".", { "yAxis": "right" } ],
                    [ "...", unoptimized_endpoint_name, ".", ".", { "yAxis": "right" } ]
                ],
                "view": "timeSeries",
                "stacked": False,
                "region": region,
                "stat": "Average",
                "period": 60,
                "title": "Performance Metrics",
                "start": "-PT1H",  # show the trailing hour
                "end": "P0D"
            }
        }
    ]
}
cw.put_dashboard(DashboardName=dashboard_name, DashboardBody=json.dumps(body))
print('link to dashboard:')
print(f'https://console.aws.amazon.com/cloudwatch/home?region={region}#dashboards:name={dashboard_name}')
# -
# ### Install node.js
# %conda install -c conda-forge nodejs
# ### Validate successful installation
# !node --version
# ### Install Serverless framework and Serverless Artillery
# !npm install -g serverless@1.80.0 serverless-artillery@0.4.9
# ### Validate successful installations
# !serverless --version
# !slsart --version
# ### Deploy Serverless Artillery
# Commentary:
#
# The most important file that makes up part of the load generating function under the `serverless_artillery` directory is `processor.js`, which is responsible for generating the payload body and signed headers of each request that gets sent to the SageMaker endpoints. Please take a moment to review the file’s contents. In it, you’ll see that we’re manually signing our requests using the AWS Signature Version 4 algorithm. When you use any AWS SDK like [boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html), your requests are automatically signed for you by the library. Here, however, we are directly interacting with AWS’s SageMaker API endpoints, so we must sign requests ourselves. The access keys and session token of the load-generating lambda function’s role are used to sign the request, and the role is given permissions to invoke SageMaker endpoints in its role statements (defined in serverless.yml on line 18). When a request is sent, AWS will first validate the signed headers, then validate that the assumed role has permission to invoke endpoints, and then finally let the request from the Lambda to pass through.
#
# !cd serverless_artillery && npm install && slsart deploy --stage dev
# ### Create Serverless Artillery load test script
# +
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def writefilewithvariables(line, cell):
    """Like %%writefile, but expands {name} placeholders in the cell body
    from the notebook's global namespace before writing to the file named
    on the magic line."""
    with open(line, 'w') as f:
        f.write(cell.format(**globals()))
# Get region that we're currently in
region = Session().boto_region_name
# +
# %%writefilewithvariables script.yaml
config:
variables:
unoptimizedEndpointName: {unoptimized_endpoint_name} # the xgboost model has 10000 trees
optimizedEndpointName: {optimized_endpoint_name} # the xgboost model has 10000 trees
numRowsInRequest: 125 # Each request to the endpoint contains 125 rows
target: 'https://runtime.sagemaker.{region}.amazonaws.com'
phases:
- duration: 120
arrivalRate: 20 # 1200 total invocations per minute (600 per endpoint)
- duration: 120
arrivalRate: 40 # 2400 total invocations per minute (1200 per endpoint)
- duration: 120
arrivalRate: 60 # 3600 total invocations per minute (1800 per endpoint)
- duration: 120
arrivalRate: 80 # 4800 invocations per minute (2400 per endpoint... this is the max of the unoptimized endpoint)
- duration: 120
arrivalRate: 120 # only the neo endpoint can handle this load...
- duration: 120
arrivalRate: 160
processor: './processor.js'
scenarios:
- flow:
- post:
url: '/endpoints/{{{{ unoptimizedEndpointName }}}}/invocations'
beforeRequest: 'setRequest'
- flow:
- post:
url: '/endpoints/{{{{ optimizedEndpointName }}}}/invocations'
beforeRequest: 'setRequest'
# -
# ### Perform load tests
# !slsart invoke --stage dev --path script.yaml
# Convenience link for watching the load test on the dashboard created above.
print("Here's the link to the dashboard again:")
print(f'https://console.aws.amazon.com/cloudwatch/home?region={region}#dashboards:name={dashboard_name}')
# ### Clean up resources
# +
# delete endpoints and endpoint configurations
sm = boto3.client('sagemaker')
for name in [unoptimized_endpoint_name, optimized_endpoint_name]:
    sm.delete_endpoint(EndpointName=name)
    # The endpoint configs were created with the same names as the endpoints.
    sm.delete_endpoint_config(EndpointConfigName=name)
# +
# remove serverless artillery resources
# !slsart remove --stage dev
|
NeoBlog.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data 620 Project 3
# ## Classification
# <NAME> and <NAME>
# June 29, 2020
#
# **Dataset**
# The data used in this project is the names corpus included in the NLTK package.
#
# **Analysis:**
# After the names corpus is split into train, dev-test, and test subsets, initial classification using NLTK's maximum entropy classifier is performed with one feature. Additional features are added to improve the maximum entropy classifier. Further features are added and sklearn's Logistic Regression, Naive Bayes, and Random Forest classification techniques are attempted. Accuracy and confusion matrices are produced.
#
# [Video clip](https://youtu.be/_bGnfSwzZ1Q)
from nltk.corpus import names
import random
from nltk.classify import apply_features
from plotly.offline import init_notebook_mode, plot, iplot
# %matplotlib inline
import pandas as pd
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
init_notebook_mode(connected=True)
import nltk, re, pprint
from nltk import word_tokenize
import string
import re
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV, ShuffleSplit
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import resample
# ### Investigate the NLTK Names Corpus
# Build (name, gender) pairs from the corpus's male/female word lists.
# NOTE(review): this rebinding shadows the imported `names` corpus module;
# all later cells use the list, so the shadowing is left in place.
names = ([(name, 'male') for name in names.words('male.txt')] + [(name, 'female') for name in names.words('female.txt')])
# +
#print(names[:10])
print('Count of total names in the corpus is: ' , (len(names)))
females = [(name, gender) for name, gender in names if gender == 'female']
print('Count of female names in the corpus is: ' , (len(females)))
males = [(name, gender) for name, gender in names if gender == 'male']
print('Count of male names in the corpus is: ' , (len(males)))
# -
# The corpus has more female names (~63%) than male names (37%).
# ### Remove any trailing spaces from names
# FIX: the stated intent is to remove stray leading/trailing whitespace, but
# "".join(i.rsplit()) also deletes *internal* spaces (e.g. "Mary Ann" ->
# "MaryAnn"), silently changing name lengths. str.strip() removes only the
# surrounding whitespace.
names = [tuple(part.strip() for part in pair) for pair in names]
# ### Split the names data set into train, test, and devtest
# The corpus is split into three subsets per the instructions.
# 500 dev-test
# 500 test
# 6,944 train
# Shuffle once, then slice into train / dev-test / test partitions
# (first 500 test, next 500 dev-test, remainder train).
random.shuffle(names)
train_names = names[1000:]
devtest_names = names[500:1000]
test_names = names[:500]
print(len(train_names))
print(len(devtest_names))
print(len(test_names))
# ### Create dataframes for later use
# DataFrame versions of the splits for the sklearn models further below.
train_name_df = pd.DataFrame(train_names)
train_name_df.columns = ['name', 'gender']
test_name_df = pd.DataFrame(test_names)
test_name_df.columns = ['name', 'gender']
# ### Last letter feature
# Use the function provided in the book that takes an input word and returns the last letter
def gender_features(name):
    """Single-feature extractor: the name's final letter."""
    last = name[-1]
    return {'last_letter': last}
# ### Run 1 - Max Entropy Classifier
# Feature: last letter.
# We started with the maximum entropy classifier because it does not assume the features are independent. (The Naive Bayes classifier assumes features are independent and this may be an unreasonable assumption for classifying gender from names.) The maximum entropy classifier builds a conditional model. It takes the data as given and uses an iterative technique to maximize the likelihood of the training corpus.
# +
# Build NLTK feature sets lazily and train a MaxEnt model on the
# last-letter feature only (IIS algorithm, capped at 1000 iterations).
train_set = apply_features(gender_features, train_names)
test_set = apply_features(gender_features, test_names)
devtest_set = apply_features(gender_features, devtest_names)
classifier = nltk.MaxentClassifier.train(train_set, algorithm='iis', trace=0, max_iter=1000)
print('Run 1: Gender correctly identified: ', "{:.1%}".format(nltk.classify.accuracy(classifier, devtest_set)))
# -
# With just a single feature of last letter of the name, the dev-test set yields an accuracy of 76.8%.
# ### Add additional features for first letter and length of the name
#
def gender_features2(name):
    """Extract last letter, first letter, and name-length features."""
    return {
        "last_letter": name[-1].lower(),
        "first_letter": name[0].lower(),
        "name_length": len(name),
    }
# ### Run 2 - Max Entropy Classifier
# Features: last letter, first letter, length of name
# +
# Retrain the MaxEnt classifier using the richer three-feature extractor.
train_set2 = apply_features(gender_features2, train_names)
test_set2 = apply_features(gender_features2, test_names)
devtest_set2 = apply_features(gender_features2, devtest_names)
#classifier2 = nltk.NaiveBayesClassifier.train(train_set2)
classifier2 = nltk.MaxentClassifier.train(train_set2, algorithm='iis', trace=0, max_iter=1000)
print('Run 2: Gender correctly identified: ', "{:.1%}".format(nltk.classify.accuracy(classifier2, devtest_set2)))
# -
# The additional features of first letter and length of the name have increased the accuracy to 80.6% on the devtest set.
# ### Run 2 - Create Confusion Matrix
# +
# Collect gold labels and predictions over the dev-test names, then render
# an NLTK confusion matrix.
tag2 = []
guess2 = []
for (name, label) in devtest_names:
    observed2 = classifier2.classify(gender_features2(name))
    tag2.append(label)
    guess2.append(observed2)
print(nltk.ConfusionMatrix(tag2, guess2))
# -
# The classifier is not as good at determining male gender from name as it is female gender. This could be because the corpus was more heavily skewed to female names.
# ### Run 2 - Check the errors
# +
# List every dev-test name the classifier got wrong (gold label vs guess),
# then print the error count.
errors = []
for (name, tag) in devtest_names:
    guess = classifier2.classify(gender_features2(name))
    if guess != tag:
        errors.append( (tag, guess, name) )
for (tag, guess, name) in sorted(errors):
    print('correct=%-8s guess=%-8s name=%-30s' %
          (tag, guess, name))
print(len(errors))
# -
# ### Run 2 - Most Important Features
# Show the five highest-weight MaxEnt features.
classifier2.show_most_informative_features(5)
# We can get the weights for the most important features. Name length of 14 is the most important feature. It is positive for the female label, meaning the female label is more likely. The classifier ranks names ending in 'c' and 'j' as informative features also, and they are positive weight for male, meaning the male label is more likely. Last letter 'a' is also important, but is negative for male, meaning the male label is less likely.
# ### Test the Maximum Entropy Classifier
# Now use the test set to get the accuracy of the classifier.
# Final held-out (test-set) accuracy for the three-feature MaxEnt model.
print('Maximum Entropy test: Gender correctly identified: ', "{:.1%}".format(nltk.classify.accuracy(classifier2, test_set2)))
# On the test set, the maximum entropy classifier with features of first letter, last letter, and name length, we achieve an accuracy of 78.8%.
# ### Add additional features - counts of "a", "i", "o", "y" and create dummy columns for first and last letter features
# Feature engineering shared by the train and test frames. The original cell
# duplicated these nine steps verbatim for each frame; extract one helper so
# the two frames cannot drift apart.
def _add_name_features(df):
    """Add first/last-letter dummies, name length, and vowel counts to a
    (name, gender) frame; returns the expanded frame."""
    df['last_letter'] = df['name'].apply(lambda x: x[-1])
    df['first_letter'] = df['name'].apply(lambda x: x[0])
    df['len_name'] = df['name'].apply(lambda x: len(x))
    # Counts of selected letters, in the original column order.
    for letter in ('a', 'i', 'o', 'y'):
        df[f'{letter}_count'] = df['name'].apply(lambda x: len(re.findall(letter, x)))
    return pd.get_dummies(df, columns=['last_letter', 'first_letter'])

train_name_df = _add_name_features(train_name_df)
train_name_df.head()
test_name_df = _add_name_features(test_name_df)
# Align test columns with train: letters unseen in test become all-zero dummies.
test_name_df = test_name_df.reindex(columns=train_name_df.columns, fill_value=0)
test_name_df.head()
# ### SK-Learn Models
#
# Let's use SK-Learn's random forest classifier, logistic regressor and naive bayes classifier on our recently engineered data. Each model will use tenfold cross-validation with a grid-search on specified parameters
# #### Run 3 - Logistic Classifier with Additional Features
#
# The function will run a grid-search on the best L2 penalty parameter
def logistic_classifier(train_df, test_df):
    """Grid-search the L2 penalty strength C for a logistic regression,
    print train/test accuracy and a test confusion matrix, and return the
    fitted GridSearchCV object."""
    def split_xy(df):
        return df.drop(labels=['gender', 'name'], axis=1), df['gender']

    X_tr, y_tr = split_xy(train_df)
    X_te, y_te = split_xy(test_df)

    model = Pipeline(steps=[
        ('logistic', LogisticRegression(penalty='l2',
                                        dual=False,
                                        max_iter=1000,
                                        tol=.0001)),
    ])
    grid = GridSearchCV(
        model,
        {'logistic__C': np.arange(1, 50, 10)},
        cv=ShuffleSplit(n_splits=10, test_size=.3, random_state=0),
        return_train_score=True,
    )
    grid.fit(X_tr, y_tr)
    predictions = grid.predict(X_te)

    print('Run 3: Logistic Classifier')
    print('Train: Gender correctly identified: ', "{:.1%}".format(grid.score(X_tr, y_tr)))
    print('Test: Gender correctly identified: ', "{:.1%}".format(grid.score(X_te, y_te)))
    print('')
    print('Confusion Matrix')
    print(confusion_matrix(y_te, predictions))
    return grid
# Run 3: fit and evaluate the logistic model on the engineered features.
lr = logistic_classifier(train_name_df, test_name_df)
# The logistic classifier shows accuracy of 81%. However, the minority class has been misclassified with specificity of 71.7%
# #### Run 4 - Naive Bayes Classifier with additional features
#
# The model will run a grid-search on the best smoothing parameter
def m_naive_bayes_classifier(train_df, test_df):
    """Grid-search the multinomial Naive Bayes smoothing parameter (alpha),
    print train/test accuracy and a test confusion matrix, and return the
    fitted GridSearchCV object."""
    X_train = train_df.drop(labels=['gender','name'],
                            axis=1)
    y_train = train_df['gender']
    X_test = test_df.drop(labels=['gender','name'],
                          axis=1)
    y_test = test_df['gender']
    nb = MultinomialNB()
    # Single-step pipeline wrapping the Naive Bayes estimator
    # (no standardization is performed).
    pipeline = Pipeline(steps=[('nb', nb)])
    param_grid = {
        'nb__alpha': np.arange(.1, 1, .2)}
    # Ten random 70/30 splits with a fixed seed for reproducible CV scores.
    cv = ShuffleSplit(n_splits=10, test_size=.3, random_state=0)
    search = GridSearchCV(pipeline,
                          param_grid,
                          cv=cv,
                          return_train_score=True
                          )
    search.fit(X_train,y_train)
    y_pred = search.predict(X_test)
    print('Run 4: Naive Bayes Classifier')
    print('Train: Gender correctly identified: ', "{:.1%}".format(search.score(X_train, y_train)))
    print('Test: Gender correctly identified: ', "{:.1%}".format(search.score(X_test, y_test)))
    print('')
    print('Confusion Matrix')
    print(confusion_matrix(y_test, y_pred))
    return search
# Run 4: fit and evaluate the Naive Bayes model.
nb = m_naive_bayes_classifier(train_name_df, test_name_df)
# The naive bayes classifier has done well, but once again, it is misclassifying the minority class with specificity of 68%
# #### Run 5 - Random Forest Classifier with additional features
#
# The function will run a grid-search on the best number of trees as well at the ideal tree depth.
def random_forest_classifier(train_df, test_df):
    """Grid-search a random forest over min samples per split and tree count,
    print train/test accuracy and a test confusion matrix, and return the
    fitted GridSearchCV object."""
    X_train = train_df.drop(labels=['gender', 'name'], axis=1)
    y_train = train_df['gender']
    X_test = test_df.drop(labels=['gender', 'name'], axis=1)
    y_test = test_df['gender']
    # FIX: seed the forest so reruns are reproducible, matching the fixed
    # random_state already used for the CV splitter below; the unseeded
    # forest made reported scores vary between runs.
    rf = RandomForestClassifier(random_state=0)
    # Single-step pipeline wrapping the forest (no standardization).
    pipeline = Pipeline(steps=[('rf', rf)])
    param_grid = {
        'rf__min_samples_split': np.arange(2, 10, 2),
        'rf__n_estimators': np.arange(10, 20, 5)}
    cv = ShuffleSplit(n_splits=10, test_size=.3, random_state=0)
    search = GridSearchCV(pipeline,
                          param_grid,
                          cv=cv,
                          return_train_score=True
                          )
    search.fit(X_train, y_train)
    y_pred = search.predict(X_test)
    print('Run 5: Random Forest Classifier')
    print('Train: Gender correctly identified: ', "{:.1%}".format(search.score(X_train, y_train)))
    print('Test: Gender correctly identified: ', "{:.1%}".format(search.score(X_test, y_test)))
    print('')
    print('Confusion Matrix')
    print(confusion_matrix(y_test, y_pred))
    return search
# Run 5: fit and evaluate the random forest.
rf = random_forest_classifier(train_name_df, test_name_df)
# We have a similar test accuracy and misclassification rate for the minority class as the other two models
# ### Upsampling Minority Class
#
# In all cases, the minority class has been misclassified to a fair degree. Let's upsample the minority class.
# +
# Upsample the minority (male) class with replacement so the training
# classes are closer to balanced before re-fitting the models.
df_minority = train_name_df[train_name_df['gender']=='male']
df_minority_upsampled = resample(df_minority,
                                 replace=True,
                                 n_samples=2000,
                                 random_state=123)
df_upsampled = pd.concat([train_name_df,
                          df_minority_upsampled])
# -
# ### Re-Run Models with Upsampled Data
# #### Logistic Rerun
# Refit the logistic model on the upsampled training data.
lr = logistic_classifier(df_upsampled, test_name_df)
# We see a drop in accuracy, but more of a balance in terms of an error rate for the two classes. Specificity is at 81%
# #### Logistic Regressor Coefficients
# Rank logistic-regression coefficients by absolute value and show the top
# five (columns 2+ of the test frame are the engineered features).
pd.DataFrame.from_dict({feature:(abs(coef), coef) for
                        feature, coef in zip(test_name_df.iloc[:,2:].columns,
                                             lr.best_estimator_['logistic'].coef_[0])},
                       orient='index').rename({0:'Abs Coef', 1:'Coef'},
                                              axis=1).sort_values(by='Abs Coef',
                                                                  ascending=False)[['Coef']][:5]
# The logistic classifier tells us that the features capable of differentiating males from females the most are features specifying whether the last letters are a, i, k, c, and e.
# #### Naive Bayes Rerun
# Refit the Naive Bayes model on the upsampled training data.
nb = m_naive_bayes_classifier(df_upsampled, test_name_df)
# Once again, we see a drop in accuracy, but more of a balance in terms of an error rate for the two classes. Specificity is at 83%
# #### Naive Bayes Feature Log Probabilities
# +
# Pair each feature's class-conditional log probabilities for the two
# classes, then rank by their ratio to surface the most discriminative ones.
# NOTE(review): this divides two (negative) log-probabilities, which is a
# heuristic ranking rather than a true likelihood ratio — confirm intent.
nb_df = pd.DataFrame.from_dict({feature:[coef1, coef2] for
                                feature, coef1, coef2 in zip(test_name_df.iloc[:,2:].columns,
                                                             nb.best_estimator_['nb'].feature_log_prob_[0],
                                                             nb.best_estimator_['nb'].feature_log_prob_[1])},
                               orient='index').rename({0:'Coef1', 1:'Coef2'},
                                                      axis=1)
(nb_df['Coef1']/nb_df['Coef2']).sort_values(ascending=False)[:5]
# -
# The naive bayes classifier tells us that the features capable of differentiating males from females the most are features specifying whether the last letters are c, k, d, o, and f.
# #### Random Forest Rerun
# Refit the random forest on the upsampled training data.
rf = random_forest_classifier(df_upsampled, test_name_df)
# Accuracy has suffered again, but specificity is at 75%
# #### Random Forest Feature Importances
# Show the random forest's five largest feature importances.
pd.DataFrame.from_dict({feature: coef for
                        feature, coef in zip(test_name_df.iloc[:,2:].columns,
                                             rf.best_estimator_['rf'].feature_importances_)},
                       orient='index').rename({0: 'Coef'},
                                              axis=1).sort_values(by='Coef',
                                                                  ascending=False)[:5]
# The random forest classifier's feature importances give us more of a variety for the top features than the other two classifiers. It seems to prioritize the length of the name, the counts of vowels a and i, along with the features specifying whether the last letters are a and i.
# ### Conclusions of SK-Learn Models
#
# * All models classified test data with accuracy above 76%, but specificity was low on average
# * After upsampling the minority class, accuracy suffered, but specificity rose significantly
# * Significant features varied across all three models
|
Project_3/Project_3_final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
from pycbc import waveform
from astropy import constants
from astropy import units as u
# +
def analytic_remaining_time(r0, M1, M2):
    """Quadrupole-inspiral estimate of the remaining time to coalescence
    from initial separation r0 for component masses M1 and M2."""
    total_mass = M1 + M2
    reduced_mass = M1 * M2 / total_mass
    numerator = 5 * np.power(constants.c, 5) * np.power(r0, 4)
    denominator = 256 * np.power(constants.G, 3) * reduced_mass * np.power(total_mass, 2)
    return numerator / denominator
def analytic_radius(t, M1, M2):
    """Separation at (negative) time t before coalescence — the inverse of
    analytic_remaining_time, solving the same quadrupole-inspiral relation
    for r."""
    M = M1 + M2
    mu = M1 * M2 / M
    # alpha collects the constant factor 5 c^5 / (256 G^3) shared by the
    # inspiral formulas in this cell.
    alpha = 5 * np.power(constants.c, 5) / (256 * np.power(constants.G, 3))
    return np.power(-mu * np.power(M, 2) / alpha * t, 1/4)
def analytic_radial_freq(t, M1, M2):
    """Orbital angular frequency at time t: Kepler's relation
    sqrt(G*M) * r(t)**(-3/2) with r(t) from analytic_radius."""
    M = M1 + M2
    mu = M1 * M2 / M
    alpha = 5 * np.power(constants.c, 5) / (256 * np.power(constants.G, 3))
    return np.sqrt(constants.G * M) * np.power(-mu * np.power(M, 2) * t / alpha, -3/8)
def analytic_phase(t, M1, M2):
    """Gravitational-wave phase at time t: twice the orbital phase obtained
    by integrating analytic_radial_freq over time."""
    M = M1 + M2
    mu = M1 * M2 / M
    alpha = 5 * np.power(constants.c, 5) / (256 * np.power(constants.G, 3))
    orbit_phase = -(8 / 5) * np.sqrt(constants.G * M) * np.power(mu * np.power(M, 2) / alpha, -3/8) * np.power(-t, 5/8)
    return 2 * orbit_phase # waveform frequency and phase are double the orbit frequency and phase
def analytic_freq(t, M1, M2):
    """GW frequency at time t: twice the orbital frequency, converted from
    angular frequency via 1/(2*pi)."""
    return (1 / (2*np.pi)) * 2 * analytic_radial_freq(t, M1, M2)
def analytic_power(t, M1, M2):
    """Gravitational-wave luminosity at time t, expressed through
    analytic_radius and analytic_radial_freq."""
    M = M1 + M2
    mu = M1 * M2 / M
    # alpha is unused on the active path; it belongs to the equivalent
    # closed-form expression kept commented below for reference.
    alpha = 5 * np.power(constants.c, 5) / (256 * np.power(constants.G, 3))
    # return (1 / (8 * alpha)) * constants.G * np.power(mu, 2) * np.power(M, 3) \
    #     * np.power(- mu * np.power(M, 2) * t / alpha, -5/4)
    return 32 * constants.G * np.power(mu, 2) / (5 * np.power(constants.c, 5)) \
        * np.power(analytic_radius(t, M1, M2), 4) \
        * np.power(analytic_radial_freq(t, M1, M2), 6)
def analytic_amplitude(t, M1, M2, distance=1*1000000*constants.pc):
    """
    Strain amplitude at the given distance (default 1 Mpc), computed from
    the chirp mass M_c and analytic_freq.
    source: http://www.physics.usu.edu/Wheeler/GenRel2013/Notes/GravitationalWaves.pdf
    """
    # M_c is the chirp mass of the binary.
    M_c = np.power(M1 * M2, 3/5) / np.power(M1 + M2, 1/5)
    return 4 * constants.G / np.power(constants.c, 2) * M_c / distance * \
        np.power(constants.G / np.power(constants.c, 3) * np.pi * analytic_freq(t, M1, M2) * M_c, 2/3)
# -
def normalize_phase(phase_series):
    """Shift a pycbc phase TimeSeries so its value at t = 0 becomes zero."""
    offset = phase_series.at_time(0)
    return phase_series - offset
# Generate time-domain SEOBNRv2 waveforms for a sweep of equal-mass binaries
# (r is the mass ratio M2/M1); store the (plus, cross) polarization pairs.
r = 1.0
masses = list(range(20, 150, 10))
polarizations = []  # (hp, hc)
for m in tqdm(masses):
    M1 = m
    M2 = r*M1
    hp, hc = waveform.get_td_waveform(approximant='SEOBNRv2',
                                      mass1=M1,
                                      mass2=M2,
                                      delta_t=1e-3,
                                      f_lower=1.5)
    polarizations.append((hp, hc))
# +
# Percent error of the analytic phase against the SEOBNRv2 phase, per mass.
pct_errors = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    phase = waveform.utils.phase_from_polarizations(hp, hc)
    phase = normalize_phase(phase)
    phase_analytic = analytic_phase(phase.sample_times*u.s, M1*constants.M_sun, M2*constants.M_sun)
    # NOTE(review): error is normalized by the numerical phase (which crosses
    # zero near t=0), not by the analytic reference -- expect blow-ups there.
    pct_error = 100 * (np.abs(phase - phase_analytic) / phase)
    pct_errors.append(pct_error)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    # Only label the lightest and heaviest systems to keep the legend compact.
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(pct_errors[i].sample_times, pct_errors[i], color=colors[i], label=label)
plt.title("Analytic vs. SEOBNRv2 phase error", fontsize=15)
plt.ylabel("% error", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
plt.xscale('symlog')
plt.xlim(-400, 0.1)
plt.ylim(-10, 1)
plt.legend()
# +
# Percent error of the analytic frequency against the SEOBNRv2 frequency.
pct_errors = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    frequency = waveform.utils.frequency_from_polarizations(hp, hc)
    frequency_analytic = analytic_freq(frequency.sample_times*u.s, M1*constants.M_sun, M2*constants.M_sun).value
    pct_error = 100 * (np.abs(frequency - frequency_analytic) / frequency)
    pct_errors.append(pct_error)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(pct_errors[i].sample_times, pct_errors[i], color=colors[i], label=label)
plt.title("Analytic vs. SEOBNRv2 frequency error", fontsize=15)
plt.ylabel("% error", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
plt.xscale('symlog')
plt.xlim(-400, 0.1)
plt.ylim(-1, 10)
plt.legend()
# +
# Percent error of the analytic amplitude against the SEOBNRv2 amplitude.
pct_errors = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    amplitude = waveform.utils.amplitude_from_polarizations(hp, hc)
    amplitude_analytic = analytic_amplitude(amplitude.sample_times*u.s, M1*constants.M_sun, M2*constants.M_sun).value
    pct_error = 100 * (np.abs(amplitude - amplitude_analytic) / amplitude)
    pct_errors.append(pct_error)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(pct_errors[i].sample_times, pct_errors[i], color=colors[i], label=label)
plt.title("Analytic vs. SEOBNRv2 amplitude error", fontsize=15)
plt.ylabel("% error", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
plt.xscale('symlog')
plt.xlim(-500, 0.1)
plt.ylim(-1, 15)
plt.legend()
# +
# Raw SEOBNRv2 phases for all masses, normalized to zero at t=0.
phases = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    phase = waveform.utils.phase_from_polarizations(hp, hc)
    phase = normalize_phase(phase)
    phases.append(phase)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(phases[i].sample_times, phases[i], color=colors[i], label=label)
plt.title("SEOBNRv2 phases", fontsize=12)
plt.ylabel("Phase", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-20, 0.1)
plt.ylim(-1750, 1)
plt.legend()
# +
# Phases rescaled by gamma^(-5/8), gamma = m / lightest mass.
# NOTE(review): exponent presumably from the Newtonian chirp scaling -- confirm.
phases = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    phase = waveform.utils.phase_from_polarizations(hp, hc)
    phase = normalize_phase(phase)
    gamma = m / masses[0]
    phase /= np.power(gamma, -5/8)
    phases.append(phase)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(phases[i].sample_times, phases[i], color=colors[i], label=label)
plt.title("SEOBNRv2 mass-corrected phases", fontsize=12)
plt.ylabel("Phase (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-20, 0.8)
plt.ylim(-1750, 300)
plt.legend()
# +
# Frequencies rescaled by the same gamma^(-5/8) factor.
frequencies = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    frequency = waveform.utils.frequency_from_polarizations(hp, hc)
    # frequency = normalize_frequency(frequency)
    gamma = m / masses[0]
    frequency /= np.power(gamma, -5/8)
    frequencies.append(frequency)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(frequencies[i].sample_times, frequencies[i], color=colors[i], label=label)
plt.title("SEOBNRv2 mass-corrected frequencies", fontsize=12)
plt.ylabel("frequency (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim(-20, 0.8)
plt.ylim(-1, 1000)
plt.legend()
# +
# Amplitudes rescaled by gamma^(5/4).
amplitudes = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    amplitude = waveform.utils.amplitude_from_polarizations(hp, hc)
    # amplitude = normalize_amplitude(amplitude)
    gamma = m / masses[0]
    amplitude /= np.power(gamma, 5/4)
    amplitudes.append(amplitude)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(amplitudes[i].sample_times, amplitudes[i], color=colors[i], label=label)
plt.title("SEOBNRv2 mass-corrected amplitudes", fontsize=12)
plt.ylabel("Amplitude (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-0.5, 0.1)
# plt.ylim(-1, 1000)
plt.legend()
# -
# ## Let's do all these with EOBNRv2
# Repeat the waveform sweep with the EOBNRv2 approximant for comparison.
r = 1.0
masses = list(range(20, 150, 10))
polarizations = []  # (hp, hc)
for m in tqdm(masses):
    M1 = m
    M2 = r*M1
    hp, hc = waveform.get_td_waveform(approximant='EOBNRv2',
                                      mass1=M1,
                                      mass2=M2,
                                      delta_t=1e-3,
                                      f_lower=1.5)
    polarizations.append((hp, hc))
# +
# Percent error of the analytic phase against the EOBNRv2 phase, per mass.
pct_errors = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    phase = waveform.utils.phase_from_polarizations(hp, hc)
    phase = normalize_phase(phase)
    phase_analytic = analytic_phase(phase.sample_times*u.s, M1*constants.M_sun, M2*constants.M_sun)
    # NOTE(review): as above, the divisor is the numerical phase, which
    # crosses zero near t=0 -- expect blow-ups there.
    pct_error = 100 * (np.abs(phase - phase_analytic) / phase)
    pct_errors.append(pct_error)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(pct_errors[i].sample_times, pct_errors[i], color=colors[i], label=label)
plt.title("Analytic vs. EOBNRv2 phase error", fontsize=15)
plt.ylabel("% error", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
plt.xscale('symlog')
plt.xlim(-400, 0.1)
plt.ylim(-10, 1)
plt.legend()
# +
# Percent error of the analytic frequency against the EOBNRv2 frequency.
pct_errors = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    frequency = waveform.utils.frequency_from_polarizations(hp, hc)
    frequency_analytic = analytic_freq(frequency.sample_times*u.s, M1*constants.M_sun, M2*constants.M_sun).value
    pct_error = 100 * (np.abs(frequency - frequency_analytic) / frequency)
    pct_errors.append(pct_error)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(pct_errors[i].sample_times, pct_errors[i], color=colors[i], label=label)
plt.title("Analytic vs. EOBNRv2 frequency error", fontsize=15)
plt.ylabel("% error", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
plt.xscale('symlog')
plt.xlim(-400, 0.1)
plt.ylim(-1, 10)
plt.legend()
# +
# Percent error of the analytic amplitude against the EOBNRv2 amplitude.
pct_errors = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    amplitude = waveform.utils.amplitude_from_polarizations(hp, hc)
    amplitude_analytic = analytic_amplitude(amplitude.sample_times*u.s, M1*constants.M_sun, M2*constants.M_sun).value
    pct_error = 100 * (np.abs(amplitude - amplitude_analytic) / amplitude)
    pct_errors.append(pct_error)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(pct_errors[i].sample_times, pct_errors[i], color=colors[i], label=label)
plt.title("Analytic vs. EOBNRv2 amplitude error", fontsize=15)
plt.ylabel("% error", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
plt.xscale('symlog')
plt.xlim(-500, 0.1)
plt.ylim(-1, 15)
plt.legend()
# +
# Raw EOBNRv2 phases for all masses, normalized to zero at t=0.
phases = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    phase = waveform.utils.phase_from_polarizations(hp, hc)
    phase = normalize_phase(phase)
    phases.append(phase)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(phases[i].sample_times, phases[i], color=colors[i], label=label)
plt.title("EOBNRv2 phases", fontsize=12)
plt.ylabel("Phase", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-20, 0.1)
plt.ylim(-1750, 1)
plt.legend()
# +
# Phases rescaled by gamma^(-5/8), gamma = m / lightest mass.
phases = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    phase = waveform.utils.phase_from_polarizations(hp, hc)
    phase = normalize_phase(phase)
    gamma = m / masses[0]
    phase /= np.power(gamma, -5/8)
    phases.append(phase)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(phases[i].sample_times, phases[i], color=colors[i], label=label)
plt.title("EOBNRv2 mass-corrected phases", fontsize=12)
plt.ylabel("Phase (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-20, 0.8)
plt.ylim(-1750, 300)
plt.legend()
# +
# Frequencies rescaled by the same gamma^(-5/8) factor.
frequencies = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    frequency = waveform.utils.frequency_from_polarizations(hp, hc)
    # frequency = normalize_frequency(frequency)
    gamma = m / masses[0]
    frequency /= np.power(gamma, -5/8)
    frequencies.append(frequency)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(frequencies[i].sample_times, frequencies[i], color=colors[i], label=label)
plt.title("EOBNRv2 mass-corrected frequencies", fontsize=12)
plt.ylabel("frequency (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim(-20, 0.8)
plt.ylim(-1, 1000)
plt.legend()
# +
# Amplitudes rescaled by gamma^(5/4).
amplitudes = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    amplitude = waveform.utils.amplitude_from_polarizations(hp, hc)
    # amplitude = normalize_amplitude(amplitude)
    gamma = m / masses[0]
    amplitude /= np.power(gamma, 5/4)
    amplitudes.append(amplitude)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(amplitudes[i].sample_times, amplitudes[i], color=colors[i], label=label)
plt.title("EOBNRv2 mass-corrected amplitudes", fontsize=12)
plt.ylabel("Amplitude (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-0.5, 0.1)
# plt.ylim(-1, 1000)
plt.legend()
# -
# ## Let's just brute force a stretch and scale to see if we can get the amplitude curves to collapse
# Overlay two raw amplitude curves to eyeball a possible scaling collapse.
amp1 = amplitudes[0]
amp2 = amplitudes[4]
plt.plot(amp1.sample_times, amp1)
plt.plot(amp2.sample_times, amp2)
plt.xlim(-1, 0.4)
# +
# Compress time by `scale` and boost the amplitude by scale**(1/4) to test
# whether amp2 collapses onto amp1.
amp3 = amp2.copy()
scale = 3.0
amp3_sample_times = amp3.sample_times / scale
amp3 = amp3 * np.power(scale, 1/4)
plt.plot(amp1.sample_times, amp1)
plt.plot(amp2.sample_times, amp2)
plt.plot(amp3_sample_times, amp3)
plt.xlim(-0.3, 0.3)
# -
# ### Okay success! We can compress time by the mass factor and then scale amplitude by the same mass factor
# Regenerate the SEOBNRv2 sweep (equal masses) with a higher starting
# frequency (f_lower=3.0), giving shorter signals for the collapse test.
r = 1.0
masses = list(range(20, 150, 10))
polarizations = []  # (hp, hc)
for m in tqdm(masses):
    M1 = m
    M2 = r*M1
    hp, hc = waveform.get_td_waveform(approximant='SEOBNRv2',
                                      mass1=M1,
                                      mass2=M2,
                                      delta_t=1e-3,
                                      f_lower=3.0)
    polarizations.append((hp, hc))
# +
# Collapse test: divide both time and amplitude by gamma = m/100.
amplitudes = []
times = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    amplitude = waveform.utils.amplitude_from_polarizations(hp, hc)
    gamma = m / 100
    # NOTE(review): sample_times is divided in place; if pycbc returns a live
    # view, this mutates the underlying series -- confirm intent.
    sample_times = amplitude.sample_times
    sample_times /= gamma
    amplitude /= gamma
    times.append(sample_times)
    amplitudes.append(amplitude)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(times[i], amplitudes[i], color=colors[i], label=label)
plt.title("SEOBNRv2 mass-corrected amplitudes", fontsize=12)
plt.ylabel("Amplitude (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-0.5, 0.1)
# plt.ylim(-1, 1000)
plt.legend()
# -
# Same sweep with an unequal mass ratio (M2 = 3 M1).
r = 3.0
masses = list(range(20, 150, 10))
polarizations = []  # (hp, hc)
for m in tqdm(masses):
    M1 = m
    M2 = r*M1
    hp, hc = waveform.get_td_waveform(approximant='SEOBNRv2',
                                      mass1=M1,
                                      mass2=M2,
                                      delta_t=1e-3,
                                      f_lower=3.0)
    polarizations.append((hp, hc))
# +
# Collapse test for r=3: same gamma = m/100 time/amplitude rescaling.
amplitudes = []
times = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    amplitude = waveform.utils.amplitude_from_polarizations(hp, hc)
    gamma = m / 100
    # In-place rescale of sample_times (see note in the r=1 cell above).
    sample_times = amplitude.sample_times
    sample_times /= gamma
    amplitude /= gamma
    times.append(sample_times)
    amplitudes.append(amplitude)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(times[i], amplitudes[i], color=colors[i], label=label)
plt.title("SEOBNRv2 mass-corrected amplitudes", fontsize=12)
plt.ylabel("Amplitude (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-0.5, 0.1)
# plt.ylim(-1, 1000)
plt.legend()
# -
# Same sweep with mass ratio M2 = 0.8 M1.
r = 0.8
masses = list(range(20, 150, 10))
polarizations = []  # (hp, hc)
for m in tqdm(masses):
    M1 = m
    M2 = r*M1
    hp, hc = waveform.get_td_waveform(approximant='SEOBNRv2',
                                      mass1=M1,
                                      mass2=M2,
                                      delta_t=1e-3,
                                      f_lower=3.0)
    polarizations.append((hp, hc))
# +
# Collapse test for r=0.8: same gamma = m/100 time/amplitude rescaling.
amplitudes = []
times = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    amplitude = waveform.utils.amplitude_from_polarizations(hp, hc)
    gamma = m / 100
    # In-place rescale of sample_times (see note in the r=1 cell above).
    sample_times = amplitude.sample_times
    sample_times /= gamma
    amplitude /= gamma
    times.append(sample_times)
    amplitudes.append(amplitude)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(times[i], amplitudes[i], color=colors[i], label=label)
plt.title("SEOBNRv2 mass-corrected amplitudes", fontsize=12)
plt.ylabel("Amplitude (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-0.5, 0.1)
# plt.ylim(-1, 1000)
plt.legend()
# -
def normalize_phase(sample_times, phases):
    """Shift `phases` so the sample near t = 0 becomes (approximately) zero.

    Assumes `sample_times` is uniformly spaced and spans t = 0; the index of
    t = 0 is located by linear proportion rather than a search.
    NOTE(review): this redefines the earlier pycbc-based normalize_phase.
    """
    assert len(sample_times) == len(phases)
    t_first, t_last = sample_times[0], sample_times[-1]
    fraction_before_zero = 1 - t_last / (t_last - t_first)
    zero_index = int(len(sample_times) * fraction_before_zero) - 1
    return phases - phases[zero_index]
# +
# Phase collapse test: compress time by gamma = min(M1, M2)/100; the phase
# rescaling itself is left disabled (commented out).
phases = []
times = []
for i, m in enumerate(masses):
    M1 = m
    M2 = r*M1
    hp, hc = polarizations[i]
    phase = waveform.utils.phase_from_polarizations(hp, hc)
    sample_times = phase.sample_times
    phase = normalize_phase(sample_times, phase)
    gamma = min(M1, M2) / 100
    # In-place rescale of sample_times (see earlier note).
    sample_times /= gamma
    # phase *= np.power(gamma, 13/8)
    times.append(sample_times)
    phases.append(phase)
colors = plt.cm.viridis(np.linspace(0, 1, len(masses)))
for i, m in enumerate(masses):
    if i == 0 or i == len(masses) - 1:
        label = f"{r}*M1 = M2 = {m}M☉"
    else:
        label = None
    plt.plot(times[i], phases[i], color=colors[i], label=label)
plt.title("SEOBNRv2 mass-corrected phases", fontsize=12)
plt.ylabel("phase (normalized)", fontsize=15)
plt.xlabel("Time (s)", fontsize=15)
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.xlim(-0.5, 0.1)
plt.ylim(-50, 75)
plt.legend()
# -
|
sanity-check-v2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Just for some quick analyses or visualizations.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#plt.style.use('ggplot')
#plt.style.use('seaborn-whitegrid')
#plt.style.use('seaborn-colorblind')
# Global plotting style: dark theme, high-DPI figures saved tightly cropped
# with a transparent background.
plt.style.use('dark_background')
plt.rcParams['figure.dpi'] = 300
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['savefig.bbox'] = 'tight'
plt.rcParams['savefig.transparent'] = True
import datetime
# Date stamp used to version the saved figure filenames.
date = datetime.datetime.now().strftime('%Y%m%d')
# %matplotlib inline
# -
# Load the merged spectra+bulk dataset; the first column is the index.
merge_df = pd.read_csv('data/spe+bulk_dataset_20201008.csv', index_col=0)
merge_df
# # Spectrum demo
# Mean and std across all spectra (first 2048 columns = detector channels).
y_mean = merge_df.iloc[:, :2048].mean(axis=0)
y_err = merge_df.iloc[:, :2048].std(axis=0)
fig, ax = plt.subplots(2, 1, sharex='col', figsize=(5.5, 5.5))
# NOTE(review): log10(std) is not the uncertainty of log10(mean); a log-space
# error band would be log10(mean +/- std) -- confirm intent.
ax[0].errorbar(range(2048), np.log10(y_mean), yerr=np.log10(y_err), ecolor='lightgray')
ax[1].errorbar(range(2048), y_mean/1000, yerr=y_err/1000, ecolor='lightgray')
ax[0].set_ylabel('log(count)')
ax[1].set_ylabel('Count (k)')
ax[1].set_xlabel('Channel')
fig.subplots_adjust(hspace=.05, top=.94)
fig.suptitle('Mean of 382 spectra')
fig.savefig('results/spectrum.png')
# # Data distribution and correlation
# ## Whole dataset
# +
from scipy import stats
#sns.set_style("ticks")
#plt.style.use('dark_background')
def corrfunc(x, y, **kws):
    """PairGrid helper: annotate the current axes with the Pearson r of x, y."""
    r = stats.pearsonr(x, y)[0]
    axes = plt.gca()
    axes.annotate("r = {:.2f}".format(r),
                  xy=(.1, .9), xycoords=axes.transAxes, size=12)
def maxmin(x, **kws):
    """PairGrid helper: annotate the current axes with max and min of x."""
    axes = plt.gca()
    for prefix, value, ypos in (("max.", np.max(x), .9), ("min.", np.min(x), .8)):
        axes.annotate("{} = {:.1f}".format(prefix, value),
                      xy=(.4, ypos), xycoords=axes.transAxes, size=12)
# +
# Pair plot of the three bulk properties over the whole dataset:
# scatter (upper), histograms + max/min annotations (diag), KDE + Pearson r (lower).
g = sns.PairGrid(merge_df.iloc[:, -5:-2])
g.map_upper(plt.scatter, s=10, alpha=0.5)
g.map_diag(sns.histplot, kde=False)
g.map_diag(maxmin)
g.map_lower(corrfunc)
g.map_lower(sns.kdeplot, cmap='Oranges_r')
g.savefig('results/bulk_infos_{}.png'.format(date))
# -
# ## Training and test sets, respectively
# +
from sklearn.model_selection import ShuffleSplit
# Single fixed 80/20 split; random_state pinned for reproducibility.
rs = ShuffleSplit(n_splits=1, test_size=.2, random_state=24)
train_index, test_index = next(rs.split(merge_df.iloc[:, :2048]))
# -
print(len(train_index), len(test_index))
# +
# Same pair plot restricted to the training set.
g = sns.PairGrid(merge_df.iloc[train_index, -5:-2])
g.map_upper(plt.scatter, s=10, alpha=0.5)
g.map_diag(sns.histplot, kde=False)
g.map_diag(maxmin)
g.map_lower(corrfunc)
g.map_lower(sns.kdeplot, cmap='Oranges_r')
g.savefig('results/train_bulk_infos_{}.png'.format(date))
# +
# Same pair plot restricted to the test set.
g = sns.PairGrid(merge_df.iloc[test_index, -5:-2])
g.map_upper(plt.scatter, s=10, alpha=0.5)
g.map_diag(sns.histplot, kde=False)
g.map_diag(maxmin)
g.map_lower(corrfunc)
g.map_lower(sns.kdeplot, cmap='Oranges_r')
g.savefig('results/test_bulk_infos_{}.png'.format(date))
# +
# 3x3 grid: distribution of each property (columns) for whole/train/test (rows).
fig, axes = plt.subplots(3, 3, sharex='col', sharey='row', figsize=(7.5, 6))
for col in range(3):
    for row, index in enumerate([range(len(merge_df)), train_index, test_index]):
        axes[row, col].hist(merge_df.iloc[index, -5+col])
        axes[row, col].text(0.5, 0.75, "max. = {:.1f}\nmin. = {:.1f}".format(
            np.max(merge_df.iloc[index, -5+col]), np.min(merge_df.iloc[index, -5+col])), transform=axes[row, col].transAxes)
        #axes[0, i].text(0.5, 0.8, "max. = {:.1f}".format(np.max(merge_df.iloc[:, -5+i])), transform=axes[0, i].transAxes)
        #axes[0, i].text(0.5, 0.8, "max. = {:.1f}".format(np.max(merge_df.iloc[:, -5+i])), transform=axes[0, i].transAxes)
    axes[2, col].set_xlabel(merge_df.columns[-5+col])
axes[0, 0].set_ylabel('Whole set\n(N: {})'.format(len(merge_df)))
axes[1, 0].set_ylabel('Training set\n(N: {})'.format(len(train_index)))
axes[2, 0].set_ylabel('Test set\n(N: {})'.format(len(test_index)))
fig.suptitle('Data distribution')
fig.subplots_adjust(hspace=.05, wspace=.05, top=.94)
fig.savefig('results/bulk_infos_hist_{}.png'.format(date))
|
quick_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](https://github.com/rishihot55). Source and license info is available on [Github](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Find all valid combinations of n-pairs of parentheses.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Is the input an integer representing the number of pairs?
# * Yes
# * Can we assume the inputs are valid?
# * No
# * Is the output a list of valid combinations?
# * Yes
# * Should the output have duplicates?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# <pre>
# * None -> Exception
# * Negative -> Exception
# * 0 -> []
# * 1 -> ['()']
# * 2 -> ['(())', '()()']
# * 3 -> ['((()))', '(()())', '(())()', '()(())', '()()()']
# </pre>
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/recursion_dynamic/n_pairs_parentheses/n_pairs_parentheses_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
class Parentheses(object):
    """Generates all valid combinations of n pairs of parentheses.

    Fix: the challenge stub (`pass`, which returned None) is replaced by a
    backtracking implementation satisfying the unit test below.
    """

    def find_pair(self, num_pairs):
        """Return all distinct well-formed strings of `num_pairs` parenthesis pairs.

        Results are ordered by always extending with '(' before ')', matching
        the expected output in the test cases above.

        Raises:
            TypeError: if num_pairs is None.
            ValueError: if num_pairs is negative.
        """
        if num_pairs is None:
            raise TypeError('num_pairs cannot be None')
        if num_pairs < 0:
            raise ValueError('num_pairs cannot be negative')
        if num_pairs == 0:
            # By the stated contract, zero pairs yields an empty list
            # (not [''] as some formulations would).
            return []
        results = []
        self._build(results, '', num_pairs, 0)
        return results

    def _build(self, results, current, open_remaining, close_pending):
        # open_remaining: '(' characters still available to place.
        # close_pending: opened '(' characters awaiting a matching ')'.
        if open_remaining == 0 and close_pending == 0:
            results.append(current)
            return
        if open_remaining > 0:
            self._build(results, current + '(', open_remaining - 1, close_pending + 1)
        if close_pending > 0:
            self._build(results, current + ')', open_remaining, close_pending - 1)
# ## Unit Test
# +
# # %load test_n_pairs_parentheses.py
import unittest
class TestPairParentheses(unittest.TestCase):
    """Unit tests for Parentheses.find_pair (challenge harness)."""

    def test_pair_parentheses(self):
        parentheses = Parentheses()
        # Invalid inputs raise, per the constraints agreed above.
        self.assertRaises(TypeError, parentheses.find_pair, None)
        self.assertRaises(ValueError, parentheses.find_pair, -1)
        # Valid inputs: compare against the known combination lists, in order.
        expected_by_n = [
            (0, []),
            (1, ['()']),
            (2, ['(())', '()()']),
            (3, ['((()))', '(()())', '(())()', '()(())', '()()()']),
        ]
        for num_pairs, expected in expected_by_n:
            self.assertEqual(parentheses.find_pair(num_pairs), expected)
        print('Success: test_pair_parentheses')


def main():
    """Run the challenge test directly, without a unittest runner."""
    test = TestPairParentheses()
    test.test_pair_parentheses()


if __name__ == '__main__':
    main()
# -
# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/recursion_dynamic/n_pairs_parentheses/n_pairs_parentheses_solution.ipynb) for a discussion on algorithms and code solutions.
|
recursion_dynamic/n_pairs_parentheses/n_pairs_parentheses_challenge.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (xrft)
# language: python
# name: xrft
# ---
import numpy as np
import numpy.testing as npt
import xarray as xr
import xrft
import dask.array as dsar
from matplotlib import colors
import matplotlib.pyplot as plt
# %matplotlib inline
# # Parallelized Bartlett's Method
# For long data sets that have reached statistical equilibrium, it is useful to chunk the data, calculate the periodogram for each chunk and then take the average to reduce variance.
# Random 3-D test data: 256 time samples on a 128x128 (y, x) grid.
n = int(2**8)
da = xr.DataArray(np.random.rand(n,int(n/2),int(n/2)), dims=['time','y','x'])
da
# ## One dimension
# ### Discrete Fourier Transform
# Chunk time into 4 segments and let xrft FFT each segment separately.
daft = xrft.dft(da.chunk({'time':int(n/4)}), dim=['time'], shift=False , chunks_to_segments=True).compute()
daft
# Manual reference: reshape into (segment, time-in-segment, y, x) and FFT
# along the within-segment time axis with dask directly.
data = da.chunk({'time':int(n/4)}).data
data_rs = data.reshape((4,int(n/4),int(n/2),int(n/2)))
da_rs = xr.DataArray(data_rs, dims=['time_segment','time','y','x'])
da1 = xr.DataArray(dsar.fft.fftn(data_rs, axes=[1]).compute(),
                   dims=['time_segment','freq_time','y','x'])
da1
# We assert that our calculations give equal results.
npt.assert_almost_equal(da1, daft.values)
# ### Power Spectrum
ps = xrft.power_spectrum(da.chunk({'time':int(n/4)}), dim=['time'], chunks_to_segments=True)
ps
# Taking the mean over the segments gives Bartlett's estimate.
ps = ps.mean(['time_segment','y','x'])
ps
# Plot the positive-frequency half of the averaged spectrum.
fig, ax = plt.subplots()
ax.semilogx(ps.freq_time[int(n/8)+1:], ps[int(n/8)+1:])
# ## Two dimension
# ### Discrete Fourier Transform
# Same exercise in 2-D: 4x4 spatial segments of 32x32 points each.
daft = xrft.dft(da.chunk({'y':32,'x':32}), dim=['y','x'], shift=False , chunks_to_segments=True).compute()
daft
data = da.chunk({'y':32,'x':32}).data
data_rs = data.reshape((256,4,32,4,32))
da_rs = xr.DataArray(data_rs, dims=['time','y_segment','y','x_segment','x'])
da2 = xr.DataArray(dsar.fft.fftn(data_rs, axes=[2,4]).compute(),
                   dims=['time','y_segment','freq_y','x_segment','freq_x'])
da2
# We assert that our calculations give equal results.
npt.assert_almost_equal(da2, daft.values)
# ### Power Spectrum
# Windowed, linearly detrended 2-D power spectrum per 64x64 segment.
# NOTE(review): window='True' passes a *string*; presumably a truthy flag was
# intended -- confirm against the xrft API.
ps = xrft.power_spectrum(da.chunk({'time':1,'y':64,'x':64}), dim=['y','x'],
                         chunks_to_segments=True, window='True', detrend='linear')
ps = ps.mean(['time','y_segment','x_segment'])
ps
fig, ax = plt.subplots()
ps.plot(ax=ax, norm=colors.LogNorm(), vmin=6.5e-4, vmax=7.5e-4)
|
doc/chunk_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Combinatorial library properties
# ## Aim of this notebook
#
# In this notebook we want to analyze properties of the combinatorial library:
#
# 1. Recombined ligands compliant with Lipinski's rule of five (criteria) in comparison to KLIFS and PKIDB ligands
# 2. Recombined ligand sizes (number of heavy atoms)
#
# **Note** that the combinatorial library is stored as `json` file (6.7M molecules). The data needed for this notebook was extracted previously in notebook `4_1_combinatorial_library_data.ipynb` for easy and fast access here. In order to run this notebook, download data from zenodo as instructed in `../data/combinatorial_library/README.md`.
# ## Table of contents
#
# 1. Lipinski's rule of five
# 2. Recombined ligand size
# 3. Number of subpockets
# %load_ext autoreload
# %autoreload 2
# +
from datetime import datetime
from pathlib import Path
import ijson
import matplotlib.pyplot as plt
import pandas as pd
from rdkit import Chem
from kinfraglib import utils
# -
# Notebook directory; in IPython, _dh[-1] is the most recent directory-history
# entry (the notebook's working directory).
HERE = Path(_dh[-1])
# ## 1. Lipinski's rule of five
# ### Recombined ligands
# Load recombined ligands' properties (number of ligands that fulfill Lipinski's rule of five (and its individual criteria) and number of ligands in total).
# Counts of recombined ligands fulfilling each Ro5 criterion, plus the total
# number of ligands ('n_ligands'), precomputed in notebook 4_1.
properties = pd.read_csv(
    HERE / '../data/combinatorial_library/ro5.csv',
    header=None,
    index_col=0,
    squeeze=True  # NOTE(review): deprecated in newer pandas; use .squeeze("columns") instead
)
properties
# NBVAL_CHECK_OUTPUT
# Get ratio of ligands that fulfill the Lipinski's rule of five (and its individual criteria)
ro5_comb = round(
    properties[['mw', 'logp', 'hbd', 'hba', 'lipinski']] / properties['n_ligands'] * 100,
    0
)
ro5_comb = ro5_comb.astype('int')
ro5_comb
# NBVAL_CHECK_OUTPUT
# ### PKIDB ligands
#
# Compare the recombined ligands to the PKIDB ligands, i.e. approved and investigational kinase inhibitors.
#
# - Database: https://www.icoa.fr/pkidb/
# - Publication: [Carles et al. (2018)](https://www.mdpi.com/1420-3049/23/4/908) and [Bournez et al. (2020)](https://www.mdpi.com/1420-3049/25/14/3226)
#
# The PKIDB dataset was downloaded and added to this repository from https://www.icoa.fr/pkidb/ using the download option (dataset from 2020-07-15).
# Load SDF file
mol_supplier = Chem.SDMolSupplier(str(HERE / '../data/external/pkidb_2020-07-15.sdf'))
# Get SMILES for each molecule
# NOTE(review): assumes every SDF entry parses; an unparsable entry yields
# None and would raise on GetProp -- confirm the input file is clean.
data_pkidb = pd.DataFrame([mol.GetProp('Canonical_Smiles') for mol in mol_supplier], columns=['Canonical_Smiles'])
print(f'Number of PKIDB ligands: {data_pkidb.shape[0]}')
# Percentage of PKIDB ligands fulfilling each Ro5 criterion.
ro5_pkidb = round(
    data_pkidb.Canonical_Smiles.apply(
        Chem.MolFromSmiles
    ).apply(utils.get_ro5_from_mol).sum() / data_pkidb.shape[0] * 100,
    0
)
ro5_pkidb = ro5_pkidb.astype('int')
ro5_pkidb
# NBVAL_CHECK_OUTPUT
# ### Original ligands
#
# Compare the recombined ligands to the KLIFS ligands that were used to generate the fragment library (original ligands).
#
# - Database: https://klifs.vu-compmedchem.nl/
# - Publications: [van Linden et al. (2014)](https://pubs.acs.org/doi/abs/10.1021/jm400378w) and [Kooistra et al. (2016)](https://academic.oup.com/nar/article/44/D1/D365/2502606)
#
# Note that we only use a subset of KLIFS ligands here, i.e. the ligands used for the fragmentation.
# KLIFS ligands used for the fragmentation (precomputed JSON with SMILES).
data_klifs = pd.read_json(HERE / '../data/fragment_library/original_ligands.json')
print(f'Number of original KLIFS ligands: {data_klifs.shape[0]}')
# NBVAL_CHECK_OUTPUT
data_klifs.head(2)
# Percentage of KLIFS ligands fulfilling each Ro5 criterion.
ro5_klifs = round(
    data_klifs.smiles.apply(
        Chem.MolFromSmiles
    ).apply(utils.get_ro5_from_mol).sum() / data_klifs.shape[0] * 100,
    0
)
ro5_klifs = ro5_klifs.astype('int')
ro5_klifs
# NBVAL_CHECK_OUTPUT
# NBVAL_CHECK_OUTPUT
# ### Plot properties
# Collect data in one DataFrame
bar_data = pd.DataFrame(
    {
        f'Recombined ligands (#{properties["n_ligands"]})': ro5_comb,
        f'Original KLIFS ligands (#{data_klifs.shape[0]})': ro5_klifs,
        f'PKIDB ligands (#{data_pkidb.shape[0]})': ro5_pkidb
    }
)
bar_data = bar_data.astype('int32')
bar_data.index.name = None
bar_data
# NBVAL_CHECK_OUTPUT
# +
# Grouped bar chart: Ro5 criterion compliance per dataset.
ax = bar_data.plot(
    kind='bar',
    width=0.85,
    rot=0,
    figsize=(3.33, 4),
    color=['cornflowerblue', 'darkgrey', 'grey']
)
# Edit labels for y axis and x/y ticks
plt.ylabel('# Molecules [%]', fontsize=12)
plt.xticks(range(5), ['MWT\n'+r'$\leq 500$', 'logP\n'+r'$\leq 5$', 'HBD\n'+r'$\leq 5$', 'HBA\n'+r'$\leq 10$', 'Rule\nof 5'], fontsize=12)
plt.yticks(fontsize=12)
# Fix: the original called plt.legend() twice in a row; each call replaces the
# previous legend, so the first ('upper left') call was dead code -- removed.
legend = plt.legend(loc='upper center',
                    bbox_to_anchor=(0.5,-.2),
                    fontsize=12
                    )
# Add percentages to bars
bars = ax.patches
bar_labels = bar_data.transpose().values.flatten()
for bar, label in zip(bars, bar_labels):
    plt.text(
        bar.get_x() + bar.get_width() / 1.6,
        bar.get_height() - 6,
        label,
        ha='center',
        va='center',
        fontsize=10,
        color='white',
        rotation=90
    )
# -
# Save the figure, keeping the outside-axes legend inside the bounding box.
ax.get_figure().savefig(
    HERE / 'figures/combinatorial_library_ro5.pdf',
    bbox_extra_artists=(legend,),
    bbox_inches='tight'
)
# ## 2. Recombined ligand size
#
# Take a look at the average number of heavy atoms in the combinatorial library.
# Heavy-atom counts per recombined ligand, as a Series (~6M entries).
n_atoms = pd.read_csv(HERE / '../data/combinatorial_library/n_atoms.csv', header=None).squeeze()
len(n_atoms)
# NBVAL_CHECK_OUTPUT
plt.hist(n_atoms)
plt.xlabel('Number of heavy atoms')
n_atoms.describe(percentiles=[.01, .1, .25, .5, .75, .99])
# NBVAL_CHECK_OUTPUT
# 98% of recombined ligands have between 22 and 51 heavy atoms (on average 36).
# Delete object (since very large with ~6M entries)
del n_atoms
# ## 3. Number of subpockets
# Per-combination counts; the index labels are subpocket combinations such as
# 'AP-FP-SE', so splitting on '-' yields the number of occupied subpockets.
subpockets = pd.read_csv(HERE / '../data/combinatorial_library/subpockets.csv', index_col=0)
subpockets['n_subpockets'] = [len(i.split('-')) for i in subpockets.index]
subpockets
# NBVAL_CHECK_OUTPUT
# Aggregate ligand counts by number of subpockets and express as percentages.
n_subpockets = subpockets.groupby('n_subpockets').sum()
n_subpockets['ratio'] = round(n_subpockets['count'] / n_subpockets['count'].sum() * 100, 2)
n_subpockets
# NBVAL_CHECK_OUTPUT
# NBVAL_CHECK_OUTPUT
# The majority of recombined ligands are composed of 4 fragments (occupying 4 subpockets), whereas the majority of original ligands is smaller and occupies 2-3 subpockets (check out `notebooks/2_2_fragment_analysis_statistics.ipynb`). This is to be expected since all fragment combinations were allowed up to 4 fragments during the recombination process.
|
notebooks/4_2_combinatorial_library_properties.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# DenseNet model-loading experiment.
# -
from tensorflow.keras.models import load_model as tf_load_model
from keras.models import load_model as keras_load_model
# Load a saved DenseNet121 model from disk using the standalone keras loader.
model = keras_load_model('densenet121_m_f_14_14.h5')
# !pip freeze
# !pip install keras==2.4.3 Keras-Preprocessing==1.1.2
# !pip install Pillow==7.1.2
# Package versions changed (presumably to make the saved .h5 loadable -- TODO confirm):
#   keras               2.2.4 -> 2.4.3
#   keras-preprocessing 1.0.9 -> 1.1.2
#   pillow              5.3.0 -> 7.1.2
# Revert with:
# !pip install Pillow==5.3.0 Keras==2.2.4 Keras-Preprocessing==1.0.9
# +
pip install absl-py==0.12.0 alabaster==0.7.12 albumentations==0.1.12 altair==4.1.0 appdirs==1.4.4 argon2-cffi==20.1.0 astor==0.8.1 astropy==4.2.1 astunparse==1.6.3 async-generator==1.10 atari-py==0.2.6 atomicwrites==1.4.0 attrs==20.3.0 audioread==2.1.9 autograd==1.3 Babel==2.9.0 backcall==0.2.0 blis==0.4.1 bokeh==2.3.1 Bottleneck==1.3.2 branca==0.4.2 catalogue==1.0.0 certifi==2020.12.5 cffi==1.14.5 chainer==7.4.0 chardet==3.0.4 click==7.1.2 cloudpickle==1.3.0 cmake==3.12.0 cmdstanpy==0.9.5 colorcet==2.0.6 colorlover==0.3.0 community==1.0.0b1 contextlib2==0.5.5 convertdate==2.3.2 coverage==3.7.1 coveralls==0.5 crcmod==1.7 cufflinks==0.17.3 cupy-cuda101==7.4.0 cvxopt==1.2.6 cvxpy==1.0.31 cycler==0.10.0 cymem==2.0.5 Cython==0.29.22 daft==0.0.4 dask==2.12.0 datascience==0.10.6 debugpy==1.0.0 decorator==4.4.2 defusedxml==0.7.1 descartes==1.1.0 dill==0.3.3 distributed==1.25.3 dlib==19.18.0 dm-tree==0.1.6 docopt==0.6.2 docutils==0.17 dopamine-rl==1.0.5 earthengine-api==0.1.260 easydict==1.9 ecos==2.0.7.post1 editdistance==0.5.3 en-core-web-sm==2.2.5 entrypoints==0.3 ephem==3.7.7.1 et-xmlfile==1.0.1 fa2==0.3.5 fancyimpute==0.4.3 fastprogress==1.0.0 fastrlock==0.6 fbprophet==0.7.1 feather-format==0.4.1 filelock==3.0.12 firebase-admin==4.4.0 fix-yahoo-finance==0.0.22 Flask==1.1.2 flatbuffers==1.12 folium==0.8.3 future==0.16.0 gast==0.3.3 GDAL==2.2.2 gdown==3.6.4 gensim==3.6.0 geographiclib==1.50 geopy==1.17.0 gin-config==0.4.0 glob2==0.7
google==2.0.3
google-api-core==1.26.3
google-api-python-client==1.12.8
google-auth==1.28.1
google-auth-httplib2==0.0.4
google-auth-oauthlib==0.4.4
google-cloud-bigquery==1.21.0
google-cloud-bigquery-storage==1.1.0
google-cloud-core==1.0.3
google-cloud-datastore==1.8.0
google-cloud-firestore==1.7.0
google-cloud-language==1.2.0
google-cloud-storage==1.18.1
google-cloud-translate==1.5.0
google-colab==1.0.0
google-pasta==0.2.0
google-resumable-media==0.4.1
googleapis-common-protos==1.53.0
googledrivedownloader==0.4
graphviz==0.10.1
greenlet==1.0.0
grpcio==1.32.0
gspread==3.0.1
gspread-dataframe==3.0.8
gym==0.17.3
h5py==2.10.0
HeapDict==1.0.1
hijri-converter==2.1.1
holidays==0.10.5.2
holoviews==1.14.3
html5lib==1.0.1
httpimport==0.5.18
httplib2==0.17.4
httplib2shim==0.0.3
humanize==0.5.1
hyperopt==0.1.2
ideep4py==2.0.0.post3
idna==2.10
imageio==2.4.1
imagesize==1.2.0
imbalanced-learn==0.4.3
imblearn==0.0
imgaug==0.2.9
importlib-metadata==3.10.1
importlib-resources==5.1.2
imutils==0.5.4
inflect==2.1.0
iniconfig==1.1.1
intel-openmp==2021.2.0
intervaltree==2.1.0
ipykernel==4.10.1
ipython==5.5.0
ipython-genutils==0.2.0
ipython-sql==0.3.9
ipywidgets==7.6.3
itsdangerous==1.1.0
jax==0.2.12
jaxlib==0.1.65+cuda110
jdcal==1.4.1
jedi==0.18.0
jieba==0.42.1
Jinja2==2.11.3
joblib==1.0.1
jpeg4py==0.1.4
jsonschema==2.6.0
jupyter==1.0.0
jupyter-client==5.3.5
jupyter-console==5.2.0
jupyter-core==4.7.1
jupyterlab-pygments==0.1.2
jupyterlab-widgets==1.0.0
kaggle==1.5.12
kapre==0.1.3.1
Keras==2.4.3
Keras-Preprocessing==1.1.2
keras-vis==0.4.1
kiwisolver==1.3.1
knnimpute==0.1.0
korean-lunar-calendar==0.2.1
librosa==0.8.0
lightgbm==2.2.3
llvmlite==0.34.0
lmdb==0.99
LunarCalendar==0.0.9
lxml==4.2.6
Markdown==3.3.4
MarkupSafe==1.1.1
matplotlib==3.2.2
matplotlib-venn==0.11.6
missingno==0.4.2
mistune==0.8.4
mizani==0.6.0
mkl==2019.0
mlxtend==0.14.0
more-itertools==8.7.0
moviepy==0.2.3.5
mpmath==1.2.1
msgpack==1.0.2
multiprocess==0.70.11.1
multitasking==0.0.9
murmurhash==1.0.5
music21==5.5.0
natsort==5.5.0
nbclient==0.5.3
nbconvert==5.6.1
nbformat==5.1.3
nest-asyncio==1.5.1
networkx==2.5.1
nibabel==3.0.2
nltk==3.2.5
notebook==5.3.1
np-utils==0.5.12.1
numba==0.51.2
numexpr==2.7.3
numpy==1.19.5
nvidia-ml-py3==7.352.0
oauth2client==4.1.3
oauthlib==3.1.0
okgrade==0.4.3
opencv-contrib-python==4.1.2.30
opencv-python==4.1.2.30
openpyxl==2.5.9
opt-einsum==3.3.0
osqp==0.6.2.post0
packaging==20.9
palettable==3.3.0
pandas==1.1.5
pandas-datareader==0.9.0
pandas-gbq==0.13.3
pandas-profiling==1.4.1
pandocfilters==1.4.3
panel==0.11.2
param==1.10.1
parso==0.8.2
pathlib==1.0.1
patsy==0.5.1
pexpect==4.8.0
pickleshare==0.7.5
Pillow==7.1.2
pip-tools==4.5.1
plac==1.1.3
plotly==4.4.1
plotnine==0.6.0
pluggy==0.7.1
pooch==1.3.0
portpicker==1.3.1
prefetch-generator==1.0.1
preshed==3.0.5
prettytable==2.1.0
progressbar2==3.38.0
prometheus-client==0.10.1
promise==2.3
prompt-toolkit==1.0.18
protobuf==3.12.4
psutil==5.4.8
psycopg2==2.7.6.1
ptyprocess==0.7.0
py==1.10.0
pyarrow==3.0.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycocotools==2.0.2
pycparser==2.20
pyct==0.4.8
pydata-google-auth==1.1.0
pydot==1.3.0
pydot-ng==2.0.0
pydotplus==2.0.2
PyDrive==1.3.1
pyemd==0.5.1
pyerfa==1.7.2
pyglet==1.5.0
Pygments==2.6.1
pygobject==3.26.1
pymc3==3.7
PyMeeus==0.5.11
pymongo==3.11.3
pymystem3==0.2.0
PyOpenGL==3.1.5
pyparsing==2.4.7
pyrsistent==0.17.3
pysndfile==1.3.8
PySocks==1.7.1
pystan==2.19.1.1
pytest==3.6.4
python-apt==0.0.0
python-chess==0.23.11
python-dateutil==2.8.1
python-louvain==0.15
python-slugify==4.0.1
python-utils==2.5.6
pytz==2018.9
pyviz-comms==2.0.1
PyWavelets==1.1.1
PyYAML==3.13
pyzmq==22.0.3
qdldl==0.1.5.post0
qtconsole==5.0.3
QtPy==1.9.0
regex==2019.12.20
requests==2.23.0
requests-oauthlib==1.3.0
resampy==0.2.2
retrying==1.3.3
rpy2==3.4.3
rsa==4.7.2
scikit-image==0.16.2
scikit-learn==0.22.2.post1
scipy==1.4.1
screen-resolution-extra==0.0.0
scs==2.1.3
seaborn==0.11.1
Send2Trash==1.5.0
setuptools-git==1.2
Shapely==1.7.1
simplegeneric==0.8.1
six==1.15.0
sklearn==0.0
sklearn-pandas==1.8.0
smart-open==5.0.0
snowballstemmer==2.1.0
sortedcontainers==2.3.0
SoundFile==0.10.3.post1
spacy==2.2.4
Sphinx==1.8.5
sphinxcontrib-serializinghtml==1.1.4
sphinxcontrib-websupport==1.2.4
SQLAlchemy==1.4.7
sqlparse==0.4.1
srsly==1.0.5
statsmodels==0.10.2
sympy==1.7.1
tables==3.4.4
tabulate==0.8.9
tblib==1.7.0
tensorboard==2.4.1
tensorboard-plugin-wit==1.8.0
tensorflow==2.4.1
tensorflow-datasets==4.0.1
tensorflow-estimator==2.4.0
tensorflow-gcs-config==2.4.0
tensorflow-hub==0.12.0
tensorflow-metadata==0.29.0
tensorflow-probability==0.12.1
termcolor==1.1.0
terminado==0.9.4
testpath==0.4.4
text-unidecode==1.3
textblob==0.15.3
textgenrnn==1.4.1
Theano==1.0.5
thinc==7.4.0
tifffile==2021.4.8
toml==0.10.2
toolz==0.11.1
torch==1.8.1+cu101
torchsummary==1.5.1
torchtext==0.9.1
torchvision==0.9.1+cu101
tornado==5.1.1
tqdm==4.41.1
traitlets==5.0.5
tweepy==3.10.0
typeguard==2.7.1
typing-extensions==3.7.4.3
tzlocal==1.5.1
uritemplate==3.0.1
urllib3==1.24.3
vega-datasets==0.9.0
wasabi==0.8.2
wcwidth==0.2.5
webencodings==0.5.1
Werkzeug==1.0.1
widgetsnbextension==3.5.1
wordcloud==1.5.0
wrapt==1.12.1
xarray==0.15.1
xgboost==0.90
xkit==0.0.0
xlrd==1.1.0
xlwt==1.3.0
yellowbrick==0.9.1
zict==2.0.0
zipp==3.4.1
# -
# pip install absl-py==0.12.0 alabaster==0.7.12 altair==4.1.0 appdirs==1.4.4 argon2-cffi==20.1.0 astor==0.8.1 astropy==4.2.1 astunparse==1.6.3 async-generator==1.10 atari-py==0.2.6 atomicwrites==1.4.0 attrs==20.3.0 audioread==2.1.9 autograd==1.3 Babel==2.9.0 backcall==0.2.0 blis==0.4.1 bokeh==2.3.1 Bottleneck==1.3.2 branca==0.4.2 catalogue==1.0.0 certifi==2020.12.5 cffi==1.14.5 chainer==7.4.0 chardet==3.0.4 click==7.1.2 cloudpickle==1.3.0 cmake==3.12.0 cmdstanpy==0.9.5 colorcet==2.0.6 colorlover==0.3.0 community==1.0.0b1 contextlib2==0.5.5 convertdate==2.3.2 coverage==3.7.1 coveralls==0.5 crcmod==1.7 cufflinks==0.17.3 cupy-cuda101==7.4.0 cvxopt==1.2.6 cvxpy==1.0.31 cycler==0.10.0 cymem==2.0.5 Cython==0.29.22 daft==0.0.4 dask==2.12.0 debugpy==1.0.0 decorator==4.4.2 defusedxml==0.7.1 descartes==1.1.0 dill==0.3.3 distributed==1.25.3 dlib==19.18.0 dm-tree==0.1.6 docopt==0.6.2 docutils==0.17 dopamine-rl==1.0.5 easydict==1.9 ecos==2.0.7.post1 editdistance==0.5.3 entrypoints==0.3 ephem==3.7.7.1 et-xmlfile==1.0.1 fa2==0.3.5 fancyimpute==0.4.3 fastprogress==1.0.0 fastrlock==0.6 fbprophet==0.7.1 feather-format==0.4.1 filelock==3.0.12 firebase-admin==4.4.0 fix-yahoo-finance==0.0.22 Flask==1.1.2 flatbuffers==1.12 folium future==0.16.0 gast==0.3.3 GDAL==2.2.2 gdown==3.6.4 gensim==3.6.0 geographiclib==1.50 geopy==1.17.0 gin-config==0.4.0 glob2==0.7 google==2.0.3 graphviz==0.10.1 greenlet==1.0.0 grpcio==1.32.0 gspread==3.0.1 gspread-dataframe==3.0.8 gym==0.17.3 h5py==2.10.0 HeapDict==1.0.1 hijri-converter==2.1.1 holidays==0.10.5.2 holoviews==1.14.3 html5lib==1.0.1 httpimport==0.5.18 httplib2==0.17.4 httplib2shim==0.0.3 humanize==0.5.1 hyperopt==0.1.2 idna==2.10 imageio==2.4.1 imagesize==1.2.0 imbalanced-learn==0.4.3 imblearn==0.0 importlib-metadata==3.10.1 importlib-resources==5.1.2 imutils==0.5.4 inflect==2.1.0 iniconfig==1.1.1 intel-openmp==2021.2.0 intervaltree==2.1.0 ipython==5.5.0 ipython-genutils==0.2.0 ipython-sql==0.3.9 ipywidgets==7.6.3 itsdangerous==1.1.0 jax==0.2.12 
jdcal==1.4.1 jedi==0.18.0 jieba==0.42.1 Jinja2==2.11.3 joblib==1.0.1 jpeg4py==0.1.4 jsonschema==2.6.0 jupyter==1.0.0 jupyter-core==4.7.1 jupyterlab-pygments==0.1.2 jupyterlab-widgets==1.0.0 kaggle==1.5.12 kapre==0.1.3.1 Keras==2.4.3 Keras-Preprocessing==1.1.2 keras-vis==0.4.1 kiwisolver==1.3.1 knnimpute==0.1.0 korean-lunar-calendar==0.2.1 librosa==0.8.0 lightgbm==2.2.3 llvmlite==0.34.0 lmdb==0.99 LunarCalendar==0.0.9 lxml==4.2.6 Markdown==3.3.4 MarkupSafe==1.1.1 matplotlib==3.2.2 matplotlib-venn==0.11.6 missingno==0.4.2 mistune==0.8.4 mizani==0.6.0 mkl==2019.0 mlxtend==0.14.0 more-itertools==8.7.0 moviepy==0.2.3.5 mpmath==1.2.1 msgpack==1.0.2 multiprocess==0.70.11.1 multitasking==0.0.9 murmurhash==1.0.5 music21==5.5.0 natsort==5.5.0 nbconvert==5.6.1 nbformat==5.1.3 nest-asyncio==1.5.1 networkx==2.5.1 nibabel==3.0.2 nltk==3.2.5 notebook==5.3.1 np-utils==0.5.12.1 numba==0.51.2 numexpr==2.7.3 numpy==1.19.5 nvidia-ml-py3==7.352.0 oauth2client==4.1.3 oauthlib==3.1.0 okgrade==0.4.3 opencv-contrib-python==4.1.2.30 opencv-python==4.1.2.30 openpyxl==2.5.9 opt-einsum==3.3.0 osqp==0.6.2.post0 packaging==20.9 palettable==3.3.0 pandas==1.1.5 pandas-datareader==0.9.0 pandas-gbq==0.13.3 pandas-profiling==1.4.1 pandocfilters==1.4.3 panel==0.11.2 param==1.10.1 parso==0.8.2 pathlib==1.0.1 patsy==0.5.1 pexpect==4.8.0 pickleshare==0.7.5 Pillow==7.1.2 pip-tools==4.5.1 plac==1.1.3 plotly==4.4.1 plotnine==0.6.0 pluggy==0.7.1 pooch==1.3.0 portpicker==1.3.1 prefetch-generator==1.0.1 preshed==3.0.5 prettytable==2.1.0 progressbar2==3.38.0 prometheus-client==0.10.1 promise==2.3 prompt-toolkit==1.0.18 protobuf==3.12.4 psutil==5.4.8 psycopg2==2.7.6.1 ptyprocess==0.7.0 py==1.10.0 pyarrow==3.0.0 pyasn1==0.4.8 pyasn1-modules==0.2.8 pycocotools==2.0.2 pycparser==2.20 pyct==0.4.8 pydata-google-auth==1.1.0 pydot==1.3.0 pydot-ng==2.0.0 pydotplus==2.0.2 PyDrive==1.3.1 pyemd==0.5.1 pyerfa==1.7.2 pyglet==1.5.0 Pygments==2.6.1 pygobject pymc3==3.7 PyMeeus==0.5.11 pymongo==3.11.3 pymystem3==0.2.0 
PyOpenGL==3.1.5 pyparsing==2.4.7 pyrsistent==0.17.3 pysndfile==1.3.8 PySocks==1.7.1 pystan==2.19.1.1 pytest==3.6.4 python-apt==0.0.0 python-chess==0.23.11 python-dateutil==2.8.1 python-louvain==0.15 python-slugify==4.0.1 python-utils==2.5.6 pytz==2018.9 pyviz-comms==2.0.1 PyWavelets==1.1.1 PyYAML==3.13 pyzmq==22.0.3 qdldl==0.1.5.post0 qtconsole==5.0.3 QtPy==1.9.0 regex==2019.12.20 requests==2.23.0 requests-oauthlib==1.3.0 resampy==0.2.2 retrying==1.3.3 rpy2==3.4.3 rsa==4.7.2 scikit-image==0.16.2 scikit-learn==0.22.2.post1 scipy==1.4.1 scs==2.1.3 seaborn==0.11.1 Send2Trash==1.5.0 setuptools-git==1.2 Shapely==1.7.1 simplegeneric==0.8.1 six==1.15.0 sklearn==0.0 sklearn-pandas==1.8.0 smart-open==5.0.0 snowballstemmer==2.1.0 sortedcontainers==2.3.0 SoundFile==0.10.3.post1 spacy==2.2.4 Sphinx==1.8.5 sphinxcontrib-serializinghtml==1.1.4 sphinxcontrib-websupport==1.2.4 SQLAlchemy==1.4.7 sqlparse==0.4.1 srsly==1.0.5 statsmodels==0.10.2 sympy==1.7.1 tables==3.4.4 tabulate==0.8.9 tblib==1.7.0 tensorboard==2.4.1 tensorboard-plugin-wit==1.8.0 tensorflow==2.4.1 tensorflow-datasets==4.0.1 tensorflow-estimator==2.4.0 tensorflow-gcs-config==2.4.0 tensorflow-hub==0.12.0 tensorflow-metadata==0.29.0 tensorflow-probability==0.12.1 termcolor==1.1.0 terminado==0.9.4 testpath==0.4.4 text-unidecode==1.3 textblob==0.15.3 textgenrnn==1.4.1 Theano==1.0.5 thinc==7.4.0 tifffile==2021.4.8 toml==0.10.2 toolz==0.11.1 torchsummary==1.5.1 tornado==5.1.1 tqdm==4.41.1 traitlets==5.0.5 tweepy==3.10.0 typeguard==2.7.1 typing-extensions==3.7.4.3 tzlocal==1.5.1 uritemplate==3.0.1 urllib3==1.24.3 vega-datasets==0.9.0 wasabi==0.8.2 wcwidth==0.2.5 webencodings==0.5.1 Werkzeug==1.0.1 widgetsnbextension==3.5.1 wordcloud==1.5.0 wrapt==1.12.1 xarray==0.15.1 xlrd==1.1.0 xlwt==1.3.0 yellowbrick==0.9.1 zict==2.0.0 zipp==3.4.1 ipykernel jupyter-client jupyter-console nbclient
#
#
datascience==0.10.6 folium==0.8.3 google-api-core==1.26.3 google-api-python-client==1.12.8 google-auth==1.28.1 google-auth-httplib2==0.0.4 google-auth-oauthlib==0.4.4 google-cloud-bigquery==1.21.0 google-cloud-bigquery-storage==1.1.0 google-cloud-core==1.0.3 google-cloud-datastore==1.8.0 google-cloud-firestore==1.7.0 google-cloud-language==1.2.0 google-cloud-storage==1.18.1 google-cloud-translate==1.5.0 google-colab==1.0.0 google-pasta==0.2.0 google-resumable-media==0.4.1 googleapis-common-protos==1.53.0 googledrivedownloader==0.4 earthengine-api==0.1.260
|
densenet exp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Imports and jieba (Chinese word-segmentation) configuration.
import numpy as np
import pandas as pd
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
#from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import GradientBoostingClassifier
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
# time helpers
from datetime import datetime
from datetime import timedelta
import jieba
import jieba.analyse
jieba.set_dictionary('dict.idkrsi.txt')  # replace jieba's default dictionary with a custom one
jieba.analyse.set_stop_words("stopword.goatwang.kang.txt")  # stop-word list for jieba.analyse
# get data
# # ! conda install pandas-datareader s
#import pandas_datareader as pdr
# visual
# # ! pip install mpl-finance
#import matplotlib.pyplot as plt
#import mpl_finance as mpf
#import seaborn as sns
# https://github.com/mrjbq7/ta-lib
# # ! pip install ta-lib
#import talib
# +
# Load articles from three sources (forum, BBS, news) and merge into one frame.
df_bbs = pd.read_csv("bda2019_dataset/bbs2.csv",encoding="utf-8")
df_forum = pd.read_csv("bda2019_dataset/forum2.csv",encoding="utf-8")
df_news = pd.read_csv("bda2019_dataset/news2.csv",encoding="utf-8")
df_news['comment_count']=0
df_article = pd.concat([df_forum, df_bbs, df_news])  # merge the three sources
del df_bbs, df_forum, df_news
df_article['post_time'] = pd.to_datetime(df_article['post_time'])
df_article['post_time2'] = df_article['post_time'].dt.date  # date-only column (.dt.date for a Series, .date() for a scalar)
#df_article['label'] = 'even'
# Strip characters that would break CSV export / segmentation downstream.
df_article['content'] = df_article['content'].astype(str).str.replace(',' , ' ').str.replace('\n' , ' ').str.replace('"' , ' ').str.replace("'" , ' ')
df_article['title'] = df_article['title'].astype(str).str.replace(',' , ' ').str.replace('\n' , ' ').str.replace('"' , ' ').str.replace("'" , ' ')
df_article = df_article.sort_values(by=['post_time']).reset_index(drop=True)  # sort by post_time, then rebuild the index
df_article = df_article.rename(index=str, columns={"author": "author_", "content": "content_", "id": "id_", "title": "title_"})  # suffix names so they cannot clash with token feature columns added later
df_article.head(2)
#df_article2 = df_article[['post_time2','title','content']]
# +
# Load three years of TWSE (Taiwan Stock Exchange) daily quotes and merge.
df_TWSE2018 = pd.read_csv("bda2019_dataset/TWSE2018.csv",encoding="utf-8")
df_TWSE2017 = pd.read_csv("bda2019_dataset/TWSE2017.csv",encoding="utf-8")
df_TWSE2016 = pd.read_csv("bda2019_dataset/TWSE2016.csv",encoding="utf-8")
df_TWSE = pd.concat([df_TWSE2016, df_TWSE2017, df_TWSE2018])  # merge the three years
del df_TWSE2016, df_TWSE2017, df_TWSE2018
# Columns: open / high / low / close / volume / value / trade count / shares outstanding / P-E / P-B
# ['開盤價(元)', '最高價(元)', '最低價(元)', '收盤價(元)', '成交量(千股)', '成交值(千元)', '成交筆數(筆)', '流通在外股數(千股)', '本益比-TSE', '股價淨值比-TSE']
df_TWSE['證券代碼'] = df_TWSE['證券代碼'].astype(str)
df_TWSE['年月日'] = pd.to_datetime(df_TWSE['年月日'])
# Numeric columns arrive as strings with thousands separators -- strip the comma and cast.
df_TWSE['開盤價(元)'] = df_TWSE['開盤價(元)'].str.replace(',' , '').astype('float64') # "1,000" -> "1000" -> float
df_TWSE['最高價(元)'] = df_TWSE['最高價(元)'].str.replace(',' , '').astype('float64')
df_TWSE['最低價(元)'] = df_TWSE['最低價(元)'].str.replace(',' , '').astype('float64')
df_TWSE['收盤價(元)'] = df_TWSE['收盤價(元)'].str.replace(',' , '').astype('float64')
df_TWSE['成交量(千股)'] = df_TWSE['成交量(千股)'].str.replace(',' , '').astype('float64')
df_TWSE['成交值(千元)'] = df_TWSE['成交值(千元)'].str.replace(',' , '').astype('float64')
df_TWSE['成交筆數(筆)'] = df_TWSE['成交筆數(筆)'].str.replace(',' , '').astype('int64')
df_TWSE['流通在外股數(千股)'] = df_TWSE['流通在外股數(千股)'].str.replace(',' , '').astype('float64')
df_TWSE['本益比-TSE'] = df_TWSE['本益比-TSE'].str.replace(',' , '').astype('float64')
df_TWSE['股價淨值比-TSE'] = df_TWSE['股價淨值比-TSE'].astype('float64')
df_TWSE.head(2)
# +
# Which stock to model (name substring matched against the TWSE ticker column).
#company_name = '國巨'
company_name = '奇力新'
# Regex alternation of keywords an article must contain to count as relevant.
#company_words = '被動元件|積層陶瓷電容|MLCC|電感|晶片電阻|車用電子|凱美|同欣電|大毅|君耀|普斯|國巨'
company_words = '被動元件|積層陶瓷電容|MLCC|電感|晶片電阻|車用電子|飛磁|旺詮|美磊|美桀|向華科技|奇力新'
# Daily-return threshold (fraction) for tagging a day as up/down.
PA = 0.05
# Band (fraction) around zero tagged as 'even'.
PAE = 0.003
# +
# Keep only this company's rows, sorted by date, with a fresh 0..n-1 integer index.
# (Tickers tried earlier: 2327 國巨, 2456 奇力新, 2478 大毅, 6271 同欣電.)
df_trend = df_TWSE[df_TWSE['證券代碼'].str.contains(company_name)].sort_values(by=['年月日']).reset_index(drop=True)
del df_TWSE
df_trend.head(2)
# -
# Add columns: 'fluctuation' (daily return), 'tag' (up/down/even), 'closeshift' (previous close).
df_trend['fluctuation'] = 0.0
df_trend['tag']='--'
df_trend['closeshift'] = 0.0
df_trend.head(2)
# +
# ##增欄位:fluctuation幅度 tag漲跌平
# df_trend['fluctuation'] = 0.0
# df_trend['tag']='--'
# ###計算漲跌
# for index, row in df_trend.iterrows():
# try:
# margin =(float(df_trend.loc[index,'收盤價(元)']) - float(df_trend.loc[index-1,'收盤價(元)']) )/ float(df_trend.loc[index-1,'收盤價(元)'])
# df_trend.loc[index,'fluctuation']=margin
# if margin >=0.03:
# df_trend.loc[index,'tag']='up'
# elif margin <= -0.03:
# df_trend.loc[index,'tag']='down'
# else:
# df_trend.loc[index,'tag']='even'
# except:
# continue
# -
# Previous trading day's closing price (first row becomes NaN).
df_trend['closeshift'] = df_trend['收盤價(元)'].shift(periods=1)#.fillna(value=0.0, inplace=True)
#df_trend['closeshift'].fillna(value= 0.0, inplace=True)
df_trend.head(2)
# Daily return relative to the previous close.
df_trend['fluctuation'] = (df_trend['收盤價(元)'] - df_trend['closeshift']) / df_trend['closeshift']
df_trend.head(2)
print('fluctuation std = ',df_trend['fluctuation'].std(axis=0))
print('fluctuation mean = ',df_trend['fluctuation'].mean(axis=0))
# Tag each day: 'up' if return >= PA, 'down' if <= -PA, 'even' inside +/-PAE.
# Days whose magnitude falls between PAE and PA keep the placeholder '--'.
df_trend.loc[df_trend['fluctuation'] >= PA, 'tag'] = 'up'
df_trend.loc[df_trend['fluctuation'] <= -PA, 'tag'] = 'down'
df_trend.loc[(df_trend['fluctuation'] >= -PAE) & (df_trend['fluctuation'] <= PAE), 'tag'] = 'even'
df_trend.head(2)
# Class counts per tag.
len(df_trend[df_trend['tag']=='up'])
len(df_trend[df_trend['tag']=='down'])
len(df_trend[df_trend['tag']=='even'])
# Keep only articles whose content matches any company keyword (regex alternation).
# (Single-company variants tried earlier: 國巨, 奇力新, 大毅, 同欣電.)
df_company = df_article[ df_article['content_'].str.contains(company_words)]
print(len(df_company))
del df_article
df_company.head(2)
# Read the stop-word file (one word per line) into a list, dropping trailing newlines.
with open('stopword.goatwang.kang.txt', 'r', encoding='utf-8') as data:
    stopwords = [line.strip('\n') for line in data]
# +
# Segment every article's content with jieba; one space-joined token string per article.
corpus = []
for index, row in df_company.iterrows():
    not_cut = df_company.loc[index,'content_']
    # not_cut = row['description']  # equivalent to the line above
    seg_generator = jieba.cut(not_cut, cut_all=False)  # generator of tokens
    seglist = list(seg_generator)  # token list for the whole article
    # seglist = list(filter(lambda a: a not in stopwords and a != '\n', seglist ))  # optional stop-word removal
    corpus.append(' '.join(seglist))  # ' '.join(seg_generator) would also work
df_company["content2"]=corpus
# -
df_company.head(2)
# Exploratory checks on the date arithmetic between consecutive trading days.
df_trend.loc[2,'年月日'].date() + timedelta(days=-1) == df_trend.loc[1,'年月日'].date()
df_trend.loc[5,'年月日'].date() + timedelta(days=-1) == df_trend.loc[4,'年月日'].date()
d = df_trend.loc[1,'年月日'].date() - df_trend.loc[ 1-1 ,'年月日'].date()  # timedelta between two rows
d
d.days  # days component only
int(d.days)  # days as an int
df_trend.loc[3,'年月日'].date()
df_company[ df_company['post_time2'] == df_trend.loc[3,'年月日'].date() ].head()  # all articles posted on that date
# +
# # 演算法
# for index, row in df_2327.iterrows():
# try:
# if df_2327.loc[index,'年月日'].date() + timedelta(days=-1) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# # 如果股票前一筆差1天 # 那前1天的文章標上當天的漲跌
# elif df_2327.loc[index,'年月日'].date() + timedelta(days=-2) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-2), 'label'] = df_2327.loc[index,'tag']
# # 如果股票前一筆差2天 #那前2天的文章標上當天的漲跌
# elif df_2327.loc[index,'年月日'].date() + timedelta(days=-3) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-2), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-3), 'label'] = df_2327.loc[index,'tag']
# elif df_2327.loc[index,'年月日'].date() + timedelta(days=-4) == df_2327.loc[index-1,'年月日'].date():
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-1), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-2), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-3), 'label'] = df_2327.loc[index,'tag']
# df_forum.loc[df_forum['post_time2'] == df_2327.loc[index,'年月日'].date() + timedelta(days=-4), 'label'] = df_2327.loc[index,'tag']
# except:
# continue
# +
# 看所有相差的天數
# for index, row in df_2327.iterrows():
# try:
# n = df_2327.loc[index,'年月日'].date() - df_2327.loc[index-1,'年月日'].date()
# print(n)
# except:
# continue
# 最多12天
# -
# Propagate each trading day's up/down/even tag back onto the articles posted
# during the preceding non-trading gap: if n calendar days elapsed since the
# previous trading day, articles from each of those n days receive this day's tag.
df_company['label5566'] = '--'
for index in df_trend.index:
    try:
        # Calendar days since the previous trading day (>= 1; > 1 across weekends/holidays).
        n = int((df_trend.loc[index, '年月日'].date() - df_trend.loc[index - 1, '年月日'].date()).days)
        for i in range(1, n + 1):
            # Stamp every article posted i days before this trading day.
            df_company.loc[df_company['post_time2'] == df_trend.loc[index, '年月日'].date() + timedelta(days=-i), 'label5566'] = df_trend.loc[index, 'tag']
    except KeyError:
        # The first row has no predecessor (loc[index - 1] raises KeyError); skip it.
        # Narrowed from a bare `except:` so that real bugs are no longer swallowed.
        continue
# Label distribution after back-propagating the daily tags onto articles.
print(len(df_company[df_company['label5566']=='down']))
df_company[df_company['label5566']=='down'].head(2)
print(len(df_company[df_company['label5566']=='up']))
df_company[df_company['label5566']=='up'].head(2)
print(len(df_company[df_company['label5566']=='even']))
df_company[df_company['label5566']=='even'].head(2)
# +
#df_company2 = df_company[df_company['label5566'].str.contains('up|down|even')]
#df_company2.to_csv('5pa.csv')
# -
# Load the pre-computed keyword vocabulary (chi-square selected words) as a plain list.
df_keyword = pd.read_csv("chi_5pa_word.csv",encoding="utf-8")
df_keyword.head()
features = df_keyword['word'].astype(str).to_numpy()
features = list(features)
# +
# df_keyword1 = pd.read_csv("final_higher_tf_idf_part.csv",encoding="utf-8") #上漲形容詞
# df_keyword2 = pd.read_csv("final_lower_tf_idf_part.csv",encoding="utf-8") #下跌形容詞
# df_keyword = pd.concat([df_keyword1,df_keyword2])
# del df_keyword1,df_keyword2
# df_keyword.head()
# features = df_keyword['key'].astype(str).to_numpy()
# features = list(features)
# +
# import re
# features = [] # features=list()
# with open('finance.words.txt', 'r',encoding='utf-8') as data:
# for line in data:
# # line = re.sub('[a-zA-Z0-9\W]', '', line) # 把數字英文去掉
# line = re.sub('[0-9]', '', line) # 把數字去掉
# features.append(line.replace('\n', '').replace(' ', '')) # 空格 \n去掉
# print(len(features))
# print(type(features))
# features[:10]
# +
from sklearn.feature_extraction.text import TfidfVectorizer
#features = [ '上漲','下跌','看好','走高','走低','漲停','跌停']
# Cap the vocabulary at the first 20k keywords.
features = features[:20000]
#cv = TfidfVectorizer()  # default: every whitespace-separated token becomes a feature
cv = TfidfVectorizer(vocabulary = features)  # restrict features to our own keyword list
# NOTE(review): pd.SparseDataFrame was removed in pandas 1.0 -- this cell needs
# the older pandas this project was written against.
r = pd.SparseDataFrame(cv.fit_transform(df_company['content2']),
                       df_company.index,
                       cv.get_feature_names(),
                       default_fill_value=0.0)
r.fillna(value=0.0, inplace=True)
r.head(2)
# +
# from sklearn.feature_extraction.text import CountVectorizer
# #features = [ '上漲','下跌','看好','走高','走低','漲停','跌停']
# #features = features[:1000]
# #cv = CountVectorizer() #預設有空格就一個feature
# cv = CountVectorizer(vocabulary = features) # 設定自己要的詞
# r = pd.SparseDataFrame(cv.fit_transform(df_company['content2']),
# df_company.index,
# cv.get_feature_names(),
# default_fill_value=0)
# r.head(2)
# -
# Attach the TF-IDF feature columns to the article frame.
df_company2 = pd.concat([df_company,r], axis=1)
df_company2.head(2)
df_company2 = df_company2[df_company2['label5566'].str.contains('up|down|even')]  # keep only labelled rows
# Time-based split: train on 2016-01 .. 2018-09, validate on 2018-10 .. 2018-12.
df_train = df_company2[(df_company2['post_time'] >= '2016-1-1 00:00:00') & (df_company2['post_time'] < '2018-10-1 00:00:00')]
df_validation = df_company2[(df_company2['post_time'] >= '2018-10-1 00:00:00') & (df_company2['post_time'] < '2019-1-1 00:00:00')]
print(len(df_train))
print(len(df_validation))
# Class balance per split.
print(len(df_train[df_train['label5566']=='up']))
print(len(df_train[df_train['label5566']=='down']))
print(len(df_train[df_train['label5566']=='even']))
print(len(df_validation[df_validation['label5566']=='up']))
print(len(df_validation[df_validation['label5566']=='down']))
print(len(df_validation[df_validation['label5566']=='even']))
# Build numeric feature matrices from the selected keyword columns.
# NOTE: `features` must be a plain list (indexing with a numpy array caused problems).
# Missing TF-IDF entries are filled with 0 via .fillna(...) on the selected copy:
# the previous fillna(inplace=True) on a df[features] slice is the pandas
# chained-assignment pitfall (SettingWithCopyWarning; the fill may not stick).
X_train = df_train[features].fillna(0.0).to_numpy()
X_validation = df_validation[features].fillna(0.0).to_numpy()
# Target labels (up/down/even) for each split.
Y_train = df_train['label5566']
Y_validation = df_validation['label5566']
# +
# Random forest: fit, print train accuracy, then validation metrics.
model_RandomForest = RandomForestClassifier()
model_RandomForest.fit(X_train, Y_train)
print(model_RandomForest.score(X_train, Y_train))
predictions = model_RandomForest.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# +
# LightGBM gradient-boosted trees.
from lightgbm.sklearn import LGBMClassifier
model_LGBMClassifier = LGBMClassifier()
model_LGBMClassifier.fit(X_train, Y_train)
print(model_LGBMClassifier.score(X_train, Y_train))
predictions = model_LGBMClassifier.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# +
# XGBoost.
model_XGBClassifier = XGBClassifier()
model_XGBClassifier.fit(X_train, Y_train)
print(model_XGBClassifier.score(X_train, Y_train))
predictions = model_XGBClassifier.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# +
# Gradient boosting (scikit-learn implementation).
model_GradientBoost = GradientBoostingClassifier()
model_GradientBoost.fit(X_train, Y_train)
print(model_GradientBoost.score(X_train, Y_train))
predictions = model_GradientBoost.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# +
# Single decision tree.
model_DecisionTree = DecisionTreeClassifier()
model_DecisionTree.fit(X_train, Y_train)
print(model_DecisionTree.score(X_train, Y_train))
predictions = model_DecisionTree.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# +
# 5-fold (x1 repeat) stratified cross-validation for KNN on the training split.
from sklearn.model_selection import cross_validate
from sklearn.model_selection import RepeatedStratifiedKFold
model_KNeighbors = KNeighborsClassifier()
name = 'KNN'
seed = 7
kfold = model_selection.RepeatedStratifiedKFold(n_splits=5, n_repeats=1, random_state=seed)  # stratified CV splitter
cv_results = model_selection.cross_validate(model_KNeighbors, X_train, Y_train, cv=kfold, scoring='accuracy')
print(cv_results['test_score'])
print("%s: %f (%f)" % (name, cv_results['test_score'].mean(), cv_results['test_score'].std()))
# NOTE(review): 'train_score' requires return_train_score=True on newer scikit-learn.
print(cv_results['train_score'])
print(cv_results['fit_time'])
print(cv_results['score_time'])
# +
# KNN: fit on the full training split and evaluate on validation.
model_KNeighbors = KNeighborsClassifier()
model_KNeighbors.fit(X_train, Y_train)
print(model_KNeighbors.score(X_train, Y_train))
predictions = model_KNeighbors.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# +
# Gaussian naive Bayes.
model_GaussianNB = GaussianNB()
model_GaussianNB.fit(X_train, Y_train)
print(model_GaussianNB.score(X_train, Y_train))
predictions = model_GaussianNB.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# +
# Multinomial naive Bayes; coef_ exposes the per-feature log-probability weights.
model_MultinomialNB = MultinomialNB()
model_MultinomialNB.fit(X_train, Y_train)
print(model_MultinomialNB.score(X_train, Y_train) )
predictions = model_MultinomialNB.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
print(model_MultinomialNB.coef_)
# +
# 5-fold (x1 repeat) stratified cross-validation for logistic regression.
from sklearn.model_selection import cross_validate
from sklearn.model_selection import RepeatedStratifiedKFold
model_LogisticRegression = LogisticRegression()
name = 'LogisticRegression'
seed = 7
kfold = model_selection.RepeatedStratifiedKFold(n_splits=5, n_repeats=1, random_state=seed)  # stratified CV splitter
cv_results = model_selection.cross_validate(model_LogisticRegression, X_train, Y_train, cv=kfold, scoring='accuracy')
print(cv_results['test_score'])
print("%s: %f (%f)" % (name, cv_results['test_score'].mean(), cv_results['test_score'].std()))
# NOTE(review): 'train_score' requires return_train_score=True on newer scikit-learn.
print(cv_results['train_score'])
print(cv_results['fit_time'])
print(cv_results['score_time'])
# +
# Logistic regression: fit and evaluate; coef_ exposes per-feature weights.
model_LogisticRegression = LogisticRegression()
model_LogisticRegression.fit(X_train, Y_train)
print(model_LogisticRegression.score(X_train, Y_train))
predictions = model_LogisticRegression.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
print(model_LogisticRegression.coef_)
# -
# SVC with a linear kernel.
model_svclinear = SVC(kernel='linear')
model_svclinear.fit(X_train, Y_train)
print(model_svclinear.score(X_train, Y_train))
predictions = model_svclinear.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
print(model_svclinear.coef_)
# LinearSVC (liblinear-based alternative to SVC(kernel='linear')).
from sklearn.svm import LinearSVC
model_LinearSVC = LinearSVC()
model_LinearSVC.fit(X_train, Y_train)
print(model_LinearSVC.score(X_train, Y_train))
predictions = model_LinearSVC.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
print(model_LinearSVC.coef_)
# Linear SVM trained with stochastic gradient descent (hinge loss).
from sklearn.linear_model import SGDClassifier
model_SGDClassifier = SGDClassifier(loss='hinge')
model_SGDClassifier.fit(X_train, Y_train)
print(model_SGDClassifier.score(X_train, Y_train))
predictions = model_SGDClassifier.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
print(model_SGDClassifier.coef_)
# +
# (commented-out) repeated stratified CV evaluation of the MLP below
# # 用10-Fold CV並且列出平均的效率
# from sklearn.model_selection import cross_validate
# from sklearn.model_selection import RepeatedStratifiedKFold
# # 呼叫單個model MLP
# model_MLP = MLPClassifier(hidden_layer_sizes=(256,256, ),max_iter=256)
# name = 'MLP'
# seed = 7
# kfold = model_selection.RepeatedStratifiedKFold(n_splits=5, n_repeats=1, random_state=seed) #分割 10% cross validation
# cv_results = model_selection.cross_validate(model_MLP, X_train, Y_train, cv=kfold, scoring='accuracy')
# #model用MLP() cross valitation
# print(cv_results['test_score'])
# print("%s: %f (%f)" % (name, cv_results['test_score'].mean(), cv_results['test_score'].std()))
# print(cv_results['train_score'])
# print(cv_results['fit_time'])
# print(cv_results['score_time'])
# +
# MLP with two hidden layers of 256 units, capped at 256 iterations.
model_MLP = MLPClassifier(hidden_layer_sizes=(256, 256,), max_iter=256)
model_MLP.fit(X_train, Y_train)
print(model_MLP.score(X_train, Y_train))
predictions = model_MLP.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
#print(model_MLP.coefs_)
# -
# Chosen model: KNN — label the validation frame with its predictions.
predictions = model_KNeighbors.predict(X_validation)
df_validation['predict5566'] = predictions
df_validation.head()
# +
# keep only the reporting columns plus the true label and the prediction
df_validation2 = df_validation[['author_', 'comment_count', 'content_', 'id_', 'p_type', 'page_url', 'post_time', 's_area_name',
                                's_name', 'title_', 'post_time2', 'content2', 'label5566', 'predict5566']]
df_validation2.head()
# -
# export the labelled validation set
df_validation2.to_csv('df_validation_3.csv')
# +
#df_company2.to_csv('5pa.csv')
# -
|
2.per_article_tfidf_3_time.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py27]
# language: python
# name: conda-env-py27-py
# ---
#future is the missing compatibility layer between Python 2 and Python 3.
#It allows you to use a single, clean Python 3.x-compatible codebase to
#support both Python 2 and Python 3 with minimal overhead.
from __future__ import absolute_import, division, print_function
#encoding. word encodig
import codecs
#finds all pathnames matching a pattern, like regex
import glob
#log events for libraries
import logging
#concurrency
import multiprocessing
#dealing with operating system , like reading file
import os
#pretty print, human readable
import pprint
#regular expressions
import re
#natural language toolkit
import nltk
#word 2 vec
import gensim.models.word2vec as w2v
#dimensionality reduction
import sklearn.manifold
#math
import numpy as np
#plotting
import matplotlib.pyplot as plt
#parse dataset
import pandas as pd
#visualization
import seaborn as sns
# %pylab inline
#stopwords like the at a an, unnecesasry
#tokenization into sentences, punkt
#http://www.nltk.org/
nltk.download("punkt")
nltk.download("stopwords")
#get the book names, matching txt files
# NOTE(review): this globs the filesystem root ("/*.txt") — presumably it was
# meant to point at a data directory; confirm the path.
book_filenames = sorted(glob.glob("/*.txt"))
#print books
print("Found books:")
book_filenames
# +
#step 1: process data
#accumulate the raw text of every book into one large in-memory string
corpus_raw = u""
#for each book: open it in utf-8, read it, and append it to the raw corpus
for book_filename in book_filenames:
    print("Reading '{0}'...".format(book_filename))
    with codecs.open(book_filename, "r", "utf-8") as book_file:
        corpus_raw += book_file.read()
    print("Corpus is now {0} characters long".format(len(corpus_raw)))
    print()
# -
#tokenization: load the pre-trained punkt sentence tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
#split the corpus into sentences
raw_sentences = tokenizer.tokenize(corpus_raw)
#next: convert each sentence into a list of words,
#removing punctuation, digits and other non-letter characters
def sentence_to_wordlist(raw):
    """Strip non-alphabetic characters from *raw* and return its words."""
    letters_only = re.sub("[^a-zA-Z]", " ", raw)
    return letters_only.split()
#build the sentence list: each non-empty sentence becomes a list of word tokens
sentences = []
for raw_sentence in raw_sentences:
    if len(raw_sentence) > 0:
        sentences.append(sentence_to_wordlist(raw_sentence))
#print an example sentence, raw and tokenized
print(raw_sentences[5])
print(sentence_to_wordlist(raw_sentences[5]))
#count word tokens across all sentences
token_count = sum([len(sentence) for sentence in sentences])
print("The book corpus contains {0:,} tokens".format(token_count))
# +
#step 2: build our model (an alternative embedding algorithm would be GloVe)
#define hyperparameters
# Dimensionality of the resulting word vectors.
#more dimensions need more training data, but give more expressive embeddings
num_features = 300
#
# Minimum word count threshold: rarer words are dropped from the vocabulary.
min_word_count = 3
# Number of threads to run in parallel.
num_workers = multiprocessing.cpu_count()
# Context window length.
context_size = 7
# Downsample setting for frequent words (typical range 0 to 1e-5).
downsampling = 1e-3
# Seed for the RNG, to make the results reproducible.
seed = 1
# -
# skip-gram model (sg=1)
# NOTE(review): this is the gensim 3.x API — in gensim 4+, `size` became
# `vector_size`, and `.vocab`/`.syn0` used below moved to
# `wv.key_to_index`/`wv.vectors`; `train()` also requires extra arguments.
thrones2vec = w2v.Word2Vec(
    sg=1,
    seed=seed,
    workers=num_workers,
    size=num_features,
    min_count=min_word_count,
    window=context_size,
    sample=downsampling
)
thrones2vec.build_vocab(sentences)
print("Word2Vec vocabulary length:", len(thrones2vec.vocab))
#train model on the tokenized sentences
thrones2vec.train(sentences)
#save model
if not os.path.exists("trained"):
    os.makedirs("trained")
thrones2vec.save(os.path.join("trained", "thrones2vec.w2v"))
#load model
thrones2vec = w2v.Word2Vec.load(os.path.join("trained", "thrones2vec.w2v"))
#squash dimensionality to 2 with t-SNE
#https://www.oreilly.com/learning/an-illustrated-introduction-to-the-t-sne-algorithm
tsne = sklearn.manifold.TSNE(n_components=2, random_state=0)
#put all word vectors into one matrix (one row per vocabulary word)
all_word_vectors_matrix = thrones2vec.syn0
#fit t-SNE and project every word vector to 2d
all_word_vectors_matrix_2d = tsne.fit_transform(all_word_vectors_matrix)
#build a (word, x, y) table of the projected points
points = pd.DataFrame(
    [
        (word, coords[0], coords[1])
        for word, coords in [
            (word, all_word_vectors_matrix_2d[thrones2vec.vocab[word].index])
            for word in thrones2vec.vocab
        ]
    ],
    columns=["word", "x", "y"]
)
points.head(10)
#plot the full projection
sns.set_context("poster")
points.plot.scatter("x", "y", s=10, figsize=(20, 12))
def plot_region(x_bounds, y_bounds):
    """Zoom into a rectangular region of the t-SNE scatter plot.

    Filters the module-level `points` frame to the given x/y bounds, draws a
    scatter of the region, and labels each point with its word.

    Parameters
    ----------
    x_bounds, y_bounds : tuple(float, float)
        Inclusive (min, max) bounds on the projected x and y coordinates.
    """
    # renamed from `slice`, which shadowed the builtin of the same name
    region = points[
        (x_bounds[0] <= points.x) &
        (points.x <= x_bounds[1]) &
        (y_bounds[0] <= points.y) &
        (points.y <= y_bounds[1])
    ]
    ax = region.plot.scatter("x", "y", s=35, figsize=(10, 8))
    # offset the labels slightly so they do not sit on top of the markers
    for i, point in region.iterrows():
        ax.text(point.x + 0.005, point.y + 0.005, point.word, fontsize=11)
# zoomed views of two regions of the projection
plot_region(x_bounds=(4.0, 4.2), y_bounds=(-0.5, -0.1))
plot_region(x_bounds=(0, 1), y_bounds=(4, 4.5))
# nearest neighbours of a word in embedding space
thrones2vec.most_similar("Stark")
thrones2vec.most_similar("Aerys")
thrones2vec.most_similar("direwolf")
#distance, similarity, and ranking
def nearest_similarity_cosmul(start1, end1, end2):
    """Solve the analogy start1:end1 :: ?:end2 and print/return the answer."""
    matches = thrones2vec.most_similar_cosmul(
        positive=[end2, start1],
        negative=[end1]
    )
    # the top-ranked candidate completes the analogy
    start2 = matches[0][0]
    print("{start1} is related to {end1}, as {start2} is related to {end2}".format(**locals()))
    return start2
# word analogies: start1 is to end1 as ? is to end2
nearest_similarity_cosmul("Stark", "Winterfell", "Riverrun")
nearest_similarity_cosmul("Jaime", "sword", "wine")
nearest_similarity_cosmul("Arya", "Nymeria", "dragons")
|
Siraj_Akash/word_vectors_game_of_thrones-LIVE-master/demo.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# # Julia By Example
# ## Hello World
println("hello world")
#> hello world
# ## Simple Functions
# function to calculate the volume of a sphere
function sphere_vol(r)
    # volume of a sphere of radius r: (4/3)·π·r³
    # julia allows Unicode names (in UTF-8 encoding),
    # so either "pi" or the symbol π can be used;
    # the last expression is returned implicitly
    4/3*pi*r^3
end
# functions can also be defined more succinctly ("assignment form")
quadratic(a, sqr_term, b) = (-b + sqr_term) / 2a

# calculates x for 0 = a*x^2+b*x+c; argument types can be given in the signature
function quadratic2(a::Float64, b::Float64, c::Float64)
    # unlike other languages 2a is equivalent to 2*a,
    # and a^2 is used instead of a**2 or pow(a,2)
    disc = sqrt(b^2-4a*c)
    # both roots are returned as a tuple; without a `return` keyword,
    # the value of the last expression is the value of the function
    (quadratic(a, disc, b), quadratic(a, -disc, b))
end
vol = sphere_vol(3)
# @printf allows number formatting but does not automatically append the \n to statements, see below
using Printf
@printf "volume = %0.3f\n" vol
#> volume = 113.097
quad1, quad2 = quadratic2(2.0, -2.0, -12.0)
println("result 1: ", quad1)
#> result 1: 3.0
println("result 2: ", quad2)
#> result 2: -2.0
# ## Strings Basics
# strings are defined with double quotes
# like variables, strings can contain any unicode character
s1 = "The quick brown fox jumps over the lazy dog α,β,γ"
println(s1)
#> The quick brown fox jumps over the lazy dog α,β,γ
# println adds a new line to the end of output
# print can be used if you don't want that:
print("this")
#> this
print(" and")
#> and
print(" that.\n")
#> that.
# chars are defined with single quotes
c1 = 'a'
println(c1)
#> a
# the ascii value of a char can be found with Int():
println(c1, " ascii value = ", Int(c1))
#> a ascii value = 97
println("Int('α') == ", Int('α'))
#> Int('α') == 945
# so be aware that
println(Int('1') == 1)
#> false
# strings can be converted to upper case or lower case:
s1_caps = uppercase(s1)
s1_lower = lowercase(s1)
println(s1_caps, "\n", s1_lower)
#> THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG Α,Β,Γ
#> the quick brown fox jumps over the lazy dog α,β,γ
# sub strings can be indexed like arrays:
# (show prints the raw value)
show(s1[11]); println()
#> 'b'
# or sub strings can be created:
show(s1[1:10]); println()
#> "The quick "
# end is used for the end of the array or string
show(s1[end-10:end]); println()
#> "dog α,β,γ"
# julia allows string Interpolation:
a = "welcome"
b = "julia"
println("$a to $b.")
#> welcome to julia.
# this can extend to evaluate statements:
println("1 + 2 = $(1 + 2)")
#> 1 + 2 = 3
# strings can also be concatenated using the * operator
# using * instead of + isn't intuitive when you start with Julia,
# however people think it makes more sense
s2 = "this" * " and" * " that"
println(s2)
#> this and that
# as well as the string function
s3 = string("this", " and", " that")
println(s3)
#> this and that
# ## String: Converting and formatting
# strings can be converted using float and int:
e_str1 = "2.718"
e = parse(Float64, e_str1)
println(5e)
#> 13.59
num_15 = parse(Int, "15")
println(3num_15)
#> 45
# numbers can be converted to strings and formatted using printf
using Printf
@printf "e = %0.2f\n" e
#> e = 2.72
# or to create another string sprintf
e_str2 = @sprintf("%0.3f", e)
# to show that the 2 strings are the same
println("e_str1 == e_str2: $(e_str1 == e_str2)")
#> e_str1 == e_str2: true
# available number format characters are f, e, a, g, c, s, p, d:
# (pi is a predefined constant; however, since its type is
# "MathConst" it has to be converted to a float to be formatted)
@printf "fix trailing precision: %0.3f\n" float(pi)
#> fix trailing precision: 3.142
@printf "scientific form: %0.6e\n" 1000pi
#> scientific form: 3.141593e+03
@printf "float in hexadecimal format: %a\n" 0xff
#> float in hexadecimal format: 0xf.fp+4
@printf "fix trailing precision: %g\n" pi*1e8
#> fix trailing precision: 3.14159e+08
@printf "a character: %c\n" 'α'
#> a character: α
@printf "a string: %s\n" "look I'm a string!"
#> a string: look I'm a string!
@printf "right justify a string: %50s\n" "width 50, text right justified!"
#> right justify a string: width 50, text right justified!
@printf "a pointer: %p\n" 100000000
#> a pointer: 0x0000000005f5e100
@printf "print an integer: %d\n" 1e10
#> print an integer: 10000000000
# ## String Manipulations
s1 = "The quick brown fox jumps over the lazy dog α,β,γ"
# search returns the first index of a char
i = findfirst(isequal('b'), s1)
println(i)
#> 11
# the second argument is equivalent to the second argument of split, see below
# or a range if called with another string
r = findfirst("brown", s1)
println(r)
#> 11:15
# string replace is done thus:
r = replace(s1, "brown" => "red")
show(r); println()
#> "The quick red fox jumps over the lazy dog α,β,γ"
# search and replace can also take a regular expressions by preceding the string with 'r'
r = findfirst(r"b[\w]*n", s1)
println(r)
#> 11:15
# again with a regular expression
r = replace(s1, r"b[\w]*n" => "red")
show(r); println()
#> "The quick red fox jumps over the lazy dog α,β,γ"
# there are also functions for regular expressions that return RegexMatch types
# match scans left to right for the first match (specified starting index optional)
r = match(r"b[\w]*n", s1)
println(r)
#> RegexMatch("brown")
# RegexMatch types have a property match that holds the matched string
show(r.match); println()
#> "brown"
# eachmatch returns an iterator over all the matches
r = eachmatch(r"[\w]{4,}", s1)
for i in r print("\"$(i.match)\" ") end
#> "quick" "brown" "jumps" "over" "lazy"
println()
r = collect(m.match for m = eachmatch(r"[\w]{4,}", s1))
println(r)
#> SubString{String}["quick", "brown", "jumps", "over", "lazy"]
# a string can be repeated using the repeat function,
# or more succinctly with the ^ syntax:
r = "hello "^3
show(r); println() #> "hello hello hello "
# the strip function works the same as python:
# e.g., with one argument it strips the outer whitespace
r = strip("hello ")
show(r); println() #> "hello"
# or with a second argument of an array of chars it strips any of them;
r = strip("hello ", ['h', ' '])
show(r); println() #> "ello"
# (note the array is of chars and not strings)
# similarly split works in basically the same way as python:
r = split("hello, there,bob", ',')
show(r); println() #> SubString{String}["hello", " there", "bob"]
r = split("hello, there,bob", ", ")
show(r); println() #> SubString{String}["hello", "there,bob"]
r = split("hello, there,bob", [',', ' '], limit=0, keepempty=false)
show(r); println() #> SubString{String}["hello", "there", "bob"]
# (the last two keyword arguments are limit and keepempty, see docs)
# the opposite of split: join is simply
r = join(collect(1:10), ", ")
println(r) #> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
|
julia_by_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
#listings = pd.read_csv("data/listings.csv") Dont use this. All the data is in Listings summary
df_raw = pd.read_csv("data/listings_summary.csv")
# -
# shape of the raw listings table
df_raw.shape
# columns we plan to keep for modelling; check how many values are missing
columns = ['summary','host_is_superhost', 'beds', 'price', 'security_deposit', 'cleaning_fee',
           'property_type', 'minimum_nights', 'extra_people', 'latitude', 'longitude',
           'accommodates', 'bathrooms', 'bedrooms', 'cancellation_policy', 'room_type']
df_raw[columns].isnull().sum()
df_raw.head()
# +
from collections import ChainMap
def clean_dataframe(df):
    """Clean the raw Airbnb listings frame into a numeric modelling frame.

    Keeps a fixed subset of columns, strips currency formatting from the
    price-like fields, fills missing fees/deposits with 0, encodes the
    categorical columns as small integers, and drops rows with remaining
    NaNs or outlier values.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw listings frame; must contain all columns listed below.

    Returns
    -------
    pandas.DataFrame
        A cleaned copy; the input frame is not modified.
    """
    df = df.copy()
    # keeping the columns we need
    columns = ['host_is_superhost', 'beds', 'price', 'security_deposit', 'cleaning_fee',
               'property_type', 'minimum_nights', 'extra_people', 'latitude', 'longitude',
               'accommodates', 'bathrooms', 'bedrooms', 'cancellation_policy', 'room_type']
    df = df[columns]
    # strip "$" and "," from the currency columns and convert to float
    # (raw strings avoid the invalid-escape-sequence warning for "\$")
    df['cleaning_fee_cleaned'] = df['cleaning_fee'].replace({r'[\$,]': ''}, regex=True).astype(float)
    df['security_deposit_cleaned'] = df['security_deposit'].replace(r'[\$,]', '', regex=True).astype(float)
    df['price_cleaned'] = df['price'].replace({r'[\$,]': ''}, regex=True).astype(float)
    df['extra_people_cleaned'] = df['extra_people'].replace({r'[\$,]': ''}, regex=True).astype(float)
    # a missing fee/deposit means none is charged, so fill with 0
    df['cleaning_fee_filled'] = df['cleaning_fee_cleaned'].fillna(0)
    df['security_deposit_filled'] = df['security_deposit_cleaned'].fillna(0)
    # drop rows that still have NaNs in any remaining column
    df = df.dropna()
    # superhost flag: the raw column holds 't'/'f' strings
    # (renamed from `list`, which shadowed the builtin of the same name)
    superhost_values = ['t']
    df['superhost'] = df['host_is_superhost'].isin(superhost_values)
    # group the many property types into 5 coarse categories
    guesthouse = ['Guest suite', 'Guesthouse']
    apartment = ['Apartment', 'Serviced apartment', 'Aparthotel']
    condo = ['Townhouse', 'Loft', 'Condominium']
    house = ['House', 'Bed and breakfast', 'Bungalow', 'Villa', 'Tiny house', 'Cabin', 'Cottage', 'Chalet']
    other = ['Hostel', 'Other', 'Boat', 'Houseboat', 'Boutique hotel', 'Camper/RV', 'Hotel',
             'Resort', 'Train', 'Casa particular (Cuba)', 'In-law', 'Cave', 'Island']
    d = ChainMap(dict.fromkeys(guesthouse, 0),
                 dict.fromkeys(apartment, 1),
                 dict.fromkeys(condo, 2),
                 dict.fromkeys(house, 3),
                 dict.fromkeys(other, 4))
    df['property_type_cleaned'] = df['property_type'].map(d.get).astype(int)
    # room type -> integer code
    df['room_type_cleaned'] = df['room_type'].replace({'Entire home/apt': 1, 'Private room': 0, 'Shared room': 2}).astype(int)
    # cancellation policy -> integer code
    df['cancellation_policy_cleaned'] = df['cancellation_policy'].replace({'strict_14_with_grace_period': 0, 'flexible': 1,
                                                                          'moderate': 2, 'super_strict_30': 3,
                                                                          'super_strict_60': 4}).astype(int)
    # beds/bedrooms arrive as floats; cast to int now that NaNs are gone
    df['beds'] = df['beds'].astype(int)
    df['bedrooms'] = df['bedrooms'].astype(int)
    # drop outliers: free or very expensive listings and implausibly large homes
    df = df[(df['price_cleaned'] != 0.0) & (df['price_cleaned'] < 1000.0)]
    df = df[df['beds'] <= 12]
    df = df[df['bedrooms'] <= 8]
    df = df[df['bathrooms'] <= 5.0]
    # drop the raw/intermediate columns that have been replaced
    drop_columns = ['cleaning_fee_cleaned', 'security_deposit_cleaned', 'cleaning_fee', 'security_deposit',
                    'price', 'extra_people', 'host_is_superhost', 'property_type', 'room_type', 'cancellation_policy']
    df = df.drop(columns=drop_columns)
    return df
# +
# df['property_type_cleaned'] = df['property_type']
# df['property_type_cleaned'] = np.where((df['property_type'].isin(guesthouse)), 0, df['property_type_cleaned'])
# df['property_type_cleaned'] = np.where((df['property_type'].isin(apartment)), 1, df['property_type_cleaned'])
# df['property_type_cleaned'] = np.where((df['property_type'].isin(condo)), 2, df['property_type_cleaned'])
# df['property_type_cleaned'] = np.where((df['property_type'].isin(house)), 3, df['property_type_cleaned'])
# df['property_type_cleaned'] = np.where((df['property_type'].isin(other)), 4, df['property_type_cleaned'])
# df['property_type_cleaned'] = df['property_type_cleaned'].astype(int)
# -
# run the cleaning pipeline and inspect the result
df = clean_dataframe(df_raw)
df.head()
df.dtypes
df.shape
df.price_cleaned.value_counts()
|
Notebook Folders/Data Engineering.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # Stochastic differential equation model using StochasticDiffEq.jl
# ### <NAME> (@sdwfrost)
# ### 2020-04-27
# ## Introduction
#
# A stochastic differential equation version of the SIR model is:
#
# - Stochastic
# - Continuous in time
# - Continuous in state
#
# This implementation uses `StochasticDiffEq.jl`, which has a variety of SDE solvers.
#
# ## Libraries
using DifferentialEquations
using StochasticDiffEq
using Random
using SparseArrays
using DataFrames
using StatsPlots
# ## Transitions
#
# We begin by specifying the ODE kernel.
function sir_ode!(du,u,p,t)
    # In-place SIR drift: u = (S, I, R), p = (β, c, γ).
    S, I, R = u
    β, c, γ = p
    N = S+I+R
    infection = β*c*I/N*S
    recovery = γ*I
    @inbounds begin
        du[1] = -infection
        du[2] = infection - recovery
        du[3] = recovery
    end
    nothing
end
# Define a sparse matrix by making a dense matrix and setting some values as not zero.
# A is the 3x2 noise-rate sparsity pattern: column 1 couples S and I
# (infection events), column 2 couples I and R (recovery events).
A = zeros(3,2)
A[1,1] = 1
A[2,1] = 1
A[2,2] = 1
A[3,2] = 1
A = SparseArrays.sparse(A)
# Make `g` write the sparse matrix values
function sir_noise!(du,u,p,t)
    # Diffusion term of the SDE: du is the 3x2 noise-rate matrix whose
    # sparsity pattern is A above; u = (S, I, R), p = (β, c, γ).
    (S,I,R) = u
    (β,c,γ) = p
    N = S+I+R
    # NOTE(review): the drift uses β*c*I/N*S but this uses β*I/N*S — the
    # contact rate c may be missing here; confirm against the model spec.
    ifrac = β*I/N*S
    rfrac = γ*I
    # demographic noise: sqrt of each event rate, entering the two
    # compartments of each transition with opposite signs
    du[1,1] = -sqrt(ifrac)
    du[2,1] = sqrt(ifrac)
    du[2,2] = -sqrt(rfrac)
    du[3,2] = sqrt(rfrac)
end
# ## Time domain
#
# Note that even though I'm using fixed time steps, `DifferentialEquations.jl` complains if I pass integer timespans, so I set the timespan to be `Float64`.
δt = 0.1
tmax = 40.0
tspan = (0.0,tmax)
# sampling grid for post-processing (the solver itself chooses its own steps)
t = 0.0:δt:tmax;
# ## Initial conditions
u0 = [990.0,10.0,0.0]
# ## Parameter values
p = [0.05,10.0,0.25]
# ## Random number seed
Random.seed!(1234);
# ## Running the model
prob_sde = SDEProblem(sir_ode!,sir_noise!,u0,tspan,p,noise_rate_prototype=A)
sol_sde = solve(prob_sde,SRA1())
# ## Post-processing
#
# We can convert the output to a dataframe for convenience.
# NOTE(review): positional `DataFrame(::Matrix)` construction is deprecated in
# newer DataFrames.jl — may need `DataFrame(sol_sde(t)', :auto)` there.
df_sde = DataFrame(sol_sde(t)')
df_sde[!,:t] = t;
# ## Plotting
#
# We can now plot the results.
@df df_sde plot(:t,
    [:x1 :x2 :x3],
    label=["S" "I" "R"],
    xlabel="Time",
    ylabel="Number")
|
notebook/sde_stochasticdiffeq/sde_stochasticdiffeq.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Nonlinear Model Predictive Control with C-GMRES
from IPython.display import Image
Image(filename="Figure_4.png",width=600)
Image(filename="Figure_1.png",width=600)
Image(filename="Figure_2.png",width=600)
Image(filename="Figure_3.png",width=600)
# 
# ### Mathematical Formulation
#
# Motion model is
#
# $$\dot{x}=vcos\theta$$
#
# $$\dot{y}=vsin\theta$$
#
# $$\dot{\theta}=\frac{v}{WB}sin(u_{\delta})$$ (tan is not good for optimization)
#
# $$\dot{v}=u_a$$
#
# Cost function is
#
# $$J=\frac{1}{2}(u_a^2+u_{\delta}^2)-\phi_a d_a-\phi_\delta d_\delta$$
#
# Input constraints are
#
# $$|u_a| \leq u_{a,max}$$
#
# $$|u_\delta| \leq u_{\delta,max}$$
#
# So, Hamiltonian is
#
# $$J=\frac{1}{2}(u_a^2+u_{\delta}^2)-\phi_a d_a-\phi_\delta d_\delta\\ +\lambda_1vcos\theta+\lambda_2vsin\theta+\lambda_3\frac{v}{WB}sin(u_{\delta})+\lambda_4u_a\\
# +\rho_1(u_a^2+d_a^2+u_{a,max}^2)+\rho_2(u_\delta^2+d_\delta^2+u_{\delta,max}^2)$$
#
# Partial differential equations of the Hamiltonian are:
#
# $\begin{equation*}
# \frac{\partial H}{\partial \bf{x}}=\\
# \begin{bmatrix}
# \frac{\partial H}{\partial x}= 0&\\
# \frac{\partial H}{\partial y}= 0&\\
# \frac{\partial H}{\partial \theta}= -\lambda_1vsin\theta+\lambda_2vcos\theta&\\
# \frac{\partial H}{\partial v}=-\lambda_1cos\theta+\lambda_2sin\theta+\lambda_3\frac{1}{WB}sin(u_{\delta})&\\
# \end{bmatrix}
# \\
# \end{equation*}$
#
#
# $\begin{equation*}
# \frac{\partial H}{\partial \bf{u}}=\\
# \begin{bmatrix}
# \frac{\partial H}{\partial u_a}= u_a+\lambda_4+2\rho_1u_a&\\
# \frac{\partial H}{\partial u_\delta}= u_\delta+\lambda_3\frac{v}{WB}cos(u_{\delta})+2\rho_2u_\delta&\\
# \frac{\partial H}{\partial d_a}= -\phi_a+2\rho_1d_a&\\
# \frac{\partial H}{\partial d_\delta}=-\phi_\delta+2\rho_2d_\delta&\\
# \end{bmatrix}
# \\
# \end{equation*}$
# ### Ref
#
# - [Shunichi09/nonlinear\_control: Implementing the nonlinear model predictive control, sliding mode control](https://github.com/Shunichi09/nonlinear_control)
#
# - [非線形モデル予測制御におけるCGMRES法をpythonで実装する \- Qiita](https://qiita.com/MENDY/items/4108190a579395053924)
|
PathTracking/cgmres_nmpc/cgmres_nmpc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''.venv'': venv)'
# name: python3
# ---
# +
# Personally I had to add the root folder of the repo to the sys.path. If certain imports do not work you should uncomment and set the following.
# import sys
# sys.path.append('/root/of/repo/folder/')
# -
# # Edge Node graph from OpenStreetMaps
#
# To create a map of edge nodes that somewhat resembles population density and shows clusters around dense areas we want to use the national train connections. We make use of the [OpenStreetMap Data](https://download.geofabrik.de) and use the [osmium-tool](https://osmcode.org/osmium-tool/manual.html) to query the data. To interface with the file from Python we use [PyOsmium](https://docs.osmcode.org/pyosmium/latest/).
#
# We use the `StationHandler` to extract the `nodes` and `ways` in the dataset that represent `stop_position`s for trains and `railway`s, respectively.
# +
import osmium
osm_file = "./out/netherlands-latest.osm.bz2"
# -
class StationHandler(osmium.SimpleHandler):
    """Collects train stop positions, stations, and rail track from an OSM file."""

    def __init__(self):
        osmium.SimpleHandler.__init__(self)
        self.stops = {}      # node id -> tags for public_transport=stop_position nodes
        self.stations = {}   # node id -> tags for train nodes carrying a wikidata tag
        self.track = []      # one list of node ids per railway=rail way

    def node(self, n):
        # Only train-related nodes are interesting; of those, stations were
        # found to be the only ones carrying a wikidata tag.
        if n.tags.get('train') != 'yes':
            return
        if n.tags.get('public_transport') == "stop_position":
            self.stops[str(n.id)] = dict(n.tags)
        if n.tags.get("wikidata", "") != "":
            self.stations[str(n.id)] = dict(n.tags)

    def way(self, w):
        # Keep the ordered node chain of every piece of rail track.
        if w.tags.get('railway') == 'rail':
            self.track.append([str(node_ref) for node_ref in w.nodes])

    def relation(self, r):
        # Relations are not needed for building the graph.
        pass
s = StationHandler()
s.apply_file(osm_file)
# ## TrainData
#
# As the extracting from the full-sized dataset can take a long time (on my machine it took about 30 minutes for the NL dataset) we want to save the extracted data. This is done by storing the data in a `TrainData` object that can be pickled.
# +
from dataclasses import dataclass
from typing import Any
@dataclass
class TrainData:
    """Pickle-friendly container for the extracted OSM train data.

    Attributes:
        stops: node id -> OSM tags for each train stop_position node.
        stations: node id -> OSM tags for each station node.
        track: railway ways, each an ordered list of node ids.
    """
    # fixed: annotations previously used the builtin function `any`;
    # `typing.Any` is the intended construct
    stops: dict[str, dict[str, Any]]
    stations: dict[str, dict[str, Any]]
    track: list[list[str]]
# -
train_data = TrainData(stops=s.stops, stations=s.stations, track=s.track)
import pickle
pickle_file = "nl-train-data.pickle"
# with open(pickle_file, 'wb') as f:
# pickle.dump(train_data, f)
import pickle
with open(pickle_file, 'rb') as f:
train_data = pickle.load(f)
# ## Processing
#
# Our goal is to create a graph where each vertex is a station and the edges represent train track between them. Each station can have different stop positions. For this reason we make use of the name of a stop position as they are the same for all stop positions at a station.
#
# To create our graph we need to follow different pieces of track (ways) from one stop position until we find a different stop position. This process has been implemented in a recursive function with a limit of `10000` on the recursion depth.
# set of all known stop-position node ids
station_stops = set(train_data.stops.keys())
# station name -> (node id, tags) lookup
stations_by_ele = { v['name']: (k,v) for k, v in train_data.stations.items() }
# +
from collections import defaultdict
# adjacency over raw OSM nodes: every consecutive pair of nodes on a way
# is linked in both directions
graph = defaultdict(set)
for track in train_data.track:
    prev_node = None
    for node in track:
        if prev_node:
            graph[prev_node].add(node)
            graph[node].add(prev_node)
        prev_node = node
# +
from functools import reduce
import sys
sys.setrecursionlimit(10000)
def get_station_at(loc):
    """Return the station name at stop-position node id *loc*.

    Returns None when *loc* is not a known stop position, and 'No name'
    when the stop exists but carries no name tag.
    """
    stop = train_data.stops.get(loc)
    # `is not None` is the correct identity check (was `!= None`)
    if stop is not None:
        return stop.get('name', 'No name')
    return None
def find_next_station(station, loc, visited, depth):
    """Recursive function to find the next station (stop position) on the line.

    Walks the raw node graph from *loc*, marking nodes visited, until stop
    positions belonging to a different station are reached. Returns the list
    of station names found (duplicates possible). Depth is capped at the
    recursion limit to avoid killing the kernel.
    """
    if depth >= sys.getrecursionlimit():
        return []
    visited.add(loc)
    here = get_station_at(loc)
    if here is not None and here != station:
        return [here]
    # snapshot the unvisited neighbours before recursing; sibling branches
    # may mark them visited, but they are still explored from this node
    candidates = [nxt for nxt in graph[loc] if nxt not in visited]
    found = []
    for nxt in candidates:
        found.extend(find_next_station(station, nxt, visited, depth + 1))
    return found
# -
# station-level graph: from every stop position, walk the track until the
# stop positions of neighbouring stations are reached
station_graph = defaultdict(set)
for stop in train_data.stops.keys():
    station = get_station_at(stop)
    station_graph[station].update(find_next_station(station, stop, set(), 0))
# ## Station Graph
#
# Here we save the station graph to file such that we do not need to re-run the logic above a second time.
# +
import json
# with open('graph-nl.json', 'w') as f:
#     f.write(json.dumps({ name: list(values) for name, values in station_graph.items() }))
# reload the station graph from disk, dropping stations with no connections
with open('graph-nl.json', 'r') as f:
    station_graph = defaultdict(set)
    loaded_graph = json.loads(f.read())
    for name, connections in loaded_graph.items():
        if len(connections) > 0:
            station_graph[name] = set(connections)
print(f"Found {len(station_graph)} stations!")
# -
# ## Visualisation
#
# As a quick debugging tool we make use of `graphviz` to visualise the extracted graph.
# +
import graphviz
from collections import defaultdict
def visualise(graph, name):
    """Render *graph* with graphviz (sfdp layout), writing to `{name}.gv`.

    Each undirected edge is drawn once, and self-loops are skipped.
    """
    g = graphviz.Graph('G', filename=f"{name}.gv", engine='sfdp')
    drawn = defaultdict(set)
    for src, neighbours in graph.items():
        for dst in neighbours:
            already_drawn = dst in drawn[src] or src in drawn[dst]
            if not already_drawn and src != dst:
                g.edge(src, dst)
                drawn[src].add(dst)
    g.view()
# -
# ## Different Setups
#
# Now that we have a graph of stations and tracks between them we need to create different setups that represent more centralised setups. To do this we define a function that correctly updates the graph while grouping a `nodeB` with `nodeA`.
def group_nodes(graph, nodeA, nodeB):
    """Merge node B into node A, in place.

    B is deleted from the graph; its outgoing links are folded into A's link
    set, and every node that linked to B is re-pointed at A. Self-links are
    never introduced.
    """
    absorbed = {link for link in graph[nodeB] if link != nodeB}
    del graph[nodeB]
    graph[nodeA] = (graph[nodeA] | absorbed) - {nodeA, nodeB}
    # re-point incoming links: anything that referenced B now references A
    for other, links in graph.items():
        if nodeB in links:
            graph[nodeA].add(other)
            graph[other] = (links - {nodeB}) | {nodeA}
# In the next block we define the main function that creates an alternate graph with a target in terms of number of nodes for the graph. Over several iterations the function will decrease the graph by grouping the least occuring node with one of its neighbours. The least occuring node is defined as the node that is still part of the graph (has not been grouped into a different node) and has the least amount of nodes pointing to it. A node points to a different node when it is grouped into that different node.
#
# This strategy allows us to create a reasonably uniform distribution of mapped nodes. This is important for the Hybrid strategies as a uniform distribution of nodes is important to get similar performance over the different groupings.
# +
import random
def count_occurances(mapping) -> dict[str, int]:
"""Count the occurances of target nodes in the mapping."""
counter = defaultdict(int)
for value in mapping.values():
counter[value] += 1
return counter
def get_least_occuring_node(mapping: dict[str, str]) -> str:
"""Return a node that occurs the least in the mapping.
If there are multiple nodes that occur the least a random node is selected.
"""
occurances = count_occurances(mapping)
items = list(occurances.items())
random.shuffle(items)
return min(items, key=lambda x: x[1])[0]
def create_mapping(mapping, from_node, to_node):
"""Create a mapping `from` to a different node.
Updates the maping by inserting the new mapping and updating all existing
mappings that used to point to `from_node` to `to_node`.
"""
mapping[from_node] = to_node
for node, to in mapping.items():
if to == from_node:
mapping[node] = to_node
def create_alternate_graph(original_graph, no_nodes: int):
    """Shrink `original_graph` until it has at most `no_nodes` nodes.

    Repeatedly selects the least-occurring node and merges one of its
    random neighbours into it (via `group_nodes`), recording each merge in
    `mapping` so original names resolve to their group representative.

    Returns (reduced_graph, mapping).
    """
    alternate_graph = dict.copy(original_graph)
    # Every node initially represents itself.
    mapping = { node: node for node in alternate_graph.keys() }
    while len(alternate_graph) > no_nodes:
        main_node = get_least_occuring_node(mapping)
        neighbouring_nodes = list(alternate_graph[main_node])
        if len(neighbouring_nodes) == 0:
            # NOTE(review): a node without outgoing links cannot be merged;
            # if the same node keeps being selected this loop may never
            # terminate — confirm the input graph has no sink nodes.
            print(f"No outgoing links: {main_node}")
            continue
        neighbouring_node = random.choice(list(alternate_graph[main_node]))
        group_nodes(alternate_graph, main_node, neighbouring_node)
        create_mapping(mapping, neighbouring_node, main_node)
    return alternate_graph, mapping
# -
# The block below is used for quickly testing the grouping strategy and visualising the result.
# Smoke test: reduce the station graph to 256 nodes, dump the counts and
# adjacency as JSON, and visualise the result.
alternate_graph, mapping = create_alternate_graph(station_graph, 256)
print(f"Found {len(set(mapping.values()))} nodes in the mapping:")
print(json.dumps(count_occurances(mapping), indent=4))
print(f"Found {len(set(alternate_graph.keys()))} nodes in the graph:")
print(json.dumps({ node: list(values) for node, values in alternate_graph.items() }, indent=4))
visualise(alternate_graph, "test")
# ## Setups
# To test the effect of differently sized edge networks we want to create a subset of sizes that we will use.
# Graph sizes to evaluate: all powers of two below the full station graph,
# plus the full size itself.
max_nodes = len(station_graph)
setups = []
no_nodes = 1
while no_nodes < max_nodes:
    setups.append(no_nodes)
    no_nodes = no_nodes * 2
setups.append(max_nodes)
print(setups)
def clear_names(graph):
    """Rename every node to a generic label (cdn0, cdn1, ..., cdnN).

    Returns a new graph with both keys and neighbour sets rewritten; the
    original graph is left untouched.
    """
    alias = {}
    for index, original in enumerate(graph):
        alias[original] = f"cdn{index}"
    renamed = {}
    for original, neighbours in graph.items():
        renamed[alias[original]] = { alias[nb] for nb in neighbours }
    return renamed
# +
# For each setup size: build the reduced graph (generic node names),
# visualise it, and persist both the adjacency and the node mapping as JSON.
generic_graph = clear_names(station_graph)
for size in setups:
    alternate_graph, mapping = create_alternate_graph(generic_graph, size)
    visualise(alternate_graph, str(size))
    with open(f"./out/setups/graph-{size}.json", 'w') as f:
        # Sets are not JSON-serialisable, so neighbour sets become lists.
        json.dump({ node: list(values) for node, values in alternate_graph.items()}, f)
    with open(f"./out/setups/mapping-{size}.json", 'w') as f:
        json.dump(mapping, f)
|
dataset/edge_network_extraction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:scanpy_r]
# language: R
# name: conda-env-scanpy_r-r
# ---
# +
suppressMessages(library(DESeq2))
# Input locations for the SRP010679 ribo-seq transcript counts and the
# sample design table.
readcounts.dir <- '/staging/as/skchoudh/SRP010679_tx_counts/'
design.file <- '/staging/as/skchoudh/SRP010679_tx_counts/metadata_ribo.tsv'
## Suffix of htseq-count output
counts.suffix <- '.tsv'
# Transcript -> gene lookup table used to annotate DESeq2 results; only the
# id/gene columns are kept and rows are keyed by transcript id.
t2g <- read.table('/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/tx_to_gene_type_name.tsv', stringsAsFactors = F, header = T)
colnames(t2g) <- c('target_id', 'ens_gene', 'transcript_type', 'gene_type', 'ext_gene')
t2g <- t2g[, c('target_id', 'ens_gene', 'ext_gene')]
rownames(t2g) <- t2g$target_id
# Annotate a DESeq2 results object with gene ids/names (via the global t2g
# table), write the full table plus the significant (padj < 0.05), up- and
# down-regulated subsets to `<prefix>.{tsv,sig.tsv,sig.up.tsv,sig.down.tsv}`,
# and return the significant subset.
write_results <- function(df, prefix){
    df<- as.data.frame(df)
    # Sort by adjusted p-value so the strongest hits come first.
    df <- df[order(df$padj),]
    df$gene_id <- t2g[rownames(df),]$ens_gene
    df$gene_name <- t2g[rownames(df),]$ext_gene
    df.sig <- subset(df, padj<0.05)
    df.sig.up <- subset(df.sig, log2FoldChange>0)
    df.sig.down <- subset(df.sig, log2FoldChange<0)
    write.table(df, file = file.path(paste(prefix, 'tsv', sep='.')), sep = '\t')
    write.table(df.sig, file = file.path(paste(prefix, 'sig', 'tsv', sep='.')), sep = '\t')
    write.table(df.sig.up, file = file.path(paste(prefix, 'sig', 'up', 'tsv', sep='.')), sep = '\t')
    write.table(df.sig.down, file = file.path(paste(prefix, 'sig', 'down', 'tsv', sep='.')), sep = '\t')
    return (df.sig)
}
# +
# Ribo-seq differential expression on transcript counts: rapamycin vs vehicle.
design.info <- read.table(design.file, header=T, stringsAsFactors=FALSE, sep='\t')
design.info <- design.info[design.info$treatment %in% c("vehicle", "rapamycin") ,]
# "vehicle" first makes it the reference level, so log2FC is treatment/vehicle.
design.info$treatment <- factor(design.info$treatment, levels=c("vehicle", "rapamycin"))
files <- paste(design.info$experiment_accession, counts.suffix, sep='')
sampleName <- design.info$experiment_accession
sampleTable <- data.frame(sampleName = sampleName,
                          fileName = files,
                          treatment=design.info$treatment)
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       directory = readcounts.dir,
                                       design = ~ treatment)
# Strip Ensembl version suffixes so ids match the t2g lookup table.
rownames(ddsHTSeq) <- gsub('\\.[0-9]+', '', rownames(ddsHTSeq))
# Drop features with at most one read across all samples.
ddsHTSeq <- ddsHTSeq[ rowSums(counts(ddsHTSeq)) > 1, ]
dds <- DESeq(ddsHTSeq)
# +
rapamycin_vs_vehicle <- results(dds)
write_results(rapamycin_vs_vehicle, '/staging/as/skchoudh/SRP010679_tx_differential_analysis/ribo_rapamycin_vs_vehicle')
# +
# Same pipeline, pp242 vs vehicle.
design.info <- read.table(design.file, header=T, stringsAsFactors=FALSE, sep='\t')
design.info <- design.info[design.info$treatment %in% c("vehicle", "pp242") ,]
design.info$treatment <- factor(design.info$treatment, levels=c("vehicle", "pp242"))
files <- paste(design.info$experiment_accession, counts.suffix, sep='')
sampleName <- design.info$experiment_accession
sampleTable <- data.frame(sampleName = sampleName,
                          fileName = files,
                          treatment=design.info$treatment)
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       directory = readcounts.dir,
                                       design = ~ treatment)
rownames(ddsHTSeq) <- gsub('\\.[0-9]+', '', rownames(ddsHTSeq))
ddsHTSeq <- ddsHTSeq[ rowSums(counts(ddsHTSeq)) > 1, ]
dds <- DESeq(ddsHTSeq)
pp242_vs_vehicle <- results(dds)
write_results(pp242_vs_vehicle, '/staging/as/skchoudh/SRP010679_tx_differential_analysis/ribo_pp242_vs_vehicle')
# -
# # uORF DF
# +
# uORF differential expression, pp242 vs vehicle, using counts from the uORF
# count directory. No Ensembl-version stripping here: gene annotation in
# write_results will only match if the uORF ids are present in t2g —
# NOTE(review): confirm the uORF feature ids are compatible with t2g.
design.info <- read.table(design.file, header=T, stringsAsFactors=FALSE, sep='\t')
design.info <- design.info[design.info$treatment %in% c("vehicle", "pp242") ,]
design.info$treatment <- factor(design.info$treatment, levels=c("vehicle", "pp242"))
files <- paste(design.info$experiment_accession, counts.suffix, sep='')
sampleName <- design.info$experiment_accession
sampleTable <- data.frame(sampleName = sampleName,
                          fileName = files,
                          treatment=design.info$treatment)
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       directory = '/staging/as/skchoudh/SRP010679_uORF_differential_analysis/',
                                       design = ~ treatment)
ddsHTSeq <- ddsHTSeq[ rowSums(counts(ddsHTSeq)) > 1, ]
dds <- DESeq(ddsHTSeq)
pp242_vs_vehicle <- results(dds)
write_results(pp242_vs_vehicle, '/staging/as/skchoudh/SRP010679_uORF_differential_analysis/ribo_pp242_vs_vehicle')
# +
# Same analysis restricted to translating-only uORFs.
design.info <- read.table(design.file, header=T, stringsAsFactors=FALSE, sep='\t')
design.info <- design.info[design.info$treatment %in% c("vehicle", "pp242") ,]
design.info$treatment <- factor(design.info$treatment, levels=c("vehicle", "pp242"))
files <- paste(design.info$experiment_accession, counts.suffix, sep='')
sampleName <- design.info$experiment_accession
sampleTable <- data.frame(sampleName = sampleName,
                          fileName = files,
                          treatment=design.info$treatment)
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       directory = '/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/',
                                       design = ~ treatment)
ddsHTSeq <- ddsHTSeq[ rowSums(counts(ddsHTSeq)) > 1, ]
dds <- DESeq(ddsHTSeq)
pp242_vs_vehicle <- results(dds)
write_results(pp242_vs_vehicle, '/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/ribo_pp242_vs_vehicle')
# -
# # Apply the size factors obtained from the CDS data to the uORF data
# +
# Re-run the uORF analysis using size factors estimated from the CDS data,
# so library-size normalisation reflects total (CDS) coverage.
# Copy size factors estimated on ddsOld over to ddsNew.
get_dds_obj_corrected <- function(ddsNew, ddsOld){
    ddsOld <- estimateSizeFactors(ddsOld)
    sizeFactors(ddsNew) <- sizeFactors(ddsOld)
    #dispersionFunction(ddsNew) <- dispersionFunction(ddsOld)
    return(ddsNew)
}
design.info <- read.table(design.file, header=T, stringsAsFactors=FALSE, sep='\t')
design.info <- design.info[design.info$treatment %in% c("vehicle", "pp242") ,]
design.info$treatment <- factor(design.info$treatment, levels=c("vehicle", "pp242"))
files <- paste(design.info$experiment_accession, counts.suffix, sep='')
sampleName <- design.info$experiment_accession
sampleTable <- data.frame(sampleName = sampleName,
                          fileName = files,
                          treatment=design.info$treatment)
# Two datasets over the same samples: CDS/transcript counts and uORF counts.
ddsHTSeq.cds <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                           directory = '/staging/as/skchoudh/SRP010679_tx_counts/',
                                           design = ~ treatment)
ddsHTSeq.uorf <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                            directory = '/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/',
                                            design = ~ treatment)
ddsHTSeq.cds <- DESeq(ddsHTSeq.cds)
# -
dim(counts(ddsHTSeq.cds))
dim(counts(ddsHTSeq.cds[ rowSums(counts(ddsHTSeq.cds)) > 0, ]))
# NOTE(review): duplicate of the definition above — kept as-is, the second
# assignment simply overwrites the first with an identical function.
get_dds_obj_corrected <- function(ddsNew, ddsOld){
    ddsOld <- estimateSizeFactors(ddsOld)
    sizeFactors(ddsNew) <- sizeFactors(ddsOld)
    #dispersionFunction(ddsNew) <- dispersionFunction(ddsOld)
    return(ddsNew)
}
# Transfer the CDS size factors to the uORF dataset, then continue the
# DESeq workflow on the uORF counts.
ddsHTSeq.uorf <- get_dds_obj_corrected(ddsHTSeq.uorf, ddsHTSeq.cds)
ddsHTSeq.uorf <- estimateDispersions(ddsHTSeq.uorf)
sizeFactors(ddsHTSeq.cds)
ddsHTSeq.uorf
# +
ddsHTSeq.uorf <- DESeq(ddsHTSeq.uorf)
pp242_vs_vehicle <- results(ddsHTSeq.uorf)
write_results(pp242_vs_vehicle, '/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/ribo_same_size_factor_pp242_vs_vehicle')
# -
|
notebooks/Feb2019-uORF-DESeq2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
# +
N = 5
q = linspace(0, 1, N)

def lin(i, q):
    """Return the linear factor x -> (x - q[i]).

    These factors are the building blocks of the Lagrange basis
    polynomials on the nodes `q`.
    (Fix: the original `x.q[i]` was a syntax-level typo.)
    """
    return lambda x: x - q[i]

def lagrange(j, q):
    """Return the j-th Lagrange basis polynomial for the nodes `q`.

    The returned callable satisfies L_j(q[i]) == 1 if i == j else 0.
    (The original cell was syntactically invalid — `lin[(i,q) for ...]`,
    `if i not j`, and a dangling `def lagrange(f,q)` — this is the
    standard reconstruction of the intended basis.)
    """
    def basis(x):
        value = 1.0
        for i in range(len(q)):
            if i != j:
                value = value * (x - q[i]) / (q[j] - q[i])
        return value
    return basis

# Evaluate every basis polynomial on a fine grid for plotting/inspection.
x = linspace(0, 1, 1025)
L = [lagrange(j, q) for j in range(len(q))]
y = array([basis(x) for basis in L])
|
Untitled Folder/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
import progressbar
# Let's get started: scrape main page
url = "https://daphnecaruanagalizia.com"
response = requests.get(url)
daphne = BeautifulSoup(response.text, 'html.parser')
# Get structural information based on developer tools in Google Chrome
posts = daphne.find_all("div", class_="postmaster")
# Explore first entry
posts[0]
# url
posts[0].a["href"]
# time stamp
posts[0].find(class_="time").get_text()
# title of posts
posts[0].a["title"]
# post id
posts[0].get('data-postid')
# +
# Extract relevant content from main page, loop through posts.
# Each post's own page is fetched to get the full article text.
new_lst = []
for element in posts:
    url = element.a["href"]
    title = element.a["title"]
    # Drop the leading "Permanent Link to " (18 characters) from the title.
    title = title[18:]
    date = element.find(class_="time").get_text()
    post_id = element.get('data-postid')
    #print(url)
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    text = soup.find('div', {'class': 'entry'}).text.strip()
    temp_dict = {'URL': url,
                 'Title': title,
                 'Date': date,
                 'ID': post_id,
                 'Txt': text}
    new_lst.append(temp_dict)
# -
pd.DataFrame(new_lst)[0:5]
# +
# Putting everything together: scrape posts from all pages for relevant content
bar = progressbar.ProgressBar()
new_lst = []
# showcase for the first 9 pages / to get all pages change to range(1,1443)
for elem,i in zip(range(1,10), bar((range(1,10)))):
    page = "https://daphnecaruanagalizia.com/page/" + str(elem)
    response = requests.get(page)
    soup = BeautifulSoup(response.text, 'html.parser')
    posts = soup.find_all("div", class_="postmaster")
    for element in posts:
        url = element.a["href"]
        # The post URL starts with /YYYY/MM/, used for year/month fields.
        url_temp = url.replace("https://daphnecaruanagalizia.com/", "")
        date_y = url_temp[:4]
        date_m = url_temp[5:7]
        # dealing with error message stemming from one post on page 127
        try:
            date_t = element.find(class_="time").get_text()
        except AttributeError:
            date_t = "n.a."
        title = element.a["title"]
        title = title.replace("Permanent Link to ", "")
        post_id = element.get('data-postid')
        response = requests.get(url)
        abc = BeautifulSoup(response.text, 'html.parser')
        text = abc.find('div', {'class': 'entry'}).text.strip()
        text = text.replace('\n', ' ')
        temp_dict = {'Link': url,
                     'Title': title,
                     'Txt': text,
                     'Date_1': date_y,
                     'Date_2': date_m,
                     'Date_3': date_t,
                     'ID_post': post_id,
                     'ID_page': i }
        new_lst.append(temp_dict)
    # NOTE(review): the CSV is rewritten after every page — presumably a
    # deliberate checkpoint so partial results survive a crash; confirm.
    df = pd.DataFrame(new_lst)
    df.to_csv('daphne.csv', sep='\t', encoding='utf-16')
# -
pd.DataFrame(new_lst)[0:5]
|
10 beautifulsoup practice, server, pandas plotting/01 daphne_(felix).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py37_torch] *
# language: python
# name: conda-env-py37_torch-py
# ---
# +
# %matplotlib inline
import os
import cv2
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from weapons.Se_0a import seg_model
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
def parse_filename(filename):
    """Decode a CCPD-style annotation filename.

    vertices: se, sw, nw, ne
    lp_indices: indices in provinces, alphabets, and ads
    area_ratio: in float
    clearliness: in int, the bigger, the more clear.
    """
    stem = filename[:filename.index(".")]
    fields = stem.split('-')
    area_ratio = float('0.' + fields[0])
    clearliness = int(fields[-1])
    lp_indices = [int(token) for token in fields[-3].split('_')]
    vertices = [tuple(int(coord) for coord in point.split("&"))
                for point in fields[3].split('_')]
    return vertices, lp_indices, area_ratio, clearliness
def lp_indices2numbers(lp_indices, provinces, alphabets, ads):
    """Translate license-plate index codes into the plate string.

    The first index selects the province, the second the alphabet
    character, and every remaining index selects from `ads`.
    """
    chars = [provinces[lp_indices[0]], alphabets[lp_indices[1]]]
    chars.extend(ads[i] for i in lp_indices[2:])
    return ''.join(chars)
def build_mask(width, length, points, epsilon=1e-8):
    """Rasterise the quadrilateral `points` into a binary int32 mask.

    `points` are the four corners in (lower-right, lower-left, upper-left,
    upper-right) order, each as (x, y). Pixels strictly inside the four
    bounding lines are 1, everything else 0. `epsilon` guards against
    division by zero for (near-)vertical or (near-)horizontal edges.
    """
    lr, ll, ul, ur = points
    result = np.ones([length, width])
    # Per-pixel coordinate grids: ys[r, c] = r, xs[r, c] = c.
    ys = np.array([np.ones(width)*x for x in range(length)])
    xs = np.array([np.arange(width) for _ in range(length)])
    # Bottom edge (lr-ll) as y = a*x + b, with epsilon fallback for
    # vertical segments.
    if np.abs(lr[0]-ll[0])>epsilon:
        liney1a = (lr[1]-ll[1])/(lr[0]-ll[0])
    else:
        liney1a = (lr[1]-ll[1])/epsilon
    liney1b = lr[1]-liney1a*lr[0]
    # Top edge (ur-ul).
    if np.abs(ur[0]-ul[0])>epsilon:
        liney2a = (ur[1]-ul[1])/(ur[0]-ul[0])
    else:
        liney2a = (ur[1]-ul[1])/epsilon
    liney2b = ur[1]-liney2a*ur[0]
    # Pixels between the top and bottom edges.
    mask1 = (ys>(liney2a*xs+liney2b)) * (ys<(liney1a*xs+liney1b))
    # Left edge (ul-ll).
    if np.abs(ul[0]-ll[0])>epsilon:
        linex1a = (ul[1]-ll[1])/(ul[0]-ll[0])
    else:
        linex1a = (ul[1]-ll[1])/epsilon
    linex1b = ul[1]-linex1a*ul[0]
    # Right edge (ur-lr).
    if np.abs(ur[0]-lr[0])>epsilon:
        linex2a = (ur[1]-lr[1])/(ur[0]-lr[0])
    else:
        linex2a = (ur[1]-lr[1])/epsilon
    linex2b = ur[1]-linex2a*ur[0]
    # Avoid dividing by a zero slope when solving x = (y - b)/a below.
    if np.abs(linex1a)<epsilon: linex1a = epsilon
    if np.abs(linex2a)<epsilon: linex2a = epsilon
    # Pixels between the left and right edges.
    mask2 = (xs>((ys-linex1b)/linex1a)) * (xs<((ys-linex2b)/linex2a))
    result*=mask1*mask2
    result = result.astype(np.int32)
    return result
def plot_mask(img, mask):
    """Show `img` with `mask` overlaid semi-transparently in grayscale."""
    plt.figure(figsize=(20, 10))
    plt.imshow(img)
    plt.imshow(mask, alpha=0.7, cmap='gray')
    plt.show()
def _upper_lower_bound(img, upper, lower):
result = (img>upper)*255+(img<upper)*img
result = (result<lower)*lower+(result>lower)*result
return result
def random_aug(img, verbose = False):
    """Randomly augment an image for training.

    img as int from 0 to 255. With probability ~1/3 each, the image gets a
    contrast or a brightness change; independently, with probability ~1/3
    each, it is Gaussian-blurred or down-resized. The result is rescaled
    to floats in [0, 1].
    """
    # Uniform in [-1, 2): >1 -> contrast branch, <0 -> brightness branch.
    indicator1 = np.random.random()*3-1
    result = img
    if indicator1>1:
        # Adjust contrast by a random factor in [0.5, 1.5).
        contrast = np.random.random()+0.5
        if verbose: print("contrast:", contrast)
        result = _upper_lower_bound(img*contrast, 254, 0).astype(int)
    elif indicator1<0:
        # Adjust brightness by a random offset in [-100, 100].
        brightness = np.random.randint(201)-100
        if verbose: print("brightness:", brightness)
        result = _upper_lower_bound(img+brightness, 254, 0).astype(int)
    result = result/255.0
    indicator2 = np.random.random()*3-1
    if indicator2>1:
        # Blur with a random odd kernel size in {3, 5, ..., 25}.
        blurriness = int(np.random.randint(12))*2+3
        if verbose: print("blurriness:", blurriness)
        result = cv2.GaussianBlur(result,(blurriness,blurriness),0)
    elif indicator2<0:
        # Reduce resolution to a random square size in [64, 256].
        l = np.random.randint(193)+64
        if verbose: print("image resized to ("+str(l)+","+str(l)+")")
        result = cv2.resize(result, (l, l))
    # Normalise so the maximum is at most 1 (no-op for dim images).
    return result/max(np.max(result),1.0)
def path_to_xy_segmentation(path, filename, x_shape = (512,512), y_shape = (512,512),
                            verbose = False, augmentation = True):
    """Load one CCPD image and build its (input, mask) training pair.

    The plate quadrilateral is decoded from the filename, rasterised into
    a binary mask, and both image and mask are resized to the requested
    shapes.
    """
    img = cv2.imread(path+filename)
    # Reverse the channel order (cv2 loads BGR; this matrix swaps to RGB).
    img = np.dot(img,np.array([[0,0,1],[0,1,0],[1,0,0]]))
    if augmentation:
        x = random_aug(img)
        x = cv2.resize(x, x_shape)
    else:
        # NOTE(review): dividing by 255.1 (not 255.0) keeps values strictly
        # below 1 — presumably intentional; confirm.
        x = cv2.resize(img/255.1, x_shape)
    vertices, lp_indices, area_ratio, clearliness = parse_filename(filename)
    mask = build_mask(img.shape[1], img.shape[0], vertices)/1.0
    y = cv2.resize(mask, y_shape)
    if verbose: plot_mask(x, y)
    return x,y
def get_batch(file_dict, batch_size,
              x_shape = (512,512), y_shape = (512,512), augmentation = True):
    """Sample a random training batch.

    Picks a random folder, then a random file inside it, for each of the
    `batch_size` samples, and stacks the resulting (image, mask) pairs.
    """
    folder_names = list(file_dict.keys())
    batch_x, batch_y = [], []
    for _ in range(batch_size):
        chosen_folder = np.random.choice(folder_names)
        chosen_file = np.random.choice(file_dict[chosen_folder])
        sample_x, sample_y = path_to_xy_segmentation(
            chosen_folder, chosen_file,
            x_shape=x_shape, y_shape=y_shape, augmentation=augmentation)
        batch_x.append(sample_x)
        batch_y.append(sample_y)
    return np.array(batch_x), np.array(batch_y)
def seg_to_vertices(img, use_dilated = False, verbose = False):
    """Extract a 4-point contour (the plate quadrilateral) from a binary
    segmentation image.

    Finds the external contours, then adaptively tunes the
    `approxPolyDP` epsilon until the simplified contour has exactly four
    points. Returns the 4 vertices, or [] when no usable contour exists.
    """
    if verbose:
        plt.imshow(img)
        plt.show()
    if use_dilated:
        # dilate thresholded image - merges top/bottom
        kernel = np.ones((3,3))
        dilated = cv2.dilate(img, kernel, iterations=3)
        if verbose:
            plt.imshow(dilated)
            plt.show()
        current_img = dilated
    else:
        current_img = img
    # find contours
    contours, hierarchy = cv2.findContours(current_img.astype(np.uint8),
                                           cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    if verbose: print("largest contour has ",len(contours[0]),"points")
    if len(contours)<=0 or len(contours[0])<4:
        if verbose: print("No result")
        return []
    # Simplify the first contour: search over epsilon multiplicatively.
    # `used_index` remembers tried values; when a candidate repeats, the
    # step is halved (binary-search-like refinement). Gives up after 10
    # distinct attempts.
    used_index = set([])
    index = 0.01
    step = 2
    while(len(used_index)<10):
        epsilon = index*cv2.arcLength(contours[0],True)
        approx = cv2.approxPolyDP(contours[0],epsilon,True)
        if verbose: print("index =", index, ", # points =", len(approx))
        if len(approx)==4:
            break
        elif len(approx)>4:
            # Too many points: increase epsilon (coarser approximation).
            if (index*step) in used_index:
                step = 1+(step-1)/2
                used_index.add(index*step)
                index*=step
            else:
                used_index.add(index*step)
                index*=step
        else:
            # Too few points: decrease epsilon (finer approximation).
            if (index/step) in used_index:
                step = 1+(step-1)/2
                used_index.add(index/step)
                index/=step
            else:
                used_index.add(index/step)
                index/=step
    if len(approx)!=4:
        if verbose: print("No result")
        return []
    if verbose:
        cv2.drawContours(img, [approx], 0, (255,255,255), 3)
        plt.imshow(img)
        plt.show()
    return approx
def _bound(value, upper, lower):
return min(max(value, lower), upper)
def rearange_vertices(vertices, img_shape):
    """Order the 4 detected contour points as (se, sw, nw, ne) and pad them.

    Chooses whether to separate points primarily by y or by x depending on
    which axis separates the middle two points more cleanly, then assigns
    corners and expands the quadrilateral outward by 5% of the mean
    diagonal, clamped to the image bounds.
    """
    # OpenCV contour points have shape (1, 2), hence the [0] indexing.
    sorted_vertices_y = sorted(vertices, key=lambda x: x[0,1])
    sorted_vertices_x = sorted(vertices, key=lambda x: x[0,0])
    # Gap between the two middle points vs the full span, per axis.
    mid_y = (sorted_vertices_y[2][0][1]-sorted_vertices_y[1][0][1])
    max_y = (sorted_vertices_y[3][0][1]-sorted_vertices_y[0][0][1])
    mid_x = (sorted_vertices_x[2][0][0]-sorted_vertices_x[1][0][0])
    max_x = (sorted_vertices_x[3][0][0]-sorted_vertices_x[0][0][0])
    if (mid_y/max_y) > (mid_x/max_x):
        # y separates top/bottom pairs best: split into north/south first.
        sorted_vertices = sorted_vertices_y
        if sorted_vertices[0][0][0]<sorted_vertices[1][0][0]:
            nw = sorted_vertices[0][0]
            ne = sorted_vertices[1][0]
        else:
            nw = sorted_vertices[1][0]
            ne = sorted_vertices[0][0]
        if sorted_vertices[2][0][0]<sorted_vertices[3][0][0]:
            sw = sorted_vertices[2][0]
            se = sorted_vertices[3][0]
        else:
            sw = sorted_vertices[3][0]
            se = sorted_vertices[2][0]
    else:
        # Otherwise split into west/east pairs first.
        sorted_vertices = sorted_vertices_x
        if sorted_vertices[0][0][1]<sorted_vertices[1][0][1]:
            nw = sorted_vertices[0][0]
            sw = sorted_vertices[1][0]
        else:
            nw = sorted_vertices[1][0]
            sw = sorted_vertices[0][0]
        if sorted_vertices[2][0][1]<sorted_vertices[3][0][1]:
            ne = sorted_vertices[2][0]
            se = sorted_vertices[3][0]
        else:
            ne = sorted_vertices[3][0]
            se = sorted_vertices[2][0]
    # Mean of the two diagonals, used to scale the outward padding.
    diagonal_length = ((se[0]-nw[0])**2+(se[1]-nw[1])**2)**0.5
    diagonal_length+= ((ne[0]-sw[0])**2+(ne[1]-sw[1])**2)**0.5
    diagonal_length/= 2
    extension = diagonal_length*0.05
    ####################################
    # Do not add this extension in the real (production) output —
    # let the recognition algorithm add that part itself.
    ####################################
    return [(_bound(se[0]+extension, img_shape[0], 0), _bound(se[1]+extension, img_shape[1], 0)),
            (_bound(sw[0]-extension, img_shape[0], 0), _bound(sw[1]+extension, img_shape[1], 0)),
            (_bound(nw[0]-extension, img_shape[0], 0), _bound(nw[1]-extension, img_shape[1], 0)),
            (_bound(ne[0]+extension, img_shape[0], 0), _bound(ne[1]-extension, img_shape[1], 0)),]
def crop_out_plate(img, vertices):
    """Perspective-warp the plate quadrilateral `vertices` (se, sw, nw, ne
    order) onto a flat 300x150 image."""
    src_pts = np.float32(vertices)
    dst_pts = np.float32([[300,150],[0,150],[0,0],[300,0]])
    warp = cv2.getPerspectiveTransform(src_pts, dst_pts)
    return cv2.warpPerspective(np.uint8(img), warp, (300,150))
# Build train/test file lists per folder; the last 5% (1/20) of each
# folder's files is held out for testing.
file_dict = {}
training_dict = {}
test_dict = {}
# folders = ["CCPD2019/ccpd_base/", "CCPD2019/ccpd_blur/", "CCPD2019/ccpd_challenge/",
#            "CCPD2019/ccpd_db/", "CCPD2019/ccpd_fn/",
#            "CCPD2019/ccpd_rotate/", "CCPD2019/ccpd_tilt/", "CCPD2019/ccpd_weather/"]
folders = ["CCPD2019/ccpd_base/"]
for folder in folders:
    # Skip hidden files (names starting with '.').
    file_dict[folder] = [x for x in os.listdir(folder) if x[0]!='.']
    split_point = int(len(file_dict[folder])/20)
    training_dict[folder] = file_dict[folder][:-split_point]
    test_dict[folder] = file_dict[folder][-split_point:]
# Sanity check: sample a small batch and report its shapes.
xs, ys = get_batch(file_dict, 10, x_shape = (512,512), y_shape = (64,64))
print(xs.shape, ys.shape)
# +
# Training loop for the segmentation model (TF1 session API).
BATCH_SIZE = 16
n_batch = 2000
learning_rate = 2e-4
saving_period = 100
model_name = "Se_0a_1"
if model_name not in os.listdir('models/'):
    os.mkdir('models/'+model_name)
x_shape = (512,512)
y_shape = (64,64)
tf.reset_default_graph()
model = seg_model()
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                      allow_soft_placement=True,
                                      log_device_placement=False)) as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # Resume from an existing checkpoint.
    saver.restore(sess, "models/Se_0a_1/Se_0a_1_0.ckpt")
    # tensorboard --logdir logs/
    # summary_writer = tf.summary.FileWriter(logdir = "logs", graph = tf.get_default_graph())
    # Visual sanity check before training.
    # NOTE(review): `show_result` is not defined in this file's visible
    # portion — presumably imported elsewhere; confirm.
    xs, ys = get_batch(file_dict, 1, x_shape = x_shape, y_shape = y_shape)
    prediction = model.predict(sess, xs)
    show_result(xs, ys, prediction)
    for i in range(1, 1+n_batch):
        # Ramp batch-norm momentum from ~0 toward 0.7 over early steps.
        bn_momentum = min(0.7, (1-10/(i+10)))
        xs, ys = get_batch(training_dict, BATCH_SIZE, x_shape = (512,512), y_shape = (64,64))
        loss, pred, summary = model.train(sess, learning_rate, bn_momentum, xs, ys)
        # summary_writer.add_summary(summary, i)
        if i%10 == 0:
            print(i, loss)
        if i%saving_period == 0:
            # NOTE(review): int(i/10000) is 0 for all i <= n_batch=2000, so
            # every save overwrites the same "_0" checkpoint — confirm
            # whether a per-step suffix was intended.
            save_path = saver.save(sess, "models/"+model_name+"/"+model_name+"_"+str(int(i/10000))+".ckpt")
            print("Model saved in path: "+save_path)
            # Evaluate on a held-out batch at each checkpoint.
            xs, ys = get_batch(test_dict, BATCH_SIZE, x_shape = (512,512), y_shape = (64,64))
            prediction = model.predict(sess, xs)
            show_result(xs, ys, prediction)
# -
|
01_car_plate_detection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chiwoongMOON/202111PythonGrammarStudy/blob/master/chapter07.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="U31SiWiGmkfo"
# # chatper 07 True, False 그리고 if 와 그 형제들
# + [markdown] id="5cRAaZUimrXN"
# ## 07-1. 참과 거짓을 의미하는 값(데이터)
# + id="84beJvF2mAR1"
True # True는 그 단어의 의미처럼 '참'을 뜻한다.
# + id="ED1djiahmwUW"
False # False는 그 단어의 의미처럼 '거짓'을 뜻한다.
# + id="rUbHMivGmxX1"
3 > 10 # 3이 10보다 크니?
# + id="iJrzww4lmyfv"
3 < 10 # 3이 10보다 작으니?
# + id="AI3UWPfxmzhN"
r = 3 < 10 # < 연산의 결과인 True가 변수 r에 저장된다.
r
# + id="1FTyah18m1k8"
print("type(True) : ", type(True))
print("type(False) : ", type(False))
# + [markdown] id="oxu8tB8qnIGX"
# ## 데이터 중간정리
# - int형 데이터 ex) 3, 5, 7, 9
# - float형 데이터 ex) 2.2, 4.4, 6.6, 8.8
# - 리스트형 데이터 ex) [3, 5, 7, 9], [2.2, 4.4, 6.6, 8.8]
# - 스트링형 데이터 ex) "I am a boy", 'You are a girl'
# - 부울형 데이터 ex) True, False
# + [markdown] id="w3YkvWyqnWjD"
# ## 07-2. 소스파일에 main함수 만들기
# + id="movGIoTUm2dV"
# main.py
def main(): # main 함수의 정의
print("Simple Frame")
main() # main 함수의 호출을 명령함
# + [markdown] id="XX2dinpMneRC"
# ## 07-3. if문: 조건이 맞으면 실행하라
# + id="_M82n9ncnbX5"
# if_positive.py
def main(): # main 함수의 정의
num = int(input("정수 입력: "))
if num > 0:
print("양의 정수입니다.")
main() # 위의 main 함수를 실행해라!
# + id="eQ82MypBnmVx"
num = 2
if num > 0: print("양의 정수입니다.") # 한 줄이면 이렇게도 가능
# + [markdown] id="OOkbJ_hfn4bv"
# ## 07-4. if~else문 : 이쪽 길! 아니면 저쪽 길!
# + id="qh9Uvkvfn02M"
# if_else.py
def main():
num = int(input("정수 입력: "))
if num > 0:
print("0보다 큰 수입니다.") # num이 0보다 크면 이 문장 실행
else:
print("0보다 크지 않은 수입니다.") # num이 0보다 크지 않으면 이 문장 실행
main()
# + [markdown] id="ggGhZY0FoPXg"
# ## 07-5. if ~ elif ~ else문: 여러 길 중에서 하나의 길만 선택!
# + id="ZUHI9ivOoGm3"
# if_elif_else.py
def main():
num = int(input("정수 입력: "))
if num > 0:
print("0보다 큰 수입니다.") # 문장 1
elif num < 0:
print("0보다 작은 수입니다.") # 문장 2
else:
print("0으로 판단이 됩니다.") # 문장 3
main()
# + [markdown] id="rXBm9DQtofrs"
# ## 07-6. True 또는 False를 반환하는 연산들
# + id="K8Q0hz7hoddw"
# if_elif_else.py
def main():
num = int(input("정수 입력: "))
if num == 1:
print("1을 입력했습니다.")
elif num == 2:
print("2을 입력했습니다.")
elif num == 3:
print("3을 입력했습니다.")
else:
print("1, 2, 3 아닌 정수를 입력했습니다.")
main()
# + id="O2kepnQ6pDBH"
# if_and_if.py
def main():
num = int(input("2의 배수이면서 3의 배수인 수 입력: "))
if num % 2 == 0:
if num % 3 == 0:
print("OK!")
else:
print("NO!")
else:
print("NO!")
main()
# + [markdown] id="04_09diPpjKu"
# - and 연산자
# + id="FzgXwnaNpc2J"
True and True
# + id="l6N2f9n1pgzo"
True and False
# + id="e7sq97RjpnKh"
False and True
# + id="-u3Rfo8WplIU"
False and False
# + [markdown] id="64IhAXbGpqJD"
# - or 연산자
# + id="mWzHK1GQpxWm"
True or True
# + id="dx9j9hF4pxWo"
True or False
# + id="rGJt3XqYpxWp"
False or True
# + id="9F9hbiappxWp"
False or False
# + [markdown] id="iqp5H2wVp35F"
# - not 연산자
# + id="6RcXprxfpmfQ"
not False
# + id="QIvwwZYXp7RK"
not True
# + id="gkkpvowjp8I0"
# and.py
def main():
num = int(input("2의 배수이면서 3의 배수인 수 입력: "))
if num % 2 == 0 and num % 3 == 0:
print("OK!")
else:
print("NO!")
main()
# + [markdown] id="cK__P99NqgoK"
# ## 과제
# + [markdown] id="PRCiu74Oqidg"
# - 우리는 이제 if도 할 수 있습니다.
# - 얼른 백준으로 가서 문제를 또 풀어보지요
# + id="rNzZvJiNICeG"
n1, n2, n3 = input().split()
n1 = int(n1)
n2 = int(n2)
n3 = int(n3)
if n1 == n2 == n3:
print(10000 + n1 * 1000)
elif n1 == n2:
print(1000 + n1 * 100)
elif n1 == n3:
print(1000 + n1 * 100)
elif n2 == n3:
print(1000 + n2 * 100)
else:
if n1 > n2:
if n1 > n3:
print(n1 * 100)
else:
print(n3 * 100)
# + id="Soh1l48pLT4Q"
n1, n2, n3 = input().split()
n1 = int(n1)
n2 = int(n2)
n3 = int(n3)
if n1 == n2 and n2 == n3:
print(10000 + n1 * 1000)
elif n1 == n2:
print(1000 + n1 * 100)
elif n1 == n3:
print(1000 + n1 * 100)
elif n2 == n3:
print(1000 + n2 * 100)
else:
answer = max(n1, n2)
answer = max(n3, answer)
print(answer * 100)
# + id="AP31wNsdLVhy"
# Print every integer from n to m inclusive.
# NOTE(review): the `if n == m: break` guard makes the single-value case
# (n == m) print nothing at all — presumably unintended; confirm against
# the original problem statement.
n, m = map(int, input().split())
for i in range(n, m+1, 1):
    if n == m:
        break
    print(i)
# + id="I14POP8lMnHQ"
N, M = map(int, input().split())
if N - M < 0:
print(M - N)
else :
print(N - M)
# + id="mdhg1lOAMXHH"
N, M = map(int, input().split())
print(abs(N - M))
#abs 절댓값 구하는 함수
# + [markdown] id="fKTrwgYXqowE"
# ## 07-7. 리스트와 문자열을 대상으로도 동작하는 >=, <=, ==, !=
# + id="zVzvut6lqQAG"
'abc' == 'abc' # 두 문자열이 같은가?
# + id="p-BpxH0aqswp"
'abc' != 'abc' # 두 문자열이 다른가?
# + id="az2qGMMQqt1B"
[1, 2, 3] == [1, 2] # 두 리스트가 같은가?
# + id="yhYmFrgyqvHc"
[1, 2, 3] != [1, 2] # 두 리스트가 다른가?
# + [markdown] id="xAyFu2tQqycs"
# ## 07-8. True 또는 False로만 답하는 함수들
# + [markdown] id="8mYxPunwq535"
# - isdigit()
# + id="kyofpvgzqwIU"
st1 = "123"
st2 = "OneTwoThree"
st1.isdigit() # st1은 숫자로만 이뤄져 있나요?
# + id="2YcXwaLaq4Ae"
st2.isdigit() # st2는 숫자로만 이뤄져 있나요?
# + [markdown] id="jaE3HsiJq950"
# - isalpha()
# + id="NVDTtSkUq5f5"
st1 = "123"
st2 = "OneTwoThree"
st1.isalpha()
# + id="aSfkDMeOrBEr"
st2.isalpha()
# + [markdown] id="tdnadMUYrE6X"
# - startswith()
# + id="LBFHpcLTrCQO"
str = "Supersprint"
str.startswith("Super") # 문자열이 'Super'로 시작하는가?
# + [markdown] id="adjJwBhurJCt"
# - endswith()
# + id="Wxh1jLa2rH4H"
str = "Supersprint"
str.endswith("int") # 문자열이 'int'로 끝나는가?
# + id="ZNJE5DpQrO5t"
# is_phone_num.py
def main():
pnum = input("스마트폰 번호 입력: ")
if pnum.isdigit() and pnum.startswith("010"):
print("정상적인 입력입니다.")
else:
print("정상적이지 않은 입력입니다.")
main()
# + [markdown] id="lXuwBn1wrc-N"
# ## 07-9. in, not in
# + id="AWqimN25rVB3"
s = "Tomato spaghetti"
if s.find("ghe") != -1:
print("있습니다.")
else:
print("없습니다.")
# + id="2OMQ23uSrg8N"
if "ghe" in s:
print("있습니다.")
else:
print("없습니다.")
# + id="dkuz-3FirnRf"
3 in [1, 2, 3] # 리스트 [1, 2, 3] 안에 3이 있는가?
# + id="ZufCJC29rtyT"
4 in [1, 2, 3] # 리스트 [1, 2, 3] 안에 4가 있는가?
# + id="cwwIpqjyru6z"
3 not in [1, 2, 3]
# + id="R7Ou-uUirwFl"
4 not in [1, 2, 3]
# + id="aLeRG2AvrxCE"
"he" not in "hello"
# + id="JEBB63nQrx1z"
"oo" not in "hello"
# + [markdown] id="IpCeTIB6r1Ju"
# ## 과제
# + [markdown] id="BGdCwC-qr3Nh"
# - 프로그램 사용자가 정수를 입력하면, 그 수의 거듭제곱 값을 출력한다
# - 프로그램 사용자가 정수가 아닌 것을 입력하면, "정수가 아닙니다." 는 main()함수를 구현한다.
# + id="l18_XTBAOebX"
def main(num):
if num == int(num):
print(num * num)
else:
print("정수가 아닙니다.")
# + id="DYwiNBlFP8wi"
def main():
num = input("정수를 입력해주세요! ")
if num.isdigit():
num = int(num)
print(num ** 2)
else:
print("정수가 아닙니다")
main()
# + id="2Q2DG75mO-Py"
main(5)
# + [markdown] id="wSpGLzvVsJDZ"
# ## 07-10. 수를 True와 False로 인식하는 방식
# + id="bOBx3yV9ryya"
num = 1
if num:
print("num은 True입니다.")
else:
print("num은 False입니다.")
# + id="3xC0zH7AsUex"
num = 0
if num:
print("num은 True입니다.")
else:
print("num은 False입니다.")
# + id="oLANiWEWsWB2"
num = 2
if num:
print("num은 True입니다.")
else:
print("num은 False입니다.")
# + id="jg6MfbcWsW1u"
num = -1
if num:
print("num은 True입니다.")
else:
print("num은 False입니다.")
# + [markdown] id="8vUXTPEAsaIp"
# - 0 이 오는 경우 False가 온 것으로 간주한다.
# - 0 아닌 수가 오는 경우 True가 온 것으로 간주한다.
# + id="nuODNsrIsZoB"
bool(5)
# + id="yuXgtUMasl2u"
bool("what")
# + id="KKt0X7kUsmt4"
bool("")
# + id="nCj4Lcwbsnk4"
bool([1, 2, 3])
# + id="2R5VATVJsoUl"
bool([])
# + [markdown] id="SMVMZ2icsp_U"
# - 문자열과 유사하게 빈 리스트는 False, 값이 있는 리스트는 True로 해석된다.
# + id="gEetvaCwspPv"
|
chapter07.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Continuous Change Detection (CCDC) Land Cover
# This code is adapted from the CCDC methodology detailed in the following manuscript: https://www.sciencedirect.com/science/article/abs/pii/S0034425714000248
# +
import ee
import geemap
from geemap import *
import json
from geemap import geojson_to_ee, ee_to_geojson
from ipyleaflet import GeoJSON
import os
import sklearn
import js2py
# # !pip install geemap
utils = ('users/parevalo_bu/gee-ccdc-tools:ccdcUtilities/api')
# -
# ### Define parameters
# +
# Change detection parameters for the CCDC algorithm
# (Zhu & Woodcock 2014 methodology).
changeDetection = {
    'breakpointBands': ['GREEN','RED','NIR','SWIR1','SWIR2'],  # bands tested for breaks
    'tmaskBands': ['GREEN','SWIR2'],          # bands used for cloud/shadow screening
    'minObservations': 6,                     # consecutive anomalies needed to flag a break
    'chiSquareProbability': .99,              # chi-square threshold for change
    'minNumOfYearsScaler': 1.33,
    'dateFormat': 2,
    'lambda': 20/10000,                       # LASSO regularisation (reflectance units)
    'maxIterations': 25000
}
'''
#Classification parameters
classification = {
'bandNames': ["B1","B2","B3","B4","B5","B6","B7"],
'inputFeatures': ["INTP", "SLP","PHASE","RMSE"],
'coefs': ["INTP", "SLP","COS", "SIN","RMSE","COS2","SIN2","COS3","SIN3"],
'ancillaryFeatures': ["ELEVATION","ASPECT","DEM_SLOPE","RAINFALL","TEMPERATURE"],
'resultFormat': 'SegCollection',
'classProperty': 'LC_Class',
'yearProperty': 'year',
'classifier': ee.Classifier.smileRandomForest,
'classifierParams': {
'numberOfTrees': 150,
'variablesPerSplit': null,
'minLeafPopulation': 1,
'bagFraction': 0.5,
'maxNodes': null
},
'outPath': '/Users/joycelynlongdon/Desktop/Cambridge/CambridgeCoding/MRES/GEE_examples/Notebooks/Change Detection',
'segs': ["S1", "S2", "S3", "S4", "S5", "S6"],
#'trainingPath': projects/GLANCE/TRAINING/MASTER/NA/NA_V1/NA_Training_Master_V1_NO_LCMAP_2021_03_17',
#'trainingPathPredictors': 'projects/GLANCE/TRAINING/MASTER/NA/NA_V1/NA_Training_Master_V1_NO_LCMAP_2021_03_17_predictors',
}
'''
#define study region
file_path = os.path.abspath('/Users/joycelynlongdon/Desktop/Cambridge/CambridgeCoding/MRES/GEE_examples/Input Data/mai_ndombe.json')
with open(file_path) as f:
    aoi_poly = json.load(f)

studyRegion = ee.FeatureCollection(aoi_poly).first().geometry()

params = {
    'start': '2013-01-01',
    'end': '2019-01-01',
    'ChangeDetection': changeDetection,
    # 'Classification' omitted: the `classification` dict above is commented
    # out (it lives inside a triple-quoted string), so referencing the name
    # here raised a NameError.
    'StudyRegion': studyRegion
}

# Filter Landsat data by date and location.
# Fix: `params` is a plain dict, so the original attribute access
# (params.StudyRegion / params.start / params.end) raised AttributeError;
# item access is required.
# NOTE(review): `utils` is currently a plain string path — it presumably
# needs to be loaded via geemap's `require()` for the gee-ccdc-tools
# module before `.Inputs` can be used; confirm.
filteredLandsat = utils.Inputs.getLandsat() \
    .filterBounds(params['StudyRegion']) \
    .filterDate(params['start'], params['end'])
print(filteredLandsat.size())
# -
|
Notebooks/Exploration/Change_Detection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="N6ZDpd9XzFeN"
# ##### Copyright 2018 The TensorFlow Hub Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + cellView="form" colab_type="code" id="KUu4vOt5zI9d" colab={}
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# + colab_type="code" id="innBbve1LdjE" colab={}
# + [markdown] colab_type="text" id="edfbxDDh2AEs"
# ## Predict Shakespeare with Cloud TPUs and Keras
# + [markdown] colab_type="text" id="RNo1Vfghpa8j"
# ## Overview
#
# This example uses [tf.keras](https://www.tensorflow.org/guide/keras) to build a *language model* and train it on a Cloud TPU. This language model predicts the next character of text given the text so far. The trained model can generate new snippets of text that read in a similar style to the text training data.
#
# The model trains for 10 epochs and completes in approximately 5 minutes.
#
# This notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select **File > View on GitHub**.
# + [markdown] colab_type="text" id="dgAHfQtuhddd"
# ## Learning objectives
#
# In this Colab, you will learn how to:
# * Build a two-layer, forward-LSTM model.
# * Convert a `tf.keras` model to an equivalent TPU version and then use the standard Keras methods to train: `fit`, `predict`, and `evaluate`.
# * Use the trained model to make predictions and generate your own Shakespeare-esque play.
#
#
#
#
#
# + [markdown] colab_type="text" id="QrprJD-R-410"
# ## Instructions
# + [markdown] colab_type="text" id="_I0RdnOSkNmi"
# <h3> Train on TPU</h3>
#
# 1. On the main menu, click Runtime and select **Change runtime type**. Set "TPU" as the hardware accelerator.
# 1. Click Runtime again and select **Runtime > Run All**. You can also run the cells manually with Shift-ENTER.
# + [markdown] colab_type="text" id="kYxeFuKCUx9d"
# TPUs are located in Google Cloud, for optimal performance, they read data directly from Google Cloud Storage (GCS)
# + [markdown] colab_type="text" id="Lvo0t7XVIkWZ"
# ## Data, model, and training
# + [markdown] colab_type="text" id="xzpUtDMqmA-x"
# In this example, you train the model on the combined works of <NAME>, then use the model to compose a play in the style of *The Great Bard*:
#
# <blockquote>
# Loves that led me no dumbs lack her Berjoy's face with her to-day.
# The spirits roar'd; which shames which within his powers
# Which tied up remedies lending with occasion,
# A loud and Lancaster, stabb'd in me
# Upon my sword for ever: 'Agripo'er, his days let me free.
# Stop it of that word, be so: at Lear,
# When I did profess the hour-stranger for my life,
# When I did sink to be cried how for aught;
# Some beds which seeks chaste senses prove burning;
# But he perforces seen in her eyes so fast;
# And _
# </blockquote>
#
# + [markdown] colab_type="text" id="KRQ6Fjra3Ruq"
# ### Download data
#
# Download *The Complete Works of <NAME>* as a single text file from [Project Gutenberg](https://www.gutenberg.org/). You use snippets from this file as the *training data* for the model. The *target* snippet is offset by one character.
# + colab_type="code" id="j8sIXh1DEDDd" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="8a6bf19c-11c1-4a40-b3e5-f472718cd0e4"
# !wget --show-progress --continue -O /content/starwars.txt http://www.scifiscripts.com/scripts/swd1_5-74.txt
# + [markdown] colab_type="text" id="AbL6cqCl7hnt"
# ### Build the data generator
# + colab_type="code" id="E3V4V-Jxmuv3" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="9d138291-a6a6-400d-9497-b566fac7141c"
import numpy as np
import six
import tensorflow as tf
import time
import os
# This address identifies the TPU we'll use when configuring TensorFlow.
# Requires a Colab runtime with a TPU accelerator attached.
TPU_WORKER = 'grpc://' + os.environ['COLAB_TPU_ADDR']
# Despite the variable name, this points at the Star Wars script downloaded above.
SHAKESPEARE_TXT = '/content/starwars.txt'
# TF1-style logging; show INFO-level messages during training.
tf.logging.set_verbosity(tf.logging.INFO)
def transform(txt, pad_to=None):
    """Encode *txt* as an int32 array of character ordinals.

    Characters with ord >= 255 are dropped. If *pad_to* is given, the result
    is truncated to at most *pad_to* values and left-padded with zeros so it
    is exactly *pad_to* entries long.
    """
    # drop any non-ascii characters
    output = np.asarray([ord(c) for c in txt if ord(c) < 255], dtype=np.int32)
    if pad_to is not None:
        output = output[:pad_to]
        # BUG FIX: pad relative to the *filtered* length, not len(txt). If any
        # characters were dropped above, len(txt) != len(output) and the old
        # code produced an array shorter than pad_to (or raised on a negative
        # dimension).
        output = np.concatenate([
            np.zeros([pad_to - len(output)], dtype=np.int32),
            output,
        ])
    return output
def training_generator(seq_len=100, batch_size=1024):
    """Yield endless (source, target) batches for language-model training.

    Each batch pairs a random window of the encoded text with the same
    window shifted one character ahead. Targets get a trailing axis because
    the sparse-crossentropy loss requires labels with the same rank as the
    model's logits.
    """
    with tf.gfile.GFile(SHAKESPEARE_TXT, 'r') as f:
        raw_text = f.read()
    tf.logging.info('Input text [%d] %s', len(raw_text), raw_text[:50])

    encoded = transform(raw_text)
    while True:
        starts = np.random.randint(0, len(encoded) - seq_len, batch_size)
        inputs = np.stack([encoded[s:s + seq_len] for s in starts])
        labels = np.stack([encoded[s + 1:s + seq_len + 1] for s in starts])
        yield inputs, np.expand_dims(labels, -1)
six.next(training_generator(seq_len=10, batch_size=1))
# + [markdown] colab_type="text" id="Bbb05dNynDrQ"
# ### Build the model
#
# The model is defined as a two-layer, forward-LSTM—with two changes from the `tf.keras` standard LSTM definition:
#
# 1. Define the input `shape` of the model to comply with the [XLA compiler](https://www.tensorflow.org/performance/xla/)'s static shape requirement.
# 2. Use `tf.train.Optimizer` instead of a standard Keras optimizer (Keras optimizer support is still experimental).
# + colab_type="code" id="yLEM-fLJlEEt" colab={}
EMBEDDING_DIM = 512

def lstm_model(seq_len=100, batch_size=None, stateful=True):
    """Language model: predict the next word given the current word."""
    # A static input shape is required by the XLA compiler on TPU.
    seed_input = tf.keras.Input(
        name='seed', shape=(seq_len,), batch_size=batch_size, dtype=tf.int32)

    x = tf.keras.layers.Embedding(input_dim=256, output_dim=EMBEDDING_DIM)(seed_input)
    x = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(x)
    x = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(x)
    per_char_probs = tf.keras.layers.TimeDistributed(
        tf.keras.layers.Dense(256, activation='softmax'))(x)

    model = tf.keras.Model(inputs=[seed_input], outputs=[per_char_probs])
    # A tf.train optimizer (not a Keras one) is used here because Keras
    # optimizer support on TPU was still experimental in TF1.
    model.compile(
        optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'])
    return model
# + [markdown] colab_type="text" id="VzBYDJI0_Tfm"
# ### Train the model
#
# The `tf.contrib.tpu.keras_to_tpu_model` function converts a `tf.keras` model to an equivalent TPU version. You then use the standard Keras methods to train: `fit`, `predict`, and `evaluate`.
# + colab_type="code" id="ExQ922tfzSGA" colab={"base_uri": "https://localhost:8080/", "height": 1176} outputId="4ddb956a-eedd-4dde-c195-604a5e103df9"
tf.keras.backend.clear_session()

# Non-stateful training model with a fixed batch size (needed for TPU shapes).
training_model = lstm_model(seq_len=100, batch_size=128, stateful=False)

# Convert the Keras model to its TPU equivalent.
# NOTE(review): tf.contrib was removed in TF2 — this cell only runs on TF1.x;
# the modern equivalent is tf.distribute.TPUStrategy.
tpu_model = tf.contrib.tpu.keras_to_tpu_model(
    training_model,
    strategy=tf.contrib.tpu.TPUDistributionStrategy(
        tf.contrib.cluster_resolver.TPUClusterResolver(TPU_WORKER)))

tpu_model.fit_generator(
    training_generator(seq_len=100, batch_size=1024),
    steps_per_epoch=100,
    epochs=10,
)
# Persist weights so the differently-shaped prediction model can reload them.
tpu_model.save_weights('/tmp/bard.h5', overwrite=True)
# + [markdown] colab_type="text" id="TCBtcpZkykSf"
# ### Make predictions with the model
#
# Use the trained model to make predictions and generate your own Shakespeare-esque play.
# Start the model off with a *seed* sentence, then generate 250 characters from it. The model makes five predictions from the initial seed.
# + colab_type="code" id="tU7M-EGGxR3E" colab={"base_uri": "https://localhost:8080/", "height": 1006} outputId="472f5bb2-faaf-4567-9c93-f4c32a798eec"
BATCH_SIZE = 5      # number of independent text samples generated in parallel
PREDICT_LEN = 250   # characters to generate per sample

# Keras requires the batch size be specified ahead of time for stateful models.
# We use a sequence length of 1, as we will be feeding in one character at a
# time and predicting the next character.
prediction_model = lstm_model(seq_len=1, batch_size=BATCH_SIZE, stateful=True)
prediction_model.load_weights('/tmp/bard.h5')

# We seed the model with our initial string, copied BATCH_SIZE times
seed_txt = 'Worry never robs tomorrow of its sorrow '
seed = transform(seed_txt)
seed = np.repeat(np.expand_dims(seed, 0), BATCH_SIZE, axis=0)

# First, run the seed forward to prime the state of the model.
prediction_model.reset_states()
for i in range(len(seed_txt) - 1):
    prediction_model.predict(seed[:, i:i + 1])

# Now we can accumulate predictions!
# NOTE(review): predictions[0] is 2-D (BATCH_SIZE, 1) while later entries are
# 1-D (BATCH_SIZE,); the indexing below handles both, but confirm shapes if
# modifying this loop.
predictions = [seed[:, -1:]]
for i in range(PREDICT_LEN):
    last_word = predictions[-1]
    next_probits = prediction_model.predict(last_word)[:, 0, :]

    # sample from our output distribution
    next_idx = [
        np.random.choice(256, p=next_probits[i])
        for i in range(BATCH_SIZE)
    ]
    predictions.append(np.asarray(next_idx, dtype=np.int32))

for i in range(BATCH_SIZE):
    print('PREDICTION %d\n\n' % i)
    p = [predictions[j][i] for j in range(PREDICT_LEN)]
    generated = ''.join([chr(c) for c in p])
    print(generated)
    print()
    assert len(generated) == PREDICT_LEN, 'Generated text too short'
# + [markdown] colab_type="text" id="2a5cGsSTEBQD"
# ## What's next
#
# * Learn about [Cloud TPUs](https://cloud.google.com/tpu/docs) that Google designed and optimized specifically to speed up and scale up ML workloads for training and inference and to enable ML engineers and researchers to iterate more quickly.
# * Explore the range of [Cloud TPU tutorials and Colabs](https://cloud.google.com/tpu/docs/tutorials) to find other examples that can be used when implementing your ML project.
#
# On Google Cloud Platform, in addition to GPUs and TPUs available on pre-configured [deep learning VMs](https://cloud.google.com/deep-learning-vm/), you will find [AutoML](https://cloud.google.com/automl/)*(beta)* for training custom models without writing code and [Cloud ML Engine](https://cloud.google.com/ml-engine/docs/) which will allows you to run parallel trainings and hyperparameter tuning of your custom models on powerful distributed hardware.
#
|
TPU/Write_Star_Wars_Script_with_Cloud_TPUs_and_Keras.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Project data directories, relative to the notebooks/ folder.
raw_data_dir = '../data/raw/'
interim_data_dir = '../data/interim/'
# +
import requests
import json
import pandas as pd
# NHTSA FARS CrashAPI base endpoint.
url= "https://crashviewer.nhtsa.dot.gov/CrashAPI"
#/crashes/GetCrashesByLocation?fromCaseYear=2014&toCaseYear=2015&state=1&county=1&format=json
fromCaseYear = "2010"
toCaseYear = "2020"
state = "6"  # FIPS code for California
# County 73 is San Diego County.
qurl = f"{url}/crashes/GetCrashesByLocation?fromCaseYear={fromCaseYear}&toCaseYear={toCaseYear}&state={state}&county=73&format=json"
cali = requests.get(qurl).json()
# -

# One row per crash: collapse duplicate ST_CASE rows, keeping the max of each column.
cali_df = pd.DataFrame(cali['Results'][0]).groupby('ST_CASE', as_index=False).max()

# +
# Fetch full details for a single sample case to inspect the response schema.
year = 2015
st_case = 60022
qurl = f"{url}/crashes/GetCaseDetails?stateCase={st_case}&caseYear={year}&state=6&format=json"
data = requests.get(qurl).json()
case = data['Results'][0][0]['CrashResultSet']
# -
case.keys()
case['LONGITUD']
# +
def extract_people(v):
    """Yield one flattened record per occupant of vehicle dict *v*.

    Vehicle-level FARS fields are merged with each occupant's person-level
    fields (see person_dict).
    """
    for p in v['Persons']:
        yield {
            'Speed Limit Exceeded': v['SPEEDRELNAME'],
            # BUG FIX: the FARS fields were swapped — TRAV_SP is the vehicle's
            # travel speed and VSP_LIM/VSPD_LIM is the posted speed limit, but
            # the original assigned each to the opposite label. Column names
            # are kept unchanged for downstream compatibility.
            'Speed limit': v['VSPD_LIM'],
            'Vin Number': v['VINNAME'],
            'Traveled Speed Veh': v['TRAV_SP'],
            'Make': v['MAKENAME'],
            'Make/Model': v['MAK_MODNAME'],
            'Model': v['MODELNAME'],
            'Type of Vehicle': v['BODY_TYPNAME'],
            "Driver ZIP Code": v['DR_ZIP'],
            **person_dict(p),
        }

def person_dict(p):
    """Map a FARS person record *p* to human-readable column names."""
    return {
        "Age": p['AGE'],
        "Age Name": p['AGENAME'],
        "County": p['COUNTYNAME'],
        "Death Day of Month": p['DEATH_DANAME'],
        "DOA Name": p['DOANAME'],
        # injury severity (e.g. fatal / incapacitating / no injury)
        "Injury Severity Name": p['INJ_SEVNAME'],
        "Race": p['RACENAME'],
        "Road Type": p["ROAD_FNCNAME"],
        "Sex": p["SEXNAME"],
        "Make": p["MAKENAME"],
    }
def get_people(case):
    """Build a DataFrame with one row per person involved in crash *case*.

    Crash-level fields are repeated on every row; occupants come from the
    vehicle records, and pedestrians (the NPersons key) are appended when
    present.
    """
    crash_hour = case['HOUR']
    crash_fields = {
        'Lng': case['LONGITUD'],
        'Lat': case['LATITUDE'],
        'Case Number': case['ST_CASE'],
        "Description of Veh Coll": case['CF2NAME'],
        "Day of Week": case['DAY_WEEKNAME'],
        "Drunk Driver": case['DRUNK_DR'],
        "Year": case['CaseYear'],
        "Month": case['MonthName'],
        "Hour": crash_hour,
        "Time of Accident": f"{crash_hour}:{case['MINUTE']}",
    }

    rows = []
    for vehicle in case['Vehicles']:
        for occupant in extract_people(vehicle):
            rows.append({**crash_fields, **occupant})

    # Pedestrians are optional and may be explicitly null in the API response.
    walkers = case.get('NPersons')
    if walkers is None:
        walkers = []
    for ped in walkers:
        rows.append({**crash_fields, **person_dict(ped)})

    return pd.DataFrame(rows)
def get_events(case):
    """Return a DataFrame of collision events (CEvents) for crash *case*."""
    rows = []
    for evt in case['CEvents']:
        rows.append({
            'Case Number': case['ST_CASE'],
            # In a traffic accident AOI is Area of Impact. The spot the two cars collided is measured
            # to a fixed object, usually the curb, so it can be reconstructed later.
            'Area of Impact': evt['AOI1NAME'],
            # standard of evidence
            # https://safety.fhwa.dot.gov/rsdp/cdip_rpti.aspx
            # (column name typo kept as-is for downstream compatibility)
            'Standard of Evenidence': evt['SOENAME'],
            'Event Number': evt['EVENTNUM'],
            'Vehicle 1': evt['VNUMBER1'],
            'Vehicle 2': evt['VNUMBER2'],
        })
    return pd.DataFrame(rows)
# +
# Run Census Search to retrieve data on all zip codes (2013 ACS5 Census)
# See: https://github.com/CommerceDataService/census-wrapper for library documentation
# See: https://gist.github.com/afhaque/60558290d6efd892351c4b64e5c01e9b for labels
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import requests
from census import Census
# Census API Key
from api_config import census_api_key
# Shared Census client; year=2013 is only the client default — census_by_year
# below passes an explicit year on every call.
c = Census(census_api_key, year=2013)
class CensusData(object):
    """ACS5 census statistics by ZIP code, with memory and disk caching.

    Lookups go memory cache -> interim CSV on disk -> live Census API; API
    results are written back to both cache layers.
    """

    # In-memory cache shared by all callers: {year: DataFrame}.
    census_cache = {}

    @classmethod
    def census_by_year(cls, year):
        """Return census stats for *year*, or None if the API call fails."""
        if year in cls.census_cache:
            return cls.census_cache[year]
        file_path = f'{ interim_data_dir }census_{ year }'
        if os.path.exists(file_path):
            df = pd.read_csv(file_path)
            # Backfill the Year column on older cache files, then re-persist.
            if 'Year' not in df.columns:
                df['Year'] = year
                df.to_csv(file_path)
            # BUG FIX: populate the in-memory cache — it was checked above but
            # never written, so every call re-read the CSV (or re-hit the API).
            cls.census_cache[year] = df
            return df
        try:
            # B19013: household income, B01003: population, B01002: median age,
            # B19301: per-capita income, B17001_002: poverty count.
            census_data = c.acs5.get(("NAME", "B19013_001E", "B01003_001E", "B01002_001E",
                                      "B19301_001E",
                                      "B17001_002E"), {'for': 'zip code tabulation area:*'}, year=year)
            # Convert to DataFrame
            census_pd = pd.DataFrame(census_data)
            # Column Reordering
            census_pd = census_pd.rename(columns={"B01003_001E": "Population",
                                                  "B01002_001E": "Median Age",
                                                  "B19013_001E": "Household Income",
                                                  "B19301_001E": "Per Capita Income",
                                                  "B17001_002E": "Poverty Count",
                                                  "NAME": "Name",
                                                  "zip code tabulation area": "Zipcode"})
            census_pd['Year'] = str(year)
            census_pd.to_csv(file_path)
            cls.census_cache[year] = census_pd
            return census_pd
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit); best-effort behavior kept.
            print('no data')
            return None

    @classmethod
    def all_years(cls):
        """Concatenate census data for 2011-2018 into one DataFrame."""
        # load all census tables 2011 - 2018
        years = range(2011, 2019)
        all_years = [cls.census_by_year(y) for y in years]
        df = pd.concat(all_years, ignore_index=True)
        # Normalize join keys to strings for the merge in ensure_census_columns.
        df['Zipcode'] = df['Zipcode'].astype('str')
        df['Year'] = df['Year'].astype('str')
        return df

    @classmethod
    def ensure_census_columns(cls, df):
        """Left-join census columns onto *df* by (Year, Accident ZIP), once."""
        if 'Per Capita Income' in df.columns: return df
        merged = pd.merge(df, cls.all_years(), how='left', left_on=['Year', 'Accident ZIP'], right_on=['Year', 'Zipcode'])
        return merged
# -
CensusData.all_years().dtypes
# +
import json
from shapely.geometry import shape, Point
# depending on your version, use: from shapely.geometry import shape, Point
class ZipCoder(object):
    """Reverse-geocodes (Lat, Lng) rows to ZIP codes via a local GeoJSON file."""

    def __init__(self):
        # Parsed GeoJSON feature collection; loaded lazily in ensure_acc_zips.
        self.js = None

    def __lookup(self, lat, lng):
        """Return the ZIP whose polygon contains the point, else None."""
        pt = Point(lng, lat)
        for feat in self.js['features']:
            if shape(feat['geometry']).contains(pt):
                return feat['properties']['zip']

    def __row_to_zip(self, r):
        return self.__lookup(float(r['Lat']), float(r['Lng']))

    def ensure_acc_zips(self, df):
        """Add an 'Accident ZIP' column to *df* in place if it is missing."""
        with open(f'{ raw_data_dir }Zip Codes.geojson') as f:
            self.js = json.load(f)
        if 'Accident ZIP' not in df.columns:
            df['Accident ZIP'] = df.apply(self.__row_to_zip, axis=1)
# +
from IPython.display import clear_output
import grequests
from itertools import islice
import os
def chunk(it, size):
    """Split iterable *it* into consecutive tuples of at most *size* items."""
    iterator = iter(it)
    # iter(callable, sentinel): keep slicing until an empty tuple comes back.
    return iter(lambda: tuple(islice(iterator, size)), ())
# LARGE CHUNK SIZE WILL BLOW UP SERVER AND CAUSING: AttributeError: 'NoneType' object has no attribute 'json'
chunk_size = 5                    # concurrent requests per batch
fromCaseYear = "2010"
toCaseYear = "2020"
state = "6"                       # California FIPS code
case_file_base = raw_data_dir     # per-case JSON files are cached here
data_lists = {}                   # in-memory cache: {'people': df, 'events': df}
def url_from_row(r):
    """Build the FARS GetCaseDetails URL for one crash summary row *r*."""
    case_id = r["ST_CASE"]
    case_year = r["CaseYear"]
    return f"{url}/crashes/GetCaseDetails?stateCase={case_id}&caseYear={case_year}&state=6&format=json"
def get_file_path(case):
    """Return the local cache path for *case*, keyed by its ST_CASE id."""
    case_id = case["ST_CASE"]
    return f'{ case_file_base }{ case_id }.json'
def load_case(file_path):
    """Read one cached crash-case JSON file and return the parsed object."""
    with open(file_path, 'r') as fh:
        return json.load(fh)
def __get_cases():
    """Yield every case in cali_df, from local cache when available.

    Cached cases are yielded immediately; the remainder are fetched from the
    FARS API in chunks (see __chunk_and_fetch) and yielded as they arrive.
    """
    urls = []
    found_locally = 0
    for i, r in cali_df.iterrows():
        file_path = get_file_path(r)
        if os.path.exists(file_path):
            found_locally += 1
            clear_output(wait=True)
            print(f'{ found_locally } files found locally')
            yield load_case(file_path)
        else:
            url = url_from_row(r)
            urls.append(url)
    print(f'{ len(urls) } need to be fetched. ')
    for c in __chunk_and_fetch(urls):
        yield c

def __fetch_cases(urls):
    """Fetch *urls* concurrently and return their CrashResultSet payloads."""
    rs = (grequests.get(u) for u in urls)
    case_data = grequests.map(rs)
    # NOTE(review): a failed request makes grequests.map return None for that
    # slot, which raises AttributeError here — hence the small chunk_size.
    return [data.json()['Results'][0][0]['CrashResultSet'] for data in case_data]

def __save_case(case):
    """Write one case dict to its local JSON cache file."""
    file_path = get_file_path(case)
    with open(file_path, 'w') as json_file:
        json.dump(case, json_file)

def __chunk_and_fetch(urls):
    """Fetch *urls* in chunk_size batches, caching and yielding each case."""
    chunked = chunk(urls, chunk_size)
    i = 0
    for chunked_urls in chunked:
        i += 1
        clear_output(wait=True)
        print(f'Retrieving chunk { i } of { len(urls) / chunk_size } ...')
        cases = __fetch_cases(chunked_urls)
        for case in cases:
            __save_case(case)
            yield case
# Keys into the data_lists in-memory cache.
people_key = 'people'
events_key = 'events'

def __get_case_lists():
    """Build the people and events DataFrames from all cases.

    Fetches/loads every case once, persists both frames to interim CSVs,
    populates the in-memory cache, and returns (people_df, event_df).
    """
    # actualize list to avoid redundant api calls
    case_list = list(__get_cases())
    file_path_people = f"{ interim_data_dir }people.csv"
    people_list = [get_people(case) for case in case_list]
    people_df = pd.concat(people_list, ignore_index=True, sort=False)
    people_df.to_csv(file_path_people)
    data_lists[people_key] = people_df
    file_path_events = f"{ interim_data_dir }events.csv"
    event_list = [get_events(case) for case in case_list]
    event_df = pd.concat(event_list, sort=False)
    event_df.to_csv(file_path_events)
    data_lists[events_key] = event_df
    return people_df, event_df
def __ensure_updates(df):
    """Enrich the people DataFrame and persist it.

    Adds accident ZIP codes, left-joins census columns, strips CSV
    round-trip artifact columns, normalizes column names, writes the result
    back to the interim people.csv, and returns the merged frame.
    """
    ZipCoder().ensure_acc_zips(df)
    merged = CensusData.ensure_census_columns(df)
    # Index columns accumulated by repeated to_csv/read_csv round-trips.
    unnecessary_columns = [
        'Unnamed: 0', 'Unnamed: 0.1',
        'Unnamed: 0_x', 'Unnamed: 0_x',
        'Unnamed: 0.1_x', 'Unnamed: 0.1.1',
        'Unnamed: 0_y', 'Unnamed: 0.1_y']
    for c in unnecessary_columns:
        # BUG FIX: test membership on `merged` (the frame being dropped from);
        # the original checked `df.columns`, which can differ after the merge
        # and made the drop raise KeyError or silently skip columns.
        if c in merged.columns:
            # Keyword form: the positional `axis` argument to drop was
            # removed in pandas 2.0.
            merged.drop(columns=c, inplace=True)
    renames = {
        'ZIP Code': 'Driver ZIP Code',
    }
    for k in renames.keys():
        if k in merged.columns:
            merged.rename(columns=renames, inplace=True)
    file_path = f"{ interim_data_dir }people.csv"
    merged.to_csv(file_path)
    return merged
def get_people_list():
    """Return the people DataFrame (cache or rebuild), always re-enriched."""
    people = get_cached_list(people_key)
    if people is None:
        people = __get_case_lists()[0]
    return __ensure_updates(people)
def get_event_list():
    """Return the events DataFrame, rebuilding and persisting on a cache miss."""
    events = get_cached_list(events_key)
    if events is not None:
        return events
    events = __get_case_lists()[1]
    file_path = f"{ interim_data_dir }events.csv"
    events.to_csv(file_path)
    return events
def get_cached_list(key):
    """Return the cached DataFrame for *key* ('people'/'events'), else None.

    Checks the in-memory data_lists cache first, then the interim CSV on
    disk (memoizing what it reads).
    """
    if key in data_lists:
        return data_lists[key]
    file_path = f"{ interim_data_dir }{ key }.csv"
    if os.path.exists(file_path):
        df = pd.read_csv(file_path)
        # BUG FIX: the original contained a garbled line continuation
        # ("data_lists[key] =" followed by "\df"), which was a SyntaxError.
        data_lists[key] = df
        return df
    return None
# -
# Pull (and cache) the per-person dataset, then verify we produced exactly
# one group per crash in the California summary frame.
df = get_people_list()
grouped = df.groupby('Case Number').count()
assert len(cali_df) == len(grouped)
# +
# Import downloaded data from https://public.opendatasoft.com/explore/dataset/us-zip-code-latitude-and-longitude/table/.
# Use dtype="object" to match other
#zip_latlng = pd.read_csv("zip_latlng.csv", dtype="object")
zip_latlng = pd.read_csv("../Data/interim/zip_latlng.csv")
zip_latlng = zip_latlng.rename(columns={"Zip": "Zipcode"})
# Visualize
zip_latlng.head()
# -
get_people_list()['Accident ZIP']
# +
# Merge the two data sets along zip code
# NOTE(review): `census_pd` is not defined in any visible cell — this likely
# should be CensusData.all_years(); confirm before running this cell.
data_complete = pd.merge(
    zip_latlng, census_pd, how="left", on=["Zipcode", "Zipcode"])
# Remove rows missing data
data_complete = data_complete.dropna()
print(len(data_complete))
# +
# ZIP codes belonging to San Diego County, used to restrict the merged data.
sd_cty_zip = [
    92152,
    92196,
    92192,
    92161,
    92132,
    92193,
    92143,
    92138,
    92182,
    92198,
    92150,
    92199,
    92038,
    92140,
    92112,
    92093,
    92145,
    92092,
    92091,
    92014,
    92173,
    92027,
    92118,
    92119,
    92124,
    92106,
    92107,
    92116,
    92139,
    92029,
    92071,
    92113,
    92102,
    92104,
    92025,
    92037,
    92120,
    92110,
    91945,
    92129,
    92105,
    92103,
    92131,
    92114,
    92117,
    91942,
    92128,
    92111,
    92122,
    91932,
    92109,
    92126,
    92127,
    92123,
    92108,
    92115,
    92121,
    92154,
    92130,
    92101,
    91911]
# Keep only rows with positive per-capita income inside San Diego County.
# NOTE(review): CensusData.all_years casts Zipcode to str — if that frame fed
# this merge, isin() with these int codes would match nothing; confirm dtypes.
data_complete_dropneg = data_complete.loc[(data_complete["Per Capita Income"] > 0) &
                                          (data_complete["Zipcode"].isin(sd_cty_zip)),
                                          :]
# Visualize
print(len(data_complete_dropneg))
data_complete_dropneg.head()
#data_complete.to_csv("data_complete.csv")
# +
import gmaps

# Import API key
from api_config import g_key

# Configure gmaps with API key
gmaps.configure(api_key=g_key)
# +
# Store 'Lat' and 'Lng' into locations
locations = data_complete_dropneg[["Latitude", "Longitude"]].astype(float)

# Convert income and age to float and store
# HINT: be sure to handle NaN values
income = data_complete_dropneg["Per Capita Income"].astype(float)
#age = data_complete_dropneg["Median Age"].astype(float)
#data_complete_dropneg["Per Capita Income"].value_counts()
# +
# Create an income Heatmap layer
fig = gmaps.figure()

heat_layer = gmaps.heatmap_layer(locations, weights=income,
                                 dissipating=True,
                                 point_radius = 10)

fig.add_layer(heat_layer)

fig
# +
# Per-ZIP markers for the income data (blue symbols).
income_symbol_layer = gmaps.symbol_layer(
    locations, fill_color='blue',
    stroke_color='blue', scale=3,
#    locations_accident, fill_color='rgba(0, 150, 0, 0.4)',
#    stroke_color='rgba(0, 0, 150, 0.4)', scale=3,
#    info_box_content=[f"Bank amount: {bank}" for bank in bank_rate]
)

fig = gmaps.figure()
fig.add_layer(income_symbol_layer)

fig
# -
# NOTE(review): reads "people.csv" from the working directory, not the
# interim_data_dir used elsewhere — confirm the intended file.
people_df_new = pd.read_csv("people.csv")
people_df_new.columns
people_df_new['Case Count'] = ""
# One row per crash, counting people per case and keeping the first lat/lng.
people_df_new2 = people_df_new.groupby(['Case Number'], as_index=False).agg({'Case Count': 'count', 'Lat': 'first', 'Lng': 'first'})
people_df_new2
# Convert accident frequency to list
top_accidents = people_df_new2.nlargest(10, "Case Count")
top_accidents
# +
locations_accident = top_accidents[["Lat", "Lng"]].astype(float)
accident_rate = top_accidents['Case Count'].tolist()
#name = hotel_df['Hotel Name'].tolist()
#city = hotel_df['City'].tolist()
#country = hotel_df['Country'].tolist()

#fig = gmaps.figure(layout=figure_layout)
fig = gmaps.figure()

# Assign the marker layer to a variable
#markers = gmaps.marker_layer(locations_accident, info_box_content=hotel_info)
markers = gmaps.marker_layer(locations_accident)
# Add the layer to the map
fig.add_layer(markers)
# Display Map
fig
# +
# Create accident layer
accident_layer = gmaps.symbol_layer(
    locations_accident, fill_color='red',
    stroke_color='red', scale=2,
#    locations_accident, fill_color='rgba(0, 150, 0, 0.4)',
#    stroke_color='rgba(0, 0, 150, 0.4)', scale=3,
#    info_box_content=[f"Bank amount: {bank}" for bank in bank_rate]
)

fig = gmaps.figure()
fig.add_layer(accident_layer)

fig
# +
# Create an accident Heatmap layer
fig = gmaps.figure()

accident_heat_layer = gmaps.heatmap_layer(locations_accident, weights=accident_rate,
                                          dissipating=True,
                                          point_radius = 10,
                                          )

fig.add_layer(accident_heat_layer)

fig
# +
# Create a combined map
fig = gmaps.figure()
fig.add_layer(heat_layer)      # weighted by income
#fig.add_layer(markers)        # top number of accidents
fig.add_layer(accident_layer)  # top number of accidents
#fig.add_layer(income_symbol_layer)
fig
|
notebooks/data_processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="UFxjMI4KKpAQ"
# #Loica and Flapjack setup
# + colab={"base_uri": "https://localhost:8080/"} id="24nElpjH2Mxl" executionInfo={"status": "ok", "timestamp": 1630940998621, "user_tz": 180, "elapsed": 18433, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="bd1706e1-9a69-4799-84b1-5410a7637768"
# !pip install git+https://github.com/SynBioUC/flapjack.git --quiet
# + colab={"base_uri": "https://localhost:8080/"} id="W9Tt6CYi3Xvy" executionInfo={"status": "ok", "timestamp": 1630941028910, "user_tz": 180, "elapsed": 5078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="108228ce-fb05-4114-ed6e-2585890fd2de"
#uncomment when this work
# !pip install git+https://github.com/SynBioUC/LOICA.git --quiet
# + colab={"base_uri": "https://localhost:8080/"} id="CP96WoyD6WH7" executionInfo={"status": "ok", "timestamp": 1630941053628, "user_tz": 180, "elapsed": 24720, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="60099580-b47d-4176-e3d9-be0089b2c4cc"
# Mount Google Drive so the LOICA checkout there is importable (Colab only).
from google.colab import drive
drive.mount("/content/gdrive")
# + colab={"base_uri": "https://localhost:8080/"} id="AeHJx7pT6sI5" executionInfo={"status": "ok", "timestamp": 1630941054376, "user_tz": 180, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="d1b3dc22-326f-44aa-b230-c62f711f40ee"
% cd /content/gdrive/My Drive/
# + colab={"base_uri": "https://localhost:8080/"} id="_RO_soM560ae" executionInfo={"elapsed": 482, "status": "ok", "timestamp": 1625689806423, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gig9cbE0pKf7keFaX1J0hA6MJEStbDo3y7TVyhU=s64", "userId": "12749265851558648895"}, "user_tz": 240} outputId="ce9cd335-3132-4e3a-8142-0c01a17ba5e6"
#uncomment if you dont have LOICA cloned in your drive or to update it
# #!git clone https://github.com/SynBioUC/LOICA.git
# + colab={"base_uri": "https://localhost:8080/"} id="-j3HTHGJ5uRQ" executionInfo={"status": "ok", "timestamp": 1630941073705, "user_tz": 180, "elapsed": 417, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="1f281003-8699-47be-a31f-365f78587c68"
% cd LOICA/
# + colab={"base_uri": "https://localhost:8080/"} id="L-xMkn7R5-S9" executionInfo={"elapsed": 8294, "status": "ok", "timestamp": 1625689821834, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gig9cbE0pKf7keFaX1J0hA6MJEStbDo3y7TVyhU=s64", "userId": "12749265851558648895"}, "user_tz": 240} outputId="8a3cb701-b644-4e0b-d3a7-90fc5da1f98a"
# #!pip install -e .
# + id="z3kusyMY1B-v" executionInfo={"status": "ok", "timestamp": 1630941625003, "user_tz": 180, "elapsed": 877, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}}
from flapjack import *
from loica import *
import numpy as np
import getpass
import datetime
import random as rd
import pandas as pd
from numpy.fft import fft, ifft, fftfreq
from scipy.interpolate import interp1d, UnivariateSpline
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_poisson_deviance
from sklearn.metrics import mean_gamma_deviance
from sklearn.metrics import mean_absolute_error
from scipy.signal import savgol_filter, medfilt
import matplotlib.pyplot as plt
import seaborn as sns

# Shared palette for the three expression-rate estimation methods.
color_inverse = 'dodgerblue'
color_direct = 'orangered'
color_indirect ='gold'
# %matplotlib inline

# Global matplotlib font sizes (BIGGER_SIZE is currently unused below).
SMALL_SIZE = 6
MEDIUM_SIZE = 10
BIGGER_SIZE = 12

plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE)     # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=SMALL_SIZE)   # fontsize of the figure title
# + [markdown] id="m1r5dC3FSL1E"
# #Login
# + colab={"base_uri": "https://localhost:8080/"} id="WZ4GrHkz6GMa" executionInfo={"status": "ok", "timestamp": 1630941097410, "user_tz": 180, "elapsed": 6240, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="1b781e97-aa2b-4448-ee3d-0fdee160948c"
# Prompt for Flapjack credentials and open an authenticated session.
user = input()
# BUG FIX: this line was a redaction artifact ("passwd = <PASSWORD>()"),
# which is a SyntaxError; read the password without echoing it using the
# getpass module imported above.
passwd = getpass.getpass()
fj = Flapjack('flapjack.rudge-lab.org:8000')
fj.log_in(username=user, password=passwd)
# + id="zpgBeJ-6Viqn" executionInfo={"status": "ok", "timestamp": 1630941102716, "user_tz": 180, "elapsed": 1722, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}}
# Get-or-create the Flapjack records used by the simulated experiments.
dna = fj.get('dna', name='Rep')
if len(dna)==0:
    dna = fj.create('dna', name='Rep')
vector = fj.get('vector', name='Rep')
if len(vector)==0:
    vector = fj.create('vector', name='Rep', dnas=dna.id)
# Fluorescence signals are assumed to already exist on the server.
cfp = fj.get('signal', name='CFP')
yfp = fj.get('signal', name='YFP')
rfp = fj.get('signal', name='RFP')
media = fj.get('media', name='Loica')
if len(media)==0:
    media = fj.create('media', name='Loica', description='Simulated loica media')
strain = fj.get('strain', name='Loica strain')
if len(strain)==0:
    strain = fj.create('strain', name='Loica strain', description='Loica test strain')
# Optical density is used as the biomass signal in the analyses below.
biomass_signal = fj.get('signal', name='OD')
# + id="FVZgXl5X0sN7" executionInfo={"status": "ok", "timestamp": 1630941106180, "user_tz": 180, "elapsed": 2281, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}}
# Resolve server-side IDs for the experimental media, strain, vectors, and
# signals used by fj.analysis() calls below.
media_id = fj.get('media', name='M9-glycerol').id
strain_id = fj.get('strain', name='Top10').id
peda_id = fj.get('vector', name='pEDA').id
pbaa_id = fj.get('vector', name='pBAA').id
pbca_id = fj.get('vector', name='pBCA').id
paaa_id = fj.get('vector', name='pAAA').id
pgaa_id = fj.get('vector', name='pGAA').id
rfp_id = fj.get('signal', name='RFP').id
yfp_id = fj.get('signal', name='YFP').id
cfp_id = fj.get('signal', name='CFP').id
od_id = fj.get('signal', name='OD').id
# The study is located by a text search rather than an exact name.
study_id = fj.get('study', search='context').id
# + colab={"base_uri": "https://localhost:8080/"} id="V-SUHHQ60UTY" executionInfo={"status": "ok", "timestamp": 1630941251420, "user_tz": 180, "elapsed": 129028, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="b10a9024-aa04-49a0-f4d5-01c67cd41cb8"
# YFP expression rate via the direct method (no degradation, eps_L smoothing).
df_direct = fj.analysis(study=study_id,
                        media=media_id,
                        strain=strain_id,
                        signal=yfp_id,
                        type='Expression Rate (direct)',
                        degr=0,
                        eps_L=1e-5,
                        biomass_signal=od_id,
                        )
# + colab={"base_uri": "https://localhost:8080/"} id="v2bXAMHp0nN7" executionInfo={"status": "ok", "timestamp": 1630941392976, "user_tz": 180, "elapsed": 21985, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="2c563a81-48e4-47f5-bf0a-0407ad7491c5"
# Reference: RFP expression rate for pAAA via the inverse method
# (24-Gaussian basis, regularization eps).
df_ref = fj.analysis(study=study_id,
                     vector=paaa_id,
                     media=media_id,
                     strain=strain_id,
                     signal=rfp_id,
                     type='Expression Rate (inverse)',
                     degr=0,
                     eps=1e-2,
                     n_gaussians=24,
                     biomass_signal=od_id,
                     )
# + colab={"base_uri": "https://localhost:8080/"} id="q5vYMbyV01P2" executionInfo={"status": "ok", "timestamp": 1630941497436, "user_tz": 180, "elapsed": 16336, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="677b5534-0265-43e6-ae2e-1f9921071b17"
# RFP expression rate for pBAA via the inverse method (same settings as df_ref).
df = fj.analysis(study=study_id,
                 vector=pbaa_id,
                 media=media_id,
                 strain=strain_id,
                 signal=rfp_id,
                 type='Expression Rate (inverse)',
                 degr=0,
                 eps=1e-2,
                 n_gaussians=24,
                 biomass_signal=od_id,
                 )
# + colab={"base_uri": "https://localhost:8080/"} id="nnESSGIPsBg6" executionInfo={"status": "ok", "timestamp": 1630941594492, "user_tz": 180, "elapsed": 30537, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="caf8a2a4-c0db-4ebc-e902-ff9211c80505"
# YFP expression rate via the indirect method (pre-smoothing window of 11).
df_indirect = fj.analysis(study=study_id,
                          media=media_id,
                          strain=strain_id,
                          signal=yfp_id,
                          type='Expression Rate (indirect)',
                          pre_smoothing=11,
                          post_smoothing=0,
                          biomass_signal=od_id,
                          )
# + [markdown] id="245o7okT3JpG"
# # pAAA
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Uu5_AqCV07Wc" executionInfo={"status": "ok", "timestamp": 1630966343459, "user_tz": 180, "elapsed": 872234, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="98582802-9965-4f3e-aabe-37daebe8d04e"
# For every media/strain combination, estimate expression rates of the
# reference pAAA vector with all three methods (indirect, direct, inverse)
# and overlay them in a 2x2 grid of subplots (one per signal), saved to PNG.
medias = ['M9-glycerol', 'M9-glucose']
strains = ['MG1655z1', 'Top10']
for media in medias:
    for strain in strains:
        media_id = fj.get('media', name=media).id
        strain_id = fj.get('strain', name=strain).id
        # Indirect method: smoothing-based estimate.
        df_indirect = fj.analysis(
            media=media_id,
            study=study_id,
            strain=strain_id,
            vector=paaa_id,
            type='Expression Rate (indirect)',
            biomass_signal=od_id,
            pre_smoothing=11,
            post_smoothing=0,
            #bg_correction=2,
            #min_biomass=0.05,
            #remove_data=False
        )
        # Direct method, no degradation term.
        df_direct = fj.analysis(study=study_id,
                                vector=paaa_id,
                                media=media_id,
                                strain=strain_id,
                                type='Expression Rate (direct)',
                                degr=0,
                                eps_L=1e-5,
                                biomass_signal=od_id,
                                )
        # Inverse method with 24 Gaussian basis functions.
        df_inverse = fj.analysis(study=study_id,
                                 vector=paaa_id,
                                 media=media_id,
                                 strain=strain_id,
                                 type='Expression Rate (inverse)',
                                 degr=0,
                                 eps=1e-2,
                                 n_gaussians=24,
                                 biomass_signal=od_id,
                                 )
        signals = ['OD', 'RFP', 'YFP', 'CFP']
        titles = ['Growth', 'RFP', 'YFP', 'CFP']
        colors = ['k', 'r', 'g', 'b']
        w = 3.16 #3.3
        fig,axs = plt.subplots(2,2,figsize=(w, w* 0.75), sharex=True)
        # One subplot per signal; mean rate over replicates via groupby('Time').
        # NOTE(review): color_indirect/color_direct/color_inverse are not defined
        # in this cell -- presumably set earlier in the notebook; confirm before rerunning.
        for sig,ax,title,color in zip(signals, axs.ravel(), titles, colors):
            rfp_direct = df_direct[df_direct.Signal==sig].groupby('Time').mean().Rate
            t_direct = df_direct[df_direct.Signal==sig].groupby('Time').mean().index
            rfp_direct_std = df_direct[df_direct.Signal==sig].groupby('Time').std().Rate
            rfp_inverse = df_inverse[df_inverse.Signal==sig].groupby('Time').mean().Rate
            t_inverse = df_inverse[df_inverse.Signal==sig].groupby('Time').mean().index
            rfp_inverse_std = df_inverse[df_inverse.Signal==sig].groupby('Time').std().Rate
            rfp_indirect = df_indirect[df_indirect.Signal==sig].groupby('Time').mean().Rate
            t_indirect = df_indirect[df_indirect.Signal==sig].groupby('Time').mean().index
            ax.plot(rfp_indirect, color=color_indirect, linestyle='-', linewidth='0.5')
            ax.plot(rfp_direct, color=color_direct, linestyle='-', linewidth='0.5')
            #plt.fill_between(t_direct, rfp_direct-rfp_direct_std, rfp_direct+rfp_direct_std, color='red', alpha=0.2)
            ax.plot(rfp_inverse, color=color_inverse, linestyle='-', linewidth='0.5')
            #plt.fill_between(t_inverse, rfp_inverse-rfp_inverse_std, rfp_inverse+rfp_inverse_std, color='blue', alpha=0.2)
            #plt.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
            ax.set_xticks([0,12,24])
            ax.set_ylabel('Expr. rate (AU/h)')
            ax.set_ylim(-0.5, rfp_inverse.max()*1.2)
            #ax.set_title(title)
            ax.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
        #plt.suptitle(f'{media}, {strain}')
        axs[0,0].set_ylabel(r'Growth rate ($h^{-1}$)')
        axs[1,0].set_xlabel('Time (h)')
        axs[1,1].set_xlabel('Time (h)')
        #plt.legend(['Direct', 'Inverse'])
        plt.tight_layout()
        plt.subplots_adjust(top=0.9)
        plt.savefig(f'pAAA_{media}_{strain}_subplots.png', dpi=300)
# + colab={"base_uri": "https://localhost:8080/"} id="qHIrD9mxFTiZ" executionInfo={"status": "ok", "timestamp": 1630948136573, "user_tz": 180, "elapsed": 332, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}} outputId="4d7b58a7-2fc8-4d3f-8414-26058820d136"
# Quick sanity check: peak value of the last inverse-method rate computed above.
rfp_inverse.max()
# + [markdown] id="VQx61Qcn3NgS"
# #Context
# + id="T6N-zc7r1iN1"
# Promoter identity encoded by each letter position in the vector naming
# scheme (e.g. the 'B' in 'pBAA' denotes promoter J23106).
prom_map = dict(
    A='J23101',
    B='J23106',
    C='J23107',
    D='R0011',
    E='R0040',
    F='pLas81',
    G='pLux76',
)
# + [markdown] id="UaZxpyQz3Q0E"
# ## Direct YFP profiles
# + colab={"base_uri": "https://localhost:8080/", "height": 795} id="5zacg8Rz26Xd" executionInfo={"elapsed": 68691, "status": "error", "timestamp": 1622934212776, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}, "user_tz": 240} outputId="2998aaa0-ab8c-436c-c03c-c2af364d6964"
# Direct-method YFP expression-rate profiles: for each media/strain, plot each
# vector group's z-scored YFP rate against time aligned to its own peak growth
# rate, with the pAAA reference overlaid as a dashed black line.
yfp_vectors = [
    ['pBFA', 'pEFA', 'pGFA'],
    ['pBDA', 'pEDA', 'pGDA'],
    ['pBCA', 'pECA', 'pGCA'],
    ['pAAA', 'pBAA', 'pEAA', 'pGAA']
]
yfp_vector_ids = [[fj.get('vector', name=name).id[0] for name in vecs] for vecs in yfp_vectors]
yfp_id = fj.get('signal', name='YFP').id
medias = ['M9-glycerol', 'M9-glucose']
strains = ['Top10', 'MG1655z1']
# YFP figures
for media in medias:
    for strain in strains:
        print(media, strain)
        media_id = fj.get('media', name=media).id
        strain_id = fj.get('strain', name=strain).id
        # Reference YFP profile from the pAAA vector.
        df_ref = fj.analysis(vector=paaa_id,
                             media=media_id,
                             strain=strain_id,
                             signal=yfp_id,
                             type='Expression Rate (direct)',
                             degr=0,
                             eps_L=1e-5,
                             n_gaussians=24,
                             biomass_signal=od_id,
                             )
        # Reference growth-rate profile, used to find the reference peak time.
        df_ref_gr = fj.analysis(vector=paaa_id,
                                media=media_id,
                                strain=strain_id,
                                signal=od_id,
                                type='Expression Rate (direct)',
                                degr=0,
                                eps_L=1e-5,
                                n_gaussians=24,
                                biomass_signal=od_id)
        mdf_ref_gr = df_ref_gr.groupby('Time').mean()
        ref_grt = mdf_ref_gr.index
        ref_gr = mdf_ref_gr.Rate
        # Time of peak growth rate for the reference vector.
        ref_pk_idx = np.where(ref_gr==ref_gr.max())[0][0]
        ref_pk_time = ref_grt[ref_pk_idx]
        print('ref_pk_time ', ref_pk_time)
        for vi,vector_id in enumerate(yfp_vector_ids):
            df = fj.analysis(vector=vector_id,
                             media=media_id,
                             strain=strain_id,
                             signal=yfp_id,
                             type='Expression Rate (direct)',
                             degr=0,
                             eps_L=1e-5,
                             n_gaussians=24,
                             biomass_signal=od_id)
            plt.figure(figsize=(1.5,1.25))
            fname = '-'.join([media, strain, yfp_vectors[vi][0][2], '-direct-YFP.png'])
            for name,vec in df.groupby('Vector'):
                print(name)
                yfp = vec.groupby('Time').mean().Rate
                yfpt = vec.groupby('Time').mean().index
                # Per-vector growth rate, to align each trace to its own growth peak.
                df_gr = fj.analysis(vector=fj.get('vector', name=name).id,
                                    media=media_id,
                                    strain=strain_id,
                                    signal=od_id,
                                    type='Expression Rate (direct)',
                                    degr=0,
                                    eps_L=1e-5,
                                    n_gaussians=24,
                                    biomass_signal=od_id)
                mdf_gr = df_gr.groupby('Time').mean()
                grt = mdf_gr.index
                gr = mdf_gr.Rate
                pk_idx = np.where(gr==gr.max())[0][0]
                pk_time = grt[pk_idx]
                print(pk_time)
                # Plot z-scored rate vs time shifted so t=0 is peak growth.
                plt.plot(yfpt - pk_time, (yfp-yfp.mean()) / yfp.std(), linewidth=0.5)
            yfp_ref = df_ref.groupby('Time').mean().Rate
            tref = df_ref.groupby('Time').mean().index
            plt.plot(tref - ref_pk_time, (yfp_ref-yfp_ref.mean()) / yfp_ref.std(), 'k--', linewidth=0.5)
            plt.title(f'{media}, {strain}')
            #plt.legend([prom_map[vec[1]] for vec in yfp_vectors])
            plt.tight_layout()
            #fig = flapjack.layout_print(fig, width=1.5, height=1.25)
            #fig.update_yaxes(title='')
            #fig.update_xaxes(title='')
            #fig.layout.annotations[0].update(text=f'{media}, {strain}')
            #for vec in yfp_vectors[vi]:
            #    rfp_code = vec[1]
            #    fig.update_traces(name=prom_map[rfp_code], selector=dict(name=vec))
            #io.write_image(fig, fname)
            plt.savefig(fname, dpi=300)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="bw5l8oeQ3ZAd" executionInfo={"elapsed": 3330, "status": "ok", "timestamp": 1623003871393, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}, "user_tz": 240} outputId="f776b9f8-9222-49ad-d50f-45e10175dafb"
# Build standalone legend images for each YFP vector group: plot dummy points
# so matplotlib assigns its color cycle, then save a figure carrying only the legend.
yfp_vectors = [
    ['pBFA', 'pEFA', 'pGFA'],
    ['pBDA', 'pEDA', 'pGDA'],
    ['pBCA', 'pECA', 'pGCA'],
    ['pAAA', 'pBAA', 'pEAA', 'pGAA']]
for vectors in yfp_vectors:
    print(vectors)
    plt.figure()
    for v in vectors:
        plt.plot(0,0)
    # vec[1] is the letter encoding the promoter that varies within this group.
    plt.legend([prom_map[vec[1]] for vec in vectors])
    plt.savefig(f'legend-{vectors[0][2]}-YFP.png', dpi=300)
# + [markdown] id="gijsu05K3cr-"
# ## Direct RFP profiles
# + id="8vjjAm7g3cXS"
# Direct-method RFP expression-rate profiles, structured like the YFP section:
# z-scored rates aligned to each vector's growth-rate peak, pAAA reference dashed.
rfp_vectors = [
    ['pBAA', 'pBCA', 'pBDA', 'pBFA'],
    ['pEAA', 'pECA', 'pEDA', 'pEFA'],
    ['pGAA', 'pGCA', 'pGDA', 'pGEA', 'pGFA']
]
rfp_vector_ids = [[fj.get('vector', name=name).id[0] for name in vecs] for vecs in rfp_vectors]
rfp_id = fj.get('signal', name='RFP').id
medias = ['M9-glucose', 'M9-glycerol']
strains = ['MG1655z1', 'Top10']
# RFP figures
for media in medias:
    for strain in strains:
        print(media, strain)
        media_id = fj.get('media', name=media).id
        strain_id = fj.get('strain', name=strain).id
        # Reference RFP profile from the pAAA vector.
        df_ref = fj.analysis(vector=paaa_id,
                             media=media_id,
                             strain=strain_id,
                             signal=rfp_id,
                             type='Expression Rate (direct)',
                             degr=0,
                             eps_L=1e-5,
                             n_gaussians=24,
                             biomass_signal=od_id,
                             )
        # Reference growth rate, used to find the reference peak time.
        df_ref_gr = fj.analysis(vector=paaa_id,
                                media=media_id,
                                strain=strain_id,
                                signal=od_id,
                                type='Expression Rate (direct)',
                                degr=0,
                                eps_L=1e-5,
                                n_gaussians=24,
                                biomass_signal=od_id)
        mdf_ref_gr = df_ref_gr.groupby('Time').mean()
        ref_grt = mdf_ref_gr.index
        ref_gr = mdf_ref_gr.Rate
        ref_pk_idx = np.where(ref_gr==ref_gr.max())[0][0]
        ref_pk_time = ref_grt[ref_pk_idx]
        print('ref_pk_time ', ref_pk_time)
        for vi,vector_id in enumerate(rfp_vector_ids):
            df = fj.analysis(vector=vector_id,
                             media=media_id,
                             strain=strain_id,
                             signal=rfp_id,
                             type='Expression Rate (direct)',
                             degr=0,
                             eps_L=1e-5,
                             n_gaussians=24,
                             biomass_signal=od_id)
            plt.figure(figsize=(1.5,1.25))
            fname = '-'.join([media, strain, rfp_vectors[vi][0][1], '-direct-RFP.png'])
            for name,vec in df.groupby('Vector'):
                print(name)
                rfp = vec.groupby('Time').mean().Rate
                rfpt = vec.groupby('Time').mean().index
                # Per-vector growth rate, to align each trace to its own growth peak.
                df_gr = fj.analysis(vector=fj.get('vector', name=name).id,
                                    media=media_id,
                                    strain=strain_id,
                                    signal=od_id,
                                    type='Expression Rate (direct)',
                                    degr=0,
                                    eps_L=1e-5,
                                    n_gaussians=24,
                                    biomass_signal=od_id)
                mdf_gr = df_gr.groupby('Time').mean()
                grt = mdf_gr.index
                gr = mdf_gr.Rate
                pk_idx = np.where(gr==gr.max())[0][0]
                pk_time = grt[pk_idx]
                print(pk_time)
                plt.plot(rfpt - pk_time, (rfp-rfp.mean()) / rfp.std(), linewidth=0.5)
            rfp_ref = df_ref.groupby('Time').mean().Rate
            tref = df_ref.groupby('Time').mean().index
            plt.plot(tref - ref_pk_time, (rfp_ref-rfp_ref.mean()) / rfp_ref.std(), 'k--', linewidth=0.5)
            plt.title(f'{media}, {strain}')
            plt.tight_layout()
            #ax.set_ylim([0,1])
            #ax.set_xticks([0,12,24])
            #ax.set_yticks([0,0.5,1])
            #fig = flapjack.layout_print(fig, width=1.5, height=1.25)
            #fig.update_yaxes(title='')
            #fig.update_xaxes(title='')
            #fig.layout.annotations[0].update(text=f'{media}, {strain}')
            #for vec in yfp_vectors[vi]:
            #    rfp_code = vec[1]
            #    fig.update_traces(name=prom_map[rfp_code], selector=dict(name=vec))
            #io.write_image(fig, fname)
            plt.savefig(fname, dpi=300)
# + colab={"base_uri": "https://localhost:8080/", "height": 797} id="08LEWDA43itG" executionInfo={"elapsed": 2816, "status": "ok", "timestamp": 1623003975243, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}, "user_tz": 240} outputId="0f3abc00-b07c-4415-fa76-6a1a0413b69f"
# Standalone legend images for the RFP vector groups (same trick as the YFP legends).
rfp_vectors = [
    ['pBAA', 'pBCA', 'pBDA', 'pBFA'],
    ['pEAA', 'pECA', 'pEDA', 'pEFA'],
    ['pGAA', 'pGCA', 'pGDA', 'pGEA', 'pGFA']
]
for vectors in rfp_vectors:
    print(vectors)
    plt.figure()
    for v in vectors:
        plt.plot(0,0)
    # vec[2] is the letter encoding the promoter that varies within this group.
    plt.legend([prom_map[vec[2]] for vec in vectors])
    plt.savefig(f'legend-{vectors[0][1]}-RFP.png', dpi=300)
# + [markdown] id="lrqLhleZ4Tnb"
# ## Inverse YFP profiles
#
# Change direct to inverse, change eps_L for eps, did I need to change eps -3?
# + colab={"base_uri": "https://localhost:8080/", "height": 448} id="rlr44aJn4YDq" executionInfo={"elapsed": 344859, "status": "ok", "timestamp": 1625690214812, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gig9cbE0pKf7keFaX1J0hA6MJEStbDo3y7TVyhU=s64", "userId": "12749265851558648895"}, "user_tz": 240} outputId="80868bda-3670-4fef-e6a1-aa429d84f568"
# Inverse-method YFP profiles (only the 'F' group / M9-glycerol / Top10 are
# currently enabled). Requests YFP and CFP together and plots YFP normalized
# by the mean CFP rate, aligned to each vector's growth-rate peak.
yfp_vectors = [
    ['pBFA', 'pEFA', 'pGFA'],
    #['pBDA', 'pEDA', 'pGDA'],
    #['pBCA', 'pECA', 'pGCA'],
    #['pAAA', 'pBAA', 'pEAA', 'pGAA']
]
yfp_vector_ids = [[fj.get('vector', name=name).id[0] for name in vecs] for vecs in yfp_vectors]
yfp_id = fj.get('signal', name='YFP').id
medias = ['M9-glycerol'] #, 'M9-glucose']
strains = ['Top10'] #, 'MG1655z1']
# YFP figures
for media in medias:
    for strain in strains:
        print(media, strain)
        media_id = fj.get('media', name=media).id
        strain_id = fj.get('strain', name=strain).id
        # Reference YFP profile from the pAAA vector (inverse method).
        df_ref = fj.analysis(vector=paaa_id,
                             media=media_id,
                             strain=strain_id,
                             signal=yfp_id,
                             type='Expression Rate (inverse)',
                             degr=0,
                             eps=1e-2,
                             n_gaussians=24,
                             biomass_signal=od_id,
                             )
        # Reference growth rate, used to find the reference peak time.
        df_ref_gr = fj.analysis(vector=paaa_id,
                                media=media_id,
                                strain=strain_id,
                                signal=od_id,
                                type='Expression Rate (inverse)',
                                degr=0,
                                eps=1e-2,
                                n_gaussians=24,
                                biomass_signal=od_id)
        mdf_ref_gr = df_ref_gr.groupby('Time').mean()
        ref_grt = mdf_ref_gr.index
        ref_gr = mdf_ref_gr.Rate
        ref_pk_idx = np.where(ref_gr==ref_gr.max())[0][0]
        ref_pk_time = ref_grt[ref_pk_idx]
        print('ref_pk_time ', ref_pk_time)
        for vi,vector_id in enumerate(yfp_vector_ids):
            # NOTE(review): cfp_id is defined in a later cell in this file --
            # this cell assumes it already exists in the notebook session.
            df = fj.analysis(vector=vector_id,
                             media=media_id,
                             strain=strain_id,
                             signal=[yfp_id, cfp_id],
                             type='Expression Rate (inverse)',
                             degr=0,
                             eps=1e-2,
                             n_gaussians=24,
                             biomass_signal=od_id)
            plt.figure(figsize=(1.5,1.25))
            fname = '-'.join([media, strain, yfp_vectors[vi][0][2], '-inverse-YFP.png'])
            for name,vec in df.groupby('Vector'):
                print(name)
                yfp = vec[vec.Signal=='YFP'].groupby('Time').mean().Rate
                cfp = vec[vec.Signal=='CFP'].groupby('Time').mean().Rate
                yfpt = vec[vec.Signal=='YFP'].groupby('Time').mean().index
                # Per-vector growth rate, to align each trace to its own growth peak.
                df_gr = fj.analysis(vector=fj.get('vector', name=name).id,
                                    media=media_id,
                                    strain=strain_id,
                                    signal=od_id,
                                    type='Expression Rate (inverse)',
                                    degr=0,
                                    eps=1e-2,
                                    n_gaussians=24,
                                    biomass_signal=od_id)
                mdf_gr = df_gr.groupby('Time').mean()
                grt = mdf_gr.index
                gr = mdf_gr.Rate
                pk_idx = np.where(gr==gr.max())[0][0]
                pk_time = grt[pk_idx]
                print(pk_time)
                #plt.plot(yfpt - pk_time, (yfp-yfp.mean()) / yfp.std(), linewidth=0.5)
                # YFP rate normalized by the vector's mean CFP rate.
                plt.plot(yfpt - pk_time, yfp/cfp.mean(), linewidth=0.5)
            yfp_ref = df_ref.groupby('Time').mean().Rate
            tref = df_ref.groupby('Time').mean().index
            #plt.plot(tref - ref_pk_time, (yfp_ref-yfp_ref.mean()) / yfp_ref.std(), 'k--', linewidth=0.5)
            plt.title(f'{media}, {strain}')
            plt.tight_layout()
            #fig = flapjack.layout_print(fig, width=1.5, height=1.25)
            #fig.update_yaxes(title='')
            #fig.update_xaxes(title='')
            #fig.layout.annotations[0].update(text=f'{media}, {strain}')
            #for vec in yfp_vectors[vi]:
            #    rfp_code = vec[1]
            #    fig.update_traces(name=prom_map[rfp_code], selector=dict(name=vec))
            #io.write_image(fig, fname)
            plt.savefig(fname, dpi=300)
# + [markdown] id="MYUOFyCe5Me-"
# ## Inverse RFP profiles
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="5gttmeYO5L8i" executionInfo={"elapsed": 10235513, "status": "ok", "timestamp": 1622945228821, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gim15d6_3u_LuT5c6CPjzJmHwszeZE2sTEe0PVr=s64", "userId": "02830934807145008198"}, "user_tz": 240} outputId="d702e193-4c97-483d-9815-fedf8ec628b7"
# Inverse-method RFP profiles for all vector groups, z-scored and aligned to
# each vector's growth-rate peak, with the pAAA reference dashed.
# NOTE(review): eps=1e-5 here differs from the 1e-2 used in the other inverse
# cells -- the preceding markdown questions this value; confirm it is intended.
rfp_vectors = [
    ['pBAA', 'pBCA', 'pBDA', 'pBFA'],
    ['pEAA', 'pECA', 'pEDA', 'pEFA'],
    ['pGAA', 'pGCA', 'pGDA', 'pGEA', 'pGFA']
]
rfp_vector_ids = [[fj.get('vector', name=name).id[0] for name in vecs] for vecs in rfp_vectors]
rfp_id = fj.get('signal', name='RFP').id
medias = ['M9-glucose', 'M9-glycerol']
strains = ['MG1655z1', 'Top10']
# RFP figures
for media in medias:
    for strain in strains:
        print(media, strain)
        media_id = fj.get('media', name=media).id
        strain_id = fj.get('strain', name=strain).id
        # Reference RFP profile from the pAAA vector (inverse method).
        df_ref = fj.analysis(vector=paaa_id,
                             media=media_id,
                             strain=strain_id,
                             signal=rfp_id,
                             type='Expression Rate (inverse)',
                             degr=0,
                             eps=1e-5,
                             n_gaussians=24,
                             biomass_signal=od_id,
                             )
        # Reference growth rate, used to find the reference peak time.
        df_ref_gr = fj.analysis(vector=paaa_id,
                                media=media_id,
                                strain=strain_id,
                                signal=od_id,
                                type='Expression Rate (inverse)',
                                degr=0,
                                eps=1e-5,
                                n_gaussians=24,
                                biomass_signal=od_id)
        mdf_ref_gr = df_ref_gr.groupby('Time').mean()
        ref_grt = mdf_ref_gr.index
        ref_gr = mdf_ref_gr.Rate
        ref_pk_idx = np.where(ref_gr==ref_gr.max())[0][0]
        ref_pk_time = ref_grt[ref_pk_idx]
        print('ref_pk_time ', ref_pk_time)
        for vi,vector_id in enumerate(rfp_vector_ids):
            df = fj.analysis(vector=vector_id,
                             media=media_id,
                             strain=strain_id,
                             signal=rfp_id,
                             type='Expression Rate (inverse)',
                             degr=0,
                             eps=1e-5,
                             n_gaussians=24,
                             biomass_signal=od_id)
            plt.figure(figsize=(1.5,1.25))
            fname = '-'.join([media, strain, rfp_vectors[vi][0][1], '-inverse-RFP.png'])
            for name,vec in df.groupby('Vector'):
                print(name)
                rfp = vec.groupby('Time').mean().Rate
                rfpt = vec.groupby('Time').mean().index
                # Per-vector growth rate, to align each trace to its own growth peak.
                df_gr = fj.analysis(vector=fj.get('vector', name=name).id,
                                    media=media_id,
                                    strain=strain_id,
                                    signal=od_id,
                                    type='Expression Rate (inverse)',
                                    degr=0,
                                    eps=1e-5,
                                    n_gaussians=24,
                                    biomass_signal=od_id)
                mdf_gr = df_gr.groupby('Time').mean()
                grt = mdf_gr.index
                gr = mdf_gr.Rate
                pk_idx = np.where(gr==gr.max())[0][0]
                pk_time = grt[pk_idx]
                print(pk_time)
                plt.plot(rfpt - pk_time, (rfp-rfp.mean()) / rfp.std(), linewidth=0.5)
            rfp_ref = df_ref.groupby('Time').mean().Rate
            tref = df_ref.groupby('Time').mean().index
            plt.plot(tref - ref_pk_time, (rfp_ref-rfp_ref.mean()) / rfp_ref.std(), 'k--', linewidth=0.5)
            plt.title(f'{media}, {strain}')
            plt.tight_layout()
            #fig = flapjack.layout_print(fig, width=1.5, height=1.25)
            #fig.update_yaxes(title='')
            #fig.update_xaxes(title='')
            #fig.layout.annotations[0].update(text=f'{media}, {strain}')
            #for vec in yfp_vectors[vi]:
            #    rfp_code = vec[1]
            #    fig.update_traces(name=prom_map[rfp_code], selector=dict(name=vec))
            #io.write_image(fig, fname)
            plt.savefig(fname, dpi=300)
# + [markdown] id="2C-AOWMq3q1S"
# ## Inverse all CFP profiles
# + colab={"background_save": true, "base_uri": "https://localhost:8080/", "output_embedded_package_id": "1LLJfwl6-H97xdNL6PKuRONQBdRU6iMks"} id="Nj79gXDs3l38" outputId="eea2e2bf-fa30-41dd-8ea6-cc3dd13504b3"
# Inverse-method CFP profiles for every vector in the study, all overlaid in
# translucent blue per media/strain, z-scored and aligned to each vector's
# growth-rate peak.
medias = ['M9-glycerol','M9-glucose']
strains = ['Top10', 'MG1655z1']
cfp_id = fj.get('signal', name='CFP').id
for media in medias:
    for strain in strains:
        media_id = fj.get('media', name=media).id
        strain_id = fj.get('strain', name=strain).id
        df = fj.analysis(study=study_id,
                         signal=cfp_id,
                         media=media_id,
                         strain=strain_id,
                         type='Expression Rate (inverse)',
                         degr=0,
                         eps=1e-2,
                         n_gaussians=24,
                         biomass_signal=od_id)
        plt.figure(figsize=(1.5,1.25))
        for name,vec in df.groupby('Vector'):
            cfp = vec.groupby('Time').mean().Rate
            cfpt = vec.groupby('Time').mean().index
            # Per-vector growth rate, to align each trace to its own growth peak.
            df_gr = fj.analysis(vector=fj.get('vector', name=name).id,
                                media=media_id,
                                strain=strain_id,
                                signal=od_id,
                                type='Expression Rate (inverse)',
                                degr=0,
                                eps=1e-2,
                                n_gaussians=24,
                                biomass_signal=od_id)
            mdf_gr = df_gr.groupby('Time').mean()
            grt = mdf_gr.index
            gr = mdf_gr.Rate
            pk_idx = np.where(gr==gr.max())[0][0]
            pk_time = grt[pk_idx]
            print(pk_time)
            plt.plot(cfpt - pk_time, (cfp-cfp.mean()) / cfp.std(), linewidth=0.5, color='blue', alpha=0.2)
        plt.title(f'{media}, {strain}')
        plt.tight_layout()
        #fig = flapjack.layout_print(fig, width=1.5, height=1.25)
        #fig.update_traces(showlegend=False, line=dict(color='rgba(0, 0, 255, 0.2)'))
        #fig.update_yaxes(title='')
        #fig.update_xaxes(title='')
        #fig.layout.annotations[0].update(text=f'{media}, {strain}')
        # NOTE(review): the doubled assignment below is harmless but redundant.
        fname = fname = '-'.join([media, strain, 'CFP.png'])
        #io.write_image(fig, fname)
        plt.savefig(fname, dpi=300)
# + [markdown] id="0YD2i26KSBWv"
# ##Growth
# + id="IPkIwSwpSFV8"
# Growth-rate profiles per media/strain: plots one vector's growth rate
# (z-scored, peak-aligned) against the pAAA reference (dashed).
medias = ['M9-glycerol', 'M9-glucose']
strains = ['Top10', 'MG1655z1']
cfp_id = fj.get('signal', name='CFP').id
for media in medias:
    for strain in strains:
        media_id = fj.get('media', name=media).id
        strain_id = fj.get('strain', name=strain).id
        # Reference growth rate from the pAAA vector.
        df_ref_gr = fj.analysis(vector=paaa_id,
                                media=media_id,
                                strain=strain_id,
                                signal=od_id,
                                type='Expression Rate (inverse)',
                                degr=0,
                                eps=1e-2,
                                n_gaussians=24,
                                biomass_signal=od_id)
        mdf_ref_gr = df_ref_gr.groupby('Time').mean()
        ref_grt = mdf_ref_gr.index
        ref_gr = mdf_ref_gr.Rate
        ref_pk_idx = np.where(ref_gr==ref_gr.max())[0][0]
        ref_pk_time = ref_grt[ref_pk_idx]
        print('ref_pk_time ', ref_pk_time)
        #for vi,vector_id in enumerate(yfp_vector_ids):
        fname = '-'.join([media, strain, '-inverse-gr.png'])
        #for name,vec in df.groupby('Vector'):
        #print(name)
        # NOTE(review): the per-vector loop above is commented out, so `name`
        # here is a stale variable left over from a previous cell -- this call
        # analyses whichever vector was last iterated there. Confirm intent.
        df_gr = fj.analysis(vector=fj.get('vector', name=name).id,
                            media=media_id,
                            strain=strain_id,
                            signal=od_id,
                            type='Expression Rate (inverse)',
                            degr=0,
                            eps=1e-2,
                            n_gaussians=24,
                            biomass_signal=od_id)
        mdf_gr = df_gr.groupby('Time').mean()
        grt = mdf_gr.index
        gr = mdf_gr.Rate
        pk_idx = np.where(gr==gr.max())[0][0]
        pk_time = grt[pk_idx]
        print(pk_time)
        #yfp = vec.groupby('Time').mean().Rate
        #yfpt = vec.groupby('Time').mean().index
        yfp = df_gr.groupby('Time').mean().Rate
        yfpt = df_gr.groupby('Time').mean().index
        plt.plot(yfpt - pk_time, (yfp-yfp.mean()) / yfp.std(), linewidth=0.5)
        #yfp_ref = df_ref.groupby('Time').mean().Rate
        #tref = df_ref.groupby('Time').mean().index
        yfp_ref = df_ref_gr.groupby('Time').mean().Rate
        tref = df_ref_gr.groupby('Time').mean().index
        plt.plot(tref - ref_pk_time, (yfp_ref-yfp_ref.mean()) / yfp_ref.std(), 'k--', linewidth=0.5)
        plt.title(f'{media}, {strain}')
        plt.tight_layout()
        #fig = flapjack.layout_print(fig, width=1.5, height=1.25)
        #fig.update_yaxes(title='')
        #fig.update_xaxes(title='')
        #fig.layout.annotations[0].update(text=f'{media}, {strain}')
        #for vec in yfp_vectors[vi]:
        #    rfp_code = vec[1]
        #    fig.update_traces(name=prom_map[rfp_code], selector=dict(name=vec))
        #io.write_image(fig, fname)
        plt.savefig(fname, dpi=300)
|
Code/Fig_4_5_experimental_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/abysee/OpenUE/blob/main/OpenUE_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="I2yJb0NxolBB" outputId="1239febf-10fb-42b2-d409-c0ab5f0eb352"
# ! nvidia-smi
# ! pip install openue
# ! git clone https://github.com/zjunlp/OpenUE.git
# ! pip install pytorch_lightning==1.3.1
# + [markdown] id="-cyHCOGDyuT5"
#
# + id="ibWdrf_ZpDkX"
import argparse
import importlib
import numpy as np
import torch
import pytorch_lightning as pl
import openue.lit_models as lit_models
import yaml
import time
from transformers import AutoConfig
import os
# Silence the HuggingFace tokenizers fork-parallelism warning (and avoid the
# associated deadlock when DataLoader workers fork after tokenizer use).
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# + id="VwfUoPkApKpj"
# Set up arguments and dynamically import the required classes
def _import_class(module_and_class_name: str) -> type:
module_name, class_name = module_and_class_name.rsplit(".", 1)
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_
def _setup_parser():
    """Build the ArgumentParser carrying data, model, and LitModel arguments."""
    parser = argparse.ArgumentParser(add_help=False)

    # Core options shared by every run.
    parser.add_argument("--wandb", action="store_true", default=False)
    parser.add_argument("--seed", type=int, default=42)
    for flag, default in (
        ("--litmodel_class", "SEQLitModel"),
        ("--data_class", "REDataset"),
        ("--model_class", "BertForRelationClassification"),
        ("--load_checkpoint", None),
    ):
        parser.add_argument(flag, type=str, default=default)

    # Peek at the chosen class names so each class can register its own args.
    known, _ = parser.parse_known_args()
    data_cls = _import_class(f"openue.data.{known.data_class}")
    model_cls = _import_class(f"openue.models.{known.model_class}")

    data_cls.add_to_argparse(parser.add_argument_group("Data Args"))
    model_cls.add_to_argparse(parser.add_argument_group("Model Args"))
    lit_models.BaseLitModel.add_to_argparse(parser.add_argument_group("LitModel Args"))

    parser.add_argument("--help", "-h", action="help")
    return parser
def _save_model(litmodel, tokenizer, path):
os.system(f"mkdir -p {path}")
litmodel.model.save_pretrained(path)
tokenizer.save_pretrained(path)
# + colab={"base_uri": "https://localhost:8080/", "height": 954, "referenced_widgets": ["<KEY>", "4bee809f7021400bbeace14ef5d4e8d8", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "0b4ef71a312f4215ab1ab75a3e85ae87", "<KEY>", "<KEY>", "<KEY>", "f52183e10a73483a82c05930329aa1a6", "f5bb4079310d424fac6ebef687c22df8", "<KEY>", "8146387b34e7445b992db73eb617e5b7", "<KEY>", "<KEY>", "<KEY>", "4dd53dc095964c67817fe6d4b76e0f1e", "31704e4759f045498f6d3430a488004b", "2219f59d2a574f4abb4d2ef3554b5437", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "c07a1e1b636d4c8caf657a589becd2a7", "<KEY>", "5993da0bf4c044959979ee47462b4396", "788511da2aab4424ad3e42c35d883660", "b4a6d337d73341a3b5ea049a7796c7b8", "3cba5778ce2e4ac1ac9d47d90f6d5236", "1d4d72080fde4f48b7e829f2f23beb86", "f3546ed5bea34f7888a20fa12ba93c10", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "7e83c9ae52a648e6b8a8d16fad340fde", "2b6fe822e67b4a8dab971ee91d37e252", "c0e06fcda1cf47e1afaede89494bb4d7", "d8263be61eda425bb2e3991065fb8e5f", "8112cce4377a44238d587c4338dcf2f4", "56d67eea7f7249fa879151861be24ea2", "<KEY>"]} id="YyM8i5RRpxHC" outputId="63476a8d-468c-46ea-8c34-2afbfe413daa"
# Train and evaluate the sequence-classification (relation) stage of OpenUE.
parser = _setup_parser()
args = parser.parse_args(args=[])
path = "OpenUE/config/run_seq.yaml"
# Load hyperparameter settings from the config YAML and overlay them on args.
with open(path) as f:  # fixed: close the config file instead of leaking the handle
    opt = yaml.load(f, Loader=yaml.FullLoader)
args.__dict__.update(opt)
# Seed numpy and torch for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
data_class = _import_class(f"openue.data.{args.data_class}")
model_class = _import_class(f"openue.models.{args.model_class}")
litmodel_class = _import_class(f"openue.lit_models.{args.litmodel_class}")
data = data_class(args)
lit_model = litmodel_class(args=args, data_config=data.get_config())
# Default to TensorBoard logging; switch to Weights & Biases when --wandb is set.
logger = pl.loggers.TensorBoardLogger("/logs")
if args.wandb:
    logger = pl.loggers.WandbLogger(project="openue demo")
    logger.log_hyperparams(vars(args))
# Stop early and checkpoint on the evaluation F1 score.
early_callback = pl.callbacks.EarlyStopping(monitor="Eval/f1", mode="max", patience=5)
model_checkpoint = pl.callbacks.ModelCheckpoint(monitor="Eval/f1", mode="max",
                                                filename='{epoch}-{Eval/f1:.2f}',
                                                dirpath="output",
                                                save_weights_only=True
                                                )
callbacks = [early_callback, model_checkpoint]
trainer = pl.Trainer.from_argparse_args(args, callbacks=callbacks, logger=logger,
                                        default_root_dir="training/logs")
trainer.fit(lit_model, datamodule=data)
trainer.test(lit_model, datamodule=data)
# _save_model(litmodel=lit_model, tokenizer=data.tokenizer, path="seq_model")
# + id="-SY84hjtewXO" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["cabf6990e5694da98ef968232fcbb759", "73366d47c61f47b390ae111e9ca3ba62", "a1b94ec1cc0a4b34823deabc3f7327e1", "1f5f26f5defd44c783079687aef2f5c2", "d5b37bdae61e4081b9efc53fd3d93505", "654b691118214439ba96df57e485875d", "19c809914f574671bef1213e3d57b044", "67020172b30c433e9a0be1c5a5bb6e4d", "<KEY>", "4fb5b3958845406d8fac8d2287fed50a", "1db8f658e8214b81b7e1245df77f7b7b", "00066c3143114d64952f11f488e21493", "dd9931ade56c4301b3e5638a9e4747c6", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "c45ac1ec83f84c119a62274c7ca56452", "25ee4f8e6dba4f389ee926121020fbd3", "de9931d7c3e547c88104cfdcd9645905", "<KEY>", "<KEY>", "16466199a0b64396a11a4467890f1a4f", "<KEY>", "2b373b60777a47c29d704de4d2436764", "c56fd91a93f54fd48d48374a2d8682af", "<KEY>", "9f952514a1aa4820be8d4aca01c6eb67", "<KEY>", "<KEY>", "4e92c4902d5f43e6969429337a5787db", "<KEY>", "6e18c978738e4a63a51a23f2ab9d1267"]} outputId="9cf4f43b-cb0b-4186-e578-b8a6493ed252"
# Train and evaluate the NER (entity) stage of OpenUE; mirrors the seq cell above.
parser = _setup_parser()
args = parser.parse_args(args=[])
path = "OpenUE/config/run_ner.yaml"
# Load hyperparameter settings from the config YAML and overlay them on args.
with open(path) as f:  # fixed: close the config file instead of leaking the handle
    opt = yaml.load(f, Loader=yaml.FullLoader)
args.__dict__.update(opt)
# Seed numpy and torch for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
data_class = _import_class(f"openue.data.{args.data_class}")
model_class = _import_class(f"openue.models.{args.model_class}")
litmodel_class = _import_class(f"openue.lit_models.{args.litmodel_class}")
data = data_class(args)
lit_model = litmodel_class(args=args, data_config=data.get_config())
# Default to TensorBoard logging; switch to Weights & Biases when --wandb is set.
logger = pl.loggers.TensorBoardLogger("/logs")
if args.wandb:
    logger = pl.loggers.WandbLogger(project="openue demo")
    logger.log_hyperparams(vars(args))
# Stop early and checkpoint on the evaluation F1 score.
early_callback = pl.callbacks.EarlyStopping(monitor="Eval/f1", mode="max", patience=5)
model_checkpoint = pl.callbacks.ModelCheckpoint(monitor="Eval/f1", mode="max",
                                                filename='{epoch}-{Eval/f1:.2f}',
                                                dirpath="output",
                                                save_weights_only=True
                                                )
callbacks = [early_callback, model_checkpoint]
trainer = pl.Trainer.from_argparse_args(args, callbacks=callbacks, logger=logger,
                                        default_root_dir="training/logs")
trainer.fit(lit_model, datamodule=data)
trainer.test(lit_model, datamodule=data)
# _save_model(litmodel=lit_model, tokenizer=data.tokenizer, path="seq_model")
# + id="JypyW-MjuohO" colab={"base_uri": "https://localhost:8080/"} outputId="a009b399-800f-450a-854a-40cff9d6eb1d"
# Display the final merged argument namespace for inspection.
args
# + id="Zl7BfmY3uo9f"
|
OpenUE_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### ISM5935 - Fundamentals of Data Visualization
# --------------------------
# # Class Demonstration:<br> <span style="color:#960018">*Pandas* </span>
# ## <span style="color:#960018">PART 1: Importing necessary libraries</span>
# ______________________
# %config IPCompleter.greedy=True
import pandas as pd
# The cell below adjusts Jupyter notebook display settings. These commands make the notebook wider and utilizes more space on the screen. This is probably only useful on desktop settings.
from IPython.core.display import display, HTML
# Widen the notebook: the CSS selector was garbled ("ner"); the standard
# snippet targets Jupyter's ".container" element.
display(HTML("<style>.container {width:90% !important;}</style>"))
# Show up to 50 columns but only 10 rows when rendering DataFrames.
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_rows', 10)
# ## <span style="color:#960018">Part 2: Loading data </span>
# ____________________________________________
# Load the World Happiness dataset (one row per country-year, with a Continent column).
data = pd.read_csv('happiness_with_continent.csv')
# The cell above loads comma-separated-values, there are other load functions: go to (https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html)
# <img src="read_functions.jpg"
# alt="missing data" height = "800" width = "800"
# align = "midel" />
#
# ## <span style="color:#960018">PART 3: Basic Inspection of Dataset</span>
# ________________________________
data.shape #RETURNS THE DIMENSIONS OF THE DATAFRAME (ROWS, COLUMNS)
data.head(10)  # first 10 rows
# ### <span style="color:#960018">Inspecting the data</span>
data.columns #RETURNS A LIST OF ALL COLUMN NAMES IN THE DATAFRAME
data.index #RETURNS THE 'INTERNAL' ROW NAMES/NUMBERS OF THE DATAFRAME
data.info() #THIS GIVES US INFORMATION ABOUT THE COLUMN DATA: THE 'TYPE' AND THE PRESENCE/ABSENCE OF 'NULL-VALUES'
data.describe() #THIS RETURNS DESCRIPTIVE STATISTICAL INFORMATION ABOUT THE NUMERICAL VARIABLES IN THE DATAFRAME
data.describe().index  # the statistic names (count, mean, std, min, quartiles, max)
# ## <span style="color:#960018">Part 4: Sorting the data</span>
# ______________________
data.sort_values(by='Country name', ascending=True)
# NOTE(review): the previous comment claimed this line errors; it works because
# 'by' IS supplied. A bare data.sort_values() with no 'by' would raise instead.
data.sort_values(by='Year') #THIS SORTS BY YEAR. THE DEFAULT IS ASCENDING: (ascending=True)
data.head(10)  # note: sort_values returns a copy; `data` itself is unchanged
data.sort_values(by=['Country name','Year']).head(5)
# Mixed directions: country descending, year ascending within each country.
data.sort_values(by=['Country name','Year'], ascending=[False,True]).head(5)
data.sort_index(ascending=False).head(5)
# ## <span style="color:#960018">Part 5: Filtering the data</span>
# _________________________________
data.Year.head(5)
data['Year'].head(5) #THIS IS PREFERABLE TO THE PREVIOUS COMMAND, ALTHOUGH THEY PRODUCE THE SAME RESULTS. WHY IS THIS PREFERABLE? TRY TO EXECUTE THE NEXT COMMAND
# Single brackets with a tuple of names is NOT column selection -- pandas looks
# for one column literally named ('Country name', 'Life Ladder') and raises KeyError.
data['Country name','Life Ladder']
# THIS SHOULD FAIL
data[['Country name','Life Ladder']]  # double brackets: list of columns -> DataFrame
# syntax of `iloc` is `data.iloc[start_row:end_row (,start_col:end_coll)]`
data.sort_values(by='Year').iloc[0]  # positionally first row of the sorted copy
data.iloc[10,3]  # row 10, column 3 (both zero-based positions)
data.iloc[903:907]
data.iloc[903:907,0:3]
data.set_index('Country name',inplace=True)  # country becomes the row index from here on
data.sample(5) #Return a random sample of items from an axis of object.
data.index
# Syntax for `loc`: `data.loc[index_label (,col_label)]`
data.loc['United States']
data.loc['United States','Life Ladder']
# loc slices are label-based and INCLUSIVE of both endpoints.
data.loc['Denmark':'Germany', ['Year','Life Ladder']].sample(5)
# ## <span style="color:#960018">Part 6: Filtering on boolean values</span>
# ______________________
# +
# Option 1
# data.loc[data['Life Ladder'] > 4]
# Option 2
# condition = data['Life Ladder'] > 4
# data.loc[condition]
# Option 3: store the boolean mask, then index with it.
# Fixed two defects: the variable name ('Condition') did not match the usage
# below ('condition' -> NameError), and it held an already-filtered DataFrame
# rather than the boolean Series that .loc expects.
condition = data['Life Ladder'] > 4
data.loc[condition]
# +
# Combine several masks element-wise with & (each mask parenthesized/precomputed).
life_condition = data['Life Ladder'] > 4
year_condition = data['Year'] > 2014
gen_condition = data['Generosity'] > .2
data.loc[life_condition & year_condition & gen_condition]
# +
foo = 4
# Fixed: '|' binds tighter than comparisons, so `foo < 6 | foo > 8` parsed as
# `foo < (6 | foo) > 8`. Each comparison must be parenthesized around '|'.
(foo < 6) | (foo > 8)
# -
# ## <span style="color:#960018">Part 7: Summarizing the Data</span>
# ________________
# Column-wise aggregations over the whole DataFrame.
data.max()
data.sum()
data.mean()
#data['Life Ladder'].mean()
data.median()
# Element-wise arithmetic between columns returns a new Series.
data['Year'] / data['Life Ladder']
# String concatenation requires both operands to be strings, hence astype(str).
data['Continent'] + '_' + data['Year'].astype(str)
data.head()
data.std()
# ## <span style="color:#960018">Part 8: Groupby</span>
#
# ________________
# ### <span style="color:#960018">Split</span>
# NOTE(review): missing parentheses -- this is attribute access, not a call,
# so it does nothing. Probably intended data.reset_index(inplace=True); the
# groupby below still works because pandas can group by the index level name.
data.reset_index
by_country=data.groupby('Country name')
print(by_country)  # a lazy GroupBy object; nothing is computed yet
# To view the unique groups for which the data has been split, you can use the `.groups` function
#
# "The result [will show] that the object is just a dictionary containing the group as the key, and the values of that group as the value."
by_country.groups
# +
by_continent = data.groupby('Continent')
print(by_continent)
by_continent.groups
# -
# ### <span style="color:#960018">Apply</span>
# Aggregate one column per group.
by_country['Life Ladder'].mean()
by_continent['Life Ladder'].mean()
# ### <span style="color:#960018">Combine</span>
# Wrap the per-group means in DataFrames and rank them.
Country_Avg_LifeLadder = pd.DataFrame(by_country['Life Ladder'].mean())
continent_avg_lifeladder = pd.DataFrame(by_continent['Life Ladder'].mean())
Country_Avg_LifeLadder.sort_values(ascending=False, by='Life Ladder', inplace=True)
continent_avg_lifeladder.sort_values(ascending=False, by="Life Ladder", inplace=True)
Country_Avg_LifeLadder.head(5) #Top five countries with highest mean Life Ladder
continent_avg_lifeladder.head(5)
Country_Avg_LifeLadder.tail(5)  # bottom five countries
continent_avg_lifeladder.tail(5)
|
pandas review 2/Pandas Class Demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dual CRISPR Screen Analysis
# # Count Plots
# <NAME>, CCBB, UCSD (<EMAIL>)
# ## Instructions
#
# To run this notebook reproducibly, follow these steps:
# 1. Click **Kernel** > **Restart & Clear Output**
# 2. When prompted, click the red **Restart & clear all outputs** button
# 3. Fill in the values for your analysis for each of the variables in the [Input Parameters](#input-parameters) section
# 4. Click **Cell** > **Run All**
# <a name = "input-parameters"></a>
#
# ## Input Parameters
# Run configuration.  Empty strings mean "derive automatically in the
# Automated Set-Up section below" (see the check_or_set cascade there).
g_timestamp = ""
g_dataset_name = "20160510_A549"
g_count_alg_name = "19mer_1mm_py"
g_fastq_counts_dir = '/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/data/interim/20160510_D00611_0278_BHK55CBCXX_A549'
g_fastq_counts_run_prefix = "19mer_1mm_py_20160615223822"
g_collapsed_counts_dir = "/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/data/processed/20160510_A549"
g_collapsed_counts_run_prefix = "20160510_A549_19mer_1mm_py_20160616101309"
g_combined_counts_dir = ""
g_combined_counts_run_prefix = ""
g_plots_dir = ""
g_plots_run_prefix = ""
# Directory added to sys.path so the ccbbucsd packages can be imported.
g_code_location = "/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python"
# ## Matplotlib Display
# %matplotlib inline
# ## CCBB Library Imports
import sys
sys.path.append(g_code_location)
# ## Automated Set-Up
# # %load -s describe_var_list /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/utilities/analysis_run_prefixes.py
def describe_var_list(input_var_name_list):
    """Return one newline-terminated "name: value" line per variable name.

    Each name is resolved with eval() against this module's globals, so the
    listed variables must already be defined at (notebook) module level.
    """
    lines = []
    for var_name in input_var_name_list:
        lines.append("{0}: {1}\n".format(var_name, eval(var_name)))
    return "".join(lines)
from ccbbucsd.utilities.analysis_run_prefixes import check_or_set, get_run_prefix, get_timestamp
# Cascade defaults: parameters left empty above fall back along the chain
# fastq -> collapsed -> combined -> plots.  (check_or_set presumably keeps a
# non-empty value and substitutes the default otherwise -- confirm in
# analysis_run_prefixes.py.)
g_timestamp = check_or_set(g_timestamp, get_timestamp())
g_collapsed_counts_dir = check_or_set(g_collapsed_counts_dir, g_fastq_counts_dir)
g_collapsed_counts_run_prefix = check_or_set(g_collapsed_counts_run_prefix, g_fastq_counts_run_prefix)
g_combined_counts_dir = check_or_set(g_combined_counts_dir, g_collapsed_counts_dir)
g_combined_counts_run_prefix = check_or_set(g_combined_counts_run_prefix, g_collapsed_counts_run_prefix)
g_plots_dir = check_or_set(g_plots_dir, g_combined_counts_dir)
g_plots_run_prefix = check_or_set(g_plots_run_prefix,
                                  get_run_prefix(g_dataset_name, g_count_alg_name, g_timestamp))
# Echo the resolved settings for the notebook record.
print(describe_var_list(['g_timestamp','g_collapsed_counts_dir', 'g_collapsed_counts_run_prefix',
                         'g_combined_counts_dir', 'g_combined_counts_run_prefix', 'g_plots_dir',
                         'g_plots_run_prefix']))
from ccbbucsd.utilities.files_and_paths import verify_or_make_dir
# Ensure every output directory exists before anything is written.
verify_or_make_dir(g_collapsed_counts_dir)
verify_or_make_dir(g_combined_counts_dir)
verify_or_make_dir(g_plots_dir)
# ## Count File Suffixes
# # %load -s get_counts_file_suffix /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/malicrispr/construct_counter.py
def get_counts_file_suffix():
    """Return the filename suffix used for per-fastq construct count files."""
    return "counts.txt"
# +
# # %load -s get_collapsed_counts_file_suffix,get_combined_counts_file_suffix /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/malicrispr/count_combination.py
def get_collapsed_counts_file_suffix():
    """Return the filename suffix used for per-sample collapsed count files."""
    return "collapsed.txt"
def get_combined_counts_file_suffix():
    """Return the filename suffix used for the all-samples combined count file."""
    return "counts_combined.txt"
# -
# ## Count Plots Functions
# +
# # %load /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/malicrispr/count_plots.py
# third-party libraries
import matplotlib.pyplot
import numpy
import pandas
# ccbb libraries
from ccbbucsd.utilities.analysis_run_prefixes import strip_run_prefix
from ccbbucsd.utilities.files_and_paths import build_multipart_fp, get_file_name_pieces, get_filepaths_by_prefix_and_suffix
# project-specific libraries
from ccbbucsd.malicrispr.count_files_and_dataframes import get_counts_df
# Module metadata for the inlined count_plots.py source.
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "prototype"
# Added to every count before log2 so zero counts stay finite (log2(0 + 1) == 0).
DEFAULT_PSEUDOCOUNT = 1
def get_boxplot_suffix():
    """Return the filename suffix used for generated boxplot images."""
    return "boxplots.png"
def make_log2_series(input_series, pseudocount_val):
    """Return log2(input_series + pseudocount_val) with infinities/NaNs removed.

    The result is reindexed from zero because matplotlib's boxplot function
    (perhaps among others) throws an error if the input series doesn't include
    an item with index 0 -- which can happen if that first item became NaN,
    was dropped, and the series wasn't reindexed.
    """
    shifted_series = input_series + pseudocount_val
    logged_series = shifted_series.apply(numpy.log2)
    finite_series = logged_series.replace([numpy.inf, -numpy.inf], numpy.nan).dropna()
    return finite_series.reset_index(drop=True)
def show_and_save_histogram(output_fp, title, count_data):
    """Render a histogram of log2 raw counts, save it to output_fp, and display it."""
    plot = matplotlib.pyplot
    plot.figure(figsize=(20, 20))
    plot.hist(count_data)
    plot.title(title)
    plot.xlabel("log2(raw counts)")
    plot.ylabel("Frequency")
    # Save before show: show() clears the current figure in inline backends.
    plot.savefig(output_fp)
    plot.show()
def show_and_save_boxplot(output_fp, title, samples_names, samples_data, rotation_val=0):
    """Draw one box per sample of log2 raw counts, save to output_fp, then display.

    rotation_val rotates the x tick labels (useful for many/long sample names).
    """
    plot = matplotlib.pyplot
    figure = plot.figure(1, figsize=(20, 20))
    axes = figure.add_subplot(111)
    axes.boxplot(samples_data)
    axes.set_xticklabels(samples_names, rotation=rotation_val)
    axes.set_xlabel("samples")
    axes.set_ylabel("log2(raw counts)")
    plot.title(title)
    figure.savefig(output_fp, bbox_inches='tight')
    plot.show()
def plot_raw_counts(input_dir, input_run_prefix, counts_suffix, output_dir, output_run_prefix, boxplot_suffix):
    """Make one boxplot and one histogram of log2 counts for each counts file of a run.

    Every file in input_dir matching input_run_prefix and counts_suffix is loaded,
    its count column is renamed to the run-prefix-free sample name, log2-transformed
    with DEFAULT_PSEUDOCOUNT, and written to <sample>_<input_run_prefix>_<suffix>
    image files in output_dir.

    NOTE(review): output_run_prefix is accepted but never used -- output names are
    built from input_run_prefix instead; confirm that is intended.
    """
    counts_fps_for_run = get_filepaths_by_prefix_and_suffix(input_dir, input_run_prefix, counts_suffix)
    for curr_counts_fp in counts_fps_for_run:
        _, curr_sample, _ = get_file_name_pieces(curr_counts_fp)
        stripped_sample = strip_run_prefix(curr_sample, input_run_prefix)
        count_header, curr_counts_df = get_counts_df(curr_counts_fp, input_run_prefix)
        # Rename the count column so plot labels show the bare sample name.
        curr_counts_df.rename(columns={count_header:stripped_sample}, inplace=True)
        count_header = stripped_sample
        log2_series = make_log2_series(curr_counts_df[count_header], DEFAULT_PSEUDOCOUNT)
        title = " ".join([input_run_prefix, count_header, "with pseudocount", str(DEFAULT_PSEUDOCOUNT)])
        output_fp_prefix = build_multipart_fp(output_dir, [count_header, input_run_prefix])
        boxplot_fp = output_fp_prefix + "_" + boxplot_suffix
        show_and_save_boxplot(boxplot_fp, title, [count_header], log2_series)
        hist_fp = output_fp_prefix + "_" + "hist.png"
        show_and_save_histogram(hist_fp, title, log2_series)
def plot_combined_raw_counts(input_dir, input_run_prefix, combined_suffix, output_dir, output_run_prefix, boxplot_suffix):
    """Draw one boxplot of log2 counts per sample column of the combined counts file.

    Reads <input_run_prefix>_<combined_suffix> from input_dir and writes
    <output_run_prefix>_<boxplot_suffix> to output_dir, with x labels rotated 90
    degrees to fit all sample names.
    """
    output_fp = build_multipart_fp(output_dir, [output_run_prefix, boxplot_suffix])
    combined_counts_fp = build_multipart_fp(input_dir, [input_run_prefix, combined_suffix])
    combined_counts_df = pandas.read_table(combined_counts_fp)
    # First column is assumed to be the construct id; the rest are samples.
    samples_names = combined_counts_df.columns.values[1:]  # TODO: remove hardcode
    samples_data = [
        make_log2_series(combined_counts_df[sample_name], DEFAULT_PSEUDOCOUNT).tolist()
        for sample_name in samples_names
    ]
    title = " ".join([input_run_prefix, "all samples", "with pseudocount", str(DEFAULT_PSEUDOCOUNT)])
    show_and_save_boxplot(output_fp, title, samples_names, samples_data, 90)
# -
# ## Individual fastq Plots
from ccbbucsd.utilities.files_and_paths import summarize_filenames_for_prefix_and_suffix
# List the raw per-fastq count files that will be plotted below.
print(summarize_filenames_for_prefix_and_suffix(g_fastq_counts_dir, g_fastq_counts_run_prefix, get_counts_file_suffix()))
# this call makes one boxplot per raw fastq
plot_raw_counts(g_fastq_counts_dir, g_fastq_counts_run_prefix, get_counts_file_suffix(), g_plots_dir,
                g_plots_run_prefix, get_boxplot_suffix())
# ## Individual Sample Plots
print(summarize_filenames_for_prefix_and_suffix(g_collapsed_counts_dir, g_collapsed_counts_run_prefix,
                                                get_collapsed_counts_file_suffix()))
# One boxplot/histogram per collapsed (per-sample) counts file.
plot_raw_counts(g_collapsed_counts_dir, g_collapsed_counts_run_prefix, get_collapsed_counts_file_suffix(),
                g_plots_dir, g_plots_run_prefix, get_boxplot_suffix())
# ## Combined Samples Plots
print(summarize_filenames_for_prefix_and_suffix(g_combined_counts_dir, g_combined_counts_run_prefix,
                                                get_combined_counts_file_suffix()))
# One figure containing all samples side by side.
plot_combined_raw_counts(g_combined_counts_dir, g_combined_counts_run_prefix, get_combined_counts_file_suffix(),
                         g_plots_dir, g_plots_run_prefix, get_boxplot_suffix())
|
notebooks/crispr/Dual CRISPR 5-Count Plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ODYM Example no. 1. System with two processes, two parameters, one material.
#
# A simple MFA system with one material (represented by the indicator element carbon 'C'), a time horizon of 30 years [1980-2010], two processes, and a time-dependent parameter is analysed.
#
# <img src="Images/MFAExample1.png" width="404" height="490" alt="Simple MFA system">
#
#
# The model equations are as follows:
# + $a(t) = D(t)$ (exogenous input flow)
# + $d(t) = \alpha (t)\cdot b(t)$ (recovery efficiency parameter)
# + $a(t) +d(t) = b(t) $ (mass balance process 1)
# + $b(t) = c(t) + d(t) $ (mass balance process 2)
#
# From these equations the system solution follows:
# + $c(t) = a(t) = D(t)$
# + $b(t) = \frac{1}{1-\alpha (t)}\cdot D(t)$
# + $c(t) = \frac{\alpha}{1-\alpha (t)}\cdot D(t)$
#
# We will now program this solution into ODYM. That is overkill, as ODYM was developed for handling much more complex MFA systems, but instructive.
#
#
# ### 1) Load ODYM
#
# +
# Load a local copy of the current ODYM branch:
import sys, os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
# For Ipython Notebook only
# %matplotlib inline
sys.path.insert(0, os.path.join('..', 'odym', 'modules')) # add ODYM module directory to system path, relative
sys.path.insert(0, os.path.join(os.getcwd(),'..', 'odym', 'modules')) # add ODYM module directory to system path, absolute
import ODYM_Classes as msc # import the ODYM class file
import ODYM_Functions as msf # import the ODYM function file
import dynamic_stock_model as dsm # import the dynamic stock model library
# -
# ### 2) Define MFA system
# With the model imported, we can now set up the system definition. The 'classical' elements of the system definition in MFA include: The processes, flows, and stocks, the material, the region, and the time frame studied. Next to these elements, ODYM features/requires the following elements to be specified:
# + The list of chemical elements considered
# + The classification(s) of the system variables (stocks and flows): Which materials, products, regions, or waste groups are considered?
# + An index letter to quickly/directly access a model aspect.
# + A dictionary of model parameters
#
# For all these items ODYM has a specific structure, which is used below.
#
# First, we define a classification of all objects flowing. In this example we conduct a 'classical' dynamic MFA with just one material/chemical element/indicator element considered, and our classification can therefore be as simple as possible: it contains only one chemical element, in this case, we choose carbon ('C'):
# +
ModelClassification = {} # Create dictionary of model classifications
ModelClassification['Time'] = msc.Classification(Name = 'Time', Dimension = 'Time', ID = 1,
                                                 Items = list(np.arange(1980,2011)))
# Classification for time labelled 'Time' must always be present,
# with Items containing a list of ordered integers representing years, months, or other discrete time intervals
ModelClassification['Element'] = msc.Classification(Name = 'Elements', Dimension = 'Element', ID = 2,
                                                    Items = ['C'])
# Classification for elements labelled 'Element' must always be present,
# with Items containing a list of the symbols of the elements covered.
# Get model time start, end, and duration:
Model_Time_Start = int(min(ModelClassification['Time'].Items))
Model_Time_End = int(max(ModelClassification['Time'].Items))
Model_Duration = Model_Time_End - Model_Time_Start
# -
# That dictionary of classifications enters the index table defined for the system. The index table lists all aspects needed and assigns a classification and index letter to each aspect.
# +
IndexTable = pd.DataFrame({'Aspect'        : ['Time','Element'], # 'Time' and 'Element' must be present!
                           'Description'   : ['Model aspect "time"', 'Model aspect "Element"'],
                           'Dimension'     : ['Time','Element'], # 'Time' and 'Element' are also dimensions
                           'Classification': [ModelClassification[Aspect] for Aspect in ['Time','Element']],
                           'IndexLetter'   : ['t','e']}) # Unique one letter (upper or lower case) indices to be used later for calculations.
# Default indexing of IndexTable, other indices are produced on the fly
IndexTable.set_index('Aspect', inplace = True)
IndexTable
# -
# We can now define our MFA system:
Dyn_MFA_System = msc.MFAsystem(Name = 'TestSystem',
                               Geogr_Scope = 'TestRegion',
                               Unit = 'Mt',
                               ProcessList = [],
                               FlowDict = {},
                               StockDict = {},
                               ParameterDict = {},
                               Time_Start = Model_Time_Start,
                               Time_End = Model_Time_End,
                               IndexTable = IndexTable,
                               Elements = IndexTable.loc['Element'].Classification.Items) # Initialize MFA system
# This system has a name, a geographical scope, a system-wide unit, a time frame, an index table with all aspects defined, and a list of chemical elements considered.
#
# ### 3) Inserting data into the MFA system
#
# It is lacking a list of processes, stocks, flows, and parameters, and these are now defined and inserted into the system:
# +
Dyn_MFA_System.ProcessList = [] # Start with empty process list, only process numbers (IDs) and names are needed.
Dyn_MFA_System.ProcessList.append(msc.Process(Name = 'Environment', ID = 0))
Dyn_MFA_System.ProcessList.append(msc.Process(Name = 'Process 1' , ID = 1))
Dyn_MFA_System.ProcessList.append(msc.Process(Name = 'Process 2' , ID = 2))
# Print list of processes:
Dyn_MFA_System.ProcessList
# +
ParameterDict = {}
# Define parameter Inflow (D) with indices 'te' (years x element) and matching time series Values (array with size 31 x 1).
# In a more advanced setup the parameters are defined in a data template and then read into the software.
ParameterDict['D'] = msc.Parameter(Name = 'Inflow', ID = 1, P_Res = 1, MetaData = None, Indices = 'te', Values= np.arange(0,31).reshape(31,1), Unit = 'Mt/yr')
# Define parameter Recovery rate (alpha) with indices 'te' (years x element) and matching time series Values(array with size 31 x 1).
# In a more advanced setup the parameters are defined in a data template and then read into the software.
ParameterDict['alpha'] = msc.Parameter(Name = 'Recovery rate', ID = 2, P_Res = 2, MetaData = None, Indices = 'te', Values= np.arange(2,33).reshape(31,1)/34, Unit = '1')
# Assign parameter dictionary to MFA system:
Dyn_MFA_System.ParameterDict = ParameterDict
# +
# Define the four flows a,b,c,d of the system, and initialise their values:
Dyn_MFA_System.FlowDict['a'] = msc.Flow(Name = 'Input' , P_Start = 0, P_End = 1, Indices = 't,e', Values=None)
Dyn_MFA_System.FlowDict['b'] = msc.Flow(Name = 'Consumption' , P_Start = 1, P_End = 2, Indices = 't,e', Values=None)
Dyn_MFA_System.FlowDict['c'] = msc.Flow(Name = 'Output' , P_Start = 2, P_End = 0, Indices = 't,e', Values=None)
Dyn_MFA_System.FlowDict['d'] = msc.Flow(Name = 'Recovered material' , P_Start = 2, P_End = 1, Indices = 't,e', Values=None)
# Assign empty arrays to flows according to dimensions.
Dyn_MFA_System.Initialize_FlowValues()
# -
# Check whether flow value arrays match their indices, etc. See method documentation.
Dyn_MFA_System.Consistency_Check()
# ### 4) Programming a solution of the MFA system
# Now the system definition is complete, and we can program the model solution:
# a(t) = D(t): the input equals the exogenous inflow.
Dyn_MFA_System.FlowDict['a'].Values = Dyn_MFA_System.ParameterDict['D'].Values
# b(t) = D(t) / (1 - alpha(t)): mass balance of process 1 including recovered feedback.
Dyn_MFA_System.FlowDict['b'].Values = 1 / (1 - Dyn_MFA_System.ParameterDict['alpha'].Values) * \
                                      Dyn_MFA_System.ParameterDict['D'].Values
# c(t) = D(t): the output equals the input at every time step.
Dyn_MFA_System.FlowDict['c'].Values = Dyn_MFA_System.ParameterDict['D'].Values
# d(t) = alpha(t) / (1 - alpha(t)) * D(t): the recovered flow.
Dyn_MFA_System.FlowDict['d'].Values = Dyn_MFA_System.ParameterDict['alpha'].Values / \
                                      (1 - Dyn_MFA_System.ParameterDict['alpha'].Values) * Dyn_MFA_System.ParameterDict['D'].Values
# ### 5) Mass-balance-check, analyse, and store the model solution
# One major advantage of the ODYM system structure is that mass balance checks can be performed automatically using unit-tested routines without further programming need:
Bal = Dyn_MFA_System.MassBalance()
print(Bal.shape) # dimensions of balance are: time step x process x chemical element
print(np.abs(Bal).sum()) # reports the sum of all absolute balancing errors.
# The ODYM mass balance array reports the balance for each chemical element, each year, and each process, including the system balance (process 0).
fig, ax = plt.subplots()
ax.plot(Dyn_MFA_System.IndexTable['Classification']['Time'].Items, Dyn_MFA_System.FlowDict['a'].Values)
ax.set_ylabel('Flow a in Mt/yr', fontsize =16)
fig, ax = plt.subplots()
ax.plot(Dyn_MFA_System.IndexTable['Classification']['Time'].Items, Dyn_MFA_System.FlowDict['b'].Values)
ax.set_ylabel('Flow b in Mt/yr', fontsize =16)
# Save entire system:
# NOTE(review): the file handle from open() is never closed; a `with` block would be safer.
pickle.dump({'MFATestSystem': Dyn_MFA_System}, open("Tutorial1_MFATestSystem.p", "wb") )
|
docs/.ipynb_checkpoints/ODYM Example and Tutorial No. 1. System with two processes, two parameters, one material-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import re
import json
import utils
import random
import gensim
import warnings
import numpy as np
import pandas as pd
from tasks import *
from pprint import pprint
from tqdm.notebook import tqdm
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from yellowbrick.cluster import KElbowVisualizer
warnings.filterwarnings('ignore')
# -
complete_df = pd.read_csv("data/clean_df.csv")
complete_df.shape
complete_df.head(2)
complete_df.describe()
# +
# Keep only texts with minimal number of words
complete_df = complete_df[complete_df['text'].apply(lambda x: len(re.findall(r"(?i)\b[a-z]+\b", x))) > 1000]
complete_df.shape
# +
# See random text
complete_df.reset_index(inplace = True, drop = True)
complete_df.iloc[42]['abstract'][:500]
# -
# frac = 1 keeps all rows but shuffles them deterministically (random_state=42).
frac_of_articles = 1
train_df = complete_df.sample(frac = frac_of_articles, random_state = 42)
train_corpus = (list(utils.read_corpus(train_df, 'abstract')))
# +
# Using distributed memory model
model = gensim.models.doc2vec.Doc2Vec(dm = 1, vector_size = 50, min_count = 10, dm_mean = 1, epochs = 20, seed = 42, workers = 6)
model.build_vocab(train_corpus)
model.train(train_corpus, total_examples = model.corpus_count, epochs = model.epochs)
# -
list_of_tasks = [task_1, task_2, task_3, task_4, task_5, task_6, task_7, task_8, task_9]
# NOTE(review): .docvecs.vectors_docs is the gensim 3.x API; gensim 4+ renamed it
# (model.dv.vectors) -- confirm the version pin before rerunning.
abstract_vectors = model.docvecs.vectors_docs
array_of_tasks = [utils.get_doc_vector(task, model) for task in list_of_tasks]
# Row order of abstract_vectors is assumed to match train_df's row order -- TODO confirm.
train_df['abstract_vector'] = [vec for vec in abstract_vectors]
# ### Nearest Neighbors search
train_df = train_df[train_df['abstract'].apply(lambda x: len(re.findall(r"(?i)\b[a-z]+\b", x))) > 40]
train_df.shape
train_array = train_df['abstract_vector'].values.tolist()
ball_tree = NearestNeighbors(algorithm = 'ball_tree', leaf_size = 20).fit(train_array)
# +
# Query for all tasks
distances, indices = ball_tree.kneighbors(array_of_tasks, n_neighbors = 3)
# -
# Print the three closest abstracts (with distances) for each task description.
for i, info in enumerate(list_of_tasks):
    print("="*80, f"\n\nTask = {info[:100]}\n", )
    df = train_df.iloc[indices[i]]
    abstracts = df['abstract']
    titles = df['title']
    dist = distances[i]
    for l in range(len(dist)):
        print(f" Text index = {indices[i][l]} \n Distance = {distances[i][l]} \n Title: {titles.iloc[l]} \n Abstract extract: {abstracts.iloc[l][:200]}\n\n")
# ### Clustering and visualisation
abstract_vectors = model.docvecs.vectors_docs
# Elbow plot over k = 2..15 to choose the cluster count.
kmeans = KMeans(init = 'k-means++', max_iter = 300, random_state = 42)
visualizer = KElbowVisualizer(kmeans, k = (2, 16))
visualizer.fit(abstract_vectors)
visualizer.show()
# +
# Clearly 7 clusters are here
# -
kmeans = KMeans(n_clusters = 7, init = 'k-means++', max_iter = 100, random_state = 42)
train_df['labels'] = kmeans.fit_predict(train_array)
train_df[['text','labels']][:20]
# ### Save Binaries
model.save('models/CORD-doc2vec')
# ### Save KeyedVectors and prep Spacy Model
model.wv.save_word2vec_format("data/word2vec.txt")
# Prep data for spacy model
# !gzip data/word2vec.txt
# Init spacy model
# !python -m spacy init-model en models/cord-doc2vec.model --vectors-loc data/word2vec.txt.gz
# Load Model
import spacy
nlp = spacy.load('models/cord-doc2vec.model/')
doc = nlp('The impact of COVID-19 has been tremendous on our economy.')
dox2 = nlp('Flipravir can be a viable solution as a medication against COVID-19.')
doc.similarity(dox2)
|
COVID-19 Research Data/doc2vec.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torchdyn
# language: python
# name: torchdyn
# ---
# ## Multiple Shooting methods for ODEs
# +
import torchdyn
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from torchdyn.numerics import Euler, RungeKutta4, Tsitouras45, DormandPrince45, MSZero
from torchdyn.numerics import odeint, odeint_mshooting, Lorenz
import torchdiffeq
import time
# %load_ext autoreload
# %autoreload 2
# -
# Batch of 8 initial conditions in 3D, offset away from the origin.
x0 = torch.randn(8, 3) + 15
t_span = torch.linspace(0, 3, 2000)
sys = Lorenz()
# +
# Timed solves: a tight-tolerance dopri5 run serves as the ground-truth reference
# for the error comparison of the cheaper solvers below.
t0 = time.time()
t_eval, accurate_sol = odeint(sys, x0, t_span, solver='dopri5', atol=1e-8, rtol=1e-8)
accurate_sol_time = time.time() - t0
t0 = time.time()
t_eval, base_sol = odeint(sys, x0, t_span, solver='euler')
base_sol_time = time.time() - t0
t0 = time.time()
t_eval, rk4_sol = odeint(sys, x0, t_span, solver='rk4')
rk4_sol_time = time.time() - t0
t0 = time.time()
t_eval, dp5_low_sol = odeint(sys, x0, t_span, solver='dopri5', atol=1e-3, rtol=1e-3)
dp5_low_time = time.time() - t0
# Multiple-shooting solve (MSZero) with 2 fine steps and 4 outer iterations.
t0 = time.time()
t_eval, ms_sol = odeint_mshooting(sys, x0, t_span, solver='mszero', fine_steps=2, maxiter=4)
ms_sol_time = time.time() - t0
# -
# -
def smape(yhat, y):
    """Elementwise symmetric error: |yhat - y| / (2 * (|yhat| + |y|)).

    NOTE(review): canonical SMAPE is 2*|yhat - y| / (|yhat| + |y|); this variant
    is that value scaled by 1/4, which is fine for the purely relative solver
    comparison done here -- confirm before reusing as a standalone metric.
    """
    absolute_error = torch.abs(yhat - y)
    magnitude = torch.abs(yhat) + torch.abs(y)
    return absolute_error / (2 * magnitude)
# +
# Per-time-step error of each solver against the high-accuracy reference:
# mean over the batch dimension, summed over the three state coordinates.
err_zero = smape(ms_sol, accurate_sol).mean(1).sum(1)
err_base = smape(base_sol, accurate_sol).mean(1).sum(1)
err_rk4 = smape(rk4_sol, accurate_sol).mean(1).sum(1)
err_dp5 = smape(dp5_low_sol, accurate_sol).mean(1).sum(1)
# Shared color scheme across all panels:
# reference=blue, MSZero=red, Euler=black, RK4=green, DP45(low)=orange.
fig = plt.figure(figsize=(16,4))
# Panel 1: state trajectories over time for four of the batch elements.
ax = fig.add_subplot(1,4,1)
for i in range(4):
    for j in range(3):
        ax.plot(accurate_sol[:,i,j], c='b')
        ax.plot(ms_sol[1:,i,j], c='r')
        ax.plot(rk4_sol[:,i,j], c='green')
        ax.plot(dp5_low_sol[:,i,j], c='orange')
        ax.plot(base_sol[:,i,j], c='black')
# Fixed: legend labels now follow the plotting order above (the original labeled
# the RK4 line 'Euler', the DP45 line 'RK4', and the Euler line 'DP45(low)').
ax.legend(['Ground-Truth', 'Zero', 'RK4', 'DP45(low)', 'Euler'])
# Panel 2: phase portraits (x vs z).
ax = fig.add_subplot(1,4,2)
for i in range(4):
    ax.plot(accurate_sol[:,i,0], accurate_sol[:,i,2], c='blue')
    ax.plot(ms_sol[:,i,0], ms_sol[:,i,2], c='r')
    ax.plot(rk4_sol[:,i,0], rk4_sol[:,i,2], c='g')
    ax.plot(dp5_low_sol[:,i,0], dp5_low_sol[:,i,2], c='orange')
    ax.plot(base_sol[:,i,0], base_sol[:,i,2], c='black')
ax.legend(['Ground-Truth', 'Zero', 'RK4', 'DP45(low)', 'Euler'])
# Panel 3: per-solver error curves on a log scale.
# Fixed: the original added this subplot twice (duplicated cell content), the first
# copy carrying a five-label legend for four lines; a single consistent panel remains.
ax = fig.add_subplot(1,4,3)
ax.plot(err_zero, c='r')
ax.plot(err_base, c='black')
ax.plot(err_rk4, c='green')
ax.plot(err_dp5, c='orange')
ax.set_yscale('log')
ax.legend(['Zero', 'Euler', 'RK4', 'DP45(low)'])
ax.set_title('Errors, logscale')
# Panel 4: wall-clock solve times, bar colors matched to the line plots.
times = [accurate_sol_time, ms_sol_time, base_sol_time, rk4_sol_time, dp5_low_time]
ax = fig.add_subplot(1,4,4)
colors = ['b', 'r', 'black', 'green', 'orange']
for k in range(5):
    barlist = ax.bar(0 + k, times[k:k+1], color=colors[k], alpha=0.4, edgecolor='black')
ax.legend(['GT(DP45 1e-8)', 'Zero', 'Euler', 'RK4', 'DP45(low)'])
ax.set_title('Wall-clock sol time')
# -
|
benchmarks/numerics/bench_mshooting.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
# %matplotlib inline
# !ls -alt
# !ls -alt PP*
# SBA PPP loans of $150k+ (national file, Aug 2020 release).
df_Texas_all_high_SBA = pd.read_csv('PPP Data 150k plus 080820.csv')
a = df_Texas_all_high_SBA
len(a)
a['State'].value_counts()
# Restrict to Texas loans.
b = a[a['State'] == 'TX']
len(b)
# Then to San Antonio.  NOTE(review): assumes the City column uses exactly the
# 'San Antonio' capitalisation; other spellings would be silently excluded -- verify.
c = b[b['City'] == 'San Antonio']
len(c)
c.LoanRange.value_counts()
c[0:30]
c
c.BusinessName.str.contains('CHILD SUPPORT-2').value_counts()
c.dtypes
c[0:10]
c.BusinessName.str.contains('FORD').value_counts()
c[11:20]
c[21:40]
c[41:60]
Dec_2020_PPP_Work.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GeorgeZard/FinRL/blob/master/Weights_and_Bias_Trials_FinRL_for_Stable_Baselines3_models.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="k7tNSkCdnxu-"
# %%capture
# !pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git
# !pip install wandb
# + id="8l_5ypFddser"
# # %%capture
# # !pip install torch==1.4.0
# + id="UL_DUpELpVyK" colab={"base_uri": "https://localhost:8080/"} outputId="d86e828b-3f58-4719-dd24-7cece4331754"
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
import datetime
# %matplotlib inline
from finrl.apps import config
from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader
from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split
from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.finrl_meta.env_stock_trading.env_stocktrading_np import StockTradingEnv as StockTradingEnv_numpy
# from finrl.drl_agents.stablebaselines3.models import DRLAgent as DRLAgent_sb3
from finrl.finrl_meta.data_processor import DataProcessor
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline
import ray
from pprint import pprint
import pprint
import sys
sys.path.append("../FinRL-Library")
import itertools
# + id="E-WdUod78OtH"
import wandb
from wandb.integration.sb3 import WandbCallback
# + colab={"base_uri": "https://localhost:8080/"} id="uhLdgzQ7ry-Z" outputId="d4748d6a-c05a-47e0-8e19-a4f980034428"
wandb.login()
# + id="kcuU0wz6ppVF"
import os

# Create the working directories FinRL expects, idempotently.
# (The original guarded each call with os.path.exists, which is both verbose and
# race-prone; exist_ok=True covers the already-exists case in a single call.)
for _needed_dir in (config.DATA_SAVE_DIR,
                    config.TRAINED_MODEL_DIR,
                    config.TENSORBOARD_LOG_DIR,
                    config.RESULTS_DIR):
    os.makedirs("./" + _needed_dir, exist_ok=True)
# + id="csBR7bjGm9CY"
def model_params(model_name):
    """Build a Weights & Biases sweep configuration for a Stable-Baselines3 model.

    The sweep uses Bayesian search maximising the 'Val sharpe' metric, with
    hyperband early termination.  Hyperparameter search spaces are defined for
    'ddpg', 'a2c' and 'ppo'; any other name returns a config without a
    'parameters' section.
    """
    def log_uniform(low, high):
        # W&B log-uniform distribution spec.
        return {"distribution": "log_uniform", "min": low, "max": high}

    def choices(values):
        # W&B categorical choice spec.
        return {'values': list(values)}

    search_spaces = {
        'ddpg': {
            "buffer_size": choices([int(1e4), int(1e5), int(1e6)]),
            "learning_rate": log_uniform(1e-5, 1),
            "batch_size": choices([32, 64, 128, 256, 512]),
        },
        'a2c': {
            "n_steps": choices([128, 256, 512, 1024, 2048]),
            "ent_coef": log_uniform(1e-8, 1),
            "learning_rate": log_uniform(1e-5, 1),
        },
        'ppo': {
            "ent_coef": log_uniform(1e-8, 1),
            "n_steps": choices([128, 256, 512, 1024, 2048]),
            "learning_rate": log_uniform(1e-2, 1),
            "batch_size": choices([32, 64, 128, 256, 512]),
        },
    }

    sweep_config = {
        'method': 'bayes',
        'metric': {'name': 'Val sharpe', 'goal': 'maximize'},
        'early_terminate': {'type': 'hyperband', 's': 2, 'eta': 2, 'max_iter': 12},
    }
    if model_name in search_spaces:
        sweep_config['parameters'] = search_spaces[model_name]
    return sweep_config
# + colab={"base_uri": "https://localhost:8080/"} id="gdiVZzPG7v2y" outputId="eeb9f3b1-c930-4b35-fa08-9736b3bbb6fa"
# %%writefile model_wandb.py
import wandb
from wandb.integration.sb3 import WandbCallback
import time
import numpy as np
import pandas as pd
from finrl.apps import config
# from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.finrl_meta.preprocessor.preprocessors import data_split
from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.noise import (
NormalActionNoise,
OrnsteinUhlenbeckActionNoise,
)
from stable_baselines3.common.vec_env import DummyVecEnv
import pprint
# Map short model names to their Stable-Baselines3 classes.
MODELS = {"a2c": A2C, "ddpg": DDPG, "td3": TD3, "sac": SAC, "ppo": PPO}
# Default kwargs per model, looked up from finrl.apps.config as e.g. config.A2C_PARAMS.
MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()}
# Supported exploration-noise constructors for off-policy models.
NOISE = {
    "normal": NormalActionNoise,
    "ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise,
}
class DRLAgent_SB3:
    """Thin wrapper around Stable-Baselines3 agents that logs training to a W&B run."""

    def __init__(self,env,run):
        # env: a gym-style trading environment (vectorised or single).
        # run: an already-initialised wandb run; its id names the tensorboard
        #      and model-save directories used below.
        self.env = env
        # self.run = wandb.init(reinit=True,
        # project = 'finrl-sweeps-sb3',
        # sync_tensorboard = True,
        # save_code = True
        # )
        self.run = run

    def get_model(
        self,
        model_name,
        policy_kwargs=None,
        model_kwargs=None,
        verbose=1,
        seed=None,
    ):
        """Instantiate the named SB3 model on self.env with MlpPolicy.

        Falls back to the config defaults in MODEL_KWARGS when model_kwargs is
        None.  Raises NotImplementedError for an unknown model_name.
        """
        if model_name not in MODELS:
            raise NotImplementedError("NotImplementedError")
        if model_kwargs is None:
            model_kwargs = MODEL_KWARGS[model_name]
        if "action_noise" in model_kwargs:
            # Replace the noise *name* from config with a constructed noise object
            # sized to the action space (sigma fixed at 0.1).
            n_actions = self.env.action_space.shape[-1]
            model_kwargs["action_noise"] = NOISE[model_kwargs["action_noise"]](
                mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions)
            )
        print(model_kwargs)
        model = MODELS[model_name](
            policy='MlpPolicy',
            env=self.env,
            tensorboard_log=f"runs/{self.run.id}",
            verbose=verbose,
            policy_kwargs=policy_kwargs,
            seed=seed,
            **model_kwargs,
        )
        return model

    def train_model(self, model,total_timesteps):
        """Train the model, checkpointing to models/<run.id> via WandbCallback."""
        model = model.learn(
            total_timesteps=total_timesteps,
            callback = WandbCallback(
                gradient_save_freq = 100, model_save_path = f"models/{self.run.id}",
                verbose = 2
            ),
        )
        return model

    @staticmethod
    def DRL_prediction_load_from_file(run , model_name, environment,val_or_test='val'):
        """Load the model saved for `run`, roll it through `environment`, and log Sharpe.

        Returns (sharpe, episode_total_assets).  val_or_test selects which W&B
        metric name ('Val sharpe' / 'Test sharpe') receives the result.
        """
        if model_name not in MODELS:
            raise NotImplementedError("NotImplementedError, Pass correct model name")
        try:
            # load agent
            model = MODELS[model_name].load(f"models/{run.id}/model.zip") #print_system_info=True
            print("Successfully load model", f"models/{run.id}")
        except BaseException:
            raise ValueError("Fail to load agent!")
        # test on the testing env
        state = environment.reset()
        episode_returns = list() # the cumulative_return / initial_account
        episode_total_assets = list()
        episode_total_assets.append(environment.initial_total_asset)
        done = False
        while not done:
            action = model.predict(state)[0]
            state, reward, done, _ = environment.step(action)
            # Mark-to-market: cash plus current value of held stocks.
            total_asset = (
                environment.amount
                + (environment.price_ary[environment.day] * environment.stocks).sum()
            )
            episode_total_assets.append(total_asset)
            episode_return = total_asset / environment.initial_total_asset
            episode_returns.append(episode_return)
        def calculate_sharpe(df):
            # Annualised Sharpe (sqrt(252) scaling); 0 when returns have no variance.
            df['daily_return'] = df['account_value'].pct_change(1)
            if df['daily_return'].std() !=0:
                sharpe = (252**0.5)*df['daily_return'].mean()/ \
                         df['daily_return'].std()
                return sharpe
            else:
                return 0
        # NOTE(review): episode_return is referenced after the loop; if the
        # environment starts in a done state the loop body never runs and this
        # raises NameError -- confirm the environment guarantees >= 1 step.
        print("episode_return", episode_return)
        print("Test Finished!")
        sharpe_df = pd.DataFrame(episode_total_assets,columns=['account_value'])
        sharpe = calculate_sharpe(sharpe_df)
        if val_or_test == "val":
            wandb.log({"Val sharpe":sharpe})
        elif val_or_test == "test":
            wandb.log({"Test sharpe":sharpe})
        print(f'Test Sharpe for {run.id} is {sharpe}')
        # run.finish()
        return sharpe, episode_total_assets
# + id="dZwuaatxW1oJ"
from model_wandb import DRLAgent_SB3
# + id="5m0ZdifPpqVX"
def train_agent_env(start_date, end_date, ticker_list, data_source, time_interval,
                    technical_indicator_list, env, model_name, if_vix = True,
                    **kwargs):
    """Download, clean and featurize market data, then build a training env instance.

    Extra keyword arguments are forwarded to the DataProcessor constructor.
    Returns an instance of `env` configured with price/tech/turbulence arrays
    and if_train=True.
    """
    processor = DataProcessor(data_source, **kwargs)
    # Fetch raw bars and enrich them with technical indicators (and VIX if requested).
    frame = processor.download_data(ticker_list, start_date, end_date, time_interval)
    frame = processor.clean_data(frame)
    frame = processor.add_technical_indicator(frame, technical_indicator_list)
    if if_vix:
        frame = processor.add_vix(frame)
    price_array, tech_array, turbulence_array = processor.df_to_array(frame, if_vix)
    # Environments built here are always in training mode.
    return env(config={
        'price_array': price_array,
        'tech_array': tech_array,
        'turbulence_array': turbulence_array,
        'if_train': True,
    })
def train(config=None):
    """One wandb sweep trial: train on the TRAIN window, then log validation and test Sharpe.

    Relies on module-level globals (date windows, ticker_list, env, model_name,
    total_timesteps, run_ids) set in the configuration cell below.
    """
    with wandb.init(config=config, sync_tensorboard = True, save_code = True) as run:
        #Get the training environment
        train_env_instance = train_agent_env(TRAIN_START_DATE, TRAIN_END_DATE, ticker_list, data_source, time_interval,
                                             technical_indicator_list, env, model_name)
        # hyperparameters for this trial as chosen by the sweep controller
        config = wandb.config
        #Initialize the training agent
        agent_train = DRLAgent_SB3(train_env_instance,run)
        #For current set of hyperparameters initialize the model
        model = agent_train.get_model(model_name, model_kwargs = config)
        #train the model
        trained_model = agent_train.train_model(model,total_timesteps)
        run_ids[run.id] = run  # keep the run object for later inspection
        print('Training finished!')
        #Log the validation sharpe
        sharpe,val_episode_total_asset = val_or_test(
            VAL_START_DATE, VAL_END_DATE,run,ticker_list,
            data_source, time_interval,
            technical_indicator_list, env, model_name
        )
        #Log the testing sharpe
        sharpe,val_episode_total_asset = val_or_test(
            TEST_START_DATE, TEST_END_DATE,run,ticker_list,
            data_source, time_interval,
            technical_indicator_list, env, model_name,val_or_test = 'test'
        )
# + id="kIFpQvW44LxI"
def val_or_test(start_date, end_date,run, ticker_list, data_source, time_interval,
                technical_indicator_list, env, model_name,val_or_test='val', if_vix = True,
                **kwargs):
    """Build an evaluation environment over [start_date, end_date] and score the saved model.

    Loads the checkpoint written during training for `run` and returns
    (sharpe, per-step account values). `val_or_test` selects which wandb
    metric the Sharpe ratio is logged under.
    """
    processor = DataProcessor(data_source, **kwargs)
    frame = processor.download_data(ticker_list, start_date, end_date, time_interval)
    frame = processor.clean_data(frame)
    frame = processor.add_technical_indicator(frame, technical_indicator_list)
    if if_vix:
        frame = processor.add_vix(frame)
    price_array, tech_array, turbulence_array = processor.df_to_array(frame, if_vix)
    # Evaluation environments are built in non-training mode.
    eval_env = env(config={
        'price_array': price_array,
        'tech_array': tech_array,
        'turbulence_array': turbulence_array,
        'if_train': False,
    })
    run_ids[run.id] = run  # remember the wandb run for later inspection
    sharpe, episode_total_assets = DRLAgent_SB3.DRL_prediction_load_from_file(
        run, model_name, eval_env, val_or_test)
    return sharpe, episode_total_assets
# + id="FsjYYCpdyLxP"
# Chronological train / validation / test split for the trading data.
TRAIN_START_DATE = '2009-01-01'
TRAIN_END_DATE = '2019-07-30'
VAL_START_DATE = '2019-08-01'
VAL_END_DATE = '2020-07-30'
TEST_START_DATE = '2020-08-01'
TEST_END_DATE = '2021-10-01'
# ticker_list = config.DOW_30_TICKER
ticker_list = ['TSLA']  # single-asset experiment
data_source = 'yahoofinance'
time_interval = '1D'  # daily bars
technical_indicator_list = config.TECHNICAL_INDICATORS_LIST
env = StockTradingEnv_numpy
model_name = "a2c"
# PPO_PARAMS = {
#     "n_steps": 2048,
#     "ent_coef": 0.01,
#     "learning_rate": 0.00025,
#     "batch_size": 128,
# }
total_timesteps = 15000
run_ids = {}  # wandb run id -> run object, filled in during the sweep
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["cbae9280b6d04326bea5bcf554b77d42", "72adcbd8e2c945039c99583ff9dbc6d7", "5b1f54663a30497c8f9e567186062be7", "44ac76e3f26944c19ac5faab57270ae6", "84a0beda627f4c309ff80a25e85cf48d", "c1609424400640138801c5ed2d0a3c74", "32e54ca9f4504e69bc6aa11eec520cbe", "bd2485b248834069b51a000116eb7135", "f202544dce234439a80ea89908b9a814", "a44bbd0f72964b3297bd784bc5ea3e9c", "<KEY>", "0766e209c38f47e49a3e5671c2ef5c14", "f62d13dc76ce495fbce461ac4332f630", "213067568ebe4d629dcbf9f1b82f3039", "bef99a3144614ded892d00487bb19b51", "5f0e2b7023394ac89c73cc8da84abfe4", "9579f246f04f4869a076be6a0280903d", "f078898a315145bda32d4f44a1d892c5", "<KEY>", "d9336950a0614576947f7a1dbca1d27e", "<KEY>", "c2d321c660ad4ad5b32fe47a5c894ddf", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "63ce2a569cb146bea8676ce2bc2b5260", "e3754d1ea34a4f849908323c332e88ec", "47bed26dfa9849c982cf8fde65f98587", "7fe37d6f4fb84e3cb47d04ce46f1bb38", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9476057e33624c24853939e30c8aa005", "fd243f7496194b6daf27ac233274e0e8", "c797ef47051a4a78a8d550fafae37168", "<KEY>", "<KEY>", "73756507f69940d9b4bbb640e5dc1344", "6203007a759f4313bba61353f50ade4c", "<KEY>", "<KEY>", "<KEY>", "37a1fb50a9644d6493bbd12fd52ac5e7", "5e31cac65f2847d98f1d01a793872f9a", "3d8d85a5938e49cdad3eb1388ea65215", "<KEY>", "bde4ef80ae274d40a47f9c6595fc8cce", "076f75460e4f42da816704d269e80700", "<KEY>", "<KEY>", "a3cbe2cf2377425e9d88be214fa41489", "9c8f9fc690774befa65c2e3a2e646762", "<KEY>", "3d322f5770fc4719a45bb55f7161e4c9", "0297f545a1494df197af96de22fcc0dc", "d85548ba296a4a169f76a7da7f76b223", "fed852edf09b4dcbb4d2d7431f5997a2", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "73fe41275cd74322883a6cd569b67dad", "39623562d8c94a078e33e67fd04b5961", "53adfa7e7d2345d5b7258b91179d2dc3", "2d357d586ba94ce3a961c9c80ced3923", "6423d3b03af74a4282ba4ec2f746cee7", "996469ef504d42b98aca8b94737708a0", "ca0c22f26d904f87aed747d758c28e33", "<KEY>", 
"2cfbea61d5ba4c4c96191db27376ec3c", "<KEY>", "3f62f6c767e04c9481d902b38fbbe9fa", "<KEY>", "<KEY>", "<KEY>", "f7ad3a8e505e4684bd59324d550167af", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "1dfe21a5256f48ec92a226a80fd80150", "663dd1867ad549dca59de63612aec3f4", "6b76778eaf6f412d86dc1f39945f7b48", "<KEY>", "d9cc23ea41db43568ed77e1b000dcb1d", "1499528040c74398a802e7df6c0e773c", "dd8cea7630154e29a4414ceabbdeca88", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "05b53ddd158f4a84b9b12eb40a75a4d7", "<KEY>", "687c0b58d4464beab069bccc7c052e44", "<KEY>", "<KEY>", "e924c86249d1490c822e5e1828ad619c", "<KEY>", "<KEY>", "<KEY>", "44986a45c8a545bcb9ec2544f36496e5", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "01055fef01a94c6ab8b64ba869f14313", "<KEY>", "3816355464ee498a9f4ed5035f927502", "<KEY>", "37d93dee7b544e3383c4117f4d47de6a", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "0505d962981f45d5a2f7bbb9733e2782", "<KEY>", "f1cf10ea4991435c850eea312258e4e2", "<KEY>", "f831b122ee4043d0a3edb56bae5d3159", "<KEY>", "7e74212efdf248818de17f6a3e92ca6d", "5e84068311d8448795dd63c9d969b963", "554b931851e24f4fa04a59202a0b4e41", "59a38f7c870e453198e7342448628804", "<KEY>", "bf7d227e0fa543df85e3fda793e643d3", "<KEY>", "15a0c5a4a92e402fa85126e18fc65e0b", "<KEY>", "<KEY>", "cad4d7c3f92445dba654249f7b5a1f8a", "baf094e0989d4df8bcf21d2e0ae97d19", "<KEY>", "9cfdfd48fdd842c2b5a59feedb2e40c8", "bde87aad888946e18946c47366c0ed26", "e9d765dd2f6f4f19ade2de95e6a3d15a", "74dce53d69c8462290efe05e5908f282", "<KEY>", "3cd257d597f649d5960efc1ce1c57731", "<KEY>", "<KEY>", "7fa3e941e646457da60164998702200c", "<KEY>", "<KEY>", "055765a770984376870da7f192279083", "<KEY>", "d354b0ca3df24453ac8be43afa15e4cc", "753d8e06e89949468c49476acb1dfd60", "<KEY>", "d131526025dd49ee80da0f8d285a58de", "9d36ee935cdf430bba892b4e2dd3488c", "04134c9a37734d8eb09ce390e4f2cd2e", "1a840d2310f84c7b90a6223d0c4d0bea", "f418dd993b9e460d93b45049d3a2e33c", "c13d9afad1b8421c897717cae130a66d", "f007d252019c407ebed9515ef5d256a8", "700e3913e8ab4de7a01ea77b7974b6d6", "<KEY>", 
"90daf73f3f7d4601ac233a3d3e894f36", "<KEY>", "<KEY>", "c8cee27ce5c049b087f2873faf5ec828", "03917621aab5452c8fc3f866a887a631", "5041bea178a1495cbece9776bd0a1f48", "<KEY>", "<KEY>", "7654c096085b48be91a5d2b193652c87", "a7d797b875ef4d5f9a7eee83ec65c553", "85d889371d3340ab84b04da2d1545521", "<KEY>", "<KEY>", "4947f380ad284d38a21db925830df622", "3b7ef3eb79d04692a16a59ac42999093", "e5224d0a4ccf4284afe1befe7f1fde60", "<KEY>", "f772be3558ea49d5a14bbd457862a54a", "<KEY>", "<KEY>", "31ca0a0755df4badb2307bf707e267ba", "<KEY>", "<KEY>", "<KEY>", "9631024baddb4048ae292a8a16ece612", "aa4a9910b32e43adaef33288fb79bedd", "6b6e66e475174a9c9ef320d8229ad7cd", "8ce8d294051647fd89a393a7ca33e508", "fa9863e4965d44eeacfb46a748571a2a", "<KEY>", "f3a43f87835f4d888eacd5f382ef5adc", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "5c6a211cedbc4d41833c86df456dff00", "<KEY>", "<KEY>", "19fe318573244d0a918f556fc8a43117", "c5f62b90497f4527b25217aced99ee18", "6d22d54365ad4858a7bb7492d2096d24", "3d9ba10f7f7a48a5858d3fae9b5e382e", "2709dddd2151408590c059ca4be2b12e", "e76997c2aeea477aace37b05be48235b", "ae01484fd2df45b38761ebfe1d43b37a", "<KEY>", "<KEY>", "16c34e2e80d04a0da589e33182bb01fb", "<KEY>", "b1eadd03c96a4bee8f5843a0105b3cf6", "b44925d2dde84a56bdec9a6dfff5e044", "<KEY>", "e8f061b8801d4d1a9cd6178ea1859fe4", "<KEY>", "aea19ae7e2fe4551bee236664dd7a788", "fcd718ee253d49389ee6f33ba577c658", "7d786e3e83d8459e817372d9ade86ce7", "360de5d294ae44e38b3df495c730742a", "<KEY>", "2a81d8916b9e40208f909b02ec17ee4f", "<KEY>", "<KEY>", "<KEY>", "7e433c6e445a427a87ff2113d75cc151", "c8e94d06005c430f9a3547415c3f96af", "<KEY>", "<KEY>", "5138d23251604e80b4362ed4680ad69b", "acd72f65185c4c42a73055445350d867", "efde9503691c480d9ef8c95d0ac772e5", "5e739d4ab2ff42619875304219063364", "<KEY>", "b1d6fa7078384f099f3aefff3f7cc988", "a3c5e4351596449f8364ca93fb8efae3", "1ddd793e07b44ec1a1fb1866137860b7", "fd83d202102b48fcadee23ec284f2259", "c64f1aef0f3b4a08ba14838b06128995"]} id="E4XBN4a6nSlV" 
outputId="be424205-1990-4e4b-eca3-68f19fb15315"
count = 30  # number of sweep trials to run
# Raise wandb's tolerance for early run failures so the sweep is not aborted prematurely.
os.environ['WANDB_AGENT_MAX_INITIAL_FAILURES']= str(count-5)
project_name = 'finrl-sweeps-sb3'
sweep_config = model_params(model_name)  # hyperparameter search space for the chosen algorithm
sweep_id = wandb.sweep(sweep_config,project=project_name)
# Launch `count` trials; each one invokes train() with sweep-chosen hyperparameters.
wandb.agent(sweep_id, train, count=count)
# + id="xL2-eqxJ4hYb" colab={"base_uri": "https://localhost:8080/"} outputId="b54757c1-402c-48eb-d286-27a5cb80c10d"
run_ids  # display the collected wandb runs
# + id="n6E8iv5JmAE7"
|
Weights_and_Bias_Trials_FinRL_for_Stable_Baselines3_models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="width: 100%; overflow: hidden;">
# <div style="width: 150px; float: left;"> <img src="https://raw.githubusercontent.com/DataForScience/Networks/master/data/D4Sci_logo_ball.png" alt="Data For Science, Inc" align="left" border="0" width=150px> </div>
# <div style="float: left; margin-left: 10px;"> <h1>Causal Inference In Statistics - A Primer</h1>
# <h1>3.5 Conditional Interventions</h1>
# <p><NAME><br/>
# <a href="http://www.data4sci.com/">www.data4sci.com</a><br/>
# @bgoncalves, @data4sci</p></div>
# <div style="float: right; margin-right:10px;"> <p><a href="https://amzn.to/3gsFlkO" target=_blank><img src='data/causality.jpeg' width='100px'>
# <!--Amazon Affiliate Link--></a></p></div>
# </div>
# +
from collections import Counter
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from CausalModel import CausalModel
import watermark
# %load_ext watermark
# %matplotlib inline
# -
# We start by printing out the versions of the libraries we're using, for future reference
# %watermark -n -v -m -g -iv
# Load default figure style
plt.style.use('./d4sci.mplstyle')
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Let's load the DAG of Fig 3.3
G = CausalModel('dags/Primer.Fig.3.3.dot')
fig, ax = plt.subplots(1, figsize=(3, 2.5))
G.plot(ax=ax)
# The conditional intervention graph is:
G2 = G.conditional_intervention_graph('X', [('Z', 'X')], drop_nodes=True)
fig, ax = plt.subplots(1, figsize=(3, 2.5))
G2.plot_path(('Z', 'X'), ax=ax, conditional=True, lw=1)
# We start by loading up the DAG from Fig 3.8
G = CausalModel('dags/Primer.Fig.3.8.dot')
fig, ax = plt.subplots(1, figsize=(3, 2.5))
G.plot(ax=ax)
# We can easily find all paths between X and Y
paths = G.all_paths('X', 'Y')
# And visualize all the non-direct paths:
# +
fig, ax_lst = plt.subplots(2, 2, figsize=(6, 5))
ax_lst = np.array(ax_lst).flatten()  # flatten the 2x2 axes grid for sequential filling
pos = 0
for i, path in enumerate(paths):
    if 'W' in path: # skip the direct path
        continue
    G.plot_path(path, ax=ax_lst[pos])
    pos += 1
ax_lst[-1].axis('off');  # hide the unused fourth subplot
# -
# -
# Since we're conditioning on C, our conditioning set is {Z, C}: all backdoor paths go through Z.
# <div style="width: 100%; overflow: hidden;">
# <img src="data/D4Sci_logo_full.png" alt="Data For Science, Inc" align="center" border="0" width=300px>
# </div>
|
3.5 - Conditional Interventions and Covariate-Specific Effects.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modeling - Malware Labeling - Logistic Regression
# +
# constants
# Prepared feature splits are read from data/prepared; predictions go under results/.
INPUT_GENERIC_FPATH = "../../../data/prepared/malware_category_labeling/{split}.csv"
OUTPUT_TEST_PREDICTION_FPATH = "../../../results/evaluation/predictions/malware_category_labeling/logistic_regression.csv"
SEED = 42  # fixed seed for reproducible training
# -
# !pip install -q pandas
# +
import os
import sys
import pandas as pd
from sklearn.metrics import cohen_kappa_score, make_scorer
# add directory to path in order to import own module
sys.path.insert(0, "../../..")
from android_malware_labeling.training.logistic_regression import train_logistic_regression
from android_malware_labeling.evaluation.evaluation import (
evaluate_imbalanced_multiclass_prediction,
plot_conf_matrix
)
# -
# ## Loading
# Load the prepared splits. `squeeze=True` was removed from pd.read_csv in pandas 2.0;
# DataFrame.squeeze("columns") is the version-stable equivalent (collapses a
# single-column frame to a Series, leaves multi-column frames untouched).
train_X = pd.read_csv(INPUT_GENERIC_FPATH.format(split="train_X"), index_col=0).squeeze("columns")
train_y = pd.read_csv(INPUT_GENERIC_FPATH.format(split="train_y"), index_col=0).squeeze("columns")
validation_X = pd.read_csv(INPUT_GENERIC_FPATH.format(split="validation_X"), index_col=0).squeeze("columns")
validation_y = pd.read_csv(INPUT_GENERIC_FPATH.format(split="validation_y"), index_col=0).squeeze("columns")
# ## Training
# Fit logistic regression with macro-F1 scoring; validation data is passed for model selection.
lr, _ = train_logistic_regression(
    train_X.values,
    train_y.values,
    validation_X.values,
    validation_y.values,
    seed=SEED,
    scoring="f1_macro"
)
lr.get_params()  # display the chosen hyperparameters
# ## Evaluation on Validation Set
validation_pred = lr.predict(validation_X)
evaluate_imbalanced_multiclass_prediction(validation_y, validation_pred)
plot_conf_matrix(validation_y, validation_pred)
# ## Prediction and Saving
# `squeeze=True` was removed from pd.read_csv in pandas 2.0; use DataFrame.squeeze instead.
test_X = pd.read_csv(INPUT_GENERIC_FPATH.format(split="test_X"), index_col=0).squeeze("columns")
# Predictions keep the test index and the training target's column name.
predictions = pd.DataFrame(lr.predict(test_X), columns=[train_y.name], index=test_X.index)
predictions.to_csv(OUTPUT_TEST_PREDICTION_FPATH)
|
notebooks/modeling/malware_category_labeling/logistic_regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import torch
import gym
import pybullet_envs
import os
import utils
import TD3
# +
from gym.envs.registration import registry, make, spec
def register(id, *args, **kvargs):
    """Register a gym environment id, silently skipping ids that already exist.

    Returns None for already-registered ids, otherwise the result of
    gym's registration call.
    """
    if id not in registry.env_specs:
        return gym.envs.registration.register(id, *args, **kvargs)
    return None
# -
# Register the custom Ant environment (random walk targets) under a new id.
register(id='MyAntBulletEnv-v0',
         entry_point='override_ant_random_points:MyAntBulletEnv',
         max_episode_steps=1000,
         reward_threshold=2500.0)
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Roll `policy` deterministically for eval_episodes and return the mean episode return.

    The evaluation env is seeded at seed + 100 so it never shares randomness
    with the training env. Each episode gets a fresh random walk target on a
    circle of radius ||(20, 20)||.
    """
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)

    total_reward = 0.
    for _ in range(eval_episodes):
        radius = np.linalg.norm([20, 20])
        angle_deg = np.random.randint(0, 360)  # degrees here for reader clarity
        eval_env.robot.walk_target_x = radius * np.cos(np.pi / 180 * angle_deg)
        eval_env.robot.walk_target_y = radius * np.sin(np.pi / 180 * angle_deg)
        state, done = eval_env.reset(), False
        while not done:
            chosen = policy.select_action(np.array(state))
            state, reward, done, _ = eval_env.step(chosen)
            total_reward += reward

    avg_reward = total_reward / eval_episodes
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
# Cap intra-op parallelism: by default the training eats all available cores and does not scale well.
torch.set_num_threads(4) # the training will eat all available cores by default and does not scale well, you can play around with this setting for your own machine
torch.get_num_threads()  # confirm the setting took effect
def main():
    """Train TD3 on MyAntBulletEnv-v0 with randomized walk targets.

    Standard TD3 training loop: pure random actions for the first
    `start_timesteps` steps to seed the replay buffer, then policy actions
    with Gaussian exploration noise. Evaluates and checkpoints every
    `eval_freq` environment steps.
    """
    args = {
        "policy" : "TD3", # Policy name (TD3, DDPG or OurDDPG)
        "env" : "MyAntBulletEnv-v0", # OpenAI gym environment name
        "seed" : 0, # Sets Gym, PyTorch and Numpy seeds
        "start_timesteps" : 25e3, # Time steps initial random policy is used
        "eval_freq" : 5e3, # How often (time steps) we evaluate
        "max_timesteps" : 2e6, # Max time steps to run environment
        "expl_noise" : 0.1, # Std of Gaussian exploration noise
        "batch_size" : 256, # Batch size for both actor and critic
        "discount" : 0.99, # Discount factor
        "tau" : 0.007, # Target network update rate
        "policy_noise" : 0.2, # Noise added to target policy during critic update
        "noise_clip" : 0.5, # Range to clip target policy noise
        "policy_freq" : 2, # Frequency of delayed policy updates
        "save_model" : "store_true", # Save model and optimizer parameters (argparse leftover: any non-empty string is truthy, so models are always saved)
        "load_model" : "", # Model load file name, "" doesn't load, "default" uses file_name
    }

    file_name = f"{args['policy']}_{args['env']}_{args['seed']}_{args['tau']}"
    print("---------------------------------------")
    print(f"Policy: {args['policy']}, Env: {args['env']}, Seed: {args['seed']}")
    print("---------------------------------------")

    if not os.path.exists("./results"):
        os.makedirs("./results")
    if args['save_model'] and not os.path.exists("./models"):
        os.makedirs("./models")

    env = gym.make(args['env'])

    # Set seeds
    env.seed(args['seed'])
    env.action_space.seed(args['seed'])
    torch.manual_seed(args['seed'])
    np.random.seed(args['seed'])

    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": args['discount'],
        "tau": args['tau'],
    }

    # Initialize policy
    if args['policy'] == "TD3":
        # Target policy smoothing is scaled wrt the action scale
        kwargs["policy_noise"] = args['policy_noise'] * max_action
        kwargs["noise_clip"] = args['noise_clip'] * max_action
        kwargs["policy_freq"] = args['policy_freq']
        policy = TD3.TD3(**kwargs)

    if args['load_model'] != "":
        policy_file = file_name if args['load_model'] == "default" else args['load_model']
        policy.load(f"./models/{policy_file}")

    replay_buffer = utils.ReplayBuffer(state_dim, action_dim)

    # Evaluate untrained policy
    evaluations = [eval_policy(policy, args['env'], args['seed'])]

    state, done = env.reset(), False
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0

    for t in range(int(args['max_timesteps'])):
        episode_timesteps += 1

        # Select action randomly or according to policy
        if t < args['start_timesteps']:
            action = env.action_space.sample()
        else:
            action = (
                policy.select_action(np.array(state))
                + np.random.normal(0, max_action * args['expl_noise'], size=action_dim)
            ).clip(-max_action, max_action)

        # Perform action
        next_state, reward, done, _ = env.step(action)
        # Do not bootstrap-terminate on timeout: only count `done` as terminal
        # when the episode ended before the env's step limit.
        done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0

        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)

        state = next_state
        episode_reward += reward

        # Train agent after collecting sufficient data
        if t >= args['start_timesteps']:
            policy.train(replay_buffer, args['batch_size'])

        if done:
            # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
            print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
            # Reset environment
            #samples x,y from a circle of r=sqrt(20**2+20**2)
            r = np.linalg.norm([20,20])
            rand_deg = np.random.randint(0,360) # degrees here for reader clarity, rather than directly in 2pi
            rand_x = r*np.cos(np.pi/180 * rand_deg)
            rand_y = r*np.sin(np.pi/180 * rand_deg)
            env.robot.walk_target_x = rand_x
            env.robot.walk_target_y = rand_y
            state, done = env.reset(), False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1

        # Evaluate episode
        if (t + 1) % args['eval_freq'] == 0:
            evaluations.append(eval_policy(policy, args['env'], args['seed']))
            np.save(f"./results/{file_name}", evaluations)
            if args['save_model']: policy.save(f"./models/{file_name}")

main()
|
TD3notebook-random_points.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Beautiful Photometry
#
# * [GitHub]()
#
# A set of tools to compute lighting photometric data and generate beautiful graphics. This is a work in progress.
#
# It is based on the Colour Science library ([GitHub](https://github.com/colour-science/colour)) ([Docs](https://www.colour-science.org/)).
# +
# First, some boilerplate code just to run this notebook
# displays plots in this notebook
# %matplotlib inline
# Since we are in the ./examples folder, add the root folder (../) to the path
import sys
sys.path.append('../')
# Colour Science Library
import colour
# +
from src.spectrum import import_spd
# Get the SPDs
# Import normalized spectral power distributions (SPDs) for three warm sources.
incandescent = import_spd('../CSVs/incandescent.csv', 'Incandescent', normalize=True)
halogen = import_spd('../CSVs/halogen.csv', 'Halogen', normalize=True)
led2700 = import_spd('../CSVs/led2700.csv', '2700 K LED', normalize=True)

# Print the CRI (colour rendering index) of each source
incandescent_cri = colour.colour_rendering_index(incandescent)
halogen_cri = colour.colour_rendering_index(halogen)
led2700_cri = colour.colour_rendering_index(led2700)
print('Incandescent CRI:', incandescent_cri)
print('Halogen CRI:', halogen_cri)
print('2700 K LED CRI:', led2700_cri)
# +
from src.plot import plot_spectrum, plot_multi_spectrum

# Plot each SPD, overlaying the melanopic sensitivity curve where requested.
plot_spectrum(incandescent, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True)
plot_spectrum(halogen, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/halogen.png')
plot_spectrum(led2700, hideyaxis=True)
plot_multi_spectrum([incandescent, halogen, led2700], melanopic_curve=True, hideyaxis=True)
# +
# Get the SPDs for natural-light references
daylight = import_spd('../CSVs/daylight.csv', 'Daylight', normalize=True)
night = import_spd('../CSVs/moonlight.csv', 'Night', normalize=True)
candle = import_spd('../CSVs/candle.csv', 'Fire', normalize=True)
plot_spectrum(daylight, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/daylight.png')
plot_spectrum(night, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/night.png')
plot_spectrum(candle, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/candle.png')
# +
# Bedtime Bulb Comparison — weights scale each SPD by relative output
# (350/650, 600/650; see the melanopic_lumens calls below for the lumen figures)
bedtimebulb = import_spd('../CSVs/bedtime_bulb.csv', 'Bedtime Bulb', normalize=True)
bedtimebulbweighted = import_spd('../CSVs/bedtime_bulb.csv', 'Bedtime Bulb', normalize=True, weight=350/650)
soraa = import_spd('../CSVs/soraa.csv', 'Soraa Healthy', normalize=True)
soraaweighted = import_spd('../CSVs/soraa.csv', 'Soraa Healthy', normalize=True, weight=600/650)
lsg = import_spd('../CSVs/lsg.csv', 'LSG Goodnight', normalize=True)
lsgweighted = lsg  # reference source: weight of 1
plot_spectrum(bedtimebulb, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/Bedtime Bulb Spectrum.png')
plot_spectrum(soraa, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True)
plot_spectrum(lsg, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True)
plot_multi_spectrum([bedtimebulbweighted, soraaweighted, lsgweighted], melanopic_curve=True, hideyaxis=True, filename='../out/Low Blue Comparison.png')
# +
# Compare against traditional sources, weighted relative to 800
bedtimebulbweighted = import_spd('../CSVs/bedtime_bulb.csv', 'Bedtime Bulb', normalize=True, weight=350/800)
halogenweighted = import_spd('../CSVs/halogen.csv', 'Halogen 650 lm', normalize=True, weight=650/800)
incandescent = import_spd('../CSVs/incandescent.csv', 'Incandescent 800 lm', normalize=True)
led2700 = import_spd('../CSVs/led2700.csv', 'LED 2700K 800 lm', normalize=True)
plot_multi_spectrum([bedtimebulbweighted, incandescent, halogenweighted, led2700], melanopic_curve=True, hideyaxis=True, filename='../out/Traditional Source Comparison.png')
# +
# Melanopic Ratio — print circadian/visual metrics for each of the four sources
from src.human_circadian import melanopic_ratio, melanopic_response, melanopic_lumens, melanopic_photopic_ratio
from src.human_visual import scotopic_photopic_ratio
print('melanopic response')
print(melanopic_response(bedtimebulb))
print(melanopic_response(incandescent))
print(melanopic_response(halogen))
print(melanopic_response(led2700))
print('melanopic ratio')
print(melanopic_ratio(bedtimebulb))
print(melanopic_ratio(incandescent))
print(melanopic_ratio(halogen))
print(melanopic_ratio(led2700))
print('melanopic lumens')
print(melanopic_lumens(bedtimebulb, 350))
print(melanopic_lumens(incandescent, 800))
print(melanopic_lumens(halogen, 650))
print(melanopic_lumens(led2700, 800))
print('s/p ratio')
print(scotopic_photopic_ratio(bedtimebulb))
print(scotopic_photopic_ratio(incandescent))
print(scotopic_photopic_ratio(halogen))
print(scotopic_photopic_ratio(led2700))
print('m/p ratio')
print(melanopic_photopic_ratio(bedtimebulb))
print(melanopic_photopic_ratio(incandescent))
print(melanopic_photopic_ratio(halogen))
print(melanopic_photopic_ratio(led2700))
# -
# Indoor vs. Outdoor light (weights express illuminance relative to ~10,000 lux daylight)
daylight = import_spd('../CSVs/daylight.csv', 'Daylight', normalize=True) # ~10,000 lux
night = import_spd('../CSVs/moonlight.csv', 'Outdoor Night', normalize=True, weight=0.32/10000) # 0.32 lux
indoor = import_spd('../CSVs/halogen.csv', 'Indoor Recommended', normalize=True, weight=500/10000) # 500 lux, indoor halogen
plot_multi_spectrum([daylight, night, indoor], melanopic_curve=True, hideyaxis=True, filename='../out/Indoor vs Outdoor.png')
# Interior vs. nighttime light (re-weighted against the 500 lux indoor reference)
night = import_spd('../CSVs/moonlight.csv', 'Outdoor Night', normalize=True, weight=0.32/500) # 0.32 lux
indoor = import_spd('../CSVs/halogen.csv', 'Indoor Recommended', normalize=True) # 500 lux, indoor halogen
bedtimebulbweighted = import_spd('../CSVs/bedtime_bulb.csv', 'Bedtime Bulb', normalize=True, weight=100/500) # 100 lux
plot_multi_spectrum([night, indoor, bedtimebulbweighted], melanopic_curve=True, hideyaxis=True, filename='../out/Indoor Comparison.png')
plot_multi_spectrum([night, indoor], melanopic_curve=True, hideyaxis=True, filename='../out/Indoor Comparison no BB.png')
# GE Align PM vs BB
gealignpm = import_spd('../CSVs/GE_Align_PM.csv', 'GE Align PM', normalize=True)
plot_multi_spectrum([bedtimebulb, gealignpm], melanopic_curve=True, hideyaxis=True)
# +
# S/P/M: scotopic, photopic and melanopic sensitivity curves side by side
from src.human_circadian import get_melanopic_curve
from src.human_visual import get_scotopic_curve, get_photopic_curve
plot_multi_spectrum([get_melanopic_curve(), get_scotopic_curve(), get_photopic_curve()], hideyaxis=True)
# -
# Cone fundamentals (L/M/S)
from src.human_visual import get_l_cone_curve, get_m_cone_curve, get_s_cone_curve
plot_multi_spectrum([get_l_cone_curve(), get_m_cone_curve(), get_s_cone_curve()], hideyaxis=True)
## Blue blockers article
# sunlight vs indoor 2700K LED
daylight = import_spd('../CSVs/daylight.csv', 'Daylight', normalize=True) # ~10,000 lux
indoor = import_spd('../CSVs/led2700.csv', 'LED 2700K', normalize=True, weight=500/10000) # 500 lux, indoor halogen
plot_multi_spectrum([daylight, indoor], melanopic_curve=True, hideyaxis=True, filename='../out/Daylight vs 2700K.png')
# 2700K LED
plot_spectrum(led2700, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/LED 2700K.png')
# moonlight vs 2700K LED
indoor = import_spd('../CSVs/led2700.csv', 'LED 2700K', normalize=True) # 500 lux, indoor halogen
night = import_spd('../CSVs/moonlight.csv', 'Outdoor Night', normalize=True, weight=0.32/500) # 0.32 lux
plot_multi_spectrum([indoor, night], melanopic_curve=True, hideyaxis=True, filename='../out/Moonlight vs 2700K.png')
# Orange glasses (transmission spectra of blue-blocking filters)
uvex = import_spd('../CSVs/filters/uvex_sct_orange.csv', 'Uvex', normalize=True)
plot_spectrum(uvex, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/Uvex SCT Orange.png')
# Yellow glasses
gammaray = import_spd('../CSVs/filters/gamma_ray_computer_yellow.csv', 'Gamma Ray', normalize=True)
plot_spectrum(gammaray, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/Gamma Ray Yellow.png')
# Clear glasses
blutech = import_spd('../CSVs/filters/blutech_clear.csv', 'Blutech', normalize=True)
plot_spectrum(blutech, hideyaxis=True, melanopic_curve=True, melanopic_stimulus=True, filename='../out/Blutech Clear.png')
|
examples/beautiful_photometry.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# ### Using MLRun with a remote Spark service
# ### Build a simple read CSV function using Spark
# +
# #!/conda/bin/python
import mlrun
from mlrun.datastore import DataItem
from mlrun.execution import MLClientCtx
from pyspark.sql import SparkSession
def describe_spark(context: MLClientCtx,
                   dataset: DataItem,
                   artifact_path):
    """Read a CSV with Spark, preview it, and log a 10% pandas sample as an MLRun artifact."""
    # materialize the data item to a local file path
    csv_path = dataset.local()

    # build a (or reuse the) Spark session
    session = SparkSession.builder.appName("Spark job").getOrCreate()

    # load the CSV with header detection and schema inference, then preview it
    frame = session.read.csv(csv_path, header=True, inferSchema= True)
    frame.show(5)

    # take a 10% sample (without replacement) small enough to log as pandas
    sample_pdf = frame.sample(False, 0.1).toPandas()

    # log the sample under the run's data subpath
    context.log_dataset("df_sample",
                        df=sample_pdf,
                        format="csv", index=False,
                        artifact_path=context.artifact_subpath('data'))

    session.stop()
# +
# mlrun: end-code
# -
# ### Create a remote-spark MLRun function
# Build a remote-spark MLRun function from the describe_spark handler, attach it
# to an Iguazio Spark service, deploy it, and run it on the iris CSV.
fn = mlrun.code_to_function(handler="describe_spark", kind="remote-spark")
fn.with_spark_service(spark_service="iguazio-spark-service-name")  # replace with your Spark service name
fn.deploy()
fn.run(inputs={"dataset": "iris_dataset.csv"}, artifact_path="/User")
|
examples/remote-spark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from numpy import array
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from scipy.stats import norm, skew
import matplotlib.pyplot as plt
import warnings
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.metrics import r2_score
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
warnings.filterwarnings("ignore")
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# ### Define the following functions as per the description, so that they can be called later to perform the tasks.
# +
# to treat missing values
def treat_null_values(df, method):
    """Treat the missing values of *df* in place.

    Keyword arguments:
    df -- pandas dataframe/series whose missing values are treated
    method -- 'drop' removes the rows; 'mean' (rounded to 1 decimal),
              'median' and 'mode' impute; anything else fills with 'NA'
    """
    if method == 'drop':
        df.dropna(inplace=True)
        return
    # Pick the imputation value, then fill once.
    if method == 'mean':
        fill_value = round(df.mean(), 1)
    elif method == 'median':
        fill_value = df.median()
    elif method == 'mode':
        fill_value = df.mode()[0]
    else:
        fill_value = 'NA'
    df.fillna(fill_value, inplace=True)
# +
# to seperate the numerical and categorical columns
def num_and_cat_columns(df):
    """Return separate collections of numerical and categorical column names.

    Keyword arguments:
    df -- pandas dataframe whose columns are classified

    Returns:
    num_cols -- Index of numerical columns
    cat_cols -- list of categorical (non-numerical) columns
    """
    numeric = df._get_numeric_data().columns
    # Everything that is not numeric counts as categorical.
    categorical = list(set(df.columns) - set(numeric))
    return numeric, categorical
# +
# to encode the categorical column so as to convert them to numeric.
def encode_category(df, enc, col, method):
    """Encode a categorical column of *df* with a pre-fitted encoder.

    Keyword arguments:
    df -- pandas dataframe to transform
    enc -- the encoder, already fitted on the train data
    col -- name of the column to encode
    method -- 'label' replaces the column with integer codes;
              'onehot' appends indicator columns and drops the original

    Returns:
    df -- the encoded dataframe
    """
    if method == 'label':
        # Label encoding replaces the column in place with integer codes.
        df[col] = enc.transform(df[col].astype(str))
    elif method == 'onehot':
        tempdf = enc.transform(df[[col]]).toarray()
        # BUG FIX: use the fitted encoder passed in (`enc`); the original
        # referenced the undefined name `ohe` left over from commented-out
        # code, raising NameError on this branch.
        newdf = pd.DataFrame(tempdf, columns=np.array(enc.categories_).ravel())
        df = pd.concat([df, newdf], axis=1)
        df.drop(columns=[col], inplace=True)
    return df
# +
# to clean the dollar sign from the currency column & convert it to float.
def clean_dollar(df, col):
    """Strip the leading "$" sign from *col* and cast the column to float.

    Keyword arguments:
    df -- pandas dataframe holding the column
    col -- name of the currency column to convert

    Returns:
    df -- the same dataframe with the converted column
    """
    df[col] = df[col].map(lambda value: float(value.strip('$')))
    return df
# +
# to visualize distributions of the column(s) by plotting them.
def plot_distribution(kind_, df, *col):
    """Plot distribution of the column(s).

    This function will plot a chart of the passed column as the 'kind' specified in kind_.
    You can pass multiple columns to this function.

    Keyword arguments:
    kind_ -- 'kind' of chart that will be plotted
    df -- pandas dataframe which has the data
    *col -- features for which we want to plot the distribution
    """
    if kind_ == 'box':
        if len(col) == 1:
            # Single column: one plain boxplot.
            boxplot = df.boxplot(column = col[0], rot = 90)
            plt.show()
        elif len(col) > 1:
            # First column is the value; each further column is a grouping
            # key, producing one grouped boxplot per key.
            for c in col[1:]:
                boxplot = df.boxplot(column = col[0], by = c, rot = 90)
                plt.show()
    else:
        if len(col) == 0:
            # No column given: let pandas plot the whole frame.
            df.plot(kind = kind_)
            plt.show()
        elif len(col) == 1:
            df[col[0]].plot(kind = kind_)
            plt.xlabel(col[0])
            plt.show()
        elif len(col) == 2:
            # Two columns: x/y chart (e.g. scatter or line).
            df.plot(x = col[0], y = col[1], kind = kind_)
            plt.xlabel(col[0])
            plt.ylabel(col[1])
            plt.show()
        else:
            print("Unable to plot a chart with given parameters.")
# +
# to apply transformation to any column
def transform_column(df, col, transformation):
    """Apply a transformation to a column of *df* in place.

    Keyword arguments:
    df -- pandas dataframe which has the data
    col -- the column to transform
    transformation -- 'log', 'square' or 'sqrt'; anything else is a no-op
    """
    # BUG FIX: the original rebound the *local* name `df` to a transformed
    # copy of the whole frame (ignoring `col`), so the caller's dataframe
    # was never modified and the function was effectively a no-op. Assign
    # into the named column instead, as the docstring describes.
    if transformation == 'log':
        df[col] = np.log(df[col])
    elif transformation == 'square':
        df[col] = np.square(df[col])
    elif transformation == 'sqrt':
        df[col] = np.sqrt(df[col])
# +
# to check outliers using box plot
def check_outliers(df, col, cat):
    """Check outliers through boxplot.

    Plots the boxplot of the feature `col`, optionally grouped by each of
    the categorical features in `cat`, so outliers can be inspected visually.

    Keyword arguments:
    df -- pandas dataframe which has the data
    col -- the feature for which we want to plot the boxplot
    cat -- list of categorical features by which to group (may be empty)
    """
    if len(cat) == 0:
        # No grouping columns: a single boxplot of the feature.
        boxplot = df.boxplot(column=[col], rot = 90)
    else:
        # One grouped boxplot per grouping column.
        for c in cat:
            boxplot = df.boxplot(column=[col], by=[c], rot = 90)
# +
# to fit the model & return the score
def fit_model(X_train, X_test, y_train, y_test, model):
    """Fit the requested model and return the R-Square of its test predictions.

    Keyword arguments:
    X_train -- Train features
    X_test -- Test/Validation features
    y_train -- Train target
    y_test -- Test/Validation target
    model -- 'LinearRegression', 'Lasso' or 'Ridge'; any other value selects
             the degree-2 polynomial-features pipeline

    Returns:
    r2 -- R-Square of the fitted model on the test split
    """
    # Select the estimator; fitting and scoring are identical for all of them.
    if model == 'LinearRegression':
        estimator = LinearRegression()
    elif model == 'Lasso':
        estimator = Lasso()
    elif model == 'Ridge':
        estimator = Ridge()
    else:
        estimator = make_pipeline(PolynomialFeatures(2), LinearRegression())
    estimator.fit(X_train, y_train)
    predictions = estimator.predict(X_test)
    return r2_score(y_test, predictions)
# -
# ### Read the dataset. Take a look at the dataset.
#
# * Check the data types present in the dataframe.
# * Call the num_and_cat_columns() with train as the parameter and store the results.
# * Are there any missing values? Are there any Outliers? How do you want to treat them?
#
# +
# Load the raw training data and take a first look.
train = pd.read_csv("train.csv")
print(train.head())
#Split the data into X and Y
X = train.drop(columns = ['Total Compensation'])
y = train[['Total Compensation']]
print(X.head())
print(y.head())
# Separate numerical from categorical columns and inspect them.
a, b = num_and_cat_columns(X)
print(a,len(a))
print(b, len(b))
# Count missing values per feature.
print(X.isnull().sum())
# Look for outliers in the target before modelling.
check_outliers(y, 'Total Compensation', [])
# 'Department' is dropped from the feature set.
X.drop(columns = ['Department'], inplace = True)
# -
# ### Visualize the data
#
# - Check for the categorical & continuous features.
# - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.
# - Clean the data and apply some data preprocessing techniques
# +
# Have a look at the features, check the type where any currency is involved.
# We have columns with compensations stored in form of strings. We need to clean it and convert this to numeric.
# Call the clean_dollar() to do the same. Apply it to all train, val & test data.
for c in ['Retirement', 'Health and Dental', 'Other Benefits']:
    X = clean_dollar(X, c)
# X_test = clean_dollar(X_test, c)
# +
# Check the distribution of certain columns you might find of interest.
# This can be done by calling the plot_distribution().
# Apply some good transformation if required. Call transform_column() to do the same.
plot_distribution('hist', X, 'Retirement')
plot_distribution('hist', X, 'Health and Dental')
plot_distribution('hist', X, 'Other Benefits')
# +
# Treat the null values by calling the treat_null_values()
# Mode imputation for the two categorical columns with missing entries.
treat_null_values(X['Union'], 'mode')
treat_null_values(X['Job'], 'mode')
# +
# Before we proceed with the model fitting, we need to get rid of the categorical columns.
# We can use One Hot Encoding or Label Encoding to convert the categorical columns to numeric.
# Call the encode_category() with the list of columns and the method of encoding to do the same.
# Split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=0,test_size=0.2)
X_train_ = X_train.copy() # Create a copy of the train data to train the encoder
# Fit one LabelEncoder per categorical column on the training split only,
# then transform both splits with the same fitted encoder (avoids leakage).
for col in ['Union', 'Job Family', 'Job', 'Year Type', 'Organization Group']:
    lb = LabelEncoder()
    lb.fit(X_train_[col])
    X_test = encode_category(X_test, lb, col, 'label')
    X_train = encode_category(X_train, lb, col, 'label')
# +
# Now since we have encoded all the categorical columns, there shouldn't be any left in the data.
# Check the same by calling num_and_cat_columns()
a, b = num_and_cat_columns(X_train)
print("Numerical Columns:", a,len(a))
print("Categorical Columns:", b, len(b))
# -
# ### Model building
#
# - Separate the features and target and then split the train data into train and validation set.
# - Now let's come to the actual task, using linear regression, predict the `Total Compensation`.
# - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score.
#
#
# +
# Now let's come to the actual task, using linear regression to predict the Total Compensation.
# We will check the model accuracy using `r^2 score`
# Call the fit_model() with respective parameters.
r2 = fit_model(X_train, X_test, y_train, y_test, 'LinearRegression')
print ("Linear Regression: ", r2)
# # Check if the accuracy increases after using the Lasso Regularization technique.
# # Call the fit_model() with respective parameters.
r2 = fit_model(X_train, X_test, y_train, y_test, 'Lasso')
print ("Lasso: ", r2)
# # Check if the accuracy increases after using the Ridge Regularization technique.
# # Call the fit_model() with respective parameters.
r2 = fit_model(X_train, X_test, y_train, y_test, 'Ridge')
print ("Ridge: ", r2)
# Check if the accuracy increases after using the Polypipeline technique.
# (any name other than the three above selects the polynomial pipeline)
r2 = fit_model(X_train, X_test, y_train, y_test, 'Pipeline')
print ("Pipeline: ", r2)
# Which technique was the best? That is your final model.
# -
# ### Prediction on the test data and creating the sample submission file.
#
# - Load the test data and store the `Id` column in a separate variable.
# - Perform the same operations on the test data that you have performed on the train data.
# - Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
# +
# Code Starts here
# Prediction on test data
# Read the test data
test = pd.read_csv('test.csv')
# Storing the id from the test file
id_ = test['Id']
# Dropping the same columns from the test data and applying same transformation
test.drop(columns = ['Department'], inplace = True)
treat_null_values(test['Union'], 'mode')
treat_null_values(test['Job'], 'mode')
# Strip "$" from the currency columns, as was done for the train data.
for c in ['Retirement', 'Health and Dental', 'Other Benefits']:
    test = clean_dollar(test, c)
# Label-encode with encoders fitted on the TRAIN data only (no leakage).
for col in ['Union', 'Job Family', 'Job', 'Year Type', 'Organization Group']:
    lb = LabelEncoder()
    lb.fit(X_train_[col])
    test = encode_category(test, lb, col, 'label')
# Applying pipeline on test data
model = make_pipeline(PolynomialFeatures(2), LinearRegression())
model.fit(X_train, y_train)
# Predict on the test data
y_pred_test = model.predict(test)
y_pred_test = y_pred_test.flatten()
# Create a sample submission file
sample_submission = pd.DataFrame({'Id':id_,'Total Compensation':y_pred_test})
# Convert the sample submission file into a csv file
sample_submission.to_csv('comp1.csv',index=False)
# Code ends here
|
Employee_Compensation_Solution-final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DKojen/Matrix_car/blob/master/day5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="4uodkZmbxlEn" colab_type="code" outputId="b1a98ad6-00eb-4857-be89-b2434ecf05f4" colab={"base_uri": "https://localhost:8080/", "height": 695}
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt
# + id="_hkKVriVxxJ6" colab_type="code" outputId="73a242ce-e29b-48b3-e5e4-572f77c66480" colab={"base_uri": "https://localhost:8080/", "height": 190}
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score
from hyperopt import hp,fmin, tpe, STATUS_OK
import eli5
from eli5.sklearn import PermutationImportance
# + id="ygxI-61ZyC7w" colab_type="code" outputId="6c074f10-eabe-46f6-ab1f-5a9495c0240e" colab={"base_uri": "https://localhost:8080/", "height": 35}
# cd "/content/drive/My Drive/Colab Notebooks/Matrix/matrix_two/Matrix_car"
# + id="aB89RvvQyJNH" colab_type="code" outputId="ef2401c6-5048-408f-d9f6-3b9dc28f5e42" colab={"base_uri": "https://localhost:8080/", "height": 35}
df = pd.read_hdf('data/car.h5')
df.shape
# + [markdown] id="WPyr8kq9yQ5d" colab_type="text"
# ## Feature Engineering
# + id="SR87WPWgyUCo" colab_type="code" colab={}
# Factorize every non-list column into integer codes stored in `*__cat` columns.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    # List-valued columns cannot be factorized directly — skip them.
    if isinstance(df[feat][0], list): continue
    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[feat + SUFFIX_CAT] = factorized_values
# + id="93s4pgGxyYye" colab_type="code" colab={}
# Parse numeric features out of their string representations ('None' -> -1).
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
df['param_moc'] = df['param_moc'].map(lambda x:-1 if str(x) == 'None' else int(x.split(' ')[0]))
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x:-1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')) )
# + id="fRqcfA1RYfyn" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate *model* on the global `df` using the columns in *feats*.

    Returns the mean and standard deviation of the 3-fold
    neg_mean_absolute_error scores.
    """
    features = df[feats].values
    target = df['price_value'].values
    cv_scores = cross_val_score(model, features, target, cv=3, scoring='neg_mean_absolute_error')
    return np.mean(cv_scores), np.std(cv_scores)
# + id="w0ThkR7rzQTm" colab_type="code" outputId="0b515b29-188f-4ff3-f6f0-5b22dc3883ba" colab={"base_uri": "https://localhost:8080/", "height": 108}
# Engineered features used to train the model.
feats = ['param_napęd__cat',
         'param_rok-produkcji',
         'param_stan__cat',
         'param_skrzynia-biegów__cat',
         'param_faktura-vat__cat',
         'param_moc',
         'param_marka-pojazdu__cat',
         'feature_kamera-cofania__cat',
         'param_typ__cat',
         'param_pojemność-skokowa',
         'seller_name__cat',
         'feature_wspomaganie-kierownicy__cat',
         'param_model-pojazdu__cat',
         'param_wersja__cat',
         'param_kod-silnika__cat',
         'feature_system-start-stop__cat',
         'feature_asystent-pasa-ruchu__cat',
         'feature_czujniki-parkowania-przednie__cat',
         'feature_łopatki-zmiany-biegów__cat',
         'feature_regulowane-zawieszenie__cat']
# Baseline XGBoost hyper-parameters.
xgb_params = {
    'max_depth': 5,
    'n_estimators': 50,
    'learning_rate': 0.1,
    'seed':0
}
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + id="Ru0XZZEKzfX_" colab_type="code" colab={}
# + [markdown] id="C0yPvpKH34Sb" colab_type="text"
# ## Hyperopt
# + id="i7bA_bqA36Gs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 944} outputId="bb14afdd-bc1a-455e-c82d-c509c97d0990"
def obj_func(params):
    """Hyperopt objective: cross-validate XGBoost with `params`.

    `run_model` returns the mean of negative-MAE scores, so the absolute
    value is the (positive) mean absolute error to be minimised.
    """
    print("Training with params: ")
    print(params)
    mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)
    return {'loss': np.abs(mean_mae), 'status': STATUS_OK}

#space
# Search space for the hyper-parameter optimisation.
xgb_reg_params = {
    'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
    'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype = int)),
    'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
    'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
    'objective': 'reg:squarederror',
    'n_estimators': 100,
    'seed': 0,
}
#run
# Tree-structured Parzen Estimator search, 25 evaluations.
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals = 25)
|
day5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ------------ First A.I. activity ------------
# ## 1. IBOVESPA volume prediction
# -> Importing libraries that are going to be used in the code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# -> Importing the datasets
dataset = pd.read_csv("datasets/ibovespa.csv",delimiter = ";")
# -> Converting time to datetime in order to make it easy to manipulate
# +
dataset['Data/Hora'] = dataset['Data/Hora'].str.replace("/","-")
dataset['Data/Hora'] = pd.to_datetime(dataset['Data/Hora'])
# -
# -> Visualizing the data
dataset.head()
# -> creating date dataframe and splitting its features (kept commented out)
# date = dataset.iloc[:,0:1]
#
# date['day'] = date['Data/Hora'].dt.day
# date['month'] = date['Data/Hora'].dt.month
# date['year'] = date['Data/Hora'].dt.year
#
# date = date.drop(columns = ['Data/Hora'])
#
# -> removing useless columns
dataset = dataset.drop(columns = ['Data/Hora','Unnamed: 7','Unnamed: 8','Unnamed: 9'])
# -> transforming attributes to the correct format
# (strip the thousands '.' separators, swap decimal ',' for '.', cast to float)
for key, value in dataset.head().iteritems():
    dataset[key] = dataset[key].str.replace(".","").str.replace(",",".").astype(float)
"""
for key, value in date.head().iteritems():
    dataset[key] = date[key]
"""
# -> Means
dataset.mean()
# -> plotting graphics
plt.boxplot(dataset['Volume'])
plt.title('boxplot')
plt.xlabel('volume')
plt.ylabel('valores')
plt.ticklabel_format(style='sci', axis='y', useMathText = True)
dataset['Maxima'].median()
dataset['Minima'].mean()
# -> Trimmed mean (drops 10% from each tail before averaging)
from scipy import stats
m = stats.trim_mean(dataset['Minima'], 0.1)
print(m)
# -> Variance and standard deviation of the quote ('Cotacao') column
v = dataset['Cotacao'].var()
print(v)
d = dataset['Cotacao'].std()
# BUG FIX: print the standard deviation just computed — the original
# printed `v` (the variance) a second time and never used `d`.
print(d)
m = dataset['Cotacao'].mean()
print(m)
# -> Covariance of the attributes; standard-scale first to ease reading, then convert back to a pandas dataframe
# #### correlation shows us the relationship between the two variables and how are they related while covariance shows us how the two variables vary from each other.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
dataset_cov = sc.fit_transform(dataset)
dataset_cov = pd.DataFrame(dataset_cov)
dataset_cov.cov()
# -> plotting the graph may be easier to observe the correlation
corr = dataset.corr()
corr.style.background_gradient(cmap = 'coolwarm')
pd.plotting.scatter_matrix(dataset, figsize=(6, 6))
plt.show()
# Heatmap of the correlation matrix with labelled axes.
plt.matshow(dataset.corr())
plt.xticks(range(len(dataset.columns)), dataset.columns)
plt.yticks(range(len(dataset.columns)), dataset.columns)
plt.colorbar()
plt.show()
|
drafts/exercises/ibovespa.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Euler Problem 70
# ================
#
# Euler's Totient function, φ(n) [sometimes called the phi function], is used to
# determine the number of positive numbers less than or equal to n which are
# relatively prime to n. For example, as 1, 2, 4, 5, 7, and 8, are all less than
# nine and relatively prime to nine, φ(9)=6. The number 1 is considered to be
# relatively prime to every positive number, so φ(1)=1.
#
# Interestingly, φ(87109)=79180, and it can be seen that 87109 is a permutation
# of 79180.
#
# Find the value of n, 1 < n < 10^7, for which φ(n) is a permutation of n and the
# ratio n/φ(n) produces a minimum.
from sympy import sieve, primepi
N = 10 ** 7
n = int(N ** 0.5)
min_ratio = 1.005
best_n = None
primes = list(sieve.primerange(1, N))
pi = primepi(n)
num_primes = len(primes)
for i in range(pi, -1, -1):
p = primes[i]
ratio = p / (p - 1)
if ratio > min_ratio:
break
for j in range(i+1, num_primes):
q = primes[j]
n = p * q
if n > N:
break
if p / (p - 1) > min_ratio:
break
if sorted(str(n)) == sorted(str(n - p - q + 1)):
ratio = 1.0 * p * q / (p - 1) / (q - 1)
if ratio < min_ratio:
min_ratio = ratio
best_n = n
print(best_n)
# **Discussion:** The ratio n/φ(n) is equal to the product of p/(p-1) for all distinct prime factors p of n.
# We may assume that n has no repeated factors.
#
# If n is prime then φ(n) = n - 1, so the digits of φ(n) cannot be a permutation of the digits of n.
#
# If n is the product of three or more prime factors, then its smallest prime factor is less than 200, so n/φ(n) > 1.005.
#
# Suppose that n is the product of two distinct prime factors p and q (p < q). Then n/φ(n) = p/(p-1) * q/(q-1). If the minimum value realized in this case is less than 1.005, then we have found the optimal value of n.
|
Euler 070 - Totient permutation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Real Estate Appraisal
# ## 0. Table of Contents
# 1. Imports and Settings
# 2. Load and Transform
# 3. Assignment of Independent Variables (X), Dependent Variables (y), examples (n), and features (k)
# 4. Assessed Property Data
# 5. Exploratory Data Analysis
# 6. Multiple Linear Regression Model with all variables (no regularization)
# 7. Removal of Variables with Lower Significance and New Linear Model
# 8. Regularized Multiple Linear Regression Model (α proportional to the P(t) of each variable)
# 9. Comparing Models
# ### 1. Imports and Settings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
plt.style.use('seaborn')
# %matplotlib inline
# ### 2. Load and Transform
# Sample of comparable properties.
df = pd.read_csv("../../data/sample.csv")
df.head()
# ### 3. Assignment of Independent Variables (X), Dependent Variables (y), examples (n), and features (k)
# +
# Explanatory variables are columns 3..13; add the intercept and drop incomplete rows.
X = df.iloc[:, 3:14]
X = sm.add_constant(X).dropna()
ind = X.index
# Target: unitary value = total value / area, aligned to the rows kept in X.
y = (df.loc[ind, 'total_value'] / df.loc[ind, 'area']).rename("unitary_value")
n = np.size(X, 0)
k = np.size(X, 1) - 1
# -
data = pd.concat([X, y], axis=1).drop(columns="const")
# ### 4. Assessed Property Data
# Feature vector of the property being appraised (same column order as X).
assessed_list = np.reshape([1, 100, 3795.13, 3, 2, 2, 0, 0, 1, 1, 1, 1340], (1, 12))
assessed = pd.DataFrame(assessed_list, index=[0], columns=X.columns)
assessed
# ### 5. Exploratory Data Analysis
X.describe()
plt.hist(y);
plt.title("Target Variable Histogram (Unitary value)");
plt.xlabel("Unitary value in R$/m²");
plt.ylabel("Frequency");
# One histogram / scatter-vs-target / boxplot triptych per explanatory variable
# (columns[0] is the constant, hence the i+1 offset).
for i in range(k):
    fig, axs = plt.subplots(1, 3, figsize=(12, 4))
    axs[0].hist(X[X.columns[i+1]])
    axs[1].scatter(X[X.columns[i+1]], y)
    axs[2].boxplot(X[X.columns[i+1]])
    fig.suptitle(f"Histogram, Dispersion e Boxplot of variable {X.columns[i+1]}")
    fig.subplots_adjust(left=.01, right=.99, bottom=.1, top=.9,
                        wspace=.2, hspace=.4)
# #### Correlation Matrix:
data.corr()
# #### Dispersion Matrix:
pd.plotting.scatter_matrix(data, alpha=0.8, figsize=(18, 18));
plt.tight_layout()
# #### Observations:
# The graphic analysis of the histograms, dispersions, and boxplots of the independent variables shows an important degree of unbalance in the sample data, most of which do not present normal distribution behavior in their histograms, do not present constant variance and normality of errors in their dispersions with the dependent variable.
# ### 6. Multiple Linear Regression Model with all variables (no regularization)
# #### Unitary value of population____________y = β0 + β1 . x1 + β2 . x2 + ... + βk . xk + ε
# #### Unitary value of sample_______________ŷ = b0 + b1 . x1 + b2 . x2 + ... + bk . xk + e
# #### Least squares method________________min Σe²
# Ordinary least squares with every explanatory variable.
model = sm.OLS(y, X)
results = model.fit()
results.summary()
# Point estimate: coefficients dotted with the assessed feature vector,
# then scaled by the property area to obtain a total value.
assessed_UV_01 = sum(results.params*assessed.loc[0])
assessed_TV_01 = assessed_UV_01 * assessed.loc[0, 'area']
print(f"The property value estimated by the Nonregularized Multiple\nLinear Regression Model with all variables is:\nR$ {assessed_TV_01:,.2f}.")
# #### P>|t| or Student's t-test
# t indicates the probability, assuming the null hypothesis (bi = 0), of being an extreme sample. Therefore, this metric allows one to assume the significance of an explanatory variable for the model in the Confidence Interval of 1 - t.
# ### 7. Removal of Variables with Lower Significance and New Linear Model
# Drop the variables with low significance (high P>|t|) and refit the OLS.
X.drop(columns=["Vap", "restrooms", "balcony", "back", "maids_room"], inplace=True)
assessed.drop(columns=["Vap", "restrooms", "balcony", "back", "maids_room"], inplace=True)
k = np.size(X, 1) - 1
model = sm.OLS(y, X)
results = model.fit()
results.summary()
assessed_UV_02 = sum(results.params*assessed.loc[0])
assessed_TV_02 = assessed_UV_02 * assessed.loc[0, 'area']
print(f"The property value estimated by the Regularized Multiple Linear Regression\nModel with only variables significant to the model (at 89.4% CI) is:\nR$ {assessed_TV_02:,.2f}.")
# ### 8. Regularized Multiple Linear Regression Model (α proportional to the P(t) of each variable)
# Penalty vector: one alpha per remaining column (const + 6 features).
alpha = [0, 0, 10, 30, 0, 0, 10]
# +
model = sm.OLS(y, X)
# elastic_net with L1_wt=1.0 is a pure L1 (lasso) penalty, applied per coefficient.
results_reg = model.fit_regularized(method='elastic_net', alpha=alpha, L1_wt=1.0,
                                    start_params=None, profile_scale=False, refit=False)
results_reg.params
# -
# Valuation with the regularized coefficients.
# BUG FIX: use `results_reg.params` (the regularized fit from section 8).
# The original reused `results.params`, i.e. the *unregularized* model of
# section 7, so the "regularized" valuation duplicated the previous one.
assessed_UV_03 = sum(results_reg.params*assessed.loc[0])
assessed_TV_03 = assessed_UV_03 * assessed.loc[0, 'area']
print(f"The property value estimated by the Regularized Multiple Linear Regression\nModel by vector α and with only variables significant to the model (at 89.4% CI) is:\nR$ {assessed_TV_03:,.2f}.")
# Goodness of fit of the regularized model, computed manually from its params.
SSE = np.sum((np.dot(X, results_reg.params) - y).pow(2))
SST = np.sum((np.mean(y) - y).pow(2))
R2 = 1 - SSE/SST
R2
# Adjusted R² penalises for the number of features k given n examples.
R2_Aj = 1 - (1 - R2)*((n - 1)/(n - k - 1))
R2_Aj
# ### 9. Comparing Models
# 1. OLS with all variables
# Av = R$ 1.129.140,39
# R² = 0,687
# R²aj = 0,564
#
# 2. OLS with significant variables (IC de 89,4%)
# Av = R$ 1.146.177,03
# R² = 0,665
# R²aj = 0,604
#
# 3. OLS regularized (vector α) with significant variables (IC de 89,4%)
# Av = R$ 1.078.836,24
# R² = 0,628
# R²aj = 0,561
|
scripts/en/.ipynb_checkpoints/regression_models_v03-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp vision.data
# -
#export
from fastai2.torch_basics import *
from fastai2.data.all import *
from fastai2.vision.core import *
#hide
from nbdev.showdoc import *
# # Vision data
#
# > Helper functions to get data in a `DataBunch` un the vision applicaiton and higher class `ImageDataBunch`
# ## ImageDataBunch -
#export
def _using_attr(f, attr, x):
    # Implementation target for `using_attr`: fetch `attr` from `x`, apply `f`.
    value = getattr(x, attr)
    return f(value)

#export
def using_attr(f, attr):
    "Change function `f` to operate on `attr`"
    # Bind `f` and the attribute name now; the returned partial takes only `x`.
    # A module-level helper + `partial` (rather than a lambda) keeps the result picklable.
    return partial(_using_attr, f, attr)
# Sanity test: apply `str.upper` to the `name` attribute of a Path.
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
# +
#export
class ImageDataBunch(DataBunch):
    "`DataBunch` with convenience factory methods for the vision application."

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, **kwargs):
        "Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
        # Split by parent-folder name unless a random `valid_pct` split is requested.
        splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
        dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
                           get_items=get_image_files,
                           splitter=splitter,
                           get_y=parent_label)
        return cls.from_dblock(dblock, path, path=path, **kwargs)

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
        "Create from list of `fnames` in `path`s with `label_func`."
        dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
                           splitter=RandomSplitter(valid_pct, seed=seed),
                           get_y=label_func)
        return cls.from_dblock(dblock, fnames, path=path, **kwargs)

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_name_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
        "Create from name attrs in list of `fnames` in `path`s with `label_func`"
        # Wrap `label_func` so it receives each file's `name` attribute, then reuse from_path_func.
        f = using_attr(label_func, 'name')
        return cls.from_path_func(path, fnames, f, valid_pct=valid_pct, seed=seed, **kwargs)

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_path_re(cls, path, fnames, pat, **kwargs):
        "Create from list of `fnames` in `path`s with re expression `pat`."
        return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_name_re(cls, path, fnames, pat, **kwargs):
        "Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
        return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
                y_block=None, valid_col=None, **kwargs):
        "Create from `df` using `fn_col` for the file names and `label_col` for the labels."
        # Prefix prepended to every file name read from the dataframe.
        pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
        if y_block is None:
            # Multi-label target if several label columns or a label delimiter is given.
            is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
            y_block = MultiCategoryBlock if is_multi else CategoryBlock
        # Split randomly, or by the boolean/marker column `valid_col` when provided.
        splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
        dblock = DataBlock(blocks=(ImageBlock, y_block),
                           get_x=ColReader(fn_col, pref=pref, suff=suff),
                           get_y=ColReader(label_col, label_delim=label_delim),
                           splitter=splitter)
        return cls.from_dblock(dblock, df, path=path, **kwargs)

    @classmethod
    def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
        "Create from a csv file read into a dataframe, then delegate to `from_df`."
        df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
        return cls.from_df(df, path=path, **kwargs)

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, **kwargs):
        "Create from list of `fnames` in `path`."
        if y_block is None:
            # Infer the target block: multi-category for lists of labels,
            # plain transform (regression) for floats, else single category.
            y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
                TransformBlock if isinstance(labels[0], float) else CategoryBlock)
        dblock = DataBlock(blocks=(ImageBlock, y_block),
                          splitter=RandomSplitter(valid_pct, seed=seed))
        return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)


# Re-apply `delegates` now that the delegated-to methods exist, so the expanded
# signatures are picked up — NOTE(review): presumably needed because the in-class
# decoration ran before `from_df`/`from_path_func` were defined; confirm against
# fastcore's `delegates` semantics.
ImageDataBunch.from_csv = delegates(to=ImageDataBunch.from_df)(ImageDataBunch.from_csv)
ImageDataBunch.from_path_re = delegates(to=ImageDataBunch.from_path_func)(ImageDataBunch.from_path_re)
# -
# Render the documentation of each factory method in the notebook/docs.
show_doc(ImageDataBunch.from_folder)
show_doc(ImageDataBunch.from_path_func)
show_doc(ImageDataBunch.from_path_re)
show_doc(ImageDataBunch.from_name_func)
show_doc(ImageDataBunch.from_name_re)
show_doc(ImageDataBunch.from_df)
show_doc(ImageDataBunch.from_csv)
show_doc(ImageDataBunch.from_lists)
#export
class SegmentationDataBunch(DataBunch):
    "`DataBunch` for segmentation: image inputs paired with mask targets."

    @classmethod
    @delegates(DataBunch.from_dblock)
    def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, **kwargs):
        "Create from list of `fnames` in `path`s with `label_func`."
        # Target block produces PILMask images (the segmentation masks).
        dblock = DataBlock(blocks=(ImageBlock, ImageBlock(cls=PILMask)),
                           splitter=RandomSplitter(valid_pct, seed=seed),
                           get_y=label_func)
        res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
        # Attach the mask class codes as the vocab when provided.
        if codes is not None: res.vocab = codes
        return res
# ## Show methods
#export
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False):
    "Return a list of `n` axes laid out in a (roughly square) grid; `double` doubles the columns."
    rows = rows or int(np.ceil(math.sqrt(n)))
    cols = cols or int(np.ceil(n/rows))
    if double: cols*=2 ; n*=2
    figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
    fig,axs = subplots(rows, cols, figsize=figsize)
    # Surplus axes are switched off; `set_axis_off` returns None, but those
    # slots are sliced away by `[:n]` so only live axes are returned.
    axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
    if title is not None: fig.suptitle(title, weight='bold', size=14)
    return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
    # Build a grid of axes, then delegate the actual drawing to the generic
    # (object-typed) show_batch implementation.
    if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
    ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
    return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
    # Image-to-image case: a double-width grid, inputs shown in even slots
    # and targets in odd slots.
    if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
    for i in range(2):
        ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
    return ctxs
# ## Helper functions for object detection
# export
def clip_remove_empty(bbox, label):
    "Clip bounding boxes with image border and label background the empty ones."
    # Clip every coordinate into the [-1, 1] image frame.
    clipped = torch.clamp(bbox, -1, 1)
    widths = clipped[..., 2] - clipped[..., 0]
    heights = clipped[..., 3] - clipped[..., 1]
    # A negative signed area means the box was degenerate (flipped corners).
    keep = ~(widths * heights < 0.)
    return (clipped[keep], label[keep])
# Sanity check: box 1 is clipped into the [-1, 1] frame; box 3 has x2 < x1
# (negative area) so it is dropped together with its label.
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
    "Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
    # Drop degenerate boxes first so the padding target reflects survivors only.
    cleaned = [(img, *clip_remove_empty(box, lbl)) for img, box, lbl in samples]
    target = max(len(lbl) for _, _, lbl in cleaned)
    padded = []
    for img, box, lbl in cleaned:
        # Grow every sample to `target` boxes: zero boxes, labels = pad_idx.
        box = torch.cat([box, box.new_zeros(target - box.shape[0], 4)])
        lbl = torch.cat([lbl, lbl.new_zeros(target - lbl.shape[0]) + pad_idx])
        padded.append((img, box, lbl))
    return padded
# Sanity check for `bb_pad`: sample 1 loses its degenerate third box, and
# sample 2 (fewer boxes) is padded with a zero box and a pad_idx (0) label.
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])  # NOTE(review): unused below
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
# ## `TransformBlock`s for vision
#export
# `TransformBlock` factory for images: decode items with `cls.create`
# (default `PILImage`) and convert int tensors to floats at batch time.
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
# Mask targets decode via `PILMask` and get int->float conversion at batch time.
MaskBlock = TransformBlock(type_tfms=PILMask.create, batch_tfms=IntToFloatTensor)
#export
# Points and bounding boxes both rescale their coordinates with `PointScaler`;
# boxes additionally need `bb_pad` to collate variable-length box lists.
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
# Fix: `BBoxBlock` was defined twice verbatim (two identical assignments under
# separate #export markers); the redundant second definition is removed.
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dbunch_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
    "TransformBlock for bounding-box labels: multi-category over `vocab`, with an extra NA class when `add_na`."
    return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
# # Export -
#hide
# Write all `#export`-marked cells of this notebook out to the library module.
from nbdev.export import notebook2script
notebook2script()
|
nbs/08_vision.data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="fig/cover-small.jpg">
# *This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by <NAME>; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*
#
# *The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# <!--NAVIGATION-->
# < [Basic Python Semantics: Operators](04-Semantics-Operators.ipynb) | [Contents](Index.ipynb) | [Built-In Data Structures](06-Built-in-Data-Structures.ipynb) >
# # Built-In Types: Simple Values
# When discussing Python variables and objects, we mentioned the fact that all Python objects have type information attached. Here we'll briefly walk through the built-in simple types offered by Python.
# We say "simple types" to contrast with several compound types, which will be discussed in the following section.
#
# Python's simple types are summarized in the following table:
#
# <center>**Python Scalar Types**</center>
#
# | Type | Example | Description |
# |-------------|----------------|--------------------------------------------------------------|
# | ``int`` | ``x = 1`` | integers (i.e., whole numbers) |
# | ``float`` | ``x = 1.0`` | floating-point numbers (i.e., real numbers) |
# | ``complex`` | ``x = 1 + 2j`` | Complex numbers (i.e., numbers with real and imaginary part) |
# | ``bool`` | ``x = True`` | Boolean: True/False values |
# | ``str`` | ``x = 'abc'`` | String: characters or text |
# | ``NoneType``| ``x = None`` | Special object indicating nulls |
#
# We'll take a quick look at each of these in turn.
# ## Integers
# The most basic numerical type is the integer.
# Any number without a decimal point is an integer:
x = 1
type(x)
# Python integers are actually quite a bit more sophisticated than integers in languages like ``C``.
# C integers are fixed-precision, and usually overflow at some value (often near $2^{31}$ or $2^{63}$, depending on your system).
# Python integers are variable-precision, so you can do computations that would overflow in other languages:
2 ** 200
# Another convenient feature of Python integers is that by default, division up-casts to floating-point type:
5 / 2
# Note that this upcasting is a feature of Python 3; in Python 2, like in many statically-typed languages such as C, integer division truncates any decimal and always returns an integer:
# ``` python
# # Python 2 behavior
# >>> 5 / 2
# 2
# ```
# To recover this behavior in Python 3, you can use the floor-division operator:
5 // 2
# Finally, note that although Python *2.x* had both an ``int`` and ``long`` type, Python 3 combines the behavior of these two into a single ``int`` type.
# ## Floating-Point Numbers
# The floating-point type can store fractional numbers.
# They can be defined either in standard decimal notation, or in exponential notation:
x = 0.000005
y = 5e-6
print(x == y)
x = 1400000.00
y = 1.4e6
print(x == y)
# In the exponential notation, the ``e`` or ``E`` can be read "...times ten to the...",
# so that ``1.4e6`` is interpreted as $1.4 \times 10^6$.
# An integer can be explicitly converted to a float with the ``float`` constructor:
float(1)
# ### Aside: Floating-point precision
# One thing to be aware of with floating point arithmetic is that its precision is limited, which can cause equality tests to be unstable. For example:
0.1 + 0.2 == 0.3
# Why is this the case? It turns out that it is not a behavior unique to Python, but is due to the fixed-precision format of the binary floating-point storage used by most, if not all, scientific computing platforms.
# All programming languages using floating-point numbers store them in a fixed number of bits, and this leads some numbers to be represented only approximately.
# We can see this by printing the three values to high precision:
print("0.1 = {0:.17f}".format(0.1))
print("0.2 = {0:.17f}".format(0.2))
print("0.3 = {0:.17f}".format(0.3))
# We're accustomed to thinking of numbers in decimal (base-10) notation, so that each fraction must be expressed as a sum of powers of 10:
# $$
# 1/8 = 1\cdot 10^{-1} + 2\cdot 10^{-2} + 5\cdot 10^{-3}
# $$
# In the familiar base-10 representation, we represent this in the familiar decimal expression: $0.125$.
#
# Computers usually store values in binary notation, so that each number is expressed as a sum of powers of 2:
# $$
# 1/8 = 0\cdot 2^{-1} + 0\cdot 2^{-2} + 1\cdot 2^{-3}
# $$
# In a base-2 representation, we can write this $0.001_2$, where the subscript 2 indicates binary notation.
# The value $0.125 = 0.001_2$ happens to be one number which both binary and decimal notation can represent in a finite number of digits.
#
# In the familiar base-10 representation of numbers, you are probably familiar with numbers that can't be expressed in a finite number of digits.
# For example, dividing $1$ by $3$ gives, in standard decimal notation:
# $$
# 1 / 3 = 0.333333333\cdots
# $$
# The 3s go on forever: that is, to truly represent this quotient, the number of required digits is infinite!
#
# Similarly, there are numbers for which binary representations require an infinite number of digits.
# For example:
# $$
# 1 / 10 = 0.00011001100110011\cdots_2
# $$
# Just as decimal notation requires an infinite number of digits to perfectly represent $1/3$, binary notation requires an infinite number of digits to represent $1/10$.
# Python internally truncates these representations at 52 bits beyond the first nonzero bit on most systems.
#
# This rounding error for floating-point values is a necessary evil of working with floating-point numbers.
# The best way to deal with it is to always keep in mind that floating-point arithmetic is approximate, and *never* rely on exact equality tests with floating-point values.
# ## Complex Numbers
# Complex numbers are numbers with real and imaginary (floating-point) parts.
# We've seen integers and real numbers before; we can use these to construct a complex number:
complex(1, 2)
# Alternatively, we can use the "``j``" suffix in expressions to indicate the imaginary part:
1 + 2j
# Complex numbers have a variety of interesting attributes and methods, which we'll briefly demonstrate here:
c = 3 + 4j
c.real # real part
c.imag # imaginary part
c.conjugate() # complex conjugate
abs(c) # magnitude, i.e. sqrt(c.real ** 2 + c.imag ** 2)
# ## String Type
# Strings in Python are created with single or double quotes:
message = "what do you like?"
response = 'spam'
# Python has many extremely useful string functions and methods; here are a few of them:
# length of string
len(response)
# Make upper-case. See also str.lower()
response.upper()
# Capitalize. See also str.title()
message.capitalize()
# concatenation with +
message + response
# multiplication is multiple concatenation
5 * response
# Access individual characters (zero-based indexing)
message[0]
# For more discussion of indexing in Python, see ["Lists"](06-Built-in-Data-Structures.ipynb#Lists).
# ## None Type
# Python includes a special type, the ``NoneType``, which has only a single possible value: ``None``. For example:
type(None)
# You'll see ``None`` used in many places, but perhaps most commonly it is used as the default return value of a function.
# For example, the ``print()`` function in Python 3 does not return anything, but we can still catch its value:
return_value = print('abc')
print(return_value)
# Likewise, any function in Python with no return value is, in reality, returning ``None``.
# ## Boolean Type
# The Boolean type is a simple type with two possible values: ``True`` and ``False``, and is returned by comparison operators discussed previously:
result = (4 < 5)
result
type(result)
# Keep in mind that the Boolean values are case-sensitive: unlike some other languages, ``True`` and ``False`` must be capitalized!
print(True, False)
# Booleans can also be constructed using the ``bool()`` object constructor: values of any other type can be converted to Boolean via predictable rules.
# For example, any numeric type is False if equal to zero, and True otherwise:
bool(2014)
bool(0)
bool(3.1415)
# The Boolean conversion of ``None`` is always False:
bool(None)
# For strings, ``bool(s)`` is False for empty strings and True otherwise:
bool("")
bool("abc")
# For sequences, which we'll see in the next section, the Boolean representation is False for empty sequences and True for any other sequences
bool([1, 2, 3])
bool([])
# <!--NAVIGATION-->
# < [Basic Python Semantics: Operators](04-Semantics-Operators.ipynb) | [Contents](Index.ipynb) | [Built-In Data Structures](06-Built-in-Data-Structures.ipynb) >
|
Lectures/05-Built-in-Scalar-Types.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def create():
    """Return a fresh, empty stack (a plain Python list)."""
    return []
def push(stack, res):
    """Put `res` on top of `stack`, mutating it in place; returns None."""
    stack += [res]
def pop(stack):
    """Remove and return the top element; None when `stack` is empty."""
    if not stack:
        return None
    return stack.pop()
def isempty(stack):
    """Report whether `stack` holds no elements."""
    return len(stack) == 0
def top(stack):
    """Peek at the top element of `stack` without removing it; None when empty."""
    return stack[-1] if stack else None
def sortstack(stack):
    """Drain `stack` and return a new stack sorted ascending (largest on top).

    Two-stack insertion sort: each element popped from the input is sunk
    into place in the output stack, temporarily shuttling larger elements
    back onto the input. The input stack is left empty.
    """
    ordered = []
    while stack:
        current = stack.pop()
        # Move anything larger than `current` back so it can slot in below.
        while ordered and ordered[-1] > current:
            stack.append(ordered.pop())
        ordered.append(current)
    return ordered
def Print(stack):
    """Print `stack` from top to bottom, space-separated, without a newline."""
    for element in reversed(stack):
        print(element, end=" ")
if __name__=='__main__':
    # Demo: push six values, print the stack (top first), then print it sorted.
    stack = create()
    push(stack, 34)
    push(stack, 3)
    push(stack, 31)
    push(stack, 98)
    push(stack, 92)
    push(stack, 23)
    Print(stack)
    temp1 = sortstack(stack)
    print('\n')
    Print(temp1)
# -
|
Stack/Sort Using Temporary Stack.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Los Alamos National Lab Data Preparation
# *Source:* [LANL dataset](https://csr.lanl.gov/data/cyber1/)
#
# This data set represents 58 consecutive days of de-identified event data collected from five sources within Los Alamos National Laboratory’s corporate, internal computer network.
#
# Only the auth.txt file is used in our current work, as all red team activity appearing in the data correspond exclusively to authentication events. Future work includes utilizing additional data streams (namely; proc.txt.gz). We perform a pre-processing step on the file redteam.txt.gz so that its log lines are expanded to match the full log line which they correspond to in the auth.txt.gz file. This adds general convenience, and speeds up the process of querying to find out if a given log line is malicious.
#
# This notebook outlines methods used for translating log lines into integer vectors which can be acted on by event level models. Note that the scripts in **/safekit/features/lanl** can also be used standalone to accomplish the same translation, but only operate on the auth.txt data file.
#
# ### Character Level
# ----
# *note: /safekit/features/lanl/char_feats.py*
# At the character level, the ascii value for each character in a log line is used as a token in the input sequence for the model.
#
# The translation used is
def translate_line(string, pad_len):
    """Encode `string` as space-separated integer tokens.

    Layout: "0" (start token), one `ord(c) - 30` token per character,
    "1" (end token), then `pad_len` zero tokens, and a trailing newline.
    """
    char_tokens = [str(ord(ch) - 30) for ch in string]
    pad_tokens = ["0"] * pad_len
    return "0 " + " ".join(char_tokens) + " 1 " + " ".join(pad_tokens) + "\n"
# Where **string** is a log line to be translated, and **pad_len** is the number of 0's to append so that the length of the translated string has the same number of characters as the longest log line in the dataset (character-wise). **0** and **1** are used to describe start and end of the translated sentence.
#
# The length of the max log line can be obtained using
# Find the longest log line measured *without* the timestamp field, since
# that is the form that gets translated below.
max_len = 0
with open("auth_proc.txt", "r") as infile:
    for line in infile:
        tmp = line.strip().split(',')
        # Drop field 1 (the epoch second); keep event type + remaining fields.
        line_minus_time = tmp[0] + ',' + ','.join(tmp[2:])
        if len(line_minus_time) > max_len:
            # Bug fix: store the length actually being compared; the original
            # stored len(line), which includes the timestamp and the newline.
            max_len = len(line_minus_time)
print (max_len)
# We chose to filter out weekends for this dataset, as they capture little activity. These days are
weekend_days = [3, 4, 10, 11, 17, 18, 24, 25, 31, 32, 38, 39, 45, 46, 47, 52, 53]
# It is also convenient to keep track of which lines are in fact red events. Note that labels are not used during unsupervised training, this step is included to simplify the evaluation process later on.
with open("redevents.txt", 'r') as red:
redevents = set(red.readlines())
# **redevents.txt** contains all of the red team log lines verbatim from auth.txt.
# It is now possible to parse the data file, reading in (raw) and writing out (translated) log lines.
import math
with open("auth_proc.txt", 'r') as infile, open("ap_char_feats.txt", 'w') as outfile:
    outfile.write('line_number second day user red seq_len start_sentence\n') # header
    infile.readline()  # skip the header row of auth_proc.txt
    for line_num, line in enumerate(infile):
        tmp = line.strip().split(',')
        # Bug fix: this line was a garbled double paste that prepended the
        # string's own first character; rebuild "event,fields..." minus time.
        line_minus_time = tmp[0] + ',' + ','.join(tmp[2:])
        diff = max_len - len(line_minus_time)  # padding needed to reach max_len
        raw_line = line.split(",")
        sec = raw_line[1]
        user = raw_line[2].strip().split('@')[0]
        # Integer day index; floor division gives an int on both py2 and py3
        # (math.floor returned a float under py2, breaking int() parses later).
        day = int(sec) // 86400
        red = 0
        line_minus_event = ",".join(raw_line[1:])  # keeps trailing newline, matching redevents entries
        red += int(line_minus_event in redevents) # 1 if line is red event
        if user.startswith('U') and day not in weekend_days:
            # NOTE(review): pads to a fixed 120 tokens rather than `diff`;
            # confirm 120 covers the corpus maximum or switch to `diff`.
            translated_line = translate_line(line_minus_time, 120) # diff
            outfile.write("%s %s %s %s %s %s %s" % (line_num, sec, day,
                                                    user.replace("U", ""),
                                                    red, len(line_minus_time) + 1, translated_line))
# The final preprocessing step is to split the translated data into multiple files; one for each day.
import os
os.mkdir('./char_feats')
with open('./ap_char_feats.txt', 'r') as data:
    # Bug fix: skip the header row -- int() below would fail on the 'day'
    # column name from the header written by the previous cell.
    data.readline()
    current_day = '0'
    outfile = open('./char_feats/' + current_day + '.txt', 'w')
    for line in data:
        larray = line.strip().split(' ')
        # Append while the day column (index 2) is unchanged; start a new
        # per-day file whenever it rolls over.
        if int(larray[2]) == int(current_day):
            outfile.write(line)
        else:
            outfile.close()
            current_day = larray[2]
            outfile = open('./char_feats/' + current_day + '.txt', 'w')
            outfile.write(line)
    outfile.close()
# The char_feats folder can now be passed to the tiered or simple language model.
# The config (data spec) or this experiment is shown below (write as string to json file).
# >{**"sentence_length"**: 129,
# **"token_set_size"**: 96,
# **"num_days"**: 30,
# **"test_files"**: ["0head.txt", "1head.txt", "2head.txt"],
# **"weekend_days"**: [3, 4, 10, 11, 17, 18, 24, 25]}
# ### Word Level
# ----
# Instead of using the ascii values of individual characters, this approach operates on the features of a log line as if they were a sequence. In other words, each log line is split on "," and each index in the resulting array corresponds to a timestep in the input sequence of the event level model.
#
# To map the token strings to integer values, a vocabulary is constructed for the dataset. Any tokens encountered during evaluation which were not present in the initial data are mapped to a common "out of vocabulary" (OOV) value during translation. If the number of unique tokens within the data is known to be prohibitively large, a count dictionary can be used to infer an arbitrary cutoff which maps the least likely tokens in the data to the OOV integer. Eg; all tokens which appear less than 5 times in the data map to the OOV token during translation.
#
# Note that in our AICS paper we have seperate OOV tokens for user, pc, and domain tokens.
# In the following code, every unique token is included in the vocabulary.
# +
# Running vocabulary: token string -> stringified integer id.
index = 4 # <sos> : 1, <eos>: 2, auth_event: 3, proc_event: 4
vocab = {"OOV": "0", "<sos>": "1", "<eos>": "2","auth_event": "3", "proc_event": "4"}
def lookup(key):
    """Return the (string) integer id for `key`, growing the vocab on a miss."""
    global index, vocab
    if key in vocab:
        return vocab[key]
    else:
        # First time this token is seen: assign it the next free id.
        index += 1
        vocab[key] = str(index)
        return str(index)
# -
# Translated log lines should be padded out to the maximum length fields_list. In the case of LANL, the proc events are longer and contain 11 tokens.
def translate(fields_list):
    """Map raw tokens to vocab ids and frame the sentence.

    Pads with "0" to 11 tokens (the longest LANL event), wraps the result
    with <sos>/<eos> markers, and prepends the token count.
    """
    translated_list = list(map(lookup, fields_list))
    while len(translated_list) < 11:
        translated_list.append("0")
    translated_list.insert(0, "1") # <sos>
    translated_list.append("2") # <eos>
    # The length counts the sentence including <sos>/<eos>, not this token itself.
    translated_list.insert(0, str(len(translated_list))) # sentence len
    return translated_list
# We chose to filter out weekends for this dataset, as they capture little activity. It is also convenient to keep track of which lines are in fact red events. Note that labels are not used during unsupervised training, this step is included to simplify the evaluation process later on.
# +
weekend_days = [3, 4, 10, 11, 17, 18, 24, 25, 31, 32, 38, 39, 45, 46, 47, 52, 53]
with open("redevents.txt", 'r') as red:
redevents = set(red.readlines())
# -
# Since OOV cutoffs are not being considered, the translation can be done in a single pass over the data.
import math
with open("auth_proc.txt", 'r') as infile, open("ap_word_feats.txt", 'w') as outfile:
    outfile.write('line_number second day user red sentence_len translated_line padding \n')
    for line_num, line in enumerate(infile):
        line = line.strip()
        # '@' (user@domain) and '$' (machine accounts) are split/stripped so
        # each component becomes its own token.
        fields = line.replace("@", ",").replace("$", "").split(",")
        sec = fields[1]
        # Translate every field except the timestamp.
        translated = translate(fields[0:1] + fields[2:])
        user = fields[2]
        # Integer day index; floor division gives an int on both py2 and py3.
        day = int(sec) // 86400
        red = 0
        # Bug fix: `line` was stripped above but `redevents` entries keep their
        # trailing newline, so the original membership test could never match.
        red += int(line + '\n' in redevents or line in redevents)
        if user.startswith('U') and day not in weekend_days:
            outfile.write("%s %s %s %s %s %s" % (line_num, sec, day, user.replace("U", ""),
                                                 red, " ".join(translated)))
print(len(vocab))
# The final preprocessing step is to split the translated data into multiple files; one for each day.
import os
os.mkdir('./word_feats')
with open('./ap_word_feats.txt', 'r') as data:
    # Bug fix: skip the header row -- int() below would fail on the 'day'
    # column name from the header written by the previous cell.
    data.readline()
    current_day = '0'
    outfile = open('./word_feats/' + current_day + '.txt', 'w')
    for line in data:
        larray = line.strip().split(' ')
        # Append while the day column (index 2) is unchanged; start a new
        # per-day file whenever it rolls over.
        if int(larray[2]) == int(current_day):
            outfile.write(line)
        else:
            outfile.close()
            current_day = larray[2]
            outfile = open('./word_feats/' + current_day + '.txt', 'w')
            outfile.write(line)
    outfile.close()
# The word_feats directory can now be passed to the tiered or simple language model.
# The config json (data spec) for the expiriment is shown below (write as string to json file).
# >{**"sentence_length"**: 12,
# **"token_set_size"**: len(vocab),
# **"num_days"**: 30,
# **"test_files"**: ["0head.txt", "1head.txt", "2head.txt"],
# **"weekend_days"**: [3, 4, 10, 11, 17, 18, 24, 25]}
|
examples/LANL_LM_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from generate_samples import load_rnn_samples, load_human_samples
rnn_poems = load_rnn_samples()
human_poems = load_human_samples()
# # Evaluation
from evaluate import eval_poems
from rhyme import RhymeEvaluator
import pandas as pd
# %matplotlib inline
# %autoreload 2
# %reload_ext autoreload
# Score every generated and every human poem with the rhyme evaluator.
evaluator = RhymeEvaluator()
rnn_scores, rnn_mean, rnn_std = eval_poems(evaluator, rnn_poems)
human_scores, human_mean, human_std = eval_poems(evaluator, human_poems)
rnn_df = pd.DataFrame(rnn_scores, columns=['Combined Score'])
ax = rnn_df.plot(kind='hist', bins=100, figsize=(10, 10), legend=False, fontsize=14)
ax.set_xlabel('Combined Score', fontsize=20)
ax.set_ylabel('Frequency', fontsize=20)
ax.set_title('Score Distribution for Generated Sample', fontsize=24)
human_df = pd.DataFrame(human_scores, columns=['Combined Score'])
ax = human_df.plot(kind='hist', bins=100, figsize=(10, 10), legend=False, fontsize=14)
ax.set_xlabel('Combined Score', fontsize=20)
ax.set_ylabel('Frequency', fontsize=20)
ax.set_title('Score Distribution for Training Data Sample', fontsize=24)
# # Individual
poem = human_poems[0]
poem
evaluator.rhyme_util.get_poem_tone_types(poem)
(max_combined, max_tone, max_rhyme), scores = evaluator.eval(poem, output_all_scores=True, output_split=True)
(max_combined, max_tone, max_rhyme)
scores
|
notebooks/Evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # 2-layer FNN on MNIST
#
# This is MLP (784-200-200-10) on MNIST. Adam algorithm (lr=0.001) with 100 epoches.
#
#
# #### 100 hidden units
#
# Total params: 89,610
# Trainable params: 89,610
# Non-trainable params: 0
#
# #### 200 hidden units
#
# Total params: 199,210
# Trainable params: 199,210
# Non-trainable params: 0
#
# #### 200 hidden units with 10 intrinsic dim
#
# Total params: 2,191,320
# Trainable params: 10
# Non-trainable params: 2,191,310
#
# #### 200 hidden units with 5000 intrinsic dim
# Total params: 996,254,210
# Trainable params: 5,000
# Non-trainable params: 996,249,210
import os, sys
import numpy as np
from matplotlib.pyplot import *
# %matplotlib inline
# +
results_dir = '../results'
class Results():
    """Accumulate per-run training metrics, one list per metric.

    Fixes over the original: `valid_accuracy` is now actually stored (it was
    accepted by add_entry but silently dropped), `train_loss` is no longer
    appended twice per entry, and `list2np` converts the lists to numpy
    arrays instead of clearing them (a copy-paste of __init__).
    """
    def __init__(self):
        self.train_loss = []
        self.train_accuracy = []
        self.valid_loss = []
        self.valid_accuracy = []
        self.run_time = []
    def add_entry(self, train_loss, train_accuracy, valid_loss, valid_accuracy, run_time):
        # Append exactly one value per metric list.
        self.train_loss.append(train_loss)
        self.train_accuracy.append(train_accuracy)
        self.valid_loss.append(valid_loss)
        self.valid_accuracy.append(valid_accuracy)
        self.run_time.append(run_time)
    def add_entry_list(self, entry):
        # entry: (train_loss, train_accuracy, valid_loss, valid_accuracy, run_time)
        self.add_entry(entry[0], entry[1], entry[2], entry[3], entry[4])
    def list2np(self):
        # Convert accumulated lists to numpy arrays for plotting/arithmetic.
        self.train_loss = np.array(self.train_loss)
        self.train_accuracy = np.array(self.train_accuracy)
        self.valid_loss = np.array(self.valid_loss)
        self.valid_accuracy = np.array(self.valid_accuracy)
        self.run_time = np.array(self.run_time)
# Intrinsic dimensions swept in the experiment; diaries are matched to these below.
dim = [10, 50, 100, 300, 500, 1000, 2000, 3000, 4000, 5000]
i = 0
# filename list of diary
diary_names = []
for subdir, dirs, files in os.walk(results_dir):
    for file in files:
        if file == 'diary':
            fname = os.path.join(subdir, file)
            diary_names.append(fname)
# Order the diaries to follow `dim`, and pick out the two baseline
# (direct / extrinsic) runs by their directory-name suffixes.
diary_names_ordered = []
for d in dim:
    for f in diary_names:
        if str(d)+'/' in f:
            # print "%d is in" % d + f
            diary_names_ordered.append(f)
        if '_200dir/' in f:
            diary_names_dir = f
        if '_dir/' in f:
            diary_names_dir_100 = f
# extrinsic update method
# NOTE(review): `extract_num` is defined in a later cell; the cells must be
# executed out of order (or all defs first) for this script form to run.
with open(diary_names_dir,'r') as ff:
    lines0 = ff.readlines()
    R_dir = extract_num(lines0)
with open(diary_names_dir_100,'r') as ff:
    lines0 = ff.readlines()
    R_dir_100 = extract_num(lines0)
print "200 hiddent units:\n" + str(R_dir) + "\n"
print "100 hiddent units:\n" + str(R_dir_100) + "\n"
# intrinsic update method
Rs = []
i = 0
for fname in diary_names_ordered:
    with open(fname,'r') as ff:
        lines0 = ff.readlines()
        R = extract_num(lines0)
    print "%d dim:\n"%dim[i] + str(R) + "\n"
    i += 1
    Rs.append(R)
Rs = np.array(Rs)
# -
def extract_num(lines0):
    """Pull the metric values out of a diary's fixed-format footer.

    `lines0` is the full list of diary lines; each metric sits at a fixed
    offset from the end of the file, with its numeric value as the last
    whitespace-separated token of the line.  Returns the tuple
    (valid_loss, valid_accuracy, train_loss, train_accuracy, run_time).
    """
    # Offset from the end of the file for each metric line.
    offsets = {
        'valid_loss': -5,
        'valid_accuracy': -6,
        'train_loss': -8,
        'train_accuracy': -9,
        'run_time': -10,
    }
    values = dict((name, float(lines0[off].split()[-1])) for name, off in offsets.items())
    return (values['valid_loss'], values['valid_accuracy'],
            values['train_loss'], values['train_accuracy'], values['run_time'])
# ## Performance comparison with Baseline
# +
# Loss vs. intrinsic dimension; columns follow extract_num's return order,
# so column 0 is validation ("Testing") loss and column 2 is training loss.
# The flat baseline lines come from the 200-unit extrinsic (direct) run.
N = 10
fig, ax = subplots(1)
ax.plot(dim, Rs[:,0],'b-', label="Testing")
ax.plot(dim, R_dir[0]*np.ones(N),'b-', label="Testing: baseline")
ax.plot(dim, Rs[:,2],'g-', label="Training")
ax.plot(dim, R_dir[2]*np.ones(N),'g-', label="Training: baseline")
ax.scatter(dim, Rs[:,0])
ax.scatter(dim, Rs[:,2])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Cross Entropy Loss')
ax.set_title('Cross Entropy Loss')
ax.legend()
ax.grid()
ax.set_ylim([-0.1,1.1])
fig.set_size_inches(8, 5)
# +
fig, ax = subplots(1)
ax.plot(dim, Rs[:,1],'b-', label="Testing")
ax.plot(dim, R_dir[1]*np.ones(N),'b-', label="Testing: baseline")
ax.plot(dim, Rs[:,3],'g-', label="Training")
ax.plot(dim, R_dir[3]*np.ones(N),'g-', label="Training: baseline")
ax.scatter(dim, Rs[:,1])
ax.scatter(dim, Rs[:,3])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Cross Entropy Accuracy')
ax.set_title('Cross Entropy Accuracy')
ax.legend()
ax.grid()
ax.set_ylim([0.75,1.01])
fig.set_size_inches(8, 5)
# +
fig, ax = subplots(1)
ax.plot(dim, Rs[:,4],'g-', label="Training")
ax.plot(dim, R_dir[4]*np.ones(N),'g-', label="Training: baseline")
ax.scatter(dim, Rs[:,4])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Time (second)')
ax.set_title('Wall Clock Time')
ax.legend()
ax.grid()
# ax.set_ylim([0.75,100.01])
fig.set_size_inches(8, 5)
# -
# ## Performance Per Dim
# +
NRs = Rs/np.array(dim).reshape(N,1)
print NRs
fig, ax = subplots(1)
ax.plot(dim, NRs[:,0],'b-', label="Testing")
ax.scatter(dim, NRs[:,0])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Cross Entropy Loss per dim')
ax.set_title('Cross Entropy Loss per Dim')
ax.legend()
ax.grid()
fig.set_size_inches(8, 5)
fig, ax = subplots(1)
ax.plot(dim, NRs[:,2],'g-', label="Training")
ax.scatter(dim, NRs[:,2])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Cross Entropy Loss per dim')
ax.set_title('Cross Entropy Loss per Dim')
ax.legend()
ax.grid()
fig.set_size_inches(8, 5)
# +
fig, ax = subplots(1)
ax.plot(dim, NRs[:,1],'b-', label="Testing")
ax.scatter(dim, NRs[:,1])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Cross Entropy Loss per dim')
ax.set_title('Cross Entropy Loss per Dim')
ax.legend()
ax.grid()
fig.set_size_inches(8, 5)
fig, ax = subplots(1)
ax.plot(dim, NRs[:,3],'g-', label="Training")
ax.scatter(dim, NRs[:,3])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Cross Entropy Loss per dim')
ax.set_title('Cross Entropy Loss per Dim')
ax.legend()
ax.grid()
fig.set_size_inches(8, 5)
# +
fig, ax = subplots(1)
ax.plot(dim, NRs[:,4],'g-', label="Training")
ax.scatter(dim, NRs[:,4])
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Time (second)')
ax.set_title('Wall Clock Time')
ax.legend()
ax.grid()
# ax.set_ylim([0.75,100.01])
fig.set_size_inches(8, 5)
# -
|
intrinsic_dim/plots/more/fnn_mnist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimización mano de obra
# ## integrantes:
# + Integrante 1: <NAME>
# + Integrante 2: <NAME>
# + Integrante 3: <NAME>
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
# **Definimos variables**
# - $x_1$: número de maestros contratados en año i
# - $x_2$: número de albañiles contratados en año i
# - $x_3$: número de chalanes contratados en año i
# - $x_4$: número de maestros reclutados en año i
# - $x_5$: número de albañiles reclutados en año i
# - $x_6$: número de chalanes reclutados en año i
# - $x_7$: número de chalanes entrenados para albañiles en año i
# - $x_8$: número de albañiles entrenados para maestros en año i
# - $x_9$: número de maestros que descendieron a albañiles en año i
# - $x_{10}$: número de maestros que descendieron a chalanes en año i
# - $x_{11}$: número de albañiles que descendieron a chalanes en año i
# - $x_{12}$: número de maestros redundantes en año i
# - $x_{13}$: número de albañiles redundantes en año i
# - $x_{14}$: número de chalanes redundantes en año i
# - $x_{15}$: número de maestros que renunciaron en año i
# - $x_{16}$: número de albañiles que renunciaron en año i
# - $x_{17}$: número de chalanes que renunciaron en año i
# - $x_{18}$: número de maestros de sobra en año i
# - $x_{19}$: número de albañiles de sobra en año i
# - $x_{110}$: número de chalanes de sobra en año i
#
# **Definir funciones de restricción**
# + $x_1$= (0.95$x_1$+0.90$x_4$+0.95$x_8$-$x_9$-$x_{10}$-$x_{12}$)
# + $x_2$= (0.95$x_2$+0.80$x_5$+0.95$x_7$-$x_8$-0.50$x_9$-$x_{11}$-$x_{13}$)
# + $x_3$= (0.90$x_3$+0.75$x_7$-$x_{10}$+0.50$x_{11}$-$x_{14}$)
# + $x_8$-0.25$x_1$<=0
# + $x_{18}$+$x_{19}$+$x_{110}$<=150
# + $x_1$-$x_{18}$-0.5$x_{15}$=1000
# + $x_2$-$x_{19}$-0.5$x_{16}$=1400
# + $x_3$-$x_{110}$-0.5$x_{17}$=1000
# + $x_4$<=500
# + $x_5$<=800
# + $x_6$<=500
# + $x_{15}$<=50
# + $x_{16}$<=50
# + $x_{17}$<=50
# + $x_5$<=200
# **Definir función para minimizar redundancia**
# + $x_{12}$+$x_{13}$+$x_{14}$
# **Definir función para minimizar costo**
# + 400$x_7$+500$x_8$+200$x_{14}$+500$x_{13}$+500$x_{12}$+500$x_{17}$+400$x_{16}$+1500$x_{110}$+2000$x_{19}$+3000$x_{18}$
# +
# Inequality-constraint matrix: 5 constraints over 11 decision variables.
A = np.array([
    [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, -1, -1, 0, 0],
    [0, 0, 0, 0, 0, 0, -1, 0, 0, -1, 0],
    [0, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1],
])
# Cost coefficients of the objective to minimize.
c = np.array([400, 500, 500, 500, 200, 400, 400, 500, 3000, 2000, 1500])
# Right-hand side of A x <= b (negative rows encode >= requirements).
b = [575, 900, -1000, -100, 0]
# Solve the LP; linprog's default bounds keep every variable non-negative.
resultado = opt.linprog(c, A_ub=A, b_ub=b)
resultado
# -
help(opt.linprog)
|
Proyecto_Modulo1_LeyvaA_MendozaF_SalazarG final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Collaborative Filtering using fastai
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.collab import *
import fastai
fastai.__version__
# ## Load Movielens Data
# ### Download Data
# ! touch ~/.fastai/data/ml-100k.zip
# ! curl 'http://files.grouplens.org/datasets/movielens/ml-100k.zip' --output ~/.fastai/data/ml-100k.zip
# ! unzip ~/.fastai/data/ml-100k.zip -d ~/.fastai/data/
path = Path('/home/aman/.fastai/data/ml-100k')
path.ls()
# ### Read into DataFrame
# u.data is tab-separated: one (user, item, rating, timestamp) row per rating.
ratings = pd.read_csv(path/'u.data', sep='\t', header=None, names=['userID', 'itemID','rating', 'timestamp'])
ratings.head()
# u.item is pipe-separated latin-1; only itemID/title are named meaningfully,
# the remaining 22 columns get placeholder names.
movies = pd.read_csv(path/'u.item', sep='|', header=None, encoding='latin-1',names=['itemID', 'title', *[f'col_{i}' for i in range(22)]])
movies.head()
# Merge on the shared 'itemID' column to attach titles to each rating.
movies_ratings = ratings.merge(movies[['itemID', 'title']])
movies_ratings.head()
# ### Create DataBunch
data = CollabDataBunch.from_df(movies_ratings, valid_pct=0.1,
user_name='userID', item_name='title', rating_name='rating')
data.show_batch()
ratings_range = [0,5.5]
# ## Train Collaborative Filtering Learner
learner = collab_learner(data, n_factors=50, y_range=ratings_range, metrics=accuracy_thresh)
learner.model
learner.lr_find()
learner.recorder.plot(skip_end=15)
lr =1e-2
learner.fit_one_cycle(3, lr)
learner.fit_one_cycle(3, lr)
learner.save('dotprod')
# ## Interpretation
learner = collab_learner(data, n_factors=50, y_range=ratings_range, metrics=accuracy_thresh)
learner.load('dotprod');
learner.model
# ### For Most Rated Movies
movies_ratings.head()
g = movies_ratings.groupby('title')['rating'].count()
top_movies = g.sort_values(ascending=False)[:1000]
top_movies[:10]
top_movies[-10:]
# ### Movie Bias
bias = learner.bias(top_movies.index)
bias.shape
mean_ratings = movies_ratings.groupby('title')['rating'].mean()
mean_ratings.head()
movie_bias = [(i,b, mean_ratings[i]) for i,b in zip(top_movies.index, bias)]
movie_bias[:5]
mean_ratings['Star Wars (1977)'], bias[0]
sorted(movie_bias, key=lambda x:x[1], reverse=True)[:10]
sorted(movie_bias, key=lambda x:x[1], reverse=False)[:10]
# ### Movie Weights
weights = learner.weight(top_movies.index)
weights.shape
(fac1, fac2) = weights.pca(k=2).t()
movie_weigts = [(i, f1, f2, mean_ratings[i]) for i,f1,f2 in zip(top_movies.index, fac1, fac2)]
# **Factor 1 representation**
print(*sorted(movie_weigts, key=lambda x:x[1], reverse=True)[:10], sep='\n')
print(*sorted(movie_weigts, key=lambda x:x[1], reverse=False)[:10], sep='\n')
# **Factor 2 representation**
print(*sorted(movie_weigts, key=lambda x:x[2], reverse=True)[:10], sep='\n')
print(*sorted(movie_weigts, key=lambda x:x[2], reverse=False)[:10], sep='\n')
# **PCA Visualization**
# Scatter a random sample of 50 movies in the 2-D PCA factor space,
# annotated with their titles.
idxs = np.random.choice(len(top_movies), size=50, replace=False)
x = fac1[idxs]
y = fac2[idxs]
# Use .iloc: plain [] positional indexing on a string-indexed Series is
# deprecated in modern pandas (removed in 3.0); .iloc is the explicit form.
movie_titles = top_movies.iloc[idxs]
fig, ax = plt.subplots(figsize=(15,15))
ax.scatter(x, y)
for title, x_i, y_i in zip(movie_titles.index, x, y):
    ax.text(x_i, y_i, title)
|
notebooks/lesson4-collab.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# Black-box interpretation of models: LIME
# =========================================
#
# See also skater: a more modern variant relying on heavier dependencies
# https://github.com/datascienceinc/Skater
#
# First we need to install lime with the following shell line::
#
# $ pip install --user lime
#
# Lime uses the notion of "explainers", for different types of data,
# tabular, image, text.
#
#
#
# Regression on tabular data: factors of prices of houses
# --------------------------------------------------------
#
# Load the data, create and fit a regressor
#
#
# +
from sklearn import datasets, ensemble, model_selection

# NOTE(review): datasets.load_boston was deprecated in scikit-learn 1.0 and
# removed in 1.2 — this cell requires an older sklearn; confirm the pinned
# version or port to an alternative dataset.
boston = datasets.load_boston()

# Default 75/25 train/test split, then fit a random-forest regressor on prices.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    boston.data, boston.target)
regressor = ensemble.RandomForestRegressor()
regressor.fit(X_train, y_train)
# -
# Inspect predictions for a few houses
#
# For this, separate out the categorical features:
#
#
import numpy as np

# Columns with fewer than 10 distinct values are treated as categorical
# (iterate over features, i.e. the columns of the data matrix).
categorical_features = []
for column_index, column in enumerate(boston.data.T):
    if np.unique(column).size < 10:
        categorical_features.append(column_index)
# Now use a lime explainer for tabular data
#
#
# +
from lime.lime_tabular import LimeTabularExplainer
explainer = LimeTabularExplainer(X_train,
feature_names=boston.feature_names,
class_names=['price'],
categorical_features=categorical_features,
mode='regression')
# Now explain a prediction
exp = explainer.explain_instance(X_test[25], regressor.predict,
num_features=10)
exp.as_pyplot_figure()
from matplotlib import pyplot as plt
plt.tight_layout()
# -
print(exp.as_list())
# Explain a few more predictions
#
#
for i in [7, 50, 66]:
exp = explainer.explain_instance(X_test[i], regressor.predict,
num_features=10)
exp.as_pyplot_figure()
plt.tight_layout()
|
interpreting_ml_tuto/black_box_interpretation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sigmoid Function
# ### Normal Step Function
def step_function(x):
    """Heaviside-style step: 1 for strictly positive x, otherwise 0."""
    return 1 if x > 0 else 0
# +
def step_function(x):
    """Vectorized step function: elementwise 1 where x > 0, else 0."""
    y = x > 0
    # np.int was removed in NumPy 1.24; the builtin int is the supported
    # spelling and produces the same default integer dtype.
    return y.astype(int)

import numpy as np
x = np.array([-1.0, 1.0, 2.0])
print(x)
y = x > 0
print(y)
y = y.astype(int)
print(y)
# +
# Plot of the step function over [-5, 5).
import numpy as np
import matplotlib.pyplot as plt

def step_function(x):
    # np.int was removed in NumPy 1.24; builtin int keeps the same dtype.
    return np.array(x > 0, dtype=int)

x = np.arange(-5.0, 5.0, 0.1)
y = step_function(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
# -
# ### Sigmoid Function
# +
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x); accepts scalars or NumPy arrays."""
    denom = 1 + np.exp(-x)
    return 1 / denom

x = np.array([-1.0, 1.0, 2.0])
print(sigmoid(x))
# -
# Scalars broadcast over NumPy arrays — the operations sigmoid relies on.
t = np.array([1.0, 2.0, 3.0])
print(1.0 + t)
print(1.0/t)
# +
# Plot of the sigmoid function over [-5, 5).
import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x):
    # Standard logistic function; smoothly saturates toward 0 and 1.
    return 1/(1 + np.exp(-x))

x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
# -
# ### 比較 Sigmoid Function & Step Function
# #### 1. sigmoid function 為一平滑曲線(連續性輸出),step function 則會明確改變輸出
# #### 2. step function 只能回傳0或1兩個值,sigmoid function則可以回傳連續性的實數
# #### 3. 相似:越重要的輸入訊息,回傳值愈大;且皆在0與1之間
# #### 4. 兩者皆為非線性函數
# # ReLU Function (Rectified Linear Unit)
# #### 輸入 > 0,則將輸入直接輸出
# #### 輸入 < 0,則輸出0的函數
def relu(x):
    """Rectified linear unit: max(x, 0), applied elementwise for arrays."""
    return np.maximum(x, 0)
# # 多維陣列
# 1-D array: rank, shape, and first-axis length
import numpy as np
A = np.array([1,2,3,4])
print(A)
print(np.ndim(A))
print(A.shape)
print(A.shape[0])
# 2-D array (3 rows x 2 columns)
import numpy as np
B = np.array([[1,2], [3,4], [5,6]])
print(B)
print(np.ndim(B))
print(B.shape)
# Matrix product of two 2x2 matrices
import numpy as np
A = np.array([[1,2],[3,4]])
print(A.shape)
B = np.array([[5,6],[7,8]])
print(B.shape)
print(np.dot(A,B)) # matrix-multiplication function
# # 執行三層神經網路
# +
# Compute the first layer of the network: a single-example forward pass.
import numpy as np

# Input activations: a 2-vector.
X = np.array([1.0, 0.5])
print(X.shape)
# Layer weights: 2 inputs x 3 neurons.
W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
print(W1.shape)
# Biases, one per output neuron.
B1 = np.array([0.1, 0.2, 0.3])
print(B1.shape)
# Affine transform: A1 = X @ W1 + B1.
A1 = np.dot(X, W1) + B1
# Activation — sigmoid is defined in an earlier cell of this notebook.
Z1 = sigmoid(A1)
print(A1)
print(Z1)
# +
# Compute the second layer of neurons.
# NOTE(review): W2 is referenced here but never defined anywhere in this
# notebook, so evaluating this cell raises NameError. The textbook's example
# defines W2/B2 at this point — confirm against the source before running.
W2
# -
# # 3.5.1 恆等函數與softmax函數
# +
import numpy as np
# Softmax computed step by step on a 3-element score vector.
a = np.array([0.3, 2.9, 4.0])  # raw scores (logits)
exp_a = np.exp(a)  # elementwise exponential
print(exp_a)
sum_exp_a = np.sum(exp_a)  # normalizing constant (sum of exponentials)
print(sum_exp_a)
y = exp_a / sum_exp_a  # probabilities; sums to 1
print(y)
# +
# Plain softmax — no overflow protection.
def softmax(a):
    """Map a score vector to probabilities: exp(a) / sum(exp(a)).

    np.exp overflows for large scores; this naive version has no guard
    (a numerically stable variant follows in a later cell).
    """
    exponentials = np.exp(a)
    return exponentials / exponentials.sum()

print(softmax(a))
# +
# Softmax with the max-subtraction trick to avoid overflow.
def softmax(a):
    """Numerically stable softmax.

    Subtracting max(a) before exponentiating leaves the result unchanged
    mathematically but keeps np.exp inside floating-point range.
    """
    shifted = np.exp(a - np.max(a))
    return shifted / np.sum(shifted)
# -
# # 3.6 辨識手寫數字
# ## 建議在有辦法自網路下載資料的地方,重新修改本篇章的讀書筆記
# +
import sys, os
sys.path.append(os.pardir)
# Project-local MNIST loader (not part of any published package).
from TextbookProgram.mnist import load_mnist

# flatten=True returns 784-vectors; normalize=False keeps raw 0-255 pixels.
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)
# +
import sys, os
sys.path.append(os.pardir)
import numpy as np
from TextbookProgram.mnist import load_mnist
from PIL import Image

def img_show(img):
    """Display a 2-D uint8 image array in the system image viewer."""
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
# Loader flattened to 784; reshape back to 28x28 for display.
img = img.reshape(28,28)
print(img.shape)
img_show(img)
# -
# # 3.6.2 神經網路的推論處理
# +
from TextbookProgram.functions import sigmoid, softmax
import pickle
import sys, os
sys.path.append(os.pardir)
import numpy as np

def get_data():
    """Return the MNIST test split (images, labels).

    NOTE(review): relies on load_mnist imported in an earlier cell;
    normalize=False keeps raw 0-255 pixels — the book's version normalizes,
    so verify the pretrained weights expect unnormalized input.
    """
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False, one_hot_label=False)
    return x_test, t_test

def init_network():
    """Load the pretrained 3-layer network weights from a pickle file."""
    with open("TextbookProgram/sample_weight.pkl", 'rb') as f:
        network = pickle.load(f)
    return network

def predict(network, x):
    """Forward pass through the 3-layer net; returns class probabilities."""
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)
    return y  # softmax because this is a multi-class classification problem
# +
x, t = get_data()
network = init_network()

# Score the network on the test split returned by get_data above.
# Fixed: the loop previously iterated the stale globals x_test/t_test from an
# earlier cell instead of the x, t it just fetched — same data here, but only
# by accident of cell-execution order.
accuracy_cnt = 0
for i in range(len(x)):
    y = predict(network, x[i])
    p = np.argmax(y)  # index of the most probable class
    if p == t[i]:
        accuracy_cnt += 1

print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
# -
|
BookExercises/DeepLearningFromScratch/.ipynb_checkpoints/Chapter3_190802-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .ps1
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .NET (PowerShell)
# language: PowerShell
# name: .net-powershell
# ---
# # P3 testing your code tutorial
#
# * test using the command line
# * test using a shell script
# ## Command line
dotnet run "A2 2D 93 61 7F DC 0D 8E C6 3E A7 74 51 1B 24 B2" 251 465 255 1311 2101864342 8995936589171851885163650660432521853327227178155593274584417851704581358902 "F2 2C 95 FC 6B 98 BE 40 AE AD 9C 07 20 3B B3 9F F8 2F 6D 2D 69 D6 5D 40 0A 75 45 80 45 F2 DE C8 6E C0 FF 33 A4 97 8A AF 4A CD 6E 50 86 AA 3E DF" AfYw7Z6RzU9ZaGUloPhH3QpfA1AXWxnCGAXAwk3f6MoTx
# ## Shell script
# Make the test script executable (POSIX only), then run it.
chmod +x testYourProgram.sh # linux/mac make the script executable
./testYourProgram.sh # on windows, you would use ./testYourProgram.ps1
|
samples/notebooks/P3 interactive/interactive Terminal.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib
import matplotlib.pyplot as plt
import scipy.io as scio
import numpy as np
import os
import random
import networkx as nx
from tqdm import tqdm
# +
constellation_name = "StarLink"
number_of_satellites = 1584
period = 5731          # number of per-second snapshots iterated below — presumably seconds; TODO confirm
bound = 3.78           # delay threshold used to decide visibility below — units unclear, verify
sat_per_orbit = 66
fac_id = 1585          # row index of the ground station in the delay matrix
# -
# NOTE(review): Windows-style backslash path; use os.path.join for portability.
path = 'matlab_code\\' + constellation_name + '\\delay\\1.mat'
data = scio.loadmat(path)
delay = data['delay']
len(delay)
delay = np.array(delay)
# Satellites (first 1584 columns) whose delay from the ground station is
# under the visibility bound.
care = np.where(delay[fac_id][:1584] < bound)[0]
last_idx = -1
for idx in care:
    # Print each visible satellite, the gap to the previous one, and its delay.
    print(idx, idx-last_idx, delay[fac_id][idx], delay[fac_id][idx] < bound)
    last_idx = idx
# # StarPerf Bug 1: Miss links between last orbit and first orbit
# For satellites of 1st orbit, whose id in range(1, 66+1), they miss a link
# to the satellites in the last orbit, whose id in range(1585-66,1585)
sat_id = 10
# Which entries have a positive (existing) link delay from this satellite.
np.where(delay[sat_id] > 0)
# for ground station 1585, calculate best satellite in every second, and see how long a satellite can keep
best_sat = []
for second in tqdm(range(1, period+1)):
    # NOTE(review): backslashes here are unescaped; they happen to survive
    # because \S and \d are not recognized escapes — prefer raw strings.
    path = os.path.join('matlab_code\StarLink\delay', str(second)+'.mat')
    data = scio.loadmat(path)
    delay = np.array(data['delay'][fac_id][:1584])
    # Index of the minimum-delay satellite (equivalent to np.argmin(delay)).
    min_index = np.where(delay == delay.min())[0][0]
    best_sat.append(min_index)

# Persist the per-second best-satellite sequence, one index per line.
with open("best_sat.txt", 'w') as fbest:
    for idx in best_sat:
        fbest.write('%d\n' % idx)

fig, ax = plt.subplots()
x = np.arange(1, period+1)
ax.plot(x, best_sat)
# +
import sp_utils
slices = []
slice = 0
cur = best_sat[0]
for idx in best_sat:
if idx == cur:
slice+=1
else:
slices.append(slice)
slice = 1
cur = idx
slices.append(slice)
print(len(best_sat))
best_times, best_orbits = sp_utils.orbit_times(best_sat, sat_per_orbit)
for t, o in zip(best_times, best_orbits):
print("%5d: %3d" % (t,o))
# -
# Persist (service_time, orbit) pairs as CSV rows.
with open('./data/'+constellation_name+'/best_sat.csv', 'w') as fbest:
    for i in range(len(best_times)):
        fbest.write('%d,%d\n' % (best_times[i], best_orbits[i]))

fig, ax = plt.subplots()
ax.set(xlabel='Number of Orbit Change', ylabel='Service Time (second)',
       title='Continuous Service Time for Each Orbit in a Period')
best_x = np.arange(1, len(best_times)+1)
ax.plot(best_x, best_times)
|
Greedy-Policy-Valid-Time.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 03 - TFX Interactive Training Pipeline Execution
#
# The purpose of this notebook is to interactively run the following `TFX` pipeline steps:
# 1. Receive hyperparameters using `hyperparam_gen` custom Python component.
# 2. Extract data from BigQuery using `BigQueryExampleGen` component.
# 3. Validate the raw data using `StatisticsGen` and `ExampleValidator` components.
# 4. Process the data using `Transform` component.
# 5. Train a custom model using `Trainer` component.
# 7. Evaluat and Validate the custom model using `ModelEvaluator` component.
# 7. Save the blessed to model registry location using `Pusher` component.
# 8. Upload the model to Vertex AI using `vertex_model_pusher` custom Python component
#
# The custom components are implemented in the [tfx_pipeline/components.py](tfx_pipeline/components) module.
# !pip install google-cloud-aiplatform==1.3.0
# ## Setup
# ### Import libraries
# +
import os
import json
import numpy as np
import tfx
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
from tensorflow_transform.tf_metadata import schema_utils
import logging
# (removed a stray bare `!` left over from notebook conversion — it is a
# syntax error in the .py representation)

from src.common import features
from src.model_training import data
from src.tfx_pipelines import components

# Silence verbose TF/TFX logging for the interactive session.
logging.getLogger().setLevel(logging.ERROR)
tf.get_logger().setLevel('ERROR')

print("TFX Version:", tfx.__version__)  # fixed label: this is tfx, not TF
print("Tensorflow Version:", tf.__version__)
# -
# ### Setup Google Cloud project
# +
PROJECT = 'aiops-industrialization' # Change to your project id.
REGION = 'us-central1' # Change to your region.
BUCKET = 'aiops-industrialization-bucket-ravi' # Change to your bucket name.
# NOTE(review): "<EMAIL>" looks like an anonymization placeholder — restore the
# real service-account domain (e.g. developer.gserviceaccount.com) before use.
SERVICE_ACCOUNT = "175728527123-compute@<EMAIL>"

# NOTE(review): in each fallback branch below the `!gcloud` magic is commented
# out, so `shell_output` is undefined and the branch would raise NameError if
# ever taken. Harmless with the hard-coded values above, but worth fixing.
if PROJECT == "" or PROJECT is None or PROJECT == "[your-project-id]":
    # Get your GCP project id from gcloud
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT = shell_output[0]

if SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]":
    # Get your GCP project id from gcloud
    # shell_output = !gcloud config list --format 'value(core.account)' 2>/dev/null
    SERVICE_ACCOUNT = shell_output[0]

if BUCKET == "" or BUCKET is None or BUCKET == "[your-bucket-name]":
    # Get your bucket name to GCP projet id
    BUCKET = PROJECT
    # Try to create the bucket if it doesn'exists
    # ! gsutil mb -l $REGION gs://$BUCKET
    print("")

PARENT = f"projects/{PROJECT}/locations/{REGION}"

print("Project ID:", PROJECT)
print("Region:", REGION)
print("Bucket name:", BUCKET)
print("Service Account:", SERVICE_ACCOUNT)
print("Vertex API Parent URI:", PARENT)
# -
# ### Set configurations
# +
VERSION = 'v01'
DATASET_DISPLAY_NAME = 'chicago-taxi-tips'
MODEL_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier-{VERSION}'
WORKSPACE = f'gs://{BUCKET}/{DATASET_DISPLAY_NAME}'
RAW_SCHEMA_DIR = 'src/raw_schema'
MLMD_SQLLITE = 'mlmd.sqllite'
ARTIFACT_STORE = os.path.join(WORKSPACE, 'tfx_artifacts')
MODEL_REGISTRY = os.path.join(WORKSPACE, 'model_registry')
PIPELINE_NAME = f'{MODEL_DISPLAY_NAME}-training-pipeline'
PIPELINE_ROOT = os.path.join(ARTIFACT_STORE, PIPELINE_NAME)
print(ARTIFACT_STORE)
# -
# ## Create Interactive Context
# +
REMOVE_ARTIFACTS = True

# Start from a clean slate: remove previous pipeline artifacts and the local
# MLMD database so the interactive run is reproducible.
if tf.io.gfile.exists(ARTIFACT_STORE) and REMOVE_ARTIFACTS:
    print("Removing previous artifacts...")
    tf.io.gfile.rmtree(ARTIFACT_STORE)

if tf.io.gfile.exists(MLMD_SQLLITE) and REMOVE_ARTIFACTS:
    print("Deleting previous mlmd.sqllite...")
    # NOTE(review): MLMD_SQLLITE is a single file; gfile.rmtree targets
    # directory trees — confirm this path works, gfile.remove may be intended.
    tf.io.gfile.rmtree(MLMD_SQLLITE)

print(f'Pipeline artifacts directory: {PIPELINE_ROOT}')
print(f'Local metadata SQLlit path: {MLMD_SQLLITE}')
# +
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.sqlite.filename_uri = MLMD_SQLLITE
connection_config.sqlite.connection_mode = 3 # READWRITE_OPENCREATE
mlmd_store = mlmd.metadata_store.MetadataStore(connection_config)
context = InteractiveContext(
pipeline_name=PIPELINE_NAME,
pipeline_root=PIPELINE_ROOT,
metadata_connection_config=connection_config
)
# -
# ## 1. Hyperparameter generation
# +
from src.tfx_pipelines import components
hyperparams_gen = components.hyperparameters_gen(
num_epochs=5,
learning_rate=0.001,
batch_size=512,
hidden_units='64,64',
)
context.run(hyperparams_gen, enable_cache=False)
# -
json.load(
tf.io.gfile.GFile(
os.path.join(
hyperparams_gen.outputs['hyperparameters'].get()[0].uri, 'hyperparameters.json')
)
)
# ## 2. Data extraction
from src.common import datasource_utils
from tfx.extensions.google_cloud_big_query.example_gen.component import BigQueryExampleGen
from tfx.proto import example_gen_pb2, transform_pb2
# ### Extract train and eval splits
# +
sql_query = datasource_utils.get_training_source_query(
PROJECT, REGION, DATASET_DISPLAY_NAME, ml_use='UNASSIGNED', limit=5000)
output_config = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(
splits=[
example_gen_pb2.SplitConfig.Split(name="train", hash_buckets=4),
example_gen_pb2.SplitConfig.Split(name="eval", hash_buckets=1),
]
)
)
train_example_gen = BigQueryExampleGen(query=sql_query, output_config=output_config)
beam_pipeline_args=[
f"--project={PROJECT}",
f"--temp_location={os.path.join(WORKSPACE, 'tmp')}"
]
context.run(
train_example_gen,
beam_pipeline_args=beam_pipeline_args,
enable_cache=False
)
# -
# ### Extract test split
# +
sql_query = datasource_utils.get_training_source_query(
PROJECT, REGION, DATASET_DISPLAY_NAME, ml_use='TEST', limit=1000)
output_config = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(
splits=[
example_gen_pb2.SplitConfig.Split(name="test", hash_buckets=1),
]
)
)
test_example_gen = BigQueryExampleGen(query=sql_query, output_config=output_config)
beam_pipeline_args=[
f"--project={PROJECT}",
f"--temp_location={os.path.join(WORKSPACE, 'tmp')}"
]
context.run(
test_example_gen,
beam_pipeline_args=beam_pipeline_args,
enable_cache=False
)
# -
print(train_example_gen.outputs['examples'].get()[0].uri)
# ### Read sample extract tfrecords
# +
#train_uri = os.path.join(train_example_gen.outputs.examples.get()[0].uri, "Split-train/*")
train_uri= os.path.join(train_example_gen.outputs['examples'].get()[0].uri,"Split-train/*")
source_raw_schema = tfdv.load_schema_text(os.path.join(RAW_SCHEMA_DIR, 'schema.pbtxt'))
raw_feature_spec = schema_utils.schema_as_feature_spec(source_raw_schema).feature_spec
def _parse_tf_example(tfrecord):
return tf.io.parse_single_example(tfrecord, raw_feature_spec)
tfrecord_filenames = tf.data.Dataset.list_files(train_uri)
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
dataset = dataset.map(_parse_tf_example)
for raw_features in dataset.shuffle(1000).batch(3).take(1):
for key in raw_features:
print(f"{key}: {np.squeeze(raw_features[key], -1)}")
print("")
# -
# ## 3. Data validation
# ### Import raw schema
# +
schema_importer = tfx.components.common_nodes.importer_node.ImporterNode(
source_uri=RAW_SCHEMA_DIR,
artifact_type=tfx.types.standard_artifacts.Schema,
reimport=False,
#instance_name='',
)
context.run(schema_importer)
# -
# ### Generate statistics
statistics_gen = tfx.components.StatisticsGen(
examples=train_example_gen.outputs['examples'])
context.run(statistics_gen)
# !rm -r {RAW_SCHEMA_DIR}/.ipynb_checkpoints/
# ### Validate statistics against schema
# +
example_validator = tfx.components.ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_importer.outputs['result'],
)
context.run(example_validator)
# -
context.show(example_validator.outputs['anomalies'])
# ## 4. Data transformation
# +
_transform_module_file = 'src/preprocessing/transformations.py'
transform = tfx.components.Transform(
examples=train_example_gen.outputs['examples'],
schema=schema_importer.outputs['result'],
module_file=_transform_module_file,
splits_config=transform_pb2.SplitsConfig(
analyze=['train'], transform=['train', 'eval']),
)
context.run(transform, enable_cache=False)
# -
# ### Read sample transformed tfrecords
# +
transformed_train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, "Split-train/*")
#transformed_train_uri = 'gs://aiops-industrialization-bucket-ravi/chicago-taxi-tips/tfx_artifacts/chicago-taxi-tips-classifier-v01-training-pipeline/Transform/transformed_examples/7/train/transformed_examples-00000-of-00001.gz'
transform_graph_uri = transform.outputs['transform_graph'].get()[0].uri
tft_output = tft.TFTransformOutput(transform_graph_uri)
transform_feature_spec = tft_output.transformed_feature_spec()
for input_features, target in data.get_dataset(
transformed_train_uri, transform_feature_spec, batch_size=3).take(1):
for key in input_features:
print(f"{key} ({input_features[key].dtype}): {input_features[key].numpy().tolist()}")
print(f"target: {target.numpy().tolist()}")
# -
# #!pip install tfx
#import tfx
#help(tfx.version)
# #!pip install -i https://pypi-nightly.tensorflow.org/simple --pre tfx
# #!pip install -U tfx
from tfx import v1 as tfx
tfx.__version__
# ## 5. Model training
import tfx
from tfx.components.base import executor_spec
from tfx.components.trainer import executor as trainer_executor
from tfx.dsl.components.common.resolver import Resolver
from tfx.dsl.experimental import latest_artifacts_resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
# ### Get the latest model to warm start
# +
latest_model_resolver = Resolver(
strategy_class=latest_artifacts_resolver.LatestArtifactsResolver,
latest_model=tfx.types.Channel(type=tfx.types.standard_artifacts.Model)
)
context.run(latest_model_resolver, enable_cache=False)
# -
# ### Train the model
# +
_train_module_file = 'src/model_training/runner.py'
trainer = tfx.components.Trainer(
custom_executor_spec=executor_spec.ExecutorClassSpec(trainer_executor.GenericExecutor),
module_file=_train_module_file,
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_importer.outputs['result'],
base_model=latest_model_resolver.outputs['latest_model'],
transform_graph=transform.outputs['transform_graph'],
train_args=tfx.proto.trainer_pb2.TrainArgs(num_steps=0),
eval_args=tfx.proto.trainer_pb2.EvalArgs(num_steps=None),
hyperparameters=hyperparams_gen.outputs['hyperparameters'],
)
context.run(trainer, enable_cache=False)
# -
# ## 6. Model evaluation
# ### Get the latest blessed model for model validation.
# +
blessed_model_resolver = Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=tfx.types.Channel(type=tfx.types.standard_artifacts.Model),
model_blessing=tfx.types.Channel(type=tfx.types.standard_artifacts.ModelBlessing)
)
context.run(blessed_model_resolver, enable_cache=False)
# -
# ### Evaluate and validate the model against the baseline model.
from tfx.components import Evaluator
# +
eval_config = tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(
signature_name='serving_tf_example',
label_key=features.TARGET_FEATURE_NAME,
prediction_key='probabilities')
],
slicing_specs=[
tfma.SlicingSpec(),
],
metrics_specs=[
tfma.MetricsSpec(
metrics=[
tfma.MetricConfig(class_name='ExampleCount'),
tfma.MetricConfig(
class_name='BinaryAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.7}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))),
])
])
evaluator = Evaluator(
examples=test_example_gen.outputs['examples'],
example_splits=['test'],
model=trainer.outputs['model'],
baseline_model=blessed_model_resolver.outputs['model'],
eval_config=eval_config,
schema=schema_importer.outputs['result']
)
context.run(evaluator, enable_cache=False)
# +
evaluation_results = evaluator.outputs['evaluation'].get()[0].uri
print("validation_ok:", tfma.load_validation_result(evaluation_results).validation_ok, '\n')
for entry in list(tfma.load_metrics(evaluation_results))[0].metric_keys_and_values:
value = entry.value.double_value.value
if value:
print(entry.key.name, ":", round(entry.value.double_value.value, 3))
# -
# ## 7. Model pushing
# +
exported_model_location = os.path.join(MODEL_REGISTRY, MODEL_DISPLAY_NAME)
push_destination=tfx.proto.pusher_pb2.PushDestination(
filesystem=tfx.proto.pusher_pb2.PushDestination.Filesystem(
base_directory=exported_model_location,
)
)
pusher = tfx.components.Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=push_destination
)
context.run(pusher, enable_cache=False)
# -
# ## 8. Model Upload to AI Platform
# +
serving_runtime = 'tf2-cpu.2-4'
serving_image_uri = f"gcr.io/cloud-aiplatform/prediction/{serving_runtime}:latest"
vertex_model_uploader = components.vertex_model_uploader(
#project=PROJECT,
project='aiops-industrialization',
region=REGION,
model_display_name=MODEL_DISPLAY_NAME,
pushed_model_location=exported_model_location,
serving_image_uri=serving_image_uri,
explanation_config=''
)
context.run(vertex_model_uploader, enable_cache=False)
# -
vertex_model_uploader.outputs['uploaded_model'].get()[0].get_string_custom_property('model_uri')
statistics_gen = tfx.components.StatisticsGen(examples=train_example_gen.outputs['examples']).with_id("StatisticsGen")
statistics=statistics_gen.outputs['statistics'],
print(statistics)
schema=schema_importer.outputs['result']
print(schema)
|
03-training-formalization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Applications
#
# Index of advanced applications and dashboards built using `bqplot` and `ipywidgets`.
#
# <b>Some of these examples will not be rendered well in the output cell of the notebook. These can be rendered on full screen using [voila](https://github.com/voila-dashboards/voila).</b>
#
# 1. [Logs Analytics](Logs%20Analytics/Logs%20Dashboard.ipynb)
# 2. Wealth Of Nations
# * [Bubble Chart](Wealth%20Of%20Nations/Bubble%20Chart.ipynb)
# * [Choropleth](Wealth%20Of%20Nations/Choropleth.ipynb)
# 3. [US Equity Index Performance](Equity%20Index%20Performance/Time%20Series.ipynb)
|
examples/Applications/Index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# %matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
code_root='/home/syqian/relative3d'
import sys
import numpy as np
import os.path as osp
import scipy.misc
import scipy.io as sio
import torch
import matplotlib.pyplot as plt
sys.path.append(osp.join(code_root, '..'))
import pdb
from absl import flags
from relative3d.demo import demo_utils
# -
torch.cuda.is_available()
# +
detection=True
flags.FLAGS(['demo'])
opts = flags.FLAGS
opts.batch_size = 1
opts.num_train_epoch = 1
opts.name = 'suncg_relnet_dwr_pos_ft'
opts.classify_rot = True
opts.classify_dir = True
opts.pred_voxels = False# else:
# inputs['bboxes'] = [torch.from_numpy(bboxes)]
# inputs['scores'] = [torch.from_numpy(bboxes[:,0]*0+1)]
opts.use_context = True
opts.pred_labels=True
opts.upsample_mask=True
opts.pred_relative=True
opts.use_mask_in_common=True
opts.use_spatial_map=True
opts.pretrained_shape_decoder=True
opts.do_updates=True
opts.dwr_model=True
if opts.classify_rot:
opts.nz_rot = 24
else:
opts.nz_rot = 4
# +
# Checkpoint path pattern: ../cachedir/snapshots/<name>/pred_net_<epoch>.pth
checkpoint = '../cachedir/snapshots/{}/pred_net_{}.pth'.format(opts.name, opts.num_train_epoch)
# NOTE(review): pretrained_dict is loaded here but never used below —
# clean_checkpoint_file re-loads the file itself.
pretrained_dict = torch.load(checkpoint)
def clean_checkpoint_file(ckpt_file):
    """Strip stale relative-pose weights from a checkpoint file, in place.

    Removes every state-dict entry whose key mentions
    'relative_quat_predictor' or 'relative_encoder.encoder_joint_scale'
    (weights that the current model no longer defines, which would make
    ``load_state_dict`` fail). The file is only rewritten when at least
    one key was removed.

    Args:
        ckpt_file: path to a ``torch.save``-serialized checkpoint dict.
    """
    checkpoint = torch.load(ckpt_file)
    # Single pass over the keys instead of two separate comprehensions.
    stale = [key for key in checkpoint
             if 'relative_quat_predictor' in key
             or 'relative_encoder.encoder_joint_scale' in key]
    for key in stale:
        checkpoint.pop(key)
    if stale:
        torch.save(checkpoint, ckpt_file)
# +
# Strip stale relative-pose weights from the snapshot, then build the tester.
ckpt_file = '../cachedir/snapshots/{}/pred_net_{}.pth'.format(opts.name, opts.num_train_epoch)
clean_checkpoint_file(ckpt_file)
tester = demo_utils.DemoTester(opts)
tester.init_testing()
# +
dataset = 'suncg'
# NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
# this notebook assumes an old SciPy (with Pillow installed) — confirm env.
img = scipy.misc.imread('./data/{}_img.png'.format(dataset))
# The network consumes both a fine and a coarse resolution, channel-first (C, H, W).
img_fine = scipy.misc.imresize(img, (opts.img_height_fine, opts.img_width_fine))
img_fine = np.transpose(img_fine, (2,0,1))
img_coarse = scipy.misc.imresize(img, (opts.img_height, opts.img_width))
img_coarse = np.transpose(img_coarse, (2,0,1))
# Precomputed object proposals for the demo image (first 4 columns = box coords).
temp = sio.loadmat('./data/{}_proposals.mat'.format(dataset))
proposals = temp['proposals'][:, 0:4]
gtInds = temp['gtInds']
# bboxes = sio.loadmat('./data/{}_bboxes_1.mat'.format(dataset))['bboxes'].astype(np.float)
inputs = {}
# Normalize pixel values to [0, 1] and add a batch dimension.
inputs['img'] = torch.from_numpy(img_coarse/255.0).unsqueeze(0)
inputs['img_fine'] = torch.from_numpy(img_fine/255.0).unsqueeze(0)
if detection:
    inputs['bboxes'] = [torch.from_numpy(proposals)]
inputs['empty'] = False
tester.set_input(inputs)
# Predict per-object 3D boxes and render them for visualization.
objects = tester.predict_box3d()
visuals = tester.render_outputs()
# +
# 2x2 panel: input image, predicted objects, ROI crops, full predicted scene.
f, axarr = plt.subplots(2, 2, figsize=(20, 8))
axarr[0, 0].imshow(visuals['img'])
axarr[0, 0].axis('off')
axarr[0, 1].imshow(visuals['b_pred_objects_cam_view'])
axarr[0, 1].axis('off')
axarr[1, 0].imshow(visuals['img_roi'])
axarr[1, 0].axis('off')
axarr[1, 1].imshow(visuals['b_pred_scene_cam_view'])
axarr[1, 1].axis('off')
plt.show()
|
object_branch/demo/demo_suncg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Parameters
#
# The next code block sets parameters that are used throughout the remainder of the notebook.
# + tags=["parameters"]
# pylint: disable=invalid-name,missing-module-docstring
# an integer, increment if you need to redo your analysis
# will be appended to your username to create analysis_id
analysis_number = 0
# experiment ID that must match the parent folder containing the LCMS output files
# An example experiment ID is '20201116_JGI-AK_LH_506489_SoilWarm_final_QE-HF_HILICZ_USHXG01530'
experiment = "REPLACE ME"
# group will only be used in RT prediction if their name has a substring match to this list of strings
include_groups = ["_QC_"]
# Exclude files with names containing any of the substrings in this list. Eg., ['peas', 'beans']
exclude_files = []
# list of substrings that will group together when creating groups
# this provides additional grouping beyond the default grouping on field #12
groups_controlled_vocab = ["QC", "InjBl", "ISTD"]
# The rest of this block contains project independent parameters
# Full path to the directory where you have cloned the metatlas git repo.
# If you ran the 'git clone ...' command in your home directory on Cori,
# then you'll want '/global/homes/FIRST-INITIAL-OF-USERNAME/USERNAME/metatlas'
# where the uppercase letters are replaced based on your NERSC username.
metatlas_repo_path = "/global/homes/FIRST-INITIAL-OF-USERNAME/USERNAME/metatlas"
# Full path to the directory where you want this notebook to store data.
# A subdirectory will be auto created within this directory for each project.
# You can place this anywhere on cori's filesystem, but placing it within your
# global home directory is recommended so that you do not need to worry about
# your data being purged. Each project will take on the order of 100 MB.
project_directory = "/global/homes/FIRST-INITIAL-OF-USERNAME/USERNAME/metabolomics_projects"
# ID from Google Drive URL for base output folder .
# The default value is the ID that corresponds to 'JGI_Metabolomics_Projects'.
google_folder = "0B-ZDcHbPi-aqZzE5V3hOZFc0dms"
# thresholds for filtering out compounds with weak MS1 signals
num_points = 5
peak_height = 4e5
# maximum number of CPUs to use
# when running on jupyter.nersc.gov, you are not allowed to set this above 4
max_cpus = 4
# Threshold for how much status information metatlas functions print in the notebook
# levels are 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
log_level = "INFO"
# -
# +
# pylint: disable=wrong-import-position,import-error
import logging # noqa: E402
import os # noqa: E402
import sys # noqa: E402
sys.path.insert(0, metatlas_repo_path)
logger = logging.getLogger("metatlas.jupyter")
logger.debug("sys.executable=%s", sys.executable)
logger.debug("sys.path=%s.", sys.path)
logger.debug("metatlas_repo_path=%s.", metatlas_repo_path)
if not os.path.exists(metatlas_repo_path):
logging.critical(
"Directory set for metatlas_repo_path parameter (%s) does not exist or is not accessible.",
metatlas_repo_path,
)
raise ValueError("Invalid metatlas_repo_path parameter in Jupyter Notebook.")
try:
from metatlas.tools import notebook, predict_rt # noqa: E402
except ModuleNotFoundError as err:
if str(err) == "No module named 'metatlas.tools'":
logging.critical(
("Could not find metatlas module at %s. " "In the Parameters block, please check the value of metatlas_repo_path."),
metatlas_repo_path,
)
else:
logger.critical('Please check that the kernel is set to "Metatlas Targeted".')
raise ModuleNotFoundError from err
notebook.setup(log_level)
# -
ids = predict_rt.get_analysis_ids_for_rt_prediction(experiment, project_directory, google_folder, analysis_number)
predict_rt.generate_outputs(ids, max_cpus, metatlas_repo_path, num_points, peak_height)
|
notebooks/reference/RT_Prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pymysql
# Connect to the local MySQL server. '<PASSWORD>' is a placeholder — supply a
# real password (ideally from an environment variable, never hard-coded).
con = pymysql.connect( db = 'db', user = 'root', password = '<PASSWORD>', host = 'localhost')
print(con)
cur = con.cursor()
# Prompt for the credentials that will be stored in the `login` table.
uid = input('Enter User Name')
pwd = input('Enter password')
# +
# Insert the credentials with a parameterized query; string-formatting user
# input directly into SQL (the original "...'%s'" % (uid, pwd)) is an
# SQL-injection vulnerability.
try:
    # cursor.execute returns the number of affected rows.
    i = cur.execute("insert into login values(%s, %s)", (uid, pwd))
    if i >= 1:
        con.commit()
        print('Record Saved')
except Exception as ex:
    print(ex)
# -
# Print every stored (username, password) row, tab separated.
try:
    cur.execute("select * from login")
    rs = cur.fetchall()
    # Iterate rows directly instead of indexing with range(len(rs)).
    for row in rs:
        print(row[0], '\t', row[1])
except Exception as ex:
    print(ex)
#finally: con.close()
|
pymsql_file.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Moneyball Project
#
# This is a project from an R Data Science course I completed on DataCamp. This project involves a problem faced in 2002 by the Oakland A's. Following the 2001 season, the team lost three key players. The front office took advantage of new baseball statistics to replace the lost players with cheaper ones to win 20 consecutive games. The story became the premise of Michael Lewis' 2003 book Moneyball: The Art of Winning an Unfair Game (later turned into a movie starring Brad Pitt). <br>
#
# *Data from Sean Lahman:* http://www.seanlahman.com/baseball-archive/statistics/ <br>
# Batting and Salaries tables are used for this project
#
# #### Goal Of This Project
# Using these three metrics, replace 3 lost players with comparable stats and cheaper salaries<br><p>
# Batting Average (BA): Hits/AtBat<br>
# On Base Percentage (OBP): (Hits + BasesOnBalls + HitByPitch)/(AtBat + BasesOnBalls + HitByPitch + SacrificeFly)<br>
# Slugging Percentage (SLG): ('1B' + (2*'2B') + (3*'3B0') + (4*HomeRuns)) / AtBat <br>
#
# **The players lost were**<br>
# first baseman, 2000 AL MVP Jason Giambi (giambja01)<br>
# outfielder Johnny Damon (damonjo01)<br>
# infielder Olmedo Sáenz (saenzol01)<br>
# + tags=[]
## load libraries and data (Lahman Batting and Salaries tables)
library(tidyverse)
Batting <- read_csv("Batting.csv")
Salaries <- read_csv("Salaries.csv")
# -
# #### Data Wrangling and Exploration
# +
## Look at Batting dataframe
head(Batting)
# Calculate BA, OBP, and SLG batting metrics. Singles = (H-HR-3B-2B)
BattingNew <- Batting %>%
    mutate(BA = H/AB,
           OBP = (H + BB + HBP)/(AB + BB + HBP + SF),
           SLG = ((H-HR-X3B-X2B) + (2*X2B) + (3*X3B) + (4*HR))/AB
    )
# +
# Check new df
head(BattingNew)
## Look at Salaries df
head(Salaries)
# +
## Add salary to Batting df. Year and player are going to be the overlapping columns.
# Check year for each dataframe and chop Batting down to same timeframe as Salaries.
summary(BattingNew$yearID)
summary(Salaries$yearID)
# Salary data only starts in 1985, so drop earlier batting seasons before merging.
BattingChopped <- BattingNew %>%
    filter(yearID >= 1985)
summary(BattingChopped$yearID)
BattingMerged <- merge(BattingChopped, Salaries, by = c('playerID','yearID'))
## Look at stats for three lost players (giambja01, damonjo01, saenzol01)
LostBoys <- BattingMerged %>%
    filter(playerID == 'giambja01' | playerID == 'damonjo01' | playerID == 'saenzol01')
# Filter for year 2001 when we want to replace them, get H,2B,3B,HR,OBP,SLG,BA,AB,salary stats only
LostBoysSubset <- LostBoys %>%
    filter(yearID == 2001) %>%
    select(playerID, H, X2B, X3B, HR, OBP, SLG, BA, AB, salary)
LostBoysSubset
## Find list of players to replace the three 'Lost Boys'
# salary of three new cannot > 15 million
# combined AB >= combined lost players
# mean OBP >= mean OBP of lost players
sum(LostBoysSubset$salary) # $11,493,333
sum(LostBoysSubset$AB) # 1469
mean(LostBoysSubset$OBP) # 0.3638687
## Chop dataset to 2001 and the same stats
BattingMergedChopped <- BattingMerged %>%
    filter(yearID == 2001) %>%
    select(playerID, H, X2B, X3B, HR, OBP, SLG, BA, AB, salary)
#unique(BattingMergedChopped$playerID) #817 players in list
# -
# ### Make Histograms to Define Cutoffs for Poorly Performing Players
# +
## Look at the spread for OBP, BA, SLG
## OBP (.3 - .4 is good), can remove players with < 0.3
ggplot(BattingMergedChopped, aes(OBP)) + geom_histogram(bins=60) +
    scale_x_continuous(breaks = seq(0.1, 1, length = 10))
## BA (.3 is excellent), can remove players with < 0.21 based on spread
ggplot(BattingMergedChopped, aes(BA)) + geom_histogram(bins = 60) +
    scale_x_continuous(breaks = seq(0.1, 1, length = 10))
## SLG: peak is around < 0.5, maybe not best metric to filter by
ggplot(BattingMergedChopped, aes(SLG)) + geom_histogram(bins = 60)
# +
## Remove players based on their low OBP and low BA
BattingRmLow <- BattingMergedChopped %>%
    filter(OBP >= 0.3 & BA >= 0.21)
## Mean, median, mode for stats
mean(BattingRmLow$OBP) # 0.3622655
median(BattingRmLow$OBP) # 0.3391054
mean(BattingRmLow$BA) # 0.2942963
median(BattingRmLow$BA) # 0.27559
mean(BattingRmLow$SLG) # 0.4604938
median(BattingRmLow$SLG) # 0.4388026
# -
## Look at spread of salary
ggplot(BattingRmLow, aes(salary)) + geom_histogram(bins = 60)
# +
## Find cheapest players in the 3rd quartile for OBP and BA
summary(BattingRmLow$OBP) #0.37
summary(BattingRmLow$BA) #0.30
## Filter players in 3rd Quartile for OBP and BA. Chopped down to 69 top players
Players3Q <- BattingRmLow %>%
    filter(OBP >= 0.37 & BA >= 0.30)
## Sort by highest OBP, BA, and cheapest salary. Noticed a lot of low AB values.
Players3Q <- Players3Q %>%
    arrange(desc(OBP), desc(BA), salary)
# +
## Filter out low At Bats
ggplot(Players3Q, aes(AB)) + geom_histogram(bins = 60)
## Spread a bit, over 400 looks like a good cutoff
# +
## PlayersHighAB now has 36 players
PlayersHighAB <- Players3Q %>%
    filter(AB > 400)
## Check out spread for player stats
summary(PlayersHighAB)
## H: 141-242, HR: 2-73, OBP: 0.37-0.51, SLG: 0.41-0.86, BA: 0.3-0.35
## Sort again by lowest salary, highest OBP, highest SLG.
PlayersHighAB <- PlayersHighAB %>%
    arrange(salary, desc(OBP), desc(SLG))
# -
# ### Solutions!
# The goal is three replacement players with: sum(AB) >= 1469 & mean(OBP) >= 0.3638687 & salary < 15000000
# +
### TWO SOLUTIONS, but many combos on the list of 36 would work.
# One high salary player (28 row: bondsba01 or row 29: jonesch06)
# 2 low salary players (1: pujolal01, 2: mientdo01, or any 2 from row 4 up)
sum(PlayersHighAB[1:2,]$AB, PlayersHighAB[28,]$AB) # 1609
# BUG FIX: mean(x, y) treats the second argument as `trim`, silently ignoring
# the third player's OBP; combine the vectors with c() before averaging.
mean(c(PlayersHighAB[1:2,]$OBP, PlayersHighAB[28,]$OBP)) #0.39
sum(PlayersHighAB[1:2,]$salary, PlayersHighAB[28,]$salary) #10,715,000
## Dirt cheap, use 3 lowest salary. Batting stats even higher than lost players.
sum(PlayersHighAB[1:3,]$AB) # 1750
mean(PlayersHighAB[1:3,]$OBP) # 0.39
sum(PlayersHighAB[1:3,]$salary) # 630,000
# -
## Sorted by cheapest salary
PlayersHighAB
|
notebooks/.ipynb_checkpoints/Moneyball-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nltk
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import string
import json
# %matplotlib inline
# #### Stopwords are words which are filtered out before or after processing of natural language data, usually don't affect meaning
# Download NLTK resources: Portuguese stopword list and the RSLP stemmer.
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('rslp')
# Load the pre-cleaned, flattened corpus of labeled reviews.
with open('data/cleaned_base_flat.json') as json_file:
    json_comp = json.load(json_file)
json_df = pd.DataFrame(json_comp)
json_df.head()
# Re-shape into a list of (text, sentiment) tuples for the NLTK pipeline.
db = []
for data in json_comp:
    db.append((data['text'], data['sentiment']))
def clean_doc(doc):
    """Tokenize a raw document and strip noise.

    Pipeline: whitespace split -> strip punctuation -> keep purely
    alphabetic tokens -> drop Portuguese stopwords -> lowercase tokens
    longer than one character.

    Args:
        doc: raw document string.
    Returns:
        List of cleaned, lowercased tokens.
    """
    # split into tokens by white space
    tokens = doc.split()
    # remove punctuation from each token
    table = str.maketrans('', '', string.punctuation)
    tokens = [w.translate(table) for w in tokens]
    # remove remaining tokens that are not alphabetic
    tokens = [word for word in tokens if word.isalpha()]
    # filter out stop words ("w not in" is the idiomatic membership test)
    stop_words = set(stopwords.words('portuguese'))
    tokens = [w for w in tokens if w not in stop_words]
    # drop single-character tokens and normalize case
    return [word.lower() for word in tokens if len(word) > 1]
# #### Stemming is the process of reducing inflected (or sometimes derived) words to their word stem
def stemmer_treatment(texto):
    """Clean and stem every (text, sentiment) pair with the RSLP stemmer.

    Args:
        texto: iterable of (raw_text, sentiment) tuples.
    Returns:
        List of (stemmed_token_list, sentiment) tuples.
    """
    stemmer = nltk.stem.RSLPStemmer()
    resultado = []
    for frase, sentimento in texto:
        stems = [str(stemmer.stem(token)) for token in clean_doc(frase)]
        resultado.append((stems, sentimento))
    return resultado
def busca_palavras(frases):
    """Flatten a list of (word_list, sentiment) pairs into one word list.

    Args:
        frases: iterable of (word_list, sentiment) tuples.
    Returns:
        A single flat list containing every word, in order.
    """
    # Renamed from `all`, which shadowed the builtin of the same name.
    todas = []
    for (palavras, sentimento) in frases:
        todas.extend(palavras)
    return todas
def busca_freq(palavras):
    # Count word occurrences with NLTK's frequency distribution (dict-like).
    return nltk.FreqDist(palavras)
def busca_palavras_unicas(frequencia):
    """Return the distinct words of a frequency distribution (its keys view)."""
    return frequencia.keys()
# #### Return document's unique words
def extrator_palavras_treino(documento):
    """Build a boolean feature dict over the training vocabulary.

    Maps each word in the module-level `unique_treino` vocabulary to
    whether it occurs in `documento`.
    """
    presentes = set(documento)
    return {'%s' % palavra: (palavra in presentes) for palavra in unique_treino}
def extrator_palavras_teste(documento):
    """Build a boolean feature dict over the test vocabulary.

    Maps each word in the module-level `unique_teste` vocabulary to
    whether it occurs in `documento`.
    """
    presentes = set(documento)
    return {'%s' % palavra: (palavra in presentes) for palavra in unique_teste}
# #### Check database labeling for balace check
# Stem the corpus and check the class balance before splitting.
db_frame = pd.DataFrame(stemmer_treatment(db))
db_frame.columns = ['text', 'sentiment']
print('-- Classes ratio --')
print((db_frame.sentiment.value_counts() / db_frame.shape[0]) * 100)
# #### Split training in holdout
X = db_frame['text']
y = db_frame['sentiment']
# Stratified 50/50 holdout keeps the class ratio identical in both halves.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42, stratify=y)
# +
base_palavras = busca_palavras(stemmer_treatment(db))
treino_palavras = np.concatenate([p for p in X_train])
teste_palavras = np.concatenate([p for p in X_test])
print('Quantidade de palavras na base: {}'.format(pd.DataFrame(base_palavras).count()))
print('Quantidade de palavras para treino: {}'.format(pd.DataFrame(treino_palavras).count()))
print('Quantidade de palavras para teste: {}'.format(pd.DataFrame(teste_palavras).count()))
# -
# #### Gets word frequency on both test and training lists
freq_treino = busca_freq(treino_palavras)
freq_teste = busca_freq(teste_palavras)
# #### Gets unique words
unique_treino = busca_palavras_unicas(freq_treino)
unique_teste = busca_palavras_unicas(freq_teste)
# #### Merged features and labels
base_train_merged = [(X_train.get(key), y_train.get(key)) for key in X_train.keys()]
base_teste_merged = [(X_test.get(key), y_test.get(key)) for key in X_test.keys()]
# #### The primary purpose of this function is to avoid the memory overhead involved in storing all the featuresets for every token in a corpus
# NOTE(review): the *training* extractor is applied to the test set too —
# presumably intentional, so test features match the training vocabulary; confirm.
base_treino_features = nltk.classify.apply_features(extrator_palavras_treino, base_train_merged)
base_teste_features = nltk.classify.apply_features(extrator_palavras_treino, base_teste_merged)
# #### Naive Bayes
model = nltk.NaiveBayesClassifier.train(base_treino_features)
# #### Labels
model.labels()
model.show_most_informative_features(10)
# +
# Classify a new phrase: stem each word, extract features, print class probabilities.
nova_frase = 'produto nao sofisticado'
testeStem = []
stemmer = nltk.stem.RSLPStemmer()
for (p_train) in nova_frase.split():
    st = [p for p in p_train.split()]
    testeStem.append(str(stemmer.stem(st[0])))
words_extracted = extrator_palavras_treino(testeStem)
distrib = model.prob_classify(words_extracted)
for classe in distrib.samples():
    print('%s: %f' % (classe, distrib.prob(classe)))
# -
|
naive_bayes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] hide_input=true slideshow={"slide_type": "slide"}
# # `tuple` - N-Koteak (_Tuplak_)
#
# * `tuple` motako objektuak
# * Objektu sekuentzia **ALDAEZINA**.
# * Objektu guztiek ez dute mota berdinekoa izan beharrik.
# * Propietateak: Iteragarriak, Indexagarriak, Aldaezinak
# * Metodoak: 2 (`count`, `index`)
# * Eragileak: `+` , `*` , `in` , `not in`
# + [markdown] slideshow={"slide_type": "slide"}
# ## Zer da aldaezina izatea?
#
# * N-Koteek *eduki* aldaezina dute
# * N-Koteek *balio* aldakorra izan dezakete
# + slideshow={"slide_type": "fragment"}
# A tuple is immutable, but it may hold mutable elements (here, a list).
t = (1,2,3,[4,5,6])
print(t)
# Mutating the contained list: the tuple's *contents* (object identities)
# never change, yet its printed *value* does.
t[-1].append("????")
print(t)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Balio anitzak bueltatzen dituzten funtzioak
#
# * Funtzioek objektu/balio bakarra bueltatzen dute
# * Bueltatzen duten objektua sekuentzia bat izan daiteke
# * *N* balio bueltatu nahi dugunean, *N-Kote* bat erabili
# + slideshow={"slide_type": "fragment"}
def zatidura_osoa_hondarra(a, b):
    """Integer division by repeated subtraction.

    Returns the pair (quotient, remainder) of a divided by b, illustrating
    how a function can return multiple values as a tuple.
    """
    zatidura = 0
    hondarra = a
    while hondarra >= b:
        hondarra = hondarra - b
        zatidura = zatidura + 1
    return zatidura, hondarra
# Unpack the returned 2-tuple into quotient and remainder.
z,h = zatidura_osoa_hondarra(17,5)
print(f'17 = 5 x {z} + {h}')
# + [markdown] slideshow={"slide_type": "-"}
# <table border="0" width="100%" style="margin: 0px;">
# <tr>
# <td style="text-align:left"><a href="Zerrendak.ipynb">< < Zerrendak < <</a></td>
# <td style="text-align:right"><a href="Hiztegiak.ipynb">> > Hiztegiak > ></a></td>
# </tr>
# </table>
|
Gardenkiak/NKoteak.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Neural Networks**
#
# ---------------
#
# Neural networks (NN) are a more contemporary approach for modeling a time series.
# Multilayer perceptrons (MLPs) are a basic type of NN for modeling time series.
#
# ## **Examples From Previous Models**
#
# ---------------
#
# ### **SLR**
#
# The following would be used to express SLR
#
# $$
# Y_t = \beta_0 + \beta_1 X_{1} + \epsilon
# $$
#
# 
#
# ### **MLR**
#
# The following would be used to express MLR
#
# $$
# Y_t = \beta_0 + \beta_1 X_{1} + \beta_2 X_{2} + \beta_3 X_{3} + \epsilon
# $$
#
# 
#
# ## **Time Series**
#
# ---------------
#
# The following equation would be used to express the MLP for an AR(3).
#
# $$
# Y_t = \beta_0 + \beta_1 y_{t-1} + \beta_2 y_{t-2} + \beta_3 y_{t-3} + \epsilon
# $$
#
# 
#
# ## **Non-Linear Models**
#
# ---------------
#
# It can be shown that a **hidden layer** can be used to approximate **any continuous function**.
# An example of a non-linear NN is shown below.
#
# 
#
# ## **NN in R**
#
# ---------------
#
# The library `nnfor` can be used for constructing neural networks.
# There are a large number of parameters that can be tuned for NNs, such as hidden layers, activation functions, dropouts, etc.
# The following slide shows some details on parameters that can be used in `nnfor::mlp`
#
# 
|
notes/nn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NSD Data Loader
#
# This script illustrates usage of the NSD Data Loader Class.
# For this class to work as intended nsd_access should be installed: https://github.com/tknapen/nsd_access
# Both nsd_access and the data loader expect nsd data to be available in the original folder structure.
from nsdloader import NSDLoader
import os.path as op
# +
nsdfolder = op.join("..", "nsd") # adjust this to point to local copy of nsd dataset
nsdl = NSDLoader(nsdfolder)
# -
# Print a summary of the locally available data. This function will only show participants for which local data is available. It does so by checking for which participants beta data folders exist in the folder structure. It will therefore not catch whether data for all sessions has been downloaded. The number of available trials is read in from the behaviour data for each participant. The function returns a dictionary with each available subject as key and as value another dictionary of the form (Session: #Trials)
nsdl.get_data_info(verbose=True)
# Before loading the data, split it into training and test sets. The split is computed so that stimuli should only occur either in the training or the test set. (All repetitions of the same stimulus end up in the same set). The function accepts the approximate fraction of data to be held out for testing as test_fraction. Optionally only include stimuli that were shown to all participants
train_stims, test_stims = nsdl.create_image_split(test_fraction=.2, subset="shared", participant="subj01")
print(train_stims.shape)
print(test_stims.shape)
print(test_stims) # pandas dataframe containing the stimulus ids
# (cocoID to retrieve captions, 73KIDs used to get images and betas)
# Next, retrieve all trials in which the selected stimuli were shown, for all subjects specified.
trialdata_train = nsdl.trials_for_stim(['subj01', 'subj02'], train_stims)
trialdata_test = nsdl.trials_for_stim(['subj01', 'subj02'], test_stims)
trialdata_train # for subjects that completed all 30000 trials
# each stimulus was shown three times, for two subjects this amounts to
# 800 * 3 * 2 = 4800 trials in the training set with shared images
# Finally, compute the session index and load the data. Images can be excluded to save memory.
# The session index is needed to convert from the nsd indexing scheme:
#
# subject, session, run, trial
#
# To the nsd_access indexing scheme:
#
# subject, session, trial
#
# (I only have the first two sessions for subjects 1+2 available locally to save space, so I filter the data. This should not be necessary with the full dataset available.)
# +
# Restrict to locally available subjects (1, 2) and sessions (1, 2).
train_mask = (trialdata_train["SUBJECT"].isin([1,2])) & (trialdata_train["SESSION"].isin([1,2]))
test_mask = (trialdata_test["SUBJECT"].isin([1,2])) & (trialdata_test["SESSION"].isin([1,2]))
trialdata_train = nsdl.calculate_session_index(trialdata_train[train_mask])
trialdata_test = nsdl.calculate_session_index(trialdata_test[test_mask])
# Load (betas, captions, images) for every trial in each split.
train_betas, train_captions, train_imgs = nsdl.load_data(trialdata_train)
test_betas, test_captions, test_imgs = nsdl.load_data(trialdata_test)
# +
import matplotlib.pyplot as plt
print(train_betas.shape)
print(test_betas.shape) # dimensions of betas depend on the format selected, fsaverage is loaded here
print(len(train_captions)) # list of captions for each trial
print(len(test_captions))
print(train_imgs.shape)
print(test_imgs.shape)
# Show three example stimuli side by side with their captions printed below.
trials = [42,43,44]
plt.figure()
for i in range(len(trials)):
    plt.subplot(131 + i)
    plt.imshow(train_imgs[i])
    print(train_captions[i])
plt.show()
# -
# Data can also be loaded in batches to save memory. This returns an iterator that loads one batch of data at a time:
batchdataloader = nsdl.load_batch_data(trialdata_train, batchsize=100, load_imgs=True)
b,c,i = batchdataloader.__next__()
print(f"betas: {b.shape}\ncaptions: {len(c)}\nimages: {i.shape}")
|
Demo_NSD_DataLoader.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# # "Numerai Analysis & Tips in Julia!"
# > "This post is a (near) 1:1 replication of the Python Jupyter notebook analysis & tips for the Numerai data science competition, but written in Julia"
#
# - toc: false
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [numerai, MLJ, scikit-learn, datascience, julia]
# - image: images/numerai.png
# - hide_binder_badge: true
# - hide_colab_badge: true
# +
using DataFrames # Requires > 0.22.0 for rownums function
using CSV
using Statistics
using LinearAlgebra
using Plots
using StatsBase
using Distributions
using MLJ
using MLJLinearModels
using MLJXGBoostInterface
using XGBoost
import MLJBase: train_test_pairs
# Using for Logistic + CV options, also as an example of how to use Sklearn within Julia
using ScikitLearn
@sk_import linear_model: (LogisticRegression, LinearRegression)
@sk_import model_selection: (TimeSeriesSplit, KFold, GroupKFold, cross_val_score)
@sk_import metrics: make_scorer
# +
# Load the Numerai training data into a DataFrame.
df = CSV.File("numerai_training_data.csv") |> DataFrame
first(df,5)
# +
# There are 501808 rows grouped into eras, and a single target (target)
size(df)
# -
# Feature columns all start with "feature"; eras are parsed to integers.
features = select(df, r"feature") |> names
df.erano = parse.(Int64, replace.(df.era, "era" => ""))
eras = df.erano
target = "target"
length(features)
# The features are grouped together into 6 types
feature_groups =
    Dict(g => [c for c in features if startswith(c, "feature_$g")]
         for g in ["intelligence", "wisdom", "charisma", "dexterity", "strength", "constitution"])
# +
# The models should be scored based on the rank-correlation (spearman) with the target
# There's probably (definitely) a better way to write this - [ordinalrank would solve the ranking]
# Spearman-style score: rank predictions within each era, then correlate the
# normalized ranks with the true target over the whole frame.
function numerai_score(y_true, y_pred, df)
    # Per era: sort by prediction, assign 1..n ranks, record era size n;
    # rnum preserves the original row order so ranks can be re-sorted back.
    rank_pred = sort(combine(groupby(DataFrame(y_pred = y_pred
                                     , eras = df.erano
                                     , rnum = rownumber.(eachrow(df)))
                           , :eras)
                   , sdf -> sort(sdf, :y_pred)
                   , :eras => eachindex => :rank
                   , nrow => :n)
            , :rnum)
    # Normalize rank by era size so eras of different sizes are comparable.
    rank_pred = rank_pred.rank ./ rank_pred.n
    cor(y_true, rank_pred)
end
# It can also be convenient while working to evaluate based on the regular (pearson) correlation
# R2 Score to replicate the Python library outputs
# Coefficient of determination (R²), replicating sklearn's r2_score output.
# eps() in the denominator guards against division by zero for a constant target.
function r2_score(y_true, y_pred)
    @assert length(y_true) == length(y_pred)
    ss_res = sum((y_true .- y_pred).^2)
    # Renamed from `mean`, which shadowed Statistics.mean inside this function.
    ȳ = sum(y_true) / length(y_true)
    ss_total = sum((y_true .- ȳ).^2)
    return 1 - ss_res/(ss_total + eps(eltype(y_pred)))
end
# cor() returns a matrix with no need for manipulation, so no need to replicate that here
# +
# There are 120 eras numbered from 1 to 120
describe(df, :all, cols=:erano)
# +
# The earlier eras are smaller, but generally each era is 4000-5000 rows
group_df = combine(groupby(df, :erano), nrow => :count)
plot(group_df.erano, group_df.count)
# +
# The target is discrete and takes on 5 different values
combine(groupby(df, :target), nrow => :count)
# -
# # Some of the features are very correlated
# Especially within feature groups
# +
# Full feature-by-feature correlation matrix, with a `features` label column.
feature_corrs = DataFrame(cor(Matrix(df[!, names(df, features)])), features)
insertcols!(feature_corrs, 1, :features => features)
first(feature_corrs,5)
# -
first(stack(feature_corrs), 5)
# +
# Long format; keep only the lower triangle (variable < features) to
# drop self-correlations and duplicate pairs, then rank by correlation.
tdf = stack(feature_corrs)
tdf = tdf[coalesce.(tdf.variable .< tdf.features, false), :]
sort!(tdf, :value)
vcat(first(tdf, 5), last(tdf, 5))
# -
# ### The correlation can change over time
# You can see this by comparing feature correlations on the first half and second half on the training set
# +
df₁ = df[coalesce.(eras .<= median(eras), false), :]
df₂ = df[coalesce.(eras .> median(eras), false), :]
corr₁ = DataFrame(cor(Matrix(df₁[!, names(df₁, features)])), features)
insertcols!(corr₁, 1, :features => features)
corr₁ = stack(corr₁)
corr₁ = corr₁[coalesce.(corr₁.variable .< corr₁.features, false), :]
corr₂ = DataFrame(cor(Matrix(df₂[!, names(df₂, features)])), features)
insertcols!(corr₂, 1, :features => features)
corr₂ = stack(corr₂)
corr₂ = corr₂[coalesce.(corr₂.variable .< corr₂.features, false), :]
# Join the two halves per feature pair and rank by the correlation change.
tdf = leftjoin(corr₁, corr₂, on = [:variable, :features], makeunique=true)
rename!(tdf, [:value, :value_1] .=> [:corr₁, :corr₂])
tdf.corr_diff = tdf.corr₂ - tdf.corr₁
sort!(tdf, :corr_diff)
vcat(first(tdf,5), last(tdf,5))
# -
# ## Some features are predictive on their own
feature_scores =
    Dict(feature => numerai_score(df.target, df[!, feature], df)
         for feature in features);
sort(collect(feature_scores), by=x->x[2])
# +
# Single features do not work consistently though
by_era_correlation =
    sort(Dict(values(erano)[1] => cor(tdf.target, tdf.feature_strength34)
              for (erano, tdf) in pairs(groupby(df, :erano))))
plot(by_era_correlation)
# +
# With a rolling 10 era average you can see some trends
# n-period rolling mean via the cumulative-sum trick: the difference of two
# shifted cumsums gives each window's sum in O(length(arr)).
function rolling_mean(arr, n)
    rs = cumsum(arr)[n:end] .- cumsum([0.0; arr])[1:end-n]
    return rs ./ n
end
n_window = 10
plot(Dict(zip(collect(n_window-1:length(by_era_correlation)),
     rolling_mean(collect(values(by_era_correlation)),n_window))))
# -
# # Gotcha: MSE looks worse than correlation out of sample
# Models will generally be overconfident, so even if they are good at ranking rows, the Mean-Squared-Error of the residuals could be larger than event the Mean-Squared-Error of the target (r-squared<0)
# Split eras at the median: df₁ = first half, df₂ = second half.
df₁ = df[coalesce.(eras .<= median(eras), false), :]
df₂ = df[coalesce.(eras .> median(eras), false), :];
# +
# This is using MLJ, Julia's homegrown machine-learning library
Linear = @load LinearRegressor pkg=MLJLinearModels verbosity=0
linear = Linear()
# Fit one linear model per half so each can be evaluated on the other.
lin₁ = machine(linear, df₁[!, names(df₁, features)], df₁.target)
fit!(lin₁, verbosity=0)
lin₂ = machine(linear, df₂[!, names(df₂, features)], df₂.target)
fit!(lin₂, verbosity=0);
# +
# Note in particular that the R-squared of (train_on_1, eval_on_2) is slightly negative!
# BUG FIX: the script defines only df₁/df₂ and lin₁/lin₂ (with subscripts);
# df1/df2 and lin1/lin2 were undefined names and raised UndefVarError.
r2₁ = [
    r2_score(dfₓ.target, MLJ.predict(model, dfₓ[!, names(dfₓ, features)]))
    for dfₓ in [df₁, df₂]
    for model in [lin₁, lin₂]]
DataFrame(reshape(r2₁, 2, 2), ["eval_on_1","eval_on_2"])
# +
# Note in particular that the correlation of (train_on_1, eval_on_2) is quite decent (comparatively)
# 2x2 grid of numerai scores: each model evaluated on each data half.
corrs = [
    numerai_score(MLJ.predict(model, dfₓ[!, names(dfₓ, features)]), dfₓ.target, dfₓ)
    for dfₓ in [df₁, df₂]
    for model in [lin₁, lin₂]]
DataFrame(reshape(corrs, 2, 2), ["eval_on_1","eval_on_2"])
# +
# This can be be run with XGB as well
# +
# Same cross-evaluation, this time with gradient-boosted trees.
XGB = @load XGBoostRegressor pkg=XGBoost verbosity=0
xgb = XGB()
xgb₁ = machine(xgb, df₁[!, names(df₁, features)], df₁.target)
fit!(xgb₁, verbosity=0)
xgb₂ = machine(xgb, df₂[!, names(df₂, features)], df₂.target)
fit!(xgb₂, verbosity=0);
# +
r2₂ = [
    r2_score(dfₓ.target, MLJ.predict(model, dfₓ[!, names(dfₓ, features)]))
    for dfₓ in [df₁, df₂]
    for model in [xgb₁, xgb₂]]
DataFrame(reshape(r2₂, 2, 2), ["eval_on_1","eval_on_2"])
# +
corrs2 = [
    numerai_score(MLJ.predict(model, dfₓ[!, names(dfₓ, features)]), dfₓ.target, dfₓ)
    for dfₓ in [df₁, df₂]
    for model in [xgb₁, xgb₂]]
DataFrame(reshape(corrs2, 2, 2), ["eval_on_1","eval_on_2"])
# -
# # Gotcha: {0, 1} are noticeably different from {0.25, 0.75}
# This makes training a classifier one-versus-rest behave counterintuitively.
#
# Specifically, the 0-vs-rest and 1-vs-rest classifiers seem to learn how to pick out extreme targets, and their predictions are the most correlated
# +
# Mostly doing this in Scikitlearn.JL due to not figuring out how to handle predict_proba in MLJ
# One-vs-rest logistic regression; the target (0, .25, .5, .75, 1) is scaled
# by 4 to get integer class labels 0..4.
logistic = LogisticRegression()
ScikitLearn.fit!(logistic, Matrix(df[!, names(df, features)]), convert.(Int, df.target*4))
ScikitLearn.score(logistic, Matrix(df[!, names(df, features)]), convert.(Int, df.target*4))
# +
# The first and last class are highly correlated
log_corrs = cor(transpose(ScikitLearn.predict_proba(logistic, Matrix(df[!, names(df, features)]))), dims=2)
display(log_corrs)
heatmap(log_corrs, c=palette(:RdYlGn))
# +
# In-sample correlation
# Expected class value = probability matrix times the class labels.
prob_matrix = ScikitLearn.predict_proba(logistic, Matrix(df[!, names(df, features)]))
classes = logistic.classes_
numerai_score(df.target, prob_matrix * classes, df)
# -
# A standard linear model has a slightly higher correlation
linear = LinearRegression()
ScikitLearn.fit!(linear, Matrix(df[!, names(df, features)]), df.target)
ScikitLearn.score(linear, Matrix(df[!, names(df, features)]), df.target)
preds = ScikitLearn.predict(linear, Matrix(df[!, names(df, features)]))
numerai_score(df.target, preds, df)
# # Gotcha: eras are homogenous, but different from each other
# ## Random cross-validation will look much better than cross-validating by era
#
# Even for a simple linear model, taking a random shuffle reports a correlation of 4.3%, but a time series split reports a lower score of 3.4%
# +
#linear = LinearRegression()
#ScikitLearn.fit!(linear, Matrix(df[!, names(df, features)]), df.target)
# +
# Compare CV strategies: shuffled folds mix rows from the same era across
# train/test, while grouped / time-ordered splits keep eras apart and give a
# more honest estimate of out-of-sample performance.
crossvalidators = [KFold(5), KFold(5, shuffle = true), GroupKFold(5), TimeSeriesSplit(5)]
for cv in crossvalidators
    println(cv)
    println(
        mean(
            cross_val_score(estimator = LinearRegression(),
                X = Matrix(df[!, names(df, features)]),
                y = df.target,
                cv = cv,
                # `groups` is only consulted by GroupKFold; the others ignore it.
                groups = eras,
                scoring = make_scorer(cor, greater_is_better = true)
            )
        )
    )
end
# -
# ## Eras can be more or less applicable to other eras
# You can test this be splitting the eras into blocks of 10, training on each block, and evaluating on each other block.
eras10 = (eras .÷ 10) * 10
countmap(eras10)
# +
# Bucket every row into a block of 10 eras, then group by the bucket so each
# block can serve as a separate train/test unit in the loop below.
gdf = copy(df)
gdf[:, :eras10] = eras10
# Fix: the filter/groupby must run on `gdf` — the frame that actually carries
# the freshly added :eras10 column the row predicate reads — not on `xdf`.
# Buckets >= 120 are dropped (presumably the short trailing bucket; TODO confirm).
gdf = groupby(filter(row -> row[:eras10] < 120, gdf), :eras10);
# Train a linear model on each 10-era block and score it on every block,
# producing a train-era x test-era correlation grid (heat-mapped below).
results10 = DataFrame(train_era = Int32[], test_era = Int32[], value = Float32[])
for train_era in keys(gdf)
    println(train_era[1])
    gdf₁ = gdf[train_era]
    model = LinearRegression()
    ScikitLearn.fit!(model, Matrix(gdf₁[!, names(gdf₁, features)]), gdf₁.target)
    for test_era in keys(gdf)
        gdf₂ = gdf[test_era]
        # Pearson correlation of predictions vs. targets on the test block.
        push!(results10, [train_era[1],
                          test_era[1],
                          cor(gdf₂.target, ScikitLearn.predict(model, Matrix(gdf₂[!, names(gdf₂, features)])))])
    end
end
# -
results_df = unstack(results10, :test_era, :value)
heatmap(clamp!(Matrix(select(results_df, Not(:train_era))), -.04, .04), c=palette(:RdYlGn))
# Here is an advanced paper that talks about generalization.
# Eras can be thought about in the same way that "distributions" or "environments" are talked about here
# https://arxiv.org/pdf/1907.02893.pdf
# ## Gotcha: Since the signal-to-noise ratio is so low, models can take many more iterations than expected, and have scarily high in-sample performance
df₁ = df[coalesce.(eras .<= median(eras), false), :]
df₂ = df[coalesce.(eras .> median(eras), false), :];
# +
# Custom XGBoost eval callback: `feval` expects a (metric-name, value) pair;
# here the value is the Pearson correlation between the booster's predictions
# and the DMatrix labels.
our_score(preds, dtrain) = ("score", cor(get_info(dtrain, "label"), preds))
dtrain = DMatrix(Matrix(df₁[!, features]), label=df₁.target)
dtest = DMatrix(Matrix(df₂[!, features]), label=df₂.target)
dall = DMatrix(Matrix(df[!, features]), label=df.target);
# +
# This part I wasn't able to replicate perfectly, XGBoost on Julia seems to(?) lack an evals_result to push the data into
# the source code shows only that it prints to stderr - one could redirect it to an IOBuffer and regex parse it into an
# array but realistically the amount of effort isn't worth it, since one can clearly see the out-of-sample performance
# differences purely from the numbers printed
param = Dict(
"eta" => 0.1,
"max_depth" => 3,
"objective" => "reg:squarederror",
"eval_metric" => "rmse"
)
xgboost(dtrain,
100,
param = param,
watchlist = [(dtrain, "train"), (dtest, "test")],
feval = our_score
)
# -
# # The results are sensitive to the choice of parameters, which should be picked through cross-validation
df₁ = df[coalesce.(eras .<= median(eras), false), :]
df₂ = df[coalesce.(eras .> median(eras), false), :];
XGB = @load XGBoostRegressor pkg=XGBoost verbosity=0
Linear = @load LinearRegressor pkg=MLJLinearModels verbosity=0
Elastic = @load ElasticNetRegressor pkg=MLJLinearModels verbosity=0
models = vcat(
[Linear()],
[Elastic(lambda = λ) for λ in [0.01, 0.005, 0.002, 0.001, 0.0005, 0.0002, 0.0001, 0.00005, 0.00002, 0.00001]],
[XGB()],
[XGB(eta = 0.01, num_round=1000)],
[XGB(eta = 0.01, colsample_bytree=0.1, num_round=1000)],
[XGB(eta = 0.01, colsample_bytree=0.1, num_round=1000, max_depth=5)],
[XGB(eta = 0.001, colsample_bytree=0.1, num_round=1000, max_depth=5)]
);
for model in models
print(" -- ", model, "\n")
mach = machine(model, df₁[!, features], df₁.target)
MLJ.fit!(mach, verbosity=0)
outsample = numerai_score(df₂.target, MLJ.predict(mach, df₂[!, features]), df₂)
insample = numerai_score(df₁.target, MLJ.predict(mach, df₁[!, features]), df₁)
print("outsample: $outsample, insample: $insample", "\n")
end
# ## Gotcha: Models with large exposures to individual features tend to perform poorly or inconsistently out of sample ##
# +
# MLJ matches the XGBoost implementation in Python, where num_round == n_estimators
XGB = @load XGBoostRegressor pkg=XGBoost verbosity=0
xgb = XGB(eta = 0.01, max_depth=5, num_round=1000);
mach = machine(xgb, df₁[!, features], df₁.target)
MLJ.fit!(mach, verbosity=0)
xgb_preds = MLJ.predict(mach, df₂[!, features]);
# -
xgb_preds
# ### Our predictions have correlation > 0.2 in either direction for some single features!
# Sure hope those features continue to act as they have in the past!
# +
cor_list = []
for feature in features
append!(cor_list, cor(df₂[!, feature], xgb_preds))
end
describe(DataFrame(cor_list = cor_list), :all)
# +
# treating as one function since Julia gets snippy about subsetting with [!, column] in groupbys
# Gaussianize a prediction column and partially neutralize its exposure to the
# given feature columns.
# treating as one function since Julia gets snippy about subsetting with [!, column] in groupbys
function norm_neut(df, columns, feats, proportion=1.0)
    # Rank-transform the predictions to uniform (0,1), then map through the
    # standard-normal quantile function (Gaussianization).
    scores = quantile(Normal(0.0,1.0),(ordinalrank(df[!, columns]) .- 0.5) ./ length(df[!, columns]))
    exposures = Matrix(df[!, feats])
    # Remove `proportion` of the least-squares projection of the scores onto
    # the feature columns, i.e. partially strip out the feature exposures.
    neutralized = scores - proportion * exposures * (pinv(exposures) * scores)
    # Rescale the residual to unit standard deviation.
    return neutralized / std(neutralized)
end;
# +
# Neutralize the XGBoost predictions era by era (removing 50% of the feature
# exposure), then min-max rescale the result back into [0, 1].
df₂.preds = xgb_preds
df₂[:, :preds_neutralized] = combine(x -> norm_neut(x, :preds, features, 0.5), groupby(df₂, :erano)).x1
x_min = minimum(df₂.preds_neutralized)
x_max = maximum(df₂.preds_neutralized)
X_std = (df₂.preds_neutralized .- x_min) / (x_max .- x_min)
# `* (1 - 0) .+ 0` is the generic sklearn-style rescale to [min=0, max=1];
# it is a no-op for these bounds but kept to show the formula.
df₂[!, :preds_neutralized] = X_scaled = X_std * (1 - 0) .+ 0;
# -
describe(df₂.preds_neutralized)
# ### Now our single feature exposures are much smaller
# +
cor_list2 = []
for feature in features
append!(cor_list2, cor(df₂[!, feature], df₂.preds_neutralized))
end
describe(DataFrame(cor_list2 = cor_list2), :all)
# -
# ### Our overall score goes down, but the scores are more consistent than before. This leads to a higher sharpe
unbalanced_scores_per_era = combine(x -> cor(x.preds, x.target), groupby(df₂, :era))
balanced_scores_per_era = combine(x -> cor(x.preds_neutralized, x.target), groupby(df₂, :era));
# +
println("score for high feature exposure: ", mean(unbalanced_scores_per_era.x1))
println("score for balanced feature expo: ", mean(balanced_scores_per_era.x1))
println("std for high feature exposure: ", std(unbalanced_scores_per_era.x1))
println("std for balanced feature expo: ", std(balanced_scores_per_era.x1))
println("sharpe for high feature exposure: ", mean(unbalanced_scores_per_era.x1)/std(unbalanced_scores_per_era.x1))
println("sharpe for balanced feature expo: ", mean(balanced_scores_per_era.x1)/std(balanced_scores_per_era.x1))
# -
describe(balanced_scores_per_era.x1)
describe(unbalanced_scores_per_era.x1)
|
_notebooks/2021-07-14-analysis_and_tips_julia.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ashikshafi08/Learning_Tensorflow/blob/main/Experiments/De_Scrambling_Text_from_scratch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Mu8GiMfGJ_7G"
# # Converting Scrambled sequence into a Unscrambled sequence using attention.
#
# Reference: https://www.tensorflow.org/text/tutorials/nmt_with_attention
# + colab={"base_uri": "https://localhost:8080/"} id="ZXrtEA9lGzCW" outputId="6b6a301a-be42-4781-f30e-b40d96dcd9f5"
# !pip install aicrowd-cli
API_KEY = ''
# !aicrowd login --api-key $API_KEY
# Downloading the Dataset
# !rm -rf data
# !mkdir data
# !aicrowd dataset download --challenge de-shuffling-text -j 3 -o data
# + id="dVPH-jwHIONU"
# Importing all the packages we need
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="P9ZJq4reN10H" outputId="b785561d-fc11-4795-e60b-334de391b9c7"
# Importing the data
train_data = pd.read_csv('data/train.csv')
val_data = pd.read_csv('data/val.csv')
test_data = pd.read_csv('data/test.csv')
# Printing out all shapes of our data
print(f'Shape of the train data: {train_data.shape}')
print(f'Shape of the validation data: {val_data.shape}')
print(f'Shape of the test data: {test_data.shape}')
# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="aiYByVBPN6bS" outputId="9ae982c2-727f-45f4-a577-3ad5b1ef5673"
# How does our train data looks like?
train_data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="vgk0Pc6-Woxl" outputId="37c8cb56-b9d3-4c54-f451-f9b8ff5de2fe"
# Shuffling our train data
train_data_shuffled = train_data.sample(frac = 1 , random_state = 42)
train_data_shuffled.head() , train_data_shuffled.shape
# + colab={"base_uri": "https://localhost:8080/"} id="Lc3LZNjiN_Ck" outputId="548cf8c2-b15e-4d47-b364-895ecc57c095"
# Splitting sentences and labels
train_sentences = train_data_shuffled['text'].to_numpy()
train_labels = train_data_shuffled['label'].to_numpy()
val_sentences = val_data['text'].to_numpy()
val_labels = val_data['label'].to_numpy()
test_sentences = test_data['text'].to_numpy()
test_labels = test_data['label'].to_numpy()
# Checking the shapes
print(f'Shape of the train sentences: {train_sentences.shape}')
print(f'Shape of the validation sentences: {val_sentences.shape}')
print(f'Shape of the train labels: {train_labels.shape}')
print(f'Shape of the validation labels: {val_labels.shape}')
# + colab={"base_uri": "https://localhost:8080/"} id="4bz8V1gpa7sN" outputId="9702cdbe-3025-45ef-8184-224b46643f15"
# Creating a tf.data.dataset of our sentences and labels
train_dataset = tf.data.Dataset.from_tensor_slices((train_sentences , train_labels)).shuffle(1000)
val_dataset = tf.data.Dataset.from_tensor_slices((val_sentences , val_labels))
# Adding a batch
train_dataset = train_dataset.batch(64)
train_dataset , val_dataset
# + colab={"base_uri": "https://localhost:8080/"} id="ZaAWBS79bLVM" outputId="0d440542-771b-4a04-b3b0-f2d1bdb0a605"
# Looking into our train_dataset just a batch (only 5 first texts in a batch)
for scrambled_text , unscrambled_text in train_dataset.take(1):
print(f'Below is the Scrambled version:\n {scrambled_text[:5]}')
print('\n----------\n')
print(f'Below is the Un-Scrambled version:\n {unscrambled_text[:5]}')
# + id="MveFqAiWd3s7"
# Creating text vectorization layer for the scrambled words
# Vocabulary cap shared by both vectorization layers.
max_vocab_length = 10000
# Maps raw scrambled strings to integer token ids (lowercased, punctuation
# stripped, unigrams + bigrams).
input_text_vectorizer = tf.keras.layers.experimental.preprocessing.TextVectorization(
    standardize = 'lower_and_strip_punctuation' ,
    ngrams = 2 ,
    max_tokens = max_vocab_length
)
# Fitting on our train sentences (scrambled words)
input_text_vectorizer.adapt(train_sentences)
# + colab={"base_uri": "https://localhost:8080/"} id="eooVx9i0pw6P" outputId="497f3a3e-3d90-43fd-aaf0-93bd55021c32"
# First 10 words from the vocabulary
input_text_vectorizer.get_vocabulary()[:10]
# + id="W1TtNwlVnv8D"
# Creating a text vectorization layer for the unscrambled words
output_text_vectorizer = tf.keras.layers.experimental.preprocessing.TextVectorization(
standardize = 'lower_and_strip_punctuation' ,
ngrams = 2,
max_tokens = max_vocab_length
)
# Fitting on our train labels (unscrambled words)
output_text_vectorizer.adapt(train_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="cP-f3FPAqnjK" outputId="fddab936-cdb4-4c01-b21e-9b4848a95a8e"
# First 10 words from the vocab
output_text_vectorizer.get_vocabulary()[:10]
# + colab={"base_uri": "https://localhost:8080/"} id="gTRBmXyIqtmm" outputId="ec791f9f-6c64-4cdd-b98e-1bb4eaa503de"
# Passing a scrambled text (strings) into our layer
scrambled_tokens = input_text_vectorizer(scrambled_text)
scrambled_tokens[:3]
# + [markdown] id="KXjg8ZRRsJyk"
# In the above example we passed our text (strings) into our text vectorizer layer and it returns us a vector of token ID's of our sequence.
#
# Likewise we can get the corresponding sequence of a token ID, that is convert token ids back to text using `get_vocabulary()` method.
# + colab={"base_uri": "https://localhost:8080/"} id="E9Kh1G5CsY77" outputId="5a15c943-5e18-4117-84bc-3b637087252c"
# Creating a numpy array of the vocabulary
input_vocab = np.array(input_text_vectorizer.get_vocabulary())
input_vocab
# + colab={"base_uri": "https://localhost:8080/"} id="1T56Lb4ctRv3" outputId="8ac5c216-37a6-4090-e634-555776f99a97"
# Indexing our scrambled tokens into the array of vocbulary
tokens = input_vocab[scrambled_tokens.numpy()]
print(f'Actual sequence:\n\n {scrambled_text[:3]}\n')
print(f'\nThe sequence in tokens:\n\n {tokens[:3]}')
# + [markdown] id="PH15QmLVt8Ep"
# ## Modelling Part
#
# Here we are going to build a seq2seq architecture from scratch we will start building from,
# - Encoder
# - Decoder
# - Attention Head
# + [markdown] id="AZNvJICLuNP3"
# Since we are going to use a lot of low-level APIs where it's easy to get the
# shapes wrong, this `ShapeChecker` is used to check shapes throughout the tutorial.
# + id="BEKdhaCB8-0f"
class ShapeChecker():
    """Remembers the length of each named axis it has seen and raises
    ``ValueError`` when a later tensor disagrees in rank or axis length."""

    def __init__(self):
        # Maps axis-name -> the length recorded the first time it was seen.
        self.shapes = {}

    def __call__(self, tensor, names, broadcast=False):
        # Shapes are only concrete in eager mode; inside a tf.function trace
        # there is nothing reliable to check.
        if not tf.executing_eagerly():
            return

        if isinstance(names, str):
            names = (names,)

        shape = tf.shape(tensor)
        rank = tf.rank(tensor)

        if rank != len(names):
            raise ValueError(f'Rank mismatch:\n'
                             f'    found {rank}: {shape.numpy()}\n'
                             f'    expected {len(names)}: {names}\n')

        for axis, name in enumerate(names):
            # An integer "name" is a fixed expected size; a string name is
            # looked up in (and possibly added to) the cache.
            expected = name if isinstance(name, int) else self.shapes.get(name, None)
            actual = shape[axis]

            if broadcast and actual == 1:
                # A broadcastable size-1 axis matches anything.
                continue

            if expected is None:
                # First appearance of this axis name: record its length.
                self.shapes[name] = actual
                continue

            if actual != expected:
                raise ValueError(f"Shape mismatch for dimension: '{name}'\n"
                                 f"    found: {actual}\n"
                                 f"    expected: {expected}\n")
# + id="T6fNLob2-3IS"
# Defining needed constants for our model
embedding_dim = 256
units = 1024
# + [markdown] id="g1m9z9iU_ARb"
# #### Building our encoder layer
# The encoder,
# - Takes a list of token IDs (from input_text_vectorizer)
# - Looks up an embedding vector for each token (we will create that using `layers.Embedding`)
# - Processes the embeddings into a new sequences (using a `layers.GRU`)
# - **Returns**
# - The processed sequence. This will be passed to the attention head.
# - The internal state. This will be used to initialize the encoder.
#
# + id="Ccira5SaAOAU"
# Building a Encoder layer
class Encoder(tf.keras.layers.Layer):
    """Encoder half of the seq2seq model: token ids -> embeddings -> GRU.

    Args:
        input_vocab_size: size of the source vocabulary (embedding rows).
        embedding_dim: dimensionality of each token embedding.
        enc_units: number of GRU units (also the size of the returned state).
    """

    def __init__(self, input_vocab_size, embedding_dim, enc_units):
        super(Encoder, self).__init__()
        self.enc_units = enc_units
        self.input_vocab_size = input_vocab_size

        # This embedding layer converts tokens to vectors
        self.embedding = tf.keras.layers.Embedding(self.input_vocab_size,
                                                   embedding_dim)

        # Using GRU layers to process those vectors sequentially
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')

    def call(self, tokens, state=None):
        """Encode a batch of token-id sequences.

        Args:
            tokens: int tensor of shape (batch, s) from the TextVectorization layer.
            state: optional initial GRU state of shape (batch, enc_units).

        Returns:
            (output, state): per-step GRU outputs of shape (batch, s, enc_units)
            and the final hidden state of shape (batch, enc_units).
        """
        shape_checker = ShapeChecker()
        shape_checker(tokens, ('batch', 's'))

        # 2. The embedding layer looks up the embedding for each token
        vectors = self.embedding(tokens)  # gives us the vectors for each token
        shape_checker(vectors, ('batch', 's', 'embed_dim'))

        # 3. The GRU processes the embedding sequence
        #    output shape: (batch, s, enc_units)
        #    state shape:  (batch, enc_units)
        output, state = self.gru(vectors, initial_state=state)
        shape_checker(output, ('batch', 's', 'enc_units'))
        shape_checker(state, ('batch', 'enc_units'))

        # 4. Return the new sequence and its state
        return output, state
# + [markdown] id="13HASLcgF5QP"
# Alright that's complicated let's see how it works.
# + colab={"base_uri": "https://localhost:8080/"} id="FNLXsz1LGGWq" outputId="b39a9a22-406c-458a-927d-274735440415"
# First, convert the input text to tokens using the TextVectorization layer
example_tokens = input_text_vectorizer(scrambled_text)
example_tokens
# + id="V2CdItXlGTEz"
# Encode the input sequence (apply everything we wrote in our class)
encoder = Encoder(input_vocab_size= input_text_vectorizer.vocabulary_size() ,
embedding_dim = embedding_dim, enc_units = units)
# + id="hTBQdSOKGsgx"
# Unravelling each variable by applying the encoder to our example tokens
example_encoder_output , example_encoder_state = encoder(example_tokens)
# + colab={"base_uri": "https://localhost:8080/"} id="osuKJ0sBG4nS" outputId="f9d3412d-387d-4192-ee66-a92b092f62b9"
# Good, let's print them one by one
print(f'Input batch, shape (batch): {scrambled_text.shape}\n')
print(f'Input batch tokens , shape (batch ,s): {example_tokens.shape}\n')
print(f'Encoder output , shape (batch, s , units): {example_encoder_output.shape}\n')
print(f'Encoder state , shape (batch, units): {example_encoder_state.shape}\n')
# + [markdown] id="drENKdsdJuIV"
# ##### **Attention Head**
#
# - The decoder uses attention to selectively focus on parts of the input sequence.
# - The attention takes a **sequence of vectors** for each example and returns an **`attention vector` for each example**.
# - This attention layer is similar to `layers.GlobalAveragePooling1D` but the attention layer performs a weighted average.
#
# The attention head equation,
# - Calculates the attention weights, as a softmax across the encoders output and sequence.
# - Calculates the context vector as the weighted sum of the encoder outputs.
#
# We use (Bahdanau's additive attention)[https://arxiv.org/pdf/1409.0473.pdf] and TensorFlow includes the implementations of both as `layers.Attention` and `layers.AdditiveAttention`.
#
# + id="afEvgk3vZd44"
# Building the Bahdanau's attention head
class BahdanauAttention(tf.keras.layers.Layer):
    """Additive (Bahdanau) attention head.

    Projects the decoder query (W1) and the encoder values (W2) into a shared
    attention space and lets ``layers.AdditiveAttention`` compute the softmax
    weights and the weighted context vector.
    """

    def __init__(self, units):
        super().__init__()
        # Calculating the Bahdanau attention
        self.W1 = tf.keras.layers.Dense(units, use_bias=False)
        self.W2 = tf.keras.layers.Dense(units, use_bias=False)
        self.attention = tf.keras.layers.AdditiveAttention()

    def call(self, query, value, mask):
        """Compute the attention context for a batch.

        Args:
            query: decoder states, shape (batch, t, query_units).
            value: encoder outputs, shape (batch, s, value_units).
            mask: bool tensor of shape (batch, s); False marks padding.

        Returns:
            (context_vector, attention_weights) of shapes
            (batch, t, value_units) and (batch, t, s).
        """
        shape_checker = ShapeChecker()
        shape_checker(query, ('batch', 't', 'query_units'))
        shape_checker(value, ('batch', 's', 'value_units'))
        shape_checker(mask, ('batch', 's'))

        # Calculating W1@ht
        w1_query = self.W1(query)
        shape_checker(w1_query, ('batch', 't', 'attn_units'))

        # Calculating W2@ht
        w2_key = self.W2(value)
        shape_checker(w2_key, ('batch', 's', 'attn_units'))

        # The query positions are all real (no padding); only the value side
        # needs the caller-supplied padding mask.
        query_mask = tf.ones(tf.shape(query)[:-1], dtype=bool)
        value_mask = mask

        # Creating the context vector
        context_vector, attention_weights = self.attention(
            inputs = [w1_query, value, w2_key],
            mask = [query_mask, value_mask],
            return_attention_scores = True,
        )
        shape_checker(context_vector, ('batch', 't', 'value_units'))
        shape_checker(attention_weights, ('batch', 't', 's'))

        return context_vector, attention_weights
# + colab={"base_uri": "https://localhost:8080/"} id="0DIXI0IG5BVL" outputId="11c46316-e242-43a9-ec92-7a94163b4892"
# Testing the attention layer by creating a bahdanau attention layer
attention_layer = BahdanauAttention(units)
attention_layer
# + [markdown] id="5x7sbnjA5LOK"
# The above Bahdanau layer will take 3 inputs,
#
# - `query`: this will be generated by decoder later
# - `value`: output of the encoder
# - `mask`: to exclude the padding, `example_tokens ! = 0`
#
# + [markdown] id="shoN_lby5l79"
# The vectorized implementation of the attention layer let's you pass a batch of sequences of query vectors and a batch of sequence of value vectors.
# + colab={"base_uri": "https://localhost:8080/"} id="xU8BvFvZ5pQJ" outputId="fdeed8a7-9409-4727-8814-3950355f9737"
# Later, the decoder will generate this attention query
example_attention_query = tf.random.normal(shape = [len(example_tokens) , 2 , 10])
example_attention_query
# + colab={"base_uri": "https://localhost:8080/"} id="Pa110xb66WqJ" outputId="c3a78184-a199-4e5c-b5cd-930af0fc2618"
# Passing this to our attention layer
context_vector , attention_weights = attention_layer(
query = example_attention_query,
value = example_encoder_output,
mask = (example_tokens != 0)
)
print(f'Attention result shape: (batch_size, query_seq_length, units): {context_vector.shape}')
print(f'Attention weights shape: (batch_size, query_seq_length, value_seq_length): {attention_weights.shape}')
# + [markdown] id="Grc5TjmA6qEU"
# The attention weights should sum to 1.0 for each response.
# + colab={"base_uri": "https://localhost:8080/"} id="rzEB38h564eL" outputId="e29abc8e-18aa-4dc9-e665-2dda7113344a"
attention_weights.shape
# + colab={"base_uri": "https://localhost:8080/"} id="aVVxLqEu7XYu" outputId="e8cad409-5102-412c-98fc-398797cf8b28"
attention_slice = attention_weights[0 , 0].numpy()
print(attention_slice)
attention_slice = attention_slice[attention_slice != 0]
# + colab={"base_uri": "https://localhost:8080/", "height": 376} id="N-JuWoIF7OWp" outputId="86a91fe9-c80f-4eb8-b7d1-0199f286868c"
# Plot the attention weights for one slice: the figure must exist before
# plt.suptitle() is called — in the original order the suptitle attached to a
# throwaway auto-created figure and the real plot ended up untitled.
plt.figure(figsize=(12, 6))
plt.suptitle('Attention weights for one sequence')

a1 = plt.subplot(1, 2, 1)
plt.bar(range(len(attention_slice)), attention_slice)
# freeze the xlim
plt.xlim(plt.xlim())
plt.xlabel('Attention weights')

a2 = plt.subplot(1, 2, 2)
plt.bar(range(len(attention_slice)), attention_slice)
plt.xlabel('Attention weights, zoomed')

# zoom in on the top of the distribution
top = max(a1.get_ylim())
zoom = 0.85*top
a2.set_ylim([0.90*top, top])
# mark the zoom threshold on the left-hand (full-range) plot
a1.plot(a1.get_xlim(), [zoom, zoom], color='k')
# + [markdown] id="Vd7_thH-7UDc"
# #### **Decoder**
# The decoder job is to generate predictions for the next output token.
# + [markdown] id="u7YYBtP375Xk"
# This is getting long, so found another tutorial the difference it's in the tensorflow addons section so it manages alot of heavy lifting..
#
# Will continue on this later, but this was a good exercise, learnt alot along the way.
# + id="shvIMXp99man"
|
Experiments/De_Scrambling_Text_from_scratch.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demystifying Neural Networks
#
# ---
# # Training an ANN
#
# We already said that we were using *Stochastic Gradient Descent* (SGD)
# to train the network. But what is SGD, actually?
#
# The *Stochastic* bit just means that we use a random sample as a batch
# at every step in the training. We have an example of this in our `pytorch` ANN.
# But the *Gradient Descent* is more mathematical.
# ## ANN the function
#
# Whatever an ANN looks like we can say that it takes a multidimensional input
# and spits a multidimensional output.
# We generated completely random matrices and they worked as an ANN.
# The random matrices produced completely random outputs
# but produced outputs in the correct number of dimensions.
#
# In other words the only difference between an untrained network and
# a trained network are the values of weights.
# Well, we kind of knew that already but now we can see it in mathematical terms.
#
# Since we just said that both a trained and untrained ANN is something
# which given a multidimensional input gives a multidimensional output,
# we can argue that an ANN can be understood as a function.
# A function parametrized by all the weights inside all the matrices.
# Specifically we call the action of our ANN $N$ and say:
#
# $$
# N_{w_1, w_2, \dots, w_n}: \mathbb{R}^n \rightarrow \mathbb{R}^m
# $$
#
# For the case of our ANN dealing with the pulsars dataset we have:
#
# $$
# N_{w_1, w_2, \dots, w_n}: \mathbb{R}^8 \rightarrow \mathbb{R}^2
# $$
#
# We also say that our ANN is a model, i.e. an estimator:
#
# $$
# \hat{\vec{y}} = N_{w_1, w_2, \dots, w_n}(\vec{x})
# $$
# Next we imagine that out there exists a perfect model of our data.
# We do not know the perfect model but we know the values it would output.
# In the case of pulsars we know that for a certain input we have
# $1$ for pulsar and $0$ for non-pulsar.
# Or more exactly $[0, 1]$ for pulsar and $[1, 0]$ for non-pulsar
# since the output is 2-dimensional. We call this output $\vec{y}$ (the label).
#
# The difference between the correct label and our estimated label is the error
# our ANN is making.
#
# $$
# E = \vec{y} - \hat{\vec{y}}
# $$
#
# There's a problem here though.
# Since the error can be positive or negative it is difficult to compare two errors.
# Therefore we use the squared error.
#
# $$
# SE = (\vec{y} - \hat{\vec{y}})^2
# $$
#
# Now we can define the function $F$ as follows:
#
# $$
# F_{w_1, w_2, \dots, w_n} = (\vec{y} - N_{w_1, w_2, \dots, w_n}(\vec{x}))^2
# $$
#
# And this has a nice property that,
# if $F$ decreases our ANN is getting better, if $F$ increases our ANN is getting worse!
# So all we need to do is to change the values of the weights until
# we get a minimum value of $F$.
# # Extending $F$
#
# As we saw when we wrote the SGD ourselves,
# we never train the ANN with a single sample.
# We train it with a small batch of samples at a time.
#
# Therefore we are not really using $\vec{y}$ as the comparison.
# Instead we are using several $\vec{y}$ together, we will call it $Y$,
# a matrix with each column containing a $\vec{y}$.
# With that in mind we are also not using $\vec{x}$ but several samples at a time.
# We will write $X$, a matrix with $\vec{x}$ as columns.
#
# Finally we write $F$ as:
#
# $$
# F_{w_1, w_2, \dots, w_n} = (Y - N_{w_1, w_2, \dots, w_n}(X))^2
# $$
#
# There's a problem with this though, since the squared error was
# $(\vec{y} - \hat{\vec{y}})^2$ the output of $F$ is not an error anymore,
# it is several errors.
# Nothing too difficult to solve, we can just get the mean of all those values,
# resulting in yet another approach to $F$
#
# $$
# MSE = F_{w_1, w_2, \dots, w_n} = \text{mean}\left( \left( Y - N_{w_1, w_2, \dots, w_n}(X) \right)^2 \right)
# $$
#
# This is often called the *Mean Squared Error* measure.
# Other error functions (e.g. cross-entropy) exist but for simplicity
# we will stick with MSE.
# ## Gradient
#
# Now that we have a function $F$ that mirrors the behavior of our ANN
# we could perturb the weights until we find a minimum.
# Yet, there is a better way.
# We can use the following fact.
#
# > The *sign* of the partial derivative of a function wrt. one of its parameters
# > points in the direction the function is increasing in that dimension.
#
# So, for every single weight we have a possible dimension in which to tune our function.
# And for every one of those dimensions (say, dimension $w_1$) we can compute
#
# $$
# g_1 = \frac{\partial MSE}{\partial w_1}
# $$
#
# And we know that the function increases in the direction of $g_1$.
# But we want to find a minimum, so we also know that the function
# decreases in the direction of $-g_1$.
#
# This technique is called *Gradient Descent* because it is described through
# the calculation of the gradient. The gradient is:
#
# $$
# \nabla MSE_{w_1, w_2, \dots, w_n} =
# \left[
# \frac{\partial MSE}{\partial w_1},
# \frac{\partial MSE}{\partial w_2},
# \dots,
# \frac{\partial MSE}{\partial w_n}
# \right]
# $$
# In other words, the gradient gives us the partial derivatives against every single weight.
# The gradient therefore gives us the direction in which to go in order to make the ANN
# perform better, it does not give us how far we need to go though.
# Since the gradient may be quite large sometimes we then multiply it by a small constant
# to make sure we do not wander too far. This small constant is called the *learning rate*.
#
# If we'd be able to flatten out all weights into an array we could write
#
# $$
# [w_1, w_2, \dots, w_n] = [w_1, w_2, \dots, w_n] - \alpha \cdot \nabla MSE_{w_1, w_2, \dots, w_n}
# $$
#
# Where $\alpha$ is the learning rate.
#
# Note: the actual implementation keeps the gradients together with the weights,
# in the same matrices. Next we will look at `autograd` which is an implementation
# that allows one to calculate the gradients and extends `numpy` arrays to
# keep the computed gradients together with the weight matrices.
|
08-training.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# 说明:
# 给定字符串croak Of Frogs,它表示来自不同青蛙的字符串“croak”的组合,也就是说,多个青蛙可以同时发出嘎吱声,因此混合了多个“ croak”。
# 返回最少数量的不同青蛙,以完成给定字符串中的所有吱吱声。
# 有效的“croack”表示青蛙正在依次打印5个字母“ c”,“ r”,“ o”,“ a”,“ k”。
# 青蛙必须打印所有五个字母才能完成吱吱作响。
# 如果给定的字符串不是有效的“croak”的组合,则返回-1。
#
# Example 1:
# Input: croakOfFrogs = "croak croak"
# Output: 1
# Explanation: One frog yelling "croak" twice.
#
# Example 2:
# Input: croakOfFrogs = "crcoakroak"
# Output: 2
# Explanation: The minimum number of frogs is two.
# The first frog could yell "crcoak roak".
# The second frog could yell later "crcoakroak".
#
# Example 3:
# Input: croakOfFrogs = "croak crook"
# Output: -1
# Explanation: The given string is an invalid combination of "croak" from different frogs.
#
# Example 4:
# Input: croakOfFrogs = "croakcroa"
# Output: -1
#
# Constraints:
# 1、1 <= croakOfFrogs.length <= 10^5
# 2、All characters in the string are: 'c', 'r', 'o', 'a' or 'k'.
# -
class Solution:
    def minNumberOfFrogs(self, croakOfFrogs: str) -> int:
        """Return the minimum number of frogs needed to produce the string,
        or -1 if it is not a valid interleaving of complete "croak"s.

        Each counter tracks how many frogs are currently paused right after
        that letter: a 'c' starts a new croak, each later letter moves one
        frog forward, and 'k' finishes a croak.  The answer is the peak
        number of simultaneously in-progress croaks.

        Fix: removed a leftover debug ``print`` that ran once per character.
        """
        chr_c, chr_r, chr_o, chr_a = 0, 0, 0, 0  # frogs waiting after c/r/o/a
        count = 0  # peak number of concurrent croaks seen so far
        for s in croakOfFrogs:
            if s == 'c':
                chr_c += 1
            elif s == 'r':
                if chr_c <= 0:  # no frog has said 'c' yet -> invalid order
                    return -1
                chr_c -= 1
                chr_r += 1
            elif s == 'o':
                if chr_r <= 0:
                    return -1
                chr_r -= 1
                chr_o += 1
            elif s == 'a':
                if chr_o <= 0:
                    return -1
                chr_o -= 1
                chr_a += 1
            else:  # 'k' completes one croak (inputs contain only c/r/o/a/k)
                if chr_a <= 0:
                    return -1
                chr_a -= 1
            count = max(count, chr_c + chr_r + chr_o + chr_a)
        # Any frog still mid-croak (sum > 0) means the string is incomplete.
        if chr_c + chr_r + chr_o + chr_a > 0:
            return -1
        return count
# Sanity check: "crcoakroak" interleaves two croaks, so the expected answer is 2.
solution = Solution()
solution.minNumberOfFrogs("crcoakroak")
|
Dynamic Programming/1009/1419. Minimum Number of Frogs Croaking.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp utils
# -
# # Utilities
#
# > Helper functions used throughout the library not related to timeseries data.
# +
#export
from tsai.imports import *
from fastcore.test import *
# +
#export
def my_setup(*pkgs):
    """Print the versions of the OS, Python, tsai/fastai/fastcore, torch and
    any extra modules passed in — handy output to paste into bug reports.

    Args:
        *pkgs: already-imported modules whose ``__version__`` should be shown.
    """
    import warnings
    warnings.filterwarnings("ignore")
    # Every probe below is best-effort: a missing package prints "N/A" (or is
    # silently skipped for os/python) instead of aborting the whole report.
    try:
        import platform
        print(f'os : {platform.platform()}')
    except:
        pass
    try:
        from platform import python_version
        print(f'python : {python_version()}')
    except:
        pass
    try:
        import tsai
        print(f'tsai : {tsai.__version__}')
    except:
        print(f'tsai : N/A')
    try:
        import fastai
        print(f'fastai : {fastai.__version__}')
    except:
        print(f'fastai : N/A')
    try:
        import fastcore
        print(f'fastcore : {fastcore.__version__}')
    except:
        print(f'fastcore : N/A')
    # NOTE(review): *pkgs is always a tuple, never None, so this guard only
    # documents intent; `listify` presumably comes from tsai.imports — TODO confirm.
    if pkgs is not None:
        for pkg in listify(pkgs):
            try: print(f'{pkg.__name__:15}: {pkg.__version__}')
            except: pass
    try:
        import torch
        print(f'torch : {torch.__version__}')
        iscuda = torch.cuda.is_available()
        # `cpus` and `device` appear to be module-level values re-exported by
        # tsai.imports — TODO confirm against that module.
        print(f'n_cpus : {cpus}')
        print(f'device : {device} ({torch.cuda.get_device_name(0)})' if iscuda else f'device : {device}')
    except: print(f'torch : N/A')

# Public alias kept for backwards compatibility.
computer_setup = my_setup
# -
my_setup()
import matplotlib
import numpy as np
import pandas as pd
import scipy as sp
my_setup(matplotlib, np, pd, sp)
#export
import inspect
import sklearn
# ensure these folders exist for testing purposes
fns = ['data', 'export', 'models']
for fn in fns:
path = Path('.')/fn
if not os.path.exists(path): os.makedirs(path)
# +
#export
def totensor(o):
    "Convert `o` (tensor, ndarray, or anything `torch.tensor` accepts) to a `torch.Tensor`."
    if isinstance(o, torch.Tensor): return o
    elif isinstance(o, np.ndarray): return torch.from_numpy(o)
    else:
        try: return torch.tensor(o)
        except: warnings.warn(f"Can't convert {type(o)} to torch.Tensor", Warning)


def toarray(o):
    "Convert `o` to a `np.ndarray` (tensors are moved to the cpu first)."
    if isinstance(o, np.ndarray): return o
    elif isinstance(o, torch.Tensor): return o.cpu().numpy()
    else:
        try: return np.asarray(o)
        except: warnings.warn(f"Can't convert {type(o)} to np.array", Warning)


def toL(o):
    "Convert `o` to a fastcore `L` list."
    if isinstance(o, L): return o
    elif isinstance(o, (np.ndarray, torch.Tensor)): return L(o.tolist())
    else:
        try: return L(o)
        except: warnings.warn(f'passed object needs to be of type L, list, np.ndarray or torch.Tensor but is {type(o)}', Warning)


def to3dtensor(o):
    "Return `o` as a 3-dim tensor, inserting sample/channel axes as needed."
    o = totensor(o)
    if o.ndim == 3: return o
    elif o.ndim == 1: return o[None, None]
    elif o.ndim == 2: return o[:, None]
    assert False, f'Please, review input dimensions {o.ndim}'


def to2dtensor(o):
    "Return `o` as a 2-dim tensor (1-d gains a leading axis, 3-d keeps its first item)."
    o = totensor(o)
    if o.ndim == 2: return o
    elif o.ndim == 1: return o[None]
    elif o.ndim == 3: return o[0]
    assert False, f'Please, review input dimensions {o.ndim}'


def to1dtensor(o):
    "Return `o` as a 1-dim tensor, taking the first item along leading axes."
    o = totensor(o)
    if o.ndim == 1: return o
    elif o.ndim == 3: return o[0, 0]
    if o.ndim == 2: return o[0]
    assert False, f'Please, review input dimensions {o.ndim}'


def to3darray(o):
    "Return `o` as a 3-dim array, inserting sample/channel axes as needed."
    o = toarray(o)
    if o.ndim == 3: return o
    elif o.ndim == 1: return o[None, None]
    elif o.ndim == 2: return o[:, None]
    assert False, f'Please, review input dimensions {o.ndim}'


def to2darray(o):
    "Return `o` as a 2-dim array (1-d gains a leading axis, 3-d keeps its first item)."
    o = toarray(o)
    if o.ndim == 2: return o
    elif o.ndim == 1: return o[None]
    elif o.ndim == 3: return o[0]
    assert False, f'Please, review input dimensions {o.ndim}'


def to1darray(o):
    "Return `o` as a 1-dim array, taking the first item along leading axes."
    o = toarray(o)
    if o.ndim == 1: return o
    # Fix: these branches previously assigned to `o` and then fell through to
    # the assert, so every 2-d/3-d input raised; they now return, mirroring
    # the behaviour of `to1dtensor`.
    elif o.ndim == 3: return o[0, 0]
    elif o.ndim == 2: return o[0]
    assert False, f'Please, review input dimensions {o.ndim}'
def to3d(o):
    "Coerce array/tensor `o` to 3 dims (pass-through if already 3d)."
    if o.ndim == 3:
        return o
    if isinstance(o, np.ndarray):
        return to3darray(o)
    if isinstance(o, torch.Tensor):
        return to3dtensor(o)

def to2d(o):
    "Coerce array/tensor `o` to 2 dims (pass-through if already 2d)."
    if o.ndim == 2:
        return o
    if isinstance(o, np.ndarray):
        return to2darray(o)
    if isinstance(o, torch.Tensor):
        return to2dtensor(o)

def to1d(o):
    "Coerce array/tensor `o` to 1 dim (pass-through if already 1d)."
    if o.ndim == 1:
        return o
    if isinstance(o, np.ndarray):
        return to1darray(o)
    if isinstance(o, torch.Tensor):
        return to1dtensor(o)

def to2dPlus(o):
    "Coerce `o` to at least 2 dims."
    if o.ndim >= 2:
        return o
    return to2darray(o) if isinstance(o, np.ndarray) else to2dtensor(o) if isinstance(o, torch.Tensor) else None

def to3dPlus(o):
    "Coerce `o` to at least 3 dims."
    if o.ndim >= 3:
        return o
    return to3darray(o) if isinstance(o, np.ndarray) else to3dtensor(o) if isinstance(o, torch.Tensor) else None

def to2dPlusTensor(o):
    "`totensor` then `to2dPlus`."
    return to2dPlus(totensor(o))

def to2dPlusArray(o):
    "`toarray` then `to2dPlus`."
    return to2dPlus(toarray(o))

def to3dPlusTensor(o):
    "`totensor` then `to3dPlus`."
    return to3dPlus(totensor(o))

def to3dPlusArray(o):
    "`toarray` then `to3dPlus`."
    return to3dPlus(toarray(o))

def todtype(dtype):
    "Return a converter that casts arrays/tensors to `dtype` (no-op if already that dtype)."
    def _to_type(o, dtype=dtype):
        if o.dtype == dtype:
            return o
        if isinstance(o, torch.Tensor):
            return o.to(dtype=dtype)
        if isinstance(o, np.ndarray):
            return o.astype(dtype)
        return o
    return _to_type
# -
# Inline tests: tensor/array conversions round-trip and reach the target ndim.
a = np.random.rand(100).astype(np.float32)
b = torch.from_numpy(a).float()
test_eq(totensor(a), b)
test_eq(a, toarray(b))
test_eq(to3dtensor(a).ndim, 3)
test_eq(to2dtensor(a).ndim, 2)
test_eq(to1dtensor(a).ndim, 1)
test_eq(to3darray(b).ndim, 3)
test_eq(to2darray(b).ndim, 2)
test_eq(to1darray(b).ndim, 1)
# +
#export
def bytes2size(size_bytes):
    "Human-readable string for a byte count (1024-based units up to YB)."
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exp = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exp), 2)
    return "%s %s" % (scaled, units[exp])

def bytes2GB(byts):
    "Convert bytes to gigabytes, rounded to 2 decimals."
    return round(byts / math.pow(1024, 3), 2)

def get_size(o, return_str=False):
    "Shallow size of `o` via sys.getsizeof; formatted string when `return_str`."
    nbytes = sys.getsizeof(o)
    return bytes2size(nbytes) if return_str else nbytes
# -
# Inline test. NOTE(review): the expected string depends on numpy's exact
# per-object overhead, so it may vary across numpy versions/platforms.
a = np.random.rand(10, 5, 3)
test_eq(get_size(a, True), '1.3 KB')
#export
def delete_all_in_dir(tgt_dir, exception=None):
    "Delete every file/symlink/subdir inside `tgt_dir`, keeping entries whose name ends with `exception`."
    # normalize a multi-element exception to a tuple so str.endswith accepts it
    if exception is not None and len(L(exception)) > 1: exception = tuple(exception)
    for file in os.listdir(tgt_dir):
        if exception is not None and file.endswith(exception): continue
        file_path = os.path.join(tgt_dir, file)
        if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path)
        elif os.path.isdir(file_path): shutil.rmtree(file_path)
#export
def reverse_dict(dictionary):
    "Swap keys and values of `dictionary` (values must be hashable)."
    return dict((value, key) for key, value in dictionary.items())
#export
def is_tuple(o): return isinstance(o, tuple)
#export
def itemify(*o, tup_id=None):
    "Zip the given (non-None) iterables into an `L` of tuples; return only element `tup_id` of each tuple if given."
    o = [o_ for o_ in L(*o) if o_ is not None]
    items = L(o).zip()
    if tup_id is not None: return L([item[tup_id] for item in items])
    else: return items
# Inline tests: None inputs are dropped, length follows the inputs.
a = [1, 2, 3]
b = [4, 5, 6]
print(itemify(a, b))
test_eq(len(itemify(a, b)), len(a))
a = [1, 2, 3]
b = None
print(itemify(a, b))
test_eq(len(itemify(a, b)), len(a))
a = [1, 2, 3]
b = [4, 5, 6]
c = None
print(itemify(a, b, c))
test_eq(len(itemify(a, b, c)), len(a))
# +
#export
def isnone(o):
    "True if `o` is None."
    return o is None

def exists(o):
    "True if `o` is not None."
    return not isnone(o)

def ifelse(a, b, c):
    "`b` if `a` is True else `c`"
    return b if a else c
# -
# Inline tests: an array counts as existing, None does not.
a = np.array(3)
test_eq(isnone(a), False)
test_eq(exists(a), True)
b = None
test_eq(isnone(b), True)
test_eq(exists(b), False)
# +
#export
def is_not_close(a, b, eps=1e-5):
    "Is `a` within `eps` of `b`"
    array_like = hasattr(a, '__array__') or hasattr(b, '__array__')
    if array_like:
        return (abs(a - b) > eps).all()
    iterable = isinstance(a, (Iterable, Generator)) or isinstance(b, (Iterable, Generator))
    if iterable:
        # convert to arrays and re-dispatch through the array branch
        return is_not_close(np.array(a), np.array(b), eps=eps)
    return abs(a - b) > eps
def test_not_close(a, b, eps=1e-5):
    "`test` that `a` is NOT within `eps` of `b`"
    test(a, b, partial(is_not_close, eps=eps), 'not_close')
def test_type(a, b):
    "`test` that `a` and `b` have the same type"
    return test_eq(type(a), type(b))
def test_ok(f, *args, **kwargs):
    "`test` that calling `f(*args, **kwargs)` raises no exception"
    try:
        f(*args, **kwargs)
        e = 0
    except:
        e = 1
        pass
    test_eq(e, 0)
def test_not_ok(f, *args, **kwargs):
    "`test` that calling `f(*args, **kwargs)` raises an exception"
    try:
        f(*args, **kwargs)
        e = 0
    except:
        e = 1
        pass
    test_eq(e, 1)
def test_error(error, f, *args, **kwargs):
    "`test` that calling `f(*args, **kwargs)` raises an exception whose str equals `error`"
    try:
        f(*args, **kwargs)
    except Exception as e:
        test_eq(str(e), error)
    else:
        # fixed: previously a function that raised nothing passed silently
        assert False, f'no exception raised, expected: {error}'
def test_eq_nan(a,b):
    "`test` that `a==b` excluding nan values (valid for torch.Tensor and np.ndarray)"
    mask_a = torch.isnan(a) if isinstance(a, torch.Tensor) else np.isnan(a)
    mask_b = torch.isnan(b) if isinstance(b, torch.Tensor) else np.isnan(b)
    test(a[~mask_a],b[~mask_b],equals, '==')
# -
#export
# Helper that always fails with a fixed message; used below to exercise `test_error`.
def assert_fn(*args, **kwargs): assert False, 'assertion test'
test_error('assertion test', assert_fn, 35, a=3)
# +
#export
def test_gt(a,b):
    "`test` that `a>b`"
    test(a,b,gt,'>')

def test_ge(a,b):
    "`test` that `a>=b`"
    # fixed: failure message previously said '>' for a >= comparison
    test(a,b,ge,'>=')

def test_lt(a,b):
    "`test` that `a<b`"
    test(a,b,lt,'<')

def test_le(a,b):
    "`test` that `a<=b`"
    test(a,b,le,'<=')
# +
# Inline tests for the comparison helpers and nan-aware equality.
test_ok(test_gt, 5, 4)
test_not_ok(test_gt, 4, 4)
test_ok(test_ge, 4, 4)
test_not_ok(test_ge, 3, 4)
test_ok(test_lt, 3, 4)
test_not_ok(test_lt, 4, 4)
test_ok(test_le, 4, 4)
test_not_ok(test_le, 5, 4)
# -
t = torch.rand(100)
t[t<.5] = np.nan
test_ne(t, t)
test_eq_nan(t, t)
# +
#export
def stack(o, axis=0, retain=True):
    "Stack a sequence of arrays/tensors along `axis`; optionally retain the subclass type of the first item."
    if hasattr(o, '__array__'):
        return o
    first = o[0]
    if isinstance(first, torch.Tensor):
        out = torch.stack(tuple(o), dim=axis)
    else:
        out = np.stack(o, axis)
    return retain_type(out, first) if retain else out

def stack_pad(o, padding_value=np.nan):
    'Converts an iterable into a numpy array, right-padding shorter rows with `padding_value`'
    width = len(max(o, key=len))
    out = np.full((len(o), width), padding_value)
    for row_idx, row in enumerate(o):
        out[row_idx, :len(row)] = row
    return out
# -
# Inline tests: padding shape/nan count and type retention when stacking.
a = [[0,1,2], [4,5,6,7]]
test_eq(stack_pad(a).shape, (2, 4))
test_eq(type(stack_pad(a)), np.ndarray)
test_eq(np.isnan(stack_pad(a)).sum(), 1)
a = np.random.rand(2, 3, 4)
t = torch.from_numpy(a)
test_eq_type(stack(itemify(a, tup_id=0)), a)
test_eq_type(stack(itemify(t, tup_id=0)), t)
#export
def match_seq_len(*arrays):
    "Left-pad 3d arrays with zeros along the last dim so they all share the longest seq_len."
    target = max(x.shape[-1] for x in arrays)
    padded = []
    for x in arrays:
        widths = ((0, 0), (0, 0), (target - x.shape[-1], 0))
        padded.append(np.pad(x, pad_width=widths, mode='constant', constant_values=0))
    return padded
# Inline test: after matching, both arrays share the same seq_len.
a = np.random.rand(10, 5, 8)
b = np.random.rand(3, 5, 10)
c, d = match_seq_len(a, b)
test_eq(c.shape[-1], d.shape[-1])
#export
def random_shuffle(o, random_state=None):
    "Shuffle `o` (array/tensor/list/L) with sklearn, preserving `L` type."
    res = sklearn.utils.shuffle(o, random_state=random_state)
    if isinstance(o, L): return L(list(res))
    return res
# Inline tests: a fixed random_state gives a reproducible order across types.
a = np.arange(10)
test_eq_type(random_shuffle(a, 1), np.array([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))
t = torch.arange(10)
test_eq_type(random_shuffle(t, 1), tensor([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))
l = list(a)
test_eq(random_shuffle(l, 1), [2, 9, 6, 4, 0, 3, 1, 7, 8, 5])
l2 = L(l)
test_eq_type(random_shuffle(l2, 1), L([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))
#export
def cat2int(o):
    "Map categories in `o` to contiguous int codes using fastai's `Categorize`."
    cat = Categorize()
    cat.setup(o)
    return stack(TfmdLists(o, cat)[:])
a = np.array(['b', 'a', 'a', 'b', 'a', 'b', 'a'])
test_eq_type(cat2int(a), TensorCategory([1, 0, 0, 1, 0, 1, 0]))
TensorBase([1,2,3])
# +
#export
def cycle_dl(dl):
    "Iterate once through `dl`, discarding the batches (e.g. to warm caches or time loading)."
    for _ in dl:
        pass

def cycle_dl_to_device(dl):
    "Iterate through `dl`, moving every tensor of each batch to the default device."
    for batch in dl:
        [item.to(default_device()) for item in batch]
# +
#export
def cache_data(o, slice_len=10_000, verbose=False):
    """Read `o` (e.g. a np.memmap) in slices of `slice_len` to pull its data into
    the OS cache; prints progress/timing when `verbose`.
    NOTE(review): relies on `pv` and `timer` coming from the star imports above.
    """
    start = 0
    n_loops = (len(o) - 1) // slice_len + 1
    pv(f'{n_loops} loops', verbose)
    timer.start(False)
    for i in range(n_loops):
        # the slice read itself is the point: it forces the pages into memory
        o[slice(start,start + slice_len)]
        if verbose and (i+1) % 10 == 0: print(f'{i+1:4} elapsed time: {timer.elapsed()}')
        start += slice_len
    pv(f'{i+1:4} total time : {timer.stop()}\n', verbose)
# aliases kept for backwards compatibility
memmap2cache = cache_data
cache_memmap = cache_data
# -
#export
def get_func_defaults(f):
    "Dict mapping `f`'s positional arg names to their defaults ('' when there is none)."
    spec = inspect.getfullargspec(f)
    defaults = list(spec.defaults) if spec.defaults is not None else []
    placeholders = [''] * (len(spec.args) - len(defaults))
    return dict(zip(spec.args, placeholders + defaults))
#export
def get_idx_from_df_col_vals(df, col, val_list):
    "First index in `df` where column `col` equals each value in `val_list`."
    idxs = []
    for val in val_list:
        idxs.append(df[df[col] == val].index[0])
    return idxs
#export
def get_sublist_idxs(aList, bList):
    "Get idxs that when applied to aList will return bList. aList must contain all values in bList"
    # compute the sort order once (the original called np.argsort twice)
    order = np.argsort(aList)
    return order[np.searchsorted(aList[order], bList)]
# Inline tests: recovered idxs map x back onto y, including with duplicates.
x = np.array([3, 5, 7, 1, 9, 8, 6, 2])
y = np.array([6, 1, 5, 7])
idx = get_sublist_idxs(x, y)
test_eq(x[idx], y)
x = np.array([3, 5, 7, 1, 9, 8, 6, 6, 2])
y = np.array([6, 1, 5, 7, 5])
idx = get_sublist_idxs(x, y)
test_eq(x[idx], y)
# +
#export
def flatten_list(l):
    "Flatten one level of nesting in `l`."
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
# -
#export
def display_pd_df(df, max_rows:Union[bool, int]=False, max_columns:Union[bool, int]=False):
    """Display `df` temporarily overriding pandas' display.max_rows/max_columns.
    Pass an int for an explicit limit or True to show the full dataframe.
    NOTE(review): options are not restored if `display` raises (no try/finally).
    """
    if max_rows:
        old_max_rows = pd.get_option('display.max_rows')
        if max_rows is not True and isinstance(max_rows, Integral): pd.set_option('display.max_rows', max_rows)
        else: pd.set_option('display.max_rows', df.shape[0])
    if max_columns:
        old_max_columns = pd.get_option('display.max_columns')
        if max_columns is not True and isinstance(max_columns, Integral): pd.set_option('display.max_columns', max_columns)
        else: pd.set_option('display.max_columns', df.shape[1])
    display(df)
    # restore the saved options after displaying
    if max_rows: pd.set_option('display.max_rows', old_max_rows)
    if max_columns: pd.set_option('display.max_columns', old_max_columns)
# Inline test: the global pandas options are unchanged after the call.
old_max_rows, old_max_columns = pd.get_option('display.max_rows'), pd.get_option('display.max_columns')
df = pd.DataFrame(np.random.rand(70, 25))
display_pd_df(df, max_rows=2, max_columns=3)
test_eq(old_max_rows, pd.get_option('display.max_rows'))
test_eq(old_max_columns, pd.get_option('display.max_columns'))
# +
#export
def ttest(data1, data2, equal_var=False):
    "Calculates t-statistic and p-value based on 2 sample distributions"
    t_stat, p_value = scipy.stats.ttest_ind(data1, data2, equal_var=equal_var)
    # attach the sign of the t-statistic to the p-value
    return t_stat, np.sign(t_stat) * p_value

def tscore(o):
    "sqrt(n) * mean / std of `o` (0 when std is 0)."
    std = o.std()
    if std == 0:
        return 0
    return np.sqrt(len(o)) * o.mean() / std
# -
# Demo: t-test between two random samples and t-scores for array/tensor.
a = np.random.normal(0.5, 1, 100)
b = np.random.normal(0.15, .5, 50)
plt.hist(a, 50)
plt.hist(b, 50)
plt.show()
ttest(a,b)
a = np.random.normal(0.5, 1, 100)
t = torch.normal(0.5, 1, (100, ))
tscore(a), tscore(t)
#export
def ttest_tensor(a, b):
    "differentiable pytorch function equivalent to scipy.stats.ttest_ind with equal_var=False"
    # standard error of each sample
    se_a = torch.std(a) / np.sqrt(len(a))
    se_b = torch.std(b) / np.sqrt(len(b))
    # standard error of the difference between the sample means
    se_diff = torch.sqrt(se_a ** 2.0 + se_b ** 2.0)
    # t statistic
    return (torch.mean(a) - torch.mean(b)) / se_diff
# Demo: differentiable t-statistic between two random tensors.
a = torch.rand(100).requires_grad_(True) + .1
b = torch.rand(100).requires_grad_(True)
ttest_tensor(a, b)
# +
#export
from scipy.stats import pearsonr, spearmanr
def pcc(a, b):
    "Pearson correlation coefficient between `a` and `b`."
    return pearsonr(a, b)[0]

def scc(a, b):
    "Spearman rank correlation coefficient between `a` and `b`."
    return spearmanr(a, b)[0]
# Demo: correlation coefficients between two random samples.
a = np.random.normal(0.5, 1, 100)
b = np.random.normal(0.15, .5, 100)
pcc(a, b), scc(a, b)
# -
#export
def remove_fn(fn, verbose=False):
    "Removes a file (fn) if exists"
    try:
        os.remove(fn)
        pv(f'{fn} file removed', verbose)
    except OSError:
        # best-effort: a missing file is not an error
        pv(f'{fn} does not exist', verbose)
        pass
# +
#export
def npsave(array_fn, array, verbose=True):
    "Save `array` to `array_fn` with np.save, removing any pre-existing file first."
    remove_fn(array_fn, verbose)
    pv(f'saving {array_fn}...', verbose)
    np.save(array_fn, array)
    pv(f'...{array_fn} saved', verbose)
# alias kept for backwards compatibility
np_save = npsave
# -
# Demo: save, memmap-load, then clean up (second remove_fn is a no-op).
fn = 'data/remove_fn_test.npy'
a = np.zeros(1)
npsave(fn, a)
del a
np.load(fn, mmap_mode='r+')
remove_fn(fn, True)
remove_fn(fn, True)
#export
def permute_2D(array, axis=None):
    "Permute rows or columns in an array. This can be used, for example, in feature permutation"
    rand_idx = lambda ax: np.random.randn(*array.shape).argsort(axis=ax)
    if axis == 0:
        # shuffle each column independently
        return array[rand_idx(0), np.arange(array.shape[-1])[None, :]]
    if axis in (1, -1):
        # shuffle each row independently
        return array[np.arange(len(array))[:, None], rand_idx(1)]
    return array[rand_idx(0), rand_idx(1)]
# Inline tests: axis-wise permutation keeps per-axis means but changes order.
s = np.arange(100 * 50).reshape(100, 50)
test_eq(permute_2D(s, axis=0).mean(0), s.mean(0))
test_ne(permute_2D(s, axis=0), s)
test_eq(permute_2D(s, axis=1).mean(1), s.mean(1))
test_ne(permute_2D(s, axis=1), s)
test_ne(permute_2D(s), s)
# +
#export
def random_normal():
    "Returns a number between -1 and 1 with a normal distribution"
    # rejection sampling: redraw until the value falls inside [-1, 1]
    while True:
        draw = np.random.normal(loc=0., scale=1/3)
        if abs(draw) <= 1:
            return draw

def random_half_normal():
    "Returns a number between 0 and 1 with a half-normal distribution"
    while True:
        draw = abs(np.random.normal(loc=0., scale=1/3))
        if draw <= 1:
            return draw

def random_normal_tensor(shape=1, device=None):
    "Returns a tensor of a predefined shape between -1 and 1 with a normal distribution"
    return torch.empty(shape, device=device).normal_(mean=0, std=1/3).clamp_(-1, 1)

def random_half_normal_tensor(shape=1, device=None):
    "Returns a tensor of a predefined shape between 0 and 1 with a half-normal distribution"
    return abs(torch.empty(shape, device=device).normal_(mean=0, std=1/3)).clamp_(0, 1)
# +
#export
from matplotlib.backends.backend_agg import FigureCanvasAgg
def default_dpi():
    "Return the current matplotlib figure dpi as an int (creates and closes a figure)."
    DPI = plt.gcf().get_dpi()
    plt.close()
    return int(DPI)
def get_plot_fig(size=None, dpi=None):
    """Return a bare matplotlib figure (no spines, ticks or margins), optionally
    square with `size` pixels per side.

    `dpi` defaults to the backend's dpi; it is resolved lazily here rather than
    in the signature, where `default_dpi()` would run (opening and closing a
    figure) once at import time and freeze that value.
    """
    if dpi is None: dpi = default_dpi()
    fig = plt.figure(figsize=(size / dpi, size / dpi), dpi=dpi, frameon=False) if size else plt.figure()
    ax = fig.add_axes([0,0,1,1])
    # strip all chart furniture so only the plotted content remains
    for spine in ('top', 'right', 'bottom', 'left'):
        ax.spines[spine].set_visible(False)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    config = plt.gcf()
    plt.close('all')
    return config
def fig2buf(fig):
    "Render `fig` with the Agg backend and return its RGB pixels as a (h, w, 3) uint8 array."
    canvas = FigureCanvasAgg(fig)
    fig.canvas.draw()
    # buffer_rgba() is RGBA; drop the alpha channel
    return np.asarray(canvas.buffer_rgba())[..., :3]
# -
default_dpi()
#export
def plot_scatter(x, y, deg=1):
    "Scatter-plot `x` vs `y` with a degree-`deg` polynomial fit line."
    linreg = sp.stats.linregress(x, y)
    # NOTE(review): the label says 'R2' but linreg.rvalue is r, not r**2
    plt.scatter(x, y, label=f'R2:{linreg.rvalue:.2f}', color='lime', edgecolor='black', alpha=.5)
    plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, deg))(np.unique(x)), color='r')
    plt.legend(loc='best')
    plt.show()
# Demo
a = np.random.rand(100)
b = np.random.rand(100)**2
plot_scatter(a, b)
#export
# Position of each value of `aList` in `o` (O(len(o)*len(aList)); fine for small inputs).
def get_idxs(o, aList): return array([o.tolist().index(v) for v in aList])
# Inline test: recovered idxs map a back onto b.
a = random_shuffle(np.arange(100, 200))
b = np.random.choice(a, 10, False)
idxs = get_idxs(a, b)
test_eq(a[idxs], b)
# export
def apply_cmap(o, cmap):
    "Map single-channel image batch `o` through matplotlib colormap `cmap`, returning a channels-first RGB tensor."
    o = toarray(o)
    out = plt.get_cmap(cmap)(o)[..., :3]   # drop the alpha channel
    out = tensor(out).squeeze(1)
    return out.permute(0, 3, 1, 2)
# +
# Inline tests: the single channel dim becomes 3 RGB channels.
a = np.random.rand(16, 1, 40, 50)
s = L(a.shape)
s[1] = 3
test_eq(L(apply_cmap(a, 'viridis').shape), s)
s[0] = 1
a = np.random.rand(1, 40, 50)
test_eq(L(apply_cmap(a, 'viridis').shape), s)
# -
#export
def torch_tile(a, n_tile, dim=0):
    "Tile `a` `n_tile` times along `dim`, repeating each element consecutively (0,0,0,1,1,1,...)."
    size = a.size(dim)
    reps = [1] * a.dim()
    reps[dim] = n_tile
    tiled = a.repeat(*reps)
    # reorder so copies of the same element end up adjacent
    order = torch.cat([size * torch.arange(n_tile) + i for i in range(size)]).to(device=a.device)
    return torch.index_select(tiled, dim, order)
test_eq(torch_tile(torch.arange(2), 3), tensor([0, 0, 0, 1, 1, 1]))
#export
def to_tsfresh_df(ts):
    r"""Prepares a time series (Tensor/ np.ndarray) to be used as a tsfresh dataset to allow feature extraction"""
    ts = to3d(ts)
    if isinstance(ts, np.ndarray):
        # one 'id' per sample, repeated once per timestep
        ids = np.repeat(np.arange(len(ts)), ts.shape[-1]).reshape(-1,1)
        joint_ts = ts.transpose(0,2,1).reshape(-1, ts.shape[1])
        cols = ['id'] + np.arange(ts.shape[1]).tolist()
        df = pd.DataFrame(np.concatenate([ids, joint_ts], axis=1), columns=cols)
    elif isinstance(ts, torch.Tensor):
        ids = torch_tile(torch.arange(len(ts)), ts.shape[-1]).reshape(-1,1)
        joint_ts = ts.transpose(1,2).reshape(-1, ts.shape[1])
        cols = ['id']+np.arange(ts.shape[1]).tolist()
        df = pd.DataFrame(torch.cat([ids, joint_ts], dim=1).numpy(), columns=cols)
    df['id'] = df['id'].astype(int)
    df.reset_index(drop=True, inplace=True)
    return df
# Demo: same conversion from a tensor and from its numpy counterpart.
ts = torch.rand(16, 3, 20)
a = to_tsfresh_df(ts)
ts = ts.numpy()
b = to_tsfresh_df(ts)
# +
#export
from scipy.stats import skew, kurtosis
def pcorr(a, b):
    "Pearson correlation of `a` and `b` as a (coefficient, p-value) pair."
    return scipy.stats.pearsonr(a, b)

def scorr(a, b):
    "Spearman correlation of `a` and `b` as a (coefficient, p-value) pair."
    res = scipy.stats.spearmanr(a, b)
    return res[0], res[1]
# -
#export
def torch_diff(t, lag=1, pad=True):
    "Lagged difference along the last dim; left-pads with zeros to keep the length when `pad`."
    import torch.nn.functional as F
    d = t[..., lag:] - t[..., :-lag]
    return F.pad(d, (lag, 0)) if pad else d
# Inline tests: lag-k differences of an arange average to k.
t = torch.arange(24).reshape(2,3,4)
test_eq(torch_diff(t, 1)[..., 1:].float().mean(), 1.)
test_eq(torch_diff(t, 2)[..., 2:].float().mean(), 2.)
# +
#export
def get_outliers_IQR(o, axis=None):
    "Tukey fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR) of `o`, nan-aware. Tensor inputs return tensors."
    is_tensor = isinstance(o, torch.Tensor)
    if is_tensor:
        device, tdtype = o.device, o.dtype
        o = o.detach().cpu().numpy()
    q1 = np.nanpercentile(o, 25, axis=axis, keepdims=axis is not None)
    q3 = np.nanpercentile(o, 75, axis=axis, keepdims=axis is not None)
    iqr = q3 - q1
    if is_tensor:
        q1 = torch.tensor(q1, dtype=tdtype, device=device)
        q3 = torch.tensor(q3, dtype=tdtype, device=device)
        iqr = torch.tensor(iqr, dtype=tdtype, device=device)
    return q1 - 1.5 * iqr, q3 + 1.5 * iqr

def clip_outliers(o, axis=None):
    "Clip `o` to its Tukey fences along `axis`."
    lo, hi = get_outliers_IQR(o, axis=axis)
    if isinstance(o, (np.ndarray, pd.core.series.Series)):
        return np.clip(o, lo, hi)
    if isinstance(o, torch.Tensor):
        return torch.clamp(o, lo, hi)

def get_percentile(o, percentile, axis=None):
    "nan-aware percentile of `o` (tensors converted to numpy first)."
    if isinstance(o, torch.Tensor):
        o = o.detach().cpu().numpy()
    return np.nanpercentile(o, percentile, axis=axis, keepdims=axis is not None)

def torch_clamp(o, min=None, max=None):
    r"""Clamp torch.Tensor using 1 or multiple dimensions"""
    if min is not None:
        o = torch.max(o, min)
    if max is not None:
        o = torch.min(o, max)
    return o
# -
# Inline tests: fences preserve the input container type.
t = torch.randn(2,3,100)
test_eq(type(get_outliers_IQR(t, -1)[0]), torch.Tensor)
a = np.random.randn(2,3,100)
test_eq(type(get_outliers_IQR(a, -1)[0]), np.ndarray)
#export
def torch_slice_by_dim(t, index, dim=-1, **kwargs):
    "Gather values from `t` along `dim` at the positions given by `index` (same ndim as `t`)."
    idx = index if isinstance(index, torch.Tensor) else torch.Tensor(index)
    assert t.ndim == idx.ndim, "t and index must have the same ndim"
    return torch.gather(t, dim, idx.long(), **kwargs)
# Demo: gather one random value per row along the last dim.
t = torch.rand(5, 3)
index = torch.randint(0, 3, (5, 1))
# index = [[0, 2], [0, 1], [1, 2], [0, 2], [0, 1]]
torch_slice_by_dim(t, index)
# +
#export
def torch_nanmean(o, dim=None, keepdim=False):
    """There's currently no torch.nanmean function"""
    nan_mask = torch.isnan(o)
    if not nan_mask.any():
        # fast path: no nans, defer to torch.mean
        return torch.mean(o, dim=dim, keepdim=keepdim) if dim is not None else torch.mean(o)
    res = np.nanmean(o.cpu().numpy(), axis=dim, keepdims=keepdim)
    res = torch.from_numpy(np.asarray(res)).to(o.device)
    if res.shape == nan_mask.shape:
        res[nan_mask] = 0
    return res

def torch_nanstd(o, dim=None, keepdim=False):
    """There's currently no torch.nanstd function"""
    nan_mask = torch.isnan(o)
    if not nan_mask.any():
        return torch.std(o, dim=dim, keepdim=keepdim) if dim is not None else torch.std(o)
    res = np.nanstd(o.cpu().numpy(), axis=dim, keepdims=keepdim)
    res = torch.from_numpy(np.asarray(res)).to(o.device)
    if res.shape == nan_mask.shape:
        res[nan_mask] = 1
    return res
# -
# Inline test: nanmean of a partially-nan random tensor is still positive.
t = torch.rand(1000)
t[:100] = float('nan')
assert torch_nanmean(t).item() > 0
#export
def concat(*ls, dim=0):
    "Concatenate tensors, arrays, lists, or tuples by a dimension"
    if not ls:
        return []
    first = ls[0]
    if isinstance(first, torch.Tensor):
        return torch.cat(ls, dim=dim)
    if isinstance(first, np.ndarray):
        return np.concatenate(ls, axis=dim)
    # lists/tuples: concatenate through numpy, then restore the original type
    joined = np.concatenate(ls, axis=dim).tolist()
    return retain_type(joined, typ=type(first))
#export
def reduce_memory_usage(df):
    """Downcast each numeric column of `df` to the smallest dtype that holds its
    min/max, convert object columns to category, and print the memory saved.

    NOTE(review): float columns may be downcast to float16, which loses
    precision; mutates `df` in place (and also returns it).
    """
    start_memory = df.memory_usage().sum() / 1024**2
    print(f"Memory usage of dataframe is {start_memory} MB")
    for col in df.columns:
        col_type = df[col].dtype
        if col_type != 'object':
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    # values exceed float32 range: keep the original dtype
                    pass
        else:
            df[col] = df[col].astype('category')
    end_memory = df.memory_usage().sum() / 1024**2
    print(f"Memory usage of dataframe after reduction {end_memory} MB")
    print(f"Reduced by {100 * (start_memory - end_memory) / start_memory} % ")
    return df
# export
def cls_name(o): return o.__class__.__name__
test_eq(cls_name(timer), 'Timer')
# +
#export
def roll2d(o, roll1: Union[None, list, int] = None, roll2: Union[None, list, int] = None):
    """Rolls a 2D object on the indicated axis
    This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently
    """
    assert o.ndim == 2, "roll2D can only be applied to 2d objects"
    rows, cols = np.ogrid[:o.shape[0], :o.shape[1]]
    if roll1 is not None:
        # int: shift every row by the same amount; list: explicit row order
        rows = rows - np.array(roll1).reshape(1, 1) if isinstance(roll1, int) else np.array(roll1).reshape(o.shape[0], 1)
    if roll2:
        cols = cols - np.array(roll2).reshape(1, 1) if isinstance(roll2, int) else np.array(roll2).reshape(1, o.shape[1])
    return o[rows, cols]
def roll3d(o, roll1: Union[None, list, int] = None, roll2: Union[None, list, int] = None, roll3: Union[None, list, int] = None):
    """Rolls a 3D object on the indicated axis
    This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently
    """
    assert o.ndim == 3, "roll3D can only be applied to 3d objects"
    idx1, idx2, idx3 = np.ogrid[:o.shape[0], :o.shape[1], :o.shape[2]]
    if roll1 is not None:
        # int: shift by a fixed amount; list: explicit index order
        idx1 = idx1 - np.array(roll1).reshape(1, 1, 1) if isinstance(roll1, int) else np.array(roll1).reshape(o.shape[0], 1, 1)
    if roll2:
        idx2 = idx2 - np.array(roll2).reshape(1, 1, 1) if isinstance(roll2, int) else np.array(roll2).reshape(1, o.shape[1], 1)
    if roll3:
        idx3 = idx3 - np.array(roll3).reshape(1, 1, 1) if isinstance(roll3, int) else np.array(roll3).reshape(1, 1, o.shape[2])
    return o[idx1, idx2, idx3]
def random_roll2d(o, axis=(), replace=False):
    """Randomly rolls a 2D object along the indicated axes
    This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently
    """
    # fixed: `replace` was referenced below but never declared as a parameter,
    # raising a NameError whenever an axis was given (random_roll3d has it)
    assert o.ndim == 2, "roll2D can only be applied to 2d objects"
    axis1, axis2 = np.ogrid[:o.shape[0], :o.shape[1]]
    if 0 in axis:
        axis1 = np.random.choice(np.arange(o.shape[0]), o.shape[0], replace).reshape(-1, 1)
    if 1 in axis:
        axis2 = np.random.choice(np.arange(o.shape[1]), o.shape[1], replace).reshape(1, -1)
    return o[axis1, axis2]
def random_roll3d(o, axis=(), replace=False):
    """Randomly rolls a 3D object along the indicated axes
    This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently
    """
    assert o.ndim == 3, "random_roll3d can only be applied to 3d objects"
    idx1, idx2, idx3 = np.ogrid[:o.shape[0], :o.shape[1], :o.shape[2]]
    if 0 in axis:
        idx1 = np.random.choice(np.arange(o.shape[0]), o.shape[0], replace).reshape(-1, 1, 1)
    if 1 in axis:
        idx2 = np.random.choice(np.arange(o.shape[1]), o.shape[1], replace).reshape(1, -1, 1)
    if 2 in axis:
        idx3 = np.random.choice(np.arange(o.shape[2]), o.shape[2], replace).reshape(1, 1, -1)
    return o[idx1, idx2, idx3]
def rotate_axis0(o, steps=1):
    "Rotate `o` `steps` positions along axis 0 (tail wraps to the front)."
    return o[np.arange(o.shape[0]) - steps]

def rotate_axis1(o, steps=1):
    "Rotate `o` `steps` positions along axis 1 (tail wraps to the front)."
    return o[:, np.arange(o.shape[1]) - steps]

def rotate_axis2(o, steps=1):
    "Rotate `o` `steps` positions along axis 2 (tail wraps to the front)."
    return o[:, :, np.arange(o.shape[2]) - steps]
# -
# Demo/tests: rolling and rotating small arrays/tensors.
a = np.tile(np.arange(10), 3).reshape(3, 10) * np.array([1, 10, 100]).reshape(-1, 1)
a
roll2d(a, roll1=[2, 1, 0])
roll2d(a, roll2=3)
o = torch.arange(24).reshape(2,3,4)
test_eq(rotate_axis0(o)[1], o[0])
test_eq(rotate_axis1(o)[:,1], o[:,0])
test_eq(rotate_axis2(o)[...,1], o[...,0])
# +
#export
def chunks_calculator(shape, dtype='float32', n_bytes=1024**3):
    """Function to calculate chunks for a given size of n_bytes (default = 1024**3 == 1GB).
    It guarantees > 50% of the chunk will be filled"""
    # measure the bytes of a single sample with the requested dtype
    X = np.random.rand(1, *shape[1:]).astype(dtype)
    byts = get_size(X)
    n = n_bytes // byts
    # everything fits in one chunk: signal 'no chunking needed'
    if shape[0] / n <= 1: return False
    remainder = shape[0] % n
    # grow n slightly so the final chunk is at least half full
    if remainder / n < .5:
        n_chunks = shape[0] // n
        n += np.ceil(remainder / n_chunks).astype(int)
    return (n, -1, -1)
# +
# Inline tests: small arrays need no chunking; large ones get a chunk size.
shape = (1_000, 10, 1000)
dtype = 'float32'
test_eq(chunks_calculator(shape, dtype), False)
shape = (54684, 10, 1000)
dtype = 'float32'
test_eq(chunks_calculator(shape, dtype), (27342, -1, -1))
# +
#export
def create_array(shape, fname=None, path='./data', on_disk=True, dtype='float32', mode='r+', fill_value='rand', chunksize='auto', verbose=True, **kwargs):
    """
    Create an array of `shape`, either on disk as a .npy-backed memmap or in memory.

    mode:
        ‘r’: Open existing file for reading only.
        ‘r+’: Open existing file for reading and writing.
        ‘w+’: Create or overwrite existing file for reading and writing.
        ‘c’: Copy-on-write: assignments affect data in memory, but changes are not saved to disk. The file on disk is read-only.
    fill_value: 'rand' (for random numbers), int or float
    chunksize = 'auto' to calculate chunks of 1GB, or any integer (for a given number of samples)
    """
    if on_disk:
        assert fname is not None, 'you must provide a fname (filename)'
        path = Path(path)
        if not fname.endswith('npy'): fname = f'{fname}.npy'
        filename = path/fname
        filename.parent.mkdir(parents=True, exist_ok=True)
        # Save a small empty array
        _temp_fn = path/'temp_X.npy'
        np.save(_temp_fn, np.empty(0))
        # Create & save file
        arr = np.memmap(_temp_fn, dtype=dtype, mode='w+', shape=shape, **kwargs)
        np.save(filename, arr)
        del arr
        os.remove(_temp_fn)
        # Open file in selected mode
        arr = np.load(filename, mmap_mode=mode)
    else:
        arr = np.empty(shape, dtype=dtype, **kwargs)
    if fill_value != 0:
        if isinstance(fill_value, Integral):
            arr[:] = fill_value
        elif fill_value == "rand":
            # fill in chunks to bound peak memory when the array is large
            if chunksize == "auto":
                chunksize = chunks_calculator(shape, dtype)
                chunksize = len(arr) if not chunksize else chunksize[0]
            for i in progress_bar(range((len(arr) - 1) // chunksize + 1), display=verbose, leave=False):
                start, end = i * chunksize, min(len(arr), (i + 1) * chunksize)
                if start >= len(arr): break
                arr[start:end] = np.random.rand(end - start, *shape[1:])
    return arr
# convenience variant: zero-filled array (fill_value=0 skips the filling loop)
create_empty_array = partial(create_array, fill_value=0)
# -
# Demo/tests: create a random on-disk array, then an empty one filled chunk by chunk.
fname = 'X_on_disk'
shape = (100, 10, 10)
X = create_array(shape, fname, on_disk=True, mode='r+')
test_ne(abs(X).sum(), 0)
os.remove(X.filename)
del X
# +
fname = 'X_on_disk'
shape = (100, 10, 10)
X = create_empty_array(shape, fname, on_disk=True, mode='r+')
test_eq(abs(X).sum(), 0)
chunksize = 10
pbar = progress_bar(range(math.ceil(len(X) / chunksize)), leave=False)
start = 0
for i in pbar:
    end = min(start + chunksize, len(X))
    partial_data = np.random.rand(end - start, X.shape[1] , X.shape[2])
    X[start:end] = partial_data
    start = end
del partial_data
gc.collect()
filename = X.filename
del X
X = np.load(filename, mmap_mode='r+')
test_eq((X == 0).sum(), 0)
test_eq(X.shape, shape)
os.remove(X.filename)
del X
# +
# export
import gzip
def np_save_compressed(arr, fname=None, path='./data', verbose=False, **kwargs):
    "Save `arr` as a gzip-compressed .npy.gz file under `path`."
    assert fname is not None, 'you must provide a fname (filename)'
    if fname.endswith('npy'): fname = f'{fname}.gz'
    elif not fname.endswith('npy.gz'): fname = f'{fname}.npy.gz'
    filename = Path(path)/fname
    filename.parent.mkdir(parents=True, exist_ok=True)
    f = gzip.GzipFile(filename, 'w', **kwargs)
    np.save(file=f, arr=arr)
    f.close()
    # fixed: the verbose message had lost the filename placeholder
    pv(f'array saved to {filename}', verbose)
def np_load_compressed(fname=None, path='./data', **kwargs):
    "Load an array previously saved with `np_save_compressed` (.npy.gz under `path`)."
    assert fname is not None, 'you must provide a fname (filename)'
    if fname.endswith('npy'): fname = f'{fname}.gz'
    elif not fname.endswith('npy.gz'): fname = f'{fname}.npy.gz'
    with gzip.GzipFile(Path(path)/fname, 'r', **kwargs) as f:
        return np.load(f)
# -
# Inline test: compressed save/load round-trips the array.
X1 = np.random.rand(10)
np_save_compressed(X1, 'X_comp', path='./data')
X2 = np_load_compressed('X_comp')
test_eq(X1, X2)
# export
def np2memmap(arr, fname=None, path='./data', dtype='float32', mode='c', **kwargs):
    """ Function that turns an ndarray into a memmap ndarray
    mode:
    ‘r’: Open existing file for reading only.
    ‘r+’: Open existing file for reading and writing.
    ‘w+’: Create or overwrite existing file for reading and writing.
    ‘c’: Copy-on-write: assignments affect data in memory, but changes are not saved to disk. The file on disk is read-only.
    """
    assert fname is not None, 'you must provide a fname (filename)'
    if not fname.endswith('npy'):
        fname = f'{fname}.npy'
    target = Path(path)/fname
    target.parent.mkdir(parents=True, exist_ok=True)
    np.save(target, arr)                    # persist to disk first
    return np.load(target, mmap_mode=mode)  # then reopen as a memmap
# Inline test: same values, different type (ndarray vs memmap).
X1 = np.random.rand(10)
X2 = np2memmap(X1, 'X1_test')
test_eq(X1, X2)
test_ne(type(X1), type(X2))
# +
# export
def torch_mean_groupby(o, idxs):
    """Computes torch mean along axis 0 grouped by the idxs.
    Need to ensure that idxs have the same order as o"""
    # flatten nested idx lists into a single sequence
    if is_listy(idxs[0]): idxs = flatten_list(idxs)
    flattened_idxs = torch.tensor(idxs)
    # group sizes per unique idx; split `o` accordingly, then mean each group
    idxs, vals = torch.unique(flattened_idxs, return_counts=True)
    vs = torch.split_with_sizes(o, tuple(vals))
    return torch.cat([v.mean(0).unsqueeze(0) for k,v in zip(idxs, vs)])
# -
# Inline tests: singleton groups pass through; shared idxs get averaged.
o = torch.arange(6*2*3).reshape(6, 2, 3).float()
idxs = np.array([[0,1,2,3], [2,3]], dtype=object)
output = torch_mean_groupby(o, idxs)
test_eq(o[:2], output[:2])
test_eq(o[2:4].mean(0), output[2])
test_eq(o[4:6].mean(0), output[3])
# export
def torch_flip(t, dims=-1):
    "Flip `t` along a single dim (-1, 0, 1 or 2) via reversed fancy indexing."
    rev = lambda d: np.arange(t.shape[d])[::-1].copy()
    if dims == -1: return t[..., rev(-1)]
    if dims == 0: return t[rev(0)]
    if dims == 1: return t[:, rev(1)]
    if dims == 2: return t[:, :, rev(2)]
# Inline test: matches torch.flip along the last dim.
t = torch.randn(2, 3, 4)
test_eq(torch.flip(t, (2,)), torch_flip(t, dims=-1))
# +
# export
def torch_nan_to_num(o, num=0, inplace=False):
    "Replace nans in `o` with `num` (optionally in place)."
    return torch_masked_to_num(o, torch.isnan(o), num=num, inplace=inplace)

def torch_masked_to_num(o, mask, num=0, inplace=False):
    "Replace masked positions of `o` with `num` (optionally in place)."
    if not inplace:
        return o.masked_fill(mask, num)
    o[:] = o.masked_fill(mask, num)
# -
# Inline tests: inplace=True works through the slice because x[:, :3] is a view.
x = torch.rand(2, 4, 6)
x[:, :3][x[:, :3] < .5] = np.nan
nan_values = torch.isnan(x).sum()
y = torch_nan_to_num(x[:, :3], inplace=False)
test_eq(torch.isnan(y).sum(), 0)
test_eq(torch.isnan(x).sum(), nan_values)
torch_nan_to_num(x[:, :3], inplace=True)
test_eq(torch.isnan(x).sum(), 0)
x = torch.rand(2, 4, 6)
mask = x[:, :3] > .5
x[:, :3] = torch_masked_to_num(x[:, :3], mask, num=0, inplace=False)
test_eq(x[:, :3][mask].sum(), 0)
x = torch.rand(2, 4, 6)
mask = x[:, :3] > .5
torch_masked_to_num(x[:, :3], mask, num=0, inplace=True)
test_eq(x[:, :3][mask].sum(), 0)
# +
# export
def mpl_trend(x, y, deg=1):
    "Least-squares polynomial trend of degree `deg` fitted to (`x`, `y`) and evaluated at `x`."
    coeffs = np.polyfit(x, y, deg)
    return np.poly1d(coeffs)(x)
# -
# Demo: scatter noisy data and overlay its linear trend.
x = np.sort(np.random.randint(0, 100, 100)/10)
y = np.random.rand(100) + np.linspace(0, 10, 100)
trend = mpl_trend(x, y)
plt.scatter(x, y)
plt.plot(x, trend, 'r')
plt.show()
# +
# export
def int2digits(o, n_digits=None, normalize=True):
    "Split integer `o` into an array of signed digits, optionally zero-padded to `n_digits` and scaled to [-1, 1]."
    digits_str = str(abs(o))
    if n_digits is not None:
        digits_str = digits_str.zfill(n_digits)
    sign = np.sign(o)
    digits = np.array([sign * int(d) for d in digits_str])
    if normalize:
        digits = digits / 10
    return digits

def array2digits(o, n_digits=None, normalize=True):
    "Apply `int2digits` to every element of `o`."
    # fixed: pass normalize=False to the per-int call — digits were previously
    # divided by 10 twice (here and inside int2digits), squashing values to ±0.1
    output = np.array(list(map(partial(int2digits, n_digits=n_digits, normalize=False), o)))
    if normalize:
        output = output / 10
    return output
# +
# int2digits pads -9645 to 6 digits and normalizes each signed digit by 10
o = -9645
test_eq(int2digits(o, 6), np.array([ 0, 0, -.9, -.6, -.4, -.5]))
# NOTE(review): this only checks the output shape, not the digit values
a = np.random.randint(-1000, 1000, 10)
test_eq(array2digits(a,5).shape, (10,5))
# +
# export
def sincos_encoding(seq_len, device=None, to_np=False):
    """Return (sin, cos) positional encodings of length `seq_len` spanning one full period.

    Fix: `device` was previously ignored (always overwritten with
    `default_device()`); it is now only used as a fallback when `device is None`.
    Returns numpy arrays when `to_np=True`, torch tensors otherwise.
    """
    if to_np:
        sin = np.sin(np.arange(seq_len) / seq_len * 2 * np.pi)
        cos = np.cos(np.arange(seq_len) / seq_len * 2 * np.pi)
    else:
        if device is None: device = default_device()
        sin = torch.sin(torch.arange(seq_len, device=device) / seq_len * 2 * np.pi)
        cos = torch.cos(torch.arange(seq_len, device=device) / seq_len * 2 * np.pi)
    return sin, cos
# -
# plot one full sin/cos period over 100 positions
sin, cos = sincos_encoding(100)
plt.plot(sin.cpu().numpy())
plt.plot(cos.cpu().numpy())
plt.show()
# +
# export
def linear_encoding(seq_len, device=None, to_np=False, lin_range=(-1,1)):
    """Return a linear positional encoding of length `seq_len` spanning `lin_range`.

    Fix: `device` was previously ignored (always overwritten with
    `default_device()`); it is now only used as a fallback when `device is None`.
    Returns a numpy array when `to_np=True`, a torch tensor otherwise.
    """
    if to_np:
        enc = np.linspace(lin_range[0], lin_range[1], seq_len)
    else:
        if device is None: device = default_device()
        enc = torch.linspace(lin_range[0], lin_range[1], seq_len, device=device)
    return enc
# -
# plot the default (-1, 1) linear encoding
lin = linear_encoding(100)
plt.plot(lin.cpu().numpy())
plt.show()
# +
# export
def encode_positions(pos_arr, min_val=None, max_val=None, linear=False, lin_range=(-1,1)):
    """Encode an array of positions.

    Positions are min-max scaled to [0, 1] (NaNs are ignored when computing the
    bounds). With `linear=True`, returns one array rescaled into `lin_range`;
    otherwise returns a (sin, cos) pair over one full period.
    """
    lo = np.nanmin(pos_arr) if min_val is None else min_val
    hi = np.nanmax(pos_arr) if max_val is None else max_val
    scaled = (pos_arr - lo) / (hi - lo)
    if linear:
        return scaled * (lin_range[1] - lin_range[0]) + lin_range[0]
    angle = scaled * 2 * np.pi
    return np.sin(angle), np.cos(angle)
# -
# demo: 10 ragged position sequences (random keep-mask over -4000..3990), NaN-padded to length 500
n_samples = 10
length = 500
_a = []
for i in range(n_samples):
    a = np.arange(-4000, 4000, 10)
    mask = np.random.rand(len(a)) > .5
    a = a[mask]
    a = np.concatenate([a, np.array([np.nan] * (length - len(a)))])
    _a.append(a.reshape(-1,1))
a = np.concatenate(_a, -1).transpose(1,0)
# sincos encoding preserves the (n_samples, length) shape, one array per channel
sin, cos = encode_positions(a, linear=False)
test_eq(a.shape, (n_samples, length))
test_eq(sin.shape, (n_samples, length))
test_eq(cos.shape, (n_samples, length))
plt.plot(sin.T)
plt.plot(cos.T)
plt.xlim(0, 500)
plt.show()
# same setup, linear encoding returns a single array
n_samples = 10
length = 500
_a = []
for i in range(n_samples):
    a = np.arange(-4000, 4000, 10)
    mask = np.random.rand(len(a)) > .5
    a = a[mask]
    a = np.concatenate([a, np.array([np.nan] * (length - len(a)))])
    _a.append(a.reshape(-1,1))
a = np.concatenate(_a, -1).transpose(1,0)
lin = encode_positions(a, linear=True)
test_eq(a.shape, (n_samples, length))
test_eq(lin.shape, (n_samples, length))
plt.plot(lin.T)
plt.xlim(0, 500)
plt.show()
# +
# export
def sort_generator(generator, bs):
    "Consume `generator`, sort each consecutive batch of `bs` items, and yield the items back."
    buf = list(generator)
    for start in range(0, len(buf), bs):
        buf[start:start + bs] = np.sort(buf[start:start + bs])
    return iter(buf)
# -
# each 512-item batch comes out sorted, though the overall stream is not
generator = (i for i in np.random.permutation(np.arange(1000000)).tolist())
l = list(sort_generator(generator, 512))
test_eq(l[:512], sorted(l[:512]))
# +
#export
def get_subset_dict(d, keys):
    "Return a new dict with only the entries of `d` whose key appears in `keys` (a single key or a listy of keys); missing keys are silently skipped."
    return {k: d[k] for k in listify(keys) if k in d}
# -
# map 'a'..'z' -> 0..25 and extract a subset of keys
keys = string.ascii_lowercase
values = np.arange(len(keys))
d = {k:v for k,v in zip(keys,values)}
test_eq(get_subset_dict(d, ['a', 'k', 'j', 'e']), {'a': 0, 'k': 10, 'j': 9, 'e': 4})
# notebook-only demo of import_file_as_module: the %%file magics below write two helper modules
# %%file mod_dev.py
a = 5
def sum(b): return a + b
# +
# %%file mod_dev2.py
from fastcore.script import *
from tsai.imports import *
@call_parse
def add(
    path: Param('path to A.', str)='',
    b: Param('Integer.', int)=0,
):
    mod_A = import_file_as_module(path)
    output = mod_A.sum(b)
    print(output)
    return output
# -
from mod_dev2 import *
test_eq(add('mod_dev.py', 3), 8)
# NOTE(review): `r` is produced by the commented-out `!python` shell magic above;
# when this file runs as a plain script the next line raises NameError — notebook-only, confirm.
# r = !python mod_dev2.py --path "mod_dev.py" --b 3
test_eq(int(r[0]), 8)
if os.path.exists("mod_dev.py"): os.remove("mod_dev.py")
if os.path.exists("mod_dev2.py"): os.remove("mod_dev2.py")
# +
#export
def is_memory_shared(a, b):
    r"""Check whether two array-like objects (e.g. np.ndarray / torch.Tensor) share memory.

    Warning: destructive — it overwrites the contents of the first writable
    argument with 1s. Returns True/False, or None (after printing 'unknown')
    when neither argument is writable.
    """
    # Fixes: bare `except:` narrowed to `except Exception` so KeyboardInterrupt /
    # SystemExit are not swallowed; fastai `tensor()` replaced by the equivalent
    # torch.as_tensor (zero-copy for ndarrays/tensors).
    try:
        a[:] = 1
    except Exception:
        try:
            b[:] = 1
        except Exception:
            print('unknown')
            return
    # if memory is shared, the 1s written to one object are visible through the other
    return torch.equal(torch.as_tensor(a), torch.as_tensor(b))
# -
# torch.from_numpy and torch.as_tensor share memory with the source ndarray;
# torch.tensor always copies
a = np.random.rand(2,3,4)
t1 = torch.from_numpy(a)
test_eq(is_memory_shared(a, t1), True)
a = np.random.rand(2,3,4)
t2 = torch.as_tensor(a)
test_eq(is_memory_shared(a, t2), True)
a = np.random.rand(2,3,4)
t3 = torch.tensor(a)
test_eq(is_memory_shared(a, t3), False)
#hide
out = create_scripts(); beep(out)
|
nbs/000_utils.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Other-Methods" data-toc-modified-id="Other-Methods-1">Other Methods</a></span><ul class="toc-item"><li><span><a href="#CCA,-PLS-and-PCA" data-toc-modified-id="CCA,-PLS-and-PCA-1.1">CCA, PLS and PCA</a></span></li><li><span><a href="#Applications-to-Network-Compression-and-Neuron-Ablations" data-toc-modified-id="Applications-to-Network-Compression-and-Neuron-Ablations-1.2">Applications to Network Compression and Neuron Ablations</a></span></li><li><span><a href="#Projections-with-CCA" data-toc-modified-id="Projections-with-CCA-1.3">Projections with CCA</a></span></li><li><span><a href="#Projections-with-PLS" data-toc-modified-id="Projections-with-PLS-1.4">Projections with PLS</a></span></li><li><span><a href="#Projection-with-PCA" data-toc-modified-id="Projection-with-PCA-1.5">Projection with PCA</a></span></li><li><span><a href="#Using-CCA,-PLS,-PCA-for-Network-Compression" data-toc-modified-id="Using-CCA,-PLS,-PCA-for-Network-Compression-1.6">Using CCA, PLS, PCA for Network Compression</a></span></li><li><span><a href="#Conv-Layers" data-toc-modified-id="Conv-Layers-1.7">Conv Layers</a></span></li></ul></li></ul></div>
# -
# ## Other Methods
#
# This tutorial looks at other methods like CCA, and applications to Neuron Ablations and Network Compression.
# +
import os, sys
from matplotlib import pyplot as plt
# %matplotlib inline
import numpy as np
import pickle
import pandas
import gzip
sys.path.append("..")
import cca_core
import numpy_pls
import numpy_pca
# -
def _plot_helper(arr, xlabel, ylabel):
    """Line-plot `arr` on the current axes with the given labels and a grid."""
    ax = plt.gca()
    ax.plot(arr, lw=2.0)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.grid()
# ### CCA, PLS and PCA
# Mathematically, CCA is trying to optimize the whitened cross correlation matrix:
# 
# What this means is that CCA is also _invariant to scaling_: if some neurons have much higher magnitude than others, they will not be given greater importance. This can be useful when comparing across different networks with different scalings, but magnitude can be an important signal also.
#
# **_Partial Least Squares (PLS)_** is similar to CCA but optimizes for maximal _cross covariance_, i.e. taking magnitude into account. Mathematically, is a little like the above equation, but without the denominator term:
# 
# This makes PLS even easier to implement than CCA. We've included numpy implementations of both PLS and PCA, in `numpy_pls.py` and `numpy_pca.py` in the repository.
#
# We can think of PCA, PLS and CCA as using different amounts of _correlation_ and _magnitude_ information to extract representations. This [survey](http://www.diva-portal.org/smash/get/diva2:288565/FULLTEXT01.pdf) has a nice overview of all of these methods.
# ### Applications to Network Compression and Neuron Ablations
#
# One place where all of these methods are useful to consider is applications to network compression, or relatedly, neuron ablation tests.
#
# There's extensive evidence that a trained neural network uses much fewer dimensions (neurons) than the total width of a layer. Normally, we map (fully connected) layers $l$ to $l+1$ by performing $z_lW_{l+1}$ -- multiplying the neurons $z$ at layer $l$ with the weights of layer $l+1$.
#
# CCA, PLS and PCA all give a way to perform a _low rank projection_ of these neurons $z_l$. Instead of computing $z_lW_{l+1}$, we compute $(P^TPz_l)W_{l+1}$, where $P$ is a projection matrix given to us by CCA, PLS or PCA.
#
# More concretely, suppose we have 500 neurons in layer $l$, so $z_l$ is 500 dimensional. P might have shape (50, 500), i.e. it projects the 500 neurons down to 50 dimensions. So $P^TPz_l$ lies in a 50 dimensional subspace.
#
# Below we'll overview how to perform projections with CCA, PCA and PLS. The code in fact already computes the matrix for us!
#
# ### Projections with CCA
#
# We'll work with our fully connected network on MNIST that we used in the Introduction. The network architecture has three hidden layers of width 500, so looks like:
#
# 784 (Input) --- 500 --- 500 --- 500 -- 10 (Output)
#
# Let's go ahead, load activations and apply CCA
# +
# Load up second hidden layer of MNIST networks and compare
with open("./model_activations/MNIST/model_0_lay02.p", "rb") as f:
    acts1 = pickle.load(f)
with open("./model_activations/MNIST/model_1_lay02.p", "rb") as f:
    acts2 = pickle.load(f)
print("activation shapes", acts1.shape, acts2.shape)
# epsilon > 0 prunes neurons with near-zero activation vectors before CCA
results = cca_core.get_cca_similarity(acts1, acts2, epsilon=1e-10, verbose=False)
# mean-centered activations, used by the projection demos below
cacts1 = acts1 - np.mean(acts1, axis=0)
cacts2 = acts2 - np.mean(acts2, axis=0)
print("Returned Keys")
print(results.keys())
# -
# The returned values have all we need to create our projection matrix P. Suppose we want to project `acts1`. The matrix in `results[full_coef_x]` is a 500 x 500 matrix (as acts1 is 500 dimensional), with rows corresponding to the top CCA directions.
#
# So `results[full_coeff_x][:k]` corresponds to the top k CCA directions.
#
# There's one caveat -- when we compute CCA, we have to change basis, and to change back and get our CCA directions from `acts1` we need to multiply by `results[full_invsqrt_xx]`. So `P = results[full_coeff_x][:k]` and `P.TP` is applied to `np.dot(results[full_invsqrt_xx], cacts1)`:
# +
# projection with CCA
# P^T P applied inside the CCA basis; full_invsqrt_xx/yy undo the whitening
# change of basis before the coefficient matrices are applied
cca_proj1 = np.dot(np.dot(results["full_coef_x"].T, np.dot(results["full_coef_x"], results["full_invsqrt_xx"])),
                   cacts1)
cca_proj2 = np.dot(np.dot(results["full_coef_y"].T, np.dot(results["full_coef_y"], results["full_invsqrt_yy"])),
                   cacts2)
# -
# Note that when we apply CCA, we set `epsilon > 0` to remove neurons with very small activation vectors. As a result, some of the returned CCA neuron coefficients are 0. This is why our return value has both `results["full_coef_x"]` and `results["coef_x"]`. The former contains all values (even for the 0 terms) and the latter only values for neurons with non-neglible activations, i.e., neurons in `results["x_idxs"]`.
#
# We can visualize one of the non-zero CCA directions as follows (note that the MNIST activations are saved with _datapoints ordered according to class_).
# plot the first non-pruned CCA direction for network 1 (x_idxs masks out pruned neurons)
_plot_helper(cca_proj1[results["x_idxs"]][0], "Datapoint Idx", "CCA Dirn Value")
# Note that we're seeing the directions are sensitive to particular classes. The above is for layer 1 and we can do the same for layer 2:
# same for network 2 (y_idxs is its pruning mask)
_plot_helper(cca_proj2[results["y_idxs"]][0], "Datapoint Idx", "CCA Dirn Value")
# ### Projections with PLS
#
# We'll use the functions in `numpy_pls` to project with PLS:
# PLS similarity; no change of basis is needed for its projections (see text above)
pls_results = numpy_pls.get_pls_similarity(acts1, acts2)
print("Returned Keys")
print(pls_results.keys())
# Projecting with PLS is even easier -- as we didn't change basis, our projection matrix P (for acts1), is just
# `pls_results["neuron_coeffs1']`, the rows of which are the different pls directions. Because we've lost the denominator term, we can also keep the neurons with negligible activation vectors, they just won't contribute to the PLS directions.
#
# Let's see the PLS projection in action:
# P^T P projection with the PLS neuron coefficients, then plot one direction per network
pls_proj1 = np.dot(pls_results["neuron_coeffs1"].T, np.dot(pls_results["neuron_coeffs1"], cacts1))
pls_proj2 = np.dot(pls_results["neuron_coeffs2"].T, np.dot(pls_results["neuron_coeffs2"], cacts2))
_plot_helper(pls_proj1[2], "Datapoint Idx", "PLS Dirn Value")
_plot_helper(pls_proj2[2], "Datapoint Idx", "PLS Dirn Value")
# ### Projection with PCA
# Projection with PCA is a familiar thing, and we've included a numpy implementation `numpy_pca` that lets you do this on a single layer:
# single-layer PCA and its P^T P projection
pca_results = numpy_pca.get_pca(acts2)
print(pca_results.keys())
pca_proj2 = np.dot(pca_results["neuron_coefs"].T,
                   np.dot(pca_results["neuron_coefs"], cacts2))
# NOTE(review): the y-label says "PLS" but this plots a PCA direction — likely a copy-paste slip
_plot_helper(pca_proj2[0], "Datapoint Idx", "PLS Dirn Value")
# ### Using CCA, PLS, PCA for Network Compression
# The low rank decomposition helps reduce the number of parameters needed for storing network information. However, we need to assert that this decomposition is not adversely affecting the network performance.
#
# To do so, one would test the projection accuracy by (1) computing the projection matrix P (typically on the training data) (2) applying it to batches of test data like we've done (getting `pls_proj, cca_proj, pca_proj`) and then feeding these activations through the network to evaluate accuracy.
#
# In our case we found PLS to perform best, but there's much more exploration to be done!
#
# ### Conv Layers
# In this tutorial, we've concentrated on fully connected layers, but we can do a similar projection on conv layers, this time applying the projection matrix to each channel separately.
|
tutorials/003_Other_Methods _Neuron_Ablations_and_Network_Compression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Time Series Visualizations
#
# This notebook demonstrates how to use Lets-Plot to investigate time series.
#
# The data is provided by [Kaggle](https://www.kaggle.com/sumanthvrao/daily-climate-time-series-data).
# +
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
# -
# ### Preparation
# load the Delhi daily climate data and derive calendar columns from the date
df = pd.read_csv("https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/delhi_climate.csv")
df = df.rename(columns={'meantemp': 'mean_temp', 'meanpressure': 'mean_pressure'})
df.date = pd.to_datetime(df.date)
df['day'] = df.date.dt.day
df['month'] = df.date.dt.month
df['year'] = df.date.dt.year
df['day_of_year'] = df.date.dt.dayofyear
df = df.drop(columns=['date'])
# keep only years before 2017 (presumably 2017 is incomplete — TODO confirm)
df = df[df.year < 2017]
# ### Boxplot: General Information
# +
# p1: distribution of mean temperature per year; p2: the same, split by month and faceted by year
p1 = ggplot() + \
    geom_boxplot(aes(x='year', y='mean_temp', color='year', fill='year'), \
                 data=df, size=2, alpha=.5) + \
    scale_x_discrete(name='year') + \
    scale_color_discrete() + scale_fill_discrete() + \
    ylab('mean temperature') + \
    ggtitle('Mean Temperature Aggregated') + \
    theme(legend_position='bottom', panel_grid='blank')
p2 = ggplot() + \
    geom_boxplot(aes(x='month', y='mean_temp', color='year', fill='year'), \
                 data=df, size=.75, alpha=.5) + \
    scale_color_discrete() + scale_fill_discrete() + \
    facet_grid(x='year') + \
    ggtitle('Mean Temperature by Month') + \
    theme(legend_position='none', panel_grid='blank')
# stack both plots vertically in a single output
bunch = GGBunch()
bunch.add_plot(p1, 0, 0, 600, 300)
bunch.add_plot(p2, 0, 300, 600, 200)
bunch.show()
# -
# ### Year-to-Year Temperature Comparison
# one mean-temperature line per year, faceted by month, with formatted tooltips
ggplot() + \
    geom_line(aes(x='day', y='mean_temp', group='year', color='year'), data=df, size=2, \
              tooltips=layer_tooltips().format('@mean_temp', '.2f')\
                                       .line('mean temperature|@mean_temp')\
                                       .line('date|@month/@day/@year')) + \
    scale_x_discrete() + scale_color_discrete() + \
    facet_grid(y='month') + \
    ylab('month') + \
    ggtitle('Mean Temperature for Each Month') + \
    theme(legend_position='bottom')
# ### Most Common Temperature Values
# temperature histograms in a month x year facet grid
ggplot() + \
    geom_histogram(aes(x='mean_temp', group='year', color='year', fill='year'), \
                   data=df, bins=15, size=.5, alpha=.5, \
                   tooltips=layer_tooltips().line('count|@..count..')\
                                            .format('@mean_temp', '.2f')\
                                            .line('mean temperature|@mean_temp')\
                                            .line('@|@month')\
                                            .line('@|@year')) + \
    scale_color_discrete() + scale_fill_discrete() + \
    facet_grid(x='month', y='year') + \
    xlab('month') + ylab('year') + \
    ggtitle('Most Common Temperature') + \
    ggsize(800, 400) + \
    theme_classic() + theme(legend_position='bottom')
# +
# truncate temperatures to whole degrees for a heatmap-style bin2d view
int_mean_temp_df = df[['mean_temp', 'month', 'year']].copy()
int_mean_temp_df.mean_temp = int_mean_temp_df.mean_temp.astype(int)
ggplot() + \
    geom_bin2d(aes(x='month', y='mean_temp', fill='mean_temp'), \
               data=int_mean_temp_df, stat='identity', size=.5, color='white', alpha=.2,
               tooltips=layer_tooltips().format('@mean_temp', '.2f')\
                                        .line('mean temperature|@mean_temp')\
                                        .line('@|@month')\
                                        .line('@|@year')) + \
    scale_fill_gradient(name='mean temperature', low='#abd9e9', high='#d7191c') + \
    facet_grid(x='year') + \
    xlab('month') + ylab('mean temperature') + \
    ggtitle('Heatmap of Temperatures by Year') + \
    ggsize(600, 300) + \
    theme_classic() + theme(axis='blank', axis_title=element_text(), legend_position='bottom')
# -
# ### Observing Mean Temperature and Wind Speed Correlation
# scatter of wind speed vs mean temperature, faceted by year
ggplot() + \
    geom_point(aes(x='wind_speed', y='mean_temp', color='mean_temp', fill='mean_temp'), \
               data=df, shape=21, size=3, alpha=.2) + \
    scale_color_gradient(name='', low='#abd9e9', high='#d7191c') + \
    scale_fill_gradient(name='', low='#abd9e9', high='#d7191c') + \
    facet_grid(x='year') + \
    xlab('wind speed') + ylab('mean temperature') + \
    ggtitle('Relation Between Mean Temperature and Wind Speed') + \
    ggsize(600, 200) + \
    theme_classic()
# ### Observing Mean Temperature and Humidity Correlation
# scatter of humidity vs mean temperature, faceted by year
ggplot() + \
    geom_point(aes(x='humidity', y='mean_temp', color='humidity', fill='humidity'), \
               data=df, shape=21, size=3, alpha=.2) + \
    scale_color_gradient(name='', low='#fdae61', high='#2c7bb6') + \
    scale_fill_gradient(name='', low='#fdae61', high='#2c7bb6') + \
    facet_grid(x='year') + \
    ylab('mean temperature') + \
    ggtitle('Relation Between Mean Temperature and Humidity') + \
    ggsize(600, 200) + \
    theme_classic()
# ### In Search of Correlation on Lag Scatter Plots
# +
# lag scatter plots: the series against itself shifted by 1 day / 30 days / 365 days;
# a tight diagonal indicates strong autocorrelation at that lag
df_shifted_by_day = df[['mean_temp', 'year']].copy()
df_shifted_by_day['mean_temp_shifted'] = df.mean_temp.shift(-1)
df_shifted_by_day = df_shifted_by_day.dropna()
p1 = ggplot() + \
    geom_point(aes(x='mean_temp', y='mean_temp_shifted', color='mean_temp', fill='mean_temp'), \
               data=df_shifted_by_day, shape=21, size=3, alpha=.2) + \
    scale_color_gradient(name='', low='#abd9e9', high='#d7191c') + \
    scale_fill_gradient(name='', low='#abd9e9', high='#d7191c') + \
    facet_grid(x='year') + \
    coord_fixed(ratio=1) + \
    xlab('mean temperature') + ylab('shifted mean temperature') + \
    ggtitle('One Day Lag Scatter Plot') + \
    theme_classic()
df_shifted_by_month = df[['mean_temp', 'year']].copy()
df_shifted_by_month['mean_temp_shifted'] = df.mean_temp.shift(-30)
df_shifted_by_month = df_shifted_by_month.dropna()
p2 = ggplot() + \
    geom_point(aes(x='mean_temp', y='mean_temp_shifted', color='mean_temp', fill='mean_temp'), \
               data=df_shifted_by_month, shape=21, size=3, alpha=.2) + \
    scale_color_gradient(name='', low='#abd9e9', high='#d7191c') + \
    scale_fill_gradient(name='', low='#abd9e9', high='#d7191c') + \
    facet_grid(x='year') + \
    coord_fixed(ratio=1) + \
    xlab('mean temperature') + ylab('shifted mean temperature') + \
    ggtitle('One Month Lag Scatter Plot') + \
    theme_classic()
df_shifted_by_year = df[['mean_temp', 'year']].copy()
df_shifted_by_year['mean_temp_shifted'] = df.mean_temp.shift(-365)
df_shifted_by_year = df_shifted_by_year.dropna()[:-1]
p3 = ggplot() + \
    geom_point(aes(x='mean_temp', y='mean_temp_shifted', color='mean_temp', fill='mean_temp'), \
               data=df_shifted_by_year, shape=21, size=3, alpha=.2) + \
    scale_color_gradient(name='', low='#abd9e9', high='#d7191c') + \
    scale_fill_gradient(name='', low='#abd9e9', high='#d7191c') + \
    facet_grid(x='year') + \
    coord_fixed(ratio=1) + \
    xlab('mean temperature') + ylab('shifted mean temperature') + \
    ggtitle('One Year Lag Scatter Plot') + \
    theme_classic()
# stack the three lag plots vertically
bunch = GGBunch()
bunch.add_plot(p1, 0, 0, 600, 200)
bunch.add_plot(p2, 0, 200, 600, 200)
bunch.add_plot(p3, 0, 400, 600, 200)
bunch.show()
# -
# ### Annual Path of Mean Temperature and Humidity
# +
# monthly means traced as a path through (humidity, temperature) space, points colored by month
mean_df = df.groupby(by=['year', 'month']).mean()[['mean_temp', 'humidity']].reset_index()
ggplot(mean_df) + \
    geom_path(aes(x='humidity', y='mean_temp'), color='#99d8c9', size=1) + \
    geom_point(aes(x='humidity', y='mean_temp', fill='month'), \
               shape=21, size=3, color='#00441b',
               tooltips=layer_tooltips().line('@|@month')\
                                        .format('@humidity', '.2f')\
                                        .line('@|@humidity')\
                                        .format('@mean_temp', '.2f')\
                                        .line('mean temperature|@mean_temp')) + \
    scale_fill_gradient(name='', low='#e5f5f9', high='#2ca25f') + \
    facet_grid(x='year') + \
    ylab('mean temperature') + \
    ggtitle('Annual Path of Mean Temperature and Humidity') + \
    ggsize(600, 200) + \
    theme_classic()
# -
# ### Autocorrelation Plots for Mean Temperature, Wind Speed and Humidity
# +
# autocorrelation of each series for lags up to 3 years, melted to long form for faceting
acf_df = pd.DataFrame([
    (lag, df.mean_temp.autocorr(lag=lag), df.wind_speed.autocorr(lag=lag), df.humidity.autocorr(lag=lag))
    for lag in range(365 * 3)
], columns=['lag', 'mean temperature acf', 'wind speed acf', 'humidity acf']).melt(
    id_vars=['lag'],
    value_vars=['mean temperature acf', 'wind speed acf', 'humidity acf'],
    var_name='acf_type', value_name='acf_value'
)
ggplot() + \
    geom_point(aes(x='lag', y='acf_value', color='acf_value'), data=acf_df, size=3) + \
    scale_color_gradient(low='#fc8d59', high='#91cf60') + \
    facet_grid(y='acf_type') + \
    ylab('ACF value') + \
    ggtitle('Autocorrelation Functions') + \
    ggsize(600, 450) + \
    theme(legend_position='none')
|
source/examples/demo/delhi_climate.ipynb
|