code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from os.path import join
import json
import csv
import glob
import os
import rpy2.rinterface
from pandas import read_csv, DataFrame, Series
from qiime2 import Artifact
from scipy.stats import t, sem
from IPython.display import Image
# -
# %reload_ext rpy2.ipython
# + language="R"
# library(ggplot2)
# library(grid)
# -
# Collect the held-out (test) sample assignments for every cross-validation
# fold of every EMPO3 habitat that has trained classifier weights available.
empo3_dir = '/home/ben/Data/empo_3/'
columns = ['sample type', 'sample', 'fold']
data = {c: [] for c in columns}
for type_dir in glob.glob(join(empo3_dir, '*')):
    # Only habitats with a fitted weights artifact are of interest.
    if not os.path.exists(join(type_dir, 'results', 'weights.qza')):
        continue
    type_ = os.path.basename(type_dir)
    # Exclude control and out-of-scope sample types.
    if type_ in ('sterile-water-blank', 'single-strain', 'mock-community', 'nick', 'plant-surface'):
        continue
    for fold_dir in glob.glob(join(type_dir, 'tmp', 'fold-*')):
        _, fold = fold_dir.rsplit('-', 1)
        with open(join(fold_dir, 'sample_test.json')) as fh:
            try:
                samples = json.load(fh)
            except UnicodeDecodeError:
                print(join(fold_dir, 'sample_test.json'), 'is corrupted')
                # BUG FIX: skip corrupted folds instead of falling through;
                # previously `samples` kept the value from the prior fold
                # (or was undefined on the first iteration), silently
                # recording duplicate or wrong sample lists.
                continue
        data['sample'].extend(samples)
        data['sample type'].extend([type_]*len(samples))
        data['fold'].extend([fold]*len(samples))
# Attach the fold assignment of each sample to the per-sample taxonomy
# error-rate evaluation table.
folds = DataFrame(data)
eval_er = read_csv(join(empo3_dir, 'eval_taxa_er.tsv'), sep='\t')
folds = folds.set_index(['sample type', 'sample'])
eval_er = eval_er.join(folds, ['sample type', 'sample'])
# Keep only genus (6) and species (7) level results.
level7 = eval_er[(eval_er['level'] == 7) | (eval_er['level'] == 6)]
# Restrict to the three class-weight schemes compared in the figure.
level7 = level7[(level7['class weights'] == 'average') |
                (level7['class weights'] == 'bespoke70') |
                (level7['class weights'] == 'uniform70')]
grouped = level7.groupby(['sample type', 'class weights', 'fold', 'level'])
def weighted_stats(x):
    """Return the abundance-weighted mean error rate of one group.

    The rate is total classification errors divided by total reads,
    wrapped in a single-entry Series labelled 'mean'.
    """
    total_errors = x['errors'].sum()
    total_reads = x['reads'].sum()
    return Series({'mean': total_errors / total_reads})
# Weighted mean error rate per (habitat, weighting scheme, fold, level).
figure2 = grouped.apply(weighted_stats)
figure2.reset_index(inplace=True)
# Collapse the raw class-weight identifiers into display labels: the
# habitat-specific weight names become 'Other', while the three schemes
# under comparison get capitalised display names.
weight_labels = dict.fromkeys([
    'animal-corpus',
    'animal-distal-gut',
    'animal-proximal-gut',
    'animal-secretion',
    'animal-surface',
    'plant-corpus',
    'plant-rhizosphere',
    'sediment-non-saline',
    'soil-non-saline',
    'surface-non-saline',
    'water-non-saline',
    'sediment-saline',
    'surface-saline',
    'water-saline',
], 'Other')
weight_labels.update(average='Average', uniform70='Uniform', bespoke70='Bespoke')
for old, new in weight_labels.items():
    figure2.loc[figure2['class weights'] == old, 'class weights'] = new
# Map the machine-readable EMPO3 habitat names to pretty display names.
habitat_labels = {
    'animal-corpus': 'Animal corpus',
    'animal-distal-gut': 'Animal distal gut',
    'animal-proximal-gut': 'Animal proximal gut',
    'animal-secretion': 'Animal secretion',
    'animal-surface': 'Animal surface',
    'plant-corpus': 'Plant corpus',
    'plant-rhizosphere': 'Plant rhizosphere',
    'sediment-non-saline': 'Sediment (non-saline)',
    'soil-non-saline': 'Soil (non-saline)',
    'surface-non-saline': 'Surface (non-saline)',
    'water-non-saline': 'Water (non-saline)',
    'sediment-saline': 'Sediment (saline)',
    'surface-saline': 'Surface (saline)',
    'water-saline': 'Water (saline)',
}
for old, new in habitat_labels.items():
    figure2.loc[figure2['sample type'] == old, 'sample type'] = new
# Replace the numeric taxonomic levels with human-readable names.
for old, new in {6: 'Genus', 7: 'Species'}.items():
    figure2.loc[figure2['level'] == old, 'level'] = new
figure2.head()
# + magic_args="-i figure2" language="R"
# figure2$sample.type <- factor(figure2$sample.type, levels=c(
# 'Water (saline)',
# 'Surface (saline)',
# 'Sediment (saline)',
# 'Water (non-saline)',
# 'Surface (non-saline)',
# 'Soil (non-saline)',
# 'Sediment (non-saline)',
# 'Plant rhizosphere',
# 'Plant corpus',
# 'Animal surface',
# 'Animal secretion',
# 'Animal proximal gut',
# 'Animal distal gut',
# 'Animal corpus'))
# figure2$class.weights <- factor(figure2$class.weights, levels=c(
# 'Uniform', 'Average', 'Bespoke'
# ))
# figure2$fill <- paste(figure2$level, figure2$class.weights)
# figure2$fill <- factor(figure2$fill, levels=c(
# 'Species Uniform',
# 'Species Average',
# 'Species Bespoke',
# 'Genus Uniform',
# 'Genus Average',
# 'Genus Bespoke'))
# fillPalette <- c("#56B4E9", "#009E73", "#E69F00", "#D4ECF9", "#BFE6DC", "#F8E7BF")
# colourPalette <- c("#56B4E9", "#009E73", "#E69F00", "#56B4E9", "#009E73", "#E69F00")
# ggplot(data=figure2, aes(x=sample.type, y=mean*100)) +
# geom_boxplot(data=figure2[figure2$level == 'Species',], lwd=0.2, outlier.size=0.2, position=position_dodge(width=0.9),
# aes(fill=fill)) +
# geom_boxplot(data=figure2[figure2$level == 'Genus',], lwd=0.2, outlier.size=0.2, position=position_dodge(width=0.9),
# aes(fill=fill)) +
# coord_flip(clip="off") +
# theme_bw() +
# labs(x='EMPO3 Habitat', y='Error Rate (%)') +
# scale_fill_manual(name='Level & Weights', values=fillPalette) +
# scale_colour_manual(name='Level & Weights', values=colourPalette) +
# theme(plot.margin = unit(c(1, 1, 2, 1), "lines"),
# axis.text.y = element_text(angle = 45)) +
# annotation_custom(
# grob=grid::textGrob(label = "Better Performance"),
# xmin=-0.5, xmax=-0.5, ymin=21, ymax=21
# ) +
# annotation_custom(
# grob=linesGrob(arrow=arrow(type="open", ends="first", length=unit(2,"mm"))),
# xmin=-0.3, xmax=-0.3, ymin=4., ymax=38
# )
#
# ggsave(file="bc-figure1-error-rate.png", width=5, height=10, dpi=300)
# -
# Display the PNG rendered and saved by the R cell above.
Image("bc-figure1-error-rate.png")
| figures-and-tables/error-rate-vs-habitat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Beachballs
#
# ## NAME : Your name
# ## DATE : The date
#
# Shearer would like you to draw a table of beachballs. That is crazy. It's more important that you can read a catalog solution and back out some geophysical insight. So instead, let's let ObsPy do the plotting. Categorize the following sources as
#
# 1. Strike-slip
# 2. Normal
# 3. Reverse
# 4. Oblique
#
# And please write "strike-slip" not "1" in the table below.
#
# The sources to classify are
#
# | Date | Region | Strike, dip, rake | Strike, dip, rake | Classification |
# | :--- | :---: | ---: | ---: | ----: |
# | 10/28/83 | <NAME>, Idaho | 304, 29,-103 | 138, 62,-83 | PUT YOUR ANSWER HERE |
# | 09/19/85 | Michoacan, Mexico | 301, 18, 105 | 106, 83, 85 | PUT YOUR ANSWER HERE |
# | 10/18/89 | <NAME>eta, California | 235, 41, 29 | 123, 71, 128 | PUT YOUR ANSWER HERE |
# | 06/20/90 | Western Iran | 200, 59, 160 | 300, 73, 32 | PUT YOUR ANSWER HERE |
# | 07/16/90 | <NAME> | 243, 86, 178 | 333, 88, 4 | PUT YOUR ANSWER HERE |
# | 06/28/92 | Landers, California | 318, 88, 178 | 48, 88, 2 | PUT YOUR ANSWER HERE |
# | 01/17/94 | Northridge, California | 278, 42, 65 | 130, 53, 111 | PUT YOUR ANSWER HERE |
# | 06/09/94 | Northern Bolivia | 302, 10, -60 | 92, 81, -95 | PUT YOUR ANSWER HERE |
# | 01/16/95 | Kobe, Japan | 324, 70, 12 | 239, 79, 160 | PUT YOUR ANSWER HERE |
# | 02/28/01 | Nisqually, Washington | 184, 17,-86 | 360, 73, -91 | PUT YOUR ANSWER HERE |
# | 12/30/15 | Victoria, Canada | 181, 39, -66 | 331, 55, -108 | PUT YOUR ANSWER HERE |
#
# Your task is to plot the beachballs with ObsPy and classify them. Nisqually and Victoria are tricky - they are both deep at around 50 km depth i.e., not what you might initially suspect for subduction zones.
#
# +
# If using Anaconda3 on your machine you can do without this. This is for Azure people.
# #!pip install obspy # TODO Uncomment if on Azure
# -
# Here's an example normal fault
# Plot an example normal-fault focal mechanism with ObsPy.
from obspy.imaging.beachball import beachball
# %matplotlib inline
np1 = [92, 81, -95] # Nodal plane (strike, dip, rake) in degrees
beachball(np1)
import matplotlib.pyplot as plt
# NOTE(review): `beachball` appears to create its own figure; this empty
# 3x3-inch figure is never drawn into — presumably leftover from an
# earlier attempt. Confirm before removing.
fig = plt.figure(figsize=(3, 3), dpi=100)
np1 = [138, 62,-83] # Nodal plane (strike, dip, rake) in degrees
# Write this mechanism to a PNG file instead of displaying it inline.
beachball(np1, outfile='test.png')
| beachballs/beachballs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 420-A52-SF - Algorithmes d'apprentissage supervisé - Hiver 2020 - Spécialisation technique en Intelligence Artificielle<br/>
# MIT License - Copyright (c) 2020 <NAME>
# <br/>
# 
# <br/>
# **Objectif:** cette séance de travaux pratiques a pour objectif la recherche des meilleurs hyperparamètres appliqués à l'ensemble des algorithmes vus en cours jusqu'à maintenant. Le jeu de données utilisée sera **Titanic**
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# ## Exercice 1 - Chargement et exploration sommaire des données
# #### Conversion des variables `embarked` et `sex`
# #### Vérification de la proportion des classes positives (Survived) et négatives (Died)
# #### Imputation des valeurs manquantes
# Les valeurs manquantes seront imputées pour l'exercice pour simplifier
# #### Préparation du jeu de test
# ## Exercice 2 - Recherche sur grille
# ### 2-1 - Régression logistique
# [class sklearn.linear_model.LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='lbfgs', max_iter=100, multi_class='auto', verbose=0, warm_start=False, n_jobs=None, l1_ratio=None)](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
# #### Vérification du meilleur score
# #### Aire sous la courbe
# #### Courbe ROC
# ### 2-2 - K plus proches voisins
# [class sklearn.neighbors.KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None, **kwargs)](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)
# #### Aire sous la courbe
# #### Courbe ROC
# ## Exercice 3 - Recherche aléatoire
# ### 3-1 - Arbres de décision
# [class sklearn.tree.DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort='deprecated', ccp_alpha=0.0)](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html)
# #### Aire sous la courbe
# #### Courbe ROC
# ### 3-2 - Bagging
# [class sklearn.ensemble.BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0)](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html)
# #### Aire sous la courbe
# #### Courbe ROC
# ## Exercice 4 - Hyperopt avec Forêts aléatoires et gradient boosting
# ### 4-1 - Gradient boosting
# #### Définition de l'hyperespace
# #### Fonction objective
# #### Lancement de l'optimisation
# #### Meilleurs paramètres
# #### Réentraînement du gradient boosting avec les meilleurs hyperparamètres
# #### Aire sous la courbe
# #### Courbe ROC
# ### 4-2 - Gradient boosting et forêts aléatoires (optionnel)
# ## Exercice 5 - Performances sur le jeu de tests
# #### Aire sous la courbe
# #### Courbe ROC
| nbs/17-optimisation-des-hyperparametres-101/17-tp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lists in Python
# Basic list creation, printing, and indexing examples.
list1 = [1, 2, 3, 4]
print(list1)
list1
list1[0]
list1[1]
list1[2]
list1[3]
# Negative indices count from the end of the list.
list1[-1]
list1[-2]
list1[-3]
# BUG FIX: was `list[1:3]`, which subscripts the built-in `list` type
# (silently producing a typing alias, not a slice of the data) instead
# of slicing the `list1` variable.
list1[1:3]
list1[1:]
list2 = [1, 2, 1, 2, 3]
print(list2)
len(list1)
len(list2)
# +
# Several lists of different element types can coexist in one cell.
l1 = [1, 2, 3]
l2 = ['ABC', 'BCD', 'CDE']
l3 = [True, False, True]
print(l1, l2, l3)
# -
# A single list can mix element types freely.
l1 = [1, 'ABC', True]
print(l1)
print(type(l1))
l1 = [1, 2, 3]
l1
# Lists are mutable: assign by index, delete by index.
l1[2] = 4
l1
del l1[0]
l1
# NOTE: l1 is now [2, 4] (two elements), so this out-of-range access
# deliberately raises IndexError to demonstrate bounds checking.
l1[4]
l1
l1.append(3)
# Membership testing with `in`.
2 in l1
1 in l1
for i in l1:
    print(i)
# Repetition and concatenation produce new lists.
l1 * 4
l1, l2
l1 + l2
# extend/sort/reverse mutate the list in place and return None.
l1.extend([1, 5])
l1
l1.sort()
l1
l1.reverse()
l1
# Deleting the name unbinds it entirely; referencing it afterwards
# deliberately raises NameError.
del l1
l1
| Section02/01_Lists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.11 64-bit (''PythonData'': conda)'
# name: python3
# ---
# # Billboard Hot 100
# ## Import Dependencies
#Importing dependencies for web scrapping
from bs4 import BeautifulSoup
import requests
import pandas as pd
# ## Web Scrapping - Hot 100
from web_scraping import web_scraping
# running the web scrapping function
top_100_scrapped_df = web_scraping()
# # Spotify Data Search
# ## Spotify for Developers - Using Spotify Library
# +
"""
Link to spotipy library docs
https://spotipy.readthedocs.io/en/2.19.0/#
"""
#Dependencies
import spotipy as sp
from spotipy.oauth2 import SpotifyClientCredentials
import numpy as np
#Importing Spotify client_ID and secret_code
from keys import client_id, client_secret
# +
#adding empty columns to the 'top_100_scrapped_df' to indicate search info
# Empty strings mark "not yet searched"; they are converted to NaN and
# dropped later if the Spotify search fails.
top_100_scrapped_df['track_spotify_ID']=""
top_100_scrapped_df['artist_spotify_ID']=""
top_100_scrapped_df['song_duration[ms]']=""
top_100_scrapped_df['song_release_date']=""
top_100_scrapped_df['spotify_popularity']=""
top_100_scrapped_df['album_name'] =""
top_100_scrapped_df['album_type']=""
#Displaying main table
top_100_scrapped_df
# -
#Authenticating requests using Client Credential Flow
scope = "user-library-read"
# NOTE(review): this rebinds the module alias `sp` (spotipy) to a client
# instance, shadowing the library import — confirm nothing later needs
# the module object itself.
sp = sp.Spotify(client_credentials_manager=SpotifyClientCredentials(client_id=client_id, client_secret=client_secret))
# ## Search function - Extracting song, artist and album data from Spotify
# +
# function to extract data for any track using search
# Note: The function below extracts song_title from the a table with column_header called 'song_title'
from search_function import *
# +
# Iterating search function for each row of the top_100_scrapped_df
for index,row in top_100_scrapped_df.iterrows():
    top_100_scrapped_df.loc[index,:] = my_search(row)
# creating output csv for main table
top_100_scrapped_df.to_csv('Output_CSV/top_100_scrapped_df.csv', index=False)
# -
# displaying top_100_scrapped_df
top_100_scrapped_df
# +
# dropping songs for failed searchs
# Failed searches left empty strings; convert them to NaN so dropna
# can remove those rows by the missing track ID.
nan_value = float("NaN")
top_100_cleaned_df = top_100_scrapped_df.replace("", nan_value)
top_100_cleaned_df = top_100_cleaned_df.dropna(subset = ["track_spotify_ID"])
print(f'\nSongs with no search results have been dropped.\nDataFrame contains {len(top_100_cleaned_df)} rows')
top_100_cleaned_df
# -
# ## Creating DataFrames - song_df, artist_df, album_df, concert_df
# ### Inspecting and cleaning the collected data
# +
# ERD Diagram for reference
# -
# adding song_ID
# Each song is unique, hence will have a unique song ID
songs_limit = len(top_100_cleaned_df)+1
song_ID = range(1,songs_limit)
top_100_cleaned_df['song_ID'] = song_ID
# +
# Grouping by artist spotify ID
artists = top_100_cleaned_df.groupby('artist_spotify_ID')['artist_name'].count()
print(f'There are a total of top {len(artists)} artists in the top 100 songs \nEach artist will be assigned a unique artist_ID')
# +
# creating artist_df
artist_df = top_100_cleaned_df[['artist_name', 'artist_spotify_ID']]
# One row per artist: drop repeat appearances by Spotify ID.
artist_grouped_df = artist_df.drop_duplicates(['artist_spotify_ID'])
# displaying artist_df
artist_grouped_df
# -
# generating csv file for artist table
artist_grouped_df.to_csv('Output_CSV/artist_df.csv', index = False)
# +
# Grouping by album_name
albums = top_100_cleaned_df.groupby('album_name')['album_name'].count()
print(f'There are a total of top {len(albums)} albums in the top 100 songs \nEach album will be assigned a unique album_ID')
# +
# creating album_df
album_df = top_100_cleaned_df[['album_name', 'album_type']]
# One row per album: drop repeat appearances by album name.
album_grouped_df = album_df.drop_duplicates(['album_name'])
# displaying album_df
album_grouped_df
# -
# creating output csv for album table
album_grouped_df.to_csv('Output_CSV/album_df.csv', index=False)
# +
# creating song_df with foreign keys to the artist and album tables
song_df = top_100_cleaned_df[['song_ID', 'song_title', 'album_name','track_spotify_ID', 'artist_spotify_ID', 'song_ranking', 'spotify_popularity', 'song_duration[ms]', 'song_release_date']]
# generating csv file for song_table
song_df.to_csv('Output_CSV/song_df.csv', index = False)
# displaying song_table
song_df
# -
# -
# # Concert Data
import json
from api_keys import BIT_api
import pprint
import os
# +
# converting artist_names to list
artist_list = artist_grouped_df['artist_name'].tolist()
# printing the list
artist_list
# -
# importing event_api function from events_api.py
from events_api import *
# +
# generating event_df from the Bandsintown-style events API
initial_event_df = event_api(artist_list)
initial_event_df
# -
#adding 'artist_spotify_ID' column by joining on artist name
merged_df = initial_event_df.merge(top_100_cleaned_df, on ='artist_name', how = 'inner')
event_df = merged_df[['artist_spotify_ID', 'api_artist', 'venue', 'country','location','datetime']]
# printing event_df
event_df
# +
# generating csv file for concert_table
# NOTE(review): the CSV is written before duplicates are dropped below,
# so the file may contain duplicate rows — confirm intent.
event_df.to_csv('Output_CSV/event_df.csv')
# displaying concert table
event_df = event_df.drop_duplicates()
event_df
# -
# # SQL Alchemy ORM
from sqlalchemy import create_engine
from sqlalchemy.engine import URL
from sqlalchemy.dialects import postgresql
from urllib.parse import quote_plus as urlquote
from credentials import *
import datetime as dt
# +
# Build the PostgreSQL connection URL from imported credentials.
connection_url = URL.create(
    drivername = "postgresql",
    username = db_user,
    # NOTE(review): `<PASSWORD>` is a redacted placeholder left by
    # sanitization — restore the real credential (e.g. the password
    # variable from credentials.py, URL-quoted) before running.
    password = <PASSWORD>,
    host = "localhost",
    port = 5432,
    database = "music_ddl_create_table",
)
engine = create_engine(connection_url)
# -
# Reflect postgresql database
from sqlalchemy import MetaData
metadata_obj = MetaData()
metadata_obj.reflect(bind=engine)
# Handles for the four tables created by the DDL script.
artist = metadata_obj.tables["artist"]
songs = metadata_obj.tables["songs"]
album = metadata_obj.tables["album"]
concert = metadata_obj.tables["concert"]
# Load each DataFrame into its reflected table with PostgreSQL upsert
# semantics.  The four copy-pasted statement blocks are collapsed into
# one helper; behavior per table is unchanged.
def upsert_dataframe(table, frame, key_column):
    """Insert `frame` rows into `table`; on conflict with `key_column`,
    update every non-key column (PostgreSQL ON CONFLICT DO UPDATE)."""
    insert_statement = postgresql.insert(table).values(frame.to_dict(orient='records'))
    upsert_statement = insert_statement.on_conflict_do_update(
        index_elements=[key_column],
        set_={c.key: c for c in insert_statement.excluded if c.key != key_column})
    engine.execute(upsert_statement)

# Upsert: artist
upsert_dataframe(artist, artist_grouped_df, 'artist_spotify_ID')
# Upsert: songs
upsert_dataframe(songs, song_df, 'song_ID')
# Upsert: album
upsert_dataframe(album, album_grouped_df, 'album_name')
# Upsert: concert
upsert_dataframe(concert, event_df, 'artist_spotify_ID')
print(f"ETL job completed at {dt.datetime.now()}")
| Data sources/ETL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Microservice Tests
#
# Run:
# ```
# docker run -it --rm -p 5000:5000 seldonio/identity_model_rest:0.1
# docker run -it --rm -p 5001:5001 -e PREDICTIVE_UNIT_SERVICE_PORT=5001 seldonio/identity_model_grpc:0.1
# ```
# +
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient()
# -
# Smoke-test the REST (port 5000) and gRPC (port 5001) microservices
# started by the docker commands in the header cell.
p = sc.microservice(method="predict", transport="rest")
print(p)
p = sc.microservice(
    method="predict", transport="grpc", microservice_endpoint="localhost:5001"
)
print(p)
# `aggregate` combines multiple payloads (ndatas) into one request.
p = sc.microservice(method="aggregate", transport="rest", ndatas=2)
print(p)
p = sc.microservice(
    method="aggregate",
    transport="grpc",
    ndatas=2,
    microservice_endpoint="localhost:5001",
)
print(p)
p = sc.microservice(method="predict", transport="rest")
print(p)
# Send reward feedback for the previous request/response pair (REST).
f = sc.microservice_feedback(
    prediction_request=p.request,
    prediction_response=p.response,
    reward=1.0,
    transport="rest",
)
print(f)
p = sc.microservice(
    method="predict", transport="grpc", microservice_endpoint="localhost:5001"
)
print(p)
# Same feedback flow over gRPC.
f = sc.microservice_feedback(
    prediction_request=p.request,
    prediction_response=p.response,
    reward=1.0,
    transport="grpc",
    microservice_endpoint="localhost:5001",
)
print(f)
# Binary, string, and multi-part payload variants over REST.
p = sc.microservice(method="predict", transport="rest", bin_data=b"123")
print(p)
p = sc.microservice(method="predict", transport="rest", str_data="123")
print(p)
p = sc.microservice(method="aggregate", transport="rest", datas=["123", "456"])
print(p)
# The same payload variants over gRPC.
p = sc.microservice(
    method="predict",
    transport="grpc",
    microservice_endpoint="localhost:5001",
    bin_data=b"123",
)
print(p)
p = sc.microservice(
    method="predict",
    transport="grpc",
    microservice_endpoint="localhost:5001",
    str_data="123",
)
print(p)
p = sc.microservice(
    method="aggregate",
    transport="grpc",
    microservice_endpoint="localhost:5001",
    datas=["123", "456"],
)
print(p)
# +
# NOTE(review): `inspect` is imported but never used in this cell.
import inspect
class Myclass(object):
    """Toy class used to contrast instance attributes with call-frame
    locals."""
    def __init__(self, a=1, b=2):
        self.a = a
        self.b = b
    def f1(self, c=2, d=4):
        # vars(self) shows the instance's attribute dict; vars() with no
        # argument shows the local names of this call frame (self, c, d).
        print(vars(self))
        print(vars())
x = Myclass()
x.f1()
# -
| testing/notebooks/microservice_tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
from tqdm.notebook import tqdm, trange
# Browser-like headers so grist.org serves the normal HTML pages.
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
referer = 'https://www.resilience.org/latest-articles/'
headers = {'User-Agent': user_agent, 'referer':referer}
topic_page = 'https://grist.org/all-topics/'
response = requests.get(topic_page, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
topics = ['https://grist.org' + e['href'] for e in soup.find_all('a', {'class':'topic-listing__topic-link'})]
# Remove some topics that don't work
# NOTE(review): each pop() shifts the later indices, so the repeated and
# non-monotonic values are position-dependent; these hard-coded indices
# will silently remove the wrong topics if the site's topic list changes.
topics.pop(2)
topics.pop(13)
topics.pop(14)
topics.pop(14)
topics.pop(27)
topics.pop(31)
topics.pop(31)
# Crawl every topic's paginated listing and collect article URLs.
article_links = []
for topic in tqdm(topics):
    response = requests.get(topic, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    # The last pagination entry holds the total page count.
    last_page = int(soup.find_all('li', {'class':'pagination__page'})[-1].text \
        .replace("\n", "").replace(",", ""))
    for page in trange(1, last_page+1):
        url = topic + 'page/{}/'.format(page)
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        new_links = [e['href'] for e in soup.find_all('a', {'class':'tease__link'})]
        article_links.extend(new_links)
# Articles appear under several topics; deduplicate before scraping.
article_links_unique = list(set(article_links))
print(len(article_links))
print(len(article_links_unique))
def get_article_data(article):
    """Fetch one Grist article and scrape its metadata and body text.

    Returns a dict with 'url' plus whichever of title, subtitle, author,
    date, and text could be found; missing elements are omitted.
    """
    art_dict = {'url': article}
    response = requests.get(article, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    # soup.find() returns None when an element is absent, so accessing
    # .text / .find_all on the result raises AttributeError; every field
    # is therefore optional.  BUG FIX: the previous bare `except:`
    # clauses also swallowed KeyboardInterrupt/SystemExit — narrowed to
    # AttributeError.
    try:
        art_dict['title'] = soup.find('h1', {'class': 'topper-headings__title'}).text \
            .replace("\n", "")
    except AttributeError:
        pass
    try:
        art_dict['subtitle'] = soup.find('h2', {'class': 'topper-headings__subtitle'}).text \
            .replace("\n", "")
    except AttributeError:
        pass
    try:
        art_dict['author'] = soup.find('span', {'class': 'contributor-info__name'}).text \
            .replace("\n", "")
    except AttributeError:
        pass
    try:
        art_dict['date'] = soup.find('dd', {'class': 'article-meta__item-value'}).text
    except AttributeError:
        pass
    try:
        # Join all paragraph texts of the article body into one string;
        # may legitimately be '' when the body holds no <p> elements.
        art_dict['text'] = " ".join([p.text for p in soup.find('div', {'class': 'article-body js-hang-punc'})
                                     .find_all('p')])
    except AttributeError:
        pass
    return art_dict
# +
# Scrape every unique article and collect the results into a DataFrame.
grist = []
for article in tqdm(article_links_unique):
    grist.append(get_article_data(article))
gr = pd.DataFrame(grist)
# -
len(grist)
gr.head()
# Persist the scraped corpus for downstream analysis.
gr.to_pickle("../Data/Grist/grist.pkl")
| Programs/Grist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/trainsn/cuda-learning/blob/master/udacity_cs344_hw6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="hse6gSyUS5ka" colab_type="code" outputId="b5475249-f860-4a01-a4be-acbae77397ce" colab={"base_uri": "https://localhost:8080/", "height": 790}
# Homework 6 for Udacity CS344 Course, Intro to Parallel Programming
# clone the code repo,
# !git clone https://github.com/depctg/udacity-cs344-colab
# !pip install git+git://github.com/depctg/nvcc4jupyter.git
# load cuda plugin
# %config NVCCPluginV2.static_dir = True
# %config NVCCPluginV2.relative_dir = "udacity-cs344-colab/src/HW6"
# %load_ext nvcc_plugin
# change to work directory, generate makefiles
# !mkdir udacity-cs344-colab/build
# %cd udacity-cs344-colab/build
# !cmake ../src
# + id="3vA0JP15TORh" colab_type="code" outputId="e9ed6da3-e540-469e-a7c6-d243846592c7" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %%cuda --name student_func.cu
//Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "utils.h"
#include <algorithm>
// get this thread's 2d pixel position from block/thread indices
__device__
int2 get2dPos() {
    return make_int2(
        blockIdx.x * blockDim.x + threadIdx.x,
        blockIdx.y * blockDim.y + threadIdx.y
    );
}
// check whether a value is within the image bounds
// NOTE: negative coordinates are also rejected because the signed int
// converts to a huge size_t in the comparison.
__device__
bool withinBounds(const int x, const int y, const size_t numRowsSource, const size_t numColsSource) {
    return ((x < numColsSource) && (y < numRowsSource));
}
// a pixel belongs to the mask when it is not pure white (255,255,255)
__device__
bool masked(uchar4 val) {
    return (val.x != 255 || val.y != 255 || val.z != 255);
}
// flatten a 2d (x, y) coordinate to a row-major 1d index
__device__
int getm(int x, int y, size_t numColsSource) {
    return y*numColsSource + x;
}
// Classify every masked pixel of the source image as interior (all of
// its in-bounds 4-neighbors are also masked) or border (masked, but at
// least one neighbor is not).  Writes 0/1 predicate maps consumed by
// the Jacobi kernel.  One thread per pixel.
__global__
void maskPredicateKernel(
    const uchar4* const d_sourceImg,
    int* d_borderPredicate,
    int* d_interiorPredicate,
    const size_t numRowsSource,
    const size_t numColsSource) {
    const int2 p = get2dPos();
    const int m = getm(p.x, p.y, numColsSource);
    if(!withinBounds(p.x, p.y, numRowsSource, numColsSource))
        return;

    // BUG FIX: zero both predicates for EVERY in-bounds pixel, not only
    // for masked ones.  Previously pixels outside the mask kept whatever
    // garbage cudaMalloc left in the buffers, and the Jacobi kernel
    // reads the predicates for all pixels.
    d_interiorPredicate[m] = 0;
    d_borderPredicate[m] = 0;

    // run through each pixel and determine if it's
    // on the border, or in the interior of the mask
    if(masked(d_sourceImg[m])) {
        int inbounds = 0;   // how many of the 4 neighbors are in-bounds
        int interior = 0;   // how many of those neighbors are masked
        if (withinBounds(p.x, p.y+1, numRowsSource, numColsSource)) {
            inbounds++;
            if(masked(d_sourceImg[getm(p.x, p.y+1, numColsSource)]))
                interior++;
        }
        if (withinBounds(p.x, p.y-1, numRowsSource, numColsSource)) {
            inbounds++;
            if(masked(d_sourceImg[getm(p.x, p.y-1, numColsSource)]))
                interior++;
        }
        if (withinBounds(p.x+1, p.y, numRowsSource, numColsSource)) {
            inbounds++;
            if(masked(d_sourceImg[getm(p.x+1, p.y, numColsSource)]))
                interior++;
        }
        if (withinBounds(p.x-1, p.y, numRowsSource, numColsSource)) {
            inbounds++;
            if(masked(d_sourceImg[getm(p.x-1, p.y, numColsSource)]))
                interior++;
        }
        // if all in-bounds neighbors are masked, the pixel is interior;
        // if only some are, it is a border pixel
        if(inbounds == interior) {
            d_interiorPredicate[m] = 1;
        } else if (interior > 0) {
            d_borderPredicate[m] = 1;
        }
    }
}
// Split an interleaved uchar4 RGBA image into three float channel
// buffers (alpha is dropped).  One thread per pixel.
__global__
void separateChannelsKernel(
    const uchar4* const inputImageRGBA,
    float* const redChannel,
    float* const greenChannel,
    float* const blueChannel,
    size_t numRows,
    size_t numCols)
{
    const int2 p = get2dPos();
    const int m = getm(p.x, p.y, numCols);
    // Guard threads that fall outside the image.
    if(!withinBounds(p.x, p.y, numRows, numCols))
        return;
    redChannel[m] = (float)inputImageRGBA[m].x;
    greenChannel[m] = (float)inputImageRGBA[m].y;
    blueChannel[m] = (float)inputImageRGBA[m].z;
}
// Pack three float channel buffers back into an interleaved uchar4
// image; the alpha component is left untouched.  One thread per pixel.
__global__
void recombineChannelsKernel(
    uchar4* outputImageRGBA,
    float* const redChannel,
    float* const greenChannel,
    float* const blueChannel,
    size_t numRows,
    size_t numCols)
{
    const int2 p = get2dPos();
    const int m = getm(p.x, p.y, numCols);
    if(!withinBounds(p.x, p.y, numRows, numCols))
        return;
    // BUG FIX: cast through unsigned char, not (signed) char.  Channel
    // values lie in [0, 255]; converting a float above 127 to a signed
    // char is undefined behavior in C++, and the uchar4 fields are
    // unsigned char anyway.
    outputImageRGBA[m].x = (unsigned char)redChannel[m];
    outputImageRGBA[m].y = (unsigned char)greenChannel[m];
    outputImageRGBA[m].z = (unsigned char)blueChannel[m];
}
// One Jacobi iteration of the discrete Poisson equation.  For each
// interior pixel the new estimate is (a + b + c) / d, where:
//   a = sum of current guesses (d_in) at interior neighbors,
//   b = sum of destination-image values at border neighbors,
//   c = sum of source-image gradients to all in-bounds neighbors,
//   d = number of in-bounds 4-neighbors.
// Non-interior pixels simply copy the destination image.
__global__
void jacobiKernel(
    float* d_in,                     // current guess (previous iteration)
    float* d_out,                    // next guess (written here)
    const int* d_borderPredicate,    // 1 where pixel is on the mask border
    const int* d_interiorPredicate,  // 1 where pixel is mask interior
    float* d_source,                 // source image channel
    float* d_dest,                   // destination image channel
    size_t numRows,
    size_t numCols)
{
    const int2 p = get2dPos();
    const int m = getm(p.x, p.y, numCols);
    if(!withinBounds(p.x, p.y, numRows, numCols))
        return;
    // calculate these values as indicated in the videos
    int lm;  // linear index of the neighbor currently examined
    if(d_interiorPredicate[m]==1) {
        float a = 0.f, b=0.f, c=0.0f, d=0.f;
        float sourceVal = d_source[m];
        // Accumulate over the four 4-connected neighbors
        // (down, up, right, left).
        if(withinBounds(p.x, p.y+1, numRows, numCols)) {
            d++;
            lm = getm(p.x, p.y+1, numCols);
            if(d_interiorPredicate[lm]==1) {
                a += d_in[lm];
            } else if(d_borderPredicate[lm]==1) {
                b += d_dest[lm];
            }
            c += (sourceVal-d_source[lm]);
        }
        if(withinBounds(p.x, p.y-1, numRows, numCols)) {
            d++;
            lm = getm(p.x, p.y-1, numCols);
            if(d_interiorPredicate[lm]==1) {
                a += d_in[lm];
            } else if(d_borderPredicate[lm]==1) {
                b += d_dest[lm];
            }
            c += (sourceVal-d_source[lm]);
        }
        if(withinBounds(p.x+1, p.y, numRows, numCols)) {
            d++;
            lm = getm(p.x+1, p.y, numCols);
            if(d_interiorPredicate[lm]==1) {
                a += d_in[lm];
            } else if(d_borderPredicate[lm]==1) {
                b += d_dest[lm];
            }
            c += (sourceVal-d_source[lm]);
        }
        if(withinBounds(p.x-1, p.y, numRows, numCols)) {
            d++;
            lm = getm(p.x-1, p.y, numCols);
            if(d_interiorPredicate[lm]==1) {
                a += d_in[lm];
            } else if(d_borderPredicate[lm]==1) {
                b += d_dest[lm];
            }
            c += (sourceVal-d_source[lm]);
        }
        // Clamp the new estimate to the valid pixel range [0, 255].
        d_out[m] = min(255.f, max(0.0, (a + b + c)/d));
    } else {
        // Outside the interior the blended image equals the destination.
        d_out[m] = d_dest[m];
    }
}
/**
 * Poisson ("seamless") blending of h_sourceImg into h_destImg.
 *
 * Pipeline: build mask border/interior predicates -> split both images into
 * float R/G/B channels -> run 800 Jacobi iterations per channel -> recombine
 * the relaxed channels with the destination image into h_blendedImg.
 * All intermediate buffers live in GPU global memory.
 */
void your_blend(const uchar4* const h_sourceImg, //IN
                const size_t numRowsSource, const size_t numColsSource,
                const uchar4* const h_destImg, //IN
                uchar4* const h_blendedImg) //OUT
{
    // first push the dest and source onto the gpu
    size_t imageSize = numRowsSource*numColsSource*sizeof(uchar4);
    uchar4* d_sourceImg;
    uchar4* d_destImg;
    uchar4* d_finalImg;
    checkCudaErrors(cudaMalloc(&d_sourceImg, imageSize));
    checkCudaErrors(cudaMalloc(&d_destImg, imageSize));
    checkCudaErrors(cudaMalloc(&d_finalImg, imageSize));
    checkCudaErrors(cudaMemcpy(d_sourceImg, h_sourceImg, imageSize, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_destImg, h_destImg, imageSize, cudaMemcpyHostToDevice));
    // allocate predicate stuff (one int per pixel for border / interior masks)
    size_t predicateSize = numRowsSource*numColsSource*sizeof(int);
    int* d_borderPredicate;
    int* d_interiorPredicate;
    checkCudaErrors(cudaMalloc(&d_borderPredicate, predicateSize));
    checkCudaErrors(cudaMalloc(&d_interiorPredicate, predicateSize));
    // make reusable dims; the +1 block per axis covers the remainder pixels
    // (kernels bounds-check, so overshoot threads are harmless)
    const dim3 blockSize(32, 32);
    const dim3 gridSize(numColsSource/blockSize.x + 1, numRowsSource/blockSize.y + 1);
    /**
        1) Compute a mask of the pixels from the source image to be copied
        The pixels that shouldn't be copied are completely white, they
        have R=255, G=255, B=255. Any other pixels SHOULD be copied.
    **/
    /**
        2) Compute the interior and border regions of the mask. An interior
        pixel has all 4 neighbors also inside the mask. A border pixel is
        in the mask itself, but has at least one neighbor that isn't.
    **/
    // generate the predicates
    maskPredicateKernel<<<gridSize, blockSize>>>(
        d_sourceImg,
        d_borderPredicate,
        d_interiorPredicate,
        numRowsSource,
        numColsSource
    );
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    /**
        3) Separate out the incoming image into three separate channels
    **/
    size_t floatSize = numRowsSource*numColsSource*sizeof(float);
    float *d_sourceImgR, *d_sourceImgG, *d_sourceImgB;
    float *d_destImgR, *d_destImgG, *d_destImgB;
    checkCudaErrors(cudaMalloc(&d_sourceImgR, floatSize));
    checkCudaErrors(cudaMalloc(&d_sourceImgG, floatSize));
    checkCudaErrors(cudaMalloc(&d_sourceImgB, floatSize));
    checkCudaErrors(cudaMalloc(&d_destImgR, floatSize));
    checkCudaErrors(cudaMalloc(&d_destImgG, floatSize));
    checkCudaErrors(cudaMalloc(&d_destImgB, floatSize));
    separateChannelsKernel<<<gridSize, blockSize>>>(
        d_sourceImg,
        d_sourceImgR,
        d_sourceImgG,
        d_sourceImgB,
        numRowsSource,
        numColsSource);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    separateChannelsKernel<<<gridSize, blockSize>>>(
        d_destImg,
        d_destImgR,
        d_destImgG,
        d_destImgB,
        numRowsSource,
        numColsSource);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    /**
        4) Create two float(!) buffers for each color channel that will
        act as our guesses. Initialize them to the respective color
        channel of the source image since that will act as our intial guess.
    **/
    // allocate floats; the "1" buffers need no init -- jacobiKernel writes
    // every pixel of its output before anything reads it
    float *d_r0, *d_r1, *d_g0, *d_g1, *d_b0, *d_b1;
    checkCudaErrors(cudaMalloc(&d_r0, floatSize));
    checkCudaErrors(cudaMalloc(&d_r1, floatSize));
    checkCudaErrors(cudaMalloc(&d_b0, floatSize));
    checkCudaErrors(cudaMalloc(&d_b1, floatSize));
    checkCudaErrors(cudaMalloc(&d_g0, floatSize));
    checkCudaErrors(cudaMalloc(&d_g1, floatSize));
    checkCudaErrors(cudaMemcpy(d_r0, d_sourceImgR, floatSize, cudaMemcpyDeviceToDevice));
    checkCudaErrors(cudaMemcpy(d_g0, d_sourceImgG, floatSize, cudaMemcpyDeviceToDevice));
    checkCudaErrors(cudaMemcpy(d_b0, d_sourceImgB, floatSize, cudaMemcpyDeviceToDevice));
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    /**
        5) For each color channel perform the Jacobi iteration described
        above 800 times.
    **/
    // No per-iteration sync is needed: all launches go to the default stream,
    // which executes kernels in issue order. Each swap makes the "0" pointer
    // name the most recent result.
    for(int i = 0; i < 800; i++) {
        jacobiKernel<<<gridSize, blockSize>>>(
            d_r0,
            d_r1,
            d_borderPredicate,
            d_interiorPredicate,
            d_sourceImgR,
            d_destImgR,
            numRowsSource,
            numColsSource
        );
        std::swap(d_r0, d_r1);
        jacobiKernel<<<gridSize, blockSize>>>(
            d_g0,
            d_g1,
            d_borderPredicate,
            d_interiorPredicate,
            d_sourceImgG,
            d_destImgG,
            numRowsSource,
            numColsSource
        );
        std::swap(d_g0, d_g1);
        jacobiKernel<<<gridSize, blockSize>>>(
            d_b0,
            d_b1,
            d_borderPredicate,
            d_interiorPredicate,
            d_sourceImgB,
            d_destImgB,
            numRowsSource,
            numColsSource
        );
        std::swap(d_b0, d_b1);
    }
    /**
        6) Create the output image by replacing all the interior pixels
        in the destination image with the result of the Jacobi iterations.
        Just cast the floating point values to unsigned chars since we have
        already made sure to clamp them to the correct range.
    **/
    // After the final swap, d_r0/d_g0/d_b0 hold the latest Jacobi output.
    recombineChannelsKernel<<<gridSize, blockSize>>>(
        d_finalImg,
        d_r0,
        d_g0,
        d_b0,
        numRowsSource,
        numColsSource);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    // copy device final image to host
    checkCudaErrors(cudaMemcpy(h_blendedImg, d_finalImg, imageSize, cudaMemcpyDeviceToHost));
    // cleanup
    checkCudaErrors(cudaFree(d_sourceImg));
    checkCudaErrors(cudaFree(d_destImg));
    checkCudaErrors(cudaFree(d_finalImg));
    checkCudaErrors(cudaFree(d_sourceImgR));
    checkCudaErrors(cudaFree(d_sourceImgG));
    checkCudaErrors(cudaFree(d_sourceImgB));
    checkCudaErrors(cudaFree(d_destImgR));
    checkCudaErrors(cudaFree(d_destImgG));
    checkCudaErrors(cudaFree(d_destImgB));
    checkCudaErrors(cudaFree(d_r0));
    checkCudaErrors(cudaFree(d_r1));
    checkCudaErrors(cudaFree(d_g0));
    checkCudaErrors(cudaFree(d_g1));
    checkCudaErrors(cudaFree(d_b0));
    checkCudaErrors(cudaFree(d_b1));
}
# + id="sSAnpiE2nL1T" colab_type="code" outputId="c54627ab-1d95-4346-e696-d491f9cdcf6f" colab={"base_uri": "https://localhost:8080/", "height": 252}
# make the cuda project
# !make HW6
print("\n====== RESULT OF HW6 =======\n")
# !bin/HW6 ../src/HW6/source.png ../src/HW6/blended.gold
# + id="4Zbj4MbVUVxq" colab_type="code" outputId="5b12570e-4fe6-4db6-ff41-6e0286b28516" colab={"base_uri": "https://localhost:8080/", "height": 542}
# plot output images
# Lay out a 2x3 grid: original, (empty), output / reference, difference,
# (empty). The two unused axes are intentionally left blank.
import matplotlib.pyplot as plt
_,ax = plt.subplots(2,3, dpi=150)
ax[0][0].imshow(plt.imread("../src/HW6/source.png"))
ax[0][0].set_title("original")
ax[0][0].grid(False)
ax[0][2].imshow(plt.imread("HW6_output.png"))
ax[0][2].set_title("output")
ax[0][2].grid(False)
ax[1][0].imshow(plt.imread("HW6_reference.png"))
ax[1][0].set_title("reference")
ax[1][0].grid(False)
ax[1][1].imshow(plt.imread("HW6_differenceImage.png"))
ax[1][1].set_title("difference")
ax[1][1].grid(False)
plt.show()
| udacity_cs344_hw6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import syft
import syft.nn as nn
from syft.controller import tensors, models
import imp
imp.reload(syft.controller)
imp.reload(syft.nn)
imp.reload(syft)
import numpy as np
from syft import FloatTensor
import torch
from torch.autograd import Variable
# +
# Build a tiny 2-layer MLP twice -- once with syft FloatTensor autograd and
# once with torch -- starting from identical data so the gradients of the two
# frameworks can be compared side by side.
# NOTE(review): `input` shadows the Python builtin of the same name.
input = FloatTensor([[0,0,1],[0,1.0,1],[1,0,1],[1,1,1]], autograd=True)
target = FloatTensor([[0],[0],[1],[1]]).autograd(True)
grad = FloatTensor([[1],[1],[1],[1]]).autograd(False)  # upstream gradient of ones
np.random.seed(1)  # fixed seed so both frameworks see identical weights
weights1 = FloatTensor(np.random.rand(3,4)).autograd(True)
weights2 = FloatTensor(np.random.rand(4,1)).autograd(True)
# Mirror the same tensors into torch Variables.
input_t = Variable(torch.FloatTensor(input.to_numpy()), requires_grad=True)
target_t = Variable(torch.FloatTensor(target.to_numpy()), requires_grad=True)
weights1_t = Variable(torch.FloatTensor(weights1.to_numpy()), requires_grad=True)
weights2_t = Variable(torch.FloatTensor(weights2.to_numpy()), requires_grad=True)
grad_t = Variable(torch.FloatTensor(grad.to_numpy()))
# -
weights1_t.data
weights2_t.data
# +
# Torch pass: clear stale gradients, forward, backward, one SGD step.
weights1_t.grad = None
weights2_t.grad = None
target_t.grad = None
input_t.grad = None
layer_1_t = input_t.mm(weights1_t).sigmoid()
layer_2_t = layer_1_t.mm(weights2_t).sigmoid()
diff_t = layer_2_t - target_t
loss_t = diff_t ** 2
loss_t.backward(grad_t)
#print(loss_t.sum().data[0])
weights1_t.data -= weights1_t.grad.data
weights2_t.data -= weights2_t.grad.data
weights1_t.grad.data
# -
weights2_t.grad.data
# +
# Same pass with syft tensors; gradients should match the torch run above.
layer_1 = input.mm(weights1).sigmoid()
layer_2 = layer_1.mm(weights2).sigmoid()
diff = (layer_2 - target)
loss = diff ** 2 # Mean Squared Error Loss
loss.backward(grad)
weights1 -= weights1.grad()
weights2 -= weights2.grad()
# -
weights1.grad()
weights2.grad()
| notebooks/demos/MLP Using Autograd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="1D5p3IcXbD0T" colab_type="text"
# This notebook converts the unicode labels to tokens, pads the converted labels with `<start>`, `<stop>` and the `<pad>` tokens.
# + id="KEQLEgDIZpmm" colab_type="code" outputId="a7ba573c-4315-4f32-a2b6-24c25d8779e0" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + id="aqUTtsgBKprN" colab_type="code" colab={}
import unicodedata
import tarfile
import pandas as pd
from tqdm import tqdm
# + id="SblPRD-4ZqQn" colab_type="code" colab={}
# tarfile_loc = '/content/drive/My Drive/Project STation/TD/recognition_real_dataset/cropped_data.tar'
tarfile_loc = '/content/drive/My Drive/Project STation/TD/synthetic_cropped.tar.gz'
with tarfile.open(tarfile_loc) as tarf:
tarf.extractall()
# + id="ZGd-3KKKdPHz" colab_type="code" colab={}
# These ranges are in hexadecimal format
def _unicode_to_idx_map(lower_range, upper_range, start_index):
l_range = int(lower_range, 16)
u_range = int(upper_range, 16)
name2idx = dict()
idx = start_index
for char_code in range(l_range, u_range + 1):
code_point = chr(char_code)
char_name = unicodedata.name(code_point)
name2idx[char_name] = idx
idx = idx + 1
return name2idx, idx
# + id="JQLE1NSwmh-0" colab_type="code" colab={}
start_index = 3 ## Start from 3: the first three indices are reserved for the <start>, <stop> and <PAD> tokens
# Build one name->index table covering the Devanagari code points used by the
# labels: the main block, the digits, OM, and the additional consonants.
name2idx, idx = _unicode_to_idx_map("0900", "094D", start_index)
digits, idx = _unicode_to_idx_map("0966", "096F", idx)
om, idx = _unicode_to_idx_map("0950", "0950", idx)
additional, idx = _unicode_to_idx_map("0958", "095F", idx)
# Merge the partial tables into the single lookup consumed by get_labels().
name2idx.update(digits)
name2idx.update(om)
name2idx.update(additional)
# + id="5OPBKDGCcUdX" colab_type="code" colab={}
def get_labels(text, name2idx):
    """Translate *text* into a list of integer labels, one per character.

    Raises KeyError when a character's Unicode name is absent from
    *name2idx*; callers rely on this to skip unsupported lines.
    """
    return [name2idx[unicodedata.name(char)] for char in text]
# + id="f_I-fudlagnO" colab_type="code" colab={}
# annotation_file_loc = '/content/cropped_data/annotations.txt'
annotation_file_loc = '/content/cropped_dir/annotation.txt'
# First pass over the annotation file: tokenize every line just to find the
# longest label sequence, so all rows can later be padded to the same length.
with open(annotation_file_loc) as fp:
    # find the max length of the data labels for padding
    labels = list()
    for line in fp.readlines():
        hindi_text = line.split('\t')[1].strip()
        try:
            labels.append(get_labels(hindi_text, name2idx))
        except KeyError:
            # line contains a character outside the Devanagari tables -- skip
            pass
    max_len = max([len(label) for label in labels]) + 2 # including start and stop symbols
# + id="fKuKs29MVybs" colab_type="code" outputId="9bd17e11-6dbb-4e1f-faf8-4508a224b5f6" colab={"base_uri": "https://localhost:8080/", "height": 34}
cols = ['name', 'text', 'labels', 'unpadded_length']
# Second pass: tokenize every annotation, wrap it in <start>/<stop> (indices
# 0 and 1) and pad with the <PAD> token (index 2) up to max_len.
# Rows are accumulated in a list and converted once at the end:
# DataFrame.append() inside a loop is O(n^2) and was removed in pandas 2.0.
# A fresh dict per row also avoids the previous pattern of mutating one
# shared `data` dict across iterations.
rows = []
with open(annotation_file_loc) as fp:
    for line in tqdm(fp.readlines()):
        img_path, hindi_text = line.split('\t')[:2]
        img_path = img_path.strip()
        hindi_text = hindi_text.strip()
        label = [0]  ## Initialize with the <start> token
        try:
            label.extend(get_labels(hindi_text, name2idx))
        except KeyError:
            # unsupported character -> drop the whole line
            continue
        label.append(1)  ## End with the <stop> token
        unpadded_length = len(label)
        label.extend([2] * (max_len - len(label)))  # pad to max_len
        rows.append({
            'name': img_path,
            'text': f"<start> {hindi_text} <stop>",
            'labels': label,
            'unpadded_length': unpadded_length,
        })
annotation_df = pd.DataFrame(rows, columns=cols)
# + id="QjZNijIbbV54" colab_type="code" outputId="604448fa-0fb0-4eac-a999-82895684adbc" colab={"base_uri": "https://localhost:8080/", "height": 153}
# Sanity-check the first rows, then persist the preprocessed annotations.
print(annotation_df.head())
annotation_df.to_pickle('/content/drive/My Drive/Project STation/TD/annotation_synthetic_preprocessed.pkl')
# + id="nVaufStJWOrv" colab_type="code" colab={}
| project_station_td_preprocess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Notebook Directory
# ### File paths
#
# File paths are stored in the [Global_Parameters](./Global_Parameters.ipynb) Notebook.
# ### Setup and Processing
#
# Data setup and processing are generally handled in the Preprocessing directory. Note that the data pipeline depends on data being downloaded from the [Broad Firehose website](http://gdac.broadinstitute.org/) and organized in a specific file structure.
#
# The code for downloading and organizing these datasets is available in my [CancerData](https://github.com/theandygross/CancerData) repository. The specific notebook doing the download is available [here](https://github.com/theandygross/CancerData/blob/master/Notebooks/Download_From_Firehose.ipynb).
#
# I apologize for the dependency hell, and will try and simplify this in the near future if possible. If you run into issues, please feel free to send me an email.
#
# To initialize the data run the Notebooks in the following order:
#
# ### Imports and Globals
# * [Global_Parameters](Global_Parameters.ipynb) this is used to store all of the hard paths being used throughout the analysis. You should manually edit this file to point to where your data is stored locally.
# * [Imports](./Imports.ipynb) import some commonly used data and functions into the Python environment. This is used to avoid muddying up analysis files with boilerplate imports and functions. I generally load everything in the notebook globally by importing it (this requires first loading the [NotebookImport package](https://github.com/theandygross/NotebookImport)).
# * [GTEX](./GTEX.ipynb) Downloads and handles tissue-specific expression data from the GTEX project.
# ### Initial fraction upregulated screen
#
# * [DX_screen.ipynb](./DX_screen.ipynb) is where the fraction upregulated statistic is calculated.
# * [GSEA_fraction_upregulated_expression](./GSEA_fraction_upregulated_expression.ipynb) GSEA on the rna sequencing datasets.
# * [methylation_upregulated_probe_annotation](./methylation_upregulated_probe_annotation.ipynb) exploration of probe and gene-set annotations for fraction upregulated on methylation450k data.
# ### Proliferation signature and detrended fraction-upregulated
# * [metaPCNA](./metaPCNA.ipynb) calculation of meta-PCNA proliferation score and analysis of proliferation in paired mRNA expression dataset.
# * [switchiness_screen](./switchiness_screen.ipynb) analysis and gene set enrichment for detrended fraction upregulated statistic on mRNA sequencing data.
# * [switchiness_miR](./switchiness_miR.ipynb) analysis for detrended fraction upregulated statistic on microRNA sequencing data.
# * [switchiness_methylation](./switchiness_methylation.ipynb) analysis for detrended fraction upregulated statistic on methylation450k data.
# ### Microarray datasets
# For the microarray data, I manually downloaded the series matrix files from GEO. There are links to each dataset in the notebooks, or you can look them up fairly easily from the accession codes. You are going to want to point to where you store these .txt files using the __MICROARRAY_PATH__ variable, which is set in the [Global_Parameters](Global_Parameters.ipynb) notebook.
# * [microarray_validation_data](./microarray_validation_data.ipynb) reads in and calculates fraction upregulated on all microarray datasets. Assumes data and mapping files are already downloaded and placed in MICROARRAY_PATH.
# * [microarray_validation_aggregation](./microarray_validation_aggregation.ipynb) reads in microarray data from MICROARRAY_STORE and looks at concordance of fraction upregulated in pooled microarray data and pooled TCGA rna-sequencing data.
# * [microarray_validation_aggregation_GABA](./microarray_validation_aggregation_GABA.ipynb) targeted analysis of GABA receptors and GABRD specifically in the validation microarray datasets.
# ### Targeted Followup
# This is where I dig into specific results. There are a bunch of other attempts at this that did not make the cut in the Exploratory folder.
# * [GABA_Receptors](./GABA_Receptors.ipynb) exploration of GABA receptor subunits.
# * [GABA_Receptors_GTEX](./GABA_Receptors_GTEX.ipynb) looks at tissue specific expression of GABA subunits in healthy tissue using the GTEX dataset.
| Notebooks/Index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.0-dev
# language: julia
# name: julia-0.6
# ---
# # Bukdu
#
# * https://github.com/wookay/Bukdu.jl
#
# Bukdu is a web development framework for Julia (http://julialang.org).
#
# It's influenced by Phoenix framework (http://phoenixframework.org).
# +
# Pkg.clone("https://github.com/wookay/Bukdu.jl")
# -
importall Bukdu  # Julia 0.6 syntax (kernel is julia-0.6); removed in 1.0
# Minimal controller; `type` is the pre-1.0 spelling of `mutable struct`.
type WelcomeController <: ApplicationController
end
# Action returning the response body for GET /
index(::WelcomeController) = "Hello Bukdu"
# Route the site root to the index action.
Router() do
    get("/", WelcomeController, index)
end
Bukdu.start(8080)
# open in browser: http://localhost:8080/
#
# Re-define the action in place; the running server serves the new body.
index(::WelcomeController) = render(Markdown, "# Hello Markdown")
# open in browser: http://localhost:8080/
#
index(::WelcomeController) = render(JSON, ["Hello", "JSON"])
# ```sh
# $ curl -v http://localhost:8080/
# > GET / HTTP/1.1
# > Host: localhost:8080
# > User-Agent: curl/7.49.0
# > Accept: */*
# >
# < HTTP/1.1 200 OK
# < Connection: keep-alive
# < Content-Length: 16
# < Content-Type: application/json
# < Date: Fri, 02 Sep 2016 16:30:48
# < Content-Language: en
# < Server: Bukdu (commit 7a56c54) with Julia 0.6.0-dev.446
# ["Hello","JSON"]
# ```
# Install the middleware chain: request logging first, then routing.
Endpoint() do
    plug(Plug.Logger)
    plug(Router)
end
# open in browser: http://localhost:8080/
#
# +
# Bukdu.stop()
| examples/jupyter/Bukdu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Detecting Dataset Drift with whylogs
#
# We will be using data from Kaggle (https://www.kaggle.com/yugagrawal95/sample-media-spends-data) that is packaged with this notebook.
# +
# %matplotlib inline
import datetime
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from whylogs import get_or_create_session
# -
# Read our Media Spend dataset as Pandas dataframe
# Load the dataset, parsing the week column as datetimes up front.
data = pd.read_csv("MediaSpendDataset.csv",
                   parse_dates=["Calendar_Week"], infer_datetime_format=True)
data
# As we can see here, we have advertising and media impressions and views per week for a number of marketing campaigns for some unknown company. Included with this information is sales against those spends.
#
# ## Exploratory Data Analysis
#
# Let's now explore the dataset; we have very little metadata or context.
data.groupby("Calendar_Week").count().T
data.groupby("Division").count().T
# We see that the *Z* division has double the entries than the other divisions.
fig, ax = plt.subplots(figsize=(10, 3))
sns.lineplot(x="Calendar_Week", y="Sales", data=data, ax=ax)
fig, ax = plt.subplots(figsize=(10, 3))
sns.scatterplot(x="Google_Impressions", y="Sales", data=data, ax=ax)
# Let's compare the data from the first month to the last month, which happens to capture differences in transactions prior to and during the COVID-19 global pandemic.
# ## Profiling with whylogs
# Split on 2020-01-01: everything before is "training", everything after "test".
model_date = datetime.datetime(2020, 1, 1)
training_data = data[data["Calendar_Week"] < model_date]
test_data = data[data["Calendar_Week"] >= model_date]
# Log one whylogs profile per split; index 0 = training, index 1 = test.
session = get_or_create_session()
profiles = []
profiles.append(session.log_dataframe(training_data, dataset_timestamp=model_date))
profiles.append(session.log_dataframe(test_data, dataset_timestamp=datetime.datetime.now()))
profiles
# We can compare the data we'll use for training with that in early 2020.
# Training data profile summary
training_summary = profiles[0].flat_summary()["summary"]
training_summary
# Test data profile summary
test_summary = profiles[1].flat_summary()["summary"]
test_summary
# ## Dataset Drift in whylogs Data
#
# We need to understand how the data changes between that used in training and test data. To do so, let's first view one of the many objects in the dataset profile provided by whylogs, a histogram for each feature tracked. We can then inspect the **Overall_Views** feature.
# +
training_histograms = profiles[0].flat_summary()["hist"]
test_histograms = profiles[1].flat_summary()["hist"]
test_histograms["Overall_Views"]
# -
# While we plan to integrate a convenient dataset-shift visualization and analysis API soon, you are always able to access the attributes you need.
#
# We will first define a custom range and bins, then utilize our access to the data sketches' probability mass function. We then visualize these values using Seaborn.
# +
def get_custom_histogram_info(variable, n_bins):
    """Build a binning shared by the training and test profiles of *variable*.

    Uses the min/max recorded in the module-level `training_summary` /
    `test_summary` frames and evaluates each profile's streaming-histogram
    sketch on the shared edges.

    Returns:
        (bins, training_counts, test_counts) -- the bin edges (a range) and
        the PMF values from each profile's histogram sketch.
    """
    min_range = min(training_summary[training_summary["column"]==variable]["min"].values[0],
                    test_summary[test_summary["column"]==variable]["min"].values[0])
    max_range = max(training_summary[training_summary["column"]==variable]["max"].values[0],
                    test_summary[test_summary["column"]==variable]["max"].values[0])
    # Guard against a zero step (range() raises ValueError) when the value
    # span is narrower than n_bins.
    step = max(1, int((max_range - min_range) / n_bins))
    bins = range(int(min_range), int(max_range), step)
    training_counts = np.array(
        profiles[0].columns[variable].number_tracker.histogram.get_pmf(bins[:-1]))
    test_counts = np.array(
        profiles[1].columns[variable].number_tracker.histogram.get_pmf(bins[:-1]))
    return bins, training_counts, test_counts
def plot_distribution_shift(variable, n_bins):
    """Visualization for distribution shift.

    Overlays the training- and test-period histograms of *variable* on the
    shared binning from get_custom_histogram_info so drift is visible.
    """
    bins, training_counts, test_counts = get_custom_histogram_info(variable, n_bins)
    fig, ax = plt.subplots(figsize=(10, 3))
    # weights= reuses the precomputed PMF values instead of raw samples
    sns.histplot(x=bins, weights=training_counts, bins=n_bins,
                 label="Training data", color="teal", alpha=0.7, ax=ax)
    sns.histplot(x=bins, weights=test_counts, bins=n_bins,
                 label="Test data", color="gold", alpha=0.7, ax=ax)
    ax.legend()
    plt.show()
# -
plot_distribution_shift("Overall_Views", n_bins=60)
# While it is quite clear that the distribution in this case differs between the training and test dataset, we will likely need a quantitative measure. You can also use whylogs histogram metrics to calculate dataset shift using a number of metrics: Population Stability Index (PSI), Kolmogorov-Smirnov statistic, Kullback-Lebler divergence (or other f-divergences), and histogram intersection.
#
# ## Kullback-Leibler divergence
#
# This score, often shortened to K-L divergence, is a measure of how one probability distribution is different from a second, reference probability distribution. The K-L divergence can be interpreted as the average difference of the number of bits required for encoding samples of one distribution (*P*) using a code optimized for another (*Q*) rather than one optimized for *P*. KL divergence is not a true statistical metric of spread as it is not symmetric and does not satisfy the triangle inequality.
#
# However, this value has become quite popular and easy to calculate in Python. We'll use the implementation in `scikit-learn`.
# +
from sklearn.metrics import mutual_info_score
def calculate_kl_divergence(variable, n_bins):
    """Return D_KL(training || test) for *variable* over a shared binning.

    NOTE(review): the previous implementation called
    sklearn.metrics.mutual_info_score, which computes mutual information
    between two label assignments -- not the K-L divergence between two
    distributions. Here both histograms are normalized and the divergence
    is computed directly; a small epsilon keeps empty bins from producing
    log(0) or division by zero.
    """
    _, training_counts, test_counts = get_custom_histogram_info(variable, n_bins)
    eps = 1e-12
    p = np.asarray(training_counts, dtype=float) + eps
    q = np.asarray(test_counts, dtype=float) + eps
    p /= p.sum()
    q /= q.sum()
    return float(np.sum(p * np.log(p / q)))
# -
calculate_kl_divergence("Overall_Views", n_bins=60)
# ## Histogram intersection metric
#
# Our second metric is the histogram intersection score, which is an intuitive metric that measures the area of overlap between the two probability distributions. A histogram intersection score of 0.0 represents no overlap while a score of 1.0 represents identical distributions. This score requires discretized probability distributions and depends heavily on the choice of bin size and scale used.
def calculate_histogram_intersection(variable, n_bins):
    """Return the histogram intersection of *variable*'s two distributions.

    1.0 means identical distributions, 0.0 means no overlap. The previous
    version indexed `range(n_bins)`, but get_custom_histogram_info's PMF
    arrays are not guaranteed to hold exactly n_bins entries (the binning
    can yield a different count), risking an IndexError; taking the
    element-wise minimum over the full arrays avoids that and vectorizes
    the sum.
    """
    _, training_counts, test_counts = get_custom_histogram_info(variable, n_bins)
    return float(np.minimum(training_counts, test_counts).sum())
calculate_histogram_intersection("Overall_Views", n_bins=60)
calculate_histogram_intersection("Sales", n_bins=60)
| python/DatasetDrift.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import glob
import os
# +
path = "C:\\mbafiabigdatat8\\zika-data\\data\\parsed\\colombia\\*.csv"
print(os.path.basename(path))
# +
# test
# Load every parsed Colombia CSV and bucket each frame by its first column
# name, which marks the file's granularity (region / department / other,
# presumably sivigila code -- TODO confirm against the source files).
files = glob.glob(path)
dados = []  # every loaded frame, in glob order
tipo = []   # first column name of each file (its schema marker)
dados_region = list()
dados_department = list()
dados_sivigila_code = list()
for name in files:
    pd1 = pd.read_csv(name)
    nomearq = os.path.basename(name)
    # keep the source file name on every row for traceability
    pd1['nomearq'] = nomearq
    tipo.append(pd1.columns.values[0])
    dados.append(pd1)
    if pd1.columns.values[0] == 'region':
        dados_region.append(pd1)
    elif pd1.columns.values[0] == 'department':
        dados_department.append(pd1)
    else:
        dados_sivigila_code.append(pd1)
# -
dados_sivigila_code
| data/parsed/colombia/parsed_colombia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fraud POC
#
#
# This is a prototype/proof-of-concept of a ML solution for Fraud Detection for e-commerce,
# completely built in Python and backed by Dask to parallelize data processing and model training
# and hopeit.engine to "productionize" training pipeline and prediction service as microservices.
# Author: <NAME>
#
# LinkedIn: https://www.linkedin.com/in/leosmerling/
# [Dask](https://dask.org/) is a distributed processing engine for Python that can be used to process high data loads in distributed environments, such a Dask cluster. It has APIs built on top of popular Numpy and Pandas libraries.
#
# [hopeit.engine](https://github.com/hopeit-git/hopeit.engine): is an (upcoming) open-source library that I am contributing to, that enables to quickly develop microservices in Python. hopeit.engine is built on top of aiohttp to provide API endpoints and async processing, and also provides distributed processing of streaming events using [Redis Streams](https://redis.io/topics/streams-intro). Streaming data, authorization, logging, metrics and tracking/tracing are added to your microservice out of the box.
#
# To enable development, testing and working with the data in a unified environment I use [nbdev](https://github.com/fastai/nbdev). nbdev allows you to explore data, create the services and test using Jupyter Notebooks. hopeit.engine and Dask also play well with Jupyter notebooks, so the whole pipeline and prediction service can be developed and tested from Jupyter.
#
# This repo shows and end-to-end example of a Fraud Detection system consisting of:
# - Data preprocessing and partitioning (using Dasks Dataframes API)
# - Feature calculation (using Dasks Dataframes API)
# - Preparing data for model training (using Dasks Dataframes API)
# - Training a model (distributed Dask XGBoost)
# - Preparing data to serve predictions (using Dask + Redis)
# - Prepare and run a microservice to orchestrate and monitor the data + training pipeline (using hopeit.engine)
# - Prepare and run a microservice to predict fraud on new orders (using hopeit.engine)
#
# **DISCLAIMER**: The objective of this project is to quickly show an example of how Data + Feature Extraction +
# Model Training + Prediction can be developed and prepared for production. The data used for this example
# is randomly generated orders, and neither the features selected nor the model parameters were optimized
# given the nature of data used. The intention is to give an overview of the tools and the approach to quickstart a project that could evolve into a mature state by improving each one of its pieces.
#
#
# ### Getting started
#
# (Feel free to report issues if you find the procedure below not working, I've tested only in a Linux environment)
#
# * I recommend to install [Anaconda](https://docs.anaconda.com/anaconda/install/) (virtualenv can be used also -- not tested --)
#
#
# * Create a conda environment, activate and install jupyterlab, nbdev and dask
# ```
# conda create -n fraud-poc python=3.7
# conda activate fraud-poc
# conda install jupyterlab
# conda install -c conda-forge dask graphviz python-graphviz
# pip install nbdev
# nbdev_install_git_hooks
# ```
#
#
# * Install hopeit.engine from provided library (preview version, do not use in production):
# ```
# # cd install
# source install-hopeit-engine.sh
# # cd ..
# ```
#
#
# * Finally install this project and dependencies in development mode
# ```
# pip install -e .
# ```
#
#
# * In order to run the microservices (optional) Redis is required. You can run redis for development, from the provided docker configuration:
# ```
# # cd docker
# pip install docker-compose
# docker-compose up -d redis
# # cd ..
# ```
#
# * You can start a Dask cluster using docker or locally:
#
# Docker:
# ```
# docker-compose up dask-scheduler
# docker-compose up dask-worker
# ```
#
# Locally:
# ```
# dask-scheduler
# dask-worker tcp://localhost:8786 --memory-limit 2GB
# ```
#
# * Create a folder to store data
# ```
# # mkdir data
# ```
# (location can be changed from config files)
#
#
# * To open, visualize and edit notebooks run from the root folder of the project
# ```
# jupyter lab
# ```
#
#
# ### Overview:
#
# * Notebooks prefixed from 00* to 09* are created for each component/event of the pipeline and prediction service. Check each notebook for a brief description of what they do:
#
# * Cells marked with #export, will generate a python file inside fraud_poc/ folder that can be executed by hopeit.engine
#
# * To generate/update the code, run `nbdev_build_lib` (no need to do it if you haven't change the code, modules are already generated in the repo)
#
# * Usually the last cells of the notebooks, are a test case that can be run locally and invoke the generated file, gather data and do some checks/analysis. I saved the notebooks with the outputs so you can visualize some examples without needing to install anything.
#
# * Inside config/ folder there are configuration files to run two microservices:
#
# * `training_pipeline.json` and `openapi-training.json` describe the service to run data preparation and training pipeline using hopeit.engine
#
# * `fraud-service.json` and `openapi-service.json` configure a service to perform real-time predictions on new orders based on the training model and aggregated data
#
# #### Training Pipeline
#
# As a result of configuration in `config/training-config.json` plus implemented Python modules generated from Notebooks 00* to 07, the following training pipeline is implemented, where event (green) is notified by the previous step using streams (blueish):
# +
#hide
from hopeit.testing.apps import config
from fraud_poc.diagrams import draw_graph
training_config = config('config/training-pipeline.json')
pipeline_diagram = draw_graph(training_config, show_streams=True)
# -
pipeline_diagram
# There are two entry points:
#
# * make_sample_data: endpoint to create sample data and trigger the pipeline steps.
# * submit_training_pipleine: endpoint to trigger pipeline steps from already existing data
# #### Inference Service
#
# As configured in `fraud-service.json` with events implementes in notebooks 08* to 09*, there is a resulting service with two endpoints:
# +
#hide
from hopeit.testing.apps import config
from fraud_poc.diagrams import draw_graph
# Same rendering for the real-time fraud prediction service; streams are not
# requested here, so only the event graph is drawn.
service_config = config('config/fraud-service.json')
service_diagram = draw_graph(service_config)
# -
# Display the rendered diagram (last expression of the cell).
service_diagram
# * live.predict: endpoint required to enter order information and return predictions and calculated feature values.
# * test.find_orders: it's a helper endpoint to find random generated orders to be used in this example
# ### Data processing and training pipeline
#
# * To run training pipeline service:
#
# ```
# hopeit_server run --config-files=config/server.json,config/training-pipeline.json --api-file=config/openapi-training.json --start-streams --port=8020
# ```
#
# You should see a couple endpoints in http://localhost:8020/api/docs
#
# * The first endpoint "Data: Make Sample Data" will run the whole data+training pipeline end to end if you click in `Try It`
# 
#
# 1) **create-sample-data**: will create random orders in parquet format into folder `./data/raw/`. This dataset is partitioned by time periods, i.e. 1 file per 30-day batch in this example. Once this step is finished, the end of the job will be notified using hopeit.engine streams functionality and the next job will take place once the event is consumed.
#
# 2) **preprocess**: reads data generated in the previous step and creates new parquet files partitioned by customer_id and email, so aggregations on those two dimensions can be performed more efficiently later. Again, once the job is finished, the next step will be notified. These generated files can also be used for data analysis and feature discovery using Jupyter and Dask.
#
# 3) **feature-calc**: calculates aggregations on customer_ids and emails (i.e. accumulates most recent emails, ip_addrs, counts, order_amounts, etc) and stores a new data set of orders enriched with this extra information.
#
# 4) **training-data**: prepares data for training: obtains labels for the orders (in this POC the `is_fraud` label field is just assigned using a combination of calculations with some randomness) and creates a more balanced dataset subsampling non-fraud cases, then creates a validation set using more recent non-fraud and fraud labeled transactions. The next step is notified when data is ready. The dataset is shuffled randomly into N partitions (10 in the example) so training can be performed from each partition using fairly-balanced datasets.
#
# 5) **train-model**: trains an XGBoost model on sampled data using Dask distributed implementation. Validates model precision and recall using the validation dataset and, if validation passes a configured threshold, the model is saved to be used in the prediction service.
#
# 6) **prepare-db**: stores most recent customer_id and email features calculated in step 3) into a Redis database that can be used for real-time prediction service. (Notice that this data should be continuously updated on new orders but this is not provided in this POC)
#
# Since data generation could be tedious, there is a second endpoint that allows to run just from step 02, assuming
# you already have raw data:
# 
#
#
#
#
#
#
# ### Fraud prediction service
#
# To run the live prediction service:
#
# ```
# hopeit_server run --config-files=config/server.json,config/fraud-service.json --api-file=config/openapi-service.json --start-streams --port=8021
# ```
#
# You can try the endpoints using in http://localhost:8021/api/docs
#
# * First extract some valid customer_id and email using:
# ```
# curl -X GET "http://localhost:8021/api/fraud-poc/0x0x1-service/test/find-orders?prefix=*&num_items=10" \
# -H "Accept: application/json"
# ```
# This POC only can predict fraud for known customer_id and email in the generated data.
#
# Using a customer_id and email, pass a new order to the service using the Live: Predict Endpoint:
# 
#
# And check the results: all calculated features plus an is_fraud field are returned:
# ```
# {
# "order_id": "ce4798f5-6127-4d6e-bf1d-dda810eab26b",
# "order_date": "2020-07-07T06:33:18+00:00",
# "customer_id": "271d8c5e-e4e3-4377-a3e3-673ccf153664",
# "ip_addr": "f95e9c978b7f88dde5b9eb39417070251603db2d",
# "order_amount": 100.7097195892065,
# "email": "<PASSWORD>",
# "customer_id_by_email": [
# "271d8c5e-e4e3-4377-a3e3-673ccf153664"
# ],
# "num_customer_id_by_email": 1,
# "last_customer_id_by_email": "271d8c5e-e4e3-4377-a3e3-673ccf153664",
# "same_customer_id_by_email": 1,
# "known_customer_id_by_email": 1,
# "order_amount_mean_by_email": 468.79164250074143,
# "order_amount_std_by_email": 317.0635415216074,
# "order_amount_min_by_email": 68.2940660160266,
# "order_amount_max_by_email": 916.7097195892065,
# "order_amount_sum_by_email": 4687.916425007415,
# "order_amount_by_email": [
# 769.0840886685221,
# 68.2940660160266,
# 164.22372869469348,
# 198.35357128773578,
# 454.66931470215576,
# 100.7097195892065,
# 779.1408217338134,
# 916.7097195892065,
# 854.4217419999278,
# 382.3096527261267
# ],
# "key": "271d8c5e-e4e3-4377-a3e3-673ccf153664",
# "email_by_customer_id": [
# "7545576ffe1b7c1d9d8d2e82d0191fa057df695f"
# ],
# "ip_addr_by_customer_id": [
# "<KEY>",
# "788e574cf1934b34e9510ce897d8a593ab9dbcc9",
# "d02eae79264a401d76e853c41bdb781484443db2"
# ],
# "num_email_by_customer_id": 1,
# "num_ip_addr_by_customer_id": 3,
# "last_email_by_customer_id": "7545576ffe1b7c1d9d8d2e82d0191fa057df695f",
# "last_ip_addr_by_customer_id": "<KEY>",
# "same_email_by_customer_id": 1,
# "same_ip_addr_by_customer_id": 1,
# "known_email_by_customer_id": 1,
# "known_ip_addr_by_customer_id": 1,
# "order_amount_mean_by_customer_id": 468.79164250074143,
# "order_amount_std_by_customer_id": 317.0635415216074,
# "order_amount_min_by_customer_id": 68.2940660160266,
# "order_amount_max_by_customer_id": 916.7097195892065,
# "order_amount_sum_by_customer_id": 4687.916425007415,
# "order_amount_by_customer_id": [
# 769.0840886685221,
# 68.2940660160266,
# 164.22372869469348,
# 198.35357128773578,
# 454.66931470215576,
# 100.7097195892065,
# 779.1408217338134,
# 916.7097195892065,
# 854.4217419999278,
# 382.3096527261267
# ],
# "location_lat": 0,
# "location_long": 0,
# "is_fraud": 0.5424039363861084
# }
# ```
#
# So that's it, please feel free to submit feedback and suggestions! Please contact me in case you want to improve pieces like dataset generation, model tuning, etc.
#
# I Hope you enjoyed it!
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Classification
import sys
sys.path.append("..")  # make the project root importable (conf, src packages)
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from conf import PROFILE_DIR
from src.img_preprocessing import tf_clean_profile, overview
# Record framework versions for reproducibility of this notebook run.
print(f"TensorFlow version: {tf.__version__}")
print(f"Keras version: {keras.__version__}")
# Define CNN architecture
# Input: 256x256 single-channel (grayscale) images, rescaled from [0, 255] to
# [0, 1]. Three conv/pool stages with growing filter counts, then a
# dropout-regularized dense head ending in a 7-way softmax (one unit per class).
model = keras.models.Sequential([
    keras.layers.experimental.preprocessing.Rescaling(1.0 / 255, input_shape=(256,256, 1)),
    keras.layers.Conv2D(64, 7, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(7, activation="softmax")
])
# Compile model
# sparse_categorical_crossentropy expects integer class labels (as produced by
# image_dataset_from_directory), not one-hot vectors.
model.compile(
    loss="sparse_categorical_crossentropy",
    # `learning_rate` replaces the deprecated `lr` alias (removed in newer
    # TF/Keras releases); beta_1/beta_2 are Adam's defaults, kept explicit.
    optimizer=keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999),
    metrics=["accuracy"]
)
# Model summary
model.summary()
# +
# Plot model
# keras.utils.plot_model(model)
# -
# Load training set
# Labels are inferred from sub-directory names under PROFILE_DIR; the same
# seed on both calls keeps the 90/10 train/validation splits complementary.
# NOTE(review): relies on the default image_size=(256, 256), which matches the
# model's input shape — confirm if the loader defaults ever change.
train_ds = keras.preprocessing.image_dataset_from_directory(
    PROFILE_DIR,
    validation_split=0.1,
    subset="training",
    seed=0,
)
# Load validation set
val_ds = keras.preprocessing.image_dataset_from_directory(
    PROFILE_DIR,
    validation_split=0.1,
    subset="validation",
    seed=0,
)
# Classes
class_names = train_ds.class_names
print(class_names)
# Overview before preprocessing
overview(train_ds, class_names)
# Preprocess images
# tf_clean_profile is applied first, then the 3-channel images are collapsed
# to grayscale to match the model's (256, 256, 1) input.
train_ds = train_ds.map(tf_clean_profile)
train_ds = train_ds.map(lambda x, y: (tf.image.rgb_to_grayscale(x), y))
val_ds = val_ds.map(tf_clean_profile)
val_ds = val_ds.map(lambda x, y: (tf.image.rgb_to_grayscale(x), y))
# Overview after preprocessing
overview(train_ds, class_names, cmap="gray")
# +
# Configure dataset for performance
# AUTOTUNE = tf.data.experimental.AUTOTUNE
# train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
# val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# +
# Train model
# Stop once validation metrics plateau for 10 epochs and roll back to the
# best-performing weights.
early_stopping_cb = keras.callbacks.EarlyStopping(
    patience=10,
    restore_best_weights=True
)
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=30,
    callbacks=[early_stopping_cb]
)
# -
# Plot learning curves
fig, ax = plt.subplots(figsize=(8, 5))
pd.DataFrame(history.history).plot(ax=ax)
ax.grid()
ax.set(ylim=(0, 3))
# Save model
model.save("../model/clf.h5")
| notebooks/classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.9 64-bit (''ml'': venv)'
# language: python
# name: python3
# ---
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix, r2_score, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.ensemble import RandomForestClassifier
import pickle
# Load the pre-cleaned message dataset (reading .xlsx requires openpyxl).
data = pd.read_excel('clean.xlsx')
data.head()
data.shape
data.head()
# Class-balance overview: number of messages per 'Type'.
data.groupby('Type').count().plot.bar(ylim=0)
plt.show()
# +
stemmer = PorterStemmer()
words = stopwords.words("english")

def _clean_message(text):
    """Strip non-letters, drop stopwords, stem the tokens, and lowercase."""
    tokens = re.sub("[^a-zA-Z]", " ", text).split()
    stems = [stemmer.stem(tok) for tok in tokens if tok not in words]
    return " ".join(stems).lower()

data['cleaned'] = data['msgContent'].apply(_clean_message)
# -
data.iloc[:10]
# Bigram-aware TF-IDF features; terms must appear in at least 3 documents.
vectorizer = TfidfVectorizer(ngram_range=(1, 2), min_df=3, stop_words="english", sublinear_tf=True, norm='l2')
final_features = vectorizer.fit_transform(data['cleaned']).toarray()
final_features.shape
# +
X = data['cleaned']
Y = data['Type']
# 85/15 split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(X,Y,test_size=0.15,random_state=42)
# Pipeline: TF-IDF -> chi-squared feature selection (top 230) -> random forest.
pipeline = Pipeline([('vect', vectorizer),
('chi', SelectKBest(chi2, k=230)),
('clf', RandomForestClassifier(n_estimators=300))])
# fitting our model and save it in a pickle for later use
model = pipeline.fit(x_train, y_train)
with open('RandomForest.pickle', 'wb') as f:
    pickle.dump(model, f)
# -
ytest = np.array(y_test)
# NOTE(review): model.predict(x_test) is computed twice here; caching the
# predictions once would halve the evaluation cost.
print(classification_report(ytest, model.predict(x_test)))
print(confusion_matrix(ytest, model.predict(x_test)))
print(model.predict(np.array(['''Rs.2193.40 spent on POS/Ecom using ICICI Debit card on 26/10/21 18:05 at PAYTM NOIDA from Ac:XXXXXXXXX. Bal:XXXXX CR -ICICI Bank
'''])))
import matplotlib.pyplot as plt
# NOTE(review): plot_confusion_matrix was removed in scikit-learn >= 1.2;
# ConfusionMatrixDisplay.from_estimator is the modern replacement.
from sklearn.metrics import plot_confusion_matrix
fig, ax = plt.subplots(figsize=(15, 15))
plot_confusion_matrix(model,x_test,y_test, ax=ax)
plt.show()
# The model is persisted a second time here, to 'model.pkl', then reloaded to
# verify the round trip produces the same predictions.
pickle.dump(model, open('model.pkl', 'wb'))
newModel = pickle.load(open('model.pkl','rb'))
print(newModel.predict(np.array(['Rs.2193.40 spent on POS/Ecom using ICICI Debit card on 26/10/21 18:05 at PAYTM NOIDA from Ac:XXXXXXXXX. Bal:XXXXX CR -ICICI Bank'])))
| ModelCode/randomForest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Use case: choose an airplane company
# This use case applies the *analytic hierarchy process* (AHP) to choose between three airline companies **Crazy price flight**, **Oh boy it's cheap air** (OBIC air) and the **National air** for making national flights. The three candidates or *alternatives* are thus:
#
# * *crazy air flight*
# * *OBIC air*
# * *National air*
#
# The hierarchy of criteria is described below:
# 
#
# The **covering criteria** are:
#
# * Snack service
# * Crew members
# * Reliability
# * Ticket price
# * Price for additional services
# * Price advantage with the company's partners
#
# ## General methodology
#
# The first step of the AHP is to carry out a cross-comparison of the top-level criteria and their sub-criteria against the main objective. The comparison is carried out with the *fundamental scale of absolute numbers* given hereafter. The result of a comparison is measured with the *intensity of importance* based on the series of *odd integers* from 1 to 9. Even integers could be used for introducing a more refined assessment. However, restricting to the *odd integer* scale should be suitable in most cases. A comparison is stated as:
#
# <p style="text-align: center;"> Intensity(A,B) = *Element A* compared to *Element B* </p>
#
# The **reciprocal comparison** is scored with the inverse value of intensity:
#
# <p style="text-align: center;"> Intensity(B,A) = 1/Intensity(A,B) </p>
#
# ---
#
# <p style="text-align: center; font-weight: bold; text-decoration: underline"> Fundamental scale of absolute numbers </p>
#
# |Intensity of importance|Definition |Explanation |
# |-----------------------|--------------------------|-------------------------------------------------------------|
# |1 |Equal importance |The two compared elements **contribute equally** to the |
# | | |objective |
# |3 |Moderate importance |Experience and judgment slightly favor *Element A* over |
# | | |*Element B* |
# |5 |Strong importance |Experience and judgment strongly favor *Element A* over |
# | | |*Element B* |
# |7 |Very strong or demons- |*Element A* is favored very strongly over *Element B*; its |
# | |trated importance |dominance is demonstrated in practice |
# |9 |Extremely strong |The evidence favouring *Element A* over *Element B* is of the|
# | |importance |**highest possible** order of affirmation |
#
# ---
#
# When considering a cross-comparison of $n$ criteria, the number of independent comparisons to carry out is:
#
# $${n \choose 2} = {n! \over (n-2)!2!} = {n(n-1) \over 2}$$
#
# Dependent comparisons are either the comparison of an element with itself, which always evaluates to 1, or reciprocal comparisons, whose values are inverses.
#
# The whole set of cross-comparisons constitutes the **comparison matrix** (or *judgment matrix*) which component inversion property by transposition makes them elements of the set of **reciprocal matrices**.
#
# <p style="font-weight: bold; text-decoration: underline;">Consistency</p>
# The *independance* of comparisons is not related to the notion of consistency. Giving a value to a comparison between elements $X$ and $Y$ and another one for the comparison between elements $Y$ and $Z$ is equivalent to state that:
#
# <p style="text-align: center;"> $X=\alpha Y$ and $Y=\beta Z$ </p>
#
# which implies that $X=\alpha\beta Z$. Consequently, the two comparisons are **inconsistent** if ${X\over Z}$ is significantly different from $\alpha\beta$.
#
# <p style="font-weight: bold; text-decoration: underline;">Priorities</p>
# When a cross-comparison has been carried out, *absolute priorities* can be calculated from the comparison matrix. These values are absolute because they are normalized so that their sum is equal to 1.
#
# <p style="font-weight: bold; text-decoration: underline;">Hierarchy and covering criteria</p>
# The priorities of all criteria are hierarchically weighted by the priority of their parent criterion. The *covering criteria* are the criteria at the leaves of the hierarchical tree:
#
# * Snack onboard
# * Crew members' service onboard
# * Reliability of the flights' schedule
# * Ticket price
# * Price of additional services
# * Discounts with the company's partners
#
# Because of the hierarchical weighting their sum equals to 1.
#
# <p style="font-weight: bold; text-decoration: underline;">Cross-comparison of alternatives</p>
# The alternatives (considered solutions) are cross-compared against every *covering criterion*. The normalization of priorities implies that the sum of priorities of alternatives over covering criteria is 1.
#
# <p style="font-weight: bold; text-decoration: underline;">Conclusion</p>
# The priority of an alternative against the objective is calculated as the sum of its priorities over covering criteria.
# +
import pandas as pd
import numpy as np
from IPython.display import display
def priorities(df):
    """Return the normalized priority vector of a comparison matrix.

    The priorities are the components of the principal eigenvector
    (eigenvector of the largest eigenvalue), scaled so they sum to 1.
    """
    # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0.
    eig_val, eig_vect = np.linalg.eig(df.to_numpy())
    eig_max_ind = eig_val.argmax()
    principal = eig_vect[:, eig_max_ind]
    # There is no imaginary part, it is mainly a type conversion
    return np.real(principal / principal.sum())
def inconsistency(df):
    """Return (consistency index, deviation matrix) of a comparison matrix.

    The consistency index is (lambda_max - n) / (n - 1); the deviation
    matrix E = D^-1 A D (D = diag of priorities) is all ones when the
    matrix is perfectly consistent.
    """
    eig_val, eig_vect = np.linalg.eig(df.to_numpy())
    n = df.shape[0]
    # consistency index
    ind_max = eig_val.argmax()
    index = (np.real(eig_val[ind_max]) - n) / (n - 1)
    # priority vector
    w = eig_vect[:,ind_max]
    w = np.real(w / w.sum())
    D = np.diag(w)
    E = np.dot(np.linalg.solve(D,df), D)
    return index, E
# Accumulates criterion-name -> priority pairs across the notebook's sections.
normalized_priorities = dict()
def update_norm_priorities(w_df):
    """Merge the priorities of a one-column DataFrame into the global dict."""
    cnames = w_df.index.tolist()
    # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0.
    values = w_df.to_numpy()[:,0].tolist()
    normalized_priorities.update(dict([pair for pair in zip(cnames,values)]))
# Running total of each alternative's priority against the main objective.
alternative_priorities = pd.DataFrame(np.zeros((3,)), index = ['Crazy price fl.', 'OBIC', 'National air'],
                                      columns = ('priorities',))
# -
# ## Cross-comparison of top level criteria
#
# The 3 criteria to compare are:
#
# * Service on board: quality of services onboard
# * Reliability: flight delays or cancellations, lost baggage
# * Price
#
# A cross-comparison of 3 criteria involves 3 independent comparisons. Here is my *personal opinion*:
#
# * *Reliability* has an *extreme importance* over *Service on board*
# * *Prices* have a *very strong importance* over *Service on board*
# * *Reliability* has a *slight importance* over *Prices*
#
# These comparisons are assessed with **fundamental scale of absolute numbers**. The comparison matrix below is established from these values. The values must be read as: row-label compares to column-label:
labels = ['Service', 'Reliability', 'Price']
# Pairwise judgments on the fundamental scale; entry (row, col) is "row
# compared to col", and the matrix is reciprocal by construction.
comp_mat = np.array([[1,1/9,1/7],[9,1,3],[7,1/3,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
# Now, let's check the *consistency* of this set of cross-comparisons:
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
# mat_E - 1 shows each judgment's deviation from perfect consistency (0 = exact).
print(mat_E-1,'\n')
# The statistically maximal admissible *consistency index* is 0.10 which is significantly higher than 0.040.
#
# Eventually the **normalized priorities** are:
w=pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(w.round(3))
update_norm_priorities(w)
# ## Comparison of level-2 sub-criteria of *Service onboard*
# There are 2 criteria to compare:
#
# * Snack service
# * Crew members: are they kind? Reactive?
#
# A cross-comparison of 2 criteria involves only 1 independent comparison. Here is my personal opinion:
#
# * The behavior of crew members is slightly more important than the snack service onboard
labels = ['Snack', 'Crew members']
# Single independent judgment: crew members moderately (3) over snack service.
comp_mat = np.array([[1.,1/3],[3,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1)
# A 2x2 comparison matrix is necessarily consistent. The normalized priorities are:
w=pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(w.round(3))
update_norm_priorities(w)
# ## Comparison of level-2 sub-criteria of *Prices*
# There are 2 criteria to compare:
#
# * Ticket prices
# * Additional services: additional checked luggage, sports equipment and special luggage
# * Reductions with the company's partners: car rental, hotel reservation
#
# A cross-comparison of 3 criteria involves 3 independent comparisons. Here is my personal opinion:
#
# * Ticket price has a very strong importance compared to the price of additional services
# * Ticket price has an extreme importance compared to reductions with the company's partners
# * The price of additional services is slightly more important than reductions with the company's partners
labels = ['Ticket', 'Add. services', 'Reductions']
# Ticket dominates add. services (7) and reductions (9); add. services
# slightly dominate reductions (3). Reciprocals fill the lower triangle.
comp_mat = np.array([[1,7,9],[1/7,1,3],[1/9,1/3,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1,'\n')
# The statistically maximal admissible consistency index is 0.10 which is significantly higher than 0.040.
#
# The normalized priorities are:
w=pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(w.round(3))
update_norm_priorities(w)
# ## Weighting of criteria
# Here are the priorities of criteria before they are normalized.
display(normalized_priorities)
# Now, let weight the priorities hierarchically:
# Each sub-criterion's priority is scaled by its parent criterion's priority,
# so the leaf ("covering") criteria end up summing to 1.
for k in ('Crew members','Snack'):
    normalized_priorities[k] *= normalized_priorities['Service']
for k in ('Ticket','Add. services','Reductions'):
    normalized_priorities[k] *= normalized_priorities['Price']
display(normalized_priorities)
# The covering criteria are listed below. Their weighted priorities must sum to 1.
#
# * Snack onboard
# * Crew members' service onboard
# * Reliability of the flights' schedule
# * Ticket price
# * Price of additional services
# * Discounts with the company's partners
#
# Sanity check: the weighted covering priorities should sum to 1.
covering_criteria = ('Crew members','Snack','Reliability','Ticket','Add. services','Reductions')
print('Sum of priorities of covering criteria:')
print(np.array([normalized_priorities[cov] for cov in covering_criteria]).sum())
# ## Cross-comparison of alternatives against covering criteria
#
# ### Alternatives against *Snack onboard*
# Each section below follows the same pattern: judge the three airlines
# against one covering criterion, check consistency, derive normalized
# priorities, and accumulate them weighted by that criterion's priority.
labels = ['Crazy price fl.', 'OBIC', 'National air']
comp_mat = np.array([[1,5,1/3],[1/5,1,1/7],[3,7,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1)
# The statistically maximal admissible consistency index is 0.10 which is significantly higher than 0.032.
#
# The normalized priorities are:
alt_w = pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(alt_w.round(3))
alternative_priorities += alt_w * normalized_priorities['Snack']
# ### Alternatives against *Crew members*
labels = ['Crazy price fl.', 'OBIC', 'National air']
comp_mat = np.array([[1,3,1],[1/3,1,1/3],[1,3,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1)
# The consistency index is 0., the reciprocal matrix is consistent. The normalized priorities are:
alt_w = pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(alt_w.round(3))
alternative_priorities += alt_w * normalized_priorities['Crew members']
# ### Alternatives against *Reliability*
labels = ['Crazy price fl.', 'OBIC', 'National air']
comp_mat = np.array([[1,3,1/2],[1/3,1,1/4],[2,4,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1)
# The statistically maximal admissible consistency index is 0.10 which is significantly higher than 0.009. The normalized priorities are:
alt_w = pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(alt_w.round(3))
alternative_priorities += alt_w * normalized_priorities['Reliability']
# ### Alternatives against *Ticket price*
labels = ['Crazy price fl.', 'OBIC', 'National air']
comp_mat = np.array([[1,1/3,5],[3,1,7],[1/5,1/7,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1)
# The statistically maximal admissible consistency index is 0.10 which is significantly higher than 0.032. The normalized priorities are:
alt_w = pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(alt_w.round(3))
alternative_priorities += alt_w * normalized_priorities['Ticket']
# ### Alternatives against *Price of additional services*
labels = ['Crazy price fl.', 'OBIC', 'National air']
comp_mat = np.array([[1,3,1/7],[1/3,1,1/8],[7,8,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1)
# The statistically maximal admissible consistency index is 0.10 which is significantly higher than 0.052. The normalized priorities are:
alt_w = pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(alt_w.round(3))
alternative_priorities += alt_w * normalized_priorities['Add. services']
# ### Alternatives against *Discount with partners*
labels = ['Crazy price fl.', 'OBIC', 'National air']
comp_mat = np.array([[1,2,1],[1/2,1,1/2],[1,2,1]])
comp_df = pd.DataFrame(comp_mat, index = labels, columns=labels)
display(comp_df)
ind, mat_E = inconsistency(comp_df)
print('The consistency index is:',ind,'\n')
print('The consistency deviation matrix:')
print(mat_E-1)
# The consistency index is 0., the reciprocal matrix is consistent. The normalized priorities are:
alt_w = pd.DataFrame(priorities(comp_df), index = labels, columns = ('priorities',))
display(alt_w.round(3))
alternative_priorities += alt_w * normalized_priorities['Reductions']
# The priorities of alternatives are multiplied with the priorities of the *covering criteria* they are compared against. They are eventually summed which gives their **priority against the main objective**.
#
# It is checked that these priorities sum to 1.
# Final ranking of the alternatives and a sanity check that priorities sum to 1.
display(alternative_priorities.round(3))
# .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0.
print('Sum of priorities = {0:f}'.format(alternative_priorities.to_numpy().sum()))
# ## CONCLUSION
# The winner is **National Air** with a score of 45.0%, followed by **Crazy price flights** with a score of 30.9%. **Oh Boy It's Cheap Air** with a score of 24.1% is in last position.
| yeahp-backend/ref_case/Use_case_Choose_an_air_company.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split,cross_val_score
from sklearn.preprocessing import StandardScaler,MinMaxScaler,LabelEncoder
from sklearn.metrics import accuracy_score,confusion_matrix,plot_confusion_matrix,roc_curve,auc,mean_squared_error
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn.neural_network import MLPClassifier
# +
#Read the data file and transform into dataframe
df_Forest = pd.read_excel(r'D:/Datasets/Forest.xlsx')
print(df_Forest.columns)
#Check the dataframe to see whether it has missing values and the data type of each column
print(df_Forest.isnull().sum())
print(df_Forest.dtypes)
# +
#Split the dataframe into predictors and target columns
# Column 0 is the class label; all remaining columns are features.
predictors = df_Forest.iloc[:,1:]
target = df_Forest.iloc[:,0]
#Split the predictor and target sets into training and testing sets
predi_train, predi_test, target_train, target_test = train_test_split(predictors, target, test_size=0.3,random_state=100)
# +
#Enlabel the non-numeric column
# Encode string class labels as integers; the encoder is fit on the training
# labels only, then reused for the test labels.
class_le = LabelEncoder()
target_train = class_le.fit_transform(target_train)
target_test = class_le.transform(target_test)
#Standardise the predictors for Decision Tree classifier
scaler1 = StandardScaler()
predi_train_tree = scaler1.fit_transform(predi_train)
predi_test_tree = scaler1.transform(predi_test)
#Standardise the whole predictors for cross-validation
# NOTE(review): fitting the scaler on the full dataset before cross-validation
# leaks information across folds; a Pipeline with per-fold scaling would avoid
# this — confirm whether the leakage matters for these results.
predictors_tree = scaler1.fit_transform(predictors)
#Normalise the predictors for MultiLayer Perceptron
scaler2 = MinMaxScaler()
predi_train_MLP = scaler2.fit_transform(predi_train)
predi_test_MLP = scaler2.transform(predi_test)
#Normalise the whole predictors for cross-validation
predictors_MLP = scaler2.fit_transform(predictors)
# +
#Build Decision Tree classifier and make predictions
#Determine the best hyper-parameters for the Decision Tree by cross-validated accuracy
accuracies=[]
depths=[]
samples_split=[]
samples_leaf =[]
# Exhaustive grid: 10 depths x 40 split sizes x 45 leaf sizes = 18,000
# candidates, each scored with 3-fold cross-validation (this cell is slow).
for x in range(1,11):
    for y in range(10,50):
        for z in range(5,50):
            clf_tree_try = DecisionTreeClassifier(criterion='entropy',max_depth=x,min_samples_split=y,min_samples_leaf=z,random_state=100)
            # cross_val_score clones and fits the estimator itself, so the
            # per-candidate fit()/predict() on the hold-out split that used
            # to run here was dead work (its result was never read) and has
            # been removed — same scores, substantially less computation.
            accuracies.append(cross_val_score(clf_tree_try,predictors_tree,target,cv=3).mean())
            depths.append(x)
            samples_split.append(y)
            samples_leaf.append(z)
# Collect every (params, accuracy) combination for inspection.
df_param_accu = pd.DataFrame({
    'Depth' : depths,
    'Min_samples_split' : samples_split,
    'Min_samples_leaf' : samples_leaf,
    'Accurarcy' : accuracies
})
df_param_accu.head()
# -
# Rank the candidates so the best-scoring parameter set is on top.
df_param_accu.sort_values(by=["Accurarcy"], ascending=False)
#Based on the search results in the above section, build the final tree with the chosen parameters
clf_tree = DecisionTreeClassifier(criterion='entropy', min_samples_split=17, max_depth=4, min_samples_leaf=7, random_state=100)
clf_tree.fit(predi_train_tree, target_train)
predictions_tree = clf_tree.predict(predi_test_tree)
# +
#Plot Confusion Matrices for Decision Tree and MLP classifiers and calculate their accuracies
#For Decision Tree
# NOTE(review): plot_confusion_matrix was removed in scikit-learn >= 1.2;
# ConfusionMatrixDisplay.from_estimator is the modern replacement.
plot_conmatrix_tree = plot_confusion_matrix(clf_tree,predi_test_tree,target_test)
plot_conmatrix_tree.ax_.set_title('Confusion Matrix for Decision Tree')
print('The accuracy of Decision Tree classifier is: ',accuracy_score(target_test,predictions_tree))
print("The average accuracy score of Decision Tree under cross validation is: ",cross_val_score(clf_tree,predictors_tree,target,cv=3).mean())
# -
#Build Multilayer Perceptron and make predictions
# Default architecture MLP; only the seed and convergence tolerance are set.
clf_MLP = MLPClassifier(random_state=100, tol=5e-3)
clf_MLP.fit(predi_train_MLP,target_train)
predictions_MLP = clf_MLP.predict(predi_test_MLP)
#For MLP
plot_conmatrix_MLP = plot_confusion_matrix(clf_MLP,predi_test_MLP,target_test)
plot_conmatrix_MLP.ax_.set_title('Confusion Matrix for Multilayer Perceptron')
print('The accuracy of Multilayer Perceptron classifier is: ',accuracy_score(target_test,predictions_MLP))
print("The average accuracy score of MLP under cross validation is: ",cross_val_score(clf_MLP,predictors_MLP,target,cv=3).mean())
plt.show()
#Generate probability table for both classifiers
# Per-sample class probabilities on the test set; rows align across the two
# classifiers because both predict on the same test split.
prob_tree = clf_tree.predict_proba(predi_test_tree)
prob_MLP = clf_MLP.predict_proba(predi_test_MLP)
#Probability table for the first sample
probability_table_sample1 = pd.DataFrame([[prob_tree[0][0],prob_MLP[0][0]],
[prob_tree[0][1],prob_MLP[0][1]],
[prob_tree[0][2],prob_MLP[0][2]],
[prob_tree[0][3],prob_MLP[0][3]]],
columns=['Decision Tree','MLP'], index=['Class_d','Class_h','Class_o','Class_s'])
print(probability_table_sample1)
#Determine the class of the i-th sample
def Clf_DT_MLP_Aggregate(i, proba_a=None, proba_b=None):
    """Return the ensemble class id for sample *i*.

    The two classifiers' probability rows for sample *i* are averaged and the
    class with the highest mean probability is returned. Ties resolve to the
    highest class index, matching the original last-match-wins scan.

    Parameters:
        i: index of the sample in the probability tables.
        proba_a, proba_b: optional (n_samples, n_classes) probability arrays;
            they default to the module-level prob_tree / prob_MLP tables, so
            the original call form Clf_DT_MLP_Aggregate(i) still works.
    """
    if proba_a is None:
        proba_a = prob_tree
    if proba_b is None:
        proba_b = prob_MLP
    # Average only this sample's rows. The original rebuilt and aggregated a
    # DataFrame over *all* samples on every call (O(n) work per call, O(n^2)
    # over the evaluation loop); this computes the same averages in O(1).
    avg = (np.asarray(proba_a[i], dtype=float) + np.asarray(proba_b[i], dtype=float)) / 2.0
    final_class = 0
    best = avg[0]
    for y in range(1, len(avg)):
        if avg[y] >= best:  # >= keeps the last index on ties, like the original
            best = avg[y]
            final_class = y
    return final_class
# Combined Tree+MLP prediction for every test sample, then its accuracy.
predictions_DT_MLP = [Clf_DT_MLP_Aggregate(sample_idx)
                      for sample_idx in range(len(target_test))]
accuracy_classifier_DT_MLP = accuracy_score(target_test, predictions_DT_MLP)
print('The accuracy of the Tree-MLP-combined classifier is: ', accuracy_classifier_DT_MLP)
# +
# Pr(class='s' | DT='s') is the precision of the Decision Tree for class 's';
# collect the per-class precision score for each of the four classes.
from sklearn.metrics import classification_report
report = classification_report(target_test, predictions_tree, output_dict=True)
precisions = {label: report[str(label)]['precision'] for label in range(4)}
print(precisions)
def classifier_DT_MLP_conditional(i, proba_dt=None, proba_mlp=None, class_precisions=None):
    """Pick between the Decision-Tree and MLP predictions for sample ``i``.

    Each classifier proposes its most probable class; the proposal whose
    (probability * per-class precision) is larger wins.  Ties go to the MLP,
    as in the original (strict ``P1 > P2`` favouring the tree only).

    Parameters
    ----------
    i : int
        Test-sample index.
    proba_dt, proba_mlp : sequence of per-class probability rows, optional
        Default to the module-level ``prob_tree`` / ``prob_MLP``.
    class_precisions : mapping class-id -> precision, optional
        Defaults to the module-level ``precisions`` dict built above.

    Returns
    -------
    int
        The winning class ID.
    """
    if proba_dt is None:
        proba_dt = prob_tree
    if proba_mlp is None:
        proba_mlp = prob_MLP
    if class_precisions is None:
        class_precisions = precisions
    row_dt = proba_dt[i]
    row_mlp = proba_mlp[i]
    # The original built a 4-row DataFrame per call and indexed the result
    # of Series.max() by *position* (``.max()[2]``), which is deprecated /
    # removed on a label-indexed Series in modern pandas.  Plain sequences
    # avoid both the per-call overhead and the fragile indexing.
    max_dt = max(row_dt)
    max_mlp = max(row_mlp)
    max_class_DT = 0
    max_class_MLP = 0
    # Equality scan keeps the *last* maximal index, matching the original.
    for a in range(len(row_dt)):
        if row_dt[a] == max_dt:
            max_class_DT = a
    for b in range(len(row_mlp)):
        if row_mlp[b] == max_mlp:
            max_class_MLP = b
    # Weight each candidate's probability by that classifier's precision
    # for the candidate class.
    P1 = max_dt * class_precisions[max_class_DT]
    P2 = max_mlp * class_precisions[max_class_MLP]
    if P1 > P2:
        return max_class_DT
    return max_class_MLP
# Interactive driver: ask the user for a test-sample index and report the
# precision-weighted DT/MLP decision for it.
id = int(input('Input the sample_ID you want to predict its class: '))  # NOTE(review): `id` shadows the builtin
print('The No.{0} sample belongs to class {1}'.format(id,classifier_DT_MLP_conditional(id)))
# -
| DecisionTree_MLP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Variables
#
# Until now, all of the Python examples have used explicit data values typed directly into the **code cells**. This is okay for typing out a quick numerical calculation, but does not let us do anything any more sophisticated with our data. To do anything more complex your code will need to store and retrieve data, and to change the values of these data as you perform your calculations, which we do using **variables**.
#
# ## What is a variable?
#
# A variable in Python is an object that stores some data.
#
# This data can be a wide range of **types**, including `int`, `float`, or `complex`; a `string`; a `bool`; even a function, or a **collection** of other variables.
#
# ## Assigning a variable
#
# To create a new variable and **assign** it a value we use `=`.
days_in_october = 31  # create the variable and bind it to the integer 31
# This creates a variable named `days_in_october` and assigns it the integer value `31`.
#
# <p align="center">
# <img src="https://github.com/pythoninchemistry/ch40208/raw/master/CH40208/python_basics/images/days_in_october.png" width="32%" alt="The assignment of the value 31 to the variable named days_in_october."/>
# </p>
#
# The left-hand side of the assignment expression is the **variable name**.
# - Variable name must start with a letter or an underscore.
# - Variable names can be any length and can contain upper- and lower-case letters, numbers, and underscores.
# - Variable names are **case sensitive**. `my_name` and `My_Name` are two different variables.
# - Variable names cannot be special **reserved keywords**. You can see the full list of **reserved keywords** by running the command `help("keywords")` in a **code cell**.
#
# Now we can use the variable `days_in_october` in place of the integer `31`:
print("There are", days_in_october, "days in October")  # the variable stands in for its value
# ### Aside: f-strings
#
# A neater way to do this is to use **f-strings**, which provide a way to embed expressions inside strings. An **f-string** is a **string literal** with an `f` character at the beginning, and curly brackets enclosing any expressions. When the **f-string** is created, any enclosed expressions are evaluated to give a single **interpolated** string:
f"There are {days_in_october} days in October"  # the {...} expression is interpolated into the string
# `days_in_october` can be used in any of the ways that we might use the value assigned to it. i.e. `days_in_october` behaves like an **integer**. If we call type with `days_in_october` we can confirm the assigned value is an `int`.
type(days_in_october)  # confirms the assigned value is an int
# Variables can also appear on the right-hand side of variable assignments.
a = 3  # bind the name a to the integer 3
b = a  # b receives the current *value* of a (3)
print(b)
# - In the first line, we create a variable `a` and assign it the value `3`.
# - In the second line, we create a variable `b` and assign it to the same value as the variable `a`. This is the same as if we had written `b = 3`.
# - In the third line we print `b`.
#
# <p align="center">
# <img src="https://github.com/pythoninchemistry/ch40208/raw/master/CH40208/python_basics/images/a_and_b.png" width="30%" alt="The assignment of 3 to the variable a and the assignment of this to b."/>
# </p>
#
# Because `b` is assigned to the **value** stored in `a`, and not to `a` itself, if we **reassign** `a`, the value stored in `b` does not change.
a = 3
b = a  # b now holds 3
a = 2  # rebinding a leaves b unchanged
print(f'a = {a}')
print(f'b = {b}')
# <p align="center">
# <img src="https://github.com/pythoninchemistry/ch40208/raw/master/CH40208/python_basics/images/a_and_b_switch.png" width="50%" alt="The assignment of 3 to the variable a and the assignment of this to b, followed by the reassignment of a as 2."/>
# </p>
#
# ## Multiple assignment
#
# Python allows multiple variables to be assigned in the same statement:
a = b = c = 21  # chained assignment: all three names get the value 21
print(a, b, c)
# ## Empty variables
#
# A variable may also be set to contain no value by assigning it `None`.
a = None  # None represents "no value"
print(a)
| CH40208/python_basics/variables.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// + dotnet_interactive={"language": "csharp"}
#r "BoSSSpad.dll"
using System;
using System.Collections.Generic;
using System.Linq;
using ilPSP;
using ilPSP.Utils;
using BoSSS.Platform;
using BoSSS.Platform.LinAlg;
using BoSSS.Foundation;
using BoSSS.Foundation.XDG;
using BoSSS.Foundation.Grid;
using BoSSS.Foundation.Grid.Classic;
using BoSSS.Foundation.Grid.RefElements;
using BoSSS.Foundation.IO;
using BoSSS.Solution;
using BoSSS.Solution.Control;
using BoSSS.Solution.GridImport;
using BoSSS.Solution.Statistic;
using BoSSS.Solution.Utils;
using BoSSS.Solution.AdvancedSolvers;
using BoSSS.Solution.Gnuplot;
using BoSSS.Application.BoSSSpad;
using BoSSS.Application.XNSE_Solver;
using static BoSSS.Application.BoSSSpad.BoSSSshell;
Init();  // initialise the BoSSSpad environment (static method imported from BoSSSshell)
// + [markdown] dotnet_interactive={"language": "csharp"}
// This tutorial demonstrates the creation, resp. the import of grids/meshes
// into *BoSSS*.
// + [markdown] dotnet_interactive={"language": "csharp"}
// # Cartesian 2D grids
// A 2D Cartesian mesh can be created from an array of $x$- and $y$-nodes
// via the method **Grid2D.Cartesian2DGrid**.
//
// Note that the number of nodes needs to be equal to the
// number of cells $+1$. For instance, for $10$ cells we need $11$ nodes.
// + dotnet_interactive={"language": "csharp"}
int Res = 10;
// Res + 1 equidistant nodes on [0, 1] give Res cells in each direction.
double[] xNodes = GenericBlas.Linspace(0, 1, Res + 1);
double[] yNodes = GenericBlas.Linspace(0, 1, Res + 1);
// Total cell count of the 2D grid (cells per direction = nodes - 1).
int J = (xNodes.Length - 1)*(yNodes.Length - 1);
// Tag the grid name with the current workflow project and the cell count.
string GridName = string.Format(BoSSSshell.WorkflowMgm.CurrentProject + "_J" +J);
Console.WriteLine("Creating grid with " + J + " cells. ");
GridCommons g;
g = Grid2D.Cartesian2DGrid(xNodes, yNodes);
g.Name = GridName;
| doc/handbook/GridGeneration/GridGeneration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="VYK2bgrcxIde" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f20bfb70-9bc7-4a19-c599-a0fed46cd2c8"
# Scan upward from 1042000 for the first Armstrong (narcissistic) number in
# range: a number equal to the sum of its digits each raised to the power of
# the digit count.
n = 1042000
while n <= 702648265:
    order = len(str(n))
    s = 0
    num = n
    while num > 0:
        num, digit = divmod(num, 10)
        s += digit ** order
    if s == n:
        print("The first armstrong number is :", n)
        break
    n += 1
# + id="twXF1TKw4db-" colab_type="code" colab={}
| Day_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## <div align="center">Экспериментальный анализ производительности пересечений автоматов</div>
# ### <div align="center">Отчет о проведении эксперимента</div>
# <div align="center">23.09.20</div>
# **Датасеты:**
#
# Использованы датасеты LUBM300, LUBM500, LUBM1M, LUBM1.5M, LUBM1.9M (запуск на других приводил к зависанию без видимого прогресса)
# Регекспы были преобразованы для использования pyformlang (спасибо <NAME>)
# **Железо:**
#
# Architecture: x86_64
# CPU(s): 8
# Thread(s) per core: 2
# Model name: Intel(R) Core(TM) i5-8250U CPU @ 1.60GHz
# CPU MHz: 700.067
# CPU max MHz: 3400,0000
# CPU min MHz: 400,0000
# L1d cache: 32K
# L1i cache: 32K
# L2 cache: 256K
# L3 cache: 6144K
#
# OS: Ubuntu 18.04
# RAM: 2x4Gb DDR4
#
# Замеры производились с помощью встроенного модуля *time*, бралось усредненное значение из 5 экспериментов.
# Контрольные числа(reachable_pairs) были проверены на совпадение на этапе постпроцессинга для каждого из алгоритмов транзитивного замыкания матриц.
# Регулярные выражения были сгруппированы в 12 наборов по структурной схожести
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Пример итоговой таблички (для датасета LUBM500):
# Load one benchmark result table as an example (LUBM500 dataset).
df = pandas.read_csv('res_LUBM500.csv')
df.head()
# Все замеры вычисления итоговых пар (терминал, количество ребер с этим терминалом) длились меньше 1 мс (кроме одного выброса)
# File-name template for the per-dataset result tables.
table_mask = "res_LUBM{num}.csv"
for num in ['300', '500', '1M', '1.5M', '1.9M']:
    df = pandas.read_csv(table_mask.format(num=num))
    # Distinct pair-extraction timings (ms) per dataset — expected near zero.
    print(df['pairs_time_ms'].unique())
def boxplot(df):
    """Draw a log-scale box plot of intersection+closure time per regex group.

    Expects a benchmark result frame with columns 'graph', 'regex', 'algo',
    'intersection+closure_time_ms' and 'reachable_pairs'.  Regex names are
    normalised to a zero-padded two-digit group prefix (q00..q11) and the two
    closure algorithms (hue 'algo') are shown side by side per group.
    """
    def get_box_data(df):
        # Drop columns irrelevant for plotting.
        df = df.drop(['reachable_pairs', 'graph'], axis=1)
        # Zero-pad the query IDs so lexicographic order matches numeric order.
        # NOTE: the replace passes are order-dependent — 'q_..' becomes
        # 'q0_..' first, and the i=0 pass then rewrites it to 'q00_..';
        # single-digit 'q<i>_..' becomes 'q0<i>_..'.
        df['regex'] = df['regex'].replace(to_replace='^q_', value='q0_', regex=True)
        for i in range(10):
            df['regex'] = df['regex'].replace(to_replace=f"^q{i}_", value=f"q0{i}_", regex=True)
        # Keep only the three-character group prefix, e.g. 'q07'.
        df['regex'] = df['regex'].apply(lambda regex: regex[:3])
        return df
    plt.figure(figsize=(20,10))
    # The 'graph' column is constant per file; use it as the plot title.
    df_name = df['graph'][0]
    box_df = get_box_data(df)
    bp = sns.boxplot(y='intersection+closure_time_ms', x='regex',
                data=box_df,
                palette="Paired",
                hue='algo',
                width=0.5,
                order=sorted(box_df['regex'].unique()),
               )
    bp.set_title(df_name)
    bp.set_yscale("log")
    bp.set_ylabel("intersection+closure time, ms")
    # Relabel the legend entries: algo 0 = multiplication, algo 1 = squaring.
    leg = bp.get_legend()
    leg.texts[0].set_text('Multiplication')
    leg.texts[1].set_text('Squaring')
# Build one box plot per dataset; first sanity-check that both closure
# algorithms (algo 0 and algo 1) produced identical reachable-pair counts.
for num in ['300', '500', '1M', '1.5M', '1.9M']:
    df = pandas.read_csv(table_mask.format(num=num))
    # Assert that reachable pairs are equal for each algo
    assert df[df['algo'] == 0]['reachable_pairs'].reset_index(drop=True).equals(
        df[df['algo'] == 1]['reachable_pairs'].reset_index(drop=True)
    )
    # Pair-extraction timing is not plotted.
    df = df.drop('pairs_time_ms', axis=1)
    boxplot(df)
# # Выводы
# На всех графиках со сравнением времени вычисления пересечения и тензорного произведения на разных датасетах, явно заметно, что запросы из группы q00 и q11 вычисляются дольше остальных.
#
# Нет явной разницы между медианой для вычисления транзитивного замыкания матрицы с помощью возведения в квадрат и умножения на матрицу смежности, кроме незначительных отклонений.
# Это можно объяснить тем, что в первом случае более плотные матрицы но меньше операций умножения, когда во втором - много операций умножения, но матрицы сильно разрежены.
#
# Вероятно, на больших датасетах разница будет видна лучше, но данные результаты не несут в себе такой информации.
| benchmark/Report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Parsing GSE3838 GSE3839 GSE4974 to identify behavior of HDAC and SIRT transcripts in megakaryocytes under multiple conditions
from IPython.display import HTML, display
# *Text is from NCBI GEO*
#
# **GSE3838: **
#
# *Experimental conditions*
# CHRF-288 cells were cultured in the presence of 10 ng/mL phorbol ester (PMA) or equivalent volume of DMSO solvent (0.02%). Unstimulated control cells from time zero (exponentially growing CHRF cells) were also analyzed. For PMA treated legs, only the adherent cells were included in the transcriptional analysis.
#
# |_Treatment/Timepoint_| Experiment # (Number of technical replicates) |
# |---------------------|-----------------------------------------------|
# |No treatment (Time 0)| exp3 (3), exp4 (2) |
# |PMA-1hr| exp3 (2), exp4 (2) |
# |PMA-6hr| exp3 (2), exp4 (2)|
# |PMA-12hr| exp3 (1), exp4 (1)|
# |PMA-1d| exp3 (1), exp4 (2)|
# |PMA-2d| exp3 (1), exp4 (1)|
# |PMA-3d| exp3 (1), exp4 (1)|
# |PMA-4d| exp3 (1), exp4 (1)|
# |PMA-5d| exp3 (2), exp4 (2)|
# |PMA-7d| exp3 (1), exp4 (1)|
# |PMA-9d| exp3 (2), exp4 (1)|
#
#
# *Number of replicates*
# Two biological replicate experiments were analyzed and approximately one-half of the samples with each experiment were technically replicated. Hybridizations were performed in a reference design with all samples labeled with Cy3 and a reference RNA pool labeled with Cy5 (Universal Reference synthesized RNA).
#
# **GSE3839:**
# *Experimental conditions*
# G-CSF mobilized peripheral blood CD34-positive cells were cultured with TPO, IL-3, and Flt3-L to induce Mk differentiation. **Samples prior to day 5, including uncultured starting cells, were analyzed directly, whereas samples after and including day 5 were positively selected for CD41a expression immediately prior to RNA isolation.**
#
# |_Treatment/Timepoint_| Experiment # (Number of technical replicates) |
# |---------------------|-----------------------------------------------|
# | Day 0| Exp1(2), Exp2(2), Exp3(2) |
# | Day 1| Exp1(1), Exp2(1), Exp3(1) |
# | Day 2| Exp1(2), Exp2(1), Exp3(0) |
# | Day 3| Exp1(1), Exp2(1), Exp3(1) |
# | Day 4| Exp1(2), Exp2(2), Exp3(2) |
# | Day 5| Exp1(2), Exp2(2), Exp3(1) |
# | Day 7| Exp1(1), Exp2(1), Exp3(2) |
# | Day 9| Exp1(1), Exp2(2), Exp3(1) |
# | Day 12| Exp1(1), Exp2(1), Exp3(1) |
#
# *Number of replicates*
# Three biological replicate experiments were analyzed and approximately one-half of the samples from each experiment were technically replicated. Hybridizations were performed in a reference design with all samples labeled with Cy3 and a reference RNA pool labeled with Cy5.
#
# **GSE4974:**
#
# *Experimental conditions*
# G-CSF mobilized peripheral blood CD34-positive cells were cultured with TPO and or NIC from day 5. This study focuses on understanding the differences in the temporal gene expression pattern during differentiation with and without nicotinamide. Nicotinamide was added on day 5. For each experiment, a sample was taken on day 5, before nicotinamide addition, and on days 6, 8, and 10 from both nicotinamide and control treated cultures. Each sample was analyzed in duplicate for a total of 14 hybridizations per culture or 28 total hybridizations.
#
# |_Treatment/Timepoint_| Experiment # (Number of technical replicates) |
# |---------------------|-----------------------------------------------|
# | Day 5 (Pre-treatment with NIC) | Exp1(2), Exp2(2) |
# | Day 6 TPO only | Exp1(2), Exp2(2) |
# | Day 8 TPO only | Exp1(2), Exp2(2) |
# | Day 10 TPO only | Exp1(2), Exp2(2) |
# | Day 6 NIC+TPO | Exp1(2), Exp2(2) |
# | Day 8 NIC+TPO | Exp1(2), Exp2(2) |
# | Day 10 NIC+TPO | Exp1(2), Exp2(2) |
#
# *Number of replicates*
# Two biological experiments were analyzed with two technical replicates.
#
#
# ## Comparisons
# 1. Identify genes that may be activated in megakaryoblastic cell line over time. In CHRF cells, plot genes over time when stimulated with PMA
# 2. Identify genes that may be activated over time during differentiation. In primary cells #1, plot genes over time when stimulated with TPO over time (day 0-12)
# 3. In primary cells #2, plot ratio of genes TPO vs NIC+TPO (day 5-10)
# 4. Summarize consistency between CHRF and primary cells
# 5. Summarize consistency between primary cells #1 and primary cells #2
# 6. Identify genes that may be activated in response to long-term treatment with nicotinamide, which is a PAN-sirtuin inhibitor.
#
#
# +
# load the series file
# skip the lines with ! except !Sample_title
# save file into dataframe
# NOTE(review): this cell was committed unfinished — bare "GSE_path =" and
# "with open(GSE_path)" were syntax errors.  Completed minimally per the
# comments above; building the DataFrame from the kept lines is still TODO.
GSE_path = "GSE3838_series_matrix.txt"  # TODO(review): set the real file path


def load_series_lines(path):
    """Return the data lines of a GEO series-matrix file.

    Every metadata line (prefix '!') is skipped except '!Sample_title',
    which is kept so the sample columns can be labelled later.

    Parameters
    ----------
    path : str
        Path to the series-matrix text file.

    Returns
    -------
    list[str]
        Kept lines, newline-terminated, in file order.
    """
    kept = []
    with open(path) as fh:
        for line in fh:
            if line.startswith("!") and not line.startswith("!Sample_title"):
                continue
            kept.append(line)
    return kept
| notebook-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import Required Library
# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split , cross_val_score
from sklearn.metrics import r2_score , mean_squared_error , mean_absolute_error,accuracy_score
# -
# ## Library for Visualization
# +
#import seaborn as sns
#from matplotlib import pyplot as plt
#sns.set_style("whitegrid")
# #%matplotlib inline
# -
# #### Filter all the Warnings
import warnings
warnings.filterwarnings("ignore")  # NOTE(review): silences *all* warnings, including deprecations
# ## Read X and Y Parameter
# +
#x = pd.read_csv("../tsrl10000.csv")
#x = x.iloc[:,1:]
#
#y = pd.read_csv("../srlbininfo_10000",sep ="\t",header =None )
#
#y = pd.DataFrame(y.iloc[:,1])
#y.columns = ["RL"]
#
#df = pd.concat([y,x],axis =1)
#
#df = df[df.RL < 25] #Ignore values more than 25
#y = pd.DataFrame(df.iloc[:,0])
#x = df.iloc[:,1:]
# +
#df.to_csv("../RL_df_10000.csv",index = False) #When Memory Problem Just Load the data
# -
# Cached feature/target table: first column is the target RL, rest features.
df = pd.read_csv("../RL_df_10000.csv")
y = pd.DataFrame(df.iloc[:,0])
x = df.iloc[:,1:]
print(x.shape, y.shape)
display(x.head(2))
display(y.head(2))
# ## Split the data to training and Testing set
# NOTE(review): no random_state is given, so the split (and every score
# below) changes on each run — pass random_state=... for reproducibility.
x_train,x_test,y_train, y_test= train_test_split(x,y,test_size=0.2)
x_train.shape,y_train.shape, x_test.shape , y_test.shape
# ## Linear Regression Starts Here
# ### Classical Linear Regressors
# ###### We used Following 4 Regression Methods First
# 1. **LinearRegression**([…])-->Ordinary least squares Linear Regression. <br>
# 2. **Ridge**([alpha, fit_intercept, …]) -->Linear least squares with l2 regularization.<br>
# 3. **RidgeCV**([alphas, …]) -->Ridge regression with built-in cross-validation.<br>
# 4. **SGDRegressor**([loss, penalty, …]) -->Linear model fitted by minimizing a regularized empirical loss with SGD<br>
# ### 1.1 LinearRegression()
# class sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None)
from sklearn.linear_model import LinearRegression
name = "Linear _Regression--> "
# Ordinary least squares fitted on the training split.
lr = LinearRegression().fit(x_train,y_train)
y_pred = lr.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
print(name + " Intercept " , lr.intercept_)
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",lr.score(x_train,y_train))
print(name + "Score for test data Set",lr.score(x_test,y_test))
# NOTE(review): scoring x_test against the model's own predictions always
# yields R^2 == 1.0 — the line below carries no information.
print(name + "Score for Prediction data Set",lr.score(x_test,y_pred))
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_pred))
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test, y_pred))
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test, y_pred))
# -
display(y_pred[:5])
display(y_test[:5])
# #### This is for Visualizing
# +
#y_pred = lr.predict(x_train)
#fig,ax = plt.subplots()
#ax.scatter(y_train,y_pred,edgecolor=("r"))
#ax.plot([y_train.min(),y_train.max()],[y_train.min(),y_train.max()],"b",lw=2)
#ax.set_xlabel("Measured")
#ax.set_ylabel("Predicted")
#plt.show()
#
#fit(self,X,y[sample_weight])
#get_params(self[,deep])
#predict(self,X)
#score(self,X,Y[,sample_weight])
#set_params(self,\)
# +
#y_pred = lr.predict(x_train)
#fig,ax = plt.subplots()
#ax.scatter(y_train,y_pred,edgecolor=("r"))
##ax.plot([y_train.min(),y_train.max()],[y_train.min(),y_train.max()],"b",lw=2)
#ax.set_xlabel("Measured")
#ax.set_ylabel("Predicted")
#plt.show()
# -
# ## Feature Scalling
# +
#from sklearn.preprocessing import StandardScaler
#scaler = StandardScaler()
#
#print(scaler.fit(x_train))
#print("Scaler Mean",scaler.mean_)
#print("Scaler Variance",scaler.var_)
#
#scaled_x_train = scaler.transform(x_train)
#scaled_x_test = scaler.transform(x_test)
#
#slr = linear_model.LinearRegression()
#slr.fit(scaled_x_train,y_train)
# +
#print("Coeff_",slr.coef_)
#print("intercept_",slr.intercept_)
#print("R2 Score for train Set:{:.3f}".format(slr.score(scaled_x_train,y_train)))
#print("R2 Score for test set :{:.3f}".format(slr.score(scaled_x_test,y_test)))
# -
# ###### polynomial Features
# +
#from sklearn.preprocessing import PolynomialFeatures
#poly = PolynomialFeatures(2)
#x_train_poly = poly.fit_transform(x_train)
#x_test_poly = poly.transform(x_test)
#print(x_train.shape , x_train_poly.shape)
# +
# For this big Data Memory Error Occured
# -
# # Regularization
# With as many as 306 features in the model it is natural for the model to get quite complex. The model sticks too much to the data and the model has probably learned the background noise which results in high variance while being fit, which leads to **Overfitting**. This results in poor prediction and generalization power when applied o data outside the training set. To overcome this problem **regularization technique** is used.
#
# To find the best model, the common method in machine learning is to define a loss or cost function that describes how well the model fits the data. The goal is to find the model that minimzes this loss function. The idea is to penalize this loss function by adding a complexity term that would give a bigger loss for more complex models.
#
# **Regularization** allows to shrink the coefficients to zero by introducing a tuning parameter **'lambda'** or **'alpha'**. This ensures:
# - Shrinking of parameters, therefore it is mostly used to prevent multicollinearity.
# - Reduces the model complexity by coefficient shrinkage.
# The two popular methods used to regularize parameters are:
# - Ridge Regression
# - Lasso Regression
#
# **Ridge Regression:** Ridge regression uses L2 penalty to penalize coefficients. L2 penalty is the penalty equivalent to **square of the magnitude of coefficients**
#
# **Lasso Regression:** Lasso regression uses L1 penalty which is the **absolute value of the magnitude of coefficients**
#
# Let us apply Ridge and Lasso models to our data
# ## 1.2 Ridge() Regression
# `class sklearn.linear_model.Ridge(alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)` **Default**
# #### ||y - Xw||^2_2 + alpha * ||w||^2_2 Objective Function
# ##### Methods
# **fit**(self, X, y[, sample_weight])-->Fit Ridge regression model.<br>
# **get_params**(self[, deep])--> Get parameters for this estimator.<br>
# **predict**(self, X)--> Predict using the linear model.<br>
# **score**(self, X, y[, sample_weight])--> Return the coefficient of determination R^2 of the prediction.<br>
# **set_params**(self, \*\*params) --> Set the parameters of this estimator.
name = "Ridge -->"
from sklearn.linear_model import Ridge
# L2-regularised least squares; small alpha = light shrinkage.
clf_ridge = Ridge(alpha=0.1)
# BUG FIX: the original fitted on the *test* split (clf_ridge.fit(x_test,
# y_test)), which both leaks the test data and makes the train/test scores
# printed below meaningless.  Fit on the training split like every other
# model in this notebook.
clf_ridge.fit(x_train,y_train)
y_pred_ridge = clf_ridge.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
print(name + " Intercept " , clf_ridge.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",clf_ridge.score(x_train,y_train))
print(name + "Score for test data Set",clf_ridge.score(x_test,y_test))
# NOTE(review): R^2 of the model's own predictions against themselves is
# always 1.0 — the line below is uninformative.
print(name + "Score for Predictecd data Set",clf_ridge.score(x_test,y_pred_ridge)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_ridge)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_ridge)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_ridge)) ##
# -
# ## 1.3 RidgeCV
#
# `class sklearn.linear_model.RidgeCV(alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, gcv_mode=None, store_cv_values=False)`
#
# **fit**(self, X, y[, sample_weight]) --> Fit Ridge regression model with cv. <br>
#
# **get_params**(self[, deep])--> Get parameters for this estimator. <br>
#
# **predict**(self, X)--> Predict using the linear model.<br>
#
# **score**(self, X, y[, sample_weight]) --> Return the coefficient of determination R^2 of the prediction.<br>
#
# **set_params**(self, \*\*params) --> Set the parameters of this estimator.<br>
# +
# Import Library
from sklearn.linear_model import RidgeCV
name = "RidgeCV --> "
#Object Creation
# RidgeCV selects the best alpha from the supplied grid using its built-in
# cross-validation during fit.
clf_ridgecv = RidgeCV(alphas= [1e-3, 1e-2, 1e-1, 1]).fit(x_train , y_train)
y_pred_ridgecv = clf_ridgecv.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
print(name + " Intercept " , clf_ridgecv.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",clf_ridgecv.score(x_train,y_train))
print(name + "Score for test data Set",clf_ridgecv.score(x_test,y_test))
# NOTE(review): scoring predictions against themselves is always R^2 == 1.0.
print(name + "Score for Predictecd data Set",clf_ridgecv.score(x_test,y_pred_ridgecv)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_ridgecv)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_ridgecv)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_ridgecv)) ##
# -
# ### 1.4 SGDRegressor()
# `class sklearn.linear_model.SGDRegressor(loss='squared_loss', penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate='invscaling', eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False)`
# #### Methods
#
# **densify**(self) --> Convert coefficient matrix to dense array format.<br>
#
# **fit**(self, X, y[, coef_init, intercept_init, …])--> Fit linear model with Stochastic Gradient Descent.<br>
#
# **get_params**(self[, deep])--> Get parameters for this estimator.<br>
#
# **partial_fit**(self, X, y[, sample_weight])--> Perform one epoch of stochastic gradient descent on given samples.<br>
#
# **predict**(self, X) --> Predict using the linear model<br>
#
# **score**(self, X, y[, sample_weight])--> Return the coefficient of determination R^2 of the prediction.<br>
#
# **set_params**(self, \*\*kwargs)--> Set and validate the parameters of estimator.<br>
#
# **sparsify**(self) --> Convert coefficient matrix to sparse format.<br>
# +
from sklearn.linear_model import SGDRegressor
name = "SGDRegressor-->"
# NOTE(review): SGDRegressor is sensitive to feature scale; without
# standardising x first (see the commented-out StandardScaler cell above)
# it may converge poorly.
clf_sgd = SGDRegressor(max_iter = 50000, tol = 1e-3)
clf_sgd.fit(x_train, y_train)
y_pred_sgd = clf_sgd.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
print(name + " Intercept " , clf_sgd.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",clf_sgd.score(x_train,y_train))
print(name + "Score for test data Set",clf_sgd.score(x_test,y_test))
# NOTE(review): R^2 of predictions against themselves is always 1.0.
print(name + "Score for Predictecd data Set",clf_sgd.score(x_test,y_pred_sgd)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_sgd)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_sgd)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_sgd)) ##
# -
# ### The Above Method is Use less
# ## ********************** Part 2 **************************************
# ### Regressors with variable selection
#
# **1.ElasticNet**([alpha, l1_ratio, …])
# Linear regression with combined L1 and L2 priors as regularizer.<br>
# **2.ElasticNetCV**([l1_ratio, eps, …])
# Elastic Net model with iterative fitting along a regularization path.<br>
# **3.Lars**([fit_intercept, verbose, …])
# Least Angle Regression model a.k.a.<br>
# **4.LarsCV**([fit_intercept, …])
# Cross-validated Least Angle Regression model.<br>
# **5.Lasso**([alpha, fit_intercept, …])
# Linear Model trained with L1 prior as regularizer (aka the Lasso)<br>
# **6.LassoCV**([eps, n_alphas, …])
# Lasso linear model with iterative fitting along a regularization path.<br>
# **7.LassoLars**([alpha, …])
# Lasso model fit with Least Angle Regression a.k.a.<br>
# **8.LassoLarsCV**([fit_intercept, …])
# Cross-validated Lasso, using the LARS algorithm.<br>
# **9.LassoLarsIC**([criterion, …])
# Lasso model fit with Lars using BIC or AIC for model selection<br>
# **10.OrthogonalMatchingPursuit**([…])
# Orthogonal Matching Pursuit model (OMP) <br>
# **11.OrthogonalMatchingPursuitCV**([…])
# Cross-validated Orthogonal Matching Pursuit model (OMP).<br>
# ### 2.1 ElasticNet
# class sklearn.linear_model.**ElasticNet**(alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, precompute=False, max_iter=1000, copy_X=True, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')
# ##### Methods
#
# **fit**(self, X, y[, check_input]) --> Fit model with coordinate descent.
#
# **get_params**(self[, deep])--> Get parameters for this estimator.
#
# **path**(X, y[, l1_ratio, eps, n_alphas, …])--> Compute elastic net path with coordinate descent.
#
# **predict**(self, X)Predict using the linear model.
#
# **score**(self, X, y[, sample_weight]) --> Return the coefficient of determination R^2 of the prediction.
#
# **set_params**(self, \*\*params)--> Set the parameters of this estimator.
# +
from sklearn.linear_model import ElasticNet
name = "Elastic net--> "
# BUG FIX: l1_ratio must lie in [0, 1] (0 = pure ridge penalty, 1 = pure
# lasso); the original l1_ratio=10 violates the documented contract and
# raises an InvalidParameterError on current scikit-learn.  Use 1.0, the
# closest valid value to the original intent.
regr_elasticNet = ElasticNet(alpha=0.01,l1_ratio=1.0,max_iter=50000,random_state=0)
regr_elasticNet.fit(x_train , y_train)
y_pred_elasticNet = regr_elasticNet.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
# Report the fitted ElasticNet: intercept, R^2 scores, and error metrics.
# BUG FIX: the intercept previously printed came from an unrelated model
# (clf_ridgecv); it now reports the ElasticNet fitted above.
print(name + " Intercept " , regr_elasticNet.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",regr_elasticNet.score(x_train,y_train))
print(name + "Score for test data Set",regr_elasticNet.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields
# R^2 == 1.0, so this line carries no information — likely not intended.
print(name + "Score for Predictecd data Set",regr_elasticNet.score(x_test,y_pred_elasticNet)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_elasticNet)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_elasticNet)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_elasticNet)) ##
# -
# ### 2.2 ElasticnetCV() ---------->> **glmnet** in R<<-----------
# class sklearn.linear_model.**ElasticNetCV**(l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, positive=False, random_state=None, selection='cyclic')<br>
# `Elastic Net model with iterative fitting along a regularization path`
# +
# Fit an ElasticNet whose regularization path is chosen by 10-fold cross-validation.
from sklearn.linear_model import ElasticNetCV

name = "ElasticnetCV --> "
regr_enetcv = ElasticNetCV(cv=10, random_state=0).fit(x_train, y_train)
y_pred_elasticnetcv = regr_enetcv.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
# Report the fitted ElasticNetCV model: intercept, R^2 scores, and error metrics.
print(name + " Intercept " , regr_enetcv.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",regr_enetcv.score(x_train,y_train))
print(name + "Score for test data Set",regr_enetcv.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields
# R^2 == 1.0, so this line carries no information.
print(name + "Score for Predictecd data Set",regr_enetcv.score(x_test,y_pred_elasticnetcv)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_elasticnetcv)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_elasticnetcv)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_elasticnetcv)) ##
# -
# #### 2.3 Lars()
# ##### class sklearn.linear_model.Lars(fit_intercept=True, verbose=False, normalize=True, precompute='auto', n_nonzero_coefs=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True)[source]¶
# #### Methods
#
# **fit**(self, X, y[, Xy])-->Fit the model using X, y as training data.
#
# **get_params**(self[, deep])--Get parameters for this estimator.
#
# **predict**(self, X)--Predict using the linear model.
#
# **score**(self, X, y[, sample_weight])--Return the coefficient of determination R^2 of the prediction.
#
# **set_params**(self, \*\*params)--Set the parameters of this estimator.
# +
# Fit a Least Angle Regression (LARS) model with default settings.
from sklearn.linear_model import Lars

name = "Lars -->"
reg_lars = Lars().fit(x_train, y_train)
y_pred_lars = reg_lars.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
# Report the fitted Lars model: intercept, R^2 scores, and error metrics.
print(name + " Intercept " , reg_lars.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_lars.score(x_train,y_train))
print(name + "Score for test data Set",reg_lars.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set",reg_lars.score(x_test,y_pred_lars)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_lars)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lars)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lars)) ##
# -
# ### 2.4 LarsCV()
#
# class sklearn.linear_model.**LarsCV**(fit_intercept=True, verbose=False, max_iter=500, normalize=True, precompute='auto', cv=None, max_n_alphas=1000, n_jobs=None, eps=2.220446049250313e-16, copy_X=True) <br>
#
#
# **fit(self, X, y)** --> Fit the model using X, y as training data.<br>
#
# **get_params(self[, deep])** --> Get parameters for this estimator.<br>
#
# **predict(self, X)** --> Predict using the linear model.<br>
#
# **score(self, X, y[, sample_weight])** --> Return the coefficient of determination R^2 of the prediction.<br>
#
# **set_params(self, \*\*params)** --> Set the parameters of this estimator.<br>
# +
# Fit cross-validated LARS (10 folds). The bare attribute access at the end
# just displays the selected regularization strength in the notebook output.
from sklearn.linear_model import LarsCV

name = "LarsCV-->"
reg_larsCV = LarsCV(cv=10)
reg_larsCV.fit(x_train, y_train)
y_predict_lars = reg_larsCV.predict(x_test)
reg_larsCV.alpha_
# +
#print(name +" Coefficient ", lr.coef_)
# Report the cross-validated LarsCV model fitted above.
# BUG FIX: this cell previously reported the plain Lars model from section 2.3
# (reg_lars / y_pred_lars) instead of the LarsCV fit (reg_larsCV / y_predict_lars).
print(name + " Intercept " , reg_larsCV.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_larsCV.score(x_train,y_train))
print(name + "Score for test data Set",reg_larsCV.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set",reg_larsCV.score(x_test,y_predict_lars)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_predict_lars)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_predict_lars)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_predict_lars)) ##
# -
# ### 2.5 Lasso
# class sklearn.linear_model.**Lasso**(alpha=1.0, fit_intercept=True, normalize=False, precompute=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')
#
# **(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1** <br>
# **Linear Model trained with L1 prior as regularizer (aka the Lasso)**
#
# +
# Fit a Lasso (L1-regularized) linear model with alpha = 0.1.
from sklearn.linear_model import Lasso

name = "Lasso --> "
clf_lasso = Lasso(alpha=0.1)
clf_lasso.fit(x_train, y_train)
y_predict_lasso = clf_lasso.predict(x_test)
# +
#print(name +" Coefficient ", lr.coef_)
# Report the fitted Lasso model: intercept, R^2 scores, and error metrics.
print(name + " Intercept " , clf_lasso.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",clf_lasso.score(x_train,y_train))
print(name + "Score for test data Set",clf_lasso.score(x_test,y_test))
# BUG FIX: this line previously scored against the Lars predictions
# (y_pred_lars) from section 2.3; it now uses this model's own predictions.
# NOTE(review): scoring against the model's own predictions yields 1.0 anyway.
print(name + "Score for Predictecd data Set",clf_lasso.score(x_test,y_predict_lasso)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test, y_predict_lasso)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_predict_lasso)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_predict_lasso)) ##
# -
# ### 2.6 Lasso CV
#
# **Lasso linear model with iterative fitting along a regularization path.**<br>
# **(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1**
#
# class sklearn.linear_model.**LassoCV**(eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, positive=False, random_state=None, selection='cyclic')
#
# **fit(self, X, y)** --> Fit linear model with coordinate descent <br>
# **get_params(self[, deep])**--> Get parameters for this estimator.<br>
# **path(X, y[, eps, n_alphas, alphas, …])**--> Compute Lasso path with coordinate descent<br>
# **predict(self, X)**--> Predict using the linear model.<br>
# **score(self, X, y[, sample_weight])** --> Return the coefficient of determination R^2 of the prediction.<br>
# **set_params(self, \*\*params)** -->Set the parameters of this estimator.<br>
# +
# Fit a Lasso whose alpha is chosen by 10-fold cross-validation.
# (An explicit alpha grid can be passed separately to compare results.)
from sklearn.linear_model import LassoCV

name = "LassoCV"
reg_LassoCV = LassoCV(cv=10, random_state=0)
reg_LassoCV.fit(x_train, y_train)
y_predict_reg_lassoCV = reg_LassoCV.predict(x_test)
# +
# Report the fitted LassoCV model: coefficients, intercept, R^2 scores, errors.
print(name +" Coefficient ", reg_LassoCV.coef_)
print(name + " Intercept " , reg_LassoCV.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_LassoCV.score(x_train,y_train))
print(name + "Score for test data Set",reg_LassoCV.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set",reg_LassoCV.score(x_test,y_predict_reg_lassoCV)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_predict_reg_lassoCV)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_predict_reg_lassoCV)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_predict_reg_lassoCV)) ##
# -
# ### 2.7 LassoLars
# class sklearn.linear_model.**LassoLars**(alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, positive=False)
#
# **Lasso model fit with Least Angle Regression a.k.a. Lars
# It is a Linear Model trained with an L1 prior as regularizer.**
#
# **`(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1`**
# #### Methods
# * **fit(self, X, y[, Xy])**--> Fit the model using X, y as training data.
#
# * **get_params(self[, deep])**--> Get parameters for this estimator.
#
# * **predict(self, X)**--> Predict using the linear model.
#
# * **score(self, X, y[, sample_weight])**-->Return the coefficient of determination R^2 of the prediction.
#
# * **set_params(self, \*\*params)**--> Set the parameters of this estimator.
# +
# Fit a Lasso solved via the LARS algorithm (alpha = 0.1).
from sklearn.linear_model import LassoLars

name = "LassoLars -->"
reg_Lasso_lars = LassoLars(alpha=0.1)
reg_Lasso_lars.fit(x_train, y_train)
y_pred_lasso_lars = reg_Lasso_lars.predict(x_test)
# +
# Report the fitted LassoLars model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",reg_Lasso_lars.coef_)
print(name + " Intercept " , reg_Lasso_lars.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_Lasso_lars.score(x_train,y_train))
print(name + "Score for test data Set",reg_Lasso_lars.score(x_test,y_test))
# BUG FIX: this line previously scored against LassoCV's predictions
# (y_predict_reg_lassoCV) from section 2.6; it now uses this model's own
# predictions. NOTE(review): self-scoring yields R^2 == 1.0 anyway.
print(name + "Score for Predictecd data Set",reg_Lasso_lars.score(x_test,y_pred_lasso_lars)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_lasso_lars)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lasso_lars)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lasso_lars)) ##
# -
# ### 2.8 LassoLarsCV
# **Lasso linear model with iterative fitting along a regularization path**
# **The best model is selected by cross-validation.**
# **The optimization objective for Lasso is**
#
# **`(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1`**
#
# class sklearn.linear_model.**LassoLarsCV**(fit_intercept=True, verbose=False, max_iter=500, normalize=True, precompute='auto', cv=None, max_n_alphas=1000, n_jobs=None, eps=2.220446049250313e-16, copy_X=True, positive=False)
# +
# Fit a cross-validated LassoLars (10 folds).
from sklearn.linear_model import LassoLarsCV

# BUG FIX: the label previously said "LassoLars-->", mislabelling this
# section's printed output as the non-CV model from section 2.7.
name = "LassoLarsCV-->"
reg_lassoLarsCV = LassoLarsCV(cv=10).fit(x_train, y_train)
y_pred_lassoLarsCV = reg_lassoLarsCV.predict(x_test)
# +
# Report the fitted LassoLarsCV model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",reg_lassoLarsCV.coef_)
print(name + " Intercept " , reg_lassoLarsCV.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_lassoLarsCV.score(x_train,y_train))
print(name + "Score for test data Set",reg_lassoLarsCV.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set",reg_lassoLarsCV.score(x_test,y_pred_lassoLarsCV)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_lassoLarsCV)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lassoLarsCV)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lassoLarsCV)) ##
# -
# ### 2.9 LassoLarsIC
# **Lasso model fit with Lars using `BIC or AIC` for model selection** <br>
# The optimization objective for Lasso is:<br>
# **```(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1```**
#
# ``class sklearn.linear_model.**LassoLarsIC**(criterion='aic', fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, positive=False)``
# +
# Fit LassoLars with the regularization level selected by the
# Bayesian Information Criterion (BIC).
from sklearn.linear_model import LassoLarsIC

name = "LassoLarsIC-->"
reg_LassoLarsIC = LassoLarsIC(criterion='bic')
reg_LassoLarsIC.fit(x_train, y_train)
y_pred_lassoLarsIC = reg_LassoLarsIC.predict(x_test)
# +
# Report the fitted LassoLarsIC model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",reg_LassoLarsIC.coef_)
print(name + " Intercept " , reg_LassoLarsIC.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_LassoLarsIC.score(x_train,y_train))
print(name + "Score for test data Set",reg_LassoLarsIC.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set",reg_LassoLarsIC.score(x_test,y_pred_lassoLarsIC)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_lassoLarsIC)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lassoLarsIC)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lassoLarsIC)) ##
# -
# ### 2.10 OrthogonalMatchingPursuit
# **`Orthogonal Matching Pursuit model (OMP)`**
# class sklearn.linear_model.**OrthogonalMatchingPursuit**(n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize=True, precompute='auto')
# +
# Fit an Orthogonal Matching Pursuit (OMP) model.
from sklearn.linear_model import OrthogonalMatchingPursuit

# BUG FIX: label typo "OPM" -> "OMP" (Orthogonal Matching Pursuit).
name = "OMP --> "
reg_opm = OrthogonalMatchingPursuit().fit(x_train, y_train)
y_pred_opm = reg_opm.predict(x_test)
# +
# Report the fitted OMP model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",reg_opm.coef_)
print(name + " Intercept " , reg_opm.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_opm.score(x_train,y_train))
print(name + "Score for test data Set",reg_opm.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set",reg_opm.score(x_test,y_pred_opm)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_opm)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_opm)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_opm)) ##
# -
# ### 2.11 OrthogonalMatchingPursuitCV
# class sklearn.linear_model.**OrthogonalMatchingPursuitCV**(copy=True, fit_intercept=True, normalize=True, max_iter=None, cv=None, n_jobs=None, verbose=False)
# **Cross-validated Orthogonal Matching Pursuit model (OMP)**<br>
# **fit(self, X, y)**--> Fit the model using X, y as training data.<br>
# **get_params(self[, deep])**--> Get parameters for this estimator.<br>
# **predict(self, X)**--> Predict using the linear model.<br>
# **score(self, X, y[, sample_weight])**--> Return the coefficient of determination R^2 of the prediction.<br>
# **set_params(self, \*\*params)**--> Set the parameters of this estimator.<br>
# +
# Fit a cross-validated Orthogonal Matching Pursuit model (10 folds).
from sklearn.linear_model import OrthogonalMatchingPursuitCV

# BUG FIX: label typo "OPM" -> "OMP" (Orthogonal Matching Pursuit).
name = "OMP CV -->"
reg_opmCV = OrthogonalMatchingPursuitCV(cv=10).fit(x_train, y_train)
y_pred_opmCV = reg_opmCV.predict(x_test)
# +
# Report the fitted OrthogonalMatchingPursuitCV model.
print(name +" Coefficient ",reg_opmCV.coef_)
print(name + " Intercept " , reg_opmCV.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_opmCV.score(x_train,y_train))
print(name + "Score for test data Set",reg_opmCV.score(x_test,y_test))
# BUG FIX: this line previously scored against the non-CV OMP predictions
# (y_pred_opm) from section 2.10; it now uses this model's own predictions.
# NOTE(review): self-scoring yields R^2 == 1.0 anyway.
print(name + "Score for Predictecd data Set",reg_opmCV.score(x_test,y_pred_opmCV)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_opmCV)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_opmCV)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_opmCV)) ##
# -
# # Part 3 . Bayesian Regressors
# ### 3.1 ARDRegression --> bayesian ARD regression
#
# ```class sklearn.linear_model.ARDRegression(n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06,
# lambda_2=1e-06, compute_score=False, threshold_lambda=10000.0, fit_intercept=True, normalize=False, copy_X=True, verbose=False)```
#
# http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
#
# +
# Fit a Bayesian ARD (Automatic Relevance Determination) regressor.
from sklearn.linear_model import ARDRegression

name = "ARDRegression"
clf_ard = ARDRegression()
clf_ard.fit(x_train, y_train)
# BUG FIX: predictions previously came from an unrelated estimator `clf`;
# they must come from the ARD model fitted above.
pred_y_ard = clf_ard.predict(x_test)
# +
# Report the fitted ARDRegression model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",clf_ard.coef_)
print(name + " Intercept " , clf_ard.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",clf_ard.score(x_train,y_train))
print(name + "Score for test data Set",clf_ard.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set",clf_ard.score(x_test,pred_y_ard)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,pred_y_ard)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,pred_y_ard)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,pred_y_ard)) ##
# -
# ## 3.2 BayesianRidge
#
# **```class sklearn.linear_model.BayesianRidge(n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06,
# lambda_2=1e-06, alpha_init=None, lambda_init=None, compute_score=False, fit_intercept=True, normalize=False,
# copy_X=True, verbose=False)```**
# +
# Fit a Bayesian Ridge regressor.
# BUG FIX: the import path was misspelled ("sklearn.linear_modle"), which
# raises ModuleNotFoundError; the correct module is sklearn.linear_model.
from sklearn.linear_model import BayesianRidge

name = "BayesianRidge"
clf_bay = BayesianRidge().fit(x_train, y_train)
y_pred_bay = clf_bay.predict(x_test)
# +
# Report the fitted BayesianRidge model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",clf_bay.coef_)
print(name + " Intercept " , clf_bay.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",clf_bay.score(x_train,y_train))
print(name + "Score for test data Set",clf_bay.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set", clf_bay.score(x_test,y_pred_bay)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_bay)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_bay)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_bay)) ##
# -
# ## +++++++++++++++++++++++++++++++++++++++++
# ## Part 4. MultiTask Linear Regressors with variable selection
# These estimators fit multiple regression problems (or tasks) jointly, while including sparse coefficients.While the inferred coefficients may differ between the tasks, they are constrained to agree on the features that are selected(non-zero coefficients)
#
# 1. linear_model.**MultiTaskElasticNet([alpha, …])**--> Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
#
# 2. linear_model.**MultiTaskElasticNetCV([…])**--> Multi-task L1/L2 ElasticNet with built-in cross-validation.
#
# 3. linear_model.**MultiTaskLasso([alpha, …])**--> Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.
#
# 4. linear_model.**MultiTaskLassoCV([eps, …])**--> Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.
# ## 4.1 MultiTaskElasticNet
# class sklearn.linear_model.**MultiTaskElasticNet**(alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')
#
# **Multi-task ElasticNet Model trained with L1/L2 mixed-norm as regularizer**
# The optimization objective for MultiTaskElasticNet is : <br>
#
# (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * l1_ratio * ||W||_21+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
# <br> where <br>
# ||W||_21 = sum_i sqrt(sum_j w_ij ^ 2)
# +
# Fit a multi-task ElasticNet (L1/L2 mixed-norm regularizer).
from sklearn.linear_model import MultiTaskElasticNet

name = "MT_Enet"
reg_MT_Enet = MultiTaskElasticNet(alpha=0.10).fit(x_train, y_train)
# BUG FIX: the test matrix is named x_test throughout this notebook; X_test
# (capital X) is undefined and raised NameError.
y_pred_MT_Enet = reg_MT_Enet.predict(x_test)
# +
# Report the fitted MultiTaskElasticNet model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",reg_MT_Enet.coef_)
print(name + " Intercept " , reg_MT_Enet.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_MT_Enet.score(x_train,y_train))
print(name + "Score for test data Set",reg_MT_Enet.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set", reg_MT_Enet.score(x_test,y_pred_MT_Enet)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_Enet)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_Enet)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_Enet)) ##
# -
# ## 4.2 MultiTaskElasticNetCV
# class sklearn.linear_model.**MultiTaskElasticNetCV**(l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, random_state=None, selection='cyclic')
#
# **Multi-task L1/L2 ElasticNet with built-in cross-validation.**
# BUG FIX: this cell was a stray raw-string expression with a space between
# the `r` prefix and the quotes — a SyntaxError. The objective formula was
# clearly intended as documentation, so it is now a comment:
# (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
#     + alpha * l1_ratio * ||W||_21
#     + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
# +
# Fit a multi-task ElasticNet with built-in 10-fold cross-validation.
from sklearn.linear_model import MultiTaskElasticNetCV

# BUG FIX: this section never set `name`, so its report cell reused the
# "MT_Enet" label left over from section 4.1.
name = "MT_EnetCV"
reg_MT_ECV = MultiTaskElasticNetCV(cv=10).fit(x_train, y_train)
y_pred_MT_ECV = reg_MT_ECV.predict(x_test)
# +
# Report the fitted MultiTaskElasticNetCV model.
print(name +" Coefficient ",reg_MT_ECV.coef_)
print(name + " Intercept " ,reg_MT_ECV.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_MT_ECV.score(x_train,y_train))
print(name + "Score for test data Set",reg_MT_ECV.score(x_test,y_test))
# BUG FIX: this line previously scored with the non-CV model reg_MT_Enet
# from section 4.1; it now uses this section's model.
# NOTE(review): self-scoring yields R^2 == 1.0 anyway.
print(name + "Score for Predictecd data Set", reg_MT_ECV.score(x_test,y_pred_MT_ECV)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_ECV)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_ECV)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_ECV)) ##
# -
# ## 4.3 MultiTaskLasso
# class sklearn.linear_model.MultiTaskLasso(alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')
#
# The optimization objective for Lasso is:<br>
# **(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21**
# where<br>
# **||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}**
# +
# Fit a multi-task Lasso (L1/L2 mixed-norm regularizer) with alpha = 0.1.
from sklearn.linear_model import MultiTaskLasso

name = "MT_Lasso"
clf_MT_Lasso = MultiTaskLasso(alpha=0.1)
clf_MT_Lasso.fit(x_train, y_train)
y_pred_MT_Lasso = clf_MT_Lasso.predict(x_test)
# +
# Report the fitted MultiTaskLasso model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",clf_MT_Lasso.coef_)
print(name + " Intercept " ,clf_MT_Lasso.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",clf_MT_Lasso.score(x_train,y_train))
print(name + "Score for test data Set",clf_MT_Lasso.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set", clf_MT_Lasso.score(x_test,y_pred_MT_Lasso)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_Lasso)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_Lasso)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_Lasso)) ##
# -
# ### 4.4 MultiTasklassoCV
# class sklearn.linear_model.**MultiTaskLassoCV**(eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, random_state=None, selection='cyclic')
# <br>
# **Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer**<br>
# (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
# <br>
# Where<br>
# ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
# +
# Fit a multi-task Lasso whose alpha is chosen by 10-fold cross-validation.
from sklearn.linear_model import MultiTaskLassoCV

name = "MT_LassoCV"
# BUG FIX: MultiTaskLassoCV has no `noise` parameter (passing it raises
# TypeError); `noise` belongs to data generators such as make_regression.
reg_MT_LassoCV = MultiTaskLassoCV(cv=10, random_state=0).fit(x_train, y_train)
y_pred_MT_LassoCV = reg_MT_LassoCV.predict(x_test)
# +
# Report the fitted MultiTaskLassoCV model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",reg_MT_LassoCV.coef_)
print(name + " Intercept " ,reg_MT_LassoCV.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",reg_MT_LassoCV.score(x_train,y_train))
print(name + "Score for test data Set",reg_MT_LassoCV.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set", reg_MT_LassoCV.score(x_test,y_pred_MT_LassoCV)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_LassoCV)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_LassoCV)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_LassoCV)) ##
# -
# # 5 Outlier-robust regressors
# `Any estimator using the Huber loss would also be robust to outliers, e.g. SGDRegressor with loss='huber'.`
#
# 1. **linear_model.HuberRegressor([epsilon, …])--> Linear regression model that is robust to outliers.**
#
# 2. **linear_model.RANSACRegressor([…])--> RANSAC (RANdom SAmple Consensus) algorithm.**
#
# 3. **linear_model.TheilSenRegressor([…])--> Theil-Sen Estimator: robust multivariate regression model.**
#
# ## 5.1 HuberRegressor()
# `class sklearn.linear_model.HuberRegressor(epsilon=1.35, max_iter=100, alpha=0.0001, warm_start=False, fit_intercept=True, tol=1e-05)`
#
# **Methods** <br>
#
# **fit(self, X, y[, sample_weight])**--> Fit the model according to the given training data.
#
# **get_params(self[, deep])**--> Get parameters for this estimator.
#
# **predict(self, X)**--> Predict using the linear model.
#
# **score(self, X, y[, sample_weight])**--> Return the coefficient of determination R^2 of the prediction.
#
# **set_params(self, \*\*params)**--> Set the parameters of this estimator.
# +
# Fit a Huber regressor (robust to outliers via the Huber loss).
from sklearn.linear_model import HuberRegressor , LinearRegression

name = "HuberRegressor"
huber = HuberRegressor()
huber.fit(x_train, y_train)
y_pred_huber = huber.predict(x_test)
# +
# Report the fitted HuberRegressor model: coefficients, intercept, scores, errors.
print(name +" Coefficient ",huber.coef_)
print(name + " Intercept " ,huber.intercept_) ##
print("****"*5+"Accuracy Test Model Fitting "+"****"*5)
print(name + "Score for train data set :",huber.score(x_train,y_train))
print(name + "Score for test data Set",huber.score(x_test,y_test))
# NOTE(review): scoring the model against its own predictions always yields 1.0.
print(name + "Score for Predictecd data Set", huber.score(x_test,y_pred_huber)) ##
print("\n"+"****"*5+"R2 Score"+"****"*5)
print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_huber)) ##
print("\n"+"****"*5+"Mean Absolute Error"+"****"*5)
print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_huber)) ##
print("\n"+"****"*5+" Mean Squared Error"+"****"*5)
print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_huber)) ##
# -
# ## Deep Learning
# +
#from tensorflow.python import keras
#from tensorflow.python.keras.models import Sequential
#from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, Dropout
# +
#import scipy
#import numpy as np
#import matplotlib as plt
#import pandas as pd
#import sklearn
#import pydot
#import h5py
#import tensorflow
#from tensorflow import keras
##import theano
# +
#print("SCIPY_Version",scipy.__version__)
#print("Numpy_Version",np.__version__) #We imported as np so
#print("matplotlib_Version",plt.__version__) #we imported as plt so
#print("pandas_Version",pd.__version__)
#print("Sk_learn_Version",sklearn.__version__)
#print("pydot_Version",pydot.__version__)
#print("h5py_Version",h5py.__version__)
##print("theano_Version",theano.__version__)
#print("tensorflow_Version",tensorflow.__version__)
# -
# ## Checking whether the basic deep learning model runs or not
# ## Build the Model
# +
#model = keras.Sequential([
# keras.layers.Flatten(input_shape = (28,28)),
#
# keras.layers.Dense(128, activation = "relu",input_shape = [len(x_train)]),
# keras.layers.Dense(10,activation= "softmax")
#
# keras.Dense(1)
#])
# +
#model.summary()
#len(x_train)
# -
# ## Compile the Model
# +
#Loss Function --> This measures how accurate the model is during training. We want to minimize this function to "steer"
# the model in the right direction
# Optimizer --> This is how the model is updated based on the data it sees and its loss function
#Metrics --> Used to monitor the training and testing steps
#The Following example uses accuracy, the fraction of the images that are correctly classified.
# -
| All_regression_in_one.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Opinion Mining and Sentiment Analysis
#
# > "the beginning of wisdom is the definition of terms" - _Socrates (debated)_
#
# Sentiment analysis has become important in modern unstructured data analysis. One of the pivotal works in this area is [Opinion Mining and Sentiment Analysis](https://www.nowpublishers.com/article/Details/INR-011) by Bo Pang and Lillian Lee, 2008.
#
# What are sentiment analysis useful for?
#
# * Summary of reviews
# 1. Categorization for user consumption
# 1. Calculating some summary statistics or automated ratings
# * As an enabling technology
# 1. Recommendation system: e.g. products
# 1. Monitoring: e.g. [toxic comments](https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge)
# 1. Advertisements: e.g., choose advertisements based on webpage content
#
# ## Featurization
#
# * Text is unstructured data
# * Need to be transformed to features
# * Is it positive? (Yes/No)
# * How positive is it? (ordinal/numeric)
#
# ## Lexicon based Sentiment Analysis
#
# Extracting sentiment from text require incorporating meaning of words (dictionary or otherwise). There are libraries that do this for you.
#
# An example is [VADER (Valence Aware Dictionary and sEntiment Reasoner)](https://www.aaai.org/ocs/index.php/ICWSM/ICWSM14/paper/viewPaper/8109). In linguistics, valence is defined as
# > Valence: the number of grammatical elements with which a particular word, especially a verb, combines in a sentence.
#
# Given some text, VADER will return a set of scores: valence, negative, neutral, positive. Valence score is associated with sentiment intensity.
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Demo corpus: simple sentences exercising VADER's handling of punctuation,
# capitalization, booster words, negation, slang and emoticons.
sentences = ["VADER is smart, handsome, and funny.", # positive sentence example
             "VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted)
             "VADER is very smart, handsome, and funny.", # booster words handled correctly (sentiment intensity adjusted)
             "VADER is VERY SMART, handsome, and FUNNY.", # emphasis for ALLCAPS handled
             "VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity
             "VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!",# booster words & punctuation make this close to ceiling for score
             "The book was good.", # positive sentence
             "The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted)
             "The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence
             "A really bad, horrible book.", # negative sentence with booster words
             "At least it isn't a horrible book.", # negated negative sentence with contraction
             ":) and :D", # emoticons handled
             "", # an empty string is correctly handled
             "Today sux", # negative slang handled
             "Today sux!", # negative slang with punctuation emphasis handled
             "Today SUX!", # negative slang with capitalization emphasis
             "Today kinda sux! But I'll get by, lol" # mixed sentiment example with slang and contrastive conjunction "but"
             ]
# Multi-sentence paragraph; each sentence can be scored individually if split first.
paragraph = "It was one of the worst movies I've seen, despite good reviews. \
Unbelievably bad acting!! Poor direction. VERY poor production. \
The movie was bad. Very bad movie. VERY bad movie. VERY BAD movie. VERY BAD movie!"
# Harder cases: sarcasm, double negation, contrastive conjunctions, idioms.
tricky_sentences = [
    "Most automated sentiment analysis tools are crappy.",
    "VADER sentiment analysis is the shit.",
    "Sentiment analysis has never been good.",
    "Sentiment analysis with VADER has never been this good.",
    "War<NAME>atty has never been so entertaining.",
    "I won't say that the movie is astounding and I wouldn't claim that \
    the movie is too banal either.",
    "I like to hate Michael Bay films, but I couldn't fault this one",
    "It's one thing to watch an Uwe Boll film, but another thing entirely \
    to pay for it",
    "The movie was too good",
    "This movie was actually neither that funny, nor super witty.",
    "This movie doesn't care about cleverness, wit or any other kind of \
    intelligent humor.",
    "Those who find ugly meanings in beautiful things are corrupt without \
    being charming.",
    "There are slow and repetitive parts, BUT it has just enough spice to \
    keep it interesting.",
    "The script is not fantastic, but the acting is decent and the cinematography \
    is EXCELLENT!",
    "<NAME> is one of the most compelling variations on this theme.",
    "Roger Dodger is one of the least compelling variations on this theme.",
    "Roger Dodger is at least compelling as a variation on the theme.",
    "they fall in love with the product",
    "but then it breaks",
    "usually around the time the 90 day warranty expires",
    "the twin towers collapsed today",
    "However, Mr. Carter solemnly argues, his client carried out the kidnapping \
    under orders and in the ''least offensive way possible.''"
]
sentences.extend(tricky_sentences)
sentences
# Fetch the VADER lexicon (no-op if already downloaded).
nltk.download('vader_lexicon')
sid = SentimentIntensityAnalyzer()
# Print each sentence followed by its scores: compound, neg, neu, pos.
for sentence in sentences:
    print(sentence)
    ss = sid.polarity_scores(sentence)
    for k in sorted(ss):
        print('{0}: {1}, '.format(k, ss[k]), end='')
    print('\n')
| lecture-notes/14-Sentiment-Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_style('white')
# -
# Raw sweep results: one row per trained model configuration.
bond_df = pd.read_csv('extracted_au_data.csv', sep=";")
# NOTE(review): column meanings inferred from names — 'l' and 'r' are presumably
# the two loss values compared below; confirm against the extraction script.
bond_df.columns = ["model_name", "predictors", "aug_num", "aug_size", "lookback", "dropout", "type", "t", "p", "l", "r_5", "r", "r_95"]
bond_df.describe()
# +
# Percentage improvement of 'l' relative to 'r' (positive = lower loss).
bond_df['improvement'] = (bond_df['r'] - bond_df['l']) / bond_df['r'] * 100
fig = plt.figure(figsize=(10,7))
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 (histplot is the
# modern replacement) — kept as-is to preserve behavior on the pinned version.
sns.distplot(bond_df['improvement'], kde=False, ax=fig.gca())
plt.title('% Improvement in Describing Loss For Simple Bond Models')
plt.ylabel('Count')
plt.xlabel('% Improvement')
plt.show()
# -
# Improvement as a function of the lookback window length.
fig = plt.figure(figsize=(10,7))
sns.violinplot(x=bond_df['lookback'], y=bond_df['improvement'], ax=fig.gca())
plt.title('% Improvement in Describing Loss For Simple Gold Models')
plt.ylabel('% Improvement')
plt.xlabel('Lookback (days)')
plt.show()
# Improvement as a function of the dropout coefficient.
fig = plt.figure(figsize=(10,7))
sns.violinplot(x=bond_df['dropout'], y=bond_df['improvement'], ax=fig.gca())
plt.title('% Improvement in Describing Loss For Simple Gold Models')
plt.ylabel('% Improvement')
plt.xlabel('Dropout Coefficient')
plt.show()
# +
# Improvement per predictor set; tick labels rotated to avoid overlap.
fig = plt.figure(figsize=(10,7))
ax = sns.violinplot(x=bond_df['predictors'], y=bond_df['improvement'], ax=fig.gca())
ax.set_xticklabels(ax.get_xticklabels(), rotation=15)
plt.title('% Improvement in Describing Loss For Simple Gold Models')
plt.ylabel('% Improvement')
plt.xlabel('Predictors')
plt.show()
# -
# Show the 20 configurations with the largest improvement (ascending order).
bond_df.iloc[np.argsort(bond_df['improvement'])].tail(20)
| models/analyze_bulk_Au_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="mt9dL5dIir8X"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="ufPx7EiCiqgR"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" deletable=true editable=true id="ucMoYase6URl"
# # Load images with tf.data
# + [markdown] colab_type="text" id="_Wwu5SXZmEkB"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/load_data/images"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="Oxw4WahM7DU9"
# This tutorial provides a simple example of how to load an image dataset using `tf.data`.
#
# The dataset used in this example is distributed as directories of images, with one class of image per directory.
# + [markdown] colab_type="text" deletable=true editable=true id="hoQQiZDB6URn"
# ## Setup
# + colab={} colab_type="code" id="QGXxBuPyKJw1"
from __future__ import absolute_import, division, print_function, unicode_literals
# !pip install tensorflow==2.0.0-beta1
import tensorflow as tf
# + colab={} colab_type="code" id="KT6CcaqgQewg"
AUTOTUNE = tf.data.experimental.AUTOTUNE
# + [markdown] colab_type="text" id="rxndJHNC8YPM"
# ## Download and inspect the dataset
# + [markdown] colab_type="text" deletable=true editable=true id="wO0InzL66URu"
# ### Retrieve the images
#
# Before you start any training, you will need a set of images to teach the network about the new classes you want to recognize. You have already created an archive of creative-commons licensed flower photos to use initially:
# + colab={} colab_type="code" id="rN-Pc6Zd6awg"
import pathlib
# Download (and cache) the flower-photos archive; returns the extracted path.
data_root_orig = tf.keras.utils.get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
                                         fname='flower_photos', untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
# + [markdown] colab_type="text" id="rFkFK74oO--g"
# After downloading 218MB, you should now have a copy of the flower photos available:
# + colab={} colab_type="code" id="7onR_lWE7Njj"
for item in data_root.iterdir():
  print(item)
# + colab={} colab_type="code" id="4yYX3ZRqGOuq"
import random
# One subdirectory per flower class; collect every image path and shuffle so
# later pipeline stages see classes in random order.
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
# + colab={} colab_type="code" id="t_BbYnLjbltQ"
all_image_paths[:10]
# + [markdown] colab_type="text" id="vkM-IpB-6URx"
# ### Inspect the images
# Now let's have a quick look at a couple of the images, so you know what you are dealing with:
# + colab={} colab_type="code" id="wNGateQJ6UR1"
import os
# LICENSE.txt: skip the 4-line header; each remaining line looks like
# "<relative path> CC-BY <attribution>", so splitting on ' CC-BY' yields
# (path, attribution) pairs, turned into a path -> attribution dict.
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
# + colab={} colab_type="code" id="jgowG2xu88Io"
import IPython.display as display
def caption_image(image_path):
    """Return an attribution caption ("Image (CC BY 2.0) <author>") for the
    image at *image_path*, looked up by its path relative to data_root."""
    rel_key = str(pathlib.Path(image_path).relative_to(data_root))
    credit_parts = attributions[rel_key].split(' - ')[:-1]
    return "Image (CC BY 2.0) " + ' - '.join(credit_parts)
# + colab={} colab_type="code" id="YIjLi-nX0txI"
# Show three random images together with their CC-BY attribution captions.
for n in range(3):
  image_path = random.choice(all_image_paths)
  display.display(display.Image(image_path))
  print(caption_image(image_path))
  print()
# + [markdown] colab_type="text" id="OaNOr-co3WKk"
# ### Determine the label for each image
# + [markdown] colab_type="text" id="-weOQpDw2Jnu"
# List the available labels:
# + colab={} colab_type="code" id="ssUZ7Qh96UR3"
# Class labels are the directory names, sorted for a stable ordering.
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
# + [markdown] colab_type="text" id="9l_JEBql2OzS"
# Assign an index to each label:
# + colab={} colab_type="code" id="Y8pCV46CzPlp"
label_to_index = dict((name, index) for index, name in enumerate(label_names))
label_to_index
# + [markdown] colab_type="text" id="VkXsHg162T9F"
# Create a list of every file, and its label index:
# + colab={} colab_type="code" id="q62i1RBP4Q02"
# Each image's label comes from its parent directory name.
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
                    for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
# + [markdown] colab_type="text" id="i5L09icm9iph"
# ### Load and format the images
# + [markdown] colab_type="text" id="SbqqRUS79ooq"
# TensorFlow includes all the tools you need to load and process images:
# + colab={} colab_type="code" id="jQZdySHvksOu"
img_path = all_image_paths[0]
img_path
# + [markdown] colab_type="text" id="2t2h2XCcmK1Y"
# Here is the raw data:
# + colab={} colab_type="code" id="LJfkyC_Qkt7A"
# Raw JPEG bytes (repr truncated for display).
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
# + [markdown] colab_type="text" id="opN8AVc8mSbz"
# Decode it into an image tensor:
# + colab={} colab_type="code" id="Tm0tdrlfk0Bb"
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
# + [markdown] colab_type="text" id="3k-Of2Tfmbeq"
# Resize it for your model:
# + colab={} colab_type="code" id="XFpz-3_vlJgp"
# Resize to the 192x192 input used throughout, and scale pixels to [0, 1].
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
# + [markdown] colab_type="text" id="aCsAa4Psl4AQ"
# Wrap these up in simple functions for later.
# + colab={} colab_type="code" id="HmUiZJNU73vA"
def preprocess_image(image):
    """Decode raw JPEG bytes into a float32 tensor of shape (192, 192, 3)
    with pixel values normalized to the [0, 1] range."""
    decoded = tf.image.decode_jpeg(image, channels=3)
    resized = tf.image.resize(decoded, [192, 192])
    return resized / 255.0  # normalize to [0,1] range
# + colab={} colab_type="code" id="einETrJnO-em"
def load_and_preprocess_image(path):
    """Read the image file at *path* and return it decoded, resized and
    normalized (see preprocess_image)."""
    raw_bytes = tf.io.read_file(path)
    return preprocess_image(raw_bytes)
# + colab={} colab_type="code" id="3brWQcdtz78y"
import matplotlib.pyplot as plt
image_path = all_image_paths[0]
label = all_image_labels[0]
# NOTE(review): this cell plots img_path (assigned in an earlier cell) rather
# than the image_path assigned just above — both are all_image_paths[0], so the
# output is the same, but the naming is inconsistent.
plt.imshow(load_and_preprocess_image(img_path))
plt.grid(False)
plt.xlabel(caption_image(img_path))
plt.title(label_names[label].title())
print()
# + [markdown] colab_type="text" id="n2TCr1TQ8pA3"
# ## Build a `tf.data.Dataset`
# + [markdown] colab_type="text" id="6H9Z5Mq63nSH"
# ### A dataset of images
# + [markdown] colab_type="text" id="GN-s04s-6Luq"
# The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method.
#
# Slicing the array of strings, results in a dataset of strings:
# + colab={} colab_type="code" id="6oRPG3Jz3ie_"
# Dataset of file-path strings.
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
# + [markdown] colab_type="text" id="uML4JeMmIAvO"
# The `shapes` and `types` describe the content of each item in the dataset. In this case it is a set of scalar binary-strings
# + colab={} colab_type="code" id="mIsNflFbIK34"
print(path_ds)
# + [markdown] colab_type="text" id="ZjyGcM8OwBJ2"
# Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
# + colab={} colab_type="code" id="D1iba6f4khu-"
# Images are decoded/resized lazily and in parallel as the dataset is consumed.
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
# + colab={} colab_type="code" id="JLUPs2a-lEEJ"
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n, image in enumerate(image_ds.take(4)):
  plt.subplot(2,2,n+1)
  plt.imshow(image)
  plt.grid(False)
  plt.xticks([])
  plt.yticks([])
  plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
# + [markdown] colab_type="text" id="P6FNqPbxkbdx"
# ### A dataset of `(image, label)` pairs
# + [markdown] colab_type="text" id="YgvrWLKG67-x"
# Using the same `from_tensor_slices` method you can build a dataset of labels:
# + colab={} colab_type="code" id="AgBsAiV06udj"
# Integer labels as a dataset, in the same order as `image_ds`.
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
# + colab={} colab_type="code" id="HEsk5nN0vyeX"
for label in label_ds.take(10):
  print(label_names[label.numpy()])
# + [markdown] colab_type="text" id="jHjgrEeTxyYz"
# Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs:
# + colab={} colab_type="code" id="AOEWNMdQwsbN"
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
# + [markdown] colab_type="text" id="yA2F09SJLMuM"
# The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
# + colab={} colab_type="code" id="DuVYNinrLL-N"
print(image_label_ds)
# + [markdown] colab_type="text" id="2WYMikoPWOQX"
# Note: When you have arrays like `all_image_labels` and `all_image_paths` an alternative to `tf.data.Dataset.zip` is to slice the pair of arrays.
# + colab={} colab_type="code" id="HOFwZI-2WhzV"
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
    """Dataset.map adapter: the (path, label) tuple elements arrive as
    positional arguments; only the path needs processing."""
    processed = load_and_preprocess_image(path)
    return processed, label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
# + [markdown] colab_type="text" id="vYGCgJuR_9Qp"
# ### Basic methods for training
# + [markdown] colab_type="text" id="wwZavzgsIytz"
# To train a model with this dataset you will want the data:
#
# * To be well shuffled.
# * To be batched.
# * To repeat forever.
# * Batches to be available as soon as possible.
#
# These features can be easily added using the `tf.data` api.
# + colab={} colab_type="code" id="uZmZJx8ePw_5"
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
# Order matters here: shuffle -> repeat -> batch (see the notes below).
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
# + [markdown] colab_type="text" id="6JsM-xHiFCuW"
# There are a few things to note here:
#
# 1. The order is important.
#
# * A `.shuffle` after a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all).
# * A `.shuffle` after a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.
#
# 1. You use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.
#
# 1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.
#
# 1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.
#
# This last point can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
# + colab={} colab_type="code" id="Ocr6PybXNDoO"
# Fused shuffle+repeat avoids the epoch-boundary stall described above.
ds = image_label_ds.apply(
  tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
# + [markdown] colab_type="text" id="GBBZMSuAmQVL"
# ### Pipe the dataset to a model
#
# Fetch a copy of MobileNet v2 from `tf.keras.applications`.
#
# This will be used for a simple transfer learning example.
#
# Set the MobileNet weights to be non-trainable:
# + colab={} colab_type="code" id="KbJrXn9omO_g"
# Headless MobileNetV2 (include_top=False drops the classifier), frozen so only
# the new head added below is trained.
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
# + [markdown] colab_type="text" id="Y7NVWiLF3Vbf"
# This model expects its input to be normalized to the `[-1,1]` range:
#
# ```
# help(keras_applications.mobilenet_v2.preprocess_input)
# ```
#
# <pre>
# ...
# This function applies the "Inception" preprocessing which converts
# the RGB values from [0, 255] to [-1, 1]
# ...
# </pre>
# + [markdown] colab_type="text" id="CboYya6LmdQI"
# Before you pass the input to the MobileNet model, you need to convert it from a range of `[0,1]` to `[-1,1]`:
# + colab={} colab_type="code" id="SNOkHUGv3FYq"
def change_range(image,label):
    """Rescale pixel values from [0, 1] to the [-1, 1] range MobileNet expects;
    the label passes through unchanged."""
    rescaled = image * 2 - 1
    return rescaled, label
# Apply the [-1, 1] rescaling on the fly.
keras_ds = ds.map(change_range)
# + [markdown] colab_type="text" id="QDzZ3Nye5Rpv"
# The MobileNet returns a `6x6` spatial grid of features for each image.
#
# Pass it a batch of images to see:
# + colab={} colab_type="code" id="OzAhGkEK6WuE"
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
# + colab={} colab_type="code" id="LcFdiWpO5WbV"
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
# + [markdown] colab_type="text" id="vrbjEvaC5XmU"
# Build a model wrapped around MobileNet and use `tf.keras.layers.GlobalAveragePooling2D` to average over those space dimensions before the output `tf.keras.layers.Dense` layer:
# + colab={} colab_type="code" id="X0ooIU9fNjPJ"
# Transfer-learning head: pooled MobileNet features -> one logit per class.
model = tf.keras.Sequential([
  mobile_net,
  tf.keras.layers.GlobalAveragePooling2D(),
  tf.keras.layers.Dense(len(label_names))])
# + [markdown] colab_type="text" id="foQYUJs97V4V"
# Now it produces outputs of the expected shape:
# + colab={} colab_type="code" id="1nwYxvpj7ZEf"
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
# + [markdown] colab_type="text" id="pFc4I_J2nNOJ"
# Compile the model to describe the training procedure:
# + colab={} colab_type="code" id="ZWGqLEWYRNvv"
# Sparse loss: labels are integer class indices, not one-hot vectors.
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=["accuracy"])
# + [markdown] colab_type="text" id="tF1mO6haBOSd"
# There are 2 trainable variables - the Dense `weights` and `bias`:
# + colab={} colab_type="code" id="pPQ5yqyKBJMm"
len(model.trainable_variables)
# + colab={} colab_type="code" id="kug5Wg66UJjl"
model.summary()
# + [markdown] colab_type="text" id="f_glpYZ-nYC_"
# You are ready to train the model.
#
# Note that for demonstration purposes you will only run 3 steps per epoch, but normally you would specify the real number of steps, as defined below, before passing it to `model.fit()`:
# + colab={} colab_type="code" id="AnXPRNWoTypI"
steps_per_epoch=tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
# + colab={} colab_type="code" id="q_8sabaaSGAp"
model.fit(ds, epochs=1, steps_per_epoch=3)
# + [markdown] colab_type="text" id="UMVnoBcG_NlQ"
# ## Performance
#
# Note: This section just shows a couple of easy tricks that may help performance. For an in depth guide see [Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets).
#
# The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU, but may not be sufficient for GPU training and is totally inappropriate for any sort of distributed training.
# + [markdown] colab_type="text" id="oNmQqgGhLWie"
# To investigate, first build a simple function to check the performance of our datasets:
# + colab={} colab_type="code" id="_gFVe1rp_MYr"
import time
default_timeit_steps = 2*steps_per_epoch+1
def timeit(ds, steps=default_timeit_steps):
    """Iterate over `steps` batches of `ds`, printing a progress dot every
    tenth batch, then report elapsed time and images/second throughput."""
    overall_start = time.time()
    # Fetch a single batch to prime the pipeline (fill the shuffle buffer),
    # before starting the timer
    batch_iter = iter(ds.take(steps+1))
    next(batch_iter)
    start = time.time()
    for step, (images, labels) in enumerate(batch_iter):
        if step % 10 == 0:
            print('.', end='')
    print()
    end = time.time()
    duration = end-start
    print("{} batches: {} s".format(steps, duration))
    print("{:0.5f} Images/s".format(BATCH_SIZE*steps/duration))
    print("Total time: {}s".format(end-overall_start))
# + [markdown] colab_type="text" id="TYiOr4vdLcNX"
# The performance of the current dataset is:
# + colab={} colab_type="code" id="ZDxLwMJOReVe"
# Baseline: every image is read from disk and decoded on every epoch.
ds = image_label_ds.apply(
  tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
# + colab={} colab_type="code" id="IjouTJadRxyp"
timeit(ds)
# + [markdown] colab_type="text" id="HsLlXMO7EWBR"
# ### Cache
# + [markdown] colab_type="text" id="lV1NOn2zE2lR"
# Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is very efficient, especially when the data fits in memory.
#
# Here the images are cached, after being pre-processed (decoded and resized):
# + colab={} colab_type="code" id="qj_U09xpDvOg"
# In-memory cache: preprocessing runs once; later epochs read from RAM.
ds = image_label_ds.cache()
ds = ds.apply(
  tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
# + colab={} colab_type="code" id="rdxpvQ7VEo3y"
timeit(ds)
# + [markdown] colab_type="text" id="usIv7MqqZQps"
# One disadvantage to using an in memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
# + colab={} colab_type="code" id="eKX6ergKb_xd"
timeit(ds)
# + [markdown] colab_type="text" id="jUzpG4lYNkN-"
# If the data doesn't fit in memory, use a cache file:
# + colab={} colab_type="code" id="vIvF8K4GMq0g"
# On-disk cache: persists across runs, so only the first run pays the cost.
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
  tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
# + colab={} colab_type="code" id="eTIj6IOmM4yA"
timeit(ds)
# + [markdown] colab_type="text" id="qqo3dyB0Z4t2"
# The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
# + colab={} colab_type="code" id="hZhVdR8MbaUj"
timeit(ds)
# + [markdown] colab_type="text" id="WqOVlf8tFrDU"
# ### TFRecord File
# + [markdown] colab_type="text" id="y1llOTwWFzmR"
# #### Raw image data
#
# TFRecord files are a simple format to store a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS.
#
# First, build a TFRecord file from the raw image data:
# + colab={} colab_type="code" id="EqtARqKuHQLu"
# Write the raw (undecoded) JPEG bytes into a single TFRecord file.
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.io.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
# + [markdown] colab_type="text" id="flR2GXWFKcO1"
# Next, build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier:
# + colab={} colab_type="code" id="j9PVUL2SFufn"
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
# + [markdown] colab_type="text" id="cRp1eZDRKzyN"
# Zip that dataset with the labels dataset you defined earlier to get the expected `(image,label)` pairs:
# + colab={} colab_type="code" id="7XI_nDU2KuhS"
# Same shuffle/repeat/batch/prefetch pipeline, now fed from the TFRecord file.
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
  tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
# + colab={} colab_type="code" id="3ReSapoPK22E"
timeit(ds)
# + [markdown] colab_type="text" id="wb7VyoKNOMms"
# This is slower than the `cache` version because you have not cached the preprocessing.
# + [markdown] colab_type="text" id="NF9W-CTKkM-f"
# #### Serialized Tensors
# + [markdown] colab_type="text" id="J9HzljSPkxt0"
# To save some preprocessing to the TFRecord file, first make a dataset of the processed images, as before:
# + colab={} colab_type="code" id="OzS0Azukkjyw"
# Dataset of already-preprocessed image tensors (not raw JPEG strings).
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
# + [markdown] colab_type="text" id="onWOwLpYlzJQ"
# Now instead of a dataset of `.jpeg` strings, you have a dataset of tensors.
#
# To serialize this to a TFRecord file you first convert the dataset of tensors to a dataset of strings:
# + colab={} colab_type="code" id="xxZSwnRllyf0"
# Serialize each tensor to a byte string so it can be written to a TFRecord.
ds = image_ds.map(tf.io.serialize_tensor)
ds
# + colab={} colab_type="code" id="w9N6hJWAkKPC"
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
# + [markdown] colab_type="text" id="OlFc9dJSmcx0"
# With the preprocessing cached, data can be loaded from the TFrecord file quite efficiently - just remember to de-serialize tensor before using it:
# + colab={} colab_type="code" id="BsqFyTBFmSCZ"
ds = tf.data.TFRecordDataset('images.tfrec')
def parse(x):
    """Deserialize one record back into a float32 image tensor.

    Serialization loses the static shape, so it is restored explicitly.
    """
    tensor = tf.io.parse_tensor(x, out_type=tf.float32)
    return tf.reshape(tensor, [192, 192, 3])
# Deserialize records in parallel as the dataset is consumed.
ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
# + [markdown] colab_type="text" id="OPs_sLV9pQg5"
# Now, add the labels and apply the same standard operations, as before:
# + colab={} colab_type="code" id="XYxBwaLYnGop"
# Standard pipeline over the deserialized tensors: zip labels, shuffle/repeat,
# batch, prefetch, then benchmark.
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
  tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
# + colab={} colab_type="code" id="W8X6RmGan1-P"
timeit(ds)
| site/en/r2/tutorials/load_data/images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Indexed Expressions: Representing and manipulating tensors, pseudotensors, etc. in NRPy+
#
# ## Author: <NAME>
# ### Formatting improvements courtesy <NAME>
#
# ### NRPy+ Source Code for this module: [indexedexp.py](../edit/indexedexp.py)
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules
# 1. [Step 2](#idx1): Rank-1 Indexed Expressions
# 1. [Step 2.a](#dot): Performing a Dot Product
# 1. [Step 3](#idx2): Rank-2 and Higher Indexed Expressions
# 1. [Step 3.a](#con): Creating C Code for the contraction variable
# 1. [Step 3.b](#simd): Enable SIMD support
# 1. [Step 4](#exc): Exercise
# 1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='initializenrpy'></a>
#
# # Step 1: Initialize core NRPy+ modules \[Back to [top](#toc)\]
# $$\label{initializenrpy}$$
#
# Let's start by importing all the needed modules from NRPy+ for dealing with indexed expressions and outputting C code.
# The NRPy_param_funcs module sets up global structures that manage free parameters within NRPy+
import NRPy_param_funcs as par # NRPy+: Parameter interface
# The indexedexp module defines various functions for defining and managing indexed quantities like tensors and pseudotensors
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
# The grid module defines various parameters related to a numerical grid or the dimensionality of indexed expressions
# For example, it declares the parameter DIM, which specifies the dimensionality of the indexed expression
import grid as gri # NRPy+: Functions having to do with numerical grids
from outputC import outputC # NRPy+: Basic C code output functionality
# <a id='idx1'></a>
#
# # Step 2: Rank-1 Indexed Expressions \[Back to [top](#toc)\]
# $$\label{idx1}$$
#
# Indexed expressions of rank 1 are stored as [Python lists](https://www.tutorialspoint.com/python/python_lists.htm).
#
# There are two standard ways to declare indexed expressions:
# + **Initialize indexed expression to zero:**
# + **zerorank1(DIM=-1)** $\leftarrow$ As we will see below, initializing to zero is useful if the indexed expression depends entirely on some other indexed or non-indexed expressions.
# + **DIM** is an *optional* parameter that, if set to -1, will default to the dimension as set in the **grid** module: `par.parval_from_str("grid::DIM")`. Otherwise the rank-1 indexed expression will have dimension **DIM**.
# + **Initialize indexed expression symbolically:**
# + **declarerank1(symbol, DIM=-1)**.
# + As in `zerorank1()`, **DIM** is an *optional* parameter that, if set to -1, will default to the dimension as set in the **grid** module: `par.parval_from_str("grid::DIM")`. Otherwise the rank-1 indexed expression will have dimension **DIM**.
#
# `zerorank1()` and `declarerank1()` are both wrapper functions for the more general function `declare_indexedexp()`.
# + **declare_indexedexp(rank, symbol=None, symmetry=None, dimension=None)**.
# + The following are optional parameters: **symbol**, **symmetry**, and **dimension**. If **symbol** is not specified, then `declare_indexedexp()` will initialize an indexed expression to zero. If **symmetry** is not specified or has value "nosym", then an indexed expression will not be symmetrized, which has no relevance for an indexed expression of rank 1. If **dimension** is not specified or has value -1, then **dimension** will default to the dimension as set in the **grid** module: `par.parval_from_str("grid::DIM")`.
#
# For example, the 3-vector $\beta^i$ (upper index denotes contravariant) can be initialized to zero as follows:
# +
# Declare rank-1 contravariant ("U") vector, initialized to zero.
# zerorank1() returns a plain Python list of length DIM (defaulting to grid::DIM).
betaU = ixp.zerorank1()
# Print the result. It's a list of zeros!
print(betaU)
# -
# Next set $\beta^i = \sum_{j=0}^i j = \{0,1,3\}$
# +
# Look up the current dimension so the loop covers every component.
DIM = par.parval_from_str("grid::DIM")
# Component idx receives the triangular sum 0 + 1 + ... + idx.
for idx in range(DIM):
    betaU[idx] += sum(range(idx + 1))
print("The 3-vector betaU is now set to: "+str(betaU))
# -
# Alternatively, the 3-vector $\beta^i$ can be initialized **symbolically** as follows:
# +
# Set the dimension to 3
par.set_parval_from_str("grid::DIM",3)
# Declare rank-1 contravariant ("U") vector; each component becomes a SymPy
# symbol named betaU0, betaU1, betaU2.
betaU = ixp.declarerank1("betaU")
# Print the result. It's a list!
print(betaU)
# -
# Declaring $\beta^i$ symbolically is standard in case `betaU0`, `betaU1`, and `betaU2` are defined elsewhere (e.g., read in from main memory as a gridfunction).
#
# As can be seen, NRPy+'s standard naming convention for indexed rank-1 expressions is
# + **\[base variable name\]+\["U" for contravariant (up index) or "D" for covariant (down index)\]**
#
# *Caution*: After declaring the vector, `betaU0`, `betaU1`, and `betaU2` can only be accessed or manipulated through list access; i.e., via `betaU[0]`, `betaU[1]`, and `betaU[2]`, respectively. Attempts to access `betaU0` directly will fail.
#
# Knowing this, let's multiply `betaU1` by 2:
# Components are accessed via list indexing only (betaU1 itself is not in scope).
betaU[1] *= 2
print("The 3-vector betaU is now set to "+str(betaU))
print("The component betaU[1] is now set to "+str(betaU[1]))
# <a id='dot'></a>
#
# ## Step 2.a: Performing a Dot Product \[Back to [top](#toc)\]
# $$\label{dot}$$
#
# Next, let's declare the variable $\beta_j$ and perform the dot product $\beta^i \beta_i$:
# +
# Reset betaU to its symbolic form, and declare the covariant counterpart beta_j.
betaU = ixp.declarerank1("betaU")
betaD = ixp.declarerank1("betaD")
# Number of components to contract over.
DIM = par.parval_from_str("grid::DIM")
# Contract beta^i with beta_i: the sum of the component-wise products.
dotprod = sum(betaU[k] * betaD[k] for k in range(DIM))
# Print result!
print(dotprod)
# -
# <a id='idx2'></a>
#
# # Step 3: Rank-2 and Higher Indexed Expressions \[Back to [top](#toc)\]
# $$\label{idx2}$$
#
# Moving to higher ranks, rank-2 indexed expressions are stored as lists of lists, rank-3 indexed expressions as lists of lists of lists, etc. For example
#
# + the covariant rank-2 tensor $g_{ij}$ is declared as `gDD[i][j]` in NRPy+, so that e.g., `gDD[0][2]` is stored with name `gDD02` and
# + the rank-2 tensor $T^{\mu}{}_{\nu}$ is declared as `TUD[m][n]` in NRPy+ (index names are of course arbitrary).
#
# *Caveat*: Note that it is currently up to the user to determine whether the combination of indexed expressions makes sense; NRPy+ does not track whether up and down indices are written consistently.
#
# NRPy+ supports symmetries in indexed expressions (above rank 1), so that if $h_{ij} = h_{ji}$, then declaring `hDD[i][j]` to be symmetric in NRPy+ will result in both `hDD[0][2]` and `hDD[2][0]` mapping to the *single* SymPy variable `hDD02`.
#
# To see how this works in NRPy+, let's define in NRPy+ a symmetric, rank-2 tensor $h_{ij}$ in three dimensions, and then compute the contraction, which should be given by $$con = h^{ij}h_{ij} = h_{00} h^{00} + h_{11} h^{11} + h_{22} h^{22} + 2 (h_{01} h^{01} + h_{02} h^{02} + h_{12} h^{12}).$$
# +
# Look up the dimension (set to 3 earlier in this notebook).
DIM = par.parval_from_str("grid::DIM")
# Declare the symmetric rank-2 tensors h^{ij} (hUU) and h_{ij} (hDD);
# "sym01" maps the (i,j) and (j,i) entries onto one shared SymPy symbol.
hUU = ixp.declarerank2("hUU","sym01")
hDD = ixp.declarerank2("hDD","sym01")
# Full contraction h^{ij} h_{ij}: sum over both indices.
con = sum(hUU[a][b] * hDD[a][b] for a in range(DIM) for b in range(DIM))
# Print result
print(con)
# -
# <a id='con'></a>
#
# ## Step 3.a: Creating C Code for the contraction variable $\text{con}$ \[Back to [top](#toc)\]
# $$\label{con}$$
#
# Next let's create the C code for the contraction variable $\text{con}$, without CSE (common subexpression elimination)
# Emit plain C code assigning the symbolic expression `con` to the C variable "con".
outputC(con,"con")
# <a id='simd'></a>
#
# ## Step 3.b: Enable SIMD support \[Back to [top](#toc)\]
# $$\label{simd}$$
#
# Finally, let's see how it looks with SIMD support enabled
# Same output, but with SIMD intrinsics enabled in the generated C code.
outputC(con,"con",params="enable_SIMD=True")
# <a id='exc'></a>
#
# # Step 4: Exercise \[Back to [top](#toc)\]
# $$\label{exc}$$
#
# Setting $\beta^i$ via the declarerank1(), write the NRPy+ code required to generate the needed C code for the lowering operator: $g_{ij} \beta^i$, and set the result to C variables `betaD0out`, `betaD1out`, and `betaD2out` [solution](Tutorial-Indexed_Expressions_soln.ipynb). *Hint: You will want to use the `zerorank1()` function*
# **To complete this exercise, you must first reset all variables in the notebook:**
# +
# *Uncomment* the below %reset command and then press <Shift>+<Enter>.
# Respond with "y" in the dialog box to reset all variables.
# # %reset
# -
# **Write your solution below:**
# <a id='latex_pdf_output'></a>
#
# # Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-Indexed_Expressions.pdf](Tutorial-Indexed_Expressions.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Convert this notebook into a LaTeX'ed PDF in the NRPy+ tutorial root directory.
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Indexed_Expressions")
| Tutorial-Indexed_Expressions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf2rc
# language: python
# name: tf2rc
# ---
import tensorflow as tf
import os, sys

# Make the project's scripts/ directory importable (parent of the notebooks/ dir).
root_dir, _ = os.path.split(os.getcwd())
script_dir = os.path.join(root_dir, 'scripts')
sys.path.append(script_dir)
# BUG FIX: this path was relative ('home/phd/miniconda3/'), which resolves
# against the current working directory and never exists; it must be absolute,
# matching the tensorboard binary path invoked below.
sys.path.append('/home/phd/miniconda3/')
from hparams import hparams
# %load_ext tensorboard
from tensorboard import notebook
# Show any TensorBoard instances already running in this environment.
notebook.list()
# hparams['log_dir'] points at a run subdirectory; TensorBoard watches its parent.
log_dir, __ = os.path.split(hparams['log_dir'])
# !/home/phd/miniconda3/envs/tf2rc/bin/tensorboard --logdir $log_dir --port=6081
| notebooks/control_tensorboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# _by <NAME>$^{1,2}$ and <NAME>$^1$_
#
# $^1$ Institute of Communications Engineering, University of Rostock, Rostock <br>
# $^2$ University Library, University of Rostock, Rostock
#
# **Abstract**:
# This notebook contains the solutions for the tasks in the `03 Data Analysis Example.ipynb` Jupyter Notebook.
# ## 3 – Jupyter Notebook Programming (Solutions)
#
# First of all, we will import several useful libraries:
# +
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
# **Task 1:** Create a `requirements.txt` file that contains all imported libraries and their corresponding version.
#
# **Solution:** The following output has to be copied to a new file called `requirements.txt`.
import matplotlib

# Emit pinned-version lines suitable for pasting into requirements.txt.
print(f'numpy=={np.version.version}')
print(f'pandas=={pd.__version__}')
print(f'matplotlib=={matplotlib.__version__}')
# ---
#
# **Task 2:** Write a function `read_data` that takes a file name of a CSV file and reads the corresponding file from a folder called `_data` in order to return a Pandas dataframe.
#
# **Solution:**
# The following function that takes the filename (a string) and reads the corresponding file inside the `_data` folder.
# If no file exists, this function will raise an exception; otherwise it will return the corresponding Pandas dataframe.
# Note, this assumes the file to be separated by ',' and containing a headline.
def read_data(filename: str) -> pd.DataFrame:
    """Read a CSV file from the local ``_data`` folder.

    The file is assumed to be comma-separated with a header row.

    Parameters
    ----------
    filename : str
        Name of the CSV file inside the ``_data`` directory.

    Returns
    -------
    pd.DataFrame
        The parsed contents of the file.

    Raises
    ------
    FileNotFoundError
        If no file with that name exists in ``_data``.
    """
    filepath = os.path.join('_data', filename)
    if not os.path.isfile(filepath):
        # A specific exception type lets callers handle the missing-file case
        # without catching unrelated errors (still a subclass of Exception,
        # so existing broad handlers keep working).
        raise FileNotFoundError(f"File '{filepath}' was not found")
    return pd.read_csv(filepath)
# Lets's actually try our function, by reading the file `_data/raw_1.csv`:
# Load the sample recording _data/raw_1.csv into a DataFrame.
raw1 = read_data('raw_1.csv')
# In order to check the result, we want to display the content of the variable `raw1`:
raw1
# ---
#
# **Task 3:** Write a plot that displays: X-, Y-, and Z-axis in the same plot and make it a little more useful such as adding a title as well as axis-labels.
#
# **Solution:**
# Plot the three acceleration axes of the T8 sensor in a single labelled figure.
plt.figure(figsize=(12, 6), dpi= 80, facecolor='w', edgecolor='k')
plt.title('Acceleration of raw_1.csv (Sensor: T8)')
for axis in ('X', 'Y', 'Z'):
    plt.plot(raw1[f'Sensor_T8_Acceleration_{axis}'], label=axis)
plt.legend(loc='upper right')
| 03 Data Analysis Example (Solutions).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Load the TensorBoard notebook extension
# %load_ext tensorboard
import datetime
import os
from tensorflow.keras.callbacks import TensorBoard
from tensorboard import notebook
import numpy as np
import pandas as pd
import seaborn as sns
import random
import tensorflow as tf
from tensorflow.keras.datasets import mnist, fashion_mnist
from tensorflow.keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import regularizers
import matplotlib
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
# Use ggplot-like styling for all subsequent matplotlib figures.
plt.style.use('ggplot')
# +
notebook.list() # View open TensorBoard instances
# NOTE: the triple-quoted string below is deliberately NOT executed; it is a
# Windows/PowerShell snippet kept for reference, to kill stale TensorBoard
# processes and clear its temp dir. Remove the quotes to run the lines as
# notebook shell magics.
'''
! powershell "echo 'checking for existing tensorboard processes'"
! powershell "ps | Where-Object {$_.ProcessName -eq 'tensorboard'}"
! powershell "ps | Where-Object {$_.ProcessName -eq 'tensorboard'}| %{kill $_}"
! powershell "echo 'cleaning tensorboard temp dir'"
! powershell "rm $env:TEMP\.tensorboard-info\*"
! powershell "ps | Where-Object {$_.ProcessName -eq 'tensorboard'}"
! powershell "echo 'Done!'"
'''
# +
# # %reload_ext tensorboard
# Clear any logs from previous runs
# !rm -rf ./logs/
# +
def _add_gaussian_noise(images, factor):
    # Corrupt each image with zero-mean Gaussian noise, clamped back to [0, 1].
    return np.array([np.clip(img + factor * np.random.randn(*img.shape), 0, 1)
                     for img in images])


(fm_X_train, fm_y_train), (fm_X_test, fm_y_test) = fashion_mnist.load_data()

# Scale pixel intensities from [0, 255] down to [0, 1].
fm_X_train = fm_X_train/255.0
fm_X_test = fm_X_test/255.0

# Build noisy copies of both splits for the denoising task.
noise_factor = 0.38
noise_train_dataset = _add_gaussian_noise(fm_X_train, noise_factor)
noise_test_dataset = _add_gaussian_noise(fm_X_test, noise_factor)

# Add an explicit channel axis: (N, 28, 28) -> (N, 28, 28, 1).
fm_X_train = np.reshape(fm_X_train, (len(fm_X_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
noise_train_dataset = np.reshape(noise_train_dataset, (len(noise_train_dataset), 28, 28, 1))
fm_X_test = np.reshape(fm_X_test, (len(fm_X_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
noise_test_dataset = np.reshape(noise_test_dataset, (len(noise_test_dataset), 28, 28, 1))
# -
# ## Convolutional AutoEncoder
# ### Convolutional Encoder
# +
input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format
# Three conv + 2x2 max-pool stages shrink the spatial size: 28 -> 14 -> 7 -> 4
# (padding='same' on the final pool rounds 7 up before halving).
x = Conv2D(filters = 16, kernel_size = (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(filters = 8, kernel_size = (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(filters = 8, kernel_size = (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same', name = 'LatentSpace')(x)
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
encoder = Model(input_img, encoded, name='CNN_Encoder')
encoder.summary()
# -
# ### Convolutional Decoder
# +
latent_space = Input(shape=(4, 4, 8)) # adapt this if using `channels_first` image data format
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = Conv2D(filters = 8, kernel_size = (3, 3), activation='relu', padding='same')(latent_space)
x = UpSampling2D((2, 2))(x)
x = Conv2D(filters = 8, kernel_size = (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
# NOTE: no padding here (default padding='valid'), so 16x16 shrinks to 14x14;
# the final upsampling then restores the original 28x28 resolution.
x = Conv2D(filters = 16, kernel_size = (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
# Sigmoid output keeps reconstructed pixel values in [0, 1].
decoded = Conv2D(filters = 1, kernel_size = (3, 3), activation='sigmoid', padding='same')(x)
# create the decoder model
decoder = Model(latent_space, decoded, name='CNN_Decoder')
decoder.summary()
# -
# Chain encoder and decoder into the end-to-end denoising autoencoder.
autoencoder = Model(input_img, decoder(encoder(input_img)), name="CNN_autoencoder")
# Per-pixel binary cross-entropy suits sigmoid outputs in [0, 1].
# `learning_rate` replaces the deprecated `lr` keyword of tf.keras optimizers;
# the duplicate pre-compile summary() call was removed (it printed twice).
autoencoder.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
autoencoder.summary()
# +
# logdir = os.path.join("logs", datetime.datetime.now().strftime("%m%d_%H%M%S"))
# tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
# eStop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, mode='min')
epochs = 2
batch_size = 128
# Train to reconstruct the clean images from their noisy counterparts;
# the noisy/clean test pair doubles as validation data.
HistoryAE = autoencoder.fit(noise_train_dataset,
                            fm_X_train,
                            epochs = epochs,
                            batch_size = batch_size,
                            # callbacks=[eStop, tensorboard_callback],
                            validation_data = (noise_test_dataset, fm_X_test)
                            )
# -
# Plot training vs. validation loss curves across epochs.
for curve in ('loss', 'val_loss'):
    plt.plot(HistoryAE.history[curve], label=curve)
plt.legend()
plt.show()
# +
# Report the reconstruction loss on the noisy -> clean test pairs.
Evaluation = autoencoder.evaluate(noise_test_dataset,
                                  fm_X_test,
                                  batch_size=batch_size,
                                  verbose=2)
print('Test Loss: {:.3f}'.format(Evaluation))
# +
# Denoise the first ten test images and show the noisy inputs (top row)
# against their reconstructions (bottom row).
predicted = autoencoder.predict(noise_test_dataset[:10])
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
for images, row in zip([noise_test_dataset[:10], predicted], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
# +
# # %tensorboard --logdir logs --port=6061
| CNN_AutoEncoder Tesis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# %matplotlib inline
# # %matplotlib notebook # for interactive
# For high dpi displays.
# %config InlineBackend.figure_format = 'retina'
# # 0. Note
# * This notebook shows an example of EOS fitting for static compression, focusing on applying a range of different pressure scales.
#
# * The result and data have been published in [Nisr et al. (2017, JGR)](http://onlinelibrary.wiley.com/doi/10.1002/2016JE005158/full).
# # 1. Setup
import numpy as np
from uncertainties import unumpy as unp
import pandas as pd
import pytheos as eos
import matplotlib.pyplot as plt
# # 2. Setup pressure scale and starting values
# Setup dictionaries for pressure standard `(au_eos)` and equation to use `(fit_model)`. This allows for eos fits with a wide range of different pressure scales.
# SiC has two polymorphs, 3C and 6H, at the studied pressure range. This notebook can conduct fitting for 3C.
# Uncomment the following line to get some help.
# +
#help(eos.gold.Yokoo2009)
# -
# We use the values from Zhuravlev (2013) for initial guess.
v0 = 82.804
k0 = 218.
k0p = 3.75
# # 3. Setup data
# Read data file. Data points are stored in `csv` files.
data = pd.read_csv('./data/3C-300EOS-final.csv')
data.head()
# Sort the data in a reverse order based on the unit-cell volume of pressure standard.
n = data['V(Au)'].__len__()
ind = data['V(Au)'].argsort()[::-1][:n]
# Make error propagation possible.
v_std = unp.uarray(data['V(Au)'][ind], data['sV(Au)'][ind])
v = unp.uarray(data['V(3C)'][ind], data['sV(3C)'][ind])
plt.plot(unp.nominal_values(v_std), unp.nominal_values(v), 'bo')
# # 4. Fitting
# Define dictionaries for a range of different fittings.
au_eos = {'Fei2007': eos.gold.Fei2007bm3(), 'Dorogokupets2007': eos.gold.Dorogokupets2007(),
'Yokoo2009': eos.gold.Yokoo2009()}
fit_model = {'Fei2007': eos.BM3Model(), 'Dorogokupets2007': eos.VinetModel(),
'Yokoo2009': eos.BM3Model()}
# The cell below runs an iteration to generate fitting for three different pressure scales. We fix `v0` in this fitting example.
key = 'Dorogokupets2007'
# Set pressure standard to use
p = au_eos[key].cal_pst(v_std)
# Plot volume versus pressure
plt.plot(unp.nominal_values(p), unp.nominal_values(v), 'bo')
plt.xlabel('Pressure (GPa)')
plt.ylabel('Unit-cell volume ($\mathrm{\AA}^3$)');
# Set equation to fit
model = fit_model[key]
# Assign initial values for the parameters
params = model.make_params(v0=v0, k0=k0, k0p=k0p)
# Fix v0
params['v0'].vary = False
# Conduct fitting
fitresult = model.fit(unp.nominal_values(p), params, v=unp.nominal_values(v))
# Generate text ouput for fitting result
print('***'+key)
print(fitresult.fit_report())
# generate plots
eos.plot.static_fit_result(fitresult, title=key)
| b-8_pv_eos_fit_multi-scales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tvmx
# language: python
# name: python3
# ---
# (sphx_glr_tutorial_autotvm_relay_x86)=
# # 用 Python 接口编译和优化模型(AutoTVM)
#
# **原作者**: [<NAME>](https://github.com/hogepodge)
#
# 在 [TVMC 教程](tvmc_command_line_driver) 中,介绍了如何使用 TVM 的命令行界面 TVMC 来编译、运行和微调预训练的视觉模型 ResNet-50 v2。不过,TVM 不仅仅是命令行工具,它也是优化框架,其 API 可用于许多不同的语言,在处理机器学习模型方面给你带来巨大的灵活性。
#
# 在本教程中,将涵盖与 TVMC 相同的内容,但展示如何用 Python API 来完成它。完成本节后,将使用 TVM 的 Python API 来完成以下任务:
#
# - 编译预训练的 ResNet-50 v2 模型供 TVM 运行时使用。
# - 使用编译后的模型,运行真实图像,并解释输出和评估模型性能。
# - 使用 TVM 在 CPU 上调度该模型。
# - 使用 TVM 收集的调度数据重新编译已优化的模型。
# - 通过优化后的模型运行图像,并比较输出和模型的性能。
#
# 本节的目的是让你了解 TVM 的能力以及如何通过 Python API 使用它们。
#
# TVM 是一个深度学习编译器框架,有许多不同的模块可用于处理深度学习模型和算子。在本教程中,我们将研究如何使用 Python API 加载、编译和优化一个模型。
#
# 首先要导入一些依赖关系,包括用于加载和转换模型的 ``mxnet``,用于下载测试数据的辅助工具,用于处理图像数据的 Python 图像库,用于图像数据预处理和后处理的 ``numpy``,TVM Relay 框架,以及 TVM Graph Executor。
from tvm.contrib.download import download_testdata
from PIL import Image
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_executor
# ## 下载和加载前端模型
#
# 在本教程中,我们将使用 ResNet-50 v2。ResNet-50 是卷积神经网络,有 50 层深度,旨在对图像进行分类。该模型已经在超过一百万张图片上进行了预训练,有 1000 种不同的分类。该网络的输入图像大小为 224x224。如果你有兴趣探索更多关于 ResNet-50 模型的结构,我们建议下载免费的 ML 模型查看器 [Netron](https://netron.app)。
#
# TVM 提供了辅助库来下载预训练的模型。通过该模块提供模型的 URL、文件名和模型类型,TVM 将下载模型并保存到磁盘。
#
# ```{admonition} 与其他模型格式一起工作
# TVM 支持许多流行的模型格式。清单可以在 TVM 文档的 [编译深度学习模型](tutorial-frontend) 部分找到。
# ```
#
# ````{note}
# 可以直接使用如下方式下载预训练的模型(以 ONNX 为例):
#
# ```python
# model_url = "".join(
# [
# "https://github.com/onnx/models/raw/",
# "master/vision/classification/resnet/model/",
# "resnet50-v2-7.onnx",
# ]
# )
#
# model_path = download_testdata(model_url, "resnet50-v2-7.onnx", module="onnx")
# ```
# ````
#
# MXNet 可直接载入模型:
# +
import mxnet as mx
model_name = 'resnet50_v2'
# Download (on first use) and load the pretrained ResNet-50 v2 from the Gluon model zoo.
gluon_model = mx.gluon.model_zoo.vision.get_model(model_name, pretrained=True)
# -
# ## 下载、预处理和加载测试图像
#
# 当涉及到预期的张量形状、格式和数据类型时,每个模型都很特别。出于这个原因,大多数模型需要一些预处理和后处理,以确保输入是有效的,并解释输出。TVMC 对输入和输出数据都采用了 NumPy 的 ``.npz`` 格式。
#
# 作为本教程的输入,将使用一只猫的图像,但你可以自由地用你选择的任何图像来代替这个图像。
#
# <img src="https://s3.amazonaws.com/model-server/inputs/kitten.jpg" height="224px" width="224px" align="center">
#
# 下载图像数据,然后将其转换成 numpy 数组,作为模型的输入。
# +
img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg"
img_path = download_testdata(img_url, "imagenet_cat.png", module="data")
# Resize to the 224x224 input expected by ResNet-50.
with Image.open(img_path) as im:
    resized_image = im.resize((224, 224))
# Convert to float32.
img_data = np.asarray(resized_image).astype("float32")
# The input image is in HWC layout, while MXNet expects CHW input.
img_data = np.transpose(img_data, (2, 0, 1))
# Normalize according to the ImageNet input specification.
imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
imagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
norm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev
# Add a batch dimension, making the input 4-D: NCHW.
img_data = np.expand_dims(norm_img_data, axis=0)
# -
# ## 用 Relay 编译模型
#
# 下一步是编译 ResNet 模型。使用 {func}`~tvm.relay.frontend.from_mxnet` 导入器将模型导入到 {mod}`~tvm.relay`。
#
# 不同的模型类型,输入的名称可能不同。你可以使用 Netron 这样的工具来检查输入名称。
# +
# Model input name (inspect with a tool such as Netron if unsure).
input_name = "data"
shape_dict = {input_name: img_data.shape}
# Import the Gluon model into Relay as a module plus its parameter dict.
mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
# -
# 将模型与标准优化一起构建成 TVM 库。
#
# ```{admonition} 定义正确的目标
# 指定正确的目标可以对编译后的模块的性能产生巨大影响,因为它可以利用目标上可用的硬件特性。欲了解更多信息,请参考为 [x86 CPU 自动调整卷积网络](tune_relay_x86)。建议确定你运行的是哪种 CPU,以及可选的功能,并适当地设置目标。例如,对于某些处理器, `target = "llvm -mcpu=skylake"`,或者对于具有 AVX-512 向量指令集的处理器, `target = "llvm-mcpu=skylake-avx512"`。
# ```
# +
# Generic LLVM CPU target; specialize (e.g. "llvm -mcpu=skylake-avx512") for best performance.
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target=target, params=params)
# -
# 从该库中创建 TVM graph 运行时模块。
# Instantiate a graph executor for the compiled library on the target device.
dev = tvm.device(str(target), 0)
module = graph_executor.GraphModule(lib["default"](dev))
# ## 在 TVM 运行时上执行
#
# 已经编译了模型,下面可以使用 TVM 运行时来进行预测。要使用 TVM 来运行模型并进行预测,需要两样东西:
#
# - 编译后的模型,也就是我们刚刚制作的模块 `module`。
# - 对模型的有效输入,以便进行预测。
dtype = "float32"
# Feed the preprocessed image, run inference, and fetch the (1, 1000) class scores.
module.set_input(input_name, img_data)
module.run()
output_shape = (1, 1000)
tvm_output = module.get_output(0,
                               tvm.nd.empty(output_shape)).numpy()
# ## 收集基本性能数据
#
# 想收集一些与这个未优化的模型相关的基本性能数据,并在以后与调整后的模型进行比较。为了帮助说明 CPU 的噪音,在多个批次的重复中运行计算,然后收集一些关于平均值、中位数和标准差的基础统计数据。
# +
import timeit

timing_number = 10
timing_repeat = 10
# Collect `timing_repeat` samples, each averaging `timing_number` runs,
# converted to milliseconds per run.
raw_times_ms = (
    np.array(
        timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number)
    )
    * 1000
    / timing_number
)
unoptimized = {
    "mean": np.mean(raw_times_ms),
    "median": np.median(raw_times_ms),
    "std": np.std(raw_times_ms),
}
print(unoptimized)
# -
# ## 对输出进行后处理
#
# 如前所述,每个模型都有自己提供输出张量的特殊方式。
#
# 在案例中,需要运行一些后处理,利用为模型提供的查找表,将 ResNet-50 v2 的输出渲染成更适合人类阅读的形式。
# +
from scipy.special import softmax
from gluoncv.data.imagenet.classification import ImageNet1kAttr

# Look up the list of ImageNet class labels.
imagenet_1k_attr = ImageNet1kAttr()
labels = imagenet_1k_attr.classes_long
# Convert raw scores into probabilities and report the top-5 classes.
scores = softmax(tvm_output)
scores = np.squeeze(scores)
ranks = np.argsort(scores)[::-1]
for rank in ranks[0:5]:
    print(f"class='{labels[rank]}' with probability={scores[rank]:f}")
# -
# ## 调优模型
#
# 之前的模型是为了在 TVM 运行时工作而编译的,但不包括任何特定平台的优化。在本节中,我们将向你展示如何使用 TVM 建立一个针对你工作平台的优化模型。
#
# 在某些情况下,当使用我们编译的模块运行推理时,我们可能无法获得预期的性能。在这种情况下,我们可以利用自动调谐器,为我们的模型找到一个更好的配置,获得性能的提升。TVM 中的调谐是指对模型进行优化以在给定目标上更快地运行的过程。这与训练或微调不同,因为它不影响模型的准确性,而只影响运行时的性能。作为调优过程的一部分,TVM 将尝试运行许多不同的运算器实现变体,以观察哪些运算器表现最佳。这些运行的结果被储存在调优记录文件中。
#
# 在最简单的形式下,调优需要你提供三样东西:
#
# - 你打算在上面运行这个模型的设备的目标规格
# - 输出文件的路径,调优记录将被存储在该文件中
# - 要调优的模型的路径
#
import tvm.auto_scheduler as auto_scheduler
from tvm.autotvm.tuner import XGBTuner
from tvm import autotvm
# 为运行器设置一些基本参数。运行器采用一组特定参数生成的编译代码,并测量其性能。``number`` 指定我们将测试的不同配置的数量,而 ``repeat`` 指定我们将对每个配置进行多少次测量。``min_repeat_ms`` 是一个值,指定需要多长时间运行配置测试。如果重复次数低于这个时间,它将被增加。这个选项对于在 GPU 上进行精确的调优是必要的,而对于 CPU 的调优则不需要。把这个值设置为 0 可以禁用它。``timeout`` 为每个测试的配置运行训练代码的时间设置了上限。
# +
# Measurement settings: `number` runs per measurement, `repeat` measurements
# per config, and a per-config `timeout` cap.
number = 10
repeat = 1
min_repeat_ms = 0  # since we're tuning on a CPU, can be set to 0
timeout = 10  # in seconds

# create a TVM runner
runner = autotvm.LocalRunner(
    number=number,
    repeat=repeat,
    timeout=timeout,
    min_repeat_ms=min_repeat_ms,
    enable_cpu_cache_flush=True,
)
# -
# 创建一个简单的结构来保存调谐选项。我们使用一个 XGBoost 算法来指导搜索。对于一个生产作业来说,你会想把试验的数量设置得比这里使用的 10 的值大。对于 CPU,我们推荐 1500,对于 GPU,推荐 3000-4000。所需的试验次数可能取决于特定的模型和处理器,因此值得花一些时间来评估各种数值的性能,以找到调整时间和模型优化之间的最佳平衡。因为运行调谐是需要时间的,我们将试验次数设置为 10 次,但不建议使用这么小的值。``early_stopping`` 参数是在应用提前停止搜索的条件之前,要运行的最小轨数。``measure`` 选项表示将在哪里建立试验代码,以及将在哪里运行。在这种情况下,我们使用刚刚创建的 ``LocalRunner`` 和一个 ``LocalBuilder``。``tuning_records`` 选项指定了一个文件来写入调整数据。
# Tuning configuration: XGBoost-guided search, 10 trials (use ~1500 for CPU in
# production), early stopping after 100 stagnant trials, and a JSON log file
# where the tuning records are accumulated.
tuning_option = {
    "tuner": "xgb",
    "trials": 10,
    "early_stopping": 100,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(build_func="default"), runner=runner
    ),
    "tuning_records": "resnet-50-v2-autotuning.json",
}
# ```{admonition} 定义调谐搜索算法
# 默认情况下,这种搜索是使用 XGBoost 网格算法指导的。根据你的模型的复杂性和可用的时间量,你可能想选择一个不同的算法。
# ```
#
# ```{admonition} 设置调谐参数
# 在这个例子中,为了节省时间,我们将试验次数和提前停止设置为 10。如果你把这些值设置得更高,你可能会看到更多的性能改进,但这是以花时间调整为代价的。收敛所需的试验次数将取决于模型和目标平台的具体情况。
# ```
# +
# begin by extracting the tunable tasks from the Relay module
# (NOTE: this model was imported from MXNet, not ONNX, despite the original comment)
tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params)

# Tune the extracted tasks sequentially.
for i, task in enumerate(tasks):
    prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
    # A rank-based XGBoost cost model guides which configs to try next.
    tuner_obj = XGBTuner(task, loss_type="rank")
    tuner_obj.tune(
        n_trial=min(tuning_option["trials"], len(task.config_space)),
        early_stopping=tuning_option["early_stopping"],
        measure_option=tuning_option["measure_option"],
        callbacks=[
            autotvm.callback.progress_bar(tuning_option["trials"], prefix=prefix),
            autotvm.callback.log_to_file(tuning_option["tuning_records"]),
        ],
    )
# -
# ## 用调优数据编译优化后的模型
#
# 作为上述调优过程的输出,我们获得了存储在 ``resnet-50-v2-autotuning.json`` 的调优记录。编译器将使用这些结果,在你指定的目标上为模型生成高性能代码。
#
# 现在,模型的调优数据已经收集完毕,我们可以使用优化的运算符重新编译模型,以加快我们的计算速度。
# +
# Rebuild the model, letting Relay pick the best measured schedule for each task
# from the tuning records collected above.
with autotvm.apply_history_best(tuning_option["tuning_records"]):
    with tvm.transform.PassContext(opt_level=3, config={}):
        lib = relay.build(mod, target=target, params=params)

dev = tvm.device(str(target), 0)
module = graph_executor.GraphModule(lib["default"](dev))
# -
# 验证优化后的模型是否运行并产生相同的结果:
#
# +
# Re-run inference with the tuned module on the same input and report
# the top-5 predictions, which should match the untuned model's output.
dtype = "float32"
module.set_input(input_name, img_data)
module.run()
output_shape = (1, 1000)
tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy()

scores = np.squeeze(softmax(tvm_output))
ranks = np.argsort(scores)[::-1]
for rank in ranks[:5]:
    print(f"class='{labels[rank]}' with probability={scores[rank]:f}")
# -
# ## 比较已调谐和未调谐的模型
#
# 我们想收集一些与这个优化模型相关的基本性能数据,将其与未优化的模型进行比较。根据你的底层硬件、迭代次数和其他因素,你应该看到优化后的模型与未优化的模型相比有性能的提高。
# +
import timeit

timing_number = 10
timing_repeat = 10
# Time the tuned module exactly like the unoptimized baseline above:
# `timing_repeat` samples, each averaging `timing_number` runs, in ms.
optimized = (
    np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number))
    * 1000
    / timing_number
)
optimized = {"mean": np.mean(optimized), "median": np.median(optimized), "std": np.std(optimized)}

print("optimized: %s" % (optimized))
print("unoptimized: %s" % (unoptimized))
# -
# ## 小结
#
# 在本教程中,我们举了一个简短的例子,说明如何使用 TVM Python API 来编译、运行和调整一个模型。我们还讨论了对输入和输出进行预处理和后处理的必要性。在调优过程之后,我们演示了如何比较未优化和优化后的模型的性能。
#
# 这里我们介绍了一个使用 ResNet-50 v2 本地的简单例子。然而,TVM 支持更多的功能,包括交叉编译、远程执行和剖析/基准测试。
#
| xinetzone/docs/tutorial/autotvm_relay_x86.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Motivation
#
# 이번 세션에서는 본 자료를 준비하게 된 동기에 대해 설명합니다.
# ## 1. Why Large Scale?
#
# 최근 인류 역사상 최고의 언어모델이라는 평가를 받고 있는 GPT3를 필두로 딥러닝 언어모델의 크기가 점점 커지고 있는 추세입니다. 몇 년전까지만 해도 큰 모델이라고 평가받던 BERT가 이제는 아기자기한 사이즈의 모델이 되었죠. 그런데 한가지 궁금증이 생깁니다. 최근부터 왜 갑자기 모델의 크기가 점점 커지고 있는걸까요?
#
#
# 
#
# <br>
#
# ### 1) 모델의 아키텍처가 그다지 중요하지 않다?
# 지금까지 많은 연구자들이 모델의 아키텍처에 매우 큰 관심을 기울여왔지만 최근 연구 결과에 따르면 그동안의 변화가 생각보다 큰 차이를 가져오지는 못하는 것으로 밝혀졌습니다. 물론 아키텍처의 개선에 의해 언어모델의 성능이 어느정도 향상된 것은 사실이지만 판을 깨는 수준으로 드라마틱한 성능 개선을 가져오지는 못했죠.
#
# 
#
# <br>
#
# ### 2) 결국 관건은 데이터와 모델의 크기, 그들이 곧 성능과 비례한다.
# 이전에도 데이터와 모델의 크기가 커지면 성능이 개선되는 것은 알고 있었습니다. 그러나 이것을 극단적으로 키워보니 모델이 마법같은 일을 하기 시작했습니다. 예를 들면 언어모델이 fine-tuning 없이 번역, 요약, 분류 등의 태스크를 수행할 수 있었습니다. 단순히 수행만 하는게 아니라 성능도 fine-tuning한 모델과 비슷한 수준을 보여줬죠. 연구자들이 이러한 현상을 분석해보니 모델의 크기가 성능에 가장 큰 영향을 미치고 그 다음으로 데이터의 크기가 중요했다고 합니다. 아래 그래프의 Y축이 log scale인 것을 감안하면 모델의 크기가 성능에 미치는 영향은 엄청난 수준이죠.
#
# 
#
# <br>
#
# ### 이대로 간다면 아마 몇년 뒤...?
#
# 
#
# <br><br>
# ## 2. Large-scale의 시대에 우리는 무엇을 준비해야 할까?
#
# Large-scale transformer 모델은 기존과 아키텍처는 거의 동일한데, 모델과 데이터 사이즈를 엄청나게 키운 것입니다. 그래서 몇몇 사람들은 "에이~ 예전이랑 똑같은데 크기만 키웠네"와 같이 생각 하기도 하죠.
#
# 
#
# <br>
#
# ### 하지만 현실은...
#
# 하지만 Large-scale 모델을 잘 다루려면 아래와 같이 수 많은 하드코어 엔지니어링들이 병행되어야 합니다.<br>
#
# 
#
# <br>
#
# 즉 이러한 엔지니어링 테크닉을 사용할 줄 알아야 Large-scale 시대의 모델링을 수행 할 수 있습니다. 문제는 이러한 기술들은 전문적으로 공부하지 않은 사람들이 아니면 사용하기 어렵다는 것입니다. 일반적인 모델 사이언티스트들에게는 다소 난해하기도 한 개념들도 많고 한국어로 만들어진 자료는 더더욱 없죠.
#
#
# ### So What?
# 저는 이러한 이유 때문에 이 자료와 발표를 준비하게 되었습니다. 이 자료가 라지스케일 엔지니어링을 처음 배우는 분들에게 어느정도 길라잡이가 되어주길 바랍니다. :)
| notebooks/02_motivation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
from bs4 import BeautifulSoup as bs
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
import pandas as pd
import requests
import time
# !which chromedriver #install chrome driver
# Launch a visible Chrome session driven by splinter.
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
# URL of page to be scraped
url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
browser.visit(url)
# Give the page a moment to render its JS-driven content before parsing.
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
titles=soup.find_all('div', class_="content_title")
#print(titles)
# +
# Examine the results, then determine element that contains sought info
#print(soup.prettify())
# -
# Index 1 skips a site-wide header block to reach the latest article title.
# NOTE(review): this offset depends on the page layout — verify if scraping breaks.
latest_title = titles[1]
latest_title = latest_title.a.text
print(latest_title)
#scrape latest news paragraph text
paragraph=soup.find('div', class_='article_teaser_body')
latest_paragraph=paragraph.text
print(latest_paragraph)
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
results=soup.prettify()
#print(results)
#featured_image_url
images= soup.find_all('div', class_="img")
#print(images)
# The first <img> src is site-relative; it is joined with the site root below.
source_url = images[0].img["src"]
#print(source_url)
#url for first image
featured_image_url =(f"https://www.jpl.nasa.gov{source_url}")
print(featured_image_url)
# Scrape the Mars facts table.
url="https://space-facts.com/mars/"
# pd.read_html fetches the page and parses every <table> into a DataFrame;
# keep only the first table (the planet-profile facts).
# The original fetched the page twice — once discarding the result — and
# wrapped `url` in a redundant str() call; both removed.
tables = pd.read_html(url)[0]
tables
# Scrape the USGS Astrogeology search results for the four Mars hemispheres.
url="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
# +
#print(soup.prettify())
# -
# Each search result exposes an 'itemLink product-item' anchor leading to its
# detail page.
images=soup.find_all('a', class_="itemLink product-item")
#print(images)
# Hemisphere titles are rendered as <h3> elements.
# NOTE(review): this grabs every <h3> on the page — assumes only hemisphere
# titles use that tag; verify against the page markup.
image_title=soup.find_all('h3')
image_title
#print title as text
image_title[1].text
#first url
# Exploratory single-item walk-through before the full loop below:
# index 6 is one sample anchor's relative detail-page href.
url=images[6]['href']
click_url=f"https://astrogeology.usgs.gov/{url}"
click_url
browser.visit(click_url)
html=browser.html
soup=bs(html,'html.parser')
#grab url of image on next page
#after clicking hemisphere title
# The first <li> on the detail page holds the link to the full-size image.
source_url=soup.find('li')
picture_url=source_url.a['href']
picture_url
#create dictionary with hemisphere title and url to enhanced picture
title_list=[]
url_list=[]
dictionary=[]
# Keep every other anchor: the result page presumably emits two
# 'itemLink product-item' anchors per hemisphere (thumbnail + title),
# so de-duplicating aligns `images` with `image_title` by index.
# TODO confirm against the live page markup.
images = images[: : 2]
# Visit each hemisphere's detail page and record its title and the URL of
# the full-size (enhanced) image.
for i in range(len(image_title)):
    title_list.append(image_title[i].text)
    url=images[i]['href']
    click_url=f"https://astrogeology.usgs.gov/{url}"
    browser.visit(click_url)
    # Allow the detail page to render before reading the DOM.
    time.sleep(1)
    html=browser.html
    soup=bs(html,'html.parser')
    # First <li> on the detail page links to the full-size image.
    source_url=soup.find('li')
    picture_url=source_url.a['href']
    url_list.append(picture_url)
    dictionary.append({"title":image_title[i].text, "img_url":picture_url})
# +
#print(title_list)
# +
#print(url_list)
# -
print(dictionary)
| mission_to_mars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from os import listdir
import matplotlib.pyplot as plt
from signals import *
from plots import *
from samplers import *
from pylab import rcParams
rcParams['figure.figsize'] = 5, 2.5
# # Convergence analysis
#
# This notebook shows what happens "behind the scenes" when the Alternating Least Squares method ends up in a local minimum.
# None of those plots ended up in the paper.
#
# The plots are generated based on precomputed data that is produced when solver is run with the option `verbose=True`.
# Base angle parameter: 10 degrees, expressed in radians.
param = np.pi/18

# Degree 5, no noise.
generate_plots(5, 1, 0, 2*param)
plt.show()

# Degree 7, with noise.
# NOTE(review): the other two calls pass the angle as radians (2*param), while
# this one converts to degrees first — confirm which unit generate_plots expects.
f1, f2 = generate_plots(7, 1, 1, np.degrees(2*param))
plt.title("evolution of error over time, degree 7, with noise")
f1.savefig("pol_iter_params.pdf")
# Fixed typo in the output filename ("erros" -> "errors"); per the note above,
# none of these figures are referenced by the paper, so the rename is safe.
f2.savefig("pol_iter_errors.pdf")
plt.show()

# Degree 7, no noise.
generate_plots(7, 1, 0, 2*param)
plt.show()
| convergence_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="klGNgWREsvQv"
# # "Control of a Cart-Pole Dynamic System with TF-Agents"
#
# > Reinforcement Learning (RL) to control the balancing of a pole on a moving cart
# - toc: true
# - branch: master
# - badges: false
# - comments: true
# - hide: false
# - search_exclude: true
# - metadata_key1: metadata_value1
# - metadata_key2: metadata_value2
# - image: images/graphical_representation_of_rl.png
# - categories: [Control, ReinforcementLearning, TensorFlow,TF-Agents,Python]
# - show_tags: true
# + id="4E8ujfcND6pJ"
#hide
# from google.colab import drive
# drive.mount('/content/gdrive', force_remount=True)
# root_dir = "/content/gdrive/My Drive/"
# base_dir = root_dir + 'RL/TF-Agents/blog_posts/TF-Agents-CartPole/'
# # base_dir = ""
# + [markdown] id="lsaQlK8fFQqH"
# ## 1. Introduction
#
# The cart-pole problem can be considered as the "Hello World" problem of Reinforcement Learning (RL). It was described by [Barto (1983)](http://www.derongliu.org/adp/adp-cdrom/Barto1983.pdf). The physics of the system is as follows:
#
# * All motion happens in a vertical plane
# * A hinged pole is attached to a cart
# * The cart slides horizontally on a track in an effort to balance the pole vertically
# * The system has four state variables:
#
# $x$: displacement of the cart
#
# $\theta$: vertical angle on the pole
#
# $\dot{x}$: velocity of the cart
#
# $\dot{\theta}$: angular velocity of the pole
#
# + [markdown] id="cKOCZlhUgXVK"
# Here is a graphical representation of the system:
#
# 
# + [markdown] id="S0-ZNxUXoMR6"
# ## 2. Purpose
#
# The purpose of our activity in this blog post is to construct and train an entity, let's call it a *controller*, that can manage the horizontal motions of the cart so that the pole remains as close to vertical as possible. The controlled entity is, of course, the *cart and pole* system.
# + [markdown] id="1u9QVVsShC9X"
# ## 3. TF-Agents Setup
#
# We will use the Tensorflow TF-Agents framework. In addition, this notebook will need to run in Google Colab.
# + id="KEHR2Ui-lo8O" colab={"base_uri": "https://localhost:8080/"} outputId="382d522d-14fe-496c-f75f-a8c984dead19"
#hide
# !sudo apt-get install -y xvfb ffmpeg
# !pip install 'imageio==2.4.0'
# !pip install pyvirtualdisplay
# !pip install tf-agents
# + id="sMitx5qSgJk1"
from __future__ import absolute_import, division, print_function
import base64
import imageio
import IPython
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import pyvirtualdisplay
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import q_network
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="1K7_uLc_nrCs" outputId="eafce333-e8e1-461f-ea2a-0f37125aa724"
tf.version.VERSION
# + [markdown] id="Cm53yvG3naxc"
# The following is needed for rendering a virtual display:
# + id="J6HsdS5GbSjd"
tf.compat.v1.enable_v2_behavior()
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
# + id="FGSsxkOkEkxF"
#hide
# base_dir = root_dir + 'RL/TF-Agents/blog_posts/TF-Agents-CartPole/'
# base_dir + 'images/graphical_representation_of_rl.png'
# + [markdown] id="y1s77CKrv1m_"
# ## 4. Hyperparameters
# Here we specify all the hyperparameters for the problem:
# + id="psVAtI4Vv4zL"
NUM_ITERATIONS = 20000             # total training iterations
INITIAL_COLLECT_STEPS = 100        # transitions gathered with the random policy before training
COLLECT_STEPS_PER_ITERATION = 1    # environment steps collected per training iteration
REPLAY_BUFFER_MAX_LENGTH = 100000  # replay buffer capacity (transitions)
BATCH_SIZE = 64                    # minibatch size sampled from the replay buffer
LEARNING_RATE = 1e-3               # Adam optimizer learning rate
LOG_INTERVAL = 200                 # print the training loss every this many steps
NUM_EVAL_EPISODES = 10             # episodes averaged per policy evaluation
EVAL_INTERVAL = 1000               # evaluate the policy every this many steps
# + [markdown] id="w913YTn-4pOU"
# ## 5. Graphical Representation of the Problem
#
# We will work with a graphical representation of our cart-and-pole problem, rather than to just ramble on with words. This will enhance the description. The graphic will also include some TF-Agents specifics. Here is the representation:
#
# 
# + id="KRG0AYE6Ocgy"
#hide
# import Image
# im = PIL.Image.open(base_dir+'graphical_representation_of_rl.png')
# + [markdown] id="VMsJC3DEgI0x"
# ## 6. Environment
#
# Let's start with the controller. In Reinforcement Learning, the controlled entity is known as an **environment**. The TF-Agents framework contains some ready-to-use environments that can be created in TF-Agents using the `tf_agents.environments` suites. Fortunately, it makes access to the cart-and-pole environment (set up by OpenAI Gym) easy. Next, we load the cart-and-pole environment from the OpenAI Gym suite.
# + id="pYEz-S9gEv2-"
env_name = 'CartPole-v0'
env = suite_gym.load(env_name)
# + [markdown] id="IIHYVBkuvPNw"
# You can render this environment to see how it looks. A free-swinging pole is attached to a cart. The goal is to move the cart right or left in order to keep the pole pointing up. To verify, we can inspect our loaded environment with:
# + id="RlO7WIQHu_7D" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="d3574103-c7dc-4c1a-ab7f-c6c66ddc727d"
env.reset()
PIL.Image.fromarray(env.render())
# + [markdown] id="NSmAGfjnRzUE"
# ### Input to Environment
#
# The specification of inputs to the environment is provided by the `env.action_spec` method:
# + colab={"base_uri": "https://localhost:8080/"} id="xBBaR-jTSJ3B" outputId="43cf0ab0-1f97-418d-97fa-4a28626f499b"
env.action_spec()
# + [markdown] id="wsQ8Qb5gSXSP"
# `shape` specifies the structure of the input which is a scalar in this case. `dtype` is the data type which is an `int64`. The `minimum` value of the action is `0` and the `maximum` is `1`. We will use the convention that the `action` on the cart is as follows:
#
# * `0` means LEFT
# * `1` means RIGHT
# + [markdown] id="r_fatIpdUMMg"
# ### Evolution of the Environment
#
# The arrival of an `action` at the input of the environment leads to the update of its state. This is how the environment evolves. To advance the state of the environment, the `environment.step` method takes an input `action` and returns a `TimeStep` tuple containing the next observation of the environment and the reward for the action.
#
#
# + [markdown] id="4MoIFnVtVJuA"
# ### Output from Environment
#
# The specification of output from the environment is provided by the `env.time_step_spec` method:
# + colab={"base_uri": "https://localhost:8080/"} id="YxRXy0PPSJzL" outputId="01c7f552-b725-4f4e-8510-dc10c1279ce0"
env.time_step_spec()
# + [markdown] id="gtWKdiFKVouy"
# This specification has the following fields:
# + colab={"base_uri": "https://localhost:8080/"} id="LyEA8FFHSJvj" outputId="2cbc18dd-42c9-4f3d-c760-b70db0551bb5"
env.time_step_spec()._fields
# + [markdown] id="C4cg3P9wcmOb"
# The `step_type` indicates whether a step is the first step, a middle step, or the last step in an episode:
# + colab={"base_uri": "https://localhost:8080/"} id="qvXKTwO7SJks" outputId="b56e0e30-3c63-4f92-8a08-b32c60467ee9"
env.time_step_spec().step_type
# + [markdown] id="byT1cNWOc-de"
# The `reward` is a scalar which conveys the reward from the environment:
# + colab={"base_uri": "https://localhost:8080/"} id="mbWtFTCRdBou" outputId="1574f12f-5c97-482c-a61e-eca46f18379d"
env.time_step_spec().reward
# + [markdown] id="fb3PnaHMdVSH"
# The `discount` is a factor that modifies the `reward`:
# + colab={"base_uri": "https://localhost:8080/"} id="qEvlLMBSdBhp" outputId="0728404c-d691-47ee-9ef7-30a00e2067b8"
env.time_step_spec().discount
# + [markdown] id="SnMNcvGwdluI"
# The `observation` is the observable state of the environment:
# + colab={"base_uri": "https://localhost:8080/"} id="L4ImxIaRdBc0" outputId="74c9caab-274b-4739-cc11-10582389668e"
env.time_step_spec().observation
# + [markdown] id="zN7rw5a-d6v3"
# In this case we have a vector with 4 elements - one each for the cart displacement, cart velocity, pole angle, and pole angular velocity.
# + [markdown] id="IVllomMLfPzO"
# ### Demonstrate the evolution of the environment
#
# Let's submit 10 `RIGHT` actions to the environment, just for fun:
#
# + [markdown] id="dye0LOUvgr5T"
# It is interesting to see an agent actually performing a task in an environment.
#
# First, create a function to embed videos in the notebook.
# + id="l7YqTromgr5T"
def embed_mp4(filename):
    """Embeds an mp4 file in the notebook.

    Reads the video file, base64-encodes it, and returns an
    IPython.display.HTML object with an inline <video> tag.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original relied on the garbage collector to close it).
    with open(filename, 'rb') as video_file:
        video = video_file.read()
    b64 = base64.b64encode(video)
    tag = '''
    <video width="640" height="480" controls>
    <source src="data:video/mp4;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
    </video>'''.format(b64.decode())
    return IPython.display.HTML(tag)
# + [markdown] id="-D4Z-7X9gr5U"
# Now iterate through a few episodes of the Cartpole game with the agent. The underlying Python environment (the one "inside" the TensorFlow environment wrapper) provides a `render()` method, which outputs an image of the environment state. These can be collected into a video.
# + id="wLQGQZVHgr5U"
def create_video(filename, action, num_steps=10, fps=30):
    """Apply a fixed `action` to the module-level `env` for `num_steps` steps,
    record each rendered frame to an mp4, and return it embedded as HTML.

    The TimeStep returned by each env.step call is printed so the evolution
    of the state can be inspected alongside the video.
    """
    filename = filename + ".mp4"
    env.reset()
    with imageio.get_writer(filename, fps=fps) as video:
        # Record the initial frame before any action is applied.
        video.append_data(env.render())
        for _ in range(num_steps):
            tstep = env.step(action); print(tstep)
            video.append_data(env.render())
    return embed_mp4(filename)
# + id="fgarXWXydBaF"
action = np.array(1, dtype=np.int32) #move RIGHT action
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="MSjp-UcydBXL" outputId="b38fcec2-cc02-49ba-9494-4fb7155c3d43"
create_video("untrained-agent", action, 50)
# + [markdown] id="PW6ii0doiXw1"
# We are not surprised to see the pole repeatedly falling over to the left as the agent repeatedly applies an action to the right.
# + [markdown] id="4JSc9GviWUBK"
# We will use two environments: one for training and one for evaluation.
# + id="N7brXNIGWXjC"
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
# + [markdown] id="aoXsA9sarHO7"
# ### Convert environments to TensorFlow
# + [markdown] id="zuUqXAVmecTU"
# The Cartpole environment, like most environments, is written in pure Python. This is converted to TensorFlow using the `TFPyEnvironment` wrapper.
#
# The original environment's API uses Numpy arrays. The `TFPyEnvironment` converts these to `Tensors` to make it compatible with Tensorflow agents and policies.
#
# + id="Xp-Y4mD6eDhF"
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
# + [markdown] id="E9lW_OZYFR8A"
# ## 7. Agent
#
# The controller in our problem is the algorithm used to solve the problem. In RL parlance the controller is known as an `Agent`. TF-Agents provides standard implementations of a variety of `Agents`, including:
#
# - [DQN](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf)
# - [REINFORCE](https://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf)
# - [DDPG](https://arxiv.org/pdf/1509.02971.pdf)
# - [TD3](https://arxiv.org/pdf/1802.09477.pdf)
# - [PPO](https://arxiv.org/abs/1707.06347)
# - [SAC](https://arxiv.org/abs/1801.01290).
#
# For our problem we will use the DQN agent. The DQN agent can be used in any environment which has a discrete action space.
#
# The fundamental problem for an Agent is how to find the next best action to submit to the environment. In the case of a DQN Agent the agent makes use of a `QNetwork`, which is a neural network model that can learn to predict `QValues` (expected returns) for all actions, given an observation from the environment. By inspecting the `QValues`, the agent can decide on the best next action.
# + [markdown] id="kWBr2y4_tYIw"
# ### QNetwork
#
# We use `tf_agents.networks.q_network` to create a `QNetwork`, passing in the `observation_spec`, `action_spec`, and a tuple `fc_layer_params` describing the number and size of the model's hidden layers. Each value in the tuple specifies the number of neurons for that hidden layer:
#
# + id="TgkdEPg_muzV"
fc_layer_params = (100,)
q_net = q_network.QNetwork(
input_tensor_spec= train_env.observation_spec(),
action_spec= train_env.action_spec(),
fc_layer_params= fc_layer_params)
# + [markdown] id="z62u55hSmviJ"
# ### DqnAgent
# We now use `tf_agents.agents.dqn.dqn_agent` to instantiate a `DqnAgent`. In addition to the `time_step_spec`, `action_spec` and the QNetwork, the agent constructor also requires an optimizer (in this case, `AdamOptimizer`), a loss function, and an integer step counter.
# + id="jbY4yrjTEyc9"
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=LEARNING_RATE)
train_step_counter = tf.Variable(0)
agent = dqn_agent.DqnAgent(
time_step_spec= train_env.time_step_spec(),
action_spec= train_env.action_spec(),
q_network= q_net,
optimizer= optimizer,
td_errors_loss_fn= common.element_wise_squared_loss,
train_step_counter= train_step_counter)
agent.initialize()
# + [markdown] id="I0KLrEPwkn5x"
# ### Policies
#
# A policy defines the way an agent acts relative to the environment. The goal of reinforcement learning is to train the underlying model until the policy produces the desired outcome.
#
# In this problem:
#
# - The desired outcome is keeping the pole balanced vertically over the cart
# - The policy returns an action (LEFT or RIGHT) for each `TimeStep`'s `observation`
#
# Agents contain two policies:
#
# - `agent.policy` — The main policy that is used for evaluation and deployment.
# - `agent.collect_policy` — A second policy that is used for data collection.
#
# + id="BwY7StuMkuV4" colab={"base_uri": "https://localhost:8080/"} outputId="eddbeaed-d198-4189-9922-c2e50cc69812"
eval_policy = agent.policy
eval_policy
# + colab={"base_uri": "https://localhost:8080/"} id="aDRtP8G-yxEd" outputId="1573bb86-2091-4e6d-ef50-2f25f3a663d7"
collect_policy = agent.collect_policy
collect_policy
# + [markdown] id="2Qs1Fl3dV0ae"
# Policies can be created independently of agents. For example, use `tf_agents.policies.random_tf_policy` to create a policy which will randomly select an action for each `time_step`.
# + id="HE37-UCIrE69"
random_policy = random_tf_policy.RandomTFPolicy(
time_step_spec= train_env.time_step_spec(),
action_spec= train_env.action_spec())
# + [markdown] id="dOlnlRRsUbxP"
# To get an action from a policy, call the `policy.action(tstep)` method. The `tstep` of type `TimeStep` contains the observation from the environment. This method returns a `PolicyStep`, which is a named tuple with three components:
#
# - `action` — the action to be taken (in this case, `0` or `1`)
# - `state` — used for stateful (that is, RNN-based) policies
# - `info` — auxiliary data, such as log probabilities of actions
# + [markdown] id="7sLfx_CzzNz8"
# Let's create an example environment and setup a random policy:
# + id="5gCcpXswVAxk"
example_environment = tf_py_environment.TFPyEnvironment(
suite_gym.load('CartPole-v0'))
# + [markdown] id="_ZgkSaGhzvgk"
# We reset this environment:
# + id="D4DHZtq3Ndis" colab={"base_uri": "https://localhost:8080/"} outputId="f92796c0-1b0f-4883-a31c-0fa917f22321"
tstep = example_environment.reset()
tstep
# + colab={"base_uri": "https://localhost:8080/"} id="zqQMzy9Pz4Py" outputId="74000dfc-3fb1-4d81-ff00-e21b4a9d0564"
tstep._fields
# + colab={"base_uri": "https://localhost:8080/"} id="rt37ROxD0FXn" outputId="93fc4d44-20a9-484d-f09b-a792f85c3c21"
print(tstep.step_type)
print(tstep.reward)
print(tstep.discount)
print(tstep.observation)
# + [markdown] id="tWSFDcAt0X4c"
# Now we find the `PolicyStep` from which the next `action` can be found:
# + id="PRFqAUzpNaAW" colab={"base_uri": "https://localhost:8080/"} outputId="dc6ad1d9-65ed-44c9-ad3a-5f91662ea684"
pstep = random_policy.action(tstep)
pstep
# + id="W7llvqyLyfAT" colab={"base_uri": "https://localhost:8080/"} outputId="34bb2ffc-5bcc-439f-de8a-d26275d2390d"
pstep._fields
# + colab={"base_uri": "https://localhost:8080/"} id="xJHQbbb586DK" outputId="d466c1d9-e0b2-417f-a735-0472f96688fc"
print(pstep.action)
print(pstep.state)
print(pstep.info)
# + [markdown] id="94rCXQtbUbXv"
# ## 8. Metrics and Evaluation
#
# The most common metric used to evaluate a policy is the **average return**. The return is the sum of rewards obtained while running a policy in an environment for an episode. Several episodes are run, creating an average return.
#
# The following function computes the average return of a policy, given the policy, environment, and a number of episodes.
#
# + id="bitzHo5_UbXy"
def compute_avg_return(env, pol, num_episodes=10):
    """Average per-episode return of policy `pol` acting in `env`.

    Runs `num_episodes` complete episodes; each episode's return is the sum
    of the rewards observed from reset until the terminal time step.
    """
    episode_returns = []
    for _ in range(num_episodes):
        step = env.reset()
        acc = 0.0
        while not step.is_last():
            # Let the policy pick an action and advance the environment.
            step = env.step(pol.action(step).action)
            acc += step.reward
        episode_returns.append(acc)
    average = sum(episode_returns) / num_episodes
    return average.numpy()[0]
# See also the metrics module for standard implementations of different metrics.
# https://github.com/tensorflow/agents/tree/master/tf_agents/metrics
# + [markdown] id="_snCVvq5Z8lJ"
# Running this computation on the `random_policy` shows a baseline performance in the environment.
# + id="xKFMXrtGzdqy" colab={"base_uri": "https://localhost:8080/"} outputId="7b10ac42-e1d2-4258-e0c2-0e5429b8f8cb"
NUM_EVAL_EPISODES
# + id="9bgU6Q6BZ8Bp" colab={"base_uri": "https://localhost:8080/"} outputId="185affe7-7360-4262-d706-7a8753587eea"
compute_avg_return(eval_env, random_policy, NUM_EVAL_EPISODES)
# + [markdown] id="NLva6g2jdWgr"
# ## 9. Replay Buffer
#
# The replay buffer keeps track of data collected from the environment. We will use `tf_agents.replay_buffers.tf_uniform_replay_buffer.TFUniformReplayBuffer`, as it is the most common.
#
# The constructor requires the specs for the data it will be collecting. This is available from the agent using the `collect_data_spec` method. The batch size and maximum buffer length are also required.
#
# + id="vX2zGUWJGWAl"
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec= agent.collect_data_spec,
batch_size= train_env.batch_size,
max_length= REPLAY_BUFFER_MAX_LENGTH)
# + [markdown] id="ZGNTDJpZs4NN"
# For most agents, `collect_data_spec` is a named tuple called `Trajectory`, containing the specs for observations, actions, rewards, and other items.
# + id="_IZ-3HcqgE1z" colab={"base_uri": "https://localhost:8080/"} outputId="818fad3f-0526-4d26-a2f5-8bf2a40b4310"
agent.collect_data_spec
# + id="sy6g1tGcfRlw" colab={"base_uri": "https://localhost:8080/"} outputId="7126fbbd-e221-463b-9032-59d52db3d9dc"
agent.collect_data_spec._fields
# + [markdown] id="rVD5nQ9ZGo8_"
# ## 10. Data Collection
#
# Now we execute the random policy in the environment for a few steps, recording the data in the replay buffer.
# + id="wr1KSAEGG4h9"
def collect_step(env, pol, buffer):
    """Take one step in `env` under policy `pol` and record the transition."""
    current = env.current_time_step()
    decision = pol.action(current)
    following = env.step(decision.action)
    # Package (t, action, t+1) as a Trajectory and push it into the buffer.
    buffer.add_batch(trajectory.from_transition(current, decision, following))
def collect_data(env, pol, buffer, steps):
    """Record `steps` consecutive transitions from `env` into `buffer`."""
    for _ in range(steps):
        collect_step(env, pol, buffer)
collect_data(train_env, random_policy, replay_buffer, INITIAL_COLLECT_STEPS)
# This loop is so common in RL, that we provide standard implementations.
# For more details see the drivers module.
# https://www.tensorflow.org/agents/api_docs/python/tf_agents/drivers
# + [markdown] id="84z5pQJdoKxo"
# The replay buffer is now a collection of Trajectories. Let's inspect one of the Trajectories:
# + id="4wZnLu2ViO4E" colab={"base_uri": "https://localhost:8080/"} outputId="3168a9c4-5046-46b5-824b-f88575065d8b"
traj = iter(replay_buffer.as_dataset()).next()
print(type(traj))
print(len(traj))
print(traj);
# + colab={"base_uri": "https://localhost:8080/"} id="bPlmzAvLWwO1" outputId="52fc933c-f96c-41f5-fd3e-1e2fbf6f0095"
traj[0]
# + colab={"base_uri": "https://localhost:8080/"} id="d-uvmakNX623" outputId="cbcc67b3-33dd-4979-a8e7-27496cbe5f16"
type(traj[0])
# + colab={"base_uri": "https://localhost:8080/"} id="-jFiMstgYHuK" outputId="acb839bf-83cb-4751-ccc3-1f0ffd6e6de9"
traj[0]._fields
# + id="W0xF1tnC6msn" colab={"base_uri": "https://localhost:8080/"} outputId="f4a86e9d-bf2d-496e-9439-88ca66863496"
print('step_type:', traj[0].step_type)
print('observation:', traj[0].observation)
print('action:', traj[0].action)
print('policy_info:', traj[0].policy_info)
print('next_step_type:', traj[0].next_step_type)
print('reward:', traj[0].reward)
print('discount:', traj[0].discount)
# + colab={"base_uri": "https://localhost:8080/"} id="w2J7SAu-YfZ0" outputId="193189ef-2db4-4c68-9126-3b88105bd541"
traj[1]
# + colab={"base_uri": "https://localhost:8080/"} id="0qQL2QhUYfRk" outputId="8fcbb757-f23c-4d9d-9162-63af75f49c99"
type(traj[1])
# + colab={"base_uri": "https://localhost:8080/"} id="6fwgcbf8Y2p_" outputId="79caa734-c514-4ba7-d090-d573ec24960b"
traj[1]._fields
# + colab={"base_uri": "https://localhost:8080/"} id="-6qg48cmY2mF" outputId="3c1d6366-c51f-4c7d-e620-e5ca7d235584"
print('ids:', traj[1].ids)
print('probabilities:', traj[1].probabilities)
# + [markdown] id="TujU-PMUsKjS"
# The agent needs access to the replay buffer. TF-Agents provide this access by creating an iterable `tf.data.Dataset` pipeline which will feed data to the agent.
#
# Each row of the replay buffer only stores a single observation step. But since the DQN Agent needs both the current and next observation to compute the loss, the dataset pipeline will sample two adjacent rows for each item in the batch (`num_steps=2`).
#
# The code also optimizes this dataset by running parallel calls and prefetching data.
# + colab={"base_uri": "https://localhost:8080/"} id="QWuA3GbfiASI" outputId="877bbf5f-dcb2-4a8d-e1f3-aefaa9f435ae"
print(BATCH_SIZE)
# + id="ba7bilizt_qW" colab={"base_uri": "https://localhost:8080/"} outputId="68dce753-7808-4d28-e6a6-ea5245c2b07d"
# Dataset generates trajectories with shape [Bx2x...]
dataset = replay_buffer.as_dataset(
num_parallel_calls=3,
sample_batch_size=BATCH_SIZE,
num_steps=2).prefetch(3)
dataset
# + id="K13AST-2ppOq" colab={"base_uri": "https://localhost:8080/"} outputId="ca632022-0b38-45b1-94b2-242d585aa1a7"
iterator = iter(dataset)
print(iterator)
# + id="Th5w5Sff0b16"
#hide
# Compare this representation of replay data
# to the collection of individual trajectories shown earlier:
# iterator.next()
# + [markdown] id="hBc9lj9VWWtZ"
# ## 11. Training the agent
#
# Two things must happen during the training loop:
#
# - collect data from the environment
# - use that data to train the agent's neural network(s)
#
# This example also periodically evaluates the policy and prints the current score.
#
# The following will take ~5 minutes to run.
# + id="8vdorhCI74h2" colab={"base_uri": "https://localhost:8080/"} outputId="8c1deeda-d3c9-4aaf-d7cb-99cf3ab10304"
NUM_ITERATIONS
# NUM_ITERATIONS = 20000
# + id="0pTbJ3PeyF-u" colab={"base_uri": "https://localhost:8080/"} outputId="c8fb5b24-071d-494d-ac13-126084e8a242"
#@test {"skip": true}
# %%time
# NOTE(review): the original wrapped the %%time cell magic in a
# `try:` block whose body contained only this comment, followed by
# `except: pass` — a SyntaxError in plain Python (a try suite cannot be
# empty). The magic only measured wall time, so it is kept as a comment.
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
agent.train = common.function(agent.train)
# Reset the train step
agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = compute_avg_return(eval_env, agent.policy, NUM_EVAL_EPISODES)
returns = [avg_return]  # evaluation history; charted after training
for _ in range(NUM_ITERATIONS):
    # Collect a few steps using collect_policy and save to the replay buffer
    collect_data(train_env, agent.collect_policy, replay_buffer, COLLECT_STEPS_PER_ITERATION)
    # Sample a batch of data from the buffer and update the agent's network
    experience, unused_info = next(iterator)
    train_loss = agent.train(experience).loss
    # Current step count from the counter handed to DqnAgent at construction.
    step = agent.train_step_counter.numpy()
    if step % LOG_INTERVAL == 0:
        print(f'step = {step}: loss = {train_loss}')
    if step % EVAL_INTERVAL == 0:
        avg_return = compute_avg_return(eval_env, agent.policy, NUM_EVAL_EPISODES)
        print(f'step = {step}: Average Return = {avg_return}')
        returns.append(avg_return)
# + [markdown] id="68jNcA_TiJDq"
# ## Visualization
#
# + [markdown] id="aO-LWCdbbOIC"
# ### Plots
#
# Use `matplotlib.pyplot` to chart how the policy improved during training.
#
# One iteration of `Cartpole-v0` consists of 200 time steps. The environment gives a reward of `+1` for each step the pole stays up, so the maximum return for one episode is 200. The chart shows the return increasing towards that maximum each time it is evaluated during training. (It may be a little unstable and not increase monotonically each time.)
# + id="NxtL1mbOYCVO" colab={"base_uri": "https://localhost:8080/", "height": 300} outputId="b5105e5c-b211-40fa-b373-42e09544fbfb"
#@test {"skip": true}
iterations = range(0, NUM_ITERATIONS + 1, EVAL_INTERVAL)
plt.plot(iterations, returns)
plt.ylabel('Average Return')
plt.xlabel('Iterations')
plt.ylim(top=250)
# + [markdown] id="M7-XpPP99Cy7"
# ### Videos
# + [markdown] id="9pGfGxSH32gn"
# Charts are nice. But more exciting is seeing an agent actually performing a task in an environment.
#
# First, create a function to embed videos in the notebook.
# + id="ULaGr8pvOKbl"
def embed_mp4(filename):
    """Embeds an mp4 file in the notebook.

    Reads the video file, base64-encodes it, and returns an
    IPython.display.HTML object with an inline <video> tag.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original relied on the garbage collector to close it).
    with open(filename, 'rb') as video_file:
        video = video_file.read()
    b64 = base64.b64encode(video)
    tag = '''
    <video width="640" height="480" controls>
    <source src="data:video/mp4;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
    </video>'''.format(b64.decode())
    return IPython.display.HTML(tag)
# + [markdown] id="9c_PH-pX4Pr5"
# Now iterate through a few episodes of the Cartpole game with the agent. The underlying Python environment (the one "inside" the TensorFlow environment wrapper) provides a `render()` method, which outputs an image of the environment state. These can be collected into a video.
# + id="owOVWB158NlF"
# def create_policy_eval_video(policy, filename, num_episodes=5, fps=30):
def create_policy_eval_video(policy, filename, num_episodes=3, fps=30):
    """Render `num_episodes` episodes of `policy` acting in the module-level
    eval environment and return the mp4 recording embedded as notebook HTML.
    """
    out_path = filename + ".mp4"
    with imageio.get_writer(out_path, fps=fps) as writer:
        for _ in range(num_episodes):
            step = eval_env.reset()
            # Record the initial frame, then one frame per policy action.
            writer.append_data(eval_py_env.render())
            while not step.is_last():
                step = eval_env.step(policy.action(step).action)
                writer.append_data(eval_py_env.render())
    return embed_mp4(out_path)
# + id="gf7rBUYt8lXT" colab={"base_uri": "https://localhost:8080/", "height": 539} outputId="3261037f-0392-41b1-cef0-ffc9591b9aa8"
create_policy_eval_video(agent.policy, "trained-agent")
# + [markdown] id="povaAOcZygLw"
# For fun, compare the trained agent (above) to an agent moving randomly. (It does not do as well.)
# + id="pJZIdC37yNH4" colab={"base_uri": "https://localhost:8080/", "height": 539} outputId="17770527-1128-45a7-93a0-6fac1b44a2f7"
create_policy_eval_video(random_policy, "random-agent")
# + id="EM6S09fN8_uR"
| _notebooks/2021-01-19-TF_Agents_CartPole.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: geocomp
# language: python
# name: geocomp
# ---
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
# Fixed: the original line `from sklearn import sklearn.model_selection.LeaveOneGroupOut¶`
# was a SyntaxError (invalid import target plus a stray pilcrow character).
from sklearn.model_selection import LeaveOneGroupOut, LeaveOneOut, KFold, cross_val_score
from sklearn.preprocessing import StandardScaler  # used below but never imported
# +
import sys
sys.path.insert(0,'../')
import mysticbit
from mysticbit import ml
from mysticbit import munging
# Keep the loaded logs; the original discarded the return value.
logs = munging.load_log_data()
# create pipeline: standardize features, then fit a random forest
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('random_forest', RandomForestRegressor(max_depth=2, random_state=0, n_estimators=100)))
model = Pipeline(estimators)
# evaluate pipeline with 10-fold cross-validation
seed = 7
# shuffle=True is required when passing random_state to KFold
# (recent scikit-learn raises a ValueError otherwise).
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
# NOTE(review): X and Y are never defined in this notebook — presumably they
# come from `logs` (feature columns vs. target log). They must be defined
# before this cell will run. TODO: confirm intended features/target.
results = cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
| notebooks/PRD_Run.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 7 Exploration: AR Markers
#
# In this notebook, we will learn how to use OpenCv functions to identify AR markers and write a function to determine their orientation using their coordinates.
#
# Throughout this notebook, **<font style="color:red">text in bold red</font>** indicates a change you must make to the following code block before running it.
#
# ## Table of Contents
# 1. [Getting Started](#GettingStarted)
# 1. [Locating AR Markers](#LocatingARMarkers)
# 1. [Using Marker Coordinates](#UsingMarkerCoordinates)
# 1. [Marker Direction](#MarkerDirection)
# <a id="GettingStarted"></a>
# ## 1. Getting Started
#
# **<font style="color:red">If you are running the car in RacecarSim, set `isSimulation` to `True`</font>**. Leave `isSimulation` `False` if you are using a physical car.
# TODO: Update isSimulation if necessary
isSimulation = True
# Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
# +
# Import Python libraries
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
from enum import IntEnum
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
# -
# The following function will help us in this notebook.
def show_image(image: NDArray) -> None:
    """
    Render a BGR color image inline in the Jupyter Notebook.
    """
    rgb_image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    plt.imshow(rgb_image)
    plt.show()
# Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
# <a id="LocatingARMarkers"></a>
# ## 2. Locating AR Markers
# In Jupyter Notebook, we can take a photo with the car's camera using `rc.camera.get_color_image_async()`. Outside of Jupyter Notebook, we must use `rc.camera.get_color_image()` instead.
#
# Let's see what the car is currently looking at.
# Take and display a photo
image = rc.camera.get_color_image_async()
show_image(image)
# The provided AR markers are 6x6 ArUco markers. The 6x6 markers have a 6x6 binary encoding in the center surrounded by a black border. The center encoding does not encode any data because of the matrix's low resolution. You would not be able to store much data. However, the large black and white pixels make these markers easy to distinguish at a distance and detectable when tilted with respect to the camera.
#
# The following functions utilize existing OpenCV ArUco functions to locate and draw the AR markers. All the markers you will see in the course come from an ArUco dictionary of 250 unique 6x6 ar markers. Each marker has a defined 'upright' position in the dictionary, but can still be detected when rotated.
# +
# Predefined dictionary of 250 distinct 6x6 ArUco markers, plus default detector parameters.
# NOTE(review): Dictionary_get / DetectorParameters_create are the legacy (pre-4.7) OpenCV
# API — confirm the installed cv2 version still provides them (newer versions use
# getPredefinedDictionary / DetectorParameters()).
dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250)
params = cv.aruco.DetectorParameters_create()
def get_ar_markers(
    color_image: NDArray[(Any, Any, 3), np.uint8]
) -> Tuple[List[NDArray[(1, 4, 2), np.int32]], Optional[NDArray[(Any, 1), np.int32]]]:
    """
    Detect AR markers in an image, returning their corner coordinates and ids.
    """
    # detectMarkers also returns rejected candidates, which we discard here.
    detected_corners, detected_ids, _rejected = cv.aruco.detectMarkers(
        color_image, dictionary, parameters=params
    )
    return detected_corners, detected_ids
def draw_ar_markers(
    # fix: was annotated np.uint32, inconsistent with the np.uint8 images used by
    # get_ar_markers and with this function's own return annotation
    color_image: NDArray[(Any, Any, 3), np.uint8],
    corners: List[NDArray[(1, 4, 2), np.int32]],
    ids: NDArray[(Any, 1), np.int32],
    color: Tuple[int, int, int] = (0, 255, 0),
) -> NDArray[(Any, Any, 3), np.uint8]:
    """
    Draw borders around the detected AR markers, modifying the original image.

    Args:
        color_image: BGR image the markers were detected in (modified in place).
        corners: marker corner coordinates, as returned by get_ar_markers.
        ids: marker ids, as returned by get_ar_markers.
        color: BGR border color for the drawn marker outlines.

    Returns:
        The same image with marker borders and ids drawn onto it.
    """
    return cv.aruco.drawDetectedMarkers(color_image, corners, ids, color)
# -
# Let's just see how these functions work on an image.
# +
# Retrieve a new image
image = rc.camera.get_color_image_async()
show_image(image)
# Find AR marker corners and ids
corners, ids = get_ar_markers(image)
# Draw borders around the AR markers in the original image
marked_image = draw_ar_markers(image, corners, ids, (0, 255, 0))
show_image(marked_image)
# -
# <a id="UsingMarkerCoordinates"></a>
# ## 3. Using Marker Coordinates
# The ArUco library can be useful for quickly identifying ar markers. However, the return types of `detectMarkers` aren't very straightforward. They can be passed as-is to the `drawDetectedMarkers` function, but aren't easy to work with. They are left unmodified when passed between `get_ar_markers` and `draw_ar_markers`. Try to gain familiarity with the return types.
#
# The `get_ar_markers` function returns a tuple of the AR marker corners and ids. For a detected marker, the coordinates and id can be found at the same index in both lists. Assume *n* is the number of *detected* markers.
#
# * **corners**: This is a list of numpy arrays of length *n*. If there are no markers an empty list is returned. Each numpy array has the shape (1, 4, 2).
# - **0th dimension**: redundancy
# - **1st dimension**: four corners listed clockwise and beginning with the top-left corner (of the upright marker)
# - **2nd dimension**: the coordinates of the corner (column then row)
# * **ids**: A numpy array with shape (*n*, 1). Each element in ids contains a list of a single id number.
#
# Now try to gain some information about our variables: corners and ids.
# +
# Print the AR marker coordinates and ids
print(f"corners: {corners}")
print(f"ids: {ids}")
# Check shape of numpy arrays
print(f"len(corners): {len(corners)}")
print(f"corners[0] shape: {corners[0].shape}")
print(f"ids shape: {ids.shape}")
# -
# When OpenCv detects AR markers, the markers from the image are compared against markers from a dictionary. The markers have a defined 'upright' position in the dictionary with a true top-left, top-right, bottom-right, and bottom-left corner. So when we receive the coordinates of each marker, the coordinates for the 'upright' marker's top-left corner will always be listed first followed by the other corners in clockwise order. When we draw our markers on the image, a small red box will appear around this special corner. These can be helpful for determining the direction/orientation of the marker.
#
# **<font style="color:red">Write a short statement to check if the true top-left corner is above the true bottom-left.</font>** Left click in the RacecarSim Lab 7 level to rotate one of the ar markers.
# +
# TODO: Check whether the true top-left corner is above the true bottom-left corner
corners[0][0,0, 1] < corners[0][0,3, 1]
# -
# <a id="MarkerDirection"></a>
# ## 4. Marker Direction
# OpenCv does not indicate what direction our ar markers are turned. However, we may find it useful to distinguish whether the AR marker is turned left, right, down, or up. This gives us more ways to extract data from the AR marker beyond just the id number. In fact, it will be used in the racecar time trials.
#
# For this function we provide the `Direction` enum below to represent the four possible marker orientations.
#
# **<font style="color:red">Finish writing the function `get_ar_direction` below, which takes the coordinate data from a single AR marker and returns the direction of the AR marker.</font>**
# +
class Direction(IntEnum):
    """
    AR marker direction
    """
    UP = 0
    RIGHT = 1
    DOWN = 2
    LEFT = 3
# TODO: Check the direction of the AR marker and return the correct enum
def get_ar_direction(ar_corners):
    """Return the Direction in which a detected marker is turned.

    The four corners arrive in the marker's upright order (true top-left
    first), so the index of whichever corner lies closest to the image
    origin maps directly onto the Direction enum.
    """
    corner_points = ar_corners[0]
    distances = np.sqrt(np.square(corner_points).sum(axis=1))
    return Direction(np.argmin(distances))
# -
# You are now ready to begin work on your time trials. Good luck, and don't be afraid to ask questions!
get_ar_direction(corners[1])
| labs/lab7/lab7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/microprediction/timeseries-notebooks/blob/main/pycaret_microprediction_timemachines.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="9wb8F5n1v8qT"
# !pip install pycaret[full]
# !pip install --upgrade statsmodels
# !pip install microprediction
# !pip install timemachines
# + [markdown] id="gaQ8l9Pu1D7w"
#
# ## Leveraging Time-Machines and PyCaret together
# This notebook demonstrates how you can use pycaret and timemachines packages together. We shall:
#
# * Grab a live time series from microprediction
# * Fit with pycaret
# * Run some timemachines models
# * Fit with pycaret again
#
# Yup, there's a chance of overfitting !
#
#
#
#
#
#
# + [markdown] id="rX8RJIT2Cali"
# ### 1. Retrieve recent history of a live time series from www.microprediction.org
# + [markdown] id="4a1bEXza1HxK"
# microprediction is "live" whereas pycaret is for "offline" analysis of time-series. Be aware:
#
# * Time series are live, so each time you run this the data will be different
# * Time series are returned as lagged values, so you need to reverse them for chronological ordering
# * Time is measured in epoch seconds at microprediction
#
# A little loop to ensure the time series is long enough.
#
#
# + id="rlsWzGLF0gn6"
import microprediction
from datetime import datetime, timedelta
from microprediction import MicroReader
import random
import matplotlib.pyplot as plt
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="C4XRXBuH0inu" outputId="1d88978e-a9f2-454b-cbe1-6f87ceeb8a5c"
mr = MicroReader()
all_streams = mr.get_stream_names()
# Keep sampling random streams until we find one with at least 900 lagged observations.
lagged_values = []
while len(lagged_values)<900:
    a_stream = random.choice(all_streams)
    lagged_values, lagged_seconds = mr.get_lagged_values_and_times(a_stream)
# Lagged values arrive newest-first; reverse both series into chronological order.
values = list(reversed(lagged_values))
dt = [ datetime.fromtimestamp(s) for s in reversed(lagged_seconds)] # arbitrary offset
plt.plot(dt,values)
plt.title(a_stream)
# + [markdown] id="zKBZWUYP1Y__"
# ### Use Py-caret to fit a univariate time-series model
# Add a few features
#
# + id="qRQg2EEH0tf7"
# Build the modelling frame: timestamp, target, calendar features, and lagged targets.
# fix: the original created a 0-row frame with a stray 'Date' column and then assigned
# 'date' — a case-inconsistency that leaves a dead all-NaN column, and assigning a list
# to an empty (0-row) DataFrame raises ValueError in modern pandas.
df = pd.DataFrame({'date': dt, 'y': values})
df['dayofweek'] = df['date'].dt.dayofweek
df['hour'] = df['date'].dt.hour
num_lags = 10
lags = range(1, num_lags)
lag_names = ['y_' + str(lag) for lag in lags]
# Lagged copies of the target become autoregressive features.
for lag, lag_name in zip(lags, lag_names):
    df[lag_name] = df['y'].shift(lag)
numerical_features = lag_names
categorical_features = ['dayofweek', 'hour']
# + [markdown] id="uDiL-EUJC4bw"
# Then fit lots of models and compare ...
# + id="j8muE1ED5sRs"
from pycaret.regression import setup, compare_models, pull
# + id="tz59sxu35m4t"
s = setup(df, target = 'y', train_size = 0.95,
data_split_shuffle = False, fold_strategy = 'timeseries', fold = 3,
ignore_features = ['date'],
numeric_features = numerical_features,
categorical_features = categorical_features,
silent = True, verbose = False, session_id = 123)
# + colab={"base_uri": "https://localhost:8080/", "height": 714, "referenced_widgets": ["81545811a49a47f8b4908165d63c8329", "3835fc43699b4ce6bab22a281b6722cf", "70c062e7d3fd4847b18d505b93dc2d47"]} id="ICF707C_-GaM" outputId="bcbefdb7-58d7-48f7-cff8-e0b61dc4902e"
top5 = compare_models(n_select = 5)
# + [markdown] id="Z21Y9RZU9Dqu"
# ## Use py-caret and timemachines together
# Use timemachines to generate some more features
# * By default 3rd party time-series models are not included in timemachines install, so we manually installed statsmodels above. It should be a recent version (e.g. 0.12.2).
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="haFoB74kBFB4" outputId="8370a15c-0b65-48ec-d822-8b69a0dfc3b6"
# !pip freeze | grep statsmodels
# + [markdown] id="aq0iZS0hD3fK"
# If you can't run this line, then you need to !pip install --upgrade statsmodels
# + id="jp-VvubQBb4I"
from statsmodels.tsa.arima_model import ARIMA
# + [markdown] id="HVBN_ytsEQwG"
# Now we run a bunch of univariate time-series models over the data to create forecasts. These can be used as features in pycaret. See [timemachiens](https://github.com/microprediction/timemachines) package for more possibilities (such as Facebook prophet, neuralprophet and so forth) but be warned that some of those libraries will take a very long time to run! Even statsmodels TSA drags a bit, as you can see...
# + colab={"base_uri": "https://localhost:8080/"} id="BrzmY2689BE5" outputId="54a38673-2cdc-46cf-9179-3f8378c0ae44"
from timemachines.skaters.allskaters import EMA_SKATERS, DLM_SKATERS, THINKING_SKATERS, TSA_SKATERS, HYPOCRATIC_ENSEMBLE_SKATERS
from timemachines.skating import prior
skaters = EMA_SKATERS + DLM_SKATERS + TSA_SKATERS + HYPOCRATIC_ENSEMBLE_SKATERS
skater_names = [ f.__name__ for f in skaters ]
for f, skater_name in zip(skaters,skater_names):
print('Running '+skater_name)
y = df['y'].values
x,x_std = prior(f, y=y, k=1) # Runs a time-series model forward
df[skater_name] = x
# + [markdown] id="uRKwRUetT31O"
# Perhaps that was a little painful. That's why the timemachines library exists - to provide fast incremental time-series models (in addition to exposing others whose speed is what it is).
#
# But now we can use the 1-step-ahead forecasts as features
# + id="9Q3NiHSqSSV5"
# Skaters are supposed to return a list of foreward predictions, but we only want
# one of them so we extract the first element in the list
# Here I do this defensively as there are a couple of models that might violate this convention
for skater_name in skater_names:
if isinstance( df[skater_name].values[0],list ):
df[skater_name] = [ x[0] for x in df[skater_name].values]
# + colab={"base_uri": "https://localhost:8080/"} id="s9bykgf8SILi" outputId="4eeccfa6-b9a6-463a-b9b3-a4ec1a1fbb35"
df['quick_aggressive_ema_ensemble'].values[:4]
# + colab={"base_uri": "https://localhost:8080/", "height": 749, "referenced_widgets": ["a6abecc185d64be48bf06be1506bb48d", "93f898bf55ae430ba9b42ef39d481252", "e05394abd67e42178067324371931c40"]} id="kVblp56UE-5W" outputId="11b58c4f-ca68-47fb-edc2-360b811aaa92"
s = setup(df, target = 'y', train_size = 0.95,
data_split_shuffle = False, fold_strategy = 'timeseries', fold = 3,
ignore_features = ['date'],
numeric_features = numerical_features + skater_names,
categorical_features = categorical_features,
silent = True, verbose = False, session_id = 321)
top5again = compare_models(n_select = 5)
# + [markdown] id="9g-1OWp0Fq8R"
# Notice that the errors have been reduced (well, maybe - I don't know which random time series you chose).
#
# For more tips like this consider following [microprediction](https://www.linkedin.com/company/65109690) on Linked-In.
| pycaret_microprediction_timemachines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:Python3] *
# language: python
# name: conda-env-Python3-py
# ---
# +
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import faculty.datasets as datasets
from pandas.api.types import CategoricalDtype
import matplotlib.ticker as mtick
import statsmodels.api as sm
import plotly.graph_objects as go
from pandas import Series, DataFrame
import os
from functools import reduce
vcvtype = 'nonrobust'
# -
all_football_18 = pd.read_csv('/project/all_football_18.csv', index_col=0)
all_football_18.head()
# Based on the 6 most significant/important variables we found, we decided to make a ranking table and rank the countries based on these variables. The aim is to compare the actual ranking of each country with the ranking we found to be the best according to the data.
# Keep the id columns plus the 6 most informative variables. The .copy() matters:
# the original kept a view of all_football_18, so the later column assignment
# triggered pandas' SettingWithCopyWarning and could silently fail to write.
ranking_table = all_football_18[["country_name", 'Final_positional_ranking',
                                 "goal", 'possession_percentage', 'stat-value',
                                 'passLongBallInaccurate', 'shortPassAccurate',
                                 'keyPassShort']].copy()
# +
cols = ["goal", 'possession_percentage','stat-value',
'shortPassAccurate','keyPassShort','passLongBallInaccurate']
ranking_table["Ranking_after_analysis"] = ranking_table[cols].apply(tuple,axis=1)\
.rank(method='dense',ascending=False).astype(int)
ranking_table.sort_values("Ranking_after_analysis")
# -
# moving the Rank_after_analysis column to be just after the Final_positional_ranking so
# that it can be easier for the analysis
my_list = list(ranking_table)
ranking_table = ranking_table[['country_name',
'Final_positional_ranking',
'Ranking_after_analysis',
'goal',
'possession_percentage',
'stat-value',
'passLongBallInaccurate',
'shortPassAccurate',
'keyPassShort']]
ranking_table.sort_values(by=["Final_positional_ranking"], inplace = True)
ranking_table.head()
# Export dataset ranking_table to be able to use it in other jupyter notebooks
ranking_table.to_csv("ranking_table.csv")
# +
fig = plt.figure(figsize=(10, 10))
x = ranking_table["Final_positional_ranking"]
y = ranking_table["country_name"]
# fix: the original called plt.bar(x, y), using the country NAMES as bar heights,
# which contradicts the axis labels; countries belong on x, rankings on y.
_ = plt.yticks(np.arange(min(x), max(x) + 1, 1.0))
_ = plt.xticks(rotation=45)
plt.grid(visible=True)
plt.xlabel("Countries")
plt.ylabel("Position Ranking")
plt.title("World Cup 2018 ranking")
plt.bar(y, x, color='b', width=0.8, align='center')
# fix: distinct filename — the original silently overwrote the comparison chart
# that the next cell saves under the same path.
plt.savefig("/project/Visualisation_graphs/world_cup_2018_ranking.png")
plt.show()
# -
# +
fig = plt.figure(figsize=(15,15))
plt.grid(visible=True)
df1 = pd.DataFrame({"Position Ranking":ranking_table["Final_positional_ranking"], "Countries":ranking_table["country_name"]})
df2 = pd.DataFrame({"Position Ranking":ranking_table["Ranking_after_analysis"], "Countries":ranking_table["country_name"]})
df1['Legend']="Actual_positional_ranking"
df2['Legend']="Ranking_after_analysis"
res=pd.concat([df1,df2])
sns.barplot(y='Position Ranking',x='Countries',data=res,hue='Legend')
_ = plt.yticks(range(len(y)+1))
_ = plt.xticks(rotation = 45)
plt.title("World Cup 2018 ranking")
plt.savefig("/project/Visualisation_graphs/world_cup_ranking_comparison.png")
plt.show()
# -
| Ranking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# _Examples taken from <NAME>'s book, Deeplearning with Python_
#
# https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/4.4-overfitting-and-underfitting.ipynb
import keras
# # Fighting overfitting
#
# We will take the IMDB dataset from Keras and start applying various technique to reduce overfitting
# # Load the data
# +
from keras.datasets import imdb
import numpy as np
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer sequences into a (len(sequences), dimension) float matrix."""
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # Mark each word index present in this review with a 1.
        for idx in word_indices:
            encoded[row, idx] = 1.0
    return encoded
# Our vectorized training data
x_train = vectorize_sequences(train_data)
# Our vectorized test data
x_test = vectorize_sequences(test_data)
# Our vectorized labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# -
# # Load the original model
# +
from keras import models
from keras import layers
original_model = models.Sequential()
original_model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
original_model.add(layers.Dense(16, activation='relu'))
original_model.add(layers.Dense(1, activation='sigmoid'))
original_model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
# -
# # Reduce the network size
# +
smaller_model = models.Sequential()
smaller_model.add(layers.Dense(4, activation='relu', input_shape=(10000,)))
smaller_model.add(layers.Dense(4, activation='relu'))
smaller_model.add(layers.Dense(1, activation='sigmoid'))
smaller_model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
# -
# # Fit the original model
original_hist = original_model.fit(x_train, y_train,
epochs=20,
batch_size=512,
validation_data=(x_test, y_test))
# # Fit the smaller model
smaller_model_hist = smaller_model.fit(x_train, y_train,
epochs=20,
batch_size=512,
validation_data=(x_test, y_test))
# # Compare the loss between models
epochs = range(1, 21)
original_val_loss = original_hist.history['val_loss']
smaller_model_val_loss = smaller_model_hist.history['val_loss']
# +
import matplotlib.pyplot as plt
# b+ is for "blue cross"
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
# "bo" is for "blue dot"
plt.plot(epochs, smaller_model_val_loss, 'bo', label='Smaller model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
# -
# # Add weight regularization
# +
from keras import regularizers
l2_model = models.Sequential()
l2_model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
activation='relu', input_shape=(10000,)))
l2_model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
activation='relu'))
l2_model.add(layers.Dense(1, activation='sigmoid'))
# -
l2_model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
l2_model_hist = l2_model.fit(x_train, y_train,
epochs=20,
batch_size=512,
validation_data=(x_test, y_test))
# # Compare original model with regularized model
# +
l2_model_val_loss = l2_model_hist.history['val_loss']
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, l2_model_val_loss, 'bo', label='L2-regularized model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
# -
# # Add dropout
# +
dpt_model = models.Sequential()
dpt_model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
dpt_model.add(layers.Dropout(0.5))
dpt_model.add(layers.Dense(16, activation='relu'))
dpt_model.add(layers.Dropout(0.5))
dpt_model.add(layers.Dense(1, activation='sigmoid'))
dpt_model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
# -
dpt_model_hist = dpt_model.fit(x_train, y_train,
epochs=20,
batch_size=512,
validation_data=(x_test, y_test))
# # Compare with the original model
# +
dpt_model_val_loss = dpt_model_hist.history['val_loss']
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, dpt_model_val_loss, 'bo', label='Dropout-regularized model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
# -
| keras/regularization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Histogramme
#
# La fonction `hist` permet de visualiser la répartition d'une variable. Elle distribue les échantillons en un certain nombre de classes.
import matplotlib.pyplot as plt
import numpy as np
# ## Analyser 10000 points
#
# Nous analysons une distribution gaussienne de 10'000 points.
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)
plt.plot(x, 'o');
# Ces points sont situés autour d'une moyenne de 100, avec une déviation standard de 15.
plt.hist(x, 50, density=1, facecolor='g', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([50, 150, 0, 0.03])
plt.grid(True)
# ## Distribution normale
n=1000
y = np.random.randn(n)
plt.title('Distribution normale')
plt.grid(True)
plt.hist(y, 40);
# ## Distribution uniforme
n = 1000
y = np.random.rand(n)
plt.title('Distribution uniforme')
plt.hist(y, 10, facecolor='pink');
plt.grid();
# ## Fréquence de lettres
#
# Un dictionnaire est une structure qui permet de compter facilement les lettres dans une phrase.
# Count each character's occurrences, preserving first-appearance order.
s = 'Introduction to Python programming'
d = {ch: s.count(ch) for ch in dict.fromkeys(s)}
# Ceci nous permet de dessiner un histogramme.
plt.bar(d.keys(), d.values());
# ## Histogramme superposé
#
# Avec l'option `bottom` nous pouvons superposer un graphique en barres sur un autre. Avec l'option `yerr`nous ajoutons une barre d'erreur.
# +
labels = ['G1', 'G2', 'G3', 'G4', 'G5']
men_means = [20, 35, 30, 35, 27]
women_means = [25, 32, 34, 20, 25]
men_std = [2, 3, 4, 1, 2]
women_std = [3, 5, 2, 3, 3]
width = 0.35 # the width of the bars: can also be len(x) sequence
fig, ax = plt.subplots()
ax.bar(labels, men_means, width, yerr=men_std, label='Men')
ax.bar(labels, women_means, width, yerr=women_std,
bottom=men_means,
label='Women')
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.legend()
plt.show()
| doc/plot/hist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Groovy Magic测试
# + language="groovy"
#
# h = HTML("<h1 style='color:#8080DD'>Groovy HTML示例</h1>")
# + language="groovy"
#
# def p = new Plot(title : 'Groovy Plot示例', xLabel: 'X轴', yLabel: 'Y轴');
# p << new Bars(x: [0, 1, 2, 3, 4, 5], y: [3, 4, 3, 2, 5, 6], width: 0.6)
# p << new Line(x: [0, 1, 2, 3, 4, 5], y: [5, 2, 6, 5, 4, 8], width: 3)
# -
# # Groovy获取数据,通过autotranslate传递至Python中使用
from beakerx import *
from beakerx.object import beakerx
# + language="groovy"
#
# // 通过beakerx.xxx进行不同编程语言间的autotranslate。
# beakerx.gcsv = new CSV().read("sample.csv")
# +
print(beakerx.gcsv)
Histogram(
data = [it['单价'] for it in beakerx.gcsv],
ItemMargin = 1,
binCount = 10
)
# + language="groovy"
#
# gdata = [
# [ 栏目1: 1, 栏目2: 2],
# [ 栏目1: 3, 栏目2: 4],
# [ 栏目1: 5, 栏目2: 6],
# ]
# gdata << [ 栏目1: 7, 栏目2: 8 ]
#
# beakerx.gdata = gdata
# +
# fix: `np` was used below but numpy was never imported in this notebook (NameError)
import numpy as np

# Pull the Groovy-built table across via beakerx autotranslate and add a random column.
pdata = beakerx.gdata
for it in pdata:
    it['栏目3'] = np.random.randint(1, 10)
TableDisplay(pdata)
| beakerx_samples/beakerx_python_languagemagic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import six
import sys
# sys.path.append('..')
import tkseem as tk
word = 'حالكم'
class MorphBert(tk.MorphologicalTokenizer):
    """WordPiece-style (BERT) sub-word splitter on top of tkseem's morphological tokenizer.

    Reuses the vocabulary built by the parent's train(), but splits words with the
    greedy longest-match-first WordPiece algorithm, marking continuation pieces
    with a leading '##'.
    """
    # Words longer than this many characters map straight to unk_token (as in BERT).
    max_input_chars_per_word = 10
    def _tokenize_from_dict(self):
        # Intentionally a no-op: this subclass bypasses the parent's
        # frequency-dict splitting in favour of _split_word() below.
        pass
    def convert_to_unicode(self,text):
        """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
        if six.PY3:
            if isinstance(text, str):
                return text
            elif isinstance(text, bytes):
                return text.decode("utf-8", "ignore")
            else:
                raise ValueError("Unsupported string type: %s" % (type(text)))
        elif six.PY2:
            # NOTE(review): Python 2-only branch; `unicode` is undefined on
            # Python 3, but this code is unreachable there (six.PY3 is true).
            if isinstance(text, str):
                return text.decode("utf-8", "ignore")
            elif isinstance(text, unicode):
                return text
            else:
                raise ValueError("Unsupported string type: %s" % (type(text)))
        else:
            raise ValueError("Not running on Python2 or Python 3?")
    def whitespace_tokenize(self, text):
        """Runs basic whitespace cleaning and splitting on a piece of text."""
        text = text.strip()
        if not text:
            return []
        tokens = text.split()
        return tokens
    def tokenize(self,word):
        # Public entry point used by the benchmark cells below.
        return self._split_word(word)
    def _split_word(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        text = self.convert_to_unicode(text)
        output_tokens = []
        for token in self.whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            # Greedy longest-match-first: at each position take the longest
            # vocabulary entry starting there ('##'-prefixed when not word-initial).
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No vocabulary entry matches at this position: give up on
                    # the whole word and emit unk_token instead.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
class MorphGenerators(tk.MorphologicalTokenizer):
    """Variant of the morphological tokenizer that enumerates candidate word splits via generators."""
    def _tokenize_from_dict(self, text, freq_dict, cache=False, max_size=20):
        """Tokenize using frequency based approach given a dictionary

        Args:
            text (str): input string
            freq_dict (dict): frequency dictionary
            cache (bool, optional): faster approach. Defaults to False.
            max_size (int, optional): maximum word size. Defaults to 20.

        Returns:
            list: output tokens (see the review NOTEs below about multi-word input)
        """
        assert freq_dict
        tokens = []
        output_tokens = []
        for word in text.split():
            if len(word) >= max_size:
                print(f"{word} is too long ...")
                output_tokens.append(self.unk_token)
                continue
            if word in freq_dict:
                output_tokens.append(word)
            else:
                groups_of_subwords = self._split_word(word)
                # Strip the '##' marker from each group's first piece so the
                # word-initial sub-word can be looked up directly in freq_dict.
                for group in groups_of_subwords:
                    group[0] = group[0].replace('##','')
                # Keep only segmentations whose every piece appears in freq_dict.
                groups_of_valid_subwords = list(
                    filter(
                        lambda group: all(
                            subword in freq_dict for subword in group
                        ),
                        groups_of_subwords,
                    )
                )
                if groups_of_valid_subwords:
                    # NOTE(review): this `break` exits the loop over *words*, so only
                    # the last word examined reaches the selection logic below and
                    # earlier words' sub-tokens are never appended. Harmless for the
                    # single-word benchmark in this notebook, but it looks like a
                    # leftover from a loop over split sizes — confirm intent.
                    break
            # NOTE(review): if the very first word is found in freq_dict,
            # `groups_of_valid_subwords` is not yet bound here and this raises
            # NameError — confirm against the parent implementation.
            if len(groups_of_valid_subwords) == 0:
                output_tokens.append(self.unk_token)
            else:
                # Prefer the segmentation whose pieces have the highest total frequency.
                sorted_groups_of_valid_subwords = sorted(
                    groups_of_valid_subwords,
                    key=lambda group: sum(freq_dict[subword] for subword in group),
                )
                tokens = sorted_groups_of_valid_subwords[-1]
        for token in tokens:
            output_tokens.append(str(token))
        return output_tokens
    def _split_word(self, word):
        """Enumerate every contiguous segmentation of a word into sub-words.

        Args:
            word (str): word input

        Returns:
            list: list of groups, each a list of '##'-prefixed sub-word pieces
        """
        def _split(_word):
            # Recursive generator over all contiguous segmentations of _word.
            if not _word:
                return
            # The segment kept whole ('##'-prefixed; the caller strips the marker
            # from word-initial pieces afterwards).
            yield [f'##{_word}',]
            for i in range(1, len(_word)):
                for subwords in self._split_word(_word[i:]):
                    # `word[:i]` equals `_word[:i]` here: _split is only ever called
                    # with the full `word`, and the recursion goes through
                    # self._split_word, which rebinds `word` at each level.
                    yield [f'##{word[:i]}'] +subwords
        subwords_groups = [group for group in _split(word)]
        return subwords_groups
# +
# training each tokenizer
morph_generators = MorphGenerators()
morph_generators.train()
morph_bert = MorphBert()
morph_bert.train()
morph = tk.MorphologicalTokenizer()
morph.train()
# -
# %%timeit
morph_generators.tokenize(word)
# %%timeit
morph_bert.tokenize(word)
# %%timeit
morph.tokenize(word)
| tasks/benchmarking methods for morphological tokenizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="qDOI4a05X01Y"
# Name: <NAME>
# HW5 News API + COVID-19 Dataset
# CPP CS 4650.01
# + id="xGV0jpeyXvCx" outputId="b965daf6-9438-4055-b89f-bb94d017bcb6" colab={"base_uri": "https://localhost:8080/", "height": 384}
# !pip install spacy
# + id="g1FIWJL5YtdJ" outputId="c62d56c9-f2cd-447f-fb90-5513c1348c5c" colab={"base_uri": "https://localhost:8080/", "height": 436}
# !python -m spacy download en_core_web_lg
# + [markdown] id="H3AUuCLPNARo"
# ## Restart runtime NOW
# + id="tpY0bZ9oY5qR"
import spacy
nlp_eng = spacy.load('en_core_web_lg')
# + id="KlDkzPDjbucD" outputId="7a758b93-aa8f-4c4b-becf-b9d423b2b594" colab={"base_uri": "https://localhost:8080/", "height": 141}
# !pip install newsapi-python
from newsapi import NewsApiClient
# + id="sOAM46tgjaPR"
# All the rest import statements
import pickle
import pandas as pd
import string
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# + id="j0t9mioBaBjg"
newsapi = NewsApiClient (api_key='API_KEY_HERE')
# + id="_6snXq02cMOw"
# Get news articles details with a specified query.
def get_news(pagina):
    """Fetch one page of English-language 'coronavirus' articles (2020-09-27 to 2020-10-27)."""
    response = newsapi.get_everything(
        q='coronavirus',
        language='en',
        from_param='2020-09-27',
        to='2020-10-27',
        sort_by='relevancy',
        page=pagina,
    )
    return response
# + id="J9RX4ePjK2ud"
articles = list(map(get_news, range(1,6)))
# + id="w1gEnHI7fZGX"
# Use pickle and save a file
filename = 'articlesCOVID.pckl'
pickle.dump(articles, open(filename, 'wb'))
filename = 'articlesCOVID.pckl'
loaded_model = pickle.load(open(filename, 'rb'))
filepath = '/content/articlesCOVID.pckl'
pickle.dump(loaded_model, open(filepath, 'wb'))
# + id="kO6pFZUIfbYK" outputId="0eff06e8-e703-481c-9b7e-d52f05338e34" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Iterate through every article to retrieve required information and save in df.
# Flatten the paged API responses into one record per article.
dados, titles, dates, descriptions = ([] for _ in range(4))
for page in articles:
    for x in page['articles']:
        title, description = x['title'], x['description']
        date, content = x['publishedAt'], x['content']
        titles.append(title)
        descriptions.append(description)
        dates.append(date)
        # fix: append the values directly instead of re-reading them back through
        # a manually maintained `counter` index (fragile and redundant)
        dados.append({'title': title, 'date': date, 'desc': description, 'content': content})
df = pd.DataFrame(dados)
df = df.dropna()  # drop articles missing any field
df.shape
# + id="IiW-E4YB6lDZ"
# Retrieve useful deemed keywords from the article's content.
def get_keywords_eng(content):
    """Return the NOUN/VERB/PROPN tokens of `content`, in order of
    appearance, excluding spaCy stop words and punctuation."""
    wanted_pos = {'NOUN', 'VERB', 'PROPN'}
    stop_words = nlp_eng.Defaults.stop_words
    return [token.text
            for token in nlp_eng(content)
            if token.text not in stop_words
            and token.text not in string.punctuation
            and token.pos_ in wanted_pos]
# + id="2DJB0qJ0f4ZB"
# Add the five keywords per article to df.
# results[i] is a list of up to five '#'-prefixed keywords for article i,
# ranked by frequency within that article's content.
results = []
for content in df.content.values:
    results.append([('#' + x[0]) for x in Counter(get_keywords_eng(content)).most_common(5)])
df['keywords'] = results
# + id="kJYE72EPQPpT" outputId="1310d299-ebed-4f06-dc62-1f667cd282a5" colab={"base_uri": "https://localhost:8080/", "height": 589}
# Save the dataset
df.to_csv('dataset.csv')
df
# + id="yZmH0Q4TQXWm" outputId="00b1427a-cfa1-45f4-932e-2347710eca1d" colab={"base_uri": "https://localhost:8080/", "height": 55}
# Print five most common words for every article
# (str() of the nested list; used below as raw input text for the word cloud)
text = str(results)
print(text)
# + id="KL9iYXoigSHC" outputId="eb45e2f0-4afb-4a89-b4f3-12e61dff1ca5" colab={"base_uri": "https://localhost:8080/", "height": 198}
# Create a wordcloud of the keywords
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color="white").generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
| News_API+Covid_Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//R:percent
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# + [markdown] tags=[]
# # Description
# + [markdown] tags=[]
# It uses the `clustree` package to generate clustering tree visualizations.
# + [markdown] tags=[]
# # Modules loading
# + tags=[]
# Load clustree (clustering-tree visualisation) and the tidyverse (read_tsv).
library(clustree)
library(tidyverse)
# + [markdown] tags=[]
# # Settings
# + tags=[]
# Root directory of the clustering results, supplied via environment variable.
CLUSTERING_DIR <- Sys.getenv("PHENOPLIER_RESULTS_CLUSTERING_DIR")
# + tags=[]
CLUSTERING_DIR
# + tags=[]
CONSENSUS_CLUSTERING_DIR = file.path(CLUSTERING_DIR, "consensus_clustering")
# + tags=[]
CONSENSUS_CLUSTERING_DIR
# + tags=[]
MANUSCRIPT_FIGURES_DIR <- Sys.getenv("PHENOPLIER_MANUSCRIPT_FIGURES_DIR")
# + tags=[]
# Fall back to /tmp when the manuscript figures directory is not configured.
if (MANUSCRIPT_FIGURES_DIR == "") {
    MANUSCRIPT_FIGURES_DIR = "/tmp"
}
# + tags=[]
MANUSCRIPT_FIGURES_DIR
# + tags=[]
OUTPUT_FIG_DIR = file.path(MANUSCRIPT_FIGURES_DIR, "clustering")
dir.create(OUTPUT_FIG_DIR, showWarnings = FALSE)
# + tags=[]
OUTPUT_FIG_DIR
# + [markdown] tags=[]
# # Load data
# + tags=[]
# Cluster assignments per k; columns are presumably named k<value>
# (matching the clustree prefix below) -- confirm against the TSV schema.
data <- read_tsv(file.path(CONSENSUS_CLUSTERING_DIR, "clustering_tree_data.tsv"))
# + tags=[]
dim(data)
# + tags=[]
head(data)
# + [markdown] tags=[]
# # Plot clustering tree
# + [markdown] tags=[]
# ## Plain
# + tags=[]
options(repr.plot.width = 20, repr.plot.height = 15)
clustree(data, prefix = "k")
# ggsave with no plot argument saves the last plot displayed.
ggsave(
    file.path(OUTPUT_FIG_DIR, "clustering_tree.svg"),
    height=15,
    width=20,
    scale=1,
)
# + tags=[]
| nbs/13_consensus_clustering/040_05-clustering_trees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Anaconda 3)
# language: python
# name: python3
# ---
# ### Install/Upgrade Packages
import sys
from os.path import getsize
from urllib.request import urlretrieve
import warnings
warnings.filterwarnings("ignore")  # NOTE(review): silences ALL warnings globally; consider narrowing by category
# Third-party packages required by the rest of the project, installed/upgraded
# into the current kernel's environment by the pip line below.
packages = "folium geopandas glmnet holidays matplotlib numpy pandas pyarrow scikit-learn scipy seaborn"
# !{sys.executable} -m pip install {packages} --upgrade
# ### Download NOAA NCDC Weather Dataset in Jan-Feb 2017-2020
# +
# %%time
from calendar import monthrange  # exact day counts for any month and year

station = "USW00094728"  # NY CITY CENTRAL PARK
output_dir = "../raw_data"
# Download one daily-summaries CSV per month (Jan and Feb, 2017-2020).
for year in range(2017, 2021):
    for m in range(1, 3):
        # monthrange handles leap years correctly (the original hand-rolled
        # `year % 4` test is wrong for century years such as 2100, and only
        # handled January/February).
        last_day = monthrange(year, m)[1]
        month = str(m).zfill(2)
        out = f"weather_{year}-{month}.csv"
        url = "https://www.ncei.noaa.gov/access/services/data/v1?" \
              "dataset=daily-summaries&" \
              "dataTypes=PRCP,SNOW,TMAX,TMIN&" \
              f"stations={station}&" \
              f"startDate={year}-{month}-01&endDate={year}-{month}-{last_day}&" \
              "units=metric"
        urlretrieve(url, f"{output_dir}/{out}")
        print(f"Done downloading {out} to {output_dir} with size {getsize(f'{output_dir}/{out}') / 1024:.2f}KB")
# -
# ### Download NYC TLC Dataset for Yellow Taxi in Jan-Feb 2017-2020
# %%time
# Fetch the Jan/Feb yellow-taxi trip CSVs for 2017-2020 (several GB total).
for year in range(2017, 2021):
    for m in range(1, 3):
        month = str(m).zfill(2)
        # NOTE(review): assumes ../raw_data/large/<year>/ already exists;
        # urlretrieve fails otherwise -- confirm directories are pre-created.
        output_dir = f"../raw_data/large/{year}"
        out = f"yellow_tripdata_{year}-{month}.csv"
        # NOTE(review): the nyc-tlc S3 bucket has since been retired in favour
        # of CloudFront-hosted Parquet files -- verify this URL still resolves.
        url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/{out}"
        urlretrieve(url, f"{output_dir}/{out}")
        print(f"Done downloading {out} to {output_dir} with size {getsize(f'{output_dir}/{out}') / 1073741824:.2f}GB")
| code/1_Packages_and_Data_Downloading.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jsedoc/ConceptorDebias/blob/master/Debias_BERT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="-lJ5H2KrsL9f" colab_type="code" colab={}
import numpy as np
import torch
t = np.transpose
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="C5UBFar7so29" colab_type="text"
# # Setup BERT
# + id="GbLgfNRusWBt" colab_type="code" colab={}
# !pip install -q flair
# + id="sAxGdPUKsZB5" colab_type="code" colab={}
from flair.embeddings import BertEmbeddings
from flair.data import Sentence
# init embedding
embedding = BertEmbeddings('bert-large-uncased')
# + id="lII3xWJ8shhk" colab_type="code" colab={}
# + [markdown] id="j91LrkyBsu-R" colab_type="text"
# # Get Brown Corpus
# + id="5GVVfJkDs0fh" colab_type="code" colab={}
from tqdm import tqdm
import nltk
nltk.download('brown')
from nltk.corpus import brown
brown_corpus = brown.sents()
# + id="Uo4Qu3fntX5f" colab_type="code" colab={}
# Embed the first 20k Brown sentences with BERT. brown_e[i] is a numpy array
# with one row per token of sentence i (tokens as whitespace-joined words).
brown_e = []
for s in tqdm(brown_corpus[:20000]):
    sentence = Sentence(' '.join(s))
    embedding.embed(sentence)  # attaches a BERT embedding to each token in place
    sent_emb = torch.stack([token.embedding for token in sentence]).numpy()
    brown_e.append(sent_emb)
# + id="Ml280Y7Qkah8" colab_type="code" colab={}
import numpy as np
t = np.transpose
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="mghOzcVXklz3" colab_type="code" colab={}
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, Isomap, LocallyLinearEmbedding, MDS, SpectralEmbedding
from sklearn.preprocessing import StandardScaler
np.set_printoptions(suppress=True)
np.set_printoptions(precision=4)
plt_style = 'seaborn-talk'
# Experimental: HDBScan is a state-of-the-art clustering algorithm
hdbscan_available = True
try:
import hdbscan
except ImportError:
hdbscan_available = False
# + id="GXRIbtOmksRo" colab_type="code" colab={}
def do_plot(X_fit, title=None, labels=['']):
    """Scatter-plot a 2D or 3D embedding, coloured by label.

    Parameters
    ----------
    X_fit : array of shape (n_points, 2 or 3)
        Low-dimensional coordinates to plot.
    title : str, optional
        Figure title.
    labels : sequence
        One label per row of X_fit; points sharing a label share a colour.
        Defaults to a single anonymous group.

    Raises
    ------
    Exception
        If X_fit has a column count other than 2 or 3.
    """
    dimension = X_fit.shape[1]
    label_types = sorted(list(set(labels)))
    num_labels = len(label_types)
    colors = cm.Accent(np.linspace(0, 1, num_labels))
    print(X_fit.shape, label_types, num_labels, colors)
    with plt.style.context(plt_style):
        fig = plt.figure()
        if dimension == 2:
            ax = fig.add_subplot(111)
            for lab, col in zip(label_types, colors):
                if num_labels > 1:
                    idxs = [i for i, v in enumerate(labels) if v == lab]
                    ax.scatter([X_fit[i, 0] for i in idxs],
                               [X_fit[i, 1] for i in idxs],
                               c=col, label=lab)
                else:
                    ax.scatter(X_fit[:, 0],
                               X_fit[:, 1],
                               c=col)
        elif dimension == 3:
            ax = fig.add_subplot(111, projection='3d')
            for lab, col in zip(label_types, colors):
                # BUG FIX: `labels` is a plain list, so `labels == lab` was a
                # scalar bool -- indexing X_fit[False/True] always plotted row
                # 0 or 1 instead of the matching points. Select matching rows
                # by index, mirroring the 2D branch.
                idxs = [i for i, v in enumerate(labels) if v == lab]
                ax.scatter([X_fit[i, 0] for i in idxs],
                           [X_fit[i, 1] for i in idxs],
                           [X_fit[i, 2] for i in idxs],
                           c=col, label=lab)
        else:
            raise Exception('Unknown dimension: %d' % dimension)
        plt.title(title)
        if num_labels > 1:
            ax.legend()
        plt.show()
# + id="2XB7QFvHkxWq" colab_type="code" colab={}
def pick_embeddings(corpus, sent_embs, word_list):
    """Collect the embedding of every occurrence of the words in `word_list`.

    Returns a tuple (X, labels, sents) where X[k] is the embedding of the
    k-th matching token, labels[k] is its surface form, and sents[k] is the
    sentence (token list) it came from. Occurrences are returned in corpus
    order.
    """
    X, labels, sents = [], [], []
    for sent_idx, sentence in enumerate(corpus):
        for tok_idx, word in enumerate(sentence):
            if word not in word_list:
                continue
            X.append(sent_embs[sent_idx][tok_idx])
            labels.append(word)
            sents.append(sentence)
    return (X, labels, sents)
# + id="4QGjzpJOlEni" colab_type="code" colab={}
brown_corpus = brown_corpus[:20000]
brown_embs = brown_e
# + id="797rJIVxk1wH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3717} outputId="69987e75-958a-49b6-9521-8e2b42408dbe"
X, labels, _ = pick_embeddings(brown_corpus, brown_embs, ['husband', 'wife', 'Mary', 'John'])
pca = PCA(n_components=2)
pca.fit(X)
do_plot(pca.transform(X), 'PCA', labels=labels)
X, labels, _ = pick_embeddings(brown_corpus, brown_embs, ['man', 'woman'])
do_plot(pca.transform(X), 'PCA', labels=labels)
X, labels, _ = pick_embeddings(brown_corpus, brown_embs, ['himself', 'herself'])
do_plot(pca.transform(X), 'PCA', labels=labels)
X, labels, _ = pick_embeddings(brown_corpus, brown_embs, ['boy', 'girl'])
do_plot(pca.transform(X), 'PCA', labels=labels)
X, labels, _ = pick_embeddings(brown_corpus, brown_embs, ['Mary', 'John'])
do_plot(pca.transform(X), 'PCA', labels=labels)
X, labels, _ = pick_embeddings(brown_corpus, brown_embs, ['husband', 'wife'])
do_plot(pca.transform(X), 'PCA', labels=labels)
X, labels, _ = pick_embeddings(brown_corpus, brown_embs, ['David', 'Lauren', 'Robert', 'Bonnie'])
do_plot(pca.transform(X), 'PCA', labels=labels)
# + id="0CiP73winLx4" colab_type="code" colab={}
# BUG FIX: `pickle` was never imported in this notebook, so this cell raised
# NameError at runtime.
import pickle

# Use a context manager so the file handle is closed even if dump fails
# (the original leaked the handle returned by open()).
with open("bert_brown_corpus_20k.pkl", "wb") as f:
    pickle.dump({'bert_bce': brown_embs}, f)
| ContextualizedEmbeddings/Debias_BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Section 3: Procedural generation with few qubits
# We will now introduce a method to generate terrain using few qubits. Specifically, few enough qubits to be easy to simulate. Though the techniques here could also be used on real quantum devices, we will focus on simulation here.
#
# First we will introduce some additional details about how to describe qubits, in order to explain the underlying process behind the method.
# ### Some mathematical notation
#
# The states $|0\rangle$ and $|1\rangle$ represent two completely disjoint states that a qubit can be in, since they represent completely opposite outcomes for a certain measurement type. Specifically, they are the states that represent certainty for an outcome of `0` and `1`, respectively, for a z measurement. We refer to these two states as the *z basis*.
#
# We can also define the *x basis*, using states that are certain to output `0` and `1` for x measurements. The state that is certain to output `0` is typically referred to as $|+\rangle$, and that for `1` is $|-\rangle$.
#
# In this notation, the $|$ and $\rangle$ are often confusing to newcomers. The job of these symbols is essentially just to remind us that we are describing a quantum state. This ensures that $|0\rangle$, for example, is not confused with the number $0$ or bit value `0`.
#
# There are an infinite number of possible single qubit states, and obviously they do not all have their own unique symbol. Instead it is possible to express any state in terms of any basis. For example, here are $|+\rangle$ and $|-\rangle$ expressed in terms of $|0\rangle$ and $|1\rangle$, and vice-versa.
#
# $$
# |+\rangle \, = \, \frac{1}{\sqrt{2}}|0\rangle \, + \, \frac{1}{\sqrt{2}}|1\rangle, \,\,\,\, |-\rangle \, = \, \frac{1}{\sqrt{2}}|0\rangle \, - \, \frac{1}{\sqrt{2}}|1\rangle, \\
# |0\rangle \, = \, \frac{1}{\sqrt{2}}|+\rangle \, + \, \frac{1}{\sqrt{2}}|-\rangle, \,\,\,\, |1\rangle \, = \, \frac{1}{\sqrt{2}}|+\rangle \, - \, \frac{1}{\sqrt{2}}|-\rangle
# $$
#
# In general, any single qubit state can be expressed in the for $c_0 |0\rangle \, + \, c_1 |1\rangle$. Here $c_0$ and $c_1$ are complex numbers, referred to as the amplitudes for $|0\rangle$ and $|1\rangle$. They can be directly used to determine the probabilities for the outcomes of a z measurement: $|c_0|^2$ represents the probability of getting a `0`, and $|c_1|^2$ is the probability for a `1`. The amplitudes for other possible sets of basis states, and hence the probabilities for the corresponding measurement, can be found by reexpressing the state in that basis.
#
# Now that we know how to write down single qubit states, which are already familiar to us from the last section, we can move on to multiqubit states. This is basically done by replacing the single bit values with bit strings. For example, measuring two qubits could yield one of the four outputs `00`, `01`, `10` and `11`. Any state of two qubits can be represented as
#
# $$
# c_{00} |00\rangle + c_{01} |01\rangle + c_{10} |10\rangle + c_{11} |11\rangle.
# $$
#
# Here $|c_{00}|^2$ represents the probability of getting `00` from a z measurement of both qubits, and so on. The states can also be expressed in terms of a tensor product, such as $|01\rangle = |0\rangle\otimes |1\rangle$. For readers who are not familiar with this, in the context of this work it is simply a way of combining the states of two single qubits into one two qubit state, and it behaves in equations in the same way as multiplication.
#
# The default initial state of two qubits is expressed $|00\rangle$, which simply implies that both are in state $|0\rangle$. Applying gates then transforms the state. For example, applying `ry` for an angle $\theta$ has the effect
#
# $$
# |0\rangle \rightarrow \cos \frac{\theta}{2} \, |0\rangle + \sin \frac{\theta}{2} |1\rangle, \,\,\,\, |1\rangle \rightarrow \cos \frac{\theta}{2} \, |1\rangle + \sin \frac{\theta}{2} |0\rangle
# $$
#
# If only done to one qubit of a two qubit state, the result would be
#
# $$
# |00\rangle = |0\rangle \otimes |0\rangle \rightarrow |0\rangle \otimes \left( \cos \frac{\theta}{2} \, |0\rangle + \sin \frac{\theta}{2} |1\rangle \right) = \cos \frac{\theta}{2} \, |00\rangle + \sin \frac{\theta}{2} |01\rangle
# $$
#
# Here it is the qubit on the right on which the gate was applied. By repeating the same process, we could similarly calculate the effect of further gates on this qubit, or gates on the other qubit.
#
# For a more in-depth treatment of all these topics, consult the Qiskit textbook$^1$. However, what we have seen so far already covers the behaviour that will be used in this section: Single qubit rotations allow us to perturb an initial state. After a rotation for a small value of $\theta$ the state is mostly what it was initially (in this case $|00\rangle$). However, some amplitude has also been moved to neighbouring basis states (in this case $|01\rangle$). Here 'neighbouring' refers to the Hamming distance of the bit strings, and so two states are considered neighbouring if their bitstrings differ on only a single bit.
#
# In this section we will introduce a method to encode height maps in quantum states. Using exactly the effect described above, we will then use single qubit rotations to perturb those heightmaps. The end result will be a method that is essentially a quantum version of one of the most rudimentary techniques in procedural generation: the box blur.
# ### Converting heightmaps to quantum states
# In this section we are focussing on relatively small quantum processors of around 10 qubits. We will want to use them to generate maps with thousands of points. Clearly there is a difference of scale between the two. However, as we saw above, the state of $n$ qubits is described by a set of $2^n$ amplitudes: one for each possible output bit strings. We can therefore close the gap by making use of all these. Unfortunately, gaining access to all these numbers is not a trivial task. By the very fact that the number of them is exponential with the number of qubits, the complexity of calculating them is also exponential.
#
# In the method that follows we will specifically look to access the probabilities for each of the $2^n$ possible output bit strings (for a z measurement of each qubit) rather than the amplitudes themselves. This can be done by repeating a circuit many times to sample from the output. Specificially, we will use `shots=4**n` samples. This exponential complexity is what limits the method to being used only on small processors.
#
# To get started we will need the same tools as in the last section
# +
from qiskit import QuantumCircuit, execute, Aer
from math import pi
import random
from tools import plot_height
# -
# as well as some basic tools for maths.
import numpy as np
# Our first task is to find a mapping between the numbers that describe a heightmap (height values for each coordinate) and the numbers that describe a quantum state (amplitudes for each z basis bit string). The most important element of this is to define a mapping between the coordinates and the bit strings.
#
# The ideal mapping for our purposes would be one that maps neighbouring coordinates to neighbouring bit strings. For example, if we map some $(x,y)$ to `0000` it would be ideal to have something like
#
# * $(x+1,y) \, \rightarrow$ `1000`
# * $(x-1,y) \, \rightarrow$ `0100`
# * $(x,y+1) \, \rightarrow$ `0010`
# * $(x,y-1) \, \rightarrow$ `0001`
#
# Here the Manhattan distance between any two points is equal to the Hamming distance between the corresponding bit strings.
#
# In general, this will not be a perfect mapping. We usually consider heightmaps based on 2D square lattices, whereas the structure formed by the Hamming distance between $n$-bit strings forms an $n$-dimensional hypercube. This will mean that there will always have to be non-neighbouring coordinates whose bit strings are neighbours. However, we can ensure that neighbouring coordinates always have neighbouring bit strings.
#
# The core of how we do this is defined in the following function, `make_line`. This creates a sequence of unique bit strings of at least a given `length`, such that each bit string in the list is a neighbour to those before and after.
def make_line ( length ):
    """Return a list of at least `length` bit strings ordered so that
    consecutive entries differ in exactly one bit (a reflected Gray code).

    The list has length 2**n for the smallest n with 2**n >= length, and
    every string has n bits.
    """
    # number of bits needed so that 2**n >= length
    n = int(np.ceil(np.log(length)/np.log(2)))
    line = ['0','1']
    # Each pass reflects the list and extends every string by one bit,
    # doubling the number of strings while keeping neighbours adjacent:
    # the reflection makes the join point agree on all existing bits, and
    # the appended bit differs only across the two halves.
    for _ in range(n-1):
        reflected = line + line[::-1]
        half = len(reflected) // 2
        line = ([s + '0' for s in reflected[:half]] +
                [s + '1' for s in reflected[half:]])
    return line
# For example, here is a list of length 8.
line = make_line(8)
print(line)
# With this, we can use `line[x]+line[y]` to define a unique string for each coordinate of an $8\times8$ grid. For example, for the point $(3,5)$ we can take the string at `line[3]` to serve as the $x=3$ coordinate, and the string `line[5]` to serve as $y=5$. These are the strings `010` and `111`, respectively. The string for the combined coordinates is then simply obtained by putting this together, giving us $(3,5)\,\rightarrow$ `010111`.
#
# The following function, `make_grid` runs through all the coordinates of an `L`$\times$`L` grid, calculates the corresponding bit string, and then outputs all the results. This is done as a Python dictionary, with bit strings as keys and the corresponding coordinates as values.
def make_grid(L):
    """Assign a unique bit string to each coordinate of an L x L grid.

    Returns a dict keyed by bit string (x-string concatenated with
    y-string, using `make_line`) whose values are the (x, y) coordinates.
    Neighbouring coordinates always receive neighbouring bit strings.
    """
    line = make_line( L )
    return { line[x] + line[y]: (x, y)
             for x in range(L)
             for y in range(L) }
# For example, for an $8\times8$ grid, we find that the bit string `010111` corresponds to exactly the set of coordinates that we saw above.
grid = make_grid(8)
grid['010111']
# Now we have figured out what to do with the coordinates in a heightmap, it is time to focus on the height values themselves. To do this, we will assume that each value $h$ exists in the range $0\leq h \leq 1$, and that the largest of all the heights is equal to exactly $1$. This assumption is without loss of generality, since any set of heights can be shifted and rescaled into this form.
#
# We will define a quantum state for which the probability of a bit string $b$ is proportional to the height of the corresponding point ${\tt grid[ } b {\tt \\]}$,
#
# $$
# \frac{ p_{b'} }{ p_{b} } = \frac{ h_{ {\tt grid[ } b' {\tt \\]} } }{ h_{ {\tt grid[} b {\tt \\]} } }
# $$
#
# The reason we cannot simply set $p_{b} = h_{ {\tt grid[} b {\tt \\]} }$ is that probabilities must always sum to 1. To achieve this we simply renormalize using
#
# $$
# p_{b} = \frac{ h_{ {\tt grid[} b {\tt \\]} } }{ H }, \,\,\,\, H = \sum_b h_{ {\tt grid[} b {\tt \\]} }.
# $$
#
# Now we have the probabilities, we need corresponding amplitudes for the basis states. When we restrict to the case that these amplitudes are real numbers, they are related to the probability by the simple relation $c_b = \sqrt{p_b}$. The state we require to encode our heightmap is then
#
# $$
# \frac{1}{\sqrt{H}} \sum_b \sqrt{h_b} \, | b \rangle
# $$
#
# Now we can construct a function to create this state for any given heightmap. Specifically, we will convert the heightmap to a Qiskit `QuantumCircuit` object. This will contain a circuit that prepares the required state from the initial $|00\ldots00\rangle$ state. Fortunately, the construction of the required circuit can be done by the Qiskit function `initialize`. All it needs is for us to supply the required state. The whole process is implemented by the function `height2circuit`, below.
def height2circuit(height,grid):
    """Encode a heightmap as a quantum circuit.

    Builds the n-qubit state whose amplitude on basis string b is
    sqrt(h / H), where h is the height at grid[b] and H is the sum of all
    heights, then returns a circuit that prepares this state via
    `initialize`.

    Args:
        height: dict mapping (x, y) coordinates to heights; coordinates
            absent from the dict are treated as height 0.
        grid: dict mapping bit strings to (x, y) coordinates (as produced
            by `make_grid`); all keys share the same length n.

    Returns:
        QuantumCircuit on n qubits (and n classical bits) preparing the
        encoded state from |00...0>.
    """
    # number of qubits = length of any one of the grid's bit strings
    n = len( list(grid.keys())[0] )
    state = [0]*(2**n)
    H = 0  # running normalization constant: the sum of all heights
    for bitstring in grid:
        (x,y) = grid[bitstring]
        if (x,y) in height:
            h = height[x,y]
            # unnormalized amplitude: the square root of the height
            state[ int(bitstring,2) ] = np.sqrt( h )
            H += h
    # normalize so that the squared amplitudes sum to 1
    for j,amp in enumerate(state):
        state[ j ] = amp/np.sqrt(H)
    qc = QuantumCircuit(n,n)
    qc.initialize( state, qc.qregs )
    return qc
# ### Converting quantum states to heightmaps
#
# The next job is to implement the opposite process: to turn a quantum circuit into an image. This is done by running the circuit for many samples, and using the number of samples that give each result to determine the probabilities. These probabilities are then rescaled such that the largest is equal to 1, in order to regain the original heightmap. This is done in the `circuit2height` function below.
def circuit2height(qc,grid,backend,shots=None,log=False):
    """Run a circuit and decode the measurement statistics into a heightmap.

    Samples the circuit's z-measurement outcomes and converts the counts
    for each bit string into a height at the corresponding grid
    coordinate, rescaled so the largest height is 1 and none is below 0.

    Args:
        qc: QuantumCircuit whose output distribution encodes the heightmap.
        grid: dict mapping bit strings to (x, y) coordinates; outcomes not
            present in `grid` are ignored.
        backend: Qiskit backend on which to execute the circuit.
        shots: number of samples; defaults to max(4**n, 8192) for n qubits.
        log: if True, heights are derived from the log of the counts,
            which boosts small features relative to the peak.

    Returns:
        dict mapping (x, y) coordinates to heights in [0, 1]; coordinates
        whose bit string was never sampled are absent (implicitly 0).
    """
    # get the number of qubits from the circuit
    n = qc.n_qubits
    # construct a circuit to perform z measurements
    meas = QuantumCircuit(n,n)
    for j in range(n):
        meas.measure(j,j)
    # if no shots value is supplied use 4**n by default (unless that is too small)
    if not shots:
        shots = max(4**n,8192)
    #run the circuit on the supplied backend
    counts = execute(qc+meas,backend,shots=shots).result().get_counts()
    # determine max and min counts values, to use in rescaling
    if log: # log=True uses the log of counts values, instead of the values themselves
        min_h = np.log( 1/10 ) # fake small counts value for results that didn't appear
        max_h = np.log( max( counts.values() ) )
    else:
        min_h = 0
        max_h = max( counts.values() )
    # loop over all bit strings in `counts`, and set the corresponding value to be
    # the height for the corresponding coordinate. Values are rescaled to ensure
    # that the biggest height is 1, and that no height is less than zero.
    height = {}
    for bitstring in counts:
        if bitstring in grid:
            if log: # log=True uses the log of counts values, instead of the values themselves
                height[ grid[bitstring] ] = ( np.log(counts[bitstring]) - min_h ) / (max_h-min_h)
            else:
                height[ grid[bitstring] ] = ( counts[bitstring] - min_h ) / (max_h-min_h)
    return height
# In the above functions, it is assumed that heightmaps are expressed in the form of Python dictionaries, with coordinates as keys and the corresponding heights as values. Absent coordinates are assumed to correspond to a value of 0. Here is an example of such a height map.
height = {(2,6):1,(2,5):1,(5,6):1,(5,5):1,(3,1):1,(4,1):1,(2,1):1,(5,1):1,(1,2):1,(6,2):1,(7,7):0}
# The function `plot_height` takes such height maps and plots them. This is done as a contour plot, with the default choice of colours being one that reflects terrain.
# With this we can plot the example heightmap. In this case, we will use a greyscale colour scheme.
plot_height(height,color_map='gray')
# We can now test the process that we have created so far by converting this heightmap into a quantum circuit, and then back into an image. In this test, we use the local simulator to run the circuit, which is invoked using `Aer.get_backend('qasm_simulator')`.
qc = height2circuit(height,grid)
new_height = circuit2height(qc,grid,Aer.get_backend('qasm_simulator'))
plot_height(new_height,L=8,color_map='gray')
# Here we see that the image returns unscathed. For a more explicit comparison, we can look at the height values of the original and new heightmaps directly.
print("Coords height new_height")
for pos in height:
if height[pos]>0:
print(pos,'',height[pos],' ',new_height[pos])
# Here we see that the exact values of the original are slightly perturbed in the version that has gone through the quantum circuit. This is due to statistical noise when calculating probabilities using a finite number of samples. Increasing the number of `shots` could suppress the effect if required.
#
# We can now manipulate the heighmap by applying gates to the quantum circuit. Though there are many possible choices we could make of what gates to apply, we will look at simply applying `ry` to all qubits by a given angle `theta`.
qc = height2circuit(height,grid)
qc.ry(pi/10,qc.qregs[0]) # shorthand for applying ry to all qubits in qc
new_height = circuit2height(qc,grid,Aer.get_backend('qasm_simulator'))
plot_height(new_height,L=8,color_map='gray')
# From the above, we see that the effect of the given rotation angle is not large. This is partly because the effect induced by the rotation, which is to lead amplitude from each position to its neighbours, decays exponentially with distance for small `theta`. To boost the effect we will use the `log` keyword argument from the `circuit2height` function, which uses the logarithms of the probabilities as heights, rather than the probabilities themselves. This will help us get better results. As an example, here is the same process as that above, but with `log=True`.
new_height = circuit2height(qc,grid,Aer.get_backend('qasm_simulator'),log=True)
plot_height(new_height,L=8,color_map='gray')
# In this case we see the expected blur effect.
#
# The effect of large angle rotations is not simply an extension of this blur effect. Instead, an interference effect is induced as the amplitudes from different sources begin to overlap. This can be seen most easily by starting with a checkerboard pattern for the heightmap.
height = {}
for x in range(8):
for y in range(8):
height[x,y] = (x+y)%2
plot_height(height,L=8,color_map='gray')
# The corresponding quantum state is an equally weighted superposition of all bit strings with even parity. It is known as the GHZ state$^2$, and is an example of a highly entangled state. In this case, for an $8x8$ grid, it is an entangled state of 6 qubits.
#
# This state was chosen because of it's particular behaviour when `ry` rotations are applied. Even for small angle rotations, the effect is different than a simple blur. Instead, the interference effects cause the amplitude to become focussed in two particular points.
qc = height2circuit(height,grid)
qc.ry(pi/8,qc.qregs[0])
new_height = circuit2height(qc,grid,Aer.get_backend('qasm_simulator'),log=True)
plot_height(new_height,L=8,color_map='gray')
# The effect peaks for the angle `pi/2`, which we should regard as a large angle in this context.
qc = height2circuit(height,grid)
qc.ry(pi/2,qc.qregs[0])
new_height = circuit2height(qc,grid,Aer.get_backend('qasm_simulator'),log=True)
plot_height(new_height,L=8,color_map='gray')
# The underlying state here is
#
# $$
# \frac{1}{\sqrt{2}} \left( |000000\rangle + |111111\rangle \right).
# $$
#
# so all amplitude is concentrated at the corresponding two points.
#
# Most heightmaps we consider will not be so extreme in their behaviour. However, they will always lead to quantum interference effects in some form. This allows us to generate interesting patterns, which we can then use in procedurally generated content.
#
# For the rest of this section, we will apply the process to randomly generated seed images, created by the function `generate_seed` below.
def generate_seed(L,num=5):
    """Create a random sparse heightmap on an L x L grid.

    Picks `num` random coordinates (repeats collapse to one entry, so the
    map may hold fewer than `num` points), assigns each a random height in
    [0, 1), then forces one randomly chosen point to height exactly 1.
    """
    seed = {}
    for _ in range(num):
        coord = (random.randint(0, L - 1), random.randint(0, L - 1))
        seed[coord] = random.random()
    # guarantee the maximum height in the map is exactly 1
    seed[random.choice(list(seed.keys()))] = 1
    return seed
# Here is an example for `L=16`.
L = 16
grid = make_grid(L)
seed = generate_seed(L)
plot_height(seed,L,color_map='gray')
# We will apply $\pi/4$ `ry` rotations to this in order to induce quantum interefence effects and obtain a resulting pattern. We will refer to such patterns as *quantum tartan*.
# +
qc = height2circuit(seed,grid)
qc.ry(pi/4,qc.qregs[0])
tartan = circuit2height(qc,grid,Aer.get_backend('qasm_simulator'),log=True)
plot_height(tartan,color_map='gray')
# -
# An obvious way to obtain more examples of quantum tartan is to apply the process for different seed images. However, we can also generate new and unique results without needing to run the quantum circuit again. Instead, we can use the fact that the `grid` used above is not the only mapping of bitstrings to coordinates that serves our needs. In fact, we can create alternative versions by simply shuffling each bit string in the same way. This will yield another mapping of bitstrings to coordinates, but preserve the fact that neighbouring coordinates have neighbouring bit strings.
#
# The function `shuffle_height` below performs this shuffling for a given `grid`. It then produces a new heightmap, for which each position is moved to the point corresponding to the shuffled version of its bit string. The result is new quantum tartan produced from the results of the same quantum job.
def shuffle_height (height,grid):
    """Produce a new heightmap by randomly permuting the bit positions
    used by `grid`.

    A single random permutation of the n bit positions maps every bit
    string to its shuffled version; each height value then moves from its
    original coordinate to the coordinate of the shuffled string. Since
    bit permutations preserve Hamming distance, the heightmap's local
    structure survives the scramble.
    """
    # number of qubits encoded by the grid's bit strings
    n = int(np.log(len(grid)) / np.log(2))
    # randomly chosen permutation of the bit positions
    order = list(range(n))
    random.shuffle(order)
    # remap: original coordinate -> coordinate of the shuffled bit string
    remap = {grid[b]: grid[''.join([b[j] for j in order])] for b in grid}
    # move every height value to its remapped coordinate
    return {remap[pos]: h for pos, h in height.items()}
# As an example, here is a shuffled version of the above tartan.
plot_height(shuffle_height(tartan,grid),L,color_map='gray')
# ### Creating a quantum island
#
# The size of these heightmaps is $16\times 16$. We could increase this number, but at the cost of runtime for the quantum process (or simulation thereof). In order to create a large map, we will need to use another technique.
#
# To be concrete, we will aim to create an improved version of the island from the last section.
# +
from island import island
size = max(max(island))+1
plot_height(island)
# -
# It is still a bit too smooth to be interesting for a player to explore within a game. To solve this problem, we will overlay it with quantum tartan. The following code generates a set of tartans by shuffling the one generated above.
tartans = [ shuffle_height(tartan,grid) for _ in range(int(size/L)) ]
# The overlaying procedure is done by moving selecting successive $L\times L$ patches of the island, moving by a distance of $L/2$ between each. For each patch, the height of the island at each point is combined with that of corresponding point within the tartan. Finally, the result is renormalized such that the maximum value is 1.
#
# The process is described most effectively by the code itself.
# +
quantum_island = {}
for x0 in range(0,size+int(L/2),int(L/2)):
for y0 in range(0,size+int(L/2),int(L/2)):
tartan = random.choice(tartans) # choose a random tartan from the list
for (x,y) in tartan:
xx = x-int(L/2)+x0
yy = y-int(L/2)+y0
if (xx,yy) in island:
quantum_island[xx,yy] = (1+tartan[x,y])*island[xx,yy]
# renormalize
max_height = max(quantum_island.values())
for (x,y) in quantum_island:
quantum_island[x,y] = quantum_island[x,y]/max_height
# plot the result
plot_height(quantum_island,size)
# -
# The result is better than the original island, though still not entirely realistic. It serves as the starting point for generating interesting and realtistic terrain.
#
# A more advanced use of these techniques is found in the blog post on which this section was based$^3$. One method used there is to overlay the tartans randomly, rather than successively overlaying in a strict pattern. Code to implement such a process, as well as an example of it in action, can be found below.
# +
quantum_island = {}
for _ in range(int(10*size**2/L**2)):
chosen = False
while not chosen:
x0 = random.randint(0,size-1)
y0 = random.randint(0,size-1)
chosen = (random.random()<island[x0,y0])
tartan = random.choice(tartans)
for (x,y) in tartan:
xx = x-int(L/2)+x0
yy = y-int(L/2)+y0
if (xx,yy) in quantum_island:
quantum_island[xx,yy] += tartan[x,y]
else:
quantum_island[xx,yy] = tartan[x,y]
max_height = max(quantum_island.values())
for (x,y) in quantum_island:
quantum_island[x,y] = quantum_island[x,y]/max_height
plot_height(quantum_island,size)
# -
# ## References
#
# 1. ["Learn Quantum Computing using Qiskit", Qiskit Community (2019)](https://community.qiskit.org/textbook/)
# 2. ["Going Beyond Bell's Theorem", <NAME>, et al (1989)](https://arxiv.org/abs/0712.0921)
# 3. ["Creating infinite worlds with quantum computing", <NAME>ton (2019)](https://medium.com/qiskit/creating-infinite-worlds-with-quantum-computing-5e998e6d21c2)
# 4. [The Ædwen Brooch, British Museum collection online](https://www.britishmuseum.org/research/collection_online/collection_object_details/collection_image_gallery.aspx?partid=1&assetid=752049001&objectid=64612)
# ## Towards a combined approach
#
# A set of tartans, as produced here, will be used in the combined terrain generation procedure of [Section 5](5_A_Combined_Approach.ipynb). This procedure was developed during the PROCJAM 2019 game jam, which had a theme of 'Heritage'. Inspired by this, the seed images will be 8x8 pixel representations of the pseudo-runes from the Ædwen Brooch$^4$. This is an Anglo-Saxon artefact, decorated with runes that are probably only there to convey a sense of Viking mystery.
# +
from runes import runes
for rune in runes:
plot_height(rune,color_map='gray')
# -
# They will do the same thing in the content that we will procedurally generate, though with the addition of a quantum twist. Fourteen quantum runes will be created, two from each of Ædwen's runes. One of these will be lightly perturbed with an angle of $\pi/32$. The other will get a more significant perturbation of $\pi/8$.
# +
from runes import runes
tartans = []
for theta in [pi/32,pi/8]:
for rune in runes:
L = max(max(rune))+1
grid = make_grid(L)
qc = height2circuit(rune,grid)
qc.ry(theta,qc.qregs[0])
tartan = circuit2height(qc,grid,Aer.get_backend('qasm_simulator'),log=True)
plot_height(tartan,L=8,color_map='gray')
tartans.append( tartan )
with open('tartans.py', 'w') as file:
file.write('tartans='+str(tartans))
# -
| Quantum_Procedural_Generation/3_FewQubit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Yu7u1mNfnxVP"
# **Copyright 2019 The Sonnet Authors. All Rights Reserved.**
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ---
# + [markdown] colab_type="text" id="WAfR3cvnoGMB"
# # Preamble
# + colab={} colab_type="code" id="4FqOAJb_jJR9"
import sys
assert sys.version_info >= (3, 6), "Sonnet 2 requires Python >=3.6"
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="XnWX2azUDuCl" outputId="8516864f-06cb-4dd5-946c-adeae1e17f3a"
# !pip install dm-sonnet tqdm
# + colab={} colab_type="code" id="mn5ofK4-D1Qk"
import sonnet as snt
import tensorflow as tf
import tensorflow_datasets as tfds
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="Rpp_houJEHr9" outputId="9e7597ba-d8a5-483e-b415-5729ef3102e8"
print("TensorFlow version: {}".format(tf.__version__))
print(" Sonnet version: {}".format(snt.__version__))
# + [markdown] colab_type="text" id="5RmHUmz1padR"
# Finally lets take a quick look at the GPUs we have available:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="TXoxEvKepdw2" outputId="da76b78b-274e-462d-fa47-89d626a4370f"
# !grep Model: /proc/driver/nvidia/gpus/*/information | awk '{$1="";print$0}'
# + [markdown] colab_type="text" id="UYYmqvOKfNbk"
# # Dataset
#
# We need to get our dataset in a state where we can iterate over it easily. The TensorFlow Datasets package provides a simple API for this. It will download the dataset and prepare it for us to speedily process on a GPU. We can also add our own pre-processing functions to mutate the dataset before our model sees it:
# + colab={} colab_type="code" id="UkBRriaQEr4z"
batch_size = 100
def process_batch(images, labels):
images = tf.squeeze(images, axis=[-1])
images = tf.cast(images, dtype=tf.float32)
images = ((images / 255.) - .5) * 2.
return images, labels
def mnist(split):
dataset = tfds.load("mnist", split=split, as_supervised=True)
dataset = dataset.map(process_batch)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.cache()
return dataset
mnist_train = mnist("train").shuffle(10)
mnist_test = mnist("test")
# + [markdown] colab_type="text" id="JfOCWVGEfgcq"
# MNIST contains `28x28` greyscale handwritten digits. Let's take a look at one:
# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="I_yM0TVjFCZq" outputId="54959260-c8fc-4e39-ea54-e566e9122893"
import matplotlib.pyplot as plt
images, _ = next(iter(mnist_test))
plt.imshow(images[0]);
# + [markdown] colab_type="text" id="d7bsizs5gK3K"
# # Sonnet
#
# The next step is to define a model. In Sonnet everything that contains TensorFlow variables (`tf.Variable`) extends `snt.Module`, this includes low level neural network components (e.g. `snt.Linear`, `snt.Conv2D`), larger nets containing subcomponents (e.g. `snt.nets.MLP`), optimizers (e.g. `snt.optimizers.Adam`) and whatever else you can think of.
#
# Modules provide a simple abstraction for storing parameters (and `Variable`s used for other purposes, like for storing moving avergages in `BatchNorm`).
#
# To find all the parameters for a given module, simply do: `module.variables`. This will return a `tuple` of all the parameters that exist for this module, or any module it references:
# + [markdown] colab_type="text" id="GrN37pi1o4HT"
# ## Building the model
# + [markdown] colab_type="text" id="c6XoN56S2lSW"
# In Sonnet you build neural networks out of `snt.Module`s. In this case we'll build a multi-layer perceptron as a new class with a `__call__` method that computes the logits by passing the input through a number of fully connected layers, with a ReLU non-linearity.
# + colab={} colab_type="code" id="hgjyB9yhFclD"
class MLP(snt.Module):
def __init__(self):
super(MLP, self).__init__()
self.flatten = snt.Flatten()
self.hidden1 = snt.Linear(1024, name="hidden1")
self.hidden2 = snt.Linear(1024, name="hidden2")
self.logits = snt.Linear(10, name="logits")
def __call__(self, images):
output = self.flatten(images)
output = tf.nn.relu(self.hidden1(output))
output = tf.nn.relu(self.hidden2(output))
output = self.logits(output)
return output
# + [markdown] colab_type="text" id="0i03px8y8gf7"
# Now we'll create an instance of our class whose weights will be randomly initialized. We'll train this MLP such that it learns to recognize digits in the MNIST dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="XqL8oIMqGAnU" outputId="96efc445-d7bb-45db-f6b1-5aa4a8b72dcb"
mlp = MLP()
mlp
# + [markdown] colab_type="text" id="snzkUUh9oXPy"
# ## Using the model
# + [markdown] colab_type="text" id="On8wI6VwpDPm"
# Let's feed an example input through the model and see what it predicts. Since the model is randomly initialized there is a 1/10 chance that it will predict the right class!
# + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" id="4T-qmIc0GHfP" outputId="316f41f2-319c-452e-a132-5006b732e86b"
images, labels = next(iter(mnist_test))
logits = mlp(images)
prediction = tf.argmax(logits[0]).numpy()
actual = labels[0].numpy()
print("Predicted class: {} actual class: {}".format(prediction, actual))
plt.imshow(images[0]);
# + [markdown] colab_type="text" id="V297xpzfobXK"
# ## Training the model
# + [markdown] colab_type="text" id="WTrv-jn4pPSx"
# To train the model we need an optimizer. For this simple example we'll use Stochastic Gradient Descent which is implemented in the `SGD` optimizer. To compute gradients we'll use a `tf.GradientTape` which allows us to selectively record gradients only for the computation we want to back propagate through:
# + cellView="form" colab={} colab_type="code" id="V7gi8NQ-WZOl"
#@title Utility function to show progress bar.
from tqdm import tqdm
# MNIST training set has 60k images.
num_images = 60000
def progress_bar(generator):
return tqdm(
generator,
unit='images',
unit_scale=batch_size,
total=(num_images // batch_size) * num_epochs)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="UUkshshiK6Eq" outputId="edf09c6d-cc08-482d-98d8-45778082ed0b"
opt = snt.optimizers.SGD(learning_rate=0.1)
num_epochs = 10
def step(images, labels):
"""Performs one optimizer step on a single mini-batch."""
with tf.GradientTape() as tape:
logits = mlp(images)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=labels)
loss = tf.reduce_mean(loss)
params = mlp.trainable_variables
grads = tape.gradient(loss, params)
opt.apply(grads, params)
return loss
for images, labels in progress_bar(mnist_train.repeat(num_epochs)):
loss = step(images, labels)
print("\n\nFinal loss: {}".format(loss.numpy()))
# + [markdown] colab_type="text" id="2K0_eoR8og-G"
# ## Evaluating the model
# + [markdown] colab_type="text" id="Cm_9RMJopgWc"
# We'll do very simple analysis of the model to get a feeling for how well it does against this dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="PM7IPcOeXtxH" outputId="fc983d03-f92c-4002-f7df-514c251bd300"
total = 0
correct = 0
for images, labels in mnist_test:
predictions = tf.argmax(mlp(images), axis=1)
correct += tf.math.count_nonzero(tf.equal(predictions, labels))
total += images.shape[0]
print("Got %d/%d (%.02f%%) correct" % (correct, total, correct / total * 100.))
# + [markdown] colab_type="text" id="Lnkc55PtqA_I"
# To understand the result a bit better, lets take a look at a small sample of where the model correctly identified the digits:
# + cellView="form" colab={} colab_type="code" id="eFro_RB4YR-X"
#@title Utility function to show a sample of images.
def sample(correct, rows, cols):
n = 0
f, ax = plt.subplots(rows, cols)
if rows > 1:
ax = tf.nest.flatten([tuple(ax[i]) for i in range(rows)])
f.set_figwidth(14)
f.set_figheight(4 * rows)
for images, labels in mnist_test:
predictions = tf.argmax(mlp(images), axis=1)
eq = tf.equal(predictions, labels)
for i, x in enumerate(eq):
if x.numpy() == correct:
label = labels[i]
prediction = predictions[i]
image = images[i]
ax[n].imshow(image)
ax[n].set_title("Prediction:{}\nActual:{}".format(prediction, label))
n += 1
if n == (rows * cols):
break
if n == (rows * cols):
break
# + colab={"base_uri": "https://localhost:8080/", "height": 214} colab_type="code" id="PSamdka2dodW" outputId="e5c521a6-3d5b-4be6-f371-a2c7a87ae124"
sample(correct=True, rows=1, cols=5)
# + [markdown] colab_type="text" id="hzHp02F_pzdh"
# Now lets take a look at where it incorrectly classifies the input. MNIST has some rather dubious handwriting, I'm sure you'll agree that some of the samples below are a little ambiguous:
# + colab={"base_uri": "https://localhost:8080/", "height": 451} colab_type="code" id="KQe5Q9LNdnb0" outputId="7f28c377-2226-468e-e388-a1cb649a26f5"
sample(correct=False, rows=2, cols=5)
| examples/mlp_on_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Cheat Sheet
#
# Basic cheatsheet for Python mostly based on the book written by <NAME>, [Automate the Boring Stuff with Python](https://automatetheboringstuff.com/) under the [Creative Commons license](https://creativecommons.org/licenses/by-nc-sa/3.0/) and many other sources.
#
# ## Read It
#
# - [Website](https://www.pythoncheatsheet.org)
# - [Github](https://github.com/wilfredinni/python-cheatsheet)
# - [PDF](https://github.com/wilfredinni/Python-cheatsheet/raw/master/python_cheat_sheet.pdf)
# - [Jupyter Notebook](https://mybinder.org/v2/gh/wilfredinni/python-cheatsheet/master?filepath=jupyter_notebooks)
#
# ## Debugging
#
# ### Raising Exceptions
#
# Exceptions are raised with a raise statement. In code, a raise statement consists of the following:
#
# - The raise keyword
# - A call to the Exception() function
# - A string with a helpful error message passed to the Exception() function
raise Exception('This is the error message.')
Traceback (most recent call last):
File "<pyshell#191>", line 1, in <module>
raise Exception('This is the error message.')
Exception: This is the error message.
# Often it’s the code that calls the function, not the function itself, that knows how to handle an expection. So you will commonly see a raise statement inside a function and the try and except statements in the code calling the function.
def box_print(symbol, width, height):
if len(symbol) != 1:
raise Exception('Symbol must be a single character string.')
if width <= 2:
raise Exception('Width must be greater than 2.')
if height <= 2:
raise Exception('Height must be greater than 2.')
print(symbol * width)
for i in range(height - 2):
print(symbol + (' ' * (width - 2)) + symbol)
print(symbol * width)
for sym, w, h in (('*', 4, 4), ('O', 20, 5), ('x', 1, 3), ('ZZ', 3, 3)):
try:
box_print(sym, w, h)
except Exception as err:
print('An exception happened: ' + str(err))
# ### Getting the Traceback as a String
#
# The traceback is displayed by Python whenever a raised exception goes unhandled. But can also obtain it as a string by calling traceback.format_exc(). This function is useful if you want the information from an exception’s traceback but also want an except statement to gracefully handle the exception. You will need to import Python’s traceback module before calling this function.
# +
import traceback
try:
raise Exception('This is the error message.')
except:
with open('errorInfo.txt', 'w') as error_file:
error_file.write(traceback.format_exc())
print('The traceback info was written to errorInfo.txt.')
# -
# The 116 is the return value from the write() method, since 116 characters were written to the file. The traceback text was written to errorInfo.txt.
Traceback (most recent call last):
File "<pyshell#28>", line 2, in <module>
Exception: This is the error message.
# ### Assertions
#
# An assertion is a sanity check to make sure your code isn’t doing something obviously wrong. These sanity checks are performed by assert statements. If the sanity check fails, then an AssertionError exception is raised. In code, an assert statement consists of the following:
#
# - The assert keyword
# - A condition (that is, an expression that evaluates to True or False)
# - A comma
# - A string to display when the condition is False
# +
pod_bay_door_status = 'open'
assert pod_bay_door_status == 'open', 'The pod bay doors need to be "open".'
pod_bay_door_status = 'I\'m sorry, Dave. I\'m afraid I can\'t do that.'
assert pod_bay_door_status == 'open', 'The pod bay doors need to be "open".'
# -
# In plain English, an assert statement says, “I assert that this condition holds true, and if not, there is a bug somewhere in the program.” Unlike exceptions, your code should not handle assert statements with try and except; if an assert fails, your program should crash. By failing fast like this, you shorten the time between the original cause of the bug and when you first notice the bug. This will reduce the amount of code you will have to check before finding the code that’s causing the bug.
#
# Disabling Assertions
#
# Assertions can be disabled by passing the -O option when running Python.
#
# ### Logging
#
# To enable the logging module to display log messages on your screen as your program runs, copy the following to the top of your program (but under the #! python shebang line):
# +
import logging
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
# -
# Say you wrote a function to calculate the factorial of a number. In mathematics, factorial 4 is 1 × 2 × 3 × 4, or 24. Factorial 7 is 1 × 2 × 3 × 4 × 5 × 6 × 7, or 5,040. Open a new file editor window and enter the following code. It has a bug in it, but you will also enter several log messages to help yourself figure out what is going wrong. Save the program as factorialLog.py.
# +
import logging
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
logging.debug('Start of program')
def factorial(n):
logging.debug('Start of factorial(%s)' % (n))
total = 1
for i in range(1, n + 1):
total *= i
logging.debug('i is ' + str(i) + ', total is ' + str(total))
logging.debug('End of factorial(%s)' % (n))
return total
print(factorial(5))
logging.debug('End of program')
# -
# ### Logging Levels
#
# Logging levels provide a way to categorize your log messages by importance. There are five logging levels, described in Table 10-1 from least to most important. Messages can be logged at each level using a different logging function.
#
# | Level | Logging Function | Description |
# | ---------- | -------------------- | ------------------------------------------------------------------------------------------------------------------------------ |
# | `DEBUG` | `logging.debug()` | The lowest level. Used for small details. Usually you care about these messages only when diagnosing problems. |
# | `INFO` | `logging.info()` | Used to record information on general events in your program or confirm that things are working at their point in the program. |
# | `WARNING` | `logging.warning()` | Used to indicate a potential problem that doesn’t prevent the program from working but might do so in the future. |
# | `ERROR` | `logging.error()` | Used to record an error that caused the program to fail to do something. |
# | `CRITICAL` | `logging.critical()` | The highest level. Used to indicate a fatal error that has caused or is about to cause the program to stop running entirely. |
#
# ### Disabling Logging
#
# After you’ve debugged your program, you probably don’t want all these log messages cluttering the screen. The logging.disable() function disables these so that you don’t have to go into your program and remove all the logging calls by hand.
# +
import logging
logging.basicConfig(level=logging.INFO, format=' %(asctime)s -%(levelname)s - %(message)s')
logging.critical('Critical error! Critical error!')
logging.disable(logging.CRITICAL)
logging.critical('Critical error! Critical error!')
logging.error('Error! Error!')
# -
# ### Logging to a File
#
# Instead of displaying the log messages to the screen, you can write them to a text file. The logging.basicConfig() function takes a filename keyword argument, like so:
# +
import logging
logging.basicConfig(filename='myProgramLog.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
| jupyter_notebooks/16_Debugging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Learning With Errors (LWE)
# ## import libs
# +
# #!conda install pycrypto -y
# #!conda install numba -y
# +
import numpy as np
import pandas as pd
import scipy.stats
import math
import itertools
import random
from Crypto.Util import number
import matplotlib.pyplot as plt
from matplotlib import collections as matcoll
import time
# %load_ext cython
# -
# ## Set vector s (secret)
# Choose $s \in \mathbb{Z}^m_p$ with arbitrary $m \in \mathbb{N}$
#s = np.array([1, 1, 1])
s = np.array([1, 2, 3])
#s = np.array([10, 13, 9, 11])
#s = np.array([10, 13, 9, 11, 3])
n = len(s)
# ## parameters
# +
# modulus
#p = 17 # only prime numbers (it has to be a finite field)
p = number.getPrime(n.bit_length()**2) # using pycrypto lib (p = O(n^2))
print("Prime:", p)
#size parameter
m = 100
#m = min(2**(3*n), 200)
print('Count of equations:', m)
# distribution
## gaussian in this example according to the paper https://cims.nyu.edu/~regev/papers/lwesurvey.pdf (09.11.2021, page 2)
mu = 0
#alpha = 1/(math.sqrt(n)*math.log2(n)**2) # α = 1/(√n log^2(n))
alpha = 0.05
print(alpha)
sigma = alpha * p
sigma = 3
print("Mu:", mu, "\t Sigma:", sigma)
## plot the dense function of the distribution
gridPointCount = 10000; delta = 1/gridPointCount
interval = [-4*np.ceil(sigma), 4*np.ceil(sigma)]
x = np.linspace(interval[0], interval[1], gridPointCount)
y = scipy.stats.norm.pdf(x, loc=mu, scale=sigma)
fig, ax = plt.subplots(figsize=(16, 5))
ax.fill_between(x, y)
ax.set_xticks(np.linspace(interval[0], interval[1], 20, dtype = int))
fig.savefig('gaussianPropapation.png')
plt.show()
# +
x = np.arange(interval[0]+.5, interval[1]+.5, 1)
y = scipy.stats.norm.cdf(x, loc=mu, scale=sigma)
z = y[1:] - y[:-1]
fig, ax = plt.subplots(figsize=(16, 5))
ax.stem(x[:-1]+.5, z, basefmt=" ", use_line_collection=True)
ax.set_xticks(np.linspace(interval[0], interval[1], 20, dtype = int))
fig.savefig('roundedGaussianPropapation.png')
plt.show()
# -
# ## Construct the LWE problem
# #### Construct A, b and e
# +
np.random.seed(42) # set seed
np.random.randint(0, p) # uniform distribution (p excluded)
np.random.normal(loc=mu, scale=sigma, size=n) # gaussian distribution
A = np.random.randint(0, p, size=(m, n))
e = np.rint(np.random.normal(loc=mu, scale=sigma, size=m)) # rounding specified by the IEEE floating point standard IEEE 754
b = (np.matmul(A, s) + e)%p # system of linear equations with perturbation
# +
# oracle
def oracleLWE(s, p, n, mu, sigma):
a = np.random.randint(0, p, size=n)
e = np.rint(np.random.normal(loc=mu, scale=sigma))
b = (a.dot(s) + e)%p
return a, b, e
oracleLWE(s, p, n, mu, sigma)
# -
# %timeit oracleLWE(s, p, n, mu, sigma)
# ## Solving LWE
# ### Blum-Kalai-Wasserman (BKW) (variant of https://eprint.iacr.org/2012/636.pdf)
# ### helper functions
# +
# Iterative Algorithm (xgcd)
def iterative_egcd(a, b):
x,y, u,v = 0,1, 1,0
while a != 0:
q,r = b//a,b%a; m,n = x-u*q,y-v*q # use x//y for floor "floor division"
b,a, x,y, u,v = a,r, u,v, m,n
return b, x, y
def modinv(a, m):
g, x, y = iterative_egcd(a, m)
if g != 1:
return None
else:
return x % m
def solve_linear_congruence(a, b, m):
""" Describe all solutions to ax = b (mod m), or raise ValueError. """
g = math.gcd(a, m)
if b % g:
raise ValueError("No solutions")
a, b, m = a//g, b//g, m//g
return modinv(a, m) * b % m, m
def print_solutions(a, b, m):
print(f"Solving the congruence: {a}x = {b} (mod {m})")
x, mx = solve_linear_congruence(a, b, m)
print(f"Particular solution: x = {x}")
print(f"General solution: x = {x} (mod {mx})")
# for debug
print_solutions(272, 256, 1009)
# -
# ### Solving algorithm
def furtherElimination(prevCompareVec, subsetDict, guessCountDict, a, b, p):
equationList = subsetDict.get(prevCompareVec.tobytes())
for (a_tilde, b_tilde) in equationList:
compareVec = (a == a_tilde)
compareVecNeg = (a == -a_tilde%p)
if (len(compareVec) - sum(compareVec) == 1):
a_new = (a - a_tilde) % p
b_new = (b - b_tilde) % p
pos = np.nonzero(a_new)[0]
a_coeff = a_new[pos][0]
(secret, _) = solve_linear_congruence(a_coeff, b_new, p)
guessCountDict.get(pos[0]).append(secret)
#subsetDict.get(compareVec.tobytes()).append((a_new, b_new)) # debug
else:
if (len(compareVec) != sum(compareVec)) and (sum(compareVec) > sum(prevCompareVec)):
a_new = (a - a_tilde) % p
b_new = (b - b_tilde) % p
subsetDict.get(compareVec.tobytes()).append((a_new, b_new))
furtherElimination(compareVec, subsetDict, guessCountDict, a_new, b_new, p)
#continue # debug
if (len(compareVecNeg) - sum(compareVecNeg) == 1):
a_new = (a + a_tilde) % p
b_new = (b + b_tilde) % p
pos = np.nonzero(a_new)[0]
a_coeff = a_new[pos][0]
(secret, _) = solve_linear_congruence(a_coeff, b_new, p)
guessCountDict.get(pos[0]).append(secret)
#subsetDict.get(compareVecNeg.tobytes()).append((a_new, b_new)) # debug
else:
if (len(compareVecNeg) != sum(compareVecNeg)) and (sum(compareVecNeg) > sum(prevCompareVec)):
a_new = (a + a_tilde) % p
b_new = (b + b_tilde) % p
subsetDict.get(compareVecNeg.tobytes()).append((a_new, b_new))
furtherElimination(compareVecNeg, subsetDict, guessCountDict, a_new, b_new, p)
def solveLWE(s, n, p, mu, sigma):
# initialize subsets
subsetDict = dict()
for item in itertools.product("\x01\x00", repeat=n):
subsetDict.setdefault(bytes("".join(item), encoding='utf8'), list())
# initialize error memory
errorList = list()
# initialize guess count
guessCountDict = dict()
for key in range(n):
guessCountDict.setdefault(key, list())
start = time.time()
for i in range(100):
a, b, e = oracleLWE(s, p, n, mu, sigma)
errorList.append(e)
compareVec = (a == np.zeros(n))
furtherElimination(compareVec, subsetDict, guessCountDict, a, b, p)
subsetDict.get(compareVec.tobytes()).append((a, b))
end = time.time()
print("Processing Time:", end-start)
print(i, sum([len(elt) for key, elt in subsetDict.items()]))
return subsetDict, guessCountDict, errorList
# %%time
subsetDict, guessCountDict, errorList = solveLWE(s, n, p, mu, sigma)
# +
guess = list()
for key, value in guessCountDict.items():
try:
print("Position:", key)
unique, counts = np.unique(value, return_counts=True)
significantList = [scipy.stats.binom_test(count, n=len(value), p=1/p, alternative='greater') for count in counts] # determine significant level
#print(unique, counts)
significantArgMin = np.argmin(significantList)
print("Min significants:", significantList[significantArgMin], significantList[significantArgMin]<0.001)
indexOfMax = np.argmax(counts)
guessTmp = unique[indexOfMax]
guess.append(guessTmp)
print("Guess:", guessTmp)
print()
except ValueError:
pass
print()
print()
if (guess==s%p).all():
print("Guess:", guess, "\t", "Right Solution:", s%p, "\t", "Solved!")
else:
print("Guess:", guess, "\t", "Right Solution:", s%p, "\t", "Failed!")
# +
# evaluate error distribution of the concrete LWE instance
fig, ax = plt.subplots(nrows=2, figsize=(16, 5))
unique, counts = np.unique(np.array(errorList), return_counts=True)
ax[0].stem(unique, counts, basefmt=" ", use_line_collection=True)
ax[0].set_xticks(range(round(min(unique)), round(max(unique))+1))
unique, counts = np.unique(np.array(errorList)%p, return_counts=True)
ax[1].stem(unique, counts, basefmt=" ", use_line_collection=True)
ax[1].set_xticks(range(round(min(unique)), round(max(unique))+1))
plt.show()
# -
#error
## mean
print(np.array(errorList).mean())
## std
np.array(errorList).std()
for key, value in guessCountDict.items():
print("Position:", key)
unique, counts = np.unique(value, return_counts=True)
fig, ax = plt.subplots(figsize=(16, 5))
ax.stem(unique, counts, basefmt=" ", use_line_collection=True)
ax.set_xticks(range(round(min(unique)), round(max(unique))+1))
plt.show()
import statistics
# positions of the solution
for key, value in guessCountDict.items():
print("Position:", key)
## mean
print(statistics.mean(value))
## std
print(statistics.stdev(value))
print()
| jupyter_notebooks/solving/LWE/.ipynb_checkpoints/solveLWE_withError_BKW-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BKBK00/Reinforcement-learning-with-tensorflow/blob/master/HW02_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="OYlaRwNu7ojq"
# # **Homework 2-1 Phoneme Classification**
#
# * Slides: https://speech.ee.ntu.edu.tw/~hylee/ml/ml2021-course-data/hw/HW02/HW02.pdf
# * Video (Chinese): https://youtu.be/PdjXnQbu2zo
# * Video (English): https://youtu.be/ESRr-VCykBs
#
# + [markdown] id="emUd7uS7crTz"
# ## The DARPA TIMIT Acoustic-Phonetic Continuous Speech Corpus (TIMIT)
# The TIMIT corpus of reading speech has been designed to provide speech data for the acquisition of acoustic-phonetic knowledge and for the development and evaluation of automatic speech recognition systems.
#
# This homework is a multiclass classification task,
# we are going to train a deep neural network classifier to predict the phonemes for each frame from the speech corpus TIMIT.
#
# link: https://academictorrents.com/details/34e2b78745138186976cbc27939b1b34d18bd5b3
# + [markdown] id="KVUGfWTo7_Oj"
# ## Download Data
# Download data from google drive, then unzip it.
#
# You should have `timit_11/train_11.npy`, `timit_11/train_label_11.npy`, and `timit_11/test_11.npy` after running this block.<br><br>
# `timit_11/`
# - `train_11.npy`: training data<br>
# - `train_label_11.npy`: training label<br>
# - `test_11.npy`: testing data<br><br>
#
# **notes: if the google drive link is dead, you can download the data directly from Kaggle and upload it to the workspace**
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="OzkiMEcC3Foq" outputId="6ab059be-43b5-4e8f-e3cb-ec15fa3c24bb"
# !gdown --id '1HPkcmQmFGu-3OknddKIa5dNDsR05lIQR' --output data.zip
# !unzip data.zip
# !ls
# + [markdown] id="_L_4anls8Drv"
# ## Preparing Data
# Load the training and testing data from the `.npy` file (NumPy array).
# + colab={"base_uri": "https://localhost:8080/"} id="IJjLT8em-y9G" outputId="6d272f50-276b-474d-b168-29381c77b970"
import numpy as np
print('Loading data ...')
# Pre-extracted frame features; each row is one frame built from 11
# concatenated frames (presumably 429 = 11 * 39 features -- TODO confirm
# against the dataset README).
data_root='./timit_11/'
train = np.load(data_root + 'train_11.npy')
train_label = np.load(data_root + 'train_label_11.npy')
test = np.load(data_root + 'test_11.npy')
print('Size of training data: {}'.format(train.shape))
print('Size of testing data: {}'.format(test.shape))
# + [markdown] id="us5XW_x6udZQ"
# ## Create Dataset
# + id="Fjf5EcmJtf4e"
import torch
from torch.utils.data import Dataset
class TIMITDataset(Dataset):
    """Frame-level TIMIT dataset.

    Wraps pre-extracted feature frames (and, for the labeled splits, their
    integer phoneme labels) as a torch ``Dataset``.

    Parameters
    ----------
    X : np.ndarray
        Feature matrix, one row per frame.
    y : np.ndarray or None, optional
        Phoneme labels for each frame; ``None`` for the unlabeled test split.
    """

    def __init__(self, X, y=None):
        self.data = torch.from_numpy(X).float()
        if y is not None:
            # Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed
            # in 1.24; the builtin ``int`` is the documented replacement.
            y = y.astype(int)
            self.label = torch.LongTensor(y)
        else:
            self.label = None

    def __getitem__(self, idx):
        # Labeled splits yield (features, label); the test split yields
        # features only.
        if self.label is not None:
            return self.data[idx], self.label[idx]
        return self.data[idx]

    def __len__(self):
        return len(self.data)
# + [markdown] id="otIC6WhGeh9v"
# Split the labeled data into a training set and a validation set, you can modify the variable `VAL_RATIO` to change the ratio of validation data.
# + colab={"base_uri": "https://localhost:8080/"} id="sYqi_lAuvC59" outputId="75145295-e5b5-49f9-d539-f482e3dd320d"
VAL_RATIO = 0.2  # fraction of the labeled data held out for validation
# Positional split (first 80% train, last 20% validation) -- not shuffled.
percent = int(train.shape[0] * (1 - VAL_RATIO))
train_x, train_y, val_x, val_y = train[:percent], train_label[:percent], train[percent:], train_label[percent:]
print('Size of training set: {}'.format(train_x.shape))
print('Size of validation set: {}'.format(val_x.shape))
# + [markdown] id="nbCfclUIgMTX"
# Create a data loader from the dataset, feel free to tweak the variable `BATCH_SIZE` here.
# + id="RUCbQvqJurYc"
BATCH_SIZE = 64  # mini-batch size for both loaders
from torch.utils.data import DataLoader
train_set = TIMITDataset(train_x, train_y)
val_set = TIMITDataset(val_x, val_y)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True) #only shuffle the training data
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)
# + [markdown] id="_SY7X0lUgb50"
# Cleanup the unneeded variables to save memory.<br>
#
# **notes: if you need to use these variables later, then you may remove this block or clean up unneeded variables later<br>the data size is quite huge, so be aware of memory usage in colab**
# + colab={"base_uri": "https://localhost:8080/"} id="y8rzkGraeYeN" outputId="72c8c813-0475-4c2f-9764-18df82d7fdb3"
import gc
# Drop the large NumPy arrays now that they are copied into the Datasets;
# Colab RAM is limited.
del train, train_label, train_x, train_y, val_x, val_y
gc.collect()
# + [markdown] id="IRqKNvNZwe3V"
# ## Create Model
# + [markdown] id="FYr1ng5fh9pA"
# Define model architecture, you are encouraged to change and experiment with the model architecture.
# + id="lbZrwT6Ny0XL"
import torch
import torch.nn as nn
class Classifier(nn.Module):
    """Fully-connected phoneme classifier: 429 -> 1024 -> 512 -> 128 -> 39.

    Hidden layers share a sigmoid activation; the output layer returns raw
    logits (``CrossEntropyLoss`` applies the softmax itself).  Attribute
    names are part of the checkpoint format and must not change.
    """

    def __init__(self):
        super(Classifier, self).__init__()
        self.layer1 = nn.Linear(429, 1024)
        self.layer2 = nn.Linear(1024, 512)
        self.layer3 = nn.Linear(512, 128)
        self.out = nn.Linear(128, 39)
        self.act_fn = nn.Sigmoid()

    def forward(self, x):
        # Run each hidden layer followed by the shared activation.
        for hidden in (self.layer1, self.layer2, self.layer3):
            x = self.act_fn(hidden(x))
        return self.out(x)
# + [markdown] id="VRYciXZvPbYh"
# ## Training
# + id="y114Vmm3Ja6o"
# check which device is available
def get_device():
    """Return ``'cuda'`` when a CUDA GPU is visible to PyTorch, else ``'cpu'``."""
    if torch.cuda.is_available():
        return 'cuda'
    return 'cpu'
# + [markdown] id="sEX-yjHjhGuH"
# Fix random seeds for reproducibility.
# + id="88xPiUnm0tAd"
# fix random seed
def same_seeds(seed):
    """Seed the NumPy and PyTorch RNGs and force deterministic cuDNN kernels."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Trade autotuned-kernel speed for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# + [markdown] id="KbBcBXkSp6RA"
# Feel free to change the training parameters here.
# + id="QTp3ZXg1yO9Y" colab={"base_uri": "https://localhost:8080/"} outputId="8dd333c8-df2b-49e6-ad6d-38cc7a490fe1"
# fix random seed for reproducibility
same_seeds(0)
# get device
device = get_device()
print(f'DEVICE: {device}')
# training parameters
num_epoch = 20 # number of training epoch
learning_rate = 0.0001 # learning rate
# the path where checkpoint saved
model_path = './model.ckpt'
# create model, define a loss function, and optimizer
# CrossEntropyLoss expects raw logits, which is why Classifier has no
# final softmax.
model = Classifier().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# + id="CdMWsBs7zzNs" colab={"base_uri": "https://localhost:8080/"} outputId="f339bd08-6deb-45ed-98c8-f74402c375bc"
# start training
best_acc = 0.0  # best validation accuracy so far (raw correct count, not a ratio)
for epoch in range(num_epoch):
    train_acc = 0.0
    train_loss = 0.0
    val_acc = 0.0
    val_loss = 0.0
    # training
    model.train() # set the model to training mode
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        batch_loss = criterion(outputs, labels)
        _, train_pred = torch.max(outputs, 1) # get the index of the class with the highest probability
        batch_loss.backward()
        optimizer.step()
        # Accumulate raw correct counts / summed batch losses; they are
        # normalized only when printed below.
        train_acc += (train_pred.cpu() == labels.cpu()).sum().item()
        train_loss += batch_loss.item()
    # validation
    if len(val_set) > 0:
        model.eval() # set the model to evaluation mode
        with torch.no_grad():  # no gradients needed during evaluation
            for i, data in enumerate(val_loader):
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                batch_loss = criterion(outputs, labels)
                _, val_pred = torch.max(outputs, 1)
                val_acc += (val_pred.cpu() == labels.cpu()).sum().item() # get the index of the class with the highest probability
                val_loss += batch_loss.item()
            print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f} | Val Acc: {:3.6f} loss: {:3.6f}'.format(
                epoch + 1, num_epoch, train_acc/len(train_set), train_loss/len(train_loader), val_acc/len(val_set), val_loss/len(val_loader)
            ))
            # if the model improves, save a checkpoint at this epoch
            if val_acc > best_acc:
                best_acc = val_acc
                torch.save(model.state_dict(), model_path)
                print('saving model with acc {:.3f}'.format(best_acc/len(val_set)))
    else:
        print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f}'.format(
            epoch + 1, num_epoch, train_acc/len(train_set), train_loss/len(train_loader)
        ))
# if not validating, save the last epoch
if len(val_set) == 0:
    torch.save(model.state_dict(), model_path)
    print('saving model at last epoch')
# + [markdown] id="1Hi7jTn3PX-m"
# ## Testing
# + [markdown] id="NfUECMFCn5VG"
# Create a testing dataset, and load model from the saved checkpoint.
# + id="1PKjtAScPWtr" colab={"base_uri": "https://localhost:8080/"} outputId="e38e5792-8f6e-4eb8-cc08-33ec983ec113"
# create testing dataset
test_set = TIMITDataset(test, None)
test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False)
# create model and load weights from checkpoint
# (re-instantiate so the best saved checkpoint, not the last epoch's
# in-memory weights, is used for prediction)
model = Classifier().to(device)
model.load_state_dict(torch.load(model_path))
# + [markdown] id="940TtCCdoYd0"
# Make prediction.
# + id="84HU5GGjPqR0"
# Collect one predicted class id per test frame, in loader order.
predict = []
model.eval() # set the model to evaluation mode
with torch.no_grad():
    for i, data in enumerate(test_loader):
        inputs = data
        inputs = inputs.to(device)
        outputs = model(inputs)
        _, test_pred = torch.max(outputs, 1) # get the index of the class with the highest probability
        for y in test_pred.cpu().numpy():
            predict.append(y)
# + colab={"base_uri": "https://localhost:8080/"} id="VWBfsperle23" outputId="66178b69-be26-4012-9792-94ab9447bb6b"
# Mount Google Drive so outputs can be persisted outside the Colab VM.
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="AWDf_C-omElb"
# Write prediction to a CSV file.
#
# After finish running this block, download the file `prediction.csv` from the files section on the left-hand side and submit it to Kaggle.
# + id="GuljYSPHcZir"
# Kaggle submission file: one "Id,Class" row per test frame.
with open('prediction.csv', 'w') as f:
    f.write('Id,Class\n')
    for frame_id, phoneme in enumerate(predict):
        f.write(f'{frame_id},{phoneme}\n')
| HW02_1.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++11
// language: C++11
// name: xcpp11
// ---
// + active=""
// process Templet DSL and update 'pythidentity.cpp' file
// -
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
using namespace std;
{
    // Run the Templet DSL preprocessors: xgen emits the generated skeleton,
    // then skel merges it back into the source file (a backup is kept first).
    bool op1 = (system("~/bin/xgen pythidentity.cpp pythidentity.gen.cpp > out1.txt")==0);
    system("cp pythidentity.cpp pythidentity.bak");
    bool op2 = op1 ? (system("~/bin/skel -i pythidentity.cpp -s pythidentity.gen.cpp > out2.txt")==0):false;
    if(op2) cout << "preprocessing is Ok:\n\n"; else cout << "preprocessing failed:\n\n";
    // Echo the captured tool output so it is visible in the notebook.
    {ifstream file("out1.txt"); for (string line; getline(file, line); ){cout << line << endl;} }
    if(op1){cout << endl; ifstream file("out2.txt"); for (string line; getline(file, line); ){cout << line << endl;} }
    // Clean up temporaries. BUG FIX: the generated file is
    // pythidentity.gen.cpp, not pingpong.gen.cpp (copy-paste leftover from
    // the pingpong sample).
    system("rm out1.txt"); system("rm out2.txt"); system("rm pythidentity.gen.cpp");
}
// + active=""
// compile 'pythidentity.cpp' file
// -
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
using namespace std;
{
    // Compile the preprocessed sample. Links against libcurl (presumably
    // used by the Templet runtime -- confirm in pythidentity.cpp); compiler
    // diagnostics are captured to out.txt.
    system("g++ pythidentity.cpp -fpermissive -l curl -I ~/lib -o pythidentity 2> out.txt");
    // Any captured stderr output is treated as a failed build.
    ifstream file("out.txt"); bool ok=true; for (string line; getline(file, line); ){cout << line << endl; ok = false;}
    if(ok)cout << "Compilation is Ok";
    system("rm out.txt");
}
// + active=""
// run pythidentity sample
// -
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
using namespace std;
{
    // Run the built sample and echo its captured stdout into the notebook.
    system("./pythidentity > out.txt");
    // NOTE(review): 'ok' is set but never read in this cell (unlike the
    // compile cell above, where it gates the success message).
    ifstream file("out.txt"); bool ok=true; for (string line; getline(file, line); ){cout << line << endl; ok = false;}
    system("rm out.txt");
}
| samples/xpythidentity/pythindentity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="OLZGrbizishR"
import tensorflow as tf
# + [markdown] id="Qbh7EOs2iz0O"
# #**Creating a TensorFlow Dataset from Existing Tensors**
#
# If the data already exists in the form of a tensor object, a Python list, or a NumPy
# array, we can easily create a dataset using the **tf.data.Dataset.from_tensor_
# slices()**function. This function returns an object of class Dataset, which we
# can use to iterate through the individual elements in the input dataset.
# + [markdown] id="V3brgFMpvedS"
# Data တွေ က tensor type ဒါမှအဟုတ် list,NumPy array အနေနဲ့တည်ရှိနေပြီးသား ဆို tf.data.Dataset.from_tensor_slices() function ကိုသုံး ပြီး dataset တစ်ခု ကို ဖန်တီးနိုင်တယ်။
# Dataset ရယ့် object အနေနဲ့ return ပြန်ပေးတယ်။ဒီfunction မှာ optional argument အနေနဲ့ drop_remainder ဆိုတာပါတယ်။ ဒီကောင်က Tensor ထဲမာပါဝင်တယ့်element စုစုပေါင်းကို desired batch size နဲ့စားလို့ မပြတ်တယ့်အခါမျိုးမှာ အသုံးဝင်တယ်။
# + [markdown] id="kRFKIErLmk5I"
# ###"Create a dataset from list of value"
# + id="3lOkWbc8ji3A" outputId="33c8b691-1b91-40ff-8891-9ead2fc7e229" colab={"base_uri": "https://localhost:8080/", "height": 34}
a = [1.2, 3.4, 7.5, 4.1, 5.0, 1.0]
# from_tensor_slices yields one dataset element per list entry.
ds = tf.data.Dataset.from_tensor_slices(a)
print(ds)
# + [markdown] id="D00PvGgTmxR9"
# ### We can easily iterate through a dataset entry by entry as follows:
# + id="Z-1sZoeAmv0J" outputId="04097831-6cce-4aef-f479-7a72e11b62b4" colab={"base_uri": "https://localhost:8080/", "height": 119}
for item in ds :
    print(item)  # each item is a scalar float32 tensor
# + [markdown] id="xLor49Eyn4nO"
# ### If we want to create batches from this dataset, with a desired batch size of 3, we can do this as follows:
# + id="IEG4HUu_n-qj" outputId="6f55e296-f221-4646-f62b-74a519c13f42" colab={"base_uri": "https://localhost:8080/", "height": 51}
ds_batch = ds.batch(3)
for i, elem in enumerate(ds_batch, 1):
    print('batch {}:'.format(i), elem.numpy())
# + [markdown] id="QjrtOfnJo408"
# # **Combining two tensors into a joint dataset**
# + [markdown] id="Qd4oLH2-pD9p"
# ### Often, we may have the data in two (or possibly more) tensors. For example, wecould have a tensor for features and a tensor for labels. In such cases, we need tobuild a dataset that combines these tensors together, which will allow us to retrievethe elements of these tensors in tuples.
# + [markdown] id="kP-1r0VAvobH"
# Tensors Data တွေက 2မျိုး ဒါမှမဟုတ် 2မျိုး ထက်ပိုရှိနိုင်တယ်။ ဥပမာ အားဖြင့် Data ရယ့် Input feature နဲ့ Labels.
#
#
# + id="rzjElOO9pzGr" outputId="d60bf3ec-976b-42d4-8040-24dfe7f7def6" colab={"base_uri": "https://localhost:8080/", "height": 119}
tf.random.set_seed(1)
t_x = tf.random.uniform([4, 3], dtype=tf.float32)  # x holds the feature values
t_y = tf.range(4)  # y holds the class labels
print(t_x)
print(t_y)
# + [markdown] id="RCWaJyQQrPvq"
# ### Now, we want to create a joint dataset from these two tensors. Note that there is a required one-to-one correspondence between the elements of these two tensors:
# + id="ZEBIftgrrZM5" outputId="0ec30fc8-b0a9-4f9f-98b0-addab8e311a4" colab={"base_uri": "https://localhost:8080/", "height": 85}
# zip pairs the i-th element of ds_x with the i-th element of ds_y.
ds_x = tf.data.Dataset.from_tensor_slices(t_x)
ds_y = tf.data.Dataset.from_tensor_slices(t_y)
ds_joint = tf.data.Dataset.zip((ds_x, ds_y))
for example in ds_joint:
    print(' x:', example[0].numpy(),
          ' y:', example[1].numpy())
# + [markdown] id="2OTQGsAKsBu2"
# ### Alternatively, we can create the joint dataset using **tf.data.Dataset.from_tensor_slices()** as follows:
# + id="bBXZzWS-sK8e" outputId="b885fc7e-89ba-4dfd-c187-2414db994ed2" colab={"base_uri": "https://localhost:8080/", "height": 85}
ds_joint = tf.data.Dataset.from_tensor_slices((t_x, t_y))
for example in ds_joint:
    print(' x:', example[0].numpy(),
          ' y:', example[1].numpy())
# + [markdown] id="ptwFeinEtMbU"
# ### Feature Scaling
# + id="6tyCZ5YdtSPj" outputId="c2262e84-89c3-4dde-84bb-b50c7c734f05" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Rescale features x from [0, 1) to [-1, 1); labels pass through unchanged.
ds_trans = ds_joint.map(lambda x, y: (x*2-1.0, y))
for example in ds_trans:
    print(' x:', example[0].numpy(),
          ' y:', example[1].numpy())
# + [markdown] id="juHvA3-rtxCK"
# # **Shuffle, batch, and repeat**
# + [markdown] id="XNtfwBJAziJa"
# shuffle လုပ်ခြင်း batch ခွဲ ခြင်း repeat လုပ်ခြင်းဖြင့် model train နေစဉ်မှာoverfit ဖြစ် ခြင်းမှကာကွယ်ပေးနိုင်ပါတယ်။
# shuffle ()method မာ ပါဝင်တယ့် buffer_sizeဆိုတာကတော့သူက အဓိက အားဖြင့် datasets ထဲမာ ရှိတယ့် elements အရေအတွက်ကိုsuffle မလုပ်မှီ grouped လိုက်သတ်မှတ်ပေးတာပါ။
# Model ကို epoch အများကြီးနဲ့ trainတယ့်အခါမှာ dataset ကိုshuffleလုပ်ခြင်း itertate လုပ်ခြင်း များလိုအပ်ပါတယ်။
# + [markdown] id="7HK0p_L1vCSs"
# ### Shuffle
#
# The .shuffle() method requires an argument called
# buffer_size, which determines how many elements in the dataset are grouped
# together before shuffling.To ensure complete randomization during each epoch, we can simply
# choose a buffer size that is equal to the number of the training examples, as in the
# preceding code (buffer_size=len(t_x)).
# + id="28ETEbq2t06J" outputId="9788ceab-3135-443e-d7da-21e5241baf9f" colab={"base_uri": "https://localhost:8080/", "height": 85}
tf.random.set_seed(1)
# buffer_size equal to the dataset size gives a full uniform shuffle.
ds = ds_joint.shuffle(buffer_size=len(t_x))
for example in ds:
    print(' x:', example[0].numpy(),
          ' y:', example[1].numpy())
# + [markdown] id="5iMDfstUvVWR"
# ### Batch
# + id="ATa--GqjvWa2" outputId="954ba91e-86b7-41ad-e152-7763f1fa0397" colab={"base_uri": "https://localhost:8080/", "height": 85}
# drop_remainder=False keeps the final short batch (4 elements -> 3 + 1).
ds = ds_joint.batch(batch_size=3, drop_remainder=False)
batch_x, batch_y = next(iter(ds))
print('Batch-x:\n', batch_x.numpy())
# + id="zx1jVHIQwKnF" outputId="7475cf3d-c652-4f18-a93f-e9e1977622f6" colab={"base_uri": "https://localhost:8080/", "height": 34}
print('Batch-y: ', batch_y.numpy())
# + [markdown] id="05t93HL9wM3C"
# ### Repeat
# When training a model for multiple epochs, we need to shuffle and
# iterate over the dataset by the desired number of epochs. So, let's repeat the batched
# dataset twice:
# + id="BXUM206VwR8B" outputId="af79ad76-2ecd-492b-f5b1-3603b2c43e55" colab={"base_uri": "https://localhost:8080/", "height": 187}
# batch -> repeat: the short remainder batch shows up inside the stream.
ds = ds_joint.batch(3).repeat(count=2)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x, batch_y.numpy())
# + [markdown] id="R93oRs6uyyg1"
# If we change the order of these two
# operations, that is, first batch and then repeat, the results will be different:
# + id="uGcDfEayy0b7" outputId="c6745a44-3a03-4655-a1b0-446d35009df5" colab={"base_uri": "https://localhost:8080/", "height": 68}
# repeat -> batch: batches may straddle the epoch boundary.
ds = ds_joint.repeat(count=2).batch(3)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x.shape, batch_y.numpy())
# + [markdown] id="OL7YGdRTzMXe"
# Combine the operations in the following order: (1) shuffle, (2) batch, and (3) repeat:
# + id="8wO_ic-izOcb" outputId="03f42660-8ef1-4801-d93d-ae258068def9" colab={"base_uri": "https://localhost:8080/", "height": 119}
tf.random.set_seed(1)
# shuffle -> batch -> repeat: each pass is reshuffled (the recommended order).
ds = ds_joint.shuffle(4).batch(2).repeat(3)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x.shape, batch_y.numpy())
# + [markdown] id="fpjP0EuLzefS"
# let's try a different order: (2) batch, (1) shuffle, and (3) repeat
# + id="wPFcRcuTzgci" outputId="b24f18af-e500-4181-fe15-775688e3b3fd" colab={"base_uri": "https://localhost:8080/", "height": 119}
tf.random.set_seed(1)
# batch -> shuffle: shuffles whole batches, not individual examples.
ds = ds_joint.batch(2).shuffle(4).repeat(3)
for i,(batch_x, batch_y) in enumerate(ds):
    print(i, batch_x.shape, batch_y.numpy())
# + [markdown] id="pWsB1haH2rPu"
# # **Creating a dataset from files on your local storage disk**
# + [markdown] id="M7wK_O2R9fjP"
# We will use the pathlib
# library to generate a list of image files:
# + id="O7P7xhox2wXD" outputId="8d356910-2b33-4797-c37c-352351afaf70" colab={"base_uri": "https://localhost:8080/", "height": 54}
import pathlib
imgdir_path = pathlib.Path("/content/drive/My Drive/cat_dog_images" )
# Sort for a deterministic file order across runs and platforms.
file_list = sorted([str(path) for path in imgdir_path.glob('*.jpg')])
print(file_list)
# + [markdown] id="iZ-DV7RW9iTM"
# Next, we will visualize these image examples using Matplotlib:
# + id="rnsmABHg9uGS" outputId="9aba700f-c1bb-4205-b88e-523294e40792" colab={"base_uri": "https://localhost:8080/", "height": 478}
import matplotlib.pyplot as plt
import os
# 2x3 grid -- assumes at most 6 images in the folder (TODO confirm).
fig = plt.figure(figsize=(10, 5))
for i, file in enumerate(file_list):
    img_raw = tf.io.read_file(file)
    img = tf.image.decode_image(img_raw)
    print('Image shape: ', img.shape)
    ax = fig.add_subplot(2, 3, i+1)
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(img)
    ax.set_title(os.path.basename(file), size=15)
plt.tight_layout()
plt.show()
# + [markdown] id="sE8uNtFC-1IB"
# The
# labels for these images are provided within their filenames. So, we extract these
# labels from the list of filenames, assigning label 1 to dogs and label 0 to cats:
# + id="_g30fqwq-5mW" outputId="a7bcaec7-fc8d-4f3a-b522-e6fc51ce3751" colab={"base_uri": "https://localhost:8080/", "height": 34}
# NOTE(review): the match is case-sensitive -- a file only gets label 1 if
# its basename literally contains 'Dog'.
labels = [1 if 'Dog' in os.path.basename(file) else 0 for file in file_list]
print(labels)
# + [markdown] id="iN-WEOHB_Ue9"
# Now, we have two lists: a list of filenames (or paths of each image) and a list of their
# labels. In the previous section, you already learned two ways of creating a joint
# dataset from two tensors. Here, we will use the second approach as follows:
# + id="1Z4HrMg1_Wyt" outputId="8d16d51b-4ce6-4af2-bb97-32058de31358" colab={"base_uri": "https://localhost:8080/", "height": 119}
ds_files_labels = tf.data.Dataset.from_tensor_slices((file_list, labels))
for item in ds_files_labels:
    print(item[0].numpy(), item[1].numpy())
# + [markdown] id="U0bodcCD_9nl"
# We have called this dataset ds_files_labels, since it has filenames and labels.
# Next, we need to apply transformations to this dataset: load the image content from
# its file path, decode the raw content, and resize it to a desired size, for example,
# 80 × 120. Previously, we saw how to apply a lambda function using the .map()
# method. However, since we need to apply multiple preprocessing steps this time,
# we are going to write a helper function instead and use it when calling the .map()
# method:
#
# + id="9IZ34536AAPG"
def load_and_preprocess(path, label, img_height=80, img_width=120):
    """Load one image file and prepare it for the model.

    Reads the JPEG at ``path``, decodes it to an RGB tensor, resizes it to
    ``img_height`` x ``img_width`` and scales pixel values to [0, 1].
    The label passes through unchanged.

    The target size used to be read from module-level globals that are only
    defined in a *later* cell (a NameError if called early); it is now taken
    from parameters whose defaults match those globals (80 x 120).
    ``ds.map(load_and_preprocess)`` still works unchanged, since tf.data
    supplies only (path, label).
    """
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [img_height, img_width])
    image /= 255.0  # scale uint8 [0, 255] -> float [0, 1]
    return image, label
# + id="CjlHHYDFAUF3"
img_width, img_height = 120, 80
ds_images_labels = ds_files_labels.map(load_and_preprocess)
# + id="EJkv_AmCAVJm" outputId="95609e2a-a4ff-4c92-e180-f522d7ab03c7" colab={"base_uri": "https://localhost:8080/", "height": 402}
# Show each preprocessed image with its 0/1 label as the title.
fig = plt.figure(figsize=(10, 6))
for i,example in enumerate(ds_images_labels):
    ax = fig.add_subplot(2, 3, i+1)
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(example[0])
    ax.set_title('{}'.format(example[1].numpy()),
                 size=15)
plt.tight_layout()
plt.show()
# + [markdown] id="ANnj_kllBhnw"
# # **Fetching available datasets from the tensorflow_datasets library**
# + id="Adad9mD_BllD" outputId="c902cc0a-eb06-4488-aa33-40e08219c13e" colab={"base_uri": "https://localhost:8080/", "height": 357}
# NOTE(review): bare "pip install" relies on Colab's magic handling; in a
# plain notebook this line would need a leading "!".
pip install tensorflow-datasets
# + [markdown] id="9iYv4twrBw3p"
# let's import this module and take a look at the list of available datasets:
# + id="o-1pc3PTB1ad" outputId="9f60544d-0120-4db7-8360-08ccf30fcec2" colab={"base_uri": "https://localhost:8080/", "height": 51}
import tensorflow_datasets as tfds
print(len(tfds.list_builders()))
print(tfds.list_builders()[:5])
| coding-exercise/week2/part1/data-processing-with-tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# language: python
# name: python3
# ---
import evolvepy as ep
from evolvepy.generator.selection import roulette, tournament, rank
import numpy as np
import matplotlib.pyplot as plt
# Linearly increasing fitness values from -100 to 99.
fitness = np.arange(-100, 100, 1, dtype=np.float32)
# +
hist = []
# Draw 100 roulette selections of 2 individuals each and record the chosen
# indices, to eyeball the selection-probability distribution.
for i in range(100):
    selected = roulette(fitness, 2)
    hist.append(selected[0])
    hist.append(selected[1])
plt.hist(hist)
plt.show()
# -
| devel/selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + hide_input=true tags=["active-ipynb", "remove-input", "remove-output"]
# try:
# from openmdao.utils.notebook_utils import notebook_mode
# except ImportError:
# !python -m pip install openmdao[notebooks]
# -
# # Optimizing the Thickness Distribution of a Cantilever Beam Using the Adjoint Method
#
# In this example, we optimize the thickness (height) distribution of a cantilever beam
# using the adjoint method to compute the gradient. We use Euler--Bernoulli beam theory and assume a rectangular section.
#
# ## Background
#
# The optimization problem is:
#
# $$
# \begin{array}{r c l}
# \text{minimize} & & f^T d \\
# \text{with respect to} & & h \\
# \text{subject to} & & \text{sum}(h) b L_0 = \text{volume} \\
# \end{array}
# $$
#
# where $f$ is the vector of forces, $h$ is the vector of beam heights, and $L_0$ is the length of a single beam element.
#
# The displacements vector $d$ is given by
#
# $$
# K d = f
# $$
#
# where $K$ is the stiffness matrix. However, in practice, we augment the linear system with Lagrange multipliers to apply the boundary constraints at the first node.
#
# Since our model contains a system of equations, we use the adjoint method to compute the gradient of the objective with respect to the beam height vector. The model is shown below.
# + tags=["remove-input"]
import numpy as np
import openmdao.api as om
from openmdao.test_suite.test_examples.beam_optimization.beam_group import BeamGroup
# Build and run the optimization once so the rendered docs page can embed
# the N2 diagram (this cell is hidden in the published notebook).
if __name__ == '__main__':
    E = 1.
    L = 1.
    b = 0.1
    volume = 0.01
    num_elements = 50
    prob = om.Problem(model=BeamGroup(E=E, L=L, b=b, volume=volume, num_elements=num_elements),
                      driver=om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=True))
    prob.setup()
    prob.run_driver()
om.n2(prob)
# + tags=["remove-input", "remove-output"]
# Regression guard: the optimized thickness distribution must match the
# reference solution to 1e-4.
assert np.linalg.norm(prob['h'] - [
    0.14915754, 0.14764328, 0.14611321, 0.14456715, 0.14300421, 0.14142417,
    0.13982611, 0.13820976, 0.13657406, 0.13491866, 0.13324268, 0.13154528,
    0.12982575, 0.12808305, 0.12631658, 0.12452477, 0.12270701, 0.12086183,
    0.11898809, 0.11708424, 0.11514904, 0.11318072, 0.11117762, 0.10913764,
    0.10705891, 0.10493903, 0.10277539, 0.10056526, 0.09830546, 0.09599246,
    0.09362243, 0.09119084, 0.08869265, 0.08612198, 0.08347229, 0.08073573,
    0.07790323, 0.07496382, 0.07190453, 0.06870925, 0.0653583, 0.06182632,
    0.05808044, 0.05407658, 0.04975295, 0.0450185, 0.03972912, 0.03363155,
    0.02620192, 0.01610863
]) < 1e-4
# -
# ## Implementation: list of components
#
# There are 5 components that compute:
#
# 1. Moment of inertia for each element
# 2. Local stiffness matrix for each element
# 3. Displacements from solution of the $Kd=f$ linear system augmented with the Lagrange multipliers
# 4. Compliance
# 5. Volume
class MomentOfInertiaComp(om.ExplicitComponent):
    """Per-element moment of inertia of a rectangular section: I = b * h**3 / 12."""

    def initialize(self):
        self.options.declare('num_elements', types=int)
        self.options.declare('b')  # section width

    def setup(self):
        n = self.options['num_elements']
        self.add_input('h', shape=n)
        self.add_output('I', shape=n)

    def setup_partials(self):
        # I[e] depends only on h[e], so the Jacobian is diagonal.
        diag = np.arange(self.options['num_elements'])
        self.declare_partials('I', 'h', rows=diag, cols=diag)

    def compute(self, inputs, outputs):
        b = self.options['b']
        outputs['I'] = 1./12. * b * inputs['h'] ** 3

    def compute_partials(self, inputs, partials):
        # dI/dh = b * h**2 / 4
        b = self.options['b']
        partials['I', 'h'] = 1./4. * b * inputs['h'] ** 2
class LocalStiffnessMatrixComp(om.ExplicitComponent):
    """Build the 4x4 Euler--Bernoulli element stiffness matrix for each element.

    K_local is linear in the moment of inertia I, so the partial derivative
    is the constant coefficient tensor assembled in setup().
    """
    def initialize(self):
        self.options.declare('num_elements', types=int)
        self.options.declare('E')  # Young's modulus
        self.options.declare('L')  # total beam length
    def setup(self):
        num_elements = self.options['num_elements']
        E = self.options['E']
        L = self.options['L']
        self.add_input('I', shape=num_elements)
        self.add_output('K_local', shape=(num_elements, 4, 4))
        L0 = L / num_elements  # length of a single element
        # Standard beam-element stiffness coefficients (per unit E*I/L0^3);
        # rows/cols ordered [w_1, theta_1, w_2, theta_2].
        coeffs = np.empty((4, 4))
        coeffs[0, :] = [12, 6 * L0, -12, 6 * L0]
        coeffs[1, :] = [6 * L0, 4 * L0 ** 2, -6 * L0, 2 * L0 ** 2]
        coeffs[2, :] = [-12, -6 * L0, 12, -6 * L0]
        coeffs[3, :] = [6 * L0, 2 * L0 ** 2, -6 * L0, 4 * L0 ** 2]
        coeffs *= E / L0 ** 3
        # mtx[e, :, :, e] holds the coefficients for element e; flattened,
        # it is the (constant) Jacobian of K_local with respect to I.
        self.mtx = mtx = np.zeros((num_elements, 4, 4, num_elements))
        for ind in range(num_elements):
            self.mtx[ind, :, :, ind] = coeffs
        self.declare_partials('K_local', 'I',
                              val=self.mtx.reshape(16 * num_elements, num_elements))
    def compute(self, inputs, outputs):
        # Scale each element's coefficient block by its moment of inertia.
        outputs['K_local'] = 0
        for ind in range(self.options['num_elements']):
            outputs['K_local'][ind, :, :] = self.mtx[ind, :, :, ind] * inputs['I'][ind]
# +
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import splu
class StatesComp(om.ImplicitComponent):
    """Implicitly solve K d = f for the displacement/rotation state vector d.

    The linear system is augmented with two Lagrange-multiplier rows and
    columns that clamp the deflection and rotation of the first node, so the
    state size is 2*num_nodes + 2.
    """
    def initialize(self):
        self.options.declare('num_elements', types=int)
        self.options.declare('force_vector', types=np.ndarray)
    def setup(self):
        num_elements = self.options['num_elements']
        num_nodes = num_elements + 1
        size = 2 * num_nodes + 2  # 2 DOF per node + 2 Lagrange multipliers
        self.add_input('K_local', shape=(num_elements, 4, 4))
        self.add_output('d', shape=size)
        # Sparsity of d(residual)/d(K_local): each element's 16 entries hit
        # the 4 residual rows of its two nodes (row offset of 2 per element).
        cols = np.arange(16*num_elements)
        rows = np.repeat(np.arange(4), 4)
        rows = np.tile(rows, num_elements) + np.repeat(np.arange(num_elements), 16) * 2
        self.declare_partials('d', 'K_local', rows=rows, cols=cols)
        self.declare_partials('d', 'd')
    def apply_nonlinear(self, inputs, outputs, residuals):
        # Residual R(d) = K d - f, with f zero-padded for the multiplier rows.
        force_vector = np.concatenate([self.options['force_vector'], np.zeros(2)])
        self.K = self.assemble_CSC_K(inputs)
        residuals['d'] = self.K.dot(outputs['d']) - force_vector
    def solve_nonlinear(self, inputs, outputs):
        force_vector = np.concatenate([self.options['force_vector'], np.zeros(2)])
        self.K = self.assemble_CSC_K(inputs)
        self.lu = splu(self.K)  # cache the LU factorization for solve_linear
        outputs['d'] = self.lu.solve(force_vector)
    def linearize(self, inputs, outputs, jacobian):
        num_elements = self.options['num_elements']
        self.K = self.assemble_CSC_K(inputs)
        self.lu = splu(self.K)
        # dR/dK_local follows the sparsity declared in setup(): the value of
        # each nonzero is the corresponding entry of the current state d.
        i_elem = np.tile(np.arange(4), 4)
        i_d = np.tile(i_elem, num_elements) + np.repeat(np.arange(num_elements), 16) * 2
        jacobian['d', 'K_local'] = outputs['d'][i_d]
        jacobian['d', 'd'] = self.K.toarray()
    def solve_linear(self, d_outputs, d_residuals, mode):
        # The assembled K is symmetric here, so the same factorization
        # serves both forward and reverse (adjoint) mode.
        if mode == 'fwd':
            d_outputs['d'] = self.lu.solve(d_residuals['d'])
        else:
            d_residuals['d'] = self.lu.solve(d_outputs['d'])
    def assemble_CSC_K(self, inputs):
        """
        Assemble the stiffness matrix in sparse CSC format.
        Returns
        -------
        scipy.sparse.csc_matrix
            Stiffness matrix in sparse CSC format, including the two
            Lagrange-multiplier rows/columns that clamp the first node.
        """
        num_elements = self.options['num_elements']
        num_nodes = num_elements + 1
        num_entry = num_elements * 12 + 4
        ndim = num_entry + 4
        # dtype follows the inputs (presumably so complex-step derivatives
        # work); NOTE(review): _get_data() is a private OpenMDAO API.
        data = np.zeros((ndim, ), dtype=inputs._get_data().dtype)
        cols = np.empty((ndim, ))
        rows = np.empty((ndim, ))
        # First element.
        data[:16] = inputs['K_local'][0, :, :].flat
        cols[:16] = np.tile(np.arange(4), 4)
        rows[:16] = np.repeat(np.arange(4), 4)
        j = 16
        for ind in range(1, num_elements):
            ind1 = 2 * ind
            K = inputs['K_local'][ind, :, :]
            # NW quadrant gets summed with previous connected element.
            data[j-6:j-4] += K[0, :2]
            data[j-2:j] += K[1, :2]
            # NE quadrant
            data[j:j+4] = K[:2, 2:].flat
            rows[j:j+4] = np.array([ind1, ind1, ind1 + 1, ind1 + 1])
            cols[j:j+4] = np.array([ind1 + 2, ind1 + 3, ind1 + 2, ind1 + 3])
            # SE and SW quadrants together
            data[j+4:j+12] = K[2:, :].flat
            rows[j+4:j+12] = np.repeat(np.arange(ind1 + 2, ind1 + 4), 4)
            cols[j+4:j+12] = np.tile(np.arange(ind1, ind1 + 4), 2)
            j += 12
        # Lagrange-multiplier entries: unit couplings between the two extra
        # unknowns and d[0]/d[1] of the first (clamped) node.
        data[-4:] = 1.0
        rows[-4] = 2 * num_nodes
        rows[-3] = 2 * num_nodes + 1
        rows[-2] = 0.0
        rows[-1] = 1.0
        cols[-4] = 0.0
        cols[-3] = 1.0
        cols[-2] = 2 * num_nodes
        cols[-1] = 2 * num_nodes + 1
        n_K = 2 * num_nodes + 2
        return coo_matrix((data, (rows, cols)), shape=(n_K, n_K)).tocsc()
# -
class ComplianceComp(om.ExplicitComponent):
    """Compute the compliance objective f . d (dot product of forces and displacements)."""

    def initialize(self):
        self.options.declare('num_elements', types=int)
        self.options.declare('force_vector', types=np.ndarray)

    def setup(self):
        size = 2 * (self.options['num_elements'] + 1)
        self.add_input('displacements', shape=size)
        self.add_output('compliance')

    def setup_partials(self):
        # Compliance is linear in d, so the partial is the constant force row.
        size = 2 * (self.options['num_elements'] + 1)
        forces = self.options['force_vector']
        self.declare_partials('compliance', 'displacements',
                              val=forces.reshape((1, size)))

    def compute(self, inputs, outputs):
        outputs['compliance'] = np.dot(self.options['force_vector'],
                                       inputs['displacements'])
class VolumeComp(om.ExplicitComponent):
    """Compute the total beam volume: sum over elements of b * h_e * L0."""

    def initialize(self):
        self.options.declare('num_elements', types=int)
        self.options.declare('b', default=1.)  # section width
        self.options.declare('L')              # total beam length

    def setup(self):
        opts = self.options
        elem_length = opts['L'] / opts['num_elements']
        self.add_input('h', shape=opts['num_elements'])
        self.add_output('volume')
        # Volume is linear in h, so the partial is the constant b * L0.
        self.declare_partials('volume', 'h', val=opts['b'] * elem_length)

    def compute(self, inputs, outputs):
        opts = self.options
        elem_length = opts['L'] / opts['num_elements']
        outputs['volume'] = np.sum(inputs['h'] * opts['b'] * elem_length)
# ## Implementation: Optimization Script
#
# Here is the optimization script:
# +
from openmdao.test_suite.test_examples.beam_optimization.beam_group import BeamGroup
# Model parameters: unit Young's modulus and beam length, fixed section
# width b, and a total-volume equality constraint.
E = 1.
L = 1.
b = 0.1
volume = 0.01
num_elements = 50
prob = om.Problem(model=BeamGroup(E=E, L=L, b=b, volume=volume, num_elements=num_elements))
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-9
prob.driver.options['disp'] = True
prob.setup()
prob.run_driver()
print(prob['h'])
# -
# 
# + tags=["remove-input", "remove-output"]
from openmdao.utils.assert_utils import assert_near_equal
# Regression guard for the docs build: the optimized thicknesses must match
# the reference solution to 1e-4.
assert_near_equal(prob['h'],
                  [0.14915754, 0.14764328, 0.14611321, 0.14456715, 0.14300421, 0.14142417,
                   0.13982611, 0.13820976, 0.13657406, 0.13491866, 0.13324268, 0.13154528,
                   0.12982575, 0.12808305, 0.12631658, 0.12452477, 0.12270701, 0.12086183,
                   0.11898809, 0.11708424, 0.11514904, 0.11318072, 0.11117762, 0.10913764,
                   0.10705891, 0.10493903, 0.10277539, 0.10056526, 0.09830546, 0.09599246,
                   0.09362243, 0.09119084, 0.08869265, 0.08612198, 0.08347229, 0.08073573,
                   0.07790323, 0.07496382, 0.07190453, 0.06870925, 0.0653583, 0.06182632,
                   0.05808044, 0.05407658, 0.04975295, 0.0450185, 0.03972912, 0.03363155,
                   0.02620192, 0.01610863], 1e-4)
| openmdao/docs/openmdao_book/examples/beam_optimization_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # subplot2grid demo
#
#
# This example demonstrates the use of `plt.subplot2grid` to generate subplots.
# Using `GridSpec`, as demonstrated in :doc:`/gallery/userdemo/demo_gridspec03`
# is generally preferred.
#
# +
import matplotlib.pyplot as plt
def annotate_axes(fig):
for i, ax in enumerate(fig.axes):
ax.text(0.5, 0.5, "ax%d" % (i+1), va="center", ha="center")
ax.tick_params(labelbottom=False, labelleft=False)
fig = plt.figure()
# On a 3x3 grid: ax1 spans the whole top row, ax2 spans two columns of the
# middle row, ax3 spans the right column of the middle+bottom rows, and
# ax4/ax5 fill the remaining bottom cells.
ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3)
ax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
ax4 = plt.subplot2grid((3, 3), (2, 0))
ax5 = plt.subplot2grid((3, 3), (2, 1))
annotate_axes(fig)
plt.show()
| testing/examples/demo_gridspec01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: VPython
# language: python
# name: vpython
# ---
# +
from vpython import *
# GlowScript version of Jupyter demo program Color-RGB-HSV
# Fixed canvas: zoom/spin disabled, background starts fully red.
scene.userzoom = False
scene.userspin = False
scene.width = 460
scene.height = 200
scene.range = 1
scene.background = color.red
box(pos=vector(10,0,0)) # Force creation of canvas; box is not seen because it is outside the canvas
cancopy = 'You can Ctrl-C or Command-C copy these RGB and HSV values:\n'
scene.title = cancopy
# The title hosts two live numeric readouts (wtext widgets) for RGB and HSV.
scene.append_to_title("RGB = <")
titlergb = wtext(pos=scene.title_anchor, text="1.000, 0.000, 0.000")
scene.append_to_title(">, HSV = <")
titlehsv = wtext(pos=scene.title_anchor, text="0.000, 0.000, 0.000")
scene.append_to_title(">")
# Slider labels; slider ids 0-2 are RGB channels, 3-5 are HSV channels.
C = ['Red', 'Green', 'Blue', 'Hue', 'Saturation', 'Value']
sliders = []
wts = []
def set_background(sl):
    """Slider callback: recompute the canvas background from whichever slider moved.

    sl.id 0-2 are the RGB sliders, 3-5 the HSV sliders. Moving a slider in one
    family repositions and relabels the other family so both stay consistent.
    """
    if sl.id < 3:
        # An RGB slider moved: show its value, then derive HSV from RGB.
        wts[sl.id].text = '{:1.3f}'.format(sl.value)
        rgb = vector(sliders[0].value, sliders[1].value, sliders[2].value)
        hsv = color.rgb_to_hsv(rgb)
        sliders[3].value = int(1000*hsv.x)/1000 # reset HSV slider positions; display 3 figures
        sliders[4].value = int(1000*hsv.y)/1000
        sliders[5].value = int(1000*hsv.z)/1000
        wts[3].text = '{:1.3f}'.format(hsv.x)
        wts[4].text = '{:1.3f}'.format(hsv.y)
        wts[5].text = '{:1.3f}'.format(hsv.z)
    else:
        # An HSV slider moved: show its value, then derive RGB from HSV.
        wts[sl.id].text = '{:1.3f}'.format(sl.value)
        hsv = vector(sliders[3].value, sliders[4].value, sliders[5].value)
        rgb = color.hsv_to_rgb(hsv)
        sliders[0].value = int(1000*rgb.x)/1000 # reset RGB slider positions; display 3 figures
        sliders[1].value = int(1000*rgb.y)/1000
        sliders[2].value = int(1000*rgb.z)/1000
        wts[0].text = '{:1.3f}'.format(rgb.x)
        wts[1].text = '{:1.3f}'.format(rgb.y)
        wts[2].text = '{:1.3f}'.format(rgb.z)
    # Both branches leave rgb/hsv defined; update canvas and title readouts.
    scene.background = rgb
    # For readability, limit precision of display of quantities to 3 figures
    titlergb.text = "{:1.3f}, {:1.3f}, {:1.3f}".format(rgb.x, rgb.y, rgb.z)
    titlehsv.text = "{:1.3f}, {:1.3f}, {:1.3f}".format(hsv.x, hsv.y, hsv.z)
scene.caption = '\n'
# Build the six sliders, each with a paired numeric readout widget.
for i in range(6): # Create the 3 RGB and 3 HSV sliders
    sliders.append(slider(length=300, left=10, min=0, max=1, bind=set_background, id=i))
    scene.append_to_caption(' '+C[i]+' ') # Display slider name
    wts.append(wtext(text='0.000'))
    scene.append_to_caption('\n\n')
    if i == 2: scene.append_to_caption("\n\n") # Separate the RGB and HSV sliders
# Initial state: pure red background (RGB <1,0,0> == HSV <0,1,1>).
sliders[0].value = 1 # make the background red
sliders[4].value = sliders[5].value = 1
wts[0].text = '1.000'
wts[4].text = wts[5].text = '1.000'
# -
| Demos/Color-RGB-HSV2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LegacyHalos SersicFitting
#
# This notebook demonstrates how we fit the 1D surface brightness profiles using various parametric (e.g., Sersic) models, using one galaxy as a toy example.
# ### Imports, paths, and other preliminaries.
import os, pdb
import numpy as np
import matplotlib.pyplot as plt
from legacyhalos import io
from legacyhalos.ellipse import ellipse_sbprofile
from legacyhalos.qa import display_ellipse_sbprofile, display_sersic
from legacyhalos.sersic import SersicSingleWaveModel, SersicSingleWaveFit
import seaborn as sns
sns.set(style='ticks', font_scale=1.4, palette='Set2')
# %matplotlib inline
# ### Test on real data
#
# Read a surface brightness profile and fit it.
# Read the first galaxy in the sample and its precomputed ellipse fit.
sample = io.read_sample(first=0, last=0)
objid, objdir = io.get_objid(sample)
ellipsefit = io.read_ellipsefit(objid, objdir)
# Convert the ellipse fit to a surface-brightness profile and display it.
sbprofile = ellipse_sbprofile(ellipsefit)
display_ellipse_sbprofile(ellipsefit)
# Fit the single-Sersic multi-band model (fixed seed) and show its QA plot.
sersic = SersicSingleWaveFit(ellipsefit, seed=1).fit(verbose=True)
display_sersic(sersic)
sersic['phot']
# NOTE(review): 'stop' is undefined; presumably it deliberately raises
# NameError to halt "Run All" here -- the cells below are a simulation sandbox.
stop
# #### Simulate a data set and plot it
#
# Create a "galaxy" with a g-, r-, and z-band surface brightness profile measured over different radii. Choose the Sersic index to vary with wavelength as $\beta=0.1$ with $n_{ref}=2.8$, and the half-light radius to vary as $\alpha=-0.2$ with $r_{ref}=8$ kpc.
def simulate_sbprofiles(errfactor=0.2, seed=None, psfsigma=2.0,
                        verbose=False, uniform_radius=True):
    """Simple script to simulate grz surface brightness profiles.

    errfactor: fractional Gaussian scatter added to the model surface brightness.
    seed: RNG seed for reproducible noise (and radii when not uniform).
    psfsigma: PSF width applied identically to the g, r, z bands.
    verbose: print the instantiated model.
    uniform_radius: evenly spaced radii (smoother PSF convolution) instead of
        randomly drawn, sorted radii.
    Returns a dict shaped like the real pipeline output (radius, wave, sb,
    sberr, band metadata) with 'success' set to False.
    """
    # instantiate a nominal model: Sersic index varies with wavelength as
    # beta=0.1 (nref=2.8) and half-light radius as alpha=-0.2 (r50ref=8).
    model = SersicSingleWaveModel(beta=0.1, alpha=-0.2, r50ref=8, nref=2.8,
                                  mu50_g=0.5, mu50_r=1.0, mu50_z=1.5,
                                  psfsigma_g=psfsigma, psfsigma_r=psfsigma,
                                  psfsigma_z=psfsigma)
    if verbose:
        print(model)
    rand = np.random.RandomState(seed)
    minradius = 0.02
    maxradius = 15.0
    nrad = (95, 89, 92) # number of measurements per bandpass g, r, z
    # build the wavelength and radius vector
    radius = []
    wave = []
    for lam, nn in zip( (model.lambda_g, model.lambda_r, model.lambda_z), nrad ):
        # uniform sampling makes the PSF convolution smoother
        if uniform_radius:
            rad = np.linspace(minradius, maxradius, nn)
        else:
            rad = np.sort(rand.uniform(minradius, maxradius, nn))
        radius.append(rad)
        wave.append(np.repeat(lam, nn))
    radius = np.hstack(radius)
    wave = np.hstack(wave)
    # evaluate the model and add variance
    sb = model(radius, wave)
    # NOTE(review): 'sberr' holds the noise *realizations* (can be negative),
    # not the 1-sigma error bars sb*errfactor -- confirm downstream usage.
    sberr = rand.normal(loc=0, scale=sb*errfactor)
    #sberr = np.zeros_like(sb)
    #for ii in range(len(sb)):
    #    sberr[ii] = rand.normal(loc=0, scale=sb[ii]*errfactor)
    sb += sberr
    sersic = dict({'success': False, 'radius': radius,
                   'wave': wave, 'sb': sb, 'sberr': sberr,
                   'band': model.band, 'pixscale': 1.0,
                   'lambda_g': model.lambda_g, 'lambda_r': model.lambda_r,
                   'lambda_z': model.lambda_z})
    return sersic
# Simulate profiles for increasing PSF widths and show each QA plot.
for sig in (0, 1, 2):
    sersic = simulate_sbprofiles(seed=1, errfactor=0.1, psfsigma=sig, uniform_radius=False)
    display_sersic(sersic)
| doc/sandbox/legacyhalos-sersic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch as th
import torchvision as tv
import pro_gan_pytorch.PRO_GAN as pg
# select the device to be used for training
device = th.device("cuda" if th.cuda.is_available() else "cpu")
# Root directory where the CIFAR-10 archives are (or will be) stored.
data_path = "cifar-10/"
def setup_data(download=False):
    """Prepare the CIFAR-10 train/test datasets.

    :param download: Boolean for whether to download the data into ``data_path``
    :return: classes, trainset, testset => class-name tuple plus the training
        and testing datasets (only a ToTensor transform is applied)
    """
    cifar_classes = ('plane', 'car', 'bird', 'cat', 'deer',
                     'dog', 'frog', 'horse', 'ship', 'truck')
    to_tensor = tv.transforms.ToTensor()
    train_ds = tv.datasets.CIFAR10(root=data_path,
                                   transform=to_tensor,
                                   download=download)
    # Test split is assumed present on disk, so it is never downloaded here.
    test_ds = tv.datasets.CIFAR10(root=data_path,
                                  transform=to_tensor, train=False,
                                  download=False)
    return cifar_classes, train_ds, test_ds
if __name__ == '__main__':
    # some parameters:
    depth = 6
    # hyper-parameters per depth (resolution)
    # NOTE(review): depth is 6 but only 4 per-resolution schedule entries are
    # given -- presumably one entry per depth is expected; verify.
    num_epochs = [10, 20, 20, 20]
    fade_ins = [50, 50, 50, 50]
    batch_sizes = [128, 128, 128, 128]
    latent_size = 128
    # get the data. Ignore the test data and their classes
    _, dataset, _ = setup_data(download=True)
    # ======================================================================
    # This line creates the PRO-GAN
    # ======================================================================
    pro_gan = pg.ConditionalProGAN(num_classes=10, depth=depth,
                                   latent_size=latent_size, device=device)
    # ======================================================================
    # ======================================================================
    # This line trains the PRO-GAN
    # ======================================================================
    pro_gan.train(
        dataset=dataset,
        epochs=num_epochs,
        fade_in_percentage=fade_ins,
        batch_sizes=batch_sizes
    )
    # ======================================================================
_, dataset, _ = setup_data(download=True)
from torchvision import transforms
import torchvision
# Preprocessing for the custom image folder: resize then normalize with
# ImageNet channel statistics.
TRANSFORM_IMG = transforms.Compose([
    transforms.Resize(128),
    #transforms.CenterCrop(256),
    transforms.ToTensor(),
    #transforms.ToPILImage(mode='RGB'),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225] )
    ])
TRAIN_DATA_PATH = '/home/jovyan/github/models/imagesProcessed/'
train_data = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH, transform=TRANSFORM_IMG)
train_data.classes
# NOTE(review): 'trainset' is not defined in this cell's namespace --
# presumably 'dataset.classes' (or a leftover from an earlier run); verify.
trainset.classes
# +
import torch as th
import torchvision as tv
import pro_gan_pytorch.PRO_GAN as pg
from torchvision import transforms
import torchvision
# Root of the art-image training set (one subdirectory per class label).
TRAIN_DATA_PATH = '/home/jovyan/github/models/BAM-DCGAN/data/dataset_updated/training_set/'
# select the device to be used for training
device = th.device("cuda" if th.cuda.is_available() else "cpu")
def setup_data(download=False):
    """Build train/test datasets from the on-disk image folder.

    :param download: unused; kept for interface parity with the CIFAR variant
    :return: classes, trainset, testset => note both "splits" read the same
        directory, so the test set is not a held-out sample
    """
    img_pipeline = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_ds = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH, transform=img_pipeline)
    test_ds = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH, transform=img_pipeline)
    # Class names come from the sub-directory names of the training folder.
    return train_ds.classes, train_ds, test_ds
if __name__ == '__main__':
    # some parameters:
    depth = 4
    # hyper-parameters per depth (resolution)
    num_epochs = [10, 20, 20, 20]
    fade_ins = [50, 50, 50, 50]
    batch_sizes = [32, 32, 32, 32]
    latent_size = 128
    # get the data. Ignore the test data and their classes
    # (download is accepted but unused by this setup_data variant)
    _, dataset, _ = setup_data(download=True)
    # ======================================================================
    # This line creates the PRO-GAN
    # ======================================================================
    # num_classes follows the image-folder subdirectory count.
    pro_gan = pg.ConditionalProGAN(num_classes=len(dataset.classes), depth=depth,
                                   latent_size=latent_size, device=device)
    # ======================================================================
    # ======================================================================
    # This line trains the PRO-GAN
    # ======================================================================
    pro_gan.train(
        dataset=dataset,
        epochs=num_epochs,
        fade_in_percentage=fade_ins,
        batch_sizes=batch_sizes,
        feedback_factor=1
    )
    # ======================================================================
| notebooks/PRO-GAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ( Ignore this first code section. It's just basic setup. )
# +
import pandas
import numpy
import itertools
from collections import OrderedDict
from IPython.display import Image
def color_nan(x):
    """Cell styler: pink background for missing values, pale grey otherwise.

    Uses pandas.isna instead of ``x is numpy.nan``: the identity test only
    matches the numpy singleton, so NaN floats introduced into *numeric*
    columns by outer merges would silently not be highlighted.
    """
    # pandas.isna is NaN-safe for scalars of any dtype (nan, None, NaT).
    if pandas.isna(x):
        return "background-color: #e1b2be; border-color: white"
    else:
        return "background-color: #eff5f5; border-color: white"
# Toggle to make debug() dump intermediate frames via display_nans().
DEBUG = False


def debug(*args):
    """Forward *args* to display_nans, but only when the DEBUG flag is on."""
    if not DEBUG:
        return
    display_nans(*args)
def display_nans(*args):
    """Show each argument: DataFrames styled with NaN highlighting, anything else printed."""
    for item in args:
        if not isinstance(item, pandas.DataFrame):
            print(item)
        else:
            styled = item.sort_index(axis=1).style.hide_index().applymap(color_nan)
            display(styled)
def yellow_cols(s, columns):
    """Column styler: yellow for columns named in *columns* (darker where the
    value is missing), no styling for every other column.

    Uses pandas.isna instead of ``x is not numpy.nan``: the identity test only
    matches the numpy singleton, so NaN floats from merges on numeric columns
    would be shaded as if they held data.
    """
    if s.name not in columns:
        # Unhighlighted column: one empty style string per cell.
        return [""] * len(s)
    return s.apply(
        lambda x: "background-color: #F0D9AC; border-color: white" if pandas.isna(x)
        else "background-color: #ffff99; border-color: white"
    )
def color_suffixes(df, suffixes):
    """Display *df* with NaN shading plus yellow highlighting on every column
    whose name ends with one of *suffixes*."""
    marked = [col for col in df.columns if col.endswith(tuple(suffixes))]
    styled = df.style.hide_index().applymap(color_nan)
    display(styled.apply(yellow_cols, columns=marked, axis=0))
def current_merge(left, right):
    """Plain full outer join on the automatically detected shared columns."""
    return pandas.merge(left, right, how="outer")
def sequential_outer_merge(*args):
    """Fold the given frames left-to-right through the module-level
    current_merge (looked up at call time, since the notebook rebinds it)."""
    merged = None
    for frame in args:
        merged = frame if merged is None else current_merge(merged, frame)
    return merged
def show_sequential_outer_merge_permutations(*args):
    """Run sequential_outer_merge on every ordering of *args* and display each result."""
    for ordering in itertools.permutations(args):
        display_nans(sequential_outer_merge(*ordering))
def run_test(*args):
    """Display the inputs, apply the trailing callable, and display its output.

    The last positional argument is the function under test; everything before
    it is passed through as that function's arguments.
    """
    *inputs, function = args
    print("-" * 50)
    print("inputs")
    display_nans(*inputs)
    print("output")
    display_nans(function(*inputs))
# -
# ---
# # The Dangers Of Naively Preserving Source Data's Tabular Nature, From Zero Knowledge Onward
# ---
# * ## Merge Bubbles From Partial Batches:
# +
# A participant/specimen roster plus two partial batches of genomic-file
# records that together cover all four specimens.
df1 = pandas.DataFrame({
    'PARTICIPANT_ID':['pt_1','pt_2','pt_3','pt_4'],
    'SPECIMEN_ID':['bs_1','bs_2','bs_3','bs_4']
})
df2 = pandas.DataFrame({
    'SPECIMEN_ID':['bs_1','bs_2'],
    'GENOMIC_FILE_PATH':['f_1','f_2'],
})
df3 = pandas.DataFrame({
    'SPECIMEN_ID':['bs_3','bs_4'],
    'GENOMIC_FILE_PATH':['f_3','f_4']
})
Image("img/Partial_Batches.png")
# -
# #### When merging these DFs, we can intuitively say that we really want to get this:
#
# ```
# GENOMIC_FILE_PATH PARTICIPANT_ID SPECIMEN_ID
# 0 f_1 pt_1 bs_1
# 1 f_2 pt_2 bs_2
# 2 f_3 pt_3 bs_3
# 3 f_4 pt_4 bs_4
# ```
#
# #### but most merge orders don't get us there...
#
#
#
show_sequential_outer_merge_permutations(df1, df2, df3)
# #### Only the results that minimize the final row count are good. (Avi is 50% sure that this is the right metric at this stage)
# +
def minimum_length_batch_merge(*args):
    """Merge *args* in every order and keep the result with the fewest rows.

    Orderings whose merge raises are skipped (the exception is printed);
    ties keep the first minimal result encountered.
    """
    best = None
    for ordering in itertools.permutations(args):
        try:
            candidate = sequential_outer_merge(*ordering)
        except Exception as exc:
            print(exc)
            continue
        if best is None or len(candidate) < len(best):
            best = candidate
    return best
def show_minimum_length_batch_merge(*args):
    """Display the most row-compact merge of *args*."""
    display_nans(minimum_length_batch_merge(*args))
run_test(df1, df2, df3, minimum_length_batch_merge)
# -
# #### Good enough for now, but...
# * ## Merge Bubbles From Partial Overlap:
# +
df4 = pandas.DataFrame({
'SPECIMEN_ID':['bs_1','bs_2'],
'GENOMIC_FILE_PATH':['f_1','f_2']
})
df5 = pandas.DataFrame({
'SEQUENCING_ID':['se_1','se_2','se_3','se_4'],
'GENOMIC_FILE_PATH':['f_1','f_2','f_3','f_4']
})
df6 = pandas.DataFrame({
'SPECIMEN_ID':['bs_3','bs_4'],
'SEQUENCING_ID':['se_3','se_4']
})
Image("img/Partial_Overlap.png")
# -
# #### When merging these DFs, we can intuitively say that we really want to get
#
# ```
# SPECIMEN_ID GENOMIC_FILE_PATH SEQUENCING_ID
# 0 bs_1 f_1 se_1
# 1 bs_2 f_2 se_2
# 2 bs_3 f_3 se_3
# 3 bs_4 f_4 se_4
# ```
# #### That would express the nature of the identification relationship connecting Specimen, Genomic File, and Sequencing Experiment.
#
#
# #### But these relationships are lost to bubbles regardless of merge order.
#
show_sequential_outer_merge_permutations(df4, df5, df6)
# #### <span style="color:red">That's bad.</span> The main problem is that Pandas doesn't remember from one merge to the next which nans are real data and which ones are just merge artifacts.
#
# ### So next we try to make a merge that rectifies null/non-null overlaps.
# #### 1. First just mark the null/non-null overlaps while merging
# +
def marking_merge(left, right):
    """Outer merge that marks null/non-null collisions instead of adding rows.

    Joins on the combination of shared columns that yields the fewest output
    rows; shared columns left out of the join are split into parallel
    ``*_left_`` / ``*_right_`` columns for later recombination.
    """
    suffix_l, suffix_r = "_left_", "_right_"
    shared = set(left.columns).intersection(set(right.columns))
    if not shared:
        raise Exception("No overlapping columns.")
    # Rows whose join columns hold NaN on the right carry nothing mergeable.
    right = right.dropna(subset=shared)
    # Try every combination of shared columns, largest first, keeping the merge
    # with the fewest rows. Looping through many merges is still plenty fast.
    candidates = (
        pandas.merge(
            # Explicit on/suffixes are the trick: instead of creating new rows
            # on null/non-null overlaps, pandas makes parallel columns.
            left, right, how="outer", on=combo, suffixes=(suffix_l, suffix_r)
        )
        for size in range(len(shared), 0, -1)
        for combo in itertools.combinations(shared, size)
    )
    # min keeps the first minimal candidate, matching a strict "<" scan.
    return min(candidates, key=len)
current_merge = marking_merge
for p in itertools.permutations([df4, df5, df6]):
color_suffixes(sequential_outer_merge(*p), suffixes=["_left_", "_right_"])
# -
# #### 2. Then recombine the two marked sides of an overlap
# +
def recombine(df):
    """Collapse each ``*_left_``/``*_right_`` pair produced by marking_merge.

    NaNs on the left are filled from the right, the merged values are stored
    under the unsuffixed name, and both marked columns are dropped. The frame
    is modified in place and also returned.
    """
    tag_l = "_left_"
    tag_r = "_right_"
    while True:
        marked = [c for c in df.columns if c.endswith(tag_l)]
        if not marked:
            return df
        for left_col in marked:
            base = left_col.split(tag_l)[0]
            right_col = base + tag_r
            # Prefer the left side's value, falling back to the right side's.
            df[base] = df[left_col].fillna(df[right_col])
            del df[left_col]
            del df[right_col]
def recombining_merge(left, right):
    """marking_merge followed by recombine: an outer merge with no split-column residue."""
    return recombine(marking_merge(left, right))
current_merge = recombining_merge
show_sequential_outer_merge_permutations(df4, df5, df6)
# -
# #### And if we again use only the most compact results...
run_test(df1, df2, df3, minimum_length_batch_merge)
run_test(df4, df5, df6, minimum_length_batch_merge)
# #### Good enough for that test, but...
# * ## Value Mispropagation From Unencoded Attribute Relationships <br>(fixable _only_ by knowing relationships)
# +
df7 = pandas.DataFrame({
    'SPECIMEN_ID':['bs_1','bs_2', 'bs_3','bs_4'],
    'PARTICIPANT_ID':['pt_1','pt_2','pt_1','pt_2']
})
# df8 repeats two of df7's rows and adds attribute X, whose owning concept
# (specimen vs participant) is deliberately left ambiguous for the demo.
df8 = pandas.DataFrame({
    'SPECIMEN_ID':['bs_3','bs_4'],
    'PARTICIPANT_ID':['pt_1','pt_2'],
    'X': ['a', 'b']
})
run_test(df7, df8, current_merge)
# -
# #### If X is a property of <span style="color:blue">specimens</span>, then [<span style="background-color:#e1b2be">nan, nan</span>, a, b] is the correct final X column.
#
# #### If X is a property of <span style="color:blue">participants</span>, then [a, b, a, b] is the correct final X column.
# #### <span style="color:red">Getting the same behavior in both cases is bad.</span> We need a new kind of merge that accounts for the relationships between distinct concepts, their identifiers, and their attributes.
# #### 1. First we differentiate our identifiers from their attributes and define the relationships
# +
# Map each concept to the full set of column names that describe it; smear()
# uses this to decide which attributes an identifier may propagate.
ATTRIBUTE_SETS = {
    'participant': {'PARTICIPANT_GENDER', 'PARTICIPANT_ID', 'PARTICIPANT_FATHER_ID', 'PARTICIPANT_FAMILY', 'PARTICIPANT_RACE', 'PARTICIPANT_CONSENT_TYPE', 'PARTICIPANT_IS_PROBAND', 'PARTICIPANT_MOTHER_ID', 'PARTICIPANT_ETHNICITY'},
    'specimen': {'SPECIMEN_TUMOR_DESCRIPTOR', 'SPECIMEN_SHIPMENT_DATE', 'SPECIMEN_ID', 'SPECIMEN_CONCENTRATION_MG_PER_ML', 'SPECIMEN_ANATOMY_SITE', 'SPECIMEN_ALIQUOT_ID', 'SPECIMEN_EVENT_AGE', 'SPECIMEN_TISSUE_TYPE', 'SPECIMEN_ANALYTE', 'SPECIMEN_UBERON_ANATOMY_SITE_ID', 'SPECIMEN_NCIT_ANATOMY_SITE_ID', 'SPECIMEN_KF_ID', 'SPECIMEN_SHIPMENT_ORIGIN', 'SPECIMEN_COMPOSITION', 'SPECIMEN_SPATIAL_DESCRIPTOR', 'SPECIMEN_VOLUME_ML', 'SPECIMEN_NCIT_TISSUE_TYPE_ID'},
    'genomic_file': {'GENOMIC_FILE_ETAG', 'GENOMIC_FILE_ID', 'GENOMIC_FILE_NAME', 'GENOMIC_FILE_PATH', 'GENOMIC_FILE_KF_ID', 'GENOMIC_FILE_SIZE', 'GENOMIC_FILE_HARMONIZED', 'GENOMIC_FILE_CAVATICA_OUTPUT_FILE'},
    'sequencing_experiment': {'SEQUENCING_MEAN_READ_LENGTH', 'SEQUENCING_PLATFORM', 'SEQUENCING_INSERT_SIZE', 'SEQUENCING_MEAN_INSERT_SIZE', 'SEQUENCING_MEAN_DEPTH', 'SEQUENCING_LIBRARY_NAME', 'SEQUENCING_CENTER_NAME', 'SEQUENCING_REFERENCE_GENOME', 'SEQUENCING_ID', 'SEQUENCING_DATE', 'SEQUENCING_STRATEGY', 'SEQUENCING_INSTRUMENT', 'SEQUENCING_IS_PAIRED_END', 'SEQUENCING_LIBRARY_STRAND', 'SEQUENCING_TOTAL_READS', 'SEQUENCING_CENTER_KF_ID', 'SEQUENCING_MAX_INSERT_SIZE'},
    'phenotype': {'PHENOTYPE_OBSERVED', 'PHENOTYPE_HPO_ID', 'PHENOTYPE_EVENT_AGE', 'PHENOTYPE_ID', 'PHENOTYPE_NAME', 'PHENOTYPE_SNOMED_ID'},
    'diagnosis': {'DIAGNOSIS_MONDO_ID', 'DIAGNOSIS_UBERON_TUMOR_LOCATION_ID', 'DIAGNOSIS_ICD_ID', 'DIAGNOSIS_ID', 'DIAGNOSIS_TUMOR_LOCATION', 'DIAGNOSIS_EVENT_AGE', 'DIAGNOSIS_NCIT_ID', 'DIAGNOSIS_NAME'},
    'outcome': {'OUTCOME_EVENT_AGE', 'OUTCOME_RELATED', 'OUTCOME_ID', 'OUTCOME_VITAL_STATUS'}
}
# This MUST stay sorted in order from most differentiating to least.
# I believe that we will need to differentiate between single-specimen and multi-specimen genomic files too.
# identifier_marking_merge() joins on the FIRST of these found in both frames.
IDENTIFIERS = OrderedDict()
IDENTIFIERS["GENOMIC_FILE_ID"] = ATTRIBUTE_SETS["genomic_file"]
IDENTIFIERS["GENOMIC_FILE_NAME"] = ATTRIBUTE_SETS["genomic_file"]
IDENTIFIERS["GENOMIC_FILE_PATH"] = ATTRIBUTE_SETS["genomic_file"]
IDENTIFIERS["SEQUENCING_ID"] = ATTRIBUTE_SETS["sequencing_experiment"]
IDENTIFIERS["SEQUENCING_LIBRARY_NAME"] = ATTRIBUTE_SETS["sequencing_experiment"]
IDENTIFIERS["SPECIMEN_ID"] = ATTRIBUTE_SETS["specimen"]
IDENTIFIERS["SPECIMEN_ALIQUOT_ID"] = ATTRIBUTE_SETS["specimen"]
IDENTIFIERS["PARTICIPANT_ID"] = ATTRIBUTE_SETS["participant"]
# -
# #### 2. Then we modify the marking_merge function to use a set of known-joinable keys ordered from most differentiating to least. This will prevent joining on things that aren't clear identifiers and (hopefully) always join on the best available one.
def identifier_marking_merge(left, right, joinable_keys):
    """Outer-merge on the most differentiating identifier shared by both frames.

    Only columns listed in *joinable_keys* are join candidates; other shared
    columns are split into ``*_left_``/``*_right_`` pairs for recombine().
    Raises when the frames share no known identifier.
    """
    suffix_l, suffix_r = "_left_", "_right_"
    shared_ids = set(left.columns) & set(right.columns) & set(joinable_keys)
    if not shared_ids:
        raise Exception("No overlapping identifiers.")
    # Rows whose identifier columns are NaN on the right carry nothing mergeable.
    right = right.dropna(subset=shared_ids)
    debug('left', left, 'right', right)
    # joinable_keys is ordered most-differentiating first; use the first hit.
    for key in joinable_keys:
        if key in shared_ids:
            debug("JOIN ON", key)
            # Explicit on/suffixes make pandas create parallel left/right
            # columns instead of extra rows on null/non-null overlaps.
            return pandas.merge(left, right, how="outer", on=key, suffixes=(suffix_l, suffix_r))
# #### 3. Then we share concept attribute values across each instance of a given concept identifier, according to what identifies what.
# +
def smear(df):
    """ An identified entity can only have one set of descriptive attributes, so for each identifier, find
    all associated rows and share non-nan values to fill nans.
    """
    # Currently cannot chain df.groupby(col).transform(function) if there are any nans in the group column.
    # This is a known bug in pandas https://github.com/pandas-dev/pandas/issues/17093
    # For now, we'll just replace nans with something magical.
    DEFINITELY_NAN = "QWsDFGvBHjMkjnBGfREDsXcFGHgFDEdFtytrEWQ34567Yt543WeDFVCxdr"
    for ident, attributes in IDENTIFIERS.items():
        # Restrict to the attribute columns actually present in this frame.
        attributes = attributes & set(df.columns)
        if (ident in df) and attributes:
            attributes = list(attributes)
            # Sentinel stands in for NaN so unidentified rows form their own group.
            df[ident] = df[ident].fillna(DEFINITELY_NAN)
            # Within each identifier group, forward- then back-fill so every
            # row carries the group's non-null attribute values.
            df[attributes] = df.groupby(ident)[attributes].transform(lambda x: x.ffill().bfill())
            df[ident] = df[ident].replace(DEFINITELY_NAN, numpy.nan)
    debug('smeared', df)
    return df
def smeared_merge(left, right):
    """Identifier-aware merge: join on the best identifier, recombine the
    split columns, then smear attributes across each identifier group."""
    merged = identifier_marking_merge(left, right, IDENTIFIERS.keys())
    debug('merged', merged)
    merged = recombine(merged)
    debug('recombined', merged)
    return smear(merged)
current_merge = smeared_merge
# -
df9 = df8.rename(columns={'X': 'PARTICIPANT_RACE'})
df10 = df8.rename(columns={'X': 'SPECIMEN_ANATOMY_SITE'})
run_test(df1, df2, df3, minimum_length_batch_merge)
run_test(df4, df5, df6, minimum_length_batch_merge)
run_test(df7, df9, minimum_length_batch_merge)
run_test(df7, df10, minimum_length_batch_merge)
# ### Looks ok so far. But then what?
# * How fragile is this?
# * How many other scenarios not yet accounted for?
# * Is the "fewest rows" test always right or only 50% right?
# * Are miniaturized experiments sufficient for discovering the correct manipulations for arbitrarily large and complex source tables using this approach?
# * Since we have to encode relationships anyway, can we record this directly in a graph and save ourselves from pandas hell?
| docs/archive/design/merge_algorithm/DataFrame_Design_Complications_Motivating_The_Relationship_Graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Extracting the time series of activations in a label
#
#
# We first apply a dSPM inverse operator to get signed activations in a label
# (with positive and negative values) and we then compare different strategies
# to average the times series in a label. We compare a simple average, with an
# averaging using the dipoles normal (flip mode) and then a PCA,
# also using a sign flip.
#
# +
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
print(__doc__)
data_path = sample.data_path()
# Left-auditory cortical label from the MNE sample dataset.
label = 'Aud-lh'
label_fname = data_path + '/MEG/sample/labels/%s.label' % label
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2  # inverse regularization = 1 / SNR^2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']
# Compute inverse solution
pick_ori = "normal"  # Get signed values to see the effect of sign flip
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_ori=pick_ori)
label = mne.read_label(label_fname)
stc_label = stc.in_label(label)
# Compare the three label time-course extraction strategies.
modes = ('mean', 'mean_flip', 'pca_flip')
tcs = dict()
for mode in modes:
    tcs[mode] = stc.extract_label_time_course(label, src, mode=mode)
print("Number of vertices : %d" % len(stc_label.data))
# View source activations
fig, ax = plt.subplots(1)
t = 1e3 * stc_label.times  # seconds -> milliseconds
ax.plot(t, stc_label.data.T, 'k', linewidth=0.5)
for mode, tc in tcs.items():
    ax.plot(t, tc[0], linewidth=3, label=str(mode))
ax.legend(loc='upper right')
ax.set(xlabel='Time (ms)', ylabel='Source amplitude',
       title='Activations in Label : %s' % label)
| stable/_downloads/536cd12bd68c07a8888b8e67008dab4f/plot_label_source_activations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Package
from IPython.display import YouTubeVideo
YouTubeVideo('CqvZ3vGoGs0', width=900, height=400)
# Packages are a way of structuring Python’s module namespace by using “dotted module names”.
#
# A directory must contain a file named `__init__.py` in order for Python to treat it as a package. This file can be left empty, but we generally place the package's initialization code in it.
# 
# # importing module from a package
# We can import modules from packages using the dot (.) operator.
# +
# import Gate.Image.open
| Lecture 24 packages.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BlaiseMarvin/FaceRecognitionPaymentSystem/blob/main/FaceNetTrainingArcFaceLoss.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8kc6LRT3n1zs"
# ## RE-TRAINING THE FACENET MODEL USING BLACK-FACES
# + colab={"base_uri": "https://localhost:8080/"} id="mA_LhHNC9rpz" outputId="fb399502-82bf-413d-baad-e32b2339c062"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="4kGn24yZZfd7"
# ## The Arcface loss class
# + id="FGXqL3j-7t_O"
import math as m
import numpy as np
from tqdm import tqdm
import math
import os
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.models import *
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import *
from tensorflow.keras.layers import Dense, Flatten, Dropout, Input
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Layer
from tensorflow.keras.applications.densenet import DenseNet121
from tensorflow.keras.callbacks import *
from tensorflow.keras import backend as K
from tensorflow.keras import regularizers
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras import layers
from tensorflow.keras import Model
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# + id="_7G46HMXZeuF"
# Original paper: https://arxiv.org/pdf/1801.07698.pdf
# Original implementation: https://github.com/deepinsight/insightface
# Adapted from tensorflow implementation: https://github.com/luckycallor/InsightFace-tensorflow
class ArcFace(Layer):
    '''Custom Keras layer implementing ArcFace including:
    1. Generation of embeddings
    2. Loss function
    3. Accuracy function
    '''
    def __init__(self, output_dim, class_num, margin=0.5, scale=64., **kwargs):
        # output_dim: embedding size reported by compute_output_shape.
        # class_num: number of identity classes (width of the weight matrix).
        # margin: additive angular margin m (radians); scale: feature scale s.
        self.output_dim = output_dim
        self.class_num = class_num
        self.margin = margin
        self.s = scale
        # Precompute constants for cos(t + m) = cos(t)cos(m) - sin(t)sin(m).
        self.cos_m = tf.math.cos(margin)
        self.sin_m = tf.math.sin(margin)
        self.mm = self.sin_m * margin
        # cos(pi - m): below this threshold t + m would exceed pi, so a
        # monotonic fallback s*(cos t - m*sin m) is used in get_logits.
        self.threshold = tf.math.cos(tf.constant(m.pi) - margin)
        super(ArcFace, self).__init__(**kwargs)
    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.class_num),
                                      initializer='glorot_normal',
                                      trainable=True)
        super(ArcFace, self).build(input_shape) # Be sure to call this at the end
    def call(self, x):
        # Cosine similarity between L2-normalized embeddings and class weights.
        embeddings = tf.nn.l2_normalize(x, axis=1, name='normed_embeddings')
        weights = tf.nn.l2_normalize(self.kernel, axis=0, name='normed_weights')
        cos_t = tf.matmul(embeddings, weights, name='cos_t')
        return cos_t
    def get_logits(self, labels, y_pred):
        # Apply the additive angular margin to the target-class cosine and
        # scale all logits by s.
        cos_t = y_pred
        cos_t2 = tf.square(cos_t, name='cos_2')
        sin_t2 = tf.subtract(1., cos_t2, name='sin_2')
        sin_t = tf.sqrt(sin_t2, name='sin_t')
        cos_mt = self.s * tf.subtract(tf.multiply(cos_t, self.cos_m), tf.multiply(sin_t, self.sin_m), name='cos_mt')
        # Where cos t falls below cos(pi - m), substitute the linearized penalty.
        cond_v = cos_t - self.threshold
        cond = tf.cast(tf.nn.relu(cond_v, name='if_else'), dtype=tf.bool)
        keep_val = self.s*(cos_t - self.mm)
        cos_mt_temp = tf.where(cond, cos_mt, keep_val)
        # Only the true-class column gets the margin; others keep s * cos t.
        mask = tf.one_hot(labels, depth=self.class_num, name='one_hot_mask')
        inv_mask = tf.subtract(1., mask, name='inverse_mask')
        s_cos_t = tf.multiply(self.s, cos_t, name='scalar_cos_t')
        output = tf.add(tf.multiply(s_cos_t, inv_mask), tf.multiply(cos_mt_temp, mask), name='arcface_logits')
        return output
    def loss(self, y_true, y_pred):
        # NOTE(review): tf.losses.sparse_softmax_cross_entropy is the TF1 API;
        # under TF2 it lives in tf.compat.v1 -- confirm the TF version in use.
        labels = K.argmax(y_true, axis=-1)
        logits = self.get_logits(labels, y_pred)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        return loss
    def accuracy(self, y_true, y_pred):
        labels = K.argmax(y_true, axis=-1)
        logits = self.get_logits(labels, y_pred)
        accuracy = categorical_accuracy(y_true=labels, y_pred=logits)
        return accuracy
    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
# + colab={"base_uri": "https://localhost:8080/"} id="wPpKDHgpzMWU" outputId="b738b807-b490-422f-a166-d2ffff012035"
import os
from os import listdir
# Count images per identity folder in the aligned TRAINING directory.
path="/content/drive/MyDrive/NewDataset/Aligned/"
count=0
for f in listdir(path):
    count=0
    for nome in listdir(path+f):
        count+=1
    print("Name: ",f,"count: ",count)
# + colab={"base_uri": "https://localhost:8080/"} id="K4HiJlgE0JUU" outputId="b7ced947-c571-496d-97f2-e3112842681c"
import os
from os import listdir
# Same count for the aligned VALIDATION directory.
path="/content/drive/MyDrive/NewDataset/AlignedVal/"
count=0
for f in listdir(path):
    count=0
    for nome in listdir(path+f):
        count+=1
    print("Name: ",f,"count: ",count)
# + [markdown] id="Min14FmgoLZ7"
# #### Setting up the Image Data Generator API
# + colab={"base_uri": "https://localhost:8080/"} id="_oP9GPv9eyP_" outputId="aff97891-5230-42f0-db03-b6e146717c38"
#Import shutil first, this package deletes ipnb_checkpoints files that create a ghost class
import shutil
#The next step is to delete every ipynb_checkpoints file created by colab
#shutil.rmtree("/tmp/training/.ipynb_checkpoints") #be careful with shutil.rmtree() because it deletes every tree in that path. In other words, do not make mistakes.
#shutil.rmtree("/tmp/testing/.ipynb_checkpoints")
#specify both the training and validation directories
TRAINING_DIR="/content/drive/MyDrive/NewDataset/Aligned/"
VALIDATION_DIR="/content/drive/MyDrive/NewDataset/AlignedVal/"
#Initialize Image Data Generator objects; rescale pixel values from [0, 255] to [0, 1]
training_datagen=ImageDataGenerator(rescale=1/255)
validation_datagen=ImageDataGenerator(rescale=1/255)
#Create the directory iterators that infer class labels from the folder structure.
#NOTE(review): shuffle=False keeps sample order aligned with generator.classes /
#generator.filenames, which the confusion-matrix cells below rely on.
training_generator=training_datagen.flow_from_directory(TRAINING_DIR,class_mode='categorical',target_size=(160,160), shuffle=False)
validation_generator=validation_datagen.flow_from_directory(VALIDATION_DIR,class_mode='categorical',target_size=(160,160), shuffle=False, batch_size=1)
#Load the facenet model architecture
#model=load_model('/tmp/facenet/facenet_keras.h5')
# + [markdown] id="Xni0h7FSQ7oq"
# ## Loading the facenet Model architecture
# + colab={"base_uri": "https://localhost:8080/"} id="IAs1PSgVLNP0" outputId="17b75743-1c1a-48a6-f6a2-2c97cb30b563"
model=load_model('/content/drive/MyDrive/facenet_keras (1).h5')
#A summary of the model architecture
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="rH7-R2Iz6LQy" outputId="d3b09a91-91ed-4a55-bc90-d0bbf6d0bdd7"
print("Number of layers in the base model: ", len(model.layers))
# + id="HWNkmU4SEBEm"
#Load the pretrained FaceNet weights, then freeze the base so only the new head trains.
local_weights_file='/content/drive/MyDrive/facenet_keras_weights.h5'
model.load_weights(local_weights_file)
for layer in model.layers:
    layer.trainable=False
# + id="yjnsrv9Vhpau"
#Specify the last layer from the architecture, that you actually want
#('Bottleneck' is the 128-d embedding layer of this Keras FaceNet port)
last_layer=model.get_layer('Bottleneck')
last_output=last_layer.output
# + colab={"base_uri": "https://localhost:8080/"} id="SyOcetAV_xoP" outputId="01a8222f-0d26-4c87-d893-7bc5546cd108"
model.summary()
# + id="NSQETUqLmN1n"
#Code from arcface repo
#customizable arcface layer
#NOTE(review): class_num=128 here while the final softmax has 59 classes --
#the ArcFace layer acts only as a cosine projection in this graph (the
#compiled loss is plain categorical_crossentropy, not ArcFace.loss).
af_layer = ArcFace(output_dim=128, class_num=128, margin=0.5, scale=64.)
arcface_output = af_layer(last_output)
# Classification head on top of the ArcFace cosine output.
# BUG FIX: the original code fed `arcface_output` directly into the
# 1024-unit Dense and again into the final softmax, so the Flatten,
# Dropout and intermediate Dense layers were created but never connected
# to the graph (dead code).  The chain below wires every layer through x.
x = layers.Flatten()(arcface_output)
x = Dropout(rate=0.3)(x)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dense(512, activation='relu')(x)
x = Dropout(rate=0.5)(x)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dense(59, activation='softmax')(x)
model = Model(model.input, x)
model.compile(optimizer=RMSprop(learning_rate=0.0001),
              loss='categorical_crossentropy',
              metrics=['accuracy',
                       tf.keras.metrics.AUC(multi_label = True),
                       tf.keras.metrics.Recall(),
                       tf.keras.metrics.Precision()])
# + id="4SAQo-oSnM1b"
# Stop training when val_loss has not improved for 20 consecutive epochs.
early = EarlyStopping(monitor="val_loss", mode="min", patience=20)
# + id="oKi3UIyinOxd"
# Cut the learning rate by 10x after 2 stagnant epochs, down to a floor of 1e-5.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                              patience=2, min_lr=0.00001)
# + id="5mqfp8lknYYu"
# Persist only the best-so-far weights (lowest val_loss) to Drive.
checkpoint_filepath= '/content/drive/MyDrive/Extracted/Model'
model_checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_loss',
    mode='min',
    save_best_only=True)
# + colab={"base_uri": "https://localhost:8080/"} id="qje4VW0jl8d5" outputId="f804b080-b4c3-40b8-a7c5-b18dcb0a01b8"
#training for 500 epochs (EarlyStopping normally halts much earlier)
history=model.fit(training_generator,validation_data=validation_generator,epochs=500,verbose=2, callbacks=[reduce_lr,early,model_checkpoint_callback])
# + [markdown] id="4F1yNvnnqpjc"
# ### Lets visualize the output of the training phase
# + [markdown] id="XYaRMnfWC-d2"
# ## 413 from 426
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="uWpYratUmo8T" outputId="b6a44a2c-8ecf-45b4-e15f-60e117702659"
# Pull every tracked metric (and its validation counterpart) out of History.
auc=history.history['auc']
val_auc=history.history['val_auc']
acc=history.history['accuracy']
val_acc=history.history['val_accuracy']
loss=history.history['loss']
val_loss=history.history['val_loss']
recall=history.history['recall']
val_recall=history.history['val_recall']
precision=history.history['precision']
val_precision=history.history['val_precision']
epochs=range(len(acc))
plt.plot(epochs,loss,'bo',label="Training Loss")
plt.plot(epochs,val_loss,'r',label="Validation Loss")
plt.legend()
plt.show()
plt.plot(epochs,auc,'bo',label="Training AUC")
plt.plot(epochs,val_auc,'r',label="Validation AUC")
plt.legend()
# NOTE(review): this panel calls plt.figure() where every other panel calls
# plt.show() -- the AUC plot is only rendered implicitly; presumably unintended.
plt.figure()
plt.plot(epochs,acc,'bo',label="Training Accuracy")
plt.plot(epochs,val_acc,'r',label="Validation Accuracy")
plt.legend()
plt.show()
plt.plot(epochs,recall,'bo',label="Training Recall")
plt.plot(epochs,val_recall,'r',label="Validation Recall")
plt.legend()
plt.show()
plt.plot(epochs,precision,'bo',label="Training Precision")
plt.plot(epochs,val_precision,'r',label="Validation Precision")
plt.legend()
plt.show()
# + id="80bffwUd1P36"
from sklearn.metrics import classification_report, confusion_matrix
# NOTE(review): plot_confusion_matrix was removed in scikit-learn >= 1.2 and is
# never used below (scikitplot draws the matrix instead) -- safe to drop.
from sklearn.metrics import plot_confusion_matrix
# + id="dnSdnAbc2i8A"
# One prediction step per file is valid only because the validation
# generator was created with batch_size=1.
filenames = validation_generator.filenames
nb_samples = len(filenames)
# + colab={"base_uri": "https://localhost:8080/"} id="deP7NsH13khH" outputId="ca8538d9-1923-4a71-de3d-7ba0a387380c"
# NOTE(review): predict_generator is deprecated; model.predict(...) is the
# modern equivalent.
predict = model.predict_generator(validation_generator,steps = nb_samples)
y_pred = np.argmax(predict, axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="bP5ERVgy3xNC" outputId="5228ffdd-a91a-40b9-8041-e65a831c6489"
# validation_generator.classes lines up with y_pred because shuffle=False.
print('Confusion Matrix')
print(confusion_matrix(validation_generator.classes, y_pred))
# + colab={"base_uri": "https://localhost:8080/"} id="gzf0xvqV4Wa-" outputId="685950ba-50f1-48b5-c42a-ec609f2d6d74"
target_names=list(training_generator.class_indices.keys())
print(classification_report(validation_generator.classes, y_pred, target_names=target_names))
# + colab={"base_uri": "https://localhost:8080/"} id="ISoJ4iZXB4AZ" outputId="ed81ece0-4764-4253-f73a-2260c26d203b"
# !pip install scikit-plot
# + id="HlQtroeHB1KN" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="fb84eb71-400b-4228-814c-ddbbc770f917"
import scikitplot as skplt
plt.figure(figsize=(20, 20))
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="aar-7-2YCFG3" outputId="6add1ec2-dcd1-4beb-916b-0f453b3b286a"
# Row-normalized confusion matrix drawn on an explicit large Axes.
fig, ax = plt.subplots(figsize=(20, 20))
skplt.metrics.plot_confusion_matrix(validation_generator.classes, y_pred, normalize=True,ax=ax)
plt.figure(figsize=(20, 20))
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="2k8RzXSFvYjT" outputId="a21565a8-4579-4bdb-a2b2-da8341c5275c"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="vpB6CtjbveC7" outputId="3a50fbdd-cf2c-4238-e368-3f1d1b344076"
# Embedding extractor: the trained network truncated at the third-from-last layer.
model2=Model(model.input,model.layers[-3].output)
model2.summary()
# + id="ILR3Wqmb7bwV"
# NOTE(review): this TF1-style freeze uses the literal placeholder node name
# "name_of_the_output_node" and a fresh (empty) session -- as written it
# raises; the working export is the convert_variables_to_constants_v2 cell below.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
sess=tf.Session()
from tensorflow.python.framework import graph_io
frozen = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["name_of_the_output_node"])
graph_io.write_graph(frozen, '/tmp/session-frozens', 'inference_graph.pb', as_text=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 89} id="Wci41p_z2LSO" outputId="1dcb8f36-ea34-4e23-bce6-af6f8c04c912"
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from keras import backend as K
from keras.models import Sequential, Model
sess=tf.Session()
K.set_learning_phase(0) # Set the learning phase to 0 (inference mode)
model = model2
config = model2.get_config()
#weights = model2.get_weights()
#model = Sequential.from_config(config)
output_node = model2.output.name.split(':')[0] # We need this in the next step
graph_file = "kerasFacenet.pb"
ckpt_file = "kerasFacenet.ckpt"
saver = tf.train.Saver(sharded=True)
tf.train.write_graph(sess.graph_def, '', graph_file)
#saver.save(sess, ckpt_file)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="kue-AGmHwiod" outputId="5beaef0a-adcf-4f50-9456-352c2bc2ed0e"
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
tf.saved_model.save(model2, "/tmp/saved-models")
# Convert Keras model to ConcreteFunction
full_model = tf.function(lambda x: model2(x))
full_model = full_model.get_concrete_function(
    tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
# Get frozen ConcreteFunction (weights baked in as constants)
frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()
layers = [op.name for op in frozen_func.graph.get_operations()]
#print("-" * 50)
#print("Frozen model layers: ")
for layer in layers:
    print(layer)
#print("-" * 50)
#print("Frozen model inputs: ")
#print(frozen_func.inputs)
#print("Frozen model outputs: ")
#print(frozen_func.outputs)
# Save frozen graph from frozen ConcreteFunction to hard drive
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir="/tmp/saved-model",
                  name="facenet-Original-LastLayer.pb",
                  as_text=False)
# + id="0NvKeRsc8Xgd"
| FaceNetTrainingArcFaceLoss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Area Chart with Gradient
#
# This example shows how to make an area chart with a gradient fill. For more information about gradient options see the Vega-Lite [Gradient documentation](https://vega.github.io/vega-lite/docs/types.html#gradient).
# +
import altair as alt
from vega_datasets import data

# Stock-price example dataset (columns: symbol, date, price).
source = data.stocks()

# Area chart of Google's stock price with a vertical white->darkgreen
# gradient fill and a solid dark-green top line.
alt.Chart(source).transform_filter(
    'datum.symbol==="GOOG"'
).mark_area(
    line={'color':'darkgreen'},
    color=alt.Gradient(
        gradient='linear',
        stops=[alt.GradientStop(color='white', offset=0),
               alt.GradientStop(color='darkgreen', offset=1)],
        # x1 == x2 makes the gradient purely vertical; y runs bottom (1) to top (0)
        x1=1,
        x2=1,
        y1=1,
        y2=0
    )
).encode(
    alt.X('date:T'),
    alt.Y('price:Q')
)
| doc/gallery/area_chart_gradient.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''clipenv'': conda)'
# name: python388jvsc74a57bd087ca9f09b7ab97f4b0c2b431eda3d3c04a7d0df0284a0aa2f4790ab8b5423040
# ---
import torch
import clip
from PIL import Image
from os import listdir
import os
from os.path import isfile, join
from torchvision.datasets import CIFAR100
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
# Prefer GPU when available; CLIP inference also runs on CPU, just slower.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the ViT-B/32 CLIP model together with its matching image preprocessor.
model, preprocess = clip.load("ViT-B/32", device=device)
def process_batch(photo_batch):
    """Encode a batch of photo files into L2-normalized CLIP image features.

    photo_batch: iterable of photo file entries; each is opened with PIL and
        its id is taken from `.name`, so elements are presumably
        pathlib.Path / os.DirEntry objects named '<photo_id>.jpg'
        -- TODO confirm with the caller.
    Returns (features, ids): features is a (batch, dim) CPU tensor with
    unit-norm rows; ids is the list of photo ids in the same order.
    """
    images = []
    batch_photo_ids = []
    for pf in photo_batch:
        images.append(preprocess(Image.open(pf)))
        batch_photo_ids.append(pf.name.split('.jpg')[0])
    images_tensor = torch.stack(images).to(device)
    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        images_features = model.encode_image(images_tensor)
        # Normalize rows so cosine similarity reduces to a dot product.
        images_features /= images_features.norm(dim=-1, keepdim=True)
    return images_features.cpu(), batch_photo_ids
| research.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import configparser
import sys,os
sys.path.append('../')
import functions
from statistics import mean
from rich.console import Console
from scipy.optimize import curve_fit
from scipy.stats import norm
# # <span style="color:orange"> Exercise 1.1 </span>
#
# ## <span style="color:green"> Task </span>
# Test the Pseudo-Random Number generator downloaded from the NSL Ariel web site by estimating the integrals:
# - $\langle r \rangle = \int_0^1 r dr = 1/2$
# - $\sigma^2 = \int_0^1 (r-1/2)^2 dr = 1/12$
#
# and dividing the interval $[0,1]$ into $M$ identical sub-intervals to implement the $\chi^2$ test.
# ## <span style="color:green"> Solution </span>
# This exercise consists of a central script (main.cpp) and a pseudo-random number generator library (random.hpp). The idea behind the methodology is to run two for loops: the external one over N blocks and the inner one over L numbers, where the random numbers are summed according to the integrand equation. Then the averages for each block is calculated and stored together with the square of the mean value. Later, the datablocking method is computed, by calculating the progressive mean, the squared progressive mean and the associated error.
#
# The error on the mean was calculated following this formula: $ \sigma_A^2 = \Big\langle (A - \langle A\rangle)^2\Big\rangle = \langle A^2 \rangle - \langle A \rangle^2 $.<br>
# The central limit theorem states that the error on the mean value follows the law $\frac{1}{N}$, so that it gets smaller with increasing N.
#
#
# ### Parameters
# The simulation uses two parameters:
# - **Number of runs** which indicates how many times the simulation runs (defaults to 10000)
# - **Number of blocks** which is the number of blocks in which the runs are split into (defaults to 100)
# +
# Exercise 1.1.1: read the simulation parameters and the datablocking output.
base_dir = "es1.1/es1.1.1/"
filename = base_dir+"config.ini"

config = configparser.ConfigParser()
config.read(filename)

M=int(config["simulation"]["runs"])
N=int(config["simulation"]["blocks"])
# BUG FIX: bool() on a non-empty string is always True, so the previous
# bool(config["settings"]["logger_debug"]) returned True even for "false"/"0".
# getboolean() parses the usual ini spellings (true/false, yes/no, on/off, 1/0).
logger_debug = config.getboolean("settings", "logger_debug")

if M%N != 0:
    raise ValueError(f"Number of blocks not a factor of number of runs. {M} runs -- {N} blocks")
L=int(M/N)  # runs per block

print(f"Ex1.1.1: Configuration file '(unknown)' successfully parsed")

x = np.arange(N)*L
# Parse the tab-separated datablocking output:
# column 1 = progressive mean, column 2 = progressive error.
_sum,_error = zip(*[
    (lines.split('\t')[1],lines.split('\t')[2] ) for lines in open(base_dir+config['settings']['input_file']).readlines()])
_sum,_error = np.asfarray(_sum),np.asfarray(_error)
# (the unused `avg` list comprehension was removed)
_mean = mean(_sum-0.5)
_mean = float("{:.4f}".format(_mean))

if x.shape == _sum.shape and _sum.shape == _error.shape and logger_debug:
    print("Ex1.1.1: Dimensional checks passed.")
# +
print("Average of intergral without datablocking\n")

# Raw per-block estimates (no progressive datablocking averaging).
y_f = np.loadtxt(base_dir+"/outputs/temp.dat")
x_f = [i for i in range(len(y_f))]
mean_y_f = [mean(y_f) for i in range(len(y_f))]

plt.title(f"Integral value without datablocking")
plt.plot(x_f, y_f,label="Data")
plt.plot(x_f,mean_y_f,label="Mean")
plt.xlabel("Block")
plt.ylabel("<r>")
plt.grid(True)
plt.legend()
plt.show()
print("")
print("Ex1.1.1: Graph successfully plotted")
print("Data average: ",mean_y_f[0])
print("Expected value: ",0.5)
print("Uncertainty: ",mean_y_f[0]-0.5)
# -
print("Average of integral with datablocking\n")
try:
    # Progressive estimate shifted by the expected value 1/2, so the curve
    # should converge to 0 within the error bars.
    plt.title(f"Integral value with {M} runs and {N} blocks")
    plt.errorbar(x,_sum-0.5,yerr=_error,label="Experimental Data")
    plt.plot(x,[_sum[-1]-0.5 for i in range(len(x))],color="orange",label="Final value",linewidth=2)
    plt.plot(x,[0 for i in range(len(x))],label="Expected value",linewidth=2)
    plt.xlabel('run')
    plt.ylabel('<r>-1/2')
    plt.grid(True)
    plt.legend()
    plt.show()
    print("")
    print(f"Final value after all blocks: {_sum[-1]-0.5}")
    print("Expected value: ",0.0)
    print("Uncertainty: ",_mean-0.0)
    print("Ex1.1.1: Graph successfully plotted\n\n")
except ValueError as e:
    # NOTE(review): the caught exception `e` is discarded and only a generic
    # hint is printed -- consider logging it for diagnosis.
    print("Ex1.1.1: Cannot execute error graph:\n- Possible shape mismatch.\n- Forgot to call make\n- Number of blocks not a factor\n\n")
# +
# Exercise 1.1.2: same pipeline for the variance integral (expected 1/12).
base_dir = "es1.1/es1.1.2/"
filename = base_dir+"config.ini"
config = configparser.ConfigParser()
config.read(filename)
print(f"Ex1.1.2: Configuration file '(unknown)' successfully parsed")

M=int(config["simulation"]["runs"])
N=int(config["simulation"]["blocks"])
if M%N != 0:
    raise ValueError(f"Number of blocks not a factor of number of runs. {M} - {N}")
L=int(M/N)  # runs per block

x = np.arange(N)*L
# Column 1 = progressive mean, column 2 = progressive error.
_sum,_error = zip(*[
    (lines.split('\t')[1],lines.split('\t')[2] ) for lines in open(base_dir+config['settings']['input_file']).readlines()])
# NOTE(review): np.asfarray is removed in NumPy 2.0; np.asarray(..., dtype=float)
# is the drop-in replacement.
_sum,_error = np.asfarray(_sum),np.asfarray(_error)
avg = [mean(_sum-1./12) for i in range(len(x))]  # NOTE(review): computed but never used
if x.shape == _sum.shape and _sum.shape == _error.shape:
    print("Ex1.1.2: Dimensional checks passed.")
# -
# Progressive estimate shifted by the expected value 1/12.
plt.title(f"Integral value with {M} runs and {N} blocks")
plt.errorbar(x,_sum-1/12,yerr=_error,label="Experimental Data")
plt.plot(x,[_sum[-1]-1/12 for i in range(len(x))],color="orange",label="Final value",linewidth=2)
plt.plot(x,[0 for i in range(len(x))],label="Expected value",linewidth=2)
plt.xlabel('# Runs')
plt.ylabel('<r>-1/12')
plt.grid(True)
plt.legend()
plt.show()
# +
################## ---- CHI SQUARED ---- ##################
base_dir = "es1.1/es1.1.3/"
filename = base_dir+"config.ini"
config = configparser.ConfigParser()
config.read(filename)
print(f"Ex1.1.3: Configuration file '(unknown)' successfully parsed")

M = int(config["simulation"]["blocks"])
N = int(config["simulation"]["numbers"])
# One chi^2 value per block, read from column 1 of the tab-separated file.
chi2 = [float(line.split("\t")[1]) for line in open(base_dir+config['settings']['input_file']).readlines()]
x = [i for i in range(M)]
avg = [mean(chi2) for i in range(len(x))]

plt.title(f"Chi-squared test with {N} numbers and {M} blocks")
plt.errorbar(x,chi2,label="Data")
plt.plot(x,avg,label="mean",linewidth=3,color="orange")
plt.xlabel('# Runs')
plt.ylabel('chi^2')
plt.grid(True)
plt.legend()
plt.show()

# For a healthy generator chi^2 should fluctuate around N/M.
_mean = mean(chi2)
diff = abs(int(N/M)-mean(chi2))
print("Mean: ",_mean,"\t\tExpected: ",N/M,"\t\tDifference: ","{:.4f}".format(diff))
# -
# ## <span style="color:green"> Results </span>
#
# As expected, the accuracy of the simulation improves with the number of Monte Carlo runs. A larger number of blocks gives more points to the graph but a slightly lower accuracy, because the average for each block is calculated with less points.
#
# The following graph shows the estimate of the integral subtracted by the expected value (in blue) against the number of runs. The overall average of the data is also plotted (orange).
#
# The fact that the accuracy improves with the number of tries, and that the calculated value stabilizes quickly proves the validity of the pseudo-random number generator. In fact, a non-functional generator would not exhibit these properties, but would rather compute a divergent value for the integral, or make predictions to a wrong number. The fact that the sequence converges to zero with a relatively small error shows that the calculated value is correct and the central limit theorem is valid.
#
# The chi-squared is correctly fluctuating around the expected value of N/M (100). However, the accuracy of the values does not improve with time. This is because the module does not generate purely random numbers, but pseudo-random numbers. These are produced according to a precise algorithm that uses an initializing seed and the modulo operation, making it look like the numbers are randomly generated. <br> <br>
# # <span style="color:orange"> Exercise 1.2 </span>
#
# ## <span style="color:green"> Task </span>
#
# - Extend Pseudo-Random Number generator downloaded from the NSL Ariel web site and check the Central Limit Theorem:
# 1. <font color="red">Add two probability distributions</font> by using the **method of the inversion of the cumulative distribution** to sample from a **generic** <font color="red">exponential distribution</font>, $p(x) = \lambda \exp(-\lambda x)$, $x\in [0;+\infty]$ (see <a href="https://en.wikipedia.org/wiki/Exponential_distribution">this Wikipedia link</a>), and a **generic** <font color="red">Cauchy-Lorentz distribution</font> $p(x)=\frac{1}{\pi}\frac{\Gamma}{(x-\mu)^2+\Gamma^2}$, $x\in [-\infty;+\infty]$ (see <a href="https://en.wikipedia.org/wiki/Cauchy_distribution">this Wikipedia link</a>).
# 2. <font color="red">Make 3 pictures</font> with the histograms obtained filling them with $10^4$ realizations of $S_N = \frac{1}{N}\sum_{i=1}^N x_i$ (for $N=1, 2, 10, 100$), being $x_i$ a random variable sampled throwing a *standard* dice (fig.1), an *exponential* dice (fig.2, use $\lambda=1$) and a *Lorentzian* dice (fig.3, use $\mu=0$ and $\Gamma=1$).
#
# Note that you can try to fit the case $N=100$ with a Gaussian for standard and exponential dices, whereas you should use a Cauchy-Lorentz distribution for the last case.
#
# ## <span style="color:green"> Solution </span>
# The Random class has been enriched with two additional probability distributions: Exp($\lambda$) and Lorentz($\mu$,$\Gamma$). In both cases, the number y given by the distribution $p_y(y)$ is obtained by a pseudo-random number uniformly generated inside $[0,1]$ and returned using the respective inverted cumulative function.
#
# The second task is achieved by writing three files, containing $10^4$ averages of numbers (1,2,10 and 100) generated according to three distributions: uniform, exponential and Cauchy-Lorentz. The files are read from the Python file that produces 4 histograms, respective to the numbers used for the averages, for each file. Above the histogram for N=100, a fit is made using a Gaussian function for the uniform and exponential distributions, while a Cauchy-Lorentz function is used for its distribution.
# +
# Exercise 1.2: central-limit-theorem histograms -- load parameters.
filename = "es1.2/config.ini"
config = configparser.ConfigParser()
config.read(filename)
print(f"Ex1.2: Configuration file '(unknown)' successfully parsed")

console = Console()
M = int(config["simulation"]["throws"])
numbers = functions.convert_string(config["simulation"]["numbers"],d_type=int)
# BUG FIX: bool("false".capitalize()) == bool("False") == True because any
# non-empty string is truthy.  getboolean() actually parses the ini value.
logger_debug = config.getboolean("settings", "logger_debug")
base_dir = "es1.2/"+str(config["settings"]["base_dir"])
colors = ["blue","orange","green","magenta"]
if logger_debug: print("Ex1.2: Parameters loaded.")
def Gaussian(x, mu, sigma):
    """Unnormalized Gaussian bell curve exp(-(x - mu)^2 / (2 sigma^2))."""
    values = np.asarray(x, dtype=float)
    return np.exp(-((values - mu) ** 2) / (2.0 * sigma ** 2))
def Gauss(x, a, mu, sigma):
    """Gaussian fit model with free amplitude `a`.

    NOTE: the exponent lacks the conventional 1/2 factor, so `sigma` is not
    the standard deviation; since a and sigma are free fit parameters the
    curve_fit results below are unaffected.
    """
    z = (x - mu) / sigma
    return a * np.exp(-z ** 2) / (np.sqrt(2 * np.pi) * sigma)
#def Gaussian(x,mu,sigma):
# x = np.asfarray(x)
# return 1./np.sqrt(2.*np.pi*sigma**2)*np.exp(-0.5*(x-mu)**2/sigma**2)
def Lorentz(x, a, mu, gamma):
    """Cauchy-Lorentz fit model: a * gamma / (pi * ((x - mu)^2 + gamma^2))."""
    values = np.asarray(x, dtype=float)
    return a * gamma / (np.pi * ((values - mu) ** 2.0 + gamma ** 2.0))
# +
#for filename in os.listdir(base_dir):
filename = "unif.dat"
distrib = "Uniform"
console.print(f"------------------ (unknown) ------------------", style="bold red")

# Each line of the data file holds the 10^4 realizations of S_N for one N
# (N = 1, 2, 10, 100); elements are tab-separated with a trailing tab.
lines = open(os.path.join(base_dir,filename),"r+").read().split("\n")[:-1]
matrix = [[float(e) for e in line.split("\t")[:-1]] for line in lines]

f, ax = plt.subplots(1,4,figsize=(12,6))
plt.suptitle(f"Sampling of {distrib} distribution",fontsize=22)
for i,item in enumerate(matrix):
    print(i)
    if filename == "gauss.dat":
        min_range = -50
        max_range = 50
    else:
        min_range = min(item)
        max_range = max(item)
    print(f"min: {min(item)}\t max: {max(item)}")
    print(f"i: {i}, len: {len(matrix)}")
    print(f"min range: {min_range}\tmax range: {max_range}")
    # FIX: the exec(f"ax[{i}]...") calls built these exact statements as
    # strings for no benefit; direct indexing is equivalent and safe.
    ax[i].axvline(np.mean(item), color='k', linestyle='dashed', linewidth=0.5)
    bin_heights, bin_borders, _ = ax[i].hist(item,label=f'N= {numbers[i]}',bins=100,color=colors[i])
    if i==3:
        # Fit the N=100 histogram with a Gaussian, as the CLT predicts.
        bin_centers = bin_borders[:-1] + np.diff(bin_borders) / 2
        p_opt, p_cov = curve_fit(Gauss,bin_centers,bin_heights,p0=[100,2,1])
        print("Optimal parameters: ",p_opt)
        #ax[i].plot(bin_centers,bin_heights,color="red")
        ax[i].plot(bin_centers,Gauss(bin_centers,*p_opt),label="Fit",linewidth=3)
    print("-----------------------------------------------")

lines_labels = [ax.get_legend_handles_labels() for ax in f.axes]
lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
plt.xlabel("Bin")
plt.ylabel("Frequency")
#plt.legend(lines,labels)
plt.show()
print("\n\n\n")
# +
filename = "exp.dat"
distrib = "Exponential"
console.print(f"------------------ (unknown) ------------------", style="bold red")

# Each line holds the 10^4 realizations of S_N for one N (1, 2, 10, 100).
lines = open(os.path.join(base_dir,filename),"r+").read().split("\n")[:-1]
matrix = [[float(e) for e in line.split("\t")[:-1]] for line in lines]

f, ax = plt.subplots(1,4,figsize=(10,6))
plt.suptitle(f"Sampling of {distrib} distribution",fontsize=22)
for i,item in enumerate(matrix):
    print(i)
    if filename == "gauss.dat":
        min_range = -50
        max_range = 50
    else:
        min_range = min(item)
        max_range = max(item)
    print(f"min: {min(item)}\t max: {max(item)}")
    print(f"i: {i}, len: {len(matrix)}")
    print(f"min range: {min_range}\tmax range: {max_range}")
    # FIX: exec(f"...") string-building replaced with direct ax[i] calls.
    ax[i].axvline(np.mean(item), color='k', linestyle='dashed', linewidth=0.5)
    bin_heights, bin_borders, _ = ax[i].hist(item,label=f'N= {numbers[i]}',bins=50,color=colors[i])
    if i==3:
        # Fit the N=100 histogram with a Gaussian, as the CLT predicts.
        bin_centers = bin_borders[:-1] + np.diff(bin_borders) / 2
        p_opt, p_cov = curve_fit(Gauss,bin_centers,bin_heights,p0=[350,2,2])
        print("Optimal parameters: ",p_opt)
        #ax[i].plot(bin_centers,bin_heights,color="red")
        ax[i].plot(bin_centers,Gauss(bin_centers,*p_opt),label="Fit",linewidth=3)
    print("-----------------------------------------------")

lines_labels = [ax.get_legend_handles_labels() for ax in f.axes]
lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
plt.xlabel('Bin')
plt.ylabel("Frequency")
plt.legend(lines,labels)
plt.show()
print("\n\n\n")
# +
filename = "gauss.dat"
distrib = "Cauchy-Lorentz"
console.print(f"------------------ (unknown) ------------------", style="bold red")

# Each line holds the 10^4 realizations of S_N for one N (1, 2, 10, 100).
lines = open(os.path.join(base_dir,filename),"r+").read().split("\n")[:-1]
matrix = [[float(e) for e in line.split("\t")[:-1]] for line in lines]

f, ax = plt.subplots(1,4,figsize=(10,6))
plt.suptitle(f"Sampling of {distrib} distribution",fontsize=22)
for i,item in enumerate(matrix):
    print(i)
    if filename == "gauss.dat":
        # Lorentzian tails are huge; clip the histogram range to [-50, 50].
        min_range = -50
        max_range = 50
    else:
        min_range = min(item)
        max_range = max(item)
    print(f"min: {min(item)}\t max: {max(item)}")
    print(f"i: {i}, len: {len(matrix)}")
    print(f"min range: {min_range}\tmax range: {max_range}")
    # FIX: exec(f"...") string-building replaced with direct ax[i] calls.
    bin_heights, bin_borders , _= ax[i].hist(item,label=f'N= {numbers[i]}',range=(-50,50),bins=100,color=colors[i])
    ax[i].axvline(np.mean(item), color='k', linestyle='dashed', linewidth=0.5)
    if i==3:
        # The CLT does not apply (undefined variance): fit a Lorentzian, not a Gaussian.
        bin_centers = bin_borders[:-1] + np.diff(bin_borders) / 2
        p_opt, p_cov = curve_fit(Lorentz,bin_centers,bin_heights)
        print("Optimal parameters: ",p_opt)
        #ax[i].plot(bin_centers,bin_heights,color="red")
        ax[i].plot(bin_centers,Lorentz(bin_centers,*p_opt),label="Fit",linewidth=2)
    print("-----------------------------------------------")

lines_labels = [ax.get_legend_handles_labels() for ax in f.axes]
lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
plt.xlabel("Bin")
plt.ylabel("Frequency")
plt.legend(lines,labels, loc="upper left")
plt.show()
# -
# # <span style="color:orange"> Exercise 1.3 </span>
# ## <span style="color:green"> Task </span>
# **Simulate** Buffon’s experiment (see LSN_Lecture_00, supplementary material): A needle of length $L$ is thrown at random onto a horizontal plane ruled with straight lines a distance $d$ (must be $d > L$, but do not use $d\gg L$ otherwise $P\ll 1$) apart. The probability $P$ that the needle will intersect one of these lines is: $P = 2L/\pi d$. This could be used to evaluate $\pi$ from throws of the needle: if the needle is thrown down $N_{thr}$ times and is observed to land on a line $N_{hit}$ of those times, we can make an estimate of $\pi$ from
# $$\pi = \frac{2L}{Pd} = \lim_{N_{thr} \to \infty}\frac{2LN_{thr}}{N_{hit}d}$$
# <font color="red">Make a picture of the estimation of $\pi$ and its uncertainty (Standard Deviation of the mean) with a large number of *throws* $M$ as a function of the number of blocks, $N$</font> (see below: Computing statistical uncertainties). If possible, do not use $\pi$ to evaluate $\pi$.
#
# ## <span style="color:green"> Solution </span>
# The simulation is composed of a main.cpp, random.h and datablocking function (defined as a shared header).
#
# After having initialized the number generator and useful variables for the simulation, the main script computes an external and an internal for loop, which cycle through the number of blocks and the number of throws respectively.
# In fact, the script simulates the throwing of numerous needles inside a 2D grid, counting the number of times that it hits a grid line against the total number of throws. The simulation of the throws is achieved by generating a random number in the range [0,spacing], where spacing is a configuration parameter, that fixes the x component of one end of the needle. Subsequently, another random number is generated to represent the direction of the needle with respect to its previously-generated end. The other extremity of the needle is then calculated with a simple trigonometric formula. To check whether the needle hits a line in the plane (considered to be on the natural values of the x-axis 1,2,..), the script checks whether the two ends share the same x coordinates (doesn't hit) or not (hits).<br>
# The estimated value for $\pi$ for each block is saved in a container that is processed in the datablocking method before terminating the simulation.
# +
import pylab as pl
import math
import numpy as np
from matplotlib import collections as mc
from matplotlib import pyplot as plt

printlines = 30
print("---> Showing {} needles on the plane\n".format(printlines))

# Vertical grid lines of the Buffon plane at x = 0..10 (all blue).
# FIX: loop variables renamed -- the original `iter` shadowed the builtin.
planelines = []
planecolors = []
for k in range(11):
    planelines.append([(k,0),(k,10)])
    planecolors.append([0,0,1,1])

# Load Data
i, x1, y1, x2, y2, state = np.loadtxt("es1.3/outputs/positions.dat",unpack=True) # state = 1 -> hit, state = 0 -> miss
lines = []
colors = []
for k in range(printlines):
    segment = [(x1[k],y1[k]),(x2[k],y2[k])]
    lines.append(segment)
    # green = needle crossed a grid line, red = miss
    if state[k]==1:
        colors.append([0,1,0,1])
    else:
        colors.append([1,0,0,1])

plane = mc.LineCollection(planelines, colors=planecolors, linewidths=1)
lc = mc.LineCollection(lines, colors=colors, linewidths=1)
fig, ax = pl.subplots(figsize=(14,6))
ax.add_collection(plane)
ax.add_collection(lc)
ax.autoscale()
ax.margins(0.1)
# -
print("---> Showing estimate of π using datablocking\n")

i, pi, err = np.loadtxt("es1.3/outputs/results.dat",unpack=True)

plt.title("Estimation of PI")
plt.xlabel("Block")
plt.ylabel("PI")
plt.errorbar(i,pi,yerr=err,label="Data",fmt='r.',ecolor="orange",ms=3)
# Reference line at the true value of pi.
# FIX: the original comprehension `[math.pi for iter in range(len(i))]`
# shadowed the builtin `iter`; list repetition is simpler and equivalent.
pis = [math.pi] * len(i)
plt.plot(i,pis,label='Pi',color="blue")
plt.grid(True)
plt.legend()
plt.plot()
plt.show()
| es1/Exercise1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # ScmDatabase
#
# In this notebook, we provide an example of the `ScmDatabase` class. `ScmDatabase` helps read and write large bunches of timeseries data by splitting them up into multiple files on disk and allowing users to read/write selections at a time.
#
# This allows handling very large datasets which may exceed the amount of system memory a user has available.
# +
# NBVAL_IGNORE_OUTPUT
import traceback
import tempfile
import numpy as np
import pandas as pd
from scmdata import ScmRun, run_append
from scmdata.database import ScmDatabase
from scmdata.errors import NonUniqueMetadataError
pd.set_option("display.width", 160)
# -
# ## Initialisation
#
# There are two main things to think about when creating a `ScmDatabase`. Namely:
#
# * Where the data is going to be stored (`root_dir`)
# * How the data will be split up (`levels`)
#
# When data is to be written to disk it is split into different files, each with a unique combination of metadata values. The `levels` option defines the metadata columns used to split up the data.
#
# Choosing an appropriate value for `levels` could play a large role in determining the performance of reading/writing. For example, if you were storing output from a number of different climate models, you may define `levels` as `["climate_model", "scenario", "variable", "region"]`. This would allow loading a particular variable and region, say `Surface Temperature` for the `World` region, from all climate models and scenarios without needing to load the other variables and regions. Specifying too many groups may result in slow writing if a very large number of database files are written.
#
# If you wish to load a subset of a particular metadata dimension, then it must be specified in this list.
# Show the constructor docstring (documents the root_dir / levels options)
print(ScmDatabase.__init__.__doc__)
# Write the database into a throwaway directory; cleaned up at the end of the notebook
temp_out_dir = tempfile.TemporaryDirectory()
# Files on disk will be partitioned first by climate_model, then by scenario
database = ScmDatabase(temp_out_dir.name, levels=["climate_model", "scenario"])
# NBVAL_IGNORE_OUTPUT
database
# ## Saving data
#
# Data can be added to the database using the `save_to_database` method. Subsequent calls merge new data into the database.
def create_timeseries(
    n=500,
    count=1,
    b_factor=1 / 1000,
    model="example",
    scenario="ssp119",
    variable="Surface Temperature",
    unit="K",
    region="World",
    **kwargs,
):
    """Build an ScmRun holding ``count`` synthetic quadratic-growth timeseries.

    Each ensemble member follows ``a + t**2 * b`` with a random intercept
    ``a`` (uniform on [0, 1)) and a random curvature ``b`` (uniform, scaled
    by ``b_factor``), sampled annually for ``n`` years starting in 2000.
    Any extra metadata columns can be supplied via ``**kwargs``.
    """
    intercepts = np.random.rand(count)
    curvatures = np.random.rand(count) * b_factor
    steps = np.arange(n)
    values = intercepts + steps[:, np.newaxis] ** 2 * curvatures
    years = 2000 + steps
    metadata = {
        "model": model,
        "scenario": scenario,
        "variable": variable,
        "region": region,
        "unit": unit,
        "ensemble_member": range(count),
    }
    metadata.update(kwargs)
    return ScmRun(values, columns=metadata, index=years)
# Two scenarios ("low"/"high") for two climate models, 10 ensemble members
# each; the "high" scenario doubles the quadratic growth factor so the
# ensembles diverge over time.
runs_low = run_append(
    [
        create_timeseries(
            scenario="low",
            climate_model="model_a",
            count=10,
            b_factor=1 / 1000,
        ),
        create_timeseries(
            scenario="low",
            climate_model="model_b",
            count=10,
            b_factor=1 / 1000,
        ),
    ]
)
runs_high = run_append(
    [
        create_timeseries(
            scenario="high",
            climate_model="model_a",
            count=10,
            b_factor=2 / 1000,
        ),
        create_timeseries(
            scenario="high",
            climate_model="model_b",
            count=10,
            b_factor=2 / 1000,
        ),
    ]
)
# NBVAL_IGNORE_OUTPUT
run_append([runs_low, runs_high]).line_plot(hue="scenario", style="climate_model")
# NBVAL_IGNORE_OUTPUT
database.save(runs_low)
database.available_data()
# Internally, each row shown in `available_data()` is stored as a netCDF file in a directory structure following ``database.levels``.
# NBVAL_IGNORE_OUTPUT
# !pushd {temp_out_dir.name}; tree; popd
# Additional calls to `save` will merge the new data into the database, creating any new files as required.
#
# If existing data is found, it is first loaded and merged with the saved data before writing, to prevent losing existing data.
# NBVAL_IGNORE_OUTPUT
database.save(runs_high)
database.available_data()
# Saved data must still have unique metadata, otherwise a `NonUniqueMetadataError` is raised.
# NBVAL_IGNORE_OUTPUT
try:
    database.save(runs_high)
except NonUniqueMetadataError:
    traceback.print_exc(limit=0, chain=False)
# NBVAL_IGNORE_OUTPUT
# Shifting the ensemble_member ids makes the metadata unique again, so this save succeeds
runs_high_extra = runs_high.copy()
runs_high_extra["ensemble_member"] = runs_high_extra["ensemble_member"] + 10
database.save(runs_high_extra)
# ## Loading data
#
# When loading data we can select a subset of data, similar to `ScmRun.filter` but limited to filtering for the metadata columns as specified in `levels`
# NBVAL_IGNORE_OUTPUT
run = database.load(scenario="high")
run.meta
# NBVAL_IGNORE_OUTPUT
database.load(climate_model="model_b").meta
# The entire dataset can also be loaded if needed. This may not be possible for very large datasets depending on the amount of system memory available.
# NBVAL_IGNORE_OUTPUT
all_data = database.load()
all_data.meta
# NBVAL_IGNORE_OUTPUT
all_data.line_plot(hue="scenario", style="climate_model")
# Remove the temporary directory (and every database file inside it)
temp_out_dir.cleanup()
| notebooks/database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# MovieLens 20M dataset: movie metadata and user ratings
df_movies = pd.read_csv('ml-20m/movies.csv')
df_ratings = pd.read_csv('ml-20m/ratings.csv')
# Bidirectional lookup tables between movie ids and titles
movieId_to_name = pd.Series(df_movies.title.values,
                              index = df_movies.movieId.values).to_dict()
name_to_movieId = pd.Series(df_movies.movieId.values,
                              index = df_movies.title).to_dict()
# Randomly display 5 records in the dataframe
# (display() is the Jupyter notebook built-in; this cell only runs in a notebook)
for df in list((df_movies, df_ratings)):
    rand_idx = np.random.choice(len(df), 5, replace=False)
    display(df.iloc[rand_idx,:])
    print("Displaying 5 of the total "+str(len(df))+" data points")
# +
import matplotlib.pyplot as plt
import plotly.plotly as py
# %matplotlib inline
plt.figure(figsize=(8, 6))
ax = plt.subplot(111)
ax.set_title("Distribution of Movie Ratings", fontsize=16)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel("Movie Rating", fontsize=14)
plt.ylabel("Count", fontsize=14)
plt.hist(df_ratings['rating'], color="#3F5D7D")
plt.show()
# +
from sklearn.model_selection import train_test_split
df_ratings_train, df_ratings_test= train_test_split(df_ratings,
stratify=df_ratings['userId'],
random_state = 15688,
test_size=0.30)
# -
print("Number of training data: "+str(len(df_ratings_train)))
print("Number of test data: "+str(len(df_ratings_test)))
def rating_splitter(df):
    """Split ratings into per-user lists of liked / disliked movie ids.

    A rating of 4 or above counts as "liked" (1), anything below as
    disliked (0). Movie ids are cast to strings, and one list of
    movie-id strings is returned per (liked, userId) group.

    NOTE: mutates the passed-in DataFrame in place (adds a 'liked'
    column and re-types 'movieId'), matching the original behaviour.
    """
    df['liked'] = np.where(df['rating'] >= 4, 1, 0)
    df['movieId'] = df['movieId'].astype('str')
    grouped = df.groupby(['liked', 'userId'])
    movie_lists = []
    for group_key in grouped.groups:
        movie_lists.append(grouped.get_group(group_key)['movieId'].tolist())
    return movie_lists
pd.options.mode.chained_assignment = None
splitted_movies = rating_splitter(df_ratings_train)
# +
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
import gensim
assert gensim.models.word2vec.FAST_VERSION > -1
# -
# The below code shuffles the training data:
# +
import random
for movie_list in splitted_movies:
random.shuffle(movie_list)
# +
from gensim.models import Word2Vec
import datetime
start = datetime.datetime.now()
model = Word2Vec(sentences = splitted_movies, # We will supply the pre-processed list of moive lists to this parameter
iter = 5, # epoch
min_count = 10, # a movie has to appear more than 10 times to be keeped
size = 200, # size of the hidden layer
workers = 4, # specify the number of threads to be used for training
sg = 1, # Defines the training algorithm. We will use skip-gram so 1 is chosen.
hs = 0, # Set to 0, as we are applying negative sampling.
negative = 5, # If > 0, negative sampling will be used. We will use a value of 5.
window = 9999999)
print("Time passed: " + str(datetime.datetime.now()-start))
#Word2Vec.save('item2vec_20180327')
# +
from gensim.models import Word2Vec
import datetime
start = datetime.datetime.now()
model_w2v_sg = Word2Vec(sentences = splitted_movies,
iter = 10, # epoch
min_count = 5, # a movie has to appear more than 5 times to be keeped
size = 300, # size of the hidden layer
workers = 4, # specify the number of threads to be used for training
sg = 1,
hs = 0,
negative = 5,
window = 9999999)
print("Time passed: " + str(datetime.datetime.now()-start))
model_w2v_sg.save('item2vec_word2vecSg_20180328')
del model_w2v_sg
# +
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models import Word2Vec
model = Word2Vec.load('item2vec_20180327')
word_vectors = model.wv
# del model # uncomment this line will delete the model
# +
import requests
import re
from bs4 import BeautifulSoup
def refine_search(search_term):
    """
    Refine a movie name into the exact title string used by the dataset,
    by scraping the first title match from an IMDB search results page.

    Args:
        search_term (string): free-form movie name to search for

    Returns:
        refined_name (string): a "<Title>, The (Year)"-style name matching the
        dataset's naming convention, or None (implicitly) when IMDB returns no
        parseable result.

    NOTE(review): this depends on the IMDB markup circa 2018 (the
    'result_text' cell class and the 'fn_tt_tt_1' anchor id in the regex) —
    confirm the page structure before relying on it.
    """
    target_url = "http://www.imdb.com/find?ref_=nv_sr_fn&q="+"+".join(search_term.split())+"&s=tt"
    html = requests.get(target_url).content
    parsed_html = BeautifulSoup(html, 'html.parser')
    for tag in parsed_html.find_all('td', class_="result_text"):
        # Captures the title text and the trailing year, e.g. "Up" and " (2009)"
        search_result = re.findall('fn_tt_tt_1">(.*)</a>(.*)</td>', str(tag))
        if search_result:
            if search_result[0][0].split()[0]=="The":
                # Move a leading "The" to the end: "The Matrix" -> "Matrix, The"
                str_frac = " ".join(search_result[0][0].split()[1:])+", "+search_result[0][0].split()[0]
                refined_name = str_frac+" "+search_result[0][1].strip()
            else:
                refined_name = search_result[0][0]+" "+search_result[0][1].strip()
            return refined_name
def produce_list_of_movieId(list_of_movieName, useRefineSearch=False):
    """
    Turn a list of movie names into a list of movie ids. The movie names have
    to be exactly the same as they are in the dataset, unless useRefineSearch
    is set to True, in which case ambiguous names are first resolved via an
    IMDB search (refine_search).

    Args:
        list_of_movieName (List): A list of movie names.
        useRefineSearch (boolean): Resolve ambiguous names with refine_search() first.

    Returns:
        list_of_movie_id (List of strings): A list of movie ids. Names that
        cannot be resolved or are absent from the dataset are skipped.
    """
    list_of_movie_id = []
    for movieName in list_of_movieName:
        if useRefineSearch:
            refined = refine_search(movieName)
            # refine_search() returns None when IMDB yields no usable match;
            # the original code then crashed on "Refined Name: " + None.
            if refined is None:
                print("Could not refine: " + movieName)
                continue
            movieName = refined
            print("Refined Name: " + movieName)
        # Membership test directly on the dict (no need for .keys())
        if movieName in name_to_movieId:
            list_of_movie_id.append(str(name_to_movieId[movieName]))
    return list_of_movie_id
def recommender(positive_list=None, negative_list=None, useRefineSearch=False, topn=20):
    """Return the ids of the topn movies most similar to the given examples.

    positive_list / negative_list are movie names contributing positively /
    negatively to the query; they are converted to dataset movie ids (via
    refine_search when useRefineSearch is True) and fed to the word2vec
    model's multiplicative-combination similarity.
    """
    if positive_list:
        positive_list = produce_list_of_movieId(positive_list, useRefineSearch)
    if negative_list:
        negative_list = produce_list_of_movieId(negative_list, useRefineSearch)
    similar = model.wv.most_similar_cosmul(
        positive=positive_list, negative=negative_list, topn=topn
    )
    # most_similar_cosmul yields (movieId, similarity) pairs; keep the ids only
    return [movie_id for movie_id, _ in similar]
# -
# Example recommendations (display() is the Jupyter notebook built-in)
ls = recommender(positive_list=["UP"], useRefineSearch=True, topn=5)
print('Recommendation Result based on "Up (2009)":')
display(df_movies[df_movies['movieId'].isin(ls)])
ls = recommender(positive_list=["The Matrix", "Django Unchained"], useRefineSearch=True, topn=7)
# Fixed a stray doubled quote in the message ('""Django' -> '"Django')
print('Recommendation Result based on "The Matrix (1999)" + "Django Unchained (2012)":')
display(df_movies[df_movies['movieId'].isin(ls)])
| Collaborative_Filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting floods with weather forecast data
#
# More rain, more water and ultimately more floods. This seems straightforward, but how does it really work? What are the physical processes? What kind of models can you use to understand and predict floods? What data do you need? This workshop will address these questions using Jupyter notebooks, Python code examples and real world data. With timeseries of historical weather and river flow data we will explore the data and build several models to get an understanding of floods across the UK.
#
# ## Explore data
import pandas as pd
# Data from https://nrfa.ceh.ac.uk/data/search
# +
# NRFA exports carry a 16-line header before the data rows.
# 'cdr' presumably stands for catchment daily rainfall and 'gdf' for gauged
# daily flow (per NRFA file naming) — TODO confirm against the data source.
rain = pd.read_csv('data/3001_cdr.csv',skiprows=16)
rain.columns = ['date','rainfall','x']
rain = rain.drop('x', axis=1)  # third column is unused
flow = pd.read_csv('data/3001_gdf.csv',skiprows=16)
flow.columns = ['date','flow','x']
flow = flow.drop('x', axis=1)  # third column is unused
# -
rain.head()
flow.head()
| .ipynb_checkpoints/rainfall-runoff-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
import pylab
# import tensorflow after matplotlib always to avoid errors
import tensorflow as tf
import numpy as np
# -
hello = tf.constant('Hello World!')
sess = tf.Session()
print(sess.run(hello))
# ** Linear Regression **
#
#
# +
# Creating random input data using NumPy. y = x * 0.1 + 0.3 + noise
x_data = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x_data))
y_data = x_data * 0.1 + 0.3 + noise
# Plotting the input data.
pylab.plot(x_data, y_data, '.')
# +
# Building inference graph.
# Create Variables W and b that compute y_data = W * x_data + b
# NOTE: tf.random_uniform (and tf.Session below) are TensorFlow 1.x APIs,
# removed in TF 2.x; this notebook requires a TF1 installation.
W = tf.Variable(tf.random_uniform([1], 0.0, 1.0),name='lr_weights')
b = tf.Variable(tf.zeros([1]),name="lr_bias")
y = W * x_data + b
# Uncomment the following lines to see what W and b are.
print(W.name)
print(b)
# +
# Building training graph.
loss = tf.reduce_mean(tf.square(y - y_data)) # Create an operation that calculates loss.
optimizer = tf.train.GradientDescentOptimizer(0.5) # Create an optimizer.
train = optimizer.minimize(loss) # Create an operation that minimizes loss.
init = tf.initialize_all_variables() # Create an operation initializes all the variables.
# Uncomment the following 3 lines to see what 'loss', 'optimizer' and 'train' are.
print("loss:", loss)
print("optimizer:", optimizer)
print("train:", train)
print(init)
# -
print(tf.get_default_graph().as_graph_def())
# +
# Creating a session and launching the graph.
sess = tf.Session()
sess.run(init)
y_initial_values = sess.run(y) # Save initial values for plotting later.
# Uncomment the following line to see the initial W and b values.
print(sess.run([W, b]))
# +
# Perform training.
for step in range(201):
sess.run(train)
# Uncomment the following two lines to watch training happen real time.
# if step % 20 == 0:
# print(step, sess.run([W, b]))
print(sess.run([W, b]))
# -
# Uncomment the following lines to compare.
pylab.plot(x_data, y_data, '.', label="target_values")
pylab.plot(x_data, y_initial_values, ".", label="initial_values")
pylab.plot(x_data, sess.run(y), ".", label="trained_values")
pylab.legend()
pylab.ylim(0, 1.0)
| Learning_TensorFLow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/usm.jpg" width="480" height="240" align="left"/>
# # MAT281 - Laboratorio N°02
#
# ## Objetivos de la clase
#
# * Reforzar los conceptos básicos de numpy.
# ## Contenidos
#
# * [Problema 01](#p1)
# * [Problema 02](#p2)
# * [Problema 03](#p3)
# <a id='p1'></a>
#
# ## Problema 01
#
# Una **media móvil simple** (SMA) es el promedio de los últimos $k$ datos anteriores, es decir, sea $a_1$,$a_2$,...,$a_n$ un arreglo $n$-dimensional, entonces la SMA se define por:
#
# $$sma(k) =\dfrac{1}{k}(a_{n}+a_{n-1}+...+a_{n-(k-1)}) = \dfrac{1}{k}\sum_{i=0}^{k-1}a_{n-i} $$
#
#
# Por otro lado, podemos definir el SMA con una ventana móvil de $n$, donde el resultado nos retorna el promedio ponderado avanzando de la siguiente forma:
#
# * $a = [1,2,3,4,5]$, la SMA con una ventana de $n=2$ sería:
#
#
# * sma(2): [mean(1,2),mean(2,3),mean(3,4),mean(4,5)] = [1.5, 2.5, 3.5, 4.5]
# * sma(3): [mean(1,2,3),mean(2,3,4),mean(3,4,5)] = [2.,3.,4.]
#
#
# Implemente una función llamada `sma` cuyo input sea un arreglo unidimensional $a$ y un entero $n$, y cuyo ouput retorne el valor de la media móvil simple sobre el arreglo de la siguiente forma:
#
# * **Ejemplo**: *sma([5,3,8,10,2,1,5,1,0,2], 2)* = $[4. , 5.5, 9. , 6. , 1.5, 3. , 3. , 0.5, 1. ]$
#
# En este caso, se esta calculando el SMA para un arreglo con una ventana de $n=2$.
#
# **Hint**: utilice la función `numpy.cumsum`
# +
import numpy as np
def sma(a: np.ndarray, window_len: int) -> np.ndarray:
    """Simple moving average of `a` with a window of `window_len` samples.

    Parameters:
        a: np.ndarray
            Vector with the elements to average (any array-like accepted).
        window_len: int
            Size of the moving window.

    Returns:
        np.ndarray
            Vector of length len(a) - window_len + 1 with the averages.

    Fixes over the original implementation:
    - the output was always allocated with len(a) - 1 slots, which is only
      correct for window_len == 2 (larger windows produced trailing zeros);
    - np.cumsum(a) was recomputed on every loop iteration (quadratic cost);
      it is now computed once and the windows are taken by array slicing.
    """
    a = np.asarray(a, dtype=float)
    acumulada = np.cumsum(a)
    promedios = np.empty(len(a) - window_len + 1)
    # First average: the sum of the first window_len elements
    promedios[0] = acumulada[window_len - 1] / window_len
    # Remaining averages: difference of cumulative sums window_len apart
    promedios[1:] = (acumulada[window_len:] - acumulada[:-window_len]) / window_len
    return promedios
# -
# ejemplo 01
a = [1,2,3,4,5]
sma(a,2)
# ejemplo 02
a = [5,3,8,10,2,1,5,1,0,2]
sma(a,2)
# <a id='p2'></a>
#
# ## Problema 02
#
# La función **strides($a,n,p$)**, corresponde a transformar un arreglo unidimensional $a$ en una matriz de $n$ columnas, en el cual las filas se van construyendo desfasando la posición del arreglo en $p$ pasos hacia adelante.
#
# * Para el arreglo unidimensional $a$ = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], la función strides($a,4,2$), corresponde a crear una matriz de $4$ columnas, cuyos desfaces hacia adelante se hacen de dos en dos.
#
# El resultado tendría que ser algo así:$$\begin{pmatrix}
# 1& 2 &3 &4 \\
# 3& 4&5&6 \\
# 5& 6 &7 &8 \\
# 7& 8 &9 &10 \\
# \end{pmatrix}$$
#
#
# Implemente una función llamada `strides(a,4,2)` cuyo input sea un arreglo unidimensional y retorne la matriz de $4$ columnas, cuyos desfaces hacia adelante se hacen de dos en dos.
#
# * **Ejemplo**: *strides($a$,4,2)* =$\begin{pmatrix}
# 1& 2 &3 &4 \\
# 3& 4&5&6 \\
# 5& 6 &7 &8 \\
# 7& 8 &9 &10 \\
# \end{pmatrix}$
#
import numpy as np
def strides(a: np.ndarray, n: int, p: int) -> np.ndarray:
    """Build a matrix whose rows are length-n windows of `a`, shifted by p.

    Parameters:
        a: np.ndarray
            Vector used to build the matrix.
        n: int
            Number of columns of the matrix (window length).
        p: int
            Forward shift between consecutive rows (must satisfy p < n).

    Returns:
        np.ndarray
            Matrix of shape ((len(a) - n) // p + 1, n); e.g. for
            a = 1..10, n = 4, p = 2 the rows are [1,2,3,4], [3,4,5,6],
            [5,6,7,8], [7,8,9,10].

    Fix over the original implementation: the row count was hard-coded as
    p**2, which only matches the correct value (len(a) - n) // p + 1 for the
    documented example (p = 2, n = 4, len(a) = 10); other inputs produced
    zero-padded or truncated garbage. Each row is now taken directly as the
    slice a[i*p : i*p + n], so only complete windows are emitted.
    """
    if p >= n:
        # Kept returning the original error string for interface
        # compatibility, although raising ValueError would be more Pythonic.
        return "El valor de p debe ser menor que el numero de columas"
    num_filas = (len(a) - n) // p + 1
    resultado = np.zeros((num_filas, n))
    for fila in range(num_filas):
        resultado[fila, :] = a[fila * p:fila * p + n]
    return resultado
# ejemplo 01
a = np.array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
strides(a,4,2)
# <a id='p3'></a>
#
# ## Problema 03
#
#
# Un **cuadrado mágico** es una matriz de tamaño $n \times n$ de números enteros positivos tal que
# la suma de los números por columnas, filas y diagonales principales sea la misma. Usualmente, los números empleados para rellenar las casillas son consecutivos, de 1 a $n^2$, siendo $n$ el número de columnas y filas del cuadrado mágico.
#
# Si los números son consecutivos de 1 a $n^2$, la suma de los números por columnas, filas y diagonales principales
# es igual a : $$M_{n} = \dfrac{n(n^2+1)}{2}$$
# Por ejemplo,
#
# * $A= \begin{pmatrix}
# 4& 9 &2 \\
# 3& 5&7 \\
# 8& 1 &6
# \end{pmatrix}$,
# es un cuadrado mágico.
#
# * $B= \begin{pmatrix}
# 4& 2 &9 \\
# 3& 5&7 \\
# 8& 1 &6
# \end{pmatrix}$, no es un cuadrado mágico.
#
# Implemente una función llamada `es_cudrado_magico` cuyo input sea una matriz cuadrada de tamaño $n$ con números consecutivos de $1$ a $n^2$ y cuyo ouput retorne *True* si es un cuadrado mágico o 'False', en caso contrario
#
# * **Ejemplo**: *es_cudrado_magico($A$)* = True, *es_cudrado_magico($B$)* = False
#
# **Hint**: Cree una función que valide la mariz es cuadrada y que sus números son consecutivos del 1 a $n^2$.
import numpy as np
def es_cuadrada_numeros_consecutivos(A: np.ndarray) -> bool:
    """Check that A is square and contains exactly the integers 1..n**2.

    Parameters:
        A: np.ndarray
            Matrix to verify.

    Returns:
        bool
            True if A is square and its entries are a permutation of
            1..n**2, False otherwise.

    Fix over the original implementation: the triple loop only *counted*
    matches against 1..n**2, so a matrix with one duplicated value and one
    missing value (e.g. [[1, 1], [3, 4]]) was wrongly accepted. Sorting the
    entries and comparing against the expected range catches that case and
    replaces the O(n**4) scan with an O(n**2 log n) one.
    """
    if A.ndim != 2:
        return False
    filas, columnas = A.shape
    if filas != columnas:
        return False
    esperado = np.arange(1, filas * filas + 1)
    return bool(np.array_equal(np.sort(A, axis=None), esperado))
def es_cuadrado_magico(A: np.ndarray) -> bool:
    """Check whether A is a magic square of the consecutive numbers 1..n**2.

    A magic square has every row, every column and both main diagonals
    summing to the magic constant M_n = n * (n**2 + 1) / 2.

    Parameters:
        A: np.ndarray
            Square matrix to verify.

    Returns:
        bool
            True if A is a magic square, False otherwise.

    Fix over the original implementation: several branches fell off the end
    of the function and returned None instead of False (e.g. when A was not
    square/consecutive, or when the row sums differed from the column sums).
    Every path now returns an explicit boolean, and the sums are computed
    with vectorized numpy operations instead of nested loops.
    """
    if not es_cuadrada_numeros_consecutivos(A):
        return False
    n = len(A)
    # Magic constant M_n = n * (n**2 + 1) / 2 (always an integer)
    objetivo = n * (n * n + 1) // 2
    if not np.all(A.sum(axis=0) == objetivo):
        return False
    if not np.all(A.sum(axis=1) == objetivo):
        return False
    diagonal_principal = int(np.trace(A))
    # np.fliplr reverses the columns, so its trace is the anti-diagonal sum
    diagonal_secundaria = int(np.trace(np.fliplr(A)))
    return diagonal_principal == objetivo and diagonal_secundaria == objetivo
# ejemplo 01
A=np.array([[4,9,2],[3,5,7],[8,1,6]])
es_cuadrado_magico(A)
#ejemplo 02
B = np.array([[4,2,9],[3,5,7],[8,1,6]])
es_cuadrado_magico(B)
| labs/C1_data_analysis/02_numpy/laboratorio_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sentiment Analysis with Yelp Reviews
#
# This dataset comes from the Yelp Dataset Challenge for NLP and sentiment analysis. I chose to create a classifier trained using the ACL IMDB movie review dataset for its high quality, large number of training examples, and its similarity to what a user may write for a Yelp review.
#
# Training data: Large Movie Review Dataset http://ai.stanford.edu/~amaas/data/sentiment/
# +
import pandas as pd
import numpy as np
import mglearn
import json
from collections import Counter
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.grid_search import GridSearchCV
from spacy.en import STOP_WORDS
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ### Training a Classifier using the Large Movie Review Data
#
# For now I'll use a logistic regression model since the performance seems good enough based on the grid search results. If we are looking for better performance it might be good to test other classfiers.
# +
# Training data loading
reviews_train = load_files('~/data/aclImdb/train/')
text_train, y_train = reviews_train.data, reviews_train.target
reviews_test = load_files('~/data/aclImdb/test/')
text_test, y_test = reviews_test.data, reviews_test.target
# Clean some of the HTML
text_train = [doc.replace(b'<br />', b" ") for doc in text_train]
text_test = [doc.replace(b'<br />', b" ") for doc in text_test]
# Bag of words would use the countvectorizer method
# vect = CountVectorizer().fit(text_train)
# X_train = vect.transform(text_train)
# Sample of training
# model = LogisticRegression()
# model.fit(X_train, y_train)
# Tf-idf and grid search for runing the logistic regression model
pipeline = make_pipeline(TfidfVectorizer(min_df=5, norm=None), LogisticRegression())
parameters = {'logisticregression__C': [.001, .01, .1, 1, 10]} # grid search
grid = GridSearchCV(pipeline, parameters, cv=5, n_jobs=-1)
grid.fit(text_train, y_train)
print('Top CV score: %s' % grid.best_score_)
# -
# ### Yelp Dataset
#
# Our goal here is to leverage the model trained from movie reviews to identify the sentiment of the Yelp reviews. The data provided by Yelp has these 9 features: business_id, cool, date, funny, review_id, stars, text and useful. We will assign the text reviews from the text feature.
#
# After identifying the predicted sentiment labels I spot-checked a few examples and they were correctly labeled. Since the Yelp dataset doesn't contain labels we can't verify precisely how well the model classified the reviews. After looking over the results it's interesting to see the positive reviews outweigh the negative from the sample. I wasn't expecting this as users are usually more driven to spend the time to write reviews if they experienced issues rather than had great experiences. I feel that sentiment analysis can have a great affect on quantifying data like user reviews. It has a lot of potential to better understand customers, businesses and products as well as influence other systems with the information it finds.
# +
# Parsing the json file provided by Yelp
# Thank you - https://github.com/karenxiao/pandas-express
l = []
with open('../data/yelp-dataset/review.json', 'r') as f:
for line in f:
l.append(json.loads(line.rstrip()))
yelp_df = pd.DataFrame.from_dict(l)
reviews = yelp_df['text']
print(yelp_df.head(5))
# -
# ### Predictions
# #### Comparing Samples
#
# To spotcheck how well the model did we can read a couple of passages where the we have a sample from positive and negative reviews. The first is a good example of a postive review. The review contains strong words that might indicate a positive passage like "loved", "amazing", "incredible" and "excellent". There was a few negative sections within the review but this should be positive after reading. The second passage is an example of a negative review. This contains potentially indicating words like "no", "attitude", "awful".
# +
collection = [reviews[0], reviews[1]]
p = grid.predict(collection)
for d, pred in zip(collection, p):
print('%r \n>>> %s' % (d, pred))
print()
# -
# #### Word Frequencies
#
# Another method that could shed additional insight is word frequencies of common words found in sentiment analysis lexicons. Unfortunately, when I was spot-checking some samples, most were moderately short texts that didn't have many overlapping words aside from common stop words.
# +
# Drop stop words before counting frequencies in the second review
test = [w for w in reviews[1].split() if w not in STOP_WORDS]
# NOTE(review): top_vectorizer and sorted_by_tfidf are only defined in a
# *later* cell (the "Exploring the model" section); running this cell first
# raises NameError. The cells need reordering for a clean top-to-bottom run.
feature_names = np.array(top_vectorizer.get_feature_names())
print(Counter(test).most_common(10))
print()
# Words from the first review among the 20 highest-tfidf features
for word in reviews[0].split():
    if word in feature_names[sorted_by_tfidf[-20:]]:
        print(word)
print('----')
# Words from the first review among the 20 lowest-tfidf features
for word in reviews[0].split():
    if word in feature_names[sorted_by_tfidf[:20]]:
        print(word)
# -
# ### Exploring the model
#
# We can use tfidf scaling to help find words that distinguish the sentiment. Some of the negative words were "worst", "waste", "awful" and "bad"; while some of the positive words were "great", "excellent", "perfect" and "best".
# +
top_vectorizer = grid.best_estimator_.named_steps['tfidfvectorizer']
# yelp data
x_train = top_vectorizer.transform(yelp_df['text'])
# max value for each feature
max_v = x_train.max(axis=0).toarray().ravel()
sorted_by_tfidf = max_v.argsort()
feature_names = np.array(top_vectorizer.get_feature_names())
print('Lowest tfidf features: %s' % feature_names[sorted_by_tfidf[:10]])
print('Highest tfidf features: %s' % feature_names[sorted_by_tfidf[-10:]])
# Visualization using the mglearn package
mglearn.tools.visualize_coefficients(grid.best_estimator_.named_steps['logisticregression'].coef_,
feature_names, n_top_features=20)
# -
# Predict sentiment for all yelp reviews, add sentiment feature
yelp_df['sentiment'] = grid.predict(yelp_df['text'])
# +
# Plot distribution of predicted review sentiment
yelp_df['sentiment'].value_counts().plot(kind='bar',
title='Sentiment Analysis for Yelp Reviews')
print('1 = POSITIVE\n0 = NEGATIVE')
# -
# ### Resources
#
# http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
#
# Intro to Machine Learning with Python
#
# https://github.com/karenxiao/pandas-express
#
# https://www.yelp.com/dataset/
#
#
| notebooks/Sentiment Analysis with Yelp Reviews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview
# The main point of this script which is dependent on `resynthesize.py` and `signal_processing.py` is to resynthesize a a tone with quantifiable timbral features based on the harmonic distribution of an imported sound wav.
#
# © <i><NAME> 2021</i>
# +
#Formatting (to center plots):
from IPython.display import display, HTML
CSS = """
.output {
align-items: center;
}
"""
HTML('<style>{}</style>'.format(CSS))
# -
# # Importing Relevant Functions
from resynthesize import resynthesize, extract_harmonics, play_alma_mater
from signal_processing import pure_tone_complex, sound, magphase, get_spect
import matplotlib.pyplot as plt
from scipy.signal import spectrogram as sp
from scipy.io import wavfile
import numpy as np
# # Generating a Simulated Tone
# +
# Number of harmonics to extract from the recorded note
harmonics = 7;
# NOTE(review): 'first' is never used in this cell
first = 0;
dur_sec = 1;
# Indices of the harmonics actually resynthesized (all 7 here)
toPlay = np.array(
    [0,1,2,3,4,5,6])
fname = 'instruments/banjo_A4_normal.wav'
fs, x = wavfile.read(fname)
# Harmonic analysis of the banjo note (A4 = 440 Hz fundamental).
# extract presumably unpacks as (freqs, amps, phases, _, fs) given the
# indexing below — TODO confirm against extract_harmonics in resynthesize.py
extract = extract_harmonics(fname, fs = 44100, f_0 = 440, n_harms = harmonics);
fs_Hz = extract[4];
amp = extract[1][toPlay];
phase = extract[2][toPlay];
freq_Hz = extract[0][toPlay];
t_vect = np.arange(0,dur_sec*fs_Hz)/fs_Hz;
# Candidate amplitude envelopes; only env_banj (exponential decay) is used
# below — env_string/env_bassoon/env_flute are alternatives for other timbres.
env_banj = np.exp(-5*t_vect);
env_string = (1+0.15*np.sin(6*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect);
env_bassoon = (1+0.2*np.sin(5*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect);
env_flute = (1+0.50*np.sin(5*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect);
tone = resynthesize(amp, 'sound.wav', fs_Hz = 44100,freq_Hz = freq_Hz, dur_sec = 1, phi = phase,
                    scale = 1, tone_shift = 1, env_fxn = env_banj, type = 'saw', play_write = True, plot = False)
#play_alma_mater(extract,freq_Hz, fxn = 'string', type = 'sin', short = False);
# -
# # Spectrogram of Output
get_spect(tone, fs_Hz, DR = 300, BW = 60, xlim = [0,1], ylim = [0,4000],
colormap = 'cividis',title = 'Simulated Banjo | All Harmonics');
# +
plt.figure()
plt.plot(t_vect, tone-0.2, label = 'Resynthesized')
plt.xlim(0.3,0.32)
#original signal for sanity check
t_vect_orig = np.arange(0,len(x))/fs
plt.plot(t_vect_orig+0.01, x/max(x)+0.2,label = 'Original')
plt.xlim(0.3,0.32)
plt.ylim(-.6,0.6)
plt.legend();
plt.title('Comparison Between Resynthesized and Original Stimuli');
plt.xlabel('Time (s)')
plt.ylabel('Scaled Amplitude')
# +
# Full-duration comparison with an inset zoom, saved to comparison.png.
fig, ax = plt.subplots(figsize=[5,4]);
# The 0.25 s shift lines the resynthesized tone up with the original's onset.
plt.plot(t_vect+0.25, tone, color = 'k', label = 'Resynthesized')
# Original signal for sanity check (peak-normalized).
t_vect_orig = np.arange(0,len(x))/fs
plt.plot(t_vect_orig, x/max(x),color = "#9A0EEA",label = 'Original')
plt.ylim(-.6,0.6)
plt.legend();
plt.title('Comparison Between Resynthesized and Original Stimuli');
plt.xlabel('Time (s)')
plt.ylabel('Scaled Amplitude')
# Inset axes: a 10 ms zoom placed in the lower-right of the main plot.
axins = ax.inset_axes([0.55, 0.05, 0.4, 0.4])
axins.plot(t_vect, tone+0.2, color = "k")
axins.plot(t_vect_orig+0.01, x/max(x)-0.2, color = "#9A0EEA")
x1, x2, y1, y2 = 0.3, 0.31, -.5, .5
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
# Hide the inset's tick labels; the zoom rectangle on the parent shows scale.
axins.set_xticklabels('')
axins.set_yticklabels('')
ax.indicate_inset_zoom(axins, edgecolor="black")
plt.savefig('comparison.png', facecolor="white", dpi=100, transparent=False, bbox_inches = "tight", pad_inches = 0.05);
plt.draw()
plt.show()
| stimulus_generation_resynthesized/.ipynb_checkpoints/timbre_demo-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Basic imports
# +
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import copy
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
print("Python: %s" % sys.version)
print("Pytorch: %s" % torch.__version__)
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#audioviz
import librosa as libr
import librosa.display as display
import IPython.display
import pandas as pd
# -
# ## Hyperparameters
n_seconds = 3
n_epochs = 50
sampling_rate = 16000
number_of_mels =128
all_data = ['train-clean-360']
lr = 0.001
# ## Speech preprocessing
# Building the tensorToMFCC transformation for learning
class tensorToMFCC:
    """Convert a raw-audio tensor to an MFCC feature matrix.

    Expects ``y`` shaped ``(1, n_samples)``; returns a float torch tensor of
    MFCCs computed from a mel spectrogram (``number_of_mels`` bands, fmax 8 kHz)
    at the module-level ``sampling_rate``.
    """
    def __call__(self, y):
        # Flatten (1, n) -> (n,) for librosa.
        samples = np.reshape(y, (y.shape[1],))
        # Keyword arguments are required by librosa >= 0.10 (positional y/sr
        # were removed); sampling_rate replaces the previous hard-coded 16000.
        mel = libr.feature.melspectrogram(y=samples, sr=sampling_rate,
                                          n_mels=number_of_mels, fmax=8000)
        mfcc = libr.feature.mfcc(S=libr.power_to_db(mel))
        return torch.from_numpy(mfcc).float()
# Shared transform instance applied to every dataset item.
transform = tensorToMFCC()
# ## LibriSpeechDataSet
# Load personalized data set, inspired by this [repository](https://github.com/oscarknagg/voicemap/tree/pytorch-python-3.6)
# %load_ext autoreload
# %autoreload 2
sys.path.insert(0, './../../Utils')
from datasets import LibriSpeechDataset
from datasets import Libri_preload_and_split
# +
# Where the LibriSpeech data lives.
path = 'data/'
splits = [0.8, 0.2]  # input fraction of data you want partitioned (train, test)
attacking = False
# Fail fast on an invalid partition; the original merely printed a warning
# and carried on with a bad split.
if abs(sum(splits) - 1.0) > 1e-9:
    raise ValueError('splits do not sum to 1: %s' % (splits,))
# Splits data into above defined train:test splits
dfs = Libri_preload_and_split(path,all_data,n_seconds,pad=False,cache=True,splits=splits, attacking = attacking)
# Target train & test datasets (deterministic clip starts, MFCC transform).
valid_sequence_train = LibriSpeechDataset(path, df = dfs[0], seconds = n_seconds, downsampling=1,
                                          transform = transform, stochastic=False)
valid_sequence_test = LibriSpeechDataset(path, df = dfs[1], seconds = n_seconds, downsampling=1,
                                         transform = transform, stochastic=False)
# +
# Loaders for data for baseline model:
# batches of 32 MFCC tensors, shuffled, loaded by 8 worker processes.
train_loader = DataLoader(valid_sequence_train,
                          batch_size=32,
                          shuffle=True,
                          num_workers=8
                          # pin_memory=True # CUDA only
                         )
test_loader = DataLoader(valid_sequence_test,
                         batch_size=32,
                         shuffle=True,
                         num_workers=8
                         # pin_memory=True # CUDA only
                        )
# -
# Pull one batch to sanity-check shapes. next(iter(...)) replaces the
# Python-2-era .next() method, which does not exist on Python 3 iterators.
recording, speaker = next(iter(train_loader))
print(recording.shape)
print(valid_sequence_train.num_speakers)
# ## Cyphercat utilities
sys.path.insert(0,'../../Utils/')
from train import *
from metrics import *
import models
from data_downloaders import *
# ## Models
# +
class ConvBlock(nn.Module):
    """Conv1d -> BatchNorm -> ReLU -> MaxPool(4) building block.

    The convolution (padding=1 with the kernel size used here, 3) preserves
    the sequence length; the max-pool then downsamples it by a factor of 4.
    """
    def __init__(self, n_input, n_out, kernel_size):
        super(ConvBlock, self).__init__()
        layers = [
            nn.Conv1d(n_input, n_out, kernel_size, padding=1),
            nn.BatchNorm1d(n_out),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=4, stride=4),
        ]
        self.cnn_block = nn.Sequential(*layers)
    def forward(self, x):
        # Delegate to the composed sequential stack.
        return self.cnn_block(x)
class CNN_classifier(nn.Module):
    """Three ConvBlock stages (each doubling channels and quartering length)
    followed by a two-layer fully connected head.

    The head expects ``8*in_size`` flattened features, i.e. the temporal
    dimension must collapse to 1 after the three pooling stages
    (input length 64 -> 16 -> 4 -> 1).
    """
    def __init__(self, in_size, n_hidden, n_classes):
        super(CNN_classifier, self).__init__()
        # Channel widths for the three downsampling stages.
        widths = [in_size, 2 * in_size, 4 * in_size, 8 * in_size]
        self.down_path = nn.ModuleList(
            ConvBlock(c_in, c_out, 3) for c_in, c_out in zip(widths, widths[1:])
        )
        self.fc = nn.Sequential(nn.Linear(8 * in_size, n_hidden), nn.ReLU())
        self.out = nn.Linear(n_hidden, n_classes)
    def forward(self, x):
        for stage in self.down_path:
            x = stage(x)
        # Flatten (batch, channels, 1) -> (batch, channels) for the head.
        x = x.view(x.size(0), -1)
        return self.out(self.fc(x))
# -
# Sanity check: push one real batch through a single ConvBlock...
test = ConvBlock(20, 40, 3)
aa = test(recording)
print(aa.shape)
valid_sequence_test.num_speakers
# ...then build the full model: 20 input channels (MFCC coefficients),
# 512 hidden units, one output logit per speaker.
classifier = CNN_classifier(20, 512, valid_sequence_test.num_speakers)
# classifier.apply(models.weights_init)
classifier.to(device)
test = classifier(recording.to(device))
print(test.shape)
optimizer = optim.Adam(classifier.parameters(), lr)
criterion = nn.CrossEntropyLoss()
# NOTE(review): epoch count is hard-coded to 50 here, ignoring n_epochs above.
train(classifier, train_loader, test_loader, optimizer, criterion, 50, verbose = False)
# ## Results
# ### Set-up
# - Audio features: MFCC
# - 5 epochs of training
# - 3 second recordings
# - Adam optimizer
# - lr = 0.001
# ### Performance
# - 95.71% accuracy on training
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize ``state`` to ``filename``; when ``is_best``, also copy it
    to 'model_best.pth.tar'."""
    # Local import: shutil is never imported at the top of this notebook,
    # so the original raised NameError whenever is_best was True.
    import shutil
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
# Epoch tag recorded in the checkpoint metadata and output filename.
epoch = 39
# Persist model + optimizer state so training can be resumed later.
save_checkpoint({
        'epoch': epoch,
        'arch': 'CNN_voice_classifier',
        'state_dict': classifier.state_dict(),
        'optimizer' : optimizer.state_dict(),
    }, False, filename = 'model_weights/CNN_voice_classifier360all_'+str(epoch)+'.pth')
| Classification_baselines/LibriSpeech/CNN_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prepare validation data
# Load sales for the period: 2017, January - March (PE, TRG)
# +
import pandas as pd
import numpy as np
# pd.DataFrame.from_csv was deprecated in pandas 0.21 and removed in 0.25;
# read_csv(index_col=0) + reset_index reproduces the old behaviour.
sales2017 = pd.read_csv('PE-TRG-Jan-Mar-2017.csv', index_col=0).reset_index()
# Remove duplications by summing up quantity by days.
sales2017 = pd.pivot_table(sales2017, values='Quantity', index=['Locationid','PLU','Year','Month','Day'], aggfunc=np.sum).reset_index()
# -
# Keep only March data for validation
# (the source extract covers January-March 2017).
sales_2017_march = sales2017[(sales2017['Year']==2017)&(sales2017['Month']==3)]
# ## Raw data properties
# +
# Unique (location, product) positions and the number of daily rows for each.
positions = sales_2017_march.groupby(['Locationid','PLU']).size().rename('counts').reset_index()
positions_count = len(positions.index)
locations_count = len(positions.drop_duplicates(['Locationid']).index)
products_count = len(positions.drop_duplicates(['PLU']).index)
def setCountGroup(row):
    """Bucket a position's row count into one of four labelled segments."""
    n = row['counts']
    if n < 10:
        return "1. less than 10"
    if n <= 20:
        return "2. 20 or less"
    if n <= 30:
        return "3. 30 or less"
    return "4. more than 30"
# NOTE(review): this is an alias, not a copy — the 'segment' column below is
# also added to `positions` (harmless here since counts were computed above).
counts_groups = positions
counts_groups['segment'] = counts_groups.apply(setCountGroup, axis=1)
# Size and share of each count segment.
counts_groups = counts_groups.groupby('segment').size().rename('counts').reset_index()
counts_groups['%'] = counts_groups.apply(lambda r: r['counts']/positions_count*100, axis=1)
# Row counts by sign of Quantity (negative = returns/corrections, presumably —
# TODO confirm with the data owner).
rows_in_march = len(sales_2017_march.index)
rows_with_negative = len(sales_2017_march[sales_2017_march['Quantity'] < 0].index)
rows_with_zero = len(sales_2017_march[sales_2017_march['Quantity'] == 0].index)
rows_with_positive = len(sales_2017_march[sales_2017_march['Quantity'] > 0].index)
# ==== output results ====
print("Total unique positions (Location-Recipe) %d" % (positions_count))
print("Total locations %d" % (locations_count))
print("Total recipes %d" % (products_count))
print("Data rows %d" % (rows_in_march))
print("Negative data %.2f%% (%d)" % (rows_with_negative/rows_in_march*100, rows_with_negative))
print("Zero data %.2f%% (%d)" % (rows_with_zero/rows_in_march*100, rows_with_zero))
print("Positive data %.2f%% (%d)" % (rows_with_positive/rows_in_march*100, rows_with_positive))
# Cell output: the segment breakdown table.
counts_groups
# -
# ## Save to CSV
# Persist the March validation slice for the validation notebook.
sales_2017_march.to_csv('sales-2017-march.csv')
| prepare-data-for-validation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="BJsrTjueHRPH" colab_type="text"
# # Introduction to Descriptive and Predictive Models
#
# This module introduces the core ideas of *machine learning*, which is subdivided into **unsupervised** and **supervised** machine learning.
#
# We break it into three segments:
#
# 1. Representing data for building machine learning models.
# 2. Unsupervised machine learning.
# 3. Supervised machine learning.
# -
# !pip install pandas
# !pip install numpy
# !pip install matplotlib
# !pip install sklearn
# + [markdown] id="pYvn19vEHuie" colab_type="text"
#
# ## 1. Representing Data for Building Machine Learning Models
#
# To this point, we've used **dataframes** to capture data. Dataframes allow for many rows, each containing named fields with heterogeneous types (string, Python object, image, etc.). Of course, we assume that each field generally has a value (recall how we did *data cleaning* for cases where values were missing).
#
# For machine learning, we'll typically want to use **arrays** (matrices) to represent data. Recall that arrays have **homogeneous** types, i.e., all values are of the same type. Typically we will use either integer or floating-point values for the array.
#
# We expect you to be generally familiar with Numpy arrays, but we will provide a brief refresher of a few key points...
#
# Given data in a dataframe, we will typically convert it into a matrix as follows:
#
# 1. We will convert each dataframe row (*instance*) to a matrix row.
# 1. We will drop columns that contain data we don't expect to be useful as **features** (i.e., that isn't predictive for the machine learning output).
# 1. We will convert the remaining columns to one or more columns in the matrix.
#
# Often, step #3 involves **feature extraction** (perhaps extracting a particular piece of data out of a dataframe field), normalization, or encoding.
# + [markdown] id="ekTh5PldPMjb" colab_type="text"
# ## 1.1 Quick Review of Numpy Arrays
# + id="zUGPfIOFHOVx" colab_type="code" outputId="71d7c58b-5c41-46fd-9b71-806205ec02a0" executionInfo={"status": "ok", "timestamp": 1579366692543, "user_tz": 300, "elapsed": 1677, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Basics of arrays: Numpy
import numpy as np
# We start with a simple uninitialized array. np.empty is the documented way
# to get one; constructing np.ndarray directly is discouraged by NumPy.
# Its contents are whatever happened to be in memory, not meaningful values.
arr = np.empty((4,2))
# Show dimensions
arr.shape
# + id="ULQ4D9xIE2Cf" colab_type="code" outputId="49d9a3e9-07a4-4ab3-c07e-cafa47fb79a3" executionInfo={"status": "ok", "timestamp": 1579366693907, "user_tz": 300, "elapsed": 3023, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 85}
# Echo the array as the cell output (values are arbitrary — uninitialized).
arr
# + id="5a5R0u--Enuf" colab_type="code" outputId="bba52787-82c2-45e7-f8ca-bf974d0ad069" executionInfo={"status": "ok", "timestamp": 1579366693909, "user_tz": 300, "elapsed": 3007, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 85}
# We can also initialize with zeros or ones...
arr2 = np.zeros((4,2))
# Display the zero-filled array.
arr2
# + [markdown] id="BTHhjco-PJen" colab_type="text"
# ## 1.2 Encoding Categorical Data
#
# While arrays are integer-valued, sometimes we need to start with DataFrame data, which may be categorical. How do we go from that to numbers?
# + id="s5PC-HWhPyI-" colab_type="code" outputId="e9754959-d87d-49ed-f3c8-a37be1e710fe" executionInfo={"status": "ok", "timestamp": 1579366693911, "user_tz": 300, "elapsed": 2996, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 260}
import pandas as pd
# Toy dataset: seven large US cities and their states. TX appears twice,
# which matters for the one-hot encoding demo below.
addresses_df = pd.DataFrame([{'city': 'New York', 'state': 'NY'},\
                             {'city': 'Los Angeles', 'state': 'CA'},\
                             {'city': 'Chicago', 'state': 'IL'},\
                             {'city': 'Houston', 'state': 'TX'},\
                             {'city': 'Phoenix', 'state': 'AZ'}, \
                             {'city': 'Philadelphia', 'state': 'PA'}, \
                             {'city': 'San Antonio', 'state': 'TX'}])
addresses_df
# + [markdown] id="mkYOuDGCP7aA" colab_type="text"
# Suppose we want to turn this into machine learning features! Let's focus on `state`. This is categorical and string-valued, so we may need to *one-hot encode* it. Each unique string will receive its own column, and the column will be set to `0` if the value isn't present, or `1` if it is. Only one column will be set for each row, hence "one-hot".
#
# Pandas' `get_dummies` function will take a DataFrame or Series, and produce a one-hot-encoded DataFrame useful for machine learning.
# + id="0rJAXwoSQIYj" colab_type="code" outputId="b022deef-fe98-40d0-f7a4-2a5cc05db756" executionInfo={"status": "ok", "timestamp": 1579366693912, "user_tz": 300, "elapsed": 2983, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 260}
# One-hot encode the state column: one 0/1 column per unique state value.
pd.get_dummies(addresses_df['state'])
# + id="AM1SRoLTSaKy" colab_type="code" outputId="8cde0e91-2ff6-4a3d-c7c1-1ed97a0cce7c" executionInfo={"status": "ok", "timestamp": 1579366693913, "user_tz": 300, "elapsed": 2970, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
# And if we really want an array...
# to_numpy() drops the index/column labels and yields a plain 2-D array.
pd.get_dummies(addresses_df['state']).to_numpy()
# + id="pOLiJOBNdsjC" colab_type="code" outputId="d87bd4bc-9c0e-4411-95bc-6ee64e0ee597" executionInfo={"status": "ok", "timestamp": 1579366693914, "user_tz": 300, "elapsed": 2940, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
# A more realistic use case of slicing... We often start with training
# data that has *labels*, i.e., in- or out-of-class
# Suppose we wanted addresses in states that have the letter 'A' in them...
# Boolean label column: True when the state abbreviation contains 'A'.
addresses_df['has_a'] = addresses_df['state'].apply(lambda x: 'A' in x)
addresses_df
# Features = one-hot states; last column = the label.
data = pd.get_dummies(addresses_df['state'])
data['class'] = addresses_df['has_a']
# This is what the training set might look like
data = data.to_numpy()
data
# + id="V_X8UeXOeoZX" colab_type="code" outputId="6e79365a-81fa-43cc-d5b6-d294826de24d" executionInfo={"status": "ok", "timestamp": 1579366693915, "user_tz": 300, "elapsed": 2927, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 187}
# Separate the label column from the feature columns by slicing.
# All rows, last column
y = data[:,-1]
# All rows, all but last column
X = data[:,0:-1]
print('Labels:')
print(y)
print('Training data:')
print(X)
# + [markdown] id="UueRwtL7R6Pt" colab_type="text"
# ## 1.3 Slicing Parts of an Array
# + id="0YojU_JGExhr" colab_type="code" outputId="2f386b6b-5f04-4587-a6d1-2e77a440549d" executionInfo={"status": "ok", "timestamp": 1579366693916, "user_tz": 300, "elapsed": 2909, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Slicing gives a subset of an array
# In each dimension you can give a left:right
# range. Ranges are left-inclusive and right exclusive
# Here: rows 1-2, column 1.
arr[1:3,1]
# + id="tvPh_MqeFRbH" colab_type="code" outputId="08a7ad32-aaa3-425d-bf4f-b64e4b0dda41" executionInfo={"status": "ok", "timestamp": 1579366693917, "user_tz": 300, "elapsed": 2896, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mChyQQ34E9ZYcOXkXp9pxUqwR9de31_8B86KVtR3Jc=s64", "userId": "03300870303029638058"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Negative numbers cut from the end:
# second-to-last row, columns from 1 up to (but excluding) 3 — NumPy clips
# the range to the array's actual width.
arr[-2,1:3]
# + [markdown] id="bTO5Y-JDTggl" colab_type="text"
# # 2. Unsupervised Machine Learning
#
# ## 2.1 Dimensionality Reduction
#
# ### Example Data
#
# We'll start by using PCA, which is helpful in reducing dimensionality. Here we use a dataset on types of glass -- useful in criminology where we may need to identify a glass shard by its purpose. This dataset comes from the UC Irvine Machine Learning Repository; it was created by B. German in Britain's Home Office Forensic Science Service and donated by Dr. <NAME>ler of Diagnostic Products Corporation. You can read more about it at:
#
# https://archive.ics.uci.edu/ml/datasets/glass+identification
#
#
# + id="yo7YgJWHXDiX" colab_type="code" outputId="2724895a-901c-4dc4-fb59-13d7213b4d6a" executionInfo={"status": "ok", "timestamp": 1580669560119, "user_tz": 300, "elapsed": 359, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 415}
# Load into a dataframe. The file has no header row (header=None), so column
# names are supplied explicitly.
import pandas as pd
# UCI glass dataset: refractive index + 8 element concentrations, plus an ID
# column and the glass Type label.
glass_df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data', header=None,
                       names=['ID','RefractiveIndex','Na','Mg','Al','Si','K','Ca','Ba','Fe','Type'])
glass_df
# + id="emfInXU-sNB2" colab_type="code" outputId="4ff34e7e-6aeb-42ac-c49f-a44b70f7726f" executionInfo={"status": "ok", "timestamp": 1580669561405, "user_tz": 300, "elapsed": 741, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 279}
import matplotlib
import matplotlib.pyplot as plt
# Quick look at two raw features before any scaling.
fig1 = glass_df.plot.scatter(x='RefractiveIndex',y='Na')
# + id="oBHmrP7QXM2s" colab_type="code" outputId="18a27386-7c99-4e9f-b904-d0a50203cdc9" executionInfo={"status": "ok", "timestamp": 1580669561902, "user_tz": 300, "elapsed": 551, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 415}
# Remove the ID and the Type labels for the training data
X = glass_df.drop(['ID','Type'], axis=1)
# Labels
y = glass_df['Type']
# Let's see the data
X
# + [markdown] id="DmjNbBha_0BO" colab_type="text"
# ### 2.1.1 Principal Components Analysis (PCA)
#
# We will now use **PCA** to reduce the number of dimensions in the data.
# + [markdown] id="hYV4nyKqiL06" colab_type="text"
# Best practice for PCA is to scale the data...
# + id="4Ks0QAlqc0I8" colab_type="code" outputId="80b411f5-75b7-4672-a042-0dfd58968236" executionInfo={"status": "ok", "timestamp": 1580669564937, "user_tz": 300, "elapsed": 762, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 243}
from sklearn.preprocessing import StandardScaler
# Standardize features to zero mean and unit variance. (The original comment
# said "between 0 and 1" — that would be MinMaxScaler; StandardScaler z-scores.)
# Note: X is a plain ndarray after this, no longer a DataFrame.
X = StandardScaler().fit_transform(X)
X
# + id="2MQ0-vE5uQP6" colab_type="code" outputId="159a2bc0-8754-4902-83e9-9094e35a846f" executionInfo={"status": "ok", "timestamp": 1580669567795, "user_tz": 300, "elapsed": 615, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 283}
# Re-plotting now with the mean at the center!
# (positional indexing works because X is now an ndarray)
plt.scatter(X[:,0], X[:,1])
# + [markdown] id="XpEKRnjEYAIr" colab_type="text"
# ### Running PCA
#
# We start by creating an instance of the PCA class, and fit it to the data...
# + id="hc3jHCm_X3o1" colab_type="code" colab={}
from sklearn.decomposition import PCA
# Keep all 9 components for now; explained variance is inspected below before
# choosing a smaller number.
pca = PCA(n_components=9)
X2 = pca.fit_transform(X)
# + id="vkpPHzeDX_Jm" colab_type="code" outputId="fd2ddb57-1ce4-4a4f-f832-81235247fbdf" executionInfo={"status": "ok", "timestamp": 1580669569712, "user_tz": 300, "elapsed": 362, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 329}
# Let's see the components
# Each row is a principal axis (unit vector) in the original feature space.
pca.components_
# + id="yCUTaUOowxUP" colab_type="code" outputId="96e7bfe0-b0e4-4e25-c5ad-d0942651a6ce" executionInfo={"status": "ok", "timestamp": 1580669571230, "user_tz": 300, "elapsed": 344, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# The first two *original* (scaled) features, for comparison with X2.
X[:,0:2]
# + id="H0OMBOpCvM3k" colab_type="code" outputId="ad75dc4d-b30e-400c-80fa-5315a5835f5f" executionInfo={"status": "ok", "timestamp": 1580669572729, "user_tz": 300, "elapsed": 538, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
# Visualization code based on
# https://stackoverflow.com/questions/18299523/basic-example-for-pca-with-matplotlib
import numpy as np
# Let's take our first two dimensions, as before
data = X[:, 0:2]
mu = data.mean(axis=0)
# Standardize the 2-D slice before extracting its principal axes via SVD.
data = (data - mu)/data.std(axis=0)
eigenvectors, eigenvalues, V = np.linalg.svd(data.T, full_matrices=False)
projected_data = np.dot(data, eigenvectors)
# Average spread along the principal axes; used to scale the arrows.
sigma = projected_data.std(axis=0).mean()
fig, ax = plt.subplots()
ax.scatter(X[:,0], X[:,1])
# Draw each principal axis as an arrow from the (pre-standardization) mean.
for axis in eigenvectors:
    start, end = mu, mu + sigma * axis
    ax.annotate(
        '', xy=end, xycoords='data',
        xytext=start, textcoords='data',
        arrowprops=dict(facecolor='red', width=2.0))
ax.set_aspect('equal')
plt.show()
# + id="h-Ycb38szfmI" colab_type="code" outputId="a5ee1152-250f-4aac-854c-e41bbcc7031c" executionInfo={"status": "ok", "timestamp": 1580669574058, "user_tz": 300, "elapsed": 570, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 283}
# Here is the transformed data along the first 2 components
plt.scatter(X2[:,0], X2[:,1])
# + [markdown] id="uZyMIkq3eFMU" colab_type="text"
# ### 2.1.2 How Many Components? Principal Components vs Explained Variance
#
# How much does each component explain the variance? We can look at the `explained_variance_ratio_` to tell...
# + id="7oxDWx30d2YB" colab_type="code" outputId="48195962-d8e6-4a71-f714-20a34cb1e8c1" executionInfo={"status": "ok", "timestamp": 1580669575063, "user_tz": 300, "elapsed": 466, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 52}
# Suppress scientific notation so the ratios are easy to read.
np.set_printoptions(suppress=True)
# Fraction of total variance explained by each component, in order.
pca.explained_variance_ratio_
# + id="nH6_48jWl-k2" colab_type="code" outputId="f982adbc-6f05-4858-fec7-a373006a2593" executionInfo={"status": "ok", "timestamp": 1580669576671, "user_tz": 300, "elapsed": 620, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 283}
# See how much is contributed by the first few terms
# (cumulative sum of the explained-variance ratios).
pc_vs_variance = np.cumsum(pca.explained_variance_ratio_)
pc_vs_variance
plt.plot(pc_vs_variance)
# + [markdown] id="VmkvrPMpeQbu" colab_type="text"
# ... So, the first 6 components (of 9) gives 95% explained variance. Not too bad!
# + [markdown] id="jJ_fHVl-cWw1" colab_type="text"
# ### 2.1.3 Learning over PCA-Reduced Data
#
# From the above, we saw how to do PCA on the overall dataset. But let's do it more methodically as part of machine learning. We'll start with separate training and test data.
#
#
# + id="ZdqkKECmc-dp" colab_type="code" outputId="4ddbd8d9-6238-405b-ca63-4bb1e307e36b" executionInfo={"status": "ok", "timestamp": 1580669578369, "user_tz": 300, "elapsed": 354, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 243}
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(\
    X, y, test_size=0.20, random_state=42)
# Fit the PCA on the training data
pca = PCA(n_components=6)
pca.fit(X_train)
# and transform it
X_train_2 = pca.transform(X_train)
# Then train a simple linear regression classifier
# (tries to find the best weighted linear combination to
# match the output)
# NOTE(review): this regresses on the integer Type labels, treating the
# classes as ordinal values — a proper classifier may fit better.
regr = linear_model.LinearRegression()
regr.fit(X_train_2, y_train)
X_train_2
# + id="MzspjW-9bW7I" colab_type="code" outputId="060936d0-89e6-4867-ffd2-e2c5e89089c0" executionInfo={"status": "ok", "timestamp": 1580669579915, "user_tz": 300, "elapsed": 432, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# Apply the training-set PCA to the test data, then evaluate.
X_test_2 = pca.transform(X_test)
regr.predict(X_test_2)
# NOTE(review): LinearRegression.score is R^2, not classification accuracy.
regr.score(X_test_2, y_test)
# + [markdown] id="r8bsfFWpgE7I" colab_type="text"
# So, a score of about 0.874 on the test set (note: for `LinearRegression`, `score` is the R² coefficient of determination, not classification accuracy).
#
# How does that compare with working directly on the real data?
# + id="h0UJilwrfxdu" colab_type="code" outputId="51e0ff7f-a2ca-4f27-e863-3d74ec6c855f" executionInfo={"status": "ok", "timestamp": 1580669581001, "user_tz": 300, "elapsed": 313, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# Train and evaluate over non-dimensionality-reduced data
# (same split, all 9 features) for comparison with the PCA-reduced model.
regr_full_data = linear_model.LinearRegression()
regr_full_data.fit(X_train, y_train)
regr_full_data.predict(X_test)
regr_full_data.score(X_test, y_test)
# + [markdown] id="4pen2UiLAMsD" colab_type="text"
# ### 2.1.4 t-SNE
#
# For high-dimensional data, we often use t-Distributed Stochastic Neighbor Embedding (t-SNE) to reduce dimensionality. This is a stochastic method so it doesn't always produce the same output.
# + id="t7ywFmPFhdBi" colab_type="code" outputId="eb25dc80-9d4f-4c1b-9556-dea329a0d96d" executionInfo={"status": "ok", "timestamp": 1580669584706, "user_tz": 300, "elapsed": 2181, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 283}
from sklearn.manifold import TSNE
# Project the standardized features to 2-D. t-SNE is stochastic and no
# random_state is fixed here, so the layout differs between runs.
X_embedded = TSNE(n_components=2).fit_transform(X)
plt.scatter(X_embedded[:,0],X_embedded[:,1])
# + [markdown] id="Vqb3OWu_Apg9" colab_type="text"
# ## 2.2 Clustering
#
# ### 2.2.1 k-Means Clustering
# + id="zBTrS2wurTmC" colab_type="code" outputId="fc9c2f8d-7bdc-4186-d6c9-d834c84293c5" executionInfo={"status": "ok", "timestamp": 1580669584707, "user_tz": 300, "elapsed": 1169, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Let's take the data from X_embedded and try to choose a clustering!
X_embedded
# + [markdown] id="Hh6ox3hg2SGo" colab_type="text"
# ### The Algorithm, in Full Detail
#
# Let's start with the basic algorithm. We'll split it into three components:
# 1. The clustering coefficient or `error` function -- how far away are points?
# 2. The function to get the most appropriate cluster for a point, `get_nearest`.
# 3. The main K-Means algorithm.
#
# For this version we will initialize with randomly chosen points in the dataset, then iteratively recompute until we reach convergence (which is detected when every point remains in its current cluster).
# + id="83hAdqB4BwsX" colab_type="code" outputId="fe716f3f-69fd-4494-ef82-8463ad251813" executionInfo={"status": "ok", "timestamp": 1580669588936, "user_tz": 300, "elapsed": 991, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
from random import randint
def error(x,c):
    """
    Error (distance, clustering coefficient) between a point ``x`` and the
    centroid ``c``: the Euclidean norm of their difference, i.e. the square
    root of the summed squared error.
    """
    return np.linalg.norm(x - c)
def get_nearest(c_list, x):
    """Return the index of the centroid in ``c_list`` nearest to ``x``.

    Returns -1 when ``c_list`` is empty. Each distance is computed once per
    candidate (the original called ``error`` twice per iteration), and
    ``np.inf`` replaces ``np.infty``, which was removed in NumPy 2.0.
    """
    nearest = -1
    nearest_error = np.inf
    for i, v in enumerate(c_list):
        d = error(x, v)
        if d < nearest_error:
            nearest_error = d
            nearest = i
    return nearest
def kmeans(X, k, show):
    """
    Simple k-means algorithm over 2-D points.

    Parameters
    ----------
    X : ndarray of shape (n, 2)
        Points to cluster (only the first two columns are used).
    k : int
        Number of clusters.
    show : bool
        If True, print progress at each iteration.

    Returns
    -------
    tuple
        ``(centroids, assignments)`` where ``centroids`` has shape (k, 2)
        and ``assignments`` maps each point to a cluster index.
    """
    # Initialize the centroids to randomly chosen data points.
    # random.randint is inclusive at BOTH ends, so the upper bound must be
    # X.shape[0] - 1; the original could index one past the end of X.
    centroids = np.zeros((k,2))
    cluster_assignments = [0 for i in range(len(X))]
    for i in range(0, k):
        centroids[i] = X[randint(0, X.shape[0] - 1)]
    if show:
        print('Initialized centroids to: ')
        print(centroids)
    iteration = 1
    changed = True
    while changed:
        if show:
            print ('Iteration %d'%iteration)
        iteration = iteration + 1
        # Nothing happened in this iteration, by default
        changed = False
        # Assign points to clusters
        for i,x in enumerate(X):
            nearest = get_nearest(centroids, x)
            # We changed a cluster mapping!
            if nearest != cluster_assignments[i]:
                changed = True
                cluster_assignments[i] = nearest
        if changed:
            # Recompute each centroid as the mean of its assigned points.
            for i in range(len(centroids)):
                points = [j for j,v in enumerate(cluster_assignments) if v == i]
                if show:
                    print ('Cluster %d'%i)
                # (renamed the comprehension variable: the original reused
                # the outer loop variable `i`, which was confusing)
                X_subset = np.array([[X[j,0],X[j,1]] for j in points])
                # An empty cluster keeps its previous centroid.
                if len(X_subset):
                    centroids[i][0] = np.sum(X_subset[:, 0]) / len(points)
                    centroids[i][1] = np.sum(X_subset[:, 1]) / len(points)
                    if show:
                        # This print ran unconditionally in the original,
                        # spamming output even with show=False.
                        print (centroids[i])
        elif show:
            print('Converged!')
    return (centroids, np.array(cluster_assignments))
k = 2
# Show the raw embedded points, then run the hand-rolled k-means on them.
plt.scatter(X_embedded[:,0], X_embedded[:,1], c='red', marker='o', s=50)
plt.grid()
plt.show()
centroids, assignments = kmeans(X_embedded, k, True)
assignments
# + id="wE87q7lm0TOU" colab_type="code" outputId="cff1cd9c-5cdf-42e6-9dbe-38d5fb2a94c5" executionInfo={"status": "ok", "timestamp": 1580669589155, "user_tz": 300, "elapsed": 529, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 283}
# Plotting along with the Centroids
# Split the points by their assigned cluster and overlay the learned
# centroids as green stars.
clust0 = np.array([x for i, x in enumerate(X_embedded) if assignments[i] == 0])
clust1 = np.array([x for i, x in enumerate(X_embedded) if assignments[i] == 1])
plt.scatter(clust0[:,0], clust0[:,1], c='red', marker='o', s=50)
plt.scatter(clust1[:,0], clust1[:,1], c='black', marker='o', s=50)
plt.scatter(centroids[:,0], centroids[:,1], marker='*', s=50, c='g')
# + id="15YV5OnlCIgg" colab_type="code" outputId="e1f9be79-f859-47a9-9c6b-fddacb066b99" executionInfo={"status": "ok", "timestamp": 1580669590489, "user_tz": 300, "elapsed": 577, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 69}
from sklearn.cluster import KMeans
# Fit scikit-learn's KMeans for comparison with the hand-rolled version above.
# FIX: every parameter after `n_clusters` is keyword-only in modern
# scikit-learn, so `KMeans(2, 'random', ...)` raises a TypeError;
# pass the arguments by name instead.
km = KMeans(n_clusters=2, init='random', n_init=1, max_iter=300, random_state=0)
km.fit(X_embedded)
# + id="DYlKOGWhV4sm" colab_type="code" outputId="c79ea325-5747-42bb-d06b-2aec341bece0" executionInfo={"status": "ok", "timestamp": 1580669590836, "user_tz": 300, "elapsed": 428, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 52}
# Centroids found by scikit-learn's KMeans.
km.cluster_centers_
# + id="vD5sJfDjV57W" colab_type="code" outputId="1e0b4d4f-d78d-4629-a809-0c0695294e46" executionInfo={"status": "ok", "timestamp": 1580669591139, "user_tz": 300, "elapsed": 344, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 191}
# Per-point cluster labels from scikit-learn's KMeans.
km.labels_
# + id="-Dj_ABunXGaz" colab_type="code" outputId="2762f8a0-4175-4289-966f-ac0bf98445e2" executionInfo={"status": "ok", "timestamp": 1580669592318, "user_tz": 300, "elapsed": 571, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 283}
# Plot the scikit-learn clustering.
# NOTE(review): the starred centroids plotted below are `centroids` from the
# hand-rolled k-means above, not `km.cluster_centers_` — presumably a
# copy-paste slip unless an explicit comparison was intended; confirm.
clust0 = np.array([x for i, x in enumerate(X_embedded) if km.labels_[i] == 0])
clust1 = np.array([x for i, x in enumerate(X_embedded) if km.labels_[i] == 1])
plt.scatter(clust0[:,0], clust0[:,1], c='red', marker='o', s=50)
plt.scatter(clust1[:,0], clust1[:,1], c='black', marker='o', s=50)
plt.scatter(centroids[:,0], centroids[:,1], marker='*', s=50, c='g')
# + [markdown] id="Q3ApcyV2MKLh" colab_type="text"
# ## 2.2.2 Choosing the Right *k* for Clustering
#
# To pick the right value of *k* for our data, we will search the space of possible values -- looking at the smallest one that (roughly) minimizes the total error (Euclidean distance), aka the distortion.
# + id="awCUR0b6IFDC" colab_type="code" outputId="a80985b7-a210-45cc-ab82-6613ba99c3dc" executionInfo={"status": "ok", "timestamp": 1580669596297, "user_tz": 300, "elapsed": 509, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00573172153387237137"}} colab={"base_uri": "https://localhost:8080/", "height": 279}
# Elbow method: fit KMeans for k = 1..max_k and record the distortion
# (inertia) of each fit, then plot distortion against k.
distortions = []
max_k = 10
for i in range(1,max_k+1):
    km = KMeans(n_clusters=i,
                init='random',
                n_init=1,
                max_iter=300,
                random_state=0)
    # NOTE(review): this fits on `X` while every other cell in this section
    # clusters `X_embedded` — confirm which matrix the elbow search is meant
    # to use.
    km.fit(X)
    # The distortion is called inertia in SciKit
    distortions.append(km.inertia_)
plt.plot(range(1,max_k+1), distortions, marker='o')
plt.xlabel('Cluster count (k)')
plt.ylabel('Distortion')
plt.show()
# + id="4ypZeyVe0A__" colab_type="code" colab={}
| opends4all-resources/opends4all-machine-learning/UNSUPERVISED-ML-matrices-pca-clustering-basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python3
import os
import json
from datetime import datetime
import tensorflow as tf
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler
from src.architecture import FootpathModelArchitecture
from src.miscellaneous import load_random_image, preprocess_image
from src.training_preparation import (create_checkpoint_callback,
create_training_dataset, load_image_test,
load_image_train, load_vgg16_weights,
map_to_masks, sum_up_training_dataset,
sum_up_validation_dataset)
from src.visualization import create_overlay, display_images
# -
# ## Load and prepare parameters
# Load the training configuration from a single JSON file and split it into
# one dict per concern (data, input, compiler, solver, output, testing).
parameter_directory = './parameters/'
training_parameter_file = 'training.json'
parameter_path = parameter_directory + training_parameter_file
with open(parameter_path) as parameter_file:
    training_parameters = json.load(parameter_file)
data_parameters = training_parameters['data']
input_parameters = training_parameters['input']
compiler_parameters = training_parameters['compiler']
solver_parameters = training_parameters['solver']
output_parameters = training_parameters['output']
testing_parameters = training_parameters['testing']
# Dataset sizes are taken from the number of files on disk.
NUMBER_OF_TRAINING_IMAGES = len(os.listdir(data_parameters['training_data_directory'] +
                                           data_parameters['training_image_folder']))
NUMBER_OF_VALIDATION_IMAGES = len(os.listdir(data_parameters['training_data_directory'] +
                                             data_parameters['validation_image_folder']))
# Shuffle buffer covers the whole training set; one step per image per epoch.
BUFFER_SIZE = NUMBER_OF_TRAINING_IMAGES + 1
TRAINING_STEPS = NUMBER_OF_TRAINING_IMAGES
VALIDATION_STEPS = NUMBER_OF_VALIDATION_IMAGES
# Checkpoint frequency expressed in steps (epochs-between-checkpoints * steps-per-epoch).
SAVE_CHECKPOINT_STEPS = NUMBER_OF_TRAINING_IMAGES * solver_parameters['save_checkpoints_steps']
# ## Build the model structure
# Notes: https://stackoverflow.com/questions/47787011/how-to-disable-dropout-while-prediction-in-keras
# Build the segmentation model for the configured input size,
# then initialise its encoder with pretrained VGG16 weights.
input_shape = (input_parameters['width'], input_parameters['height'], input_parameters['channels'])
footpath_model = FootpathModelArchitecture(input_shape).footpath_model
# ## Load VGG16 model weights up to conv5_3
load_vgg16_weights(footpath_model)
# ## Prepare the training data
# Before training with the Kitti road dataset, it was further processed. In particular the masks were cleaned from a third shade so that two colors remain, representing street and no-street areas. Also the folder structure was changed: The training folder consists of the folders `training_frames`, `training_masks`, `validation_frames` and `validation_masks`.
#
# The images are split into ~80% (231 images) training data and ~20% (58 images) validation data.
# ### Folder structure:
# Keep in mind that although the file names in testing and training are the same, the images are not.
#
# kitti_road_data
# - testing
# - um_000000.png
# - um_000001.png
# ...
# - umm_000000.png
# - umm_000001.png
# ...
# - uu_000000.png
# - uu_000001.png
# ...
# - training
# - training_images
# - um_000000.png
# - um_000001.png
# ...
# - umm_000000.png
# - umm_000001.png
# ...
# - uu_000000.png
# - uu_000001.png
# ...
# - training_masks
# - um_000000.png
# - um_000001.png
# ...
# - umm_000000.png
# - umm_000001.png
# ...
# - uu_000000.png
# - uu_000001.png
# ...
# - validation_images
# - um_000028.png
# - um_000029.png
# ...
# - umm_000028.png
# - umm_000029.png
# ...
# - uu_000028.png
# - uu_000029.png
# ...
# - validation_masks
# - um_000028.png
# - um_000029.png
# ...
# - umm_000028.png
# - umm_000029.png
# ...
# - uu_000028.png
# - uu_000029.png
# ...
# Build the tf.data pipelines: load image/mask pairs, resize them to the
# configured input size, then shuffle/batch for training and batch for validation.
training_data = create_training_dataset(data_parameters['training_data_directory'])
training_set = training_data['training']
training_set = training_set.map(lambda training_set: load_image_train(training_set,
                                                                      (input_parameters['width'],
                                                                       input_parameters['height'])))
validation_set = training_data['validation']
validation_set = validation_set.map(lambda validation_set: load_image_test(validation_set,
                                                                           (input_parameters['width'],
                                                                            input_parameters['height'])))
training_set = sum_up_training_dataset(training_set, buffer_size=BUFFER_SIZE,
                                       batch_size=solver_parameters['batch_size'])
validation_set = sum_up_validation_dataset(validation_set,
                                           batch_size=solver_parameters['batch_size'])
# ### Show samples of training and validation data
# Show one sample (image, mask, overlay) from the training and validation sets.
# FIX: the loops used `for _, sample in enumerate(...)` while discarding the
# index — iterating the dataset directly is equivalent and clearer.
for sample in training_set.take(1):
    original_image = sample[0]
    mask = sample[1]
overlay = create_overlay(original_image, mask)
display_images([original_image, mask, overlay], ['origin', 'mask', 'overlay'])
for sample in validation_set.take(1):
    original_image = sample[0]
    mask = sample[1]
overlay = create_overlay(original_image, mask)
display_images([original_image, mask, overlay], ['origin', 'mask', 'overlay'])
# ## Define callbacks
# TensorBoard logs go into a timestamped subfolder of the output directory.
log_directory = (output_parameters['output_folder'] +
                 output_parameters['log_folder'] +
                 datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = TensorBoard(log_dir=log_directory)
# Periodically save the best weights (by binary cross-entropy) only.
checkpoint_directory = (output_parameters['output_folder'] + output_parameters['checkpoint_folder'])
checkpoint_callback = create_checkpoint_callback(checkpoint_directory,
                                                 save_best_only=True,
                                                 monitor='binary_crossentropy',
                                                 save_weights_only=True,
                                                 save_freq=SAVE_CHECKPOINT_STEPS)
# After the first two thirds of the epochs, switch to a lower learning rate.
limit = solver_parameters['epochs'] - (solver_parameters['epochs'] // 3)
adjusted_learning_rate = 0.005
def scheduler(epoch, initial_learning_rate, limit, adjusted_learning_rate):
    """Piecewise-constant learning-rate schedule.

    Returns ``initial_learning_rate`` for every epoch before ``limit`` — or
    unconditionally when ``limit`` is 0, i.e. the schedule is disabled — and
    ``adjusted_learning_rate`` from epoch ``limit`` onwards.
    """
    keep_initial = limit == 0 or epoch < limit
    return initial_learning_rate if keep_initial else adjusted_learning_rate
# Wrap the schedule in a Keras callback; the lambda binds the configured
# initial rate and the switch-over epoch computed above.
learning_rate_callback = LearningRateScheduler(lambda epoch: scheduler(epoch,
                                                                       compiler_parameters['learning_rate'],
                                                                       limit,
                                                                       adjusted_learning_rate))
# ## Compile and train the model
# Compile with Adam and binary cross-entropy (model outputs probabilities,
# hence from_logits=False), then train with the three callbacks defined above.
footpath_model.compile(optimizer=Adam(learning_rate=compiler_parameters['learning_rate'],
                                      epsilon=compiler_parameters['epsilon']),
                       loss=BinaryCrossentropy(from_logits=False),
                       metrics=[tf.keras.metrics.BinaryCrossentropy()])
model_history = footpath_model.fit(training_set,
                                   epochs=solver_parameters['epochs'],
                                   steps_per_epoch=TRAINING_STEPS,
                                   validation_steps=VALIDATION_STEPS,
                                   validation_data=validation_set,
                                   callbacks=[checkpoint_callback, tensorboard_callback, learning_rate_callback])
# ## Save final weights and trained model
output_model_directory = output_parameters['output_folder'] + output_parameters['output_model']
weights_directory = output_parameters['output_folder'] + output_parameters['weights']
footpath_model.save(filepath=output_model_directory)
footpath_model.save_weights(filepath=weights_directory)
# ## Test the model
# Load a random test image
# Pick a random test image at the model's input resolution.
test_image = load_random_image(testing_parameters['test_data_directory'],
                               image_dimension=(input_parameters['width'], input_parameters['height']))
# +
# Load a specific image
# test_image = tf.keras.preprocessing.image.load_img('./data/examples/uu_000088.png',
#                                                    target_size=(input_parameters['width'],
#                                                                 input_parameters['height']))
# -
# Run a single prediction and visualise input, predicted mask, and overlay.
preprocessed_test_image = preprocess_image(test_image, expand_dimension=True)
output = footpath_model.predict(preprocessed_test_image)
overlay = create_overlay(test_image, output, alpha=0.3, from_prediction=True)
display_images([test_image, output, overlay], ['original image', 'predicted mask', 'overlay'])
# +
#footpath_model = tf.keras.models.load_model('training_output-120e/fm_model.h5')
| train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# # Question 1
#
#
# ## Part a
#
# Plugging in the upwind difference scheme we have
#
# $u_x^- = \frac{u_i^{n} - u_{i-1}^{n}}{\Delta x}$
#
#
# $u_x^+ = \frac{u_{i+1}^{n} - u_{i}^{n}}{\Delta x}$
#
# $\begin{bmatrix} {u^{n+1}_j } \\ {v^{n+1}_j} \end{bmatrix} = \begin{bmatrix} u_n^j \\ v^n_j \end{bmatrix} + \dfrac{\Delta t}{\Delta x} \begin{bmatrix} {u_{j+1}^{n} - u_{j}^{n}} + 2 ({v_{j+1}^{n} - v_{j}^{n}}) \\ -3({v_j^{n} - v_{j-1}^{n}})\end{bmatrix} $
#
# ## Part b
#
# For the top equation,
#
# $ {u^{n+1}_j } = u_n^j + \dfrac{\Delta t}{\Delta x} ( {u_{j+1}^{n} - u_{j}^{n}} + 2 ({v_{j+1}^{n} - v_{j}^{n}}))$
#
#
# Plugging in the error associated with each step $\epsilon(u_n^j) = \exp(i(j \Delta x + n \Delta t))$ and $\epsilon(v_n^j) = \exp(i(v_0 + n \Delta x + j \Delta t))$
#
# $exp(i(\Delta t )) = 1 + \dfrac{\Delta t}{\Delta x} ( {exp(i \Delta x) - 1}) + 2( exp( i(v_0 + \Delta x) ) - exp(i v_0 ) )$
#
# For the bottom equation we get that
#
# $e^{i (\Delta t ) } = 1 + \dfrac{-3 \Delta t}{\Delta x} e^{i (-\Delta x)}$
#
#
# As a result the scheme is stable if
#
# As a result the scheme is stable if $ 0 \leq \dfrac{3 \Delta t}{\Delta x} \leq 1$ and $ 0 \leq \dfrac{2 \Delta t}{\Delta x} \leq 1$
# # Question 2
#
# one has a nice property no need to do upwinding
# not as clean in practice
# conservation
#
# $u_j$ on first term need to rewrite so difference between $u_j$ in hte flux part
#
# the conservation form is that we have a numerical flux at $j+1$ same for 1 index
#
# $f(u_{j+1}) - f(u_{j-1})$
#
# would like to have a numerical flux where there is a shift of the time
#
#
#
# ## Part a
#
# Using the fact that
#
# $f(u_{j+1}) + f(u_j) - f(u_j) - f(u_{j-1}) = f(u_{j+1}) - f(u_{j-1})$
#
# We can let $F(u_{j+1}, u_j) = f(u_{j+1}) - f(u_j)$. This gives us the following conservation from
#
# $u_i^{n+1} = u^n_i - \frac{\Delta t}{ \Delta x} \left( {F}^n_{i+1/2} - {F}^n_{i-1/2} \right)$
#
#
#
# ## Part b
#
# Using a Taylor expansion around $x_j$ and $t_n$, we get
#
#
# $u(x_j,t_{n+1}) = u(x_j,t_n) + \Delta t u_t(x_j, t_n) + \dfrac{\Delta t^2}{2}u_{tt}(x_j, t_n) +O(\Delta t^3) $
#
# $u(x_{j+1},t_{n}) = f(u(x_j,t_n)) + \Delta x f'(u)u_x(x_j, t_n) + \dfrac{\Delta x^2}{2} f''(u)u_{xx}(x_j, t_n) +O(\Delta x^3) $
#
# $u(x_{j-1},t_{n}) = f(u(x_j,t_n)) - \Delta x f'(u)u_x(x_j, t_n) + \dfrac{\Delta x^2}{2} f''(u)u_{xx}(x_j, t_n) +O(\Delta x^3) $
#
# Subtracting $u(x_{j-1},t_{n})$ from $u(x_{j+1},t_{n})$ we get $O(\frac{\Delta x^2}{\Delta t})$
#
# Similarly we get $O(\Delta t)$ for the $u(x_j,t_{n+1})$ term. As a result, the error order is
#
# $O(\frac{\Delta x^2}{\Delta t}) + O(\Delta t)$
#
#
#
# ## Part c
#
#
# We must show that $u_j^{n+1} = H(u_{j-1}^n, u_j^n, u_{j+1}^n)$ where $\partial H/\partial u_i >0$.
#
# In this case we can see that since
#
# $u_j^{n+1} = \dfrac{1}{2} ( u_{j+1}^n + u_{j-1}^n) - \dfrac{ \Delta t} {2 \Delta x} ( f(u_{j+1}^n) - f(u_{j-1}^n) )$
#
# If $\dfrac{ \Delta t} {2 \Delta x} $ is sufficiently small, then
#
# $u_j^{n+1} = \dfrac{1}{2} ( u_{j+1}^n + u_{j-1}^n) $ so it is clearly increasing in $u_{j+1}^n$ and $ u_{j-1}^n$
# # Question 3
#
#
#
# ## Part a
#
#
# $u^{n+1}=u^{n}-\frac{\Delta t}{\Delta x }(F_{j+\frac{1}{2}}-F_{j-\frac{1}{2}})$
#
#
# Where,
#
# $F_{j+\frac{1}{2}} = \begin{cases} F_{j+1/2}^{+} & u_j > 0 \\ F_{j+1/2}^{-} & u_j <0 \end{cases}$
#
# And,
#
# $F_{j+1/2}^{+} = \dfrac{1}{2} (f_{j+1} + f_j)$
# $F_{j+1/2}^{-} = \dfrac{1}{2} (f_{j} + f_{j-1})$
#
# And, where $f_j = x_j^2/2$
#
#
# With conservation form, if the numerical solution converges it will converge to the actual weak solution. If monotonicity is added it will be guaranteed to converge.
#
#
# ## Part b
#
#
# First we discretize over the spatial domain by letting
#
# $$u_h(t) = \sum \phi_i(t) \alpha_i$$
#
# Next we choose a test function
#
# $$v_h(t) = \sum \phi_j(t) \alpha_j$$
#
# Combining and integrating by parts we get that
#
# $\dfrac{d}{dt} \int u_h v_h dx + \int v_h f(u_h) n dx = \int f(u_h) \nabla v_h dx$
#
# With $P_1$ elements $\phi_i(t)$ and $\phi_j(t)$ are hat functions. Then we can apply usual upwind difference schemes to this weak formulation to fully discretize the problem. The flux could be determined by averaging.
#
# # Question 4
#
# We get the following 2 bilinear form
# $$ \sum_{j=1}^{J} \int_{x_{j-1/2} }^{x_{j+1/2}} u_x v_x + u v dx + u_x v|_{x_{j-1/2}}^{x_{j+1/2}} = \int_{x_{j-1/2}}^{x_{j+1/2}} f v$$
#
#
#
# The problem statement says to determine the fluxes using averaging. As a result, we get that
#
# $$u_{x}(x_{j+\frac{1}{2}})v(x_{j+\frac{1}{2}})\approx \frac{1}{2}(u_{x}(x_{j+\frac{1}{2}})^++u_{x}(x_{j+\frac{1}{2}})^-)v(x_{j+\frac{1}{2}})^-$$
#
#
# Using $P_1$ elements, we get that $v = \sum_j \phi_j$ and $u = \sum_i \alpha_i \phi_i$
#
# As a result, we get the following finite element approximation
#
# $$ \sum_{j=1}^{J} \left( \sum_{i=1}^{I} K_{ij} \alpha_i + u_{x}|_{x_{j-1/2}}^{x_{j+1/2}} - \int f_j \phi_i dx \right) = 0$$
#
# Where
#
# $K_{ij} = \left( \int_{0}^{1} \phi_j'\phi_i' + \phi_j \phi_i dx \right) $
#
# # Question 5
#
#
# The analytical solution to the differential equation is
#
# $$-(2 - 2 e^{1 - x} - 2 e^x - x + x^2 + e (2 - x + x^2))/(1 + e)$$
#
# We plot it below, then we plot the finite element approximation
using LinearAlgebra
using Pkg
using PyPlot
# +
# Plot the stated analytical solution on a 0:0.1:1 grid.
# NOTE(review): this expression does not exactly match the formula in the
# markdown above — here `exp.(2 .- x + x .^ 2)` replaces `e*(2 - x + x^2)`
# and only that term is divided by (1 + e); confirm which form is intended.
f(x) = -1 .* (2 .- 2 .*exp.(1 .- x) .- 2 .*exp.(x) .- x + x .^ 2 + exp.(2 .- x + x .^ 2 ) ./(1 + exp.(1) ) )
u = f(collect((0:.1:1)))
plot(collect((0:.1:1)),u)
# +
# P1 finite-element approximation on a uniform grid:
# assemble the tridiagonal stiffness+mass matrix and solve K u = f.
dx = 0.1
xmin,xmax = 0,1
x = collect(xmin:dx:xmax)
m = size(x)[1]
# Right-hand side f(x) = x(1-x).
f(x) = x .* (1 .- x)
#f1(x) = 1 .- 2 .* x
# Diagonal (1/dx + dx/3) and off-diagonal (-1/dx + dx/6) entries of
# the combined stiffness + mass matrix for hat functions.
d = (1/dx + dx/3)*ones(m)
dl = (-1/dx + dx/6)*ones(m-1)
K = Tridiagonal(dl,d,dl)
# NOTE(review): `K \ f(x)` would avoid forming the explicit inverse.
u = inv(K)*(f(x)) # .- 0.5 .* ( f1(x .- dx./2) +f1(x .+ dx./2) ))
plot(x, u)
# +
#https://www.mathworks.com/matlabcentral/fileexchange/56120-modal-dg
# NOTE(review): this cell was MATLAB code (see the MathWorks link above)
# pasted verbatim into a Julia notebook — `function h = ...`, `switch/case`,
# `u(1,:)` indexing and the trailing extra `end` are not valid Julia, so the
# cell could never run. Below is a direct Julia translation preserving the
# original structure. It still reads the globals `x` and `nx` exactly as the
# pasted code did; presumably `flx`/`dflx` were meant to be computed from `u`
# via the `f`/`df` arguments — TODO confirm before use.
function DGnflux(f, df, u, strategy)
    flx = x .* (1 .- x)       # flux value at every point of the domain
    dflx = 1 .- 2 .* x        # flux slope at every point of the domain
    if strategy == 1          # Roe flux
        u_ave = (u[1, :] + u[2, :]) / 2            # u at cell boundaries
        bool_p = df(u_ave) .> 0                    # positive characteristic speed
        bool_n = df(u_ave) .<= 0                   # negative characteristic speed
        h = bool_p .* flx[1, :] + bool_n .* flx[2, :]
    elseif strategy == 2      # Lax-Friedrichs flux
        alpha = maximum(abs.(dflx))
        h = 0.5 * (flx[1, :] + flx[2, :] - alpha * (u[2, :] - u[1, :]))
    elseif strategy == 3      # local Lax-Friedrichs flux
        fluxmat = [dflx[1:nx-1]; dflx[2:nx]]
        beta = maximum(abs.(dflx))
        h = 0.5 * (flx[1, :] + flx[2, :] - beta .* (u[2, :] - u[1, :]))
    elseif strategy == 4      # upwind flux (assumes dflux constant on the domain)
        a_p = maximum((dflx - abs.(dflx)) / 2) == 0   # boolean: a > 0
        a_n = maximum((dflx + abs.(dflx)) / 2) == 0   # boolean: a < 0
        h = a_p * flx[1, :] + a_n * flx[2, :]
    else
        error("strategy not suported")
    end
    return h
end
# -
# # Question 6
#
# As we can see, the naive FDM has the most numerical error. The FVM with upwinding does better, but there is still quite a bit of numerical error. Finally, DG does the best of the 3 approaches.
# +
# Lax-Friedrichs scheme for Burgers' equation u_t + (u^2/2)_x = 0
# on [0,1] with a square-wave initial condition.
dx = 0.02
dt = 0.015
tmin,tmax = 0,0.24
xmin,xmax = 0,1
x = collect(xmin:dx:xmax)
t = collect(tmin:dt:tmax+dt)
#useful constants
n = size(t)[1]
m = size(x)[1]
# CFL-type ratio dt/(2 dx) used in the flux difference.
A= dt/(2*dx)
f(v) = v.^2 ./2
#solution matrix (rows = time levels, columns = grid points)
u=zeros(n,m)
# Initial condition: 1.0 inside (0.25, 0.75), -0.5 elsewhere.
u[1,:]= 1.5 .*( (x .< .75) .& (.25 .< x ) ) + -.5 .* ones(m)
plot(x, u[1,:], label = "u(x,0)")
for i=1:(n-1)
    for j=2:m-1
        # Lax-Friedrichs update: average of the neighbours minus the
        # centred flux difference.
        u[i+1,j] = .5 .* (u[i,j+1] + u[i,j-1]) .- A .* (f(u[i,j+1]) - f(u[i,j-1]) )
    end
end
i = 10
plot(x, u[i,:], label = "u(x,$(t[i]))")
plot(x, u[n,:],label = "u(x,1)")
legend()
# +
# Finite-difference scheme for Burgers' equation with the local speed
# f_1(u) = u folded into the flux-difference coefficient.
dx = 0.02
dt = 0.015
tmin,tmax = 0,0.24
xmin,xmax = 0,1
x = collect(xmin:dx:xmax)
t = collect(tmin:dt:tmax+dt)
#useful constants
n = size(t)[1]
m = size(x)[1]
f(v) = v.^2 ./2
f_1(v) = v
#solution matrix (rows = time levels, columns = grid points)
u=zeros(n,m)
# Initial condition: 1.0 inside (0.25, 0.75), -0.5 elsewhere.
# FIX: the original had a dangling `u[1,:]=` line immediately before this
# one, which Julia parsed as a redundant chained assignment; removed.
u[1,:]= 1.5 .*( (x .< .75) .& (.25 .< x ) ) + -.5 .* ones(m)
plot(x, u[1,:], label = "u(x,0)")
for i=1:(n-1)
    for j=2:m-1
        # Local coefficient: speed at the current point times dt/(2 dx).
        A = f_1(u[i,j]) .* ( dt/(2*dx) )
        u[i+1,j] = .5 .* (u[i,j+1] + u[i,j-1]) .- A .* (f(u[i,j+1]) - f(u[i,j-1]) )
    end
end
i = 10
plot(x, u[i,:], label = "u(x,$(t[i]))")
plot(x, u[n,:],label = "u(x,1)")
legend()
# +
# Upwind finite-volume scheme for Burgers' equation: the one-sided flux
# difference is chosen by the sign of u at the current point.
dx = 0.02
dt = 0.015
tmin,tmax = 0,0.24
xmin,xmax = 0,1
x = collect(xmin:dx:xmax)
t = collect(tmin:dt:tmax+dt)
#useful constants
n = size(t)[1]
m = size(x)[1]
f(v) = v.^2 ./2
f_1(v) = v
#solution matrix (rows = time levels, columns = grid points)
u=zeros(n,m)
# Initial condition: 1.0 inside (0.25, 0.75), -0.5 elsewhere.
# FIX: the original had a dangling `u[1,:]=` line immediately before this
# one, which Julia parsed as a redundant chained assignment; removed.
u[1,:]= 1.5 .*( (x .< .75) .& (.25 .< x ) ) + -.5 .* ones(m)
plot(x, u[1,:], label = "u(x,0)")
for i=1:(n-1)
    for j=2:m-1
        # Forward difference where u > 0, backward difference where u < 0.
        u_plus = ( dt/dx ) .* f(u[i,j]) .* (u[i,j] .> 0) .* (f(u[i,j+1]) - f(u[i,j]) )
        u_minus = ( dt/dx ) .* f(u[i,j]) .* (u[i,j] .< 0) .* (f(u[i,j]) - f(u[i,j-1]) )
        u[i+1,j] = .5 .* (u[i,j+1] + u[i,j-1]) .- u_plus .- u_minus
    end
end
i = 10
plot(x, u[i,:], label = "u(x,$(t[i]))")
plot(x, u[n,:],label = "u(x,1)")
legend()
# -
| spring_hw6/homework6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Python-Reddit API Wrapper
import praw
from psaw import PushshiftAPI
#for connecting to databases
import psycopg2
from sqlalchemy import create_engine
import pandas as pd
import datetime as dt
import numpy as np
import json #for parsing the return from the Google API
import urllib #for passing info to the Google API
import accesses #local file with API certs and database passwords.
# -
# Load the airport list and geocode each IATA code via the Google Geocoding
# API, collecting lat/lon and address components into one record per airport.
nodes = pd.read_csv('data/node_locations.csv')
nodes.head()
nodes['iata_code'].iloc[:3]
api_key = accesses.google_api
url = 'https://maps.googleapis.com/maps/api/geocode/json?'
geo_list = []
for i in nodes['iata_code']:
    geo_dict = {}
    # Query by "<IATA> Airport" to bias results toward the airport itself.
    address = i+'+Airport'
    geo_dict['iata_code'] = i
    url_address_api = '{}address={}&key={}'.format(url, address, api_key)
    try:
        with urllib.request.urlopen(url_address_api) as response:
            js = json.loads(response.read())
        # Take the first (best) geocoding result.
        geo_dict['lat'] = js['results'][0]['geometry']['location']['lat']
        geo_dict['lon'] = js['results'][0]['geometry']['location']['lng']
        # Flatten address components keyed by their first type tag.
        for component in js['results'][0]['address_components']:
            geo_dict[component['types'][0]] = component['short_name']
        geo_list.append(geo_dict)
    # NOTE(review): the bare except also swallows network/auth errors, not
    # just "address not found" — consider narrowing the exception types.
    except:
        print('Error in Geocoding.', address, ' not found.')
len(geo_list)
df = pd.DataFrame(geo_list)
df
# Persist the geocoded airports for reuse by other notebooks.
df.to_csv('full_airport.csv')
# Select the route-endpoint columns from the cleaned frame.
# FIX: indexing a DataFrame with a bare tuple of labels raises a KeyError —
# selecting multiple columns requires a list (double brackets).
# NOTE(review): `df_cleaned` is not defined anywhere in this notebook;
# presumably it is produced by a cleaning/merge step elsewhere — confirm.
df_final = df_cleaned[["origin_iata", "origin_lon", "origin_lat", "dest_iata", "dest_lon", "dest_lat"]]
| .ipynb_checkpoints/flight_airport-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (ppd599)
# language: python
# name: ppd599
# ---
# This notebook loads an SRTM elevation tile, crops it to a bounding box, and resaves it as a tif.
import numpy as np
import rasterio.mask
from shapely.wkt import loads
# load the SRTM raster (1-degree tile N34W119 in .hgt format)
raster_path = '../../data/N34W119.hgt'
raster = rasterio.open(raster_path)
# define a bounding box to crop raster to (WGS84 lon/lat polygon)
wkt = 'POLYGON((-118.2863 34.0171, -118.2863 34.0711, -118.2212 34.0711, -118.2212 34.0171, -118.2863 34.0171))'
bbox = loads(wkt)
# crop the raster to the bounding box; mask returns the clipped pixel array
# and the affine transform of the cropped window
out_image, out_transform = rasterio.mask.mask(raster, [bbox], crop=True)
# copy the source metadata, then update it for the new size/transform and
# switch the driver to GeoTIFF
out_meta = raster.meta
out_meta.update({'driver': 'GTiff',
                 'height': out_image.shape[1],
                 'width': out_image.shape[2],
                 'transform': out_transform})
# save the cropped raster as a tif file
with rasterio.open('../../data/la-elevation.tif', 'w', **out_meta) as f:
    f.write(out_image)
| modules/06-spatial-data/raster-crop-bbox.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Radial Velocities
# %matplotlib inline
# %config IPython.matplotlib.backend = "retina"
from matplotlib import rcParams
rcParams["figure.dpi"] = 150
rcParams["savefig.dpi"] = 150
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from maelstrom.kepler import kepler
# Read in the light curve, the frequencies, and then the RV data.
# +
# kicid = 6780873
kicid = 8975515
# Light curve: time (JD-2400000) and differential magnitude columns.
times, dmag = np.loadtxt(f"data/kic{kicid}_lc.txt",usecols=(0,1)).T
# Subtract midpoint so times are centred on zero (better conditioning).
time_mid = (times[0] + times[-1]) / 2.
times -= time_mid
# Convert magnitudes to millimagnitudes.
dmmags = dmag * 1000.
# Every 6th metadata row holds a pulsation frequency.
metadata = np.loadtxt(f"data/kic{kicid}_metadata.csv", delimiter=",", skiprows=1)
nu_arr = metadata[::6]
# -
# load the RV data. They have the same time format (JD-2400000) as the light curve.
# rvs are in the format [[JD, RV, uncertainty]]
rv_JD, rv_RV, rv_err = np.loadtxt(f"data/kic{kicid}a_JDrv.txt",delimiter=",", usecols=(0,1,2)).T
rv_JD -= time_mid
# plot just the RVs for now
plt.errorbar(rv_JD,rv_RV,rv_err,fmt="o")
plt.xlabel(f"reduced JD - {np.round(time_mid,2)}")
plt.ylabel("RV (km/s)")
plt.show()
# +
# read in the table of orbits
orbits = pd.read_csv(f"data/orbits.csv").rename(columns = lambda x: x.strip())
# oops, our target doesn't have an orbit there, since P < 20 days.
# let's just cheat a bit for now and read in the orbit of this one target as the orbits db
# orbits = pd.read_csv(f"data/kic{kicid}_orbit.csv").rename(columns = lambda x: x.strip())
orb_params = orbits[orbits.Name == f"kic{kicid}"].iloc[0]
# initialising close to a better solution (hand-tuned, overriding the table)
porb = 1088. #orb_params.Porb
a1 = 138. #orb_params["a1sini/c"]
tp = 55087.218038 - time_mid #orb_params["t_p"] - time_mid
e = 0.12 #orb_params["e"]
varpi = 3.764 #orb_params["varpi"]
# Projected semi-major axis light-travel time converted from seconds to days.
a1d = a1/86400.0
# Logit of the eccentricity, so the fit parameter is unconstrained.
e_param = np.log(e) - np.log(1.0 - e)
# -
# TF1-style interactive session; all tensors below live in its default graph.
sess = tf.InteractiveSession()
# +
T = tf.float64
# First the variables that we might want to optimize:
porb_tensor = tf.Variable(porb, dtype=T)
a1d_tensor = tf.Variable(a1d, dtype=T)
tp_tensor = tf.Variable(tp, dtype=T)
nu_tensor = tf.Variable(nu_arr, dtype=T)
e_param_tensor = tf.Variable(e_param, dtype=T) # This forces the ecc to be between 0 and 1
e_tensor = 1.0 / (1.0 + tf.exp(-e_param_tensor))
varpi_tensor = tf.Variable(varpi, dtype=T)
log_sigma2_tensor = tf.Variable(0.0, dtype=T) # Variance from observational uncertainties and model misspecification
log_rv_sigma2_tensor = tf.Variable(np.log(np.median(rv_err)), dtype=T) # RV jitter
gammav_tensor = tf.Variable(np.mean(rv_RV) / 299792.458, dtype=T)
# These are some placeholders for the data:
times_tensor = tf.placeholder(T, times.shape)
dmmags_tensor = tf.placeholder(T, dmmags.shape)
# Solve Kepler's equation: mean anomaly -> eccentric anomaly -> true anomaly.
mean_anom = 2.0 * np.pi * (times_tensor - tp_tensor) / porb_tensor
ecc_anom = kepler(mean_anom, e_tensor)
true_anom = 2.0 * tf.atan2(tf.sqrt(1.0+e_tensor)*tf.tan(0.5*ecc_anom),tf.sqrt(1.0-e_tensor) + tf.zeros_like(times_tensor))
# -
# Calculate the model and likelihood for the time delays
# +
# Here we define how the time delay will be calculated:
# Here we define how the time delay will be calculated:
tau_tensor = -a1d_tensor * (1.0 - tf.square(e_tensor)) * tf.sin(true_anom + varpi_tensor) / (1.0 + e_tensor*tf.cos(true_anom))
# And the design matrix (cos/sin basis at each pulsation frequency,
# evaluated at the delay-corrected times):
arg_tensor = 2.0 * np.pi * nu_tensor[None, :] * (times_tensor - tau_tensor)[:, None]
D_tensor = tf.concat([tf.cos(arg_tensor), tf.sin(arg_tensor)], axis=1)
# Define the linear solve for W_hat (normal equations for the amplitudes):
DTD_tensor = tf.matmul(D_tensor, D_tensor, transpose_a=True)
DTy_tensor = tf.matmul(D_tensor, dmmags_tensor[:, None], transpose_a=True)
W_hat_tensor = tf.linalg.solve(DTD_tensor, DTy_tensor)
# Finally, the model and the chi^2 objective (including the log-variance
# normalisation term):
model_tensor = tf.squeeze(tf.matmul(D_tensor, W_hat_tensor))
chi2_tensor = tf.reduce_sum(tf.square(dmmags_tensor - model_tensor)) * tf.exp(-log_sigma2_tensor)
chi2_tensor += len(times) * log_sigma2_tensor
# NOW THE RVs:
rv_time_tensor = tf.placeholder(T)
rv_tensor = tf.placeholder(T)
rv_err_tensor = tf.placeholder(T)
# Solve Kepler's equation for the RVs
rv_mean_anom = (2.0 * np.pi * (rv_time_tensor - tp_tensor) / porb_tensor)
rv_ecc_anom = kepler(rv_mean_anom, e_tensor)
rv_true_anom = 2.0 * tf.atan2(tf.sqrt(1.0+e_tensor)*tf.tan(0.5*rv_ecc_anom), tf.sqrt(1.0-e_tensor) + tf.zeros_like(rv_time_tensor))
# Here we define how the RV will be calculated:
vrad_tensor = -2.0 * np.pi * (a1d_tensor / porb_tensor) * (1/tf.sqrt(1.0 - tf.square(e_tensor))) * (tf.cos(rv_true_anom + varpi_tensor) + e_tensor*tf.cos(varpi_tensor))
vrad_tensor += gammav_tensor
vrad_tensor *= 299792.458 # c in km/s
# RV likelihood with jitter added in quadrature to the quoted errors.
rv_sig2 = tf.square(rv_err_tensor) + tf.exp(log_rv_sigma2_tensor)
chi = tf.square(rv_tensor - vrad_tensor) / rv_sig2 + tf.log(rv_sig2)
chi2_tensor += tf.reduce_sum(chi)
# Parameters exposed to the optimizer.
var_list = [
    porb_tensor, a1d_tensor, tp_tensor, e_param_tensor, varpi_tensor, log_rv_sigma2_tensor, log_sigma2_tensor, gammav_tensor
]
init = tf.global_variables_initializer()
sess.run(init)
# +
# Bind the observed data to the placeholders.
feed_dict = {
    times_tensor: times,
    dmmags_tensor: dmmags,
    rv_time_tensor: rv_JD,
    rv_tensor: rv_RV,
    rv_err_tensor: rv_err,
}
# Initial objective value before optimisation.
sess.run(chi2_tensor, feed_dict=feed_dict)
# -
# Sanity-check: gradients of chi^2 with respect to every fit parameter.
sess.run(tf.gradients(chi2_tensor, var_list), feed_dict=feed_dict)
# +
# Optimise one parameter at a time (reverse order), then all-but-the-period,
# then everything together — a simple coordinate-descent warm start.
for v in var_list[::-1]:
    opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, var_list=[v])
    opt.minimize(sess, feed_dict=feed_dict)
opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, var_list=var_list[1:])
for i in range(3):
    opt.minimize(sess, feed_dict=feed_dict)
opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, var_list=var_list)
for i in range(3):
    opt.minimize(sess, feed_dict=feed_dict)
# -
# Final objective value after optimisation.
sess.run(chi2_tensor, feed_dict=feed_dict)
# +
# Evaluate the fitted RV model on a dense time grid and overplot the data.
rv_time_test = np.sort(np.linspace(sess.run(tp_tensor), rv_JD.max(), 5000))
vrad_test = sess.run(vrad_tensor, feed_dict={rv_time_tensor: rv_time_test})
plt.errorbar(rv_JD,rv_RV,rv_err,fmt=".",label='RV obs')
plt.plot(rv_time_test, vrad_test,label='RV th')
plt.xlabel(f"reduced JD - {np.round(time_mid,2)}")
plt.ylabel("RV (km/s)")
plt.title("KIC"+str(kicid) + ": RV curve")
plt.legend()
plt.show()
# -
# Time span of the RV data versus the fitted orbital period.
rv_JD.max() - rv_JD.min(), sess.run(porb_tensor)
# +
# Phase-fold the RV curve on the fitted period and replot.
rv_phi_test = np.sort(np.linspace(0, sess.run(porb_tensor), 5000) % sess.run(porb_tensor))
vrad_test = sess.run(vrad_tensor, feed_dict={rv_time_tensor: rv_phi_test})
plt.errorbar((rv_JD % sess.run(porb_tensor))/sess.run(porb_tensor),rv_RV,rv_err,fmt=".",label='RV obs')
plt.plot(rv_phi_test/sess.run(porb_tensor), vrad_test,label='RV th')
plt.xlabel("Orbital phase")
plt.ylabel("RV (km/s)")
plt.title("KIC"+str(kicid) + ": Phase-folded RV curve, P = " + f"{np.round(sess.run(porb_tensor))}")
plt.legend()
plt.show()
# -
# Inspect the fitted quantities.
print(time_mid)
print(sess.run(tp_tensor))
print(rv_JD)
# Systemic velocity back in km/s.
sess.run(gammav_tensor)*299792.458
sess.run(var_list, feed_dict=feed_dict)
print(f"e = {np.round(sess.run(e_tensor),3)}")
print(f"a1sini/c = {np.round(sess.run(a1d_tensor)*86400.,3)} s")
| notebooks/RadialVelocity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Logistic regression implementation from scratch
# Logistic regression is a classification technique that can classify into multiple categories using the same linear-model machinery. After getting the output of the linear model, we run it through a sigmoid function and get our class labels.
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
import numpy as np
import pandas as pd
import math
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression as LogR
from sklearn.metrics import r2_score
from scipy.special import expit as sigmoid
# #### Basic Structure
#
# Assume 0's to be weights for the linear model.
#
# for each iteration:
#
# find the output of the linear model using existing weights
# find the delta wrt the true values.
# update the weights using a learning rate.
#
# for predictions:
#
# simply use sigmoid of the output using the linear model
class LogisticRegression():
    """Logistic regression trained with batch gradient ascent.

    Parameters
    ----------
    x : array of shape (n_samples, n_features) -- training inputs.
    y : array of shape (n_samples, 1) -- binary 0/1 targets.
    n_iter : number of gradient steps.
    lr : learning rate; divided by n_samples so each step uses the mean gradient.
    """
    def __init__(self, x, y, n_iter=1500, lr=0.01):
        self.w = np.zeros((x.shape[1], 1))
        self.lr = lr/x.shape[0]
        self.n_iter = n_iter
        self.x, self.y = x, y
    def fit(self):
        """Run gradient ascent on the log-likelihood.

        FIX: the gradient of the log-likelihood is X.T @ (y - p) with
        p = sigmoid(X @ w), the *probability*.  The original plugged in the
        rounded class labels (self.predict), which is the perceptron update
        rather than the logistic-regression gradient and stops learning as
        soon as every point lands on the correct side of the boundary.
        """
        for _ in range(self.n_iter):
            probabilities = self.predict_proba(self.x)
            delta = self.y - probabilities
            self.w += (self.lr * (self.x.T @ delta))
    def predict_proba(self, x):
        """Return P(y=1 | x) for each row of x."""
        return sigmoid(x @ self.w)
    def predict(self, x):
        """Return hard 0/1 class labels (probability thresholded at 0.5)."""
        return np.round(self.predict_proba(x))
# Train on the breast-cancer dataset (standardised features) and compare
# against scikit-learn's LogisticRegression.
data = load_breast_cancer()
d = data.data
X_train = pd.DataFrame(d, columns=data.feature_names)
y_train = data.target[:, None]
# Z-score each feature so gradient steps are well scaled.
X_train = (X_train - X_train.mean())/X_train.std()
lr = LogisticRegression(X_train, y_train)
lr.fit()
# NOTE(review): r2_score on hard 0/1 labels is an unusual classification
# metric — accuracy_score would be the conventional choice; confirm intent.
r2_score(y_train, lr.predict(X_train))
sklearn_lr = LogR()
sklearn_lr.fit(X_train, data.target)
# sklearn's .score reports mean accuracy.
sklearn_lr.score(X_train, data.target)
# ## Conclusion
#
# Our overall training set accuracy is around 93% and the sklearn model gives around 98% accuracy. We are close enough to say that we have achieved nirvana for now.
| Algorithm/LogisticRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn import metrics
from keras.models import Sequential
from keras.layers import Dense, Dropout, regularizers
from keras.layers import LSTM
from keras.callbacks import EarlyStopping
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import warnings
import numpy as np
from collections import OrderedDict
import os
from lob_data_utils import lob, db_result, gdf_pca, model
from lob_data_utils.svm_calculation import lob_svm
sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
# -
data_length = 24000
stock = '11869'
gdf_filename_pattern = 'gdf_{}_r{}_s{}_K50'
gdf_parameters = [(0.1, 0.1), (0.01, 0.1), (0.1, 0.5), (0.01, 0.5), (0.25, 0.25)]
# Build one SvmGdfResults holder per (r, s) GDF parameter pair.
gdf_dfs = [
    gdf_pca.SvmGdfResults(
        stock, r=r, s=s, data_length=data_length,
        data_dir='../../data/data_gdf',
        reg_data_dir='../../data/prepared',
        gdf_filename_pattern=gdf_filename_pattern)
    for r, s in gdf_parameters
]
# +
# Work with the last parameterisation, (r, s) = (0.25, 0.25).
chosen = gdf_dfs[4]
df = chosen.df
df_test = chosen.df_test
n_components = chosen.get_pca('pca_n_gdf_que_prev').n_components_
class_weights = chosen.get_classes_weights()
print(n_components, class_weights)
# -
# Distribution of every GDF feature column in the train frame.
gdf_columns = [c for c in df.columns if 'gdf' in c]
df[gdf_columns].boxplot(figsize=(16, 4))
# +
from keras import backend as K
def as_keras_metric(method):
    """Adapt a streaming tf.metrics function into a Keras-compatible metric.

    tf.metrics functions return (value, update_op); Keras expects a single
    tensor, so we run the update op via a control dependency and return the
    (identity of the) metric value.
    """
    import functools
    import tensorflow as tf

    @functools.wraps(method)
    def wrapper(y_true, y_pred, **kwargs):
        """ Wrapper for turning tensorflow metrics into keras metrics """
        value, update_op = method(y_true, y_pred, **kwargs)
        # tf.metrics keeps its counters in local variables — initialise them.
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value
    return wrapper
import tensorflow as tf
auc_roc = as_keras_metric(tf.metrics.auc)
def matthews_correlation(y_true, y_pred):
    """Matthews correlation coefficient built from Keras backend ops."""
    # Clip to [0, 1] and round to obtain hard class assignments.
    pred_pos = K.round(K.clip(y_pred, 0, 1))
    pred_neg = 1 - pred_pos
    pos = K.round(K.clip(y_true, 0, 1))
    neg = 1 - pos
    # Confusion-matrix counts.
    tp = K.sum(pos * pred_pos)
    tn = K.sum(neg * pred_neg)
    fp = K.sum(neg * pred_pos)
    fn = K.sum(pos * pred_neg)
    num = tp * tn - fp * fn
    den = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # Epsilon guards against division by zero on degenerate batches.
    return num / (den + K.epsilon())
# +
## With validation
# -
n_steps = 4
gdf_df = gdf_dfs[4]
feature = 'pca_n_gdf_que'
pca = gdf_df.get_pca(feature)
# +
# Project train and test features onto the PCA basis fitted for this feature set.
train_x = pca.transform(gdf_df.df[gdf_df.feature_columns_dict[feature]])
train_y = gdf_df.df['mid_price_indicator']
test_x = pca.transform(gdf_df.df_test[gdf_df.feature_columns_dict[feature]])
# BUG FIX: test labels were previously read from the *train* frame
# (gdf_df.df); they must come from the held-out frame.
test_y = gdf_df.df_test['mid_price_indicator']
#numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# LSTM expects 3-D input: (samples, timesteps, features-per-step).
train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1))
test_x = np.reshape(test_x, (test_x.shape[0], test_x.shape[1], 1))
print(train_x.shape, test_x.shape)
# -
def get_model(input_shape=(10, 1)):
    """Build and compile a small LSTM binary classifier.

    Parameters
    ----------
    input_shape : tuple
        (timesteps, features) of one sample. Defaults to (10, 1) for
        backward compatibility, but pass ``train_x.shape[1:]`` so the
        network matches the actual number of PCA components instead of
        relying on the hard-coded 10.
    """
    model = Sequential()
    model.add(LSTM(16, activation='sigmoid', input_shape=input_shape, return_sequences=False))
    model.add(Dense(1, activation='sigmoid'))
    # matthews_correlation / auc_roc are custom metrics defined above.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[matthews_correlation, auc_roc])
    return model
m = get_model()
# shuffle=False preserves the temporal ordering of the order-book samples.
m.fit(train_x, train_y, verbose=1, batch_size=512, epochs=20, shuffle=False)
# NOTE(review): predict_classes was removed in TF2-era Keras; if upgrading,
# replace with (m.predict(train_x) > 0.5).astype(int) — confirm Keras version.
pred = m.predict_classes(train_x)
# Matthews correlation on the *training* data (no held-out evaluation here).
metrics.matthews_corrcoef(train_y, pred)
| overview/playground/playground_11869_lstm-Copy2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: feml
# language: python
# name: feml
# ---
# ## Machine Learning Model Building Pipeline: Feature Engineering
#
# In the following videos, we will take you through a practical example of each one of the steps in the Machine Learning model building pipeline, which we described in the previous lectures. There will be a notebook for each one of the Machine Learning Pipeline steps:
#
# 1. Data Analysis
# 2. Feature Engineering
# 3. Feature Selection
# 4. Model Building
#
# **This is the notebook for step 2: Feature Engineering**
#
# We will use the house price dataset available on [Kaggle.com](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data). See below for more details.
#
# ===================================================================================================
#
# ## Predicting Sale Price of Houses
#
# The aim of the project is to build a machine learning model to predict the sale price of homes based on different explanatory variables describing aspects of residential houses.
#
# ### Why is this important?
#
# Predicting house prices is useful to identify fruitful investments, or to determine whether the price advertised for a house is over or under-estimated.
#
# ### What is the objective of the machine learning model?
#
# We aim to minimise the difference between the real price and the price estimated by our model. We will evaluate model performance using the mean squared error (mse) and the square root of the mean squared error (rmse).
#
# ### How do I download the dataset?
#
# To download the House Price dataset go this website:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data
#
# Scroll down to the bottom of the page, and click on the link 'train.csv', and then click the 'download' blue button towards the right of the screen, to download the dataset. Rename the file as 'houseprice.csv' and save it to a directory of your choice.
#
# **Note the following:**
# - You need to be logged in to Kaggle in order to download the datasets.
# - You need to accept the terms and conditions of the competition to download the dataset
# - If you save the file to the same directory where you saved this jupyter notebook, then you can run the code as it is written here.
#
# ====================================================================================================
# ## House Prices dataset: Feature Engineering
#
# In the following cells, we will engineer / pre-process the variables of the House Price Dataset from Kaggle. We will engineer the variables so that we tackle:
#
# 1. Missing values
# 2. Temporal variables
# 3. Non-Gaussian distributed variables
# 4. Categorical variables: remove rare labels
# 5. Categorical variables: convert strings to numbers
# 6. Standardise the values of the variables to the same range
#
# ### Setting the seed
#
# It is important to note that we are engineering variables and pre-processing data with the idea of deploying the model. Therefore, from now on, for each step that includes some element of randomness, it is extremely important that we **set the seed**. This way, we can obtain reproducibility between our research and our development code.
#
# This is perhaps one of the most important lessons that you need to take away from this course: **Always set the seeds**.
#
# Let's go ahead and load the dataset.
# +
# to handle datasets
import pandas as pd
import numpy as np
# for plotting
import matplotlib.pyplot as plt
# to divide train and test set
from sklearn.model_selection import train_test_split
# feature scaling
from sklearn.preprocessing import MinMaxScaler
# to visualise all the columns in the dataframe
# (pd.pandas is an alias for pd itself; pd.set_option would be equivalent)
pd.pandas.set_option('display.max_columns', None)
import warnings
# silence library warnings for cleaner notebook output
warnings.simplefilter(action='ignore')
# -
# load dataset (expects the Kaggle house-price train file saved as houseprice.csv)
data = pd.read_csv('houseprice.csv')
# (rows, columns) — sanity-check the load
print(data.shape)
data.head()
# ## Separate dataset into train and test
#
# Before beginning to engineer our features, it is important to separate our data into training and testing sets. When we engineer features, some techniques learn parameters from data. It is important to learn these parameters only from the train set. This is to avoid over-fitting.
#
# **Separating the data into train and test involves randomness, therefore, we need to set the seed.**
# +
# Let's separate into train and test set
# Remember to set the seed (random_state for this sklearn function)
# Note: the *whole* frame (target included) is passed as X on purpose —
# SalePrice is dropped later, in the feature-selection / model notebooks.
X_train, X_test, y_train, y_test = train_test_split(data,
                                                    data['SalePrice'],
                                                    test_size=0.1,
                                                    # we are setting the seed here:
                                                    random_state=0)
X_train.shape, X_test.shape
# -
# ## Missing values
#
# ### Categorical variables
# For categorical variables, we will replace missing values with the string "missing".
# +
# make a list of the categorical variables that contain missing values
vars_with_na = [
    var for var in data.columns
    if X_train[var].dtypes == 'O' and X_train[var].isnull().sum() > 0
]
# print percentage of missing values per variable
X_train[vars_with_na].isnull().mean()
# +
# replace missing values with new label: "Missing"
# (same imputation applied to train and test)
for frame in (X_train, X_test):
    frame[vars_with_na] = frame[vars_with_na].fillna('Missing')
# -
# check that we have no missing information in the engineered variables
X_train[vars_with_na].isnull().sum()
# check that test set does not contain null values in the engineered variables
[var for var in vars_with_na if X_test[var].isnull().sum() > 0]
# ### Numerical variables
#
# To engineer missing values in numerical variables, we will:
#
# - add a binary missing value indicator variable
# - and then replace the missing values in the original variable with the mode
#
# +
# make a list with the numerical variables that contain missing values
vars_with_na = [
    var for var in data.columns
    if X_train[var].dtypes != 'O' and X_train[var].isnull().sum() > 0
]
# print percentage of missing values per variable
X_train[vars_with_na].isnull().mean()
# +
# engineer missing values in the numerical variables, as described above:
# add a binary missing indicator, then impute with the train-set mode
for var in vars_with_na:
    # the mode is learnt from the train set only (no test-set leakage)
    mode_val = X_train[var].mode()[0]
    for frame in (X_train, X_test):
        # binary flag marking which rows were originally missing
        frame[var + '_na'] = np.where(frame[var].isnull(), 1, 0)
        # then fill the original variable with the train-set mode
        frame[var] = frame[var].fillna(mode_val)
# check that we have no more missing values in the engineered variables
X_train[vars_with_na].isnull().sum()
# +
# check that test set does not contain null values in the engineered variables
# BUG FIX: the comprehension previously yielded the undefined name `vr`,
# which would raise NameError whenever any variable still contained nulls.
[var for var in vars_with_na if X_test[var].isnull().sum() > 0]
# +
# check the binary missing indicator variables
X_train[['LotFrontage_na', 'MasVnrArea_na', 'GarageYrBlt_na']].head()
# ## Temporal variables
#
# ### Capture elapsed time
#
# We learned in the previous Jupyter notebook, that there are 4 variables that refer to the years in which the house or the garage were built or remodeled. We will capture the time elapsed between those variables and the year in which the house was sold:
def elapsed_years(df, var):
    """Replace the year stored in ``var`` with the number of years elapsed
    between that year and the year the house was sold (``YrSold``).

    Mutates ``df`` in place and also returns it."""
    df[var] = df['YrSold'].sub(df[var])
    return df
# convert the three construction/remodel year columns into elapsed years,
# consistently in train and test (YrSold itself is the reference and kept)
for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']:
    X_train = elapsed_years(X_train, var)
    X_test = elapsed_years(X_test, var)
# ## Numerical variable transformation
#
# In the previous Jupyter notebook, we observed that the numerical variables are not normally distributed.
#
# We will log transform the positive numerical variables in order to get a more Gaussian-like distribution. This tends to help Linear machine learning models.
# log-transform the skewed, strictly-positive variables (target included)
log_vars = ['LotFrontage', 'LotArea', '1stFlrSF', 'GrLivArea', 'SalePrice']
for var in log_vars:
    X_train[var] = np.log(X_train[var])
    X_test[var] = np.log(X_test[var])
# check that test set does not contain null values in the engineered variables
[var for var in log_vars if X_test[var].isnull().sum() > 0]
# same for train set
[var for var in log_vars if X_train[var].isnull().sum() > 0]
# ## Categorical variables
#
# ### Removing rare labels
#
# First, we will group those categories within variables that are present in less than 1% of the observations. That is, all values of categorical variables that are shared by less than 1% of houses, will be replaced by the string "Rare".
#
# To learn more about how to handle categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/feature-engineering-for-machine-learning/?couponCode=UDEMY2018) in Udemy.
# +
# let's capture the categorical variables in a list
# ('O' dtype == pandas object columns, i.e. the string-valued variables)
cat_vars = [var for var in X_train.columns if X_train[var].dtype == 'O']
# +
def find_frequent_labels(df, var, rare_perc):
    """Return the labels of ``var`` shared by more than ``rare_perc``
    (a fraction, e.g. 0.01) of the houses in ``df``."""
    df = df.copy()
    # share of houses per label (rows with a SalePrice count per group)
    share = df.groupby(var)['SalePrice'].count().div(len(df))
    return share[share.gt(rare_perc)].index
for var in cat_vars:
    # labels to keep: present in more than 1% of the train-set houses
    frequent_ls = find_frequent_labels(X_train, var, 0.01)
    # every other label is collapsed into the single category "Rare"
    for frame in (X_train, X_test):
        frame[var] = np.where(frame[var].isin(frequent_ls), frame[var], 'Rare')
# -
# ### Encoding of categorical variables
#
# Next, we need to transform the strings of the categorical variables into numbers. We will do it so that we capture the monotonic relationship between the label and the target.
#
# To learn more about how to encode categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/feature-engineering-for-machine-learning/?couponCode=UDEMY2018) in Udemy.
# +
# this function will assign discrete values to the strings of the variables,
# so that the smaller value corresponds to the category that shows the smaller
# mean house sale price
def replace_categories(train, test, var, target):
    """Ordinally encode ``var`` in-place in both frames: categories are
    numbered 0, 1, ... by ascending mean ``target`` computed on ``train``."""
    # categories ordered from lowest to highest mean sale price
    ordered = train.groupby([var])[target].mean().sort_values().index
    # map each category to its rank in that ordering
    mapping = dict(zip(ordered, range(len(ordered))))
    train[var] = train[var].map(mapping)
    test[var] = test[var].map(mapping)
# -
# encode every categorical variable against the (log) sale price
for col in cat_vars:
    replace_categories(X_train, X_test, col, 'SalePrice')
# sanity check: the ordinal encoding must not have introduced NaN
# (train set first, then test set)
[col for col in X_train.columns if X_train[col].isnull().sum() > 0]
[col for col in X_test.columns if X_test[col].isnull().sum() > 0]
# +
# let me show you what I mean by monotonic relationship
# between labels and target
def analyse_vars(df, var):
    """Bar-plot the median SalePrice per (encoded) category of ``var``."""
    medians = df.copy().groupby(var)['SalePrice'].median()
    medians.plot.bar()
    plt.title(var)
    plt.ylabel('SalePrice')
    plt.show()


for var in cat_vars:
    analyse_vars(X_train, var)
# -
# The monotonic relationship is particularly clear for the variables MSZoning, Neighborhood, and ExterQual. Note how, the higher the integer that now represents the category, the higher the mean house sale price.
#
# (remember that the target is log-transformed, that is why the differences seem so small).
# ## Feature Scaling
#
# For use in linear models, features need to be either scaled or normalised. In the next section, I will scale features to the minimum and maximum values:
# +
# capture all variables in a list
# except the target and the ID
excluded = {'Id', 'SalePrice'}
train_vars = [var for var in X_train.columns if var not in excluded]
# count number of variables
len(train_vars)
# +
# create scaler
scaler = MinMaxScaler()
# fit the scaler to the train set
# (fitting on train only avoids leaking test-set min/max into training)
scaler.fit(X_train[train_vars])
# transform the train and test set
X_train[train_vars] = scaler.transform(X_train[train_vars])
X_test[train_vars] = scaler.transform(X_test[train_vars])
# -
X_train.head()
# +
# let's now save the train and test sets for the next notebook!
# index=False so the row index is not persisted as an extra column
X_train.to_csv('xtrain.csv', index=False)
X_test.to_csv('xtest.csv', index=False)
# -
# That concludes the feature engineering section for this dataset.
#
# **Remember: the aim of this course is to show you how to put models in production. We deliberately kept the feature engineering pipeline, yet included many of the traditional engineering steps, to give you a full flavour of building and deploying a machine learning model pipeline** as we will see in the coming sections of the course.
# That is all for this notebook. We hope you enjoyed it and see you in the next one!
| Section-2-Machine-Learning-Pipeline-Overview/Machine-Learning-Pipeline-Step2-Feature-Engineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# # Working with preprocessing layers
#
# **Authors:** <NAME>, <NAME><br>
# **Date created:** 2020/07/25<br>
# **Last modified:** 2021/04/23<br>
# **Description:** Overview of how to leverage preprocessing layers to create end-to-end models.
# + [markdown] colab_type="text"
# ## Keras preprocessing
#
# The Keras preprocessing layers API allows developers to build Keras-native input
# processing pipelines. These input processing pipelines can be used as independent
# preprocessing code in non-Keras workflows, combined directly with Keras models, and
# exported as part of a Keras SavedModel.
#
# With Keras preprocessing layers, you can build and export models that are truly
# end-to-end: models that accept raw images or raw structured data as input; models that
# handle feature normalization or feature value indexing on their own.
# + [markdown] colab_type="text"
# ## Available preprocessing
#
# ### Text preprocessing
#
# - `TextVectorization` layer: turns raw strings into an encoded representation that can be
# read by an `Embedding` layer or `Dense` layer.
#
# ### Numerical features preprocessing
#
# - `Normalization` layer: performs feature-wise normalization of input features.
# - `Discretization` layer: turns continuous numerical features into integer categorical
# features.
#
# ### Categorical features preprocessing
#
# - `CategoryEncoding` layer: turns integer categorical features into one-hot, multi-hot,
# or count dense representations.
# - `Hashing` layer: performs categorical feature hashing, also known as the "hashing
# trick".
# - `StringLookup` layer: turns string categorical values into an encoded representation that can be
# read by an `Embedding` layer or `Dense` layer.
# - `IntegerLookup` layer: turns integer categorical values into an encoded representation that can be
# read by an `Embedding` layer or `Dense` layer.
#
#
# ### Image preprocessing
#
# These layers are for standardizing the inputs of an image model.
#
# - `Resizing` layer: resizes a batch of images to a target size.
# - `Rescaling` layer: rescales and offsets the values of a batch of images (e.g. go from
# inputs in the `[0, 255]` range to inputs in the `[0, 1]` range).
# - `CenterCrop` layer: returns a center crop of a batch of images.
#
# ### Image data augmentation
#
# These layers apply random augmentation transforms to a batch of images. They
# are only active during training.
#
# - `RandomCrop` layer
# - `RandomFlip` layer
# - `RandomTranslation` layer
# - `RandomRotation` layer
# - `RandomZoom` layer
# - `RandomHeight` layer
# - `RandomWidth` layer
# - `RandomContrast` layer
# + [markdown] colab_type="text"
# ## The `adapt()` method
#
# Some preprocessing layers have an internal state that can be computed based on
# a sample of the training data. The list of stateful preprocessing layers is:
#
# - `TextVectorization`: holds a mapping between string tokens and integer indices
# - `StringLookup` and `IntegerLookup`: hold a mapping between input values and integer
# indices.
# - `Normalization`: holds the mean and standard deviation of the features.
# - `Discretization`: holds information about value bucket boundaries.
#
# Crucially, these layers are **non-trainable**. Their state is not set during training; it
# must be set **before training**, either by initializing them from a precomputed constant,
# or by "adapting" them on data.
#
# You set the state of a preprocessing layer by exposing it to training data, via the
# `adapt()` method:
# + colab_type="code"
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
# toy sample: 3 observations x 3 features
data = np.array([[0.1, 0.2, 0.3], [0.8, 0.9, 1.0], [1.5, 1.6, 1.7],])
layer = preprocessing.Normalization()
# adapt() computes the per-feature mean and variance from the sample
layer.adapt(data)
normalized_data = layer(data)
# after normalization the data has ~0 mean and ~1 std overall
print("Features mean: %.2f" % (normalized_data.numpy().mean()))
print("Features std: %.2f" % (normalized_data.numpy().std()))
# + [markdown] colab_type="text"
# The `adapt()` method takes either a Numpy array or a `tf.data.Dataset` object. In the
# case of `StringLookup` and `TextVectorization`, you can also pass a list of strings:
# + colab_type="code"
# sample corpus (Odyssey 19.560ff, in Greek) — a plain list of strings
data = [
    "ξεῖν᾽, ἦ τοι μὲν ὄνειροι ἀμήχανοι ἀκριτόμυθοι",
    "γίγνοντ᾽, οὐδέ τι πάντα τελείεται ἀνθρώποισι.",
    "δοιαὶ γάρ τε πύλαι ἀμενηνῶν εἰσὶν ὀνείρων:",
    "αἱ μὲν γὰρ κεράεσσι τετεύχαται, αἱ δ᾽ ἐλέφαντι:",
    "τῶν οἳ μέν κ᾽ ἔλθωσι διὰ πριστοῦ ἐλέφαντος,",
    "οἵ ῥ᾽ ἐλεφαίρονται, ἔπε᾽ ἀκράαντα φέροντες:",
    "οἱ δὲ διὰ ξεστῶν κεράων ἔλθωσι θύραζε,",
    "οἵ ῥ᾽ ἔτυμα κραίνουσι, βροτῶν ὅτε κέν τις ἴδηται.",
]
layer = preprocessing.TextVectorization()
# adapt() builds the token vocabulary from the sample strings
layer.adapt(data)
vectorized_text = layer(data)
# each line becomes a padded sequence of integer token indices
print(vectorized_text)
# + [markdown] colab_type="text"
# In addition, adaptable layers always expose an option to directly set state via
# constructor arguments or weight assignment. If the intended state values are known at
# layer construction time, or are calculated outside of the `adapt()` call, they can be set
# without relying on the layer's internal computation. For instance, if external vocabulary
# files for the `TextVectorization`, `StringLookup`, or `IntegerLookup` layers already
# exist, those can be loaded directly into the lookup tables by passing a path to the
# vocabulary file in the layer's constructor arguments.
#
# Here's an example where we instantiate a `StringLookup` layer with precomputed vocabulary:
# + colab_type="code"
# vocabulary supplied directly — no adapt() call needed
vocab = ["a", "b", "c", "d"]
data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
layer = preprocessing.StringLookup(vocabulary=vocab)
# "z" is not in the vocabulary, so it maps to the out-of-vocabulary index
vectorized_data = layer(data)
print(vectorized_data)
# + [markdown] colab_type="text"
# ## Preprocessing data before the model or inside the model
#
# There are two ways you could be using preprocessing layers:
#
# **Option 1:** Make them part of the model, like this:
#
# ```python
# inputs = keras.Input(shape=input_shape)
# x = preprocessing_layer(inputs)
# outputs = rest_of_the_model(x)
# model = keras.Model(inputs, outputs)
# ```
#
# With this option, preprocessing will happen on device, synchronously with the rest of the
# model execution, meaning that it will benefit from GPU acceleration.
# If you're training on GPU, this is the best option for the `Normalization` layer, and for
# all image preprocessing and data augmentation layers.
#
# **Option 2:** apply it to your `tf.data.Dataset`, so as to obtain a dataset that yields
# batches of preprocessed data, like this:
#
# ```python
# dataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))
# ```
#
# With this option, your preprocessing will happen on CPU, asynchronously, and will be
# buffered before going into the model.
# In addition, if you call `dataset.prefetch(tf.data.AUTOTUNE)` on your dataset,
# the preprocessing will happen efficiently in parallel with training:
#
# ```python
# dataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))
# dataset = dataset.prefetch(tf.data.AUTOTUNE)
# model.fit(dataset, ...)
# ```
#
# This is the best option for `TextVectorization`, and all structured data preprocessing
# layers. It can also be a good option if you're training on CPU
# and you use image preprocessing layers.
#
# **When running on TPU, you should always place preprocessing layers in the `tf.data` pipeline**
# (with the exception of `Normalization` and `Rescaling`, which run fine on TPU and are commonly
# used as the first layer in an image model).
# + [markdown] colab_type="text"
# ## Benefits of doing preprocessing inside the model at inference time
#
# Even if you go with option 2, you may later want to export an inference-only end-to-end
# model that will include the preprocessing layers. The key benefit to doing this is that
# **it makes your model portable** and it **helps reduce the
# [training/serving skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew)**.
#
# When all data preprocessing is part of the model, other people can load and use your
# model without having to be aware of how each feature is expected to be encoded &
# normalized. Your inference model will be able to process raw images or raw structured
# data, and will not require users of the model to be aware of the details of e.g. the
# tokenization scheme used for text, the indexing scheme used for categorical features,
# whether image pixel values are normalized to `[-1, +1]` or to `[0, 1]`, etc. This is
# especially powerful if you're exporting
# your model to another runtime, such as TensorFlow.js: you won't have to
# reimplement your preprocessing pipeline in JavaScript.
#
# If you initially put your preprocessing layers in your `tf.data` pipeline,
# you can export an inference model that packages the preprocessing.
# Simply instantiate a new model that chains
# your preprocessing layers and your training model:
#
# ```python
# inputs = keras.Input(shape=input_shape)
# x = preprocessing_layer(inputs)
# outputs = training_model(x)
# inference_model = keras.Model(inputs, outputs)
# ```
# + [markdown] colab_type="text"
# ## Quick recipes
#
# ### Image data augmentation
#
# Note that image data augmentation layers are only active during training (similarly to
# the `Dropout` layer).
# + colab_type="code"
from tensorflow import keras
from tensorflow.keras import layers
# Create a data augmentation stage with horizontal flipping, rotations, zooms
data_augmentation = keras.Sequential(
    [
        preprocessing.RandomFlip("horizontal"),
        preprocessing.RandomRotation(0.1),
        preprocessing.RandomZoom(0.1),
    ]
)
# Load some data
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
input_shape = x_train.shape[1:]
classes = 10
# Create a tf.data pipeline of augmented images (and their labels)
# (augmentation runs inside dataset.map, i.e. on CPU, per batch of 16)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(16).map(lambda x, y: (data_augmentation(x), y))
# Create a model and train it on the augmented image data
inputs = keras.Input(shape=input_shape)
x = preprocessing.Rescaling(1.0 / 255)(inputs) # Rescale inputs
outputs = keras.applications.ResNet50( # Add the rest of the model
    weights=None, input_shape=input_shape, classes=classes
)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")
# steps_per_epoch=5 keeps this demo short — it is not a full epoch
model.fit(train_dataset, steps_per_epoch=5)
# + [markdown] colab_type="text"
# You can see a similar setup in action in the example
# [image classification from scratch](https://keras.io/examples/vision/image_classification_from_scratch/).
# + [markdown] colab_type="text"
# ### Normalizing numerical features
# + colab_type="code"
# Load some data
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
# Flatten each image into a single feature vector so it can feed a Dense layer
x_train = x_train.reshape((len(x_train), -1))
input_shape = x_train.shape[1:]
classes = 10

# Create a Normalization layer and set its internal state using the training data
normalizer = preprocessing.Normalization()
normalizer.adapt(x_train)

# Create a model that includes the normalization layer
inputs = keras.Input(shape=input_shape)
x = normalizer(inputs)
outputs = layers.Dense(classes, activation="softmax")(x)
model = keras.Model(inputs, outputs)

# Train the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
model.fit(x_train, y_train)
# + [markdown] colab_type="text"
# ### Encoding string categorical features via one-hot encoding
# + colab_type="code"
# Define some toy data
data = tf.constant([["a"], ["b"], ["c"], ["b"], ["c"], ["a"]])

# Use StringLookup to build an index of the feature values and encode output.
lookup = preprocessing.StringLookup(output_mode="one_hot")
lookup.adapt(data)

# Convert new test data (which includes unknown feature values)
# "d", "e" and "" were not seen during adapt(), so they map to the
# out-of-vocabulary index (see the note below this cell).
test_data = tf.constant([["a"], ["b"], ["c"], ["d"], ["e"], [""]])
encoded_data = lookup(test_data)
print(encoded_data)
# + [markdown] colab_type="text"
# Note that, here, index 0 is reserved for out-of-vocabulary values
# (values that were not seen during `adapt()`).
#
# You can see the `StringLookup` in action in the
# [Structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/)
# example.
# + [markdown] colab_type="text"
# ### Encoding integer categorical features via one-hot encoding
# + colab_type="code"
# Define some toy data
data = tf.constant([[10], [20], [20], [10], [30], [0]])

# Use IntegerLookup to build an index of the feature values and encode output.
lookup = preprocessing.IntegerLookup(output_mode="one_hot")
lookup.adapt(data)

# Convert new test data (which includes unknown feature values)
# 50 and 60 were not seen during adapt(), so they map to the
# out-of-vocabulary index; 0 maps to the reserved missing-value index
# (see the note below this cell).
test_data = tf.constant([[10], [10], [20], [50], [60], [0]])
encoded_data = lookup(test_data)
print(encoded_data)
# + [markdown] colab_type="text"
# Note that index 0 is reserved for missing values (which you should specify as the value
# 0), and index 1 is reserved for out-of-vocabulary values (values that were not seen
# during `adapt()`). You can configure this by using the `mask_token` and `oov_token`
# constructor arguments of `IntegerLookup`.
#
# You can see the `IntegerLookup` in action in the example
# [structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/).
# + [markdown] colab_type="text"
# ### Applying the hashing trick to an integer categorical feature
#
# If you have a categorical feature that can take many different values (on the order of
# 10e3 or higher), where each value only appears a few times in the data,
# it becomes impractical and ineffective to index and one-hot encode the feature values.
# Instead, it can be a good idea to apply the "hashing trick": hash the values to a vector
# of fixed size. This keeps the size of the feature space manageable, and removes the need
# for explicit indexing.
# + colab_type="code"
# Sample data: 10,000 random integers with values between 0 and 100,000
data = np.random.randint(0, 100000, size=(10000, 1))

# Use the Hashing layer to hash the values into one of 64 bins
# (hashed values fall in the range [0, 64), i.e. bins 0 through 63)
hasher = preprocessing.Hashing(num_bins=64, salt=1337)

# Use the CategoryEncoding layer to multi-hot encode the hashed values
encoder = preprocessing.CategoryEncoding(num_tokens=64, output_mode="multi_hot")
encoded_data = encoder(hasher(data))
print(encoded_data.shape)
# + [markdown] colab_type="text"
# ### Encoding text as a sequence of token indices
#
# This is how you should preprocess text to be passed to an `Embedding` layer.
# + colab_type="code"
# Define some text data to adapt the layer
adapt_data = tf.constant(
    [
        "The Brain is wider than the Sky",
        "For put them side by side",
        "The one the other will contain",
        "With ease and You beside",
    ]
)
# Create a TextVectorization layer: output_mode="int" maps each token to its
# integer index in the learned vocabulary
text_vectorizer = preprocessing.TextVectorization(output_mode="int")
# Index the vocabulary via `adapt()`
text_vectorizer.adapt(adapt_data)

# Try out the layer
print(
    "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)

# Create a simple model
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(input_dim=text_vectorizer.vocabulary_size(), output_dim=16)(inputs)
x = layers.GRU(8)(x)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)

# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
    (["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0])
)
# Preprocess the string inputs, turning them into int sequences
train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the int sequences
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)

# For inference, you can export a model that accepts strings as input
# by chaining the TextVectorization layer in front of the trained model
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)

# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
# + [markdown] colab_type="text"
# You can see the `TextVectorization` layer in action, combined with an `Embedding` layer,
# in the example
# [text classification from scratch](https://keras.io/examples/nlp/text_classification_from_scratch/).
#
# Note that when training such a model, for best performance, you should always
# use the `TextVectorization` layer as part of the input pipeline.
# + [markdown] colab_type="text"
# ### Encoding text as a dense matrix of ngrams with multi-hot encoding
#
# This is how you should preprocess text to be passed to a `Dense` layer.
# + colab_type="code"
# Define some text data to adapt the layer
adapt_data = tf.constant(
    [
        "The Brain is wider than the Sky",
        "For put them side by side",
        "The one the other will contain",
        "With ease and You beside",
    ]
)
# Instantiate TextVectorization with "multi_hot" output_mode
# and ngrams=2 (index all bigrams)
text_vectorizer = preprocessing.TextVectorization(output_mode="multi_hot", ngrams=2)
# Index the bigrams via `adapt()`
text_vectorizer.adapt(adapt_data)

# Try out the layer
print(
    "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)

# Create a simple model
inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))
outputs = layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)

# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
    (["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0])
)
# Preprocess the string inputs, turning them into multi-hot encoded ngram vectors
train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the encoded ngram vectors
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)

# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)

# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
# + [markdown] colab_type="text"
# ### Encoding text as a dense matrix of ngrams with TF-IDF weighting
#
# This is an alternative way of preprocessing text before passing it to a `Dense` layer.
# + colab_type="code"
# Define some text data to adapt the layer
adapt_data = tf.constant(
    [
        "The Brain is wider than the Sky",
        "For put them side by side",
        "The one the other will contain",
        "With ease and You beside",
    ]
)
# Instantiate TextVectorization with "tf-idf" output_mode
# (multi-hot with TF-IDF weighting) and ngrams=2 (index all bigrams)
text_vectorizer = preprocessing.TextVectorization(output_mode="tf-idf", ngrams=2)
# Index the bigrams and learn the TF-IDF weights via `adapt()`
text_vectorizer.adapt(adapt_data)

# Try out the layer
print(
    "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)

# Create a simple model
inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))
outputs = layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)

# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
    (["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0])
)
# Preprocess the string inputs, turning them into TF-IDF weighted ngram vectors
train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the TF-IDF weighted ngram vectors
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)

# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)

# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
# + [markdown] colab_type="text"
# ## Important gotchas
#
# ### Working with lookup layers with very large vocabularies
#
# You may find yourself working with a very large vocabulary in a `TextVectorization`, a `StringLookup` layer,
# or an `IntegerLookup` layer. Typically, a vocabulary larger than 500MB would be considered "very large".
#
# In such case, for best performance, you should avoid using `adapt()`.
# Instead, pre-compute your vocabulary in advance
# (you could use Apache Beam or TF Transform for this)
# and store it in a file. Then load the vocabulary into the layer at construction
# time by passing the filepath as the `vocabulary` argument.
#
#
# ### Using lookup layers on a TPU pod or with `ParameterServerStrategy`.
#
# There is an outstanding issue that causes performance to degrade when using
# a `TextVectorization`, `StringLookup`, or `IntegerLookup` layer while
# training on a TPU pod or on multiple machines via `ParameterServerStrategy`.
# This is slated to be fixed in TensorFlow 2.7.
| guides/ipynb/preprocessing_layers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="moved-collapse" pycharm={"name": "#%% md\n"}
# # Monodepth Estimation with OpenVINO
#
# This tutorial demonstrates Monocular Depth Estimation with MidasNet in OpenVINO. Model information can be found [here](https://docs.openvino.ai/latest/omz_models_model_midasnet.html).
#
# 
#
# ### What is Monodepth?
# Monocular Depth Estimation is the task of estimating scene depth using a single image. It has many potential applications in robotics, 3D reconstruction, medical imaging and autonomous systems. For this demo, we use a neural network model called [MiDaS](https://github.com/intel-isl/MiDaS) which was developed by the [Embodied AI Foundation](https://www.embodiedaifoundation.org/). Check out the research paper below to learn more.
#
# <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, ["Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer,"](https://ieeexplore.ieee.org/document/9178977) in IEEE Transactions on Pattern Analysis and Machine Intelligence, doi: 10.1109/TPAMI.2020.3019967.
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Preparation
#
# ### Imports
# + id="ahead-spider"
import sys
import time
from pathlib import Path
import cv2
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import (
HTML,
FileLink,
Pretty,
ProgressBar,
Video,
clear_output,
display,
)
from openvino.runtime import Core
sys.path.append("../utils")
from notebook_utils import load_image
# + [markdown] id="contained-office"
# ### Settings
# + id="amber-lithuania"
# Inference device name passed to OpenVINO's compile_model (e.g. "CPU")
DEVICE = "CPU"
# Path to the OpenVINO IR model topology (.xml); the weights file (.bin)
# is derived from it when the model is read.
MODEL_FILE = "model/MiDaS_small.xml"
model_xml_path = Path(MODEL_FILE)
# -
# ## Functions
# +
def normalize_minmax(data):
    """Normalize the values in `data` to the range [0, 1].

    `data` is expected to be a numpy array (or any object supporting
    `.min()`/`.max()` and elementwise arithmetic). The smallest value maps
    to 0 and the largest to 1. If all values are equal, the original
    expression divided by zero and produced NaNs; an all-zero array is
    returned instead.
    """
    data_min = data.min()
    data_range = data.max() - data_min
    if data_range == 0:
        # Constant input: every value is simultaneously the min and the max.
        return data - data_min
    return (data - data_min) / data_range
def convert_result_to_image(result, colormap="viridis"):
    """
    Map a raw network result to an RGB uint8 image via a matplotlib colormap.

    `result` is a single network output with shape (1, H, W). Its values are
    rescaled to [0, 1], passed through `colormap` (any matplotlib colormap
    name; see https://matplotlib.org/stable/tutorials/colors/colormaps.html)
    and scaled up to integer pixel values in 0-255.
    """
    colormap_fn = matplotlib.cm.get_cmap(colormap)
    # Drop the leading batch axis, then rescale values to [0, 1].
    depth_map = normalize_minmax(result.squeeze(0))
    # The colormap yields RGBA floats in [0, 1]; keep RGB and scale to 0-255.
    colored = colormap_fn(depth_map)[:, :, :3] * 255
    return colored.astype(np.uint8)
def to_rgb(image_data) -> np.ndarray:
    """Return `image_data` with its channel order converted from BGR to RGB."""
    rgb_image = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
    return rgb_image
# + [markdown] id="sensitive-wagner"
# ## Load the Model
#
# Load the model in Inference Engine with `ie.read_model` and compile it for the specified device with `ie.compile_model`. Get input and output keys and the expected input shape for the model.
# + id="complete-brother"
# Initialize the OpenVINO runtime and read the IR model: the .xml file
# holds the network topology, the matching .bin file holds the weights.
ie = Core()
model = ie.read_model(model=model_xml_path, weights=model_xml_path.with_suffix(".bin"))
# Compile the model for the device selected in the Settings cell.
compiled_model = ie.compile_model(model=model, device_name=DEVICE)

input_key = compiled_model.input(0)
output_key = compiled_model.output(0)

# The input layout is NCHW (see the markdown below), so entries 2 and 3
# of the shape are the expected image height and width.
network_input_shape = list(input_key.shape)
network_image_height, network_image_width = network_input_shape[2:]
# + [markdown] id="compact-bargain"
# ## Monodepth on Image
#
# ### Load, resize and reshape input image
#
# The input image is read with OpenCV, resized to network input size, and reshaped to (N,C,H,W) (N=number of images, C=number of channels, H=height, W=width).
# + colab={"base_uri": "https://localhost:8080/"} id="central-psychology" outputId="d864ee96-3fbd-488d-da1a-88e730f34aad" tags=[]
IMAGE_FILE = "data/coco_bike.jpg"
image = load_image(path=IMAGE_FILE)

# resize to input shape for network. cv2.resize expects dsize as
# (width, height), so the network width must come first. (The original
# code passed (height, width); that only worked because the MiDaS input
# is square.)
resized_image = cv2.resize(src=image, dsize=(network_image_width, network_image_height))

# reshape image to network input shape NCHW:
# (H, W, C) -> (C, H, W) -> (1, C, H, W)
input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)
# + [markdown] id="taken-spanking"
# ### Do inference on image
#
# Do the inference, convert the result to an image, and resize it to the original image shape
# + id="banner-kruger"
# Run inference on the prepared image and fetch the tensor for the output key
result = compiled_model([input_image])[output_key]
# convert network result of disparity map to an image that shows
# distance as colors
result_image = convert_result_to_image(result=result)
# resize back to original image shape. cv2.resize expects shape
# in (width, height), [::-1] reverses the (height, width) shape to match this
result_image = cv2.resize(result_image, image.shape[:2][::-1])
# -
# ### Display monodepth image
# + colab={"base_uri": "https://localhost:8080/", "height": 867} id="ranging-executive" outputId="30373e8e-34e9-4820-e32d-764aa99d4b25"
# Show the source image (left) and the colored depth map (right) side by side
fig, ax = plt.subplots(1, 2, figsize=(20, 15))
ax[0].imshow(to_rgb(image))  # convert BGR to RGB for matplotlib
ax[1].imshow(result_image);  # trailing semicolon suppresses the repr output
# + [markdown] id="descending-cache"
# ## Monodepth on Video
#
# By default, only the first few seconds of the video are processed, in order to quickly check that everything works. Change NUM_SECONDS in the cell below to modify this. Set NUM_SECONDS to 0 to process the whole video.
# -
# ### Video Settings
# + colab={"base_uri": "https://localhost:8080/"} id="terminal-dividend" outputId="87f5ada0-8caf-49c3-fe54-626e2b1967f3"
# Video source: https://www.youtube.com/watch?v=fu1xcQdJRws (Public Domain)
VIDEO_FILE = "data/Coco Walking in Berkeley.mp4"
# Number of seconds of input video to process. Set to 0 to process
# the full video.
NUM_SECONDS = 4
# Set ADVANCE_FRAMES to 1 to process every frame from the input video
# Set ADVANCE_FRAMES to 2 to process every second frame. This reduces
# the time it takes to process the video
ADVANCE_FRAMES = 2
# Set SCALE_OUTPUT to reduce the size of the result video
# If SCALE_OUTPUT is 0.5, the width and height of the result video
# will be half the width and height of the input video
SCALE_OUTPUT = 0.5
# The format to use for video encoding. vp09 is slow,
# but it works on most systems.
# Try the THEO encoding if you have FFMPEG installed.
# FOURCC = cv2.VideoWriter_fourcc(*"THEO")
FOURCC = cv2.VideoWriter_fourcc(*"vp09")
# Create Path objects for the input video and the resulting video
output_directory = Path("output")
output_directory.mkdir(exist_ok=True)  # exist_ok: do not fail if it already exists
result_video_path = output_directory / f"{Path(VIDEO_FILE).stem}_monodepth.mp4"
# -
# ### Load Video
#
# Load video from `VIDEO_FILE`, set in the *Video Settings* cell above. Open the video to read the frame width and height and fps, and compute values for these properties for the monodepth video.
# +
# Open the video and read a single frame to determine its dimensions
cap = cv2.VideoCapture(str(VIDEO_FILE))
ret, image = cap.read()
if not ret:
    raise ValueError(f"The video at {VIDEO_FILE} cannot be read.")
input_fps = cap.get(cv2.CAP_PROP_FPS)
input_video_frame_height, input_video_frame_width = image.shape[:2]

# The result video plays at a lower frame rate because only every
# ADVANCE_FRAMES-th frame is processed; its frames are scaled by SCALE_OUTPUT.
target_fps = input_fps / ADVANCE_FRAMES
target_frame_height = int(input_video_frame_height * SCALE_OUTPUT)
target_frame_width = int(input_video_frame_width * SCALE_OUTPUT)
cap.release()

print(
    f"The input video has a frame width of {input_video_frame_width}, "
    f"frame height of {input_video_frame_height} and runs at {input_fps:.2f} fps"
)
print(
    "The monodepth video will be scaled with a factor "
    f"{SCALE_OUTPUT}, have width {target_frame_width}, "
    f" height {target_frame_height}, and run at {target_fps:.2f} fps"
)
# -
# ### Do Inference on a Video and Create Monodepth Video
# + colab={"base_uri": "https://localhost:8080/"} id="present-albany" outputId="600edb69-af12-44dc-ec8e-95005b74179c" tags=[]
# Initialize variables
input_video_frame_nr = 0
start_time = time.perf_counter()
total_inference_duration = 0

# Open input video
cap = cv2.VideoCapture(str(VIDEO_FILE))

# Create result video. The output frame is twice the target width because
# the input frame and the depth frame are written side by side.
out_video = cv2.VideoWriter(
    str(result_video_path),
    FOURCC,
    target_fps,
    (target_frame_width * 2, target_frame_height),
)

# num_frames == 0 means "process the whole video" (see total_frames below).
num_frames = int(NUM_SECONDS * input_fps)
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) if num_frames == 0 else num_frames
progress_bar = ProgressBar(total=total_frames)
progress_bar.display()

try:
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            cap.release()
            break

        if input_video_frame_nr >= total_frames:
            break

        # Only process every second frame
        # Prepare frame for inference
        # resize to input shape for network
        # NOTE(review): cv2.resize expects dsize as (width, height); passing
        # (height, width) works here only because the MiDaS input is square --
        # confirm before swapping in a non-square model.
        resized_image = cv2.resize(src=image, dsize=(network_image_height, network_image_width))
        # reshape image to network input shape NCHW
        input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)

        # Do inference and accumulate the pure-inference time separately from
        # the total wall-clock time
        inference_start_time = time.perf_counter()
        result = compiled_model([input_image])[output_key]
        inference_stop_time = time.perf_counter()
        inference_duration = inference_stop_time - inference_start_time
        total_inference_duration += inference_duration

        # Refresh the progress display every 10 processed frames
        if input_video_frame_nr % (10 * ADVANCE_FRAMES) == 0:
            clear_output(wait=True)
            progress_bar.display()
            # input_video_frame_nr // ADVANCE_FRAMES gives the number of
            # frames that have been processed by the network
            display(
                Pretty(
                    f"Processed frame {input_video_frame_nr // ADVANCE_FRAMES}"
                    f"/{total_frames // ADVANCE_FRAMES}. "
                    f"Inference time per frame: {inference_duration:.2f} seconds "
                    f"({1/inference_duration:.2f} FPS)"
                )
            )

        # Transform network result to RGB image
        result_frame = to_rgb(convert_result_to_image(result))
        # Resize image and result to target frame shape
        result_frame = cv2.resize(result_frame, (target_frame_width, target_frame_height))
        image = cv2.resize(image, (target_frame_width, target_frame_height))
        # Put image and result side by side
        stacked_frame = np.hstack((image, result_frame))
        # Save frame to video
        out_video.write(stacked_frame)

        # Jump ahead in the input video to skip the unprocessed frames;
        # property 1 is cv2.CAP_PROP_POS_FRAMES.
        input_video_frame_nr = input_video_frame_nr + ADVANCE_FRAMES
        cap.set(1, input_video_frame_nr)

        progress_bar.progress = input_video_frame_nr
        progress_bar.update()

except KeyboardInterrupt:
    print("Processing interrupted.")
finally:
    clear_output()
    # NOTE(review): when NUM_SECONDS == 0, num_frames is 0 and the counts /
    # FPS reported below will be 0 -- consider counting processed frames in
    # the loop instead. Also, the f-string below is missing a space between
    # the "." and "Inference FPS:".
    processed_frames = num_frames // ADVANCE_FRAMES
    out_video.release()
    cap.release()
    end_time = time.perf_counter()
    duration = end_time - start_time
    print(
        f"Processed {processed_frames} frames in {duration:.2f} seconds. "
        f"Total FPS (including video processing): {processed_frames/duration:.2f}."
        f"Inference FPS: {processed_frames/total_inference_duration:.2f} "
    )
print(f"Monodepth Video saved to '{str(result_video_path)}'.")
# + [markdown] id="bZ89ZI369KjA"
# ### Display Monodepth Video
# + tags=[]
# Embed the result video in the notebook; fall back to a single frame when
# the video file could not be written.
video = Video(result_video_path, width=800, embed=True)
if not result_video_path.exists():
    plt.imshow(stacked_frame)
    raise ValueError("OpenCV was unable to write the video file. Showing one video frame.")
else:
    print(f"Showing monodepth video saved at\n{result_video_path.resolve()}")
    print(
        "If you cannot see the video in your browser, please click on the "
        "following link to download the video "
    )
    # Render the file link with a download attribute so browsers save it
    video_link = FileLink(result_video_path)
    video_link.html_link_str = "<a href='%s' download>%s</a>"
    display(HTML(video_link._repr_html_()))
    display(video)
| notebooks/201-vision-monodepth/201-vision-monodepth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('bmh')
import linearsolve as ls
# %matplotlib inline
# # New-Keynesian Model
#
# This program verifies the math underlying the interactive simulation of the new-Keynesian model: https://www.briancjenkins.com/simulations/nk.html
#
#
#
# ## Equilibrium Conditions
#
# The equilibrium conditions of the new-Keynesian model:
#
# \begin{align}
# y_{t} & = E_ty_{t+1} - \frac{1}{\sigma} r_t + g_t\\
# \pi_t & = \beta E_t\pi_{t+1} + \kappa y_t + u_t\\
# i_t & = \phi_{\pi} \pi_t + \phi_y y_t + v_t\\
# r_t & = i_t - E_t \pi_{t+1}\\
# g_{t+1} & = \rho_g g_{t} + \epsilon^g_{t+1}\\
# u_{t+1} & = \rho_u u_{t} + \epsilon^u_{t+1}\\
# v_{t+1} & = \rho_v v_{t} + \epsilon^v_{t+1}
# \end{align}
#
# ## Analytic Solution
#
# The model's endogenous variables as functions of exogenous state variables:
#
# \begin{align}
# y_{t} & = a_1g_t + a_2u_t + a_3v_t\\
# \pi_{t} & = b_1g_t + b_2u_t + b_3v_t\\
# i_{t} & = c_1g_t + c_2u_t + c_3v_t\\
# r_{t} & = d_1g_t + d_2u_t + d_3v_t
# \end{align}
#
# where:
#
# \begin{align}
# a_1 & = \frac{1-\beta\rho_g}{(1-\beta\rho_g)(1-\rho_g+\sigma^{-1}\phi_y)+ \sigma^{-1}\kappa(\phi_{\pi}-\rho_g)}\\
# a_2 & = -\frac{\sigma^{-1}(\phi_{\pi} - \rho_u)}{(1-\beta\rho_u)(1-\rho_u+\sigma^{-1}\phi_y)+ \sigma^{-1}\kappa(\phi_{\pi}-\rho_u)}\\
# a_3 & = -\frac{\sigma^{-1}(1-\beta\rho_v)}{(1-\beta\rho_v)(1-\rho_v+\sigma^{-1}\phi_y)+ \sigma^{-1}\kappa(\phi_{\pi}-\rho_v)}
# \end{align}
#
# and:
#
# \begin{align}
# b_1 & = \frac{\kappa}{(1-\beta\rho_g)(1-\rho_g+\sigma^{-1}\phi_y)+ \sigma^{-1}\kappa(\phi_{\pi}-\rho_g)}\\
# b_2 & = \frac{1-\rho_u+\sigma^{-1}\phi_y}{(1-\beta\rho_u)(1-\rho_u+\sigma^{-1}\phi_y)+ \sigma^{-1}\kappa(\phi_{\pi}-\rho_u)}\\
# b_3 & = -\frac{\sigma^{-1}\kappa}{(1-\beta\rho_v)(1-\rho_v+\sigma^{-1}\phi_y)+ \sigma^{-1}\kappa(\phi_{\pi}-\rho_v)}\\
# \end{align}
#
# and:
#
# \begin{align}
# c_1 & = \phi_ya_1 + \phi_{\pi}b_1\\
# c_2 & = \phi_ya_2 + \phi_{\pi}b_2\\
# c_3 & = 1+ \phi_ya_3 + \phi_{\pi}b_3\\
# \end{align}
#
# and:
#
# \begin{align}
# d_1 & = c_1 - \rho_g b_1\\
# d_2 & = c_2 - \rho_u b_2\\
# d_3 & = c_3 - \rho_v b_3\\
# \end{align}
#
#
# ## Compute Solution with `linearsolve`
# +
# Input model parameters (see the equilibrium conditions above):
# beta: Phillips-curve discount parameter; sigma: Euler-equation coefficient;
# kappa: Phillips-curve slope; phi_pi / phi_y: policy-rule responses to
# inflation and output; rho_g / rho_u / rho_v: AR(1) persistences of the
# demand, inflation and monetary policy shocks.
beta = np.exp(-2/100)
sigma = 1
kappa = 0.25
phi_pi = 1.5
phi_y = 0.5
rho_g = 0.25
rho_u = 0.35
rho_v = 0.5

# Collect the parameters in a pandas Series built from a dict. Building the
# Series this way creates real Series elements; the original attribute-style
# assignments (parameters.beta = beta) on an empty Series only attach Python
# attributes and leave the Series itself empty, so code that indexes or
# iterates the Series would not see the parameters. Attribute access
# (parameters.beta) still works on the populated Series.
parameters = pd.Series({
    'beta': beta,
    'sigma': sigma,
    'kappa': kappa,
    'phi_pi': phi_pi,
    'phi_y': phi_y,
    'rho_g': rho_g,
    'rho_u': rho_u,
    'rho_v': rho_v,
})
def equilibrium_equations(variables_forward,variables_current,parameters):
    """Return the model's equilibrium conditions as an array of residuals.

    Each entry is (right-hand side) - (left-hand side) of one equilibrium
    condition, so every entry is zero in equilibrium. The ordering matches
    the equations in the markdown above: Euler equation, NK Phillips curve,
    interest rate rule, Fisher equation, then the three exogenous AR(1)
    processes for g, u and v.
    """
    par = parameters
    nxt = variables_forward
    now = variables_current

    residuals = [
        # Euler equation
        nxt.y - 1 / par.sigma * now.r + now.g - now.y,
        # New-Keynesian Phillips curve
        par.beta * nxt.pi + par.kappa * now.y + now.u - now.pi,
        # Interest rate rule for monetary policy
        par.phi_y * now.y + par.phi_pi * now.pi + now.v - now.i,
        # Fisher equation
        now.i - nxt.pi - now.r,
        # Exogenous demand process
        par.rho_g * now.g - nxt.g,
        # Exogenous inflation process
        par.rho_u * now.u - nxt.u,
        # Exogenous monetary policy process
        par.rho_v * now.v - nxt.v,
    ]
    return np.array(residuals)
# Initialize the nk model: g, u and v are the three exogenous state
# variables (nstates=3); the remaining entries of varNames are the
# endogenous variables i, r, y and pi.
nk = ls.model(equilibrium_equations,
              nstates=3,
              varNames=['g','u','v','i','r','y','pi'],
              parameters=parameters)

# Set the steady state of the nk: every variable is zero in steady state
nk.set_ss([0,0,0,0,0,0,0])

# Find the log-linear approximation around the non-stochastic steady state.
# loglinear=False requests a linear (not log-linear) approximation --
# NOTE(review): presumably because the zero steady state has no well-defined
# log; confirm against the linearsolve documentation.
nk.approximate_and_solve(loglinear=False)

# Solve the nk
nk.solve_klein(nk.a,nk.b)
# -
# ## Compute Solution Directly
# +
# Each coefficient shares a denominator that depends only on the relevant
# shock's persistence parameter (see the formulas in the markdown above),
# so compute the three denominators once.
den_g = (1-beta*rho_g)*(1-rho_g+phi_y/sigma)+kappa/sigma*(phi_pi-rho_g)
den_u = (1-beta*rho_u)*(1-rho_u+phi_y/sigma)+kappa/sigma*(phi_pi-rho_u)
den_v = (1-beta*rho_v)*(1-rho_v+phi_y/sigma)+kappa/sigma*(phi_pi-rho_v)

# Output coefficients
a1 = (1-beta*rho_g)/den_g
a2 = -(phi_pi-rho_u)/sigma/den_u
a3 = -(1-beta*rho_v)/sigma/den_v

# Inflation coefficients
b1 = kappa/den_g
b2 = (1-rho_u+phi_y/sigma)/den_u
b3 = -kappa/sigma/den_v

# Nominal interest rate coefficients
c1 = phi_y*a1+phi_pi*b1
c2 = phi_y*a2+phi_pi*b2
c3 = phi_y*a3+phi_pi*b3+1

# Real interest rate coefficients
d1 = c1-rho_g*b1
d2 = c2-rho_u*b2
d3 = c3-rho_v*b3
# -
# ## Compare Analytic and Numeric Solutions
# +
# nk.f maps the states (g, u, v) to the non-state variables in varNames
# order: row 0 = i, row 1 = r, row 2 = y, row 3 = pi. Every printed
# difference between the analytic and numeric coefficients should be
# numerically zero.
print('verify a1,a2, a3:')
print(a1-nk.f[2,0])
print(a2-nk.f[2,1])
print(a3-nk.f[2,2])
print('\n')
print('verify b1,b2, b3:')
print(b1-nk.f[3,0])
print(b2-nk.f[3,1])
print(b3-nk.f[3,2])
print('\n')
print('verify c1,c2, c3:')
print(c1-nk.f[0,0])
print(c2-nk.f[0,1])
print(c3-nk.f[0,2])
print('\n')
print('verify d1,d2, d3:')
print(d1-nk.f[1,0])
print(d2-nk.f[1,1])
print(d3-nk.f[1,2])
# -
# ## Plot Simulations
# +
# Compute impulse responses and plot. The shock sizes are presumably given
# in state order (g, u, v) -- TODO confirm against the linearsolve docs.
nk.impulse(T=15,t0=1,shocks=[0.2,0.5,1])

# Create the figure and axes: one panel per shock
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(3,1,1)
ax2 = fig.add_subplot(3,1,2)
ax3 = fig.add_subplot(3,1,3)

# Plot commands: show the responses of y, pi, r and i to each shock
nk.irs['e_g'][['y','pi','r','i']].plot(lw='5',alpha=0.5,grid=True,title='Demand shock',ax=ax1).legend(loc='upper right',ncol=5)
nk.irs['e_u'][['y','pi','r','i']].plot(lw='5',alpha=0.5,grid=True,title='Inflation shock',ax=ax2).legend(loc='upper right',ncol=5)
nk.irs['e_v'][['y','pi','r','i']].plot(lw='5',alpha=0.5,grid=True,title='Interest rate shock',ax=ax3).legend(loc='upper right',ncol=5)
fig.tight_layout()
# -
| python/nk-simulation-verification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Worksheet 1 exercises - due Sep 27 @ 2pm
# #### Please upload to Blackboard the .ipynb file, containing your last name.
# #### Original work statement:
#
# Please write your name here to indicate that your worksheet is the result of your own work, and you have not copied from sources without citing them (this is plagiarism and is not acceptable). Identical or very similar worksheets will share the credit.
#
# #### Your name:
# 1\. Form the following 2-D array, without typing it in explicitly: (hint: use .reshape)
#
# array<br>
# ([[ 1, 3, 5], <br>
# [ 7, 9, 11], <br>
# [13, 15, 17], <br>
# [19, 21, 23], <br>
# [25, 27, 29]])
# 2\. Form a new array that contains the first two rows and columns of the one above.
# 3\. Create a numpy array that contains the 26 letters of the alphabet, without typing. Hint: you can obtain a string with all the letters by using string, the text manipulation module, and typing:
#
# import string
#
# x = string.ascii_lowercase
# 4\. Create an array containing 26 random numbers between 0 and 1, filter out those that are > 0.5, and return the set of letters from the array above corresponding to those indices (in other words, apply the same mask to the array above. Bonus points if you can do it with a one liner).
# 5\. The code below (from the matplotlib tutorial page, https://matplotlib.org/tutorials/introductory/sample_plots.html#sphx-glr-tutorials-introductory-sample-plots-py) shows an example of how to make subplots. Add a comment after every line of code to explain what it does.
# +
import numpy as np                 # numerical arrays and math functions
import matplotlib.pyplot as plt    # plotting library

x1 = np.linspace(0.0, 5.0)  # 50 evenly spaced values from 0 to 5 (default num=50)
x2 = np.linspace(0.0, 2.0)  # 50 evenly spaced values from 0 to 2

y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)  # cosine damped by a decaying exponential
y2 = np.cos(2 * np.pi * x2)                # undamped cosine

plt.subplot(2, 1, 1)               # select panel 1 of a 2-row, 1-column grid
plt.plot(x1, y1, 'o-')             # circle markers joined by solid lines
plt.title('A tale of 2 subplots')  # title above the top panel
plt.ylabel('Damped oscillation')   # y-axis label for the top panel

plt.subplot(2, 1, 2)    # select panel 2 (the bottom panel)
plt.plot(x2, y2, '.-')  # point markers joined by solid lines
plt.xlabel('time (s)')  # x-axis label (shared bottom axis)
plt.ylabel('Undamped')  # y-axis label for the bottom panel

plt.show()  # render the figure
# -
# 6\. Read into a numpy array the data set "HIP_star.dat". This data set contains information about stars; we are only interested in columns "Vmag", 'Plx', and "B-V", so make sure you only read those. Hint: open the file with a text editor first, to see if/how many lines you need to skip.
#
# http://astrostatistics.psu.edu/datasets/HIP_star.html
#
#
#
# Read columns 1, 4 and 8 of the HIP catalogue, skipping the header line.
# NOTE(review): assumed to be Vmag, Plx and B-V per the prose above — confirm
# against the file's header before relying on the column order.
d = np.genfromtxt('HIP_star.dat', usecols=(1,4,8), skip_header=1)
d
# 7\. Calculate the shape of your array.
# 8\. Define a function, logL, that calculates the (log of) a star's luminosity starting from Vmag (the visual brightness) and Plx (the parallax angle, which is inversely proportional to a star's distance). The function should implement the following relationship:
#
# LogL = (15 - Vmag - 5 * log(Plx))/2.5
#
# Hint: To implement Log = base 10 log, you can use the function np.log10().
#
#
#
# 9\. Make sure your function is vectorized (feed it a vector as argument and check that it runs).
#
#
#
# 10\. Make a scatter plot that has B-V for the stars on the x axis (make sure you index your array correctly!), and their log luminosity (calculated using the function you just made) on the y axis. This is called a H-R diagram (Hertzsprung Russell diagram). It encodes information about the temperature of stars (expressed by color, or B-V) and their luminosity.
#
# Add axes titles: B-V for the x axis, Log L (as appropriate) for y axis
#
# Adjust axes limits to be [-0.5,3] for B-V, [-3, 4] for log L
#
#
# 11\. Add information to the above plot using colors and sizes. Make a scatter plot with colors arranged according to Vmag, and another scatter plot where the size of the markers is proportional to the Vmag of each point.
#
#
#
# 12\. Visualize B-V as a histogram.
#
#
#
# 13\. For the B-V array, calculate its mean and its median, and its standard deviation.
#
#
#
# Extra points:
#
# 14\. Calculate the 10 and 90 percentile levels of the B-V array, and explain what they are.
#
#
# 15\. Are the mean and the median different? In either case, what does this tell us about the distribution?
#
| Worksheets/WS1_IntroToPython.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Projeto Porto Inteligente
# #### Union Squad – <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# Este projeto visa ajudar na virtualização da cadeia logística integrada dos portos brasileiros
# ### Importando pacotes e fazendos ajustes:
import pandas as pd
import numpy as np
import seaborn as sns
import palettable
import matplotlib.pyplot as plt
# %matplotlib inline
from jupyterthemes import jtplot
jtplot.style(theme='monokai', context='notebook', ticks=True, grid=False)
# +
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", 10)
import matplotlib.style
import matplotlib as mpl
# White text/ticks to match the dark theme, plus larger titles and figures.
_rc_overrides = {
    'grid.color': 'white',
    'axes.titlecolor': 'white',
    'axes.labelcolor': 'white',
    'xtick.color': 'white',
    'ytick.color': 'white',
    'axes.titlesize': '20.0',
    'figure.figsize': '13.0, 8.0',
    'legend.framealpha': '0.8',
}
for _rc_name, _rc_value in _rc_overrides.items():
    mpl.rcParams[_rc_name] = _rc_value
# -
# ### Importando os dados de estadia:
# Berth-stay records (semicolon-separated export).
stays = pd.read_csv('/Users/pedrocerbino/Downloads/ESTADIA/Estadia.csv',sep=';')
# Parse the four scheduled/actual (un)berthing timestamps; bad values become NaT.
for i in ['Atracação Prevista','Atracação Efetiva','Desatracação Prevista','Desatracação Efetiva']:
    stays[i] = pd.to_datetime(stays[i],errors='coerce')
# Keep only the berth name, the middle token of "area > berth > bollard".
stays['Local(is) Atracação (área do porto > berço > cabeço)'] = [i.split(' > ')[1] for i in stays[
    'Local(is) Atracação (área do porto > berço > cabeço)']]
stays.rename(columns={'Local(is) Atracação (área do porto > berço > cabeço)':'Berço'},inplace=True)
stays.drop(columns=['Local(is) e Data(s) Reatracação (área do porto > berço > ca'],inplace=True)
# Port code, e.g. "BRARB" out of "BRARB - ARATU".
stays['Código Bitrigrama'] = [i.split(' - ')[0] for i in stays['Porto de estadia atual']]
stays.head(2)
# Relative frequency of stays per port (top 15).
stays['Porto de estadia atual'].value_counts(True)[:15]
# Mean gap (hours) between scheduled and actual berthing, per (date, hour).
dex = stays['Atracação Efetiva'].astype('datetime64[ns]')
data = pd.DataFrame({'Date':dex.dt.date,'Hour':dex.dt.hour,'Data':(stays
    ['Atracação Prevista'] - stays['Atracação Efetiva']).astype('timedelta64[h]')}).groupby(
    ['Date','Hour'],as_index=False).mean()
data['Média Diária'] = data.Data.rolling(24).mean()  # 24-row rolling mean ≈ daily average
data.set_index(data.Date,inplace=True)
data.drop(columns=['Date'],inplace=True)
data
import plotly.graph_objects as go
# BUG FIX: `plotly.io` is referenced below, but only `plotly.graph_objects`
# was imported (as `go`), so the bare name `plotly` raised NameError.
import plotly.io
# Hourly berthing gap (red) overlaid with its 24-hour rolling mean (yellow).
fig = go.Figure()
fig.add_trace(go.Scatter(x=data.Data[-8330:].index, y=data.Data[-8330:].values,
                         line=dict(color='rgba(240, 52, 52, 0.5)'), name='Dados'))
fig.add_trace(go.Scatter(x=data.Data[-8330:].index, y=data.Data[-8330:].rolling(24).mean().values,
                         line=dict(color='yellow'), name='Média Diária'))
fig.update_layout(template='plotly_dark')
fig.update_layout(title='Diferença da Atracação Efetiva e Prevista por Hora – 2019',
                  template='plotly_dark',
                  xaxis_title='Data',
                  yaxis_title='Horas')
# fig.show();
# fig.write_html("/Users/pedrocerbino/Downloads/Graph_2.html")
# Point the static-image exporter at the local orca binary.
plotly.io.orca.config.executable = '/Users/pedrocerbino/opt/anaconda3/bin/orca'
fig.show('svg');
# Daily mean scheduled-vs-actual berthing gap for the first year of records.
pd.Series(data=(stays['Atracação Prevista'] - stays['Atracação Efetiva']).astype(
    'timedelta64[h]').values, index=stays['Atracação Efetiva'].astype(
    'datetime64[D]').values).groupby(level=0).mean()[:364].plot(color='yellow')
plt.title('Diferença da Atracação Efetiva e Prevista por Dia');
# ### Verificando a distribuição dos dados
# Relative frequency of each category in the main descriptive columns.
stays['Finalidade da Embarcação'].value_counts(normalize=True)
stays['Tipo de Embarcação'].value_counts(normalize=True)
stays['Especialidade da Carga Predominante'].value_counts(normalize=True)
# Which ports handle solid bulk ("Granel Sólido"), plus a sample of those rows.
stays[stays['Especialidade da Carga Predominante']=='Granel Sólido'][
    'Porto de estadia atual'].value_counts()
stays[stays['Especialidade da Carga Predominante']=='Granel Sólido'].head(2)
# ### Analisando para cargas e tipos de navios
# Mean berthing gap grouped by predominant cargo type.
dex = stays['Atracação Efetiva'].astype('datetime64[ns]')
data = pd.DataFrame({'Date':dex.dt.date,'Hour':dex.dt.hour,
    'Especialidade da Carga Predominante':stays['Especialidade da Carga Predominante'],
    'Data':(stays
    ['Atracação Prevista'] - stays['Atracação Efetiva']).astype('timedelta64[h]')}).groupby(
    ['Especialidade da Carga Predominante'],as_index=False).mean()
data.drop(columns=['Hour'],inplace=True)
data.sort_values('Data',ascending=False,inplace=True)
data
# Ten smallest mean gaps, plotted largest-to-smallest (the [-1::-1] reversal).
sns.barplot(data=data.sort_values(['Data'])[:10][-1::-1],x='Especialidade da Carga Predominante',
    y='Data',palette='YlOrRd')
plt.title('Atraso da Atracação por Carga Predominante')
plt.ylabel('Diferença da Atracação Prevista e Efetiva (Horas)')
plt.xlabel('')
plt.ylim(-100,120,10);
# Mean berthing gap grouped by vessel type.
dex = stays['Atracação Efetiva'].astype('datetime64[ns]')
data = pd.DataFrame({'Date':dex.dt.date,'Hour':dex.dt.hour,
    'Tipo de Embarcação':stays['Tipo de Embarcação'],'Data':(stays
    ['Atracação Prevista'] - stays['Atracação Efetiva']).astype('timedelta64[h]')}).groupby(
    ['Tipo de Embarcação'],as_index=False).mean()
data.drop(columns=['Hour'],inplace=True)
data.sort_values('Data',ascending=False,inplace=True)
data
# Horizontal bars: ten smallest mean gaps by vessel type.
sns.barplot(data=data.sort_values(['Data'])[:10],y='Tipo de Embarcação',x='Data',
    palette='YlOrBr_r')
plt.xlabel('Diferença da Atracação Prevista e Efetiva (Horas)')
# BUG FIX: the displayed title carried a stray closing parenthesis.
plt.title('Atraso de Atracação por Embarcação');
# Daily mean gap smoothed with a 7-day rolling mean.
data = pd.Series(data=(stays['Atracação Prevista'] - stays['Atracação Efetiva']).astype(
    'timedelta64[h]').values,index=stays['Atracação Efetiva'].astype(
    'datetime64[D]').values).groupby(level=0).mean().rolling(7).mean()
data.plot(color='yellow')
plt.title('Diferença da Atracação Efetiva e Prevista por Semana');
# ## 1. Modelo de Previsão da Diferença entre Atracação Prevista e Efetiva
# Aplicamos um algoritmo estatístico de sazonalidade múltipla para prever os erros de estimativa do horário de atracação. Com base nessas informações, os agentes podem reavaliar as previsões realizadas e otimizá-las, além de terem uma ferramenta que facilita a tomada de decisão sobre como otimizar a logística, visto que o modelo dá visibilidade de quais dias possuem mais erros.
# Hold out the final 219 days for validation.
y_to_train = data.dropna()[:-219]
y_to_test = data.dropna()[-219:]
y_to_train
from tbats import TBATS, BATS
# Fit the model
# TBATS with monthly (~30.5-day) and yearly (~365.25-day) seasonal periods.
estimator = TBATS(seasonal_periods=(30.5,365.25))
model = estimator.fit(y_to_train)
# Forecast 219 days ahead
y_forecast = model.forecast(steps=219)
print(model.summary())
# ### Validando o Modelo:
# calculate MAE between expected and predicted last 219 observations
from sklearn.metrics import mean_absolute_error
from statsmodels.tools.eval_measures import rmse
mae = mean_absolute_error(y_to_test, y_forecast)
rmse_1 = rmse(y_to_test, y_forecast)
# NOTE(review): np.median makes this the *median* absolute percentage error
# (MdAPE), not the usual mean APE, yet it is labelled MAPE below — confirm
# which metric is intended.
mape = np.median(np.abs((y_to_test - y_forecast) / y_to_test)) * 100
print('MAE: %.2f' % mae)
print('RMSE: %.2f' % rmse_1)
print('MAPE: %.2f' % mape + '%')
accuracy = 100 - mape
print('Accuracy: %.2f' % accuracy + '%')
# plot expected vs actual
plt.figure(figsize=(13,8))
y_to_test.plot(label='Verdadeiro',color='yellow')
pd.Series(data=y_forecast,index=y_to_test.index).plot(label='Previsto',lw=2,color='red')
plt.title('Validação do Modelo – 71.6% de Acurácia',size=20,color='w')
plt.xticks(color='w')
plt.yticks(color='w')
plt.xlabel('')
plt.legend();
# ### Prevendo os dados futuros:
from tbats import TBATS, BATS
# Fit the model
# Refit on the full series before projecting beyond the observed data.
estimator = TBATS(seasonal_periods=(30.5,365.25))
model = estimator.fit(data.dropna())
# Forecast 219 days ahead
y_forecast = model.forecast(steps=219)
import datetime
# Daily date index for the forecast horizon, starting the day after the data ends.
date_list=[data.dropna().index[-1] + datetime.timedelta(days=x) for x in range(len(y_forecast)+1)]
# plot expected vs actual
plt.figure(figsize=(13,8))
data.dropna().plot(label='Série Histórica',color='red')
pd.Series(data=y_forecast,index=date_list[1:]).plot(label='Previsão',lw=2,color='yellow')
plt.title('Previsão da Diferença entre Atracação Prevista e Efetiva',size=20,color='w')
plt.xticks(color='w')
plt.yticks(color='w')
plt.xlabel('')
plt.ylabel('Média Semanal');
# ### Analisando o Fundeadouro
# Anchorage (fundeio) entry/exit records.
fundeadouro = pd.read_csv('/Users/pedrocerbino/Downloads/Fundeadouro_Entrada_Sa¡da.csv',sep=';')
for i in ['Data Entrada Fundeio','Data Saída Fundeio']:
    fundeadouro[i] = pd.to_datetime(fundeadouro[i],utc=True)
# Strip the timezone so the timestamps are plain (naive) datetimes.
fundeadouro['Data Entrada Fundeio'] = fundeadouro['Data Entrada Fundeio'].astype('datetime64[ns]')
fundeadouro['Data Saída Fundeio'] = fundeadouro['Data Saída Fundeio'].astype('datetime64[ns]')
# Whole hours spent at anchorage.
fundeadouro['Dif Fund Saída Entrada'] = (fundeadouro['Data Saída Fundeio'] - fundeadouro[
    'Data Entrada Fundeio']).astype('timedelta64[h]')
# Split each timestamp into a date and an hour-of-day column.
fundeadouro['Dia Entrada'] = fundeadouro['Data Entrada Fundeio'].astype('datetime64[D]')
fundeadouro['Hora Entrada'] = [i.hour for i in fundeadouro['Data Entrada Fundeio'].astype(
    'datetime64[h]')]
fundeadouro['Dia Saída'] = fundeadouro['Data Saída Fundeio'].astype('datetime64[D]')
fundeadouro['Hora Saída'] = [i.hour for i in fundeadouro['Data Saída Fundeio'].astype(
    'datetime64[h]')]
# Keep only stays with a strictly positive duration (drops inverted/zero records).
fundeadouro = fundeadouro[fundeadouro['Dif Fund Saída Entrada']>0].copy()
fundeadouro.head(2)
# +
dex = fundeadouro['Data Saída Fundeio'].astype('datetime64[ns]')
# Mean hours spent at anchorage, aggregated per (date, hour) of departure.
data = pd.DataFrame({'Date':dex.dt.date,'Hour':dex.dt.hour,'Data':(fundeadouro
    ['Data Saída Fundeio'] - fundeadouro['Data Entrada Fundeio']).astype('timedelta64[h]')}).groupby(
    ['Date','Hour'],as_index=False).mean()
data['Média Diária'] = data.Data.rolling(24).mean()  # 24-row rolling mean ≈ daily average
data.set_index(data.Date,inplace=True)
data.drop(columns=['Date'],inplace=True)
# Drop the last 12 rows — presumably an incomplete trailing period; TODO confirm.
data = data[:-12]
data
# -
# ### Plotando os dados de Fundeio
import plotly.graph_objects as go
# BUG FIX: `plotly.io` is referenced below, but only `plotly.graph_objects`
# was imported (as `go`), so the bare name `plotly` raised NameError.
import plotly.io
# Anchorage wait per hour (red) with a weekly rolling mean (yellow).
fig = go.Figure()
fig.add_trace(go.Scatter(x=data.Data.index, y=data.Data.values,
                         line=dict(color='rgba(240, 52, 52, 0.5)'), name='Dados'))
fig.add_trace(go.Scatter(x=data.Data.rolling(24*7).mean().index,
                         y=data.Data.rolling(24*7).mean().values,
                         line=dict(color='yellow'), name='Média Móvel Semanal'))
fig.update_layout(template='plotly_dark')
fig.update_layout(title='Diferença da Saída e Entrada do Fundeio',
                  template='plotly_dark',
                  xaxis_title='Data',
                  yaxis_title='Total de Horas de Atraso')
# fig.show();
# fig.write_html("/Users/pedrocerbino/Downloads/Graph_1.html")
# Point the static-image exporter at the local orca binary.
plotly.io.orca.config.executable = '/Users/pedrocerbino/opt/anaconda3/bin/orca'
fig.show('svg');
# ## 2. Modelo de Previsão de Hiato no Fundeadouro
# Usaremos o estudo de caso do Porto de Aratu para desenvolver um modelo estatístico de regressão linear para estimar o quanto um navio espera na barra antes de atracar. Além de realizar a previsão, o modelo também mostra quais as variáveis são mais importantes para essa estimação.
# Case study subset: Porto de Aratu (bitrigram code BRARB).
stays_porto = stays[stays['Código Bitrigrama'] == 'BRARB'].copy()
stays_porto['Atracação Efetiva'] = stays_porto['Atracação Efetiva'].astype('datetime64[ns]')
stays_porto['Atracação Prevista'] = stays_porto['Atracação Prevista'].astype('datetime64[ns]')
# Scheduled-minus-actual berthing gap in whole hours.
stays_porto['Dif Atrac Prev Efet'] = (stays_porto['Atracação Prevista'] - stays_porto[
    'Atracação Efetiva']).astype('timedelta64[h]')
stays_porto.head(2)
# ### Dados de Chuva (Porto de Aratu):
# Os dados foram coletados por meio da API do INMET, que disponibiliza esses dados gratuitamente. Foram analisados os dados da estação pluviométrica mais próxima do porto.
import requests
# Daily weather for 2017-2019 from INMET's public API, station A456
# (assumed to be the gauge closest to the port — TODO confirm).
r = requests.get('https://apitempo.inmet.gov.br/estacao/diaria/2017-01-01/2019-12-31/A456')
r = r.json()
porto_chuva = pd.DataFrame(r)[['DT_MEDICAO','CHUVA']]
porto_chuva.DT_MEDICAO = pd.to_datetime(porto_chuva.DT_MEDICAO)
# The API encodes missing rainfall as None; treat missing as zero rain.
porto_chuva.CHUVA = [0 if str(i) == 'None' else float(i) for i in porto_chuva.CHUVA]
porto_chuva.head(5)
# ### Dados de Marés (Porto de Aratu):
# Os dados das marés foram coletados a partir do site do INPE, que registra os dados históricos que são produzidos pela Marinha.
def limpa_dados(database):
    """Strip export artifacts from the raw tide spreadsheet.

    Removes every 'Unnamed: *' column and the first 14 header rows (mutating
    *database* in place), then returns the transposed table with the old
    index exposed as a regular 'index' column.
    """
    unnamed_cols = [col for col in database.columns if 'Unnamed: ' in col]
    database.drop(columns=unnamed_cols, inplace=True)
    database.reset_index(drop=True, inplace=True)
    database.drop(index=[*range(14)], inplace=True)
    database.reset_index(drop=True, inplace=True)
    transposed = database.T.reset_index().copy()
    return transposed
def estrutura_dados(database):
    """Flatten the transposed tide sheet into a {date-key: readings} dict.

    Cells containing the substring '2020' appear to be month headers; each
    one starts a new key whose year prefix is inferred from how many headers
    have been seen so far (12 months per year: 2017, 2018, 2019, then 2020).
    Every other cell contributes an (hour, tide-height) string pair to the
    most recently opened key.

    BUG FIX: the original chain ``if cont <= 12 / elif cont > 12 / elif cont
    > 24 / elif cont > 36`` made the last two branches unreachable, so every
    month after the 24th was mislabelled 2018. The comparisons now use
    ascending upper bounds.
    """
    cont = 0
    data_dict = {}
    for row in database.index:
        for col in database.columns[1:]:
            cell = str(database[col][row])
            if '2020' in cell:
                # Month header: derive the year from the running month count.
                cont += 1
                suffix = cell.split(' ')[0][4:]  # digits after the '2020' prefix
                if cont <= 12:
                    col_name = '2017' + suffix
                elif cont <= 24:
                    col_name = '2018' + suffix
                elif cont <= 36:
                    col_name = '2019' + suffix
                else:
                    col_name = '2020' + suffix
                data_dict[col_name] = []
            else:
                # Reading cell: "<hour> <sep> <height>". Malformed cells (or
                # readings appearing before the first header) are skipped.
                try:
                    value_hour = cell.split(' ')[0]
                    value_tide = cell.split(' ')[2]
                    data_dict[col_name].append(value_hour)
                    data_dict[col_name].append(value_tide)
                except Exception:
                    pass
    return data_dict
def entabela_dados(data_dict):
    """Pad every day's reading list to exactly 16 slots (8 hour/tide pairs).

    Shorter lists are extended in place with NaN so the dict can later be
    turned into a rectangular DataFrame; the (mutated) dict is returned.
    """
    for readings in data_dict.values():
        while len(readings) < 16:
            readings.append(np.nan)
    return data_dict
def ajusta_dados(data_dict):
    """Turn the per-day dict into a DataFrame with named reading columns.

    Rows are days (the dict keys); the 16 value slots become alternating
    'k_Clock' / 'k_Tide' columns for k = 1..8 — the same lexicographic
    ordering the original sorted() assignment produced.
    """
    table = pd.DataFrame(data_dict).T.copy()
    column_names = []
    for k in range(1, 9):
        column_names += [str(k) + '_Clock', str(k) + '_Tide']
    table.columns = column_names
    return table
def preenche_dados(database,nome):
    """Fill missing tide readings, derive hour columns, and publish the table.

    Pads days with fewer than eight readings by copying the previous
    reading's value, converts each ``k_Clock`` string into an integer hour
    column ``k_Hour``, turns the index into a ``Data`` date column, and binds
    the finished DataFrame to the *global* name given by ``nome``.

    NOTE(review): the chained assignment ``database[col][row] = ...`` relies
    on pandas returning a view (SettingWithCopy territory), and exporting via
    ``globals()`` is fragile — returning the DataFrame would be cleaner.
    """
    # Fill NaNs in the last 10 columns from the preceding reading's column
    # (e.g. a missing '5_Tide' is copied from '4_Tide').
    for row in database.index:
        for col in database.columns[-10:]:
            if str(database[col][row]) == 'nan':
                database[col][row] = database[str(int(col[0])-1)+col[1:]][row]
    # Integer hour-of-day for each of the eight clock columns.
    for k in range(1,9):
        database[str(k)+'_Hour']=[i.hour for i in database[str(k)+'_Clock'].astype('datetime64[h]')]
    database.reset_index(inplace=True)
    database.rename(columns={'index':'Data'},inplace=True)
    database.Data = database.Data.astype('datetime64[D]')
    globals()[str(nome)] = database  # exports e.g. `porto_aratu` at module level
    return database.head(2)
# Raw tide table for Porto de Aratu; the pipeline cleans, reshapes, pads and
# fills it, then binds the result to the global name 'porto_aratu'.
aratu = pd.read_excel('/Users/pedrocerbino/Downloads/Tábua de Marés - Porto de Aratu.xlsx')
preenche_dados(ajusta_dados(entabela_dados(estrutura_dados(limpa_dados(aratu)))),'porto_aratu')
# ### Consolidando tabelas distintas em uma única
# Join stays + anchorage records (Aratu only) + tide table + rainfall,
# keyed on the anchorage entry date.
merged = stays_porto.merge(fundeadouro[fundeadouro.Porto=='BRARB - ARATU'].drop(columns='Porto'))
porto_merged = merged.merge(porto_aratu,left_on=['Dia Entrada'],right_on=['Data'])
porto_merged = porto_merged.merge(porto_chuva,left_on=['Dia Entrada'],right_on=['DT_MEDICAO'])
# ### Construindo colunas contendo o horário de maré, e o nível de maré, mais próximo do de atracação
# For every merged row, pick the tide reading (of the day's eight) whose hour
# is closest to the vessel's anchorage-entry hour.
# NOTE(review): globals() is used as scratch storage for the eight hour
# differences — it works, but creates/overwrites module-level names
# 'one'..'eight' and is fragile; a plain list would be safer.
tide_hour, tide = [], []
for row in porto_merged.index:
    for m,n in zip(['one','two','three','four','five','six','seven','eight'],range(1,9)):
        globals()[m]=np.abs(porto_merged['Hora Entrada'][row]-porto_merged[str(n)+'_Hour'][row])
    lower = min(one,two,three,four,five,six,seven,eight)
    m_last = 'test'
    for m,n in zip(['one','two','three','four','five','six','seven','eight'],range(1,9)):
        # First reading matching the minimum difference wins; the m_last
        # comparison additionally skips a reading equal to the previous one.
        if globals()[m] == lower and globals()[m] != m_last:
            tide_hour.append(porto_merged[str(n)+'_Hour'][row])
            tide.append(porto_merged[str(n)+'_Tide'][row])
            break
        m_last = globals()[m]
porto_merged['Tide Hour'] = tide_hour
porto_merged['Tide'] = tide
# ### Criando colunas categóricas para dia e mês da atracação (dummies)
# Essas coluna funcionarão para capturar o efeito fixo da sazonalidade de atrasos ao longo do ano
# Categorical calendar fields (as strings) so get_dummies encodes fixed effects.
porto_merged['Day Control'] = [str(i.day) for i in porto_merged['Dia Entrada']]
porto_merged['Month Control'] = [str(i.month) for i in porto_merged['Dia Entrada']]
porto_merged['Year Control'] = [str(i.year) for i in porto_merged['Dia Entrada']]
# NOTE(review): 'Weekday Control' is created but never dummy-encoded nor used
# in the regressor list below — confirm whether it should be included.
porto_merged['Weekday Control'] = [str(i.weekday()) for i in porto_merged['Dia Entrada']]
to_model = pd.get_dummies(porto_merged,columns=['Day Control','Month Control','Year Control',
    'Tipo de Embarcação'], drop_first=True)
to_model.head(2)
# NOTE(review): this re-creates porto_chuva exactly as done earlier in the
# notebook — redundant re-execution of the same transformation.
porto_chuva = pd.DataFrame(r)[['DT_MEDICAO','CHUVA']]
porto_chuva.DT_MEDICAO = pd.to_datetime(porto_chuva.DT_MEDICAO)
porto_chuva.CHUVA = [0 if str(i) == 'None' else float(i) for i in porto_chuva.CHUVA]
porto_chuva.head(5)
# ### Selecionando as colunas que servirão de variáveis explicativas para o modelo
# Tide level, rainfall, bulk-carrier dummy, plus all calendar dummies.
cols=['Tide','CHUVA','Tipo de Embarcação_Graneleiro']+[i for i in to_model.columns if 'Day Control'
    in i or 'Month Control' in i or 'Year Control' in i]
X = to_model[cols]
y = to_model['Dif Fund Saída Entrada']
# ### Equação do Modelo
# $ Log(y_i) = \alpha{} + \beta_1{\cdot{Tide_i}} + \beta_2{\cdot{Rain_i}} + Ship'_i{\cdot{ \gamma_1{}}} + Day'_i {\cdot{\gamma_2{}}} + Month'_i {\cdot{\gamma_3{}}} + Year'_i {\cdot{\gamma_4{}}} + \epsilon_i{} $
# ### Rodando a Regressão
import statsmodels.api as sm
# OLS of log anchorage hours on tide, rainfall, ship type and calendar
# dummies. np.log is safe here: durations were filtered to > 0 earlier.
# NOTE(review): sm.OLS does not add an intercept; the dummy columns absorb
# the constant — confirm this is intentional.
est = sm.OLS(np.log(y), X.astype(float)).fit()
est.summary()
# Pelo modelo, é possível perceber que atingimos praticamente **89% de acurácia preditiva**. Além disso, dado que o p-valor de todas as variáveis explicativas tende a zero, existe forte evidência de impacto estatisticamente significante destas em prever o tempo de demora que um navio levará no fundeadouro. Esta informação pode ser valiosa para **planejar um line-up mais eficiente.**
# Residual histogram with the quartiles marked.
sns.distplot(est.resid,bins=30,color='purple',kde_kws={"color":"orange",'lw':2,'label':'kde'})
quantile = np.quantile(est.resid,[0.25,0.5,0.75])
for i in range(1,4):
    plt.axvline(x=quantile[i-1],color='burlywood',label=str(i)+'° quartil')
plt.legend()
plt.title('Resíduos - Regressão Linear');
# ### Testando premissas do modelo:
from statsmodels.compat import lzip
import statsmodels.stats.api as sms
# Jarque-Bera normality test on the regression residuals.
# sms.jarque_bera returns (statistic, p-value, skew, kurtosis).
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sms.jarque_bera(est.resid)
lzip(name, test)
from statsmodels.tsa.stattools import adfuller
# Residual mean (should be ~0) and ADF stationarity test on the residuals.
resid_mean = np.mean(est.resid)
adfuller_p = adfuller(est.resid)[1]
# BUG FIX: the Jarque-Bera *p-value* is the second entry of the result; the
# original printed (and thresholded) lzip(...)[0][1], i.e. the JB statistic.
jb_pvalue = lzip(name, test)[1][1]
print("** Mean of the residuals: ", np.around(resid_mean,2))
print("\n** Jarque Bera Normality Test, p_value:", np.around(jb_pvalue,3), "(>0.05, Normal)" if (jb_pvalue>0.05) else "(<0.05, Not-normal)")
print("\n** AD Fuller, p_value:", np.around(adfuller_p,3), "(>0.05, Non-stationary)" if (adfuller_p > 0.05) else "(<0.05, Stationary)")
from statsmodels.stats.stattools import durbin_watson
# Durbin-Watson autocorrelation check on the residuals (d ≈ 2 means none).
print('Assumption 4: No Autocorrelation', '\n')
print('Performing Durbin-Watson Test')
print('Values of 1.5 < d < 2.5 generally show that there is no autocorrelation in the data')
print('0 to 2< is positive autocorrelation')
print('>2 to 4 is negative autocorrelation')
print('-------------------------------------')
durbinWatson = durbin_watson(est.resid)
print('Durbin-Watson:', durbinWatson)
# Classify the statistic, then report the verdict in one place.
if durbinWatson < 1.5:
    verdict, satisfied = 'Signs of positive autocorrelation', False
elif durbinWatson > 2.5:
    verdict, satisfied = 'Signs of negative autocorrelation', False
else:
    verdict, satisfied = 'Little to no autocorrelation', True
print(verdict, '\n')
print('Assumption satisfied' if satisfied else 'Assumption not satisfied')
# Ou seja, todas as premissas da regressão linear foram satisfeitas
| Projeto Porto Inteligente.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Comparisons
#
# Boolean values most often arise from comparison operators. Python includes a variety of operators that compare values. For example, `3` is larger than `1 + 1`.
3 > 1 + 1
# The value `True` indicates that the comparison is valid; Python has confirmed this simple fact about the relationship between `3` and `1+1`. The full set of common comparison operators are listed below.
#
# | Comparison | Operator | True example | False Example |
# |--------------------|----------|--------------|---------------|
# | Less than | < | 2 < 3 | 2 < 2 |
# | Greater than | > | 3>2 | 3>3 |
# | Less than or equal | <= | 2 <= 2 | 3 <= 2 |
# | Greater or equal | >= | 3 >= 3 | 2 >= 3 |
# | Equal | == | 3 == 3 | 3 == 2 |
# | Not equal | != | 3 != 2 | 2 != 2 |
# An expression can contain multiple comparisons, and they all must hold in order for the whole expression to be `True`. For example, we can express that `1+1` is between `1` and `3` using the following expression.
1 < 1 + 1 < 3
# The average of two numbers is always between the smaller number and the larger number. We express this relationship for the numbers `x` and `y` below. You can try different values of `x` and `y` to confirm this relationship.
x = 12
y = 5
# The mean of two numbers always lies between their min and max.
min(x, y) <= (x+y)/2 <= max(x, y)
# Strings can also be compared, and their order is alphabetical. A shorter string is less than a longer string that begins with the shorter string.
"Dog" > "Catastrophe" > "Cat"
| Mathematics/Statistics/Statistics and Probability Python Notebooks/Computational and Inferential Thinking - The Foundations of Data Science (book)/Notebooks - by chapter/4. Pytho Data Types/Comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install opencv-contrib-python
# +
# from argoverse.data_loading.argoverse_tracking_loader import argoverse_tracking_loader
from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader
from argoverse.utils import calibration
# +
argoverse_loader = ArgoverseTrackingLoader('./tracking_sample_v1.1.tar/argoverse-tracking/sample/')
# The nine camera streams present in an Argoverse tracking log.
cams = ['ring_front_center',
        'ring_front_left',
        'ring_front_right',
        'ring_rear_left',
        'ring_rear_right',
        'ring_side_left',
        'ring_side_right',
        'stereo_front_left',
        'stereo_front_right']
data_dir = 'C://users/cathx/repos/argoverse-api/tracking_sample_v1.1.tar/argoverse-tracking/sample/'
# Re-emit each camera's calibration in KITTI calib-file layout
# (P0..P3, R0_rect, Tr_velo_to_cam, Tr_imu_to_velo).
for log_id in argoverse_loader.log_list:
    argoverse_data = argoverse_loader.get(log_id)
    for cam in cams:
        # recreate the calibration file content
        # load calibration based on the camera from the cameras list
        # because of the for loop, we go camera by camera
        calibration_data = calibration.load_calib(data_dir+log_id+'/vehicle_calibration_info.json')[cam]
        print(calibration_data)
        # P2: the camera projection matrix, flattened and space-separated.
        L3 = 'P2: ' + ' '.join(str(j) for j in calibration_data.K.reshape(1, 12)[0])
        # Tr_velo_to_cam: first 12 entries of the 4x4 extrinsic matrix.
        # BUG FIX: the original appended str(K) — an undefined name (NameError);
        # the loop variable is lowercase k.
        L6 = 'Tr_velo_to_cam: ' + ' '.join(str(k) for k in calibration_data.extrinsic.reshape(1, 16)[0][0:12])
        L1 ='P0: 0 0 0 0 0 0 0 0 0 0 0 0'
        L2 ='P1: 0 0 0 0 0 0 0 0 0 0 0 0'
        L4 ='P3: 0 0 0 0 0 0 0 0 0 0 0 0'
        L5 ='R0_rect: 1 0 0 0 1 0 0 0 1'
        L7 ='Tr_imu_to_velo: 0 0 0 0 0 0 0 0 0 0 0 0'
        print(L1)
# -
import os
# List each log folder under ./train/ and print its relative path.
paths = os.listdir('./train/')
for i in paths:
    complete = os.path.join('./train/' + i)
    print(complete)
# main_dir = 'C://users/cathx/repos/argoverse-api/'
root_dir = 'C://Users/cathx/repos/argoverse-api/tracking_train1_v1.1.tar/argoverse-tracking/train1/'
paths = os.listdir(root_dir)
# paths = os.listdir(main_dir + './train/')
# for i in paths:
#     complete = os.path.join(main_dir + i)
#     print(complete)
# # print(paths)
# Rename each log's stereo_front_left images to sequential names, then print
# the contents of every log subfolder for inspection.
for i in paths:
    complete = os.path.join(root_dir+ i)
    # print(complete)
    # BUG FIX: os.walk was given `file + 'stere_front_left'` — a typo plus a
    # relative path missing its parent directory; walk the log's real
    # stereo_front_left folder once instead.
    for root, walk_dirs, filenames in os.walk(os.path.join(complete, 'stereo_front_left'), topdown=False):
        for count, filename in enumerate(filenames):
            # BUG FIX: os.rename takes (src, dst); the original passed a
            # single string, which raises TypeError.
            os.rename(os.path.join(root, filename),
                      os.path.join(root, 'stereo_front_left' + str(count) + '.jpg'))
    for file in os.listdir(complete):
        # BUG FIX: os.walk expects a path, not the list os.listdir() returns.
        for root, dirs, files in os.walk(os.path.join(complete, file), topdown=False):
            for name in files:
                print(os.path.join(root, name))
            for name in dirs:
                print(os.path.join(root, name))
# +
# for root, dirs, files in os.walk('C://users/cathx/repos/argoverse-api/train/c6911883-1843-3727-8eaa-41dc8cda8993/stereo_front_left/'):
# # for name in files:
# # print(os.path.join(root, name))
# count = 0
# for name in files:
# new_name = 'stereo_front_left' + str(count) + '.jpg'
# os.rename(name, 'stereo_front_left_'+str(count)+'.jpg')
# count +=1
# -
import os
# Rename each log's stereo_front_right images to zero-padded sequential names.
# BUG FIXES: the original referenced `main_dir`, which is never defined in this
# notebook (NameError — it only appears commented out above), and formatted
# str(count) with "{:06}", which left-aligns strings and zero-fills on the
# RIGHT ('5' -> '500000'); format the int so '5' becomes '000005'.
for i in os.listdir(root_dir):
    path = root_dir + i + '/stereo_front_right/'
    for count, filename in enumerate(os.listdir(path)):
        new_name = 'stereo_front_right_' + "{:06d}.jpg".format(count)
        source = path + filename
        new_name = path + new_name
        os.rename(source, new_name)
root_dir = 'C://Users/cathx/repos/argoverse-api/tracking_train1_v1.1.tar/argoverse-tracking/train/'
import os
# Rename each log's stereo_front_right images to zero-padded sequential
# names (000000.jpg, 000001.jpg, ...).
for i in os.listdir(root_dir):
    path = (root_dir + i + '/stereo_front_right/')
    for count, filename in enumerate(os.listdir(root_dir + i + '/stereo_front_right')):
        new_name = "{:06}.jpg".format((count))
        source = path + filename
        new_name = path + new_name
        os.rename(source, new_name)
# NOTE(review): scratch cell — `root_dir + 'train/'` doubles the train/
# segment (root_dir already ends in train/), and `paths` is rebound while
# being iterated (harmless for the iteration, but confusing) — confirm intent.
for i in paths:
    complete = os.path.join(root_dir + 'train/' + i)
    print(complete)
    paths = os.listdir(os.path.join(root_dir + i))
    print(paths)
paths = [i for i in os.listdir(root_dir)]
# +
# def videos(root_dir):
# for i in os.listdir(root_dir):
# path = (root_dir + i + '/stereo_front_right/')
# print(os.listdir(path))
# images = [img for img in os.listdir(path) if img.endswith(".jpg")]
# frame = cv2.imread(os.path.join(path, images[0]))
# height, width, layers = frame.shape
# video_name = 'left_cam.avi'
# fps = 5
# count = 0
# video_name = f('test_vid_{count}'+'avi'
# video = cv2.VideoWriter(os.path.join(root_dir+i+ video_name), 0, fps, (width, height))
# for image in images:
# video.write(cv2.imread(os.path.join(image_folder, image)))
# -
def videos_write(root_dir):
    """Iterate over the per-log folders under *root_dir*.

    NOTE(review): unfinished stub — the original body had an unbalanced
    parenthesis (SyntaxError) and referenced the undefined names ``i`` and
    ``main_dir``; it now lists the logs but performs no action yet.
    """
    paths = os.listdir(root_dir)
    for i in paths:
        complete = os.path.join(root_dir, i)  # per-log directory; unused so far
# NOTE(review): `main_dir` is never defined in this notebook (it only appears
# commented out earlier), so this cell raises NameError as written; a working
# version of the same rename pass (using root_dir) appears elsewhere in the
# file. Kept for reference.
for i in os.listdir(main_dir + 'train/'):
    path = (main_dir + '/train/' + i + '/stereo_front_right/')
    for count, filename in enumerate(os.listdir(main_dir + '/train/' + i + '/stereo_front_right')):
        new_name = 'stereo_front_right_' + str(count) + ".jpg"
        source = path + filename
        new_name = path + new_name
        os.rename(source, new_name)
import os
# Sequential zero-padded rename of each log's stereo_front_right images,
# using root_dir (which at this point ends in .../train/).
for i in os.listdir(root_dir):
    path = (root_dir + i + '/stereo_front_right/')
    for count, filename in enumerate(os.listdir(root_dir + i + '/stereo_front_right')):
        new_name = "{:06}.jpg".format((count))
        source = path + filename
        new_name = path + new_name
        os.rename(source, new_name)
from PIL import Image
import glob
# # Loading Calibration data
# +
print('\nLoading files...')
import argoverse
from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader
import os
from shutil import copyfile
from argoverse.utils import calibration
import json
import numpy as np
from argoverse.utils.calibration import CameraConfig
from argoverse.utils.cv2_plotting_utils import draw_clipped_line_segment
from argoverse.utils.se3 import SE3
from argoverse.utils.transform import quat2rotmat
import math
import os
from typing import Union
import numpy as np
import pyntcloud
import progressbar
from time import sleep
# -
root_dir = 'C://users/cathx/repos/argoverse-api/'
data_dir = root_dir + '/train/'  # NOTE(review): yields a doubled slash ('...api//train/')
_PathLike = Union[str, "os.PathLike[str]"]  # any filesystem-path argument type
def load_ply(ply_fpath: _PathLike) -> np.ndarray:
    """Read a PLY point cloud and return its XYZ coordinates.

    Args:
        ply_fpath: Path to a PLY file.

    Returns:
        Array of shape (N, 3), one row per point.
    """
    cloud = pyntcloud.PyntCloud.from_file(os.fspath(ply_fpath))
    coords = (np.asarray(cloud.points.x),
              np.asarray(cloud.points.y),
              np.asarray(cloud.points.z))
    return np.stack(coords, axis=1)
| Argoverse/utils/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright (c) 2020-2021 CertifAI Sdn. Bhd.
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -
# # Array Indexing and Slicing
# Array indexing and slicing are methods used in Python to access, modify or restructure the contents stored in data structure objects. <br> In indexing and slicing, we use `[]` or so-called `square brackets` to index/slice data structures. Examples of data structures that could be sliced are lists, tuples, NumPy arrays etc.
import numpy as np
import sklearn.datasets as dataset
# ## Indexing
# Indexing refers to accessing elements through the use of indices. Most programming languages follow zero-based indexing. That is, the index of an array with $n$ elements will start from 0 and end at $n-1$. For example, the first element of an array will have index of 0.
# ### 1-Dimension Array
# Let's try to access the **second element of the array** and name it as variable **`elem`**.
# <i> Reminder: 2nd element corresponds to index 1.
# Five-element vector used throughout the indexing examples.
array_1d = np.arange(1, 6)
array_1d.shape
# Grab the second element (zero-based index 1).
elem = array_1d[1]
# ### 2-Dimension Matrix
# For 2 or more dimensions (>2d tensors), indexing will start from higher dimensions. In matrices, row is a higher dimension than columns, so we will have to specify the row which the element is in, and then the column as well.<br><br>
# Let's initialize a $3\times5$ matrix.
# 2d matrix: three rows of five columns each.
mat_2d = np.array(
    [[1, 4, 5, 6, 7],
     [2, 4, 8, 5, 9],
     [0, 5, 4, 7, 2]]
)
mat_2d.shape
# Accessing the value **9**, which is the $5^{th}$ element from the $2^{nd}$ row.<br>
# <i>Reminder: index = n-1
mat_2d[1][4]
# ### N-Dimension Tensor
# For high-dimensional tensors, indexing will be similar, from high dimensions to low dimensions. We can try this on a 3-dimension tensor. Accessing the element located at depth = 1, row = 2, column = 3, which has the value `2`.
# 3-d tensor: 3 "depth" slices, each a 4x3 matrix.
tensor_3d = np.array(
    [[[1, 2, 3], [3, 2, 2], [6, 4, 1], [3, 4, 5]],
     [[4, 6, 7], [5, 6, 7], [6, 6, 6], [8, 8, 8]],
     [[6, 0, 1], [2, 3, 6], [4, 5, 1], [2, 3, 7]]]
)
tensor_3d.shape
tensor_3d[0, 1, 2]  # depth 0, row 1, column 2 -> the value 2
# ## Slicing
# Slicing is just another way to access values, multiple objects at a time. In slicing, we use `[start:stop:step]` to indicate what we want to slice.<br>
# 1. `start`: the index which to start. The default will be 0 if not specified.
# 2. `stop`: the index which slicing stops. This index will not be included into the sliced result. The default will be `len(array)` if not specified.
# 3. `step`: how much index per step is taken. The default will be 1 if not specified.<br>
# Let's use the predefined `array_1d` for example. We want to slice the values from $2^{nd}$ to the $4^{th}$ element.
print(array_1d[1:4])
# ### 2-dimension matrix
# In slicing, unlike indexing, all dimensions are defined in a single pair of square brackets. The start-stop-step for each dimension will be split using a `,` <br><br>
# `[start1:stop1:step1, start2:stop2:step2]`.<br><br>Like indexing, the sequence of dimensions are also arranged from high to low.
# We'll try to slice `mat_2d` out to obtain the slices that contain the last three elements of the first two rows. `rows = 1->2`,`columns = 3->5`.
# 2D matrix slicing: first two rows, last three of the five columns.
mat_2d[:2, 2:]
# Slicing techniques are really handy when it comes to handling real datasets. Here, we are going to try and slice a dataset imported from sklearn.
# Bunch object exposing the features (.data), labels (.target) and a text description (.DESCR).
data = dataset.load_breast_cancer()
print(data.DESCR)
# `dataset.load_breast_cancer()` will return us a dataset containing 569 instances regarding breast cancer and the corresponding attributes/characteristics *(e.g area, perimeter and smoothness)* of the tumor.
# For demonstration purposes, let's say that the researcher only wants 5 attributes from the first 50 samples. Here we can perform slicing which is very helpful.
# +
# Feature matrix: one row per breast-cancer instance, one column per attribute.
X = data.data
# Target labels: 0/1 flag per instance, showing whether the tumor is cancerous.
y = data.target
# Shapes before slicing down to 50 samples / 5 attributes.
X.shape, y.shape
# -
X = X[:50, :5]
y = y[:50]
print(X.shape)
print(len(y))
# ## Exercise
# 1. Initialize a random tensor with 3 dimensions with `shape: 5,3,4` as **`t_1`**.<br>
# Print out `t_1`.<br>
# <i> Hint: use `np.random.rand()`
t_1 = np.random.rand(5,3,4)  # uniform samples in [0, 1); unseeded, so values differ per run
print(t_1)
# 2. Index the elements of these dimensions.
# <ul>
# <li> column = 2, row = 2, depth = 1
# <li> column = 3, row = 1, depth = 5
#
print(t_1[0][1][1])  # depth 1, row 2, column 2 (zero-based: [0][1][1])
print(t_1[4][0][2])  # depth 5, row 1, column 3 (zero-based: [4][0][2])
# 3. Load iris dataset from `sklearn.datasets`. Slice the dataset down to 3 attributes and 30 instances.
import sklearn.datasets as dataset  # already imported above; repeated so this cell runs standalone
data = dataset.load_iris()
X = data.data[:30,:3]  # first 30 instances, first 3 attributes
X.shape
print(X)
| solution/Python NumPy Array Indexing and Slicing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
library(dplyr)
# #### load data
# Per-user ego-network features; one row per ego user.
data <- read.csv('../data_others/ego_features.csv')
dim(data)
# ---
# ### OLS regression
# ----
# #### 1. weak and strong diversity
# +
# Min-max scale each regressor and the outcome to [0, 1], then keep only the
# columns used by the models below.
dat <-
    data %>%
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree, log_weak_SD,
           log_strong_SD, log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
# OLS fits: in-degree and the weak/strong structural-diversity measures,
# individually and in combination.
model1 = lm(social_reputation_100 ~ log_in_degree,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_weak_SD,
            data = dat)
summary(model2)
cat('\n')
model2.1 = lm(social_reputation_100 ~ log_in_degree + log_weak_SD,
              data = dat)
summary(model2.1)
cat('\n')
model3 = lm(social_reputation_100 ~ log_strong_SD,
            data = dat)
summary(model3)
cat('\n')
model3.1 = lm(social_reputation_100 ~ log_in_degree + log_strong_SD,
              data = dat)
summary(model3.1)
cat('\n')
model3.2 = lm(social_reputation_100 ~ log_weak_SD + log_strong_SD,
              data = dat)
summary(model3.2)
cat('\n')
model4 = lm(social_reputation_100 ~ log_in_degree + log_weak_SD + log_strong_SD,
            data = dat)
summary(model4)
# -
# ---
# *on top 5% ego users*
quantile(data$social_reputation_100, probs=0.95)
# +
# Same models as above, restricted to the top 5% of ego users by reputation.
# The threshold is computed from the data instead of hard-coding the printed
# quantile value (33.3620129500335), so the filter stays correct if the input
# data changes.
dat <-
    data %>%
    filter(social_reputation_100 >= quantile(data$social_reputation_100, probs = 0.95)) %>% # top 5%
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree, log_weak_SD,
           log_strong_SD, log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
# In-degree and weak/strong diversity, alone and combined, on the top-5% subset.
model1 = lm(social_reputation_100 ~ log_in_degree,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_weak_SD,
            data = dat)
summary(model2)
cat('\n')
model2.1 = lm(social_reputation_100 ~ log_in_degree + log_weak_SD,
              data = dat)
summary(model2.1)
cat('\n')
model3 = lm(social_reputation_100 ~ log_strong_SD,
            data = dat)
summary(model3)
cat('\n')
model3.1 = lm(social_reputation_100 ~ log_in_degree + log_strong_SD,
              data = dat)
summary(model3.1)
cat('\n')
model3.2 = lm(social_reputation_100 ~ log_weak_SD + log_strong_SD,
              data = dat)
summary(model3.2)
cat('\n')
model4 = lm(social_reputation_100 ~ log_in_degree + log_weak_SD + log_strong_SD,
            data = dat)
summary(model4)
# -
# ----
# #### 2. k-clip diversity measure
# +
# Add the k-clip (k = 5) diversity measure to the min-max-scaled feature set.
dat <-
    data %>%
    # filter(social_reputation_100>=33.3620129500335) %>% # top 5%
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_sd_kclip_5 = (log_sd_kclip_5-min(log_sd_kclip_5))/(max(log_sd_kclip_5)-min(log_sd_kclip_5))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree,
           log_weak_SD,
           log_strong_SD,
           log_sd_kclip_5,
           log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
# k-clip diversity alone, then jointly with each earlier regressor.
model0 = lm(social_reputation_100 ~ log_sd_kclip_5,
            data = dat)
summary(model0)
cat('\n')
model1 = lm(social_reputation_100 ~ log_in_degree + log_sd_kclip_5,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_weak_SD + log_sd_kclip_5,
            data = dat)
summary(model2)
cat('\n')
model3 = lm(social_reputation_100 ~ log_strong_SD + log_sd_kclip_5,
            data = dat)
summary(model3)
# -
# ---
# *on top 5% ego users*
# +
# k-clip models on the top 5% of ego users. As above, the threshold is
# computed inline rather than hard-coding the printed quantile value.
dat <-
    data %>%
    filter(social_reputation_100 >= quantile(data$social_reputation_100, probs = 0.95)) %>% # top 5%
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_sd_kclip_5 = (log_sd_kclip_5-min(log_sd_kclip_5))/(max(log_sd_kclip_5)-min(log_sd_kclip_5))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree,
           log_weak_SD,
           log_strong_SD,
           log_sd_kclip_5,
           log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
model0 = lm(social_reputation_100 ~ log_sd_kclip_5,
            data = dat)
summary(model0)
cat('\n')
model1 = lm(social_reputation_100 ~ log_in_degree + log_sd_kclip_5,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_weak_SD + log_sd_kclip_5,
            data = dat)
summary(model2)
cat('\n')
model3 = lm(social_reputation_100 ~ log_strong_SD + log_sd_kclip_5,
            data = dat)
summary(model3)
# -
# ----
# #### 3. Robustness: controlling answer count
# ---
# *conditioning on answer count*
# +
# Robustness check: every model re-fit with log answer count as a control.
dat <-
    data %>%
    # filter(social_reputation_100>=33.3620129500335) %>% # top 5%
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_sd_kclip_5 = (log_sd_kclip_5-min(log_sd_kclip_5))/(max(log_sd_kclip_5)-min(log_sd_kclip_5))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree, log_weak_SD,
           log_strong_SD,
           log_sd_kclip_5,
           log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
# Answer count alone, then each diversity/degree measure with the control.
model0.0 = lm(social_reputation_100 ~ log_answerCount,
              data = dat)
summary(model0.0)
cat('\n')
model0 = lm(social_reputation_100 ~ log_in_degree + log_answerCount,
            data = dat)
summary(model0)
cat('\n')
model1 = lm(social_reputation_100 ~ log_weak_SD + log_answerCount,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_strong_SD + log_answerCount,
            data = dat)
summary(model2)
cat('\n')
model3 = lm(social_reputation_100 ~ log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model3)
cat('\n')
model4 = lm(social_reputation_100 ~ log_in_degree + log_weak_SD + log_answerCount,
            data = dat)
summary(model4)
cat('\n')
model5 = lm(social_reputation_100 ~ log_in_degree + log_strong_SD + log_answerCount,
            data = dat)
summary(model5)
cat('\n')
model6 = lm(social_reputation_100 ~ log_in_degree + log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model6)
cat('\n')
model7 = lm(social_reputation_100 ~ log_weak_SD + log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model7)
cat('\n')
model8 = lm(social_reputation_100 ~ log_strong_SD + log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model8)
# -
# ---
# on top 5% ego users
# +
# Answer-count-controlled models on the top 5% of ego users. Two changes from
# the original: the threshold is computed inline instead of hard-coding the
# printed quantile (33.3620129500335), and cat('\n') separators are added
# between summaries for consistency with the full-sample block above.
dat <-
    data %>%
    filter(social_reputation_100 >= quantile(data$social_reputation_100, probs = 0.95)) %>% # top 5%
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_sd_kclip_5 = (log_sd_kclip_5-min(log_sd_kclip_5))/(max(log_sd_kclip_5)-min(log_sd_kclip_5))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree, log_weak_SD,
           log_strong_SD,
           log_sd_kclip_5,
           log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
model0.0 = lm(social_reputation_100 ~ log_answerCount,
              data = dat)
summary(model0.0)
cat('\n')
model0 = lm(social_reputation_100 ~ log_in_degree + log_answerCount,
            data = dat)
summary(model0)
cat('\n')
model1 = lm(social_reputation_100 ~ log_weak_SD + log_answerCount,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_strong_SD + log_answerCount,
            data = dat)
summary(model2)
cat('\n')
model3 = lm(social_reputation_100 ~ log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model3)
cat('\n')
model4 = lm(social_reputation_100 ~ log_in_degree + log_weak_SD + log_answerCount,
            data = dat)
summary(model4)
cat('\n')
model5 = lm(social_reputation_100 ~ log_in_degree + log_strong_SD + log_answerCount,
            data = dat)
summary(model5)
cat('\n')
model6 = lm(social_reputation_100 ~ log_in_degree + log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model6)
cat('\n')
model7 = lm(social_reputation_100 ~ log_weak_SD + log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model7)
cat('\n')
model8 = lm(social_reputation_100 ~ log_strong_SD + log_sd_kclip_5 + log_answerCount,
            data = dat)
summary(model8)
# -
# ---
# #### Social bridges
# +
# Diversity measure enabled by social bridges (k-clip 5, weak ties, f20).
dat <-
    data %>%
    # ego users with more than 10,000 followers are omitted as their social bridges are not computed,
    # due to the computational complexity
    na.omit() %>% # remove na in social bridge data
    # dim()
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_sd_kclip_5 = (log_sd_kclip_5-min(log_sd_kclip_5))/(max(log_sd_kclip_5)-min(log_sd_kclip_5))) %>%
    mutate(log_kclip_5_weak_SB_f20 = (log_kclip_5_weak_SB_f20-min(log_kclip_5_weak_SB_f20))/(max(log_kclip_5_weak_SB_f20)-min(log_kclip_5_weak_SB_f20))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree, log_weak_SD,
           log_strong_SD,
           log_sd_kclip_5,
           log_kclip_5_weak_SB_f20, # diversity measure enabled by social bridges
           log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
# Social-bridge diversity jointly with each earlier measure, first without and
# then with the answer-count control (the .1 variants).
model1 = lm(social_reputation_100 ~ log_in_degree + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_weak_SD + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model2)
cat('\n')
model3 = lm(social_reputation_100 ~ log_strong_SD + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model3)
cat('\n')
model4 = lm(social_reputation_100 ~ log_sd_kclip_5 + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model4)
cat('\n')
model1.1 = lm(social_reputation_100 ~ log_in_degree + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model1.1)
cat('\n')
model2.1 = lm(social_reputation_100 ~ log_weak_SD + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model2.1)
cat('\n')
model3.1 = lm(social_reputation_100 ~ log_strong_SD + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model3.1)
cat('\n')
model4.1 = lm(social_reputation_100 ~ log_sd_kclip_5 + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model4.1)
# -
# ---
# *on top 5% ego users*
# Recompute the top-5% threshold on the social-bridge subsample (NAs removed),
# since dropping rows shifts the quantile.
data %>%
    # ego users with more than 10,000 followers are omitted as their social bridges are not computed,
    # due to the computational complexity
    na.omit() %>% # remove na in social bridge data
    summarize(top5_percent=quantile(social_reputation_100, probs=0.95))
# +
# Social-bridge models restricted to the top 5% of the NA-free subsample.
dat <-
    data %>%
    # ego users with more than 10,000 followers are omitted as their social bridges are not computed,
    # due to the computational complexity
    na.omit() %>% # remove na in social bridge data
    # NOTE(review): 33.13859 is the rounded top-5% quantile printed by the cell
    # above; recomputing it inline would avoid the hard-coded constant, but the
    # rounding means the recomputed value could filter slightly differently — confirm.
    filter(social_reputation_100>=33.13859) %>%
    mutate(log_in_degree = (log_in_degree-min(log_in_degree))/(max(log_in_degree)-min(log_in_degree))) %>%
    mutate(log_weak_SD = (log_weak_SD-min(log_weak_SD))/(max(log_weak_SD)-min(log_weak_SD))) %>%
    mutate(log_strong_SD = (log_strong_SD-min(log_strong_SD))/(max(log_strong_SD)-min(log_strong_SD))) %>%
    mutate(log_sd_kclip_5 = (log_sd_kclip_5-min(log_sd_kclip_5))/(max(log_sd_kclip_5)-min(log_sd_kclip_5))) %>%
    mutate(log_kclip_5_weak_SB_f20 = (log_kclip_5_weak_SB_f20-min(log_kclip_5_weak_SB_f20))/(max(log_kclip_5_weak_SB_f20)-min(log_kclip_5_weak_SB_f20))) %>%
    mutate(log_answerCount = (log_answerCount-min(log_answerCount))/(max(log_answerCount)-min(log_answerCount))) %>%
    mutate(social_reputation_100 = (social_reputation_100-min(social_reputation_100))/(max(social_reputation_100)-min(social_reputation_100))) %>%
    select(log_in_degree, log_weak_SD,
           log_strong_SD,
           log_sd_kclip_5,
           log_kclip_5_weak_SB_f20, # diversity measure enabled by social bridges
           log_answerCount,
           social_reputation_100)
# %>% summary()
dim(dat)
summary(dat)
# +
model1 = lm(social_reputation_100 ~ log_in_degree + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model1)
cat('\n')
model2 = lm(social_reputation_100 ~ log_weak_SD + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model2)
cat('\n')
model3 = lm(social_reputation_100 ~ log_strong_SD + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model3)
cat('\n')
model4 = lm(social_reputation_100 ~ log_sd_kclip_5 + log_kclip_5_weak_SB_f20,
            data = dat)
summary(model4)
cat('\n')
model1.1 = lm(social_reputation_100 ~ log_in_degree + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model1.1)
cat('\n')
model2.1 = lm(social_reputation_100 ~ log_weak_SD + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model2.1)
cat('\n')
model3.1 = lm(social_reputation_100 ~ log_strong_SD + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model3.1)
cat('\n')
model4.1 = lm(social_reputation_100 ~ log_sd_kclip_5 + log_kclip_5_weak_SB_f20 + log_answerCount,
              data = dat)
summary(model4.1)
# -
| ols.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/madhavjk/DataScience-ML_and_DL/blob/main/Session_8_(Pandas).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="zIGmeNAiypfG"
# # pandas
# pandas contains data structures and data manipulation tools designed to make data analysis and cleaning fast and easy in Python. It has two data structures: 1. Series and 2. DataFrame
# #### Datatypes : int64, float64, object, bool, datetime64, timedelta, category
# + [markdown] id="kFw35pQ-ypfJ"
# # 1.SERIES
# + id="O4KJ_vrxypfL"
import pandas as pd
# + id="y2ZDrhouypfN" outputId="4e1e5a87-0be4-4bb5-f6ba-ad81ccc9c203"
data = [1,2,3,4]
s1 = pd.Series(data)  # default integer (RangeIndex) labels 0..3
s1
# + id="DF7j2nqNypfQ" outputId="ededd23d-b0df-4d0c-ae9e-03d578f23ff4"
s1.values  # the underlying numpy array
# + id="X-gpL6xFypfS" outputId="2748415f-7ae9-4a03-f7a6-bce3288f2426"
s1.index
# + id="H86Dv4afypfT"
s1.index = ['a','b','c','d']  # replace the default index with string labels
# + id="cdNTime_ypfU" outputId="1448c247-42ca-4c1f-afbc-0c4177c4805a"
s1
# + id="2YqSlqRrypfW" outputId="ecced104-c34c-4ddd-c206-3beca6f33fb1"
s1.index
# + id="4ozOgxFxypfX" outputId="db1c888c-bbcd-4267-b251-a51a0bae8a82"
s1[0]  # NOTE(review): positional access on a labelled Series is deprecated in newer pandas; prefer s1.iloc[0]
# + id="mLpZWdIBypfa" outputId="8f3d2665-d31c-4a5f-8016-7daf5536d924"
s1['a']  # label-based access
# + id="VHS5XN6oypfc" outputId="3158d051-b81f-4dd0-bcc7-55539b781bc7"
s1.iloc[2]  # explicit position-based access
# + id="cVIjlnkPypfd" outputId="56ef5008-6237-4297-c6ec-70c1f5d1a42c"
s1.loc['c']  # explicit label-based access
# + id="A8AY8Jlhypfm" outputId="c85752d4-dd01-48c9-abc6-ef0ead89d262"
s1['e'] = 5  # assigning to a new label appends it
s1
# + id="zT-RarQcypfq" outputId="79c103d8-cdf5-4361-a6e7-4ee6d4a01629"
s1.loc['f'] = 6
s1
# + id="SW7VMr_pypfs" outputId="2621caea-25e3-4734-ecdb-4d49a70148bc"
s1.loc  # the indexer object itself, not a value
# + id="5ZQbk7WUypf7" outputId="c3c927fe-552b-44aa-d9fe-b28781af257d"
1 in s1.values  # membership tests run against values ...
# + id="peI0Gwjpypf9" outputId="ce5f8073-8ceb-4509-ed63-5f20db674219"
7 in s1.values
# + id="cPaH8oBBypf_" outputId="1840211e-b682-4728-9b80-042350e57fa1"
'c' in s1.index  # ... or against index labels
# + id="sFci1kO0ypgB" outputId="7a6a3d40-e581-4bef-88f1-7d7e74a9819e"
'x' in s1.index
# + id="hSxF0qq0ypgC" outputId="42f834cd-fa3f-4307-8dde-d17e018d8b87"
s1[1:5]  # integer slices are positional even with string labels
# + id="XoYHvVFSypgD" outputId="705884f7-26bb-433e-afc9-76a898e0f897"
# Dict keys become the index, dict values the data.
data = {'Jaipur' : 'Rajasthan',
        'Mumbai' : 'Maharashtra',
        'Kolkata' : 'West Bengal',
        'Bangaluru' : 'Karnataka',
        'Chandigarh' : 'Punjab'}
s2 = pd.Series(data)
s2
# + id="OqLcr000ypgE"
s2.name = 'States'  # name shown when the Series is displayed or joined into a frame
# + id="75_5723dypgF"
s2.index.name = 'Capital'
# + id="qLiQn_SpypgG" outputId="08c987fb-8924-4ba6-c511-3789854d411a"
s2
# + id="CE-d3_uFypgH" outputId="bfd7f53e-d751-4db0-a9d0-bcfe608fed6c"
s2['Mumbai']
# + id="KXYUQ07DypgI" outputId="df4b0ea2-65aa-4741-9f3b-645be127243d"
s2[['Jaipur','Kolkata', 'Chandigarh']]  # list of labels -> sub-Series
# + id="7zB4JQgDypgK" outputId="7bbacd27-02d7-4645-d04b-3829740bd6d3"
# Passing an explicit index keeps only those keys from the dict.
capitals = ['Jaipur','Kolkata', 'Chandigarh']
s3 = pd.Series(data, index = capitals)
s3
# + id="OHLLhaO-ypgM" outputId="53fc5833-a34f-42db-92f1-6f1006886f8d"
s3.isnull()
# + id="Mh1DeGvlypgN" outputId="636f1848-8a74-4959-c396-a9e403e31ecd"
# 'Dehli' is not a key of the dict, so its entry becomes NaN.
capitals2 = ['Jaipur','Kolkata', 'Chandigarh','Dehli']
s4 = pd.Series(data, index = capitals2)
s4
# + id="DeonOmkbypgP" outputId="0ec94ce4-8666-46e5-b53a-b21cf6cc8537"
s4.isnull()
# + id="E8_bMCR6ypgR" outputId="c6812b51-2dfd-4b92-d7d6-2316ce946268"
s4.notnull()
# + id="kGYPPjJMypgS" outputId="b1a0b3af-da18-4077-bc96-683755fcdf82"
# Index labels need not be unique.
s = pd.Series(['India','Australia','England'], index = ['Cricket','Cricket','Cricket'])
s
# + id="SCntEgIzypgU" outputId="2df0f123-1dd7-4793-8393-ad42250bb1fa"
s.loc['Cricket']  # duplicate label -> all matching rows are returned
# + id="fS9oiZi1ypgX" outputId="b2c39e3c-54c7-41cf-a32f-c68fb0e4dc04"
colors = ['Blue', 'Red', None]  # object dtype: None is kept as None
pd.Series(colors)
# + id="fL7WiOCgypgY" outputId="b0967596-115a-400f-8fba-d33e2af8987d"
numbers = [1,2, None]  # numeric data: None becomes NaN (float dtype)
pd.Series(numbers)
# + [markdown] id="2DsIQf1oypga"
# # DATAFRAME
# + id="4n18jnALypgc"
# Each student record as a Series; together they form the rows of a frame.
student1 = pd.Series({'Name' : 'Utkarsh',
                      'Id' : 1})
student2 = pd.Series({'Name' : 'Amit',
                      'Id' : 2})
student3 = pd.Series({'Name' : 'Ankit',
                      'Id' : 3})
# + id="2MjtIRtpypgc"
df1 = pd.DataFrame([student1,student2,student3], index = (101,102,103))
# + id="4B689StRypgd" outputId="d5be7939-1a4e-4831-c039-79288acb835d"
df1
# + id="FqJcYSRZypgf" outputId="19ba38f8-d1d1-49aa-ffe9-14e4191f1b60"
# Column-oriented construction from a dict of lists.
data = {'Name' : ['Utkarsh', 'Amit', 'Ankit'],
        'ID' : [1,2,3]}
df2 = pd.DataFrame(data, index = [101,102,103])
df2
# + id="muzbvYl4ypgh" outputId="d63fff48-db9f-4c1d-d853-7bb899b12fae"
df2.head()
# + id="8dxikWRWypgi" outputId="236cdba7-b454-418b-a9d1-9da945f594f7"
# Requesting an 'Age' column that is absent from the data yields NaN.
df3 = pd.DataFrame(data, index = [101,102,103], columns = ['Name','ID','Age'])
df3
# + id="GkqSjQERypgr"
df3.Age = 25  # broadcast a scalar into the existing (all-NaN) column
# + id="-PKMsSX8ypgr" outputId="c56394d7-1209-4bcb-fe26-3d768b5f9dff"
df3
# + id="0ZSOw7TUypgs" outputId="6d99afa8-dcf9-4186-a9ef-c0cb1a217e6f"
df3.index
# + id="-bpb4KdPypgu" outputId="e7bcabd6-66e0-4031-cdde-0a722b3f431b"
df3.values
# + id="5__uck6Oypgw" outputId="78d22bd8-3b0f-4b33-9c78-cc74a4101ce7"
df3.columns
# + id="eL_bHlULypgy" outputId="2bfc0fea-62dd-4013-a8fd-d6eccdbfcad6"
df3['Name']
# + id="KKWCTTMTypgz" outputId="ef046559-068c-4349-b424-11b2939c7000"
df3.Name  # attribute access; works only for valid-identifier column names
# + id="Ywcs1ftzypg0" outputId="82b0741d-6bfc-4844-d453-02781252cf28"
df3.iloc[0]  # first row by position
# + id="irvLCDFcypg2" outputId="3b3179a4-fed9-42d7-813c-16f7fcd70855"
df3.loc[101]  # row by index label
# + id="-wvQ-QJSypg3"
df3.loc[104] = ['Akash',4,23]  # assigning to a new label appends a row
# + id="hOV-oMT5ypg4" outputId="c07196f3-d31c-4863-e990-2a5ff9b373cd"
df3
# + id="TiQTOfvPypg-" outputId="918745eb-ab02-4bf0-9887-deb305715062"
df3.loc[102]['Name']  # chained indexing is fine for reading (not for writing)
# + id="EpRsUTqgypg_" outputId="b023f88f-8d79-413c-e3ef-14a1465fff80"
df3['Age'][102] = 26
# + id="72O-gff6yphC" outputId="525dc2e0-edd7-4a06-d9ce-7ef696d54ad3"
df3
# + id="CKRIsrM2yphF" outputId="2801cb28-05d5-4c73-e494-c01ddbe5a360"
# Assigning a Series aligns on index labels, not on position.
val = pd.Series([21,23,25,24], index = [101,102,103,104])
df3['Age'] = val
df3
# + id="rroEdMctyphG" outputId="95d87e5d-9187-4ba3-c6a0-28a284eddf7c"
# Only labels 101 and 103 are supplied; the other rows get NaN.
val2 = pd.Series([6,5], index = [101,103])
df3['Height'] = val2
df3
# + id="L0mNYz7ayphI" outputId="777f0dcb-dab7-4339-e2ed-2323b5da6763"
df3['Weight'] = df3.Age  # copy an existing column under a new name
df3
# + id="FgmNB5YwyphL" outputId="965ebfd0-bede-4e77-fb0f-7ea42b92564a"
df3.drop(103)  # returns a new frame; df3 itself is unchanged
# + id="k-l6jOQryphM" outputId="75430db0-e74a-4c00-925d-8f6c7aed60e0"
df3
# + id="VkunAw02yphP"
del df3['Weight']  # in-place column removal
# + id="sQBVZhtXyphP" outputId="916b372f-9a5d-4ef7-dcd6-b040dda5beb3"
df3
# + id="6hpzc9FbyphQ" outputId="cd849a35-dfcb-4fc7-99ce-77f44da6674e"
df3.T  # transposed view: rows become columns
# + id="S6luL_oPyphR" outputId="a7f64882-51da-4cb3-806b-5dedfc874c87"
df3.index.name = 'Students'
df3.columns.name = 'Details'
df3
# + id="owaZxcIEyphS"
index = df3.index
# + id="yOMfNlx7yphT" outputId="0f859ec1-999f-424c-810a-15a03b896d46"
index[1] = 'A'  # intentionally raises TypeError: Index objects are immutable
# + id="TKlRu352yphU" outputId="3932f6d9-2df0-4661-bcab-4cbb92568c8a"
# None of the new labels exist in df3, so every row of df4 is NaN.
df4 = df3.reindex([201,202,203,204])
df4
# + id="nM-oxrxJyphW" outputId="69051353-1f8b-4bbb-8094-74809b336bbb"
df3.dropna()  # drop rows containing NaN; returns a copy
# + id="Ug5EBQBjyphZ" outputId="a0b5555e-d32e-496b-aa6b-3c8baff17cde"
df3.fillna(0)  # replace NaN with 0; returns a copy
# + id="0q3qyQxbypha" outputId="b1d789bd-1633-4828-b926-d49a69a318a5"
df3.set_index(['Name'])  # use the Name column as the index; returns a copy
# + id="MmB37m0-yphb" outputId="960f4830-dcfc-45cc-db16-ebc46b79b282"
df3.reset_index()  # move the current index back into a column; returns a copy
# + [markdown] id="O0QNxh9ryphc"
# ## Index a dataframe
# + id="C279UWKEyphc" outputId="c5e7ddf6-476d-40f1-c80c-06cb35c6351c"
df3['Name']  # single column -> Series
# + id="k1G-Nh9Xyphd" outputId="2504b7f0-06d6-487d-abf5-625f48fc23d0"
df3[['Name','Age']]  # list of columns -> sub-DataFrame
# + id="NSdHHzF1yphf" outputId="6e99264e-c6fd-4593-d128-87a507ca64f5"
df3.loc[102]  # one row by label
# + id="W5ulmSQ0yphg" outputId="ca245118-3e48-4d88-e4e3-438928a6307e"
df3.loc[[101,103]]  # several rows by label
# + id="y5fos36Uyphm" outputId="a08b9a83-6cc3-4667-fbea-d1a120d41f08"
df3.loc[[101,103],['Name','Age']]  # row labels and column labels together
# + id="gGUOpaO0yphn" outputId="ced3dc05-ff29-4103-a6fb-3156d99fa243"
df3.iloc[3]  # row by position
# + id="gHZYochEypho" outputId="f657bbc6-e5dd-49f9-ccb6-e0dce563a4b5"
df3.loc[:,['Name','Age']]  # all rows, selected columns
# + id="-3GRYkt9yphp" outputId="d859c240-5135-4214-e759-ec3ebabe2c24"
df3.sort_index()
# + id="LOuUAM6Hyphq" outputId="151af984-9b04-4842-f51e-b197ae44bd94"
df3.sort_values(by = 'Name')
# + id="12ogNnWeyphr" outputId="1f2a20d9-1b4d-44b3-a020-7c970471e1e1"
df3['Name'].unique()  # distinct values as a numpy array
# + [markdown] id="9Aa4Q3iuypht"
# ## Descriptive Statistics
# + id="X38oanmpyphu" outputId="b1a13206-8275-49ac-d7ca-6e0f3b46634f"
import numpy as np
# Three integer columns of ten rows each, for the aggregation examples.
data = {'A' : np.arange(11,21),
        'B' : np.arange(21,31),
        'C' : np.arange(31,41)}
df = pd.DataFrame(data)
df
# + id="E5zDnDnlyphv" outputId="971e1600-5664-4e2d-f592-50240fae5c3f"
df.sum()  # default axis=0: one value per column
# + id="k080rv7Ryphv" outputId="4bb31de5-9055-474f-d2bc-f406a5cb3366"
df.sum(axis=1)  # axis=1: one value per row
# + id="5z-tStwpyphw" outputId="41af5b11-e468-4ff6-88ec-a05ed196e3c2"
df.mean()
# + id="y1D9mTGoyphx" outputId="6dd92333-758a-4580-f8c0-f181a8204a40"
df.mean(axis=1)
# + id="-QTzgulVyphy" outputId="3aa88551-1ab8-4df4-cb99-207ca0c9a5e1"
df.mean(skipna=True)  # skipna=True is the default (ignore NaN entries)
# + id="i6F1gIN0yphz" outputId="eb5e62a7-272b-4eae-9992-9413cf210748"
df.min()
# + id="aHy-IMxjyph0" outputId="23f89299-cd6e-4533-a4e4-4431fc2f2da7"
df.max()
# + id="hOdjb54Uyph0" outputId="338c2121-f446-488c-dd16-ba1f29e1dd28"
df.var()
# + id="YGCeOCW4yph2" outputId="c285526c-d11b-4a5f-c80c-b01cd9167a2a"
df.std()
# + id="4qCHoaQAyph4" outputId="b33dcd8c-19ee-4167-c4cb-1acd60f7f7b1"
df.median()
# + id="YBK0Oa5fyph5" outputId="5fa9c51a-de73-4528-eb68-51bd63e48c13"
df.describe()  # count/mean/std/min/quartiles/max per column
# + id="tyTiBTuVyph7" outputId="4cf3e3ca-4adc-4e77-aee4-3f7aa0f29492"
df.describe().T  # transposed: one row per column of df
# + id="2AfxvKrZyph8"
| Session_8_(Pandas).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .env3.8
# language: python
# name: .env3.8
# ---
# # Lights model tutorial
# + pycharm={"is_executing": true}
# %reset -f
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from lifelines import KaplanMeierFitter
from tick.plot import plot_point_process
from lights.simulation import SimuJointLongitudinalSurvival
from lights.base.utils import heatmap, annotate_heatmap, gompertz_pdf, \
gompertz_survival, visualize_vect_learning, visualize_vect_per_group
from sklearn.model_selection import ShuffleSplit
from lifelines.utils import concordance_index as c_index_score
from IPython.display import Markdown, display
from scipy.stats import beta
from matplotlib import rc
rc('text', usetex=True)
# %matplotlib inline
def printmd(string: str) -> None:
    """Render *string* as Markdown in the notebook output area."""
    display(Markdown(string))
# -
# ## Generate data
# +
n_long_features = 5
n_time_indep_features = 10
simu = SimuJointLongitudinalSurvival(seed=123, n_long_features=n_long_features, n_samples=400
,n_time_indep_features = n_time_indep_features, sparsity=0.5)
X, Y, T, delta, S_k = simu.simulate()
printmd("\nLevel of censoring: **%.2f%%**" % (100*(1 - delta.mean())))
# -
# ### Visualize parameter vectors
# We generate survival times with a risk model of the form
# $$\begin{align*}
# \lambda_i(t|G_i = k) &= \lambda_0(t) \exp \Big\{ x_i^\top \xi + \sum_{l=1}^L \gamma_{k,1}^l (\beta_{k,1}^l + \beta_{k,2}^l t + b_{i,1}^l + b_{i,2}^l t) + (\gamma_{k,2,1}^l b_{i,1}^l + \gamma_{k,2,2}^l b_{i,2}^l) + \gamma_{k,3}^l (\beta_{k,2}^l + b_{i,2}^l) \Big\} \\
# &= \lambda_0(t) \exp \big\{ \iota_{i,k,1} + \iota_{i,k,2} t \big\}
# \end{align*}$$
# +
# True simulation parameters: xi (time-independent), betas (fixed effects),
# gammas (association coefficients), the latter two indexed by latent group k.
xi, betas, gammas = simu.time_indep_coeffs, simu.fixed_effect_coeffs, simu.asso_coeffs
# Stem plot of the time-independent coefficient vector xi.
fig = plt.figure(figsize=(8, 4))
fontsize = 16
ax = fig.add_subplot(111)
ax.stem(xi, linefmt='g-', markerfmt='go')
ax.set_xlim([-5, len(xi) + 5])
ax.set_title(r"$\xi$", fontsize=fontsize+4)
plt.yticks(fontsize=fontsize)
plt.tick_params(axis='x', bottom=False, labelbottom=False)
plt.show()
# 2x2 grid: beta and gamma vectors for low-risk (k=0) and high-risk (k=1).
fig = plt.figure(figsize=(16, 8))
names, colors, labels = ['beta', 'gamma'], ['b', 'r'], ['Low-risk', 'High-risk']
j = 1  # running subplot index
for i, vectors in enumerate([betas, gammas]):
    for k in [0, 1]:
        name = names[i]
        ax = fig.add_subplot(2,2,j)
        j += 1
        ax.stem(vectors[k], linefmt='%s-' % colors[k],
                markerfmt='%so' % colors[k], label=labels[k])
        ax.set_xlim([-5, len(vectors[k]) + 5])
        ax.set_title(r"$\%s_%s$" % (name, k), fontsize=fontsize+4)
        plt.yticks(fontsize=fontsize)
        plt.tick_params(axis='x', bottom=False, labelbottom=False)
        plt.legend(fontsize=fontsize-2)
        # Draw separators between the per-longitudinal-feature coefficient groups.
        visualize_vect_per_group(vectors[k], n_long_features, ax)
plt.show()
# -
# ### Visualize simulated times
# We choose a Gompertz distribution for the baseline, that is
# $$\lambda_0(t) = \kappa_1 \kappa_2 \exp(\kappa_2t)$$
# with $\kappa_1 > 0$ and $\kappa_2 \in R$ the scale and shape parameters respectively.
# +
# Gompertz baseline hazard parameters used by the simulator.
scale, shape = simu.scale, simu.shape
print("kappa_1=%s, kappa_2=%s" % (scale, shape))
fig = plt.figure(figsize=(12, 6))
# Density function plot
ax = fig.add_subplot(121)
t = np.linspace(0, 100, 100)
ax.plot(t, gompertz_pdf(t, shape, scale), '-', color='darkorange', lw=3, alpha=0.6)
plt.xlabel(r"$t$", fontsize=fontsize+4)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.title(r"$f_0(t) = \kappa_1 \kappa_2 \exp \big(\kappa_1 + \kappa_2 t -\kappa_1 e^{\kappa_2 t}\big)$",
          size=fontsize+2)
# Survival function plot
ax = fig.add_subplot(122)
t = np.linspace(0, 100, 100)
ax.plot(t, gompertz_survival(t, shape, scale), '-', color='darkorange', lw=3, alpha=0.6)
plt.xlabel(r"$t$", fontsize=fontsize+4)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.title(r"$S_0(t) = \exp \big(-\kappa_1 (e^{\kappa_2 t} - 1) \big)$", size=fontsize+2)
fig.tight_layout()
plt.show()
# +
# Histograms of the linear predictors iota_{i,k,1} and iota_{i,k,2} by group.
fig = plt.figure(figsize=(16, 4))
iotas = simu.iotas
for i in [1, 2]:
    ax = fig.add_subplot(1,2,i)
    iota_0i, iota_1i = iotas[i]
    all_iota = list(iota_0i) + list(iota_1i)
    mini, maxi = min(all_iota), max(all_iota)
    bins = np.linspace(mini, maxi, 40)
    kwargs = dict(bins=bins, alpha=0.6, rwidth=0.9)
    plt.hist(iota_1i, **kwargs, color='r', label='High-risk')
    plt.hist(iota_0i, **kwargs, color='b', label='Low-risk')
    plt.xlabel(r"$\iota_{i,k,%s}$" % i, size=fontsize + 5)
    plt.ylabel("Count", size=fontsize)
    plt.legend(fontsize=fontsize-2)
    ax.tick_params(labelsize=fontsize-2)
    plt.title("Frequency histogram of " + r'$\iota_{i,k,%s}$' % i, size=fontsize+2)
plt.show()
# Histograms of the true event times T* and the censored times T, by latent group,
# plus Kaplan-Meier survival curves in a third panel.
fig = plt.figure(figsize=(16, 4))
G = simu.latent_class
T_star = simu.event_times
times, labels = [T_star, T], ['T^\star', 'T']
for i in [0, 1]:
    ax = fig.add_subplot(1,3,i+1)
    df = pd.DataFrame(data={"time": times[i], "group": G})
    bins = np.linspace(0, times[i].max(), 40)
    kwargs = dict(bins=bins, alpha=0.6, rwidth=0.9)
    plt.hist(df.loc[df.group == 1, 'time'], **kwargs, color='r', label='High-risk')
    plt.hist(df.loc[df.group==0, 'time'], **kwargs, color='b', label='Low-risk')
    plt.legend(fontsize=fontsize-2)
    plt.xlabel(r'$%s$' % labels[i], size=fontsize+2)
    plt.ylabel("Count", size=fontsize)
    ax.tick_params(labelsize=fontsize-2)
    plt.title("Frequency histogram of " + r'$%s$' % labels[i], size=fontsize+2)
# Kaplan Meier estimation of survival curves
kmf = KaplanMeierFitter()
ax = plt.subplot(133)
kmf.fit(T_star[G == 1], delta[G == 1], label="High-risk").plot(ax=ax, c='r')
kmf.fit(T_star[G == 0], delta[G == 0], label="Low-risk").plot(ax=ax, c='b')
plt.legend(fontsize=fontsize-2)
plt.xlabel('Time $t$', size=fontsize)
plt.ylabel(r'$P[S > t]$', size=fontsize+2)
plt.title("Survival curves", size=fontsize+2)
ax.tick_params(labelsize=fontsize-2)
plt.show()
# -
# ### Visualize longitudinal processes for two subjects randomly chosen
# Plot the longitudinal trajectories of one randomly chosen subject per group,
# one subplot per longitudinal feature (x axes shared with the first subplot).
np.random.seed(10)
fig = plt.figure(figsize=(16, 8))
ax0 = plt.subplot(n_long_features,1,1)
colors, labels = ['b', 'r'], ['Low-risk', 'High-risk']
for k in [0, 1]:
    # Random subject index within latent group k.
    idx = np.random.choice(Y[G == k].index)
    Y_i = Y.loc[idx, :]
    label, color = labels[k], colors[k]
    for l in range(1, n_long_features + 1):
        Y_il = Y_i["long_feature_%s" % l]
        ax = plt.subplot(n_long_features,1 ,l , sharex=ax0)
        Y_il.plot(label=label, color=color, marker='H')
        ax.set_title("Longitudinal feature %s" % l, fontsize=fontsize+4)
        plt.xticks(fontsize=fontsize), plt.yticks(fontsize=fontsize)
        plt.legend(fontsize=fontsize-2)
fig.tight_layout()
plt.show()
# ### Visualize Hawkes Processes used to generate time measurements of longitudinal data for the previous high-risk subject
# For a subject $i$, times $\{t_{ij}^l\}_{j \geq 1}$ for processes $l=1, \ldots, L$ are simulated using a multivariate Hawkes process $N_{it} = [N_{it}^1 \cdots N_{it}^L]$ with $t \geq 0$ and $N_{it}^l = \sum_{j \geq 1} 1_{\{t_{ij}^l \leq t\}}$. The process $N_{it}$ is a multivariate counting process, whose components $N_{it}^l$ have intensities $$\lambda_i^l(t) = \Upsilon_{l} + \sum_{l'=1}^L \sum_{j \geq 1} A_{ll'} \upsilon \exp\big(-\upsilon(t - t_{ij}^{l'}) \big)$$ for $l=1, \ldots, L$.
# +
# Re-simulate the subject's Hawkes process with intensity tracking enabled so
# plot_point_process can draw the intensity paths. `idx` is the last (high-risk)
# subject chosen in the previous cell.
hawkes = simu.hawkes
hawkes_i = hawkes[idx]
adjacency = hawkes_i.adjacency
# need to reset hawkes to track intensity
hawkes_i.reset()
dt = 0.01  # intensity tracking resolution
hawkes_i.track_intensity(dt)
hawkes_i.simulate()
fig, ax = plt.subplots(n_long_features, 1, figsize=(16, 8), sharex=True, sharey=True)
long_features_names = ["Longitudinal feature %s" % (l + 1) for l in range(n_long_features)]
plot_point_process(hawkes_i, n_points=50000, ax=ax, node_names=long_features_names)
fig.tight_layout()
# -
# The matrix $A = [A_{ll'}]_{1 \leq l,l' \leq L}$ is the adjacency matrix such that $A_{ll'} \geq 0$ quantifies the impact of past measurement time of process $l'$ on the measurement time of process $l$, and $\upsilon \geq 0$ is a memory parameter.
#
# We also plot the density of the law used to simulate $t_i^{max}$, that is,
#
# $$t_i^{max} \sim T_i \times \big(1 - \text{Beta}(\nu_1, \nu_2)\big)$$
# with $(\nu_1, \nu_2) = (2,5)$, to mimic the fact that in practice, one has access to a reasonable amount of longitudinal data before making a prediction.
# +
fig = plt.figure(figsize=(12, 6))
# Adjacency matrix
ax = fig.add_subplot(121)
im, cbar = heatmap(adjacency, long_features_names, long_features_names, ax=ax, cmap="YlGn")
texts = annotate_heatmap(im)
plt.title("Adjacency matrix", size=fontsize+2)
# Density of the law for the time up to which one has longitudinal data
ax = fig.add_subplot(122)
a, b = 2, 5  # (nu_1, nu_2) of the Beta distribution
x = np.linspace(beta.ppf(0.0, a, b),
                beta.ppf(1, a, b), 100)
# pdf of 1 - Beta(a, b): mirror the Beta density around 1/2.
ax.plot(x, beta.pdf(1-x, a, b), '-', color='darkorange', lw=3, alpha=0.6)
labels = ['', r'$0$', r"$T_i$"]
plt.xlabel(r"$t_i^{max}$", fontsize=fontsize+4)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
ax.tick_params(axis='x', pad=10)
plt.title(r"$t_i^{max} \sim T_i \times \big(1 - Beta(\nu_1, \nu_2)\big) $", size=fontsize+4)
plt.locator_params(axis='x', nbins=2)
ax.set_xticklabels(labels)
fig.tight_layout()
plt.show()
# -
# # Lights learning
# ## Initializer illustration
# Since our QNMCEM algorithm gives a local minimum, it is clever to choose an initial value $\theta^{(0)}$ close to the final solution $\hat \theta$, so that the number of iterations required to reach convergence is reduced.
#
# The longitudinal submodels parameters $\beta_k^{(0)}$, $D^{(0)}$ and $\phi^{(0)}$ are initialized (like if there is no subgroup ($\beta_0^{(0)} = \beta_{1}^{(0)}$)) using a multivariate linear mixed model (MLMM) with an explicit EM algorithm, being itself initialized with univariates fits (ULMM). Let us test those initializer on a simple example.
# ### Simulation with no latent subgroups
# +
from lights.init.mlmm import MLMM
from lights.base.base import extract_features
from lights.base.utils import plot_history
# high_risk_rate=0 for no subgroups
# With a single latent group, beta_0 = beta_1, which is the setting the MLMM
# initializer assumes; only Y (index 1 of simulate()'s output) is needed here.
simu_ = SimuJointLongitudinalSurvival(n_samples=200, n_time_indep_features=5, n_long_features=3,
                                      seed=1, high_risk_rate=0, fixed_effect_mean_low_risk=(1, .3),
                                      fixed_effect_mean_high_risk=(1, .5), cov_corr_long = .001, std_error = 2.)
Y_ = simu_.simulate()[1]
# Get true beta parameter
true_beta_ = simu_.fixed_effect_coeffs[0]
# -
# ### MLMM with fixed initialization
#
# For instance $\beta^{(0)}=\textbf{0}$
# +
# Fit the MLMM initializer WITHOUT the ULMM warm start (initialize=False),
# i.e. starting from beta^(0) = 0, to contrast with the next cell.
fixed_effect_time_order = 1  # linear time trend in the fixed effects
tol = 1e-5  # EM convergence tolerance
mlmm = MLMM(fixed_effect_time_order=fixed_effect_time_order,
            tol=tol, initialize=False, max_iter=300)
# Get design features matrices
extracted_features = extract_features(Y_, fixed_effect_time_order)
# Train the initializer
mlmm.fit(extracted_features)
# Visualize learning
to = len(true_beta_)+1
leg1 = [r"$\hat \beta_%s$" % j for j in range(1, to)]
leg2 = [r"$\beta_%s$" % j for j in range(1, to)]
visualize_vect_learning(mlmm, "fixed_effect_coeffs", r"$\beta$", true_beta_, leg1, leg2)
# -
# ### MLMM with ULMM initialization
#
# <span style="color:crimson">**Convergence is faster, and the estimation much better**</span>
# Fit the MLMM initializer WITH the ULMM warm start (initialize=True):
# convergence is faster and the estimate closer to the truth.
mlmm = MLMM(fixed_effect_time_order=fixed_effect_time_order,
            print_every=5, tol=tol, initialize=True)
extracted_features = extract_features(Y_, fixed_effect_time_order)
mlmm.fit(extracted_features)
visualize_vect_learning(mlmm, "fixed_effect_coeffs", r"$\beta$", true_beta_, leg1, leg2)
# True covariance of random effect
# Bug fix: the MLMM was fit on Y_ from `simu_` (the no-subgroup simulation),
# so the ground truth to compare against is simu_.long_cov, not simu.long_cov.
true_long_cov = simu_.long_cov
print("True covariance of random effect", true_long_cov)
# Estimation of covariance of random effect
est_long_cov = mlmm.long_cov
print("Estimation of covariance of random effect", est_long_cov)
# ## Data splitting
# +
# Split data into training and test sets
test_size = .3 # proportion of data used for testing
rs = ShuffleSplit(n_splits=1, test_size=test_size, random_state=0)
# n_splits=1, so this loop body runs exactly once.
for train_index, test_index in rs.split(X):
    X_test = X[test_index]
    Y_test = Y.iloc[test_index, :]
    T_test = T[test_index]
    delta_test = delta[test_index]
    X_train = X[train_index]
    Y_train = Y.iloc[train_index, :]
    T_train = T[train_index]
    delta_train = delta[train_index]
print("%d%% for training, %d%% for testing."
      % ((1 - test_size) * 100, test_size * 100))
# -
# ## Training
# +
from lights.inference import prox_QNMCEM
## Choose parameters ##
tol = 1e-6 # tolerance for the convergence stopping criterion
eta = 0.3 # parameter controlling the trade-off between l1
# and l2 regularization in the elasticNet
gamma_chosen = '1se' # way to select l_elasticNet_chosen: '1se' or 'min'
warm_start = True # at each L-BGFS-B iteration, reset beta to 0 or take
# the previous value
grid_size = 30 # grid size for the cross validation procedure
metric = 'C-index' # cross-validation metric: 'log_lik' or 'C-index'
# NOTE(review): tol, eta, gamma_chosen, warm_start, grid_size and metric are
# defined above but never passed to the learner below — presumably leftovers
# from a cross-validation setup; confirm before relying on them.
learner = prox_QNMCEM(fixed_effect_time_order=1, max_iter=10, compute_obj=True, print_every=1,
                      l_pen_SGL=0.1, eta_sp_gp_l1=.7, l_pen_EN=0.08, initialize=True, S_k=S_k)
learner.fit(X_train, Y_train, T_train, delta_train)
# Visualize learning
# Plot the objective trajectory across QNMCEM iterations.
visualize_vect_learning(learner, "obj")
# -
# Visualize beta_0
# Learning curves of the estimated fixed effects against the true vectors.
true_beta_0 = simu.fixed_effect_coeffs[0].reshape(-1, 1)
to = len(true_beta_0)+1
leg1 = [r"$\hat \beta^0_%s$" % j for j in range(1, to)]
leg2 = [r"$\beta^0_%s$" % j for j in range(1, to)]
visualize_vect_learning(learner, "beta_0", r"$\beta_0$", true_beta_0, leg1, leg2)
# Visualize beta_1
true_beta_1 = simu.fixed_effect_coeffs[1].reshape(-1, 1)
to = len(true_beta_1)+1
leg1 = [r"$\hat \beta^1_%s$" % j for j in range(1, to)]
leg2 = [r"$\beta^1_%s$" % j for j in range(1, to)]
visualize_vect_learning(learner, "beta_1", r"$\beta_1$", true_beta_1, leg1, leg2)
# +
# Side-by-side stem plots: true beta_k (green) vs final estimate (red),
# with the estimate's stems shifted by 0.5 so both are visible.
beta_0_true = true_beta_0
beta_0_est = learner.theta["beta_0"]
fig = plt.figure(figsize=(12, 4))
fontsize = 16
ax = fig.add_subplot(121)
ax.stem(np.arange(len(beta_0_true)).tolist(), beta_0_true, linefmt='g-', markerfmt='go', label= r"$\beta_0$")
ax.stem((np.arange(len(beta_0_est)) + .5).tolist(), beta_0_est, linefmt='r-', markerfmt='rx', label= r"$\hat \beta_0$")
ax.set_xlim([-5, len(beta_0_true) + 5])
ax.set_title(r"$\beta_0$ and its estimation", fontsize=fontsize+4)
plt.legend(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tick_params(axis='x', bottom=False, labelbottom=False)
visualize_vect_per_group(beta_0_true, n_long_features, ax)
beta_1_true = true_beta_1
beta_1_est = learner.theta["beta_1"]
fontsize = 16
ax = fig.add_subplot(122)
ax.stem(np.arange(len(beta_1_true)).tolist(), beta_1_true, linefmt='g-', markerfmt='go', label= r"$\beta_1$")
ax.stem((np.arange(len(beta_1_est)) + .5).tolist(), beta_1_est, linefmt='r-', markerfmt='rx', label= r"$\hat \beta_1$")
ax.set_xlim([-5, len(beta_1_true) + 5])
ax.set_title(r"$\beta_1$ and its estimation", fontsize=fontsize+4)
plt.legend(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tick_params(axis='x', bottom=False, labelbottom=False)
visualize_vect_per_group(beta_1_true, n_long_features, ax)
plt.show()
# -
# Visualize phi
# True residual variances: std_error^2 repeated for each longitudinal feature.
true_phi = np.array([simu.std_error ** 2] * simu.n_long_features).reshape(-1, 1)
to = len(true_phi)+1
leg1 = [r"$\hat \phi_%s$" % j for j in range(1, to)]
leg2 = [r"$\phi_%s$" % j for j in range(1, to)]
visualize_vect_learning(learner, "phi", symbol = r"$\phi$", true_coeffs = true_phi, legend_est = leg1, legend_true = leg2)
# Visualize xi
true_xi = simu.time_indep_coeffs.reshape(-1, 1)
to = len(true_xi)+1
leg1 = [r"$\hat \xi_%s$" % j for j in range(1, to)]
leg2 = [r"$\xi_%s$" % j for j in range(1, to)]
visualize_vect_learning(learner, "xi", r"$\xi$", true_xi, leg1, leg2)
# Stem plot: true xi (green) vs final estimate (red, shifted by 0.5).
xi_true = true_xi
xi_est = learner.theta["xi"]
fig = plt.figure(figsize=(8, 4))
fontsize = 16
ax = fig.add_subplot(111)
ax.stem(np.arange(len(xi_true)).tolist(), true_xi, linefmt='g-', markerfmt='go', label= r"$\xi$")
ax.stem((np.arange(len(xi_est)) + .5).tolist(), xi_est, linefmt='r-', markerfmt='rx', label= r"$\hat \xi$")
ax.set_xlim([-5, len(true_xi) + 5])
ax.set_title(r"$\xi$ and its estimation", fontsize=fontsize+4)
plt.legend(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tick_params(axis='x', bottom=False, labelbottom=False)
plt.show()
# Visualize gamma_0
# Learning curves of the estimated association coefficients vs the truth.
true_gamma_0 = simu.asso_coeffs[0].reshape(-1, 1)
to = len(true_gamma_0)+1
leg1 = [r"$\hat \gamma^0_{%s}$" % j for j in range(1, to)]
leg2 = [r"$\gamma^0_{%s}$" % j for j in range(1, to)]
visualize_vect_learning(learner, "gamma_0", r"$\gamma_0$", true_gamma_0, leg1, leg2)
# Visualize gamma_1
true_gamma_1 = simu.asso_coeffs[1].reshape(-1, 1)
to = len(true_gamma_1)+1
leg1 = [r"$\hat \gamma^1_{%s}$" % j for j in range(1, to)]
leg2 = [r"$\gamma^1_{%s}$" % j for j in range(1, to)]
visualize_vect_learning(learner, "gamma_1", r"$\gamma_1$", true_gamma_1, leg1, leg2)
# +
# Side-by-side stem plots: true gamma_k (green) vs final estimate (red, shifted).
gamma_0_true = true_gamma_0
gamma_0_est = learner.theta["gamma_0"]
fig = plt.figure(figsize=(12, 4))
fontsize = 16
ax = fig.add_subplot(121)
ax.stem(np.arange(len(gamma_0_true)).tolist(), gamma_0_true, linefmt='g-', markerfmt='go', label= r"$\gamma_0$")
ax.stem((np.arange(len(gamma_0_est)) + .5).tolist(), gamma_0_est, linefmt='r-', markerfmt='rx', label= r"$\hat \gamma_0$")
ax.set_xlim([-5, len(true_gamma_0) + 5])
ax.set_title(r"$\gamma_0$ and its estimation", fontsize=fontsize+4)
plt.legend(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tick_params(axis='x', bottom=False, labelbottom=False)
visualize_vect_per_group(gamma_0_true, n_long_features, ax)
gamma_1_true = true_gamma_1
gamma_1_est = learner.theta["gamma_1"]
ax = fig.add_subplot(122)
ax.stem(np.arange(len(gamma_1_true)).tolist(), gamma_1_true, linefmt='g-', markerfmt='go', label= r"$\gamma_1$")
ax.stem((np.arange(len(gamma_1_est)) + .5).tolist(), gamma_1_est, linefmt='r-', markerfmt='rx', label= r"$\hat \gamma_1$")
ax.set_xlim([-5, len(true_gamma_1) + 5])
ax.set_title(r"$\gamma_1$ and its estimation", fontsize=fontsize+4)
plt.legend(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tick_params(axis='x', bottom=False, labelbottom=False)
visualize_vect_per_group(gamma_1_true, n_long_features, ax)
plt.show()
# -
# Check estimated covariance of random effect
learner.theta["long_cov"]
# +
# Heatmaps of the true (left) and estimated (right) random-effect covariance.
fig, axes = plt.subplots(1, 2, figsize=(15, 5), sharey=True)
fig.suptitle('Variance-covariance matrix and its estimation')
sns.heatmap(
    data=simu.long_cov,
    vmin=-.01, vmax=.01, center=0,
    cmap=sns.diverging_palette(20, 220, n=200),
    square=True, ax=axes[0]
)
sns.heatmap(
    data=learner.theta["long_cov"],
    vmin=-.01, vmax=.01, center=0,
    cmap=sns.diverging_palette(20, 220, n=200),
    square=True, ax=axes[1]
)
# -
# Compare the estimated baseline hazard against the true Gompertz hazard.
times = learner.theta["baseline_hazard"].index.values
est_value = learner.theta["baseline_hazard"].values
# True Gompertz hazard: lambda_0(t) = kappa_1 * kappa_2 * exp(kappa_2 * t).
true_value = (simu.scale * simu.shape) * np.exp(simu.shape * times)
# Rescale the estimate by the average true/estimated ratio over its support,
# since the baseline is only identified up to a multiplicative constant here.
non_zero_idx = np.argwhere(est_value != 0)
ratio = np.mean(true_value[non_zero_idx] / est_value[non_zero_idx])
fig = plt.figure(figsize=(10, 5))
plt.bar(times, np.log10(1 + true_value), color = 'b', width = 0.5)
plt.bar(times + 0.25, np.log10(1 + est_value * ratio), color = 'r', width = 0.25)
plt.yscale('log')
# Bug fix: `plt.show` (attribute access, a no-op) -> `plt.show()` (actual call).
plt.show()
# +
# estimation of G
# Hard-assign each training subject to the high-risk group when the estimated
# posterior probability pi_est exceeds 0.5.
est_G = (learner.pi_est > .5).astype(int)
fig = plt.figure(figsize=(16, 4))
fontsize = 14
ax = fig.add_subplot(1,2,1)
bins = np.linspace(0, T_train.max(), 40)
kwargs = dict(bins=bins, alpha=0.6, rwidth=0.9)
plt.hist(T_train[est_G == 1], **kwargs, color='r', label='High-risk')
plt.hist(T_train[est_G == 0], **kwargs, color='b', label='Low-risk')
plt.legend(fontsize=fontsize-2)
plt.xlabel("T", size=fontsize+2)
plt.ylabel("Count", size=fontsize)
ax.tick_params(labelsize=fontsize-2)
plt.title("Frequency histogram of T", size=fontsize+2)
# Kaplan Meier estimation of survival curves
kmf = KaplanMeierFitter()
ax = plt.subplot(122)
kmf.fit(T_train[est_G == 1], delta_train[learner.pi_est > .5], label="High-risk").plot(ax=ax, c='r')
kmf.fit(T_train[est_G == 0], delta_train[learner.pi_est < .5], label="Low-risk").plot(ax=ax, c='b')
plt.legend(fontsize=fontsize-2)
plt.xlabel('Time $t$', size=fontsize)
plt.ylabel(r'$P[S > t]$', size=fontsize+2)
plt.title("Estimated survival curves", size=fontsize+2)
ax.tick_params(labelsize=fontsize-2)
plt.show()
# -
# ## Prediction
# +
# evalutation here
# Risk-marker prediction and concordance on the training set.
marker_train = learner.predict_marker(X_train, Y_train)
c_index_train = c_index_score(T_train, marker_train, delta_train)
# The marker's sign/orientation is arbitrary, so take the better of c and 1-c.
c_index_train = max(c_index_train, 1 - c_index_train)
## Obtain the marker vector on test set ##
# prediction here
marker_test = learner.predict_marker(X_test, Y_test)
c_index_test = c_index_score(T_test, marker_test, delta_test)
c_index_test = max(c_index_test, 1 - c_index_test)
print("Done predicting on dataset.")
print("C-index on train: %.2f" % c_index_train)
print("C-index on test: %.2f" % c_index_test)
# -
# ## Figures
# ### Learning curves
# ### Convergence
# ### Beta coefficients
| Lights tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# +
class Quaternion:
    """Quaternion (s + x*i + y*j + z*k) with Euler-angle conversion helpers.

    Components are stored as ``s, x, y, z``; note the constructor argument
    order is ``(x, y, z, s)`` to match the rest of this notebook.  Every
    update recomputes the Euler angles ``phi, theta, Phi`` (roll, pitch,
    yaw — ZYX convention per the arctan2/arcsin formulas below), the 3x3
    rotation matrix ``R``, and the norm ``mean``.
    """

    def __init__(self, x=0, y=0, z=0, s=0):
        self.s, self.x, self.y, self.z = s, x, y, z
        # RotationMatrix() also stores the norm in self.mean.
        self.RotationMatrix()

    def Q2E(self, x, y, z, s=0):
        """Reset the components from (x, y, z, s) and return self."""
        self.__init__(x, y, z, s)
        return self

    def E2Q(self, phi, theta, Phi):
        """Set the components from Euler angles (phi, theta, Phi); return self."""
        self.phi, self.theta, self.Phi = phi, theta, Phi
        cphi, sphi = np.cos(phi / 2), np.sin(phi / 2)
        cth, sth = np.cos(theta / 2), np.sin(theta / 2)
        cPhi, sPhi = np.cos(Phi / 2), np.sin(Phi / 2)
        self.s = cphi * cth * cPhi + sphi * sth * sPhi
        self.x = sphi * cth * cPhi - cphi * sth * sPhi
        self.y = cphi * sth * cPhi + sphi * cth * sPhi
        self.z = cphi * cth * sPhi - sphi * sth * cPhi
        # Bug fix: also refreshes self.mean, which the original left stale
        # (a Quaternion().E2Q(...) kept mean == 0, breaking any later
        # division by .mean, e.g. in getRotateQuaternion()).
        self.RotationMatrix()
        return self

    def RotationMatrix(self):
        """Recompute norm, Euler angles and rotation matrix from components."""
        s, x, y, z = self.s, self.x, self.y, self.z
        self.mean = (s ** 2 + x ** 2 + y ** 2 + z ** 2) ** 0.5
        # Normalize before extracting angles; treat the zero quaternion as
        # norm 1 to avoid division by zero (angles come out as 0).
        norm = self.mean if self.mean != 0 else 1
        s, x, y, z = s / norm, x / norm, y / norm, z / norm
        self.phi = np.arctan2(2 * (s * x + y * z), 1 - 2 * (x ** 2 + y ** 2))
        self.theta = np.arcsin(2 * (s * y - z * x))
        self.Phi = np.arctan2(2 * (s * z + x * y), 1 - 2 * (y ** 2 + z ** 2))
        self.R = np.array([[1 - 2 * (y ** 2 + z ** 2), 2 * (x * y - s * z), 2 * (s * y + x * z)],
                           [2 * (x * y + s * z), 1 - 2 * (x ** 2 + z ** 2), 2 * (y * z - s * x)],
                           [2 * (x * z - s * y), 2 * (s * x + y * z), 1 - 2 * (x ** 2 + y ** 2)]])

    def mul(self, other):
        """Hamilton product self * other; returns a new Quaternion."""
        sa, xa, ya, za = self.s, self.x, self.y, self.z
        sb, xb, yb, zb = other.s, other.x, other.y, other.z
        s = sa * sb - xa * xb - ya * yb - za * zb
        x = sa * xb + sb * xa + ya * zb - yb * za
        y = sa * yb + sb * ya + za * xb - zb * xa
        z = sa * zb + sb * za + xa * yb - xb * ya
        return Quaternion(x, y, z, s)

    def inverse(self):
        """Conjugate (the inverse for unit quaternions)."""
        return Quaternion(-self.x, -self.y, -self.z, self.s)

    def Rotate(self, vector):
        """Rotate this (pure) quaternion by the rotation quaternion ``vector``.

        Computes vector * self * vector^-1 and returns a new Quaternion.
        """
        return vector.mul(self).mul(vector.inverse())

    def __add__(self, vector):
        """Component-wise sum; returns a new Quaternion."""
        return Quaternion(self.x + vector.x, self.y + vector.y,
                          self.z + vector.z, self.s + vector.s)

    def __mul__(self, c):
        """Scalar multiplication by ``c``.

        Bug fix: returns a new Quaternion instead of mutating self in place
        (the original silently modified the left operand of ``q * c``).
        """
        return Quaternion(self.x * c, self.y * c, self.z * c, self.s * c)

    def __repr__(self):
        return '({s},{x},{y},{z})--({phi},{theta},{Phi})'.format(
            s=self.s, x=self.x, y=self.y, z=self.z,
            phi=self.phi, theta=self.theta, Phi=self.Phi)
# -
def getRotateQuaternion(Q1, Q2):
    """Return the unit quaternion rotating pure quaternion Q1 onto Q2.

    The rotation axis is the normalized cross product of the vector parts
    and the angle is the angle between the two quaternions (via their dot
    product).  If the cross product vanishes (parallel or anti-parallel
    inputs) there is no well-defined axis, so the identity quaternion is
    returned instead of dividing by zero (bug fix).
    """
    # Clip to [-1, 1] to guard arccos against floating-point overshoot.
    cosTheta = np.clip(
        (Q1.s * Q2.s + Q1.x * Q2.x + Q1.y * Q2.y + Q1.z * Q2.z)
        / (Q1.mean * Q2.mean), -1.0, 1.0)
    Theta = np.arccos(cosTheta)
    # Rotation axis: cross product of the vector parts.
    x = Q1.y * Q2.z - Q2.y * Q1.z
    y = Q1.z * Q2.x - Q2.z * Q1.x
    z = Q1.x * Q2.y - Q2.x * Q1.y
    mean = (x ** 2 + y ** 2 + z ** 2) ** 0.5
    if mean == 0:
        # Degenerate case: no rotation axis; fall back to identity.
        return Quaternion(0, 0, 0, 1)
    s2 = np.sin(Theta / 2)
    return Quaternion(s2 * x / mean, s2 * y / mean, s2 * z / mean,
                      np.cos(Theta / 2))
# Measured accelerometer reading (gravity in the sensor frame).
g_all = [-4.225172, -0.7840731, 8.8149605]
# Reference gravity vector (0, 0, 9.8) as a pure quaternion.
g = Quaternion(0,0,9.8)
gx = g_all[0]
gy = g_all[1]
gz = g_all[2]
# Same measured reading as a pure quaternion.
g_v = Quaternion(-4.225172, -0.7840731, 8.8149605)
o = Quaternion()
# Corners of a 10x10 square in the z=0 plane, as pure quaternions.
p1 = Quaternion(-5,-5,0)
p2 = Quaternion(-5,5,0)
p3 = Quaternion(5,5,0)
p4 = Quaternion(5,-5,0)
# Rotation taking the reference gravity onto the measured one.
r = getRotateQuaternion(g,g_v)
# NOTE(review): this first call's result is discarded — presumably a leftover;
# the rotated corner is recomputed and kept on the next line.
p1.Rotate(r)
p1_ = p1.Rotate(r)
p2_ = p2.Rotate(r)
p3_ = p3.Rotate(r)
p4_ = p4.Rotate(r)
# +
# 3-D view: the measured gravity vector and the rotated square outline.
fig = plt.figure()
# ax = fig.gca(projection='3d')
ax = plt.axes(projection='3d')
ax.plot([0,gx], [0,gy], [0,gz], label='parametric curve')
# Close the square by repeating the first corner at the end.
ax.plot([p1_.x, p2_.x, p3_.x, p4_.x, p1_.x],[p1_.y, p2_.y, p3_.y, p4_.y, p1_.y],[p1_.z, p2_.z, p3_.z, p4_.z, p1_.z],
        label='parametric curve')
ax.set_xlim3d(-10,10)
ax.set_ylim3d(-10,10)
ax.set_zlim3d(-10,10)
# -
| Smart-phone Sensor/Attitude estimation.ipynb |