code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
using DelimitedFiles

# Advent of Code 2020, day 1: find the entries that sum to 2020 and
# print the product of those entries.
entries = vec(readdlm("data/input01.txt", Int))
n = length(entries)

# Part 1: pairs of *distinct* entries. The inner loop starts at i + 1
# (the original started at i), which prevents pairing an entry with
# itself -- an input containing 1010 would otherwise report a bogus
# "solution" of 1010 * 1010.
for i in 1:n, j in (i + 1):n
    if entries[i] + entries[j] == 2020
        println("soln: $(entries[i] * entries[j])")
    end
end

# Part 2: triples of distinct entries, with the same off-by-one fix
# applied to both inner loops.
for i in 1:n, j in (i + 1):n, k in (j + 1):n
    if entries[i] + entries[j] + entries[k] == 2020
        println("soln: $(entries[i] * entries[j] * entries[k])")
    end
end
| 2020/Day1julia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import functools
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
from keras.layers import LSTM
from keras.utils import plot_model
from keras import backend as K
import keras.metrics
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, TimeDistributed
from keras.layers import LSTM, Bidirectional
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import warnings
import numpy as np
from collections import OrderedDict
import os
from lob_data_utils import lob, db_result, gdf_pca
from lob_data_utils.svm_calculation import lob_svm
from numpy.random import seed
# Fix NumPy's global RNG so keras/numpy results are reproducible.
seed(1)
sns.set_style('whitegrid')
# Notebook-wide: suppress library warnings for cleaner output.
warnings.filterwarnings('ignore')
# -
# Experiment configuration: stock id and sample length used to locate
# the precomputed GDF result files.
data_length = 10000
stock = '9062'
# Resolves to e.g. 'gdf_<stock>_len10000_r<r>_s<s>_K50'; the remaining
# {} slots are filled later with stock/r/s values.
gdf_filename_pattern = 'gdf_{}_' + 'len{}'.format(data_length) + '_r{}_s{}_K50'
# (r, s) GDF parameter pairs to load results for.
gdf_parameters = [(1.0, 1.0), (0.1, 0.1), (0.1, 1.0), (1.0, 0.1), (0.01, 0.1)]
# Logistic-regression baseline results; keep only this stock's rows.
# NOTE(review): assumes the CSV has 'stock' plus matthews/roc_auc
# score columns -- confirm against the generating script.
df_log = pd.read_csv('../gdf_pca/res_log_que.csv')
df_log = df_log[df_log['stock'] == int(stock)]
# Display only the Matthews-correlation and ROC-AUC columns.
columns = [c for c in df_log.columns if 'matthews' in c or 'roc_auc' in c]
df_log[columns]
def convert_scores_to_df(scores):
    """Turn a list of per-run score dicts into a DataFrame.

    Each dict may hold scalar metrics or lists of per-fold values;
    list values are collapsed to their mean so every cell of the
    resulting frame is a single number.
    """
    averaged_rows = [
        {key: np.mean(value) if isinstance(value, list) else value
         for key, value in run.items()}
        for run in scores
    ]
    return pd.DataFrame(averaged_rows)
# +
def as_keras_metric(method):
    """Adapt a tf.metrics streaming metric so Keras can use it.

    TF1 streaming metrics return (value, update_op); Keras expects a
    single tensor. The wrapper runs the update op through a control
    dependency and returns the metric value.

    NOTE(review): the wrapper declares (self, args) positionally, so it
    only supports metrics called with exactly two positional arguments
    (labels, predictions) -- confirm before reusing for other metrics.
    """
    @functools.wraps(method)
    def wrapper(self, args, **kwargs):
        """ Wrapper for turning tensorflow metrics into keras metrics """
        value, update_op = method(self, args, **kwargs)
        # Streaming metrics accumulate state in TF local variables,
        # which must be initialized before the first evaluation.
        K.get_session().run(tf.local_variables_initializer())
        # Tie the update op to every fetch of the metric value.
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value
    return wrapper

# ROC-AUC metric usable as a Keras `metrics=[auc_roc]` entry.
auc_roc = as_keras_metric(tf.metrics.auc)
# -
# Load the precomputed SVM/GDF results for every (r, s) parameter pair.
gdf_dfs = []
for r, s in gdf_parameters:
    gdf_dfs.append(gdf_pca.SvmGdfResults(
        stock, r=r, s=s, data_length=data_length,
        gdf_filename_pattern=gdf_filename_pattern))

# PCA fitted on the 'pca_gdf_que_prev10' feature set; the bar chart
# shows the variance explained by each of the 10 components, and the
# print gives the cumulative share of the first three.
pca = gdf_dfs[0].get_pca('pca_gdf_que_prev10')
plt.bar(list(range(10)), pca.explained_variance_ratio_)
print(np.sum(pca.explained_variance_ratio_[0:3]))
| overview/playground/overview_9062_full-lstm-pca.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2A.eco - Python et la logique SQL - correction
#
# Correction d'exercices sur SQL.
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# SQL permet de créer des tables, de rechercher, d'ajouter, de modifier ou de supprimer des données dans les bases de données.
# Un peu ce que vous ferez bientôt tous les jours. C’est un langage de management de données, pas de nettoyage, d’analyse ou de statistiques avancées.
#
# Les instructions SQL s'écrivent d'une manière qui ressemble à celle de phrases ordinaires en anglais. Cette ressemblance voulue vise à faciliter l'apprentissage et la lecture. Il est néanmoins important de respecter un ordre pour les différentes instructions.
#
# Dans ce TD, nous allons écrire des commandes en SQL via Python.
#
# Pour plus de précisions sur SQL et les commandes qui existent, rendez-vous là [SQL, PRINCIPES DE BASE](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx3/ext2a/sql_doc.html).
# ## Se connecter à une base de données
#
# A la différence des tables qu'on utilise habituellement, la base de données n'est pas visible directement en ouvrant Excel ou un éditeur de texte. Pour avoir une vue de ce que contient la base de données, il est nécessaire d'avoir un autre type de logiciel.
#
# Pour le TD, nous vous recommandons d'installer SQLLiteSpy (disponible à cette adresse [SqliteSpy](http://www.yunqa.de/delphi/products/sqlitespy/index) ou [sqlite_bro](https://pypi.python.org/pypi/sqlite_bro)) si vous voulez voir à quoi ressemble les données avant de les utiliser avec Python.
# +
import sqlite3

# Connect to an (initially empty) SQLite database; SQLite stores the
# whole database in a single ordinary file.
filepath = "./DataBase.db"
open(filepath, 'w').close()  # create/truncate an empty file
CreateDataBase = sqlite3.connect(filepath)
QueryCurs = CreateDataBase.cursor()
# -
# La méthode cursor() est un peu particulière :
#
# Il s'agit d'une sorte de tampon mémoire intermédiaire, destiné à mémoriser temporairement les données en cours de traitement, ainsi que les opérations que vous effectuez sur elles, avant leur transfert définitif dans la base de données. Tant que la méthode .commit() n'aura pas été appelée, aucun ordre ne sera appliqué à la base de données.
# --------------------
# A présent que nous sommes connectés à la base de données, on va créer une table qui contient plusieurs variables de format différents
# - ID sera la clé primaire de la base
# - Nom, Rue, Ville, Pays seront du text
# - Prix sera un réel
# +
# Helper that creates a table with an auto-assigned primary key.
def CreateTable(nom_bdd):
    """Create table `nom_bdd` if it does not already exist.

    NOTE(review): the table name is spliced in by string concatenation
    -- fine for the hard-coded names used in this notebook, but a SQL
    injection risk if `nom_bdd` ever comes from untrusted input.
    """
    QueryCurs.execute('''CREATE TABLE IF NOT EXISTS ''' + nom_bdd + '''
    (id INTEGER PRIMARY KEY, Name TEXT,City TEXT, Country TEXT, Price REAL)''')
# Helper that inserts one row into the table.
def AddEntry(nom_bdd, Nom,Ville,Pays,Prix):
    """Insert one (Name, City, Country, Price) row into `nom_bdd`.

    Values go through ? placeholders (safe parameter binding); the id
    column is left to SQLite's INTEGER PRIMARY KEY auto-assignment.
    """
    QueryCurs.execute('''INSERT INTO ''' + nom_bdd + '''
    (Name,City,Country,Price) VALUES (?,?,?,?)''',(Nom,Ville,Pays,Prix))
def AddEntries(nom_bdd, data):
    """Insert many rows at once with executemany.

    data : list of (Name, City, Country, Price) tuples to insert.
    """
    QueryCurs.executemany('''INSERT INTO ''' + nom_bdd + '''
    (Name,City,Country,Price) VALUES (?,?,?,?)''',data)
### Create the Clients table and insert sample rows.
CreateTable('Clients')
AddEntry('Clients','Toto','Munich','Germany',5.2)
AddEntries('Clients',
           [('Bill','Berlin','Germany',2.3),
            ('Tom','Paris','France',7.8),
            ('Marvin','Miami','USA',15.2),
            ('Anna','Paris','USA',7.8)])
# Commit validates the transaction: pending local changes are pushed
# to the database file. Nothing is persisted until commit() is called.
CreateDataBase.commit()
# -
# ### Voir la table
# Pour voir ce qu'il y a dans la table, on utilise un premier Select où on demande à voir toute la table
# SELECT * returns every column of every row; fetchall() materializes
# the cursor's result set as a list of tuples.
QueryCurs.execute('SELECT * FROM Clients')
Values = QueryCurs.fetchall()
print(Values)
# ### Passer en pandas
#
# Rien de plus simple : plusieurs manières de faire
# +
import pandas as pd

# Method 1: pandas runs the SQL query directly on the connection.
df1 = pd.read_sql_query('SELECT * FROM Clients', CreateDataBase)
print("En utilisant la méthode read_sql_query \n", df1.head(), "\n")

# Method 2: build a DataFrame from the list returned by fetchall().
df2 = pd.DataFrame(Values, columns=['ID','Name','City','Country','Price'])
print("En passant par une DataFrame \n", df2.head())
# -
# ## Comparaison SQL et pandas
# ### SELECT
#
# En SQL, la sélection se fait en utilisant des virgules ou * si on veut sélectionner toutes les colonnes
# SQL version: project two named columns, cap the result at two rows.
QueryCurs.execute('SELECT ID,City FROM Clients LIMIT 2')
Values = QueryCurs.fetchall()
print(Values)
# En pandas, la sélection de colonnes se fait en donnant une liste
# pandas version: select columns by passing a list of names.
df2[['ID','City']].head(2)
# ### WHERE
# En SQL, on utilise WHERE pour filtrer les tables selon certaines conditions
# WHERE filters rows; SQLite accepts == as a synonym for =.
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Paris"')
print(QueryCurs.fetchall())
# Avec Pandas, on peut utiliser plusieurs manières de faire :
# - avec un booléen
# - en utilisant la méthode 'query'
# pandas equivalents of WHERE: boolean mask and DataFrame.query.
df2[df2['City'] == "Paris"]
df2.query('City == "Paris"')
# Pour mettre plusieurs conditions, on utilise :
# - & en Python, AND en SQL
# - | en python, OR en SQL
# Several conditions: AND in SQL, & (parenthesized masks) in pandas.
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Paris" AND Country == "USA"')
print(QueryCurs.fetchall())
# +
df2.query('City == "Paris" & Country == "USA"')
df2[(df2['City'] == "Paris") & (df2['Country'] == "USA")]
# -
# ## GROUP BY
#
# En pandas, l'opération GROUP BY de SQL s'effectue avec une méthode similaire : groupby()
#
# groupby() sert à regrouper des observations en groupes selon les modalités de certaines variables en appliquant une fonction d'aggrégation sur d'autres variables.
# Row count per country, SQL style.
QueryCurs.execute('SELECT Country, count(*) FROM Clients GROUP BY Country')
print(QueryCurs.fetchall())
# Attention, en pandas, la fonction count() ne fait pas la même chose qu'en SQL. Count() s'applique à toutes les colonnes et compte toutes les observations non nulles.
# pandas count(): counts non-null values in every column per group.
df2.groupby('Country').count()
# Pour réaliser la même chose qu'en SQL, il faut utiliser la méthode size()
# size() counts rows per group, matching SQL's COUNT(*).
df2.groupby('Country').size()
# On peut aussi appliquer des fonctions plus sophistiquées lors d'un groupby
# Aggregate with AVG and COUNT per country.
QueryCurs.execute('SELECT Country, AVG(Price), count(*) FROM Clients GROUP BY Country')
print(QueryCurs.fetchall())
# Avec pandas, on peut appeler les fonctions classiques de numpy
import numpy as np
# agg maps column -> aggregation function (NumPy functions work too).
df2.groupby('Country').agg({'Price': np.mean, 'Country': np.size})
# Ou utiliser des fonctions lambda
# Custom aggregation via a lambda: mean price doubled.
df2.groupby('Country')['Price'].apply(lambda x: 2*x.mean())
# Same computation expressed directly in SQL.
QueryCurs.execute('SELECT Country, 2*AVG(Price) FROM Clients GROUP BY Country').fetchall()
# More WHERE examples: equality, conjunction, and BETWEEN
# (an inclusive range test).
QueryCurs.execute('SELECT * FROM Clients WHERE Country == "Germany"')
print(QueryCurs.fetchall())
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Berlin" AND Country == "Germany"')
print(QueryCurs.fetchall())
QueryCurs.execute('SELECT * FROM Clients WHERE Price BETWEEN 7 AND 20')
print(QueryCurs.fetchall())
# ## Enregistrer une table SQL sous un autre format
#
# On utilise le package csv, l'option 'w' pour 'write'.
#
# On crée l'objet "writer", qui vient du package csv.
#
# Cet objet a deux méthodes :
# - writerow pour les noms de colonnes : une liste
# - writerows pour les lignes : un ensemble de liste
#
# +
# The cursor is iterable: stream the SELECT result into csv.writer.
data = QueryCurs.execute('SELECT * FROM Clients')
import csv
with open('./output.csv', 'w') as file:
    writer = csv.writer(file)
    # Header row first, then all data rows.
    writer.writerow(['id','Name','City','Country','Price'])
    writer.writerows(data)
# -
# On peut également passer par un DataFrame pandas et utiliser .to_csv()
# Drop the demo table; the cursor could then be closed.
QueryCurs.execute('''DROP TABLE Clients''')
#QueryCurs.close()
# ## Exercice
# Dans cet exercice, nous allons manipuler les tables de la base de données World.
#
# Avant tout, connectez-vous à la base de données en utilisant sqlite3 et connect
#
# Lien vers la base de données : [World.db3](https://github.com/sdpython/ensae_teaching_cs/raw/master/src/ensae_teaching_cs/data/data_sql/World.db3) ou
#
# ```
# from ensae_teaching_cs.data import simple_database
# name = simple_database()
# ```
# Connect to the WORLD database file.
CreateDataBase = sqlite3.connect("./World.db3")
QueryCurs = CreateDataBase.cursor()
# Familiarisez vous avec la base de données : quelles sont les tables ? quelles sont les variables de ces tables ?
# - utilisez la fonction PRAGMA pour obtenir des informations sur les tables
#
# +
# List every table registered in the database catalog.
tables = QueryCurs.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall()
# For each table: column names (via PRAGMA table_info) plus a sample row.
for table in tables :
    print("Table :", table[0])
    schema = QueryCurs.execute("PRAGMA table_info({})".format(table[0])).fetchall()
    # table_info rows are (cid, name, type, notnull, dflt_value, pk);
    # x[1] is the column name.
    print("Colonnes", ["{}".format(x[1]) for x in schema])
    print("1ère ligne", QueryCurs.execute('SELECT * FROM {} LIMIT 1'.format(table[0])).fetchall(), "\n")
# -
# ## Question 1
# - Quels sont les 10 pays qui ont le plus de langues ?
# - Quelle langue est présente dans le plus de pays ?
QueryCurs.execute("""SELECT CountryCode, COUNT(*) as NB
FROM CountryLanguage
GROUP BY CountryCode
ORDER BY NB DESC
LIMIT 10""").fetchall()
QueryCurs.execute('''SELECT Language, COUNT(*) as NB
FROM CountryLanguage
GROUP BY Language
ORDER BY -NB
LIMIT 1''').fetchall()
# ## Question 2
# - Quelles sont les différentes formes de gouvernements dans les pays du monde ?
# - Quels sont les 3 gouvernements où la population est la plus importante ?
QueryCurs.execute('''SELECT DISTINCT GovernmentForm FROM Country''').fetchall()
QueryCurs.execute('''SELECT GovernmentForm, SUM(Population) as Pop_Totale_Gouv
FROM Country
GROUP BY GovernmentForm
ORDER BY Pop_Totale_Gouv DESC
LIMIT 3
''').fetchall()
# ## Question 3
# - Combien de pays ont Elisabeth II à la tête de leur gouvernement ?
#
# - Quelle proportion des sujets de Sa Majesté ne parlent pas anglais ?
# - 78 % ou 83% ?
QueryCurs.execute('''SELECT HeadOfState, Count(*)
FROM Country
WHERE HeadOfState = "Elisabeth II" ''').fetchall()
# la population totale
population_queen_elisabeth = QueryCurs.execute('''SELECT HeadOfState, SUM(Population)
FROM Country
WHERE HeadOfState = "Elisabeth II"''').fetchall()
# La part de la population parlant anglais
Part_parlant_anglais= QueryCurs.execute('''SELECT Language, SUM(Percentage*0.01*Population)
FROM
Country
LEFT JOIN
CountryLanguage
ON Country.Code = CountryLanguage.CountryCode
WHERE HeadOfState = "Elisabeth II"
AND Language = "English"
''').fetchall()
# La réponse est 78% d'après ces données
Part_parlant_anglais[0][1]/population_queen_elisabeth[0][1]
# +
## on trouve 83% si on ne fait pas attention au fait que dans certaines zones, 0% de la population parle anglais
## La population totale n'est alors pas la bonne, comme dans cet exemple
QueryCurs.execute('''SELECT Language,
SUM(Population_pays*0.01*Percentage) as Part_parlant_anglais, SUM(Population_pays) as Population_totale
FROM (SELECT Language, Code, Percentage, SUM(Population) as Population_pays
FROM
Country
LEFT JOIN
CountryLanguage
ON Country.Code = CountryLanguage.CountryCode
WHERE HeadOfState = "Elisabeth II" AND Language == "English"
GROUP BY Code)''').fetchall()
# -
# Conclusion: il vaut mieux écrire deux requêtes simples et lisibles pour obtenir le bon résultat, plutôt qu'une requête qui fait tout en une seule passe mais dont on va devoir vérifier la correction longuement...
# ## Question 4 - passons à Pandas
# Créer une DataFrame qui contient les informations suivantes par pays :
# - le nom
# - le code du pays
# - le nombre de langues parlées
# - le nombre de langues officielles
# - la population
# - le GNP
# - l'espérance de vie
#
# **Indice : utiliser la commande pd.read_sql_query**
#
#
# Que dit la matrice de corrélation de ces variables ?
df = pd.read_sql_query('''SELECT Code, Name, Population, GNP , LifeExpectancy,
COUNT(*) as Nb_langues_parlees, SUM(IsOfficial) as Nb_langues_officielles
FROM Country
INNER JOIN CountryLanguage ON Country.Code = CountryLanguage.CountryCode
GROUP BY Country.Code''',
CreateDataBase)
df.head()
df.corr()
| _doc/notebooks/td2a_eco/td2a_eco_sql_correction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# # Exercise Pandas
# For these exercises we are using a [dataset](https://www.kaggle.com/dgomonov/new-york-city-airbnb-open-data/kernels) provided by Airbnb for a Kaggle competition. It describes its offer for New York City in 2019, including types of apartments, price, location etc.
# ## 1. Create a dataframe
# Create a dataframe of a few lines with objects and their properties (e.g. fruits, their weight and colour).
# Calculate the mean of your Dataframe.
# ## 2. Import
# - Import the table called ```AB_NYC_2019.csv``` as a dataframe. It is located in the Datasets folder. Have a look at the beginning of the table (head).
#
# - Create a histogram of prices
# ## 3. Operations
# Create a new column in the dataframe by multiplying the "price" and "availability_365" columns to get an estimate of the maximum yearly income.
# ## 3b. Subselection and plotting
# Create a new Dataframe by first subselecting yearly incomes between 1 and 100'000. Then make a scatter plot of yearly income versus number of reviews
# ## 4. Combine
# We provide below an additional table that contains the number of inhabitants of each of New York's boroughs ("neighbourhood_group" in the table). Use ```merge``` to add this population information to each element in the original dataframe.
# ## 5. Groups
# - Using ```groupby``` calculate the average price for each type of room (room_type) in each neighbourhood_group. What is the average price for an entire home in Brooklyn ?
# - Unstack the multi-level Dataframe into a regular Dataframe with ```unstack()``` and create a bar plot with the resulting table
#
# ## 6. Advanced plotting
# Using Seaborn, create a scatter plot where x and y positions are longitude and latitude, the color reflects price and the shape of the marker the borough (neighbourhood_group). Can you recognize parts of New York? Does the map make sense?
| 99-DA_Pandas_Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import subprocess

# os.environ is a mapping-like view of the process environment.
print(type(os.environ))
# +
# print(os.environ)
# -
# Plain indexing raises KeyError for missing variables.
print(os.environ['LANG'])
# +
# print(os.environ['NEW_KEY'])
# KeyError: 'NEW_KEY'
# -
# .get() returns None (or the supplied default) for a missing key
# instead of raising.
print(os.environ.get('LANG'))
print(os.environ.get('NEW_KEY'))
print(os.environ.get('NEW_KEY', 'default'))
# os.getenv behaves like os.environ.get.
print(os.getenv('LANG'))
print(os.getenv('NEW_KEY'))
print(os.getenv('NEW_KEY', 'default'))
# Assigning through os.environ sets the variable for this process
# (and for child processes spawned afterwards).
os.environ['NEW_KEY'] = 'test'
print(os.environ['NEW_KEY'])
os.environ['NEW_KEY'] = 'test2'
print(os.environ['NEW_KEY'])
# +
# os.environ['NEW_KEY'] = 100
# TypeError: str expected, not int
# -
# Values must be strings.
os.environ['NEW_KEY'] = '100'
# pop removes the variable and returns its value.
print(os.environ.pop('NEW_KEY'))
# +
# print(os.environ.pop('NEW_KEY'))
# KeyError: 'NEW_KEY'
# -
# A default makes pop safe on a missing key.
print(os.environ.pop('NEW_KEY', None))
os.environ['NEW_KEY'] = '100'
print(os.getenv('NEW_KEY'))
# del also removes the variable; raises KeyError if absent.
del os.environ['NEW_KEY']
print(os.getenv('NEW_KEY'))
# +
# del os.environ['NEW_KEY']
# KeyError: 'NEW_KEY'
# -
print(os.getenv('LANG'))
# Child processes inherit the (possibly modified) environment; the
# LANG locale affects date's output format.
print(subprocess.check_output('date', encoding='utf-8'))
os.environ['LANG'] = 'en_US'
print(subprocess.check_output('date', encoding='utf-8'))
print(os.getenv('LANG'))
# NOTE(review): os.getenv('LANG') can return None if LANG is unset;
# safe here only because LANG was assigned just above.
if os.getenv('LANG').startswith('ja'):
    print('こんにちは')
else:
    print('Hello')
os.environ['LANG'] = 'ja_JP'
if os.getenv('LANG').startswith('ja'):
    print('こんにちは')
else:
    print('Hello')
| notebook/os_environ_getenv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Lecture 18: Computational Structural Biology
#
# CBIO (CSCI) 4835/6835: Introduction to Computational Biology
# + [markdown] slideshow={"slide_type": "slide"}
# ## Overview and Objectives
#
# Even though we're "officially" moving off modeling, we're still using similar techniques to study protein structure dynamics--how they fold, how they interact, how they bind, how they function. Proteins are enormously complex and modeling them requires answering a range of questions. By the end of this lecture, you should be able to
#
# - Discuss the trade-offs of different strategies to represent protein structure and function
# - Convert from Cartesian to generalized coordinates
# - Explain dihedral angles and their role in verifying a valid generalized coordinate transformation
# - Define a protein trajectory and its role in simulating protein function
# - Create a trajectory covariance matrix and find its primary modes of variation
# - Explain principal components analysis and singular value decomposition, and how to use these techniques to analyze protein trajectory data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Part 1: What is computational structural biology?
# -
# Not so dissimilar from computational modeling--basically a different side of the same coin.
# + [markdown] slideshow={"slide_type": "fragment"}
# Definition 1: **Development and application of theoretical, computational, and mathematical models and methods to understand the biomolecular basis and mechanisms of biomolecular functions.**
# + [markdown] slideshow={"slide_type": "fragment"}
# Definition 2: **Understanding the fundamental physical principles that control the structure, dynamics, thermodynamics, and kinetics of biomolecular systems.**
# + [markdown] slideshow={"slide_type": "fragment"}
# Basically, modeling of molecular structure.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Historical Perspective
# -
# - 1946: Molecular mechanics calculations
# - 1950: $\alpha$-helices and $\beta$-strads proposed by Pauling
# - 1960: [force fields](https://en.wikipedia.org/wiki/Force_field_(chemistry))
# - 1969: [Levinthal's paradox](https://en.wikipedia.org/wiki/Levinthal's_paradox) of protein folding
# - 1970: first molecular dynamics simulation of biomolecules
# - 1971: The [Protein Data Bank](http://www.rcsb.org/pdb/home/home.do)
# - 1998: Ion channel crystal structure
# + [markdown] slideshow={"slide_type": "slide"}
# ### Available Resources
# -
# As I'm sure everyone is aware: both computing technology and available storage have grown exponentially over the last few decades. This has enabled us to stockpile structure data and analyze it.
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### PBD: Protein Data Bank
# -
# 
# If you haven't, I encourage you to check out this website.
# + [markdown] slideshow={"slide_type": "slide"}
# - Contains 3D structure data for hundreds of thousands of molecules
# -
# - Builds multiple structure viewers into the browser (don't need to install separate programs to view the molecules)
# - Has built-in tools for sequence and structure alignment, among others
# We'll come back to this (specifically, the eponymous PDB structure file).
# + [markdown] slideshow={"slide_type": "slide"}
# ### Timescales
# -
# Of course, obtaining the structure of a molecule is a challenge (i.e. X-ray crystallography), but what we're discussing here is what we do *after* obtaining the structure information.
# Note from the previous definitions a common thread: *dynamics*, *kinetics*, and *thermodynamics*. We're interested in **how structure changes**--i.e., a notion of **time**.
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# There's a trade-off in terms of resolution and speed.
# -
# - If you want to simulate bond vibration ($10^{-15}$), you'll need quite a few simulations to obtain a physically meaningful result
# - If you want to simulate protein folding ($10^{0}$), you'll need a creative way to introduce some detail.
# + [markdown] slideshow={"slide_type": "slide"}
# 
# -
# What are the **minimal** ingredients of a **simplified**, but **physically meaningful**, model of structure?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Part 2: Protein Structure Basics
# -
# First, we need to go over some preliminary background regarding protein structure before we get into how to model it.
# As you all know, there is a hierarchy in protein structure.
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# This hierarchy is where structural variability--and ultimately, function--comes from.
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Of course, polypeptide chains have potentially enormous variability (recall [Levinthal's paradox](https://en.wikipedia.org/wiki/Levinthal%27s_paradox)).
# -
# There are "only" 20 amino acids, but accounting for their possible combinations, in addition to bond lengths, bond angles, and side chains exponentially increases the number of variables to consider.
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "fragment"}
# **We need a way of representing proteins from a modeling standpoint.**
# + [markdown] slideshow={"slide_type": "slide"}
# ### Generalized Coordinates
# -
# One way to represent a protein is to use our old friend and mainstay, Cartesian coordinates.
# + [markdown] slideshow={"slide_type": "slide"}
# 
# -
# - Each atom in the backbone is represented as a 3D *vector* $R_i$
# - The location of the $i^{th}$ atom with respect to the fixed frame XYZ is $R_i$
# - *What is the fixed frame XYZ?*
# + [markdown] slideshow={"slide_type": "slide"}
# Even better than Cartesian coordinates is **generalized coordinates**: we throw out absolute XYZ space for a representation of the polypeptide in an arbitrary location.
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# -
# Instead of recording absolute spatial coordinates of atoms, **we record bonds, bond angles, and dihedral angles.**
# + [markdown] slideshow={"slide_type": "slide"}
# ### Degrees of Freedom
# -
# The concept of degrees of freedom is an important one: this is basically the number of parameters that make up the system or model you're examining. Put another way, it's the number of "knobs" you can turn to get a unique configuration.
# In generalized coordinates, how many degrees of freedom are there? Put another way, how many values do we need to specify, assuming $N$ backbone atoms, to fully characterize the 3D structure of a polypeptide?
# + [markdown] slideshow={"slide_type": "fragment"}
# - Bonds: $N - 1$ *(do you see why?)*
# + [markdown] slideshow={"slide_type": "fragment"}
# - Bond angles: $N - 2$ *(do you see why?)*
# + [markdown] slideshow={"slide_type": "fragment"}
# - Dihedral angles: $N - 3$ *(do you see why?)*
# + [markdown] slideshow={"slide_type": "fragment"}
# **Total: $3N - 6$** (+ 6 external degrees of freedom: 3 translation, 3 rotation)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Molecule Movement
# -
# Of all the $3N - 6$ parameters in a generalized coordinate system, bond rotations (i.e. changes in dihedral angles) are the softest, and are primarily responsible for the protein's functional motions.
# - Fluctuations around isomeric states near the native state
# - Jumps between isomeric states
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Dihedral angles
# -
# These torsional bonds have names: the $\phi$, $\psi$, and $\omega$ angles, though any angle with the $\phi$ prefix is understood to be a dihedral angle.
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Cartesian to Generalized Coordinates
# -
# A quick primer on converting from absolute (Cartesian) coordinates to the more algorithmically-favorable generalized coordinates.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Input: 3D Cartesian coordinates for $N$ atoms ${R_1 = (x_1, y_1, z_1), R_2 = (x_2, y_2, z_2), ..., R_N = (x_N, y_N, z_N)}$
# - Output: Generalized coordinates for
# - $N - 1$ bonds $(l_2, l_3, ..., l_N)$
# - $N - 2$ bond angles $(\theta_2, \theta_3, ..., \theta_{N - 1})$
# - $N - 3$ dihedral angles $(\phi_3, \phi_4, ..., \phi_{N - 1})$
#
# (the bond vector $l_i$ points from atom $i - 1$ to atom $i$; hence, we start with bond $l_2$)
# + [markdown] slideshow={"slide_type": "slide"}
# Let's say you have a molecule with 4 atoms, $R_1$ through $R_4$ that each have their own $(x, y, z)$ coordinates.
# + [markdown] slideshow={"slide_type": "fragment"}
# - First, we calculate the bonds: $l_k = |R_k - R_{k - 1}|$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Next, we calculate the bond angles: $\theta_k = \theta_k (R_{k - 1}, R_k, R_{k + 1}) = \arccos\left( \frac{l_k \cdot l_{k + 1}}{|l_k| \, |l_{k + 1}|} \right)$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Finally, the dihedral angles $\phi_k = \phi_k (R_{k - 2}, R_{k - 1}, R_k, R_{k + 1}) = sign [ arccos(-n_{k - 1} \cdot l_{k + 1})] arccos(-n_{k - 1} \cdot n_k)$
#
# where $n_k$ is the *unit vector* that is perpendicular to the plane created by $l_k$ and $l_{k + 1}$. You can compute this vector by computing $n_k = \frac{l_k \times l_{k + 1}}{| l_k \times l_{k + 1} |}$
# + [markdown] slideshow={"slide_type": "fragment"}
# - $\times$ denotes a cross product, while $\cdot$ is an inner product
# - $sign$ just means it takes the + or - from the resulting calculation
# + [markdown] slideshow={"slide_type": "slide"}
# ### Conformational Space
# -
# Given these parameters, how many possible different conformations are there for a given $N$-atom protein?
# + [markdown] slideshow={"slide_type": "fragment"}
# **Infinitely many.**
# + [markdown] slideshow={"slide_type": "fragment"}
# **Or, in discrete space, a whole freakin' lot.**
# + [markdown] slideshow={"slide_type": "slide"}
# ### Ramachandran Plots
# -
# Luckily...
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Ramachandran plot for 4 residue types:
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Upshot: even though there are potentially infinitely many conformations, real-life proteins seem to inhabit a very small space of conformational possibilities.
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Part 3: Trajectories
# -
# At this point, we understand how to describe a polypeptide in space. Now we want to see what it does over time in some kind of environment.
# This addition of time generates what is called a *trajectory* of the polypeptide, giving us insight into its function.
# + [markdown] slideshow={"slide_type": "slide"}
# For a single residue $R_1$ (not to be confused with our definition of $R_1$ as the 3D Cartesian coordinates of an atom previously in this lecture), its trajectory looks something like this:
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# -
# where $t_1$ through $t_{1000}$ are all the time points at which we evaluate the structure.
# We're basically solving $F = ma$ for every atom in a residue at each time point; done over enough time points, we can start to see larger, coordinated behavior emerge.
# + [markdown] slideshow={"slide_type": "slide"}
# Of course, that's just for 1 residue. A protein usually has quite a few residues, in which case the trajectory would look something like this:
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# From this information, we can compute a few critical quantities.
# + [markdown] slideshow={"slide_type": "fragment"}
# The *average position vector* for $R_1$ is written as $ < R_1 > $. This is simply the average over all time points $t$.
# + [markdown] slideshow={"slide_type": "fragment"}
# From this quantity, we can compute the *instantaneous fluctuation vector* $\Delta R_1(t_i)$ at a specific time point $t_i$: $R_1(t_i) - < R_1 > $
# + [markdown] slideshow={"slide_type": "fragment"}
# *Cross-correlation* between fluctuation vectors of residues $R_i$ and $R_j$: $<\Delta R_i \cdot \Delta R_j> = \frac{1}{m} \sum_{k = 1}^m \Delta R_i (t_k) \cdot \Delta R_j (t_k)$
# + [markdown] slideshow={"slide_type": "fragment"}
# When $i = j$, this reduces to *mean-square fluctuation*.
# + [markdown] slideshow={"slide_type": "slide"}
# If we compute cross-correlations for all pairs of residues in the protein, this gives us the *covariance matrix*.
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# -
# As the name implies, this matrix quantifies how each residue in a protein *co-varies* with each other residue over the duration of the trajectory.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Long-term Dynamics
# -
# The covariance matrix is a crucial component to understanding *long-term dynamics* of a protein.
# After all, we're not necessarily interested in what happens between individual femtoseconds, but we need enough femtoseconds to be able to observe cooperative, global behavior in the protein.
# But how do we uncover these dynamics from a covariance matrix?
# + [markdown] slideshow={"slide_type": "fragment"}
# **Anyone remember Principal Components Analysis (PCA)?**
# + [markdown] slideshow={"slide_type": "slide"}
# ### Principal Components Analysis (PCA)
# -
# PCA is the most familiar of a family of analysis methods, collectively called *dimensionality reduction* techniques.
# You can think of PCA (and really, any dimensionality reduction technique) as a sort of compression algorithm: condensing your existing data into a more compact representation, in theory improving interpretability of the data and discarding noise while maintaining the overall original structure.
# Each algorithm goes about this process a little differently. PCA is all about *maximizing variance*.
# + [markdown] slideshow={"slide_type": "slide"}
# Given some data, PCA tries to find new axes for the data. These axes are chosen to "maximize the variance in the data"--a complicated way of saying that it draws new coordinate axes to capture the directions in which the data move the most.
# -
# 
# + [markdown] slideshow={"slide_type": "-"}
# Once you have these "principal components", you can then choose to keep only the top handful--theoretically maintaining the majority of the variability in the data, but with considerably fewer data points.
# + [markdown] slideshow={"slide_type": "slide"}
# This is precisely how we want to analyze our trajectory data. After all:
# -
# - Proteins, especially proteins we're really interested in, are likely quite large: several thousands of residues
# - The number of trajectory snapshots required to observe long-term dynamics are probably on the order of millions, if not billions.
# - A trajectory covariance matrix that's $N \times N$ is not going to be trivial to compute.
# + [markdown] slideshow={"slide_type": "fragment"}
# - **But as we saw with Ramachandran plots, there are probably some patterns to how proteins behave.** PCA can help discover those patterns.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Eigen-decomposition
# -
# We can rewrite the form of the covariance matrix $C$ as $C = U \Lambda U^{-1}$
# - $U$ is the matrix of row-based eigenvectors
# - $\Lambda$ is the diagonal matrix of eigenvalues
# - $U^{-1} = U^T$
# + [markdown] slideshow={"slide_type": "fragment"}
# Cool. What do these mean?
# + [markdown] slideshow={"slide_type": "slide"}
# Let's reorganize the equation a bit to gain some intuition about these quantities.
# + [markdown] slideshow={"slide_type": "fragment"}
# - When we first defined $C$ a few slides ago, we also said it could be written as $< \Delta R \Delta R^T >$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Using that notation, we'll substitute into the original eigen-decomposition equation: $< \Delta R \Delta R^T > = U \Lambda U^T$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Since the $\Lambda$ matrix is diagonal (meaning the diagonal elements are the eigenvalues, and all the off-diagonal elements are 0), we can rewrite this matrix as an outer product of the same vector with itself: $q \times q^T$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Now we have $< \Delta R \Delta R^T > = U q q^T U^T$, or $< \Delta R \Delta R^T > = (Uq)(Uq)^T$
# + [markdown] slideshow={"slide_type": "slide"}
# **We have an exact correspondence**: $\Delta R = Uq$
# + [markdown] slideshow={"slide_type": "fragment"}
# - The $i^{th}$ eigenvector $u_i$ (basically row $i$ of $U$) describes the normalized displacement of residues along the $i^{th}$ principal axis, where the $k^{th}$ element of $u_i$ (in Python parlance: `u[k]`) corresponds to the motion of the $k^{th}$ residue!
# + [markdown] slideshow={"slide_type": "fragment"}
# - The square root of the $i^{th}$ eigenvalue provides a measure of the fluctuation amplitude or size along the $i^{th}$ principal axis (across all residues)
# + [markdown] slideshow={"slide_type": "slide"}
# Armed with this information, we can start to make some inferences about how the entire protein behaves!
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Side, but important, note: the eigenvalues (and corresponding eigenvectors) are sorted in descending order in $\Lambda$. Therefore, the **first** eigenvalue (and its corresponding eigenvector) will represent the *lowest-frequency motion*, or the *largest fluctuation amplitude*.
# -
# Aka, the part of the protein that moves the most.
# + [markdown] slideshow={"slide_type": "slide"}
# You can use the top handful of principal components to see exactly how the protein moves:
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Problem
# -
# This all sounds well and good. Problem is, **computing eigenvalues and eigenvectors ain't cheap.**
# We've discussed how $N$ (the number of residues) and $m$ (the number of snapshots) can both get extremely large.
# + [markdown] slideshow={"slide_type": "fragment"}
# Unless you get into highly sophisticated computing environments, your average SciPy eigen-solver (yep, SciPy has an eigensolver!) runs in $O(n^3)$.
# -
# That's fine when $n$ is small, not so fine when $n$ is in the **hundreds of thousands or millions**.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Singular Value Decomposition (SVD)
# -
# Think of SVD as a sort of half-sibling to PCA. In the *very specific circumstance* where you want to compute a covariance matrix, SVD short-circuits that process and can give you a little performance boost.
# + [markdown] slideshow={"slide_type": "slide"}
# Now we don't have to compute the full covariance matrix, just the trajectory matrix $A$:
# + [markdown] slideshow={"slide_type": "-"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Similar to PCA, SVD will break down the matrix of interest into three constituent components that have direct biological interpretations. Specifically, we represent our trajectory matrix $A$ as
# -
# 
# - As before, $U$ is the matrix of principal axes (only now they're called *singular vectors* instead of eigenvectors)
# - Also as before, $\Sigma$ is a diagonal matrix of fluctuation amplitudes (now called *singular values* instead of eigenvalues)
# - The columns of $\Sigma V^T$ are the conformation vectors in the snapshot spanned by the axes $u_i$ for each residue
# + [markdown] slideshow={"slide_type": "slide"}
# Let's look at it visually. First, we have our trajectory matrix $A$ of $N$ residues over $m$ time points or snapshots:
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# From the SVD, one of the parameters we get back is the matrix $V^T$, the rows of which give the displacements along each principal axis (with columns indicating displacement at each time step):
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Which you can then use to directly visualize the motion along the principal axes:
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Summary of SVD and PCA
# + [markdown] slideshow={"slide_type": "fragment"}
# These techniques decompose the trajectory into a collection of *modes* of the protein, sorted in order of significance.
# + [markdown] slideshow={"slide_type": "fragment"}
# You can use these modes to reconstruct the trajectory based on the dominant modes.
# + [markdown] slideshow={"slide_type": "fragment"}
# These modes are *robust*: they are invariant and do not depend on the details of the model or force field.
# + [markdown] slideshow={"slide_type": "fragment"}
# These modes are also relevant to biological function: you can see what conformations your protein spends most of its time in.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Administrivia
# + [markdown] slideshow={"slide_type": "-"}
# - Assignment 4 is due **today**!
# -
# - Assignment 5 is out! Due in **two weeks**.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Final project proposals are due in **one week!**
# - 1 page, maximum
# - Succinctly describe the problem you want to work on, and how you plan to answer it
# - Emphasize the computational approach you want to take
# - Any references
| lectures/StructuralBiology.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# <img src="https://spacy.io/static/social_default-1d3b50b1eba4c2b06244425ff0c49570.jpg" align='right' width=200>
#
# # Natural Language Processing with Python
# ## ... and spaCy
#
# This notebook is an exercise-based introductory demo of how to use Python for Natural Language Processing (NLP). It uses open data and the package spaCy, which comes with a lot of functionality for interacting with text data. Similar things can be done with packages like `nltk`. At some point, some machine learning will be done, for which scikit-learn is used.
#
# Let's start by importing the packages that are used:
# +
# General imports
import sys, os
import numpy as np
import pandas as pd
# NLP related
import string
import regex as re
import spacy
# Machine learning
import sklearn
import gensim
# Visualisation
import matplotlib
import matplotlib.pyplot as plt
# Print which versions are used
print("This notebook uses the following packages (and versions):")
print("---------------------------------------------------------")
print("python", sys.version[:6])
# List every imported module that exposes a __version__ attribute; plain
# modules such as `sys`/`os` are filtered out by the getattr(...) default.
print('\n'.join(f'{m.__name__} {m.__version__}' for m in globals().values() if getattr(m, '__version__', None)))
# -
# ## Text data
#
# Text is unstructured data, which means that we don't have something like a nice set of features (e.g. columns in a pandas DataFrame) for a set of observations (e.g. rows in that same DataFrame). The information is enclosed in human-readable text, but needs to be made quantitative in order for machine learning methods to be able to handle them. That process of getting quantitative information out of text is called NLP. SpaCy will help us out.
#
#
# ## Simple string operations
#
# The first step might often be to use Python's rich collection of string operations. For example, making everything lower case, removing punctuation or splitting a document into its consecutive sentences are operations for which we wouldn't need anything other than core Python:
# +
my_text = "This workshop is about language, and Python. Let's Go!"
sentences = my_text.split('. ')
print(sentences)

# Strip punctuation with a regular expression: every run of punctuation
# characters is collapsed into a single space.
def remove_punctuation(text):
    """Return `text` with each run of punctuation replaced by one space."""
    punct_class = "[" + string.punctuation + "]+"
    return re.sub(punct_class, " ", text)

print(remove_punctuation("text!!!text??"))

for sentence in sentences:
    print(remove_punctuation(sentence.lower()))
# -
# After simple operations like that, your results will no longer be case sensitive (but if uppercase is used to find names later on, be careful!). Note that for more complex string operations, it may be very useful to get familiar with [regular expressions](https://regex101.com/).
# + [markdown] tags=[]
# ## SpaCy language models
#
# Much of what's here is adapted from the [spaCy documentation](https://spacy.io/).
#
# There are many complications. In most applications, you will be after something like *the meaning*, *the context* or *the intent* of text. These can be hard to extract, and we will look at the quantification of text in steps.
#
# From spaCy you can import [pre-trained language models](https://spacy.io/usage/models) in a number of languages, that enable you to digest the "documents" (this can be just that example sentence, or a whole collection of books). The examples below show what you can do with such "NLP models".
#
# ### Part-of-Speech Tagging
# POS tagging can be helpful for understanding the build-up of the text you're dealing with. See below for an example.
#
# Let's start with a simple example sentence:
# +
# Example sentence (the misspelling "obvouis" is intentional — see the note after the cell).
sentence = "This is an example sentence by Marcel with a somewhat obvouis spelling mistake."
# Load the small pre-trained English pipeline (tokenizer, tagger, parser, NER).
nlp = spacy.load('en_core_web_sm')
# Running the pipeline on a string returns a Doc of annotated tokens.
doc = nlp(sentence)
# Print each token with its part-of-speech tag and dependency label.
for token in doc:
    print(f"{token.text:14s} {token.pos_:6s} {token.dep_}")
# -
# And if you need to know what any of those abbreviations mean, you can invoke
# Ask spaCy for a human-readable explanation of a tag abbreviation.
spacy.explain("ADJ")
# Which shows that even a spelling mistake gets correctly interpreted. The interplay of words within a sentence is also known to the `doc` object:
# Render the dependency parse of the sentence inline in the notebook.
spacy.displacy.render(doc, style='dep')
# ### Named entity recognition
#
# SpaCy understands that my name is a "named entity" and it can try to figure out what kind of an entity I am:
# Loop over the recognized named entities, printing the predicted label and character offset.
for ent in doc.ents: print(f"{ent} is a {ent.label_} and appears in the sentence at position {ent.start_char}")
# ---
# #### Exercise
# Just to get familiar with this type of exercise and solution loading:
#
# As you can see, my name isn't totally obvious for spaCy. Try with "Steve" and see if it gets better. Also, use the displacy renderer with `style='ent'` to see what it recognizes in the sentence "Steve worked for Apple until January 2011".
# +
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'NER.py')
# # %load $to_include
# -
# In many real-world applications, paying special attention to pre-defined entities is very valuable!
#
# ### Stopwords
#
# In many cases, there is little to no information in super common words like *the* or *is*. Note that **this depends on your use case!!**. In general, the most common words in a language don't add information because they appear all over the place, but their actual meaning might be important in your context. SpaCy comes with lists of stopwords that are useful for most use cases:
# spaCy's built-in English stop-word list (a set of strings).
stopwords = spacy.lang.en.stop_words.STOP_WORDS
print(f"I know {len(stopwords)} stopwords.")
# ---
#
# #### Exercise
# `stopwords` is a set. Can you think of a reason why?
#
# What is the longest stopword in English included in spaCy?
#
# Add a few more stopwords: "and", "market" and "people". How many of them were already in the collection?
# +
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'stopwords.py')
# # %load $to_include
# -
# ## Text Normalization: Stemming and Lemmatization
#
# Often, the information content of a text does not depend on verb conjugations, single vs plural, etc. In such cases we want to use text normalization in order to tell our future model that "is" and "was" are both representations of "be". In the same vein, you could also map synonyms to the same word. This is less common, for reasons you can probably think of. This type of word normalization can be done in a variety of ways.
#
# Stemming means to cut-off parts of the word (typically a suffix) to get back to the root of the word (e.g. reading -> read, played -> play etc.). This is a simple procedure.
#
# Lemmatization, on the other hand, is an organized and step-by-step procedure of obtaining the root form of the word. It makes use of vocabulary (dictionary importance of words) and morphological analysis (word structure and grammar relations). It typically results in more useful features for our future predictive models. It can be done with spaCy like this:
# Example passage (two sentences from a news item) used for lemmatization below.
from_the_news = "Belarus has been accused of taking revenge for EU sanctions by offering migrants tourist visas, and helping them across its border. The BBC has tracked one group trying to reach Germany."
# +
doc = nlp(from_the_news)
# Collect the lemma (dictionary root form) of every token, then rebuild the text.
lemmatized_tokens = [token.lemma_ for token in doc]
' '.join(lemmatized_tokens)
# -
# ## Preprocessing pipelines
#
# Before jumping to learning on our text data, let's create a pipeline for preprocessing the data in the way that we would want to. We will use pandas pipes to combine the functions into a pipeline. They work on a dataframe containing the data, so let's first create a simple data set.
# A toy two-row DataFrame with one 'text' column to demonstrate the pipe chain below.
df = pd.DataFrame({'text':['My first text ingredient.', 'More text. In the DataFrame.']})
df
# +
def remove_period(text):
    """Return the string Series `text` with all literal '.' characters removed."""
    cleaned = text.str.replace(".", "", regex=False)
    return cleaned

def to_lower(text):
    """Return the string Series `text` converted to lower case."""
    lowered = text.str.lower()
    return lowered
# Chain the cleaning steps with pandas pipes (in general, order of steps matters).
processed = (df.text.pipe(remove_period)
             .pipe(to_lower)
            )
processed
# -
# For the cases below, we will be using a subset of the "20 newsgroup" dataset that comes along with scikit-learn. These are kind of discussion forums on which people get questions answered. We will load the subset here and quickly look at it:
# +
from sklearn.datasets import fetch_20newsgroups

# We will load only 4 of the categories
cats = ['sci.space', 'sci.med', 'rec.autos', 'alt.atheism']
# Download (or read from the local cache) those categories, stripping headers
# and footers so models cannot shortcut on posting metadata.
data = fetch_20newsgroups(categories=cats,
                          remove=('headers', 'footers'))
# Targets and documents have the same length: one integer label per document.
print(data.target.shape)
print(len(data.data))
# -
# Get a random one: print its category name followed by the raw document text.
random_index = np.random.randint(0, high=len(data.data))
print(data.target_names[data.target[random_index]])
print()
print(data.data[random_index])
# ---
#
# #### Exercise
#
# Create a pre-processing pipeline that cleans the data of the newsgroups. You can think of your own steps and order (order matters!), or you can take these steps (these might well be sub-optimal!):
# 1. Transform to lower case
# 2. Remove punctuation
# 3. Lemmatize
# 4. Remove stop words -- Look at the stop words list: is this lemmatized?
#
# Are you ready? Or do you need to remove more?
# +
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'preprocessor.py')
# # %load $to_include
# -
# ## Vectorize the preprocessed text data
#
# In order for machine learning methods to deal with this cleaned-up text data, we are going to build a "bag of words" matrix out of these. This is a huge feature space where every observation is a document, and every single word that is in at least one of the documents is a feature. This will be a very sparse matrix.
#
# We can use either the `CountVectorizer` or the `TfidfVectorizer` from scikit-learn for this.
#
# ---
#
# #### Exercise
#
# Give that a go, look at the various "hyperparameters" for the vectorizers and play with it a bit. Down below, we will use these in supervised and unsupervised learning.
#
# Note that these vectorizers can take preprocessor functions as well! This will need to be done just slightly differently than above.
# +
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'vectorizer.py')
# # %load $to_include
# +
# Another example, calling pre-processors from the vectorizer.
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'vectorizer_preprocessor.py')
# # %load $to_include
# -
# Just like with more common data sets, we can do both supervised and unsupervised machine learning with text data, after the vectorization described above. After all, we created numeric features (based on the occurrence of words) for all documents, which serve as the observations of our model. Hence, we can use our data set to train a machine learning model like we are used to.
#
# Below are a supervised learning example, in which the label (the category in the 20 newsgroups data set) is predicted based on the bags-of-words. We can also pretend that we do not yet know these labels, or that there are 4 and do an unsupervised, clustering-like analysis. In this case, that is known as topic modeling and is described after the supervised example.
#
# The two examples will be followed by a brief discussion of more elaborate machine learning techniques based on the *context* of words, rather than the words themselves, through word vectors.
#
# ## Supervised learning: text classification
#
# We have a feature matrix (the result of the Count- or TfIdf-Vectorizer above) as well as a label (the category the text came from) for a subset of the 20 newsgroups data set. Building a predictive algorithm, that based on the occurrence of words will determine which of the 4 labels fit best can be made in a way completely analogously to how we would do this with a feature matrix of another origin.
#
# ---
# #### Exercise
#
# If you follow these steps, your predictive model will be built:
# - Split your feature matrix and target vector in a train and a test set (e.g. a random 20% of your data can go in the test set) using `sklearn.model_selection.train_test_split`
# - Instantiate a supervised classification algorithm. For example, use `sklearn.naive_bayes.MultinomialNB` with the default settings
# - Train on the train set and evaluate the predictions on the independent test, using a visualization of the confusion matrix (`sklearn.metrics.confusion_matrix`)
# +
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'naive_bayes.py')
# # %load $to_include
# + [markdown] tags=[]
# The above example is as simple as the example below. By no means do I pretend that this is all there is to machine learning! I do hope that it shows you how to use a bag-of-words to do machine learning on text data.
#
# ## Unsupervised learning: topic modeling with LDA
#
# In the unsupervised setting we look for structure present in the data that we do not have a "target variable" for. We do not know the correct answer, if that even means something.
#
# In this particular example, we would hope that 4 clusters are present, which in reality are described by the 4 different labels that we predicted above. Here we have a look at the data and try to find 4 topics, described by a form of soft clustering through [Latent Dirichlet Allocation](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation).
#
# The procedure is very similar to the supervised learning example above. It doesn't make much sense to split off a test set, as there is nothing to test.
#
# ---
# #### Exercise
# Run an LDA clustering with 4 components (`sklearn.decomposition.LatentDirichletAllocation`).
#
# -
# +
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'lda.py')
# # %load $to_include
# -
# We can easily visualize the results with `pyLDAvis.sklearn` to investigate our topics:
# +
import pyLDAvis.sklearn
# Route pyLDAvis output into the notebook instead of a separate page.
pyLDAvis.enable_notebook()
# `lda`, `bow` and `vectorizer` come from the solution loaded in the previous cells.
dash = pyLDAvis.sklearn.prepare(lda, bow, vectorizer)
dash
# -
# ---
# #### Exercise
# Play with the number of topics and see if you understand what happens!
# ## Word vectors
#
# In order to capture the 'meaning' or 'context' of a word, people often use word vectors. These are an $N$-dimensional representation of a word in an abstract space, in which words with a similar meaning are supposed to be near each other.
#
# The `nlp` object defined above comes with 96-dimensional word vectors:
# Process a single word and inspect the dimensionality of its vector.
mango = nlp('mango')
mango.vector.shape
# The numbers by themselves hardly mean anything, but proximity in this high-dimensional space does.
#
# ---
# #### Exercise
#
# Get the vectors for "mango", "strawberry" and "brick" and verify that the fruits are indeed the closest pair.
#
# Use the `similarity` method of tokens as well, to get a measure of all pairwise similarities.
# +
# If you want solutions, uncomment and run the next two lines.
# to_include = os.path.join('solutions', 'vector_dist.py')
# # %load $to_include
# -
# With the larger language model, the word vectors are 300-dimensional.
#
# Word vectors can be incredibly powerful. You can use the pre-trained models in spaCy, or you can train your own, with e.g. `word2vec`, `GenSim` or `FastText`. It can also be useful to take existing word embeddings and "re-train" them, which is supposed to make the existing embeddings more relevant for your domain of application, while you can still use the versatile pre-trained models, which are typically trained on massive amounts of data (more than you're likely to have at hand).
#
# With the vectors representing words, you can also do machine learning. In that case you do not need the bag-of-words methods any longer, which is nice for several reasons, e.g.:
# - Bag-of-words methods are unaware of the contexts of words
# - Word vectors are less sensitive to the use of synonyms and are more versatile in large diverse corpora of text
# - Word vectors trivially combine into document vectors (through averaging), allowing you to treat the documents in much the same way
#
# When you create an `nlp()` object out of a document, using one of the "larger" language models (see above), it is possible to assess the similarity of two documents using the `.similarity()` method. This uses the `.vector` attribute and calculates the similarity based on the [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity), a well-known distance metric that is insensitive to the length (L2-norm) of the vector, but only to its "direction".
#
# SpaCy has lots of functionality for word vectors, transformers and other sophisticated tooling; see e.g. [their documentation](https://spacy.io/). The key difference between word/document-vectors and contextual language models such as transformers is that word vectors model lexical types, rather than tokens. If you have a list of terms with no context around them, a transformer model like BERT can’t really help you. BERT is designed to understand language in context, which isn’t what you have. A word vectors table will be a much better fit for your task. However, if you do have words in context – whole sentences or paragraphs of running text -- word vectors will only provide a very rough approximation of what the text is about.
#
# Transformer models are usually trained with PyTorch, and training is greatly helped by the use of GPUs. These are beyond the scope of this workshop.
#
#
| NLP_with_Python.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .groovy
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Groovy
// language: groovy
// name: groovy
// ---
// ## Close Op
// +
//load ImageJ
// %classpath config resolver imagej.public https://maven.imagej.net/content/groups/public
// %classpath add mvn net.imagej imagej 2.0.0-rc-67
//create ImageJ object (gateway used for all op/scifio/notebook calls below)
ij = new net.imagej.ImageJ()
// -
// This op performs [morphological closing](https://en.wikipedia.org/wiki/Closing_(morphology)) on any binary image. Let's see how the `Op` is called:
// Print the available signatures of the "close" op.
ij.op().help("close")
// Note the parameters here:
//
// * `IterableInterval out`: the output image
// * `RandomAccessibleInterval in1`: the input image
// * `List in2`: this parameter contains all of the [`Shape`](http://javadoc.scijava.org/ImgLib2/net/imglib2/algorithm/neighborhood/Shape.html)s that the `Op` will use to close (i.e. for each `Shape` in `in2` close the input (or output of the last `close` if there are 2+ `Shape`s in `in2`)).
//
// We need to get a binary image to perform any morphological operations. Let's get a grayscale image and then binarize it using [the Huang Threshold Op](../threshold/threshold.ipynb#Huang-Thresholding):
// +
// Load the sample "blobs" image from the ImageJ website.
input_full = ij.scifio().datasetIO().open("http://imagej.net/images/blobs.gif")
//input_full has 3 channels. We only need one.
input = ij.op().run("hyperSliceView", input_full, 2, 0)
//invert the image so that the blobs are "on" and the background "off"
inverted = ij.op().run("create.img", input)
ij.op().run("invert", inverted, input)
// Binarize with Huang's automatic threshold.
binaryInput = ij.op().run("threshold.huang", inverted)
ij.notebook().display(binaryInput)
// -
// As it states in the Wikipedia article, closing an image will bridge small gaps between objects. The smaller the gap, the wider the bridge:
// +
import net.imglib2.algorithm.neighborhood.HyperSphereShape

//create the shape (radius-6 sphere) that we are going to close the image with
shape = new HyperSphereShape(6)
// Morphological close = dilation followed by erosion with the same shape.
output = ij.op().run("close", null, binaryInput, [shape])
ij.notebook().display(output)
// -
// To show the difference, let's [stack](../transform/stackView.ipynb) the images to see the difference:
// +
import net.imglib2.img.Img

// Stack the before/after images into one view for a visual diff.
list = new ArrayList<Img>()
list.add(binaryInput)
list.add(output)
stacked = ij.op().run("stackView", list)
ij.notebook().display(stacked)
// -
// Note the yellow regions denoting the original blobs as well as the green bridges between the blobs.
| notebooks/1-Using-ImageJ/Ops/morphology/close.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NetworKit Sparsification Tutorial
# This notebook covers the NetworKit sparsification module, which provides algorithms to compute edge scores and to sparsify an input graph.
#
# Sparsification algorithms rely on edge scores, therefore graph edges must be indexed. Call the [indexEdges()](https://networkit.github.io/dev-docs/python_api/networkit.html?highlight=indexedges#networkit.Graph.indexEdges) function to do so.
#
# Every sparsification algorithm computing edge scores in NetworKit provides a `scores()` function that returns, for each edge, the maximum parameter value such that the edge is still contained in the sparsified graph.
#
# The [getSparsifiedGraph(G, parameter, attribute)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=getsparsif#networkit.sparsification.Sparsifier.getSparsifiedGraph) or [getSparsifiedGraphOfSize(G, edgeRatio, attribute)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=getsparsif#networkit.sparsification.Sparsifier.getSparsifiedGraphOfSize) functions return the sparsified graph. `parameter` determines the degree of sparsification, while `edgeRatio` is the target edge ratio of the specified graph. `attribute` is an optional parameter representing a previously calculated edge attribute.
import networkit as nk
# Read the example graph and index its edges — edge indexing is required by
# every edge-score / sparsification algorithm used below.
G = nk.readGraph("../input/jazz.graph", nk.Format.METIS)
G.indexEdges()
G.numberOfNodes(), G.numberOfEdges()
# All sparsification algorithms need an `edgeRatio` parameter. We use the same `edgeRatio` in all our examples.
# Keep 20% of the original edges in every sparsified graph.
targetRatio = 0.2
# ## Forest Fire
# The Forest Fire sparsifier implements a variant of the Forest Fire sparsification approach that is based on random walks.
# ### Edge Scores
# The [ForestFireScore(G, pf, tebr)]() constructor expects as inputs a graph, the probability `pf` that the neighbor nodes will burn as well, and the target burn ratio which states that forest fire will burn until `tebr * m` edges have been burnt (where `m` is the number of edges of `G`).
# +
# Initialize the algorithm
# pf=0.6: probability that neighbour edges burn too; tebr=5.0: burn until
# tebr * m edges have been burnt.
ffs = nk.sparsification.ForestFireScore(G, 0.6, 5.0)
# Run
ffs.run()
# Get edge scores
attributes = ffs.scores()
# Peek at the first five raw edge scores.
for attribute in attributes[:5]:
    print("{:.3f}".format(attribute))
# -
# ### Sparsification
# The [ForestFireSparsifier(burnProbability, targetBurntRatio)]() constructor expects as inputs the probability `burnProbability` that the neighbor nodes will burn as well, and the target burn ratio which states that forest fire will burn until `targetBurntRatio * m` edges have been burnt.
# +
# Initialize the algorithm
fireSparsifier = nk.sparsification.ForestFireSparsifier(0.6, 5.0)
# Get sparsified graph
fireGraph = fireSparsifier.getSparsifiedGraphOfSize(G, targetRatio)
# Compare edge counts: original vs. sparsified.
G.numberOfEdges(), fireGraph.numberOfEdges()
# -
# ## Global Threshold Filter
# The Global Threshold Filter calculates a sparsified graph by filtering globally using a constant threshold value and a given edge attribute.
#
# The [GlobalThresholdFilter(G, attribute, e, above)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=globalth#networkit.sparsification.GlobalThresholdFilter) constructor expects as inputs a graph, a list of edge attributes, a threshold value `e` and a boolean value `above`. If `above` is set to `True`, all edges with an attribute value greater than or equal `e` will be kept in the sparsified graph. The `calculate` method returns the sparsified graph.
# ### Sparsification
# +
# Initialize the algorithm
# Filters on the forest-fire scores computed above; above=False, so edges are
# kept relative to the 0.2 threshold — see the GlobalThresholdFilter docs for
# the exact keep rule.
gtf = nk.sparsification.GlobalThresholdFilter(G, attributes, 0.2, False)
# Run
newG = gtf.calculate()
G.numberOfEdges(), newG.numberOfEdges()
# -
# ## Local Degree
# The local degree sparsification strategy is based on the idea of hub nodes. For each edge of the graph, it determines the maximum parameter value such that the edge is still contained in the sparsified graph.
# ### Edge Scores
# The [LocalDegreeScore(G)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=local%20degree#networkit.sparsification.LocalDegreeScore) constructor expects a graph as input.
# +
# Initialize the algorithm
lds = nk.sparsification.LocalDegreeScore(G)
# Run
lds.run()
# Get edge scores
ldsScores = lds.scores()
# Peek at the first five scores.
for score in ldsScores[:5]:
    print("{:.3f}".format(score))
# -
# ### Sparsification
# +
# Initialize the algorithm
localDegSparsifier = nk.sparsification.LocalDegreeSparsifier()
# Get sparsified graph
localDegGraph = localDegSparsifier.getSparsifiedGraphOfSize(G, targetRatio)
# Compare edge counts: original vs. sparsified.
G.numberOfEdges(), localDegGraph.numberOfEdges()
# -
# ## Local Similarity
# ### Edge Scores
# The [LocalSimilarityScore(G, triangles)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=local#networkit.sparsification.LocalSimilarityScore) constructor expects a graph and previously calculated edge triangle counts of the graph.
#
# The edge triangles can be computed using the [TriangleEdgeScore(G)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=triangle#networkit.sparsification.TriangleEdgeScore) algorithm.
# Compute triangles in G
# (the per-edge triangle counts are reused by SCAN and Simmelian below)
e_triangles = nk.sparsification.TriangleEdgeScore(G)
e_triangles.run()
triangles = e_triangles.scores()
# +
# Initialize the algorithm
lss = nk.sparsification.LocalSimilarityScore(G, triangles)
# Run
lss.run()
# Get edge scores
scores = lss.scores()
for score in scores[:5]:
    print("{:.3f}".format(score))
# -
# ### Sparsification
# +
# Initialize the algorithm
similaritySparsifier = nk.sparsification.LocalSimilaritySparsifier()
# Get sparsified graph
similarityGraph = similaritySparsifier.getSparsifiedGraphOfSize(G, targetRatio)
# Compare edge counts: original vs. sparsified.
G.numberOfEdges(), similarityGraph.numberOfEdges()
# -
# ## Random Edge Score
# This strategy assigns to each edge a random value in [0,1].
# ### Edge Scores
# The [RandomEdgeScore(G)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=randomedge#networkit.sparsification.RandomEdgeScore) constructor expects a graph as input.
# +
# Initialize
res = nk.sparsification.RandomEdgeScore(G)
# Run
res.run()
# Get edge scores
randomEdgeScores = res.scores()
# Peek at the first five (uniform random in [0, 1]) scores.
for score in randomEdgeScores[:5]:
    print("{:.3f}".format(score))
# -
# ### Sparsification
# +
# Initialize the algorithm
randomEdgeSparsifier = nk.sparsification.RandomEdgeSparsifier()
# Get sparsified graph
randomGraph = randomEdgeSparsifier.getSparsifiedGraphOfSize(G, targetRatio)
# Compare edge counts: original vs. sparsified.
G.numberOfEdges(), randomGraph.numberOfEdges()
# -
# ## Random Node Edge Score
# This attributizer returns edge attributes where each value is selected uniformly at random from [0,1].
# ### Edge Scores
# The [RandomNodeEdgeScore(G)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=randomnode#networkit.sparsification.RandomNodeEdgeScore) constructor expects a graph as input.
# +
# Initialize
rn = nk.sparsification.RandomNodeEdgeScore(G)
# Run
rn.run()
# Get edge scores
randomNodeScores = rn.scores()
# Peek at the first five scores.
for score in randomNodeScores[:5]:
    print("{:.3f}".format(score))
# -
# ### Sparsification
# +
# Initialize the algorithm
randomNodeEdgeSparsifier = nk.sparsification.RandomNodeEdgeSparsifier()
# Get sparsified graph
randomNodeGraph = randomNodeEdgeSparsifier.getSparsifiedGraphOfSize(G, targetRatio)
# Compare edge counts: original vs. sparsified.
G.numberOfEdges(), randomNodeGraph.numberOfEdges()
# -
# ## SCAN Structural Similarity Score
# This algorithm is a Structural Clustering Algorithm for Networks (SCAN) whose goal is to find clusters, hubs, and outliers in large networks.
# ### Edge Scores
# The [SCANStructuralSimilarityScore(G, triangles)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=scan#networkit.sparsification.SCANStructuralSimilarityScore) constructor expects as inputs a graph and previously calculated edge triangle counts of the graph.
# +
# Initialize the algorithm
# Reuses the `triangles` edge scores computed in the Local Similarity section.
scan = nk.sparsification.SCANStructuralSimilarityScore(G, triangles)
# Run
scan.run()
# Get edge scores
scanScores = scan.scores()
for score in scanScores[:5]:
    print("{:.3f}".format(score))
# -
# ### Sparsification
# +
# Initialize
scanSparsifier = nk.sparsification.SCANSparsifier()
# Get sparsified graph
scanGraph = scanSparsifier.getSparsifiedGraphOfSize(G, targetRatio)
# Compare edge counts: original vs. sparsified.
G.numberOfEdges(), scanGraph.numberOfEdges()
# -
# ## Simmelian Overlap Score
# This is an implementation of the parametric variant of Simmelian Backbones. It calculates for each edge the minimum parameter value such that the edge is still contained in the sparsified graph.
# ### Edge Scores
# The [SimmelianOverlapScore(G, triangles, maxRank)](https://networkit.github.io/dev-docs/python_api/sparsification.html?highlight=simmelian#networkit.sparsification.SimmelianOverlapScore) constructor expects as inputs a graph, triangles and the maximum rank that is considered for overlap calculation.
# +
# Initialize the algorithm
# maxRank=5: only the top-5 ranked neighbours are considered for the overlap.
sos = nk.sparsification.SimmelianOverlapScore(G, triangles, 5)
# Run
sos.run()
# Get edge scores
sosScores = sos.scores()
for score in sosScores[:5]:
    print("{:.3f}".format(score))
# -
# ### Sparsification
# +
# Initialize the algorithm
# NOTE(review): the text above describes the *parametric* Simmelian variant,
# but the NON-parametric sparsifier is instantiated here — confirm intended.
simmelianSparsifier = nk.sparsification.SimmelianSparsifierNonParametric()
# Get sparsified graph
# (variable name has a typo, "simmeliean", kept as-is)
simmelieanGraph = simmelianSparsifier.getSparsifiedGraphOfSize(G, targetRatio)
G.numberOfEdges(), simmelieanGraph.numberOfEdges()
| notebooks/Sparsification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (scikit-network)
# language: python
# name: pycharm-ac94289e
# ---
# # Diffusion
# This notebook illustrates the ranking of the nodes of a graph by [heat diffusion](https://scikit-network.readthedocs.io/en/latest/reference/ranking.html#diffusion).
from IPython.display import SVG
import numpy as np
from sknetwork.data import karate_club, painters, movie_actor
from sknetwork.ranking import Diffusion, BiDiffusion
from sknetwork.visualization import svg_graph, svg_digraph, svg_bigraph
# ## Graphs
# Undirected graph: rank karate-club nodes by heat diffusion from two seeds.
graph = karate_club(metadata=True)
adjacency = graph.adjacency
position = graph.position
labels_true = graph.labels
diffusion = Diffusion()
# Seed temperatures: node 0 cold (0), node 33 hot (1).
seeds = {0: 0, 33: 1}
scores = diffusion.fit_transform(adjacency, seeds)
image = svg_graph(adjacency, position, scores=scores, seeds=seeds)
SVG(image)
# ## Digraphs
graph = painters(metadata=True)
adjacency = graph.adjacency
position = graph.position
names = graph.names
picasso = 0
manet = 3
diffusion = Diffusion()
# Both seeds hot; all other nodes start from init=0.
seeds = {picasso: 1, manet: 1}
scores = diffusion.fit_transform(adjacency, seeds, init=0)
image = svg_digraph(adjacency, position, names, scores=scores, seeds=seeds)
SVG(image)
# ## Bigraphs
graph = movie_actor(metadata=True)
biadjacency = graph.biadjacency
names_row = graph.names_row
names_col = graph.names_col
drive = 3
aviator = 9
bidiffusion = BiDiffusion()
# Row (movie) seeds only; column (actor) scores are induced by the diffusion.
seeds_row = {drive: 0, aviator: 1}
bidiffusion.fit(biadjacency, seeds_row=seeds_row)
scores_row = bidiffusion.scores_row_
scores_col = bidiffusion.scores_col_
image = svg_bigraph(biadjacency, names_row, names_col, scores_row=scores_row, scores_col=scores_col,
                    seeds_row=seeds_row)
SVG(image)
| docs/tutorials/ranking/diffusion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # YNet - Dataset v5.1 and v5.2:
#
# Data from Experiment (1) and (2), Mitochondria = Cit1-mCherry
# ### Importing utilities:
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# +
import os
from pathlib import Path
import skimage.external.tifffile as tiff
from common import Statistics, dataset_source
from resources.conv_learner import *
from resources.plots import *
from pprint import pprint
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# #### Setting up variables
# Dataset location and training hyper-parameters.
PATH = "datasets/yeast_v5.2/"
data_path = Path(PATH)
# NOTE(review): 5 classes here, but the Resnet heads below are built with
# 6 and 4 outputs respectively — confirm which is intended.
CLASSES = ('WT', 'mfb1KO', 'mmr1KO', 'mmm1KO', 'num1KO' )
NUM_CLASSES = len(CLASSES)
BATCH_SIZE = 64
SIZE = 200
# !ls ..
# #### Calculating normalization statistics
# +
stats_name = "yeast_v5.2_per_class.dict"
# Per-class normalization statistics, computed separately for train+val and test.
classes = Statistics.source_class(data_path)
train_val = zip(classes['train'], classes['val'])
Xtest = zip(classes['test'])
# NOTE(review): both calls save under the same `stats_name`, so the second
# save may overwrite the first — confirm against Statistics.per_class.
main_stats = Statistics.per_class(train_val, save_name=stats_name)
test_stats = Statistics.per_class(Xtest, save_name=stats_name)
# -
# Print the computed per-class statistics for inspection.
for keys in main_stats.keys():
    print(f"{keys}: \t \t \t {main_stats[keys]}")
for keys in test_stats.keys():
    print(f"{keys}: \t \t \t {test_stats[keys]}")
# ## Defining datasets:
def tfms_for_test(stats, sz):
    """Build the evaluation transform pipeline for the test set.

    Normalizes/denormalizes with the supplied per-class `stats` and applies
    no cropping (CropType.NO) to images of size `sz`.
    """
    normalizer = Normalize(stats)
    denormalizer = Denormalize(stats)
    # Evaluation images are used uncropped.
    return image_gen(normalizer, denormalizer, sz, crop_type=CropType.NO)
def get_data(path: str, sz, bs):
    """Create the train/val/test model-data object for images of size `sz`.

    Training/validation transforms use `main_stats`; the test transforms use
    `test_stats` (both re-keyed from class names to class indices).
    """
    create, lbl2index, test_lbl2index = ImageClassifierData.prepare_from_path(
        path, val_name='val', test_name='test', test_with_labels=True, bs=bs)
    # Re-key the per-class stats dicts from class name -> class index.
    train_stats_by_idx = {lbl2index[name]: stat for name, stat in main_stats.items()}
    test_stats_by_idx = {test_lbl2index[name]: stat for name, stat in test_stats.items()}
    transforms = tfms_from_stats(train_stats_by_idx, sz, aug_tfms=[RandomDihedral()], pad=sz // 8)
    # Append the test-set pipeline as the third transform slot.
    transforms += (tfms_for_test(test_stats_by_idx, sz),)
    print('\n class to index mapping:\n', lbl2index)
    print('\n test class to index mapping:\n', test_lbl2index)
    return create(transforms)
# Build the data object and pull one batch each from train and test loaders.
data = get_data(PATH,SIZE, BATCH_SIZE)
x, y = next(iter(data.trn_dl))
test_x, test_y = next(iter(data.test_dl))
# ### Inspect loaded data:
# +
# specify which image-index
idx = 31
# loading it from GPU to CPU
xx = x[idx].cpu().numpy().copy()
yy = y[idx]
# showing the image
#
#sp.axis('Off')
#sp.set_title("Norm", fontsize=11)
# Sum over the channel axis to show the normalized image...
figure, _ ,_ = tiff.imshow(np.sum(xx, axis=0))
figure.set_size_inches(6,6)
figure.add_subplot(111)
# ...and the denormalized version (scaled back to 16-bit range) for comparison.
figure2, _, _ = tiff.imshow(np.sum(data.trn_ds.denorm(xx,yy).squeeze() * 65536, axis=2))
figure2.set_size_inches(6,6)
# -
# # Training setup
# Confirm a CUDA device is visible to PyTorch before training.
torch.cuda.is_available()
# ## ResNet_with_Batchnorm
class BnLayer(nn.Module):
def __init__(self, ni, nf, stride=2, kernel_size=3):
super().__init__()
self.conv = nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride,
bias=False, padding=1)
self.a = nn.Parameter(torch.zeros(nf,1,1))
self.m = nn.Parameter(torch.ones(nf,1,1))
def forward(self, x):
x = F.relu(self.conv(x))
x_chan = x.transpose(0,1).contiguous().view(x.size(1), -1)
if self.training:
self.means = x_chan.mean(1)[:,None,None]
self.stds = x_chan.std (1)[:,None,None]
return (x-self.means) / self.stds *self.m + self.a
class ResnetLayer(BnLayer):
    # Residual wrapper: identity skip-connection around the BnLayer transform.
    def forward(self, x): return x + super().forward(x)
class Resnet(nn.Module):
    """Small ResNet-style classifier for 2-channel images.

    `layers` lists the channel widths per stage; each stage is one
    downsampling BnLayer followed by two residual ResnetLayers. `c` is the
    number of output classes; the head returns log-softmax scores.
    """
    def __init__(self, layers, c):
        super().__init__()
        # Stem: 2 input channels -> 10 feature maps, spatial size preserved.
        self.conv1 = nn.Conv2d(2, 10, kernel_size=5, stride=1, padding=2)
        widths = list(zip(layers[:-1], layers[1:]))
        self.layers = nn.ModuleList([BnLayer(n_in, n_out) for n_in, n_out in widths])
        self.layers2 = nn.ModuleList([ResnetLayer(n_out, n_out, 1) for _, n_out in widths])
        self.layers3 = nn.ModuleList([ResnetLayer(n_out, n_out, 1) for _, n_out in widths])
        self.out = nn.Linear(layers[-1], c)

    def forward(self, x):
        x = self.conv1(x)
        for down, res_a, res_b in zip(self.layers, self.layers2, self.layers3):
            x = res_b(res_a(down(x)))
        # Global max-pool to a (N, C) feature vector, then classify.
        x = F.adaptive_max_pool2d(x, 1)
        x = x.view(x.size(0), -1)
        return F.log_softmax(self.out(x), dim=-1)
wd=1e-5
# NOTE(review): the head is built with 6 outputs while CLASSES defines 5
# classes — confirm which is intended.
learn = ConvLearner.from_model_data(Resnet([10, 20, 40, 80, 160], 6), data)
learn.summary()
# %time learn.fit(1e-2, 8, cycle_len=1, wds=wd)
# at very little overfitting we have 43% accuracy
# %time learn.fit(1e-2, 8, wds=wd, cycle_len=10, use_clr=(20,8, 0.95, 0.85))
# %time learn.fit(1e-3, 2, wds=wd, cycle_len=20, use_clr=(20,8, 0.95, 0.85))
# %time learn.fit(1e-3, 2, wds=wd, cycle_len=20, use_clr=(20,8, 0.95, 0.85), best_save_name='YNet_Res_v5_redo_1')
# ### Show loss over time
f = plt.figure()
# NOTE(review): this rebinds `x` (previously a training batch) to the return
# value of plot_loss().
x = learn.sched.plot_loss()
plt.ylabel('Loss')
plt.xlabel('Iterations')
plt.show()
# ## Analysis
# ### ...after training
## Load model:
learn.load('Objective_A_Resnet_per_class_2')
# %time learn.fit(1e-10, 1, wds=wd, cycle_len=1)
log_preds, y = learn.TTA() # run predictions with TTA
# ### Confusion matrix
# Plot confusion matrix
log_preds_mean = np.mean(log_preds, axis=0)
preds = np.argmax(log_preds_mean, axis=1)
# NOTE(review): sklearn's confusion_matrix signature is (y_true, y_pred);
# passing preds first transposes the matrix — confirm intended.
cm = confusion_matrix(preds,y)
plot_confusion_matrix(cm, data.classes)
# ### Analyse images
# #### Show random correct/incorrectly classified images:
log_preds_mean = np.mean(log_preds, axis=0) # averages predictions on original + 4 TTA images
preds = np.argmax(log_preds_mean, axis=1) # index of the most probable class per image
# probs = np.exp(log_preds_mean[:,0]) # prediction(WT)
probs = np.exp(log_preds_mean) # predictions
def rand_by_mask(mask):
    """Pick 4 distinct random indices among the True positions of `mask`."""
    return np.random.choice(np.flatnonzero(mask), 4, replace=False)

def rand_by_correct(is_correct):
    """Pick 4 random validation indices that were (in)correctly predicted."""
    return rand_by_mask((preds == data.val_y) == is_correct)
def plots(ims, channel, figsize=(12,6), rows=1, titles=None):
    """Show a grid of images (`rows` x len(ims)//rows).

    If `channel` is given, that single channel of each image is shown;
    otherwise each image is summed over its channel axis (axis 1 of `ims`).
    """
    fig = plt.figure(figsize=figsize)
    cols = len(ims) // rows
    for idx, im in enumerate(ims):
        axis = fig.add_subplot(rows, cols, idx + 1)
        axis.axis('Off')
        if titles is not None:
            axis.set_title(titles[idx], fontsize=11)
        if channel is not None:
            plt.imshow(im[channel, :, :])
        else:
            plt.imshow(np.sum(ims, axis=1)[idx, :, :])
def plot_val_with_title_from_ds_no_denorm(idxs, title, channel=None):
    """Plot the validation images at `idxs` straight from the dataset (still
    normalized), titled with true label, predicted label and confidence.

    Fixes vs. previous version:
    * np.stack() requires a sequence — passing a generator is deprecated and
      raises TypeError on modern NumPy, so list comprehensions are used.
    * Titles were built by iterating over the *label values* (`for x in
      corr_lbl`) and using them as indices; now each plotted image i gets its
      own (label, prediction, probability) triple.
    """
    imgs = np.stack([data.val_ds[x][0] for x in idxs])      # images by idx
    corr_lbl = np.stack([data.val_ds[x][1] for x in idxs])  # true labels by idx
    pred_lbl = np.stack([preds[x] for x in idxs])           # predicted labels by idx
    p_max = [np.amax(probs[x, :]) for x in idxs]            # highest class probability by idx
    title_fin = [f"true = {corr_lbl[i]}\n predicted: {pred_lbl[i]}\n p = {p_max[i]}"
                 for i in range(len(idxs))]
    print(title)
    return plots(imgs, channel, rows=1, titles=title_fin, figsize=(16,8))
# ### Plot images according to predictions
# load from ds - not denormalized!
plot_val_with_title_from_ds_no_denorm(rand_by_correct(True), "Correctly classified")
#optionally pass channel arg. to select single channel
plot_val_with_title_from_ds_no_denorm(rand_by_correct(False), "Incorrectly classified")
# #### Show most correct/incorrectly classified images per class:
# +
def most_by_mask(mask, y, mult):
    """Return up to 4 masked validation indices ordered by mult * probs[:, y]
    ascending (pass mult=-1 to get highest class-y probability first)."""
    candidates = np.flatnonzero(mask)
    order = np.argsort(mult * probs[candidates, y])
    return candidates[order[:4]]

def most_by_correct(y, is_correct):
    """Indices of the most confidently (in)correctly classified class-`y` images."""
    sign = -1 if is_correct else 1
    hit_mask = (preds == data.val_y) == is_correct
    return most_by_mask(hit_mask & (data.val_y == y), y, sign)
# -
plot_val_with_title_from_ds_no_denorm(most_by_correct(0, True), "Most correctly classified WT")
plot_val_with_title_from_ds_no_denorm(most_by_correct(0, False), "Most incorrectly classified WT") # logic?
plot_val_with_title_from_ds_no_denorm(most_by_correct(1, True), "Most correctly classified mfb1KO")
plot_val_with_title_from_ds_no_denorm(most_by_correct(1, False), "Most incorrectly classified mfb1KO")
# NOTE(review): CLASSES[2] is 'mmr1KO' but the title below says
# 'mfb1KO-mmr1KO' — confirm the labelling.
plot_val_with_title_from_ds_no_denorm(most_by_correct(2, True), "Most correctly classified mfb1KO-mmr1KO")
plot_val_with_title_from_ds_no_denorm(most_by_correct(3, True), "Most correctly classified mmr1KO")
# +
# etc.
# -
# #### Show (most) uncertain images
# (`t` is an unused alias of most_uncertain, kept as-is)
most_uncertain = t = np.argsort(np.amax(probs, axis = 1))[:6] # get best "guess" per image and list the least confident ones
plot_val_with_title_from_ds_no_denorm(most_uncertain, "Most uncertain predictions")
# # DOES THE MODEL GENERALIZE?????
def get_test_data(path: str, sz, bs):
    """Build a model-data object whose *test* slot reuses the training
    transforms (part of the generalisation troubleshooting below).

    Fix: mirrors `get_data` — `prepare_from_path(..., test_with_labels=True)`
    returns (create, lbl2index, test_lbl2index) there, so the previous
    two-name unpacking here would raise ValueError.
    """
    create, lbl2index, test_lbl2index = ImageClassifierData.prepare_from_path(
        path, val_name='val', test_name='test', test_with_labels=True, bs=bs)
    main_stats_X = {lbl2index[key]: val for key, val in main_stats.items()}
    test_stats_X = {test_lbl2index[key]: val for key, val in test_stats.items()}
    tfms = tfms_from_stats(main_stats_X, sz, aug_tfms=[RandomDihedral()], pad=sz//8) #even without transformations and padding -> failure
    test_tfms = tfms_for_test(test_stats_X,sz)
    # NOTE(review): the test slot deliberately reuses the *training* transforms
    # (tfms[0]) rather than test_tfms — part of the troubleshooting experiment.
    tfms += (tfms[0], )
    print('\n class to index mapping:\n',lbl2index)
    return create(tfms)
# Evaluate the saved model on the held-out test split.
test_data = get_test_data(PATH,SIZE, BATCH_SIZE)
# NOTE(review): head built with 4 output classes while CLASSES defines 5 —
# confirm intended.
test_learn = ConvLearner.from_model_data(Resnet([10, 20, 40, 80, 160], 4), test_data)
test_learn.load('Objective_A_Resnet_per_class_2')
test_learn.warm_up(1e-14)
test_log_preds, targs = test_learn.predict_with_targs(is_test=True)
testprobs = np.exp(test_log_preds)      # log-softmax -> probabilities
preds = np.argmax(testprobs, axis=1)    # most probable class per image
print(preds)
print(targs)
# Print Accuracy (derive the sample count from targs instead of hard-coding 70)
print(f"accuracy: [{sum(preds == targs) / len(targs):4.4}]")
test_log_preds
# ### Troubleshooting:
test_x , test_y = next(iter(test_data.test_dl))
# +
# specify which image-index
idx = 2
# loading it from GPU to CPU
test_xx = test_x[idx].cpu().numpy().copy()
test_yy = test_y[idx]
# Visual sanity check: compare a test image against the earlier training image
# (`xx`), both summed over the channel axis.
figure, _ ,_ = tiff.imshow(np.sum(test_xx, axis=0))
figure.set_size_inches(6,6)
figure.add_subplot(111)
figure2, _, _ = tiff.imshow(np.sum(xx, axis=0))
figure2.set_size_inches(6,6)
# -
# ## Calculating normalization statistics separately
# Show the per-class normalization statistics used for train/val and for test.
for key, val in main_stats.items():
    print(f"{key}: \t \t \t {val}")
for key, val in test_stats.items():
    print(f"{key}: \t \t \t {val}")
## Load model:
learn.load('Objective_A_Resnet_per_class_2')
# %time learn.fit(1e-10, 1, wds=wd, cycle_len=1)
test_log_preds, targs = learn.predict_with_targs(is_test=True)
testprobs = np.exp(test_log_preds)      # log-softmax -> probabilities
preds = np.argmax(testprobs, axis=1)
print(preds)
print(targs)
# Print Accuracy (derive the sample count from targs instead of hard-coding 140)
print(f"accuracy: [{sum(preds == targs) / len(targs):4.4}]")
test_log_preds
| YNet_dev/Objective_D_ResNet_v5.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
'''
Welcome to your first lab! Each lab starts with a multiline
string, like this. (That's what the triple quotes delineate.)
Embedded in this multiline string will be what looks like copy-pastes
from interactive Python prompts:
>>> hello("world")
'Hello, world!'
That is actually an automated test. Your job is to write code to make
it pass. In other words, you must define a function named "hello" that
takes one argument, and prints out the greeting. You do that below,
where it says "Write your code here".
Here's another test, which gives an important distinction:
>>> print_hello("world")
Hello, world!
Notice the output has no quotes around it? That means the function
printed, rather than returning a string.
Pro tips:
* ONLY write code in the area below. Don't modify this multiline
string!
* When you run the script and see multiple failures, focus on fixing
the first failure. Often later failures will then go away.
'''
# Write your code here:
def hello(str):
    """Return (not print) the greeting, so the doctest sees the quoted repr."""
    template = 'Hello, %s!'
    return template % str
def print_hello(str):
    # Python 2 print *statement* (this notebook uses the python2 kernel):
    # prints the greeting instead of returning it, so the doctest sees
    # unquoted output.
    print 'Hello, %s!' % str
# Do not edit any code below this line!
if __name__ == '__main__':
    import doctest
    # Run the doctests embedded in the module-level string above.
    doctest.testmod()
# -
print_hello("world")
hello("world")
| jupyterhub/notebooks/zz_under_construction/zz_old/Python/BeyondBasics/solutions/helloworld.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # SqueezeNet on ImageNet
#
# This is SqueezeNet on Imagenet. SGD algorithm.
#
#
# #### Direct SqeezeNet
#
# Total params: 1,248,424
# Trainable params: 1,248,424
# Non-trainable params: 0
#
#
# #### Sqeeze with 50K intrinsic dim
#
# Total params: 7,589,881
# Trainable params: 50,000
# Non-trainable params: 7,539,881
#
# #### Sqeeze with 100K intrinsic dim
#
# Total params: 7,639,881
# Trainable params: 100,000
# Non-trainable params: 7,539,881
#
# #### Sqeeze with 200K intrinsic dim
#
# Total params: 7,739,881
# Trainable params: 200,000
# Non-trainable params: 7,539,881
#
# #### Sqeeze with 500K intrinsic dim
#
# Total params: 8,039,881
# Trainable params: 500,000
# Non-trainable params: 7,539,881
#
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
# %matplotlib inline
# Find the right directory with
#
# `ll | grep direct`
# `ll | grep vsize50k`
# `ll | grep vsize100m`
# ...
#
# Go to the most recent one, grep validation accuracies with e.g.
#
# `grep val: 171023_044314_a9c117e_lrb_squz_dist4_direct_cont_3x/diary`
# ...
#
#
# Copy the best accuracy numbers (4 of them due to 4-GPU distributed training)
# #### Direct
#
# (ml)rosanne@opusgpu168-wbu2 [/mnt/share/public/results/rosanne/171019_sqznet] $ grep val: 171023_044314_a9c117e_lrb_squz_dist4_direct_cont_3x/diary
#
# 17.10.23.04.44.13.557 505404 (worker 3) val: l: 2.0153, l_xe: 2.0153, acc: 0.5481 (0.126s/i)
#
# 17.10.23.04.44.14.129 505404 (worker 1) val: l: 2.0145, l_xe: 2.0145, acc: 0.5510 (0.129s/i)
#
# 17.10.23.04.44.14.190 505404 (worker 0) val: l: 2.0251, l_xe: 2.0251, acc: 0.5469 (0.129s/i)
#
# 17.10.23.04.44.14.500 505404 (worker 2) val: l: 2.0019, l_xe: 2.0019, acc: 0.5504 (0.129s/i)
#
#
# #### intrinsic 50k
#
# (ml)rosanne@opusgpu168-wbu2 [/mnt/share/public/results/rosanne/171019_sqznet] $ grep val: 171023_151800_a9c117e_lrb_squz_dist4_fastfood_vsize50k_cont2_cont3_cont_cont/diary
#
# 17.10.23.15.18.46.406 275220 (worker 3) val: l: 4.6874, l_xe: 4.6874, acc: 0.1371 (0.0738s/i)
#
# 17.10.23.15.18.46.884 275220 (worker 1) val: l: 4.6705, l_xe: 4.6705, acc: 0.1377 (0.0729s/i)
#
# 17.10.23.15.18.53.147 275220 (worker 0) val: l: 4.6823, l_xe: 4.6823, acc: 0.1373 (0.107s/i)
#
# 17.10.23.15.18.53.510 275220 (worker 2) val: l: 4.6737, l_xe: 4.6737, acc: 0.1387 (0.107s/i)
#
# #### intrinsic 100k
#
# (ml)rosanne@opusgpu168-wbu2 [/mnt/share/public/results/rosanne/171019_sqznet] $ grep val: 171023_152032_a9c117e_lrb_squz_dist4_fastfood_vsize100k_cont_cont_cont2_cont_2x/diary
#
# 17.10.23.15.21.41.945 565452 (worker 0) val: l: 3.9296, l_xe: 3.9296, acc: 0.2381 (0.148s/i)
#
# 17.10.23.15.21.42.234 565452 (worker 2) val: l: 3.9113, l_xe: 3.9113, acc: 0.2385 (0.15s/i)
#
# 17.10.23.15.21.42.444 565452 (worker 1) val: l: 3.9138, l_xe: 3.9138, acc: 0.2397 (0.152s/i)
#
# 17.10.23.15.21.42.522 565452 (worker 3) val: l: 3.9388, l_xe: 3.9388, acc: 0.2377 (0.154s/i)
#
# #### intrinsic 200k
#
# (ml)rosanne@opusgpu168-wbu2 [/mnt/share/public/results/rosanne/171019_sqznet] $ grep val: 171023_152741_a9c117e_lrb_squz_dist4_fastfood_vsize200k_cont_6x/diary
#
# 17.10.23.19.00.57.030 680544 (worker 0) val: l: 3.6063, l_xe: 3.6063, acc: 0.2820 (0.139s/i)
#
# 17.10.23.19.00.57.035 680544 (worker 2) val: l: 3.5659, l_xe: 3.5659, acc: 0.2871 (0.139s/i)
#
# 17.10.23.19.00.57.085 680544 (worker 1) val: l: 3.5694, l_xe: 3.5694, acc: 0.2843 (0.139s/i)
#
# 17.10.23.19.00.57.092 680544 (worker 3) val: l: 3.6047, l_xe: 3.6047, acc: 0.2870 (0.139s/i)
#
#
# #### intrinsic 500k
#
# (ml)rosanne@opusgpu168-wbu2 [/mnt/share/public/results/rosanne/171019_sqznet] $ grep val: 171023_152544_a9c117e_lrb_squz_dist4_fastfood_vsize500k_cont2_cont3/diary
#
# 17.10.23.18.57.14.485 360288 (worker 0) val: l: 3.4111, l_xe: 3.4111, acc: 0.3177 (0.121s/i)
#
# 17.10.23.18.57.14.530 360288 (worker 2) val: l: 3.3885, l_xe: 3.3885, acc: 0.3151 (0.122s/i)
#
# 17.10.23.18.57.14.544 360288 (worker 1) val: l: 3.3623, l_xe: 3.3623, acc: 0.3232 (0.122s/i)
#
# 17.10.23.18.57.14.566 360288 (worker 3) val: l: 3.3980, l_xe: 3.3980, acc: 0.3153 (0.122s/i)
#
#
# #### intrinsic 500k with large batch
#
# (ml)rosanne@opusgpu168-wbu2 [/mnt/share/public/results/rosanne/171019_sqznet] $ grep val: 171023_005007_a9c117e_lrb_squz_dist4_fastfood_vsize500k_mb900_cont_cont2/diary
#
# 17.10.23.13.07.12.374 125224 (worker 1) val: l: 3.2953, l_xe: 3.2953, acc: 0.3360 (2.39s/i)
#
# 17.10.23.13.07.12.375 125224 (worker 3) val: l: 3.3209, l_xe: 3.3209, acc: 0.3354 (2.39s/i)
#
# 17.10.23.13.09.21.004 125224 (worker 2) val: l: 3.3296, l_xe: 3.3296, acc: 0.3270 (4.73s/i)
#
# 17.10.23.13.09.21.072 125224 (worker 0) val: l: 3.2987, l_xe: 3.2987, acc: 0.3270 (4.73s/i)
# x-axis values: 0 = direct (full parameter space) baseline; the rest are
# intrinsic dimensions. 500001 appears to be a deliberate offset so the
# large-batch 500k run gets its own x position — confirm.
dim = [0,50000,100000,200000,500000,500001]
# +
# Top-1 validation accuracies, each averaged over the 4 distributed workers
# (numbers copied from the grep'd training diaries above).
Rs = [
    np.mean([0.5481, 0.5510,0.5469, 0.5504]), # direct, or dim 0
    np.mean([0.1371, 0.1377,0.1373, 0.1387]), # dim 50k
    np.mean([0.2381, 0.2385,0.2397, 0.2377]), # dim 100k
    np.mean([0.2820, 0.2871,0.2843, 0.2870]), # dim 200k
    np.mean([0.3177, 0.3151,0.3232, 0.3153]), # dim 500k
    np.mean([0.3360, 0.3354,0.3270, 0.3270]), # dim 500k with large batch
]
Rs = np.array(Rs)
# -
Rs
# ## Performance comparison with Baseline
#
# "Baseline method" indicates optimization in the parameter space.
#
# The proposed method first embeds parameters into the intrinisic space (via orthogonal matrix), and optimization is the new space.
#
# The dimension of intrinsic space indicates the degree of freedom in the weights of neural nets.
matplotlib.rcParams.update({'font.size': 16})
# +
# Number of intrinsic-dim points (excludes the dim-0 direct baseline).
nn = len(Rs)-1
fig, ax = subplots(figsize=(5,4) )
plt.scatter(dim[1:], Rs[1:], edgecolor="b", facecolor="c",s=150 )
# Horizontal reference lines: the direct-training baseline and 90% of it.
ax.plot(dim[1:], Rs[0]*np.ones(nn)*0.9,'k-.', label="Testing: 0.9*baseline")
ax.plot(dim[1:], Rs[0]*np.ones(nn),'k-', label="Testing: baseline")
ax.set_xlabel('Intrinsic Dim')
ax.set_ylabel('Top 1 Accuracy')
plt.grid()
ax.legend()
ax.set_ylim([-0.0,0.8])
ax.set_xlim([0,530000])
fig.set_size_inches(10, 6)
# -
| intrinsic_dim/plots/more/squeezenet_imagenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 03: RK and RD
# Compute observables with uncertainties for the two mass-rotation scenarios,
# then plot the RK and RD error bars for both.
import SMEFT19
from SMEFT19.scenarios import rotBI, rotBII

# Scenario I (3 fit parameters).
# NOTE(review): the two numeric lists appear to be per-parameter lower/upper
# bounds — confirm against SMEFT19.obsuncert.calculate's documentation.
SMEFT19.obsuncert.calculate(
    rotBI, [-0.25, -0.03, 0], [0.0, 0.03, 3.5],
    '../data/observables/obsBI.yaml', '../data/ellipses/rotBI.yaml',
    name='Scenario I mass rotation', mode='exact', cores=4)

# Scenario II (5 fit parameters).
SMEFT19.obsuncert.calculate(
    rotBII, [-0.25, -0.15, -0.03, -0.12, 0], [0.0, 0.15, 0.03, 0.07, 3.5],
    '../data/observables/obsBII.yaml', '../data/ellipses/rotBII.yaml',
    name='Scenario II mass rotation', mode='exact', cores=4)

SMEFT19.plots.error_plot('../data/plots/rotRKplot', 'RK',
                         ['../data/observables/obsBI.yaml',
                          '../data/observables/obsBII.yaml'], legend=1)
SMEFT19.plots.error_plot('../data/plots/rotRDplot', 'RD',
                         ['../data/observables/obsBI.yaml',
                          '../data/observables/obsBII.yaml'], legend=0)
| PaperML/03_RKRD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Game Tree Search
#
# We start with defining the abstract class `Game`, for turn-taking *n*-player games. We rely on, but do not define yet, the concept of a `state` of the game; we'll see later how individual games define states. For now, all we require is that a state has a `state.to_move` attribute, which gives the name of the player whose turn it is. ("Name" will be something like `'X'` or `'O'` for tic-tac-toe.)
#
# We also define `play_game`, which takes a game and a dictionary of `{player_name: strategy_function}` pairs, and plays out the game, on each turn checking `state.to_move` to see whose turn it is, and then getting the strategy function for that player and applying it to the game and the state to get a move.
from collections import namedtuple, Counter, defaultdict
import random
import math
import functools
# Memoization decorator with a bounded cache (10**6 entries); presumably
# applied to evaluation functions later in the notebook (not used in this chunk).
cache = functools.lru_cache(10**6)
# +
class Game:
    """Abstract base class for turn-taking games.

    Like a search problem, but with a terminal test instead of a goal test and
    a per-player utility on terminal states. Subclasses implement `actions`,
    `result` and `utility`, and set `.initial` to the start state (e.g. in
    their constructor).
    """

    def actions(self, state):
        """Return a collection of the allowable moves from this state."""
        raise NotImplementedError

    def result(self, state, move):
        """Return the state that results from making a move from a state."""
        raise NotImplementedError

    def is_terminal(self, state):
        """A state with no legal moves is a final state for the game."""
        return not self.actions(state)

    def utility(self, state, player):
        """Return the value of this final state to player."""
        raise NotImplementedError
def play_game(game, strategies: dict, verbose=False):
    """Play a turn-taking game to completion and return the final state.

    `strategies` maps each player name to a function f(game, state) -> move;
    on every turn the mover is read from state.to_move.
    """
    state = game.initial
    while not game.is_terminal(state):
        mover = state.to_move
        chosen = strategies[mover](game, state)
        state = game.result(state, chosen)
        if verbose:
            print('Player', mover, 'move:', chosen)
            print(state)
    return state
# -
# # Minimax-Based Game Search Algorithms
#
# We will define several game search algorithms. Each takes two inputs, the game we are playing and the current state of the game, and returns a a `(value, move)` pair, where `value` is the utility that the algorithm computes for the player whose turn it is to move, and `move` is the move itself.
#
# First we define `minimax_search`, which exhaustively searches the game tree to find an optimal move (assuming both players play optimally), and `alphabeta_search`, which does the same computation, but prunes parts of the tree that could not possibly have an affect on the optimnal move.
# +
def minimax_search(game, state):
    """Exhaustively search the game tree; return a (value, move) pair.

    The value is the backed-up minimax utility for the player whose turn it
    is in `state`, assuming both sides play optimally.
    """
    player = state.to_move

    def maximize(s):
        # Best (value, move) when it is `player`'s turn to move.
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = -infinity, None
        for action in game.actions(s):
            value, _ = minimize(game.result(s, action))
            if value > best_value:
                best_value, best_move = value, action
        return best_value, best_move

    def minimize(s):
        # Best (value, move) for the opponent, who minimizes `player`'s utility.
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = +infinity, None
        for action in game.actions(s):
            value, _ = maximize(game.result(s, action))
            if value < best_value:
                best_value, best_move = value, action
        return best_value, best_move

    return maximize(state)
infinity = math.inf  # shared sentinel: worse than any achievable utility
def alphabeta_search(game, state):
    """Search `game` from `state` with alpha-beta pruning; return (value, move).

    Computes the same answer as minimax_search but prunes subtrees that
    cannot influence the decision.  Searches all the way to the leaves.
    """
    player = state.to_move

    def maximize(s, alpha, beta):
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = -infinity, None
        for action in game.actions(s):
            value, _ = minimize(game.result(s, action), alpha, beta)
            if value > best_value:
                best_value, best_move = value, action
            alpha = max(alpha, best_value)
            if best_value >= beta:  # opponent will never allow this line
                return best_value, best_move
        return best_value, best_move

    def minimize(s, alpha, beta):
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = +infinity, None
        for action in game.actions(s):
            value, _ = maximize(game.result(s, action), alpha, beta)
            if value < best_value:
                best_value, best_move = value, action
            beta = min(beta, best_value)
            if best_value <= alpha:  # we would never be allowed to reach here
                return best_value, best_move
        return best_value, best_move

    return maximize(state, -infinity, +infinity)
# -
# # A Simple Game: Tic-Tac-Toe
#
# We have the notion of an abstract game, we have some search functions; now it is time to define a real game; a simple one, tic-tac-toe. Moves are `(x, y)` pairs denoting squares, where `(0, 0)` is the top left, and `(2, 2)` is the bottom right (on a board of size `height=width=3`).
# +
class TicTacToe(Game):
    """Tic-tac-toe on a `width` x `height` board; `k` marks in a row win.
    'X' moves first, against 'O'."""

    def __init__(self, height=3, width=3, k=3):
        self.k = k  # how many in a row it takes to win
        self.squares = {(x, y) for x in range(width) for y in range(height)}
        self.initial = Board(height=height, width=width, to_move='X', utility=0)

    def actions(self, board):
        """Every square that has not been claimed is a legal move."""
        return self.squares - set(board)

    def result(self, board, square):
        """Place the current player's marker on `square`; return the new board."""
        mover = board.to_move
        opponent = 'O' if mover == 'X' else 'X'
        board = board.new({square: mover}, to_move=opponent)
        if k_in_row(board, mover, square, self.k):
            board.utility = +1 if mover == 'X' else -1
        else:
            board.utility = 0
        return board

    def utility(self, board, player):
        """Return the value to player; 1 for win, -1 for loss, 0 otherwise."""
        return board.utility if player == 'X' else -board.utility

    def is_terminal(self, board):
        """The game ends on a win or when every square has been filled."""
        return board.utility != 0 or len(self.squares) == len(board)

    def display(self, board):
        print(board)
def k_in_row(board, player, square, k):
    """True if `player` has `k` pieces in a line through `square`."""
    def run_length(x, y, dx, dy):
        # Count consecutive `player` pieces starting at (x, y), stepping (dx, dy).
        count = 0
        while board[x, y] == player:
            count += 1
            x, y = x + dx, y + dy
        return count
    x0, y0 = square
    directions = ((0, 1), (1, 0), (1, 1), (1, -1))
    # The two opposite runs both count the square itself, hence the -1.
    return any(run_length(x0, y0, dx, dy) + run_length(x0, y0, -dx, -dy) - 1 >= k
               for dx, dy in directions)
# -
# States in tic-tac-toe (and other games) will be represented as a `Board`, which is a subclass of `defaultdict` that in general will consist of `{(x, y): contents}` pairs, for example `{(0, 0): 'X', (1, 1): 'O'}` might be the state of the board after two moves. Besides the contents of squares, a board also has some attributes:
# - `.to_move` to name the player whose move it is;
# - `.width` and `.height` to give the size of the board (both 3 in tic-tac-toe, but other numbers in related games);
# - possibly other attributes, as specified by keywords.
#
# As a `defaultdict`, the `Board` class has a `__missing__` method, which returns `empty` for squares that have not been assigned but are within the `width` × `height` boundaries, or `off` otherwise. The class has a `__hash__` method, so instances can be stored in hash tables.
class Board(defaultdict):
    """A game board: a dict of {(x, y): player} marks ('X' or 'O'), plus
    attributes such as `to_move` (whose turn it is) and a cached `utility`."""

    empty = '.'  # rendered for unclaimed on-board squares
    off = '#'    # rendered for coordinates outside the board

    def __init__(self, width=8, height=8, to_move=None, **kwds):
        self.__dict__.update(width=width, height=height, to_move=to_move, **kwds)

    def new(self, changes: dict, **kwds) -> 'Board':
        "Return a copy of this board with `changes` ({(x, y): contents}) applied."
        successor = Board(width=self.width, height=self.height, **kwds)
        successor.update(self)
        successor.update(changes)
        return successor

    def __missing__(self, loc):
        # Unassigned squares read as `empty` inside the board, `off` outside.
        # Nothing is stored, so mere lookups never grow the dict.
        x, y = loc
        in_bounds = 0 <= x < self.width and 0 <= y < self.height
        return self.empty if in_bounds else self.off

    def __hash__(self):
        # Hash on contents plus whose turn it is, so boards can key caches.
        return hash(tuple(sorted(self.items()))) + hash(self.to_move)

    def __repr__(self):
        rows = (' '.join(self[x, y] for x in range(self.width))
                for y in range(self.height))
        return '\n'.join(rows) + '\n'
# # Players
#
# We need an interface for players. I'll represent a player as a `callable` that will be passed two arguments: `(game, state)` and will return a `move`.
# The function `player` creates a player out of a search algorithm, but you can create your own players as functions, as is done with `random_player` below:
# +
def random_player(game, state):
    """A player that picks uniformly at random among the legal moves."""
    legal_moves = list(game.actions(state))
    return random.choice(legal_moves)
def player(search_algorithm):
    """Wrap a search algorithm returning (value, move) as a move-choosing player."""
    def choose(game, state):
        _value, move = search_algorithm(game, state)
        return move
    return choose
# -
# # Playing a Game
#
# We're ready to play a game. I'll set up a match between a `random_player` (who chooses randomly from the legal moves) and a `player(alphabeta_search)` (who makes the optimal alpha-beta move; practical for tic-tac-toe, but not for large games). The `player(alphabeta_search)` will never lose, but if `random_player` is lucky, it will be a tie.
# Random X vs. optimal alpha-beta O; per the text, O never loses (0 or -1).
play_game(TicTacToe(), dict(X=random_player, O=player(alphabeta_search)), verbose=True).utility
# The alpha-beta player will never lose, but sometimes the random player can stumble into a draw. When two optimal (alpha-beta or minimax) players compete, it will always be a draw:
# Two optimal searchers face off; per the text this always ends in a draw (0).
play_game(TicTacToe(), dict(X=player(alphabeta_search), O=player(minimax_search)), verbose=True).utility
# # Connect Four
#
# Connect Four is a variant of tic-tac-toe, played on a larger (7 x 6) board, and with the restriction that in any column you can only play in the lowest empty square in the column.
class ConnectFour(TicTacToe):
    """Connect Four: a 7 x 6 tic-tac-toe variant needing 4 in a row, where a
    piece may only be dropped onto the lowest empty square of a column."""

    def __init__(self):
        super().__init__(width=7, height=6, k=4)

    def actions(self, board):
        """Only the lowest empty square of each column is playable."""
        open_squares = self.squares - set(board)
        # Playable = on the bottom row, or directly above an occupied square.
        return {(x, y) for (x, y) in open_squares
                if y == board.height - 1 or (x, y + 1) in board}
# Two random players on Connect Four; the outcome varies from run to run.
play_game(ConnectFour(), dict(X=random_player, O=random_player), verbose=True).utility
# # Transposition Tables
#
# By treating the game tree as a tree, we can arrive at the same state through different paths, and end up duplicating effort. In state-space search, we kept a table of `reached` states to prevent this. For game-tree search, we can achieve the same effect by applying the `@cache` decorator to the `min_value` and `max_value` functions. We'll use the suffix `_tt` to indicate a function that uses these transposition tables.
def minimax_search_tt(game, state):
    """Minimax with a transposition table; return a (value, move) pair.

    Identical to minimax_search except that the max/min values are memoized
    by state (via @cache), so positions reached along different move orders
    are evaluated only once.  Requires states to be hashable.
    """
    player = state.to_move

    @cache
    def maximize(s):
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = -infinity, None
        for action in game.actions(s):
            value, _ = minimize(game.result(s, action))
            if value > best_value:
                best_value, best_move = value, action
        return best_value, best_move

    @cache
    def minimize(s):
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = +infinity, None
        for action in game.actions(s):
            value, _ = maximize(game.result(s, action))
            if value < best_value:
                best_value, best_move = value, action
        return best_value, best_move

    return maximize(state)
# For alpha-beta search, we can still use a cache, but it should be based just on the state, not on whatever values alpha and beta have.
# +
def cache1(function):
    """Memoize `function` keyed on its FIRST positional argument only.

    Later calls with the same first argument return the cached result even if
    the remaining arguments differ — useful for alpha-beta, where the cache
    should depend on the state but not on the alpha/beta window.
    """
    memo = {}

    def wrapped(x, *args):
        try:
            return memo[x]
        except KeyError:
            memo[x] = function(x, *args)
            return memo[x]

    return wrapped
def alphabeta_search_tt(game, state):
    """Alpha-beta search with a transposition table; return (value, move).

    Memoized with cache1, which keys on the state alone and deliberately
    ignores the alpha/beta window, per the design described in the text.
    Searches all the way to the leaves.
    """
    player = state.to_move

    @cache1
    def maximize(s, alpha, beta):
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = -infinity, None
        for action in game.actions(s):
            value, _ = minimize(game.result(s, action), alpha, beta)
            if value > best_value:
                best_value, best_move = value, action
            alpha = max(alpha, best_value)
            if best_value >= beta:
                return best_value, best_move
        return best_value, best_move

    @cache1
    def minimize(s, alpha, beta):
        if game.is_terminal(s):
            return game.utility(s, player), None
        best_value, best_move = +infinity, None
        for action in game.actions(s):
            value, _ = maximize(game.result(s, action), alpha, beta)
            if value < best_value:
                best_value, best_move = value, action
            beta = min(beta, best_value)
            if best_value <= alpha:
                return best_value, best_move
        return best_value, best_move

    return maximize(state, -infinity, +infinity)
# -
# %time play_game(TicTacToe(), {'X':player(alphabeta_search_tt), 'O':player(minimax_search_tt)})
# %time play_game(TicTacToe(), {'X':player(alphabeta_search), 'O':player(minimax_search)})
# # Heuristic Cutoffs
# +
def cutoff_depth(d):
    """Return a cutoff predicate that stops the search below depth `d`."""
    def deeper_than_d(game, state, depth):
        return depth > d
    return deeper_than_d
def h_alphabeta_search(game, state, cutoff=cutoff_depth(6), h=lambda s, p: 0):
    """Depth-limited alpha-beta search; return a (value, move) pair.

    `cutoff(game, state, depth)` decides where to stop searching early, and
    `h(state, player)` supplies a heuristic value for a cut-off state.
    Values are memoized per state via cache1 (the window and depth are
    ignored by the cache, matching the original design).
    """
    player = state.to_move

    @cache1
    def maximize(s, alpha, beta, depth):
        if game.is_terminal(s):
            return game.utility(s, player), None
        if cutoff(game, s, depth):
            return h(s, player), None
        best_value, best_move = -infinity, None
        for action in game.actions(s):
            value, _ = minimize(game.result(s, action), alpha, beta, depth + 1)
            if value > best_value:
                best_value, best_move = value, action
            alpha = max(alpha, best_value)
            if best_value >= beta:
                return best_value, best_move
        return best_value, best_move

    @cache1
    def minimize(s, alpha, beta, depth):
        if game.is_terminal(s):
            return game.utility(s, player), None
        if cutoff(game, s, depth):
            return h(s, player), None
        best_value, best_move = +infinity, None
        for action in game.actions(s):
            value, _ = maximize(game.result(s, action), alpha, beta, depth + 1)
            if value < best_value:
                best_value, best_move = value, action
            beta = min(beta, best_value)
            if best_value <= alpha:
                return best_value, best_move
        return best_value, best_move

    return maximize(state, -infinity, +infinity, 0)
# -
# %time play_game(TicTacToe(), {'X':player(h_alphabeta_search), 'O':player(h_alphabeta_search)})
# %time play_game(ConnectFour(), {'X':player(h_alphabeta_search), 'O':random_player}, verbose=True).utility
# %time play_game(ConnectFour(), {'X':player(h_alphabeta_search), 'O':player(h_alphabeta_search)}, verbose=True).utility
# +
class CountCalls:
    """Transparent proxy that forwards attribute lookups to a wrapped object
    while tallying each accessed attribute name in `._counts`."""

    def __init__(self, obj):
        self._object = obj
        self._counts = Counter()

    def __getattr__(self, attr):
        "Count the access, then fetch the attribute from the wrapped object."
        self._counts[attr] += 1
        return getattr(self._object, attr)
def report(game, searchers):
    """Run each searcher once from the initial state and print how many
    `result` and `is_terminal` calls it made (via a CountCalls wrapper)."""
    for searcher in searchers:
        game = CountCalls(game)  # rewrap so each searcher gets fresh counters
        searcher(game, game.initial)
        template = 'Result states: {:7,d}; Terminal tests: {:7,d}; for {}'
        print(template.format(game._counts['result'],
                              game._counts['is_terminal'],
                              searcher.__name__))
# Compare node-expansion counts across the four searchers on tic-tac-toe.
report(TicTacToe(), (alphabeta_search_tt, alphabeta_search, h_alphabeta_search, minimax_search_tt))
# -
# # Monte Carlo Tree Search
# NOTE(review): unfinished draft of Monte Carlo Tree Search — both definitions
# below lack bodies, so this cell does not parse; kept as-is pending completion.
class Node:
    def __init__(self, parent, )
def mcts(state, game, N=1000):
# # Heuristic Search Algorithms
# +
# Instrument a game to count the method calls two minimax players make.
t = CountCalls(TicTacToe())
# Fix: `minimax_player` was undefined anywhere in this notebook; the idiom
# used elsewhere is the player() wrapper around a search algorithm.
play_game(t, dict(X=player(minimax_search), O=player(minimax_search)), verbose=True)
t._counts
# -
for tactic in (three, fork, center, opposite_corner, corner, any):
for s in squares:
if tactic(board, s,player): return s
for s ins quares:
if tactic(board, s, opponent): return s
# +
def ucb(U, N, C=2**0.5, parentN=100):
    """UCB1 score rounded to 2 places: exploitation term U/N plus the
    exploration bonus C * sqrt(ln(parentN) / N)."""
    exploitation = U / N
    exploration = C * math.sqrt(math.log(parentN) / N)
    return round(exploitation + exploration, 2)

# Compare the three child scores under two exploration constants.
{C: (ucb(60, 79, C), ucb(1, 10, C), ucb(2, 11, C))
 for C in (1.4, 1.5)}
# +
def ucb(U, N, parentN=100, C=2):
    # Raw UCB1 on explicit counts.  NOTE: superseded by the Node-based ucb(n)
    # redefined further down in this same cell.
    return U/N + C * math.sqrt(math.log(parentN)/N)
C = 1.4  # exploration constant used by the Node-based ucb below
class Node:
    # Toy MCTS tree node: U = total reward, N = visit count,
    # p = probability that a simulated playout from this node scores 1.
    def __init__(self, name, children=(), U=0, N=0, parent=None, p=0.5):
        self.__dict__.update(name=name, U=U, N=N, parent=parent, children=children, p=p)
        for c in children:
            c.parent = self
    def __repr__(self):
        return '{}:{}/{}={:.0%}{}'.format(self.name, self.U, self.N, self.U/self.N, self.children)
def select(n):
    # Descend to a leaf, always following the child with the best UCB score.
    # `ucb` resolves at call time, so the LAST definition below is the one used.
    if n.children:
        return select(max(n.children, key=ucb))
    else:
        return n
def back(n, amount):
    # Backpropagate one playout result, flipping the reward at each level up.
    if n:
        n.N += 1
        n.U += amount
        back(n.parent, 1 - amount)
def one(root):
    # One MCTS iteration: select a leaf, simulate (Bernoulli(p)), backpropagate.
    n = select(root)
    amount = int(random.uniform(0, 1) < n.p)
    back(n, amount)
def ucb(n):
    # Node-based UCB1; unvisited nodes score +inf so each child is tried once.
    return (float('inf') if n.N == 0 else
            n.U / n.N + C * math.sqrt(math.log(n.parent.N)/n.N))
tree = Node('root', [Node('a', p=.8, children=[Node('a1', p=.05),
                                               Node('a2', p=.25,
                                                    children=[Node('a2a', p=.7), Node('a2b')])]),
                     Node('b', p=.5, children=[Node('b1', p=.6,
                                                    children=[Node('b1a', p=.3), Node('b1b')]),
                                               Node('b2', p=.4)]),
                     Node('c', p=.1)])
for i in range(100):
    one(tree);
for c in tree.children: print(c)
'select', select(tree), 'tree', tree
# +
# Minimum-coin change computed three ways, each memoized with lru_cache.
us = (100, 50, 25, 10, 5, 1)  # US coin denominations, in cents
infinity = float('inf')

@lru_cache(None)
def f1(n, denom):
    """Fewest coins summing to n: take-or-skip recursion over denominations."""
    if n == 0:
        return 0
    if n < 0 or not denom:
        return infinity
    take = 1 + f1(n - denom[0], denom)
    skip = f1(n, denom[1:])
    return min(take, skip)

@lru_cache(None)
def f2(n, denom):
    """Fewest coins summing to n: try every denomination at each step."""
    @lru_cache(None)
    def f(n):
        if n == 0:
            return 0
        if n < 0:
            return infinity
        return 1 + min(f(n - d) for d in denom)
    return f(n)

@lru_cache(None)
def f3(n, denom):
    """Fewest coins summing to n: pick a count for the largest coin, recurse."""
    if n == 0:
        return 0
    if n < 0 or not denom:
        return infinity
    return min(k + f2(n - k * denom[0], denom[1:])
               for k in range(1 + n // denom[0]))

def g(n, d=us):
    """Run all three solvers on the same input; results should agree."""
    return f1(n, d), f2(n, d), f3(n, d)

n = 12345
# %time f1(n, us)
# %time f2(n, us)
# %time f3(n, us)
# -
| games4e.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''algae'': conda)'
# language: python
# name: python3
# ---
# + id="NhlwmgZhxo9o"
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import numpy as np
from algae_population import *
import matplotlib.pyplot as plt
# Global matplotlib styling for the paper figures.
SMALL_SIZE = 16
MEDIUM_SIZE = 18
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
# Line-color cycle actually used below (via `cycler`).
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
                  '#f781bf', '#a65628', '#984ea3',
                  '#999999', '#e41a1c', '#dede00']
# NOTE(review): this diverging red-to-blue palette is defined but not used
# anywhere below — confirm whether it can be removed.
color_cycle = [
    "#67001f",
    "#b2182b",
    "#d6604d",
    "#f4a582",
    "#fddbc7",
    "#f7f7f7",
    "#d1e5f0",
    "#92c5de",
    "#4393c3",
    "#2166ac",
    "#053061"
]
from matplotlib import cm
colors = cm.get_cmap('viridis',12) # 'viridis', 12)
# prop_cycle = plt.rcParams['axes.prop_cycle']
# colors = prop_cycle.by_key()['color']
# colormap = plt.cm.gist_ncar
cycler = plt.cycler('color',CB_color_cycle)
# cycler = plt.cycler('color', plt.cm.jet(np.linspace(0, 1, 12)))
# Print floats with 3 decimals in array output.
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
# %matplotlib tk
# -
import pickle

# pickle.dump(solutions_age, open('figure2a.p','wb'))
# Load cached simulation results; `with` guarantees the file handles are
# closed (the previous open(...) calls leaked them).
with open('figure3a.p', 'rb') as fh:
    solutions_age = pickle.load(fh)

# +
# import pickle
# pickle.dump(data,open('figure2b.p','wb'))
with open('figure3b.p', 'rb') as fh:
    data = pickle.load(fh)
# + tags=[]
import math
# Figure 3: (a) yield over time, (b) time to reach 90% vs. water replacement,
# (c) the ratio I (last state variable over total yield), on a log scale.
fig,ax = plt.subplots(1,3, figsize=(20,5))
ax[0].set_prop_cycle(cycler)
ax[1].set_prop_cycle(cycler)
ax[2].set_prop_cycle(cycler)
for i, solution in enumerate(solutions_age):
    filename = solution[0]['s'][0].replace('/','-')
    # print(filename)
    # Stitch the piecewise ODE solutions into one time series.
    t = np.hstack([s.t for s in solution])
    tmp = np.hstack([s.y for s in solution])
    # Total yield = sum of all state variables except the last.
    yeeld = tmp[:-1,:].sum(axis=0)
    I = tmp[-1,:]/yeeld
    I = np.clip(I, 1e-1, 10.)
    ax[0].plot(t, yeeld,'-')#,color=colors[i]
    ax[2].semilogy(t, I,'-')#,color=viridis.colors[i,:])
# ax[1].set_yscale('log')
# ax[1].set_ylim([0,5])
ax[0].set_xlim([0, 120])
ax[2].set_xlim([0, 120])
# first axis
ax[0].set_xlabel('days')
ax[0].set_ylabel(r'Yield kg/m$^3$')
# second axis
ax[2].set_xlabel('days')
ax[2].set_ylabel(r'$I$')
ax[2].yaxis.set_label_coords(-0.1,.5)
# ax[0].text(10,9,solution[0]['s'][0])
# fig.savefig(f'(unknown)_{replacement}_days.png')
# fig.savefig(filename+"_10days.svg")
ax[0].text(5,9,'a)')
ax[2].text(5,8,'c)')
ax[1].text(10, 68, 'b)')
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
replacements = [1, 5, 10, 15, 20, 25, 30]
# markers = ['o','s','^','>','<','v','h','+','x','D','1','2']
# fig, ax = plt.subplots(figsize=(12,12))
# For each replacement interval, find when each scenario's revenue first
# reaches 9 (NaN when it never does; argmax==0 is used as the sentinel).
rr = []
for i in range(len(replacements)):
    t = []
    for j in range(len(scenarios)):
        ind = np.argmax(data[i][j]['revenue'] >= 9)
        if ind == 0:
            t.append(np.nan)
        else:
            t.append(data[i][j]['times'][ind])
    rr.append(t)
rr = np.array(rr)
# NOTE(review): rr columns are indexed by scenario, but this loop runs over
# len(replacements) — assumes len(scenarios) == len(replacements); confirm.
for i in range(len(replacements)):
    ax[1].plot(replacements,rr[:,i],'-')#,color=colors[i,:])#, marker = markers[j], label=list(scenarios.items())[j][0])
# ax.set_yscale('log')
ax[2].legend(scenarios.keys(),loc='best',bbox_to_anchor=[1.2,0.15],fontsize=10)
# fmt = mpl.ticker.StrMethodFormatter("{x:g}")
# ax.yaxis.set_major_formatter(fmt)
ax[1].set_xlabel(r'Water repl. (days)')
ax[1].set_ylabel(r'Time to reach 90%')# 1 kg/m$^3$')
ax[1].xaxis.set_major_locator(MultipleLocator(5))
ax[1].xaxis.set_major_formatter('{x:.0f}')
# ax[0].yaxis.set_minor_formatter(fmt)
# ax[1].yaxis.set_major_formatter(fmt)
# ax[1].yaxis.set_minor_formatter(fmt)
plt.show()
fig.savefig('figure3.png',dpi=300,bbox_inches='tight',
            transparent=True,
            pad_inches=0)
# -
| figure3_create_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sumiranphq/deepL/blob/master/sentiment_data_cleaning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KUPsUaSxYLvI" colab_type="text"
# Sentiment Analysis
#
# using sample data from twitter
# + id="AY4uQSWVYJU1" colab_type="code" colab={}
import nltk
from nltk.corpus import twitter_samples
import matplotlib.pyplot as plt
import random
# + [markdown] id="DgyntMspZIvR" colab_type="text"
# The sample data consists of 5k positive and 5k negative tweets
# + id="FHQn4LsUYuCB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="77275543-f332-445c-9a2e-1271b907571d"
# Fetch the bundled twitter_samples corpus, then load both polarity files.
nltk.download('twitter_samples')
positive_tweets = twitter_samples.strings('positive_tweets.json')
negative_tweets = twitter_samples.strings('negative_tweets.json')
# + id="aIZWZtMmZ589" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="8ec73dad-a895-4536-9129-fd4574e6d411"
# Sanity-check the corpus: 5k tweets per class, stored as lists of strings.
print('The no. of positive tweets,', len(positive_tweets))
print('The no. of negative tweets,', len(negative_tweets))
print('The type of tweets: ', type(positive_tweets))
print('The type of tweet entry:', type(negative_tweets[0]))
# + id="7Qq7RoWSbJzF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 303} outputId="4045a646-ed68-4635-bc47-f13f8c4c3dfb"
# Pie chart of the class balance (50/50 by construction of the corpus).
fig = plt.figure(figsize=(5,5))
labels = 'Positive', 'Negative'
sizes = [len(positive_tweets), len(negative_tweets)]
plt.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
plt.axis('equal')
plt.show()
# + id="sQUNWcxwcg2b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="0344d1e8-0ceb-4ce4-8abc-ae13fcb88765"
# Show one random tweet of each polarity (green = positive, red = negative).
# random.choice avoids the off-by-one in randint(0, 5000), which is inclusive
# and could index one past the end of a 5000-element list.
print('\033[92m', random.choice(positive_tweets))
print('\033[91m', random.choice(negative_tweets))
# + [markdown] id="2U7n5i-CeZD9" colab_type="text"
# # **Preprocessing Text**
#
# * tokenize strings
# * lowercase
# * remove stop words
# * stemming
#
# note the presence of emoticons and URLs would help later
# + id="swhbbWsueKTv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="63de97a8-efc2-45a1-b6f8-6d6739ef7af1"
nltk.download('stopwords')  # needed below for stop-word removal
# + id="F7xQpuDafVW7" colab_type="code" colab={}
import re
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
# + [markdown] id="d-vOUa3ygB3Q" colab_type="text"
# **Clean data**
#
# remove hashtags, hyperlinks, retweet marks
# + id="8Fr-aNXGf5A3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5a6258eb-5b77-4c0c-ce7f-77dbeb3b00e2"
tweet = positive_tweets[2277]
tweet = re.sub(r'^RT[\s]+', '', tweet)  # drop a leading retweet marker
# NOTE(review): the greedy `.*` removes everything from the URL to the end of
# the line, not just the URL itself — confirm that is intended.
tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)
tweet = re.sub(r'#', '', tweet)  # keep hashtag text, drop only the '#'
print(tweet)
# + id="oTr1MbzmiNDG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="060fa049-74b9-401b-eafd-8e99ca03c272"
# Tokenize: lowercase, strip @handles, and shorten repeated characters.
tokenizer = TweetTokenizer(preserve_case=False, reduce_len=True, strip_handles=True)
tweet_tokens = tokenizer.tokenize(tweet)
print(tweet_tokens)
# + id="iYV2BAvUjANB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="6d9e6e2e-b08a-49ba-c8a3-3de27af1b566"
# English stop words from NLTK; punctuation from the stdlib string module.
stopwords_words = stopwords.words('english')
print(string.punctuation)
# Keep only tokens that are neither stop words nor punctuation.
clean_tweet = [word for word in tweet_tokens
               if word not in stopwords_words and word not in string.punctuation]
print(clean_tweet)
# + id="RidBqAwfkSnC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cb06fed5-09f5-4be3-8873-9a239d7ddd6c"
# Reduce each surviving token to its Porter stem.
stemmer = PorterStemmer()
stemmed_tweet = [stemmer.stem(word) for word in clean_tweet]
print(stemmed_tweet)
# + id="7WlnQcuIlKwE" colab_type="code" colab={}
| sentiment_data_cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: robustness_analysis_venv
# language: python
# name: robustness_analysis_venv
# ---
# # Example of analyzing a neural feedback loop
# ## Load libraries and set random seed
import numpy as np
import nn_closed_loop.dynamics as dynamics
import nn_closed_loop.analyzers as analyzers
import nn_closed_loop.constraints as constraints
from nn_closed_loop.utils.nn import load_controller, load_controller_unity
from nn_closed_loop.utils.utils import range_to_polytope, get_polytope_A
np.random.seed(seed=0)  # fix the RNG so sampling-based results are reproducible
# ## Load NN control policy, $\pi$
# Load the pretrained NN control policy for the double-integrator example.
controller = load_controller(name="double_integrator")
print(controller)
# ## Load dynamics model (double integrator), $\mathbf{x}_{t+1} = f(\mathbf{x}_t; \pi)$
dyn = dynamics.DoubleIntegrator()
print(dyn)
# ## Define initial state set, $\mathcal{X}_0$
# +
# Note: this notebook uses $\ell_infty$-ball sets
# Initial set X0: an axis-aligned (l-infinity) box; one [min, max] row per state.
init_state_range = np.array(
    [ # (num_inputs, 2)
        [2.5, 3.0], # x0min, x0max
        [-0.25, 0.25], # x1min, x1max
    ]
)
# Convert these into constraint objects
input_constraint = constraints.LpConstraint(
    range=init_state_range, p=np.inf
)
# Empty output constraint; filled in by get_reachable_set below.
output_constraint = constraints.LpConstraint(p=np.inf)
print(input_constraint)
# -
# ## Define partitioner hyperparameters
# Hyperparameters for splitting the input set into sub-regions.
partitioner_hyperparams = {
    "type": "GreedySimGuided",
    "make_animation": False,
    "show_animation": False,
}
# ## Define propagator hyperparameters
# CROWN bound propagation; input shape inferred from the state-range array.
propagator_hyperparams = {
    "type": "CROWN",
    "input_shape": init_state_range.shape[:-1],
}
# ## Initialize analyzer, partitioner, propagator
analyzer = analyzers.ClosedLoopAnalyzer(controller, dyn)
# NOTE(review): the hyperparameter dicts are assigned directly — presumably
# these attributes are property setters that construct the partitioner and
# propagator objects; confirm against the nn_closed_loop API.
analyzer.partitioner = partitioner_hyperparams
analyzer.propagator = propagator_hyperparams
print(analyzer)
# ## Compute the reachable sets
t_max = 5  # number of time steps to propagate the reachable set forward
output_constraint, analyzer_info = analyzer.get_reachable_set(
    input_constraint, output_constraint, t_max=t_max
)
print(output_constraint.range)
# analyzer_info contains more details
# ## Generate a visualization of the input/output mapping
# Plot sampled trajectories alongside the computed reachable-set bounds,
# highlighting the two state dimensions x0 and x1.
analyzer.visualize(
    input_constraint,
    output_constraint,
    show_samples=True,
    show=True,
    labels=None,
    aspect="auto",
    iteration=None,
    inputs_to_highlight=[{"dim": [0], "name": "$x_0$"}, {"dim": [1], "name": "$x_1$"}],
    **analyzer_info
)
# ## Estimate the output set error
# Estimate how much the computed bounds over-approximate the true sets.
final_error, avg_error, errors = analyzer.get_error(input_constraint, output_constraint, t_max=t_max)
print('Final step approximation error: {:.2f}'.format(final_error))
print('Average approximation error: {:.2f}'.format(avg_error))
print('All errors: {}'.format(errors))
| jupyter_notebooks/closed_loop_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="_DDaAex5Q7u-"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" id="W1dWWdNHQ9L0"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="6Y8E0lw5eYWm"
# # 훈련 후 정수 양자화
# + [markdown] id="CIGrZZPTZVeO"
# <table class="tfo-notebook-buttons" align="left">
# <td> <a target="_blank" href="https://www.tensorflow.org/lite/performance/post_training_integer_quant"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">View on TensorFlow.org</a> </td>
# <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/lite/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a> </td>
# <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/lite/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a> </td>
# <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/lite/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">Download notebook</a> </td>
# </table>
# + [markdown] id="BTC1rDAuei_1"
# ## 개요
#
# 정수 양자화는 32bit 부동 소수점 숫자(예: 가중치 및 활성화 출력)를 가장 가까운 8bit 고정 소수점 숫자로 변환하는 최적화 전략입니다. 그 결과 모델이 작아지고 추론 속도가 증가하여 [마이크로 컨트롤러](https://www.tensorflow.org/lite/microcontrollers)와 같은 저전력 장치에 유용합니다. 이 데이터 형식은 [에지 TPU](https://coral.ai/)와 같은 정수 전용 가속기에도 필요합니다.
#
# 이 가이드에서는 MNIST 모델을 처음부터 훈련하고 Tensorflow Lite 파일로 변환하고 [훈련 후 양자화](https://www.tensorflow.org/lite/performance/post_training_quantization)로 양자화합니다. 마지막으로 변환된 모델의 정확성을 확인하고 원본 부동 모델과 비교합니다.
#
# 실제로 모델을 양자화하려는 정도에 대한 몇 가지 옵션이 있습니다. 이 튜토리얼에서는 모든 가중치와 활성화 출력을 8bit 정수 데이터로 변환하는 '전체 정수 양자화'를 수행합니다. 반면 다른 전략은 일부 양의 데이터를 부동 소수점에 남길 수 있습니다.
#
# 다양한 양자화 전략에 대해 자세히 알아 보려면 [TensorFlow Lite 모델 최적화](https://www.tensorflow.org/lite/performance/model_optimization)에 대해 읽어보세요.
#
# + [markdown] id="dDqqUIZjZjac"
# ## 설정
# + [markdown] id="I0nR5AMEWq0H"
# 입력 및 출력 텐서를 양자화하려면 TensorFlow r2.3에 추가된 API를 사용해야 합니다.
# + id="WsN6s5L1ieNl"
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)

import tensorflow as tf
import numpy as np

# Quantized input/output tensors require APIs added in TF 2.3.  Compare the
# (major, minor) tuple: the old float(tf.__version__[:3]) check misparses
# two-digit minors like "2.10" (float("2.1") < 2.3) and wrongly fails.
assert tuple(map(int, tf.__version__.split('.')[:2])) >= (2, 3)
# + [markdown] id="2XsEP17Zelz9"
# ## TensorFlow 모델 생성하기
# + [markdown] id="5NMaNZQCkW9X"
# [MNIST 데이터세트](https://www.tensorflow.org/datasets/catalog/mnist)에서 숫자를 분류하는 간단한 모델을 만들어 보겠습니다.
#
# 이 훈련은 약 ~98%의 정확성으로 훈련하는 단 5 epoch 동안 모델을 훈련하기 때문에 오래 걸리지 않을 것입니다.
# + id="eMsw_6HujaqM"
# Load MNIST dataset
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images.astype(np.float32) / 255.0
test_images = test_images.astype(np.float32) / 255.0
# Define the model architecture: a small convnet emitting 10 digit logits.
model = tf.keras.Sequential([
  tf.keras.layers.InputLayer(input_shape=(28, 28)),
  tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
  tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
  tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
  tf.keras.layers.Flatten(),
  tf.keras.layers.Dense(10)
])
# Train the digit classification model
# from_logits=True because the final Dense layer has no softmax.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(
                  from_logits=True),
              metrics=['accuracy'])
model.fit(
  train_images,
  train_labels,
  epochs=5,
  validation_data=(test_images, test_labels)
)
# + [markdown] id="KuTEoGFYd8aM"
# ## TensorFlow Lite 모델로 변환하기
# + [markdown] id="xl8_fzVAZwOh"
# 이제 [`TFLiteConverter` ](https://www.tensorflow.org/lite/convert/python_api) API를 사용하여 훈련된 모델을 TensorFlow Lite 형식으로 변환하고 다양한 정도의 양자화를 적용할 수 있습니다.
#
# 일부 양자화 버전은 일부 데이터를 부동 형식으로 남겨 둡니다. 따라서 다음 섹션에서는 완전히 int8 또는 uint8 데이터인 모델을 얻을 때까지 양자화 양이 증가하는 각 옵션을 보여줍니다(각 옵션에 대한 모든 양자화 단계를 볼 수 있도록 각 섹션에서 일부 코드를 복제합니다).
#
# 먼저, 양자화없이 변환된 모델이 있습니다.
# + id="_i8B2nDZmAgQ"
# Baseline conversion: no quantization, all parameters stay float32.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# + [markdown] id="7BONhYtYocQY"
# 이제 TensorFlow Lite 모델이지만 모든 매개변수 데이터에 대해 여전히 32bit 부동 소수점 값을 사용하고 있습니다.
# + [markdown] id="jPYZwgZTwJMT"
# ### 동적 범위 양자화를 사용하여 변환하기
#
# + [markdown] id="Hjvq1vpJd4U_"
# 이제 기본 `optimizations` 플래그를 활성화하여 모든 고정 매개변수(예: 가중치)를 양자화합니다.
# + id="HEZ6ET1AHAS3"
# Dynamic-range quantization: fixed parameters (weights) are quantized;
# other variable data remains float.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model_quant = converter.convert()
# + [markdown] id="o5wuE-RcdX_3"
# 모델은 이제 양자화된 가중치로 약간 더 작아지지만 다른 변수 데이터는 여전히 부동 형식입니다.
# + [markdown] id="UgKDdnHQEhpb"
# ### 부동 폴백 양자화를 사용하여 변환하기
# + [markdown] id="rTe8avZJHMDO"
# 변수 데이터(예: 모델 입력/출력 및 레이어 간 중간)를 양자화하려면 [`RepresentativeDataset`](https://www.tensorflow.org/api_docs/python/tf/lite/RepresentativeDataset)을 제공해야 합니다. 이것은 일반적인 값을 나타낼 만큼 충분히 큰 입력 데이터세트를 제공하는 생성기 함수입니다. 해당 함수는 변환기로 모든 가변 데이터에 대한 동적 범위를 추정할 수 있습니다(데이터세트는 훈련 또는 평가 데이터세트와 비교할 때 고유할 필요가 없습니다). 여러 입력을 지원하기 위해 각 대표 데이터 포인트는 목록으로 이루어졌고 목록의 요소는 인덱스에 따라 모델에 제공됩니다.
#
# + id="FiwiWU3gHdkW"
def representative_data_gen():
  """Yield ~100 training images (batch size 1) so the converter can estimate
  the dynamic range of activations for quantization."""
  for input_value in tf.data.Dataset.from_tensor_slices(train_images).batch(1).take(100):
    # Model has only one input so each data point has one element.
    yield [input_value]
# Full-integer quantization with float fallback for unsupported ops.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
tflite_model_quant = converter.convert()
# + [markdown] id="_GC3HFlptf7x"
# 이제 모든 가중치와 변수 데이터가 양자화되고 모델은 원본 TensorFlow Lite 모델에 비해 훨씬 작습니다.
#
# 그러나 전통적으로 부동 모델 입력 및 출력 텐서를 사용하는 애플리케이션과의 호환성을 유지하기 위해 TensorFlow Lite 변환기는 모델 입력 및 출력 텐서를 부동 상태로 둡니다.
# + id="id1OEKFELQwp"
# With float fallback, the model's input/output tensors remain float32.
interpreter = tf.lite.Interpreter(model_content=tflite_model_quant)
input_type = interpreter.get_input_details()[0]['dtype']
print('input: ', input_type)
output_type = interpreter.get_output_details()[0]['dtype']
print('output: ', output_type)
# + [markdown] id="RACBJuj2XO8x"
# 일반적으로 호환성에는 좋지만 에지 TPU와 같이 정수 기반 작업만 수행하는 기기와는 호환되지 않습니다.
#
# 또한 TensorFlow Lite에 해당 연산에 대한 양자화된 구현이 포함되어 있지 않은 경우 위의 프로세스는 부동 형식으로 연산을 남길 수 있습니다. 이 전략을 사용하면 변환을 완료할 수 있으므로 더 작고 효율적인 모델을 사용할 수 있지만, 정수 전용 하드웨어와는 호환되지 않습니다(이 MNIST 모델의 모든 연산에는 양자화된 구현이 있습니다).
#
# 따라서 엔드 투 엔드 정수 전용 모델을 보장하려면 몇 가지 매개변수가 더 필요합니다.
# + [markdown] id="FQgTqbvPvxGJ"
# ### 정수 전용 양자화를 사용하여 변환하기
# + [markdown] id="mwR9keYAwArA"
# 입력 및 출력 텐서를 양자화하고, 양자화할 수 없는 연산이 발생하는 경우 변환기에서 오류를 발생시키려면 몇 가지 추가 매개변수를 사용하여 모델을 다시 변환합니다.
# + id="kzjEjcDs3BHa"
def representative_data_gen():
  """Calibration generator: yields 100 single-image batches from `train_images`."""
  for input_value in tf.data.Dataset.from_tensor_slices(train_images).batch(1).take(100):
    yield [input_value]
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Ensure that if any ops can't be quantized, the converter throws an error
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Set the input and output tensors to uint8 (APIs added in r2.3)
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant = converter.convert()
# + [markdown] id="wYd6NxD03yjB"
# 내부 양자화는 위와 동일하게 유지되지만 입력 및 출력 텐서는 이제 정수 형식임을 알 수 있습니다.
#
# + id="PaNkOS-twz4k"
# Re-check the I/O dtypes of the integer-only model.
interpreter = tf.lite.Interpreter(model_content=tflite_model_quant)
input_type = interpreter.get_input_details()[0]['dtype']
print('input: ', input_type)
output_type = interpreter.get_output_details()[0]['dtype']
print('output: ', output_type)
# + [markdown] id="TO17AP84wzBb"
# 이제 모델의 입력 및 출력 텐서에 정수 데이터를 사용하는 정수 양자화 모델이 있으므로 [에지 TPU](https://coral.ai)와 같은 정수 전용 하드웨어와 호환됩니다.
# + [markdown] id="sse224YJ4KMm"
# ### 모델을 파일로 저장하기
# + [markdown] id="4_9nZ4nv4b9P"
# 다른 기기에 모델을 배포하려면 `.tflite` 파일이 필요합니다. 따라서 변환된 모델을 파일로 저장한 다음 아래에서 추론을 실행할 때 로드해보겠습니다.
# + id="BEY59dC14uRv"
import pathlib
# Write both converted models under /tmp so they can be reloaded below.
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
# Save the unquantized/float model:
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
# Save the quantized model:
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)
# + [markdown] id="9t9yaTeF9fyM"
# ## TensorFlow Lite 모델 실행하기
# + [markdown] id="L8lQHMp_asCq"
# 이제 TensorFlow Lite [`Interpreter`](https://www.tensorflow.org/api_docs/python/tf/lite/Interpreter)로 추론을 실행하여 모델 정확성을 비교합니다.
#
# 먼저 주어진 모델과 이미지로 추론을 실행한 다음 예측을 반환하는 함수가 필요합니다.
#
# + id="X092SbeWfd1A"
# Helper function to run inference on a TFLite model
def run_tflite_model(tflite_file, test_image_indices):
  """Run the TFLite model stored at `tflite_file` on the test images selected
  by `test_image_indices`.

  Returns a 1-D int array of predicted class labels (argmax of the model
  output), one entry per requested index.
  """
  global test_images

  # Initialize the interpreter once and reuse it for every image.
  interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()[0]
  output_details = interpreter.get_output_details()[0]

  predictions = np.zeros((len(test_image_indices),), dtype=int)
  for i, test_image_index in enumerate(test_image_indices):
    # NOTE: the original also read test_labels here into an unused local;
    # the label is not needed for inference, so that dead read is removed.
    test_image = test_images[test_image_index]

    # Check if the input type is quantized, then rescale input data to uint8
    if input_details['dtype'] == np.uint8:
      input_scale, input_zero_point = input_details["quantization"]
      test_image = test_image / input_scale + input_zero_point

    # Add the batch dimension and cast to the tensor's expected dtype.
    test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
    interpreter.set_tensor(input_details["index"], test_image)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details["index"])[0]

    predictions[i] = output.argmax()

  return predictions
# + [markdown] id="2opUt_JTdyEu"
# ### 하나의 이미지에서 모델 테스트하기
#
# + [markdown] id="QpPpFPaz7eEM"
# 이제 부동 모델과 양자화된 모델의 성능을 비교해 보겠습니다.
#
# - `tflite_model_file`은 부동 소수점 데이터가 있는 원본 TensorFlow Lite 모델입니다.
# - `tflite_model_quant_file`은 정수 전용 양자화를 사용하여 변환된 마지막 모델입니다(입력 및 출력에 uint8 데이터 사용).
#
# 예측값을 출력하는 다른 함수를 만들어 보겠습니다.
# + id="zR2cHRUcUZ6e"
import matplotlib.pylab as plt
# Change this to test a different image
test_image_index = 1
## Helper function to test the models on one image
def test_model(tflite_file, test_image_index, model_type):
  """Run one image through the model and plot it, titling the figure with
  the true and predicted labels. `model_type` only labels the plot."""
  global test_labels
  predictions = run_tflite_model(tflite_file, [test_image_index])
  plt.imshow(test_images[test_image_index])
  template = model_type + " Model \n True:{true}, Predicted:{predict}"
  _ = plt.title(template.format(true= str(test_labels[test_image_index]), predict=str(predictions[0])))
  plt.grid(False)
# + [markdown] id="A5OTJ_6Vcslt"
# 이제 부동 모델을 테스트합니다.
# + id="iTK0x980coto"
# Predict one image with the float (unquantized) model.
test_model(tflite_model_file, test_image_index, model_type="Float")
# + [markdown] id="o3N6-UGl1dfE"
# 그리고 양자화된 모델을 테스트합니다.
# + id="rc1i9umMcp0t"
# Predict the same image with the integer-quantized model.
test_model(tflite_model_quant_file, test_image_index, model_type="Quantized")
# + [markdown] id="LwN7uIdCd8Gw"
# ### 모든 이미지에서 모델 평가하기
# + [markdown] id="RFKOD4DG8XmU"
# 이제 이 튜토리얼의 시작 부분에서 로드한 모든 테스트 이미지를 사용하여 두 모델을 모두 실행해보겠습니다.
# + id="05aeAuWjvjPx"
# Helper function to evaluate a TFLite model on all images
def evaluate_model(tflite_file, model_type):
  """Print the accuracy of the TFLite model at `tflite_file` over the whole
  test set. `model_type` is only used to label the printed report."""
  global test_images
  global test_labels

  # Classify every test image in one pass.
  all_indices = range(test_images.shape[0])
  preds = run_tflite_model(tflite_file, all_indices)

  # Percentage of predictions matching the ground-truth labels.
  n_samples = len(test_images)
  correct = np.sum(test_labels == preds)
  accuracy = (correct * 100) / n_samples

  print('%s model accuracy is %.4f%% (Number of test samples=%d)'
        % (model_type, accuracy, n_samples))
# + [markdown] id="xnFilQpBuMh5"
# 부동 모델을 평가합니다.
# + id="T5mWkSbMcU5z"
# Accuracy of the float model over the full test set.
evaluate_model(tflite_model_file, model_type="Float")
# + [markdown] id="Km3cY9ry8ZlG"
# 양자화된 모델을 평가합니다.
# + id="-9cnwiPp6EGm"
# Accuracy of the integer-quantized model over the full test set.
evaluate_model(tflite_model_quant_file, model_type="Quantized")
# + [markdown] id="L7lfxkor8pgv"
# 이제 부동 모델과 비교하여 정확성에 거의 차이가 없는 정수로 모델을 양자화했습니다.
#
# 다른 양자화 전략에 대해 자세히 알아 보려면 [TensorFlow Lite 모델 최적화](https://www.tensorflow.org/lite/performance/model_optimization)에 대해 읽어보세요.
| site/ko/lite/performance/post_training_integer_quant.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={}
# # Project Euler: Problem 9
# + [markdown] nbgrader={}
# https://projecteuler.net/problem=9
#
# A Pythagorean triplet is a set of three natural numbers, $a < b < c$, for which,
#
# $$a^2 + b^2 = c^2$$
#
# For example, $3^2 + 4^2 = 9 + 16 = 25 = 5^2$.
#
# There exists exactly one Pythagorean triplet for which $a + b + c = 1000$. Find the product abc.
# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# YOUR CODE HERE
def Pyth_Trip(x):
    """Return [a, b, c, a*b*c] for the Pythagorean triplet a < b < c with
    a + b + c == x, or None when no such triplet exists.

    Instead of brute-forcing every (a, b, c) combination (O(x^3)), iterate
    over (c, b) and derive the only possible a = x - b - c, which is O(x^2)
    and visits candidates in the same (lowest c, then lowest b) order as the
    original triple loop, so it returns the identical result.
    """
    # since c > b > a, the limits on b (and the derived a) can be restricted
    for c in range(0, x):
        for b in range(0, c):
            a = x - b - c  # the single candidate a for this (b, c) pair
            # a must satisfy 0 <= a < b, mirroring the original inner loop
            if 0 <= a < b and a**2 + b**2 == c**2:
                return [a, b, c, a*b*c]
    return None
# -
# With the quadratic search this now runs almost instantly
print(Pyth_Trip(1000))
# + deletable=false nbgrader={"checksum": "b69b04efdc2f53ed5e3904c5ed86c12e", "grade": true, "grade_id": "projecteuler9", "points": 10}
# This cell will be used for grading, leave it at the end of the notebook.
| assignments/assignment02/ProjectEuler9.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: fastai
# language: python
# name: fastai
# ---
# # "fastcore: An Underrated Python Library"
#
# > A unique python library that extends the python programming language and provides utilities that enhance productivity.
# - author: "<a href='https://twitter.com/HamelHusain'>Hamel Husain</a>, <a href='https://twitter.com/jeremyphoward'>Jeremy Howard</a>"
# - toc: false
# - image: images/copied_from_nb/fastcore_imgs/td.png
# - comments: true
# - categories: [fastcore, fastai]
# - permalink: /fastcore/
# - badges: true
# 
#
# # Background
#
# I recently embarked on a journey to sharpen my python skills: I wanted to learn advanced patterns, idioms, and techniques. I started with reading books on advanced Python, however, the information didn't seem to stick without having somewhere to apply it. I also wanted the ability to ask questions from an expert while I was learning -- which is an arrangement that is hard to find! That's when it occurred to me: What if I could find an open source project that has fairly advanced python code and write documentation and tests? I made a bet that if I did this it would force me to learn everything very deeply, and the maintainers would be appreciative of my work and be willing to answer my questions.
#
# And that's exactly what I did over the past month! I'm pleased to report that it has been the most efficient learning experience I've ever experienced. I've discovered that writing documentation forced me to deeply understand not just what the code does but also _why the code works the way it does_, and to explore edge cases while writing tests. Most importantly, I was able to ask questions when I was stuck, and maintainers were willing to devote extra time knowing that their mentorship was in service of making their code more accessible! It turns out the library I choose, [fastcore](https://fastcore.fast.ai/) is some of the most fascinating Python I have ever encountered as its purpose and goals are fairly unique.
#
# For the uninitiated, [fastcore](https://fastcore.fast.ai/) is a library on top of which many [fast.ai](https://github.com/fastai) projects are built on. Most importantly, [fastcore](https://fastcore.fast.ai/) extends the python programming language and strives to eliminate boilerplate and add useful functionality for common tasks. In this blog post, I'm going to highlight some of my favorite tools that fastcore provides, rather than sharing what I learned about python. My goal is to pique your interest in this library, and hopefully motivate you to check out the documentation after you are done to learn more!
#
# # Why fastcore is interesting
#
# 1. **Get exposed to ideas from other languages without leaving python:** I’ve always heard that it is beneficial to learn other languages in order to become a better programmer. From a pragmatic point of view, I’ve found it difficult to learn other languages because I could never use them at work. Fastcore extends python to include patterns found in languages as diverse as Julia, Ruby and Haskell. Now that I understand these tools I am motivated to learn other languages.
# 2. **You get a new set of pragmatic tools**: fastcore includes utilities that will allow you to write more concise expressive code, and perhaps solve new problems.
# 3. **Learn more about the Python programming language:** Because fastcore extends the python programming language, many advanced concepts are exposed during the process. For the motivated, this is a great way to see how many of the internals of python work.
#
#
# # A whirlwind tour through fastcore
#
# Here are some things you can do with fastcore that immediately caught my attention.
#hide
from fastcore.foundation import *
from fastcore.utils import *
from fastcore.test import *
from nbdev.showdoc import *
from fastcore.dispatch import typedispatch
from functools import partial
import numpy as np
import inspect
# ---
#
# ## Making **kwargs transparent
#
# Whenever I see a function that has the argument <strong>**kwargs</strong>, I cringe a little. This is because it means the API is obfuscated and I have to read the source code to figure out what valid parameters might be. Consider the below example:
# +
# `foo` forwards **kwargs to `baz`, but its signature hides that fact.
def baz(a, b=2, c =3, d=4): return a + b + c

def foo(c, a, **kwargs):
    return c + baz(a, **kwargs)

inspect.signature(foo)
# -
# Without reading the source code, it might be hard for me to know that `foo` also accepts additional parameters `b` and `d`. We can fix this with [`delegates`](https://fastcore.fast.ai/foundation.html#delegates):
# +
def baz(a, b=2, c =3, d=4): return a + b + c

@delegates(baz) # this decorator will pass down keyword arguments from baz
def foo(c, a, **kwargs):
    return c + baz(a, **kwargs)

inspect.signature(foo)
# -
# You can customize the behavior of this decorator. For example, you can have your cake and eat it too by passing down your arguments and also keeping `**kwargs`:
# +
@delegates(baz, keep=True)
def foo(c, a, **kwargs):
    return c + baz(a, **kwargs)

inspect.signature(foo)
# -
# You can also exclude arguments. For example, we exclude argument `d` from delegation:
# +
def basefoo(a, b=2, c =3, d=4): pass

@delegates(basefoo, but= ['d']) # exclude `d`
def foo(c, a, **kwargs): pass

inspect.signature(foo)
# -
# You can also delegate between classes:
# +
class BaseFoo:
    def __init__(self, e, c=2): pass

@delegates()# since no argument was passed here we delegate to the superclass
class Foo(BaseFoo):
    def __init__(self, a, b=1, **kwargs): super().__init__(**kwargs)

inspect.signature(Foo)
# -
# For more information, read the [docs on delegates](https://fastcore.fast.ai/foundation.html#delegates).
#
# ___
# ## Avoid boilerplate when setting instance attributes
#
# Have you ever wondered if it was possible to avoid the boilerplate involved with setting attributes in `__init__`?
# Manual attribute assignment: every name is repeated three times.
class Test:
    def __init__(self, a, b ,c):
        self.a, self.b, self.c = a, b, c
# Ouch! That was painful. Look at all the repeated variable names. Do I really have to repeat myself like this when defining a class? Not Anymore! Checkout [store_attr](https://fastcore.fast.ai/utils.html#store_attr):
# +
# store_attr() copies every __init__ argument onto self automatically.
class Test:
    def __init__(self, a, b, c):
        store_attr()

t = Test(5,4,3)
assert t.b == 4
# -
# You can also exclude certain attributes:
# +
# `but` lists arguments that should NOT be stored on the instance.
class Test:
    def __init__(self, a, b, c):
        store_attr(but=['c'])

t = Test(5,4,3)
assert t.b == 4
assert not hasattr(t, 'c')
# -
# There are many more ways of customizing and using `store_attr` than I highlighted here. Check out [the docs](https://fastcore.fast.ai/utils.html#store_attr) for more detail.
# ---
# ## Avoiding subclassing boilerplate
#
# One thing I hate about python is the `__super__().__init__()` boilerplate associated with subclassing. For example:
# +
# Classic subclassing: the child must remember to call super().__init__().
class ParentClass:
    def __init__(self): self.some_attr = 'hello'

class ChildClass(ParentClass):
    def __init__(self):
        super().__init__()

cc = ChildClass()
assert cc.some_attr == 'hello' # only accessible b/c you used super
# -
# We can avoid this boilerplate by using the metaclass [PrePostInitMeta](https://fastcore.fast.ai/foundation.html#PrePostInitMeta). We define a new class called `NewParent` that is a wrapper around the `ParentClass`:
# +
# __pre_init__ runs before the child's __init__, calling the parent for us.
class NewParent(ParentClass, metaclass=PrePostInitMeta):
    def __pre_init__(self, *args, **kwargs): super().__init__()

class ChildClass(NewParent):
    def __init__(self):pass

sc = ChildClass()
assert sc.some_attr == 'hello'
# -
# ---
#
# ## Type Dispatch
#
# Type dispatch, or [Multiple dispatch](https://en.wikipedia.org/wiki/Multiple_dispatch#Julia), allows you to change the way a function behaves based upon the input types it receives. This is a prominent feature in some programming languages like Julia. For example, this is a [conceptual example](https://en.wikipedia.org/wiki/Multiple_dispatch#Julia) of how multiple dispatch works in Julia, returning different values depending on the input types of x and y:
#
# ```julia
# collide_with(x::Asteroid, y::Asteroid) = ...
# # deal with asteroid hitting asteroid
#
# collide_with(x::Asteroid, y::Spaceship) = ...
# # deal with asteroid hitting spaceship
#
# collide_with(x::Spaceship, y::Asteroid) = ...
# # deal with spaceship hitting asteroid
#
# collide_with(x::Spaceship, y::Spaceship) = ...
# # deal with spaceship hitting spaceship
# ```
#
# Type dispatch can be especially useful in data science, where you might allow different input types (i.e. Numpy arrays and Pandas dataframes) to a function that processes data. Type dispatch allows you to have a common API for functions that do similar tasks.
#
# Unfortunately, Python does not support this out-of-the box. Fortunately, there is the [@typedispatch](https://fastcore.fast.ai/dispatch.html#typedispatch-Decorator) decorator to the rescue. This decorator relies upon type hints in order to route inputs the correct version of the function:
# +
# Three implementations of `f`, selected at call time by the argument types.
@typedispatch
def f(x:str, y:str): return f'{x}{y}'

@typedispatch
def f(x:np.ndarray): return x.sum()

@typedispatch
def f(x:int, y:int): return x+y
# -
# Below is a demonstration of type dispatch at work for the function `f`:
f('Hello ', 'World!')
f(2,3)
f(np.array([5,5,5,5]))
# There are limitations of this feature, as well as other ways of using this functionality that [you can read about here](https://fastcore.fast.ai/dispatch.html). In the process of learning about typed dispatch, I also found a python library called [multipledispatch](https://github.com/mrocklin/multipledispatch) made by [Matthew Rocklin](https://github.com/mrocklin) (the creator of Dask).
#
# After using this feature, I am now motivated to learn languages like Julia to discover what other paradigms I might be missing.
#
# ---
# ## A better version of functools.partial
#
# `functools.partial` is a great utility that creates functions from other functions that lets you set default values. Lets take this function for example that filters a list to only contain values >= `val`:
# +
test_input = [1,2,3,4,5,6]

def f(arr, val):
    "Filter a list to remove any values that are less than val."
    return [x for x in arr if x >= val]

f(test_input, 3)
# -
# You can create a new function out of this function using `partial` that sets the default value to 5:
# stdlib partial: works, but discards f's docstring (shown next).
filter5 = partial(f, val=5)
filter5(test_input)
# One problem with `partial` is that it removes the original docstring and replaces it with a generic docstring:
filter5.__doc__
# [fastcore.utils.partialler](https://fastcore.fast.ai/utils.html#partialler) fixes this, and makes sure the docstring is retained such that the new API is transparent:
# partialler keeps the wrapped function's docstring intact.
filter5 = partialler(f, val=5)
filter5.__doc__
# ---
#
# ## Composition of functions
#
# A technique that is pervasive in functional programming languages is function composition, whereby you chain a bunch of functions together to achieve some kind of result. This is especially useful when applying various data transformations. Consider a toy example where I have three functions: (1) Removes elements of a list less than 5 (from the prior section) (2) adds 2 to each number (3) sums all the numbers:
# +
def add(arr, val): return [x + val for x in arr]
def arrsum(arr): return sum(arr)

# See the previous section on partialler
add2 = partialler(add, val=2)

# compose applies filter5, then add2, then arrsum, left to right.
transform = compose(filter5, add2, arrsum)
transform([1,2,3,4,5,6])
# -
# But why is this useful? You might be thinking, I can accomplish the same thing with:
#
# ```py
# arrsum(add2(filter5([1,2,3,4,5,6])))
# ```
# You are not wrong! However, composition gives you a convenient interface in case you want to do something like the following:
# +
def fit(x, transforms:list):
    "fit a model after performing transformations"
    # Apply all the supplied transforms in order before modeling.
    x = compose(*transforms)(x)
    y = [np.mean(x)] * len(x) # it's a dumb model. Don't judge me
    return y

# filters out elements < 5, adds 2, then predicts the mean
fit(x=[1,2,3,4,5,6], transforms=[filter5, add2])
# -
# For more information about `compose`, read [the docs](https://fastcore.fast.ai/utils.html#compose).
#
# ---
# ## A more useful <code>__repr__</code>
#
# In python, `__repr__` helps you get information about an object for logging and debugging. Below is what you get by default when you define a new class. (Note: we are using `store_attr`, which was discussed earlier).
# +
# Default __repr__: just the class name and memory address.
class Test:
    def __init__(self, a, b=2, c=3): store_attr() # `store_attr` was discussed previously

Test(1)
# -
# We can use [basic_repr](https://fastcore.fast.ai/utils.html#basic_repr) to quickly give us a more sensible default:
# +
# basic_repr lists the named attributes and their values instead.
class Test:
    def __init__(self, a, b=2, c=3): store_attr()
    __repr__ = basic_repr('a,b,c')

Test(2)
# -
# ---
#
# ## Monkey Patching With A Decorator
#
# It can be convenient to [monkey patch](https://www.geeksforgeeks.org/monkey-patching-in-python-dynamic-behavior/) with a decorator, which is especially helpful when you want to patch an external library you are importing. We can use the [decorator @patch](https://fastcore.fast.ai/foundation.html#patch) from `fastcore.foundation` along with type hints like so:
# +
class MyClass(int): pass

# @patch attaches `func` to MyClass, dispatching on the `self` annotation.
@patch
def func(self:MyClass, a): return self+a

mc = MyClass(3)
# -
# Now, `MyClass` has an additional method named `func`:
mc.func(10)
# Still not convinced? I'll show you another example of this kind of patching in the next section.
# ---
#
# ## A better pathlib.Path
#
# When you see [these extensions](https://fastcore.fast.ai/utils.html#Extensions-to-Pathlib.Path) to pathlib.path you won't ever use vanilla pathlib again! A number of additional methods have been added to pathlib, such as:
#
# - `Path.readlines`: same as `with open('somefile', 'r') as f: f.readlines()`
# - `Path.read`: same as `with open('somefile', 'r') as f: f.read()`
# - `Path.save`: saves file as pickle
# - `Path.load`: loads pickle file
# - `Path.ls`: shows the contents of the path as a list.
# - etc.
#
# [Read more about this here](https://fastcore.fast.ai/utils.html#Extensions-to-Pathlib.Path). Here is a demonstration of `ls`:
from pathlib import Path
p = Path('../_notebooks')
p.ls() # you don't get this with vanilla Pathlib.Path!!
# Wait! What's going on here? We just imported `pathlib.Path` - why are we getting this new functionality? Thats because we imported the `fastcore.foundation` module, which patches this module via the `@patch` decorator discussed earlier. Just to drive the point home on why the `@patch` decorator is useful, I'll go ahead and add another method to `Path` right now:
# +
# Extending the stdlib Path class on the fly with @patch.
@patch
def fun(self:Path): return "This is fun!"

p.fun()
# -
# That is magical, right? I know! That's why I'm writing about it!
# ---
#
# ## An Even More Concise Way To Create Lambdas
#
#
# `Self`, with an uppercase S, is an even more concise way to create lambdas that are calling methods on an object. For example, let's create a lambda for taking the sum of a Numpy array:
arr=np.array([5,4,3,2,1])
f = lambda a: a.sum()
assert f(arr) == 15
# You can use `Self` in the same way:
# Self.sum() builds a callable equivalent to the lambda above.
f = Self.sum()
assert f(arr) == 15
# Let's create a lambda that does a groupby and mean of a Pandas dataframe:
# +
import pandas as pd
df=pd.DataFrame({'Some Column': ['a', 'a', 'b', 'b', ],
              'Another Column': [5, 7, 50, 70]})

f = Self.groupby('Some Column').mean()
f(df)
# -
# Read more about `Self` in [the docs](https://fastcore.fast.ai/utils.html#Self-(with-an-uppercase-S)).
# ---
#
# ## Notebook Functions
#
# These are simple but handy, and allow you to know whether or not code is executing in a Jupyter Notebook, Colab, or an Ipython Shell:
# Report which interactive environment (if any) this code is running in.
in_notebook(), in_colab(), in_ipython()
# This is useful if you are displaying certain types of visualizations, progress bars or animations in your code that you may want to modify or toggle depending on the environment.
# ---
#
# ## A Drop-In Replacement For List
#
# You might be pretty happy with Python's `list`. This is one of those situations that you don't know you needed a better list until someone showed one to you. Enter `L`, a list like object with many extra goodies.
#
# The best way I can describe `L` is to pretend that `list` and `numpy` had a pretty baby:
# define a list (check out the nice `__repr__` that shows the length of the list!)
L(1,2,3)
# Shuffle a list:
p = L.range(20).shuffle()
p
# Index into a list:
# L supports fancy indexing with multiple positions at once.
p[2,4,6]
# L has sensible defaults, for example appending an element to a list:
1 + L(2,3,4)
# There is much more `L` has to offer. Read [the docs](https://fastcore.fast.ai/foundation.html#Class-L-Methods) to learn more.
# # But Wait ... There's More!
#
# 
#
# There are more things I would like to show you about fastcore, but there is no way they would reasonably fit into a blog post. Here is a list of some of my favorite things that I didn't demo in this blog post:
#
# ## Utilities
#
# The [Utilites](https://fastcore.fast.ai/utils.html) section contain many shortcuts to perform common tasks or provide an additional interface to what standard python provides.
#
# - [mk_class](https://fastcore.fast.ai/utils.html#mk_class): quickly add a bunch of attributes to a class
# - [wrap_class](https://fastcore.fast.ai/utils.html#wrap_class): add new methods to a class with a simple decorator
# - [groupby](https://fastcore.fast.ai/utils.html#groupby): similar to Scala's groupby
# - [merge](https://fastcore.fast.ai/utils.html#merge): merge dicts
# - [fastuple](https://fastcore.fast.ai/utils.html#fastuple): a tuple on steroids
# - [Infinite Lists](https://fastcore.fast.ai/utils.html#Infinite-Lists): useful for padding and testing
# - [chunked](https://fastcore.fast.ai/utils.html#chunked): for batching and organizing stuff
#
# ## Multiprocessing
#
# The [Multiprocessing section](http://fastcore.fast.ai/utils.html#Multiprocessing) extends python's multiprocessing library by offering features like:
#
# - progress bars
# - ability to pause to mitigate race conditions with external services
# - processing things in batches on each worker, ex: if you have a vectorized operation to perform in chunks
#
# ## Functional Programming
#
# The [functional programming section](http://fastcore.fast.ai/utils.html#Functions-on-Functions) is my favorite part of this library.
#
# - [maps](https://fastcore.fast.ai/utils.html#maps): a map that also composes functions
# - [mapped](https://fastcore.fast.ai/utils.html#mapped): A more robust `map`
# - [using_attr](https://fastcore.fast.ai/utils.html#using_attr): compose a function that operates on an attribute
#
# ## Transforms
#
# [Transforms](https://fastcore.fast.ai/transform.html) is a collection of utilities for creating data transformations and associated pipelines. These transformation utilities build upon many of the building blocks discussed in this blog post.
#
#
# ## Further Reading
#
# **It should be noted that you should read the [main page of the docs](https://fastcore.fast.ai/) first, followed by the section on [tests](https://fastcore.fast.ai/) to fully understand the documentation.**
#
# - The [fastcore documentation site](https://fastcore.fast.ai/).
# - The [fastcore GitHub repo](https://github.com/fastai/fastcore).
# - Blog post on [delegation](https://www.fast.ai/2019/08/06/delegation/).
# # Shameless plug: fastpages
#
# This blog post was written entirely in a Jupyter Notebook, which GitHub automatically converted into to a blog post! Sound interesting? [Check out fastpages](https://github.com/fastai/fastpages).
| _notebooks/2020-09-01-fastcore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # How to create a BoxPlot
# ### First we import magics
import Magics.macro as magics
# ### Define the projection
# +
# Cartesian projection: dates on the x-axis over the forecast period,
# temperature range on the y-axis.
map = magics.mmap(subpage_map_projection = "cartesian",
                  subpage_x_axis_type = 'date',
                  subpage_y_axis_type = 'regular',
                  subpage_x_date_min = '2016-11-03 00:00',
                  subpage_x_date_max = '2016-11-13 12:00',
                  subpage_y_min = 10.,
                  subpage_y_max = 40.,
                  subpage_y_position = 5.)

# Horizontal (time) axis with major/minor grey grid lines and a title.
horizontal_axis = magics.maxis(axis_orientation = "horizontal",
                               axis_type = 'date',
                               axis_grid = "on",
                               axis_grid_line_style = "solid",
                               axis_grid_thickness = 1,
                               axis_grid_colour = "grey",
                               axis_minor_tick ='on',
                               axis_minor_grid ='on',
                               axis_minor_grid_line_style = "dot",
                               axis_minor_grid_colour = "grey",
                               axis_title = 'on',
                               axis_title_text = "Time...",
                               )

# Vertical (value) axis with a solid grey grid.
vertical_axis = magics.maxis(axis_orientation = "vertical",
                             axis_grid = "on",
                             axis_grid_line_style = "solid",
                             axis_grid_thickness = 1,
                             axis_grid_colour = "grey",
                             )

title = magics.mtext(text_lines=['Setting up a Box Plot'])

# Render the empty frame first to check the axis setup.
magics.plot(map, horizontal_axis, vertical_axis, title)
# -
# ### Defining the input data ..
# In this example we are using a 10 days temperature forecast
# +
import numpy

# 6-hourly forecast valid times covering the 10-day period.
steps= ['2016-11-03 06:00', '2016-11-03 12:00', '2016-11-03 18:00',
        '2016-11-04 00:00', '2016-11-04 06:00', '2016-11-04 12:00', '2016-11-04 18:00',
        '2016-11-05 00:00', '2016-11-05 06:00', '2016-11-05 12:00', '2016-11-05 18:00',
        '2016-11-06 00:00', '2016-11-06 06:00', '2016-11-06 12:00', '2016-11-06 18:00',
        '2016-11-07 00:00', '2016-11-07 06:00', '2016-11-07 12:00', '2016-11-07 18:00',
        '2016-11-08 00:00', '2016-11-08 06:00', '2016-11-08 12:00', '2016-11-08 18:00',
        '2016-11-09 00:00', '2016-11-09 06:00', '2016-11-09 12:00', '2016-11-09 18:00',
        '2016-11-10 00:00', '2016-11-10 06:00', '2016-11-10 12:00', '2016-11-10 18:00',
        '2016-11-11 00:00', '2016-11-11 06:00', '2016-11-11 12:00', '2016-11-11 18:00',
        '2016-11-12 00:00', '2016-11-12 06:00', '2016-11-12 12:00', '2016-11-12 18:00',
        '2016-11-13 00:00']

# Per-step minimum/maximum values; renamed from `min`/`max` so the Python
# built-ins min() and max() are not shadowed in this notebook's namespace.
min_vals = numpy.array([ 20.11, 22.13, 26.13, 22.93, 20.19, 23.61, 28.19, 23.66,
                         21.63, 25.32, 29.07, 23.35, 22.26, 24.88, 28.81, 23.48,
                         20.83, 22.85, 26.6 , 21.36, 19.44, 22.81, 25.14, 20.95,
                         20.12, 22.74, 23.64, 20.62, 19.52, 22.31, 27.21, 21.16,
                         20.55, 23.7 , 25.92, 21.48, 20.99, 22.06, 23.51, 20.74])
max_vals = numpy.array([ 21.87, 25.78, 30.05, 24.85, 22.42, 27.4 , 32.44, 26.14,
                         23.29, 27.8 , 34.48, 27.21, 24.19, 28.01, 33.23, 27.4 ,
                         23.6 , 27.64, 34.16, 26.57, 22.8 , 27.02, 35.19, 26.19,
                         22.7 , 26.97, 32.82, 25.82, 22.55, 27.33, 33.13, 26.61,
                         23.21, 27.79, 34.48, 28.86, 25.17, 29.95, 34.86, 28.17])

# 75th and 25th percentiles (box limits) and the median line.
seventyfive = [ 21.05, 24.83, 28.67, 24.32, 21.99, 26.12, 30.73, 25.27,
                22.83, 27.49, 32.76, 26.43, 23.32, 26.93, 32.07, 25.99,
                22.63, 26.83, 32. , 25.56, 22.21, 25.99, 31.17, 24.97,
                22. , 26.16, 30.79, 24.68, 21.44, 26.05, 31.51, 25.37,
                22.23, 26.93, 32.31, 25.92, 22.9 , 27.67, 31.92, 25.75]
twentyfive = [ 20.45, 23.53, 27.29, 23.54, 21.29, 24.86, 29.28, 24.42,
               22.4 , 26.59, 31.12, 25.42, 22.7 , 26.09, 30.51, 25.11,
               21.91, 25.93, 29.79, 24.16, 21.57, 25.04, 28.95, 23.51,
               21.11, 25.05, 28.59, 22.86, 20.83, 24.96, 29.7 , 23.38,
               21.27, 25.32, 29.15, 23.73, 21.73, 25.26, 27.33, 23.05]
median = [ 20.87, 24.16, 27.86, 23.75, 21.78, 25.31, 30.02, 24.81,
           22.58, 27.13, 31.76, 25.74, 23.08, 26.58, 31.54, 25.5 ,
           22.3 , 26.35, 31.07, 24.95, 21.9 , 25.5 , 29.79, 24.35,
           21.48, 25.66, 29.74, 23.95, 21.11, 25.58, 30.3 , 24.39,
           21.73, 26.18, 31.14, 24.72, 22.43, 26.75, 29.92, 24.19]

boxplot = magics.mboxplot(boxplot_date_positions=steps,
                          boxplot_minimum_values = min_vals,
                          boxplot_maximum_values = max_vals,
                          boxplot_box_upper_values = seventyfive,
                          boxplot_box_lower_values = twentyfive,
                          boxplot_median_values = median,
                          boxplot_box_width = 0.5,
                          boxplot_box_colour = "navy")
# -
# ### Then we redefine the projection to take into account the values
map = magics.mmap(subpage_map_projection = "cartesian",
                  subpage_x_axis_type = 'date',
                  subpage_y_axis_type = 'regular',
                  subpage_x_date_min = steps[0],
                  subpage_x_date_max = steps[-1],
                  subpage_y_min = min_vals.min(),
                  subpage_y_max = max_vals.max(),
                  subpage_y_position = 5.)
magics.plot(map, horizontal_axis, vertical_axis, boxplot, title)
| notebook/boxplot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Spot-checking Classification Algorithms
# Discovering which algorithms perform well on our machine learning problem.
#
# ### Algorithms
#
# Linear machine learning algorithms:
# * Logistic Regression.
# * Linear Discriminant Analysis.
#
# Non-linear machine learning algorithms:
# * k-Nearest Neighbors.
# * Naive Bayes.
# * Classification and Regression Trees.
# * Support Vector Machines.
# +
#Read dataset
from pandas import read_csv
import pandas as pd
import numpy as np
np.set_printoptions(threshold=5)
np.set_printoptions(precision=2)
# Tab-separated protein-protein interaction data with numeric protein IDs.
fileName = 'Data/Processed/Numerized_Allergy_and_Asthma.txt'
names = ['Prot1', 'Prot2', 'Score']
ppiData = read_csv(fileName, delimiter='\t', names=names)
ppiList = ppiData.values
# Features: the two protein IDs; target: the interaction score/class.
X = ppiList[:, 0:2]
Y = ppiList[:, 2]
print(ppiData.shape)
# +
# No need for preprocessing (Scaling, Standardizing, Normalizing)
# because the X values are all same units representing proteins
# Split K data into train and test sets using KFold
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
seed = 7 # fixed RNG seed so the CV folds are reproducible
splits = 38 # Arbitrary number from intuition - sqrt(N), where N=Number of datasets
# Use the `splits` variable rather than repeating the literal 38, so the
# fold count is defined in exactly one place.
kfold = KFold(n_splits=splits, random_state=seed)
# -
# ## Linear Machine Learning Algorithms
# This section demonstrates minimal recipes for how to use two linear machine learning algorithms: logistic regression and linear discriminant analysis.
#
# ### Logistic Regression
# Logistic regression assumes a Gaussian distribution for the numeric input variables and can model binary classification problems. You can construct a logistic regression model using the LogisticRegression class.
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
# +
# Logistic Regression Classification
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
# Cross-validate over the KFold splits defined above.
results = cross_val_score(model, X, Y, cv=kfold)
# Report mean accuracy (%) and the standard deviation across folds.
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ### Linear Discriminant Analysis
# Linear Discriminant Analysis or LDA is a statistical technique for binary and multiclass classification. It too assumes a Gaussian distribution for the numerical input variables. You can construct an LDA model using the LinearDiscriminantAnalysis class.
# http://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html### Logistic Regression
#
# Logistic regression assumes a Gaussian distribution for the numeric input variables and can model binary classification problems. You can construct a logistic regression model using the LogisticRegression class.
# +
# LDA Classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model = LinearDiscriminantAnalysis()
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ## Nonlinear Machine Learning Algorithms
# This section demonstrates minimal recipes for how to use 4 nonlinear machine learning algorithms.
#
# ### k-Nearest Neighbors
# The k-Nearest Neighbors algorithm (or KNN) uses a distance metric to find the k most similar instances in the training data for a new instance and takes the mean outcome of the neighbors as the prediction. You can construct a KNN model using the KNeighborsClassifier class.
# http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
# +
# KNN Classification
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ### Naive Bayes
# Naive Bayes calculates the probability of each class and the conditional probability of each class given each input value. These probabilities are estimated for new data and multiplied together, assuming that they are all independent (a simple or naive assumption). When working with real-valued data, a Gaussian distribution is assumed to easily estimate the probabilities for input variables using the Gaussian Probability Density Function. You can construct a Naive Bayes model using the GaussianNB class.
# http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html
# +
# Gaussian Naive Bayes Classification
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
# Uses the same `kfold` splitter defined above so all models see identical folds
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ### Classification and Regression Trees
#
# Classification and Regression Trees (CART or just decision trees) construct a binary tree from the training data. Split points are chosen greedily by evaluating each attribute and each value of each attribute in the training data in order to minimize a cost function (like the Gini index). You can construct a CART model using the DecisionTreeClassifier class.
# http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
# +
# CART Classification
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ### Support Vector Machines
#
# Support Vector Machines (or SVM) seek a line that best separates two classes. Those data instances that are closest to the line that best separates the classes are called support vectors and influence where the line is placed. SVM has been extended to support multiple classes. Of particular importance is the use of different kernel functions via the kernel parameter. A powerful Radial Basis Function is used by default. You can construct an SVM model using the SVC class.
# http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# +
# SVM Classification (RBF kernel by default)
from sklearn.svm import SVC
model = SVC()
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ## Compare Machine Learning Algorithms
#
# It is important to compare the performance of multiple different machine learning algorithms consistently.
# Each model has different performance characteristics. Using resampling methods like cross-validation, we can get an estimate for how accurate each model may be on unseen data. We need to be able to use these estimates to choose one or two best models from the suite of models that we have created.
# +
# Compare Algorithms: evaluate every model on identical cross-validation
# folds, print per-model accuracy, then show a box plot of the fold scores.
from matplotlib import pyplot
# (label, estimator) pairs — all evaluated under the shared `kfold` splitter
models = [
    ('LR', LogisticRegression()),
    ('LDA', LinearDiscriminantAnalysis()),
    ('KNN', KNeighborsClassifier()),
    ('CART', DecisionTreeClassifier()),
    ('NB', GaussianNB()),
    ('SVM', SVC()),
]
results = []
names = []
scoring = 'accuracy'
for name, model in models:
    cv_results = cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    print("%s:\tAccuracy: %.2f%%\t Sigma: %.2f" % (name, cv_results.mean()*100, cv_results.std()))
# boxplot algorithm comparison
fig = pyplot.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
# -
# ## Bagging Algorithms
# Bootstrap Aggregation (or Bagging) involves taking multiple samples from your training dataset (with replacement) and training a model for each sample. The final output prediction is averaged across the predictions of all of the sub-models. The three bagging models covered in this section are as follows:
# * Bagged Decision Trees.
# * Random Forest.
# * Extra Trees.
#
# ### Bagged Decision Trees
# 15.2.1 Bagged Decision Trees Bagging performs best with algorithms that have high variance. A popular example are decision trees, often constructed without pruning. In the example below is an example of using the BaggingClassifier with the Classification and Regression Trees algorithm (DecisionTreeClassifier). A total of 100 trees are created.
# Running the example, we get a robust estimate of model accuracy.
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html
# +
# Bagged Decision Trees for Classification
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
cart = DecisionTreeClassifier()
# NOTE: num_trees is reused by the Random Forest / Extra Trees / AdaBoost /
# Gradient Boosting cells below, so changing it here changes them all.
num_trees = 50
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=seed)
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ### Random Forest
# Random Forests is an extension of bagged decision trees. Samples of the training dataset are taken with replacement, but the trees are constructed in a way that reduces the correlation between individual classifiers. Specifically, rather than greedily choosing the best split point in the construction of each tree, only a random subset of features are considered for each split. You can construct a Random Forest model for classification using the RandomForestClassifier class. The example below demonstrates using Random Forest for classification with 100 trees and split points chosen from a random selection of 3 features.
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# Running the example provides a mean estimate of classification accuracy.
# Random Forest Classification
from sklearn.ensemble import RandomForestClassifier
# max_features=2 means both feature columns (Prot1, Prot2) are candidates at
# every split; also reused by the Extra Trees cell below.
max_features = 2
model = RandomForestClassifier(n_estimators=num_trees, max_features=max_features)
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# ### Extra Trees
# Extra Trees are another modification of bagging where random trees are constructed from samples of the training dataset. You can construct an Extra Trees model for classification using the ExtraTreesClassifier class. The example below provides a demonstration of extra trees with the number of trees set to 100 and splits chosen from 7 random features.
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
# Running the example provides a mean estimate of classification accuracy.
# +
# Extra Trees Classification
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier(n_estimators=num_trees, max_features=max_features)
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ## Boosting Algorithms
# Boosting ensemble algorithms creates a sequence of models that attempt to correct the mistakes of the models before them in the sequence. Once created, the models make predictions which may be weighted by their demonstrated accuracy and the results are combined to create a final output prediction. The two most common boosting ensemble machine learning algorithms are:
# * AdaBoost.
# * Stochastic Gradient Boosting.
#
# ### AdaBoost
# AdaBoost was perhaps the first successful boosting ensemble algorithm. It generally works by weighting instances in the dataset by how easy or difficult they are to classify, allowing the algorithm to pay less attention to them in the construction of subsequent models. You can construct an AdaBoost model for classification using the AdaBoostClassifier class4 . The example below demonstrates the construction of 30 decision trees in sequence using the AdaBoost algorithm.
# +
# AdaBoost Classification
from sklearn.ensemble import AdaBoostClassifier
# num_trees / seed are defined in the bagging and KFold cells above
model = AdaBoostClassifier(n_estimators=num_trees, random_state=seed)
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ### Stochastic Gradient Boosting
#
# Stochastic Gradient Boosting (also called Gradient Boosting Machines) are one of the most sophisticated ensemble techniques. It is also a technique that is proving to be perhaps one of the best techniques available for improving performance via ensembles. You can construct a Gradient Boosting model for classification using the GradientBoostingClassifier class. The example below demonstrates Stochastic Gradient Boosting for classification with 100 trees.
# +
# Stochastic Gradient Boosting Classification
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier(n_estimators=num_trees, random_state=seed)
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ### Voting Ensemble
#
# Voting is one of the simplest ways of combining the predictions from multiple machine learning algorithms. It works by first creating two or more standalone models from your training dataset. A Voting Classifier can then be used to wrap your models and average the predictions of the sub-models when asked to make predictions for new data. The predictions of the sub-models can be weighted, but specifying the weights for classifiers manually or even heuristically is difficult. More advanced methods can learn how to best weight the predictions from sub-models, but this is called stacking (stacked aggregation) and is currently not provided in scikit-learn.
#
# You can create a voting ensemble model for classification using the VotingClassifier class. The code below provides an example of combining the predictions of logistic regression, classification and regression trees and support vector machines together for a classification problem
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.VotingClassifier.html
# +
# Voting Ensemble for Classification
from sklearn.ensemble import VotingClassifier
# create the sub models (same six estimators compared earlier)
estimators = []
estimators.append(('LR', LogisticRegression()))
estimators.append(('LDA', LinearDiscriminantAnalysis()))
estimators.append(('KNN', KNeighborsClassifier()))
estimators.append(('CART', DecisionTreeClassifier()))
estimators.append(('NB', GaussianNB()))
estimators.append(('SVM', SVC()))
# create the ensemble model (default hard/majority voting)
ensemble = VotingClassifier(estimators)
results = cross_val_score(ensemble, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ##### As a bonus...
# ### Neural network
#
# Class MLPClassifier implements a multi-layer perceptron (MLP) algorithm that trains using Backpropagation.
#
# http://scikit-learn.org/stable/modules/neural_networks_supervised.html
# +
#Neural network Classification (multi-layer perceptron, default architecture)
from sklearn.neural_network import MLPClassifier
# random_state fixed for reproducible weight initialisation
model = MLPClassifier(random_state=7)
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%%\tSigma: %.2f' % (results.mean()*100, results.std()))
# -
# ## Summary
#
# * Preprocessing not needed for this particular dataset
# * KFold Validation is the preferred method of checking the accuracy of our prediction models
# * All models performed dismally given the fact that we are predicting against using only two features: Prot1 & Prot2
# * ML Algorithm categories (Regression algorithms, Classification algorithms) were evaluated
# * Although Linear algorithms (LR & LDA) outperformed the rest, my choice for the best algorithm for this ML task is ```Stochastic Gradient Boosting```
# * This algorithm is a Boosting algorithm, meaning it learns from the mistakes of its earlier models and corrects itself, so I think it is the most suitable for making our predictions
# * There could be arguments to support other perspectives on this as well
# ## Making the Prediction
# Now we need to use Machine Learning to make predictions for Missing PPIs
# In this case, we will use ```Stochastic Gradient Boosting Classifier``` since it showed great promise
# You can as well use any prediction model
# We will fit our chosen model with the actual values and make predictions on Non PPI values
# Note that the model is just about 22% accurate :(
# +
# Read Non-PPI Dataset (Numerized) on which we predict upon
fileName = 'Data/Processed/Numerized_Allergy_and_Asthma_nonPPIs.txt'
names = ['Prot1', 'Prot2']
nonPPIDataNumerized = read_csv(fileName, delimiter='\t', names=names)
nonPPINumerizedList = nonPPIDataNumerized.values
# Converted the Python-2 print statements to print() calls: the statements
# are SyntaxErrors under Python 3, which the rest of this notebook targets.
print('Non PPI (Numerized) List:\n', nonPPINumerizedList)
print('Shape: ', nonPPINumerizedList.shape)
# +
# Read the actual (non-numerized) Non-PPI Dataset so we can use it to save the results
fileName = 'Data/Processed/Allergy_and_Asthma_nonPPIs.txt'
names = ['Prot1', 'Prot2']
nonPPIData = read_csv(fileName, delimiter='\t', names=names)
nonPPIList = nonPPIData.values
# print() calls instead of Python-2 print statements; label also corrected —
# this cell holds the actual protein names, not the numerized ids.
print('Non PPI List:\n', nonPPIList)
print('Shape: ', nonPPIList.shape)
# +
# Make the predictions and save the results in file
model = GradientBoostingClassifier(n_estimators=num_trees, random_state=seed)
model.fit(X, Y)  # fit model with actual PPI values
predictions = model.predict(nonPPINumerizedList)  # Make predictions on NonPPIs
# Keep only pairs predicted into the 900-or-greater score classes
results = []
for prediction, nonPPI in zip(predictions, nonPPIList):
    if prediction >= 900:
        results.append(nonPPI)
results = np.array(results)
# Python 3 print() calls (the original Python 2 print statements are a
# SyntaxError under Python 3).
print('Predicted PPI List:\n', results)
print('Shape: ', results.shape)
fileName = 'Data/Results/PredictionResultsPPIs.txt'
# np.savetxt(fileName, results, fmt='%s', delimiter='\t')  # NOTE: np.savetxt takes `delimiter`, not `sep`
pd.DataFrame(results).to_csv(fileName, sep='\t', header=None, index=False)
# -
# ### Results
# As you can see above, we have our results comprising 287 PPIs. These were selected with the criterion that they be predicted to have PPI scores of 900 or greater, meaning that they fall in the class of 900 or 950. Whether this is the best approach is debatable, since the ML only looks at the scores as classes and not as actual weighted values.
| Finding Missing PPIs using ML Algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.8 64-bit
# name: python3
# ---
import sys
sys.path.append("..")  # make the project-local utils/ and model/ packages importable
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import utils.dataset as myDataset
import utils.loss as myLoss
import model.model as myModel
# ----- training hyperparameters / paths -----
arg_batchSize = 72
arg_nEpoch = 100
arg_pretrainedModel = None  # set to a checkpoint path to resume training
# arg_pretrainedModel = "../model/pretrainedModel/final_facedet.pt"
arg_workers = 12  # DataLoader worker processes
arg_dataset = "../data/"
arg_split = "train"
arg_outName = "facedet.pt"  # suffix for saved checkpoint filenames
dataset = myDataset.FaceDataset(datapath = arg_dataset, split = arg_split)
dataloader = torch.utils.data.DataLoader(dataset, shuffle = True, batch_size = arg_batchSize, \
num_workers = arg_workers, drop_last = False)
# +
writer = SummaryWriter("../log/scene")
print("length of dataset: %s" % (len(dataloader)))
batch_num = len(dataloader)
# Pick GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = myModel.FaceKeypointModel()
model.apply(myModel.weights_init)
if arg_pretrainedModel is not None:
    # arg_pretrainedModel already holds the full relative path (see the
    # commented example in the config cell); the previous code prefixed
    # "../model/" again, producing "../model/../model/...".
    model.load_state_dict(torch.load(arg_pretrainedModel, map_location=device))
    print("Use model from " + arg_pretrainedModel)
else:
    print("Use new model")
os.makedirs("../model/pretrainedModel", exist_ok=True)
# Move the model to the selected device. The previous unconditional
# model.cuda() crashed on CPU-only machines even though `device` correctly
# fell back to "cpu".
model.to(device)
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.00001, betas=(0.9, 0.999))
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.2)
criterion = nn.MSELoss()
loss_sum = 0
for epoch in tqdm(range(arg_nEpoch)):
    for i, data in tqdm(enumerate(dataloader)):
        image, anno, gtmap = data
        image, anno, gtmap = image.to(device, dtype=torch.float), anno.to(device), gtmap.to(device, dtype=torch.float)
        image = image / 255.0  # scale pixel values into [0, 1]
        heatMap = model(image)
        # loss = myLoss.calLossMSE(heatMap, anno, gtmap)
        loss = criterion(heatMap, gtmap)
        # FIX: clear accumulated gradients before backprop. The original loop
        # never called zero_grad(), so every batch's gradients were summed
        # into the previous ones and the parameter updates were wrong.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar("training loss", loss.item(), epoch * len(dataloader) + i)
        loss_sum = loss_sum + loss.item()
        print("[ epoch: %d/%d batch: %d/%d ] loss: %f" % (epoch, arg_nEpoch, i + 1, batch_num, loss_sum))
        loss_sum = 0
    # checkpoint every 5 epochs
    if epoch % 5 == 4:
        torch.save(model.state_dict(), "../model/pretrainedModel/epo" + str(epoch) + arg_outName)
        print("Model saved at ../model/pretrainedModel/epo" + str(epoch) + arg_outName)
# FIX: the final checkpoint was saved as "1000_target_..." while the message
# claimed "final_...". Save and report the same path (matching the
# "final_facedet.pt" name the resume example in the config cell expects).
final_path = "../model/pretrainedModel/final_" + arg_outName
torch.save(model.state_dict(), final_path)
print("Model saved at " + final_path)
| tools/train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Pattern matching with the match() method (anchored at the string start)
import re # import the re module
line = 'パイソンです'
m = re.match('パイソン', line) # match 'パイソン' at the beginning of line
print(m.group()) # extract the matched substring
# +
# Compile a raw-string pattern into a Regex object, then match with it
line = 'パイソンです'
reg = re.compile(r'パイソン')
m = re.match(reg, line)
print(m.group())
# +
# Plain-literal pattern with search() (matches anywhere in the string)
line = 'そういやパイソンはどうなった?'
m = re.search(r'いや', line)
print(m)
print(m.group())
# +
# Alternation: match any one of several candidate strings
line = 'こんにちは、パイソン'
m = re.search(r'こんにちは|今日は|こんちは', line)
print(m.group())
# +
# Anchors: $ anchors the pattern to the end of the string
line = 'これ、いいじゃん'
m = re.search(r'じゃん$', line)
print(m.group())
# +
# Character class: match 'ども' followed by exactly one of [~ー…!、]
line = 'どもーっす'
m = re.search(r'ども[~ー…!、]', line)
print(m.group())
# +
# Wildcard: match 'うわっ、' + any 3 characters + '!'
line = 'うわっ、それか!'
m = re.search(r'うわっ、...!', line)
print(m.group())
# +
# Repetition: + matches one or more occurrences of '!'
line = 'ええーっ!!たったこれだけ?'
m = re.search(r'^ええーっ!+', line)
print(m.group())
# +
# Grouping with ( )
line = 'まじで、ほんとにそう思います'
m = re.search(r'(^まじ|ほんと)', line)
print(m.group())
# +
# Extract a phone number from a string using capture groups
# Build the Regex object
number = re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)')
# Search for the phone number
m = number.search('電話番号は001-111-9292です。')
m.group() # whole matched string
# -
m.group(0) # whole matched string (same as group())
m.group(1) # substring matched by group 1
m.group(2) # substring matched by group 2
# +
# groups() returns all captured substrings as a tuple
number = re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)')
m = number.search('電話番号は001-100-9292です。')
m.groups()
# +
# Unpack the group matches into separate variables
area_code, main_number = m.groups()
print(area_code)
# -
print(main_number)
# +
# Match the (area code)xxx-xxxx pattern — parentheses escaped with \( \)
# Build the Regex object
number = re.compile(r'(\(\d\d\d\))(\d\d\d-\d\d\d\d)')
# Search for the phone number
m = number.search('電話番号は(001)100-9292です。')
m.group(1)
# -
m.group(2)
# +
# Optional group: ? makes the area code optional
# Build the Regex object
number = re.compile(r'(\d\d\d-)?(\d\d\d-\d\d\d\d)')
# Search with the area code present
m1 = number.search('電話番号は001-100-9292です。')
m1.group()
# -
# Search with the area code absent
m2 = number.search('電話番号は100-9292です。')
m2.group()
# +
# Bounded repetition with { }
# Greedy: match 'は' repeated 3 to 5 times (takes as many as possible)
regex1 = re.compile(r'(は){3,5}')
m1 = regex1.search('わははははは')
m1.group()
# +
# Bounded repetition with { }
# Non-greedy (?): match 'は' 3 to 5 times, taking as few as possible
regex2 = re.compile(r'(は){3,5}?')
m2 = regex2.search('わははははは')
m2.group()
# +
# search() returns only the first match in the string
# Build the Regex object
num_regex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')
# Search for a phone number
m = num_regex.search('携帯:999-555-6666 自宅:001-100-9292')
m.group()
# +
# findall() returns every match as a list of strings
num_regex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')
num_regex.findall('携帯:999-555-6666 自宅:001-100-9292')
# +
# With groups in the pattern, findall() returns a list of tuples
num_regex = re.compile(r'(\d\d\d)-(\d\d\d)-(\d\d\d\d)')
num_regex.findall('携帯:999-555-6666 自宅:001-100-9292')
# +
# Shorthand classes: \d (digit), \s (whitespace), \w (word character)
regex = re.compile(r'\d+\s+\w+')
month = '1 January, 2 February, 3 March, 4 April, 5 May, 6 June'
regex.findall(month)
# +
# Character ranges: [0-5] matches any single digit from 0 to 5
reg = re.compile(r'[0-5]')
num = '1, 2, 3, 4, 5, 6, 7, 8'
reg.findall(num)
# +
# Match arbitrary text with .* (greedy; any characters except newline)
name_regex = re.compile(r'姓:(.*) 名:(.*)')
# The original executed this identical search twice in a row; once suffices.
m = name_regex.search('姓:秀和 名:太郎')
m.group(1)
# -
m.group(2)
# +
# '.*' alone: . does not match newlines by default,
reg1 = re.compile('.*')
# so only the first line of a multi-line string is matched
m1 = reg1.search('第1主成分\n第2主成分\n第3主成分')
m1.group()
# +
# re.DOTALL makes . match newline characters too
# Pass re.DOTALL as the second argument to compile()
reg2 = re.compile('.*', re.DOTALL)
# Now the match spans the newlines
m2 = reg2.search('第1主成分\n第2主成分\n第3主成分')
m2.group()
# +
# Case-insensitive matching with re.I
# Pass re.I as the second argument to re.compile()
regex = re.compile(r'python', re.I)
# Matches 'Python'
regex. search('Pythonは面白い').group()
# -
# Matches 'PYTHON'
regex. search ('PYTHONってよくわからない') .group()
# Matches 'python'
regex. search ('これがpythonなのか') .group()
# +
# Replace part of a string with sub().
# Renamed the variable: the original used `str`, shadowing the built-in.
text = '第1 四半期 売上高 売上予測'
regex = re.compile(r'第1 \w+')
regex.sub('2020年', text)
# +
# Rewrite matches using the first captured character
# NOTE(review): '<PASSWORD>' looks like a redacted placeholder — '<' is not a
# word character, so `password (\w)\w*` will not match it and sub() returns
# the string unchanged; the original text presumably held real passwords.
# Also note this `str` assignment shadows the built-in str().
str = 'password <PASSWORD> password <PASSWORD> password <PASSWORD>'
# Group 1 captures the first word character after 'password '
regex = re.compile(r'password (\w)\w*')
# Rewrite using the substring captured by group 1
regex.sub(r'\1****', str)
# +
# Capture up to the third character before rewriting
regex = re.compile(r'password (\w){3}\w*')
regex.sub(r'\1****', str)
# -
# re.VERBOSE lets a pattern span lines and carry inline comments
import re
phone = re.compile(r'''(
(0\d{0,3}|\(\d{0,3}\)) # 市外局番
(\s|-) # 区切り
(\d{1,4}) # 市内局番
(\s|-) # 区切り
(\d{3,4}) # 加入者番号
)''', re.VERBOSE)
# +
# A fuller phone-number pattern (area code, exchange, subscriber, extension)
import re
# Phone-number regex
phone_regex = re.compile(r'''(
(0\d{1,4}|\(0\d{1,4}\)) # 市外局番
(\s|-)? # 区切り
(\d{1,4}) # 市内局番
(\s|-) # 区切り
(\d{4}) # 加入者番号
(\s*(内線|\(内\)|\(内.{1,3}\))\s*(\d{2,5}))? # 内線番号
)''', re.VERBOSE)
# -
# Example: extract the phone number (with extension) from free text
str = '氏名:秀和太郎 住所:東京都中央区 電話番号: (001)5555-6767 (内線)365'
pho = phone_regex.search(str)
print(pho.group())
# +
# Regex pattern for email addresses
import re
# Email regex
mail_regex = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # ユーザー名
@ # @ 記号
[a-zA-Z0-9.-]+ # ドメイン名
(\.[a-zA-Z]{2,4}) # トップレベルドメイン
)''', re.VERBOSE)
# -
# Example match
# NOTE(review): '<EMAIL>' is a redacted placeholder with no '@', so search()
# returns None here and ml.group() raises AttributeError; the original text
# presumably contained a real address.
str = '氏名:秀和太郎 住所:東京都中央区 メールアドレス:<EMAIL>'
ml = mail_regex.search(str)
print(ml.group())
| sample/Python_GOKUI/Python_GOKUI/chap03/sec02/RegularExpression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/AaronReichert/DS-Unit-2-Linear-Models/blob/master/module3-ridge-regression/LS_DS_213_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-A2FxL6-PZhn" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 1, Module 3*
#
# ---
# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# # Ridge Regression
#
# ## Assignment
#
# We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.
#
# But not just for condos in Tribeca...
#
# - [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.
# - [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
# - [ ] Do one-hot encoding of categorical features.
# - [ ] Do feature selection with `SelectKBest`.
# - [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set)
# - [ ] Get mean absolute error for the test set.
# - [ ] As always, commit your notebook to your fork of the GitHub repo.
#
# The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
#
#
# ## Stretch Goals
#
# Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.
#
# - [ ] Add your own stretch goal(s) !
# - [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥
# - [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).
# - [ ] Learn more about feature selection:
# - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
# - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
# - [mlxtend](http://rasbt.github.io/mlxtend/) library
# - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
# - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
# - [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.
# - [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
# + colab_type="code" id="o9eSnDYhUGD7" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# !pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# +
import pandas as pd
import pandas_profiling
# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')
# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]
# SALE_PRICE was read as strings.
# Remove currency symbols, '-' placeholders, and thousands separators,
# then convert to integer
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$','')
    .str.replace('-','')
    .str.replace(',','')
    .astype(int)
)
# + id="QhkA1CT0EZl-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="b1dc6fda-7e74-4e07-9e22-7f16e0ca5a1b"
df.isnull().sum()
# + id="XvLveVSQhUXo" colab_type="code" colab={}
# Strip thousands separators so LAND_SQUARE_FEET can be treated numerically
df['LAND_SQUARE_FEET']=(df['LAND_SQUARE_FEET'].str.replace(',',''))
# + id="i4xuGOqGPZhx" colab_type="code" colab={}
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)
# + id="cNvkxZozPZhz" colab_type="code" colab={}
# Reduce cardinality for NEIGHBORHOOD feature
# Get a list of the top 10 neighborhoods
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index
# At locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# + id="kdJS7be8PZh2" colab_type="code" outputId="cb26526e-fea0-4b94-d0a5-16b00f3d0b1f" colab={"base_uri": "https://localhost:8080/", "height": 394}
# [ ] Use a subset of the data where BUILDING_CLASS_CATEGORY == '01 ONE FAMILY DWELLINGS'
df_one = df[df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS']
df_one.head()
# + id="_trpQkQ_R51j" colab_type="code" outputId="e0689cc2-9212-432b-fe95-95df9bcdbe02" colab={"base_uri": "https://localhost:8080/", "height": 168}
# [ ] Use a subset of the data where the sale price was more than 100 thousand and less than 2 million.
df_high = df_one[df_one['SALE_PRICE'] < 2000000]
# .copy() makes df_low an independent DataFrame, which silences the
# SettingWithCopyWarning raised below when SALE_DATE is converted in place.
df_low = df_high[df_high['SALE_PRICE'] > 100000].copy()
print(df_low.shape)
df_low.sample()
# + id="9Fx4Ptvq4tNP" colab_type="code" outputId="519d95b5-10d8-40e1-8749-cdab8c7de8ab" colab={"base_uri": "https://localhost:8080/", "height": 119}
# [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
df_date = df_low  # alias: this conversion mutates df_low as well
df_date['SALE_DATE'] = df_date['SALE_DATE'].astype('datetime64')
split = pd.to_datetime('2019-04-01')
df_train = df_low[df_low['SALE_DATE'] < split]
df_test = df_low[df_low['SALE_DATE'] >= split]
# + [markdown] id="PAwYOIJM9niG" colab_type="text"
# # I have no idea what that section of code is talking about
#
#
# ```
# /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning:
# A value is trying to be set on a copy of a slice from a DataFrame.
# Try using .loc[row_indexer,col_indexer] = value instead
#
# See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
#
# ```
#
#
# + id="H37oKNAN9IWc" colab_type="code" outputId="4bb95ffa-b0a8-4f0b-eadd-44bb19a1391a" colab={"base_uri": "https://localhost:8080/", "height": 643}
# double checking
df_train
# + id="dAxyGsTZ9LuP" colab_type="code" outputId="9bbc4009-23ac-4394-fc04-231c3ec88530" colab={"base_uri": "https://localhost:8080/", "height": 660}
df_test
# + id="auZ9mdnrt36Z" colab_type="code" outputId="07854e6c-828d-4656-c64f-82d515f6f60b" colab={"base_uri": "https://localhost:8080/", "height": 408}
# [ ] Do one-hot encoding of categorical features.
# first lets get info on the columns
colls=df_train.columns
print('unique values per column')
print('------------------------')
for col in colls:
print (f'{col} ({df_low[col].nunique()})')
# + id="KbGpdBJdvkuH" colab_type="code" outputId="78345b44-b492-42a4-b2ec-64e473bf5713" colab={"base_uri": "https://localhost:8080/", "height": 816}
# these should be 1 hot encoded (low-cardinality categoricals)
colls_hot=('BOROUGH','NEIGHBORHOOD','TAX_CLASS_AT_PRESENT','BUILDING_CLASS_AT_PRESENT','BUILDING_CLASS_AT_TIME_OF_SALE')
for col in colls_hot:
print (df_train[col].value_counts())
print()
# + id="w4WHDpx41cnt" colab_type="code" outputId="1062cd3e-ef36-429d-e6b0-6edfdd35adde" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# These should be left alone (numeric / high-cardinality columns)
colls=df_low.columns
colls_cold=colls.drop(['BOROUGH','NEIGHBORHOOD','TAX_CLASS_AT_PRESENT','BUILDING_CLASS_AT_PRESENT','BUILDING_CLASS_AT_TIME_OF_SALE'])
for col in colls_cold:
print (df_train[col].value_counts())
print()
# + id="wOxMnlCM2bDN" colab_type="code" colab={}
# now to do the actual one hot encoding
target = 'SALE_PRICE'
features = ['BOROUGH','NEIGHBORHOOD','TAX_CLASS_AT_PRESENT','BUILDING_CLASS_AT_PRESENT','BUILDING_CLASS_AT_TIME_OF_SALE']
# + id="WvA3mUoMLcPM" colab_type="code" colab={}
# okay lets do the test train split
X_train = df_train[features]
y_train = df_train[target]
X_test = df_test[features]
y_test = df_test[target]
# + id="nmJAetIy-U7T" colab_type="code" colab={}
import category_encoders as ce
# + id="Z70tjqN7K7gq" colab_type="code" colab={}
# One-hot encode the chosen features.
# Fit on the training set only, then transform both sets (never fit on test).
# The original assigned the fitted encoder object to X_train_enc and then
# immediately overwrote it — fit() is called for its side effect only.
encoder = ce.one_hot.OneHotEncoder(use_cat_names=True)
encoder.fit(X_train)
X_train_enc = encoder.transform(X_train)
X_test_enc = encoder.transform(X_test)
# + id="xu_AH8eDM1F1" colab_type="code" colab={}
# now lets add in the other columns (everything except the encoded
# categoricals, the target, and columns that are unusable as features)
target = 'SALE_PRICE'
features = ['BOROUGH','NEIGHBORHOOD','TAX_CLASS_AT_PRESENT','BUILDING_CLASS_AT_PRESENT','BUILDING_CLASS_AT_TIME_OF_SALE']
colls = df_low.columns
dropped = features + [target] + ['BUILDING_CLASS_CATEGORY','ADDRESS','APARTMENT_NUMBER','SALE_DATE','EASE-MENT','TAX_CLASS_AT_TIME_OF_SALE']
colls_merge = colls.drop(dropped)
X_train_merged = pd.concat([X_train_enc, df_train[colls_merge]], axis=1)
X_test_merged = pd.concat([X_test_enc, df_test[colls_merge]], axis=1)
# used this to double check
# print(X_train_enc.shape)
# print(df_train[colls_merge].shape)
# print(X_train_merged.shape)
# X_train_merged
# + id="DupxEgcvPjtw" colab_type="code" colab={}
# [ ] Do feature selection with SelectKBest.
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
import numpy as np
# + id="ZWgtQ7D8XGcd" colab_type="code" colab={}
# Cast every merged column to float64 so SelectKBest/LinearRegression accept them.
colls=X_train_merged.columns
for col in colls:
    X_train_merged[col]=X_train_merged[col].astype(np.float64)
# + id="-uLYq3D-Y0J5" colab_type="code" colab={}
X_train_merged
# + id="lCWROu4yX-mu" colab_type="code" colab={}
X_train_merged.info()
# + id="E4wmq3eIPqL5" colab_type="code" outputId="2a35b2eb-68c2-4350-b5ea-427c1b184a0f" colab={"base_uri": "https://localhost:8080/", "height": 799}
# how many to chose?
# Sweep k from 1 to the full feature count, fit a linear model on the k best
# features, and record test MAE for each k.
# NOTE(review): mae_train_list is initialised but never filled (the train-MAE
# lines are commented out).
mae_train_list=[]
mae_test_list=[]
features=len(X_train_merged.columns)
x=0
while x < features:
    x+=1
    selector=SelectKBest(score_func=f_regression,k=x)
    X_train_kbest=selector.fit_transform(X_train_merged,y_train)
    X_test_kbest=selector.transform(X_test_merged)
    model=LinearRegression()
    model.fit(X_train_kbest,y_train)
    # x_pred=model.predict(X_train_kbest)
    y_pred=model.predict(X_test_kbest)
    # mae_train=mean_absolute_error(y_pred,y_test)
    mae_test=mean_absolute_error(y_pred,y_test)
    # mae_train.append(mae)
    print(f'MAE on test with {x} features ${mae_test:,.2f}')
    mae_test_list.append(mae_test)
# + id="wX0ldKCUhobS" colab_type="code" colab={}
# + id="I88HvEeLFo32" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 799} outputId="e95be7ab-b233-4168-aa3e-12f08e4ef66a"
mae_test_list
# + id="1cBR7gzshI-C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4562db53-9a24-4b3c-8640-80c07d37dd3a"
X_train_merged['TAX_CLASS_AT_TIME_OF_SALE'].nunique()
# + id="mqhnob5cdbjy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 456} outputId="e0d29954-6786-433c-93e0-f25f50d19970"
X_train_merged
| module3-ridge-regression/LS_DS_213_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Grey `SPOT`
# + raw_mimetype="text/html" active=""
# <script>
# // Force "pop out" links to open in the browser (rather than download)
# document.addEventListener("DOMContentLoaded", function(event) {
# var links = document.getElementsByClassName("reference download internal")
# for (var i = 0; i < links.length; i++) {
# links[i].outerHTML = links[i].outerHTML.replace("download=\"\"", "");
# }
# });
# </script>
# + tags=["hide_input"]
# %matplotlib inline
# + tags=["hide_input"]
# %run notebook_setup.py
# + tags=["hide_input"]
# Disable lazy (symbolic) evaluation and progress output for this demo.
import starry
starry.config.lazy = False
starry.config.quiet = True
# -
# In this notebook we'll instantiate a Doppler map with the word "SPOT" and visualize it with the interactive ``show()`` method. The plots below are fully interactive: move the mouse and scroll over the maps to control the spectra that are displayed below them.
import starry
import numpy as np
# +
# Instantiate
# NOTE(review): `map` shadows the Python builtin of the same name (common in
# starry examples, but worth knowing if cells are reordered).
map = starry.DopplerMap(ydeg=15, nt=20, inc=60, veq=50000)
# Load the SPOT map
# A single Gaussian absorption line centred at 643 nm serves as the rest-frame spectrum.
spectrum = 1.0 - 0.75 * np.exp(-0.5 * (map.wav0 - 643.0) ** 2 / 0.0085 ** 2)
map.load(maps="spot", spectra=spectrum, smoothing=0.075)
# Visualize
map.visualize()
# + tags=["hide_input"]
# Also export the interactive visualization to a standalone HTML file.
map.visualize(file="doppler_grey.html")
# + raw_mimetype="text/restructuredtext" active=""
# Click here to :download:`pop out <doppler_grey.html>` the visualization.
| notebooks/DopplerImagingGallery_Grey.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import json
from math import log
pd.set_option('display.max_columns', 30)
pd.set_option('display.max_rows', 300)
# Quarter-end dates (yyyymmdd ints) matching the "As Of Date Bloomberg" column.
quarters = [20171231, 20180331, 20180630, 20180930, 20181231, 20190331]
splc_all = pd.read_excel("Supply Chain Data All.xlsx", sheet_name="Supply Chain")
splc_all.head()
central_ticker_list = splc_all['Central Ticker'].unique().tolist()
central_ticker_list
region = pd.read_excel("Supply_Chain_Data_20190510.xlsx", sheet_name="Country Code")
region.head()
# +
# scores
# Build scores[ticker] = {"Risk Score": [...], "Borrow Need": [...], "FX Need": [...]}
# with one entry per row (expected to be 4 quarters per ticker); NaNs become 0.
scores_sheet = pd.read_excel("abnormal financial change fangyi.xlsx")
scores = {}
for _, info in scores_sheet.iterrows():
    ticker = info["Tickers"]
    if ticker not in scores:
        scores[ticker] = {"Risk Score": [], "Borrow Need": [], "FX Need": []}
    # iterrows yields a copy, so mutating `info` only affects this iteration.
    if pd.isna(info["Borrow Need"]): info["Borrow Need"] = 0
    if pd.isna(info["FX Need"]): info["FX Need"] = 0
    if pd.isna(info["Risk Score"]): info["Risk Score"] = 0
    scores[ticker]["Borrow Need"].append(info["Borrow Need"])
    scores[ticker]["Risk Score"].append(info["Risk Score"])
    scores[ticker]["FX Need"].append(info["FX Need"])
#print(scores["000120 KS Equity"])
# Print any ticker that does not have exactly 4 quarterly entries.
for k, v in scores.items():
    try:
        assert(len(v["Borrow Need"]) == 4)
        assert(len(v["FX Need"]) == 4)
        assert(len(v["Risk Score"]) == 4)
    except:
        print(k)
# +
data_centrality = pd.read_excel("Impact Score(4).xlsx")
centrality_hub = {}
centrality_auth = {}
for i in range(6):
comp_q_data = data_centrality[(data_centrality["date"] == quarters[i])]
print(len(comp_q_data))
for _, info in comp_q_data.iterrows():
if info["Ticker"] not in centrality_hub:
centrality_hub[info["Ticker"]] = {}
centrality_auth[info["Ticker"]] = {}
centrality_hub[info["Ticker"]][i] = info["Hub score"] / 0.003956894201773358
centrality_auth[info["Ticker"]][i] = info["Auth score"] / 0.04582665265807497
# -
number_of_sup = json.load(open("numofsupplier.json"))
region_dict = {}
for _, info in region.iterrows():
region_dict[info["Country Code"]] = info["Region"]
print(region_dict)
# +
# Accumulators for the network graph: keyed by ticker / "supplier_central" id.
nodes = {}
links = {}
def set_default_n(node):
    """Initialise every per-quarter attribute of a graph node with its placeholder value.

    Each attribute becomes a dict mapping quarter index (0-5) to a default:
    sizes default to 3, market cap / supplier count to '--', and the
    financial scores to the sentinel -0.001 (meaning "no data").
    """
    quarterly_defaults = {
        'centrality_score': 0,
        'size': 3,
        'market_cap': '--',
        'borrow': -0.001,
        'risk': -0.001,
        'FX': -0.001,
        'num_supplier': '--',
    }
    for attr, default in quarterly_defaults.items():
        node[attr] = {quarter: default for quarter in range(6)}
def set_default_l(link):
    """Give a link a neutral score of 1 for each of the six quarters."""
    link['score'] = {quarter: 1 for quarter in range(6)}
# Build the supplier->manufacturer graph quarter by quarter.
for i in range(6):
    comp_q_data = splc_all[(splc_all["As Of Date Bloomberg"] == quarters[i])]
    for _, info in comp_q_data.iterrows():
        # Only keep supplier relationships; skip customer rows.
        if info['Relationship Type'] == 'Customers': continue
        link_id = info['Supplier Ticker'] + "_" + info['Central Ticker']
        if link_id not in links:
            link = {}
            link['source'] = info['Supplier Ticker']
            link['target'] = info['Central Ticker']
            set_default_l(link)
            links[link_id] = link
        else:
            link = links[link_id]
        # Log-scale the relationship value into a link weight (floored at 1).
        relation_value = info["Relationship Value USD"]
        link["score"][i] = max(log(relation_value + 1) / 5 - 1, 1)
        # --- supplier node ---
        if info['Supplier Ticker'] not in nodes:
            node = {}
            node['ticker'] = info['Supplier Ticker']
            node['name'] = info['Supplier Company']
            node['type'] = "supplier1"
            country = info['Supplier Country']
            node['country'] = country
            if country in region_dict:
                node['region'] = region_dict[country]
            else:
                # Unknown country codes are bucketed as China.
                node['country'] = "CN"
                node['region'] = "China"
            set_default_n(node)
            nodes[info['Supplier Ticker']] = node
        else:
            node = nodes[info['Supplier Ticker']]
        # Node size from log market cap, floored at 5; missing caps shown as '--'.
        market_cap = info['Supplier Market Cap']
        if market_cap == '#N/A' or market_cap == '--' or market_cap == 0 or pd.isna(market_cap):
            node["size"][i] = 5
            node["market_cap"][i] = "--"
        else:
            node["size"][i] = max(log(market_cap) - 12, 5)
            node["market_cap"][i] = market_cap
        # Financial scores only exist for the last 4 quarters (offset i-2).
        if i > 1 and (node['ticker'] in scores):
            node['borrow'][i] = scores[node['ticker']]['Borrow Need'][i - 2]
            node['risk'][i] = scores[node['ticker']]['Risk Score'][i - 2]
            node['FX'][i] = scores[node['ticker']]['FX Need'][i - 2]
        # NOTE(review): centrality_hub[ticker] raises KeyError for tickers absent
        # from the centrality sheet — confirm coverage is guaranteed upstream.
        if i in centrality_hub[node['ticker']]:
            node["centrality_score"][i] = centrality_hub[node['ticker']][i]
        else:
            node["centrality_score"][i] = -1
        # NOTE(review): number_of_sup comes from json.load, whose keys are
        # strings — confirm the inner index `i` matches that file's key type.
        node['num_supplier'][i] = number_of_sup[node['ticker']][i]
        # also add the central node
        if info['Central Ticker'] not in nodes:
            node = {}
            node['ticker'] = info['Central Ticker']
            node['name'] = info['Central Company']
            node['type'] = "manufacturer"
            set_default_n(node)
            country = info['Central Country']
            node['country'] = country
            if country in region_dict:
                node['region'] = region_dict[country]
            else:
                node['country'] = "CN"
                node['region'] = "China"
            nodes[info['Central Ticker']] = node
        else:
            node = nodes[info['Central Ticker']]
        # NOTE(review): `market_cap` here is still the *supplier's* market cap
        # from above — there is no read of a central-company cap column. Looks
        # like a copy/paste bug; confirm against the sheet's columns.
        if market_cap == '#N/A' or market_cap == '--' or market_cap == 0 or pd.isna(market_cap):
            node["size"][i] = 5
            node["market_cap"][i] = "--"
        else:
            node["size"][i] = max(log(market_cap) - 12, 5)
            node["market_cap"][i] = market_cap
        if i > 1 and (node['ticker'] in scores):
            node['borrow'][i] = scores[node['ticker']]['Borrow Need'][i - 2]
            node['risk'][i] = scores[node['ticker']]['Risk Score'][i - 2]
            node['FX'][i] = scores[node['ticker']]['FX Need'][i - 2]
        # Central nodes use authority centrality (suppliers used hub centrality).
        if i in centrality_auth[node['ticker']]:
            node["centrality_score"][i] = centrality_auth[node['ticker']][i]
        else:
            node["centrality_score"][i] = -1
        node['num_supplier'][i] = number_of_sup[node['ticker']][i]
# -
# Serialise the final graph; NOTE(review): the output file handle is never closed.
json.dump({'nodes': list(nodes.values()), 'links': list(links.values())}, open("splc_six_quarters.json", 'w'), indent=2)
| data/clean_data_tradewar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ashahdeen/Linear-Discriminant-Analysis-Application/blob/main/LDA_lesson_Plan.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="K_8xeoTr_xIL"
# # **LINEAR DISCRIMINANT ANALYSIS.**
# + [markdown] id="CzyGZXN7_6vT"
#
#
# * Discriminant analysis is a technique that is used for dimensionality reduction, classification, and data visualization.
#
# * It is used to reduce the number of variables in a dataset while retaining all the information or as much information as possible.
#
# * Linear Discriminant Analysis is most commonly used for supervised classification problems
#
#
#
# + [markdown] id="bCvxTjLnDRNi"
# ## Application of LDA in the real world
#
# * Face Recognition
#
# It is a widely used application in computer vision, where every face is represented by a large number of pixel values. Here LDA reduces the number of features to make them more manageable. Every one of the new dimensions is a linear combination of pixel values, which forms a template.
#
# * Marketing
#
# It is used in marketing to identify the factors that differentiate between various types of consumers and/or products, based on data collected on in store.
#
# * Medical Field.
#
# LDA is used to classify the state of patients for a disease as mild, moderate, or severe based on fewer parameters, so that the treatment of the patient can proceed as scheduled.
#
# etc
#
# + [markdown] id="J3xivqzqCe3C"
# ## Objectives of Linear Discriminant Analysis.
#
#
# 1. Development of discrimination function, or linear combination of predictor or independent variables, which will best discriminate between categories of criterion or dependent group.
#
# 2. Checks to see whether there are any disparities between the characterized groups.
#
#
# + [markdown] id="hlRtzC8_VDLl"
# #Data Description
# + [markdown] id="Zt_0mTonFG07"
# We will use the Iris dataset to show a step-by-step implementation of Linear Discriminant Analysis
# + [markdown] id="9bi9dLlu7dbM"
# The Iris Dataset includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.
#
# This is the Link to the dataset[https://www.kaggle.com/uciml/iris]
#
#
#
#
# + [markdown] id="Mo3pAByvFkmu"
#
# + [markdown] id="R-oYc1b43pm9"
# ###Loading Libraries
# + id="1SAeISiLGDTH"
#load libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] id="_pXIPjyhUoVp"
# #Loading Dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="77NAWZNgQxiK" outputId="ea4a7a94-8156-4ef8-e96d-951c86588dd6"
#load the dataset
Iris=pd.read_csv('/content/Iris.csv')
Iris
# + [markdown] id="okhqyCDTUxnB"
# #Checking the Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="VzOSfGT0S5Ua" outputId="67eeee5f-af25-4364-c0bc-5a05073ccb8d"
#check the dataset
Iris.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="6XYBmK54TDsn" outputId="e1034e9c-72ea-4b5c-bc0e-db2137a4f845"
#previewing the head of the dataset
Iris.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="08cab61b-d50f-4dd3-c642-7bc6fe6e461a" id="Z-eRcnzyTLLW"
#previewing the tail of the dataset
Iris.tail(5)
# + colab={"base_uri": "https://localhost:8080/"} id="Botj-WyaTiC0" outputId="6d24af1a-ca12-4ac2-b150-4c7724eca709"
#checking column names
Iris.columns
#We have 6 columns in our Iris dataset.
# + colab={"base_uri": "https://localhost:8080/"} id="iQmlWcHKTmXT" outputId="d54ccaeb-2993-497e-a71d-1d38d907c0a0"
#checking data types of our dataset
Iris.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="g5pr0kPYTzHu" outputId="4aa56a78-09c2-49f5-ab3e-135b52235381"
#checking the shape of our dataset
Iris.shape
# + colab={"base_uri": "https://localhost:8080/"} id="wmctG6qWT9v0" outputId="b3d13648-11e6-49a1-d88e-88486e5db1df"
#Checking Species
Iris.Species.value_counts()
#We have 3 species of Iris.
# + [markdown] id="KjlncCTCiZJh"
# #Data Cleaning
# + colab={"base_uri": "https://localhost:8080/"} id="xJRX0DEPiQGu" outputId="21a0e871-498d-4bfe-87c5-72a3502f8001"
#making the columns uniform by removing whitespaces
# Normalise column names: strip, lowercase, spaces->underscores, drop parentheses.
# NOTE(review): str.replace('(', ...) with the older regex=True default would be
# an invalid regex — confirm the pandas version treats these as literals.
Iris.columns=Iris.columns.str.strip().str.lower().str.replace(' ','_').str.replace('(','').str.replace(')','')
Iris.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="gAy-ZTUogJry" outputId="56feaa06-ee3f-4ae0-da40-698290ec6f16"
#Drop Unnecessary Columns
#We will drop the Id column.
Iris=Iris.drop('id',axis=1)
Iris
# + colab={"base_uri": "https://localhost:8080/"} id="uThZQ4qbf9U5" outputId="56707cbf-c70c-4d23-e83c-5eb306921512"
#Checking for Missing values
Iris.isnull().any()
#We have no missing values in our dataset
# + colab={"base_uri": "https://localhost:8080/"} id="YxWsDgpShWRj" outputId="6a5abf61-bf26-4342-b0a0-30ec740a3715"
#Checking for duplicates
Iris.duplicated().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="EYWp2sithkoe" outputId="569de0e4-37b3-456d-d2e0-3ebde13a34b7"
#Drop duplicates from our dataset
# BUG FIX: the original called Iris.drop_duplicates() without assigning the
# result (drop_duplicates returns a new DataFrame), so duplicates were never
# actually removed. Assign it back and display the cleaned frame.
Iris = Iris.drop_duplicates()
Iris
# + id="XZnGhDQ0isQy"
cols=['sepallengthcm','sepalwidthcm','petallengthcm','petalwidthcm']
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ngBei5G-h81i" outputId="da79c74f-27d8-40d7-dbd5-46974cfebb96"
import seaborn as sns
fig, ax = plt.subplots(len(cols), figsize=(15,40))
for i, col_val in enumerate(cols):
sns.boxplot(Iris[col_val], ax=ax[i])
ax[i].set_title('Box plot - {}'.format(col_val), fontsize=10)
ax[i].set_xlabel(col_val, fontsize=8)
plt.show()
# + [markdown] id="aP1jNzlk9Ozu"
# We have few outliers in sepalwidh column that we are going to remove since LDA is affected by outliers.
# + id="NMvFd-mrkDWW"
#Removing Outliers
Q1 = Iris.quantile(0.25)
Q3 = Iris.quantile(0.75)
IQR = Q3 - Q1
Iris = Iris[~((Iris < (Q1 - 1.5 * IQR)) |(Iris > (Q3 + 1.5 * IQR))).any(axis=1)]
# + [markdown] id="Q1DdEyYaZNNr"
# # Exploratory Data Analysis
# + [markdown] id="9fzAj758aLE7"
# ## Univariate Analysis
#
# * This is the analysis of one variable.
# * The purpose is to describe the data and find patterns that exist within it
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="wmZ_Hy2pac_R" outputId="2fda9a0f-f89a-4a38-fa6e-c8542631f044"
#describing our dataset
Iris.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="jIS808O4al4j" outputId="4179f491-03bd-4392-f3a4-7d40472a5632"
#measure of dispersion.
# Print skewness/kurtosis per feature with a plain-language interpretation.
for i in cols:
    print(i)
    print("The skewness is:", Iris[i].skew())
    print("The kurtosis is:", Iris[i].kurt())
    s= abs(Iris[i].skew())
    if s > 1:
        print('highly skewed distribution')
    else:
        print('')
    k = Iris[i].kurt()
    if k >3:
        print('the distribution is heavy-tailed(leptokurtic)')
    elif k < 3:
        print('Distribution is light tailed')
    print("******"*10)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="O3DgMmIXbbse" outputId="a7fc8d6e-eff7-44e8-d81b-420a4f961ea1"
#a countplot showing Iris species
sns.countplot(x='species', data=Iris)
#Iris-virginica has the highest number of species followed closely by Iris-versicolor
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="RpyVsF66bXAB" outputId="0b63e27a-75ee-4e4e-8d4e-b653a5d7b5bb"
#we will use a for loop and a histogram inside it to show visual
for i in cols:
    Iris.hist(i)
# + [markdown] id="mi5lRI-tcGn5"
# # Bivariate Analysis
#
# * This is analysis where you are comparing two variables to study their relationship.
#
# * The variables could be dependent or independent to each other.
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="cSbStC-5cef8" outputId="41de1345-1c3c-41cd-b240-6be5c365f2c1"
#a heatmap showing correlation between variables.
sns.heatmap(data=Iris.corr(),annot=True,vmax=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="0WFUrxFVdDz_" outputId="c1595de7-a591-4aef-ec00-9793c38dc11e"
# Scatter each numeric feature against the species label.
for i in cols:
    Iris.plot(x = 'species', y = i, kind='scatter')
# + [markdown] id="deaOZJqGdleW"
# # Multivariate Analysis
#
# * It is similar to Bivariate Analysis but Multivariate Analysis compares more than 2 variables.
#
# * MuLtivariate Techniques.
# 1. Linear Discriminant Analysis.
# 2. Principal Component Analysis.
# 3. Factor Analysis.
# 4. Cluster Analysis.
#
#
# etc
#
# + [markdown] id="I5IxczUllY9f"
# ##LDA implementation
# + [markdown] id="Oj5cUndSd_GA"
#
# + [markdown] id="1hH2WUI9lv-x"
# Once we have Loaded the dataset,checked the dataset to understand the dataset better and cleaned our data .
#
# We will follow the following steps
#
# * Step 1: Divide the dataset divide the dataset into class and targets.
#
# * Step 2: Splitting the dataset into training and test sets.
#
# * Step 3: Feature scaling
#
# * Feature scaling is a technique of bringing down the values of all the independent features of our dataset on the same scale.(A further explanation of Feature scaling https://medium.datadriveninvestor.com/feature-scaling-in-data-science-5b1e82492727
# )
#
#
# * Step 4: Apply Linear Discriminant Analysis from Sklearn Library.
#
#
# * Step 5: Performance of the model.
# + id="hoq_h1ZGlgVY"
#Step 1
#Divide the dataset into class and target variable.
#We will use iloc to select columns from the dataset
#0:4 select from index 0 to 3
#4 selects the last column which is the species column.
X = Iris.iloc[:, 0:4].values
y = Iris.iloc[:, 4].values
# + id="gtMs0TfNqu_0"
#step 2
#Splitting data into train and test sets
# 80/20 split with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# + id="LRRVBtmfri6p"
#Step 3
# Feature scaling
# we will use standardization method to normalize our data.
#Standardization is scaling technique in which the mean will be equal to zero and the standard deviation equal to one
#We will use StandardScalar from the Sci-Kit learn library in our dataset.
#One of the assumption of LDA is that the data is normaly distributed.
# Fit the scaler on train only, then apply to both (no test-set leakage).
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# + id="4KjOdk5J1ZUU"
#step 4
#LDA Application
#We will use sklearn.discriminant_analysis library to Perform LDA .
#LDA has a parameter n_components
#n_components is number of components for dimensionality reduction.
#We will set the n_component as 1 to see the performance of our model with a single Linear Discriminant.
#For more information on Linear Discriminant Analysis check Sklearn guide [https://scikit-learn.org/0.16/modules/generated/sklearn.lda.LDA.html]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components=1)
# fit_transform needs the labels (LDA is supervised); transform the test set only.
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
# + id="jzOY1o7GwF-E"
#Step 5
#Making prediction
#We will Random Forest Classification to make predictions on our dataset.
#Random Forest is a classification algorithm consisting of many decisions tree.
#It uses averaging to improve the predictive accuracy and control over-fitting.
#
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="ObhQhVZG0s4A" outputId="86e3caf4-ca31-401f-8467-6721e6c52703"
#check performance of our model using confusion matrix and accuracy score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
print('Accuracy : ' + str(accuracy_score(y_test, y_pred)))
conf_m = confusion_matrix(y_test, y_pred)
print(conf_m)
#Our model has an accuracy of 100% and correct prediction of 30 variables.
# + [markdown] id="puYJqrQXJuEW"
# Conclusion
# + [markdown] id="tu-qwhLKJ2om"
#
#
# * Linear Dimensionality Analysis avoids the curse of dimensionality and reduces resources and dimensional costs.
#
# * Linear Dimensionality Analysis has a more accurate conclusion.
#
# * LDA is sensitive to outliers.
#
#
# + id="2FJ-GOI7mQEK"
| LDA_lesson_Plan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.options.display.max_columns = None
# -
# Load the trusted-layer training datasets.
df_calendar = pd.read_parquet('data/trusted/training/calendar.parquet')
df_listings = pd.read_parquet('data/trusted/training/listing.parquet')
df_reviews = pd.read_parquet('data/trusted/training/reviews.parquet')
list(df_listings.columns)
# __Característica da variável resposta__
# Inspect the target (price) distribution via quantiles and its maximum.
df_listings.price.quantile([.25, .5, .75, 0.8, 0.9, 0.95, 0.99])
df_listings.price.max()
# Observa-se que a variável resposta está muito concentrada em valores de até 5.7k, enquanto 1% dos valores vão de 5.7k até 137k. É importante para o sucesso do modelo conseguir identificar as caracteríticas desse grupo, segue como essa distribuição se parece
plt.figure(figsize=(30,10))
df_listings.price.plot.hist(bins=3000)
# Como cada faixa de preço vem com suas características para o público alvo, vamos separar as faixas de preço entre `comum`, `plus` e `premium`
#
# Entre `comum` e `plus`, podemos cortar no percentile 80%, `< 661.00` e ` >= 661.00`
#
# Já entre `plus` e `premium`, cortaremos no percentile 95%, `< 1928.00` e `>= 1928.00`, uma ponderação entre representatividade e caracterítica.
# É necessário achar um meio de diferenciar o seguimento premium dos demais para ajudar o modelo a pelo menos não tentar generalizar os pontos fora da curva.
# Split listings into price tiers at the 80th (661.00) and 95th (1928.00) percentiles.
listing_comum = df_listings[df_listings.price < 661.00].copy().reset_index(drop=True)
listing_comum.shape
listing_plus = df_listings[(df_listings.price >= 661.00) & (df_listings.price < 1928.00)].copy().reset_index(drop=True)
listing_plus.shape
listing_premium = df_listings[df_listings.price >= 1928.00].copy().reset_index(drop=True)
listing_premium.shape
# ## Hipóteses
#
# ### Subprefeituras
def display_category_boxplot(df, col):
    """Show mean/std/count of price per category of `col`, plus a boxplot of
    price by category ordered from highest to lowest mean price.

    Uses the notebook's `display` builtin and seaborn for the plot.
    """
    by_category = df.groupby(col)
    # Category order for the x-axis: decreasing average price.
    order = (
        by_category.agg({'price': 'mean'})
        .reset_index()
        .sort_values('price', ascending=False)[col]
    )
    display(by_category.agg({'price': ['mean', 'std', 'count']}))
    plt.xticks(rotation=90)
    sns.boxplot(data=df, x=col, y='price', order=order)
# Price-by-subprefecture summaries for each price tier.
display_category_boxplot(listing_comum, 'subprefeitura')
display_category_boxplot(listing_plus, 'subprefeitura')
display_category_boxplot(listing_premium, 'subprefeitura')
# Das observações podemos concluir que:
#
# 1. No seguimento comum, a zona geral é uma boa preditora;
# 1. A medida que seguimos para valores maiores, a variância de preço aumenta;
# 1. Duas zonas concentram quase todos anúncios premium: `Barra da Tijuca` e `Zona Sul`.
# ### Amenities
def count_amenities(df):
    """Add a ``num_amenities`` column to `df` holding, per row, the sum of all
    ``amenities_*`` indicator columns (0 when there are none). Mutates `df`
    in place and returns it.
    """
    total = 0
    for column in df.columns:
        if column.startswith('amenities_'):
            total = total + df[column]
    df['num_amenities'] = total
    return df
# Amenity-count quantiles and histograms for each price tier and the full set.
listing_comum = count_amenities(listing_comum)
display(listing_comum['num_amenities'].quantile([0.2, 0.4, 0.6, 0.8]))
listing_comum['num_amenities'].plot.hist()
listing_plus = count_amenities(listing_plus)
display(listing_plus['num_amenities'].quantile([0.2, 0.4, 0.6, 0.8]))
listing_plus['num_amenities'].plot.hist()
listing_premium = count_amenities(listing_premium)
display(listing_premium['num_amenities'].quantile([0.2, 0.4, 0.6, 0.8]))
listing_premium['num_amenities'].plot.hist()
df_listings = count_amenities(df_listings)
display(df_listings['num_amenities'].quantile([0.2, 0.4, 0.6, 0.8]))
df_listings['num_amenities'].plot.hist()
# As distribuições de amenities não parecem variar entre os tiers de preço, muito pelo contrário, os anúncios contém uma quantidade maior de até 20 amenidades. Provável que seja um mecanismo de compensação do anunciante querendo chamar atenção nos filtros de busca.
# Number of reviews per listing.
df_count_reviews = df_reviews.groupby('listing_id').agg({'id': 'count'})
| 2 - Explora Dados.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tf35]
# language: python
# name: conda-env-tf35-py
# ---
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import OneHotEncoder
# +
#Tensorflow tutorial
#Tensorflow executes code under a Tensorflow session
# (TF1 graph API: ops build a graph; sess.run evaluates it.)
sess = tf.Session()
print(sess.run(tf.constant("Hi I am here")))
# +
#Tensorflow is very close to the numpy api. Tensorflow converts everything to tensors which are multidimensional
#arrays. Tensors refer to the subscipt below the variable indicating multiple dimensions. Tensors are a notation for
#matrices.
a = tf.constant(10)
b = tf.constant(20)
result_add = sess.run(tf.add(a,b))
result_multiply = sess.run(tf.multiply(a,b))
result_divide = sess.run(tf.divide(a,b))
print(result_add,result_multiply,result_divide)
# +
#Tensorflow was designed as a graph. NN following cs231n assignment1
# +
def trans_for_one(labels):
    """Reshape a flat sequence of class labels into the 2-D column array
    that sklearn's OneHotEncoder expects."""
    column = np.asarray(labels)
    return column.reshape(len(labels), -1)
def analyze_classifier(sess,i,w1,b1,w2,b2,XOR_X,XOR_Y):
    """Print the current parameters/cost and plot the learned decision regions.

    Relies on the module-level graph tensors `hypothesis`, `cross_entropy`,
    `input_` and `target`. `XOR_Y` is the one-hot target matrix fed to the
    graph (the caller passes XOR_T).
    """
    print('Epoch:%i'%i)
    # BUG FIX: original was "'Hypo:' % sess.run(...)" — no %s placeholder, which
    # raises TypeError at runtime — and it fed the global XOR_T instead of the
    # XOR_Y parameter.
    print('Hypo:%s' % sess.run(hypothesis,feed_dict={input_:XOR_X, target:XOR_Y}))
    print('w1:%s' % sess.run(w1))
    # BUG FIX: original printed w1 again under the 'b1' label.
    print('b1:%s' % sess.run(b1))
    print('w2:%s' % sess.run(w2))
    print('b2:%s' % sess.run(b2))
    print('cost (ce):%s' % sess.run(cross_entropy, feed_dict={input_:XOR_X, target:XOR_Y}))
    #visualize classification boundary
    # Evaluate the classifier over a [-5, 5] x [-5, 5] grid.
    xs = np.linspace(-5,5)
    ys = np.linspace(-5,5)
    pred_classes = []
    for x in xs:
        for y in ys:
            pred_class = sess.run(hypothesis,feed_dict={input_:[[x,y]]})
            pred_classes.append((x,y,pred_class.argmax()))
    xs_p, ys_p = [],[]
    xs_n, ys_n = [],[] #negative and positive WRT classification boundary
    for x,y,c in pred_classes:
        if c==0:
            xs_n.append(x)
            ys_n.append(y)
        else:
            xs_p.append(x)
            ys_p.append(y)
    # BUG FIX: original plotted (xs_n, xs_p) — x-coords against the wrong list —
    # for the negative class; use (xs_n, ys_n).
    plt.plot(xs_p,ys_p,'ro',xs_n,ys_n,'bo')
    plt.show()
# XOR truth table: inputs and class labels.
XOR_X = [[0,0],[0,1],[1,0],[1,1]]
XOR_Y = [0,1,1,0]
# One-hot encode the labels into the 4x2 target matrix XOR_T.
enc = OneHotEncoder() #where is this from, there should be a tf.onehotencoder
enc.fit(trans_for_one(XOR_Y))
XOR_T = enc.transform(trans_for_one(XOR_Y)).toarray()
nb_classes=2
input_ = tf.placeholder(tf.float32,shape=[None,len(XOR_X[0])],name="input")
target = tf.placeholder(tf.float32, shape=[None,nb_classes],name="output")
nb_hidden_nodes = 2
# 2-2-2 network: sigmoid hidden layer, softmax output.
w1 = tf.Variable(tf.random_uniform([2,nb_hidden_nodes],-1,1,seed=0),name="Weights1")
w2 = tf.Variable(tf.random_uniform([nb_hidden_nodes,nb_classes],-1,1,seed=0),name="Weights2")
b1 = tf.Variable(tf.zeros([nb_hidden_nodes]),name="Biases1")
b2 = tf.Variable(tf.zeros([nb_classes]),name="Biases2")
activation2 = tf.sigmoid(tf.matmul(input_,w1)+b1)
hypothesis = tf.nn.softmax(tf.matmul(activation2,w2)+b2)
# Summed (not averaged) cross-entropy over the 4 examples.
cross_entropy = -tf.reduce_sum(target*tf.log(hypothesis))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(20000):
        sess.run(train_step,feed_dict={input_:XOR_X,target:XOR_T})
        if(i%10000==0):
            analyze_classifier(sess,i,w1,b1,w2,b2,XOR_X,XOR_T)
# +
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import OneHotEncoder
def trans_for_ohe(labels):
    """Transform a flat list of labels to what one hot encoder needs."""
    n_labels = len(labels)
    return np.asarray(labels).reshape(n_labels, -1)
def analyze_classifier(sess, i, w1, b1, w2, b2, XOR_X, XOR_T):
    """Print the current network parameters and cost, then plot the decision
    regions learned so far. Uses the module-level tensors `hypothesis`,
    `cross_entropy`, `input_` and `target`."""
    feed = {input_: XOR_X, target: XOR_T}
    print('\nEpoch %i' % i)
    print('Hypothesis %s' % sess.run(hypothesis, feed_dict=feed))
    print('w1=%s' % sess.run(w1))
    print('b1=%s' % sess.run(b1))
    print('w2=%s' % sess.run(w2))
    print('b2=%s' % sess.run(b2))
    print('cost (ce)=%s' % sess.run(cross_entropy, feed_dict=feed))

    # Sample the classifier over a [-5, 5] x [-5, 5] grid and split the points
    # by predicted class to visualize the decision boundary.
    grid = np.linspace(-5, 5)
    red_x, red_y = [], []    # predicted class 1
    blue_x, blue_y = [], []  # predicted class 0
    for gx in grid:
        for gy in grid:
            cls = sess.run(hypothesis, feed_dict={input_: [[gx, gy]]}).argmax()
            if cls == 0:
                blue_x.append(gx)
                blue_y.append(gy)
            else:
                red_x.append(gx)
                red_y.append(gy)
    plt.plot(red_x, red_y, 'ro', blue_x, blue_y, 'bo')
    plt.show()
# The training data
XOR_X = [[0, 0], [0, 1], [1, 0], [1, 1]]  # Features
XOR_Y = [0, 1, 1, 0]  # Class labels
assert len(XOR_X) == len(XOR_Y)  # sanity check
# Transform labels to targets
enc = OneHotEncoder()
enc.fit(trans_for_ohe(XOR_Y))
XOR_T = enc.transform(trans_for_ohe(XOR_Y)).toarray()
# The network
# 2-2-2 architecture: sigmoid hidden layer, softmax output over 2 classes.
nb_classes = 2
input_ = tf.placeholder(tf.float32,
                        shape=[None, len(XOR_X[0])],
                        name="input")
target = tf.placeholder(tf.float32,
                        shape=[None, nb_classes],
                        name="output")
nb_hidden_nodes = 2
# enc = tf.one_hot([0, 1], 2)
w1 = tf.Variable(tf.random_uniform([2, nb_hidden_nodes], -1, 1, seed=0),
                 name="Weights1")
w2 = tf.Variable(tf.random_uniform([nb_hidden_nodes, nb_classes], -1, 1,
                                   seed=0),
                 name="Weights2")
b1 = tf.Variable(tf.zeros([nb_hidden_nodes]), name="Biases1")
b2 = tf.Variable(tf.zeros([nb_classes]), name="Biases2")
activation2 = tf.sigmoid(tf.matmul(input_, w1) + b1)
hypothesis = tf.nn.softmax(tf.matmul(activation2, w2) + b2)
cross_entropy = -tf.reduce_sum(target * tf.log(hypothesis))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
# Start training
# NOTE: initialize_all_variables is the deprecated TF1 alias of
# global_variables_initializer (used in the earlier cell).
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    for i in range(20001):
        sess.run(train_step, feed_dict={input_: XOR_X, target: XOR_T})
        if i % 10000 == 0:
            analyze_classifier(sess, i, w1, b1, w2, b2, XOR_X, XOR_T)
# +
# XOR network, version 2: 2 sigmoid hidden units, sigmoid output,
# trained with plain gradient descent (TF1 graph-mode API).
x = tf.placeholder(tf.float32, shape=[4,2], name='x')
y = tf.placeholder(tf.float32, shape=[4,1], name='y')

# setup weights and bias: z = w*x + b
w0 = tf.Variable(tf.random_uniform([2,2], -1, 1), name="w0")
w1 = tf.Variable(tf.random_uniform([2,2], -1, 1), name="w1")
bias_0 = tf.Variable(tf.zeros([2]), name="bias_0")
bias_1 = tf.Variable(tf.zeros([2]), name="bias_1")

z = tf.sigmoid(tf.matmul(x,w0)+bias_0)
# NOTE(review): w1/bias_1 make the output 2-wide while y is [4,1];
# broadcasting hides this -- presumably w1 should be [2,1]. Confirm.
hypothesis = tf.sigmoid(tf.matmul(z,w1)+bias_1)

# Binary cross-entropy: -mean(y*log(h) + (1-y)*log(1-h)).
# BUG FIX: the original multiplied only the (1-y)*log(1-h) term by -1,
# yielding mean(y*log(h) - (1-y)*log(1-h)), which is not a valid loss
# (its first term is minimized by driving h toward 0 when y = 1).
cost = -tf.reduce_mean(y*tf.log(hypothesis) + (1-y)*tf.log(1. - hypothesis))
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

XOR_X = [[0,0],[0,1],[1,0],[1,1]]
XOR_Y = [[0],[1],[1],[0]]

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# +
# Train the XOR network for 100k full-batch steps, dumping the current
# predictions, parameters and loss every 1000 steps.
for i in range(100000):
    sess.run(train, feed_dict={x: XOR_X, y: XOR_Y})
    if i % 1000 == 0:
        print('Epoch ', i)
        print('Hypothesis ', sess.run(hypothesis, feed_dict={x: XOR_X, y: XOR_Y}))
        print('Theta1 ', sess.run(w0))
        print('Bias1 ', sess.run(bias_0))
        print('Theta2 ', sess.run(w1))
        print('Bias2 ', sess.run(bias_1))
        print('cost ', sess.run(cost, feed_dict={x: XOR_X, y: XOR_Y}))
# -
| hinton/.ipynb_checkpoints/tf_xor-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 2.2 Write a program that uses input to prompt a user for their name and then welcomes them. Note that input will pop up a dialog box. Enter Sarah in the pop-up box when you are prompted so your output will match the desired output.
# +
# The code below almost works
name = input("Enter your name")  # prompt the user for their name
print("Hello", name)  # greet them back
# -
# 2.3 Write a program to prompt the user for hours and rate per hour using input to compute gross pay. Use 35 hours and a rate of 2.75 per hour to test the program (the pay should be 96.25). You should use input to read a string and float() to convert the string to a number. Do not worry about error checking or bad user data.
# +
# This first line is provided for you
hrs = input("Enter Hours:")
rate = input("Enter Rate:")
# Convert both strings to numbers before multiplying to get gross pay.
pay = float(rate) * float(hrs)
print("Pay:", pay)
# -
# 3.1 Write a program to prompt the user for hours and rate per hour using input to compute gross pay. Pay the hourly rate for the hours up to 40 and 1.5 times the hourly rate for all hours worked above 40 hours. Use 45 hours and a rate of 10.50 per hour to test the program (the pay should be 498.75). You should use input to read a string and float() to convert the string to a number. Do not worry about error checking the user input - assume the user types numbers properly.
# +
hrs = input("Enter Hours:")
h = float(hrs)
rate = input("Enter Rate per Hour:")
r = float(rate)
# Up to 40 hours at the normal rate; time-and-a-half beyond that.
if h <= 40:
    gross_pay = h * r
else:
    diff = h - 40  # overtime hours
    gross_pay = 40 * r + diff * r * 1.5
print(gross_pay)
# -
# 3.3 Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error. If the score is between 0.0 and 1.0, print a grade using the following table:
# Score Grade
# >= 0.9 A
# >= 0.8 B
# >= 0.7 C
# >= 0.6 D
# < 0.6 F
# If the user enters a value out of range, print a suitable error message and exit. For the test, enter a score of 0.85.
# +
# Read a score in [0.0, 1.0] and print the corresponding letter grade.
score = input("Enter Score: ")
fs = float(score)
if fs > 1 or fs < 0:
    # Out-of-range input: same message for both directions, then stop.
    print("Error! Please enter a number between 0 and 1.")
    quit()
# Walk the thresholds from highest to lowest and keep the first letter
# whose cutoff the score meets; anything below 0.6 stays an F.
grade = "F"
for threshold, letter in ((0.9, "A"), (0.8, "B"), (0.7, "C"), (0.6, "D")):
    if fs >= threshold:
        grade = letter
        break
print(grade)
# -
# 4.6 Write a program to prompt the user for hours and rate per hour using input to compute gross pay. Pay should be the normal rate for hours up to 40 and time-and-a-half for the hourly rate for all hours worked above 40 hours. Put the logic to do the computation of pay in a function called computepay() and use the function to do the computation. The function should return a value. Use 45 hours and a rate of 10.50 per hour to test the program (the pay should be 498.75). You should use input to read a string and float() to convert the string to a number. Do not worry about error checking the user input unless you want to - you can assume the user types numbers properly. Do not name your variable sum or use the sum() function.
# +
def computepay(h, r):
    """Return gross pay for h hours at hourly rate r, paying
    time-and-a-half for every hour beyond 40."""
    regular = min(h, 40) * r
    overtime = max(h - 40, 0) * 1.5 * r
    return regular + overtime
# Prompt for hours and rate, convert to floats, and print the pay
# computed by computepay() above.
hrs = input("Enter Hours:")
rate = input("Enter Rate per Hour:")
h = float(hrs)
r = float(rate)
p = computepay(h,r)
print(p)
# -
# 5.2 Write a program that repeatedly prompts a user for integer numbers until the user enters 'done'. Once 'done' is entered, print out the largest and smallest of the numbers. If the user enters anything other than a valid number catch it with a try/except and put out an appropriate message and ignore the number. Enter 7, 2, bob, 10, and 4 and match the output below.
# +
# Read integers until 'done', then report the largest and smallest.
largest = None
smallest = None
while True:
    num = input("Enter a number: ")
    if num == "done" :
        break
    try:
        inum = int(num)
    except:
        # Non-numeric input: report it and keep reading.
        print('Invalid input')
        continue
    # BUG FIX: the original chain only set `largest` from the first
    # number and `smallest` from the second, so e.g. the inputs 2, 7
    # reported Maximum 2 / Minimum 7.  Update both bounds on every
    # valid number instead.
    if largest is None or inum > largest:
        largest = inum
    if smallest is None or inum < smallest:
        smallest = inum

print("Maximum is", largest)
print("Minimum is", smallest)
| 01_Programming for Everybody__Getting Started with Python/.ipynb_checkpoints/Assignments-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.9 64-bit (''jupyter_py3.6'': venv)'
# language: python
# name: python36964bitjupyterpy36venv7e7336c5157b4b88ae24da5b2197478f
# ---
# I will continue explaining the process I went through for analyzing airbnb data. This time I'm using TextBlob for sentiment analysis and Azure Cognitive Services for image recognition.
#
# ## Image Recognition
#
# First, I started configuring an azure cognitive services API. From it I got a subscription key and an endpoint to connect. More can be found in [azure-doc](https://docs.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts/python-analyze).
#
# 
# +
import requests
import pandas as pd
import json
#subscription key and endpoint
subscription_key = ''
endpoint = 'https://sdm-airbnb-images.cognitiveservices.azure.com/'
analyze_url = endpoint + "vision/v2.1/analyze"
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'visualFeatures': 'Categories,Description,Color'}
# +
# This part of the code is just for testing and understanding
# of the response from the Azure API.
data = {'url': 'https://a0.muscache.com/im/pictures/67002727-38d2-4be7-aa2d-3beabc29df91.jpg?aki_policy=large'}
response = requests.post(analyze_url, headers=headers, params=params, json=data)
response.raise_for_status()  # abort on HTTP 4xx/5xx
analysis = response.json()

# Pull the generated caption and the two dominant colors out of the
# JSON payload (fields requested via the 'visualFeatures' params above).
image_description = analysis["description"]["captions"][0]["text"]
image_dominantfgColor = analysis["color"]["dominantColorForeground"]
image_dominantbgColor = analysis["color"]["dominantColorBackground"]
print(image_description)
print(image_dominantfgColor)
print(image_dominantbgColor)
# -
# ### Azure ComputerVisio API Response and storage
# Now that I have a better understanding of what I can find in each response from the API I can collect and append this data to my **airbnb_listing.csv** dataset.
#
# Azure Computer Vision can extract information from any image and retrieves, identifying colors, objects, words, faces and more!
#
# See [here](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/#features) a few examples of how powerful this tool can be. In this case I'm using it for something very simple.
# +
image_desc = []
image_fgColor = []
image_bgColor = []

# Loop over every listing photo referenced by airbnb_listing.csv and
# collect the caption and dominant colors returned by the Azure
# Computer Vision API.
for image_url in airbnb_df['picture_url']:
    # Set image_url to the URL of an image that you want to analyze.
    data = {'url': image_url}
    response = requests.post(analyze_url, headers=headers, params=params, json=data)
    response.raise_for_status()
    analysis = response.json()
    #print(json.dumps(response.json()))
    try:
        # Read all three fields before appending anything, so a failure
        # part-way through cannot leave the lists with different lengths.
        # BUG FIX: the original appended inside the try block and then
        # `continue`d on error, which (a) could append the description
        # but not the colors, and (b) left the lists shorter than the
        # DataFrame, breaking the column assignments that follow.
        desc = analysis["description"]["captions"][0]["text"]
        fg = analysis["color"]["dominantColorForeground"]
        bg = analysis["color"]["dominantColorBackground"]
    except Exception:
        print('Error image_url: ' + image_url)
        desc, fg, bg = None, None, None
    image_desc.append(desc)
    image_fgColor.append(fg)
    image_bgColor.append(bg)
# -
# Attach the collected API results as new DataFrame columns.
# NOTE(review): these assignments require the three lists to be exactly
# as long as the DataFrame; any row skipped in the loop above breaks it.
airbnb_df['image_description'] = image_desc
airbnb_df['image_dominantfgColor'] = image_fgColor
airbnb_df['image_dominantbgColor'] = image_bgColor
airbnb_df.head(5)
# len(image_desc)
airbnb_df.to_csv ('airbnb_listing_imagereco.csv', index = None, header=True)
# At this point the image recognition process is now complete. It is time to proceed with the sentiment analysis part.
#
# With this we are all set to start the analysis!
| _ipynb/airbnb_imageRecognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: AI
# language: python
# name: ai
# ---
# +
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import Adam
from torchvision import datasets, transforms
USE_CUDA = False
# -
class Mnist:
    """MNIST train/test DataLoaders.

    Downloads the dataset into ../data on first use; images are
    converted to tensors and normalized with the standard MNIST
    mean (0.1307) and std (0.3081).
    """
    def __init__(self, batch_size):
        dataset_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train_dataset = datasets.MNIST('../data', train=True, download=True, transform=dataset_transform)
        test_dataset = datasets.MNIST('../data', train=False, download=True, transform=dataset_transform)
        # Both loaders shuffle (shuffling the test split is harmless here).
        self.train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        self.test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
class ConvLayer(nn.Module):
    """A plain 2-D convolution followed by a ReLU.

    Maps (batch, in_channels, H, W) input to
    (batch, out_channels, H - kernel_size + 1, W - kernel_size + 1)
    feature maps (stride 1, no padding).
    """

    def __init__(self, in_channels=1, out_channels=256, kernel_size=9):
        super(ConvLayer, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
        )

    def forward(self, x):
        # Convolve, then clamp negative activations to zero.
        return F.relu(self.conv(x))
class PrimaryCaps(nn.Module):
    """First capsule layer: `num_capsules` parallel conv maps whose
    outputs are stacked and read as capsule vectors.

    With the default sizes (20x20x256 feature maps from ConvLayer on
    28x28 MNIST), each conv yields 32x6x6 outputs, giving
    32*6*6 = 1152 capsules of dimension `num_capsules`.
    """
    def __init__(self, num_capsules=8, in_channels=256, out_channels=32, kernel_size=9):
        super(PrimaryCaps, self).__init__()
        # One conv per capsule dimension; stride 2 downsamples 20x20 -> 6x6.
        self.capsules = nn.ModuleList([
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=2, padding=0)
            for _ in range(num_capsules)])

    def forward(self, x):
        # Stack the per-dimension maps, then flatten the spatial grid so
        # each row is one capsule vector.
        # NOTE(review): the view below hard-codes 32 * 6 * 6, so this
        # layer only works with the default sizes described above.
        u = [capsule(x) for capsule in self.capsules]
        u = torch.stack(u, dim=1)
        u = u.view(x.size(0), 32 * 6 * 6, -1)
        return self.squash(u)

    def squash(self, input_tensor):
        """Capsule squashing non-linearity over the last axis: scales a
        vector v by |v|^2 / ((1 + |v|^2) * |v|), shrinking its length
        into [0, 1) while preserving its direction."""
        squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
        output_tensor = squared_norm * input_tensor / ((1. + squared_norm) * torch.sqrt(squared_norm))
        return output_tensor
class DigitCaps(nn.Module):
    """Second capsule layer: maps the 1152 primary capsules (8-D) to ten
    16-D digit capsules using dynamic routing-by-agreement."""

    def __init__(self, num_capsules=10, num_routes=32 * 6 * 6, in_channels=8, out_channels=16):
        super(DigitCaps, self).__init__()
        self.in_channels = in_channels
        self.num_routes = num_routes
        self.num_capsules = num_capsules
        # One (out_channels x in_channels) transform per (route, digit) pair.
        self.W = nn.Parameter(torch.randn(1, num_routes, num_capsules, out_channels, in_channels))

    def forward(self, x):
        batch_size = x.size(0)
        # Replicate each primary capsule once per digit capsule, then
        # compute the prediction vectors u_hat = W u.
        x = torch.stack([x] * self.num_capsules, dim=2).unsqueeze(4)
        W = torch.cat([self.W] * batch_size, dim=0)
        u_hat = torch.matmul(W, x)

        # Routing logits b_ij start at zero (uniform coupling).
        b_ij = Variable(torch.zeros(1, self.num_routes, self.num_capsules, 1))
        if USE_CUDA:
            b_ij = b_ij.cuda()

        num_iterations = 3
        for iteration in range(num_iterations):
            # Coupling coefficients from the logits.
            # NOTE(review): F.softmax is called without an explicit dim;
            # which axis is used depends on the installed torch version --
            # confirm it matches the intended digit-capsule axis.
            c_ij = F.softmax(b_ij)
            c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(4)

            # Weighted sum of prediction vectors, then squash.
            s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)
            v_j = self.squash(s_j)

            if iteration < num_iterations - 1:
                # Raise the logit of routes whose predictions agree
                # (dot product) with the current output v_j.
                a_ij = torch.matmul(u_hat.transpose(3, 4), torch.cat([v_j] * self.num_routes, dim=1))
                b_ij = b_ij + a_ij.squeeze(4).mean(dim=0, keepdim=True)

        return v_j.squeeze(1)

    def squash(self, input_tensor):
        """Same squashing non-linearity as PrimaryCaps.squash."""
        squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
        output_tensor = squared_norm * input_tensor / ((1. + squared_norm) * torch.sqrt(squared_norm))
        return output_tensor
class Decoder(nn.Module):
    """Reconstructs the 28x28 input image from the winning digit capsule
    via a 3-layer MLP (16*10 -> 512 -> 1024 -> 784, sigmoid output)."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.reconstraction_layers = nn.Sequential(
            nn.Linear(16 * 10, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 784),
            nn.Sigmoid()
        )

    def forward(self, x, data):
        # Capsule lengths act as class scores.
        classes = torch.sqrt((x ** 2).sum(2))
        classes = F.softmax(classes)  # NOTE(review): no explicit dim -- see DigitCaps
        _, max_length_indices = classes.max(dim=1)
        # Build a one-hot mask selecting only the longest (predicted) capsule.
        masked = Variable(torch.eye(10))
        if USE_CUDA:
            masked = masked.cuda()
        masked = masked.index_select(dim=0, index=max_length_indices.squeeze(1).data)
        # Zero out every other capsule and decode the remainder to an image.
        reconstructions = self.reconstraction_layers((x * masked[:, :, None, None]).view(x.size(0), -1))
        reconstructions = reconstructions.view(-1, 1, 28, 28)
        return reconstructions, masked
class CapsNet(nn.Module):
    """Full capsule network: conv -> primary capsules -> digit capsules,
    plus a reconstruction decoder used as a regulariser."""

    def __init__(self):
        super(CapsNet, self).__init__()
        self.conv_layer = ConvLayer()
        self.primary_capsules = PrimaryCaps()
        self.digit_capsules = DigitCaps()
        self.decoder = Decoder()
        self.mse_loss = nn.MSELoss()

    def forward(self, data):
        # Returns the digit-capsule outputs, the decoded image, and the
        # one-hot mask of the predicted class.
        output = self.digit_capsules(self.primary_capsules(self.conv_layer(data)))
        reconstructions, masked = self.decoder(output, data)
        return output, reconstructions, masked

    def loss(self, data, x, target, reconstructions):
        # Total loss = margin loss + (down-weighted) reconstruction loss.
        return self.margin_loss(x, target) + self.reconstruction_loss(data, reconstructions)

    def margin_loss(self, x, labels, size_average=True):
        """Capsule margin loss with m+ = 0.9, m- = 0.1, lambda = 0.5;
        `labels` are one-hot.  NOTE(review): the hinge terms are not
        squared here -- confirm whether that deviation is intended."""
        batch_size = x.size(0)
        # Capsule lengths are the class confidences.
        v_c = torch.sqrt((x**2).sum(dim=2, keepdim=True))
        # Penalise a short true-class capsule / long wrong-class capsules.
        left = F.relu(0.9 - v_c).view(batch_size, -1)
        right = F.relu(v_c - 0.1).view(batch_size, -1)
        loss = labels * left + 0.5 * (1.0 - labels) * right
        loss = loss.sum(dim=1).mean()
        return loss

    def reconstruction_loss(self, data, reconstructions):
        # MSE between decoded and input images, scaled down (0.0005) so
        # it regularises rather than dominates the margin loss.
        loss = self.mse_loss(reconstructions.view(reconstructions.size(0), -1), data.view(reconstructions.size(0), -1))
        return loss * 0.0005
# Instantiate the network and the optimiser (Adam with default settings).
capsule_net = CapsNet()
if USE_CUDA:
    capsule_net = capsule_net.cuda()
optimizer = Adam(capsule_net.parameters())
# +
batch_size = 100
mnist = Mnist(batch_size)

n_epochs = 1

# One epoch of training, followed by an evaluation pass on the test split.
for epoch in range(n_epochs):
    capsule_net.train()
    train_loss = 0
    for batch_id, (data, target) in enumerate(mnist.train_loader):
        # One-hot encode the integer labels.
        target = torch.eye(10).index_select(dim=0, index=target)
        data, target = Variable(data), Variable(target)

        if USE_CUDA:
            data, target = data.cuda(), target.cuda()

        optimizer.zero_grad()
        output, reconstructions, masked = capsule_net(data)
        loss = capsule_net.loss(data, output, target, reconstructions)
        loss.backward()
        optimizer.step()

        # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom; on
        # modern torch this raises and should be loss.item().
        train_loss += loss.data[0]

        if batch_id % 100 == 0:
            # Batch accuracy: predicted (masked) class vs true label.
            print("train accuracy:", sum(np.argmax(masked.data.cpu().numpy(), 1) ==
                                         np.argmax(target.data.cpu().numpy(), 1)) / float(batch_size))
            print('target shape: ', target.size())

    print(train_loss / len(mnist.train_loader))

    capsule_net.eval()
    test_loss = 0
    for batch_id, (data, target) in enumerate(mnist.test_loader):
        target = torch.eye(10).index_select(dim=0, index=target)
        data, target = Variable(data), Variable(target)

        if USE_CUDA:
            data, target = data.cuda(), target.cuda()

        output, reconstructions, masked = capsule_net(data)
        loss = capsule_net.loss(data, output, target, reconstructions)

        test_loss += loss.data[0]

        if batch_id % 100 == 0:
            print("test accuracy:", sum(np.argmax(masked.data.cpu().numpy(), 1) ==
                                        np.argmax(target.data.cpu().numpy(), 1)) / float(batch_size))

    print(test_loss / len(mnist.test_loader))
# +
import matplotlib
import matplotlib.pyplot as plt
def plot_images_separately(images):
    """Plot the first six MNIST images side by side, without axis ticks.

    Args:
        images: indexable collection of at least six 2-D arrays.
    """
    fig = plt.figure()
    # BUG FIX: the original used Python 2's `xrange`, which raises
    # NameError under the Python 3 kernel this notebook runs on.
    for j in range(1, 7):
        ax = fig.add_subplot(1, 6, j)
        ax.matshow(images[j-1], cmap=matplotlib.cm.binary)
        plt.xticks(np.array([]))
        plt.yticks(np.array([]))
    plt.show()
# -
plot_images_separately(data[:6,0].data.cpu().numpy())
plot_images_separately(reconstructions[:6,0].data.cpu().numpy())
| .ipynb_checkpoints/Capsule Network-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Course 2 week 1 lecture notebook Exercise 04
# # Concordance index
# In this week's graded assignment, you will implement the concordance index (c-index). To get some practice with what you've seen in lecture, and to prepare for this week's assignment, you will write code to find permissible pairs, concordant pairs, and risk ties.
#
# First start by importing packages and generating a small dataset. The data is small enough that you can visually check the pairs of patients.
# import packages
import pandas as pd
# ### Define the outcome `y`
#
# - You will let `y` refer to the actual health outcome of the patient.
# - 1 indicates disease, 0 indicates health (normal)
# define 'y', the actual outcome for each of the 5 patients
# (1 = disease, 0 = healthy).
y = pd.Series([0,0,1,1,0])
y.name="health"
y
# ### Define the risk scores
# Define some risk scores that some model might produce for each patient. Normally, you would run the patient features through a risk model to create these risk scores. For practice, you will use the following values in the next cell.
# Define the risk scores for each patient
# NOTE(review): `y` above defines 5 patients but only 4 risk scores are
# listed here, so patient 4 has no score -- TODO confirm whether a fifth
# value is missing from this list.
risk_score = pd.Series([2.2, 3.3, 4.4, 4.4])
risk_score.name='risk score'
risk_score
# ### Identify a permissible pair
# A pair of patients is permissible if their outcomes are different. Use code to compare the labels.
# Check patients 0 and 1 make a permissible pair.
# (A pair is permissible when the two patients' outcomes differ.)
if y[0] != y[1]:
    print(f"y[0]={y[0]} and y[1]={y[1]} is a permissible pair")
else:
    print(f"y[0]={y[0]} and y[1]={y[1]} is not a permissible pair")

# Check if patients 0 and 2 make a permissible pair
if y[0] != y[2]:
    print(f"y[0]={y[0]} and y[2]={y[2]} is a permissible pair")
else:
    print(f"y[0]={y[0]} and y[2]={y[2]} is NOT permissible pair")
# ### Check for risk ties
# - For permissible pairs, check if they have the same risk score
# Check if patients 2 and 3 make a risk tie
# (a permissible pair whose risk scores are exactly equal).
if risk_score[2] == risk_score[3]:
    print(f"patient 2 ({risk_score[2]}) and patient 3 ({risk_score[3]}) have a risk tie")
else:
    print(f"patient 2 ({risk_score[2]}) and patient 3 ({risk_score[3]}) DO NOT have a risk tie")
# ### Concordant pairs
# - Check if a permissible pair is also a concordant pair
# - You'll check one case, where the first patient is healthy and the second has the disease.
# Check if patient 1 and 2 make a concordant pair: patient 1 healthy,
# patient 2 diseased, and the diseased patient has the higher risk score.
if y[1] == 0 and y[2] == 1:
    if risk_score[1] < risk_score[2]:
        print(f"patient 1 and 2 is a concordant pair")
# - Note that you checked the situation where patient 1 is healthy and patient 2 has the disease.
# - You should also check the other situation where patient 1 has the disease and patient 2 is healthy.
#
# You'll practice implementing the complete algorithm for c-index in this week's assignment!
# ### This is the end of this practice section.
#
# Please continue on with the lecture videos!
#
# ---
| AI_for_Medical_Prognosis/Week1/Ungraded_Exercises/C2_W1_lecture_ex_04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Usetex Demo
#
#
# Shows how to use latex in a plot.
#
# Also refer to the :doc:`/tutorials/text/usetex` guide.
#
# +
import numpy as np
import matplotlib.pyplot as plt

# Render all figure text with LaTeX (requires a working TeX install).
plt.rc('text', usetex=True)

# interface tracking profiles
N = 500
delta = 0.6
X = np.linspace(-1, 1, N)
plt.plot(X, (1 - np.tanh(4 * X / delta)) / 2,    # phase field tanh profiles
         X, (1.4 + np.tanh(4 * X / delta)) / 4, "C2",  # composition profile
         X, X < 0, 'k--')                        # sharp interface

# legend
plt.legend(('phase field', 'level set', 'sharp interface'),
           shadow=True, loc=(0.01, 0.48), handlelength=1.5, fontsize=16)

# the arrow marking the interface width delta
plt.annotate("", xy=(-delta / 2., 0.1), xytext=(delta / 2., 0.1),
             arrowprops=dict(arrowstyle="<->", connectionstyle="arc3"))
plt.text(0, 0.1, r'$\delta$',
         {'color': 'black', 'fontsize': 24, 'ha': 'center', 'va': 'center',
          'bbox': dict(boxstyle="round", fc="white", ec="black", pad=0.2)})

# Use tex in labels
plt.xticks((-1, 0, 1), ('$-1$', r'$\pm 0$', '$+1$'), color='k', size=20)

# Left Y-axis labels, combine math mode and text mode
plt.ylabel(r'\bf{phase field} $\phi$', {'color': 'C0', 'fontsize': 20})
plt.yticks((0, 0.5, 1), (r'\bf{0}', r'\bf{.5}', r'\bf{1}'), color='k', size=20)

# Right Y-axis labels
plt.text(1.02, 0.5, r"\bf{level set} $\phi$", {'color': 'C2', 'fontsize': 20},
         horizontalalignment='left',
         verticalalignment='center',
         rotation=90,
         clip_on=False,
         transform=plt.gca().transAxes)

# Use multiline environment inside a `text`.
# level set equations
eq1 = r"\begin{eqnarray*}" + \
      r"|\nabla\phi| &=& 1,\\" + \
      r"\frac{\partial \phi}{\partial t} + U|\nabla \phi| &=& 0 " + \
      r"\end{eqnarray*}"
plt.text(1, 0.9, eq1, {'color': 'C2', 'fontsize': 18}, va="top", ha="right")

# phase field equations
eq2 = r'\begin{eqnarray*}' + \
      r'\mathcal{F} &=& \int f\left( \phi, c \right) dV, \\ ' + \
      r'\frac{ \partial \phi } { \partial t } &=& -M_{ \phi } ' + \
      r'\frac{ \delta \mathcal{F} } { \delta \phi }' + \
      r'\end{eqnarray*}'
plt.text(0.18, 0.18, eq2, {'color': 'C0', 'fontsize': 16})

plt.text(-1, .30, r'gamma: $\gamma$', {'color': 'r', 'fontsize': 20})
plt.text(-1, .18, r'Omega: $\Omega$', {'color': 'b', 'fontsize': 20})

plt.show()
| usetext_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="aF3bZfc3tyv-"
# Implement neural network from scratch using python for the following datasets and predict the values for the following datasets:
# 1. Boston House prices dataset: https://www.kaggle.com/vikrishnan/boston-house-prices
#
# -
# ### Importing Libraries
import pandas as pd
import numpy as np
import io
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# %matplotlib inline
# ### Reading the Dataset
column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df= pd.read_csv('housing.csv', header=None, delimiter=r"\s+", names=column_names) #load dataset
m=df.shape[0] # m--> number of rows
n=df.shape[1] # n--> number of columns
df
# +
sc = StandardScaler()
df.iloc[:,:]=sc.fit_transform(df.iloc[:,:])
x_train, x_test, y_train, y_test = [i.to_numpy() for i in train_test_split(df.iloc[:,:n-1],df.iloc[:,-1],test_size=0.2)]
# 80-20 ratio split of the data and convertion to numpy array
print("Size of train_x : {}".format(x_train.shape))
print("Size of train_y : {}".format(y_train.shape))
print("Size of test_x : {}".format(x_test.shape))
print("Size of test_y : {}".format(y_test.shape))
# -
# ### Define Relu Function
# +
def relu(z):
    """Rectified linear unit: elementwise max(z, 0)."""
    return np.maximum(z, 0)
print("Relu(5) : {}".format(relu([5,-2,3,-1,4,0,-3])))
# -
# ### Plotting Relu
plt.plot(np.linspace(-50,100,5000),relu(np.linspace(-50,100,5000)),c = 'orange')
# ### Intializing Parameters of the Network
# +
def initialize_params(layer_sizes):
    """Create small random parameters for a fully-connected network.

    Returns a dict with one weight matrix 'W<i>' of shape
    (fan_out, fan_in) and one bias column 'B<i>' of shape (fan_out, 1)
    per transition between consecutive entries of `layer_sizes`.
    """
    params = {}
    # Pair up consecutive layer widths: (fan_in, fan_out) per layer.
    for layer, (fan_in, fan_out) in enumerate(zip(layer_sizes, layer_sizes[1:]), start=1):
        params['W%d' % layer] = np.random.randn(fan_out, fan_in) * 0.01
        params['B%d' % layer] = np.random.randn(fan_out, 1) * 0.01
    return params
params_temp = initialize_params([13,0,20,10,1])
params_temp
# -
# ### Forward Propagation
# +
def forward_propagation(X_train, params):
    """Run the network forward.

    Args:
        X_train: input matrix of shape (n_features, n_samples).
        params: dict of weights 'W<i>' and biases 'B<i>'.

    Returns:
        dict with pre-activations 'Z<i>' and activations 'A<i>' for
        every layer, cached for backpropagation.
    """
    # BUG FIX: derive the layer count from `params` (one W and one B per
    # layer) instead of reading the global `layer_sizes`, so the
    # function works standalone.
    layers = len(params) // 2
    values = {}
    for i in range(1, layers+1):
        if i==1:
            # First layer consumes the raw input.
            values['Z' + str(i)] = np.dot(params['W' + str(i)], X_train) + params['B' + str(i)]
            # NOTE: with a single-layer network this branch applies ReLU
            # even to the output -- preserved from the original.
            values['A' + str(i)] = relu(values['Z' + str(i)])
        else:
            values['Z' + str(i)] = np.dot(params['W' + str(i)], values['A' + str(i-1)]) + params['B' + str(i)]
            if i==layers:
                # Output layer is linear (regression head): no ReLU.
                values['A' + str(i)] = values['Z' + str(i)]
            else:
                values['A' + str(i)] = relu(values['Z' + str(i)])
    return values
# val = forward_propagation(x_train.T,params_temp)
# -
# ### Compute Cost
def compute_cost(values, Y_train):
    """Half mean squared error between network output and targets.

    Args:
        values: forward_propagation cache; the prediction is the last
            activation 'A<layers>'.
        Y_train: target vector.

    Returns:
        Scalar cost 1/(2*len(Y_train)) * sum((Y_pred - Y_train)^2).
    """
    # BUG FIX: infer the layer count from `values` (one Z and one A per
    # layer) instead of relying on the global `layer_sizes`.
    layers = len(values) // 2
    Y_pred = values['A' + str(layers)]
    cost = 1/(2*len(Y_train)) * (np.sum(np.square(Y_pred - Y_train)))
    return cost
# ### Back Prop
def backward_propagation(params, values, X_train, Y_train):
    """Compute gradients of the cost w.r.t. every weight and bias.

    Args:
        params: dict of 'W<i>'/'B<i>' parameters.
        values: cached 'Z<i>'/'A<i>' from forward_propagation.
        X_train: inputs, shape (n_features, n_samples).
        Y_train: targets.

    Returns:
        dict of gradients keyed like `params`.
    """
    # BUG FIX: derive the layer count from `params` rather than the
    # global `layer_sizes`.
    layers = len(params) // 2
    m = len(Y_train)
    grads = {}
    for i in range(layers,0,-1):
        if i==layers:
            # Output layer is linear, so dZ equals dA.
            # NOTE(review): dA already carries a 1/m factor and the
            # dW/dB lines divide by m again, scaling the gradients by
            # 1/m^2 vs. the textbook formula; direction is unchanged,
            # so this is preserved (the learning rate was tuned to it).
            dA = 1/m * (values['A' + str(i)] - Y_train)
            dZ = dA
        else:
            # Propagate through the next layer's weights ...
            dA = np.dot(params['W' + str(i+1)].T, dZ)
            # ... and through the ReLU.
            # BUG FIX: the original masked with A >= 0, but A = relu(Z)
            # is always >= 0, so the mask was all ones and the ReLU was
            # effectively ignored in the backward pass.  Use Z > 0.
            dZ = np.multiply(dA, np.where(values['Z' + str(i)] > 0, 1, 0))
        if i==1:
            grads['W' + str(i)] = 1/m * np.dot(dZ, X_train.T)
            grads['B' + str(i)] = 1/m * np.sum(dZ, axis=1, keepdims=True)
        else:
            grads['W' + str(i)] = 1/m * np.dot(dZ, values['A' + str(i-1)].T)
            grads['B' + str(i)] = 1/m * np.sum(dZ, axis=1, keepdims=True)
    return grads
# ### Update Parameters with Gradients
def update_params(params, grads, learning_rate):
    """Return a new parameter dict after one gradient-descent step:
    p <- p - learning_rate * grad for every W<i> and B<i>."""
    # BUG FIX: take the layer count from the dicts themselves (one W
    # and one B per layer), not the global `layer_sizes`.
    layers = len(params) // 2
    params_updated = {}
    for i in range(1,layers+1):
        params_updated['W' + str(i)] = params['W' + str(i)] - learning_rate * grads['W' + str(i)]
        params_updated['B' + str(i)] = params['B' + str(i)] - learning_rate * grads['B' + str(i)]
    return params_updated
# ### Compute Test Loss
def compute_test_loss(x_test,y_test, params):
    """Root-mean-squared error of the network on a held-out set.

    Args:
        x_test: test inputs, shape (n_samples, n_features).
        y_test: test targets.
        params: parameter dict for forward_propagation.
    """
    values_train = forward_propagation(x_test.T, params)
    # BUG FIX: locate the output activation via the number of cached
    # layers instead of the global `layer_sizes`.
    layers = len(values_train) // 2
    test_loss = np.sqrt(mean_squared_error(y_test, values_train['A' + str(layers)].T))
    return test_loss
# ### Predicition
def predict(X, params):
    """Return the network's predictions for the row-wise samples in X."""
    cache = forward_propagation(X.T, params)
    final_layer = len(cache) // 2
    return cache['A' + str(final_layer)].T
# ### Conjuring up a Neural Net Model
# + colab={"base_uri": "https://localhost:8080/"} id="wSqY-bZG9Z8A" outputId="efdbbaaa-2549-4662-c7dc-e37b071dac29"
def model(X_train, Y_train, layer_sizes, num_iters, learning_rate):
    """Train the network with full-batch gradient descent.

    Args:
        X_train: training inputs, shape (n_samples, n_features).
        Y_train: training targets.
        layer_sizes: list of layer widths, input to output.
        num_iters: number of gradient-descent iterations.
        learning_rate: initial step size (decayed by 0.95 every 100
            iterations after the first).

    Returns:
        The trained parameter dict.

    NOTE(review): reads the globals `x_test` and `y_test` for the test
    curve, and plots both loss trends at the end.
    """
    train_loss = []
    test_loss = []
    params = initialize_params(layer_sizes)
    for i in range(num_iters):
        values = forward_propagation(X_train.T, params)
        cost = compute_cost(values, Y_train.T)
        # Track held-out loss alongside the training cost each iteration.
        test_cost = compute_test_loss(x_test,y_test,params)
        train_loss.append(cost)
        test_loss.append(test_cost)
        grads = backward_propagation(params, values,X_train.T, Y_train.T)
        params = update_params(params, grads, learning_rate)
        if(i%100 == 0):
            print("Current Learning Rate is : {}".format(learning_rate))
            print('Cost at iteration ' + str(i+1) + ' = ' + str(cost) + '\n')
            if(i!=0):
                learning_rate = learning_rate*0.95  # learning rate scheduler
    fig, ax = plt.subplots(1,2, figsize = (15,8))
    ax[0].plot(range(num_iters),train_loss)
    ax[0].set_title("training loss trend")
    ax[1].plot(range(num_iters),test_loss, color = 'red')
    ax[1].set_title("test loss trend")
    return params
# +
layer_sizes = [13, 32, 64, 32, 8, 1] #set layer sizes ; size of the first and last layer must\
#be according to the features and expected output dimensions
num_iters = 5000 #set number of training iterations over
learning_rate = 0.1 #set learning rate for gradient descent
params = model(x_train, y_train, layer_sizes, num_iters, learning_rate) #train the model on the traingin data
test_rmse= compute_test_loss(x_test, y_test, params) #get training and test accuracy
print('Root Mean Squared Error on Testing Data = ' + str(test_rmse))
# -
# ### Thank You
| Assignment_8/197178_Assignment_8.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('myenv')
# language: python
# name: python3
# ---
# ## Solution using Grover search
#
# ### The problem
#
# Design a quantum circuit that finds the subsets of $[5,7,8,9,1]$ for which the elements add to $16$.
#
# ### Basic idea
#
# **Note:** I spend some time here going over my thought process as I approached this problem, and the first thing I thought of was not good. **To jump straight to the solution that worked, go [here](#another_cell)**.
#
# I can use one-hot encoding for the subsets, for example, the subset with the first and third elements can be represented as the vector $\ket{10100}$. I can generate a uniform superposition over all such vectors by a Hadamard transform on the state $\ket{00000}$. If I have an oracle that can compute the sum of a subset with such an encoding of the subset as input, an oracle call can write the sum of elements of the subset to a quantum register. I can then query this oracle with the uniform superposition over all such encodings and measure the 'answer' register. If the answer register reads $16$, then the qubits encoding the subset will have a possible subset that adds to $16$.
# We start by importing the required libraries and tools.
# +
import matplotlib.pyplot as plt
import numpy as np
import math
# importing Qiskit
import qiskit
from qiskit import QuantumCircuit, transpile, assemble, Aer
# import basic plot tools
from qiskit.visualization import plot_histogram
from qiskit.tools.visualization import circuit_drawer
# -
# We set two global variables. $N$ is the size of the input list and $B$ is the number of qubits required to encode each element of the list. In this case, even though the largest number in the list is $9$ which can be encoded using $4$ qubits, since I eventually want to compute the subset sums, I choose to represent the elements using $B = 5$ qubits. The extra qubit will contain any carry-overs.
# The input list, its length, and the qubits per encoded element.
# B = 5 leaves headroom above the largest element (9 needs 4 bits) so
# subset sums up to 30 fit without overflow.
LIST = [5,7,8,9,1]
N = len(LIST)
B = 5
# ### First Pass at the Oracle
#
# The oracle would contain $N$ query qubits and $N \times B + B$ ancilla qubits. The first $B \times N$ ancilla qubits would be memory qubits, each set of $B$ qubits initialized, using $X$ gates, to represent an encoding of the numbers in the list. The sum of the elements of the queried subset would be stored in the last $B$ ancilla qubits. The sum is computed using controlled Draper QFT Adders, where the uncontrolled version was from the Qiskit Circuit Library. Specifically, the adders are always between one of the $N$ sets of memory qubits and the sum qubits. There are $N$ adders, each controlled using one wire of the query qubits. If a control qubit is one, the circuit adds the corresponding number to the sum qubits. For now, I have kept $N+B$ classical bits to store the result of measuring the query qubits and the sum qubits.
# QFT-based in-place adder on two B-qubit registers; .control(1) prepends a
# single control wire that decides whether the addition happens at all.
from qiskit.circuit.library import DraperQFTAdder
controlled_adder = DraperQFTAdder(B).control(1)
# +
# First-pass oracle: qubits 0-4 are the query register, qubits 5-29 are five
# B-qubit memory registers holding the list values, and qubits 30-34 hold the
# running sum.  All registers are little-endian (LSB on the lowest qubit).
qc = QuantumCircuit(N + N*B + B, N+B)
# Uniform superposition over all 2^N subset encodings on the query qubits.
for i in range(N):
    qc.h([i])
# Initialize the memory registers to the binary encodings of the list values:
# 5 = 101 -> qubits 5,7; 7 = 111 -> 10,11,12; 8 = 1000 -> 18; 9 = 1001 -> 20,23;
# 1 = 1 -> 25.
qc.x([5])
qc.x([7])
qc.x([10])
qc.x([11])
qc.x([12])
qc.x([13])
qc.x([18])
qc.x([20])
qc.x([23])
# Bug fix: the register for the last element (value 1, qubits 25-29) was never
# initialized, leaving it encoding 0 instead of 1.
qc.x([25])
# For each element, add its memory register into the sum register, controlled
# on the corresponding query qubit.
qc.append(controlled_adder, [0,5,6,7,8,9,30,31,32,33,34])
qc.append(controlled_adder, [1,10,11,12,13,14,30,31,32,33,34])
qc.append(controlled_adder, [2,15,16,17,18,19,30,31,32,33,34])
qc.append(controlled_adder, [3,20,21,22,23,24,30,31,32,33,34])
qc.append(controlled_adder, [4,25,26,27,28,29,30,31,32,33,34])
qc.barrier()
# Read out the query qubits and the sum qubits.
qc.measure([0,1,2,3,4,30,31,32,33,34],range(N+B))
circuit_drawer(qc, output='mpl')
# -
# ... which seems to work fine, until I get to the next step: compiling and running the circuit.
# +
# Use Aer's qasm_simulator
backend_sim = Aer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator.
# NOTE(review): simulating 35 qubits is expected to exhaust memory here,
# which is exactly the failure discussed in the text below.
job_sim = backend_sim.run(transpile(qc, backend_sim), shots=1)
# Grab the results from the job.
result_sim = job_sim.result()
# -
# 😫
#
# Of course, that's hardly surprising. I just attempted to create a circuit with $35$ qubits. That's $34359738368$ possible states. Even if the coefficient of each state could be represented by just a single bit, that's over 4GB of memory! Clearly this is not the right way to create an oracle.
# ### Oracle, version 2
#
# <a id='another_cell'></a>
#
# This time, the oracle will contain just $N$ query qubits and $B + B$ ancilla qubits. Using the query qubits as control, I can write the elements of the list to the first $B$ ancilla qubits. Then I can use a Draper adder between the first and second sets of $B$ ancilla qubits - this will add the number in the first $B$ ancilla qubits to the second $B$ ancilla qubits. I then restore the first $B$ qubits to the $\ket{00000}$ state by applying the same controlled gates again, and proceed. Eventually, I get the subset sum in the last $B$ ancilla qubits. The oracle upto this point is as follows.
# Oracle, take two: qubits 0-4 are the query, 5-9 a scratch register, and
# 10-14 the running sum (all little-endian, LSB on the lowest qubit).  For
# each element: controlled-write it into scratch, add scratch into the sum,
# then write again to restore scratch to |00000>.
oracle = QuantumCircuit(N + B + B)
# Element 5 (binary 101): set scratch bits 0 and 2 (qubits 5 and 7).
oracle.cx(0,7)
oracle.cx(0,5)
oracle.append(DraperQFTAdder(B), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(0,7)
oracle.cx(0,5)
oracle.barrier()
# Element 7 (binary 111): scratch bits 0, 1 and 2.
oracle.cx(1,5)
oracle.cx(1,6)
oracle.cx(1,7)
oracle.append(DraperQFTAdder(B), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(1,5)
oracle.cx(1,6)
oracle.cx(1,7)
oracle.barrier()
# Element 8 (binary 1000): scratch bit 3.
oracle.cx(2,8)
oracle.append(DraperQFTAdder(B), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(2,8)
oracle.barrier()
# Element 9 (binary 1001): scratch bits 0 and 3.
oracle.cx(3,8)
oracle.cx(3,5)
oracle.append(DraperQFTAdder(B), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(3,8)
oracle.cx(3,5)
oracle.barrier()
# Element 1 (binary 1): scratch bit 0.
oracle.cx(4,5)
oracle.append(DraperQFTAdder(B), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(4,5)
oracle.barrier()
circuit_drawer(oracle, output='mpl')
# Before proceeding, let's test the oracle to ensure that it is actually working as intended. The test circuit creates a uniform superposition of queries, queries the oracle, measures the query and sum qubits and returns the result.
# Query the oracle with a uniform superposition over all 2^N subsets and
# measure both the query qubits and the sum qubits.
test_circ = QuantumCircuit(N+B+B,N+B)
for i in range(5):
    test_circ.h(i)
test_circ = test_circ.compose(oracle)
# Clbits 0-4 <- query qubits 0-4, clbits 5-9 <- sum qubits 10-14.
test_circ.measure((list(range(N)) + list(range(N+B,N+B+B))), range(N+B))
circuit_drawer(test_circ, output='mpl')
# I test the result so far as follows: from each sample from the circuit, I compute which subset was queried and what the result was, and match it with the expected result of the sum. If the computed result and the expected result match for each sample, we are good!
# +
# Use Aer's qasm_simulator
backend_sim = Aer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator.
job_sim = backend_sim.run(transpile(test_circ, backend_sim), shots=4096)
# Grab the results from the job.
result_sim = job_sim.result()
counts = result_sim.get_counts(test_circ)
# Cross-check every sampled bit string.  Qiskit prints the highest clbit
# first, so the first 5 characters of `item` are the sum register (MSB first)
# and the last 5 characters, read in reverse, are the query bits.  The dot
# product computes the classical subset sum for comparison.
flag = True
for item in counts.keys():
    num = 0
    comp = np.dot(LIST, [int(item[-(i+1)]) for i in range(5)])
    for id, c in enumerate(item[:5]):
        c = int(c)
        num += c * 2**(B - 1 - id)
    if num != comp:
        flag = False
        break
if not flag:
    print('Not working :(')
else:
    print('Working!')
# -
# I plot the outputs. Clearly, no one input dominates the rest. This is of course to be expected since we queried in equal superposition.
plot_histogram(result_sim.get_counts())
# ### Increasing the success probability using Grover search
#
# #### Creating the Grover oracle
#
# We have created some oracle, but right now if there are $m$ subsets that add up to $16$ (in this case $m=2$), the algorithm has only an $m/2^N$ chance of getting the correct answer. That is not good enough.
#
# The way I increase this is by using Grover search. Define a function $f$ such that takes (the encoding of) a subset as input, and returns $1$ if the sum of that subset is $16$ and zero otherwise. I will now construct an oracle $\mathcal{O}_f$ that has the following form:
#
# \begin{align*}
# \mathcal{O}_f \ket{x} \otimes \ket{0}^m = (-1)^{f(x)} \ket{x} \otimes \ket{0}^m
# \end{align*}
#
# I will take $m = 2B$. To do this, I start with the oracle I defined above and build on top of it.
#
# I first test if the sum qubits encode the number $16$. In binary, $16$ is $10000$, and so a classical Boolean formula to check this would be $(\neg a_0) \land (\neg a_1) \land (\neg a_2) \land (\neg a_3) \land a_4$. Inspired by this, I check this quantumly by applying an $X$ gate to the qubits representing $a_0$, $a_1$, $a_2$ and $a_3$, and take a multi-controlled $X$-gate onto one of the other ancilla qubits.
# Test whether the sum register encodes 16 = 10000b: flip qubits 10-13 (the
# four low bits, which must be 0) so that a match reads |11111> on qubits
# 10-14, then AND them onto scratch qubit 9 (back in |0> at this point).
oracle.x(10)
oracle.x(11)
oracle.x(12)
oracle.x(13)
oracle.mcx([10,11,12,13,14], 9)
# To now get the required phase shift, I do a controlled phase shift from the ancilla qubit I just used to the one of the sum qubits. By construction, I know that this ancilla qubit is $\ket{1}$ if and only if *all* of the sum qubits are $\ket{1}$, and so this will add a phase if and only if the sum qubits encode $16$.
oracle.cp(math.pi, 9, 14)
# Now, I need to uncompute everything in the ancilla qubits to get it into the form I want. This is not hard, since most of the gates are self-inverses. The uncomputation is accomplished in the following.
# Undo the comparison (mcx and X are self-inverse).
oracle.mcx([10,11,12,13,14], 9)
oracle.x(10)
oracle.x(11)
oracle.x(12)
oracle.x(13)
oracle.barrier()
# Undo the five additions in reverse order (element 1, then 9, 8, 7, 5),
# using inverse adders, so every ancilla qubit returns to |0>.
oracle.cx(4,5)
oracle.append(DraperQFTAdder(B).inverse(), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(4,5)
oracle.barrier()
oracle.cx(3,8)
oracle.cx(3,5)
oracle.append(DraperQFTAdder(B).inverse(), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(3,8)
oracle.cx(3,5)
oracle.barrier()
oracle.cx(2,8)
oracle.append(DraperQFTAdder(B).inverse(), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(2,8)
oracle.barrier()
oracle.cx(1,5)
oracle.cx(1,6)
oracle.cx(1,7)
oracle.append(DraperQFTAdder(B).inverse(), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(1,5)
oracle.cx(1,6)
oracle.cx(1,7)
oracle.barrier()
oracle.cx(0,7)
oracle.cx(0,5)
oracle.append(DraperQFTAdder(B).inverse(), [5,6,7,8,9,10,11,12,13,14])
oracle.cx(0,7)
oracle.cx(0,5)
oracle.barrier()
# The full circuit is as follows.
circuit_drawer(oracle, output='mpl')
# I will now put the entire oracle into a function. It takes two integer parameters, $n$ and $b$, which play the same roles as $N$ and $B$. I also turned the oracle into a composite gate that I can then stick into other circuits.
# +
def oracle(n: int, b: int, values=(5, 7, 8, 9, 1), target=16) -> QuantumCircuit:
    """Phase oracle for the subset-sum problem.

    Qubit layout: qubits [0, n) hold the query (the subset encoding, one
    qubit per list element), qubits [n, n+b) are a scratch register used for
    controlled writes, and qubits [n+b, n+2b) accumulate the running sum.
    Both ancilla registers are little-endian.  The oracle flips the phase of
    exactly those query basis states whose subset sums to ``target`` and
    returns every ancilla qubit to |0>.

    The circuit was previously hard-coded to the example list [5, 7, 8, 9, 1]
    and target 16; those are now the defaults, so existing calls such as
    ``oracle(5, 5)`` behave exactly as before.

    # Arguments
    - n: number of query qubits; must equal ``len(values)``.
    - b: qubits per number; must be wide enough to hold any subset sum.
    - values: the list of numbers, one per query qubit.
    - target: the sum to mark with a phase of -1.

    # Throws
    - ValueError: if ``len(values) != n``.
    """
    if len(values) != n:
        raise ValueError("need exactly one value per query qubit")
    circ = QuantumCircuit(n + b + b)
    scratch = list(range(n, n + b))        # work register, LSB first
    total = list(range(n + b, n + 2 * b))  # sum register, LSB first
    flag = scratch[-1]                     # guaranteed |0> when the sum is tested

    def write(ctrl, value):
        # Controlled-write `value` into the scratch register; self-inverse,
        # so applying it twice restores scratch to |0...0>.
        for bit in range(b):
            if (value >> bit) & 1:
                circ.cx(ctrl, n + bit)

    # Accumulate: conditionally write each element, add it into the sum
    # register, then un-write it.
    for i, value in enumerate(values):
        write(i, value)
        circ.append(DraperQFTAdder(b), scratch + total)
        write(i, value)

    # Phase flip iff the sum register encodes `target`: X the qubits whose
    # target bit is 0 so that a match reads |1...1>, AND everything onto the
    # flag qubit, then apply a controlled phase of pi.  The flag being set
    # already implies the top sum qubit is |1>, so the cp always fires on a
    # match.
    zero_bits = [total[j] for j in range(b) if not (target >> j) & 1]
    for q in zero_bits:
        circ.x(q)
    circ.mcx(total, flag)
    circ.cp(math.pi, flag, total[-1])
    # Uncompute the comparison (mcx and X are self-inverse).
    circ.mcx(total, flag)
    for q in zero_bits:
        circ.x(q)

    # Undo the additions in reverse order so the sum register ends in |0>.
    for i in reversed(range(n)):
        value = values[i]
        write(i, value)
        circ.append(DraperQFTAdder(b).inverse(), scratch + total)
        write(i, value)
    return circ
# Compile the oracle once and wrap it as a reusable composite gate.
o = transpile(oracle(5,5), backend=Aer.get_backend('statevector_simulator'))
o = o.to_gate(label='Oracle')
# -
# Let me test this oracle to see if it is functioning as required. To do this, I use $X$ gates to control what is being queried, and use the `statevector_simulator` to see the output state vector. I first check with the subset $[7,9]$, encoded as $01010$, that I know sums to $16$.
# +
# Query the subset [7, 9] (encoding 01010: qubits 1 and 3 set), which sums
# to 16, and inspect the resulting statevector for the expected -1 phase.
circ = QuantumCircuit(N+B+B)
circ.x(1)
circ.x(3)
circ.append(o, range(N+B+B))
backend = Aer.get_backend('statevector_simulator')
circ = transpile(circ, backend)
job = backend.run(circ)
result = job.result()
outputstate = result.get_statevector(circ, decimals=3)
d = outputstate.to_dict()
print(d)
# -
# We got the phase of $-1$, so that works! Now I check with a subset, let's say $[5,7,8]$, encoded as $11100$, that does *not* sum to $16$, and see the result.
# +
# Query the subset [5, 7, 8] (encoding 11100: qubits 0-2 set), which sums to
# 20, and verify that no phase is applied.
circ = QuantumCircuit(N+B+B)
circ.x(0)
circ.x(1)
circ.x(2)
circ.append(o, range(N+B+B))
backend = Aer.get_backend('statevector_simulator')
circ = transpile(circ, backend)
job = backend.run(circ)
result = job.result()
outputstate = result.get_statevector(circ, decimals=3)
d = outputstate.to_dict()
print(d)
# -
# Here there's no phase, so that also works! We can check similarly with other subsets. Clearly if it works for these basis states, it also works under superposition.
# #### The inversion around mean circuit
#
# The other ingredient that goes into the Grover iteration is the inversion around mean circuit, the key ingredient of which is the conditional phase-shift circuit $\mathcal{O}_{\phi}$ that has the following action:
#
# \begin{align*}
# \mathcal{O}_{\phi} \ket{x}=
# \begin{cases}
# \ket{x} & \text{if } x = 0,\\
# - \ket{x} & \text{if } x \neq 0.
# \end{cases}
# \end{align*}
# This is a standard circuit that can be implemented as follows using $X$ gates, $H$ gates and a multicontrolled $X$ gate. I also turned the oracle into a composite gate that I can then stick into other circuits.
def conditional_ps(n: int) -> QuantumCircuit:
    """Conditional phase shift on n qubits.

    Flips the sign of every computational-basis state except |0...0>
    (up to a global phase).  The X conjugation maps |0...0> to |1...1>,
    and the H-mcx-H sandwich acts as a multi-controlled Z on the last
    qubit, so only the all-zeros state is singled out.
    """
    shift = QuantumCircuit(n)
    last = n - 1
    controls = list(range(last))
    everything = controls + [last]
    shift.x(everything)
    shift.h(last)
    shift.mcx(controls, last)
    shift.h(last)
    shift.x(everything)
    return shift
# Build the 5-qubit conditional phase shift and wrap it as a composite gate.
circ = conditional_ps(5)
cp = circ.to_instruction(label='cond-p-shift')
circuit_drawer(circ, output='mpl')
# Using this, the inversion around mean circuit can be defined as follows.
# +
def inv_around_mean(n: int) -> QuantumCircuit:
    """Grover diffusion operator: reflection about the mean amplitude.

    Implemented as H^(x n) . (conditional phase shift) . H^(x n), the
    standard inversion-about-mean construction.
    """
    diffusion = QuantumCircuit(n)
    phase_gate = conditional_ps(n).to_gate(label='cond-p-shift')
    all_qubits = list(range(n))
    diffusion.h(all_qubits)
    diffusion.append(phase_gate, all_qubits)
    diffusion.h(all_qubits)
    return diffusion
# Build the 5-qubit diffusion operator and wrap it as a composite gate.
circ = inv_around_mean(5)
im = circ.to_gate(label='inv-mean')
# -
# With all the pieces in place, we simply run the Grover iteration. Since there are $2$ marked items and $32$ total possibilities, approximately $\sqrt{32/2} = 4$ iterations will suffice. In my testing, I found that $3$ iterations gave the best answer.
# Grover search: uniform superposition on the 5 query qubits, then three
# rounds of oracle followed by diffusion, then measure the query qubits.
main_circuit = QuantumCircuit(15,5)
for i in range(5):
    main_circuit.h(i)
for _ in range(3):
    main_circuit.append(o, range(15))
    main_circuit.append(im, range(5))
main_circuit.measure(range(5), range(5))
circuit_drawer(main_circuit, output='mpl')
# We now test this circuit by running it and sampling the first $N$ qubits.
# Sample the Grover circuit 4096 times and plot the outcome distribution.
backend_sim = Aer.get_backend('qasm_simulator')
job_sim = backend_sim.run(transpile(main_circuit, backend_sim), shots=4096)
result_sim = job_sim.result()
counts = result_sim.get_counts(main_circuit)
plot_histogram(result_sim.get_counts())
# With some run-to-run variance, the success probability is now around $96\%$!
# Most frequent outcome (Qiskit prints the highest clbit first, so the
# string is the subset encoding reversed).
measured_str = max(counts, key=counts.get)
measured_str
# Note the way indexing in Qiskit works. In our encoding, the subset indicated is denoted by $01101$ and is the subset $[7,8,1]$. The subset $[7,9]$ is the other highly probable result.
# Empirical success probability: total mass on the two marked subsets.
print((counts['10110'] + counts['01010'])/4096)
# ### Possible improvements
#
# 1. *A priori* we don't know how many solutions there are to the search problem, and therefore we don't know how many times we need to run the Grover iterations. However, this is not hard to fix because the quantum counting algorithm does exactly this and can be run before running the above circuit to estimate the number of solutions.
# 2. I have used ten ancillary qubits to create the oracle, which, even though less than the 35 I needed in the first attempt, still seems a little excessive. An interesting problem is to try and do it using less qubits, though that might have the side effect of increasing the depth of the circuit.
# 3. The depth of each Grover iteration is also very high, partly because of how I constructed the oracle. High depth circuits are difficult to implement, so it would be nice to reduce the depth of the circuit (though algorithms such as these are not expected to run on near-term hardware anyway)
# 4. I have currently hard-coded the circuits based on the specific example that we had to calculate. The way I have made the circuits, however, this can be easily fixed.
| book/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <style>
# .rendered_html * + ul {
# margin-top: 0.5em;
# }
# div.text_cell_render {
# padding: 0.0em 0.0em 0.0em 0.0em;
# }
# .reveal p {
# margin: 20px 10px;
# line-height: 1.3;
# }
# html, body, .reveal div, .reveal span, .reveal applet, .reveal object, .reveal iframe, .reveal h1, .reveal h2, .reveal h3, .reveal h4, .reveal h5, .reveal h6, .reveal p, .reveal blockquote, .reveal pre, .reveal a, .reveal abbr, .reveal acronym, .reveal address, .reveal big, .reveal cite, .reveal code, .reveal del, .reveal dfn, .reveal em, .reveal img, .reveal ins, .reveal kbd, .reveal q, .reveal s, .reveal samp, .reveal small, .reveal strike, .reveal strong, .reveal sub, .reveal sup, .reveal tt, .reveal var, .reveal b, .reveal u, .reveal center, .reveal dl, .reveal dt, .reveal dd, .reveal ol, .reveal ul, .reveal li, .reveal fieldset, .reveal form, .reveal label, .reveal legend, .reveal table, .reveal caption, .reveal tbody, .reveal tfoot, .reveal thead, .reveal tr, .reveal th, .reveal td, .reveal article, .reveal aside, .reveal canvas, .reveal details, .reveal embed, .reveal figure, .reveal figcaption, .reveal footer, .reveal header, .reveal hgroup, .reveal menu, .reveal nav, .reveal output, .reveal ruby, .reveal section, .reveal summary, .reveal time, .reveal mark, .reveal audio, .reveal video {
# margin-bottom: -1px;
# }
# div.text_cell_render {
# padding: 0em 0em 0.5em 0.0em;
# }
# </style>
#
# # Session 5: Strings, Queries and APIs
#
# *<NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# # Solutions
#
# Solutions for module 2 to 4 should now be up!
# + [markdown] slideshow={"slide_type": "slide"}
# # Assignment 1
#
# should also be up!
# + [markdown] slideshow={"slide_type": "slide"}
# # Supervision Sheet
#
# On Absalon (under _files_), you can find a document called "Supervision Sheet".
# + [markdown] slideshow={"slide_type": "fragment"}
# At some point before your first supervision meeting, we want you to:
# - Meet with your group
# - Fill in the supervision sheet
# - Print it (and bring it to your first round of supervision)
# + [markdown] slideshow={"slide_type": "fragment"}
# We don't want to increase your workload... but we hope that this will:
# - Motivate you to think about topic
# - Motivate you to think about data
# - Motivate you to think about how to distribute responsibilities
# - Make your meeting with supervisor more productive
# + [markdown] slideshow={"slide_type": "slide"}
# ## Recap (I/II)
#
# We can think of there as being two 'types' of plots:
# - **Exploratory** plots: Figures for understanding data
# - Quick to produce $\sim$ minimal polishing
# - Interesting feature may be implied by the producer
# - Be careful showing these out of context
# - **Explanatory** plots: Figures to convey a message
# - Polished figures
# - Direct attention to interesting feature in the data
# - Minimize risk of misunderstanding
# + [markdown] slideshow={"slide_type": "fragment"}
# There exist several packages for plotting. Some popular ones:
# - `Matplotlib` is good for customization (explanatory plots)
# - Might take a lot of time when customizing!
# - `Seaborn` and `Pandas` are good quick and dirty plots (exploratory)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Recap (II/II)
#
# We need to put a lot of thinking in how to present data.
#
# In particular, one must consider the *type* of data that is to be presented:
# + [markdown] slideshow={"slide_type": "fragment"}
# - One variable:
# - Categorical: Pie charts, simple counts, etc.
# - Numeric: Histograms, distplot (/cumulative), boxplot in seaborn
#
# + [markdown] slideshow={"slide_type": "fragment"}
# - Multiple variables:
# - `scatter` (matplotlib) or `jointplot` (seaborn) for (i) simple descriptives when (ii) both variables are numeric and (iii) there are not too many observations
# - `lmplot` or `regplot` (seaborn) when you also want to fit a linear model
# - `barplot` (matplotlib), `catplot` and `violinplot` (both seaborn) when one or more variables are categorical
# - The option `hue` allows you to add a "third" categorical dimension... use with care
# - Lots of other plot types and options. Go explore yourself!
# + [markdown] slideshow={"slide_type": "fragment"}
# - When you just want to explore: `pairplot` (seaborn) plots all pairwise correlations
# + [markdown] slideshow={"slide_type": "slide"}
# # Questions from Yesterday
#
# I have tried to gather some questions that seemed to address more general issues:
# - Something is wrong when I use split-apply-combine...
# - I cannot delete duplicates...
# + [markdown] slideshow={"slide_type": "slide"}
# # Something Is Wrong When I Use Split-Apply-Combine (I/III)
#
# Let's "simulate" some data!
# -
import numpy as np
import pandas as pd
# An 8x4 frame of consecutive integers; the first two columns are then
# overwritten to form a 2x2 grid of categories (a/b crossed with c/d).
df = pd.DataFrame(np.arange(32).reshape(8,4), columns = ["cat1", "cat2", "val1", "val2"])
df.loc[[0,1,2,3],['cat1']]='a'
df.loc[[4,5,6,7],['cat1']]='b'
df.loc[[0,1,4,5],['cat2']]='c'
df.loc[[2,3,6,7],['cat2']]='d'
df
# + [markdown] slideshow={"slide_type": "slide"}
# # Something Is Wrong When I Use Split-Apply-Combine (II/III)
#
# In general, the syntax is always the same. For most purposes, do exactly THIS (unless you use `.transform()`)
# -
# Split by the two categorical columns, apply three aggregations to the two
# value columns, and combine into a frame with multi-indexed rows and columns.
split_vars = ['cat1', 'cat2']
apply_vars = ['val1', 'val2']
apply_fcts = ['median', 'mean', 'std']
combined = df.groupby(split_vars)[apply_vars].agg(apply_fcts)
combined
# + [markdown] slideshow={"slide_type": "slide"}
# # Something Is Wrong When I Use Split-Apply-Combine (III/III)
#
# *But now I have a multiindex... How do I access a given value?*
# + [markdown] slideshow={"slide_type": "fragment"}
# What you can do is to use a tuple-like structure:
# + slideshow={"slide_type": "-"}
combined.loc[('a', 'd'), ('val1', 'median')]
# + [markdown] slideshow={"slide_type": "slide"}
# ## I Cannot Delete Duplicates... (I/III)
#
# When you make apply (chains of) methods, it may seem that the changes that you make are temporary...
# +
import numpy as np
import pandas as pd
df = pd.DataFrame(np.arange(16).reshape(4,4), columns = ["one", "two", "three", "four"])
df.loc[[1, 3], ["one", "four"]] = '?'
# Deliberate pitfall for the slides: the result of the chained methods is
# never assigned to anything, so df itself is unchanged.
df.replace('?', np.NaN).dropna()
df
# + [markdown] slideshow={"slide_type": "slide"}
# ## I Cannot Delete Duplicates... (II/III)
#
# Problem: Whenever you apply a (method) to a dataframe and modify it, the new dataframe will not exist (outside the memory) until you assign it to something.
# +
df = pd.DataFrame(np.arange(16).reshape(4,4), columns = ["one", "two", "three", "four"])
df.loc[[1, 3], ["one", "four"]] = '?'
# Assigning the result to a new name keeps the modified frame.
df_new = df.replace('?', np.NaN).dropna().copy()
df_new
# + [markdown] slideshow={"slide_type": "slide"}
# ## I Cannot Delete Duplicates... (III/III)
#
# Sometimes, the `inplace` argument can also be of use...
# -
# inplace=True mutates df directly instead of returning a new frame.
df.replace('?', np.NaN, inplace = True)
df
# + [markdown] slideshow={"slide_type": "slide"}
# # Overview of Session 5
#
# Today, we will work with strings, requests and APIs. In particular, we will cover:
# 1. Text as Data:
# - What is a string, and how do we work with it?
# - What kinds of text data does there exist?
# 2. Key Based Containers:
# - What is a dictionary, and how is this different from lists and tuples?
# - When are dictionaries useful, and how do we work with them?
# 3. Interacting with the Web:
# - What is HTTP and HTML?
# - What is an API, and how do interact with it?
# 4. Leveraging APIs:
# - What kinds of data can be extracted via an API?
# - How do we translate an API into useful data?
# + [markdown] slideshow={"slide_type": "slide"}
# # Associated Readings
# + [markdown] slideshow={"slide_type": "fragment"}
# PDA:
# - Section 2.3: How to work with strings in Python
# - Section 3.3: Opening text files, interpreting characters
# - Section 6.1: Opening and working with CSV files
# - Section 6.3: Intro to interacting with APIs
# - Section 7.3: Manipulating strings
# + [markdown] slideshow={"slide_type": "fragment"}
# Gazarov (2016): "What is an API? In English, please."
# - Excellent and easily understood intro to the concept
# - Examples of different 'types' of APIs
# - Intro to the concepts of servers, clients and HTML
# + [markdown] slideshow={"slide_type": "slide"}
# # Text as Data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why Text Data
#
# Data is everywhere... and collection is taking speed!
# - Personal devices and [what we have at home](https://www.nytimes.com/wirecutter/blog/amazons-alexa-never-stops-listening-to-you/)
# - Online in terms of news websites, wikipedia, social media, blogs, document archives
#
# Working with text data opens up interesting new avenues for analysis and research. Some cool examples:
# - Text analysis, topic modelling and monetary policy:
# - [Transparency and shifts in deliberation about monetary policy](https://sekhansen.github.io/pdf_files/qje_2018.pdf)
# - [Narrative signals about uncertainty in inflation reports drive long-run outcomes](https://sekhansen.github.io/pdf_files/jme_2019.pdf)
# - [More partisanship (polarization) in congressional speeches](https://www.brown.edu/Research/Shapiro/pdfs/politext.pdf)
# + [markdown] slideshow={"slide_type": "slide"}
# ## How Text Data
#
# Data from the web often come in HTML or other text format
#
# In this course, you will get tools to do basic work with text as data.
#
# However, in order to do that:
#
# - learn how to manipulate and save strings
# - save our text data in smart ways (JSON)
# - interact with the web
# + [markdown] slideshow={"slide_type": "slide"}
# ## Videos and Exercises
#
# Now proceed to the notebook with videos and exercises, where you will first learn a bit about string operations and different non-sequential containers. Then we proceed and you will get an opportunity to interact with the web.
#
# The structure of the notebook is as follows:
# 1. String Operations:
# - Common string operations...
# - ... more string operations...
# - ... and warm-up exercises
# 2. Saving as Text File
# 3. Python Containers and Dictionaries
# 4. Python and the Web
# - Application Programming Interface (API)
# - The Punk API
# - The API for Statistics Denmark
# + [markdown] slideshow={"slide_type": "slide"}
# # Video 5.1: Key Based Containers
# + [markdown] slideshow={"slide_type": "slide"}
# ## Containers Recap (I/II)
#
# *What are containers? Which have we seen?*
# + [markdown] slideshow={"slide_type": "fragment"}
# Sequential containers:
# - `list` which we can modify (**mutable**).
# - useful to collect data on the go
# - `tuple` which is after initial assignment **immutable**
# - tuples are faster as they can do less things
# - `array`
# - which is mutable in content (i.e. we can change elements)
# - but immutable in size
# - great for data analysis
# + [markdown] slideshow={"slide_type": "slide"}
# ## Containers Recap (II/II)
# + [markdown] slideshow={"slide_type": "fragment"}
# Non-sequential containers:
# - Dictionaries (`dict`) which are accessed by keys (immutable objects).
# - Sets (`set`) where elements are
# - unique (no duplicates)
# - not ordered
# - disadvantage: cannot access specific elements!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dictionaries Recap (I/II)
#
# *How did we make a container which is accessed by arbitrary keys?*
# + [markdown] slideshow={"slide_type": "fragment"}
# By using a dictionary, `dict`. Simple way of constructing a `dict`:
# + slideshow={"slide_type": "-"}
# A dict literal: names (keys) mapped to job titles (values).
my_dict = {'Andreas': 'Assistant Professor',
           'Joachim': 'PhD Fellow',
           'Nicklas': 'PhD Fellow',
           'Terne': 'PhD Fellow'}
# + slideshow={"slide_type": "-"}
# Look up a value by its key.
print(my_dict['Joachim'])
# + slideshow={"slide_type": "-"}
# Build a dict in a loop: keys "cube0".."cube99" map to the cubes 0..99**3.
my_new_dict = {}
for a in range(0,100):
    my_new_dict["cube%s" %a] = a**3
print(my_new_dict['cube10'])
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dictionaries Recap (II/II)
#
# Dictionaries can also be constructed from two associated lists. These are tied together with the `zip` function. Try the following code:
# + slideshow={"slide_type": "-"}
# zip ties two associated sequences together element-wise.
keys = ['a', 'b', 'c']
values = range(2,5)
key_value_pairs = list(zip(keys, values))
print(key_value_pairs) #Print as a list of tuples
# + slideshow={"slide_type": "-"}
# A list of (key, value) tuples converts directly to a dict.
my_dict2 = dict(key_value_pairs)
print(my_dict2) #Print dictionary
# -
print(my_dict2['a']) #Fetch the value associated with 'a'
# + [markdown] slideshow={"slide_type": "slide"}
# ## Storing Containers
#
# *Does there exist a file format for easy storage of containers?*
# + [markdown] slideshow={"slide_type": "fragment"}
# Yes, the JSON file format.
# - Can store lists and dictionaries.
# - Syntax is the same as Python lists and dictionaries - only add quotation marks.
# - Example: `'{"a":1,"b":1}'`
# + [markdown] slideshow={"slide_type": "fragment"}
# *Why is JSON so useful?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - Standard format that looks exactly like Python.
# - Extreme flexibility:
# - Can hold any list or dictionary of any depth which contains only float, int, str.
# - Does not work well with other formats, but normally holds any structured data.
# - Extension to spatial data: GeoJSON
# + [markdown] slideshow={"slide_type": "slide"}
# # VIDEO 5.2: Interacting with the Web
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Internet as Data (I/II)
#
# When we surf around the internet we are exposed to a wealth of information.
# + [markdown] slideshow={"slide_type": "fragment"}
# - What if we could take this and analyze it?
# + [markdown] slideshow={"slide_type": "fragment"}
# Well, we can. And we will.
# + [markdown] slideshow={"slide_type": "fragment"}
# Examples: Facebook, Twitter, Reddit, Wikipedia, Airbnb etc.
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Internet as Data (II/II)
#
# Sometimes we get lucky. The data is served to us.
# + [markdown] slideshow={"slide_type": "fragment"}
# - The data is provided as an `API` service (today)
# - The data can be extracted by queries on underlying tables (scraping sessions). However, often we need to do the work ourselves:
# - We need to explore the structure of the webpage we are interested in
# - We can extract relevant elements
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Web Interactions
#
# In the words of Gazarov (2016): The web can be seen as a large network of connected servers
# - A page on the internet is stored somewhere on a remote server
# - Remote server $\sim$ remotely located computer that is optimized to process requests
# + [markdown] slideshow={"slide_type": "fragment"}
# - When accessing a web page through browser:
# - Your browser (the *client*) sends a request to the website's server
# - The server then sends code back to the browser
# - This code is interpreted by the browser and displayed
#
# + [markdown] slideshow={"slide_type": "fragment"}
# - Websites come in the form of HTML $-$ APIs only contain data (often in *JSON* format) without presentational overhead
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Web Protocol
# *What is `http` and where is it used?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - `http` stands for HyperText Transfer Protocol.
# - `http` is good for transmitting the data when a webpage is visited:
# - the visiting client sends request for URL or object;
# - the server returns relevant data if active.
# + [markdown] slideshow={"slide_type": "fragment"}
# *Should we care about `http`?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - In this course we ***do not*** care explicitly about `http`.
# - We use a Python module called `requests` as a `http` interface.
# - However... Some useful advice - you should **always**:
# - use the encrypted version, `https`;
# - use authenticated connection, i.e. private login, whenever possible.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Markup Language
# *What is `html` and where is it used?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - HyperText Markup Language
# - `html` is a language for communicating how a webpage looks like and behaves.
# - That is, `html` contains: content, design, available actions.
# + [markdown] slideshow={"slide_type": "fragment"}
# *Should we care about `html`?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - Yes, `html` is often where the interesting data can be found.
# - Sometimes, we are lucky, and instead of `html` we get a JSON in return.
# - Getting data from `html` will be the topic of the subsequent scraping sessions.
# + [markdown] slideshow={"slide_type": "slide"}
# # VIDEO 5.3: Leveraging APIs
# + [markdown] slideshow={"slide_type": "slide"}
# ## Web APIs (I/IV)
# *So when do we get lucky, i.e. when is `html` not important?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - When we get a Application Programming Interface (`API`) on the web
# - What does this mean?
# - We send a query to the Web API
# - We get a response from the Web API with data back in return, typically as JSON.
# - The API usually provides access to a database or some service
# + [markdown] slideshow={"slide_type": "slide"}
# ## Web APIs (II/IV)
# *So where is the API?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - Usually on separate sub-domain, e.g. `api.github.com`
# - Sometimes hidden in code (see sessions on scraping)
# + [markdown] slideshow={"slide_type": "fragment"}
# *So how do we know how the API works?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - There usually is some documentation. E.g. google ["api github com"](https://www.google.com/search?q=api+github)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Web APIs (III/IV)
# *So is data free? As in free lunch?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - Most commercial APIs require authentication and have limited free usage
# - e.g. Google Maps, various weather services
# + [markdown] slideshow={"slide_type": "fragment"}
# - Some open APIs that are free
# - Danish
# - Danish statistics (DST)
# - Danish weather data (DMI)
# - Danish spatial data (DAWA, danish addresses)
# - Global
# - OpenStreetMaps, Wikipedia
# + [markdown] slideshow={"slide_type": "fragment"}
# - If no authentication is required the API may be delimited.
# - This means only a certain number of requests can be handled per second or per hour from a given IP address.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Web APIs (IV/IV)
# *So how do we make the URLs?*
# + [markdown] slideshow={"slide_type": "fragment"}
# - An `API` query is a URL consisting of:
# - Server URL, e.g. `https://api.github.com`
# - Endpoint path, `/users/isdsucph/repos`
# + [markdown] slideshow={"slide_type": "fragment"}
# We can convert a string to JSON with `loads`.
# + [markdown] slideshow={"slide_type": "slide"}
# ## File Handling
# *How can we remove a file?*
# + [markdown] slideshow={"slide_type": "fragment"}
# The module `os` can do a lot of file handling tasks, e.g. removing files:
# + slideshow={"slide_type": "-"}
import os
# Delete the file created earlier in the demo.
# Note: raises FileNotFoundError if the file does not exist.
os.remove('my_file.json')
| teaching_material/module_5/module_5_slides.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# # 如何使用Python擬合資料?
# ## 參考資料
# * <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html" target="_blank">numpy.polyfit</a>
# * <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html" target="_blank">numpy.linalg.lstsq</a>
# * <a href="http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html" target="_blank">scipy.stats.linregress</a>
# * <a href="http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html" target="_blank">scipy.optimize.curve_fit</a>, <a href="http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html" target="_blank">scipy.optimize.least_squares</a>
# * <a href="http://docs.astropy.org/en/stable/modeling/index.html" target="_blank">astropy.modeling</a>
# * <a href="http://www.astroml.org/" target="_blank">AstroML</a>
# ## 準備工作:範例所需檔案下載及說明
# * O-C_ephemeris.txt (已在files4examples資料夾中無須下載):
#
# 檔案說明(待補)
#
#
# * pulse-profile.txt (已在files4examples資料夾中無須下載):
#
# 檔案說明(待補)
#
#
# * QSOnumber.txt (已在files4examples資料夾中無須下載):
#
# 檔案說明(待補)
#
#
# * 其他範例檔案(待補)
#
# ## 範例1:以NumPy的polyfit 進行斜直線fitting
# +
from astropy.io import ascii
radio_infrared_NLSy1 = ascii.read('../files4examples/Radio_Infrared_NLSy1.txt',names=['radio luminosity','infrared luminosity'])
radio_infrared_SG = ascii.read('../files4examples/Radio_Infrared_spiral.txt',names=['radio luminosity','infrared luminosity'])
radio_infrared_RG = ascii.read('../files4examples/Radio_Infrared_radio.txt',names=['radio luminosity','infrared luminosity'])
L_radio_NLSy1=radio_infrared_NLSy1['radio luminosity']
L_infrared_NLSy1=radio_infrared_NLSy1['infrared luminosity']
L_radio_SG=radio_infrared_SG['radio luminosity']
L_infrared_SG=radio_infrared_SG['infrared luminosity']
L_radio_RG=radio_infrared_RG['radio luminosity']
L_infrared_RG=radio_infrared_RG['infrared luminosity']
# +
import numpy as np
x1=np.log10(L_radio_NLSy1)
x2=np.log10(L_radio_SG)
x3=np.log10(L_radio_RG)
y1=np.log10(L_infrared_NLSy1)
y2=np.log10(L_infrared_SG)
y3=np.log10(L_infrared_RG)
p_NLSy1 = np.poly1d(np.polyfit(x1,y1,1))
p_SG = np.poly1d(np.polyfit(x2,y2,1))
p_RG = np.poly1d(np.polyfit(x3,y3,1))
# -
# * <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html" target="_blank">polyfit</a>為NumPy中以多項式曲線來擬合資料的函式,回傳值為多項式的係數。 <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.poly1d.html" target="_blank">poly1d</a>為NumPy中用來產生多項式物件的類別。
# +
# %matplotlib notebook
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111,aspect='equal')
plt.xlim(32,40)
plt.ylim(20,28)
x = np.linspace(32,40,100)
plt.scatter(x1,y1,facecolor='none',edgecolor='r',marker='D')
plt.plot(x,p_NLSy1(x),'r-')
plt.scatter(x2,y2,color='k',marker='+',s=80)
plt.plot(x,p_SG(x),'k-')
plt.scatter(x3,y3,facecolor='none',edgecolor='k',marker='^',s=80)
plt.plot(x,p_RG(x),'k--')
plt.xlabel(r'$\nu$ L$_\nu$ [22$\mu$m] log(W)')
plt.ylabel(r'$L_\nu$ [1.4GHz] log(W/Hz)')
plt.legend(('NLSy1','SG','RG'),loc='upper left')
plt.show()
# -
print(np.polyfit(x1,y1,1))
print(np.polyfit(x2,y2,1))
print(np.polyfit(x3,y3,1))
# ## 範例2:以NumPy的linalg.lstsq 進行斜直線fitting
# +
from astropy.io import ascii
radio_infrared_NLSy1 = ascii.read('../files4examples/Radio_Infrared_NLSy1.txt',names=['radio luminosity','infrared luminosity'])
radio_infrared_SG = ascii.read('../files4examples/Radio_Infrared_spiral.txt',names=['radio luminosity','infrared luminosity'])
radio_infrared_RG = ascii.read('../files4examples/Radio_Infrared_radio.txt',names=['radio luminosity','infrared luminosity'])
L_radio_NLSy1=radio_infrared_NLSy1['radio luminosity']
L_infrared_NLSy1=radio_infrared_NLSy1['infrared luminosity']
L_radio_SG=radio_infrared_SG['radio luminosity']
L_infrared_SG=radio_infrared_SG['infrared luminosity']
L_radio_RG=radio_infrared_RG['radio luminosity']
L_infrared_RG=radio_infrared_RG['infrared luminosity']
# +
import numpy as np
x1=np.log10(L_radio_NLSy1)
x2=np.log10(L_radio_SG)
x3=np.log10(L_radio_RG)
y1=np.log10(L_infrared_NLSy1)
y2=np.log10(L_infrared_SG)
y3=np.log10(L_infrared_RG)
x = np.linspace(32,40,100)
# +
# Solve y = m*x + c by least squares: y = A @ [m, c], where each row of
# A is [x_i, 1].  rcond=None selects the modern default singular-value
# cutoff and silences the FutureWarning raised since NumPy 1.14 when
# rcond is omitted.
A1 = np.vstack([x1, np.ones(len(x1))]).T
m1, c1 = np.linalg.lstsq(A1, y1, rcond=None)[0]
A2 = np.vstack([x2, np.ones(len(x2))]).T
m2, c2 = np.linalg.lstsq(A2, y2, rcond=None)[0]
A3 = np.vstack([x3, np.ones(len(x3))]).T
m3, c3 = np.linalg.lstsq(A3, y3, rcond=None)[0]
# Example
# y = Ap, where A = [[x 1]] and p = [[m], [c]]
# x = np.array([0, 1, 2, 3])
# A = np.vstack([x, np.ones(len(x))]).T
# A =
# array([[ 0.,  1.],
#        [ 1.,  1.],
#        [ 2.,  1.],
#        [ 3.,  1.]])
# len()      : number of elements in the array
# ones()     : array with every element equal to 1
# vstack().T : stack the two rows and transpose
# +
# %matplotlib notebook
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111,aspect='equal')
plt.xlim(32,40)
plt.ylim(20,28)
plt.scatter(x1, y1,facecolor='none',edgecolor='r',marker='D')
plt.plot(x, m1*x + c1, 'r',label='NLSy1')
plt.scatter(x2, y2,color='k',marker='+',s=80)
plt.plot(x, m2*x + c2, 'k', label='SG')
plt.scatter(x3, y3,facecolor='none',edgecolor='k',marker='^',s=80)
plt.plot(x, m3*x + c3, 'k--', label='RG')
plt.xlabel(r'$\nu$ $L_\nu$ [22$\mu$m] log(W)')
plt.ylabel(r'$L_\nu$ [1.4GHz] log(W/Hz)')
plt.legend(('NLSy1','SG','RG'),loc='upper left')
plt.show()
# -
print(m1, c1)
print(m2, c2)
print(m3, c3)
# ## 範例3:以SciPy的linregress 進行斜直線fitting
# +
from astropy.io import ascii
radio_infrared_NLSy1 = ascii.read('../files4examples/Radio_Infrared_NLSy1.txt',names=['radio luminosity','infrared luminosity'])
radio_infrared_SG = ascii.read('../files4examples/Radio_Infrared_spiral.txt',names=['radio luminosity','infrared luminosity'])
radio_infrared_RG = ascii.read('../files4examples/Radio_Infrared_radio.txt',names=['radio luminosity','infrared luminosity'])
L_radio_NLSy1=radio_infrared_NLSy1['radio luminosity']
L_infrared_NLSy1=radio_infrared_NLSy1['infrared luminosity']
L_radio_SG=radio_infrared_SG['radio luminosity']
L_infrared_SG=radio_infrared_SG['infrared luminosity']
L_radio_RG=radio_infrared_RG['radio luminosity']
L_infrared_RG=radio_infrared_RG['infrared luminosity']
# +
import numpy as np
x1=np.log10(L_radio_NLSy1)
x2=np.log10(L_radio_SG)
x3=np.log10(L_radio_RG)
y1=np.log10(L_infrared_NLSy1)
y2=np.log10(L_infrared_SG)
y3=np.log10(L_infrared_RG)
from scipy import stats
slope_1, intercept_1, r_value_1, p_value_1, std_err_1 = stats.linregress(x1,y1)
slope_2, intercept_2, r_value_2, p_value_2, std_err_2 = stats.linregress(x2,y2)
slope_3, intercept_3, r_value_3, p_value_3, std_err_3 = stats.linregress(x3,y3)
# +
# %matplotlib notebook
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111,aspect='equal')
plt.xlim(32,40)
plt.ylim(20,28)
x = np.linspace(32,40,100)
plt.scatter(x1,y1,facecolor='none',edgecolor='r',marker='D')
plt.plot(x,slope_1*x+intercept_1,'r-')
plt.scatter(x2,y2,color='k',marker='+',s=80)
plt.plot(x,slope_2*x+intercept_2,'k-')
plt.scatter(x3,y3,facecolor='none',edgecolor='k',marker='^',s=80)
plt.plot(x,slope_3*x+intercept_3,'k--')
plt.xlabel(r'$\nu$ $L_\nu$ [22$\mu$m] log(W)')
plt.ylabel(r'$L_\nu$ [1.4GHz] log(W/Hz)')
plt.legend(('NLSy1','SG','RG'),loc='upper left')
plt.show()
# -
print(slope_1, intercept_1, r_value_1, p_value_1, std_err_1)
print(slope_2, intercept_2, r_value_2, p_value_2, std_err_2)
print(slope_3, intercept_3, r_value_3, p_value_3, std_err_3)
# ## 範例4:以NumPy的polyfit多項式曲線擬合O-C ephemeris
from astropy.io import ascii
oc_ephemeris = ascii.read('../files4examples/O-C_ephemeris.txt', names=['cycles', 'delay', 'error'])
cycles = oc_ephemeris['cycles']
delay = oc_ephemeris['delay']
error = oc_ephemeris['error']
# * <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html" target="_blank">polyfit</a>為NumPy中以多項式曲線來擬合資料的函式,回傳值為多項式的係數。 <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.poly1d.html" target="_blank">poly1d</a>為NumPy中用來產生多項式物件的類別。
import numpy as np
p2 = np.poly1d(np.polyfit(cycles, delay, 2))
p3 = np.poly1d(np.polyfit(cycles, delay, 3))
x = np.linspace(-10000, 50000, 100)
# %matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
fig = plt.figure()
ax = fig.add_subplot(111)
# Data with error bars is drawn first, so it owns the first legend entry.
plt.errorbar(cycles, delay, yerr=error, fmt='sk')
# plt.hold() was removed in Matplotlib 3.0; successive plot calls already
# draw on the same axes by default, so the hold calls are simply dropped.
plt.plot(x, p2(x), 'r-')   # second-order polynomial fit
plt.plot(x, p3(x), '--')   # third-order polynomial fit
plt.ylim(-0.005, 0.03)
# Legend labels follow drawing order: data points, then the two fits.
plt.legend(('Data', 'Second', 'Third'))
plt.xlabel('N (cycles)')
plt.ylabel('Delay (days)')
ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax.ticklabel_format(axis='x', style='sci', scilimits=(0,0))
plt.show()
# ## 範例5:以AstroML的LinearRegression來擬合pulse-profile
# * <a href="https://github.com/astroML/astroML" target="_blank">AstroML</a> 是一個專門用於天文資料的統計、探勘與機器學習的Python套件,雖然Anaconda沒有預先安裝此套件,不過可透過pip指令安裝:
# ```bash
# pip install astroML
# pip install astroML_addons
# ```
from astropy.io import ascii
pulse_profile = ascii.read('../files4examples/pulse-profile.txt', names=['phase', 'rate', 'error'])
phase = pulse_profile['phase']
rate =pulse_profile['rate']
error = pulse_profile['error']
# +
import numpy as np
from astroML.linear_model import LinearRegression
x = np.array([np.sin(2 * np.pi * phase), np.cos(2 * np.pi * phase),
np.sin(4 * np.pi * phase), np.cos(4 * np.pi * phase)]).T
model = LinearRegression()
model.fit(x, rate, error)
coef = model.coef_
y_pred = model.predict(x)
print(coef)
# +
# %matplotlib notebook
import matplotlib.pyplot as plt
# Repeat the profile over two phase cycles for display purposes.
phase2 = np.append(phase, phase + 1)
rate2 = np.append(rate, rate)
error2 = np.append(error, error)
y_pred2 = np.append(y_pred, y_pred)
plt.figure()
plt.errorbar(phase2, rate2, yerr=error2, fmt="*k", label='Data')
# plt.hold() was removed in Matplotlib 3.0; axes hold by default,
# so the model curve overplots the data without any extra call.
plt.plot(phase2, y_pred2, 'r-', label='Model')
plt.xlabel('Phase')
plt.ylabel('Counts/s')
plt.legend()
plt.show()
# -
# ## 範例6:以SciPy的curve_fit來擬合pulse-profile
from astropy.io import ascii
pulse_profile = ascii.read('../files4examples/pulse-profile.txt', names=['phase', 'rate', 'error'])
phase = pulse_profile['phase']
rate =pulse_profile['rate']
error = pulse_profile['error']
# +
from scipy.optimize import curve_fit
import numpy as np
# 定義模型
def model(x, a0, a1, a2, a3, a4):
return ( a0 + a1 * np.sin(2 * np.pi * x) + a2 * np.cos(2 * np.pi * x) +
a3 * np.sin(4 * np.pi * x) + a4 * np.cos(4 * np.pi * x) )
# 曲線擬合
popt, pcov = curve_fit(model, phase, rate, sigma=error)
perr = np.sqrt(np.diag(pcov))
print(popt)
print(pcov)
print("a0 =", popt[0], "+/-", perr[0])
print("a1 =", popt[1], "+/-", perr[1])
print("a2 =", popt[2], "+/-", perr[2])
print("a3 =", popt[3], "+/-", perr[3])
print("a4 =", popt[4], "+/-", perr[4])
# -
# %matplotlib notebook
import matplotlib.pyplot as plt
# Repeat the profile over two phase cycles for display purposes.
phase2 = np.append(phase, phase + 1)
rate2 = np.append(rate, rate)
error2 = np.append(error, error)
plt.figure()
plt.errorbar(phase2, rate2, yerr=error2, fmt="*k", label='Data')
# plt.hold() was removed in Matplotlib 3.0; axes hold by default.
# Unpack the fitted parameters instead of indexing them one by one.
plt.plot(phase2, model(phase2, *popt), 'r-', label='Model')
plt.xlabel('Phase')
plt.ylabel('Counts/s')
plt.legend()
plt.show()
# ## 範例7:以astropy.modeling中的一維高斯曲線擬合QSO數量分佈
from astropy.io import ascii
data = ascii.read('../files4examples/QSOnumber.txt')
x = data['x']
y = data['y']
from astropy.modeling import models as mo, fitting as fit
import numpy as np
model_init = mo.Gaussian1D(amplitude=4220, mean=-0.25, stddev=0.1)
fitter = fit.LevMarLSQFitter()
fit_res = fitter(model_init, x, y)
print(fit_res.amplitude)
print(fit_res.mean)
print(fit_res.stddev)
# %matplotlib notebook
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
# Histogram of QSO counts as unfilled bars, one bar per input bin.
plt.bar(x, y, width=x[1]-x[0], align='center', edgecolor='black', fill=False)
# plt.hold() was removed in Matplotlib 3.0; axes hold by default,
# so the fitted Gaussian overplots the histogram without extra calls.
plt.plot(x, fit_res(x), 'r')
plt.xlim(-0.6, 0.6)
plt.ylim(0, 5000)
plt.xlabel('log($f_{\lambda_{3100A}}$ / $f_{\lambda_{2200A}}$)', fontsize=20)
plt.ylabel('Numbers of QSO', fontsize=20)
ax.tick_params(axis='y', labelsize=20)
plt.xticks(np.arange(-0.4,0.5,0.2), fontsize=20)
plt.show()
# ## 範例8:以astropy.modeling中多個sin的疊加來擬合pulse-profile
from astropy.io import ascii
pulse_profile = ascii.read('../files4examples/pulse-profile.txt', names=['phase', 'rate', 'error'])
phase = pulse_profile['phase']
rate =pulse_profile['rate']
error = pulse_profile['error']
# +
from astropy.modeling import models as mo, fitting as fit
import numpy as np
# 不給初始值, 使用預設初始值
mo_init = (mo.Const1D() +
mo.Sine1D(frequency=1, fixed={'frequency':True, 'phase':True}) +
mo.Sine1D(frequency=1, phase=0.25, fixed={'frequency':True, 'phase':True}) +
mo.Sine1D(frequency=2, fixed={'frequency':True, 'phase':True}) +
mo.Sine1D(frequency=2, phase=0.25, fixed={'frequency':True, 'phase':True}))
# 給初始值
# mo_init = (mo.Const1D(amplitude=np.mean(rate)) +
# mo.Sine1D(amplitude=-0.7, frequency=1, fixed={'frequency':True, 'phase':True}) +
# mo.Sine1D(amplitude=2, frequency=1, phase=0.25, fixed={'frequency':True, 'phase':True}) +
# mo.Sine1D(amplitude=0.7, frequency=2, fixed={'frequency':True, 'phase':True}) +
# mo.Sine1D(amplitude=0.6, frequency=2, phase=0.25, fixed={'frequency':True, 'phase':True}))
fitter = fit.LevMarLSQFitter()
fit_res = fitter(mo_init, phase, rate)
a0 = fit_res.amplitude_0.value
a1 = fit_res.amplitude_1.value
a2 = fit_res.amplitude_2.value
a3 = fit_res.amplitude_3.value
a4 = fit_res.amplitude_4.value
print(a0, a1, a2, a3, a4)
# -
# %matplotlib notebook
import matplotlib.pyplot as plt
# Repeat the profile over two phase cycles for display purposes.
phase2 = np.append(phase, phase + 1)
rate2 = np.append(rate, rate)
error2 = np.append(error, error)
plt.figure()
plt.errorbar(phase2, rate2, yerr=error2, fmt='*k')
# plt.hold() was removed in Matplotlib 3.0; axes hold by default.
plt.plot(phase2, fit_res(phase2), 'r')
# Legend labels follow drawing order: data points first, then the model curve.
plt.legend(('Data', 'Model'))
plt.xlabel('Phase')
plt.ylabel('Counts/s')
plt.show()
# ## 範例9:以astropy.modeling中的二維高斯曲線擬合 (待博識補充)
# +
# Gaussian fit 的 function
def get_gaussfit(bin_img):
'''
Propose: to fit an 2D image with an 2D gaussian function
initial guess of parameters:
x_mean & y_mean at the center of the image
x_width & y_width = 2 pixels
amplitude = max(image)
best parameters were optimized by least-square method
Parameter
----------------------
bin_img: an 2D image
======================
Output
----------------------
object of fitting.LevMarLSQFitter()
'''
x_size=bin_img.shape[0]
y_size=bin_img.shape[1]
p_init = models.Gaussian2D(amplitude=np.max(bin_img),x_mean=0.5*x_size,y_mean=0.5*y_size,x_stddev=2,y_stddev=2)
y, x = np.mgrid[:bin_img.shape[1], :bin_img.shape[0]]
fit_p = fitting.LevMarLSQFitter()
p = fit_p(p_init, x,y,bin_img)
return p;
import numpy as np
import astropy.units as units
import matplotlib.pyplot as plt
from astropy.table import Table
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
from astropy.io import ascii
from astropy.modeling import models, fitting
#catalog_list = Vizier.find_catalogs('PPMXL')
# Lift Vizier's default cap of 50 returned rows per query
Vizier.ROW_LIMIT = -1
# Build an all-sky grid of pointings, 10 degrees apart
x=36 # 36 steps in RA (all sky)
y=17 # 17 steps in Dec (all sky)
grid_x=np.arange(x)*10
grid_y=np.arange(y)*10-80
# Define the output table: one row per grid point with the fitted
# proper-motion offsets in RA and Dec
result_table = Table(names=('RA', 'DE', 'mRA_offset','mDE_offset'), dtype=('f4', 'f4', 'f4', 'f4'))
result_table['RA'].unit=units.deg
result_table['DE'].unit=units.deg
result_table['mRA_offset'].unit=units.mas/units.year
result_table['mDE_offset'].unit=units.mas/units.year
# For each grid point, query PPMXL (catalog I/317) on Vizier within a 10 arcmin radius
for x in grid_x:
    for y in grid_y:
        result = Vizier.query_region(SkyCoord(ra=x*units.deg, dec=y*units.deg,frame="icrs"), radius=10.0*units.arcmin, catalog=('I/317'))
        #name=str(x)+'_'+str(y)+'.csv'
        #ascii.write(result[0], name, format='csv')
        #plt.plot(result[0]['pmRA'],result[0]['pmDE'],'r.')
        # 2-D histogram of proper motions in a +/-100 mas/yr window,
        # 20x20 bins, i.e. 10 mas/yr per bin
        bin_img, yedges, xedges = np.histogram2d(result[0]['pmRA'], result[0]['pmDE'], (20,20),range=[[-100,100],[-100,100]])
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        #plt.imshow(bin_img, extent=extent, interpolation='nearest', cmap='gist_yarg', origin='lower')
        # Fit a 2-D Gaussian to locate the proper-motion distribution peak
        fit_results=get_gaussfit(bin_img)
        # Offset of the fitted centre from the histogram centre,
        # converted with the 10 mas/yr bin width
        ra_off=(fit_results.x_mean-(bin_img.shape[0]*0.5))*10
        de_off=(fit_results.y_mean-(bin_img.shape[1]*0.5))*10
        #theta=1./np.cos(y)*
        result_table.add_row([x,y,ra_off,de_off])
# -
result_table
# %matplotlib inline
plt.quiver(result_table['RA'],result_table['DE'],result_table['mRA_offset'],result_table['mDE_offset'])
| notebooks/notebooks4HowtoSeries/how_to_fit_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="PiIDDU_iVVsG"
# # %tensorflow_version 2.x
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# + colab={} colab_type="code" id="GF3ongiwCjIo"
mnist = tf.keras.datasets.mnist # mnist data of grayscale images of handwritten digits
(x_train, y_train), (x_test, y_test) = mnist.load_data() # load it into training and testing sets
x_train, x_test = x_train/255.0, x_test/255.0 # the data is in the range (0,255) grayscale value for each pixel, normalize it
# + colab={} colab_type="code" id="rRd0_7ktI0Fd"
print(x_train.shape, x_test.shape) # look at how many images are there
# + colab={} colab_type="code" id="MMa4n9DUI25i"
print(y_train.shape, y_test.shape)
print(y_train[:10])
# + colab={} colab_type="code" id="rCrAKCRUS64a"
# plot the first ten images
for i in range(10):
plt.figure()
plt.imshow(x_train[i], cmap='gray')
# + colab={} colab_type="code" id="u0zNDSlJguBM"
#---------------------------------------------------
# build an autoencoder model
#---------------------------------------------------
# build an autoencoder model
def create_modelAE():
    """Return a sequential dense autoencoder for 28x28 grayscale images.

    The image is flattened, squeezed through a 32-unit ReLU bottleneck,
    expanded back to 28*28 sigmoid outputs, and reshaped to (28, 28, 1).
    """
    layers = [
        tf.keras.layers.InputLayer((28,28)),                   # raw 28x28 input
        tf.keras.layers.Flatten(),                             # -> single 784-dim column
        tf.keras.layers.Dense(32, activation='relu'),          # bottleneck (compression)
        tf.keras.layers.Dense(28*28, activation='sigmoid'),    # decoder output
        tf.keras.layers.Reshape((28, 28, 1)),                  # back to image shape
    ]
    return tf.keras.models.Sequential(layers)
# + colab={"base_uri": "https://localhost:8080/", "height": 332} colab_type="code" id="6yAtvAd6izhP" outputId="291990ad-ed7a-4a30-abfa-bcd50028eab7"
modelAE = create_modelAE()
print(modelAE.summary())
# + colab={} colab_type="code" id="Ql_eSskiJ2y4"
500*500*3*32 + 32
# + colab={} colab_type="code" id="Djti_AdHlHoj"
model = modelAE
# + colab={} colab_type="code" id="CqFlPJkxgU2H"
# compile, using the adamax optimizer for gradient descend and mean square error for loss
model.compile(optimizer='adamax', loss='mse')
# + colab={"base_uri": "https://localhost:8080/", "height": 384} colab_type="code" id="tWyYxt3klDq3" outputId="4a218475-b2ed-4903-e3ab-c35232ac0146"
# fit (train) the model on the test data. The input and the output is the same because
# we are trying to re-construct the original input after compression
train_hist = model.fit(x_train, x_train, epochs=10)
# + colab={} colab_type="code" id="gdqVpHsTlgNh"
#---------------------------------------------------
# plots n images in two rows, the first row is the original images
# and the second row is the reconstructed (decoded) images
#---------------------------------------------------
# plots n images in two rows, the first row is the original images
# and the second row is the reconstructed (decoded) images
def show_results(n, originals=None, decodeds=None):
    """Plot ``n`` image pairs: originals on the top row, reconstructions below.

    Parameters
    ----------
    n : int
        Number of image pairs to display.
    originals, decodeds : sequence of 2-D arrays, optional
        Image sequences to plot.  When omitted, falls back to the
        module-level ``original`` / ``decoded`` lists (backward compatible
        with the previous global-based behavior).
    """
    if originals is None:
        originals = original
    if decodeds is None:
        decodeds = decoded
    plt.figure(figsize=(12, 12))
    for i in range(n):
        # display original image (row 1)
        ax = plt.subplot(3, n, i + 1)
        ax.set_title("Original")
        plt.imshow(originals[i])
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # display reconstruction (row 2)
        ax = plt.subplot(3, n, i + 1 + n)
        ax.set_title("Decoded")
        plt.imshow(decodeds[i])
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
# + colab={} colab_type="code" id="KBDl49xjOHsJ"
original = []
decoded = []
# predict the first 10 items from the test set
for img in x_test[:10]:
    # run the autoencoder on a single image (expand_dims adds a batch axis of 1)
    pimg = model.predict(tf.expand_dims(img, 0))
    original.append(img)             # input image
    decoded.append(pimg.squeeze())   # reconstruction, squeezed back to 2-D
# number of image pairs collected (replaces the manual counter)
n = len(original)
# + colab={"base_uri": "https://localhost:8080/", "height": 358} colab_type="code" id="8Azgx94TO-2q" outputId="7e1c3ab8-3e39-4e31-d39f-3ae807d6c140"
show_results(n)
# + colab={} colab_type="code" id="uT6E9d60jmho"
28*28/32
| notebooks/siggraph2020/Class 3 - Autoencoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# 
#
# ### <NAME> and <NAME>
#
# Some of the demos assume you have run `python -m pip install particle`. The demos use Python 3, though the package also supports Python 2 for now. You can [view the demo here](https://nbviewer.jupyter.org/github/scikit-hep/particle/blob/master/notebooks/ParticleDemo.ipynb) or [run it here](https://mybinder.org/v2/gh/scikit-hep/particle/master?urlpath=lab/tree/notebooks/ParticleDemo.ipynb) on Binder.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Demo 1: Command line usage
#
# There are two modules in `Particle`:
#
# * PDGID - Find out as much as possible from the PDG ID number. **No table lookup**.
# * Particle - Loads **PDG data tables** and implements search and manipulations / display.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### General usage
# + slideshow={"slide_type": "-"}
# !python -m particle -h
# -
# !python -m particle --version
# + [markdown] slideshow={"slide_type": "subslide"}
# ### PDGID
# -
# !python -m particle pdgid 211
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Particle
# -
# !python -m particle search 211
# !python -m particle search "pi+"
# !python -m particle search "pi(1400)+"
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Bonus feature: ZipApp
#
# We also have released a new ZipApp version - **one file** that runs on **any computer with Python**, no other dependencies! Find it [attached to releases](https://github.com/scikit-hep/particle/releases) starting with version 0.4.4.
# -
# Example:
#
# ```bash
# ./particle.pyz search gamma
# ```
# All dependencies (including the two backports) are installed inside the ZipApp, and the data lookup is handled in a zip-safe way inside particle. Python 3 is used to make the zipapp, but including the backports makes it work on Python 2 as well.
#
# The command line mode could be enhanced to make it a useful tool in bash scripts! Stay tuned...
# + [markdown] slideshow={"slide_type": "slide"}
# ## Demo 2: Python usage
#
# ### PDGID
#
# Let's start with `PDGID` again.
# + slideshow={"slide_type": "subslide"}
from particle import PDGID, Particle, Charge
# -
p = PDGID(211)
p
print(p.info())
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Literals
#
# `Particle` has literals, as well; these are dynamically generated on import for **both PDGID and Particle** classes!
# -
import particle.pdgid.literals as pdgid_literals
pdgid_literals.phi_1020
# + [markdown] slideshow={"slide_type": "slide"}
# ### Particle
#
# There are lots of ways to create a particle:
# -
# #### From PDGID
Particle.from_pdgid(211)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Literals
# -
import particle.particle.literals as particle_literals
particle_literals.phi_1020
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Searching
#
# The most powerful method: `find` (or `findall`).
# -
Particle.find('phi(1020)')
# You can specify search terms as keywords - any particle property:
Particle.find(latex_name=r'\phi(1020)')
# + [markdown] slideshow={"slide_type": "subslide"}
# Some properties have enums available. For example, you can directly check the numeric charge:
# -
Particle.findall('pi', charge=-1)
# Or you can use the enum (for charge, this is 3 times the charge, hence the name `three_charge`)
Particle.findall('pi', three_charge=Charge.p)
# + [markdown] slideshow={"slide_type": "subslide"}
# Or use a **lambda function** for the ultimate in generality! For example, to find all the neutral particles with a bottom quark between 5.2 and 5.3 GeV:
# -
from hepunits import GeV, s # Units are good. Use them.
Particle.findall(lambda p:
p.pdgid.has_bottom
and p.charge==0
and 5.2*GeV < p.mass < 5.3*GeV
)
# + [markdown] slideshow={"slide_type": "subslide"}
# Another lambda function example: You can use the width or the lifetime:
# + slideshow={"slide_type": "-"}
Particle.findall(lambda p: p.lifetime > 1000*s)
# + [markdown] slideshow={"slide_type": "subslide"}
# If you want infinite lifetime, you could just use the keyword search instead:
# -
Particle.findall(lifetime=float('inf'))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Display
# -
# Nice display in Jupyter notebooks, as well as `str` and `repr` support:
p = particle_literals.D_0
p
print(p)
print(repr(p))
# + [markdown] slideshow={"slide_type": "subslide"}
# Full descriptions:
# -
print(p.describe())
# + [markdown] slideshow={"slide_type": "subslide"}
# You may find LaTeX or HTML to be more useful in your program, both are supported:
# -
print(p.latex_name, p.html_name)
# It is easy to get hold of the whole list of particle (instances) as a list:
Particle.all()
# + [markdown] slideshow={"slide_type": "slide"}
# ### Properties
#
# You can do things to particles, like **invert** them:
# -
~p
# There are a plethora of properties you can access:
p.spin_type
# + [markdown] slideshow={"slide_type": "subslide"}
# You can quickly access the PDGID of a particle:
# -
p.pdgid
PDGID(p)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Advanced usage
#
# You can:
#
# * Extend or replace the default table in `Particle`
# * Adjust properties for a particle
# * Make custom particles
# + [markdown] slideshow={"slide_type": "fragment"}
# Now let's look at one of the users of `Particle`: the [DecayLanguage](https://github.com/scikit-hep/decaylanguage
# ) package!
| notebooks/ParticleDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sos
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SoS
# language: sos
# name: sos
# ---
# + [markdown] kernel="SoS"
# # Fine-mapping with SuSiE RSS model
#
# This notebook runs fine-mapping on matrices of RSS (summary statistics), using both mvSuSiE and a loop of univariate SuSiE models.
# + [markdown] kernel="SoS"
# 3. GWAS summary statistics input `z` and `R`. We assume `z` scores have been computed after removal of covariates `C`.
# + [markdown] jp-MarkdownHeadingCollapsed=true kernel="SoS" tags=[]
# ## Input
#
# 1. A recipe file documenting the path to
#
# 1.1. A list of analysis_unit_list generated from Meta_Analysis step for each gene.
#
# Each line of the index records the name of one RDS file, containing a list with bhat and sbhat matrices of dimension (number of SNPs x number of themes).
#
# 1.2.The path to a residual corr file from meta_analysis step [FIXME: Specified resid_corr for each gene to be added in the future]
#
# 1.3. 3 col each record the path to a prior file from meta_analysis step.
#
# 2. LD Recipe: a three colnums table with Theme as each of the Theme_prefix and other columns = ld_file_prefix, ld_file_surfix
#
# Only SNPs present in both the LD matrices and the sumstat RDS files will be analysed.
# In unisusie_rss, each of theme will use the corresponding ld file
# In mvsusie_rss, the last row will be used.
#
# The ld_prefix and ld_surfix are such that paste0(ld_file_prefix,gene_ID,ld_file_surfix) generate the path to each ld matrix for each genes
# + [markdown] kernel="SoS"
# ## Output
#
# For each analysis unit we output:
#
# 1. Analysis results in RDS format: A mvsusie Model
# 2. A vcf file with selected snps
# ES:PIP:CS
# + [markdown] kernel="SoS"
# ## examples
#
#
# + kernel="SoS"
[global]
import glob
import pandas as pd
# Recipe file documenting the input paths (see the Input section above)
parameter: recipe = path("./")
#file_inv = pd.read_csv(recipe, sep = "\t")
# List of per-gene analysis units produced by the Meta_Analysis step
parameter: merged_analysis_unit = path#(file_inv["merged_analysis_unit"].values.tolist()[0])
#parameter: resid_cor = path(file_inv["resid_corr"].values.tolist()[0])
# Prefix identifying the theme; also used as the default run name below
parameter: Theme_prefix = str #file_inv["Theme_prefix"].values.tolist()[0]
## LD Recipe: a three column table with n_themes rows, as only 1 LD is required for each gene and each theme for uni_susie_RSS, columns = ld_file_prefix, ld_file_surfix, Theme
parameter: LD_Recipe = path
## SNPs with PIP above this threshold will be reported
parameter: pip_criterior = 0.1
#parameter: prior = path(file_inv["prior"].values.tolist()[0])
## data file suffix
parameter: data_suffix = ""
#
## An identifier for your run of analysis
parameter: name = Theme_prefix
#
# Parse the analysis-unit list, skipping blank lines and '#' comments
regions = [x.replace("\"","").strip().split() for x in open(merged_analysis_unit).readlines() if x.strip() and not x.strip().startswith('#')]
genes = regions
## Path to work directory where output locates
parameter: wd = path("./output")
## Container that contains the necessary packages
parameter: container = "/mnt/mfs/statgen/containers/twas_latest.sif"
## Only 1 LD for each gene is required for each analysis
# + kernel="SoS"
[Fine_mapping_1,summary_stats_preprocessing]
# Which LD source to use downstream: 'original' (file on disk) or 'remove_cov'
parameter: ld_type = 'original'
# Names of the effect-size and standard-error elements inside each RDS list
parameter: bhat = "bhat"
parameter: sbhat = "sbhat"
input: genes, group_by = 1
output: processed = f'{wd:a}/preprocessed/{_input:b}'
task: trunk_workers = 1, trunk_size = 1, walltime = '2h', mem = '55G', cores = 1, tags = f'{step_name}_{_output:bn}'
R: expand = '${ }', stdout = f"{_output:n}.stdout", stderr = f"{_output:n}.stderr", container = container
    library("dplyr")
    library("readr")
    dat = readRDS(${_input:r})
    # Compute Z scores from bhat / sbhat when they are not stored already
    if(is.null(dat$Z)){
        dat$Z = dat$${bhat}/dat$${sbhat}
    }
    # The gene ID is the last underscore-separated token of the file stem
    gene_id = read.table(text = "${_input:bnn}", sep = "_")
    gene_id = gene_id[,ncol(gene_id)][[1]]
    # Build per-theme LD file paths: prefix + gene ID + suffix
    ld.table = read_delim("${LD_Recipe}" , "\t")%>%mutate(ld.path = paste0(ld_file_prefix, gene_id ,ld_file_surfix ))
    rownames(ld.table) = ld.table$Theme
    # The last row of the LD recipe is the matrix used for mvSuSiE RSS
    ld.file = ld.table[nrow(ld.table),]
    dat$ld.file = ld.file
    dat$ld.table = ld.table
    saveRDS(dat, ${_output:r})
# + kernel="SoS"
[Fine_mapping_2,MvSuSiE_summary_stats_analysis_1]
# Fine-mapping settings: number of effects (L) and iteration cap
parameter: max_L = 10
parameter: max_iter = 10
parameter: ld_type = 'original'
parameter: bhat = "bhat"
parameter: sbhat = "sbhat"
# Residual correlation matrix and mixture prior from the meta-analysis step
parameter: resid_cor = path#(file_inv["resid_corr"].values.tolist()[0])
parameter: prior = path#(file_inv["prior"].values.tolist()[0])
input: output_from("summary_stats_preprocessing")
output: f'{wd:a}/{_input:bnn}.LD{ld_type}{resid_cor:bnx}.mvsusierss.model.rds'
task: trunk_workers = 1, trunk_size = 1, walltime = '2h', mem = '55G', cores = 1, tags = f'{step_name}_{_output:bn}'
R: expand = '${ }', stdout = f"{_output:n}.stdout", stderr = f"{_output:n}.stderr", container = container
    # Return index mapping when Z columns and prior-matrix columns differ;
    # NULL means no reordering is needed (or names are unavailable).
    get_prior_indices <- function(Z, U) {
      # make sure the prior col/rows match the colnames of the Y matrix
      z_names = colnames(Z)
      u_names = colnames(U)
      if (is.null(z_names) || is.null(u_names)) {
        return(NULL)
      } else if (identical(z_names, u_names)) {
        return(NULL)
      } else {
        return(match(z_names, u_names))
      }
    }
    library(mvsusieR)
    library("dplyr")
    library("tibble")
    library("purrr")
    library("readr")
    library("tidyr")
    dat = readRDS(${_input:r})
    gene_id = "${_input:bnn}"
    resid_cor = ${resid_cor:r}
    V = readRDS(resid_cor)
    prior = readRDS(${prior:r})
    print(paste("Number of components in the mixture prior:", length(prior$U)))
    prior = mvsusieR::create_mash_prior(mixture_prior=list(weights=prior$w, matrices=prior$U),
                      include_indices = get_prior_indices(dat$Z, prior$U[[1]]),
                      max_mixture_len=-1)
    if("${ld_type}" == 'original'){
        R = readRDS(dat$ld.file$ld.path)
    }else if("${ld_type}" == 'remove_cov'){
        R = dat$LD
    }
    ## Remove the NA SNPs in R, assuming all NA = 0
    R[is.na(R)] = 0
    ## Remove duplicated in Z
    dat$Z = dat$Z[!duplicated(dat$snps),]
    dat$snps = dat$snps[!duplicated(dat$snps)]
    # Retaining only the overlapping snps
    R = R[which(rownames(R)%in%dat$snps),which(colnames(R)%in%dat$snps)]
    dat$Z = dat$Z[which(dat$snps%in%rownames(R)),]
    # NOTE(review): 'dat$snp' appears to rely on R's partial matching of
    # list names to resolve to 'dat$snps' -- confirm, and prefer the
    # explicit name 'dat$snps' for clarity.
    dat$snps = dat$snp[which(dat$snps%in%rownames(R))]
    ## Initiate the computation
    st = proc.time()
    mv_res = mvsusieR::mvsusie_rss(dat$Z, R, L=${max_L},
                prior_variance=prior, residual_variance=V,
                precompute_covariances=T, compute_objective=T,
                estimate_prior_variance=T, estimate_prior_method='EM',
                max_iter = ${max_iter}, n_thread=1)
    # Record the wall-clock time of the fit
    mv_res$time = proc.time() - st
    #if(mv_res$convergence$converged == FALSE){
    #    stop('Fail to converge.')
    #}
    # Pairwise correlations between credible-set SNPs, computed from the LD matrix
    mv_res$cs_corr = susieR:::get_cs_correlation(mv_res, Xcorr=R)
    # Get list of cs snps
    mv_output = mv_res
    saveRDS(mv_res, ${_output[0]:r})
# + kernel="SoS"
# Step: convert a fitted mvSuSiE model (RDS) into a bgzipped VCF summarizing
# per-SNP effect sizes, credible-set (CS) membership and PIP across conditions.
[Fine_mapping_3,MvSuSiE_summary_stats_analysis_2]
input: group_by = 1
output: f'{_input:nn}.result.vcf.bgz'
task: trunk_workers = 1, trunk_size = 1, walltime = '2h', mem = '55G', cores = 1, tags = f'{step_name}_{_output:bn}'
R: expand = '${ }', stdout = f"{_output[0]:n}.stdout", stderr = f"{_output[0]:n}.stderr"
library("dplyr")
library("tibble")
library("purrr")
library("readr")
library("tidyr")
mv_res = readRDS(${_input:r})
## Define create_vcf function: build a VariantAnnotation::VCF object from
## per-SNP vectors/matrices. Each optional argument (ea_af, effect, se, pval,
## cs, pip) becomes a FORMAT (geno) field with one column per study/condition.
create_vcf = function (chrom, pos, nea, ea, snp = NULL, ea_af = NULL, effect = NULL,
se = NULL, pval = NULL, name = NULL,cs = NULL, pip = NULL)
{
stopifnot(length(chrom) == length(pos))
if (is.null(snp)) {
snp <- paste0(chrom, ":", pos)
}
## NOTE(review): this unconditional reassignment makes the is.null(snp) branch
## above dead code and discards any caller-supplied `snp` names — confirm intended.
snp <- paste0(chrom, ":", pos)
nsnp <- length(chrom)
gen <- list()
## Set up data content for each sample column
if (!is.null(ea_af))
gen[["AF"]] <- matrix(ea_af, nsnp)
if (!is.null(effect))
gen[["ES"]] <- matrix(effect, nsnp)
if (!is.null(se))
gen[["SE"]] <- matrix(se, nsnp)
if (!is.null(pval))
gen[["LP"]] <- matrix(-log10(pval), nsnp)
if (!is.null(cs))
gen[["CS"]] <- matrix(cs, nsnp)
if (!is.null(pip))
gen[["PIP"]] <- matrix(pip, nsnp)
gen <- S4Vectors::SimpleList(gen)
## Set up snps info for the fixed columns
gr <- GenomicRanges::GRanges(chrom, IRanges::IRanges(start = pos,
end = pos + pmax(nchar(nea), nchar(ea)) - 1, names = snp))
coldata <- S4Vectors::DataFrame(Studies = name, row.names = name)
## Set up header information
hdr <- VariantAnnotation::VCFHeader(header = IRanges::DataFrameList(fileformat = S4Vectors::DataFrame(Value = "VCFv4.2",
row.names = "fileformat")), sample = name)
VariantAnnotation::geno(hdr) <- S4Vectors::DataFrame(Number = c("A",
"A", "A", "A", "A", "A"), Type = c("Float", "Float",
"Float", "Float", "Float", "Float"), Description = c("Effect size estimate relative to the alternative allele",
"Standard error of effect size estimate", "-log10 p-value for effect estimate",
"Alternate allele frequency in the association study",
"The CS this variate are captured, 0 indicates not in any cs", "The posterior inclusion probability to a CS"),
row.names = c("ES", "SE", "LP", "AF", "CS", "PIP"))
## Keep only the header rows for the FORMAT fields actually present in `gen`
VariantAnnotation::geno(hdr) <- subset(VariantAnnotation::geno(hdr),
rownames(VariantAnnotation::geno(hdr)) %in% names(gen))
## Assemble the VCF object
vcf <- VariantAnnotation::VCF(rowRanges = gr, colData = coldata,
exptData = list(header = hdr), geno = gen)
VariantAnnotation::alt(vcf) <- Biostrings::DNAStringSetList(as.list(ea))
VariantAnnotation::ref(vcf) <- Biostrings::DNAStringSet(nea)
## Add fixed values
VariantAnnotation::fixed(vcf)$FILTER <- "PASS"
return(sort(vcf))
}
# Get list of cs snps (any SNP with non-zero PIP) and parse the
# "chr:pos_alt_ref"-style identifiers into separate columns.
mv_output_snps = tibble( snps = mv_res$variable_names[which(mv_res$pip > 0)], snps_index = which((mv_res$pip > 0)) )
mv_output_snps = mv_output_snps%>%mutate( cs = map(snps_index,~which(mv_res$sets$cs %in% .x))%>%as.numeric%>%replace_na(0),
pip = map_dbl(snps_index,~(mv_res$pip[.x])),
chr = map_chr(snps,~read.table(text = .x,sep = ":",as.is = T)$V1),
pos_alt_ref = map_chr(snps,~read.table(text = .x,sep = ":",as.is = TRUE)$V2),
pos = map_dbl(pos_alt_ref,~read.table(text = .x,sep = "_",as.is = TRUE)$V1),
alt = map_chr(pos_alt_ref,~read.table(text = .x,sep = "_",as.is = TRUE, colClass = "character")$V2),
ref = map_chr(pos_alt_ref,~read.table(text = .x,sep = "_",as.is = TRUE, colClass = "character")$V3))
# Per-condition effect sizes; the +1 presumably skips the intercept row of coef — TODO confirm
effect_mtr = mv_res$coef[mv_output_snps$snps_index+1,]
colnames(effect_mtr) = mv_res$condition_names
rownames(effect_mtr) = mv_output_snps$snps
# Replicate the per-SNP CS id and PIP across all conditions (same shape as effect_mtr)
cs_mtr = effect_mtr
for(i in 1:nrow(cs_mtr)) cs_mtr[i,] = mv_output_snps$cs[[i]]
pip_mtr = effect_mtr
for(i in 1:nrow(pip_mtr)) pip_mtr[i,] = mv_output_snps$pip[[i]]
output_vcf = create_vcf(
chrom = mv_output_snps$chr,
pos = mv_output_snps$pos,
ea = mv_output_snps$alt,
nea = mv_output_snps$ref,
effect = effect_mtr ,
pip = pip_mtr,
cs = cs_mtr,
name = colnames(effect_mtr)
)
saveRDS(mv_output_snps, "${_output[0]:nn}.rds")
VariantAnnotation::writeVcf(output_vcf,${_output[0]:nr},index = TRUE)
# + kernel="SoS"
# Collect all mvSuSiE VCF outputs into a single manifest file (one path per line).
[Fine_mapping_4,MvSuSiE_summary_stats_analysis_3]
input: group_by = "all"
output: f'{wd}/{Theme_prefix}.mvsusie_rss.output_list.txt'
python: expand= "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
import pandas as pd
# FIX: separator was the literal character "t"; use a real tab ("\t").
# (With a single column and no header/index the separator is never emitted,
# but the intent is clearly tab-separated output.)
pd.DataFrame({"output_vcf" : [$[_input:ar,]]}).to_csv("$[_output]",index = False ,header = False, sep = "\t")
# + [markdown] kernel="SoS"
# ## Univariate SuSiE RSS
# + kernel="SoS"
# Step: univariate SuSiE-RSS fine-mapping, one model per condition (column of Z),
# each condition using its own LD matrix; saves the fitted models (RDS) and a VCF.
[Fine_mapping_5,UniSuSiE_summary_stats_analysis_1]
# NOTE(review): max_L, bhat and sbhat are declared but never referenced in this
# step's body (susie_rss is called with defaults) — confirm whether intended.
parameter: max_L = 10
parameter: ld_type = 'original'
parameter: bhat = "bhat"
parameter: sbhat = "sbhat"
input: output_from("summary_stats_preprocessing")["processed"]
output: uni_rds = f'{wd:a}/{_input:bnn}.LD{ld_type}.unisusierss.model.rds',
uni_vcf = f'{wd:a}/{_input:bnn}.LD{ld_type}.unisusierss.vcf.bgz'
task: trunk_workers = 1, trunk_size = 1, walltime = '2h', mem = '55G', cores = 1, tags = f'{step_name}_{_output[0]:bn}'
R: expand = '${ }', stdout = f"{_output[0]:n}.stdout", stderr = f"{_output[0]:n}.stderr"
library("susieR")
library("dplyr")
library("tibble")
library("purrr")
library("readr")
library("tidyr")
## Define create_vcf function: build a VariantAnnotation::VCF object from
## per-SNP vectors/matrices. Each optional argument (ea_af, effect, se, pval,
## cs, pip) becomes a FORMAT (geno) field with one column per study/condition.
create_vcf = function (chrom, pos, nea, ea, snp = NULL, ea_af = NULL, effect = NULL,
se = NULL, pval = NULL, name = NULL,cs = NULL, pip = NULL)
{
stopifnot(length(chrom) == length(pos))
if (is.null(snp)) {
snp <- paste0(chrom, ":", pos)
}
## NOTE(review): this unconditional reassignment makes the is.null(snp) branch
## above dead code and discards any caller-supplied `snp` names — confirm intended.
snp <- paste0(chrom, ":", pos)
nsnp <- length(chrom)
gen <- list()
## Set up data content for each sample column
if (!is.null(ea_af))
gen[["AF"]] <- matrix(ea_af, nsnp)
if (!is.null(effect))
gen[["ES"]] <- matrix(effect, nsnp)
if (!is.null(se))
gen[["SE"]] <- matrix(se, nsnp)
if (!is.null(pval))
gen[["LP"]] <- matrix(-log10(pval), nsnp)
if (!is.null(cs))
gen[["CS"]] <- matrix(cs, nsnp)
if (!is.null(pip))
gen[["PIP"]] <- matrix(pip, nsnp)
gen <- S4Vectors::SimpleList(gen)
## Set up snps info for the fixed columns
gr <- GenomicRanges::GRanges(chrom, IRanges::IRanges(start = pos,
end = pos + pmax(nchar(nea), nchar(ea)) - 1, names = snp))
coldata <- S4Vectors::DataFrame(Studies = name, row.names = name)
## Set up header information
hdr <- VariantAnnotation::VCFHeader(header = IRanges::DataFrameList(fileformat = S4Vectors::DataFrame(Value = "VCFv4.2",
row.names = "fileformat")), sample = name)
VariantAnnotation::geno(hdr) <- S4Vectors::DataFrame(Number = c("A",
"A", "A", "A", "A", "A"), Type = c("Float", "Float",
"Float", "Float", "Float", "Float"), Description = c("Effect size estimate relative to the alternative allele",
"Standard error of effect size estimate", "-log10 p-value for effect estimate",
"Alternate allele frequency in the association study",
"The CS this variate are captured, 0 indicates not in any cs", "The posterior inclusion probability to a CS"),
row.names = c("ES", "SE", "LP", "AF", "CS", "PIP"))
## Keep only the header rows for the FORMAT fields actually present in `gen`
VariantAnnotation::geno(hdr) <- subset(VariantAnnotation::geno(hdr),
rownames(VariantAnnotation::geno(hdr)) %in% names(gen))
## Assemble the VCF object
vcf <- VariantAnnotation::VCF(rowRanges = gr, colData = coldata,
exptData = list(header = hdr), geno = gen)
VariantAnnotation::alt(vcf) <- Biostrings::DNAStringSetList(as.list(ea))
VariantAnnotation::ref(vcf) <- Biostrings::DNAStringSet(nea)
## Add fixed values
VariantAnnotation::fixed(vcf)$FILTER <- "PASS"
return(sort(vcf))
}
dat = readRDS(${_input:r})
gene_id = "${_input:bnn}"
## Initiate the computation: fit one univariate SuSiE-RSS model per condition
st = proc.time()
susie_list = list()
for (i in 1:ncol(dat$Z)) {
## Retaining only the overlapping snps between this condition's LD matrix and Z
ld = readRDS(dat$ld.table[colnames(dat$Z)[i],]$ld.path)
int_snps = intersect(rownames(ld),dat$snps)
ld = ld[which(rownames(ld)%in%int_snps),which(colnames(ld)%in%int_snps)]
## Missing LD entries are treated as zero correlation
ld[is.na(ld)] = 0
Z = dat$Z[which(dat$snps%in%int_snps),i]
snps = dat$snps[which(dat$snps%in%int_snps)]
susie_list[[i]] = susie_rss(Z, ld)
susie_list[[i]]$conditions_name = colnames(dat$Z)[[i]]
susie_list[[i]]$variable_name = snps
}
# Summarize each fit into a tibble of snps / CS id / PIP / effect.
# NOTE(review): `pip >= 0` keeps every SNP (the mvSuSiE step uses `pip > 0`) —
# confirm whether keeping zero-PIP SNPs here is intentional.
susie_tb_ls = list()
for (i in 1:length(susie_list)){
susie_tb = tibble( snps = susie_list[[i]]$variable_name[which( susie_list[[i]]$pip >= 0)], snps_index = which(( susie_list[[i]]$pip >= 0)) )
susie_tb_ls[[i]]= susie_tb%>%mutate( cs = map(snps_index,~which( susie_list[[i]]$sets$cs %in% .x))%>%as.numeric%>%replace_na(0),
pip = map_dbl(snps_index,~( susie_list[[i]]$pip[.x])),
coef = map_dbl(snps_index,~(coef.susie( susie_list[[i]])[.x+1])))
}
# Successively full_join the per-condition tables on `snps`; duplicated column
# names get suffixes, which the select(contains(...)) below collects per metric.
for(i in 2:length(susie_tb_ls)){susie_tb_ls[[i]] = full_join(susie_tb_ls[[i-1]],susie_tb_ls[[i]], by = "snps") }
m = c("cs","pip","coef")
output = list()
for(i in m){
output[[i]] = susie_tb_ls[[length(susie_tb_ls)]]%>%select(contains(i))%>%as.matrix
}
# Parse "chr:pos_alt_ref"-style SNP identifiers into separate columns
snps_tb = susie_tb_ls[[length(susie_tb_ls)]]%>%mutate(
chr = map_chr(snps,~read.table(text = .x,sep = ":",as.is = T)$V1),
pos_alt_ref = map_chr(snps,~read.table(text = .x,sep = ":",as.is = TRUE)$V2),
pos = map_dbl(pos_alt_ref,~read.table(text = .x,sep = "_",as.is = TRUE)$V1),
alt = map_chr(pos_alt_ref,~read.table(text = .x,sep = "_",as.is = TRUE, colClass = "character")$V2),
ref = map_chr(pos_alt_ref,~read.table(text = .x,sep = "_",as.is = TRUE, colClass = "character")$V3))
output_vcf = create_vcf(
chrom = snps_tb$chr,
pos = snps_tb$pos,
ea = snps_tb$alt,
nea = snps_tb$ref,
effect = output$coef ,
pip = output$pip,
cs = output$cs,
name = colnames(dat$Z)
)
saveRDS(susie_list, ${_output[0]:r})
VariantAnnotation::writeVcf(output_vcf,${_output[1]:nr},index = TRUE)
# + kernel="SoS"
# Collect every per-gene univariate SuSiE VCF into a single manifest file.
[Fine_mapping_6,UniSuSiE_summary_stats_analysis_2]
input: output_from("UniSuSiE_summary_stats_analysis_1")["uni_vcf"], group_by = "all"
output: f'{wd}/{Theme_prefix}.unisusie_rss.output_list.txt'
python: expand= "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
import pandas as pd
# FIX: separator was the literal character "t"; use a real tab ("\t").
# (Harmless for this single-column frame, but clearly not the intent.)
pd.DataFrame({"output_vcf" : [$[_input:ar,]]}).to_csv("$[_output]",index = False ,header = False, sep = "\t")
# + kernel="SoS"
| pipeline/SuSiE_RSS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/amckenny/text_analytics_intro/blob/main/notebooks/04_building_a_corpus.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6GlbytuO_o-r"
# #Prerequisites
# + id="B2XsAIe587HA"
# Get external files and install 3rd party packages
# !mkdir -p texts
# !mkdir -p texts/10ks
# !wget -q https://www.dropbox.com/s/5ibk0k4mibcq3q6/AussieTop100private.zip?dl=1 -O ./texts/AussieTop100private.zip
# !wget -q https://www.dropbox.com/s/u6m4k0uhhj9m2um/Sample_Qualtrics_Output.xlsx?dl=1 -O ./texts/Sample_Qualtrics_Output.xlsx
# !unzip -qq -d ./texts/ ./texts/AussieTop100private.zip
# !pip install -U sec-edgar-downloader
# Standard library imports
import glob, pprint, random, requests, time
from pathlib import Path
from IPython.display import display
# 3rd party imports
import pandas as pd
from bs4 import BeautifulSoup
from sec_edgar_downloader import Downloader
# + [markdown] id="SKQ-ksuD_1uf"
# #Module 4 - Building a Corpus
# ---
# + [markdown] id="TlQMH3o7_7U8"
# One of the most time consuming aspects of text analysis is actually building the corpus of texts themselves. With the increasing availability of texts in electronic format over the Internet, this is getting increasingly easier and faster. However, it is still far from a trivial process.
#
# In this module, we'll introduce several methods of obtaining and getting texts into Python for analysis. The goals for this module are:
#
# * Load a corpus from text files.
# * Load a corpus from Qualtrics survey exports.
# * Load a corpus from an API.
# * Load a corpus from web scraping.
#
# **Note**: Whereas modules 1-3 were designed to be used in a self-directed manner, modules 4 and on are designed to be part of my workshop/course. There is far less prose explanation in these notebooks. However, with some tinkering you may still be able to work through these on your own.
# + [markdown] id="YszE3YarJ7Ot"
# ##4.1. Building a Corpus from Text Files
# ---
# + [markdown] id="MHbFCH9wKW1G"
# The *prerequisites* code automatically loaded two text corpora into the './texts/About/' and './texts/PR/' directories.
#
# Go to the file navigator in Colab (on the left, it looks like a folder) and verify that they're there. If they're not there, ensure that you ran the prerequisites code above and click the refresh button just under "Files" in the file navigator (looks like a folder with a circle at the bottom-right).
#
# If you don't see these folders here, neither will Python!
# + [markdown] id="0CNJ7qOaPSST"
# We want to load every '.txt' file in those directories, so first we need to tell Python where those directories are.
# + id="Q08XhiQWPm4Y"
# Tell Python directories where texts are located
# (these folders are created/populated by the Prerequisites cell above)
texts_dir = Path.cwd() / "texts"
about_dir = texts_dir / "About"
pr_dir = texts_dir / "PR"
dirs_to_load = [about_dir, pr_dir]
# + [markdown] id="RIc3V5LOQCMc"
# We will then create two loops to get all the files in the directories:
#
# 1. Loop through all of the directories we want to load texts from (`dirs_to_load`)
#
# 2. Loop through all .txt files in that directory
#
# Then for each
# + id="uabTVGJeRXQn"
# Loads the texts into a list called "texts".
# Uses pathlib instead of string splitting so the directory ("text_type") and
# file name ("text_id") are extracted correctly on any OS, not just POSIX;
# sorted() makes row order deterministic across runs.
texts = []
for directory in dirs_to_load:  # Loop 1: each corpus directory
    for file in sorted(directory.glob("*.txt")):  # Loop 2: each text file
        with open(file, 'r') as infile:  # Open the text file
            texts.append({
                'text_type': file.parent.name,  # e.g. "About" or "PR"
                'text_id': file.name,           # the file's own name
                'text': infile.read(),          # full file contents
            })

# Creates a Pandas DataFrame from the corpus and saves the DataFrame as a .csv file
corpus_df = pd.DataFrame(texts)
corpus_df.to_csv(texts_dir / "about_pr_texts.csv")

# Displays information about the corpus
print(f"There are {len(corpus_df)} texts in the corpus")
print("\nThe number of each type of text:")
display(corpus_df.groupby(by='text_type').agg('count')['text'])
print("\nA sample of what's in the table:")
display(corpus_df.head(5))
# + [markdown] id="WB3JUPf4VUq0"
# That's it! There are ways of getting word files/PDF files/etc into Python as well. They follow a similar pattern, but are beyond the scope of this module.
# + [markdown] id="DR1pB8CAYPPs"
# ##4.2. Building a Corpus from Qualtrics
# ---
# + [markdown] id="jc5yQp-MYfEV"
# Another source of texts you may want to analyze is results from free-response questions in a survey/experiment. Often we are able to export the full results from our survey instrument; however, it'd be nice not to have to break the spreadsheet into separate text documents. Let's see how this can be done using the Excel exported by Qualtrics.
#
# First, we tell Python the name and location of the Qualtrics export file. In this case we use Excel format.
# + id="c1J8yHvob3M0"
# Tells Python where to find the qualtrics export file
# (downloaded into ./texts by the Prerequisites cell)
qualtrics_file = texts_dir / "Sample_Qualtrics_Output.xlsx"
# + [markdown] id="SdMn9-eecD15"
# Once Python knows where the Qualtrics file can be found, one line of code imports that file into a Pandas DataFrame.
#
# From there we probably want to drop the first row (Qualtrics has two header rows and by default Pandas reads one of them in as 'data'). We also may want to save only certain columns of relevance to our analysis (in this case we'll save only the Progress, Duration, and text) to make viewing the output easier.
# + id="54qji5kXcEMR"
# Read the Qualtrics Excel export, drop the extra header row Qualtrics emits
# (row index 0 after pandas consumes the real header), and keep only the
# columns of interest before persisting the corpus to csv.
qualtrics_df = (
    pd.read_excel(qualtrics_file, header=0)
    .drop(0)
    .loc[:, ["Progress", "Duration (in seconds)", "sample_Text"]]
)
qualtrics_df.to_csv(texts_dir / "qualtrics_texts.csv")

# Preview the first five survey responses
display(qualtrics_df.head(5))
# + [markdown] id="GmUmcguEgrGv"
# As with loading text files, there are also multiple ways of loading survey data into Python (e.g., with a csv file, etc). Here too, it follows a similar procedure, but is beyond the scope of this notebook.
# + [markdown] id="RpoYYOtkgpli"
# ##4.3. Building a Corpus from an API
# ---
# + [markdown] id="EZf3xsAlhCvR"
# Sometimes we want to collect texts from organizations who have built an 'API' or **A**pplication **P**rogramming **I**nterface to help us obtain texts. Myriad organizations have such APIs and each one works a little differently.
#
# Depending on the API, sometimes you can find Python code written by someone else that will make using the API easier. We're going to look at how to get 10-K documents from the SEC EDGAR database. As it turns out, [Jad Chaar](https://github.com/jadchaar/) has written some Python code we're going to use to make our lives easier: the [sec-edgar-downloader](https://github.com/jadchaar/sec-edgar-downloader) package.
#
# (The installation and loading of this package is done in the *Prerequisites* code.)
#
#
# + [markdown] id="H_gSZeU9jdTL"
# According to his package documentation, we first need to create a Downloader object and tell it where to store the files.
#
# Notice that we're telling Python where to *store* them, not where to *find* them. Unlike the manual/qualtrics corpus examples, here we're getting the files from an online source and the API already knows where to find them.
# + id="fI70l6gIjJWb"
# Initializes the SEC downloader and tells it where to store the 10-Ks
tenk_directory = Path.cwd() / "texts" / "10ks"
# NOTE(review): recent sec-edgar-downloader releases require a company name and
# email address as the first arguments — confirm against the installed version.
dl = Downloader(tenk_directory)
# + [markdown] id="ygZbGwNBkUZf"
# We then tell the Downloader object what files we want and from what company. Let's download **all** of IBM and Apple's 10-K documents.
# + id="TdIYBXIzkmEu"
# Tells the API we want the IBM and Apple 10-Ks
# (network download: one call per ticker fetches every available 10-K filing)
company_tickers = ["IBM", "AAPL"]
for ticker in company_tickers:
    dl.get("10-K", ticker)
# + [markdown] id="TfwDtQdwk9qP"
# Go over to the file navigator in Colab and look in the './texts/10ks' directory. You'll see that there is now an entire directory tree housing the downloaded texts. They're not all downloaded into one directory like we had when we used our own texts.
#
# You *could* copy and paste them all into one directory, but imagine if we had used a loop to get all 10-k documents for all S&P 500 companies. That would take forever! Let's use Python to go through these directories for us so we don't have to!
# + id="8Ec3a5ekmR28"
# Identifies the companies based on the tickers in the sec-edgar-filings directory
results_dir = tenk_directory / "sec-edgar-filings"
companies = [company.name for company in results_dir.iterdir() if results_dir.is_dir()]
texts = []
# Loops through the directories and collects all of the 10-K information in a list called 'texts'
for company in companies:
company_10k_dir = results_dir / company / "10-K"
filings = [filing.name for filing in company_10k_dir.iterdir() if company_10k_dir.is_dir()]
for filing_id in filings:
tenk_filename = company_10k_dir / filing_id / "full-submission.txt"
if tenk_filename.exists():
with open(tenk_filename, 'r') as infile:
texts.append({'company': company, 'filing_type': "10-K", 'filing_id': filing_id, 'text': infile.read()})
# Converts the 'texts' list to a Pandas DataFrame and outputs it to a csv file
tenk_df = pd.DataFrame(texts)
tenk_df.to_csv(texts_dir / "tenk_texts.csv")
# Displays information about the corpus
print(f"There are {len(tenk_df)} texts in the corpus")
print("\nThe number of texts from each company:")
display(tenk_df.groupby(by='company').agg('count')['text'])
# + [markdown] id="WsqbHBSKqn1b"
# Now let's take a look at one of our texts and see what it looks like.
# + id="5fuJMJP9qsYe"
# Displays the first 10,000 characters of one of the texts in the corpus
# (iloc[-1] = the last filing loaded; the raw submission is HTML-like markup,
# not plain text, as discussed in the cell below)
print(tenk_df.iloc[-1]['text'][:10000])
# + [markdown] id="VsdPTIXord5u"
# Well... that's certainly a 10-K... however, that's not all text. That almost looks like the code behind an HTML file! ...and yep, that's how you get it. It looks like texts from this data source are going to require some cleaning before we use them in a text analysis!
#
# Every API works a little bit differently, so you're often going to have to go to the API documentation and tinker a bit. However, once you have been through a few APIs, you'll generally see the same ideas implemented over and over again with a few tweaks from API to API.
# + [markdown] id="ZiqOx_h0wDVo"
# ##4.4. Scraping a Corpus
# ---
#
# + [markdown] id="llNPlOvNxuF3"
# Sometimes the texts that you want are online and there is no API readily available to interface with. In this case, you're left with a decision: scrape the text or collect it manually.
#
# There are advantages and disadvantages of each, and we'll talk about that in the workshop/course. However, one thing I want to put in the notebook is an ethical concern. Some websites explicitly disallow scraping. I've observed some scholars scraping these sites (I suspect without permission), but I don't agree with this approach. Use this knowledge/these tools for good and where permitted.
#
# For our example, let's see what's going on at the [Kelley School of Business](https://news.iu.edu/tags/kelley-school-of-business). I searched through the site and didn't see anything prohibiting it, so it seems fair to use so long as we're mindful not to be too taxing on the system.
# + [markdown] id="rmzQgmEuh-Iw"
# ###4.4.1. Building the List of URLs to Scrape
# ---
# + [markdown] id="mq0yWDYwzNKH"
# First let's have Python go out and get the news page and see what we see:
# + id="Ns9feiGz0C_H"
# Has Python call the webpage with the article links on it and displays whether the page was accessed successfully
url = "https://news.iu.edu/tags/kelley-school-of-business"
response = requests.get(url)
status = response.status_code
# HTTP 200 means OK; anything else (404, 500, ...) indicates a failure
if status == 200:
    print(f"The status code was {status} - that means that we received the webpage back")
else:
    print(f"The status code was {status} - something didn't work")
# + [markdown] id="qB5thp8Z6IgS"
# Now let's look at the "text" we got back:
# + id="IYp1xtW11-tv"
# Displays the contents of the webpage that was accessed (raw HTML source)
text = response.text
print(text)
# + [markdown] id="rgcIbiiR6YDY"
# Well that's massive, and again, in HTML... but if we [navigate to the website](https://news.iu.edu/tags/kelley-school-of-business) and compare what we see there to the code, a pattern emerges:
#
# * Each story we want to access seems to be contained in a tag called: `<div class="grid-item--container">`
#
# This insight enables us to extract from the HTML only the bits that surround the news articles. We do so with BeautifulSoup:
# + id="hDK0ORLB0NUQ"
# Displays only the website text within the grid-item--container sections
# NOTE(review): no parser argument is passed to BeautifulSoup, so the result
# depends on whichever parser bs4 selects (and a GuessedAtParserWarning is shown).
bs_text = BeautifulSoup(text)
article_containers = bs_text.find_all('div', attrs={'class':'grid-item--container'})
print(article_containers)
# + [markdown] id="rJy9lhuDGcc1"
# What we want from here is just the URL to the articles. We see that within each container tag, the URLs are stored within an `<a href=...>` tag.
#
# Let's get just those.
# + id="viKA9TVd0flK"
# Displays the URLs to the articles (the href of the first <a> in each container)
for article in article_containers:
    print(article.a['href'])
# + [markdown] id="bCFSaTSgT3U3"
# OK, so now we can see the URLs... but there appears to be two different kinds:
# * Stories: Start with /
# * Blog entries: Contain the full URL.
#
# We could do both, but that would require us to scrape two pages with two separate formats. Let's just do the stories for our demo.
#
# We know that the stories all start with https://news.iu.edu, so let's prepend that and add those to a list of all articles:
# + id="0OnwRDTFU5kg"
# Creates a list of URLs for the selected articles starting with a forward slash (/)
# (relative links are stories; absolute links are blog entries and are skipped)
article_urls = ["https://news.iu.edu"+article.a['href'] for article in article_containers if article.a['href'].startswith('/')]
print(article_urls)
# + [markdown] id="raHYm3wGVd4J"
# That's great, but there's one more important piece of information... this isn't the last page of news stories... there are many more pages we need to get the links from.
#
# How can we tell this? Well if you look at the webpage, there is a "Next >" button when there are no more pages of news.
#
# If we look in our HTML, we see that there is a `<li class="next">` tag when that button is there. Let's take a look at that:
# + id="IABlHz9YV1Gf"
# Finds the HTML code for the 'next' button and prints it
# (find returns None when there is no <li class="next">, i.e. on the last page)
next_page_code = bs_text.find('li', attrs={'class':'next'})
print(next_page_code)
# + [markdown] id="TmbuJEy5XgIj"
# It looks like it too has a URL in it within an `<a href=...>` tag... but this time starting with a question-mark. That just means at the end of "https://news.iu.edu/tags/kelley-school-of-business' we need to add a question-mark and the page number like so:
#
# `https://news.iu.edu/tags/kelley-school-of-business?page=2`
#
# Let's get this URL as well so we know what page to get data from next:
# + id="BBJtBQpqYPRE"
# Extracts the URL from the HTML code for the 'next' button.
# The href is just a query string ("?page=N"), so it is appended to the base URL.
next_news_url = "https://news.iu.edu/tags/kelley-school-of-business"+next_page_code.a['href']
print(next_news_url)
# + [markdown] id="j4CdGLMHYmOa"
# Let's systematize what we've done a little bit:
# + id="GScCGzZPY8WH"
def get_kelley_news_urls(url):
    """Fetch one page of the Kelley news index.

    Parameters
    ----------
    url : str
        URL of an index page listing news articles.

    Returns
    -------
    tuple(list of str, str or None)
        (article_urls, next_page_url). ``next_page_url`` is None when there is
        no "Next" button (last page) or when the request failed.
    """
    base = "https://news.iu.edu"
    # Get URL Data
    response = requests.get(url)
    status = response.status_code
    if status == 200:
        print(f"URL \"{url}\" successfully requested. Parsing...", end=" ")
    else:
        print(f"URL \"{url}\" failed with code {status}. Skipping...", end=" ")
        return ([], None)
    html_code = response.text
    bs_text = BeautifulSoup(html_code)

    # Parse news URLs. Only "story" links (relative, starting with "/") are kept;
    # blog entries carry a full external URL and are deliberately skipped.
    # FIX: guard against containers without an <a> tag (previously an AttributeError).
    article_containers = bs_text.find_all('div', attrs={'class': 'grid-item--container'})
    article_urls = [base + article.a['href']
                    for article in article_containers
                    if article.a is not None and article.a['href'].startswith('/')]

    # Find "Next" button: Return URL if it's there or None if it isn't.
    next_page_code = bs_text.find('li', attrs={'class': 'next'})
    print("Returning...", end=" ")
    # FIX: also treat a "next" element without an <a> tag as "no next page".
    if next_page_code is None or next_page_code.a is None:
        return (article_urls, None)
    else:
        # The href is only a query string ("?page=N"); append it to the base tag URL.
        next_news_url = base + "/tags/kelley-school-of-business" + next_page_code.a['href']
        return (article_urls, next_news_url)
# + [markdown] id="E1Xyl1k_cyxc"
# And let's see if it produces consistent results:
# + id="dFUh5DwWc48F"
# Gets all article URLs and the 'next' URL from the specified webpage
# (returns a tuple: list of article URLs, next-page URL or None)
url = "https://news.iu.edu/tags/kelley-school-of-business"
get_kelley_news_urls(url)
# + [markdown] id="sZ8JtIUIe_uU"
# Ok, now let's use a loop to get **all** of the URLs.
# + id="cF_amodufYo_"
# Iteratively accesses the Kelley news page, extracting the article URLs and 'next' URL until there are no more articles to be extracted.
url = "https://news.iu.edu/tags/kelley-school-of-business"
list_of_article_urls = []
while True:
result_tuple = get_kelley_news_urls(url)
list_of_article_urls.extend(result_tuple[0])
url = result_tuple[1]
if not url:
break
else:
print(f"Sleeping...")
time.sleep(3)
print(f"\n\nThe full list of articles is: {list_of_article_urls}")
# + [markdown] id="wn3iBgIpiF8V"
# ###4.4.2. Scraping the News Articles
# ---
# + [markdown] id="pPWFGGueiRcz"
# Now that we have a list of the URLs for the articles themselves, we will largely repeat what we did above. The difference is that here we are looking for the text of the article, not the URLs to be scraped.
#
# Let's start with one article, the first in our list:
# + id="Xe0uPrdOinQh"
# Fetch the first article in the list and show its raw HTML
url = list_of_article_urls[0]
# Get URL Data
response = requests.get(url)
html_code = response.text
bs_text = BeautifulSoup(html_code)
print(bs_text)
# + [markdown] id="hAz3zDyrimm3"
# And we're back to a mess of HTML again, but we see some valuable data in this HTML:
#
# *note*: You'll see I added 'try' and 'except' blocks here. This is because some articles may/may not have each field. If you try to access a field that doesn't exist, Python will throw an error at you. The 'try' and 'except's just tell Python what to do if there is no error ('try') and what to do if there is an error ('except').
# + id="dp5HCeY2jYTX"
# The category
try:
category = bs_text.find('div', attrs={'class': 'article-category'}).a.text.strip()
print(category)
except:
print("There was no category for this article")
# + id="DMQ-GzI6kBWg"
# The title
# NOTE(review): unlike the other fields, this lookup is not wrapped in try/except —
# a page without an <h1 class="article--title"> would raise AttributeError here.
title = bs_text.find('h1', attrs={'class': 'article--title'}).text.strip()
print(title)
# + id="Nb1Fq7c6kxiF"
# The subtitle
try:
subtitle = bs_text.find('h2', attrs={'class': 'article--subtitle'}).text.strip()
print(subtitle)
except:
print("There was no subtitle for this article")
# + id="VmfQvf6klFu0"
# The author
try:
author = bs_text.find('p', attrs={'class': 'byline author'}).text.replace("By\n", " ").strip()
print(author)
except:
print("There was no author for this article")
# + id="ev9pZyFalZXK"
# The date
try:
date = bs_text.find('p', attrs={'class': 'byline date'}).text.strip()
print(date)
except:
print("There was no date for this article")
# + [markdown] id="06PAFC5XmQny"
# We also see the body of the text in the `<div class="text">` tags. However, unlike the others, there are more than one of them, and they contain HTML tags in them:
# + id="PYawdkTRlz7q"
# The text body
body_text = bs_text.find_all('div', attrs={'class': 'text'})
for section in body_text:
print(section)
# + [markdown] id="t4I5K2O6nFJG"
# Fortunately, BeautifulSoup has a `get_text()` function that will help us get only the printed text from this. We can stitch together the multiple sections ourselves.
# + id="Ibu4ok7LnUa5"
# Extracts the displayed text from the HTML
fulltext = ""
for section in body_text:
fulltext = fulltext + " " + section.get_text().strip()
print(fulltext)
# + [markdown] id="E01rLdKJn6Jp"
# Again, let's pull this together into one function:
# + id="EyQK4aWjoBlz"
def parse_kelley_news_page(url):
    """Fetch a single news article and extract its metadata and body text.

    Parameters
    ----------
    url : str
        URL of a news.iu.edu story page.

    Returns
    -------
    dict or None
        Keys: url, title, category, subtitle, author, date, fulltext.
        Missing fields default to the string "None" ("" for fulltext).
        Returns None when the HTTP request fails.
    """
    # Get URL Data
    response = requests.get(url)
    status = response.status_code
    if status == 200:
        print(f"URL \"{url}\" successfully requested. Parsing...", end=" ")
    else:
        print(f"URL \"{url}\" failed with code {status}. Skipping...", end=" ")
        return (None)
    html_code = response.text
    bs_text = BeautifulSoup(html_code)

    # Parse HTML into article sections
    article = {}
    article["url"] = url
    # Not every article will have every field; a failed lookup raises
    # AttributeError (None has no .text/.a), which we translate into a default.
    # FIX: catch only AttributeError — a bare except would also hide real bugs.
    try:
        article["title"] = bs_text.find('h1', attrs={'class': 'article--title'}).text.strip()
    except AttributeError:
        article["title"] = "None"
    try:
        article["category"] = bs_text.find('div', attrs={'class': 'article-category'}).a.text.strip()
    except AttributeError:
        article["category"] = "None"
    try:
        article["subtitle"] = bs_text.find('h2', attrs={'class': 'article--subtitle'}).text.strip()
    except AttributeError:
        article["subtitle"] = "None"
    try:
        article["author"] = bs_text.find('p', attrs={'class': 'byline author'}).text.replace("By\n", " ").strip()
    except AttributeError:
        article["author"] = "None"
    try:
        article["date"] = bs_text.find('p', attrs={'class': 'byline date'}).text.strip()
    except AttributeError:
        article["date"] = "None"
    # Body text may span several <div class="text"> sections; join them with one
    # leading space per section (matches the original concatenation, including
    # its leading space) instead of quadratic string concatenation.
    try:
        body_text = bs_text.find_all('div', attrs={'class': 'text'})
        article["fulltext"] = "".join(" " + section.get_text().strip() for section in body_text)
    except AttributeError:
        article["fulltext"] = ""
    print("Done...", end=" ")
    return article
# + [markdown] id="hrpjIPDEpJdJ"
# Let's see if it produces consistent results to what we saw previously:
# + id="0Qy1XkkgpNF2"
# Tests our custom parsing function to see if it pulls the right information
# (parse_kelley_news_page returns a dict of fields, or None on request failure)
result_dict = parse_kelley_news_page(list_of_article_urls[2])
print(f"\n{result_dict}")
# + [markdown] id="Xq4-JDbqp2gA"
# Now we will build a loop to take us through all of the article URLs we scraped:
# + id="YQMayCRjp2Lz"
article_texts = []
# Iterates through all article URLs, extracting the article information and text, and stores it to a list 'article_texts'
for url in list_of_article_urls:
    result_dict = parse_kelley_news_page(url)
    # parse_kelley_news_page returns None when the request failed; skip those
    if result_dict:
        article_texts.append(result_dict)
    # Pause between requests to avoid hammering the server
    print(f"Sleeping...")
    time.sleep(3)
# + id="vw-ClkOGzDdb"
# Creates a Pandas DataFrame from the results and saves the corpus to a csv file
kelleynews_df = pd.DataFrame(article_texts)
kelleynews_df.to_csv(texts_dir/"kelleynews_texts.csv")

# Displays the contents of the corpus
display(kelleynews_df)
# + [markdown] id="HCQibGUMZ3v0"
# Now you have a corpus of Kelley-related news articles scraped from the IU webpage. If you go to the file navigator on the left, you can download the corpus from the server to your local machine.
| notebooks/04_building_a_corpus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Big Query Results in a Panda's Dataframe
# +
# SQL: for each departure-delay bucket, count the flights and compute the
# deciles of arrival delay; only buckets with more than 100 flights are kept.
query="""
SELECT
departure_delay,
COUNT(1) AS num_flights,
APPROX_QUANTILES(arrival_delay, 10) AS arrival_delay_deciles
FROM
`bigquery-samples.airline_ontime_data.flights`
GROUP BY
departure_delay
HAVING
num_flights > 100
ORDER BY
departure_delay ASC
"""
from google.cloud import bigquery
# Run the query and materialize the result set as a pandas DataFrame
df = bigquery.Client().query(query).to_dataframe()
df.head()
# -
import pandas as pd
# Expand the repeated `arrival_delay_deciles` field into one column per decile
# and label the columns "0%" .. "100%".
percentiles = df['arrival_delay_deciles'].apply(pd.Series)
percentiles = percentiles.rename(columns=lambda x: str(x * 10) + "%")
df = pd.concat([df['departure_delay'], percentiles], axis=1)
df.head()
# Drop the extreme deciles before plotting. Bug fix: the positional `axis`
# argument of DataFrame.drop was removed in pandas 2.0 -- use `columns=`.
without_extremes = df.drop(columns=['0%', '100%'])
without_extremes.plot(x='departure_delay', xlim=(-30,50), ylim=(-50,50));
| bigQuery/bigQuery-results-dandas-dataframe/BigQueryResultsPandasDataframe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CH. 8 - Market Basket Analysis
# ## Activities
# #### Activity 8.01: Load and Prep Full Online Retail Data
import matplotlib.pyplot as plt
import mlxtend.frequent_patterns
import mlxtend.preprocessing
import numpy
import pandas
# Load the full Online Retail workbook (one row per invoice line).
online = pandas.read_excel(
    io="~/Desktop/Online Retail.xlsx",
    sheet_name="Online Retail",
    header=0
)
# Flag invoices whose number contains a 'C' -- these are filtered out below.
online['IsCPresent'] = (
    online['InvoiceNo']
    .astype(str)
    .apply(lambda x: 1 if x.find('C') != -1 else 0)
)
# Keep positive-quantity, non-'C' invoice lines; retain only the invoice
# number and item description, dropping rows with missing values.
online1 = (
    online
    .loc[online["Quantity"] > 0]
    .loc[online['IsCPresent'] != 1]
    .loc[:, ["InvoiceNo", "Description"]]
    .dropna()
)
# Build one list of item descriptions per unique invoice (a "transaction").
invoice_item_list = []
for num in list(set(online1.InvoiceNo.tolist())):
    tmp_df = online1.loc[online1['InvoiceNo'] == num]
    tmp_items = tmp_df.Description.tolist()
    invoice_item_list.append(tmp_items)
# One-hot encode the transactions: rows = invoices, columns = items, values = bool.
online_encoder = mlxtend.preprocessing.TransactionEncoder()
online_encoder_array = online_encoder.fit_transform(invoice_item_list)
online_encoder_df = pandas.DataFrame(
    online_encoder_array,
    columns=online_encoder.columns_
)
## COL in different order
online_encoder_df.loc[
    20125:20135,
    online_encoder_df.columns.tolist()[100:110]
]
# +
# Activity 8.01 Unit Test
def unittest_activity_8_01(df):
    """Sanity-check the one-hot encoded online retail frame.

    Parameters
    ----------
    df : pandas.DataFrame
        Transaction-encoded frame (invoices x items, boolean values).

    Raises
    ------
    AssertionError
        If the frame is not (20136, 4077) or any column has a number of
        distinct values other than 2.
    """
    df_shape = df.shape
    assert df_shape == (20136, 4077), "Dimension mismatch"
    # Bug fix: the original took `list(set(...))[0]`, i.e. an arbitrary
    # element of the set of per-column unique counts, so a frame where only
    # some columns were binary could still pass. Check the whole set.
    unique_vals = set(df.nunique().values.tolist())
    assert unique_vals == {2}, "Too many unique values"
# Run the Activity 8.01 unit test against the encoded frame.
unittest_activity_8_01(df=online_encoder_df)
# -
# #### Activity 8.02: Apriori on the Complete Online Retail Data Set
# Mine frequent itemsets with a 1% minimum support, keeping item names.
mod_colnames_minsupport = mlxtend.frequent_patterns.apriori(
    online_encoder_df,
    min_support=0.01,
    use_colnames=True
)
mod_colnames_minsupport.loc[0:6]
# Look up the support of one specific item pair.
mod_colnames_minsupport[
    mod_colnames_minsupport['itemsets'] == frozenset(
        {'ANTIQUE SILVER TEA GLASS ETCHED', 'REGENCY CAKESTAND 3 TIER'}
    )
]
# Record the size of each itemset for filtering below.
mod_colnames_minsupport['length'] = (
    mod_colnames_minsupport['itemsets'].apply(lambda x: len(x))
)
# +
## item set order different
# Item pairs whose support lies in [0.02, 0.021).
mod_colnames_minsupport[
    (mod_colnames_minsupport['length'] == 2) &
    (mod_colnames_minsupport['support'] >= 0.02) &
    (mod_colnames_minsupport['support'] < 0.021)
]
# -
# Distribution of itemset support values.
mod_colnames_minsupport.hist("support", grid=False, bins=30)
plt.title("Support")
# +
# Activity 8.02 Unit Test
def unittest_activity_8_02(df):
    """Validate the apriori itemset frame produced in Activity 8.02.

    Raises AssertionError if the frame shape or the count of length-2
    itemsets with support in [0.02, 0.021) is wrong.
    """
    # The full frame must contain every itemset found at min_support=0.01.
    assert df.shape == (1854, 3), "Dimension mismatch"
    # Exactly 17 item pairs are expected in the narrow support band.
    band = (df['length'] == 2) \
           & (df['support'] >= 0.02) \
           & (df['support'] < 0.021)
    assert df[band].shape == (17, 3), "Filtered dimension mismatch"
# Run the Activity 8.02 unit test against the itemset frame.
unittest_activity_8_02(df=mod_colnames_minsupport)
# -
# #### Activity 8.03: Find the Association Rules on the Complete Online Retail Data Set
# +
# Derive association rules from the frequent itemsets, keeping rules with
# confidence >= 0.6.
rules = mlxtend.frequent_patterns.association_rules(
    mod_colnames_minsupport,
    metric="confidence",
    min_threshold=0.6,
    support_only=False
)
rules.loc[0:6]
# -
print("Number of Associations: {}".format(rules.shape[0]))
# Visualise the rule set: support vs confidence, then distributions of the
# lift, leverage, and (finite) conviction metrics.
rules.plot.scatter("support", "confidence", alpha=0.5, marker="*")
plt.xlabel("Support")
plt.ylabel("Confidence")
plt.title("Association Rules")
plt.show()
rules.hist("lift", grid=False, bins=30)
plt.title("Lift")
rules.hist("leverage", grid=False, bins=30)
plt.title("Leverage")
# Conviction can be +inf (confidence == 1), so keep only finite values.
plt.hist(rules[numpy.isfinite(rules['conviction'])].conviction.values, bins = 30)
plt.title("Conviction")
# +
# Activity 8.03 Unit Test
def unittest_activity_8_03(df):
    """Validate the association-rules frame produced in Activity 8.03.

    Raises AssertionError if the frame shape or the (rounded) range of the
    finite conviction values is wrong.
    """
    # The rule table must hold every rule found at confidence >= 0.6.
    assert df.shape == (498, 9), "Dimension mismatch"
    # Conviction can be +inf; the finite range is pinned to two decimals.
    finite = df.loc[numpy.isfinite(df['conviction']), 'conviction'].values
    bounds = (round(numpy.min(finite), 2), round(numpy.max(finite), 2))
    assert bounds == (2.25, 23.37), "Conviction incorrect"
# Run the Activity 8.03 unit test against the generated rules frame.
unittest_activity_8_03(df=rules)
# -
| Chapter08/tests/Activity8.01-Activity8.03_UnitTests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
# %load_ext autoreload
# %autoreload 2
#import prediction
#from utils import load_data,get_data_day
import logging
logging.basicConfig(level=logging.INFO)
import utils
import time
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
from sklearn.multioutput import MultiOutputRegressor
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import pickle
from utils import load_data
from utils import load_data,get_data_day
import numpy as np
import pickle
import datetime
import calendar
import time
import os
import utils
import pandas as pd
import logging
# -
# Load the challenge dataset dict and keep the monitoring-site table.
ddd=load_data()
sites=ddd['sites']
sites['idPolair']=pd.to_numeric(sites['idPolair'])
sites=sites.set_index('idPolair')
sites.head()
# Keep only columns 1:3 of the site table (coordinates, presumably -- see
# the later use of coord_x_l93/coord_y_l93; TODO confirm).
sites=sites.iloc[:,1:3]
sites=sites  # no-op left over from notebook editing
sites.head()
# Pre-filled concentration data (NaNs already imputed offline).
data=pd.read_csv('fillna/final.csv')
data['date']=pd.to_datetime(data['date'])
data=data.set_index(['idPolair','date'])
data.head()
# Attach site coordinates to every concentration row.
newData=pd.merge(sites,data.reset_index().set_index('idPolair'),left_index=True,right_index=True,how='left')
newData.head()
newData=newData.reset_index().set_index(['idPolair','date'])
newData.corr()
# Persist the list of site ids for later model loading.
# NOTE(review): this rebinds `sites` to a numpy array, clobbering the
# DataFrame used above -- cell order matters in this notebook.
gg=newData.groupby(level=0)
sites=np.array([])
for n,g in gg:
    sites=np.append(sites,int(n))
np.save('models/sites.npy',sites)
data.isnull().sum()
# Resample each site's series to daily means and stack them back together.
newDataSet=None
groups=newData.groupby(level=0)
i=0
for name,g in groups:
    gg=g.reset_index().set_index('date').resample('D').mean().reset_index().set_index(['idPolair','date'])
    print(gg.shape)
    if(i==0):
        newDataSet=gg
    else:
        newDataSet=pd.concat([newDataSet,gg])
    i=i+1
# NOTE(review): the loop result above is immediately discarded -- the daily
# frame is reloaded from disk here.
newDataSet=pd.read_csv('fillna/finalDays.csv')
newDataSet['date']=pd.to_datetime(newDataSet['date'])
newDataSet=newDataSet.set_index(['idPolair','date'])
# NOTE(review): `sites` is a numpy array at this point (rebound above);
# this merge only works if the earlier DataFrame cell is re-run first.
x=pd.merge(sites,newDataSet,left_index=True,right_index=True)
x.head()
models=dict({})
groups=x.groupby(level=0)
# One shared random forest predicting all 12 future values at once.
model = MultiOutputRegressor(estimator=RandomForestRegressor(random_state=0,n_estimators=10,max_depth=10))
xx=x.iloc[:,2:]
# Build a supervised window: 10 days of (coords + 4 pollutants) as input,
# the next 3 days' pollutant values (without coords) as targets.
Xy=pd.concat([x,x.shift(-1),x.shift(-2),x.shift(-3),x.shift(-4),x.shift(-5),x.shift(-6),x.shift(-7),x.shift(-8),x.shift(-9),xx.shift(-10),xx.shift(-11),xx.shift(-12)],keys=['t','t+1','t+2','t+3','t+4','t+5','t+6','t+7','t+8','t+9','t+10','t+11','t+12'],axis=1).dropna()
X=Xy.iloc[:,:60]
y=Xy.iloc[:,60:]
In=X.values
Out=y.values
model.fit(In,Out)
# Persist the fitted multi-output forest.
filename = 'bigForest.model'
pickle.dump(model, open(filename, 'wb'))
# +
# Scratch training loop (stops after the first site via `break`).
# NOTE(review): `model = models[name]=model` raises KeyError unless `models`
# was populated elsewhere; this cell looks like an abandoned experiment.
for name,x in groups:
    Xy=pd.concat([x,x.shift(-1),x.shift(-2),x.shift(-3),x.shift(-4),x.shift(-5),x.shift(-6),x.shift(-7),x.shift(-8),x.shift(-9),x.shift(-10),x.shift(-11),x.shift(-12)],keys=['t','t+1','t+2','t+3','t+4','t+5','t+6','t+7','t+8','t+9','t+10','t+11','t+12'],axis=1).dropna()
    X=Xy.iloc[:,:40]
    y=Xy.iloc[:,40:]
    In=X.values.reshape(X.shape[0],10,4)
    Out=y.values.reshape(y.shape[0],12)
    model = models[name]=model
    model.reset_states()
    model.fit(In,Out,verbose=2,epochs=100)
    models[name]=model
    break
# -
model
newDataSet.shift(-1).head()
# Quick timing scratch cell.
import time
x=time.time()
y=time.time()
y-x
def load_models(dir='models/'):
    """Load one saved Keras model per site id listed in `dir`/sites.npy.

    Prints each site id as it loads and the total elapsed time, then
    returns a dict mapping site id -> loaded model.
    """
    started = time.time()
    site_ids = np.load(dir + 'sites.npy')
    loaded = dict({})
    for site_id in site_ids:
        print(site_id)
        loaded[site_id] = load_model(dir + str(int(site_id)) + '.model')
    print((time.time() - started))
    return loaded
models=load_models()
models
# Compare load times with and without compiling the restored model.
K.clear_session()
x=time.time()
m=load_model('models/33111.model')
y=time.time()
x1=time.time()
m1=load_model('models/33111.model',compile=False,)
y1=time.time()
y-x
y1-x1
# Weights-only round trip: save, rebuild the architecture, reload weights.
m.save_weights('models/w.test')
m1.weights
mo = Sequential()
mo.add(LSTM(200,input_shape=(4,10),dropout=0.4))
mo.add(Dense(12))
mo.compile(loss='mean_squared_error', optimizer='adam')
x2=time.time()
mo.load_weights('models/w.test')
y2=time.time()
y2-x2
# Train one small LSTM per site on 4x10 windows -> 12 outputs and save each
# to models2/<site>.model.
newDataSet=pd.read_csv('fillna/finalDays.csv')
newDataSet.set_index(['idPolair','date'],inplace=True)
groups=newDataSet.groupby(level=0)
# +
mmm=dict({})
for name,x in groups:
    Xy=pd.concat([x,x.shift(-1),x.shift(-2),x.shift(-3),x.shift(-4),x.shift(-5),x.shift(-6),x.shift(-7),x.shift(-8),x.shift(-9),x.shift(-10),x.shift(-11),x.shift(-12)],keys=['t','t+1','t+2','t+3','t+4','t+5','t+6','t+7','t+8','t+9','t+10','t+11','t+12'],axis=1).dropna()
    X=Xy.iloc[:,:40]
    y=Xy.iloc[:,40:]
    In=X.values.reshape(X.shape[0],4,10)
    Out=y.values.reshape(y.shape[0],12)
    model = Sequential()
    model.add(LSTM(20,input_shape=(4,10),dropout=0.4))
    model.add(Dense(12))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(In,Out,verbose=2,epochs=100)
    mmm[name]=model
    mmm[name].save('models2/'+str(int(name))+'.model')
# -
# Timing: load one of the smaller models2 networks, compiled vs not.
x2=time.time()
m2=load_model('models2/7001.model')
y2=time.time()
y2-x2
x2=time.time()
m2=load_model('models2/7001.model',compile=False)
y2=time.time()
y2-x2
import h5py
def load_models2(dir='models/'):
    """Load one saved Keras model per site id listed in `dir`/sites.npy.

    Unlike load_models, the Keras backend session is cleared before each
    load. Prints each site id and the total elapsed time, then returns a
    dict mapping site id -> loaded model.
    """
    started = time.time()
    site_ids = np.load(dir + 'sites.npy')
    loaded = dict({})
    for site_id in site_ids:
        # Reset the backend session between loads.
        K.clear_session()
        print(site_id)
        loaded[site_id] = load_model(dir + str(int(site_id)) + '.model')
    print((time.time() - started))
    return loaded
momo=load_models2()
# Inspect the challenge data helpers.
x=utils.load_data()
x.keys()
y=utils.get_data_day(10,x)
gg=y[0]['NO2'].set_index(['idPolair','date']).sort_index().groupby(level=0)
for i,j in gg:
    print(j)
    break
def run_predict(year=2016, max_days=10, dirname="../Data/training", list_days=None):
    """
    year : year to be evaluated
    max_days: number of past days allowed to predict a given day (set to 10 on the platform)
    dirname: path to the dataset
    list_days: list of days to be evaluated (if None the full year is evaluated)
    """
    # NOTE(review): this notebook copy returns the extracted day data from
    # inside the loop, so everything after the `return` is unreachable --
    # it is used here only to grab one day's inputs for inspection.
    overall_start = time.time() # <== Mark starting time
    data = load_data(year=year, dirname=dirname) # load all data files
    sites = data["sites"] # get sites info
    day_results = dict({})
    if list_days is None:
        if calendar.isleap(year): # check if year is leap
            list_days = range(366)
        else:
            list_days = range(365)
    for day in list_days:
        print(day)
        return get_data_day(day, data, max_days=max_days,
                            year=year) # you will get an extraction of the year datasets, limited to the past max_days for each day
        day_results[day] = predict(day, sites, chimeres_day, geops_day, meteo_day,
                                   concentrations_day) # do the prediction
    overall_time_spent = time.time() - overall_start # end computation time
    pickle.dump(day_results, open('submission/results.pk', 'wb')) # save results
    pickle.dump(overall_time_spent, open('submission/time.pk', 'wb')) # save computation time
# Grab the extracted inputs for day 14 only.
chimeres_day, geops_day, meteo_day, concentrations_day =run_predict(list_days=[14])
c=concentrations_day
c.keys()
def toPredcit(concentrations_day,sites):
    # Build, for every site, one row of model input: the last 10 daily means
    # of (X, Y, O3, NO2, PM10, PM25), where X/Y are Lambert-93 coordinates
    # scaled by 1e4. Missing values are filled forward/backward, then with
    # daily means, then with the overall mean. Returns a (n_sites x 60) frame.
    sites['idPolair']=pd.to_numeric(sites['idPolair'])
    sites=sites.set_index('idPolair').loc[:,['coord_x_l93','coord_y_l93']]/10000
    # One column per pollutant, indexed by (site, timestamp).
    O3=pd.DataFrame(concentrations_day['O3'].set_index(['idPolair','date'])['Valeur'])
    O3.columns=['O3']
    NO2=pd.DataFrame(concentrations_day['NO2'].set_index(['idPolair','date'])['Valeur'])
    NO2.columns=['NO2']
    PM10=pd.DataFrame(concentrations_day['PM10'].set_index(['idPolair','date'])['Valeur'])
    PM10.columns=['PM10']
    PM25=pd.DataFrame(concentrations_day['PM25'].set_index(['idPolair','date'])['Valeur'])
    PM25.columns=['PM25']
    # Outer-join all pollutants so sites missing one species keep their rows.
    cons=pd.merge(O3,pd.merge(NO2,pd.merge(PM10,PM25,left_index=True,right_index=True,how='outer'),left_index=True,right_index=True,how='outer'),left_index=True,right_index=True,how='outer').reset_index()
    cons['idPolair']=pd.to_numeric(cons['idPolair'])
    cons['date']=pd.to_datetime(cons['date'])
    cons=cons.set_index('idPolair')
    # Attach the scaled site coordinates to every measurement row.
    X=pd.merge(sites,cons,left_index=True,right_index=True)
    X=X.reset_index()
    X=X.set_index(['idPolair','date']).sort_index()
    i=0
    # Cross-site daily means, used as a fallback fill.
    means=X.reset_index().set_index('date').resample('D').mean()
    del means['idPolair']
    # Overall mean of the daily means, tiled 10x for the final fallback fill.
    meanOfmeans=pd.DataFrame((np.array(means.mean().tolist()).reshape(1,6)),columns=['X','Y','O3', 'NO2', 'PM10', 'PM25'])
    meanOfmeans=pd.concat([meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans],axis=1)
    newX=None
    groups=X.groupby(level=0)
    i=0
    for name,g in groups:
        # Daily means per site, with layered NaN filling.
        gg=g.reset_index().set_index('date').resample('D').mean().reset_index().set_index(['idPolair','date']).fillna(method='ffill').fillna(method='bfill').fillna(means).fillna(0)
        shape=gg.shape[0]
        # Flatten the last 10 daily rows into one 60-wide row for this site.
        gg=pd.DataFrame(np.array(pd.concat([gg.shift(10),gg.shift(9),gg.shift(8),gg.shift(7),gg.shift(6),gg.shift(5),gg.shift(4),gg.shift(3),gg.shift(2),gg.shift(1)],axis=1).iloc[shape-1,:].tolist()).reshape(1,60),index=[name],columns=['X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25','X','Y','O3', 'NO2', 'PM10', 'PM25'])
        meanOfmeans.index=[name]
        gg=gg.fillna(meanOfmeans)
        if(i==0):
            newX=gg
        else:
            newX=pd.concat([newX,gg])
        i=i+1
    return newX
# Build model inputs for every site from the day-14 extraction.
newX=toPredcit(concentrations_day,x['sites'])
newX
newX.iloc[:5,7*4:]
# NOTE(review): `yy` is defined further down -- out-of-order notebook cell.
yy['NO2']
# +
# Single shared model taking 10 days x 6 features (coords + 4 pollutants).
model_lkl=load_model('lklMarraWe7di.model')
def predict(X,model,dir='models/'):
    # Predict D0/D1/D2 hourly blocks for the 4 pollutants at every site.
    # Each of the model's 12 outputs is one (pollutant, day) scalar that is
    # broadcast over the 24 hours of that day.
    #define a dictionary to return the result
    results = dict({})
    #add the 4 pol to the dict
    for pol in ["PM10", "PM25", "O3", "NO2"]:
        results[pol] = dict({})
        #add each idPolair to each pol in the dict
        sites=np.load('models/sites.npy')
        for idPolair in sites:
            results[pol][''+str(int(idPolair))] = dict({})
    #for each pol predict and add the result
    for index,row in X.iterrows():
        #predict the 12 values
        y_predict = model.predict(row.values.reshape(1,10,6))
        #assign each value to the right place
        idPolair=str(int(index))
        print(idPolair)
        # Output layout: columns 0..3 = (O3, NO2, PM10, PM25) for D0,
        # 4..7 for D1, 8..11 for D2.
        results["NO2"][idPolair]["D0"] = np.full((24), y_predict[0, 1])
        results["NO2"][idPolair]["D1"] = np.full((24),y_predict[0, 5])
        results["NO2"][idPolair]["D2"] = np.full((24),y_predict[0, 9])
        results["O3"][idPolair]["D0"] = np.full((24),y_predict[0, 0])
        results["O3"][idPolair]["D1"] = np.full((24),y_predict[0, 4])
        results["O3"][idPolair]["D2"] = np.full((24),y_predict[0, 8])
        results["PM10"][idPolair]["D0"] = np.full((24),y_predict[0, 2])
        results["PM10"][idPolair]["D1"] = np.full((24),y_predict[0, 6])
        results["PM10"][idPolair]["D2"] = np.full((24),y_predict[0, 10])
        results["PM25"][idPolair]["D0"] = np.full((24),y_predict[0, 3])
        results["PM25"][idPolair]["D1"] = np.full((24),y_predict[0, 7])
        results["PM25"][idPolair]["D2"] = np.full((24),y_predict[0, 11])
    return results
# -
yy=predict(newX,model_lkl)
# +
# NOTE(review): abandoned cell -- the trailing `else:` has no body, so this
# cell is a syntax error as written and cannot run.
for idPolair in sites.idPolair:
    if int(idPolair) in X:
        y_predict = model.predict(np.array([X[int(idPolair)]]))
        results["NO2"][idPolair]["D0"] = np.full((24), y_predict[0, 0])
        results["NO2"][idPolair]["D1"] = np.full((24),y_predict[0, 4])
        results["NO2"][idPolair]["D2"] = np.full((24),y_predict[0, 8])
        results["O3"][idPolair]["D0"] = np.full((24),y_predict[0, 1])
        results["O3"][idPolair]["D1"] = np.full((24),y_predict[0, 5])
        results["O3"][idPolair]["D2"] = np.full((24),y_predict[0, 9])
        results["PM10"][idPolair]["D0"] = np.full((24),y_predict[0, 2])
        results["PM10"][idPolair]["D1"] = np.full((24),y_predict[0, 6])
        results["PM10"][idPolair]["D2"] = np.full((24),y_predict[0, 10])
        results["PM25"][idPolair]["D0"] = np.full((24),y_predict[0, 3])
        results["PM25"][idPolair]["D1"] = np.full((24),y_predict[0, 7])
        results["PM25"][idPolair]["D2"] = np.full((24),y_predict[0, 11])
    else:
# -
yy
sites.loc[:,['coord_x_l93','coord_y_l93']]
# Scratch imports / inspection cells.
import p
p.run_predict()
import scoring
del scoring
model.reset_states()
from test import pr
# +
from utils import load_data, get_data_day
import numpy as np
import pandas as pd
import pickle
import datetime
import calendar
import time
import os
import utils
import logging
import pickle
logging.basicConfig(level=logging.INFO)
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import Masking
from keras.models import load_model
from keras import backend as K
# NOTE(review): __file__ is undefined in a notebook kernel -- this line only
# works when the cell is exported as a script.
dirname = os.path.dirname(__file__)
"""
Author: <NAME>
Models: Keras LSTM for each site taking the concentrations for the previous 10 days as input and predict the concentration for the 3 next days.
"""
def predict(day, sites, chimeres_day, geops_day, meteo_day, concentrations_day, model=None):
    # Challenge entry point: predict D0/D1/D2 hourly concentrations for all
    # four pollutants at every site, loading one saved Keras model per site.
    # chimeres_day/geops_day/meteo_day are accepted but unused here.
    start=time.time()
    X=toPredcit(concentrations_day)
    #define a dictionary to return the result
    results = dict({})
    #add the 4 pol to the dict
    for pol in ["PM10", "PM25", "O3", "NO2"]:
        results[pol] = dict({})
        #add each idPolair to each pol in the dict
        #sites=np.load('models/sites.npy')
        for idPolair in sites.idPolair:
            results[pol][''+str(int(idPolair))] = dict({})
        #results[pol]['33374'] = dict({})
    #for each pol predict and add the result
    for index,row in X.iterrows():
        #remove old model from GPU to speed up the model
        K.clear_session()
        #load the model of this site
        filename = os.path.join(dirname,'models/'+ str(int(index))+'.model')
        model=load_model(filename)
        #predict the 12 values
        y_predict = model.predict(row.values.reshape(1,10,4))
        #assign each value to the right place
        idPolair=str(int(index))
        # Output layout: columns 0..3 = (O3, NO2, PM10, PM25) for D0,
        # 4..7 for D1, 8..11 for D2; each scalar fills 24 hourly slots.
        results["NO2"][idPolair]["D0"] = np.full((24), y_predict[0, 1])
        results["NO2"][idPolair]["D1"] = np.full((24),y_predict[0, 5])
        results["NO2"][idPolair]["D2"] = np.full((24),y_predict[0, 9])
        results["O3"][idPolair]["D0"] = np.full((24),y_predict[0, 0])
        results["O3"][idPolair]["D1"] = np.full((24),y_predict[0, 4])
        results["O3"][idPolair]["D2"] = np.full((24),y_predict[0, 8])
        results["PM10"][idPolair]["D0"] = np.full((24),y_predict[0, 2])
        results["PM10"][idPolair]["D1"] = np.full((24),y_predict[0, 6])
        results["PM10"][idPolair]["D2"] = np.full((24),y_predict[0, 10])
        results["PM25"][idPolair]["D0"] = np.full((24),y_predict[0, 3])
        results["PM25"][idPolair]["D1"] = np.full((24),y_predict[0, 7])
        results["PM25"][idPolair]["D2"] = np.full((24),y_predict[0, 11])
    end=time.time()
    print((end-start))
    return results
def run_predict(year=2016, max_days=10, dirname="../Data/training", list_days=None):
    """
    year : year to be evaluated
    max_days: number of past days allowed to predict a given day (set to 10 on the platform)
    dirname: path to the dataset
    list_days: list of days to be evaluated (if None the full year is evaluated)
    """
    overall_start = time.time() # <== Mark starting time
    data = load_data(year=year, dirname=dirname) # load all data files
    sites = data["sites"] # get sites info
    day_results = dict({})
    if list_days is None:
        if calendar.isleap(year): # check if year is leap
            list_days = range(366)
        else:
            list_days = range(365)
    # Predict every requested day and accumulate the per-day result dicts.
    for day in list_days:
        print(day)
        chimeres_day, geops_day, meteo_day, concentrations_day = get_data_day(day, data, max_days=max_days,
                                                                              year=year) # you will get an extraction of the year datasets, limited to the past max_days for each day
        day_results[day] = predict(day, sites, chimeres_day, geops_day, meteo_day,
                                   concentrations_day) # do the prediction
    overall_time_spent = time.time() - overall_start # end computation time
    pickle.dump(day_results, open('submission/results.pk', 'wb')) # save results
    pickle.dump(overall_time_spent, open('submission/time.pk', 'wb')) # save computation time
def toPredcit(concentrations_day):
    # Build, for every site, one row of model input: the last 10 daily means
    # of (O3, NO2, PM10, PM25). Missing values are filled forward/backward,
    # then with cross-site daily means, then with the overall mean.
    # Returns a (n_sites x 40) frame indexed by site id.
    O3=pd.DataFrame(concentrations_day['O3'].set_index(['idPolair','date'])['Valeur'])
    O3.columns=['O3']
    NO2=pd.DataFrame(concentrations_day['NO2'].set_index(['idPolair','date'])['Valeur'])
    NO2.columns=['NO2']
    PM10=pd.DataFrame(concentrations_day['PM10'].set_index(['idPolair','date'])['Valeur'])
    PM10.columns=['PM10']
    PM25=pd.DataFrame(concentrations_day['PM25'].set_index(['idPolair','date'])['Valeur'])
    PM25.columns=['PM25']
    # Outer-join the four pollutants so partially-covered sites keep rows.
    X=pd.merge(O3,pd.merge(NO2,pd.merge(PM10,PM25,left_index=True,right_index=True,how='outer'),left_index=True,right_index=True,how='outer'),left_index=True,right_index=True,how='outer')
    X=X.reset_index()
    X['idPolair']=pd.to_numeric(X['idPolair'])
    X['date']=pd.to_datetime(X['date'])
    X=X.set_index(['idPolair','date']).sort_index()
    i=0
    # Cross-site daily means, used as a fallback fill.
    means=X.reset_index().set_index('date').resample('D').mean()
    del means['idPolair']
    # Overall mean of the daily means, tiled 10x for the final fallback fill.
    meanOfmeans=pd.DataFrame((np.array(means.mean().tolist()).reshape(1,4)),columns=['O3', 'NO2', 'PM10', 'PM25'])
    meanOfmeans=pd.concat([meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans,meanOfmeans],axis=1)
    newX=None
    groups=X.groupby(level=0)
    i=0
    for name,g in groups:
        # Daily means per site, with layered NaN filling.
        gg=g.reset_index().set_index('date').resample('D').mean().reset_index().set_index(['idPolair','date']).fillna(method='ffill').fillna(method='bfill').fillna(means).fillna(0)
        shape=gg.shape[0]
        # Flatten the last 10 daily rows into one 40-wide row for this site.
        gg=pd.DataFrame(np.array(pd.concat([gg.shift(10),gg.shift(9),gg.shift(8),gg.shift(7),gg.shift(6),gg.shift(5),gg.shift(4),gg.shift(3),gg.shift(2),gg.shift(1)],axis=1).iloc[shape-1,:].tolist()).reshape(1,40),index=[name],columns=['O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25','O3', 'NO2', 'PM10', 'PM25'])
        meanOfmeans.index=[name]
        gg=gg.fillna(meanOfmeans)
        if(i==0):
            newX=gg
        else:
            newX=pd.concat([newX,gg])
        i=i+1
    return newX
| tito.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import os
# +
# NOTE(review): hard-coded Windows paths -- this script only runs from this
# specific machine layout.
os.chdir(r'C:\Users\user\Documents\GitHub\COVID-19\python\scripts')
df_file = r'C:\Users\user\Documents\GitHub\COVID-19\consolidated_data\country_report.json'
# Script configuration table, indexed by variable name; blanks become '-'.
config = pd.read_csv('config.csv',index_col='var').fillna('-')
file_dir = config.loc['flourish_data_dir'].path
# Consolidated per-country report used by all chart exporters below.
data = pd.read_json(df_file)
# region mapping dictionary:
region_mapping_dict = pd.read_csv('label_map.csv',header=None,index_col=0).to_dict()[1]
# -
# # DATA FOR FLOURISH @ https://app.flourish.studio/@psycho.presley
def flourish_racing_bars(df,parameters,initial_date,file_dir,file_name='racing_bars'):
    '''
    With this function it is possible to generate the dataset as used
    by Florish Studio @ https://app.flourish.studio/@psycho.presley
    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    parameters: str, array-like
        list with the columns of df to be used. Each column will
        generate one separate and independent file to be used in
        Flourish studio
    initial_date: str
        string of the date in the YYYY-MM-DD format to be the first
        date to be considered in the final file
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created
    '''
    from pandas import DataFrame, concat
    from os import path
    print('--------------------------')
    print('Creating files for the flourish racing bars chart')
    try:
        countries = df['Country/Region'].unique().tolist()
        # The entry 'French Guiana' is resulting in errors in Fourish studio
        # so it will be removed. Bug fix: the original unconditional
        # remove() raised ValueError -- and silently aborted the whole
        # export -- whenever the entry was absent.
        if 'French Guiana' in countries:
            countries.remove('French Guiana')
        for item in parameters:
            print('creating the {} cases file'.format(item))
            columns = ['Country/Region','Date',item]
            flourish = DataFrame()
            for country in countries:
                df_aux = df[columns].loc[df['Country/Region'] == country]
                # One row per country, one column per date.
                df_aux = df_aux.pivot(index='Country/Region',columns='Date', values=item)
                flourish = concat([flourish,df_aux]).interpolate(method='linear',limit=3)
            flourish.fillna(method='bfill',inplace=True)
            file = path.join(file_dir,file_name + '_' + item + '.csv')
            flourish.loc[:,initial_date:].to_csv(file)
        print('Files created succesfully!')
    except Exception as err:
        # Bug fix: the original bare `except:` swallowed every error
        # (even KeyboardInterrupt) without any diagnostic.
        print('Process aborted! No files for flourish studio were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish racing bars chart function.')
        print('--------------------------')
def flourish_parliament_map(df,seats,region_mapping_dict,file_dir,places=1000,file_name='parliament_map'):
    '''
    With this function it is possible to generate the dataset as used
    by the parliament map viz in Florish Studio
    @ https://app.flourish.studio/@psycho.presley
    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    seats: str, array-like
        list with the columns of df to be used as seats. Each column
        represents one seat tab in the Flourish studio parliament chart
    region_mapping_dict: dict
        dictionary with the countries as keys and their region as values
        for region mapping
    file_dir: str
        string of the root dir where the flourish data must be saved
    places: int
        desired number of places in the parliament chart
    file_name: str
        the name of the *.csv file to be created
    '''
    from os import path
    print('--------------------------')
    print('Creating files for the flourish studio parliament map')
    try:
        columns = ['Country/Region']
        columns.extend(seats)
        # Keep only the latest snapshot in the frame.
        df_aux = df[columns].loc[df['Date'] == max(df['Date'])]
        # Rescale each seat column so the totals sum to `places`.
        for item in seats:
            df_aux[item] = df_aux[item].apply(lambda x:places*x/df_aux[item].sum())
        # Saving the first file for the countries parliament chart:
        df_aux.to_csv(path.join(file_dir,file_name + '_country.csv'),index=False)
        # Now ready to create the regions parliament chart
        # mapping the country -> region (unmapped countries keep their name):
        df_aux['Country/Region'] = df_aux['Country/Region'].transform(lambda x: region_mapping_dict[x]
                                                                      if x in region_mapping_dict.keys()
                                                                      else x)
        df_aux = df_aux.groupby('Country/Region').sum().reset_index()
        df_aux.to_csv(path.join(file_dir,file_name + '_region.csv'),index=False)
        print('Files created succesfully!')
    except Exception as err:
        # Bug fix: the original bare `except:` swallowed every error
        # without any diagnostic.
        print('Process aborted! No files for flourish studio were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish parliament map function.')
        print('--------------------------')
def flourish_hierarchy_chart(df,cases,region_mapping_dict,file_dir,file_name='hierarchy_chart'):
    '''
    With this function it is possible to generate the dataset as used
    by the hierarchy chart viz in Florish Studio
    @ https://app.flourish.studio/@psycho.presley
    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    cases: str, array-like
        list with the columns of df to be used as case counts, one
        column per tab in the Flourish studio chart
    region_mapping_dict: dict
        dictionary with the countries as keys and their region as values
        for region mapping
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created
    '''
    from os import path
    print('--------------------------')
    print('Creating files for the flourish studio hierarchy chart')
    try:
        columns = ['Country/Region']
        columns.extend(cases)
        # Keep only the latest snapshot in the frame.
        df_aux = df[columns].loc[df['Date'] == max(df['Date'])]
        # mapping the country -> region (unmapped countries keep their name):
        df_aux['Group'] = df_aux['Country/Region'].transform(lambda x: region_mapping_dict[x]
                                                             if x in region_mapping_dict.keys()
                                                             else x)
        # Saving the file for the hierarchy chart:
        df_aux.to_csv(path.join(file_dir,file_name + '.csv'),index=False)
        print('Files created succesfully!')
    except Exception as err:
        # Bug fix: the original bare `except:` swallowed every error
        # without any diagnostic.
        print('Process aborted! No files for flourish studio were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish hierarchy chart function.')
        print('--------------------------')
def flourish_point_map(df,parameters,lat,long,file_dir,file_name='point_map'):
    '''
    With this function it is possible to generate the dataset as used
    by the point map viz in Florish Studio
    @ https://app.flourish.studio/@psycho.presley
    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    parameters: str, array-like
        list with the columns of df to be used as map variables
    lat: dict
        dictionary with the countries as keys and their latitude
        coordinate as values for mapping
    long: dict
        dictionary with the countries as keys and their longitude
        coordinate as values for mapping
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created
    '''
    from os import path
    print('--------------------------')
    print('Creating files for the flourish studio point map')
    try:
        df_aux=df[['Country/Region','Date']]
        # Convert cumulative totals to daily increments per country.
        for item in parameters:
            df_aux = pd.concat([df_aux,
                                df.groupby('Country/Region')[item].diff().fillna(value=0)],
                               axis=1).sort_values(by='Date')
        # mapping the country -> Lat/Long (unmapped countries get 0):
        df_aux['Latitude'] = df_aux['Country/Region'].transform(lambda x: lat[x]
                                                                if x in lat.keys()
                                                                else 0)
        df_aux['Longitude'] = df_aux['Country/Region'].transform(lambda x: long[x]
                                                                 if x in long.keys()
                                                                 else 0)
        df_aux.to_csv(path.join(file_dir,file_name + '.csv'),index=False)
        print('Files created succesfully!')
    except Exception as err:
        # Bug fix: the original bare `except:` swallowed every error
        # without any diagnostic.
        print('Process aborted! No files were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish point map function.')
        print('--------------------------')
def flourish_card_plot(df,cases,region_mapping_dict,file_dir,file_name='card_plot'):
    '''
    With this function it is possible to generate the dataset as used
    by the card plot viz in Florish Studio
    @ https://app.flourish.studio/@psycho.presley
    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    cases: str, array-like
        list with the columns of df to be used as case counts, one
        column per tab in the Flourish studio card plot
    region_mapping_dict: dict
        dictionary with the countries as keys and their region as values
        for region mapping
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created
    '''
    from pandas import read_csv
    from os import path
    from quantiphy import Quantity as qty
    print('--------------------------')
    print('Creating files for the flourish studio card plot')
    try:
        columns = ['Country/Region']
        columns.extend(cases)
        # Keep only the latest snapshot in the frame.
        df_aux = df[columns].loc[df['Date'] == max(df['Date'])]
        # mapping the country -> region:
        df_aux['Group'] = df_aux['Country/Region'].transform(lambda x: region_mapping_dict[x]
                                                             if x in region_mapping_dict.keys()
                                                             else x)
        df_aux = df_aux.groupby('Group').sum()
        df_aux.drop('Other',inplace=True)
        # NOTE(review): 'region_logo.csv' is read relative to the current
        # working directory -- confirm the caller always chdirs first.
        df_logo = read_csv('region_logo.csv', index_col='Group')
        df_aux = df_aux.join(df_logo, on='Group')
        # Render counts as short human-readable quantities (e.g. "1.23M").
        for item in cases:
            df_aux[item] = df_aux[item].transform(lambda x:qty(x).render(prec=2))
        # NOTE(review): index=False drops the 'Group' index from the csv --
        # verify Flourish does not need the group labels here.
        df_aux.to_csv(path.join(file_dir,file_name + '.csv'),index=False)
        print('Files created succesfully!')
    except Exception as err:
        # Bug fix: the original bare `except:` swallowed every error
        # without any diagnostic.
        print('Process aborted! No files for flourish studio were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish card plot function.')
        print('--------------------------')
def flourish_survey_chart(df,cases,region_mapping_dict,file_dir,file_name='survey_chart'):
    '''
    With this function it is possible to generate the dataset as used
    by the survey chart viz in Florish Studio
    @ https://app.flourish.studio/@psycho.presley
    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    cases: str, array-like
        list with the columns of df to be converted to percentages of
        the confirmed count and bucketed into quantile intervals
    region_mapping_dict: dict
        dictionary with the countries as keys and their region as values
        for region mapping
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created
    '''
    from os import path
    # Bug fix: the unused `from quantiphy import Quantity` import (its only
    # use is the commented-out line below) made this function raise
    # ImportError on machines without quantiphy.
    print('--------------------------')
    print('Creating files for the flourish studio survey chart')
    # Map a percentage into one of four labels based on the 20/50/80
    # quantiles of the column.
    def quart_func(x,q,case):
        if x < q[0]:
            return 'very few ' + case.lower() + ' cases'
        elif q[0] <= x < q[1]:
            return 'few ' + case.lower() + ' cases'
        elif q[1] <= x < q[2]:
            return 'high ' + case.lower() + ' cases'
        else:
            return 'very high ' + case.lower() + ' cases'
    try:
        columns = ['Country/Region','Confirmed']
        columns.extend(cases)
        # Keep only the latest snapshot in the frame.
        df = df[columns].loc[df['Date'] == max(df['Date'])]
        for item in cases:
            new_column = 'percentage of '+item.lower()
            df[new_column] = (df[item]*100/df['Confirmed']).round(2)
            quantile = df[new_column].quantile(q=[0.2,0.5,0.8])
            df[item.lower() + ' cases interval']=df[new_column].apply(
                lambda x:quart_func(x,quantile.values,item)
            )
            #df[new_column] = df[new_column].transform(lambda x:qty(x,'%'))
        # mapping the country -> region:
        df['WHO region'] = df['Country/Region'].transform(lambda x: region_mapping_dict[x]
                                                          if x in region_mapping_dict.keys()
                                                          else x)
        # Drop the raw counts; only the derived percentage/interval columns
        # are exported.
        df.drop(cases,axis=1,inplace=True)
        df.to_csv(path.join(file_dir,file_name + '.csv'),index=False)
        print('Files created succesfully!')
    except Exception as err:
        # Bug fix: the original bare `except:` swallowed every error
        # without any diagnostic.
        print('Process aborted! No files for flourish studio were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish survey chart function.')
        print('--------------------------')
def flourish_slope_chart(df,file_dir,file_name='slope_chart',
                         case='Confirmed',initial_month=3):
    '''
    Generate the dataset used by the slope chart viz in Flourish Studio
    @ https://app.flourish.studio/@psycho.presley

    For every country the monthly number of new cases (as a percentage of
    the country's total, rounded to 1 decimal) is computed from `case`,
    starting at `initial_month`, pivoted to one column per month (using
    abbreviated month names) and written to <file_dir>/<file_name>.csv.

    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module; its 'Date' column must
        hold datetime-like values
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created (default 'slope_chart')
    case: str
        the column of df from which new cases are computed
        (default 'Confirmed')
    initial_month: int
        the first month (1-12) to be included in the chart (default 3)
    '''
    from os import path
    from calendar import month_abbr
    from pandas import DataFrame, concat, read_csv
    print('--------------------------')
    print('Creating files for the flourish studio slope chart')
    try:
        # work on a copy so the caller's DataFrame is not mutated when the
        # 'Date' column is reduced to month numbers below
        df = df.copy()
        new_case = case + ' new cases'
        columns = ['Country/Region','Date',case]
        df['Date'] = df['Date'].transform(lambda x:x.month)
        countries = df['Country/Region'].unique()
        df_slope_chart = DataFrame()
        for country in countries:
            df_aux = df[columns].loc[df['Country/Region'] == country]
            df_aux = df_aux[df_aux.Date >= initial_month]
            total_confirmed = df_aux[case].max()
            df_aux[new_case] = df_aux.groupby('Country/Region')[case].diff().fillna(value=0)
            df_aux = df_aux.groupby(['Country/Region','Date']).sum().reset_index().drop(case,axis=1)
            df_aux[new_case] = (df_aux[new_case]*100/total_confirmed).round(1)
            # negative diffs are data corrections; clip them to zero
            df_aux[new_case] = df_aux[new_case].clip(lower=0)
            df_aux = df_aux.pivot(index='Country/Region',columns='Date',values=new_case)
            df_slope_chart = concat([df_slope_chart,df_aux]).fillna(value=0)
        # replace the numeric month columns by their abbreviated names
        df_slope_chart.columns = [month_abbr[i] for i in df_slope_chart.columns]
        # NOTE(review): df_region is read but the join below is commented out,
        # so 'region_mapping.csv' must still exist even though it is unused.
        df_region = read_csv('region_mapping.csv',index_col = 'Country/Region')
        #df_slope_chart = df_slope_chart.join(df_region, on = 'Country/Region', how='inner')
        df_slope_chart.to_csv(path.join(file_dir,file_name + '.csv'),index=False)
        print('Files created succesfully!')
    except Exception as err:
        # report the failure instead of silently swallowing every exception
        print('Process aborted! No files for flourish studio were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish slope chart function.')
        print('--------------------------')
def flourish_heat_map(df,parameters,file_dir,file_name='heat_map'):
    '''
    Generate the dataset used by the heat map viz in Flourish Studio
    @ https://app.flourish.studio/@psycho.presley

    For every column in `parameters` (except 'Active', which is kept as-is)
    the per-country daily variation is computed, negative data corrections
    are clipped to zero, and the result is written sorted by date to
    <file_dir>/<file_name>.csv.

    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    parameters: str, array-like
        list with the columns of df to be used as map variables; one heat
        map variable is produced per column
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created (default 'heat_map')
    '''
    from os import path
    print('--------------------------')
    print('Creating files for the flourish studio heat map')
    try:
        # copy the list so the caller's `parameters` is never mutated
        parameters = list(parameters)
        if 'Active' in parameters:
            df_aux = df[['Country/Region','Date','Active']]
            parameters.remove('Active')
        else:
            df_aux=df[['Country/Region','Date']]
        for item in parameters:
            # daily variation per country; clip negative corrections to 0
            daily = df.groupby('Country/Region')[item].diff().fillna(value=0).clip(lower=0)
            df_aux = pd.concat([df_aux, daily], axis=1).sort_values(by='Date')
        df_aux.to_csv(path.join(file_dir,file_name + '.csv'),index=False)
        print('Files created succesfully!')
    except Exception as err:
        # report the failure instead of silently swallowing every exception
        print('Process aborted! No files were created.')
        print('Reason: {}'.format(err))
    finally:
        print('End execution of the flourish heat map function.')
        print('--------------------------')
# +
# NOTE(review): `data` and `file_dir` must already exist in the notebook
# namespace (created in earlier cells); `data` is presumably the formatted
# world dataset from world_data_formatter -- confirm upstream.
df = data.copy()
# Steps 1-7 below are kept commented for reference; only the heat map (8)
# is currently active.
# # 1 - Racing bars chart:
# initial_date = '2020-03-06'
# parameters = ['Active','Confirmed','Deaths','Recovered']
# flourish_racing_bars(df,parameters,initial_date,file_dir)
# # ===============
# # 2 - Parliament map:
# seats = ['Confirmed','Active','Recovered','Deaths']
# flourish_parliament_map(df,seats,region_mapping_dict,file_dir)
# # ===============
# # 3 - Point map:
# lat = pd.read_csv('coordinates.csv',header=None,index_col=0).to_dict()[1]
# long = pd.read_csv('coordinates.csv',header=None,index_col=0).to_dict()[2]
# parameters = ['Confirmed','Active','Recovered','Deaths']
# flourish_point_map(df,parameters,lat,long,file_dir)
# # ===============
# # 4 - Hierarchy chart:
# cases = ['Confirmed','Active','Recovered','Deaths']
# flourish_hierarchy_chart(df,cases,region_mapping_dict,file_dir)
# # ===============
# # 5 - Card plot:
# cases = ['Confirmed','Active','Recovered','Deaths']
# flourish_card_plot(df,cases,region_mapping_dict,file_dir)
# # ===============
# # 6 - Survey chart:
# cases = ['Active','Recovered','Deaths']
# flourish_survey_chart(df,cases,region_mapping_dict,file_dir)
# # ===============
# # 7 - Slope chart:
# flourish_survey_chart(df,file_dir)
# NOTE(review): item 7 calls flourish_survey_chart but the heading says
# slope chart -- flourish_slope_chart(df,file_dir) was probably intended.
# ===============
# 8 - Heat map:
cases = ['Confirmed','Active','Recovered','Deaths']
flourish_heat_map(df,cases,file_dir)
# -
| python/notebooks/Covid19_Flourish.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="background-color: #c1f2a5">
#
#
# # PS7
#
# In this problem set, we will use the simplest type of recurrent network, an Elman network, to try to learn a simple language.
# # Instructions
#
#
#
# Remember to do your problem set in Python 3. Fill in `#YOUR CODE HERE`.
#
# Make sure:
# - that all plots are scaled in such a way that you can see what is going on (while still respecting specific plotting instructions)
# - that the general patterns are fairly represented.
# - to label all x- and y-axes, and to include a title.
#
#
# **N.B.** The ideas in this notebook draw heavily from the readings
# <ol>
# <li> <NAME>. (1990). Finding structure in time. _Cognitive Science, 14_, 179-211.
# <li><NAME>., & <NAME>. (1986). Past tenses of English verbs. In McClelland, J. and <NAME>. (Eds.) _Parallel distributed processing: Explorations in the microstructure of cognition. Vol. 2: Applications_ (pp. 216-271). Cambridge, MA: MIT Press.
# </ol>
# <br>
# If you are confused about some of the ideas in this notebook or would like further clarification, we recommend having a look there.
# </div>
# + deletable=false
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# + [markdown] deletable=false
# ## Introduction
#
# One of the most successful (and controversial) applications of neural
# networks has been as models of human language. Specifically, contrary to the rules/symbols approach proposed by Chomsky, neural networks have been used to demonstrate that distributed representations can give rise to language learning. You will test whether a
# simple neural network is capable of learning the rule underlying a
# context-free language.
#
# The language $a^nb^n$, being the set of all strings containing a
# sequence of $a$'s followed by a sequence of $b$'s of the same length
# is a simple example of a language that can be generated by a
# context-free grammar but not a finite-state grammar (because a finite state grammar cannot keep track of the number of times a string has occurred in the sentence). Human languages
# exhibit similar long-range constraints -- for example, a plural noun
# at the start of a sentence affects conjugation of a verb at the end,
# regardless of what intervenes. Some criticisms of applications of
# neural networks to human languages are based upon their apparent
# reliance on local sequential structure, which makes them seem much
# more similar to finite-state grammars than to context-free
# grammars. In other words, simple neural networks will mostly use the most recent word to predict the next one. An interesting question to explore is thus whether a
# recurrent neural network can learn to generalize a simple rule
# characterizing a long-range dependency, such as the rule underlying
# $a^nb^n$.
#
# Recall that an "Elman" network, as discussed by Elman (1990), is a
# recurrent network where the activation of the hidden units at the
# previous timestep are used as input to the hidden units on the current
# timestep. This type of network architecture allows the
# network to learn about sequential dependencies in the input data. In this notebook we will evaluate whether such a network can learn an $a^nb^n$
# grammar. Here we formalize learning a grammar as being able to correctly
# predict what the next item in a sequence should be given the
# rules of the grammar. Therefore, the output node represents the
# network's prediction for what the next item in the sequence (the next
# input) will be -- it outputs a $1$ if it thinks the current input will
# be followed by an $a$, and outputs a $0$ if it thinks the current
# input will be followed by a $b$.
# + [markdown] deletable=false
# ## Q1. Data [5pts, SOLO]
# We will use the `abdata.npz` dataset for this problem. Make sure that the data file is in the same directory as your notebook while working on the problem set.
#
# This dataset has two keys.
# - The array `train_data` contains the sequence we will use to train our network.
# - The array `test_data` contains the sequence we will use to evaluate the
# network.
#
# In both `train_data` and `test_data` a $1$ represents an $a$ and a $0$ represents a $b$.
#
# `train_data` was constructed by concatenating a randomly ordered
# set of strings of the form $a^nb^n$, with $n$ ranging from 1 to 11.
# The frequency of sequences for a given value of $n$ in the training set
# are given by `np.ceil(50/n)`, thus making them inversely proportional to $n$.
# The `np.ceil` function returns the smallest integer greater or equal to
# its input. For example, `np.ceil(3)` is 3, but `np.ceil(3.1)` is
# 4 . `test_data` contains an ordered sequence of strings of the form
# $a^nb^n$, with $n$ increasing from 1 to 18 over the length of the
# string.
#
# Take a look at the data! You can first print the variables. Next plot the first 100 values of train_data and the first 100 values of test_data in two subplots. Make sure to label the axes and the titles. Upload your plot to gradescope as PS7_Q1.png
#
# + deletable=false
# Load the a^n b^n dataset; it exposes the 'train_data' and 'test_data' keys.
ab_data = np.load("abdata.npz")
ab_data.keys()
# look at train_data
print('train_data')
train_data = ab_data['train_data']
print(train_data[:30])
print(len(train_data))
## look at test_data
print('test_data')
test_data = ab_data['test_data']
print(test_data)
## plot the data
#YOUR CODE HERE
# NOTE(review): `figure` must be created by the plotting code above
# (e.g. figure, axes = plt.subplots(2, 1)) before it can be saved.
figure.savefig('PS7_Q1.png')
# + [markdown] deletable=false
# ## Q2. Input/output [3 pts HELP]
# In order to train your network, you will need both training *input* and
# training *output*.
#
# That is, you need a sequence of inputs of the form
# $a^nb^n$, and a corresponding sequence with the correct output for
# each item in the input sequence.
#
# For this problem we're going to use `train_data[:-1]` as the
# input training sequence, and `train_data[1:]` as the output
# training sequence.
#
# Explain in gradescope why `train_data[:-1]` and `train_data[1:]` are appropriate input and output
# sequences. If you're confused by what the sequences
# `train_data[:-1]` and `train_data[1:]` look like,
# try creating them in a cell and compare them to `train_data`.
# + [markdown] deletable=false
# ## Elman network (provided)
# We have provided you with a function, `train_Elman`, which takes four arguments:
# - `input` -- the training input sequence
# - `output` -- the training output sequence
# - `num_hidden` -- the number of hidden units
# - `num_iters` -- the number of training iterations: network needs to be trained on many iterations
#
#
# `train_Elman` will:
#
# 1) create a network with one input node, the specified number of hidden units, and one output node
#
#
# 2) train the network on the training data for the specified number of iterations.
#
#
# The network sees the
# training data one input at a time (in our case, it sees a single $1$
# or $0$ per time step).
# +
def train_Elman(inputs, outputs, num_hidden, num_iters):
    """
    Initializes and trains an Elman network. For details see Elman (1990).
    Parameters
    ----------
    inputs : numpy array
        A one dimensional sequence of input values to the network
    outputs : numpy array
        A one-dimensional sequence of desired output values for each of the
        items in inputs
    num_hidden : int
        The number of hidden units to use in the network
    num_iters : int
        The number of training iterations to run the network for
    Returns
    -------
    net : dict
        Dictionary object containing the trained network weights for each layer.
        Key 1 corresponds to the weights from the visibles to the hidden units,
        key 2 corresponds to the weights from the hiddens to the output units.
    NOTE: Poorly-Python-ported from trainElman.m, which in turn was adapted from
    code from http://www.cs.cmu.edu/afs/cs/academic/class/15782-f06/matlab/
    recurrent/ which in turn was adapted from Elman (1990) :-)
    """
    # fixed seed so the random weight initialization is reproducible
    np.random.seed(seed=1)
    # Parameters
    # increment to the derivative of the transfer function (Fahlman's trick)
    # -- keeps learning going even where tanh saturates and its true
    # derivative is near zero
    DerivIncr = 0.2
    Momentum = 0.05
    LearnRate = 0.001
    num_input = 1
    num_output = 1
    num_train = inputs.shape[0]
    # two-dimensional input: rows are features, columns are time steps
    if inputs.ndim == 2:
        num_input = inputs.shape[0]
        num_output = outputs.shape[0]
        num_train = inputs.shape[1]
    if not all([outputs.ndim == inputs.ndim,
                inputs.shape[0] == outputs.shape[0]]):
        raise ValueError('unequal number of input and output examples')
    # create a dictionary to hold the network weights
    # weights are drawn uniformly from [-0.5, 0.5); the "+ 1" column is the
    # bias weight and the "+ num_hidden" columns are the recurrent (context)
    # weights
    net = {}
    net[1] = np.random.rand(num_hidden, num_input + num_hidden + 1) - 0.5
    net[2] = np.random.rand(num_output, num_hidden + 1) - 0.5
    # the context layer
    # zeros because it is not active when the network starts
    Result1 = np.zeros((num_hidden, num_train))
    # the row of ones is the bias
    Inputs = np.vstack((inputs, np.ones(num_train)))
    Desired = outputs
    # momentum accumulators for the two weight matrices
    delta_w1 = 0.
    delta_w2 = 0.
    # Training
    for ii in range(num_iters):
        # Recurrent state
        # includes current inputs, as well as the output of the hidden layer
        # from the previous time step
        # (the hidden activations are shifted right by one column, with a
        # column of zeros standing in for the step before the sequence began)
        Input1 = np.vstack((Inputs, np.hstack([np.zeros((num_hidden,1)), Result1[:,:-1]])))
        # Forward propagate activations
        # input --> hidden
        NetIn1 = np.dot(net[1], Input1)
        Result1 = np.tanh(NetIn1)
        # Hidden --> output
        # we again add a row of ones for bias
        Input2 = np.vstack((Result1, np.ones(num_train)))
        NetIn2 = np.dot(net[2], Input2)
        Result2 = np.tanh(NetIn2)
        # Backprop errors
        # output --> hidden
        # cosh(x)**(-2) is sech(x)**2, the derivative of tanh(x)
        Result2Error = Result2 - Desired
        In2Error = Result2Error * (DerivIncr + np.cosh(NetIn2)**(-2))
        # hidden --> input
        # [:-1, :] drops the error attributed to the constant bias row
        Result1Error = np.dot(net[2].T, In2Error)
        In1Error = Result1Error[:-1, :] * (DerivIncr + np.cosh(NetIn1)**(-2))
        # Calculate weight updates
        # gradient-descent step plus a momentum term from the previous update
        dw2 = np.dot(In2Error, Input2.T)
        dw1 = np.dot(In1Error, Input1.T)
        delta_w2 = -LearnRate * dw2 + Momentum * delta_w2
        delta_w1 = -LearnRate * dw1 + Momentum * delta_w1
        net[2] = net[2] + delta_w2
        net[1] = net[1] + delta_w1
    return net
# -
# ## Q3 - learn anbn language [HELP 5 pts]
# Complete the function `anbn_learner` below to train an "Elman"
# network with two hidden units using the provided function `train_Elman` (remember
# to use the input **train_data[:-1]** and output **train_data[1:]** sequences from Q2).
#
#
#
# Train the network for *100 iterations*, and return the final output of the network.
# We provide test cases. If you got the function right, the following cell should print "Success". Otherwise, it will give you an error message that will help with debugging.
#
# Copy your code into gradescope.
# + deletable=false nbgrader={"checksum": "c780e1b1346209cf913d01a4babea476", "grade": false, "grade_id": "anbn_learner", "locked": false, "schema_version": 1, "solution": true}
def anbn_learner(train_data):
    """
    Creates an "Elman" neural network with two hidden units and trains it
    on the provided data.

    Parameters
    ----------
    train_data: numpy array of shape (n,)
        the data on which to train the Elman network

    Returns
    -------
    net: dictionary with 2 keys
        a dictionary containing the weights of the network. Valid keys are 1 and 2.
        key 1 is for the weights between the input and the hidden units, and
        key 2 is for the weights between the hidden units and the output units.
    """
    # Predict the next symbol from the current one (see Q2): the inputs are
    # every symbol but the last, and the targets every symbol but the first.
    # Q3 specifies two hidden units trained for 100 iterations.
    return train_Elman(train_data[:-1], train_data[1:], 2, 100)
# + deletable=false nbgrader={"checksum": "8474acbd70334d48c94b2a3f9c2ddd76", "grade": true, "grade_id": "check_anbn_learner", "locked": false, "points": 0.5, "schema_version": 1, "solution": false}
"""Check that anbn_learner returns the correct output"""
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_almost_equal
# check that abdata hasn't been modified
# NOTE(review): Q1 loads "abdata.npz" from the notebook directory, but this
# cell loads "data/abdata.npz" -- confirm which location is correct.
ab = np.load("data/abdata.npz")
assert_array_equal(test_data, ab['test_data'], "test_data array has changed")
assert_array_equal(train_data, ab['train_data'], "train_data array has changed")
# generate test data
# a toy sequence: ten 0s followed by ten 1s
traindata = np.zeros(20)
traindata[10:] = 1.
net = anbn_learner(traindata)
# check that net has the correct shape and type
assert_equal(type(net), dict, "net should be a dict of network weights")
assert_equal(len(net), 2, "incorrect number of layers in net")
assert_equal(list(net.keys()), [1,2], "keys for net should be 1 and 2")
# check the dimensions of the weight matrices
# (2 hidden units; 4 = 1 input + 2 context + 1 bias; 3 = 2 hidden + 1 bias)
assert_equal(net[1].shape, (2,4), "invalid network weights for the input -> hidden layer")
assert_equal(net[2].shape, (1,3), "invalid network weights for the hidden -> output layer")
# check the weight matrix sums to the correct value on testdata
assert_almost_equal(np.sum(net[1]), -1.9326, places=4, msg="weights for input --> hidden layer are incorrect")
assert_almost_equal(np.sum(net[2]), 0.01825, places=4, msg="weights for hidden --> output layer are incorrect")
print("Success!")
# + [markdown] deletable=false
# ## Q4 - checking the trained network [5 pts, solo]
# Once the network is trained (*your anbn_learner function should pass the test case in the previous cell)*, you can test it on a new set of sequences
# and evaluate its predictions to see how well it has learned the target
# grammar. To generate predictions from the trained network, we use the provided function `predict_Elman`.
#
# Use your `anbn_learner` function on `train_data` to train a network, then use your trained network (by passing it as the first input to `predict_Elman` function) to predict the sequences in `test_data`.
#
# A) The `predict_Elman` function returns an array of predicted values with the same dimensions as the input.Plot your prediction (sequence index number on x axis, predicted values on y axis) and upload the figure to gradescope as PS7_Q4.png.
#
# B) Look back at your plot of the test data in Q1 - what do you think of the network's predictions? Do your predictions approximate the testing data? Explain why you think they do, or why you think they do not, by referring to the mechanisms of recurrent networks and the nature of the data.
# +
def predict_Elman(net, inputs):
    """
    Run a trained Elman network forward over an input sequence and return
    the network's output at every time step.

    Parameters
    ----------
    net : dict
        Trained weights as returned by train_Elman: net[1] holds the
        (input + context + bias) -> hidden weights and net[2] the
        (hidden + bias) -> output weights.
    inputs : numpy array
        A one dimensional sequence of input values to the network

    Returns
    -------
    outputs : numpy array
        The network's prediction for each item in `inputs`.

    NOTE: ported from predictElman.m (CMU 15-782 course code), which was in
    turn adapted from Elman (1990).
    """
    n_hidden = net[1].shape[0]
    n_steps = inputs.shape[1] if inputs.ndim == 2 else inputs.shape[0]
    # stack a constant bias row of ones underneath the inputs
    stacked = np.vstack((inputs, np.ones((1, n_steps))))
    # the context units start out silent
    context = np.zeros(n_hidden)
    preds = np.zeros(n_steps)
    for t in range(n_steps):
        # hidden layer sees the current input column (with its bias) plus
        # the previous hidden state (the "context")
        layer1_in = np.concatenate((stacked[:, t], context))
        context = np.tanh(np.dot(net[1], layer1_in))
        # output layer sees the hidden state plus a bias unit
        layer2_in = np.concatenate((context, np.ones(1)))
        preds[t] = np.tanh(np.dot(net[2], layer2_in))
    return preds
# + deletable=false
## YOUR CODE HERE
# NOTE(review): `figure` must be created by the plotting code above before
# it can be saved (plot predict_Elman's output on test_data here).
figure.savefig('PS7_Q4.png')
# + [markdown] deletable=false
# ## Q5 - Quantifying the model performance [2pts, HELP]
#
# How well does the network do at predicting the next letter? Has it learned the language? Let's look more carefully.
#
# To quantify how well the network performs we are going to look at how much the predicted sequence deviates from expectations. The squared error (SE) for a prediction $p_i$ in the prediction vector ${\bf p}$
# compared to a target value ${y_i}$ in the target vector ${\bf y}$ is
#
# \begin{equation}
# SE_i = (p_i-y_i)^2
# \end{equation}
#
# That is, the squared error is just the squared difference between the
# predicted and target value.
#
# Complete the function `squared_error`, which takes in an array of test data and an array of
# predictions. The function should return an error array **with the same number of elements as the test data**, containing the SE for each
# of the predictions of the network compared against the corresponding value in `test_data`.
#
# Remember that the predictions refer to the _next_ item in the sequence
# (e.g. `predictions[0]` should be compared to
# `test_data[1]`, etc.). You should append an $a$ (coded as a $1$) to the end of your test data to equate the array sizes (describing the start of a new sequence of $a^nb^n$).
#
# We have provided test cases again in the cell below. If your function is right, it should print success. Otherwise, it will give you an error with feedback.
#
# Copy your function into gradescope.
# + deletable=false nbgrader={"checksum": "dc34fc992d440beb6bea797faabbc3e5", "grade": false, "grade_id": "sq_err", "locked": false, "schema_version": 1, "solution": true}
def squared_error(predictions, test_data):
    """
    Uses equation 1 to compute the SE for each of the predictions made
    by the network.

    Each prediction refers to the *next* item of the sequence, so
    predictions[i] is compared against test_data[i+1]. An `a` (coded as 1)
    is appended to the end of test_data -- standing for the start of the
    next a^n b^n string -- to keep the two arrays the same length.

    Parameters
    ----------
    predictions: numpy array of shape (n,)
        an array of predictions from the Elman network
    test_data: numpy array of shape (n,)
        the array of test data from which predictions were generated

    Returns
    -------
    se_vector: numpy array of shape (n,)
        an array containing the SE for each of items in predictions
    """
    # shift the targets one step forward and pad with the start of a new
    # sequence (an 'a', i.e. 1)
    targets = np.append(test_data[1:], 1)
    return (np.asarray(predictions, dtype=float) - targets) ** 2
# +
#YOUR OWN TESTS HERE
# + deletable=false nbgrader={"checksum": "b208daede991b158bc6b80965cd889e0", "grade": true, "grade_id": "check_sq_err", "locked": false, "points": 1.25, "schema_version": 1, "solution": false}
"""Check that squared_error returns the correct output"""
from nose.tools import assert_equal
# generate test data
# pred shifted one step left of test, so every comparison should match
pred = np.array([1, 0, 1])
test = np.array([0, 1, 0])
se = squared_error(pred, test)
# check that squared_error returns the correct output for testdata
#assert_equal(se.dtype, np.float64, "squared_error should return an array of floats")
assert_equal(se.shape, (3,), "squared_error returned an array of the incorrect size on the validate testdata")
# NOTE(review): assert_array_equal is imported by the Q3 checking cell; run
# that cell first (or import it here) before running this one.
assert_array_equal(se, np.zeros(3), "squared_error should return all zeros on the validate testdata")
# check that squared_error compares the correct elements
# a single 0 prediction against the appended 'a' (1) gives SE (0-1)^2 == 1
pred = np.zeros(1)
test = np.zeros(1)
se = squared_error(pred, test)
assert_equal(se, np.ones(1), "squared_error([0],[0]) should have returned a 1 (did you remember to append an a to testdata?")
print("Success!")
# + [markdown] deletable=false
# ---
#
# ## Q6 [5 pts, SOLO]
# Train the network on the train_data, then apply it to the test_data: try to predict test data using the weights of the trained network. Measure the resulting squared error.
#
# Use matplotlib to plot a bar graph of the squared error for each training example. You should have the sequence iteration number on the x axis, and the error values on y axis. Don't forget to provide a title and labels your $x$ and $y$ axes!
#
# If you have difficulty interpreting this graph, you may want to
# examine a few of the values in `test_data`, `predictions`, and your `se_vector` to see how they are related.
# Upload your figure PS7_Q6.png to gradescope.
# + deletable=false nbgrader={"checksum": "361a23caac2e11f8082d5b4f29f2e4be", "grade": false, "grade_id": "plot_error", "locked": false, "schema_version": 1, "solution": true}
# YOUR CODE HERE
# create the figure
fig, axis = plt.subplots()
axis.set_xlim([0.0, 350.0]), axis.set_ylim([0.0,.7])
# YOUR CODE HERE
# NOTE(review): se_vector must be computed above (squared_error applied to
# the network's predictions on test_data); axis labels and a title are
# still required by the assignment.
axis.bar(np.arange(len(se_vector)),se_vector)
# + [markdown] deletable=false
# ## Q7 [SOLO, 5pts]
#
# To get a better idea of what is going on, let's have a look at the specific values in `test_data` where the prediction error spikes. Use the provided code below and look at its output.
#
# At which points in test_data do the large errors occur? Explain which parts of the data the network is predicting ok, and which parts it's not predicting well. Answer in gradescope.
# + deletable=false
# prints the 3 values preceding and 2 values following the spot where
# the prediction error >= 0.5
# the +1 maps a prediction index to the test_data element it predicted
# (predictions refer to the *next* item); the last spike is dropped,
# presumably because it has no following values to display
error_spike_idxs = np.argwhere(se_vector >= 0.5) + 1
error_spike_idxs = error_spike_idxs[:-1]
for i in error_spike_idxs:
    print('3 values preceding MSE spike: {}\tValue at MSE spike: {}'
          '\t\t2 values following MSE spike: {}'\
          .format(test_data[i[0]-3:i[0]], test_data[i[0]], test_data[i[0]+1:i[0]+3]))
# + [markdown] deletable=false
#
#
# ## Q8.1. Conclusions [SOLO 5 pts]
#
# Earlier we said that we can evaluate whether the network has learned
# the grammar by looking at the predictions it makes. If the network has
# learned the $a^nb^n$ grammar, in what cases should it make correct
# predictions? When should it make incorrect predictions?
#
# Do your predictions about when the network should make correct/incorrect predictions if it has learned the $a^nb^n$ grammar match the the times when the network makes large errors, as identified in Q7? Did the network learn the $a^nb^n$ language?
# + [markdown] deletable=false
# ---
#
# ## Q8.2. Conclusion [SOLO 5pts]
# At what level of the Chomsky hierarchy is the $a^nb^n$ grammar?
#
# How does this compare to the level of most natural languages? Specifically, what level of Chomsky's hierarchy describes most of the natural languages, and are these levels higher relative to that of $a^nb^n$?
#
# Use this to explore the implications of your results from Q7 and Q8.1 for using the Elman network to model the relationships present in human language. Is the Elman network likely to be sufficient to capture human language satisfactorily?
#
# -
# <div style="background-color: #c1f2a5">
#
# # Submission
#
# ### <span style="color:red">Attention! Code submission requirement!</span>
#
# When you're done with your problem set, do the following:
# - Upload your answers in Gradescope's PS7.
# - Convert your Jupyter Notebook into a `.py` file by doing so:
#
# </div>
#
#
# <center>
# <img src="https://www.dropbox.com/s/7s189m4dsvu5j65/instruction.png?dl=1" width="300"/>
# </center>
#
# <div style="background-color: #c1f2a5">
#
# - Submit the `.py` file you just created in Gradescope's PS7-code.
#
# </div>
#
#
#
#
# </div>
#
# + [markdown] deletable=false nbgrader={}
# ---
| PS7/PS7_final_MR_AC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Practice reading in HYCOM model data.
# +
from __future__ import division
import netCDF4 as nc
import numpy as np
from salishsea_tools import (
nc_tools)
import datetime
import os
import urllib2
from StringIO import StringIO
import pandas as pd
from dateutil import tz
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from salishsea_tools import viz_tools
from salishsea_tools.nowcast import residuals, figures
# %matplotlib inline
# -
# #HYCOM
# The HYCOM model provides ssh forecasts. Data is saved at a 3 hourly frequency. We can access the HYCOM sea surface height at a url like this:
#
# * http://nomads.ncep.noaa.gov:9090/dods/rtofs/rtofs_global20150716/rtofs_glo_2ds_forecast_3hrly_diag.ascii?ssh[0:64][0][1661:1662][1931:1932]
#
# ** Variables**
# * time - measured in days since 1-1-1 00:00:0.0
# * ssh - metres
# * lat, lon don't have a lot of meta data
#
# First, try to figure out if this data is the same as the NOAA forecast.
#
# Below, I copied and pasted the data from the 2015-07-16 forecast page. I will compare this data with the Neah Bay forecasts which we are collecting.
# Reading in data
def read_url(date,i,j):
    """Reads text from a url and saves the output to a file
    Returns the filename of the output file
    :arg date: the date of the download
    :type date: datetime object
    :arg i: the i index of the hycom grid
    :type i: integer
    :arg j: the j index of the hycom grid
    :type j: integer
    :returns: filename, the name of the file where the data was saved"""
    # NOTE: the HYCOM ascii service indexes as [time][lev][lat(j)][lon(i)]
    url = ('http://nomads.ncep.noaa.gov:9090/dods/rtofs/rtofs_global{}/rtofs_glo_2ds_forecast_3hrly_diag.ascii'
           '?ssh[0:64][0][{}][{}]'.format(date.strftime('%Y%m%d'),j,i))
    response = urllib2.urlopen(url)
    try:
        html = response.read()
    finally:
        # always release the HTTP connection, even if the read fails
        response.close()
    # save the output in a per-date directory, like the Neah Bay downloads
    directory = date.strftime('%Y-%m-%d')
    if not os.path.exists(directory):
        os.makedirs(directory)
    filename = '{}/hycom_{}_{}.txt'.format(directory,i,j)
    # context manager guarantees the file is closed even if the write fails
    with open(filename, "w") as text_file:
        text_file.write(html)
    return filename
# * Notice the first output for ssh is bizarre: 1276 m ??? (it is a fill value and is discarded by the parser below)
def parse_hycom_text(filename):
    """Parses the text in a output file from the hycom model.
    :arg filename: file where the hycom model data is stored
    :type filename: string
    :returns: data, lon, lat
    data is a data frame with ssh and time columns
    lon is the longitude of the hycom model point
    lat is the latitude of the hycom grid point
    The parser walks the ascii dump as a small state machine: the *_read
    flags record which variable section ('ssh,', 'time,', 'lat,', 'lon,')
    we are currently inside. The first ssh/time record reported by the
    server is a fill value and is discarded at the end.
    """
    # state flags: which variable section of the ascii dump we are in
    ssh_read = False
    time_read = False
    lat_read = False
    lon_read = False
    #initialize variables
    sshs = []
    times = []
    lat=0
    lon=0
    #variable to define number of lines to skip
    skip_lines = 0
    with open(filename) as f:
        #loop through each line
        for line in f:
            # check if we should skip a line
            if skip_lines >0:
                skip_lines=skip_lines -1
                continue
            # read the line
            words = line.split()
            if words: #there is data in the line, do stuff with it
                #if we should read a variable, read it
                #read ssh
                if ssh_read:
                    if words[0] =='time,': # check we are still in the ssh part
                        time_read = True
                        ssh_read = False
                    else:
                        sshs.append(float(words[1])) #append the ssh data to the list
                        # each ssh value appears to be followed by two lines
                        # with no ssh data -- skip them
                        skip_lines = 2
                # read time
                elif time_read:
                    if words[0]=='lev,': # check we are still in the time part
                        time_read = False
                    else:
                        times = words
                #read lat
                elif lat_read:
                    lat=float(words[0])
                    lat_read = False
                # read lon
                elif lon_read:
                    lon=float(words[0]) - 360 # subtract 360 for conversion to model coordinates
                #if we aren't reading a variable, check that we can determine which variable should be read next
                if words[0] =='ssh,':
                    ssh_read = True
                elif words[0] == 'time,':
                    time_read = True
                    ssh_read = False
                elif words[0] == 'lat,':
                    lat_read = True
                    time_read = False
                elif words[0] =='lon,':
                    lon_read= True
                    lat_read = False
    #finished reading the file
    #convert times to datetimes
    time_units = 'days since 1-1-1 00:00:0.0'
    for i, t in enumerate(times):
        # t[:-1] strips the trailing comma from each ascii time value
        t = float(t[:-1])
        times[i] = nc.num2date(t, time_units)
    # remove first ssh/times element because it is not real data
    sshs = sshs[1:]
    times = times[1:]
    #add the data to a data frame
    data = pd.DataFrame({'ssh': sshs, 'time': times})
    return data, lon, lat
# #Compare with Neah Bay forecast and observations
# +
# NOTE(review): nc, datetime, viz_tools, residuals, and figures are not
# imported in this chunk -- presumably imported in an earlier cell; confirm.
grid_b = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc')
fig,axs= plt.subplots(1,2,figsize=(10,5))
ax=axs[0]   # time-series panel
axm=axs[1]  # map panel
date = datetime.datetime(2015,7,30)
#Hycom
# zip stops at the shorter sequence (jss, 4 items), so only the first
# four i values are actually used.
iss = np.arange(1934,1920,-1)
jss = np.arange(1661,1665)
for i,j in zip(iss,jss):
    #filename = read_url(date,i,j) read website
    filename = '/data/nsoontie/MEOPAR/analysis/Nancy/nowcast/{}/hycom_{}_{}.txt'.format(date.strftime('%Y-%m-%d'),i,j)
    data, lon, lat = parse_hycom_text(filename)
    ax.plot(data['time'],data['ssh'],label='Hycom')
    axm.plot(lon,lat,'o')  # mark this grid point on the map
viz_tools.plot_coastline(axm,grid_b,coords='map')
#Neah Bay forecast
filename = '/ocean/nsoontie/MEOPAR/sshNeahBay/txt/sshNB_2015-07-29_18.txt'
NBdata = residuals._load_surge_data(filename)
surge, dates = residuals._retrieve_surge(NBdata, datetime.datetime(2015,7,20))
ax.plot(dates[:],surge[:],label = 'Neah Bay')
# Neah Bay observations: residual = observed water level minus predicted tide
obs = figures.get_NOAA_wlevels(figures.SITES['Neah Bay']['stn_no'], '20-Jul-2015', '31-Jul-2015')
tides = figures.get_NOAA_tides(figures.SITES['Neah Bay']['stn_no'], '20-Jul-2015', '31-Jul-2015')
res = residuals.calculate_residual(obs.wlev, obs.time, tides.pred, tides.time)
ax.plot(obs.time,res,label='observation')
axm.set_xlim([-125.5,-124])
axm.set_ylim([48,49])
ax.legend(loc=0)
ax.grid()
fig.autofmt_xdate()
ax.set_xlim([datetime.datetime(2015,7,28),datetime.datetime(2015,8,3)])
# -
# At first glance, these models look very different! I need some more data points. Will need a longer time period for comparisons with observations.
# * We might be able to interpolate these points across our open boundary
# #Compare between forecasts
# Two forecasts are available each day. How different are they?
# +
fig= plt.figure(figsize=(10,10))
axm=fig.add_subplot(1,2,2)   # map panel on the right
ax1=fig.add_subplot(4,2,1)   # one time-series panel per grid point
ax2=fig.add_subplot(4,2,3)
ax3=fig.add_subplot(4,2,5)
ax4=fig.add_subplot(4,2,7)
axs=[ax1,ax2,ax3,ax4]
colors=['b','g','r','c']
#first forecast
date = datetime.datetime(2015,7,29)
count=0
i = 1933  # NOTE(review): dead assignment -- immediately overwritten by the loop
iss = np.arange(1934,1920,-1)
jss = np.arange(1661,1665)
# zip stops at the shortest sequence (jss/axs, 4 items each)
for i,j, ax in zip(iss,jss, axs):
    filename = '/data/nsoontie/MEOPAR/analysis/Nancy/nowcast/{}/hycom_{}_{}.txt'.format(date.strftime('%Y-%m-%d'),i,j)
    data, lon, lat = parse_hycom_text(filename)
    ax.plot(data['time'],data['ssh'],marker='o',c=colors[count],label='{}'.format(date.strftime('%Y%m%d')))
    axm.plot(lon,lat,'o')
    count=count+1
viz_tools.plot_coastline(axm,grid_b,coords='map')
#second forecast, one day later, drawn with '*' markers on the same panels
date = datetime.datetime(2015,7,30)
count=0
i = 1933  # NOTE(review): dead assignment, as above
iss = np.arange(1934,1920,-1)
jss = np.arange(1661,1665)
for i,j,ax in zip(iss,jss,axs):
    filename = '/data/nsoontie/MEOPAR/analysis/Nancy/nowcast/{}/hycom_{}_{}.txt'.format(date.strftime('%Y-%m-%d'),i,j)
    data, lon, lat = parse_hycom_text(filename)
    ax.plot(data['time'],data['ssh'],marker='*',c=colors[count],label='{}'.format(date.strftime('%Y%m%d')))
    count=count+1
    ax.legend()
    ax.grid()
    ax.set_ylim([-.1,.1])
axm.set_xlim([-125.5,-124])
axm.set_ylim([48,49])
fig.autofmt_xdate()
# -
# Need to save this data daily. That might mean changing the name of the text files or writing in some other file format.
#
# Right now all of the text files have the same name.
#
# I don't think there is any need to look at the further in the future forecast.
#
# Notes:
# * Hycom model files don't change from day to day. For example, if on Jul 21 I look at Hycom July 22 results, they will be the same as if I looked at July 22 results on July 22.
#
# Potential difficulties:
# * HYCOM doesn't have any observations. What do we use for nowcast? We might have to use a combination of neah bay and hycom. (but it has nowcasts)
#
#
| Nancy/nowcast/HYCOM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Coding Exercise #0607
import nltk
import re
# %%time
# You should download the NLTK data once.
# It can be a bit time consuming.
# nltk.download()
nltk.download('punkt')  # sentence/word tokenizer models
nltk.download('stopwords')  # stop-word lists
nltk.download('averaged_perceptron_tagger')  # POS tagger model
# ### 1. More about the NLTK library:
# +
#
# From the US president B. Obama's Nov-4th-2008 speech.
#
paragraph = """If there is anyone out there who still doubts that America is a place where all things are possible; who still wonders if the dream of our founders is alive in our time; who still questions the power of our democracy: Tonight is your answer.
It's the answer told by lines that stretched around schools and churches in numbers this nation has never seen; by people who waited three hours and four hours, many for the very first time in their lives, because they believed that this time must be different; that their voices could be that difference.
It's the answer spoken by young and old, rich and poor, Democrat and Republican, black, white, Hispanic, Asian, Native American, gay, straight, disabled and not disabled -- Americans who sent a message to the world that we have never been just a collection of individuals or a collection of Red States and Blue States: we are, and always will be, the United States of America!
It's the answer that -- that led those who have been told for so long by so many to be cynical, and fearful, and doubtful about what we can achieve to put their hands on the arc of history and bend it once more toward the hope of a better day.
It's been a long time coming, but tonight, because of what we did on this day, in this election, at this defining moment, change has come to America.
A little bit earlier this evening, I received an extraordinarily gracious call from Senator McCain. Senator McCain fought long and hard in this campaign, and he's fought even longer and harder for the country that he loves. He has endured sacrifices for America that most of us cannot begin to imagine. We are better off for the service rendered by this brave and selfless leader. I congratulate him; I congratulate Governor Palin for all that they've achieved, and I look forward to working with them to renew this nation's promise in the months ahead.
I want to thank my partner in this journey, a man who campaigned from his heart and spoke for the men and women he grew up with on the streets of Scranton and rode with on the train home to Delaware, the Vice President-elect of the United States, Joe Biden. """
# -
# #### 1.1. Tokenization and pre-processing:
# Tokenize into sentences.
# Tokenize the speech into sentences, then normalise each one: lowercase,
# replace non-word characters with spaces, collapse repeated whitespace,
# and drop a single trailing space.
sentences = nltk.sent_tokenize(paragraph)


def _clean_sentence(text):
    text = text.lower()
    text = re.sub(r'\W', ' ', text)   # non-alphanumerics become spaces
    text = re.sub(r'\s+', ' ', text)  # squeeze runs of whitespace
    return re.sub(r'\s$', '', text)   # strip the trailing space, if any


sentences = [_clean_sentence(s) for s in sentences]
print(sentences)
# #### 1.2. Removal of the stop words:
from nltk.corpus import stopwords

# Fetch the English stop-word list ONCE and store it in a set: the original
# re-loaded the corpus for every single token (inside the comprehension
# condition), and set membership is O(1) versus O(n) for a list.
stop_words = set(stopwords.words('english'))
for i in range(len(sentences)):
    words = nltk.word_tokenize(sentences[i])  # Tokenize into words.
    words = [x for x in words if x not in stop_words]  # Remove the stop words.
    sentences[i] = ' '.join(words)  # Rejoin as a sentence.
print(sentences)
# #### 1.3. POS tagging:
# Test sentence.
my_sentence = "The Colosseum was built by the emperor Vespassian"
# Simple pre-processing.
# Tokenize the test sentence and lowercase every token.
my_words = [token.lower() for token in nltk.word_tokenize(my_sentence)]
my_words
# POS tagging.
# OUTPUT: A list of tuples.
my_words_tagged = nltk.pos_tag(my_words)
my_words_tagged
# Join the words + POS as a sentence.
# Render each (word, tag) tuple as "word(TAG)" and join with spaces.
my_words_tagged2 = ['{}({})'.format(word, tag) for word, tag in my_words_tagged]
my_sentence_tagged = ' '.join(my_words_tagged2)
my_sentence_tagged
| SIC_AI_Coding_Exercises/SIC_AI_Chapter_07_Coding_Exercises/ex_0607.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + deletable=true editable=true
import glob
import os.path
import numpy
import deepometry.parse
# + [markdown] deletable=true editable=true
# # Parse TIFs
#
# Use `deepometry.parse` module to transform .TIF files to NumPy arrays. In this example, .TIF files are stored at `/data/raw/` in subdirectories corresponding to the class labels. Filenames should contain a prefix "Ch" for specifying channels.
#
# /data/raw/
# positive/
# foo_Ch3.tif
# foo_Ch4.tif
# foo_Ch6.tif
# bar_Ch3.tif
# bar_Ch4.tif
# bar_Ch6.tif
# ...
# negative/
# foo_Ch3.tif
# foo_Ch4.tif
# foo_Ch6.tif
# bar_Ch3.tif
# bar_Ch4.tif
# bar_Ch6.tif
# ...
#
# We parse the images of selected channels of each object into a numpy array, e.g. one cell - one numpy array that contains multiple channels. The arrays are stored at `/data/parsed` in subdirectories corresponding to the class labels. Array filenames have the patient prefixes, followed by a hex series.
#
# /data/parsed/
# positive/
# foo__32e88e1ac3a8f44bf8f77371155553b9.npy
# bar__3dc56a0c446942aa0da170acfa922091.npy
# ...
# negative/
# foo__8348deaa70dfc95c46bd02984d28b873.npy
# bar__c1ecbca7bd98c01c1d3293b64cd6739a.npy
# ...
# ...
# + deletable=true editable=true
src = "/data/raw/"
dest = "/data/parsed/"
labels = ["positive", "negative"]
# + deletable=true editable=true
channels = [3, 6]
# + deletable=true editable=true
image_size = 48
# + deletable=true editable=true
# Convert every class directory of .tif files into per-object .npy arrays,
# creating the destination class directory when it does not yet exist.
for class_label in labels:
    source_directory = os.path.join(src, class_label)
    print("Parsing directory: {}".format(source_directory))
    destination_directory = os.path.join(dest, class_label)
    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)
    tif_paths = glob.glob(os.path.join(source_directory, "*.tif"))
    deepometry.parse.parse(tif_paths, destination_directory, image_size, channels)
print('Done')
# + [markdown] deletable=true editable=true
# If you need to delete the folder of many files, do this in terminal
#
# - mkdir empty_dir
#
# - rsync -a --delete empty_dir/ yourdirectory/
| examples/parse_TIF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Fast iteration
# Iterate directly over a string's characters, then a tuple's elements --
# no indices needed.
s = "abcd"
for ch in s:
    print(ch)
a = (1, 2, 3)
for item in a:
    print(item)
| Lecture 2 Conditionals loops and Functions/Fast Iterations/Fast Iterations-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../datacamp-projects/"))
# Any results you write to the current directory are saved as output.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
df = pd.read_csv('../datacamp-projects/AppleStore.csv')
# + _uuid="07e33fd69f3e7f384fe98e950330a188dbc76974"
df.head(5)
# + _uuid="a2eb208c300465645b7e94c496d42da4c7cc73d9"
df.shape
# + _uuid="c1046fc5c17a4cc06870fb917fb09969959c5a77"
# # opened_file = open('../input/AppleStore.csv')
# # read_file = opened_file.read()
# # read_file
# split_dataset = read_file.split('\n')
# header = split_dataset[0]
# first_5 = split_dataset[1:6]
# print(header)
# print(first_5)
# + _uuid="81c351c5bb1dfbff39dcb57ab8033d0407861a9c"
from csv import reader

# Read the whole CSV into a list of lists.  A context manager guarantees the
# file handle is closed; the original opened it and never closed it.
with open('../datacamp-projects/AppleStore.csv', encoding="utf8") as opened_file:
    read_file = reader(opened_file)
    apps_data = list(read_file)
# + _uuid="bc0b85a55858395a6728c97dd5242bc688aff14e"
len(apps_data)
# + _uuid="961b30544ffb191d740dcad0b8d25db98152ccba"
print(apps_data[0])
# + _uuid="21cc142ca17d6eacff36fcea2ae3595c0a02d17f"
print(apps_data[1:3])
# + _uuid="ce5d75f83412657aed051446ff6928aef98c40cd"
col_names = apps_data[0]
# + _uuid="ea7ff164d4c404fbedfc78e755902b1bbee6218f"
apps_data = apps_data[1:]
# + _uuid="8b8b94b3c40e78e44300289c48eb1b0780e04862"
len(apps_data)
# + _uuid="999a360a0d6e0006407f7809a44aae7e85e1d7e0"
print(col_names)
# + _uuid="8c1e147a7abcbc579234ce02916ce1dc3e0a2863"
print(apps_data[0:5])
# + _uuid="65acc81ad1e1ca163949a5847f0e2bcb7aa7087b"
# Five hand-made sample records: [name, price, currency, rating_count, rating].
row_1 = ['Facebook', 0.0, 'USD', 2974676, 3.5]
row_2 = ['Instagram', 0.0, 'USD', 2161558, 4.5]
row_3 = ['Clash of Clans', 0.0, 'USD', 2130805, 4.5]
row_4 = ['Temple Run', 0.0, 'USD', 1724546, 4.5]
row_5 = ['Pandora - Music & Radio', 0.0, 'USD', 1126879, 4.0]
app_data_set = [row_1, row_2, row_3, row_4, row_5]
# Print every record followed by a separator line.
for record in app_data_set:
    print(record)
    print('----------------')
# + _uuid="b6020283c5033ef1883eb00afdf9d0a2d1be4e31"
# Print each field of the first record on its own line.
for field in row_1:
    print(field)
    print('---------------')
# + _uuid="0f86d81cd1edc042dbd0f8d83eb422dfd3f29cfe"
list_a = [10, 20, 30, 40]
# + _uuid="3048d092026eb7cf910ca18c03fa21dd21dcaa86"
for element in list_a:
print(element)
print('---------------')
# + _uuid="a639d8410dc42070048f907ab98d1ebddd8e9e92"
list_of_lists = [[1,2], [11,12], [21,22]]
# + _uuid="a62bd25638a6f01e94ba884f714a20c682121691"
for element in list_of_lists:
print(element[0])
# + _uuid="1b20fea577f933f79cdf948d0c7955da640bea3c"
for element in list_of_lists:
print(element[-1])
# + _uuid="7dcf4f60c7618415fda22052df7aee759e59cdea"
for element in list_of_lists:
print(element)
# + _uuid="10e5ba899ac7dbf6be270a6bb44082a5e829cac9"
a_sum = 0
list_b = [1, 3, 5]
print(a_sum)
for value in list_b:
a_sum += value
# a_sum = a_sum + value
print(a_sum)
# + _uuid="48138b84490c6a39ee6a86d8d5d57a1ad590d932"
a_list = [1,3,5]
a_sum = 0
for value in a_list:
a_sum += value
print(a_sum)
print(a_sum)
# + [markdown] _uuid="4630e98ff8d930d2af76fe8b40e1d2af2c80d33b"
# ### Find the average app rating for the apps stored in the `app_data_set` variable.
# * Initialize a variable named rating_sum with a value of zero outside the loop body.
# * Loop (iterate) over the app_data_set list of lists. For each of the five iterations of the loop:
# * Extract the rating of the app and store it to a variable named rating.
# * Add the value stored in rating to the current value of the rating_sum.
# * Outside the loop body, divide the rating sum (stored in rating_sum) by the number of ratings to get an average value. Store the result in a variable named avg_rating.
# + _uuid="25e0540c33aea3a0a5a17e8ca77e84260c81bf3a"
# rating_sum = 0
# for row in app_data_set:
# rating = row[-1]
# rating_sum = rating_sum + rating
# avg_rating = rating_sum / len(app_data_set)
# Average app rating over the five sample records: the rating is the last
# field of each row.
rating_sum = 0
for record in app_data_set:
    rating = record[-1]
    rating_sum = rating_sum + rating
avg_rating = rating_sum / len(app_data_set)
print(avg_rating)
# + [markdown] _uuid="b2b0f61c31c187a7b42bbf6df81bebdec4c24fd9"
# ### Compute the average app rating for all the 7,197 apps stored in the data set.
# * Transform the AppleStore.csv file into a list of lists.
# * Open the file using the open() command. Save the output to a variable named opened_file.
# * Read in the opened file using the reader() command. Save the output to a variable named read_file. You'll have to import the reader() command from the csv module.
# * Transform the read-in file to a list of lists using the list() command. Save the list of lists to a variable named apps_data.
# * Initialize a variable named rating_sum with a value of zero.
# * Loop through the apps_data[1:] list of lists (make sure you don't include the header row). For each of the 7,197 iterations of the loop:
# * Extract the rating of the app and store it to a variable named rating. Make sure you convert the rating value from a string to a float.
# * Add the value stored in rating to the current value of the rating_sum.
# * Divide the rating sum (stored in rating_sum) by the number of ratings to get an average value. Store the result in a variable named avg_rating.
# + _uuid="7c5d0c05a8cfc4ac9320fed71127d686c3dc800f"
# Average the value in column 7 over the full apps_data list of lists,
# converting each entry from string to float.
# NOTE(review): the next cell reads the rating from column 8 instead --
# confirm which column of AppleStore.csv actually holds user_rating.
rating_sum = 0
for record in apps_data:
    rating = float(record[7])
    rating_sum = rating_sum + rating
avg_rating = rating_sum / len(apps_data)
print(avg_rating)
# + [markdown] _uuid="c78fd55ec55e93553cf055b3f36c1e40c2f3d4b6"
# ### Using the append method on a list
# + _uuid="96701ccf55423f613b04c840f132ee655120f4c3"
# Demonstrate list.append: it mutates the list in place, adding one element
# at the end.
z = [7, 4, 3, 2]
z.append(3)
print(z)
# + _uuid="56e4805917110927b81590f928764963a5ab2bd0"
# Collect column 8 of every row as a float, then average the list.
# NOTE(review): the previous average used column 7 -- verify which index
# holds the rating in AppleStore.csv.
all_ratings = [float(record[8]) for record in apps_data]
print(all_ratings)
# sum of all ratings divided by the number of ratings
avg_rating = sum(all_ratings) / len(all_ratings)
print(avg_rating)
# + _uuid="ea10198caf59dc33b201af254c11e8f3d18590a2"
# some_string = '"1","281656475","PAC-MAN Premium",100788224,"USD",3.99,21292,26,4,4.5,"6.3.5","4+","Games",38,5,10,1\n"2","281796108","Evernote - stay organized",158578688,"USD",0,161065,26,4,3.5,"8.2.2","4+","Productivity",37,5,23,1'
# + _uuid="420b0494e22fe4b1346757e180bc55df3286708d"
# some_string.split('\n')
# + [markdown] _uuid="d92d5e1dc7b8f312eac636618f5f5294349ff9ee"
# ### Using the split method on a list
# + _uuid="a87ce139af60e3c53878e37b0b10a3ad1cfaee40"
# opened_file = open('../input/AppleStore.csv')
# read_file = opened_file.read()
# read_file
# NOTE(review): at this point `read_file` is a csv.reader object (see the
# earlier cell), which has no .split method -- this cell only runs if the
# commented-out open()/read() lines above were executed instead.  Confirm
# the intended cell order.
split_dataset = read_file.split('\n')
header = split_dataset[0]
first_5 = split_dataset[1:6]
print(header)
print(first_5)
# + _uuid="6ace17992c70723feed819b8029ac11e585ce772"
# opened_file = open('AppleStore.csv')
# read_file = opened_file.read()
# split_dataset = read_file.split('\n')
# Split each line on commas.  This is naive CSV parsing: it breaks on quoted
# fields that contain commas (e.g. app names), unlike the csv.reader above.
final_dataset = []
for row in split_dataset:
    split_row = row.split(',')
    final_dataset.append(split_row)
print(final_dataset[:6])
header = final_dataset[0]
instagram = final_dataset[2]
insta_rating = instagram[7]
# + _uuid="17266c0dd415237c795ff5381f576ce72911b669"
| dataquest/01-python_for_data_science_fundamentals/dataquest-projects-master/lists_loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sk774325/ehr-dream-challenges/blob/master/Observation_period.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="F7XQVJEZj_D1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="d77b5bf6-e51e-49cb-cb67-398797f575df"
import pandas as pd
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
# + id="FYxTVOExoyWR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="06a30c32-05d3-47b8-b2a0-b8ba540bcec4"
df = pd.read_csv("/content/drive/Shared drives/COVID分析/synthetic_data/observation_period.csv")
df_gs = pd.read_csv("/content/drive/Shared drives/COVID分析/synthetic_data/goldstandard.csv")
df = df.sort_values("person_id").reset_index(drop = "true")
df
# + id="xqy4WMr8o9uK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="e2030c98-f099-4e40-9b82-ffb213546383"
# Attach the gold-standard label: for each row, look up that row's person_id
# in df_gs.  df has a RangeIndex after the earlier reset_index, so positions
# and labels coincide.
df['status'] = 0
for position, person in enumerate(df['person_id']):
    df.at[position, 'status'] = df_gs.loc[person, 'status']
df
# + id="-RMsxLbbpDI4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="4803994e-c45f-4e33-cd74-0c4fb2ce36d9"
# Split the cohort by gold-standard label.
mask_positive = (df.status == 1.0)
mask_negative = (df.status == 0.0)
# Fix: reset_index expects a boolean `drop`; the original passed the string
# 'true', which only worked because any non-empty string is truthy.
df_pos = df[mask_positive].reset_index(drop=True)
df_neg = df[mask_negative]
df_pos
# + id="RHIR98yopu13" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="5744a91b-f130-4788-a443-4882f4fd194e"
df_pos['period_type_concept_id'].value_counts()
| Observation_period.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import numpy as np
from scipy.stats import chi2, kstest, binom, norm
from tools.helpers import Progress
SAMPLES = 100000
DIM = 10
# +
def gen_samples(d, samples, projection):
    # Lazily yield `samples` projected draws of a d-dimensional standard
    # Gaussian.  Progress is a project-local progress-bar wrapper around range.
    return (projection(np.random.randn(d)) for _ in Progress(range(samples)))
def stat_dim(d, samples, projection):
    """Monte-Carlo estimate of the statistical dimension: the mean squared
    norm of `samples` projected d-dimensional standard Gaussian draws.
    """
    draws = gen_samples(d, samples, projection)
    # Fixes two defects: the original divided by the module-level SAMPLES
    # constant instead of the `samples` argument (wrong whenever they differ),
    # and called np.sum on a generator, which is deprecated in NumPy.
    return sum(np.linalg.norm(x)**2 for x in draws) / samples
def test(d, samples, projection, pmf, cdf):
    """Visually and statistically compare the empirical squared norms of
    projected Gaussian samples against a candidate pmf/cdf."""
    y = [np.linalg.norm(x)**2 for x in gen_samples(d, samples, projection)]
    print(np.mean(y))
    # NOTE(review): `pl` is never imported in this notebook chunk --
    # presumably `import pylab as pl` is expected; confirm.  `normed=True`
    # is the pre-Matplotlib-3 spelling of density=True.
    pl.hist(y, bins=100, normed=True)
    xs = np.linspace(*pl.xlim(), 1000)
    pl.plot(xs, pmf(xs))
    pl.show()
    # Kolmogorov-Smirnov goodness-of-fit test of y against `cdf`.
    print(kstest(y, cdf))
def model_to_f(model):
    """Turn a mixture ``{degrees_of_freedom: weight}`` into (pdf, cdf) callables.

    Chi-squared components with positive degrees of freedom contribute their
    weighted pdf/cdf; a point mass at zero (key 0) adds a constant offset to
    the cdf only.
    """
    def pdf(x):
        return sum(w * chi2(k).pdf(x) for k, w in model.items() if k > 0)

    def cdf(x):
        return (sum(w * chi2(k).cdf(x) for k, w in model.items() if k > 0)
                + model.get(0, 0))

    return pdf, cdf
# -
# ### No projection
#
# -> Stat. Dimension of space = dimension of space
test(DIM, SAMPLES, lambda x: x, chi2(DIM).pdf, chi2(DIM).cdf)
# +
dist = norm(DIM, np.sqrt(2 * DIM))
test(DIM, SAMPLES, lambda x: x, dist.pdf, dist.cdf)
# -
# ### Halfspace
# +
def hs_proj(x, cd=2):
    """Zero out any negative entries among the first `cd` coordinates of
    `x`, mutating `x` in place; the remaining coordinates are untouched.
    Returns the mutated `x`."""
    for idx in range(cd):
        if x[idx] < 0:
            x[idx] = 0
    return x
pdf, cdf = model_to_f({DIM: .25, DIM - 1: .5, DIM - 2: .25})
test(DIM, SAMPLES, hs_proj, pdf, cdf)
# -
# ### Subspace
sel = np.zeros(DIM)
sel[:10] = 1.
test(DIM, SAMPLES, lambda x: x * sel, chi2(10).pdf, chi2(10).cdf)
# ### Pos. Octant
from scipy.special import binom
# ### not sure if this is right...
# +
ws = np.array([binom(DIM, k) / 2**DIM for k in range(DIM + 1)])
indices = list(enumerate(ws))
pdf = lambda x: sum(w * chi2(k).pdf(x) for k, w in indices[1:])
cdf = lambda x: sum(w * chi2(k).cdf(x) for k, w in indices[1:]) + ws[0]
test(DIM, SAMPLES, lambda x: x*(x > 0), pdf, cdf)
# -
# <img src="G0VU1KKCICWJTDAC8WKSP8BBR7JP40RR.png"/>
| Cones'n Stuff.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Tutorial 05.2 - Dropout Regularization in Keras
# ## Setup DevCube GPU
# Before you can start, you have to find a GPU on the system that is not heavily used by other users. Otherwise you cannot initialize your neural network.
#
#
# **Hint:** the command is **nvidia-smi**, just in case it is displayed above in two lines because of a line break.
#
# As a result you get a summary of the GPUs available in the system, their current memory usage (in MiB for megabytes), and their current utilization (in %). There should be six or eight GPUs listed and these are numbered 0 to n-1 (n being the number of GPUs). The GPU numbers (ids) are quite at the beginning of each GPU section and their numbers increase from top to bottom by 1.
#
# Find a GPU where the memory usage is low. For this purpose look at the memory usage, which looks something like '365MiB / 16125MiB'. The first value is the already used up memory and the second value is the total memory of the GPU. Look for a GPU where there is a large difference between the first and the second value.
#
# **Remember the GPU id and write it in the next line instead of the character X.**
# +
# Change X to the GPU number you want to use,
# otherwise you will get a Python error
# e.g. USE_GPU = 4
USE_GPU = 4 # your choice
# +
# # !nvidia-smi
# -
# ### Choose one GPU
#
# **The following code is very important and must always be executed before using TensorFlow in the exercises, so that only one GPU is used and that it is set in a way that not all its memory is used at once. Otherwise, the other students will not be able to work with this GPU.**
#
# The following program code imports the TensorFlow library for Deep Learning and outputs the version of the library.
#
# Then, TensorFlow is configured to only see the one GPU whose number you wrote in the above cell (USE_GPU = X) instead of the X.
#
# Finally, the GPU is set so that it does not immediately reserve all memory, but only uses more memory when needed.
#
# (The comments within the code cell explains a bit of what is happening if you are interested to better understand it. See also the documentation of TensorFlow for an explanation of the used methods.)
# +
# Import TensorFlow
import tensorflow as tf
# Print the installed TensorFlow version
print(f'TensorFlow version: {tf.__version__}\n')
# Get all GPU devices on this server
gpu_devices = tf.config.list_physical_devices('GPU')
# Print the name and the type of all GPU devices
print('Available GPU Devices:')
for gpu in gpu_devices:
    print(' ', gpu.name, gpu.device_type)
# Restrict TensorFlow to the single GPU chosen via USE_GPU above, so other
# users' GPUs on this shared server are untouched
tf.config.set_visible_devices(gpu_devices[USE_GPU], 'GPU')
# Get all visible GPU devices on this server
visible_devices = tf.config.get_visible_devices('GPU')
# Print the name and the type of all visible GPU devices
print('\nVisible GPU Devices:')
for gpu in visible_devices:
    print(' ', gpu.name, gpu.device_type)
# Set the visible device(s) to not allocate all available memory at once,
# but rather let the memory grow whenever needed -- leaves room for other
# users sharing the same GPU
for gpu in visible_devices:
    tf.config.experimental.set_memory_growth(gpu, True)
# -
# please run this cell in order to have the plots displayed in the notebook
# %matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# ## Adding Dropout
#
#
# Dropout is one of the most effective and most commonly used regularization techniques for neural networks. Dropout, applied to a layer, consists of randomly "dropping out" (i.e. setting to zero) a number of output features of the layer during training. The `dropout rate` is the fraction of the features that are being zeroed-out; it is usually set between 0.2 and 0.5. At test time, no units are dropped out, and instead the layer's output values are scaled down by a factor equal to the dropout rate, so as to balance for the fact that more units are active than at training time.
#
# 
# ### Example or Dropout regularization on MNIST classification
# Load MNIST as (train, test) pairs of image arrays and integer labels.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# +
# subsetting the dataset to 5000 train / 1000 test examples to keep the demo fast
train_labels = train_labels[:5000]
test_labels = test_labels[:1000]
train_images = train_images[:5000]/ 255.0 #...and scaling the pixel values to [0, 1]
test_images = test_images[:1000]/ 255.0
# -
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras import regularizers
# Define a simple sequential model
def create_model():
    """Build the demo classifier: flatten 28x28 images, one L2-regularized
    hidden layer followed by 50% dropout, and a 10-way logit output layer."""
    model = tf.keras.models.Sequential()
    model.add(Flatten(input_shape=(28, 28, 1)))
    model.add(Dense(256, activation='relu',
                    kernel_regularizer=tf.keras.regularizers.L2(0.2)))
    model.add(Dropout(0.5))
    model.add(Dense(10))  # raw logits; the loss is built with from_logits=True
    return model
# Create a basic model instance
model = create_model()
# Display the model's architecture
model.summary()
# Fix: Model.compile() returns None, so the original
# `history = model.compile(...)` always bound None -- the training History
# object is returned by fit().
model.compile(optimizer='adam',
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=[tf.metrics.SparseCategoricalAccuracy()])
history = model.fit(train_images, train_labels, batch_size=32, epochs=32,
                    validation_data=(test_images, test_labels))
| Week 05/5.02 - Tutorial Keras regularization dropout-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''base'': conda)'
# name: python3
# ---
# # cuDF sandbox
#
import cudf
import dask_cudf
import nvtabular as nvt
# +
filepath = '/home/jupyter/criteo_parquet/day_1.parquet'
num_rows, num_row_groups, names = cudf.io.read_parquet_metadata(filepath)
print('Num rows:', num_rows)
print('Num row groups: ', num_row_groups)
print('Names: ', names)
# +
filepath = '/home/jupyter/criteo_parquet/day_1.parquet'
row_groups = list(range(50))
df = cudf.io.read_parquet(filepath, row_groups=row_groups)
# +
filepath = '/home/jupyter/criteo_parquet/day_1.parquet'
row_groups = [3,4]
df = cudf.io.read_parquet(filepath, row_groups=row_groups)
# +
filepath = '/home/jupyter/criteo_parquet/day_1.parquet'
row_groups = [217,218]
df = cudf.io.read_parquet(filepath, row_groups=row_groups)
# +
filepath = 'gs://workshop-datasets/criteo-parque/day_1.parquet'
row_groups = [1,2]
row_groups = range(80)
df = cudf.io.read_parquet(filepath, row_groups=row_groups)
# +
filepath = 'gs://workshop-datasets/criteo-parque/day_1.parquet'
row_groups = [218,219]
df = cudf.io.read_parquet(filepath, row_groups=row_groups)
# -
def _memory_usage(df):
"""Return the total memory usage of a DataFrame"""
return df.memory_usage(deep=True).sum()
df = cudf.io.read_parquet(filepath, row_groups=217)
_memory_usage(df)
list(range(10))
# +
filepath = '/home/jupyter/criteo_parquet/day_1.parquet'
filepath = 'gs://workshop-datasets/criteo-parque/day_1.parquet'
row_groups = [217,218]
df = cudf.io.read_parquet(filepath, row_groups=row_groups)
# -
df.head()
df.C17.value_counts()
# +
filepath = '/home/jupyter/criteo_parquet/day_1.parquet'
filepath = 'gs://workshop-datasets/criteo-parque/day_1.parquet'
ddf = dask_cudf.read_parquet(
filepath,
split_row_groups=20)
# -
ddf
result = ddf.C1.value_counts()
result.compute()
# +
output_path = '/home/jupyter/output_path'
ddf.to_parquet(output_path)
# -
| cudf-sandbox/cudf-sandbox.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:analysis3-22.01]
# language: python
# name: conda-env-analysis3-22.01-py
# ---
# # Example: Cross-contour transport
#
# This notebook uses the `cosima_cookbook` to calculate transport across an arbitrary contour. We do this by first creating the contour, such as sea surface height, and extracting the coordinates using `matplotlib`'s `Path` class. We then create some masks to indicate which direction is across the contour at each position along the contour. We then load the transport data and compute the transport, resulting in data with dimensions depth and along contour index.
#
# First, we load useful packages:
# +
# %matplotlib inline
import cosima_cookbook as cc
import matplotlib.pyplot as plt
import netCDF4 as nc
import xarray as xr
import numpy as np
from dask.distributed import Client
# -
# Start a local dask cluster for parallel reads/computation.
client = Client(n_workers=4)
client
# #### Choose database
# Open the COSIMA master experiment database.
session = cc.database.create_session('/g/data/ik11/databases/cosima_master.db')
# #### Choose experiment
# 0.1-degree ACCESS-OM2 run with repeat-year (RYF 1990-91) JRA55 forcing.
expt = '01deg_jra55v13_ryf9091'
# #### Choose a latitude range so the contour fits in the range, but there is not too much extra space. Extra space slows down the computation.
lat_range = slice(-64.99,-47)
# #### We must make sure that this latitude range is so that the t-cells are always south and west of the u-cells.
#
# This is important because the meridional and zonal transports occur on different grids to each other. We can check this by loading the u-cell and t-cell coordinates.
# +
# %%time
# Load t-cell and u-cell coordinates; only the y coordinates are subset to the
# latitude range (the full zonal extent is kept).
yt_ocean = cc.querying.getvar(expt,'yt_ocean',session,n=1)
yt_ocean = yt_ocean.sel(yt_ocean=lat_range)
xt_ocean = cc.querying.getvar(expt,'xt_ocean',session,n=1)
yu_ocean = cc.querying.getvar(expt,'yu_ocean',session,n=1)
yu_ocean = yu_ocean.sel(yu_ocean=lat_range)
xu_ocean = cc.querying.getvar(expt,'xu_ocean',session,n=1)
# -
# Sanity checks: t-cells must pair one-to-one with u-cells, with each t-point
# south/west of its u-point (B-grid assumption used by the masking below).
if len(yt_ocean) != len(yu_ocean):
    print('help! different size')
if yt_ocean.min('yt_ocean')> yu_ocean.min('yu_ocean'):
    print('help! wrong order')
if len(xt_ocean) != len(xu_ocean):
    print('help! x different size')
if xt_ocean.min('xt_ocean')> xu_ocean.min('xu_ocean'):
    print('help! x wrong order')
# #### Load quantity we want a contour of, e.g. SSH averaged over a year
# +
# Load one model year of sea surface height and form its annual mean.
start_time = '2170-01-01'
end_time = '2170-12-31'
time_slice = slice(start_time, end_time)
ssh = cc.querying.getvar(expt,'sea_level', session, start_time = start_time, end_time = end_time)
# select one year and latitude range
ssh = ssh.sel(yt_ocean = lat_range, time = time_slice)
# weighted time mean by month length
# NOTE(review): dividing by a fixed 365 assumes a noleap model calendar —
# confirm, or divide by month_length.sum('time') instead.
month_length = ssh.time.dt.days_in_month
ssh_mean = (ssh*month_length).sum('time')/365
fig = plt.figure(figsize = (10,4))
ssh_mean.plot()
ssh_mean.plot.contour(levels = [-1.2],colors = 'k', linestyles = '-')
plt.title('Sea surface height (m)')
# -
h = ssh_mean.load()
# Fill in land with zeros so the contour tracer below never hits NaNs:
h = h.fillna(0)
# Choose whether you want your contour on the u or t grid.
grid_sel = 't'
# Pick the coordinate pair matching the selected grid.
if grid_sel == 'u':
    x_var, y_var = xu_ocean, yu_ocean
elif grid_sel == 't':
    x_var, y_var = xt_ocean, yt_ocean
# #### Choose your desired contour value
contour_depth = -1.20
# + [markdown] tags=[]
# #### Select the contour
# If there are multiple contours satisfying this contour level, change the `count` in the `if` statement below until desired contour is highlighted red. Counting starts from the south west. For example, if we chose `count == 1` the Antarctic Peninsula would instead be selected, rather than the circumpolar contour.
# -
# Extract the integer grid indices of the chosen contour. The second path
# found (count == 2) is the circumpolar contour for this data set.
fig = plt.figure(figsize = (10,4))
count = 0
x_contour = []
y_contour = []
# Create the contour:
# NOTE(review): ContourSet.collections is deprecated in Matplotlib >= 3.8;
# confirm the pinned Matplotlib version, or switch to ContourSet.get_paths().
sc = plt.contour(h, levels=[contour_depth])
for collection in sc.collections:
    for path in collection.get_paths():
        count += 1
        if count == 2:
            # Round each vertex to the nearest grid index and record it.
            for ii in range(np.size(path.vertices[:,0])):
                x_contour.append(int(np.round(path.vertices[ii][0])))
                y_contour.append(int(np.round(path.vertices[ii][1])))
plt.scatter(x_contour, y_contour, s=5, alpha=0.5, color='tomato');
# #### Processing
# Now process these coordinates to make sure there are no double ups.
# Locate consecutive contour points whose rounded (x, y) indices coincide.
step_x = np.diff(x_contour)
step_y = np.diff(y_contour)
duplicate_indices = [ii for ii in range(len(step_x))
                     if step_x[ii] == 0 and step_y[ii] == 0]
# Remove duplicates, walking backwards so earlier indices stay valid.
for index in reversed(duplicate_indices):
    del x_contour[index]
    del y_contour[index]
# Sample the contoured field at each (deduplicated) contour point.
h_contour = np.zeros(len(x_contour))
for ii, (row, col) in enumerate(zip(y_contour, x_contour)):
    h_contour[ii] = h[row, col]
# Due to the discrete grid, the values on our contour are not exactly the same. We check this makes sense -- if this plot is blank, then something has gone wrong.
# Visual check: sampled values should scatter tightly around contour_depth.
fig = plt.figure(figsize=(10,5))
plt.plot(h_contour, 'o', markersize=1)
plt.axhline(contour_depth, color='k', linewidth=0.5);
# #### Get lat and lon along the contour
# +
# Convert grid indices to physical lon/lat using the selected grid coords.
lat_along_contour = np.zeros((len(x_contour)))
lon_along_contour = np.zeros((len(x_contour)))
for ii in range(len(h_contour)):
    lon_along_contour[ii] = x_var[x_contour[ii]]
    lat_along_contour[ii] = y_var[y_contour[ii]]
# -
# #### Repeat the leftmost point at the end of the array.
#
# (Required for masking contour above and below)
lat_along_contour = np.append(lat_along_contour,lat_along_contour[0])
lon_along_contour = np.append(lon_along_contour,lon_along_contour[0])
# Number of grid points on the contour
num_points = len(lat_along_contour)
# #### Now we number the points along the contour
# +
# Number the points along the contour: contour_mask_numbered[ii] = ii.
# (Unused lat1/lat2/lon1/lon2 assignments in this loop were dead code and
# have been removed.)
contour_mask_numbered = np.zeros_like(lon_along_contour)
for ii in range(num_points-1):
    contour_mask_numbered[ii] = ii
# -
# Paint the point numbers onto a 2D field on the same grid as h, offset by 1
# so that 0 everywhere else means "not on the contour".
contour_mask = h*0
for ii in range(num_points-1):
    contour_mask[y_contour[ii],x_contour[ii]] = contour_mask_numbered[ii]+1
plt.figure(1,figsize=(16,8))
plt.pcolormesh(contour_mask.xt_ocean,contour_mask.yt_ocean,contour_mask)
plt.colorbar()
# #### Create mask
# Now we create a mask below contour so that the direction of the contour can be determined
# +
#contour_mask = contour_mask[1:-2][1:-2]
contour_mask_numbered = contour_mask
# Flood-fill the region south of the contour with the marker value -100.
# Starting from a seed in the south-west corner, four directional sweeps
# (one from each corner) propagate the marker into neighbouring zeros until
# everything below the contour is filled. The sweep order matters: each pass
# reaches cells the previous passes could not.
contour_masked_above = np.copy(contour_mask_numbered)
contour_masked_above[-1,0] = -100
# from top left:
for ii in range(len(contour_mask.xt_ocean)-1):
    for jj in range(len(contour_mask.yt_ocean))[::-1][:-1]:
        if contour_masked_above[jj,ii] == -100:
            if contour_masked_above[jj-1,ii] == 0:
                contour_masked_above[jj-1,ii] = -100
            if contour_masked_above[jj,ii+1] == 0:
                contour_masked_above[jj,ii+1] = -100
# from top right:
for ii in range(len(contour_mask.xt_ocean))[::-1][:-1]:
    for jj in range(len(contour_mask.yt_ocean))[::-1][:-1]:
        if contour_masked_above[jj,ii] == -100:
            if contour_masked_above[jj-1,ii] == 0:
                contour_masked_above[jj-1,ii] = -100
            if contour_masked_above[jj,ii-1] == 0:
                contour_masked_above[jj,ii-1] = -100
# from bottom right:
for ii in range(len(contour_mask.xt_ocean))[::-1][:-1]:
    for jj in range(len(contour_mask.yt_ocean)-1):
        if contour_masked_above[jj,ii] == -100:
            if contour_masked_above[jj+1,ii] == 0:
                contour_masked_above[jj+1,ii] = -100
            if contour_masked_above[jj,ii-1] == 0:
                contour_masked_above[jj,ii-1] = -100
# from bottom left:
for ii in range(len(contour_mask.xt_ocean)-1):
    for jj in range(len(contour_mask.yt_ocean)-1):
        if contour_masked_above[jj,ii] == -100:
            if contour_masked_above[jj+1,ii] == 0:
                contour_masked_above[jj+1,ii] = -100
            if contour_masked_above[jj,ii+1] == 0:
                contour_masked_above[jj,ii+1] = -100
# -
# Everything south of the contour should now show as a single -100 region.
plt.figure(1,figsize=(16,8))
plt.pcolormesh(contour_mask.xt_ocean,contour_mask.yt_ocean, contour_masked_above)
plt.colorbar()
# South of the contour, values have been filled in to be -100, and it is thus a different colour in the plot.
#
# #### Direction of cross-contour transport
# Now we can use the mask south of the contour to determine whether the transport across the contour should be north, east, south or west (the grid is made of discrete square(ish) shaped cells). This is done by looping through the contour points and determining in which directions there are zeros (above contour) and -100 (below contour). This means the orientation of the contour can be determined. This is saved as `mask_x_transport`, which has -1 and +1 in a 2D (x and y) array where the contour has eastward transport, and `mask_y_transport` which has -1 and +1 for coordinates with northward transport. All other positions in the array are 0. This means that multiplying the northward transport `ty_trans` by the `mask_y_transport` gives all the northward transport across the contour, and zeros everywhere else (e.g. where contour goes upwards and cross-contour transport is thus eastward).
# +
# Build sign masks selecting which velocity face crosses the contour at each
# point: +1/-1 in mask_x_transport where zonal transport crosses, likewise
# mask_y_transport for meridional transport. The *_numbered variants record
# the order of crossings along the contour for later sorting.
mask_x_transport = np.zeros_like(contour_mask_numbered)
mask_y_transport = np.zeros_like(contour_mask_numbered)
mask_y_transport_numbered = np.zeros_like(contour_mask_numbered)
mask_x_transport_numbered = np.zeros_like(contour_mask_numbered)
# make halos: wrap one zonal column onto each side so east/west neighbour
# checks work across the periodic boundary.
contour_masked_above_halo = np.zeros((contour_masked_above.shape[0],
                                      contour_masked_above.shape[1]+2))
contour_masked_above_halo[:,0] = contour_masked_above[:,-1]
contour_masked_above_halo[:,1:-1] = contour_masked_above
contour_masked_above_halo[:,-1] = contour_masked_above[:,0]
new_number_count = 1
for mask_loc in range(1,int(np.max(contour_mask_numbered))+1):
    #if mask_loc%100 == 0:
    #    print('mask for x/y transport at point '+str(mask_loc))
    index_i = np.where(contour_mask_numbered==mask_loc)[1]
    index_j = np.where(contour_mask_numbered==mask_loc)[0]
    # if point above is towards Antarctica and point below is away from Antarctica:
    # take transport grid point to north of t grid:
    if (contour_masked_above[index_j+1,index_i]==0) and (contour_masked_above[index_j-1,index_i]!=0):
        mask_y_transport[index_j,index_i] = -1
        # number the crossing so all crossings can be ordered along the contour
        mask_y_transport_numbered[index_j,index_i] = new_number_count
        new_number_count += 1
    # if point below is towards Antarctica and point above is away from Antarctica:
    # take transport grid point to south of t grid:
    elif (contour_masked_above[index_j-1,index_i]==0) and (contour_masked_above[index_j+1,index_i]!=0):
        mask_y_transport[index_j-1,index_i] = 1
        mask_y_transport_numbered[index_j-1,index_i] = new_number_count
        new_number_count += 1
    # if point to right is towards Antarctica and point to left is away from Antarctica:
    # zonal indices increased by 1 due to halos
    # take transport grid point on right of t grid:
    if (contour_masked_above_halo[index_j,index_i+2]==0) and (contour_masked_above_halo[index_j,index_i]!=0):
        mask_x_transport[index_j,index_i] = -1
        mask_x_transport_numbered[index_j,index_i] = new_number_count
        new_number_count += 1
    # if point to left is towards Antarctica and point to right is away from Antarctica:
    # take transport grid point on left of t grid:
    elif (contour_masked_above_halo[index_j,index_i]==0) and (contour_masked_above_halo[index_j,index_i+2]!=0):
        mask_x_transport[index_j,index_i-1] = 1
        mask_x_transport_numbered[index_j,index_i-1] = new_number_count
        new_number_count += 1
# -
# Visual check of the zonal mask (+1 eastward / -1 westward crossings).
plt.figure(1,figsize=(16,8))
plt.pcolormesh(contour_mask.xt_ocean,contour_mask.yt_ocean,mask_x_transport)
plt.colorbar()
# As can be seen, in `mask_x_transport` there is yellow (+1) where eastward transport crosses the contour, and (-1) where westward transport crosses the contour (in the net northward direction). There are zeros everywhere else.
#
# ### We now have the coordinates of the contours, and whether the x or y transport is needed to calculate cross-contour transport.
#
# We now proceed to calculate transports across the contour
# +
# Convert contour masks to data arrays, so we can multiply them later.
# We need to ensure the lat lon coordinates correspond to the actual data location:
# The y masks are used for ty_trans, so like vhrho this should have dimensions (yu_ocean, xt_ocean).
# The x masks are used for tx_trans, so like uhrho this should have dimensions (yt_ocean, xu_ocean).
# However the actual name will always be simply y_ocean/x_ocean irrespective of the variable
# to make concatenation of transports in both direction and sorting possible.
mask_x_transport = xr.DataArray(mask_x_transport, coords = [yt_ocean, xu_ocean], dims = ['y_ocean','x_ocean'])
mask_y_transport = xr.DataArray(mask_y_transport, coords = [yu_ocean, xt_ocean], dims = ['y_ocean','x_ocean'])
mask_x_transport_numbered = xr.DataArray(mask_x_transport_numbered, coords = [yt_ocean, xu_ocean], dims = ['y_ocean','x_ocean'])
mask_y_transport_numbered = xr.DataArray(mask_y_transport_numbered, coords = [yu_ocean, xt_ocean], dims = ['y_ocean','x_ocean'])
# -
# #### Stack contour data into 1D
# +
# Create the contour order data-array. Note that in this procedure the x-grid counts have x-grid
# dimensions and the y-grid counts have y-grid dimensions, but these are implicit, the dimension
# *names* are kept general across the counts, the generic y_ocean, x_ocean, so that concatenating
# works but we don't double up with numerous counts for one lat/lon point.
# stack contour data into 1d, keeping only points that lie on the contour:
mask_x_numbered_1d = mask_x_transport_numbered.stack(contour_index = ['y_ocean', 'x_ocean'])
mask_x_numbered_1d = mask_x_numbered_1d.where(mask_x_numbered_1d > 0, drop = True)
mask_y_numbered_1d = mask_y_transport_numbered.stack(contour_index = ['y_ocean', 'x_ocean'])
mask_y_numbered_1d = mask_y_numbered_1d.where(mask_y_numbered_1d > 0, drop = True)
# Concatenate x and y crossings and sort by crossing number to recover the
# along-contour order:
contour_ordering = xr.concat((mask_x_numbered_1d,mask_y_numbered_1d), dim = 'contour_index')
contour_ordering = contour_ordering.sortby(contour_ordering)
contour_index_array = np.arange(1,len(contour_ordering)+1)
# -
# -
# #### Load transports `tx_trans` and `ty_trans`
# +
# Load the meridional (ty_trans) and zonal (tx_trans) mass transports for the
# same year/latitude range, then rename dims to the generic y_ocean/x_ocean so
# they line up with the masks built above.
ty_trans = cc.querying.getvar(expt,'ty_trans',session, start_time = start_time, end_time = end_time)
ty_trans = ty_trans.sel(yu_ocean = lat_range, time = time_slice)
tx_trans = cc.querying.getvar(expt,'tx_trans',session, start_time = start_time, end_time = end_time)
tx_trans = tx_trans.sel(yt_ocean = lat_range, time = time_slice)
ty_trans = ty_trans.rename({'yu_ocean':'y_ocean', 'xt_ocean':'x_ocean'})
tx_trans = tx_trans.rename({'yt_ocean':'y_ocean', 'xu_ocean':'x_ocean'})
# -
# #### Take time average
# +
# %%time
# weighted time mean by month length (365-day noleap model year assumed,
# matching the SSH average above)
month_length = ty_trans.time.dt.days_in_month
ty_trans = (ty_trans*month_length).sum('time')/365
tx_trans = (tx_trans*month_length).sum('time')/365
ty_trans = ty_trans.load()
tx_trans = tx_trans.load()
# -
# #### Convert from mass transport to volume transport
# Divide by a reference density; applying the sign masks here also zeroes out
# every face that does not cross the contour.
rho_0 = 1035
ty_trans = ty_trans*mask_y_transport/rho_0
tx_trans = tx_trans*mask_x_transport/rho_0
# #### Extract transport values along contour
# +
# %%time
## We could also loop in time if we didn't want the time average. Initialise a data array and fill in data by looping in time.
# stack transports into 1d and drop any points not on contour:
x_transport_1d = tx_trans.stack(contour_index = ['y_ocean', 'x_ocean'])
x_transport_1d = x_transport_1d.where(mask_x_numbered_1d>0, drop = True)
y_transport_1d = ty_trans.stack(contour_index = ['y_ocean', 'x_ocean'])
y_transport_1d = y_transport_1d.where(mask_y_numbered_1d>0, drop = True)
# combine all points on contour and order them by crossing number:
vol_trans_across_contour = xr.concat((x_transport_1d, y_transport_1d), dim = 'contour_index')
vol_trans_across_contour = vol_trans_across_contour.sortby(contour_ordering)
vol_trans_across_contour.coords['contour_index'] = contour_index_array
vol_trans_across_contour = vol_trans_across_contour.load()
# -
# Depth-summed cumulative transport as we move along the contour.
fig, ax = plt.subplots(figsize = (10,4))
vol_trans_across_contour.sum('st_ocean').cumsum('contour_index').plot()
ax.set_ylabel('Cumulative transport across contour')
# #### Finally, we can extract the coordinates of the contour index, and the distance, for a more meaningful x axis.
# +
# Rebuild the ordered contour so its y_ocean/x_ocean coordinates can be used
# as lat/lon labels for each crossing.
contour_ordering = xr.concat((mask_x_numbered_1d,mask_y_numbered_1d), dim = 'contour_index')
contour_ordering = contour_ordering.sortby(contour_ordering)
# get lat and lon along contour, useful for plotting later:
lat_along_contour = contour_ordering.y_ocean
lon_along_contour = contour_ordering.x_ocean
contour_index_array = np.arange(1,len(contour_ordering)+1)
# don't need the multi-index anymore, replace with contour count and save
lat_along_contour.coords['contour_index'] = contour_index_array
lon_along_contour.coords['contour_index'] = contour_index_array
# -
# -
# #### Code to extract distance in between contour coordinates, using length of diagonal if there is a bend.
# Loop through the contour, determining if diagonal is required or not, and save the distance along each segment. Then, cumulatively sum the distances along each segment to get the distance from the first point.
# +
# %%time
# Load grid cell widths: dxu (zonal width, for meridional crossings) and
# dyt (meridional width, for zonal crossings).
dxu = cc.querying.getvar(expt,'dxu',session,ncfile = 'ocean_grid.nc',n=1)
dxu = dxu.sel(yu_ocean = lat_range)
dyt = cc.querying.getvar(expt,'dyt',session,ncfile = 'ocean_grid.nc',n=1)
dyt = dyt.sel(yt_ocean = lat_range)
num_points = len(lat_along_contour)
lat_t = cc.querying.getvar(expt,'geolat_t',session,ncfile = 'ocean_grid.nc',n=1)
lat_t = lat_t.sel(yt_ocean = lat_range)
lon_t = cc.querying.getvar(expt,'geolon_t',session,ncfile = 'ocean_grid.nc',n=1)
lon_t = lon_t.sel(yt_ocean = lat_range)
# if there is a bend in the contour, add the distance using length of diagonal, not sum of
# 2 edges, to be more representative. When a bend is found, half the diagonal
# is assigned to each of the two crossings and the next count is skipped.
distance_along_contour = np.zeros((num_points))
x_indices = np.sort(mask_x_transport_numbered.values[mask_x_transport_numbered.values>0])
y_indices = np.sort(mask_y_transport_numbered.values[mask_y_transport_numbered.values>0])
skip = False
for count in range(1,num_points):
    if skip == True:
        skip = False
        continue
    if count in y_indices:
        if count + 1 in y_indices:
            # note dxu and dyt do not vary in x, so any zonal column works;
            # NOTE(review): column 990 appears to be an arbitrary in-range
            # choice — confirm it is valid for other grids/lat ranges.
            jj = np.where(mask_y_transport_numbered==count)[0]
            distance_along_contour[count-1] = (dxu[jj,990])[0]
        else:
            jj0 = np.where(mask_y_transport_numbered==count)[0]
            jj1 = np.where(mask_x_transport_numbered==count+1)[0]
            diagonal_distance = 0.5*np.sqrt((dxu[jj0,990])[0]**2+\
                                            (dyt[jj1,990])[0]**2)
            distance_along_contour[count-1] = diagonal_distance
            distance_along_contour[count] = diagonal_distance
            # skip to next count:
            skip = True
    # count in x_indices:
    else:
        if count + 1 in x_indices:
            jj = np.where(mask_x_transport_numbered==count)[0]
            distance_along_contour[count-1] = (dyt[jj,990])[0]
        else:
            jj0 = np.where(mask_x_transport_numbered==count)[0]
            jj1 = np.where(mask_y_transport_numbered==count+1)[0]
            diagonal_distance = 0.5*np.sqrt((dyt[jj0,990])[0]**2+\
                                            (dxu[jj1,990])[0]**2)
            distance_along_contour[count-1] = diagonal_distance
            distance_along_contour[count] = diagonal_distance
            # skip to next count:
            skip = True
# fix last value (left unset when the loop ended on a skipped count):
if distance_along_contour[-1] == 0:
    count = count + 1
    if count in y_indices:
        jj = np.where(mask_y_transport_numbered==count)[0]
        distance_along_contour[-1] = (dxu[jj,990])[0]
    else:
        jj = np.where(mask_x_transport_numbered==count)[0]
        distance_along_contour[-1] = (dyt[jj,990])[0]
# cumulative distance; units are 10^3 km:
distance_along_contour = np.cumsum(distance_along_contour)/1e3/1e3
# -
# -
# #### Select the indices for axis labels of specific longitudes, so we can plot transport vs distance but have longitude labels instead of length
# For each longitude threshold, record the first contour index (searching from
# point 100 onwards) whose longitude exceeds it; these become axis tick
# positions. Slot 0 stays 0 (start of contour); slot 7 is the last point.
distance_indices = np.zeros(8)
longitude_thresholds = (-240, -180, -120, -60, 0, 60)
for i in np.arange(100, len(lon_along_contour.values)):
    for slot, threshold in enumerate(longitude_thresholds, start=1):
        if distance_indices[slot] == 0 and lon_along_contour.values[i] > threshold:
            distance_indices[slot] = lon_along_contour.contour_index.values[i]
distance_indices[7] = len(lon_along_contour.contour_index.values) - 1
# #### Plot cumulative transport against distance along the contour.
# +
# Plot cumulative cross-contour transport in the top 500 m twice: against
# distance along the contour (top) and with longitude tick labels (bottom).
# Raw strings (r'...') are used for the mathtext labels so sequences like
# '\circ' are not treated as invalid string escapes (a SyntaxWarning on
# modern Python); the rendered text is unchanged.
fig, axes = plt.subplots(nrows = 2,figsize = (10,8))
axes[0].plot(distance_along_contour,(10**(-6)*vol_trans_across_contour.sel(st_ocean = slice(0,500)).sum('st_ocean').cumsum('contour_index')))
axes[0].set_ylabel('Cumulative transport (Sv)')
axes[0].set_xlabel(r'Distance from 80$^\circ$E, 10$^3$ km')
axes[0].set_xlim(0,distance_along_contour[-1]);
axes[1].plot(distance_along_contour,(10**(-6)*vol_trans_across_contour.sel(st_ocean = slice(0,500)).sum('st_ocean').cumsum('contour_index')))
axes[1].set_ylabel('Cumulative transport (Sv)')
axes[1].set_xticks(distance_along_contour[distance_indices.astype(int)[:-1]])
axes[1].set_xticklabels((r'80$^\circ$E',r'120$^\circ$E',r'180$^\circ$W',r'120$^\circ$W',r'60$^\circ$W',r'0$^\circ$',r'60$^\circ$E'));
axes[1].set_xlim(0,distance_along_contour[-1]);
axes[1].set_xlabel('Longitude coordinates along contour')
axes[0].set_title('Cumulative transport across SSH=-1.2 m in top 500 m depth')
# -
# We can see that there is a net northward transport across the Antarctic Circumpolar Current in the top 500m - this is the Ekman wind-driven transport. We could then choose to extract the density (or salt and temperature) along this same path, do this by interpolating density to the north and eastern edge of t-cells. Then we could bin the transports in each depth level into the corresponding density, to determine the transport across the contour in density space. An example of this calculation can be found in https://github.com/claireyung/Topographic_Hotspots_Upwelling-Paper_Code/blob/main/Analysis_Code/Save_and_bin_along_contours.ipynb.
| DocumentedExamples/Cross-contour_transport.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow2_p36)
# language: python
# name: conda_tensorflow2_p36
# ---
# # TensorFlow Script Mode with Pipe Mode Input
#
#
# SageMaker Pipe Mode is an input mechanism for SageMaker training containers based on Linux named pipes. SageMaker makes the data available to the training container using named pipes, which allows data to be downloaded from S3 to the container while training is running. For larger datasets, this dramatically improves the time to start training, as the data does not need to be first downloaded to the container. To learn more about pipe mode, please consult the AWS documentation at: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-trainingdata.
#
# In this tutorial, we show you how to train a TensorFlow estimator using data read with SageMaker Pipe Mode. We use the SageMaker PipeModeDataset class - a special TensorFlow Dataset built specifically to read from SageMaker Pipe Mode data. This Dataset is available in our TensorFlow containers for TensorFlow versions 1.7.0 and up. It's also open-sourced at https://github.com/aws/sagemaker-tensorflow-extensions and can be built into custom TensorFlow images for use in SageMaker.
#
# Although you can also build the PipeModeDataset into your own containers, in this tutorial we'll show how you can use the PipeModeDataset by launching training from the SageMaker Python SDK. The SageMaker Python SDK helps you deploy your models for training and hosting in optimized, production-ready containers in SageMaker. The SageMaker Python SDK is easy to use, modular, extensible and compatible with TensorFlow and many other deep learning frameworks.
#
# Different collections of S3 files can be made available to the training container while it's running. These are referred to as "channels" in SageMaker. In this example, we use two channels - one for training data and one for evaluation data. Each channel is mapped to S3 files from different directories. The SageMaker PipeModeDataset knows how to read from the named pipes for each channel given just the channel name. When we launch SageMaker training we tell SageMaker what channels we have and where in S3 to read the data for each channel.
#
#
# ## Setup
# The following code snippet sets up some variables we'll need later on.
# +
from sagemaker import get_execution_role
from sagemaker.session import Session
# S3 bucket for saving code and model artifacts.
# Feel free to specify a different bucket here if you wish.
bucket = Session().default_bucket()
# Location to save your custom code in tar.gz format.
custom_code_upload_location = 's3://{}/tensorflow_scriptmode_pipemode/customcode'.format(bucket)
# Location where results of model training are saved.
model_artifacts_location = 's3://{}/tensorflow_scriptmode_pipemode/artifacts'.format(bucket)
# IAM execution role that gives SageMaker access to resources in your AWS account.
role = get_execution_role()
# -
# -
# ## Complete training source code
#
# In this tutorial we train a TensorFlow LinearClassifier using pipe mode data. The TensorFlow training script is contained in following file:
# !pygmentize "pipemode.py"
# The above script is compatible with the SageMaker TensorFlow script mode container. (See: [Preparing TensorFlow Training Script](https://github.com/aws/sagemaker-python-sdk/tree/master/src/sagemaker/tensorflow#preparing-a-script-mode-training-script)).
#
# Using a `PipeModeDataset` to train an estimator using a Pipe Mode channel, we can construct an function that reads from the channel and return an `PipeModeDataset`. This is a TensorFlow Dataset specifically created to read from a SageMaker Pipe Mode channel. A `PipeModeDataset` is a fully-featured TensorFlow Dataset and can be used in exactly the same ways as a regular TensorFlow Dataset can be used.
#
# The training and evaluation data used in this tutorial is synthetic. It contains a series of records stored in a TensorFlow Example protobuf object. Each record contains a numeric class label and an array of 1024 floating point numbers. Each array is sampled from a multi-dimensional Gaussian distribution with a class-specific mean. This means it is possible to learn a model using a TensorFlow Linear classifier which can classify examples well. Each record is separated using RecordIO encoding (though the `PipeModeDataset` class also supports the TFRecord format as well).
#
# The training and evaluation data were produced using the benchmarking source code in the sagemaker-tensorflow-extensions benchmarking sub-package. If you want to investigate this further, please visit the GitHub repository for sagemaker-tensorflow-extensions at https://github.com/aws/sagemaker-tensorflow-extensions.
#
# The following example code shows how to construct a `PipeModeDataset`.
#
# ```python
# from sagemaker_tensorflow import `PipeModeDataset`
#
#
# # Simple example data - a labeled vector.
# features = {
# 'data': tf.FixedLenFeature([], tf.string),
# 'labels': tf.FixedLenFeature([], tf.int64),
# }
#
# # A function to parse record bytes to a labeled vector record
# def parse(record):
# parsed = tf.parse_single_example(record, features)
# return ({
# 'data': tf.decode_raw(parsed['data'], tf.float64)
# }, parsed['labels'])
#
# # Construct a `PipeModeDataset` reading from a 'training' channel, using
# # the TF Record encoding.
# ds = `PipeModeDataset`(channel='training', record_format='TFRecord')
#
# # The `PipeModeDataset` is a TensorFlow Dataset and provides standard Dataset methods
# ds = ds.repeat(20)
# ds = ds.prefetch(10)
# ds = ds.map(parse, num_parallel_calls=10)
# ds = ds.batch(64)
#
# ```
# # Running training using the Python SDK
#
# We can use the SDK to run our local training script on SageMaker infrastructure.
#
# 1. Pass the path to the pipemode.py file, which contains the functions for defining your estimator, to the ``sagemaker.tensorflow.TensorFlow`` init method.
# 2. Pass the S3 location that we uploaded our data to previously to the ``fit()`` method.
# +
from sagemaker.tensorflow import TensorFlow
# Configure a script-mode TensorFlow estimator that streams training data via
# SageMaker Pipe Mode (input_mode='Pipe') instead of downloading it first.
# NOTE(review): train_instance_count/train_instance_type were renamed to
# instance_count/instance_type in SageMaker Python SDK v2 — confirm the
# pinned SDK version before running.
tensorflow = TensorFlow(entry_point='pipemode.py',
                        role=role,
                        framework_version='1.15.2',
                        input_mode='Pipe',
                        output_path=model_artifacts_location,
                        code_location=custom_code_upload_location,
                        train_instance_count=1,
                        py_version='py3',
                        train_instance_type='ml.c4.xlarge')
# -
# After we've created the SageMaker Python SDK TensorFlow object, we can call ``fit()`` to launch TensorFlow training:
# +
# %%time
import boto3
# use the region-specific sample data bucket
region = boto3.Session().region_name
train_data = 's3://sagemaker-sample-data-{}/tensorflow/pipe-mode/train'.format(region)
eval_data = 's3://sagemaker-sample-data-{}/tensorflow/pipe-mode/eval'.format(region)
# Launch training with two Pipe Mode channels, 'train' and 'eval', each
# mapped to its S3 prefix above.
tensorflow.fit({'train':train_data, 'eval':eval_data})
# -
# After training finishes, the trained model artifacts will be uploaded to S3. This following example notebook shows how to deploy a model trained with script mode: https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk/tensorflow_script_mode_training_and_serving
| sagemaker-python-sdk/tensorflow_script_mode_pipe_mode/tensorflow_script_mode_pipe_mode.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # secret_santa2
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/secret_santa2.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/secret_santa2.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Secret Santa problem II in Google CP Solver.
From Maple Primes: 'Secret Santa Graph Theory'
http://www.mapleprimes.com/blog/jpmay/secretsantagraphtheory
'''
Every year my extended family does a 'secret santa' gift exchange.
Each person draws another person at random and then gets a gift for
them. At first, none of my siblings were married, and so the draw was
completely random. Then, as people got married, we added the restriction
that spouses should not draw each others names. This restriction meant
that we moved from using slips of paper on a hat to using a simple
computer program to choose names. Then people began to complain when
they would get the same person two years in a row, so the program was
modified to keep some history and avoid giving anyone a name in their
recent history. This year, not everyone was participating, and so after
removing names, and limiting the number of exclusions to four per person,
I had data something like this:
Name: Spouse, Recent Picks
Noah: Ava. Ella, Evan, Ryan, John
Ava: Noah, Evan, Mia, John, Ryan
Ryan: Mia, Ella, Ava, Lily, Evan
Mia: Ryan, Ava, Ella, Lily, Evan
Ella: John, Lily, Evan, Mia, Ava
John: Ella, Noah, Lily, Ryan, Ava
Lily: Evan, John, Mia, Ava, Ella
Evan: Lily, Mia, John, Ryan, Noah
'''
Note: I interpret this as the following three constraints:
1) One cannot be a Secret Santa of one's spouse
2) One cannot be a Secret Santa for somebody two years in a row
3) Optimization: maximize the time since the last time
This model also handle single persons, something the original
problem don't mention.
Compare with the following models:
* Google CP Solver: http://www.hakank.org/google_or_tools/secret_santa.py
* MiniZinc: http://www.hakank.org/minizinc/secret_santa2.mzn
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
# Create the solver.
solver = pywrapcp.Solver('Secret Santa problem II')
#
# data
#
# Data-set switch: 0 = couples-only data, 1 = include the extra "Single"
# participant (fake data). Fixed: this must be assigned BEFORE the
# `if single == 1` branch below reads it; previously it was assigned at
# the very end of the script, which raises NameError when run top-to-bottom.
single = 0
#
# The matrix version of earlier rounds.
# M means that no earlier Santa has been assigned.
# Note: Ryan and Mia has the same recipient for years 3 and 4,
# and Ella and John has for year 4.
# This seems to be caused by modification of
# original data.
#
n_no_single = 8
M = n_no_single + 1  # sentinel: "never been this person's Santa"
rounds_no_single = [
    # N  A  R  M El  J  L Ev
    [0, M, 3, M, 1, 4, M, 2],  # Noah
    [M, 0, 4, 2, M, 3, M, 1],  # Ava
    [M, 2, 0, M, 1, M, 3, 4],  # Ryan
    [M, 1, M, 0, 2, M, 3, 4],  # Mia
    [M, 4, M, 3, 0, M, 1, 2],  # Ella
    [1, 4, 3, M, M, 0, 2, M],  # John
    [M, 3, M, 2, 4, 1, 0, M],  # Lily
    [4, M, 3, 1, M, 2, M, 0]   # Evan
]
#
# Rounds with a single person (fake data)
#
n_with_single = 9
M = n_with_single + 1
rounds_single = [
    # N  A  R  M El  J  L Ev  S
    [0, M, 3, M, 1, 4, M, 2, 2],  # Noah
    [M, 0, 4, 2, M, 3, M, 1, 1],  # Ava
    [M, 2, 0, M, 1, M, 3, 4, 4],  # Ryan
    [M, 1, M, 0, 2, M, 3, 4, 3],  # Mia
    [M, 4, M, 3, 0, M, 1, 2, M],  # Ella
    [1, 4, 3, M, M, 0, 2, M, M],  # John
    [M, 3, M, 2, 4, 1, 0, M, M],  # Lily
    [4, M, 3, 1, M, 2, M, 0, M],  # Evan
    [1, 2, 3, 4, M, 2, M, M, 0]   # Single
]
# Pick the data set and bind a symbolic index to every participant.
if single == 1:
    n = n_with_single
    Noah, Ava, Ryan, Mia, Ella, John, Lily, Evan, Single = list(range(n))
    rounds = rounds_single
else:
    n = n_no_single
    Noah, Ava, Ryan, Mia, Ella, John, Lily, Evan = list(range(n))
    rounds = rounds_no_single
M = n + 1
persons = [
    'Noah', 'Ava', 'Ryan', 'Mia', 'Ella', 'John', 'Lily', 'Evan', 'Single'
]
spouses = [
    Ava,   # Noah
    Noah,  # Ava
    Mia,   # Ryan
    Ryan,  # Mia
    John,  # Ella
    Ella,  # John
    Evan,  # Lily
    Lily,  # Evan
    -1     # Single has no spouse
]
#
# declare variables
#
# santas[i] = index of the person that i is a Secret Santa for.
santas = [solver.IntVar(0, n - 1, 'santas[%i]' % i) for i in range(n)]
# santa_distance[i] = years since i last drew this recipient (M = never).
santa_distance = [
    solver.IntVar(0, M, 'santa_distance[%i]' % i) for i in range(n)
]
# total of 'distance', to maximize
z = solver.IntVar(0, n * n * n, 'z')
#
# constraints
#
solver.Add(solver.AllDifferent(santas))
solver.Add(z == solver.Sum(santa_distance))
# Can't be one own's Secret Santa
# (i.e. ensure that there are no fix-point in the array.)
for i in range(n):
    solver.Add(santas[i] != i)
# no Santa for a spouse
for i in range(n):
    if spouses[i] > -1:
        solver.Add(santas[i] != spouses[i])
# optimize 'distance' to earlier rounds:
for i in range(n):
    solver.Add(santa_distance[i] == solver.Element(rounds[i], santas[i]))
# cannot be a Secret Santa for the same person
# two years in a row.
for i in range(n):
    for j in range(n):
        if rounds[i][j] == 1:
            solver.Add(santas[i] != j)
# objective
objective = solver.Maximize(z, 1)
#
# solution and search
#
db = solver.Phase(santas, solver.CHOOSE_MIN_SIZE_LOWEST_MIN,
                  solver.ASSIGN_CENTER_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
    num_solutions += 1
    print('total distances:', z.Value())
    print('santas:', [santas[i].Value() for i in range(n)])
    for i in range(n):
        print('%s\tis a Santa to %s (distance %i)' % \
              (persons[i],
               persons[santas[i].Value()],
               santa_distance[i].Value()))
    # print 'distance:', [santa_distance[i].Value()
    #                     for i in range(n)]
    print()
print('num_solutions:', num_solutions)
print('failures:', solver.Failures())
print('branches:', solver.Branches())
print('WallTime:', solver.WallTime(), 'ms')
| examples/notebook/contrib/secret_santa2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pathlib import Path
import os
# %autoreload 2
import src.utils as utils
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
os.getcwd()
# ### Choose experiment, print out configurations
# +
# Point at a stored model run on disk and load its JSON configuration.
experiment = "drosophila-3-rdpg-sbm"
run = 6
exp_path = Path(f"../models/runs/{experiment}/{run}")
run_path = exp_path / "run.json"
config_path = exp_path / "config.json"
config = utils.get_json(config_path)
print(f"Experiment: {experiment}")
print(f"Run: {run}")
print(f"Path: {run_path}")
print()
print("Experiment configuration:")
print()
# Echo every configuration entry except the docstring key.
for key, value in config.items():
    if not key == "__doc__":
        print(key)
        print(value)
        print()
# run_to_df appears to return one DataFrame per model family, in the order
# SBM, RDPG, TSBM -- TODO(review) confirm against src.utils.run_to_df.
dfs = utils.run_to_df(run_path)
sbm_df = dfs[0]
rdpg_df = dfs[1]
tsbm_df = dfs[2]
rdpg_df["RDPG"] = "RDPG"  # constant column used as legend hue in the plots below
# -
# ### Plot the noise observed in SBM model fitting
# +
# Plotting setup
sns.set_context("talk", font_scale=1.5)
plt_kws = dict(s=75, linewidth=0, legend='brief')
sbm_cmap = sns.light_palette("purple", as_cmap=True)
rdpg_cmap = sns.xkcd_palette(["grass green"])
# Plot 1: all SBM fits (MSE vs. GMM parameter count, colored by block count)
# overlaid with the RDPG baseline.
plt.figure(figsize=(22,12))
sns.scatterplot(data=sbm_df,
                x="n_params_gmm",
                y="mse",
                hue="n_block_try",
                size="n_components_try",
                alpha=0.5,
                palette=sbm_cmap,
                **plt_kws)
sns.scatterplot(data=rdpg_df,
                x="n_params",
                y="mse",
                hue="RDPG",
                palette=rdpg_cmap,
                **plt_kws)
plt.xlabel("# Params (GMM params for SBMs)")
plt.ylabel("MSE")
plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
# Plot 2: same data, but colored by number of embedding components instead.
plt.figure(figsize=(20,10))
sns.scatterplot(data=sbm_df,
                x="n_params_gmm",
                y="mse",
                hue="n_components_try",
                palette=sbm_cmap,
                alpha=0.5,
                **plt_kws,)
sns.scatterplot(data=rdpg_df,
                x="n_params",
                y="mse",
                hue="RDPG",
                palette=rdpg_cmap,
                **plt_kws)
plt.xlabel("# Params (GMM params for SBMs)")
plt.ylabel("MSE")
plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
# # Plot 3
# plt.figure(figsize=(20,10))
# sns.scatterplot(data=sbm_df, x="n_params_sbm", y="mse", hue="n_components_try", **plt_kws, alpha=0.5)
# sns.scatterplot(data=rdpg_df, x="n_params", y="mse", **plt_kws)
# plt.xlabel("# Params (SBM params for SBMs)")
# plt.ylabel("MSE")
# plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
# -
# ### Get the best MSE SBM model fitting for each parameter set
# For every (n_block_try, n_components_try) parameter pair, find the
# simulation run with the lowest MSE across all 50 simulation indices.
# Seed param_df with the rows of simulation 0, then sweep the rest.
param_df = sbm_df[sbm_df["sim_ind"]==0]
labels = ["n_block_try", "n_components_try", "mse"]
param_df = param_df.loc[:, labels]
param_df["best_sim"] = 0
param_df["best_ind"] = 0
for i in range(50):  # 50 = number of simulation runs stored in sbm_df
    df = sbm_df[sbm_df["sim_ind"]==i]
    for j, row in df.iterrows():
        # Locate the param_df row that tracks the current best MSE for this
        # parameter combination.  (Fixed: removed an unused `params` local.)
        p_df = param_df.loc[(param_df[labels[0]]==row[labels[0]])
                            & (param_df[labels[1]]==row[labels[1]])]
        ind = p_df.index
        if row["mse"] <= param_df.loc[ind, "mse"].values[0]:
            param_df.loc[ind, "mse"] = row["mse"]
            param_df.loc[ind, "best_sim"] = row["sim_ind"]
            param_df.loc[ind, "best_ind"] = j  # original row index in sbm_df
best_sbm_df = sbm_df.loc[param_df["best_ind"].values, :]
# +
# Best-of-50 SBM fits (MSE) vs. the RDPG baseline, colored by block count.
plt.figure(figsize=(22,12))
cmap = sns.light_palette("purple", as_cmap=True)
sns.scatterplot(data=best_sbm_df,
                x="n_params_gmm",
                y="mse",
                hue="n_block_try",
                size="n_components_try",
                palette=cmap,
                **plt_kws,)
cmap = sns.xkcd_palette(["grass green"])
s = sns.scatterplot(data=rdpg_df,
                    x="n_params",
                    y="mse",
                    hue="RDPG",
                    palette=cmap,
                    **plt_kws)
# Relabel the auto-generated legend section headers.
leg = s.axes.get_legend()
leg.get_texts()[0].set_text("SBM: K, best of 50")
leg.get_texts()[6].set_text("SBM: d, best of 50")
leg.get_texts()[11].set_text("RDPG: directed")
plt.xlabel("# Params (GMM params for SBMs)")
plt.ylabel("MSE")
plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
# +
# Same comparison including the thresholded-SBM (tsbm) fits; legends are
# turned off for this cell since three overlaid legends would collide.
plt.figure(figsize=(22,12))
plt_kws = dict(s=75, linewidth=0, legend=False)
cmap = sns.light_palette("purple", as_cmap=True)
sns.scatterplot(data=best_sbm_df,
                x="n_params_gmm",
                y="mse",
                hue="n_components_try",
                size="n_block_try",
                palette=cmap,
                **plt_kws,)
cmap = sns.light_palette("teal", as_cmap=True)
s = sns.scatterplot(data=tsbm_df,
                    x="n_params_gmm",
                    y="mse",
                    hue="n_components_try",
                    size="n_block_try",
                    palette=cmap,
                    **plt_kws)
cmap = sns.xkcd_palette(["grass green"])
s = sns.scatterplot(data=rdpg_df,
                    x="n_params",
                    y="mse",
                    hue="RDPG",
                    palette=cmap,
                    **plt_kws)
# leg = s.axes.get_legend()
# leg.get_texts()[0].set_text("SBM: d, best of 50")
# leg.get_texts()[5].set_text("SBM: K, best of 50")
# leg.get_texts()[11].set_text("RDPG: directed")
plt.xlabel("# Params (GMM params for SBMs)")
plt.ylabel("MSE")
plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
# +
# param_df = sbm_df[sbm_df["sim_ind"]==0]
# labels = ["n_block_try", "mse"]
# param_df = param_df.loc[:, labels]
# param_df["best_sim"] = 0
# param_df["best_ind"] = 0
# for i in range(2):
# df = sbm_df[sbm_df["sim_ind"]==i]
# for j, row in df.iterrows():
# params = row[labels]
# p_df = param_df.loc[param_df[labels[0]]==row[labels[0]]]
# ind = p_df.index
# if row["mse"] <= param_df.loc[ind, "mse"].values[0]:
# param_df.loc[ind, "mse"] = row["mse"]
# param_df.loc[ind, "best_sim"] = row["sim_ind"]
# param_df.loc[ind, "best_ind"] = j
# best_sbm_df = sbm_df.loc[param_df["best_ind"].values, :]
# +
# plt.figure(figsize=(20,10))
# sns.scatterplot(data=best_sbm_df, x="n_params_gmm", y="mse", hue="n_block_try", **plt_kws, alpha=0.5)
# sns.scatterplot(data=rdpg_df, x="n_params", y="mse", **plt_kws)
# plt.xlabel("# Params (GMM params for SBMs)")
# plt.ylabel("MSE")
# plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
# plt.figure(figsize=(20,10))
# sns.scatterplot(data=best_sbm_df, x="n_params_gmm", y="mse", hue="n_components_try", **plt_kws, alpha=0.5)
# sns.scatterplot(data=rdpg_df, x="n_params", y="mse", **plt_kws)
# plt.xlabel("# Params (GMM params for SBMs)")
# plt.ylabel("MSE")
# plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
# +
# Same best-of-50 comparison, but plotting the likelihood `score` column.
plt.figure(figsize=(22,12))
cmap = sns.light_palette("purple", as_cmap=True)
sns.scatterplot(data=best_sbm_df,
                x="n_params_gmm",
                y="score",
                hue="n_block_try",
                size="n_components_try",
                palette=cmap,
                **plt_kws,)
cmap = sns.xkcd_palette(["grass green"])
s = sns.scatterplot(data=rdpg_df,
                    x="n_params",
                    y="score",
                    hue="RDPG",
                    palette=cmap,
                    **plt_kws)
# NOTE(review): plt_kws was redefined with legend=False in an earlier cell,
# so get_legend() may return None here -- confirm which plt_kws is intended.
leg = s.axes.get_legend()
leg.get_texts()[0].set_text("SBM: K, best of 50")
leg.get_texts()[6].set_text("SBM: d, best of 50")
leg.get_texts()[11].set_text("RDPG: directed")
plt.xlabel("# Params (GMM params for SBMs)")
plt.ylabel("Score")  # fixed: this plot charts `score`, axis was mislabeled "MSE"
plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
#####
plt.figure(figsize=(22,12))
cmap = sns.light_palette("purple", as_cmap=True)
sns.scatterplot(data=best_sbm_df,
                x="n_params_gmm",
                y="score",
                hue="n_components_try",
                size="n_block_try",
                palette=cmap,
                **plt_kws,)
cmap = sns.xkcd_palette(["grass green"])
s = sns.scatterplot(data=rdpg_df,
                    x="n_params",
                    y="score",
                    hue="RDPG",
                    palette=cmap,
                    **plt_kws)
leg = s.axes.get_legend()
leg.get_texts()[0].set_text("SBM: d, best of 50")
leg.get_texts()[5].set_text("SBM: K, best of 50")
leg.get_texts()[11].set_text("RDPG: directed")
plt.xlabel("# Params (GMM params for SBMs)")
plt.ylabel("Score")  # fixed: this plot charts `score`, axis was mislabeled "MSE"
plt.title(f"Drosophila old MB left, directed ({experiment}:{run})");
| notebooks/1.1-BDP-sbm-rdpg-tsbm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="1jBDe1295CJY"
# Before to begin if you are planning to train a model:
#
# (1) set the runtime with a TPU or GPU
#
# (2) Make sure a folder tmp_model_dir is not in the working directory from a previous training
# + colab={"base_uri": "https://localhost:8080/"} id="xFA7poG-88jK" outputId="85b8caa7-a12b-4817-d6f6-eec5eff3bc23"
#Print current hardware information (variable in Colab)
# gpu_info = !nvidia-smi
# NOTE(review): the line above is an IPython shell magic that only executes
# inside a notebook; `gpu_info` is undefined if this file is run as plain
# Python -- confirm it is only ever run via Jupyter/Colab.
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
    print('Not connected to a GPU')
else:
    print(gpu_info)
# + id="qL1nvR-8QJS6"
# %%capture
#@title
import os
import sys
import tensorflow.compat.v1 as tf
# + [markdown] id="IspsBhcCp9h6"
# Read from my Google Drive
# + colab={"base_uri": "https://localhost:8080/"} id="jlkN4_368tvu" outputId="30781f52-8186-4c5b-9520-b457879b8bb6"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="NY4ymVII9JJv"
# Move to "_modified-EfficientDet" folder and install requirements
# + colab={"base_uri": "https://localhost:8080/"} id="yFJbk-nr8vqS" outputId="3eb70dab-c283-4ede-f7aa-90b227dbed32"
os.chdir('drive/MyDrive/_modified-EfficientDet')
sys.path.append('.')
# !pip install -r requirements.txt
# !pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
# + colab={"base_uri": "https://localhost:8080/"} id="-A0FGxWW_Xj_" outputId="f8103aaf-eecf-4d0f-edb6-8fa508ba77b5"
# Model variant and visualization settings (all editable Colab form fields).
MODEL = 'efficientdet-d0_1-5' #@param
# Prepare image and visualization settings.
image_url = 'https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png'#@param
image_name = 'img.png' #@param
# !wget {image_url} -O img.png
import os
img_path = os.path.join(os.getcwd(), 'img.png')
min_score_thresh = 0.35 #@param
max_boxes_to_draw = 200 #@param
line_thickness = 2#@param
import PIL
# Get the largest of height/width and round to 128.
image_size = max(PIL.Image.open(img_path).size)
# + [markdown] id="S8iBnul8i9Aq"
# Visualize model graph
# + id="ufiERLoDi6IP"
# Dump the model graph and visualize it in TensorBoard.
# !python model_inspect.py --model_name={MODEL} --logdir=logs &> /dev/null
# %load_ext tensorboard
# %tensorboard --logdir logs
# + colab={"base_uri": "https://localhost:8080/"} id="_kNJiErEw8wC" outputId="2757a4bf-6c49-445f-8ca1-9f5849249e02"
# Get Trash-ICRA19 trainval data (TFrecord previously generated)
file_pattern_train = 'TrashICRA19-train-*-of-19.tfrecord'
images_per_epoch = 1200 #Only the first 4 shards are used as they have annotations for row, plastic and bio only (the other shards add other classes). Each shard has 300 samples.
print('images_per_epoch = {}'.format(images_per_epoch))
# + id="K5UyF8xwKDuL"
# Train efficientdet from scratch with backbone checkpoint.
# Map each EfficientDet variant to its EfficientNet backbone; the suffixed
# variants (e.g. d0_1-5) reuse the base variant's backbone.
backbone_name = {
    'efficientdet-d0': 'efficientnet-b0',
    'efficientdet-d0_1-5': 'efficientnet-b0', #BiFPN with depth 1, box/class nets with depth 5
    'efficientdet-d0_5-1': 'efficientnet-b0', #BiFPN with depth 5, box/class nets with depth 1
    'efficientdet-d1': 'efficientnet-b1',
    'efficientdet-d1_1-5': 'efficientnet-b1',
    'efficientdet-d2': 'efficientnet-b2',
    'efficientdet-d2_1-5': 'efficientnet-b2',
    'efficientdet-d3': 'efficientnet-b3',
    'efficientdet-d3_1-5': 'efficientnet-b3',
    'efficientdet-d4': 'efficientnet-b4',
    'efficientdet-d4_1-5': 'efficientnet-b4',
    'efficientdet-d5': 'efficientnet-b5',
    'efficientdet-d6': 'efficientnet-b6',
    'efficientdet-d7': 'efficientnet-b6',
}[MODEL]
# generating train tfrecord is large, so we skip the execution here.
import os
# Download the backbone checkpoint only if it is not already on disk.
# NOTE(review): the body of this `if` consists of notebook shell magics,
# which only execute when run via Jupyter/Colab.
if backbone_name not in os.listdir():
    # !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/{backbone_name}.tar.gz
    # !tar xf {backbone_name}.tar.gz
# !mkdir tmp_model_dir
# key option: use --backbone_ckpt rather than --ckpt.
# Don't use ema since we only train a few steps.
# !python main.py --mode=train_and_eval \
#     --train_file_pattern=tfrecord/{file_pattern_train} \
#     --val_file_pattern=tfrecord/{file_pattern_train} \
#     --model_name={MODEL} \
#     --model_dir=tmp_model_dir/{MODEL}-scratch-Trash \
#     --backbone_ckpt={backbone_name} \
#     --train_batch_size=4 \
#     --eval_batch_size=4 --eval_samples={images_per_epoch} \
#     --num_examples_per_epoch={images_per_epoch} --num_epochs=35 \
#     --hparams="num_classes=3,moving_average_decay=0,mixed_precision=true"
# + [markdown] id="n0lUxpFYdUB-"
# Visualize the performance and training records
# + id="_jW5AjDQQiJv"
# %load_ext tensorboard
# %tensorboard --logdir tmp_model_dir
# + [markdown] id="Ka6_R4zqZD_k"
# Evaluate the network latency, i.e. from first convolution to box/class predictions
# + id="p7qA8Rx_aCcd"
print(MODEL)
# !python model_inspect.py --runmode=bm --model_name={MODEL} --hparams="mixed_precision=true"
# + [markdown] id="NLmopF0LbSQx"
# REFERENCES
#
# [1] <NAME> et al., Towards More Efficient EfficientDets and Low-Light Real-Time Marine Debris Detection, https://arxiv.org/pdf/2203.07155.pdf
#
# [2] https://github.com/google/automl/blob/master/efficientdet/tutorial.ipynb
#
# [3] https://openaccess.thecvf.com/content_CVPR_2020/papers/Tan_EfficientDet_Scalable_and_Efficient_Object_Detection_CVPR_2020_paper.pdf
#
#
| _modified-EfficientDet/Tutorial-training_TrashICRA19-federico_zocco.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: geocomp-ml-gpu
# kernelspec:
# display_name: geoml
# language: python
# name: geoml
# ---
# # Semantic segmentation with U-Net
#
# - U-Net is an architecture developed to generate a mapping between images and corresponding masks for each image.
# - It was developed for 'few shot' learning: making a reliable model with few training examples.
# - Original U-net paper (https://arxiv.org/abs/1505.04597)
# # About the data
#
# The seismic data used in this exercise is from the Kerry 3D data set offshore New Zealand: https://dataunderground.org/dataset/kerry. You do not need to download the entire dataset for this exercise. The training images have already been created for you.
#
# The training data consists of a series of subsections (tiles) along inlines across the volume. The modeled fault regions were picked using Microsoft's Visual Object Tagging Tool, https://github.com/microsoft/VoTT.
# First we'll import the usual supporting libraries.
# + inputHidden=false outputHidden=false
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# + inputHidden=false outputHidden=false
import tensorflow as tf
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import (Input, BatchNormalization, Activation, Dense, Dropout,
Lambda, RepeatVector, Reshape, Conv2D, Conv2DTranspose,
MaxPooling2D, GlobalMaxPool2D, UpSampling2D, concatenate, add)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import (array_to_img, img_to_array,
load_img)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# +
def cross_entropy_balanced(y_true, y_pred):
    """Class-balanced sigmoid cross entropy for sparse binary masks.
    Weights positive (fault) pixels by the negative/positive count ratio so
    the loss is not dominated by the overwhelmingly negative background.
    Implementation from https://github.com/xinwucwp/faultSeg
    Note: tf.nn.sigmoid_cross_entropy_with_logits expects y_pred is logits,
    Keras expects probabilities, so y_pred is transformed back to logits.
    """
    # Clip away exact 0/1 before the logit transform to avoid log(0) and
    # division by zero.
    _epsilon = _to_tensor(tf.keras.backend.epsilon(), y_pred.dtype.base_dtype)
    y_pred = tf.clip_by_value(y_pred, _epsilon, 1 - _epsilon)
    y_pred = tf.math.log(y_pred/ (1 - y_pred))  # inverse sigmoid (logit)
    y_true = tf.cast(y_true, tf.float32)
    count_neg = tf.reduce_sum(1. - y_true)  # number of background pixels
    count_pos = tf.reduce_sum(y_true)       # number of positive pixels
    beta = count_neg / (count_neg + count_pos)  # fraction of negatives
    pos_weight = beta / (1 - beta)
    cost = tf.nn.weighted_cross_entropy_with_logits(logits=y_pred, labels=y_true, pos_weight=pos_weight)
    cost = tf.reduce_mean(cost * (1 - beta))
    # With no positives at all, pos_weight is undefined; define the loss as 0.
    return tf.where(tf.equal(count_pos, 0.0), 0.0, cost)
def _to_tensor(x, dtype):
    """Coerce `x` (numpy array, list, or tensor) to a tensor of type `dtype`.
    # Arguments
        x: An object to be converted.
        dtype: The destination type.
    # Returns
        A tensor of type `dtype`.
    """
    tensor = tf.convert_to_tensor(x)
    if tensor.dtype == dtype:
        return tensor
    return tf.cast(tensor, dtype)
def little_unet(input_size=(256,256,1)):
    """Build and compile a small U-Net for binary image segmentation.
    Encoder: three 2-conv stages (16/32/64 filters) with 2x2 max pooling.
    Bottleneck: two 512-filter convolutions.
    Decoder: three up-sampling stages, each concatenated with the matching
    encoder stage (skip connection), ending in a 1-channel sigmoid map.
    Compiled with Adam and the class-balanced cross entropy loss.
    """
    inputs = Input(input_size)
    # --- encoder ---
    conv1 = Conv2D(16, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(16, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2,2))(conv1)
    conv2 = Conv2D(32, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(32, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2,2))(conv2)
    conv3 = Conv2D(64, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(64, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2,2))(conv3)
    # --- bottleneck ---
    conv4 = Conv2D(512, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(conv4)
    # --- decoder with skip connections ---
    up5 = concatenate([UpSampling2D(size=(2,2))(conv4), conv3], axis=3)
    conv5 = Conv2D(64, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(up5)
    conv5 = Conv2D(64, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(conv5)
    up6 = concatenate([UpSampling2D(size=(2,2))(conv5), conv2], axis=3)
    conv6 = Conv2D(32, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(up6)
    conv6 = Conv2D(32, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(conv6)
    up7 = concatenate([UpSampling2D(size=(2,2))(conv6), conv1], axis=3)
    conv7 = Conv2D(16, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(up7)
    conv7 = Conv2D(16, (3,3), activation='relu', padding='same', kernel_initializer = 'he_normal')(conv7)
    # 1x1 convolution to a per-pixel fault probability.
    conv8 = Conv2D(1, (1,1), activation='sigmoid')(conv7)
    model = Model(inputs, conv8)
    # Fixed: the `lr` alias is deprecated in tf.keras and removed in Keras 3;
    # use `learning_rate`.
    model.compile(optimizer = Adam(learning_rate = 1e-4), loss = cross_entropy_balanced, metrics = ['accuracy'])
    return model
# -
# To generate more samples from the training dataset we have, we can use an ImageDataGenerator object. This object can be parametrized to produce more samples for training automatically.
# + inputHidden=false outputHidden=false
# Augmentation parameters for ImageDataGenerator: small geometric jitter
# (rotation/shift/shear/zoom) plus horizontal flips; gaps are filled with
# the nearest pixel value.
aug_args = dict(rotation_range=0.2,
                width_shift_range=0.05,
                height_shift_range=0.05,
                shear_range=0.05,
                zoom_range=0.05,
                horizontal_flip=True,
                fill_mode='nearest')
# +
def scaleData(img, mask):
    """Normalize pixel intensities and binarize the segmentation mask.
    `img` is rescaled from [0, 255] into [0, 1]; `mask` becomes boolean,
    True wherever the original value exceeds 127.
    """
    scaled_img = img / 255
    binary_mask = mask > 127
    return scaled_img, binary_mask
def trainGenerator(train_path, image_folder, mask_folder, aug_dict, seed = 1):
    """Yield augmented (image, mask) training batches indefinitely.
    Streams 256x256 grayscale batches of 2 from `train_path/image_folder`
    and `train_path/mask_folder`, applying the augmentations in `aug_dict`
    and normalizing via scaleData.
    """
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        train_path,
        classes = [image_folder],
        class_mode = None,
        color_mode = "grayscale",
        target_size = (256,256),
        batch_size = 2,
        seed = seed)
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes = [mask_folder],
        class_mode = None,
        color_mode = "grayscale",
        target_size = (256,256),
        batch_size = 2,
        seed = seed)
    # The shared seed keeps both generators in lockstep, so every image is
    # paired with the identically-augmented mask.
    train_generator = zip(image_generator, mask_generator)
    for img, mask in train_generator:
        img, mask = scaleData(img, mask)
        yield img, mask
# -
# We initialize and store the data generation object that will feed the network.
data_gen = trainGenerator('../data/faultpick/', 'seismic', 'fault', aug_args)
# Initialize the network.
# + inputHidden=false outputHidden=false
model = little_unet()
# Persist only the best (lowest training loss) weights seen so far.
model_checkpoint = ModelCheckpoint('seis_unet.hdf5', monitor='loss',
                                   verbose=1, save_best_only=True,
                                   save_weights_only=True)
# -
# Train the network.
# + inputHidden=false outputHidden=false
model.fit(data_gen, steps_per_epoch=50, epochs=5, callbacks=[model_checkpoint])
# + [markdown] tags=["exe"]
# ### EXERCISE
#
# Write a workflow to make predictions from the images in `'../data/faultpick/test/'` using the model trained.
# + inputHidden=false outputHidden=false tags=["hide"]
# Load a held-out section and resize it to the network's 256x256 input.
img = Image.open('../data/faultpick/test/section_166_1_inline.png')
img = img.resize((256,256))
img
# +
# Keep a single channel and normalize to [0, 1] to match training input.
img_arr = np.array(img, dtype=float)[:,:,0]
img_arr /= 255.0
plt.imshow(img_arr)
# + inputHidden=false outputHidden=false tags=["hide"]
# Add batch and channel dimensions expected by the model: (1, 256, 256, 1).
prediction = model.predict(img_arr.reshape(1, 256, 256, 1))
# + inputHidden=false outputHidden=false tags=["hide"]
prediction = prediction.reshape((256,256))
# + tags=["hide"]
plt.imshow(prediction)
# -
| master/Semantic_segmentation_with_U-Net.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Tce3stUlHN0L"
# ##### Copyright 2020 The TensorFlow IO Authors.
# + cellView="form" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="qFdPvlXBOdUN"
# # Audio Data Preparation and Augmentation
# + [markdown] id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/io/tutorials/audio"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/io/blob/master/docs/tutorials/audio.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/io/blob/master/docs/tutorials/audio.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/io/docs/tutorials/audio.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="xHxb-dlhMIzW"
# ## Overview
#
# One of the biggest challenges in Automatic Speech Recognition is the preparation and augmentation of audio data. Audio data analysis could be in the time or frequency domain, which adds additional complexity compared with other data sources such as images.
#
# As a part of the TensorFlow ecosystem, `tensorflow-io` package provides quite a few useful audio-related APIs that helps easing the preparation and augmentation of audio data.
# + [markdown] id="MUXex9ctTuDB"
# ## Setup
# + [markdown] id="upgCc3gXybsA"
# ### Install required Packages, and restart runtime
# + id="uUDYyMZRfkX4"
# !pip install tensorflow-io
# + [markdown] id="J0ZKhA6s0Pjp"
# ## Usage
# + [markdown] id="yZmI7l_GykcW"
# ### Read an Audio File
#
# In TensorFlow IO, class `tfio.audio.AudioIOTensor` allows you to read an audio file into a lazy-loaded `IOTensor`:
# + id="nS3eTBvjt-O5"
import tensorflow as tf
import tensorflow_io as tfio
# Lazily open a FLAC clip from a public GCS bucket; only metadata
# (shape, dtype, sample rate) is read at this point.
audio = tfio.audio.AudioIOTensor('gs://cloud-samples-tests/speech/brooklyn.flac')
print(audio)
# + [markdown] id="z9GCyPWNuOm7"
# In the above example, the Flac file `brooklyn.flac` is from a publicly accessible audio clip in [google cloud](https://cloud.google.com/speech-to-text/docs/quickstart-gcloud).
#
# The GCS address `gs://cloud-samples-tests/speech/brooklyn.flac` are used directly because GCS is a supported file system in TensorFlow. In addition to `Flac` format, `WAV`, `Ogg`, `MP3`, and `MP4A` are also supported by `AudioIOTensor` with automatic file format detection.
#
# `AudioIOTensor` is lazy-loaded so only shape, dtype, and sample rate are shown initially. The shape of the `AudioIOTensor` is represented as `[samples, channels]`, which means the audio clip you loaded is mono channel with `28979` samples in `int16`.
# + [markdown] id="IF_kYz_o2DH4"
# The content of the audio clip will only be read as needed, either by converting `AudioIOTensor` to `Tensor` through `to_tensor()`, or though slicing. Slicing is especially useful when only a small portion of a large audio clip is needed:
# + id="wtM_ixN724xb"
# Slicing only reads the requested samples from the lazy tensor.
audio_slice = audio[100:]
# remove last dimension (the mono channel axis)
audio_tensor = tf.squeeze(audio_slice, axis=[-1])
print(audio_tensor)
# + [markdown] id="IGnbXuVnSo8T"
# The audio can be played through:
# + id="0rLbVxuFSvVO"
from IPython.display import Audio
Audio(audio_tensor.numpy(), rate=audio.rate.numpy())
# + [markdown] id="fmt4cn304IbG"
# It is more convenient to convert the tensor into float numbers and show the audio clip in a graph:
# + id="ZpwajOeR4UMU"
import matplotlib.pyplot as plt
# Scale the int16 samples into [-1, 1) floats before plotting.
tensor = tf.cast(audio_tensor, tf.float32) / 32768.0
plt.figure()
plt.plot(tensor.numpy())
# + [markdown] id="86qE8BPl5rcA"
# ### Trim the noise
#
# Sometimes it makes sense to trim the noise from the audio, which could be done through the API `tfio.experimental.audio.trim`. Returned from the API is a pair of `[start, stop]` positions of the segment:
# + id="eEa0Z5U26Ep3"
# trim returns the [start, stop] sample positions of the non-silent segment.
position = tfio.experimental.audio.trim(tensor, axis=0, epsilon=0.1)
print(position)
start = position[0]
stop = position[1]
print(start, stop)
# Keep only the non-silent portion of the waveform.
processed = tensor[start:stop]
plt.figure()
plt.plot(processed.numpy())
# + [markdown] id="ineBzDeu-lTh"
# ### Fade In and Fade Out
#
# One useful audio engineering technique is fade, which gradually increases or decreases audio signals. This can be done through `tfio.experimental.audio.fade`. `tfio.experimental.audio.fade` supports different shapes of fades such as `linear`, `logarithmic`, or `exponential`:
# + id="LfZo0XaaAaeM"
# Fade lengths are in samples, not seconds.
fade = tfio.experimental.audio.fade(
    processed, fade_in=1000, fade_out=2000, mode="logarithmic")
plt.figure()
plt.plot(fade.numpy())
# + [markdown] id="7rhLvOSZB0k0"
# ### Spectrogram
#
# Advanced audio processing often works on frequency changes over time. In `tensorflow-io` a waveform can be converted to spectrogram through `tfio.experimental.audio.spectrogram`:
# + id="UyFMBK-LDDnN"
# Convert to spectrogram (log scale only for visualization).
spectrogram = tfio.experimental.audio.spectrogram(
    fade, nfft=512, window=512, stride=256)
plt.figure()
plt.imshow(tf.math.log(spectrogram).numpy())
# + [markdown] id="pZ92HnbJGHBS"
# Additional transformations to different scales are also possible:
# + id="ZgyedQdxGM2y"
# Convert to mel-spectrogram
mel_spectrogram = tfio.experimental.audio.melscale(
    spectrogram, rate=16000, mels=128, fmin=0, fmax=8000)
plt.figure()
plt.imshow(tf.math.log(mel_spectrogram).numpy())
# Convert to db scale mel-spectrogram
dbscale_mel_spectrogram = tfio.experimental.audio.dbscale(
    mel_spectrogram, top_db=80)
plt.figure()
plt.imshow(dbscale_mel_spectrogram.numpy())
# + [markdown] id="nXd776xNIr_I"
# ### SpecAugment
#
# In addition to the above mentioned data preparation and augmentation APIs, `tensorflow-io` package also provides advanced spectrogram augmentations, most notably Frequency and Time Masking discussed in [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition (Park et al., 2019)](https://arxiv.org/pdf/1904.08779.pdf).
# + [markdown] id="dajm7k-2J5l7"
# #### Frequency Masking
#
# In frequency masking, frequency channels `[f0, f0 + f)` are masked where `f` is chosen from a uniform distribution from `0` to the frequency mask parameter `F`, and `f0` is chosen from `(0, ν − f)` where `ν` is the number of frequency channels.
# + id="kLEdfkkoK27A"
# Freq masking: zero out a random band of up to `param` frequency channels
# (SpecAugment-style augmentation).
freq_mask = tfio.experimental.audio.freq_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(freq_mask.numpy())
# + [markdown] id="_luycpCWLe5l"
# #### Time Masking
#
# In time masking, `t` consecutive time steps `[t0, t0 + t)` are masked where `t` is chosen from a uniform distribution from `0` to the time mask parameter `T`, and `t0` is chosen from `[0, τ − t)` where `τ` is the time steps.
# + id="G1ie8J3wMMEI"
# Time masking: zero out a random run of up to `param` time steps.
time_mask = tfio.experimental.audio.time_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(time_mask.numpy())
| docs/tutorials/audio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Libraries
# +
import pandas as pd
import numpy as np
# NOTE(review): load_boston was removed in scikit-learn 1.2 for ethical
# reasons; this notebook requires an older sklearn -- confirm pinned version.
from sklearn.datasets import load_boston
from sklearn_pandas.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import (RandomForestClassifier
                              ,GradientBoostingClassifier)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
# NOTE(review): sklearn.ensemble.partial_dependence was deprecated in 0.21
# and removed in 0.24; modern releases expose
# sklearn.inspection.partial_dependence instead -- confirm sklearn version.
from sklearn.ensemble import partial_dependence
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
# PDPs
from pdpbox import pdp
# ICE plots
from pycebox.ice import ice, ice_plot
# -
# -
# # Boston Housing
#
# Get data.
boston = load_boston()
print(boston.DESCR)
# Set up features and target.
X = pd.DataFrame(boston.data, columns=boston.feature_names)
X.head()
X.info()
y = pd.DataFrame(boston.target, columns=['MEDV'])
#
# # Pipeline
# Not going to do transformations off the bat, but set up just in case.
numeric_features = list(X.select_dtypes(exclude='object'))
numeric_transformer = Pipeline(steps=[('keeper', None)])
preprocessor = ColumnTransformer(transformers=[('num', numeric_transformer
, numeric_features)])
regressors = [RandomForestRegressor(n_estimators=100, max_depth=5)
,GradientBoostingRegressor(n_estimators=100)
]
X_train, X_test, y_train, y_test = train_test_split(X, y ,random_state=10)
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
# +
cv_list = []
for regressor in regressors:
clf1 = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', regressor)])
clf1.fit(X_train, y_train)
print(clf1.score(X_test, y_test))
#cv_scores = cross_val_score(clf1, X_train, y_train, cv=5)
#cv_list.append(cv_scores)
#one_hot_names = list(clf1.named_steps['preprocessor'].transformers_[1][1].named_steps['onehot'].get_feature_names())
#final_feats = numeric_features + one_hot_names
print(regressor)
print('\n')
#print('Training Metrics')
#pitch_functions.calc_acc_and_f1_score(y_train, clf1.predict(X_train))
#print('\n')
#print('Testing Metrics')
#pitch_functions.calc_acc_and_f1_score(y_test, clf1.predict(X_test))
#print('\n')
#print('Average Cross Val Score, k=5')
#print('{:.3}'.format(np.mean(cv_scores)))
# -
# Looks like the gradient boosting model (labeled "XGBoost" below, though it is sklearn's GradientBoostingRegressor) performed a bit better in training. Let's look at a partial dependence plot.
# ### Partial Dependency Plot
# +
nox = pdp.pdp_isolate(clf1, X, X.columns, 'NOX', 100)
pdp_nox = pdp.pdp_plot(nox,'NOX', center=True)
# Save the plot to PNG
pdp_nox[0].savefig("pdp_boston_nox.png")
# -
# Appears that home prices are somewhat immune to NOX levels, until around .65 parts per 10 million, at which the average prediction drops about -2.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Individual Conditional Expection (ICE) plots
# -
X_train.columns
xgb = GradientBoostingRegressor(n_estimators=100)
xgb.fit(X_train, y_train)
# ICE Plot: NOX
# +
# Create dataframe input for ICE plot
ff = ice(data=X_train, column='NOX', predict=xgb.predict)
fig, ax = plt.subplots(figsize=(8,10))
ice_plot(ff, c='dimgray', linewidth=0.2, plot_pdp=True
,pdp_kwargs={'c': 'blue', 'linewidth': 2}, ax=ax, alpha=0.7 )
ax.set_title('ICE Plot: NOX vs MEDV (XGBoost)')
ax.set_ylabel("MEDV")
ax.set_xlabel("NOX")
fig.tight_layout()
# Save that figure
fig.savefig('ice_boston_nox.png')
# -
# ICE Plot: DIS
# +
# Create dataframe input for ICE plot
ff = ice(data=X_train, column='DIS', predict=xgb.predict)
fig, ax = plt.subplots(figsize=(8,10))
ice_plot(ff, c='dimgray', linewidth=0.2, plot_pdp=True
,pdp_kwargs={'c': 'blue', 'linewidth': 2}, ax=ax, alpha=0.7 )
ax.set_title('ICE Plot: DIS vs MEDV (XGBoost)')
ax.set_ylabel("MEDV")
ax.set_xlabel("DIS")
fig.tight_layout()
# Save that figure
fig.savefig('ice_boston_dis.png')
# -
# ---
# # Cars dataset
#
# Got this version from Kaggle:
# https://www.kaggle.com/uciml/autompg-dataset/downloads/autompg-dataset.zip/3
# Load the Auto MPG dataset.
autos = pd.read_csv('data/auto-mpg.csv')
autos.head()
# What is `origin`?
autos['origin'].describe()
autos.info()
# Noticed that `horsepower` is object (it contains '?' placeholders). Fix that:
autos['horsepower'].value_counts().head()
# Coerce to numeric; non-numeric placeholders like '?' become NaN.
# (The previous astype(float, errors='ignore') call was a no-op: with '?'
# present the cast fails and the original column is returned unchanged.)
autos['horsepower'] = pd.to_numeric(autos['horsepower'], errors='coerce')
autos['horsepower'].value_counts(sort=True)
autos.info()
# Impute missing horsepower with the median of the observed values
# (Series.median skips NaN by default — same result as the manual filter).
horse_median = autos['horsepower'].median()
autos['horsepower'].fillna(value=horse_median, inplace=True)
autos.info()
autos['origin'].value_counts()
# There's only 3. Anecdotal evidence from the internet says they are:
#
# origin | origin country
# ---|---
# 1 | US
# 2 | Europe
# 3 | Japan
#
# Let's inspect/test that visually.
autos.loc[autos['origin']==1,['car name']].head(10)
autos.loc[autos['origin']==2,['car name']].head(10)
autos.loc[autos['origin']==3,['car name']].head(10)
# Seems legit. Let's map in country names to make the data more readable.
origin_map = {1: 'US', 2: 'Europe', 3: 'Japan' }
autos['origin_nm'] = autos['origin'].map(origin_map)
autos.tail(10)
# Ok, let's do a boxplot by Origin Name.
fig, ax = plt.subplots(figsize=(10,8))
sns.boxplot(data=autos, x='origin_nm', y='mpg', ax=ax)
fig.suptitle('Miles Per Gallon Distribution by Origin')
ax.set_xlabel('Origin')
ax.set_ylabel('MPG')
fig.savefig('mpg_boxplot.png')
plt.show()
# Let's fit a Random Forest.
predictors = autos.drop(['car name','origin','mpg'],axis=1).copy()
predictors.head()
target = autos[['mpg']].copy()
target.head()
type(target)
cats = predictors.select_dtypes(include='object')
cats.head()
ohe = OneHotEncoder(sparse=False)
ohe.fit(cats)
cats_ohe = ohe.transform(cats)
nms = ohe.get_feature_names([''])
origin_ohe = pd.DataFrame(cats_ohe, columns=nms)
origin_ohe.head()
predictors.head()
predictors = predictors.drop(['origin_nm'], axis=1).copy()
predictors.head()
predictors = pd.concat([predictors,origin_ohe], axis=1)
predictors.head()
rf = RandomForestRegressor()
X_train, X_test, y_train, y_test = train_test_split(predictors, target, random_state=10)
rf.fit(X_train, y_train)
rf.score(X_train, y_train)
# +
# Create dataframe input for ICE plot
auto_ice = ice(data=X_train, column='acceleration', predict=rf.predict)
fig, ax = plt.subplots(figsize=(8,10))
ice_plot(auto_ice, c='dimgray', linewidth=0.2, plot_pdp=True
,pdp_kwargs={'c': 'blue', 'linewidth': 2}, ax=ax, alpha=0.7 )
ax.set_title('ICE Plot: Acceleration vs MPG (Random Forest Regressor)')
ax.set_ylabel("MPG")
ax.set_xlabel("Acceleration")
fig.tight_layout()
# Save that figure
fig.savefig('ice_auto_accel.png')
# -
# How about if we look at by Origin?
X_train.columns
# #### Make flags for countries so can do pure binary comparisons
predictors.columns
is_japan = predictors.index[predictors['_Japan']==1].tolist()
is_us = predictors.index[predictors['_US']==1].tolist()
is_eur = predictors.index[predictors['_Europe']==1].tolist()
len(is_japan) + len(is_us) + len(is_eur)
predictors.loc[set(predictors.index)-set(is_japan)].head()
# US vs Europe
X_train, X_test, y_train, y_test = train_test_split(predictors.loc[set(predictors.index)-set(is_japan)]
, target.loc[set(predictors.index)-set(is_japan)], random_state=10)
rf.fit(X_train, y_train)
rf.score(X_train, y_train)
# +
# Create dataframe input for ICE plot
auto_ice = ice(data=X_train, column='_US', predict=rf.predict)
fig, ax = plt.subplots(figsize=(8,10))
ice_plot(auto_ice, c='dimgray', linewidth=0.2, plot_pdp=True
,pdp_kwargs={'c': 'blue', 'linewidth': 2}, ax=ax, alpha=0.7, plot_points=True )
ax.set_title('ICE Plot: Origin=US vs MPG (Random Forest Regressor)')
ax.set_ylabel("MPG")
ax.set_xlabel("Origin = US \n vs Europe" )
fig.tight_layout()
# Save that figure
fig.savefig('ice_auto_US.png')
# -
# Japan vs Europe
X_train, X_test, y_train, y_test = train_test_split(predictors.loc[set(predictors.index)-set(is_us)]
, target.loc[set(predictors.index)-set(is_us)], random_state=10)
rf.fit(X_train, y_train)
rf.score(X_train, y_train)
# +
# Create dataframe input for ICE plot
auto_ice = ice(data=X_train, column='_Japan', predict=rf.predict)
fig, ax = plt.subplots(figsize=(8,10))
ice_plot(auto_ice, c='dimgray', linewidth=0.2, plot_pdp=True
,pdp_kwargs={'c': 'blue', 'linewidth': 2}, ax=ax, alpha=0.7, plot_points=True )
ax.set_title('ICE Plot: Origin=Japan vs MPG (Random Forest Regressor)')
ax.set_ylabel("MPG")
ax.set_xlabel("Origin = Japan \n vs Europe")
fig.tight_layout()
# Save that figure
fig.savefig('ice_auto_japan.png')
# -
| blog_4_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (dl)
# language: python
# name: dl
# ---
# # Train on Full Dataset
# Now that we've created a dataset, let's try training on it.
from fastai.core import *
from fastai.vision import *
from fastai.vision.models import resnet18, resnet101
# +
import torch
from torch import nn
from torch.nn import functional as F
"""
This file contains helper functions for building the model and for loading model parameters.
These helper functions are built to mirror those in the official TensorFlow implementation.
"""
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
########################################################################
############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ###############
########################################################################
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
'num_classes', 'width_coefficient', 'depth_coefficient',
'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
# Change namedtuple defaults
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: f(x) = x * sigmoid(x).

    Only the input tensor is saved for the backward pass (the sigmoid is
    recomputed), trading a little compute for lower activation memory.
    """

    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the deprecated ctx.saved_variables.
        i = ctx.saved_tensors[0]
        sigmoid_i = torch.sigmoid(i)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class Swish(nn.Module):
    """Plain Swish activation, f(x) = x * sigmoid(x) (export-friendly variant)."""

    def forward(self, x):
        # Identical to x * torch.sigmoid(x); written with the factors swapped.
        return torch.sigmoid(x) * x
def round_filters(filters, global_params):
    """Scale `filters` by the width multiplier and round to a multiple of the divisor.

    The result is never below `min_depth` (or the divisor when min_depth is
    unset) and never rounds the scaled value down by more than 10%.
    """
    width_mult = global_params.width_coefficient
    if not width_mult:
        # No width scaling configured: keep the filter count as-is.
        return filters
    divisor = global_params.depth_divisor
    floor = global_params.min_depth or divisor
    scaled = filters * width_mult
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    # Prevent rounding from shrinking the channel count by more than 10%.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def round_repeats(repeats, global_params):
    """Scale the block repeat count by the depth multiplier, rounding up."""
    depth_mult = global_params.depth_coefficient
    # Without a depth multiplier the repeat count is unchanged.
    return repeats if not depth_mult else int(math.ceil(depth_mult * repeats))
def drop_connect(inputs, p, training):
    """Randomly zero whole samples with probability `p` (stochastic depth).

    Surviving samples are rescaled by 1 / (1 - p) so the expectation is
    preserved. A no-op outside of training.
    """
    if not training:
        return inputs
    keep_prob = 1 - p
    batch = inputs.shape[0]
    # One Bernoulli(keep_prob) draw per sample, broadcast across C/H/W:
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
    noise = keep_prob + torch.rand([batch, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    mask = torch.floor(noise)
    return inputs / keep_prob * mask
def get_same_padding_conv2d(image_size=None):
    """Pick the "same"-padding Conv2d flavor.

    Static padding (fixed image size) is necessary for ONNX export of models;
    dynamic padding recomputes the pad per input and handles any size.
    """
    if image_size is None:
        return Conv2dDynamicSamePadding
    return partial(Conv2dStaticSamePadding, image_size=image_size)
class Conv2dDynamicSamePadding(nn.Conv2d):
    """Conv2d with TensorFlow-style "SAME" padding, computed per input size."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        # Padding is applied manually in forward(), so the base conv uses padding=0.
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        # Normalize stride to a 2-element sequence.
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        # Total padding needed so the output keeps ceil(input / stride) size.
        extra_h = max((out_h - 1) * self.stride[0] + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * self.stride[1] + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h > 0 or extra_w > 0:
            # Asymmetric split: the extra pixel goes to the bottom/right, as in TF.
            x = F.pad(x, [extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
""" 2D Convolutions like TensorFlow, for a fixed image size"""
def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
# Calculate padding based on image size and save it
assert image_size is not None
ih, iw = image_size if type(image_size) == list else [image_size, image_size]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
else:
self.static_padding = Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class Identity(nn.Module):
    """A pass-through module: forward(x) returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input
########################################################################
############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ##############
########################################################################
def efficientnet_params(model_name):
    """Return (width_mult, depth_mult, input_resolution, dropout_rate) for `model_name`."""
    # Columns: width multiplier, depth multiplier, input resolution, dropout.
    coefficients = {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
    return coefficients[model_name]
class BlockDecoder(object):
    """Encode/decode BlockArgs to/from the compact string notation used by the
    official TensorFlow repository, e.g. 'r1_k3_s11_e1_i32_o16_se0.25'."""

    @staticmethod
    def _decode_block_string(block_string):
        """ Gets a block through a string notation of arguments. """
        assert isinstance(block_string, str)

        ops = block_string.split('_')
        options = {}
        for op in ops:
            # Split each token into its letter key and numeric value.
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

        # Check stride
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))

        return BlockArgs(
            kernel_size=int(options['k']),
            num_repeat=int(options['r']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            expand_ratio=int(options['e']),
            id_skip=('noskip' not in block_string),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=[int(options['s'][0])])

    @staticmethod
    def _encode_block_string(block):
        """Encodes a block to a string.

        Bug fixes vs. the original:
        - BlockArgs has no `strides` field (it is `stride`, stored as a
          one-element list by decode, or a plain int after _replace), so
          `block.strides[0]` raised AttributeError.
        - `0 < block.se_ratio <= 1` raised TypeError when se_ratio is None.
        """
        stride = block.stride if isinstance(block.stride, (list, tuple)) else [block.stride]
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            's%d%d' % (stride[0], stride[-1]),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """
        Decodes a list of string notations to specify blocks inside the network.
        :param string_list: a list of strings, each string is a notation of block
        :return: a list of BlockArgs namedtuples of block args
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """
        Encodes a list of BlockArgs to a list of strings.
        :param blocks_args: a list of BlockArgs namedtuples of block args
        :return: a list of strings, each string is a notation of block
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings
def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
drop_connect_rate=0.2, image_size=None, num_classes=1000):
""" Creates a efficientnet model. """
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
'r1_k3_s11_e6_i192_o320_se0.25',
]
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=dropout_rate,
drop_connect_rate=drop_connect_rate,
# data_format='channels_last', # removed, this is always true in PyTorch
num_classes=num_classes,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None,
image_size=image_size,
)
return blocks_args, global_params
def get_model_params(model_name, override_params):
    """Resolve `model_name` into (blocks_args, global_params), applying any overrides."""
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: %s' % model_name)
    w, d, s, p = efficientnet_params(model_name)
    # note: all models have drop connect rate = 0.2
    blocks_args, global_params = efficientnet(
        width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
    if override_params:
        # ValueError will be raised here if override_params has fields not included in global_params.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
url_map = {
'efficientnet-b0': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b0-355c32eb.pth',
'efficientnet-b1': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b1-f1951068.pth',
'efficientnet-b2': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b2-8bb594d6.pth',
'efficientnet-b3': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b3-5fb5a3c3.pth',
'efficientnet-b4': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b4-6ed6700e.pth',
'efficientnet-b5': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b5-b6417697.pth',
'efficientnet-b6': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b6-c76e70fd.pth',
'efficientnet-b7': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b7-dcc49843.pth',
}
url_map_advprop = {
'efficientnet-b0': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b0-b64d5a18.pth',
'efficientnet-b1': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b1-0f3ce85a.pth',
'efficientnet-b2': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b2-6e9d97e5.pth',
'efficientnet-b3': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b3-cdd7c0f4.pth',
'efficientnet-b4': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b4-44fb3a87.pth',
'efficientnet-b5': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b5-86493f6b.pth',
'efficientnet-b6': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b6-ac80338e.pth',
'efficientnet-b7': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b7-4652b6dd.pth',
'efficientnet-b8': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b8-22a8fe65.pth',
}
def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):
    """Download (cached after first use) and load pretrained weights into `model`.

    When `load_fc` is False the final classifier weights are dropped, e.g. for
    fine-tuning with a different number of classes.
    """
    # AutoAugment or Advprop checkpoints (trained with different preprocessing).
    urls = url_map_advprop if advprop else url_map
    state_dict = model_zoo.load_url(urls[model_name])
    if not load_fc:
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        res = model.load_state_dict(state_dict, strict=False)
        assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    else:
        model.load_state_dict(state_dict)
    print('Loaded pretrained weights for {}'.format(model_name))
class MBConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
if self._block_args.expand_ratio != 1:
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
kernel_size=k, stride=s, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
if self.has_se:
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Output phase
final_oup = self._block_args.output_filters
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = self._swish(self._bn0(self._expand_conv(inputs)))
x = self._swish(self._bn1(self._depthwise_conv(x)))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export)"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class EfficientNet(nn.Module):
"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
model = EfficientNet.from_pretrained('efficientnet-b0')
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Stem
in_channels = 3 # rgb
out_channels = round_filters(32, self._global_params) # number of output channels
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(MBConvBlock(block_args, self._global_params))
if block_args.num_repeat > 1:
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(MBConvBlock(block_args, self._global_params))
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Final linear layer
self._avg_pooling = nn.AdaptiveAvgPool2d(1)
self._dropout = nn.Dropout(self._global_params.dropout_rate)
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
self._swish = MemoryEfficientSwish()
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export)"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
for block in self._blocks:
block.set_swish(memory_efficient)
def extract_features(self, inputs):
""" Returns output of the final convolution layer """
# Stem
x = self._swish(self._bn0(self._conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
x = self._swish(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs):
""" Calls extract_features to extract features, applies final linear layer, and returns logits. """
bs = inputs.size(0)
# Convolution layers
x = self.extract_features(inputs)
# Pooling and final linear layer
x = self._avg_pooling(x)
x = x.view(bs, -1)
x = self._dropout(x)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, override_params=None):
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
return cls(blocks_args, global_params)
@classmethod
def from_pretrained(cls, model_name, advprop=False, num_classes=1000, in_channels=3):
model = cls.from_name(model_name, override_params={'num_classes': num_classes})
load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000), advprop=advprop)
if in_channels != 3:
Conv2d = get_same_padding_conv2d(image_size = model._global_params.image_size)
out_channels = round_filters(32, model._global_params)
model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
return model
@classmethod
def get_image_size(cls, model_name):
cls._check_model_name_is_valid(model_name)
_, _, res, _ = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name):
""" Validates model name. """
valid_models = ['efficientnet-b'+str(i) for i in range(9)]
if model_name not in valid_models:
raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
# -
path = Path('../data/cropped_faces')
src = ImageList.from_folder(path).split_by_folder(train='train', valid='valid')
def get_data(bs,size):
data = (src.label_from_re('([A-Z]+).jpg$')
.transform(get_transforms(max_warp=0, max_zoom=1), size=size, resize_method=ResizeMethod.PAD, padding_mode="zeros")
.databunch(bs=bs).normalize(imagenet_stats))
return data
bs, sz = 32, 256
data = get_data(bs, sz)
data.show_batch(rows=4, figsize=(10,7))
# Fine-tune a pretrained EfficientNet-B1 with a fresh head sized to the data.
model = EfficientNet.from_pretrained('efficientnet-b1', num_classes=data.c)
learner = Learner(data, model, metrics=[accuracy])
learner.fit_one_cycle(10, 1e-3)
learner.recorder.plot_losses()
# BUG FIX: state_dict must be *called* — the original passed the bound method
# itself, so the checkpoint pickled a method object instead of the weights.
torch.save(learner.model.state_dict(), 'ten_epochs.pth')
| face_detection/14_TrainOnFullDataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Variable Aggregation and Feature Engineering
# +
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook as tqdm
from datetime import datetime
from constants import DATA_DIR, DF_FEATURES
# -
pd.set_option("display.max_columns",30)
pd.set_option("display.max_rows",100)
# %load_ext Cython
# ### Dataframe for the Baseline Model
# Mean of past N-1 games for each statistic when predicting N-th game performance
def generate_baseline(df):
baseline = {'Date':[], 'Name':[], 'FPTS':[], 'PTS':[], '3P':[], 'AST':[],
'TRB':[], 'STL':[], 'BLK':[], 'TOV':[], 'DD':[], 'TD':[]}
for i in tqdm(range(df.shape[0])):
date = df.loc[i,'Date']
name = df.loc[i,'Name']
df_name = df.loc[df['Name']==name].reset_index(drop=True)
index = df_name.loc[df_name['Date']==date].index[0]
# Check if there is past statistics available
# Same baseline as DraftKings official data where season average is taken
if index >= 1:
df_past = df_name[0:index].reset_index(drop=True)
for key in baseline.keys():
if key in ['Date', 'Name', 'FPTS']:
baseline[key].append(df_name.loc[index, key])
else:
baseline[key].append(df_past[key].mean())
return pd.DataFrame(baseline)
# ### Additional Statistics and Recency Effect
def draw_weights():
weights_dic = {}
weighting = ['sqrt_0.5','linear_1.0', 'quad_2.0']
for key in weighting:
weights = np.array([np.power(i, float(key[-3:])) for i in range(1,11)])
weights = weights/weights.sum()
weights_dic[key[:-4]] = weights
sns.set_style("darkgrid")
for key in weights_dic.keys():
plt.plot(weights_dic[key])
plt.xlabel('n-th game', fontsize=12)
plt.ylabel('Weight', fontsize=12)
plt.xticks([i for i in range(0,10)], [i for i in range(1,11)])
plt.show()
draw_weights()
# + language="cython"
# def calculate_weighted_mean(list weights, values):
# n = len(weights)
# weighted_sum = [weights[i] * values[i] for i in range(n)]
# weighted_mean = sum(weighted_sum) / sum(weights)
#
# return weighted_mean
# -
def generate_features(df, weighting):
    """Build the feature dataset from a weighted mean of each player's
    previous 10 games.

    Parameters
    ----------
    df : pd.DataFrame
        Per-game box scores for one season, one row per player-game.
    weighting : str
        Recency-weighting scheme: 'linear', 'quad' or 'sqrt'.

    Returns
    -------
    pd.DataFrame with one row per player-game that has >= 10 prior games.

    Raises
    ------
    ValueError
        If ``weighting`` is not one of the supported schemes.
    """
    # The weights depend only on `weighting`, so compute them once up front
    # (the original computed them inside the row loop) and fail fast on an
    # unknown scheme instead of hitting a NameError deep inside the loop.
    if weighting == 'linear':
        weights = [i for i in range(1, 11)]
    elif weighting == 'quad':
        weights = [i**2 for i in range(1, 11)]
    elif weighting == 'sqrt':
        weights = [i**(1/2) for i in range(1, 11)]
    else:
        raise ValueError("weighting must be 'linear', 'quad' or 'sqrt', got %r" % (weighting,))
    data = {'Date':[], 'Name':[], 'FPTS':[],
            # New features
            'Rest':[], 'Value':[], 'FPTS_std':[],
            # Basic 9 Variables
            'PTS':[], '3P':[], 'AST':[], 'TRB':[], 'STL':[], 'BLK':[], 'TOV':[], 'DD':[], 'TD':[],
            # Additional Variables from Basketball-Reference.com
            'MP':[], 'FT':[], 'FTA':[], 'FGA':[], '3PA':[], 'DRB':[], 'ORB':[],
            # Advanced Statistics from Basketball-Reference.com
            'USG_perc':[], 'DRtg':[], 'ORtg':[], 'AST_perc':[], 'DRB_perc':[], 'ORB_perc':[],
            'BLK_perc':[], 'TOV_perc':[], 'STL_perc':[], 'eFG_perc':[], 'FG_perc':[], '3P_perc':[], 'FT_perc':[]
           }
    for i in tqdm(range(df.shape[0])):
        date = df.loc[i,'Date']
        name = df.loc[i,'Name']
        df_name = df.loc[df['Name']==name].reset_index(drop=True)
        index = df_name.loc[df_name['Date']==date].index[0]
        # Generate features only when a full 10-game history exists.
        if index >= 10:
            df_past = df_name[index-10:index].reset_index(drop=True)
            # Days of rest between the current game and the previous one.
            current = datetime.strptime(str(df_name.loc[index, 'Date']), '%Y%m%d')
            previous = datetime.strptime(str(df_past.loc[df_past.shape[0]-1, 'Date']), '%Y%m%d')
            rest = current - previous
            data['Rest'].append(rest.days)
            for key in data.keys():
                if key in ['Date', 'Name', 'FPTS']:
                    # Identity columns / target come from the current game.
                    data[key].append(df_name.loc[index, key])
                elif key == 'FPTS_std':
                    # Volatility of fantasy points over the 10-game window.
                    data[key].append(df_past['FPTS'].std())
                elif key != 'Rest':
                    # Weighted (higher towards the most recent game) mean of
                    # the past-10-games column.  NOTE(review): this assumes
                    # df has a 'Value' column too -- confirm upstream.
                    weighted_mean = calculate_weighted_mean(weights, df_past[key])
                    data[key].append(weighted_mean)
    return pd.DataFrame(data)
# ### Add Roster Availability
def add_roster_info(df):
    """Annotate ``df`` in place with roster-availability counts.

    For every (Date, Team) pair, 'Rota_All' is the number of rostered
    players that day, and 'Rota_Pos' the number of rostered players sharing
    the row's position flag.  Mutates ``df`` and returns None.
    """
    # Initialize both columns to 0 so rows with no roster match keep a value.
    df['Rota_All'] = [0 for i in range(df.shape[0])]
    df['Rota_Pos'] = [0 for i in range(df.shape[0])]
    for date in tqdm(list(set(df['Date']))):
        for team in list(set(df['Team'])):
            # All players on this team playing on this date.
            df_rota = df.loc[(df['Date']==date) & (df['Team']==team)]
            if df_rota.shape[0] != 0:
                rota_all = df_rota.shape[0]
                for pos in ['PG','SG', 'F', 'C']:
                    # Players at this position (one-hot flag == 1).
                    df_pos = df_rota.loc[df_rota[pos]==1]
                    rota_pos = df_pos.shape[0]
                    df.loc[(df['Date']==date) & (df['Team']==team) & (df[pos]==1), 'Rota_All'] = rota_all
                    df.loc[(df['Date']==date) & (df['Team']==team) & (df[pos]==1), 'Rota_Pos'] = rota_pos
seasons = ['2014-15', '2015-16', '2016-17', '2017-18', '2018-19']
# Generate and save the baseline dataset per season.  Takes 15-20 mins.
for season in tqdm(seasons):
    df = pd.read_csv(os.path.join(DATA_DIR, 'Dataframes', 'Merged', 'df_{}.csv'.format(season)))
    df_baseline = generate_baseline(df)
    # Keep only the identity columns, target (FPTS) and the 9 base stats.
    df_baseline = df_baseline.loc[:,['Date','Name','FPTS','PTS','3P','AST','TRB','STL','BLK','TOV','DD','TD']]
    df_baseline.to_csv(os.path.join(DATA_DIR, 'Dataframes','Modelling','Baseline','{}.csv'.format(season)),
                      index=False)
# +
weighting_types = ['sqrt', 'linear', 'quad']
# Generate the feature dataset for every (weighting, season) combination.
# Takes ~ 2 hrs in total
# TODO: Optimize
for weighting in tqdm(weighting_types):
    for season in tqdm(seasons):
        df = pd.read_csv(os.path.join(DATA_DIR, 'Dataframes', 'Merged', 'df_{}.csv'.format(season)))
        df_features = generate_features(df, weighting)
        # Add Starter, Listed Position and Team
        df_features = pd.merge(df.loc[:,['Date', 'Name', 'Salary', 'Team', 'Starter', 'Home', 'Pos',
                                        'PG', 'SG', 'F', 'C']],
                              df_features, on=['Date','Name'],
                              how='inner')
        # Add roster information (mutates df_features in place; no return).
        add_roster_info(df_features)
        # Reorder/select the final modelling columns.
        df_features = df_features.loc[:, DF_FEATURES]
        df_features.to_csv(os.path.join(DATA_DIR, 'Dataframes','Modelling','Features', \
                                       weighting,'{}.csv'.format(season)),
                          index=False)
| src/04.feature_engineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Full import of Qiskit library
from qiskit import *
# +
try:
    # Create a Quantum Register with 1 Qubits.
    qr = QuantumRegister(1)
    # Create a classical register with 1 bits
    cr = ClassicalRegister(1)
    # Create a Quantum Circuit containing our QR and CR.
    circuit = QuantumCircuit(qr,cr)
    # Prepare the method to draw our quantum program
    circuit.draw();
except NameError:
    # Repeated 10x so the message is hard to miss in notebook output.
    print("ERROR: There is either an error in your code - or you have not run the library import block above correctly\n"*10)
# +
# The first X-gate flips the qubit from its initial |0> state to |1>.
circuit.x(qr[0])
# The second X-gate flips it back to |0>, so at measurement time we should
# observe 0 with 100% probability.
circuit.x(qr[0])
# Prepare the method to draw our quantum program
circuit.draw();
# Adding the measurement operation to all Quantum Registers
circuit.measure(qr, cr);
# -
circuit.draw(output='mpl')
# +
# We load the backend to run our Quantum Program
backend = BasicAer.get_backend('qasm_simulator')
# We execute the Quantum Program; "shots=4096, memory=True" was added to the execute() function.
job = qiskit.execute(circuit, backend, shots=4096, memory=True)
# Get the results from the job
result = job.result().get_counts(circuit)
# A quick print out of our result (expect all 4096 shots in '0')
print(result)
# +
from qiskit.tools.visualization import plot_histogram
plot_histogram(result)
# -
| Exam question 2 - The Double X-Gate (Input Ket Zero).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <font face="微軟正黑體">
#
# ## Pandas dtypes (數據類型)
# + colab={} colab_type="code" id="lh9gyloHVblR"
import os
import numpy as np
import pandas as pd
# + colab={} colab_type="code" id="z4iLB9cKVblV"
# Set up the data paths and load the application train/test CSVs.
dir_data = '../data/'
f_app_train = os.path.join(dir_data, 'application_train.csv')
f_app_test = os.path.join(dir_data, 'application_test.csv')
app_train = pd.read_csv(f_app_train)
app_test = pd.read_csv(f_app_test)
# + colab={} colab_type="code" id="NvxV5-OzVblX" outputId="9bf04bf8-6e24-408a-d441-73caf888623b"
# Count how many columns there are of each dtype.
# DataFrame.get_dtype_counts() was deprecated in pandas 0.25 and removed in
# pandas 1.0; .dtypes.value_counts() is the supported equivalent.
app_train.dtypes.value_counts()
# + colab={} colab_type="code" id="Yf16HFmgVblc" outputId="d0d39d02-1ffb-4238-df40-0e67b0c123b0"
# For each categorical (object) column, count its number of unique categories.
app_train.select_dtypes(include=["object"]).apply(pd.Series.nunique, axis = 0)
# + [markdown] colab_type="text" id="P7RU9oyvVblg"
# <font face="微軟正黑體">
#
# ### Label encoding
#
# 有仔細閱讀[參考資料](https://medium.com/@contactsunny/label-encoder-vs-one-hot-encoder-in-machine-learning-3fc273365621)的人可以發現,Label encoding 的表示方式會讓同一個欄位底下的類別之間有大小關係 (0<1<2<...),所以在這裡我們只對有類別數量小於等於 2 的類別型欄位示範使用 Label encoding,但不表示這樣處理是最好的,一切取決於欄位本身的意義適合哪一種表示方法
# + colab={} colab_type="code" id="bhsQcnbrVblj" outputId="36f5888f-09fd-4d92-de58-74041808aee2"
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder() # Create a label encoder object
le_count = 0
le_column = []
for col in app_train:   # Iterate through the columns
    if app_train[col].dtype == 'object':
        if len(list(app_train[col].unique())) <= 2: # If 2 or fewer unique categories
            # Train on the training data
            le.fit(app_train[col])
            # Transform both training and testing data
            # NOTE(review): fitting on train only -- transform will raise if
            # app_test contains a category unseen in app_train; confirm.
            app_train[col] = le.transform(app_train[col])
            app_test[col] = le.transform(app_test[col])
            # Keep track of how many columns were label encoded
            le_count += 1
            le_column.append(col)
print('{} columns {} were label encoded.'.format(le_count, ", ".join(le_column)))
# + [markdown] colab_type="text" id="fZ14f-GpVbll"
# <font face="微軟正黑體">
#
# ### One Hot encoding
#
# pandas 中的 one hot encoding 非常方便,一行程式碼就搞定
# 可以觀察到原來的類別型欄位都轉為 0/1 了
# + colab={} colab_type="code" id="1O-M3EdDVblm" outputId="93ac010f-a46c-426e-a83b-64ca6a39a8db"
# One-hot encode every remaining object column in both dataframes.
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
print(app_train.filter(regex=("CODE_GENDER_*")).head())
print()
print(app_train['NAME_EDUCATION_TYPE_Higher education'].head())
# + [markdown] colab={} colab_type="code" id="d5IeEcPVVblq"
# <font face="微軟正黑體">
#
# ## HW
#
# Apply One Hot encoding to the data fragment `sub_train` below, and observe
# the change in the number of columns (using `shape`) and the column names
# (using `head`) before and after the transformation.
# +
# Reload the raw data and keep only the weekday column as the HW fragment.
app_train = pd.read_csv(f_app_train)
sub_train = pd.DataFrame(app_train['WEEKDAY_APPR_PROCESS_START'])
print(sub_train.shape)
sub_train.head(n=5)
# +
# Use One Hot encoding to handle categorical data
import json
sub_train = pd.get_dummies(sub_train)
print("DataFrame size: {}".format(sub_train.shape))
print("DataFrame columns: {}".format(json.dumps(sub_train.columns.values.tolist(), indent=4)))
sub_train.head(n=6)
| homework/Day_004_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Linear Regression - wine quality data
# +
import pandas as pd
# Load the white and red wine datasets (semicolon-separated) and stack them.
wine_quality_white = pd.read_csv("winequality-white.csv", sep=";")
wine_quality_red = pd.read_csv("winequality-red.csv", sep=";")
# Note: concat keeps the original row indices, so indices repeat.
wine_quality = pd.concat([wine_quality_white, wine_quality_red])
wine_quality.head()
# -
# ## Making Predictions
# +
from numpy import cov
def calc_slope(x, y):
    """Return the least-squares slope of y on x: sample cov(x, y) / var(x).

    Both np.cov and pandas Series.var default to ddof=1, so the two sample
    estimators are consistent.
    """
    covariance = cov(x, y)[0, 1]
    return covariance / x.var()
# Calculate the intercept given the x column, y column, and the slope
def calc_intercept(x, y, slope):
    """Return the intercept so the fitted line passes through (mean(x), mean(y))."""
    return y.mean() - slope * x.mean()
# Fit quality ~ density by hand and predict for every row.
x = wine_quality["density"]
y = wine_quality["quality"]
m = calc_slope(x, y)
b = calc_intercept(x, y, m)
# Closes over the module-level slope m and intercept b computed above.
def calc_predicted_y(x):
    return m * x + b
predicted_quality = wine_quality["density"].apply(calc_predicted_y)
# -
predicted_quality.head()
# ## Finding Error
# +
from scipy.stats import linregress
# We've seen the r_value before -- we'll get to what p_value and stderr_slope are soon -- for now, don't worry about them.
slope, intercept, r_value, p_value, stderr_slope = linregress(wine_quality["density"], wine_quality["quality"])
# As you can see, these are the same values we calculated (except for slight rounding differences)
print(slope)
print(intercept)
# Redefines (shadows) the earlier calc_predicted_y, now using scipy's fit.
def calc_predicted_y(x):
    return slope * x + intercept
predicted_y = wine_quality["density"].apply(calc_predicted_y)
# Residual sum of squares of the fit.
sq_residuals = (predicted_y - wine_quality["quality"]) ** 2
rss = sq_residuals.sum()
print(rss)
# -
# ## Standard Error
# +
from scipy.stats import linregress
import numpy as np
# We can do our linear regression
# Sadly, the stderr_slope isn't the standard error, but it is the standard error of the slope fitting only
# We'll need to calculate the standard error of the equation ourselves
slope, intercept, r_value, p_value, stderr_slope = linregress(wine_quality["density"], wine_quality["quality"])
predicted_y = np.asarray([slope * x + intercept for x in wine_quality["density"]])
# Squared residuals (despite the name, these are already squared).
residuals = (wine_quality["quality"] - predicted_y) ** 2
rss = sum(residuals)
n = len(wine_quality["quality"])
# Residual standard error with n - 2 degrees of freedom (slope + intercept).
standard_error = (rss / (n - 2)) ** (1/2)
within_one, within_two, within_three = 0, 0, 0
# The bands are cumulative: within_two also counts points within one SE, etc.
for idx, y in enumerate(wine_quality["quality"]):
    error = abs(y - predicted_y[idx])
    if error / standard_error <= 1:
        within_one += 1
    if error / standard_error <= 2:
        within_two += 1
    if error / standard_error <= 3:
        within_three += 1
# Convert counts to fractions of the dataset.
within_one = within_one / n
within_two = within_two / n
within_three = within_three / n
# -
within_one, within_two, within_three
| wine_quality/wine_quality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: keras
# language: python
# name: keras
# ---
from keras.models import load_model
# !ls 'model_data'
# Load the pretrained YOLOv3 weights; compile=False skips restoring the
# optimizer/loss (inference only).
model_path = 'model_data/yolo.h5'
model = load_model(model_path, compile = False)
from keras.utils.vis_utils import plot_model
# Render the architecture diagram to a PNG file with layer output shapes.
plot_model(model, to_file='yolov3_model.png', show_shapes=True)
# +
# plot_model??
# -
model.summary()
| Yolov3_Inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import sys
import os
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import scipy.signal as sig
from IPython import embed
from load_intan_rhs_format import read_data
# +
# NOTE(review): Python 2 kernel -- backslashes like \U in these plain str
# literals are not escape sequences in Python 2, but this would raise a
# SyntaxError under Python 3; use raw strings if porting.
# NOTE(review): these path appends run AFTER the import cell above that does
# `from load_intan_rhs_format import read_data`; that import only succeeds
# if the module is already importable -- confirm execution order.
sys.path.append('C:\Users\Senan\Downloads\load_intan_rhs_format')
sys.path.append(
    '\\\\Phsvcefbdl1isimgt.partners.org\MGH-NEURO-CASHLAB\Projects\ADELPHI_Senan\Stim Data\Stim Data 190119\Data')
os.chdir('\\\\Phsvcefbdl1isimgt.partners.org\MGH-NEURO-CASHLAB\Projects\ADELPHI_Senan\Stim Data\Stim Data 190119\Data')
# dat = read_data('fKS01_190118_190118_190202.rhs')
# +
# type(dat)
# +
# np.save('testdata.npy', dat)
# -
# Load the cached recording dict saved above (.item() unwraps the 0-d array).
dat = np.load('testdata.npy').item()
len(dat)
data = (dat['amplifier_data'])
# Downsample by a factor of 10 (with scipy's anti-aliasing filter).
samp = sig.decimate(data, 10)
# plt.plot(samp)
# +
# Plot 1.5 s (3000 samples at the decimated rate, 1/2000 s per sample) of
# eight channels.
fig = plt.figure()
plt.plot(np.arange(0, 1.5, 1/2000.), samp[12][0:3000], 'r' ,
        np.arange(0, 1.5, 1/2000.), samp[13][0:3000], 'g',
        np.arange(0, 1.5, 1/2000.), samp[14][0:3000], 'b',
        np.arange(0, 1.5, 1/2000.), samp[15][0:3000], 'c',
        np.arange(0, 1.5, 1/2000.), samp[0][0:3000], 'y',
        np.arange(0, 1.5, 1/2000.), samp[1][0:3000], 'm',
        np.arange(0, 1.5, 1/2000.), samp[7][0:3000], 'plum',
        np.arange(0, 1.5, 1/2000.), samp[8][0:3000], 'steelblue'
        )
plt.xlabel('Time (s)')
plt.ylabel('Voltage (microvolts)')
plt.legend(('L Hcp Stim 1', 'L Hcp Stim 2', 'L Hcp Rec 1', 'L Hcp Rec 2', 'L Ent Rec 1', 'L Ent Rec 2', 'R Hcp Rec 1', 'R Hcp Rec 2'),loc=1)
# -
len(samp[0][0:4000])
fig.savefig('datatest5.png', dpi = 1000)
os.getcwd()
fig.savefig('datatest4.eps', dpi = 1000)
# Load and decimate a second recording session.
datnorm = read_data('fKS01_190118_190118_190459.rhs')
datanorm = (datnorm['amplifier_data'])
samp2 = sig.decimate(datanorm, 10)
# +
fig = plt.figure()
plt.plot(np.arange(0, 1.5, 1/2000.), samp2[12][17000:20000], 'r',
        np.arange(0, 1.5, 1/2000.), samp2[13][17000:20000], 'g',
        np.arange(0, 1.5, 1/2000.), samp2[14][17000:20000], 'b',
        np.arange(0, 1.5, 1/2000.), samp2[15][17000:20000], 'c',
        np.arange(0, 1.5, 1/2000.), samp2[0][17000:20000], 'y',
        np.arange(0, 1.5, 1/2000.), samp2[1][17000:20000], 'm',
        np.arange(0, 1.5, 1/2000.), samp2[7][17000:20000], 'plum',
        np.arange(0, 1.5, 1/2000.), samp2[8][17000:20000], 'steelblue'
        )
plt.xlabel('Time (s)')
plt.ylabel('Voltage (microvolts)')
# plt.legend(('L Hcp Stim 1', 'L Hcp Stim 2', 'L Hcp Rec 1', 'L Hcp Rec 2', 'L Ent Rec 1', 'L Ent Rec 2', 'R Hcp Rec 1', 'R Hcp Rec 2'),loc=1)
# -
fig.savefig('normstim2.png', dpi = 1000)
# Load and decimate a third (stimulation) recording session.
datastim = read_data('fKS01_190118_190118_194620.rhs')
datstim = (datastim['amplifier_data'])
samp3 = sig.decimate(datstim, 10)
# +
fig = plt.figure()
plt.plot(np.arange(0, 1.5, 1/2000.), samp3[12][16500:19500], 'r',
        np.arange(0, 1.5, 1/2000.), samp3[13][16500:19500], 'g',
        np.arange(0, 1.5, 1/2000.), samp3[14][16500:19500], 'b',
        np.arange(0, 1.5, 1/2000.), samp3[15][16500:19500], 'c',
        np.arange(0, 1.5, 1/2000.), samp3[0][16500:19500], 'y',
        np.arange(0, 1.5, 1/2000.), samp3[1][16500:19500], 'm',
        np.arange(0, 1.5, 1/2000.), samp3[7][16500:19500], 'plum',
        np.arange(0, 1.5, 1/2000.), samp3[8][16500:19500], 'steelblue'
        )
plt.xlabel('Time (s)')
plt.ylabel('Voltage (microvolts)')
# plt.legend(('L Hcp Stim 1', 'L Hcp Stim 2', 'L Hcp Rec 1', 'L Hcp Rec 2', 'L Ent Rec 1', 'L Ent Rec 2', 'R Hcp Rec 1', 'R Hcp Rec 2'),loc=1)
# -
fig.savefig('preictstim2.png', dpi = 1000)
# Cache the raw session dicts for later reuse.
np.save('datastim.npy', datastim)
np.save('datanorm.npy', datnorm)
| Stim Analysis/.ipynb_checkpoints/StimTestAnalysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="q9LsyItZbXTf"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# + id="11QAeqECeZ5C"
mnist = tf.keras.datasets.fashion_mnist
# + colab={"base_uri": "https://localhost:8080/"} id="kNwJQStTe5XM" outputId="fbe7fa46-7ef3-463f-c540-5b36cd33083a"
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# + colab={"base_uri": "https://localhost:8080/", "height": 758} id="oHA9U4Yfe-3t" outputId="f761a149-924a-46c3-9658-fbc34e7f124c"
# Inspect one raw sample: wider print width so image rows aren't wrapped.
np.set_printoptions(linewidth=200)
plt.imshow(training_images[0])
print(training_labels[0])
print(training_images[0])
# + [markdown] id="UvA627wmfhcN"
# Normalizing the pixel data to have values between 0 and 1
# + id="kRr4EYROfmuT"
training_images = training_images / 255.0
test_images = test_images / 255.0
# + id="EtH5h0EHfpX1"
# Flatten 28x28 images, one hidden ReLU layer, 10-way softmax output.
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(128, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
# + [markdown] id="I64pUc7TgYDI"
# Building the model, then evaluate the model.
# + colab={"base_uri": "https://localhost:8080/"} id="15b2gLf2gORC" outputId="a61b101e-1736-4651-b433-2adf611dabf9"
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer = tf.optimizers.Adam(),
              loss = 'sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
# + colab={"base_uri": "https://localhost:8080/"} id="PK-d-MsYgcJ8" outputId="a84bcf06-9d04-40eb-e240-6b3de2065576"
model.evaluate(test_images, test_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="wf8YKGyOgyrC" outputId="57c5f361-5ae9-4069-d0bf-ed46bfc23d8c"
# Per-class probabilities for the first test image.
classifications = model.predict(test_images)
print(classifications[0])
# + id="76fWBzrUhGx7"
| Basic_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random
from sympy import *
init_printing()
# +
# Matrix??
# +
# Exact-arithmetic matrix: S(3)/2 keeps 3/2 as a sympy Rational, not 1.5.
A = Matrix([
    [3, 3],
    [2, S(3)/2]
])
A
# -
# Solve A x = [6, 5] via the inverse.
A.inv()*Matrix([6,5])
A.det()
A.rref()
A.inv()
# +
alpha = symbols('alpha')
# -
# Symbolic version: det(A) = 3*alpha - 6.
A = Matrix([
    [3, 3],
    [2, alpha]
])
A.det()
# if alpha = 2 then det(A)=0 => A is not invertible
# otherwise A is invertible.
| cut_material/generated_A.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Neural Crest Cell Count Analysis
# This code was used to analyze neural crest cell counts measured from section images.
#
# Required inputs for this script:
#
# 1. .csv file documenting the neural crest cell counts (Snai2, Sox9) on experimental and contralateral control sides.
#
# Script prepared by <NAME>, May 2021
# +
# Import data handling and analysis packages
import os
import glob
import pandas as pd
from scipy import stats
# Import plotting packages
import iqplot
import bokeh.io
from bokeh.io import output_file, show
from bokeh.layouts import column, row
bokeh.io.output_notebook()
# -
# ## Import Data
source_data = pd.read_csv('Fig2_source_data_cellcounts.csv')
source_data.head()
# ### Process data by generating the mean value of 2aRFP and dnBMPR1A cells per section, per embryo
# +
# Get a list of treatments
treatment_list = source_data.Treatment.unique()
treatment_list = treatment_list.tolist()
# Group the dataframe and average the per-section cell counts.
# Select 'Count' with a list: the former `['ROI','Count']` tuple selection
# on a GroupBy was deprecated in pandas 0.25 and removed in 1.0, and 'ROI'
# is already a grouping key (it ends up in the index, not the values).
mean_sections = (source_data.groupby(['Treatment', 'Channel', 'ExptDate', 'Embryo', 'Somites',
                                    'EmbID','ROI'])[['Count']].mean())
mean_sections.head()
# -
# ## Plot and perform statistical analysis
# +
################### Isolate data for analysis ###################
# Annotate data further to plot
cntl_construct = '2aRFP'
expt_construct = 'dnBMPR1A'
# Pull out only cells and treatments of interest, and rename ROIs with the appropriate constructs
df=mean_sections.reset_index()
df.replace(to_replace = {'Cntl': cntl_construct, 'Expt': expt_construct}, inplace=True)
################### Plot as strip plot ###################
# Plot as strip plot; parcoord_column pairs control/experimental points per embryo.
p1 = iqplot.strip(data=df
              ,q='Count', q_axis='y'
              ,cats=['Channel', 'ROI']
              ,y_range=(0,55)
              ,frame_height = 250, frame_width = 200
              ,y_axis_label= 'Average Cell Counts Per Section'
              ,x_axis_label='Treatment'
              ,palette = ['black', 'black', 'black', 'black']
              ,parcoord_column='EmbID'
             )
# p1.axis.axis_label_text_font_style = 'bold italic'
p1.axis.axis_label_text_font_size = '14px'
p1.axis.major_label_text_font_size = '12px'
p1.axis.axis_label_text_font_style = 'normal'
p1.xaxis.major_label_orientation = 7
show(row(p1))
################### Perform statistical analysis ###################
# Perform Paired t test for Snai2 (control vs. experimental side per embryo)
snai2 = df.loc[df['Channel'] == 'SNAI2']
cntl_snai2 = snai2.loc[snai2['ROI'] == cntl_construct]['Count']
expt_snai2 = snai2.loc[snai2['ROI'] == expt_construct]['Count']
ttest_snai2 = stats.ttest_rel(cntl_snai2,expt_snai2)
# Display test results
print('Paired t-test results for Snai2 cell counts: \n\t\t statistic=' + str(ttest_snai2[0]) +
      '\n\t\t p-value=' + str(ttest_snai2[1]))
# Perform Paired t test for Sox9
sox9 = df.loc[df['Channel'] == 'SOX9']
cntl_sox9 = sox9.loc[sox9['ROI'] == cntl_construct]['Count']
expt_sox9 = sox9.loc[sox9['ROI'] == expt_construct]['Count']
ttest_sox9 = stats.ttest_rel(cntl_sox9,expt_sox9)
# Display test results
print('Paired t-test results for Sox9 cell counts: \n\t\t statistic=' + str(ttest_sox9[0]) +
      '\n\t\t p-value=' + str(ttest_sox9[1]))
# -
# ## Normalize to control (dnBMPR1A count / 2aRFP count) and plot ratios
# Pivot to one row per embryo/channel with one column per construct.
norm_df=df.filter(['EmbID', 'Channel', 'ROI', 'Count'])
norm_df=norm_df.pivot(index='EmbID', columns='ROI', values='Count')
norm_df=norm_df.reset_index()
# EmbID is '<date>_<embryo>_<channel>'; split it back into components.
norm_df['ExptDate'], norm_df['Emb'], norm_df['Channel'] = zip(*norm_df['EmbID'].map(lambda x: x.split('_')))
norm_df.replace(to_replace=['Snai2', 'Sox9'], value=['SNAI2', 'SOX9'], inplace=True)
norm_df['Expt/Cntl']=norm_df['dnBMPR1A']/norm_df['2aRFP']
norm_df=norm_df.filter(['EmbID', 'ExptDate', 'Emb', 'Channel', '2aRFP', 'dnBMPR1A', 'Expt/Cntl'])
norm_df.head()
# +
################### Plot as stripbox plot ###################
# Build Stripbox plot
stripbox = iqplot.stripbox(
    # Data to plot
    data=norm_df,
    q='Expt/Cntl', q_axis='y',
    cats='Channel',
    # Plot details
    jitter=True, jitter_kwargs=dict(width=0.3),
    marker_kwargs=dict(alpha=0.8, size=7
                      ,color='dimgray'
                      ),
    box_kwargs=dict(line_color='black', line_width=1.5),
    whisker_kwargs=dict(line_color='black', line_width=1.5),
    median_kwargs=dict(line_color='black', line_width=2),
    top_level='box',
    frame_width=150, frame_height=350,
    # Plot customizations
    y_range=(0,1.8),
    y_axis_label='Norm. Cell Counts (dnBMPR1A / 2a-RFP)',
    x_axis_label='Marker',
    show_legend=False,
)
# Final customizations
stripbox.axis.axis_label_text_font_size = '14px'
stripbox.axis.major_label_text_font_size = '12px'
stripbox.axis.axis_label_text_font_style = 'normal'
# View plot
show(stripbox)
# -
| Fig2_dnBMPR1A_Migration/FigS2B_CellCounts/2021_NeuralCrest_CellCounts_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Imports
# +
from collections import Counter, defaultdict
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn import utils
from tabulate import tabulate
import matplotlib.pyplot as plt
import seaborn as sns
from gensim.models.word2vec import Word2Vec
from gensim.models import Word2Vec
import gensim
import re
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import learning_curve, GridSearchCV
import pandas as pd
import numpy as np
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
# %matplotlib inline
# +
# imports needed and logging
import gzip
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
# -
# ## Reading Data
# ### Reading the BNC written data
# +
# Natural Language Toolkit: Plaintext Corpus Reader
#
# Copyright (C) 2001-2019 NLTK Project
# Author: <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""Corpus reader for the XML version of the British National Corpus."""
from nltk.corpus.reader.util import concat
from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView, ElementTree
class BNCCorpusReader(XMLCorpusReader):
    """Corpus reader for the XML version of the British National Corpus.
    For access to the complete XML data structure, use the ``xml()``
    method. For access to simple word lists and tagged word lists, use
    ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
    You can obtain the full version of the BNC corpus at
    http://www.ota.ox.ac.uk/desc/2554
    If you extracted the archive to a directory called `BNC`, then you can
    instantiate the reader as::
        BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml')
    """
    def __init__(self, root, fileids, lazy=True):
        # lazy=True streams files via BNCWordView; lazy=False parses each
        # file eagerly with _words().
        XMLCorpusReader.__init__(self, root, fileids)
        self._lazy = lazy
    def words(self, fileids=None, strip_space=True, stem=True):
        """
        :return: the given file(s) as a list of words
            and punctuation symbols.
        :rtype: list(str)
        :param strip_space: If true, then strip trailing spaces from
            word tokens. Otherwise, leave the spaces on the tokens.
        :param stem: If true, then use word stems instead of word strings.
        """
        return self._views(fileids, False, None, strip_space, stem)
    def tagged_words(self, fileids=None, c5=True, strip_space=True, stem=True):
        """
        :return: the given file(s) as a list of tagged
            words and punctuation symbols, encoded as tuples
            ``(word,tag)``.
        :rtype: list(tuple(str,str))
        :param c5: If true, then the tags used will be the more detailed
            c5 tags. Otherwise, the simplified tags will be used.
        :param strip_space: If true, then strip trailing spaces from
            word tokens. Otherwise, leave the spaces on the tokens.
        :param stem: If true, then use word stems instead of word strings.
        """
        tag = 'c5' if c5 else 'pos'
        return self._views(fileids, False, tag, strip_space, stem)
    def sents(self, fileids=None, strip_space=True, stem=True):
        """
        :return: the given file(s) as a list of
            sentences or utterances, each encoded as a list of word
            strings.
        :rtype: list(list(str))
        :param strip_space: If true, then strip trailing spaces from
            word tokens. Otherwise, leave the spaces on the tokens.
        :param stem: If true, then use word stems instead of word strings.
        """
        return self._views(fileids, True, None, strip_space, stem)
    def tagged_sents(self, fileids=None, c5=True, strip_space=True, stem=True):
        """
        :return: the given file(s) as a list of
            sentences, each encoded as a list of ``(word,tag)`` tuples.
        :rtype: list(list(tuple(str,str)))
        :param c5: If true, then the tags used will be the more detailed
            c5 tags. Otherwise, the simplified tags will be used.
        :param strip_space: If true, then strip trailing spaces from
            word tokens. Otherwise, leave the spaces on the tokens.
        :param stem: If true, then use word stems instead of word strings.
        """
        tag = 'c5' if c5 else 'pos'
        return self._views(
            fileids, sent=True, tag=tag, strip_space=strip_space, stem=stem
        )
    def _views(self, fileids=None, sent=False, tag=False, strip_space=True, stem=True):
        """A helper function that instantiates BNCWordViews or the list of words/sentences."""
        # Lazy mode streams each file; eager mode parses it fully up front.
        f = BNCWordView if self._lazy else self._words
        return concat(
            [
                f(fileid, sent, tag, strip_space, stem)
                for fileid in self.abspaths(fileids)
            ]
        )
    def _words(self, fileid, bracket_sent, tag, strip_space, stem):
        """
        Helper used to implement the view methods -- returns a list of
        words or a list of sentences, optionally tagged.
        :param fileid: The name of the underlying file.
        :param bracket_sent: If true, include sentence bracketing.
        :param tag: The name of the tagset to use, or None for no tags.
        :param strip_space: If true, strip spaces from word tokens.
        :param stem: If true, then substitute stems for words.
        """
        result = []
        xmldoc = ElementTree.parse(fileid).getroot()
        # Iterate over every <s> (sentence) element in the document.
        for xmlsent in xmldoc.findall('.//s'):
            sent = []
            for xmlword in _all_xmlwords_in(xmlsent):
                word = xmlword.text
                if not word:
                    word = ""  # fixes issue 337?
                if strip_space or stem:
                    word = word.strip()
                if stem:
                    # 'hw' holds the headword (stem); fall back to surface form.
                    word = xmlword.get('hw', word)
                if tag == 'c5':
                    word = (word, xmlword.get('c5'))
                elif tag == 'pos':
                    word = (word, xmlword.get('pos', xmlword.get('c5')))
                sent.append(word)
            if bracket_sent:
                result.append(BNCSentence(xmlsent.attrib['n'], sent))
            else:
                result.extend(sent)
        assert None not in result
        return result
# +
def _all_xmlwords_in(elt, result=None):
if result is None:
result = []
for child in elt:
if child.tag in ('w'):
result.append(child)
else:
_all_xmlwords_in(child, result)
return result
class BNCSentence(list):
    """A sentence: a plain list of words augmented with a ``num`` attribute
    holding the sentence identifier (the ``n`` attribute from the XML).
    """
    def __init__(self, num, items):
        # Populate the list contents first, then attach the identifier.
        super(BNCSentence, self).__init__(items)
        self.num = num
# -
class BNCWordView(XMLCorpusView):
    """
    A stream backed corpus view specialized for use with the BNC corpus.
    """
    # Structural/annotation tags that carry no word content.
    tags_to_ignore = set(
        ['stext','pb', 'gap', 'vocal', 'event', 'unclear', 'shift', 'pause', 'align','c']
    )
    """These tags are ignored. For their description refer to the
    technical documentation, for example,
    http://www.natcorp.ox.ac.uk/docs/URG/ref-vocal.html
    """
    def __init__(self, fileid, sent, tag, strip_space, stem):
        """
        :param fileid: The name of the underlying file.
        :param sent: If true, include sentence bracketing.
        :param tag: The name of the tagset to use, or None for no tags.
        :param strip_space: If true, strip spaces from word tokens.
        :param stem: If true, then substitute stems for words.
        """
        # The tagspec selects either whole <s> elements or individual <w>
        # elements, depending on whether sentence bracketing was requested.
        if sent:
            tagspec = '.*/s'
        else:
            tagspec = '.*/s/(.*/)?(w)'
        self._sent = sent
        self._tag = tag
        self._strip_space = strip_space
        self._stem = stem
        self.title = None  #: Title of the document.
        self.author = None  #: Author of the document.
        self.editor = None  #: Editor
        self.resps = None  #: Statement of responsibility
        XMLCorpusView.__init__(self, fileid, tagspec)
        # Read in a tasty header.
        self._open()
        self.read_block(self._stream, '.*/teiHeader$', self.handle_header)
        self.close()
        # Reset tag context.
        self._tag_context = {0: ()}
    def handle_header(self, elt, context):
        # Extract document metadata (title/author/editor/responsibility)
        # from the TEI header.
        titles = elt.findall('titleStmt/title')
        if titles:
            self.title = '\n'.join(title.text.strip() for title in titles)
        authors = elt.findall('titleStmt/author')
        if authors:
            self.author = '\n'.join(author.text.strip() for author in authors)
        editors = elt.findall('titleStmt/editor')
        if editors:
            self.editor = '\n'.join(editor.text.strip() for editor in editors)
        resps = elt.findall('titleStmt/respStmt')
        if resps:
            self.resps = '\n\n'.join(
                '\n'.join(resp_elt.text.strip() for resp_elt in resp) for resp in resps
            )
    def handle_elt(self, elt, context):
        # Dispatch to sentence- or word-level handling per constructor flag.
        if self._sent:
            return self.handle_sent(elt)
        else:
            return self.handle_word(elt)
    def handle_word(self, elt):
        # Returns the (possibly tagged) word, or implicitly None when the
        # C5 tag marks punctuation (PUL/PUN/PUQ/PUR) -- callers downstream
        # filter out these None entries.
        word = elt.text
        if elt.get('c5') not in ['PUL','PUN','PUQ','PUR']:
            if not word:
                word = ""  # fixes issue 337?
            if self._strip_space or self._stem:
                word = word.strip()
            if self._stem:
                # 'hw' holds the headword (stem); fall back to surface form.
                word = elt.get('hw', word)
            if self._tag == 'c5':
                word = (word, elt.get('c5'))
            elif self._tag == 'pos':
                word = (word, elt.get('pos', elt.get('c5')))
            return word
    def handle_sent(self, elt):
        sent = []
        for child in elt:
            # Multiword units / highlighted / corrected / truncated spans
            # contain nested word elements.
            if child.tag in ('mw', 'hi', 'corr', 'trunc'):
                sent += [self.handle_word(w) for w in child]
            # NOTE(review): ('w') is just the string 'w', so this is
            # substring membership, not a tuple test; it behaves like
            # == 'w' for real tags but would also match an empty tag.
            elif child.tag in ('w'):
                sent.append(self.handle_word(child))
            elif child.tag not in self.tags_to_ignore:
                raise ValueError('Unexpected element %s' % child.tag)
        return BNCSentence(elt.attrib['n'], sent)
# Stream all BNC files and collect tagged sentences (stemmed word forms).
parser = BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml')
tagged_sentences_wordforms=[]
# We are using the root (stem) form of the words.
for word in parser.tagged_sents():
    tagged_sentences_wordforms.append(word)
tagged_sentences_wordforms[:10]
# + code_folding=[4]
# # for l in d0:
# for l in tagged_sentences_wordforms:
# for t in l:
# T = list(t)
# if str(T[1]) in ['PUL','PUN','PUQ','PUR']:
# l.remove(t)
# -
tagged_sentences=[]
for s in tagged_sentences_wordforms:
sentence=[]
for w in s:
if w!= None:
sentence.append(w)
tagged_sentences.append(sentence)
word_verb=[]
# for l in d0:
for l in tagged_sentences:
word_verb_tagged=[]
for t in l:
T = list(t)
if str(T[1]).startswith('V'):
T[0]=T[0]+'_V'
word_verb_tagged.append(T[0])
word_verb.append(word_verb_tagged)
word_verb[:10]
# ### training word2vec on BNC corpus
# +
# model_BNC_tagged_sentences_verb_sg = Word2Vec(word_verb, min_count=5,size= 300,workers=3, window =5)
# +
# model_BNC_tagged_sentences_verb_sg = Word2Vec(word_verb, min_count=5,size= 300,workers=3, window =5,sg=1)
# -
model_BNC_tagged_sentences_verb = Word2Vec(word_verb, min_count=5,size= 300,workers=3, window =5,sg=1)
model_BNC_tagged_sentences_verb.save("model_BNC_tagged_sentences_verb_sg.model")
# +
# model_BNC_tagged_sentences_verb.save("model_BNC_tagged_sentences_verb_sg_not_stem.model")
# -
| model/BNC_Vtagged_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pd.options.display.max_rows = 100
plt.rcParams['figure.dpi'] = 100
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', 300)
dir_figures = './figures/'
dir_results = './results/'
# -
# # 1. Untargeted - Whitebox
# Load white-box untargeted attack results for CIFAR-10 (quantized +
# full-precision runs concatenated), keeping only the sgd-uap attack.
df_cifar = pd.DataFrame()
df_cifar = pd.concat([df_cifar, pd.read_csv(dir_results + 'evalQ_untgt-whitebox-cifar10.csv', index_col = 0)])
df_cifar = pd.concat([df_cifar, pd.read_csv(dir_results + 'eval_untgt-whitebox-cifar10.csv', index_col = 0)])
df_cifar = df_cifar[df_cifar['atk_name'] == 'sgd-uap']
# Same for SVHN.
df_svhn = pd.DataFrame()
df_svhn = pd.concat([df_svhn, pd.read_csv(dir_results + 'evalQ_untgt-whitebox-svhn.csv', index_col = 0)])
df_svhn = pd.concat([df_svhn, pd.read_csv(dir_results + 'eval_untgt-whitebox-svhn.csv', index_col = 0)])
df_svhn = df_svhn[df_svhn['atk_name'] == 'sgd-uap']
# Fixed model ordering for plots (instead of CSV order).
# model_names = list(df_cifar['model_name'].unique())
model_names = ['resnet18',
               'resnet18_sfP',
               'resnet18_sfP-mixup',
               'resnet18_sfP-cutout',
               'resnet18_P2-0.3',
               'resnet18_P3-0.3',
               'resnet18_P4-0.3',
               'resnet20_Q2',
               'resnet20_Q3',
               'resnet20_Q4']
# +
# Untargeted error rate (UER) vs. perturbation size, one curve per
# model, for CIFAR-10.
plt.figure(figsize = (10, 4))
for model in model_names:
    temp = df_cifar[df_cifar['model_name'] == model]
    plt.grid(axis = 'both', alpha = 0.5)
    plt.xlabel(r'Perturbation Size ($\varepsilon$)')
    plt.ylabel('Top 1 Error')
    plt.plot(temp['eps'], temp['UER'], marker = 'o', label = model)
plt.ylim([0, 1])
plt.legend(bbox_to_anchor=(1.3, 1))
plt.savefig(dir_figures + 'untgt-whitebox-cifar10.png', dpi = 300, bbox_inches='tight')
# +
# Same plot for SVHN.
plt.figure(figsize = (10, 4))
for model in model_names:
    temp = df_svhn[df_svhn['model_name'] == model]
    plt.grid(axis = 'both', alpha = 0.5)
    plt.xlabel(r'Perturbation Size ($\varepsilon$)')
    plt.ylabel('Top 1 Error')
    plt.plot(temp['eps'], temp['UER'], marker = 'o', label = model)
plt.ylim([0, 1])
plt.legend(bbox_to_anchor=(1.3, 1))
plt.savefig(dir_figures + 'untgt-whitebox-svhn.png', dpi = 300, bbox_inches='tight')
# -
# # 2. Untargeted - Transfer
# Load transfer-attack results (attack crafted on atk_source, evaluated
# on model_name).
df_cifar = pd.read_csv(dir_results + 'eval_untgt-transfer-cifar10.csv', index_col = 0)
df_svhn = pd.read_csv(dir_results + 'eval_untgt-transfer-svhn.csv', index_col = 0)
model_names = list(df_cifar['model_name'].unique())
atk_sources = list(df_cifar['atk_source'].unique())
model_names
# Overwrite with a fixed ordering so the confusion matrices line up.
model_names = ['resnet18',
               'resnet18_sfP',
               'resnet18_sfP-mixup',
               'resnet18_sfP-cutout',
               'resnet18_P2-0.3',
               'resnet18_P3-0.3',
               'resnet18_P4-0.3',
               'resnet20_Q2',
               'resnet20_Q3',
               'resnet20_Q4']
atk_sources = model_names
# ### To make evaluation easier, fix EPS value when doing comparison
temp_cifar = df_cifar[df_cifar['eps'] == 10]
temp_svhn = df_svhn[df_svhn['eps'] == 10]
# + active=""
# # From model's perspective
# for model in model_names:
# print('\n')
# print(temp_cifar[temp_cifar['model_name'] == model].sort_values(by = 'UER', ascending = False)[['model_name', 'atk_source', 'UER']].to_string())
# + active=""
# # From attack source's perspective
# for source in atk_sources:
# print('\n')
# print(temp_cifar[temp_cifar['atk_source'] == source].sort_values(by = 'UER', ascending = False)[['model_name', 'atk_source', 'UER']].to_string())
# + active=""
# # From model's perspective
# for model in model_names:
# print('\n')
# print(temp_svhn[temp_svhn['model_name'] == model].sort_values(by = 'UER', ascending = False)[['model_name', 'atk_source', 'UER']].to_string())
# + active=""
# # From attack source's perspective
# for source in atk_sources:
# print('\n')
# print(temp_svhn[temp_svhn['atk_source'] == source].sort_values(by = 'UER', ascending = False)[['model_name', 'atk_source', 'UER']].to_string())
# -
# ## Confusion matrix
# Pivot UER into a model x attack-source matrix for CIFAR-10, reindexed
# to the fixed ordering (missing combinations become 0).
conf_mat = (temp_cifar.set_index(['model_name', 'atk_source'])['UER']
            .unstack(fill_value = 0)
            .reindex(index = model_names, columns = atk_sources, fill_value=0)
            .rename_axis(None)
            .rename_axis(None, axis = 1))
# +
# Heatmap with an extra right-hand column holding the per-model mean UER.
plt.figure(figsize = (12, 8))
sns.heatmap(np.concatenate([conf_mat, np.expand_dims(conf_mat.mean(axis = 1), axis = 1)], axis = 1), cmap = 'Blues', fmt = '.2f', annot = True, cbar = False, annot_kws = {'fontsize' : 12}, square = True, linewidth = 0.1)
plt.xlabel('Attack Source', fontsize = 24)
plt.ylabel('Model', fontsize = 24)
plt.xticks(ticks = np.arange(0.5, len(model_names) + 1, 1), labels = model_names + ['Average UER'], size = 12, rotation = 90)
plt.yticks(ticks = np.arange(0.5, len(model_names), 1), labels = model_names, size = 12, rotation = 0)
plt.savefig(dir_figures + 'untgt-transfer-cifar10.png', dpi = 300, bbox_inches='tight')
# -
# Same confusion matrix and heatmap for SVHN.
conf_mat = (temp_svhn.set_index(['model_name', 'atk_source'])['UER']
            .unstack(fill_value = 0)
            .reindex(index = model_names, columns = atk_sources, fill_value=0)
            .rename_axis(None)
            .rename_axis(None, axis = 1))
# +
plt.figure(figsize = (12, 8))
sns.heatmap(np.concatenate([conf_mat, np.expand_dims(conf_mat.mean(axis = 1), axis = 1)], axis = 1), cmap = 'Blues', fmt = '.2f', annot = True, cbar = False, annot_kws = {'fontsize' : 12}, square = True, linewidth = 0.1)
plt.xlabel('Attack Source', fontsize = 24)
plt.ylabel('Model', fontsize = 24)
plt.xticks(ticks = np.arange(0.5, len(model_names) + 1, 1), labels = model_names + ['Average UER'], size = 12, rotation = 90)
plt.yticks(ticks = np.arange(0.5, len(model_names), 1), labels = model_names, size = 12, rotation = 0)
plt.savefig(dir_figures + 'untgt-transfer-svhn.png', dpi = 300, bbox_inches='tight')
# -
| results_untgt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="F7BlFuwTdS2H" outputId="c6266add-7353-4938-a15f-e23188b780d9"
# !pip install PyGithub
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
import seaborn as sns
from github import Github
import github
matplotlib.style.use('seaborn')
# %matplotlib inline
# + id="uMR4NPXbdOMm" colab={"base_uri": "https://localhost:8080/", "height": 833} outputId="c5226fcd-56e6-42e6-c5b9-b7665b85ac86"
# For each model type, fetch its per-state results CSV from GitHub and
# keep, per state, the row with the lowest RMSE; collect R2/MAE/RMSE
# into dicts keyed by model for the radar charts below.
pd.set_option("display.precision", 3)
Models=['CNN','MLP','LSTM','GRU']
r2_scores={}
mae={}
rmse={}
states=[]
from IPython.display import display
for model in Models:
    print(model+' model')
    #print("Model:",model," result")
    df=pd.read_csv("https://raw.githubusercontent.com/sureshkuc/Data-Science-in-Life-Science-Project/main/Indian-States-Model-Results/"+model+"_on_short_data.csv").drop(columns =["Unnamed: 0"])
    # Delhi is excluded from the comparison.
    df=df[df['State']!='Delhi']
    #print(df.head())
    # Best (minimum-RMSE) configuration per state.
    temp1=df.groupby(['State']).apply(lambda df: df.loc[df.RMSE.idxmin()]).copy()
    temp1.reset_index(drop=True, inplace=True)
    display(temp1[['State','MAE','RMSE','R2_Score']])
    # Sort descending by state name so every model's lists line up.
    temp1=temp1.sort_values(["State"], ascending = (False))
    #print(temp1[['State','MAE','RMSE','R2_Score']])
    r2_scores[model]=list(temp1['R2_Score'])
    mae[model]=list(temp1['MAE'])
    rmse[model]=list(temp1['RMSE'])
    states=list(temp1['State'])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="w3kkjdbddOMo" outputId="8eae7fa8-da2d-4f02-e29e-dc64a534ee95"
# Assemble a single wide table: state name, then R2/MAE/RMSE per model.
state_df=pd.DataFrame({'State':states})
r2_score_df = pd.DataFrame(r2_scores)
mae_df = pd.DataFrame(mae)
rmse_df = pd.DataFrame(rmse)
result= pd.concat([state_df, r2_score_df,mae_df,rmse_df], axis=1)
display(result)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="4PGlVvg8imV0" outputId="c7e885ca-face-4d1c-df28-d9db6d24ee38"
r2_score_df
# + colab={"base_uri": "https://localhost:8080/"} id="GquuxwL3dOMo" outputId="45d773be-a361-485b-98f7-83184f200953"
# LaTeX rendering of the table for the report.
print(result.to_latex())
# + id="FrYYcpsMdOMp"
def radar_chart(categories,CNN,MLP,LSTM,GRU,title):
    """Draw a polar (radar) chart comparing the four model metrics.

    Parameters
    ----------
    categories : list of axis labels (one per spoke, e.g. state names)
    CNN, MLP, LSTM, GRU : per-category metric values, same length as
        ``categories``
    title : chart title; also used as the output file name (saved as
        ``<title>.pdf``)
    """
    # Close each polygon by repeating the first entry of every series.
    categories = [*categories, categories[0]]
    CNN = [*CNN, CNN[0]]
    MLP = [*MLP, MLP[0]]
    LSTM = [*LSTM, LSTM[0]]
    #BLSTM = [*BLSTM, BLSTM[0]]
    GRU = [*GRU, GRU[0]]
    N=len(LSTM)-1
    # BUG FIX: the original used the truncated constant 3.14, so the
    # repeated point did not land exactly on 2*pi and the polygons did
    # not close cleanly; use np.pi instead.
    angle=[n/float(N)*2*np.pi for n in range(N)]
    angle+=angle[:1]
    plt.figure(figsize=(8, 8))
    plt.polar(angle,CNN,label='CNN',color='g')
    plt.polar(angle,MLP,label='MLP',color='r')
    plt.polar(angle,LSTM,label='LSTM',color='blue')
    #plt.polar(angle,BLSTM,label='BLSTM',color='purple')
    plt.polar(angle,GRU,label='GRU',color='black')
    plt.xticks(angle,labels=categories[:-1])
    plt.yticks()
    plt.title("Radar Chart:"+title,size=20)
    plt.legend()
    plt.savefig(title+'.pdf')
# + colab={"base_uri": "https://localhost:8080/", "height": 516} id="cQcmAB0QdOMq" outputId="b9bdcba5-815e-4c48-8da8-afa98994c2f5"
# Render one radar chart per metric and save each as '<title>.pdf'.
radar_chart(states,r2_scores['CNN'],r2_scores['MLP'],r2_scores['LSTM'],r2_scores['GRU'],title="R2-Score")
# + colab={"base_uri": "https://localhost:8080/", "height": 516} id="9DMsJIIxdOMq" outputId="c6fab9ab-d5b4-49d6-82df-fdeba67f5339"
radar_chart(states,mae['CNN'],mae['MLP'],mae['LSTM'],mae['GRU'],title="Mean Absolute Error")
# + colab={"base_uri": "https://localhost:8080/", "height": 516} id="arX-jWAfdOMq" outputId="3e66cffd-0d9c-4ad3-85d9-879a81bcd654"
radar_chart(states,rmse['CNN'],rmse['MLP'],rmse['LSTM'],rmse['GRU'],title="Root Mean Squared Error")
| Implementation/Pytorch/Result_script(short_data).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# NOTE(review): `df` and `y_pred` are defined in cells not present in
# this file fragment - presumably the feature frame and model
# predictions; confirm before reuse.
dfsel = df[['internet_usage']].copy()
dfsel['pred'] = y_pred
dfsel
| #04. Decision Tree. A Supervised Classification Model/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os, sys, math
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
sys.path.insert(0,"/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/LibFolder")
from Lib_GeneralFunctions import *
from Lib_ProfilePlotting import *
from Lib_ProfileProcessing import *
from se2waveload import *
import pandas as pd
from scipy import fft
from matplotlib.gridspec import GridSpec
plt.style.use('seaborn-whitegrid')
from matplotlib import cm
from matplotlib.colors import ListedColormap
viridisBig = cm.get_cmap('viridis_r', 512)
cmap = ListedColormap(viridisBig(np.linspace(0.20, 1., 256)))
import time
start_time = time.time()
from matplotlib.lines import Line2D
from scipy import signal
# +
# Function to calculate the analytical Kostrov Solution
def CalculateKostrovSolution(Time,Stress,mu, v,r):
    """Analytical Kostrov slip at distance ``r`` from the nucleation
    point at time ``Time``.

    Returns (24/(5*pi)) * (Stress/mu) * sqrt(v^2 * Time^2 - r^2), or
    0.0 before the rupture front arrives (v*Time < r), where the square
    root would be imaginary.
    """
    # Guard explicitly instead of letting np.sqrt of a negative produce
    # NaN (plus a RuntimeWarning) and mapping the NaN back to 0.0, as
    # the original did via math.isnan.
    discriminant = (v**2)*(Time**2)-r**2
    if discriminant < 0:
        return 0.0
    return (24.0/(np.pi*5.0))*(Stress/mu)*np.sqrt(discriminant)
# Function to calculate the root-mean-square of a target list of data against a target list of data
def rmse(predictions, targets):
    """Root-mean-square error between two equal-length sequences,
    returned as a plain Python float."""
    residuals = np.asarray(predictions) - np.asarray(targets)
    return np.sqrt(np.mean(residuals ** 2)).tolist()
# Function to filter using a Butterworth filter
def Butterworth(Signal, Type = "low",CutoffFrequency = 7., SamplingFrequency = 200):
    """Zero-phase first-order Butterworth filter.

    The cutoff is normalised to the Nyquist frequency and the filter is
    applied forward and backward (filtfilt), so no phase shift is
    introduced.  Returns the filtered samples as a list.
    """
    nyquist = SamplingFrequency / 2.0
    numer, denom = signal.butter(1, CutoffFrequency / nyquist, Type)
    filtered = signal.filtfilt(numer, denom, np.asarray(Signal))
    return filtered.tolist()
# Function to extract h p delta values from the folder naming
def FilePath2hpdelta(SingleFile):
    """Parse the h, p and delta refinement values encoded in a result
    file's parent directory name.

    The directory is expected to look like e.g. 'h100-p3-d50.005':
    digits of the first token give h, of the second give p, and digits
    plus '.' of the third give delta.  Returns (h_ref, p_ref, delta_ref).
    """
    parent = os.path.split(SingleFile)[0]
    tokens = parent.split("/")[-1].split("-")
    h_ref = int("".join(ch for ch in tokens[0] if ch.isdigit()))
    p_ref = int("".join(ch for ch in tokens[1] if ch.isdigit()))
    delta_ref = float("".join(ch for ch in tokens[2] if ch.isdigit() or ch == "."))
    return h_ref, p_ref, delta_ref
def CalculateFFT(Data, sampling = 0.005, TMax = 4.0):
    """One-sided amplitude spectrum of ``Data``.

    Returns (frequencies, amplitudes), each of length N//2 where
    N = TMax / sampling.  NOTE(review): Data is assumed to contain
    exactly N samples - confirm against callers.
    """
    num_samples = int(TMax/sampling)
    half = num_samples // 2
    frequencies = np.linspace(0.0, 1.0/(2.0*sampling), half)
    spectrum = fft(Data)
    # Scale to single-sided amplitudes.
    amplitudes = (2.0/num_samples) * np.abs(spectrum[:half])
    return (frequencies, amplitudes)
# +
# Class and functions to extract and plot Parameters of Interest for error analysis
class ReferenceStorage:
    """Accumulates reference slip / slip-rate time profiles together
    with derived markers (peak and "tail" slip rate and their timings)
    used for error analysis against simulation output.

    All per-receiver attributes are parallel lists: entry i of each
    list describes the i-th stored reference receiver.
    """

    def __init__(self):
        self.ReferenceName = ""
        self.ReferencePos = []
        self.ReferenceSlip = []
        self.ReferenceSlipRate = []
        self.ReferenceTime = []
        self.PeakSR = []
        self.PeakSRTime = []
        self.TailSR = []
        self.TailSRTime = []
        self.PT_SRdiff = []
        self.PT_SRdiffTime = []

    def StoreReference(self, RefSourceName, RefPosition, RefSlipData, RefSRData, RefTimeData, epsilon = 1e-3):
        """Append one reference receiver and compute its markers.

        The "tail" is the first slip-rate sample rising above epsilon;
        the peak is the (first) maximum of the slip-rate series.
        """
        self.ReferenceName = RefSourceName
        self.ReferencePos.append(RefPosition)
        self.ReferenceSlip.append(RefSlipData)
        self.ReferenceSlipRate.append(RefSRData)
        self.ReferenceTime.append(RefTimeData)
        # Peak slip rate: index of the first occurrence of the maximum.
        peak_idx = max(range(len(RefSRData)), key=RefSRData.__getitem__)
        self.PeakSR.append(RefSRData[peak_idx])
        self.PeakSRTime.append(RefTimeData[peak_idx])
        # Tail slip rate: first sample exceeding epsilon.
        tail_idx = next((i for i, value in enumerate(RefSRData) if value > epsilon), None)
        self.TailSR.append(RefSRData[tail_idx])
        self.TailSRTime.append(RefTimeData[tail_idx])
        # Peak-minus-tail differences in magnitude and arrival time.
        self.PT_SRdiff.append(RefSRData[peak_idx] - RefSRData[tail_idx])
        self.PT_SRdiffTime.append(RefTimeData[peak_idx] - RefTimeData[tail_idx])
# Class to initialize, append and store an object to store the Variables of Interest
# Class to initialize, append and store an object to store the Variables of Interest
class VOI_TimeProfile:
    """Collects per-receiver "variables of interest" (peak/tail slip
    rate and their arrival times), indexed by the h/p/delta mesh
    refinement of the run each receiver came from.

    All attributes are parallel lists: entry i of every list describes
    the same stored receiver.
    """
    def __init__(self):
        self.P_Refinement = []
        self.H_Refinement = []
        self.delta_Refinement = []
        self.Coord = []
        self.PeakSlipRate = []
        self.PeakSlipRateTimeArrival = []
        self.TailSlipRate = []
        self.TailSlipRateTimeArrival = []
        self.PeakTailSR = []
        self.PeakTailSRTime = []
    #end __init__
    def __repr__(self):
        return "Number of stored -Variables Of Interest- is {}".format(self.NumStoredObjects())
    #end __repr__
    def __str__(self):
        # One summary line per stored receiver.
        TemplateText = "{idx}: p-ref: {p}, h-ref: {h}, delta-ref {delta}, Receiver Coord: {Coord}\n"
        OutputText = [TemplateText.format(idx = idx, p = self.P_Refinement[idx],h = self.H_Refinement[idx], delta = self.delta_Refinement[idx],
                                          Coord = self.Coord[idx]) for idx in range(self.NumStoredObjects())]
        return "".join(OutputText)
    #end __str__
    def NumStoredObjects(self):
        # All parallel lists share this length.
        return len(self.Coord)
    def GetPeakInList(self,TPOItemList,TPOItemTimeList):
        # Maximum of the series and the time at which it (first) occurs.
        idx = TPOItemList.index(max(TPOItemList))
        PeakInList = TPOItemList[idx]
        PeakInListTimeArrival = TPOItemTimeList[idx]
        return PeakInList, PeakInListTimeArrival
    def GetFirstNonZeroInList(self, TPOItemList, TPOItemTimeList, epsilon=1e-3):
        # First value rising above epsilon (the signal "tail") and its time.
        # NOTE(review): if no sample exceeds epsilon, idx stays None and
        # the indexing below raises TypeError - confirm intended.
        idx = next((TPOItemList.index(x) for x in TPOItemList if x > epsilon), None)
        PeakSlipRate = TPOItemList[idx]
        PeakSlipRateTimeArrival = TPOItemTimeList[idx]
        return PeakSlipRate, PeakSlipRateTimeArrival
    def addNewItem(self, P_ref, H_ref, delta_ref, TPOItem, **kwargs):
        # Append one receiver profile; TPOItem must expose Coord, VelX
        # (slip rate) and Time.  kwargs are forwarded to the tail
        # detection (e.g. epsilon).
        self.P_Refinement.append(P_ref)
        self.H_Refinement.append(H_ref)
        self.delta_Refinement.append(delta_ref)
        self.Coord.append(TPOItem.Coord[0])
        TailMagnitudeSR, TailTimingSR = self.GetFirstNonZeroInList(TPOItem.VelX, TPOItem.Time, **kwargs)
        PeakMagnitudeSR, PeakTimingSR = self.GetPeakInList(TPOItem.VelX, TPOItem.Time,)
        self.PeakSlipRate.append(PeakMagnitudeSR)
        self.PeakSlipRateTimeArrival.append(PeakTimingSR)
        self.TailSlipRate.append(TailMagnitudeSR)
        self.TailSlipRateTimeArrival.append(TailTimingSR)
        self.PeakTailSR.append(PeakMagnitudeSR - TailMagnitudeSR)
        self.PeakTailSRTime.append(PeakTimingSR - TailTimingSR)
    def ConvertPOI2Pandas(self):
        # Flatten the parallel lists into a tidy DataFrame (one row per
        # stored receiver).
        zippedList = list(zip(self.P_Refinement, self.H_Refinement, self.delta_Refinement,
                              self.Coord, self.PeakSlipRate, self.PeakSlipRateTimeArrival,
                              self.TailSlipRate, self.TailSlipRateTimeArrival,
                              self.PeakTailSR, self.PeakTailSRTime))
        ColumnName = ["pref", "href", "deltaref", "Loc", "PeakSR", "PeakSRtime",
                      "TailSR", "TailSRtime", "PTSRdiff", "PTSRtimediff"]
        dfObject = pd.DataFrame(zippedList, columns= ColumnName)
        return dfObject
def Fill_VoiObject(ListOfFolders,VOI_Object):
    """Populate a VOI_TimeProfile from every pickled time-profile file
    in ListOfFolders; the h/p/delta refinement is parsed from each
    file's parent folder name."""
    for SingleFile in ListOfFolders:
        h_ref, p_ref, delta_ref = FilePath2hpdelta(SingleFile)
        head, tail = os.path.split(SingleFile)
        # LoadPickleFile presumably comes from the Lib_GeneralFunctions
        # star import at the top of the file - confirm.
        ListOfTPObjects = LoadPickleFile(Filename = tail,FolderPath = head+"/")
        for idx, STP in enumerate(ListOfTPObjects):
            VOI_Object.addNewItem(p_ref, h_ref, delta_ref, STP)
# +
def PlotBimatReference(ax, path, SlipSlipRate):
    """Plot the four Bimat reference station traces (black lines) on ax.

    SlipSlipRate selects the column: 'Slip', 'SlipRate' or 'Shear'.
    The traces are low-pass filtered with Butterworth() before plotting.
    """
    FileNameDict = {"Slip" : "slip (m)", "SlipRate" : "slip rate (m/s)", "Shear":"shear stress (Pa)"}
    for NumStat in range(1,5):
        # Whitespace-delimited Bimat observer output; columns named manually.
        Station = pd.read_csv(path + "Bimat/pcsi_SSC.obs{}".format(NumStat), delimiter=r"\s+",header=None,
                              names=["time (s)" , "slip rate (m/s)", "slip (m)", "shear stress (Pa)", "normal stress (Pa)","strength (Pa)"])
        ax.plot(Station["time (s)"],Butterworth(Station[FileNameDict[SlipSlipRate]]), c = "k", ls= "-", zorder=1)
    ax.set_xlabel("time (s)")
    ax.set_ylabel(FileNameDict[SlipSlipRate])
def PlotSem2dpackReference(ax, path, SlipSlipRate):
    """Plot every sem2dpack reference trace found under path on ax.

    SlipSlipRate in {'Slip', 'SlipRate', 'Shear'} selects the file set;
    traces are low-pass filtered with Butterworth() before plotting.
    """
    FileNameDict = {"Slip" : "slip", "SlipRate" : "sr", "Shear":"sxy"}
    ListReference = sorted(glob(path+"sem2dpack/sem2d-{}-*.txt".format(FileNameDict[SlipSlipRate])))
    for idx, item in enumerate(ListReference):
        line = pd.read_csv(item,header=None)
        if SlipSlipRate=="Shear":
            # Scale shear stress to Pa (note: 10e5 == 1e6).
            line[1] = line[1]*10e5
        ax.plot(line[0],Butterworth(line[1]), label = "", c = "k", ls = "-", zorder=1)
    ax.set_xlabel("time (s)")
    # Re-map the short file keys to human-readable axis labels.
    FileNameDict = {"Slip" : "slip (m)", "SlipRate" : "slip rate (m/s)", "Shear":"shear stress (Pa)"}
    ax.set_ylabel(FileNameDict[SlipSlipRate])
def PlotHandPickedReference(ax, path, SlipSlipRate):
    """Plot hand-digitized reference curves (CSV with X/Y columns)
    stored under path/HandPicking/<SlipSlipRate>/ on ax."""
    ListReference = sorted(glob(path + "HandPicking/{}/".format(SlipSlipRate)+"*.csv"))
    for idx, item in enumerate(ListReference):
        line = pd.read_csv(item)
        ax.plot(line["X"],line["Y"],c = "k", label = "", ls = "-", zorder=1)
    ax.set_xlabel("time (s)")
    FileNameDict = {"Slip" : "slip (m)", "SlipRate" : "slip rate (m/s)", "Shear":"shear stress (Pa)"}
    ax.set_ylabel(FileNameDict[SlipSlipRate])
def PlotReference(ax,SlipSlipRate):
    """Plot the analytical Kostrov reference solution on ax for the
    receivers at r = 0, 2, 4, 6, 8 km.

    Material parameters (stress drop, shear modulus, rupture speed) are
    hard-coded to match the simulation setup.
    """
    Stress = 10.0e6
    ElasticShearModulus = 13.333e9
    Velocity = 2.0e3
    TimeList = np.arange(0, 5, 0.01).tolist()
    if SlipSlipRate=="Slip":
        for r in [0,2000,4000,6000,8000]:
            Slip = [CalculateKostrovSolution(Time,Stress,ElasticShearModulus, Velocity, r) for Time in TimeList]
            ax.plot(TimeList,Slip,ls="-",c="k", lw=1, zorder=1)
    elif SlipSlipRate=="SlipRate":
        for r in [0,2000,4000,6000,8000]:
            # Slip rate is obtained by numerical differentiation of slip.
            Slip = [CalculateKostrovSolution(Time,Stress,ElasticShearModulus, Velocity, r) for Time in TimeList]
            SlipRate = np.gradient(Slip, TimeList[1]-TimeList[0],edge_order=2)
            ax.plot(TimeList, SlipRate, ls = "-", c = "k", lw = 1, zorder=1)
# +
# List of functions related to loading and plotting a slip/ slip rate reference.
# This reference can be extracted from the output of either Bimat or sem2dpack from Ampuero's programs
# For these plots, the filename is hard-coded
# also at the end is the legend style for reference vs colors for the receivers
# NOTE(review): exact duplicate of the PlotBimatReference defined in an
# earlier cell; this later definition is the one in effect at runtime.
def PlotBimatReference(ax, path, SlipSlipRate):
    """Plot the four Bimat reference station traces (black lines) on ax."""
    FileNameDict = {"Slip" : "slip (m)", "SlipRate" : "slip rate (m/s)", "Shear":"shear stress (Pa)"}
    for NumStat in range(1,5):
        Station = pd.read_csv(path + "Bimat/pcsi_SSC.obs{}".format(NumStat), delimiter=r"\s+",header=None,
                              names=["time (s)" , "slip rate (m/s)", "slip (m)", "shear stress (Pa)", "normal stress (Pa)","strength (Pa)"])
        ax.plot(Station["time (s)"],Butterworth(Station[FileNameDict[SlipSlipRate]]), c = "k", ls= "-", zorder=1)
    ax.set_xlabel("time (s)")
    ax.set_ylabel(FileNameDict[SlipSlipRate])
# NOTE(review): exact duplicate of the PlotSem2dpackReference defined in
# an earlier cell; this later definition is the one in effect at runtime.
def PlotSem2dpackReference(ax, path, SlipSlipRate):
    """Plot every sem2dpack reference trace found under path on ax."""
    FileNameDict = {"Slip" : "slip", "SlipRate" : "sr", "Shear":"sxy"}
    ListReference = sorted(glob(path+"sem2dpack/sem2d-{}-*.txt".format(FileNameDict[SlipSlipRate])))
    for idx, item in enumerate(ListReference):
        line = pd.read_csv(item,header=None)
        if SlipSlipRate=="Shear":
            # Scale shear stress to Pa (note: 10e5 == 1e6).
            line[1] = line[1]*10e5
        ax.plot(line[0],Butterworth(line[1]), label = "", c = "k", ls = "-", zorder=1)
    ax.set_xlabel("time (s)")
    FileNameDict = {"Slip" : "slip (m)", "SlipRate" : "slip rate (m/s)", "Shear":"shear stress (Pa)"}
    ax.set_ylabel(FileNameDict[SlipSlipRate])
# NOTE(review): exact duplicate of the PlotHandPickedReference defined
# in an earlier cell; this later definition is the one in effect.
def PlotHandPickedReference(ax, path, SlipSlipRate):
    """Plot hand-digitized reference curves (CSV X/Y columns) on ax."""
    ListReference = sorted(glob(path + "HandPicking/{}/".format(SlipSlipRate)+"*.csv"))
    for idx, item in enumerate(ListReference):
        line = pd.read_csv(item)
        ax.plot(line["X"],line["Y"],c = "k", label = "", ls = "-", zorder=1)
    ax.set_xlabel("time (s)")
    FileNameDict = {"Slip" : "slip (m)", "SlipRate" : "slip rate (m/s)", "Shear":"shear stress (Pa)"}
    ax.set_ylabel(FileNameDict[SlipSlipRate])
# NOTE(review): exact duplicate of the PlotReference defined in an
# earlier cell; this later definition is the one in effect at runtime.
def PlotReference(ax,SlipSlipRate):
    """Plot the analytical Kostrov reference solution on ax for the
    receivers at r = 0, 2, 4, 6, 8 km (hard-coded material parameters)."""
    Stress = 10.0e6
    ElasticShearModulus = 13.333e9
    Velocity = 2.0e3
    TimeList = np.arange(0, 5, 0.01).tolist()
    if SlipSlipRate=="Slip":
        for r in [0,2000,4000,6000,8000]:
            Slip = [CalculateKostrovSolution(Time,Stress,ElasticShearModulus, Velocity, r) for Time in TimeList]
            ax.plot(TimeList,Slip,ls="-",c="k", lw=1, zorder=1)
    elif SlipSlipRate=="SlipRate":
        for r in [0,2000,4000,6000,8000]:
            # Slip rate via numerical differentiation of the slip curve.
            Slip = [CalculateKostrovSolution(Time,Stress,ElasticShearModulus, Velocity, r) for Time in TimeList]
            SlipRate = np.gradient(Slip, TimeList[1]-TimeList[0],edge_order=2)
            ax.plot(TimeList, SlipRate, ls = "-", c = "k", lw = 1, zorder=1)
# Here are the style definition and legend functions
def addRefnColorLegend(ax,SlipSlipRate):
    """Overlay the sem2dpack reference curves on ax and attach a legend
    mapping line colors to receiver distances (0-8000 m).

    The alternative reference sources are kept commented out for easy
    switching.
    """
    Refpath ="/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/Reference/"
    #PlotReference(ax,SlipSlipRate)
    PlotSem2dpackReference(ax, Refpath, SlipSlipRate)
    #PlotHandPickedReference(ax, Refpath, SlipSlipRate)
    # Proxy artists: black = reference, colormap samples = receivers.
    custom_lines = [Line2D([0],[0], c="k", ls= "-", lw=2)]
    [custom_lines.append(Line2D([0],[0], c = cmap(numCol/4), ls= "-", lw=2)) for numCol in range(5)]
    legend2 = plt.legend(custom_lines, ['Reference', '0m', '2000m','4000m','6000m', '8000m'],loc=1)
    ax.add_artist(legend2)
# +
def PlotTimeProfileSet(ax, Set,SlipSlipRate,title,Filtered = False, absolute = False, **kwargs):
    """Plot a set of receiver time profiles on ax, color-coded by
    receiver x-coordinate (0-8000 m via the module-level cmap).

    Set            : iterable of time-profile objects exposing DispX,
                     VelX, Time and Coord.
    SlipSlipRate   : 'Slip' plots DispX, 'SlipRate' plots VelX.
    Filtered       : low-pass Butterworth() each series first.
    absolute       : take element-wise absolute values.
    kwargs         : forwarded to ax.plot.
    """
    UnitsDict = {"Slip" : "Slip [m]", "SlipRate" : "Slip Rate [m/s]"}
    ax.set(xlabel = 'Time [s]', ylabel = UnitsDict[SlipSlipRate],
           title = title)
    # First pass: build (optionally filtered / rectified) series.
    OrdinateVariableList=[]
    for idx,item in enumerate(Set):
        if (SlipSlipRate == "Slip"):
            OrdinateVariableList.append([a for a in item.DispX])
        elif (SlipSlipRate == "SlipRate"):
            OrdinateVariableList.append([a for a in item.VelX])
        if (Filtered):
            OrdinateVariableList[idx] = [a for a in Butterworth(OrdinateVariableList[idx])]
        if (absolute):
            OrdinateVariableList[idx] = [abs(a) for a in OrdinateVariableList[idx]]
    # Second pass: plot, one color per receiver position.
    for idx,item in enumerate(Set):
        ax.plot(item.Time, OrdinateVariableList[idx], c = cmap(item.Coord[0]/8000), label = "{x}m".format(x = item.Coord[0]), **kwargs)
# +
def InitDefaultFigure():
    """Create the standard 10x5 figure/axes pair with x limited to
    [0, 4] s and y to [-1, 10].

    Uses constrained_layout + gridspec when the installed matplotlib
    supports them, falling back to a plain subplot otherwise.

    Returns (fig, ax).
    """
    try:
        fig = plt.figure(figsize = (10,5), constrained_layout=True)
        gs = fig.add_gridspec(1, 1)
        ax = fig.add_subplot(gs[:, :])
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed SystemExit
        # and KeyboardInterrupt; Exception keeps the old-matplotlib
        # fallback without trapping those.
        fig = plt.figure(figsize = (10,5))
        ax = fig.add_subplot(1,1,1)
    ax.set_xlim([0,4])
    ax.set_ylim([-1,10])
    return fig, ax
def PlotSpecificLegend(ax, ListOfFiles,SlipSlipRate,title,LegendNamesList,Filtered=True,**kwargs):
    """Load each pickled time-profile set and plot it on ax, then add
    the reference curves and receiver-color legend.

    NOTE(review): LegendNamesList and the local `hpd` are currently
    unused - presumably leftovers from a per-file legend variant.
    """
    for iidx,SingleFile in enumerate(ListOfFiles):
        head, tail = os.path.split(SingleFile)
        File = LoadPickleFile(Filename = tail,FolderPath = head+"/")
        # zorder increases per file so later files draw on top.
        PlotTimeProfileSet(ax, File, SlipSlipRate, title,Filtered, zorder= iidx + 2,**kwargs )
        hpd = FilePath2hpdelta(SingleFile)
    addRefnColorLegend(ax,SlipSlipRate)
    return ax
# +
# LaTeX labels for the four traction-weighting-function variants.
LegendNamesList=[r"$\omega(|\sigma_{xy}^{trial}|)$, $w=tanh(\frac{\phi\ 2p}{dy})$",
                 r"$\omega(\sigma_{xy}^{trial})$, $w=tanh(\frac{\phi\ 2p}{dy})$",
                 r"$\omega(\sigma_{xy}^{trial})$, $w^\pm=\frac{1}{2}tanh(\frac{\phi\ 2p}{dy}\pm1/2)\pm\ 1/2$",
                 r"$\omega(\sigma_{xy}^{trial})$, $w^\pm=|\frac{1}{2}tanh(\frac{\phi\ 2p}{dy}\pm1/2)\pm\ 1/2|$",
                 ]
# 2020-07-23 run, slip rate: filtered (top) vs unfiltered (bottom).
path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/20200723/"
ListOfFolders = sorted(glob(path+"*/*.pickle"))[:1]
[print(A) for A in ListOfFolders]
fig = plt.figure(figsize = (10,8), constrained_layout=True)
gs = GridSpec(2, 1, figure=fig)
ax1 = fig.add_subplot(gs[0, :])
ax1.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax1,ListOfFolders,"SlipRate","Weighthing functions for traction\n100dx - p = 3 - $\delta$ = 50.005 - $\eta\ =\ 0.2\Delta t$",LegendNamesList)
ax2 = fig.add_subplot(gs[1, :])
ax2.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax2,ListOfFolders,"SlipRate","Non-Filtered",LegendNamesList,Filtered=False)
# +
# Same run, slip instead of slip rate.
LegendNamesList=[r"$\omega(|\sigma_{xy}^{trial}|)$, $w=tanh(\frac{\phi\ 2p}{dy})$",
                 r"$\omega(\sigma_{xy}^{trial})$, $w=tanh(\frac{\phi\ 2p}{dy})$",
                 r"$\omega(\sigma_{xy}^{trial})$, $w^\pm=\frac{1}{2}tanh(\frac{\phi\ 2p}{dy}\pm1/2)\pm\ 1/2$",
                 r"$\omega(\sigma_{xy}^{trial})$, $w^\pm=|\frac{1}{2}tanh(\frac{\phi\ 2p}{dy}\pm1/2)\pm\ 1/2|$",
                 ]
path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/20200723/"
ListOfFolders = sorted(glob(path+"*/*.pickle"))[:1]
[print(A) for A in ListOfFolders]
fig = plt.figure(figsize = (10,8), constrained_layout=True)
gs = GridSpec(2, 1, figure=fig)
ax1 = fig.add_subplot(gs[0, :])
ax1.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax1,ListOfFolders,"Slip","Weighthing functions for traction\n100dx - p = 3 - $\delta$ = 50.005 - $\eta\ =\ 0.2\Delta t$",LegendNamesList)
ax2 = fig.add_subplot(gs[1, :])
ax2.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax2,ListOfFolders,"Slip","Non-Filtered",LegendNamesList,Filtered=False)
# +
# 2020-07-26 run (50dx, p=4), slip rate.
path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/20200726/"
ListOfFolders = sorted(glob(path+"*/*.pickle"))
[print(A) for A in ListOfFolders]
fig = plt.figure(figsize = (10,8), constrained_layout=True)
gs = GridSpec(2, 1, figure=fig)
ax1 = fig.add_subplot(gs[0, :])
ax1.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax1,ListOfFolders,"SlipRate","Weighthing functions for traction\n50dx - p = 4 - $\delta$ = 25.003 - $\eta\ =\ 0.6\Delta t$",LegendNamesList)
ax2 = fig.add_subplot(gs[1, :])
ax2.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax2, ListOfFolders, "SlipRate", "Non-Filtered output", LegendNamesList, Filtered = False)
# +
# 2020-07-26 run, slip.
path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/20200726/"
ListOfFolders = sorted(glob(path+"*/*.pickle"))
[print(A) for A in ListOfFolders]
fig = plt.figure(figsize = (10,8), constrained_layout=True)
gs = GridSpec(2, 1, figure=fig)
ax1 = fig.add_subplot(gs[0, :])
ax1.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax1,ListOfFolders,"Slip","Weighthing functions for traction\n50dx - p = 4 - $\delta$ = 25.003 - $\eta\ =\ 0.6\Delta t$",LegendNamesList)
ax2 = fig.add_subplot(gs[1, :])
ax2.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax2,ListOfFolders,"Slip","Non-Filtered",LegendNamesList,Filtered=False)
# +
# 2020-07-03 run, slip rate only.
path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/20200703/"
ListOfFolders = sorted(glob(path+"*/*.pickle"))[:1]
[print(A) for A in ListOfFolders]
fig = plt.figure(figsize = (10,8), constrained_layout=True)
gs = GridSpec(2, 1, figure=fig)
ax1 = fig.add_subplot(gs[0, :])
ax1.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax1,ListOfFolders,"SlipRate","Weighthing functions for traction\n50dx - p = 4 - $\delta$ = 25.003 - $\eta\ =\ 0.6\Delta t$",LegendNamesList)
ax2 = fig.add_subplot(gs[1, :])
ax2.set_xlim([0,4])
if ListOfFolders != []:
    PlotSpecificLegend(ax2, ListOfFolders, "SlipRate", "Non-Filtered output", LegendNamesList, Filtered = False)
# -
| PythonCodes/.ipynb_checkpoints/Checkpoint-SSC-plotting-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Timecluster hub
# > Hub for different visual analytics approaches for high-dimensional time series. Inspired by the paper ["Timecluster: dimension reduction applied to temporal data for visual analytics"](https://link.springer.com/article/10.1007/s00371-019-01673-y)
# The main intention of this repo is twofold:
# 1. Replicate the ideas of the Timecluster paper, and apply them to the data from PACMEL.
# 2. Extend the ideas of the paper for high-dimensional time series. The idea is to find the most important variables that make that a time window from
# the original space (high-dimensional time series) is mapped to a specific point of the final 2D space, and focus only on them, to make it easier for the
# domain expert to analyse and cluster the behaviour of the process.
#
# The visual part of this repo can also be used as a testbed to validate different approaches to unsupervised learning for time series data. This includes clustering, anomaly detection, segmentation, annotation...
# ## Deploy
# To run the notebooks and the app, install `docker` and `docker-compose` in your system.
# Then, create a new *.env* file in the root of the project following the structure:
# ```
# # The name of the docker-compose project
# COMPOSE_PROJECT_NAME=your_project_name
# # The user ID you are using to run docker-compose
# USER_ID=your_numeric_id
# # The group ID you are using to run docker-compose (you can get it with id -g in a terminal)
# GROUP_ID=your_numeric_id
# # The user name assigned to the user id
# USER_NAME=your_user_name
# # The port from which you want to access Jupyter lab
# JUPYTER_PORT=XXXX
# # The port from which you want to access RStudio server
# RSTUDIO_PORT=XXXX
# # The password you want to access RStudio server (user is given by USER_NAME)
# RSTUDIO_PASSWD=<PASSWORD>
# # The path to your data files to train/test the models
# LOCAL_DATA_PATH=/path/to/your/data
# # The W&B personal API key (see https://wandb.ai/authorize)
# WANDB_API_KEY=your_wandb_api_key
# ```
# You'll also need to have a `.gitconfig` file in your home folder. It can be an empty file that you create manually, or it can contain your git global configuration. For the latter case, run:
# - `git config --global user.name "YOUR NAME IN THIS GITLAB INSTANCE"`
# - `git config --global user.email "YOUR EMAIL IN THIS GITLAB INSTANCE"`
#
# This will automatically create the `~/.gitconfig` file in your home folder.
#
# Finally, in a terminal located in the root of this repository, run:
#
# ```docker-compose up -d --build```
#
# then go to `localhost:{{JUPYTER_PORT}}` to run the notebooks or go to `localhost:{{RSTUDIO_PORT}}` to run the app. In case you are working in a remote server, replace `localhost` with the IP of your remote server.
# ## Contribute
# This project has been created using [nbdev](https://github.com/fastai/nbdev), a library that allows to create Python projects directly from Jupyter Notebooks. Please refer to this library when adding new functionalities to the project, in order to keep the structure of it.
#
# We recommend using the following procedure to contribute and resolve issues in the repository:
#
# 1. Because the project uses nbdev, we need to run `nbdev_install_git_hooks` the first time after the repo is cloned and deployed; this ensures that our notebooks are automatically cleaned and trusted whenever we push to Github/Gitlab. The command has to be run from within the container. Also, it can be run from outside if you pip install nbdev in your local machine.
#
# 1. Create a local branch in your development environment to solve the issue XX (or add a new functionality), with the name you want to give your merge request (use something that will be easy for you to remember in the future if you need to update your request):
# ```
# git checkout -b issueXX
# ```
#
# 2. Make whatever changes you want to make in the code and notebooks, and remember to run nbdev_build_lib when you're done to ensure that the libraries are built from your notebook changes (unless you only changed markdown, in which case that's not needed). It's also a good idea to check the output of git diff to ensure that you haven't accidentally made more changes than you planned.
#
# 3. Make a commit of the changes made
# ```
# git commit -am "Fix issue #XX"
# ```
#
# 4. Test that there are not merging problems in the Jupyter Notebooks with the function [**nbdev_fix_merge**](https://nbdev.fast.ai/cli#nbdev_fix_merge)
#
# 5. Push your local branch to a branch in the gitlab repository with an identifying name:
# ```
# git push -u origin HEAD
# ```
# 6. When the push is made, a link will appear in the terminal to create a merge request. Click on it.
# ```
# remote:
# remote: To create a merge request for test_branch, visit:
# remote: https://gitlab.geist.re/pml/x_timecluster_extension/-/merge_requests/new?merge_request%5Bsource_branch%5D=issueXX_solved
# remote:
# ```
# 7. In the gitlab website:
# * Write in the description what is the problem to solve with your branch using a hyperlink to the issue (just use the hashtag symbol "#" followed by the issue number)
# * Click on the option "Delete source branch when merge request is accepted" and assign the merge to your profile.
# * Click on the button "Create merge request"
# 
#
# 8. Wait to the merge to be accepted. In case you're solving an issue, we recommend to move the issue to the field "In review" (in the Issue Board). To keep your branch up to date with the changes to the main repo, run:
# ```
# git pull upstream master
# ```
#
# 9. If there are no problems, the merge request will be accepted and the issue will be closed. Once your PR has been merged or rejected, you can delete your branch if you don't need it any more:
# ```
# git branch -d issueXX
# ```
| nbs/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Preprocessing for ML/DL Models
#
# This is the tutorial notebook for how to preprocess data for the **stress determinator**.
#
# **Data used:**
#
# All the data sourced from Dr. Wong's mouse neuron experiments in Douglas Research Center with two main categories used in our model training experiments:
# * Bullying mouse in the enclosure
#
#
# <img src="https://github.com/Adrian107/Neural-Decoding-Project/blob/master/img/one_free.PNG?raw=true" width="200"/>
#
# * Bullying and defeated mice are both free to move in the cage
#
# <img src="https://github.com/Adrian107/Neural-Decoding-Project/blob/master/img/two_free.PNG?raw=true" width="150"/>
#
#
# **Important:**
#
# In order to make this experiment reproducible, please make sure the following data are available for input:
# * Neuron activity data, primarily extracted from [CNMF-E](https://github.com/zhoupc/CNMF_E), available in CNMF-E folder
# * Mouse behavioral data, primarily extracted and labeled from [DeepLabCut](https://github.com/AlexEMG/DeepLabCut), available in DeepLabCut folder
# * Timestamp file that automatically generated by the camera and its application is file for aligning behavioral camera and neuron camera, available in the index format of mouse experiments date and time (./Raw data).
# #### Packages install
import cv2
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, TensorDataset
from sklearn.model_selection import train_test_split
from preprocessor import distOneF,distTwoF,fourPointTransform,locCoordConvert,ptsCoordConvert,align
# preprocessor in utils folder
# ## Data read and preparation
#
# ### Overall procedure:
#
# 1. **Align** the frames between behavioral data and neuron data by timestamp file
# 1. **Determine** the four corner coordinate points
# 2. **Transform** the defeated behavioral data from pixel-based to centimeter-based
# 3. **Transform** bullying mouse behavioral data
# 4. Calculate the two mice real **distance**
# 5. **Classify** the distance into two groups:
# * interacted with bullying mouse: <10/15 cm
# * no interaction: >10/15 cm
#
#
# ### Some term explanations:
#
# * **gap_time**: In the raw video, there might be a gap time that the mouse is yet in the cage, so excludes this part helps the experimental veracity. Two ways to determine the gap time:
# * CNMF-E folder may contain the gap information, such as 1056_SI_A_Substack (240-9603), means after 240 frames, the mouse then shows up in the cage
# * Manually check the video one by one
# * **timestamp**: The file separator is '\t'; exclude the first row (the header) and rename the columns to ["camNum","frameNum","sysClock","buffer"]
# * **video coordinates**: In the raw video, upper-left is (0,0), horizontal is X-axis, and vertical is Y-axis. While looking for the four corner points, screenshot one frame from the raw video, then import to image application, say photoshop/online RGB checker, to check its coordinates. (automation could be possible, but take time). Here are some corner points references for the experiment video:
# * Two free-moving mice scenario: np.array([(40,60),(213,62), (205,405),(42,405)], np.float32)
# * One free-moving mouse scenario: np.array([(85,100),(85,450), (425,440), (420,105)], np.float32)
#
# In addition, the actual size of the cage varies:
# * Two free-moving mice: 22 * 44
# * One free-moving mouse: 44 * 44
#
# but it all depends on which scenario the experiment chose
#
# * **bullying mouse position**:
# * For one free-moving scenario, the bullying mouse is fixed inside the enclosure, its position is therefore fixed as well. In such case, we will use the central point of the enclosure as the bullying mouse position
# * For the two free-moving scenario, the bullying mouse position is read from DeepLabCut behavioral data. Therefore the distance calculation is different
#
# * **one-hot encoding**: Convert numerical labelled distance to one-hot format
#
# * **neuron_A/B**:
# * neuron_A: no bullying mouse
# * neuron_B: bullying mouse presents
#
# +
# Frame offsets before the mouse appears in the cage (see "gap_time" notes above);
# frames prior to these indices are discarded from every stream.
gap_time_A = 240
gap_time_B = 150
# DeepLabCut behavioral tracking for sessions A and B.
# skiprows=2 drops the DLC multi-row header; .iloc[gap_time:] removes pre-entry frames.
dlc_A = pd.read_csv("//DMAS-WS2017-006/E/A RSync FungWongLabs/DLC_Data/1053 SI_A, Mar 22, 9 14 20/videos/\
1056 SI_A, Mar 22, 12 45 13DeepCut_resnet50_1053 SI_A, Mar 22, 9 14 20Jul31shuffle1_600000.h.csv", skiprows = 2).iloc[gap_time_A:,]
dlc_B = pd.read_csv("//DMAS-WS2017-006/E/A RSync FungWongLabs/DLC_Data/1053 SI_A, Mar 22, 9 14 20/videos/\
1056 SI_B, Mar 22, 12 52 59DeepCut_resnet50_1053 SI_A, Mar 22, 9 14 20Jul31shuffle1_600000.h.csv", skiprows = 2).iloc[gap_time_B:,]
# CNMF-E neuron traces, transposed to frames x neurons.
# NOTE(review): neuron_A's file name already encodes the 240-frame trim
# ("Substack (240-9603)"), so no .iloc trim is applied here, while neuron_B
# is trimmed in code — confirm this asymmetry is intended.
neuron_A = pd.read_csv("//Dmas-ws2017-006/e/A RSync FungWongLabs/CNMF-E/1056/SI/1056_SI_A_Substack (240-9603)_source_extraction/frames_1_9364/LOGS_15-Sep_13_52_07/1056SI_A_240-9603.csv", header = None).T
neuron_B = pd.read_csv("//Dmas-ws2017-006/e/A RSync FungWongLabs/CNMF-E/1056/SI/1056_SI_B_source_extraction/frames_1_27256/LOGS_19-Apr_00_38_59/1056SI_B.csv", header = None).T.iloc[gap_time_B:,]
# Camera timestamp files ('\t'-separated; first header row skipped, columns renamed).
timestamp_A = pd.read_csv("//DMAS-WS2017-006/H/Donghan's Project Data Backup/Raw Data/Witnessing/female/Round 8/3_22_2019/H12_M45_S13/timestamp.dat", \
sep='\t', header = None, skiprows=1, names = ["camNum","frameNum","sysClock","buffer"])
timestamp_B = pd.read_csv("//DMAS-WS2017-006/H/Donghan's Project Data Backup/Raw Data/Witnessing/female/Round 8/3_22_2019/H12_M52_S59/timestamp.dat", \
sep='\t', header = None, skiprows=1, names = ["camNum","frameNum","sysClock","buffer"])
# Keep only timestamps recorded after the gap period.
timestamp_A = timestamp_A[timestamp_A["frameNum"]>=gap_time_A]
timestamp_B = timestamp_B[timestamp_B["frameNum"]>=gap_time_B]
# f = open("1056SIA_test_0.0001_0.3_lstm_10_pytorch.txt",'w+')
# ---- Scenario 1: ONE free-moving mouse (bully fixed inside the enclosure) ----
msCam, behavCam = align(neuron_A, dlc_A, timestamp_A, gap_time_A) # align neuron and behavior frames via timestamps
pts = np.array([(85,100),(85,450), (425,440), (420,105)], np.float32) # four corner points (pixel coords; see notes above)
newLoc = locCoordConvert(behavCam,pts,44,44) # pixel -> cm coordinates for the 44 x 44 cm cage
referPt = ptsCoordConvert(pts, [400,270], 44, 44)[0] # fixed bully position (enclosure point) converted to cm
dist = distOneF(newLoc, referPt) # distance between bully and defeated mouse
labeled = [1 if i < 10 else 0 for i in dist] # if dist < 10 cm, label 1 (has interaction), else 0 (no interaction)
# ---- Scenario 2: TWO free-moving mice ----
# NOTE(review): this section runs unconditionally and OVERWRITES the scenario-1
# results computed just above; run only the section matching your experiment.
msCam, behavCam = align(neuron_A, dlc_A, timestamp_A, gap_time_A) # realignment (same inputs as above)
pts = np.array([(40,60),(213,62), (205,405),(42,405)], np.float32) # four corner points for the 22 x 44 cm cage
newLoc = locCoordConvert(behavCam,pts,22,44) # pixel -> cm coordinates
# referPt = ptsCoordConvert(pts, [400,270], 44, 44)[0] # not needed here: bully position comes from DLC data
dist = distTwoF(newLoc, "head") # head-to-head distance between the two mice
labeled = [1 if i < 15 else 0 for i in dist] # if dist < 15 cm, label 1 (has interaction), else 0 (no interaction)
# Join neuron traces with the interaction label; drop frames with missing values.
data = pd.concat([msCam, pd.DataFrame(labeled)], axis=1).dropna(axis = 0)
data.columns = list(range(1,len(msCam.columns)+2)) # avoid duplicate column name
data = data.rename(columns={len(msCam.columns)+1:"interaction"})
# One hot encoding
one_hot = pd.get_dummies(data['interaction'])
one_hot.columns = ["interaction.a", "interaction.b"]
data = data.drop("interaction", axis = 1).join(one_hot)
# Train/test split (70/30), then 20% of train held out as validation.
frac = 0.3
x_train, x_test, y_train, y_test = \
train_test_split(data[list(range(1,len(data.columns)-1))], data[["interaction.a", "interaction.b"]], test_size=frac, random_state=0)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=0)
# NOTE(review): column 1 is dropped from x_train only, not from x_val/x_test — confirm intended.
x_train = x_train.drop(1, axis = 1)
# -
# -
| src/.ipynb_checkpoints/preprocessing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: simstrat
# language: python
# name: simstrat
# ---
import os
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import math
os.listdir("../../Simstrat-Operational/Simulation/Soppensee/Results")
def simstrat_time_to_datetime(days, reference_date):
    """Convert a Simstrat time value into an absolute datetime.

    Parameters
    ----------
    days : int or float
        Offset in days (fractional days allowed) from ``reference_date``.
    reference_date : datetime.datetime
        The simulation's day-zero reference (e.g. 1981-01-01 in this notebook).

    Returns
    -------
    datetime.datetime
        ``reference_date`` shifted forward by ``days``.
    """
    offset = timedelta(days=days)
    return reference_date + offset
reference_date = datetime(1981, 1, 1)  # Simstrat day-zero reference
# Load simulated temperature output; the first column is Simstrat time in days.
df = pd.read_csv("../../Simstrat-Operational/Simulation/Soppensee/Results/T_out.dat")
df["time"] = df.iloc[:,0].apply(lambda x: simstrat_time_to_datetime(x, reference_date))
df = df.set_index('time', drop=True)
df = df.drop(df.columns[0], axis=1)  # raw day column no longer needed once indexed
df
# NOTE(review): the cells below were executed out of order in the notebook —
# `delta` is referenced on the next line before it is assigned, and `df1`/`df2`
# are not defined anywhere in this chunk. Running top-to-bottom will fail here.
delta.days
delta = max(df.index) - min(df.index)
# Midnight of the first timestamp, used as the base for a daily date grid.
base = min(df.index).replace(hour=0, minute=0, second=0, microsecond=0)
arr = np.array([base + timedelta(days=i) for i in range(delta.days)])  # one entry per whole day spanned
min(df.index).year
min(df.index)
list(range(2010, 2021))
df1 = df1.merge(df2, left_index=True, right_index=True)
df1
import os
# NOTE(review): "Q:\Messdaten\..." works only because \M and \S are not escape
# sequences; a raw string r"Q:\..." would be safer against future edits.
files = os.listdir("Q:\Messdaten\Simstrat\Simstrat")
folders = []
for file in files:
    if "." not in file:
        folders.append(file)  # entries without an extension are treated as folders
folder = "../data/surface_temperature"
files = os.listdir(folder)
# Rename T_<year>.json files to the T_<year>0101_<year>1231.json convention.
for file in files:
    year = file.split("T_")[1].split(".json")[0]
    newname = "T_" + year + "0101" + "_" + year + "1231.json"
    os.rename(os.path.join(folder, file), os.path.join(folder, newname))
os.listdir(folder)
# Parse a yyyymmdd date string into year / month / day integers.
date = "20211202"
int(date[0:4])
int(date[4:6])
int(date[6:8])
| tests/Testing.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.3.8
# language: julia
# name: julia-0.3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Convex Optimization in Julia
#
# ## <NAME> | ISMP 2015
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Convex.jl team
#
# * [Convex.jl](https://github.com/cvxgrp/Convex.jl): <NAME>, <NAME>, <NAME>, <NAME>
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Collaborators/Inspiration:
#
# * [CVX](http://www.cvxr.com): <NAME>, <NAME>
# * [CVXPY](https://github.com/cvxgrp/cvxpy): <NAME>, <NAME>, <NAME>
# * [JuliaOpt](https://github.com/JuliaOpt): <NAME>, <NAME>, <NAME>
# -
# initial package installation
# NOTE: written for the Julia 0.3-era package manager; on Julia >= 0.7 run
# `using Pkg` before calling Pkg.add.
Pkg.add("Convex")
Pkg.add("SCS")
Pkg.add("Gadfly")
Pkg.add("Interact")
# +
# Make the Convex.jl module available
using Convex
using SCS # first order splitting conic solver [O'Donoghue et al., 2014]
# NOTE: set_default_solver is the Julia 0.3-era Convex.jl API; modern Convex.jl
# passes the optimizer to solve! instead.
set_default_solver(SCSSolver(verbose=0)) # could also use Gurobi, Mosek, CPLEX, ...
# Generate random problem data
m = 50; n = 100
A = randn(m, n)
x♮ = sprand(n, 1, .5) # true (sparse nonnegative) parameter vector
noise = .1*randn(m) # gaussian noise
b = A*x♮ + noise # noisy linear observations
# Create a (column vector) variable of size n.
x = Variable(n)
# nonnegative elastic net with regularization
λ = 1
μ = 1
# Objective: least squares + ridge (λ) + lasso (μ) penalties; x constrained nonnegative.
problem = minimize(norm(A * x - b)^2 + λ*norm(x)^2 + μ*norm(x, 1),
                   x >= 0)
# Solve the problem by calling solve!
solve!(problem)
println("problem status is ", problem.status) # :Optimal, :Infeasible, :Unbounded etc.
println("optimal value is ", problem.optval)
# -
using Gadfly, Interact
# Interactively re-solve the elastic net as the regularization weights change.
# BUG FIX: the slider variable was named `mu` while the objective used `μ`,
# so moving the second slider had no effect (the global μ = 1 was always used).
# Naming the slider `μ` makes it actually drive the L1 penalty weight.
@manipulate for λ=0:.1:5, μ=0:.1:5
    problem = minimize(norm(A * x - b)^2 + λ*norm(x)^2 + μ*norm(x, 1),
                       x >= 0)
    solve!(problem)
    # Histogram of the recovered coefficients for the current (λ, μ).
    plot(x=x.value, Geom.histogram(minbincount = 20),
        Scale.x_continuous(minvalue=0, maxvalue=3.5))#, Scale.y_continuous(minvalue=0, maxvalue=6))
end
# + [markdown] slideshow={"slide_type": "slide"}
# # Quick convex prototyping
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Variables
# + slideshow={"slide_type": "fragment"}
# Scalar variable
x = Variable()
# + slideshow={"slide_type": "fragment"}
# (Column) vector variable
y = Variable(4)
# + slideshow={"slide_type": "fragment"}
# Matrix variable
Z = Variable(4, 4)
# + [markdown] slideshow={"slide_type": "subslide"}
# # Expressions
# + [markdown] slideshow={"slide_type": "subslide"}
# Convex.jl allows you to use a [wide variety of functions](http://convexjl.readthedocs.org/en/latest/operations.html) on variables and on expressions to form new expressions.
# + slideshow={"slide_type": "fragment"}
x + 2x
# -
# An expression combining several atoms; Convex.jl tracks its curvature.
e = y[1] + logdet(Z) + sqrt(x) + minimum(y)
# ### Examine the expression tree
# + slideshow={"slide_type": "fragment"}
e.children[2]
# + [markdown] slideshow={"slide_type": "subslide"}
# # Constraints
#
# A constraint is convex if convex combinations of feasible points are also feasible. Equivalently, feasible sets are convex sets.
#
# In other words, convex constraints are of the form
#
# * `convexExpr <= 0`
# * `concaveExpr >= 0`
# * `affineExpr == 0`
# + slideshow={"slide_type": "fragment"}
x <= 0
# -
x^2 <= sum(y)
# + slideshow={"slide_type": "fragment"}
# Build a random affine matrix expression in y and constrain it PSD (⪰ 0).
M = Z
for i = 1:length(y)
    M += rand(size(Z))*y[i]
end
M ⪰ 0
# + [markdown] slideshow={"slide_type": "subslide"}
# # Problems
# + slideshow={"slide_type": "fragment"}
x = Variable()
y = Variable(4)
objective = 2*x + 1 - sqrt(sum(y))
constraint = x >= maximum(y)
p = minimize(objective, constraint)
# + slideshow={"slide_type": "fragment"}
# solve the problem
solve!(p)
p.status
# + slideshow={"slide_type": "fragment"}
# optimal value of the variable, populated by solve!
x.value
# + slideshow={"slide_type": "subslide"}
# can evaluate expressions directly
evaluate(objective)
# -
# ## Pass to solver
#
# call a `MathProgBase` solver suited for your problem class
#
# * see the [list of Convex.jl operations](http://convexjl.readthedocs.org/en/latest/operations.html) to find which cones you're using
# * see the [list of solvers](http://www.juliaopt.org/) for an up-to-date list of solvers and which cones they support
# to solve problem using a different solver, just import the solver package and pass the solver to the `solve!` method: eg
#
# using Mosek
# solve!(p, MosekSolver())
# ## Warmstart
# +
# Generate random problem data
m = 50; n = 100
A = randn(m, n)
x♮ = sprand(n, 1, .5) # true (sparse nonnegative) parameter vector
noise = .1*randn(m) # gaussian noise
b = A*x♮ + noise # noisy linear observations
# Create a (column vector) variable of size n.
x = Variable(n)
# nonnegative elastic net with regularization
λ = 1
μ = 1
problem = minimize(norm(A * x - b)^2 + λ*norm(x)^2 + μ*norm(x, 1),
                   x >= 0)
# First solve: cold start.
@time solve!(problem)
# Change a regularization weight and re-solve, reusing the previous solution
# as the starting point — typically much faster than solving from scratch.
λ = 1.5
@time solve!(problem, warmstart = true)
# + [markdown] slideshow={"slide_type": "slide"}
# # DCP examples
# + slideshow={"slide_type": "subslide"}
# affine
x = Variable(4)
# BUG FIX: removed the space in `Variable (2)` — Julia rejects whitespace
# between a function name and its argument list ("space before ( not allowed").
y = Variable(2)
sum(x) + y[2]
# -
# DCP-compliant: each term has known curvature, so the sum's curvature is known.
2*maximum(x) + 4*sum(y) - sqrt(y[1] + x[1]) - 7 * minimum(x[2:4])
# + slideshow={"slide_type": "subslide"}
# not dcp compliant
# (sum of a concave term and a convex term has no certified curvature)
log(x) + x^2
# + slideshow={"slide_type": "subslide"}
# $f$ is convex increasing and $g$ is convex
square(pos(x))
# + slideshow={"slide_type": "subslide"}
# $f$ is convex decreasing and $g$ is concave
invpos(sqrt(x))
# + slideshow={"slide_type": "subslide"}
# $f$ is concave increasing and $g$ is concave
sqrt(sqrt(x))
| examples/Convex.jl_intro_ISMP2015.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="_TMgM9e1q03O"
import os, time, gc, datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
pd.set_option('max_rows', 9999)
pd.set_option('max_columns', 9999)
start = time.time()  # wall-clock reference for the whole run
# +
# Mount Google Drive so the CSVs below can be read (Colab-only cell).
from google.colab import drive
drive.mount('/content/gdrive')
# +
# cd /content/gdrive/My Drive/ML 4
# +
# Training data: transaction features joined with identity features.
train = pd.read_csv("transaction_id_combined.csv")
# +
# cd /content/gdrive/My Drive/ML 4/Test
# +
# ls
# +
test = pd.read_csv("Test_Please.csv")
# +
# Split training rows by class so the majority class can be downsampled.
not_fraud = train[train.isFraud==0]
fraud = train[train.isFraud==1]
# +
from sklearn.utils import resample
# Downsample the majority (non-fraud) class to the minority class size.
not_fraud_downsampled = resample(not_fraud,
                                 n_samples = len(fraud), # match minority n
                                 random_state = 27) # reproducible results
# +
# Balanced training set: downsampled non-fraud rows + all fraud rows.
downsampled = pd.concat([not_fraud_downsampled, fraud])
# +
# Sanity check: both class counts should now be equal.
sns.countplot(x='isFraud', data=downsampled)
plt.show()
# +
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, roc_auc_score
import xgboost as xgb
import matplotlib.pyplot as plt
# +
# make Dataset
features = [c for c in downsampled.columns if c not in ['TransactionID', 'isFraud']]
target = 'isFraud'
X = downsampled[features].values
Y = downsampled[target].values
X_test = test[features].values
# Accumulators: out-of-fold predictions, fold-averaged test predictions, importances.
oof = np.zeros(len(downsampled))
preds = np.zeros(len(test))
feature_importance = np.zeros(len(features))
# + colab={} colab_type="code" id="a4h6RcRjshoZ"
# Config
# 5-fold stratified CV keeps the fraud / non-fraud ratio equal across folds.
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
# LightGBM parameters ('metrics' is LightGBM's alias for `metric`).
params = {
    'task': 'train',
    'objective': 'binary',
    'metrics': 'auc',
    'max_depth': 9,
    'learning_rate': 0.15,
    'random_state': 0,
    'bagging_fraction': 0.9,
    'feature_fraction': 0.9,
}
# Keyword arguments forwarded to lgb.train() via **config.
# NOTE(review): `early_stopping_rounds`/`verbose_eval` as train() kwargs were
# removed in LightGBM 4 (use callbacks there) — confirm the installed version.
config = {
    'num_boost_round': 500,
    'early_stopping_rounds': 100,
    'verbose_eval': 2000
}
# + colab={"base_uri": "https://localhost:8080/", "height": 557} colab_type="code" executionInfo={"elapsed": 72964, "status": "ok", "timestamp": 1566445116388, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04271815384066255727"}, "user_tz": -540} id="nqB0rW5asnaW" outputId="8adce1f3-6ece-4a49-91fb-099737ec23e7"
# # %%time
# Model training: 5-fold stratified CV. Per fold: fit with early stopping,
# record out-of-fold predictions, accumulate fold-averaged test predictions
# and feature importances.
for i, (trn_index, val_index) in enumerate(cv.split(X, Y)):
    print('{} Folds'.format(i + 1))
    _start = time.time()
    X_train, Y_train = X[trn_index], Y[trn_index]
    X_valid, Y_valid = X[val_index], Y[val_index]
    trn_data = lgb.Dataset(X_train, label=Y_train)
    val_data = lgb.Dataset(X_valid, label=Y_valid, reference=trn_data)
    model = lgb.train(params, trn_data, valid_sets=[val_data, trn_data], valid_names=['eval', 'train'], **config)
    # BUG FIX: Booster.predict's keyword is `num_iteration`, not `iteration`
    # (the original kwarg raises TypeError). Use the early-stopped best
    # iteration consistently for both OOF and test predictions.
    oof[val_index] = model.predict(X_valid, num_iteration=model.best_iteration)
    preds += model.predict(X_test, num_iteration=model.best_iteration) / cv.get_n_splits()
    # feature_importance() does accept an `iteration` keyword.
    feature_importance += model.feature_importance(iteration=model.best_iteration)
    elapsedtime = time.time() - _start
    s = datetime.timedelta(seconds=elapsedtime)
    print('{} Folds Running Time: {}'.format(i + 1, str(s)))
    print('#' * 50)
    del model
    gc.collect()
# + colab={} colab_type="code" id="WbTrD1cctbpF"
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
# + colab={"base_uri": "https://localhost:8080/", "height": 107} colab_type="code" executionInfo={"elapsed": 701, "status": "ok", "timestamp": 1566447218104, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04271815384066255727"}, "user_tz": -540} id="tzizLDHVswDR" outputId="f4dae70d-f9e3-426f-83eb-4cabd54863ed"
# pred = clf.predict(X_test)
# print('F1:{}'.format(f1_score(y_test, pred)))
# Out-of-fold evaluation: F1 on thresholded predictions, AUC on raw scores.
# BUG FIX: roc_auc_score must receive the continuous scores (`oof`), not the
# thresholded labels (`oof > 0.5`) — thresholding first distorts the AUC.
print('F1:{}'.format(f1_score(downsampled['isFraud'], oof > 0.5)),
      'ROC_AUC:{}'.format(roc_auc_score(downsampled['isFraud'], oof)), sep = '\n')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 1917, "status": "ok", "timestamp": 1566445591227, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04271815384066255727"}, "user_tz": -540} id="sJeJXymuuybq" outputId="e8d5ab2a-93ee-4db6-f3dc-602e079c001f"
# Feature importances summed across folds; plot the top 50.
importance_df = pd.DataFrame({
    'feature': features,
    'importance': feature_importance
})
fig = plt.figure(figsize=(12, 20))
sns.barplot(x='importance', y='feature', data=importance_df.sort_values(by='importance', ascending=False)[:50])
plt.show()
# +
# Write the fold-averaged test predictions into the submission template.
sub = pd.read_csv('sample_submission.csv')
sub['isFraud'] = preds
sub.to_csv('submission.csv', index=False)
| 19_ybita_fraud-detection/modeling_and_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/AnalyzingRadicalFunctions/analyzing-radical-functions.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# Hide this notebook's code cells by default; the injected link toggles them.
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
# + language="html"
# <style>
# .output_wrapper button.btn.btn-default,
# .output_wrapper .ui-dialog-titlebar {
# display: none;
# }
# </style>
# -
# # Analyzing Radical Functions
#
# ## Mathematics 20-2
# +
from IPython.display import display, Math, Latex, HTML, clear_output, Markdown
import numpy as np
import matplotlib.pyplot as plt
from numpy import sqrt as sqrt
import ipywidgets
from ipywidgets import widgets, interact, interactive, interact_manual, Button, Layout
# %matplotlib inline
# -
# # Introduction
# This notebook will explain how to analyze and graph radical functions. We will look at the square root function, which is the most common type of radical, as well as other forms of radicals.
#
# The most important thing to understand about radical functions is where the function *exists*. This is often called the *domain of existence* and will be further explained in the notebook. Looking at radical functions will incorporate aspects of intervals and inequalities. It will also help find the roots of quadratic equations.
# <center><img src="./images/Spiral_gif.gif"></center>
# # Background
#
# ## Radical Functions
#
# A radical function is any function that is inside a **radical**. This tells us that some function, $f(x)$, is contained within a fractional exponent. This can be written as:
#
# <h3 align='center'>$f(g(x)) = g(x)^{\frac{1}{n}}$</h3>
#
# Where $n$ is an integer for the following exercises.
#
# So what does $g(x)$ look like? Well, it can take on any form! Let's look at a few examples of different $g(x)$ functions:
#
# 1. If $g(x) = x$, then $f(g(x)) = g(x)^{\frac{1}{n}} = x^{\frac{1}{n}}$
#
# 2. If $g(x) = x^2 + 2x$, then $f(g(x)) = (x^2 + 2x)^{\frac{1}{n}}$
#
# 3. If $g(x) = \frac{x^3 - 9x + 3}{2x - 1}$, then $f(g(x)) = \Big(\frac{x^3 - 9x + 3}{2x - 1}\Big)^{\frac{1}{n}}$
#
# The *most common radical function* is **the square root function**:
#
# <h3 align='center'>$\sqrt{x}$</h3>
#
# Which is equivalent to writing:
#
# <h3 align='center'>$x^{\frac{1}{2}}$</h3>
#
# So if we wanted to obtain the *square root* of some function $f(x)$, this is done by doing:
#
# <h3 align='center'>$\big[ \ f(x) \ \big]^{\frac{1}{2}} = \sqrt{f(x)}$</h3>
#
# Now, if we try finding the *root of a function*, (*where $f(x) = 0$*), understanding how radicals work becomes important. If we only look at quadratic functions of the form:
#
# <h3 align='center'>$f(x) = ax^2 + bx + c$</h3>
#
# There are many cases where the roots of this function can be found immediately by taking the square root. But, remember that when you take the square root of a single value, such as $4$, we get the result of:
#
# <h3 align='center'>$\sqrt{4} = \pm \ 2$</h3>
#
# Where either positive, ($+$), or negative, ($-$), $2$ can be squared to produce a value of $4$:
#
# <h3 align='center'>$(2)^2 = (-2)^2 = 4$</h3>
#
# This exact same principle applies when dealing with functions. For example, let us look at the function $f(x) = x^2$:
# +
# function to be plotted
x = np.linspace(-10,10,100)
fx = x**2.
# plot showing f(x) = x^2 with (2,4) and (-2,4) plotted points to show +/- result for square roots.
plt.figure(1,figsize = (10,7))
hold = True
ticks = np.arange(-5,6)
plt.plot(x,fx,'r-',label = r'$f(x) = x^2$')
plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)  # x-axis
plt.plot([0,0],[-10,15],'k-',alpha = 1,linewidth = 1)  # y-axis
plt.plot(2,4,'k.',markersize = 10)   # mark (2, 4)
plt.plot(-2,4,'k.', markersize = 10) # mark (-2, 4)
plt.grid(alpha = 0.7)
plt.legend(loc = 'lower right',fontsize = 13)
plt.xticks(ticks)
plt.ylim([-2,15])
plt.xlim([-5,5])
plt.show()
# -
# As you can see, at *both* $x=2$ and $x = -2$ our function equals $4$, which is marked with the *black dots*.
#
# If you were to take the square root of this function $f(x) = x^2$ you could write it as the following:
#
# <h3 align='center'>$\sqrt{f(x)} = \sqrt{x^2} = (x^{2})^{\frac{1}{2}}$</h3>
#
# Where you may be tempted to write the final answer like this:
#
# <h3 align='center'>$\sqrt{x^2} = \pm \ x$</h3>
#
# This is the formatting we had for a single value like $4$. **But,** when taking the square root of a function, $\pm$ doesn't actually tell you what is happening in this scenario. Instead, we write it as:
#
# <h3 align='center'>$\sqrt{x^2} = \lvert x \rvert$ </h3>
#
# Where $\lvert x \rvert$ is defined as the **absolute value** function. This means that:
#
# <h3 align='left'> - If $x \geq 0$, then $\lvert x\rvert$ equals $(x)$ </h3>
#
# <h3 align='left'> - If $x < 0$, then $\lvert x\rvert$ equals $-(x)$ </h3>
#
# > ## **Note**
# Our second line states that *"If $x<0$, then $\lvert x \rvert$ equals $-(x)$"*. This means that for any **negative** value of $x$, we substitute that value into the $x$ in $-(x)$, and get a double negative. This results in a **positive** value. For example: $\lvert -2 \rvert = -(-2) = 2$. This is true for all negative values inside the absolute value function.
#
# A formal definition of this would be:
#
# <h3 align='center'>$\lvert x\rvert = \begin{cases}x , & \text{if } x\geq 0 \\ -(x),& \text{if }x<0\end{cases}$</h3>
# Now that we've introduced the absolute value function, we can begin *finding the roots* of some quadratic functions.
#
# Remember that the root of a function is where $f(x) = 0$. In our initial example, where $f(x) = x^2$, we can prove by inspection of our graphic that the function has a root at $x = 0$. But, if we were to find this function's root more rigorously, you would **follow these steps**:
#
# <h3 align='left'> 1. Set $f(x)$ equal to $0$ so that $f(x) = x^2 = 0$. </h3>
#
# <h3 align='left'> 2. Take the square root of BOTH sides so that $\lvert x \rvert = 0$ </h3>
#
# <h3 align='left'> 3. Solve the expression for the POSITIVE case of the absolute value function. </h3>
#
# > ## **Note**
# For step 3 we only look at the **positive** case of the absolute function. This is because, in our formal definition, the only condition where $x$ is defined at 0 is when x is **positive**. This is what is needed to solve for the root.
# How about a less trivial example?
#
# Let's look at the function:
#
# <h3 align='center'>$h(x) = x^2 - 10x + 25$</h3>
#
# If you can, take a moment to try and factor this by hand. You should end up with an expression that looks like $(x-a)^2$, where $a$ is an integer value. To check your answer, click on the button below.
# +
#Construction of button to provide factored polynomial result.
buttonShowAnswer = widgets.Button(description="Show Answer")
def displayAnswer(a):
    """Button callback: reveal the factored form, then remove the button."""
    display(Math('x^2 - 10x + 25 = (x-5)^2'))
    buttonShowAnswer.close()
display(buttonShowAnswer)
buttonShowAnswer.on_click(displayAnswer)
# -
# This is in the same form as our last example, since $(x-5)$ is all squared. Therefore, we can follow the exact same steps as before:
#
# 1. $\sqrt{(x-5)^2} = 0$
#
# 2. $\lvert(x-5)\rvert = 0$
#
# 3. Positive case: $x-5 = 0$ $\color{red}\rightarrow$ $x=5$
#
# We've found the root!
#
# It is important to note that functions of the form $f(x) = (ax-b)^2$ only have **1** real root. This root can be found by inspecting the graph or by applying the method we just saw.
#
# So what would all these operations look like graphically? This is a good opportunity to practice making graphs of mathematical functions in Python!
#
# First off, let's look at our original functions $f(x)$ and $h(x)$. Graphically, the $x$ value is in the range of $[-5,10]$, (We are **only** looking at the values of $x$ between $-5$ and $10$, including $-5$ and $10$).
# +
# functions to be plotted
x = np.linspace(-10,10,1000)
fx = x**2.
gx = (x-5)**2.
# plot showing f(x) and g(x)
plt.figure(1,figsize = (10,7))
hold = True
plt.plot(x,fx,'r-',label = r'$f(x) = x^2$')
plt.plot(x,gx,'b-',label = r'$g(x) = (x-5)^2$')
plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)  # x-axis
plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)  # y-axis
plt.grid(alpha = 0.7)
plt.legend(loc = 'lower right',fontsize = 13)
plt.xticks(np.arange(-5,11))
plt.ylim([-2,5])
plt.xlim([-5,10])
plt.show()
# -
# As we found earlier, $f(x)$ and $h(x)$ are parabolas with roots at $x = 0$ and $x = 5$ respectively.
#
# The next step will be to *take the square root of our functions* and check what this process looks like. This time, you will input the required code for the functions, so that you become familiar with how to do it. To do this, here's a quick overview of how you type mathematical symbols:
#
# - $+$ and $-$ are simply typed as you see here
# - To do **multiplication** ($\times$), you must type in an *asterisk*, ( $^{*}$ )
# - To do **division** ($\div$), you must type in a *forward slash*, ( $/$ )
# - To do an **exponent** ($x^{n}$), you must type in *two asterisks* followed by the *exponent*, ( $x^{**}n$ )
# - To do a **square root** ($\sqrt{x}$), you must type "$sqrt(x)$".
# +
from IPython.display import clear_output
x = np.linspace(-10,10,1000)
#Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
#Create an input text widget.
question = widgets.Text(
    value=None,
    description='Type your answer here: ',
    disabled=False,
    style = style
)
#Create a button titled "Check Answer"
checkButton = widgets.Button(description = "Check Answer")
def checkAnswer(a):
    """Button callback: grade the typed answer; on success plot sqrt(f) curves."""
    #questionAnswer will be the input given by the user.
    questionAnswer = question.value
    #answers is a list of possible answers that can be inputted.
    answers = ["sqrt((x-5)**2)", "(x-5)", "x-5"]
    # ROBUSTNESS FIX: ignore spaces so inputs like "x - 5" are also accepted
    # (the reference answers contain no spaces); guard against a None value.
    if questionAnswer is not None and questionAnswer.replace(" ", "") in answers:
        clear_output()
        display(writtenQuestion)
        print("You answered: " + questionAnswer)
        print("Well done!")
        fx = sqrt(x**2)
        gx = sqrt((x-5)**2)
        # plot the absolute value functions of both f(x) and g(x)
        plt.figure(1,figsize = (10,7))
        hold = True
        plt.plot(x,x**2.,'r-',label = r'$x^2$')
        plt.plot(x,fx,'m--',label = r'$\sqrt{x^2}$')
        plt.plot(x,(x-5)**2.,'b-',label = r'$(x-5)^2$')
        plt.plot(x,gx,'c--',label = r'$\sqrt{(x-5)^2}$')
        plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
        plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
        plt.grid(alpha = 0.7)
        plt.legend(loc = 'lower right',fontsize = 13)
        plt.xticks(np.arange(-5,11))
        plt.ylim([-2,5])
        plt.xlim([-5,10])
        plt.show()
    #Otherwise, if no answer has been given, do nothing.
    elif (questionAnswer == "" or questionAnswer is None):
        None
    #Lastly, if the answer is wrong, give the user a hint.
    else:
        clear_output()
        display(writtenQuestion)
        display(question)
        display(checkButton)
        print('Not quite right. \nRemember, the answer is just "sqrt(h(x))"')
#writtenQuestion is the question the user is asked.
writtenQuestion = Latex("Using the syntax from above, please enter what the \
square root of $h(x) = (x-5)^2$ is, (Remember to include brackets)")
display(writtenQuestion)
display(question)
display(checkButton)
checkButton.on_click(checkAnswer)
# -
# To summarize, taking the square root of functions of the form:
#
# <h3 align='center'>$f(x) = (ax-b)^2$ </h3>
#
# Where $a$ and $b$ are constants, will give the result:
#
# <h3 align='center'>$\sqrt{f(x)} = \lvert ax-b\rvert$ </h3>
#
# We can also generalize the principle root, which will be denoted as $x_0$, of such functions as
#
# <h3 align='center'>$x_0 = \frac{b}{a} \ \ \ \ a \neq 0 $ </h3>
# ## Rational Exponents (n $\geq$ 2)
#
# As mentioned before we defined a radical function to be of the form $f(g(x)) = g(x)^{\frac{1}{n}}$. Now we are going to look at functions where $n\geq2$.
#
# Below is an *interactive slider*. Moving the slider will change the value of $n$ in our rational exponent expression. See what happens when you move this around.
# +
#Interactive visualization of y = x^(1/n) for a slider-chosen integer n.
def slider(n):
    """Plot y = x^(1/n) on a fixed viewport and return n unchanged."""
    # Start just above zero so the fractional power is always real-valued.
    xs = np.linspace(1e-7,10,500)
    if n != 0:
        curve_label = str((r'$y = x^{1/' + str(n) + '}$'))
        plt.figure(1,figsize = (10,7))
        plt.plot(xs,xs**(1./n),'b-',linewidth = 2,label = curve_label)
        plt.xlabel('$x$',fontsize = 14)
        plt.ylabel('$y$',fontsize = 14)
        # Coordinate axes through the origin.
        plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
        plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
        plt.grid(alpha = 0.7)
        plt.xticks(np.arange(-5,11))
        plt.ylim([-2,5])
        plt.xlim([-5,10])
        plt.legend(loc = 'best', fontsize = 18)
        plt.show()
    return n
#Hook the plotting routine up to an integer slider over the range 2..10.
j = interactive(slider, n=(2,10,1))
display(j)
# -
# Likewise, we can look at functions where the exponent is negative, such that:
#
# <h3 align='center'>$f(g(x)) = g(x)^{\frac{-1}{n}} = \frac{1}{g(x)^{\frac{1}{n}}}$</h3>
#
# Again, an interactive graph is shown below, to observe how changing the value of n affects the behaviour of the function.
# +
#Construction of interactive plot to visualize x^(-1/n)
def slider(n):
    """Plot y = x^(1/n); the slider supplies a NEGATIVE n (-10..-2), so
    x**(1./n) is x^(-1/|n|), matching the label below. Returns n."""
    # Start just above zero: negative fractional powers are undefined at 0.
    x = np.linspace(1e-7,10,500)
    if n != 0:
        plt.figure(1,figsize = (10,7))
        # NOTE(review): `hold` is unused — leftover from the old hold API.
        hold = True
        plt.plot(x,x**(1./n),'b-',linewidth = 2,label = str((r'$y = x^{-1/' + str(abs(n)) + '}$')))
        plt.xlabel('$x$',fontsize = 14)
        plt.ylabel('$y$',fontsize = 14)
        # Coordinate axes through the origin.
        plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
        plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
        plt.grid(alpha = 0.7)
        plt.xticks(np.arange(-5,11))
        plt.ylim([-2,5])
        plt.xlim([-5,10])
        plt.legend(loc = 'best', fontsize = 18)
        plt.show()
    return n
#Store a slider with a range between -2 and -10 in the character j.
j = interactive(slider, n=(-10,-2,1))
display(j)
# -
# We can see that no matter what value appears in the exponent, the function is **always** greater than 0.
#
# This is because a radical function like $x^{\frac{1}{n}}$ is **only** defined for values of $x\geq0$. Functions like $x^{\frac{-1}{n}}$ are also **only** defined for values of $x > 0$. This is true of all radical functions. So:
#
# <h3 align='left'>- An expression under an exponent $(1/n)$ must be a POSITIVE value </h3>
#
# <h3 align='left'>- An expression under an exponent $(-1/n)$ must be both POSITIVE and NON-ZERO</h3>
#
# Some examples of this are:
#
# a)$\ \sqrt{4-x}$ must be a **POSITIVE** quantity.
#
# b)$\ (2x-10)^{\frac{1}{3}}$ must be a **POSITIVE** quantity.
#
# c)$\ (9-7x)^{\frac{-1}{5}}$ must be a **POSITIVE** and **NON-ZERO** quantity.
#
# So what exactly does this mean? How do we know where these quantities become negative? What might these functions look like? This will all be answered in the next section!
# ## Domain of Existence
#
# It is very important that the value underneath a fractional exponent ($\frac{1}{n}$) is positive. Where the values are positive defines the **domain of existence**. Let's see if we can figure out what the domains of existence are for $a)$, $b)$ and $c)$.
#
# For $a)$, we need to see where $(4-x) \geq 0$. To do this, we can **follow these steps**:
#
#
# <h3 align='left'> 1. Write out the expression, ( $(4 - x) \geq 0$ ) </h3>
#
# <h3 align='left'> 2. Isolate for $x$, ( $-x \geq -4 $ ) </h3>
#
# <h3 align='left'> 3. Ensure that $x$ doesn't have a negative sign in front of it, ( $x \leq 4$ ) </h3>
#
# > ## **Note**
# Remember that *flipping a sign* from negative, ($-$), to positive, ($+$), will *flip the inequality*. This is also true the other way around, (Flipping a sign from positive to negative).
#
# So, we've found that the domain of existence for $\sqrt{4-x}$ is all $x$ values such that $x \leq 4$.
#
# Now, $b)$ and $c)$ will be left for you to solve and input into the boxes provided below. Remember, we want to find where the expression under the exponent is **positive**. If you get stuck, look back to how the domain of existence was found for function $a)$.
#
# ### How to Input Domain of Existence
# Let's say we wanted to input our answer for the domain of existence of $\sqrt{4-x}$, (Which was $x \leq 4$). The input formatting for the *"less than or equal to"* symbol is written exactly how it is said. So, if you are given an input box to enter your answer, you would type in:
#
# <h3 align='center'> $x <= 4$ </h3>
#
# > ## **Note**
# *Spaces are important.* You should also write fractions like $\frac{5}{3}$ as "$5/3$"
# +
from IPython.display import clear_output
# Practice cell: the student types "x <= 4" to learn the inequality syntax
# used for every domain-of-existence answer below.
#Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
#Create an input text widget.
question = widgets.Text(
    value=None,
    description='Input: ',
    disabled=False,
    style = style
)
checkButton = widgets.Button(description = "Check Answer")
def checkAnswer(a):
    """Button callback: accept exactly "x <= 4"; otherwise redisplay the
    widgets with a hint. Empty input is ignored."""
    #questionAnswer will be the input given by the user.
    questionAnswer = question.value
    #answers is a list of possible answers that can be inputted.
    answers = ["x <= 4"]
    #Check if the input is in the list of answers. If this is the case:
    if questionAnswer in answers:
        clear_output()
        display(writtenQuestion)
        print("You answered: " + questionAnswer)
        print("Well done!")
    #Otherwise, if no input has been given, do nothing:
    elif (questionAnswer == ""):
        None
    #Lastly, if the answer is wrong, give the user a hint:
    else:
        clear_output()
        display(writtenQuestion)
        display(question)
        display(checkButton)
        print('Not quite right. \nRemember, you only have to type in "x <= 4".')
#writtenQuestion is the question the user is asked.
writtenQuestion = Latex('Type "$x <= 4$" into the input box below to ensure you are \
using the appropriate syntax.')
display(writtenQuestion)
display(question)
display(checkButton)
checkButton.on_click(checkAnswer)
# +
from IPython.display import clear_output
# Question b): domain of existence of (2x - 10)^(1/3); expected answer x >= 5.
#Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
question = widgets.Text(
    value=None,
    description='Input: ',
    disabled=False,
    style = style
)
checkButton = widgets.Button(description = "Check Answer")
def checkAnswer(a):
    """Button callback: grade the typed inequality against `answers`;
    show `correct` on success, `hint` otherwise. Empty input is ignored."""
    #questionAnswer will be the input given by the user.
    questionAnswer = question.value
    #answers is a list of possible answers that can be inputted.
    answers = ["x >= 5"]
    #Check if the input is in the list of answers. If this is the case:
    if questionAnswer in answers:
        clear_output()
        display(writtenQuestion)
        print("You answered: " + questionAnswer)
        print(correct)
    #Otherwise, if no input has been given, do nothing:
    elif (questionAnswer == ""):
        None
    #Lastly, if the answer is wrong, give the user a hint:
    else:
        clear_output()
        display(writtenQuestion)
        display(question)
        display(checkButton)
        print(hint)
#writtenQuestion is the question the user is asked.
writtenQuestion = Latex("Find the domain of existence for: $(2x - 10)^{1/3}$")
#hint is the hint the user is given when the answer is incorrect.
hint = 'Not quite right. \nRemember, the process to find the domain of existence is:\n\
1. Write out the expression,\n\
2. Isolate for x, \n\
3. Ensure that x is not negative.'
#correct is the message output for when the user gets the question correct.
correct = "Well done!\n\
1. Write out the expression, ((2x - 10) >= 0),\n\
2. Isolate for x, (x >= 5),\n\
3. Ensure that x doesn't have a negative sign, (x >= 5)"
display(writtenQuestion)
display(question)
display(checkButton)
checkButton.on_click(checkAnswer)
# +
from IPython.display import clear_output
# Question c): domain of existence of (9 - 7x)^(-1/5); the negative exponent
# requires a strict inequality, x < 9/7.
#Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
question = widgets.Text(
    value=None,
    description='Input: ',
    disabled=False,
    style = style
)
checkButton = widgets.Button(description = "Check Answer")
def checkAnswer(a):
    """Button callback: grade the typed inequality against `answers`;
    show `correct` on success, `hint` otherwise. Empty input is ignored."""
    #questionAnswer will be the input given by the user.
    questionAnswer = question.value
    #answers is a list of possible answers that can be inputted.
    answers = ["x < 9/7", "x < (9/7)"]
    #Check if the input is in the list of answers. If this is the case:
    if questionAnswer in answers:
        clear_output()
        display(writtenQuestion)
        print("You answered: " + questionAnswer)
        print(correct)
    #Otherwise, if no input has been given, do nothing:
    elif (questionAnswer == ""):
        None
    #Lastly, if the answer is wrong, give the user a hint:
    else:
        clear_output()
        display(writtenQuestion)
        display(question)
        display(checkButton)
        print(hint)
#writtenQuestion is the question the user is asked.
writtenQuestion = Latex("Find the domain of existence for: $(9 - 7x)^{-1/5}$")
#hint is the hint the user is given when the answer is incorrect.
hint = 'Not quite right. \nRemember, the process to find the domain of existence is:\n\
1. Write out the expression,\n\
2. Isolate for x, \n\
3. Ensure that x is not negative.'
#correct is the message output for when the user gets the question correct.
correct = "Well done!\n\
1. Write out the expression, ((9 - 7x) > 0),\n\
2. Isolate for x, (-x > -9/7),\n\
3. Ensure that x doesn't have a negative sign, (x < 9/7)"
display(writtenQuestion)
display(question)
display(checkButton)
checkButton.on_click(checkAnswer)
# -
# The following graph shows us what these functions we worked with look like. It should be clear that each function exists exactly where we defined them in the previous exercise.
# +
"""
Plot of the three example functions a), b) and c), each drawn only over the
domain of existence found in the exercises above:
  a) y = sqrt(4 - x)        exists for x <= 4
  b) y = (2x - 10)^(1/3)    exists for x >= 5
  c) y = (9 - 7x)^(-1/5)    exists for x <  9/7
"""
# Sample each curve only inside its domain of existence so no invalid
# (complex) values are passed to the fractional powers.
x1 = np.linspace(-10,4,500)
x2 = np.linspace(5,10,500)
# Stop just short of x = 9/7: function c) has a negative exponent, so it is
# undefined (division by zero) exactly at the root of 9 - 7x.
x3 = np.linspace(-10.,8.9999/7.,500)
plt.figure(1,figsize = (10,7))
# Dashed vertical guide at the asymptote x = 9/7 of function c).
plt.plot(9./7. * np.ones(100),np.linspace(0,12,100),'k--',alpha = 0.5)
# Function a) is sqrt(4 - x) with domain x <= 4, as worked out in the text;
# the original cell plotted sqrt(3 - x), contradicting the exercise answers.
plt.plot(x1,sqrt(4-x1),'b-',label = r'$y = \sqrt{4-x}$')
plt.plot(x2,(2*x2-10)**(1./3.),'r-', label = r'$y = (2x-10)^{\frac{1}{3}}$')
plt.plot(x3,(9-7*x3)**(-1./5.),'g-', label = r'$y = (9-7x)^{\frac{-1}{5}}$')
plt.text(9./7.-0.4,-0.25,r'$9/7$',fontsize = 13)
plt.xlabel('$x$',fontsize = 14)
plt.ylabel('$y$',fontsize = 14)
# Coordinate axes through the origin.
plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
plt.grid(alpha = 0.7)
plt.xticks(np.arange(-10,11))
plt.ylim([-2,5])
plt.xlim([-10,10])
plt.legend(loc = 'best', fontsize = 18)
plt.show()
# -
# # Questions
#
# ## Problem Set 1
# Here, you will be tested on the topics we covered in this notebook. You will be asked to find the roots for functions that must be factored first. You will also have to find the domain of existence of certain functions using the method we just applied in the section above. Remember to be careful of negative exponents!
# +
from IPython.display import clear_output
# Problem Set 1, question 1: domain of (-2x + 14)^(1/2); answer x <= 7.
#Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
question = widgets.Text(
    value=None,
    description='Input: ',
    disabled=False,
    style = style
)
checkButton = widgets.Button(description = "Check Answer")
def checkAnswer(a):
    """Button callback: grade the typed inequality; show the worked solution
    on success, the generic hint otherwise. Empty input is ignored."""
    #questionAnswer will be the input given by the user.
    questionAnswer = question.value
    #answers is a list of possible answers that can be inputted.
    answers = ["x <= 7"]
    #Check if the input is in the list of answers. If this is the case:
    if questionAnswer in answers:
        clear_output()
        display(writtenQuestion)
        print("You answered: " + questionAnswer)
        print(correct)
    #Otherwise, if no input has been given, do nothing:
    elif (questionAnswer == ""):
        None
    #Lastly, if the answer is wrong, give the user a hint:
    else:
        clear_output()
        display(writtenQuestion)
        display(question)
        display(checkButton)
        print(hint)
#writtenQuestion is the question the user is asked.
writtenQuestion = Latex("Find the domain of existence for: $(-2x + 14)^{1/2}$")
#hint is the hint the user is given when the answer is incorrect.
hint = 'Not quite right. \nRemember, the process to find the domain of existence is:\n\
1. Write out the expression,\n\
2. Isolate for x, \n\
3. Ensure that x is not negative.'
#correct is the message output for when the user gets the question correct.
correct = "Well done!\n\
1. Write out the expression, ((-2x + 14) >= 0),\n\
2. Isolate for x, (-x >= -7),\n\
3. Ensure that x doesn't have a negative sign, (x <= 7)"
display(writtenQuestion)
display(question)
display(checkButton)
checkButton.on_click(checkAnswer)
# +
from IPython.display import clear_output
# Problem Set 1, question 2: domain of (15x - 5 + 8x)^(-1/5) = (23x - 5)^(-1/5);
# answer x > 5/23 (strict, because of the negative exponent).
#Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
question = widgets.Text(
    value=None,
    description='Input: ',
    disabled=False,
    style = style
)
checkButton = widgets.Button(description = "Check Answer")
def checkAnswer(a):
    """Button callback: grade the typed inequality; show the worked solution
    on success, the generic hint otherwise. Empty input is ignored."""
    #questionAnswer will be the input given by the user.
    questionAnswer = question.value
    #answers is a list of possible answers that can be inputted.
    answers = ["x > 5/23", "x > (5/23)"]
    #Check if the input is in the list of answers. If this is the case:
    if questionAnswer in answers:
        clear_output()
        display(writtenQuestion)
        print("You answered: " + questionAnswer)
        print(correct)
    #Otherwise, if no input has been given, do nothing:
    elif (questionAnswer == ""):
        None
    #Lastly, if the answer is wrong, give the user a hint:
    else:
        clear_output()
        display(writtenQuestion)
        display(question)
        display(checkButton)
        print(hint)
#writtenQuestion is the question the user is asked.
writtenQuestion = Latex("Find the domain of existence for: $(15x-5+8x)^{-1/5}$")
#hint is the hint the user is given when the answer is incorrect.
hint = 'Not quite right. \nRemember, the process to find the domain of existence is:\n\
1. Write out the expression,\n\
2. Isolate for x, \n\
3. Ensure that x is not negative.'
#correct is the message output for when the user gets the question correct.
correct = "Well done!\n\
1. Write out the expression, ((23x - 5) > 0),\n\
2. Isolate for x, (x > 5/23),\n\
3. Ensure that x doesn't have a negative sign, (x > 5/23)"
display(writtenQuestion)
display(question)
display(checkButton)
checkButton.on_click(checkAnswer)
# +
from IPython.display import clear_output
# Problem Set 1, question 3: principal root of (x^2 - 6x + 9)^(1/3).
# Uses an IntText widget, so the "no input" sentinel is 0 rather than "".
#Used to ensure the description text doesn't get cut off.
style = {'description_width': 'initial'}
question = widgets.IntText(
    value=None,
    description='Input: ',
    disabled=False,
    style = style
)
checkButton = widgets.Button(description = "Check Answer")
def checkAnswer(a):
    """Button callback: grade the typed integer; show the worked solution on
    success, the generic hint otherwise. A value of 0 (the IntText default)
    is treated as "no answer yet"."""
    #questionAnswer will be the input given by the user.
    questionAnswer = question.value
    #answers is a list of possible answers that can be inputted.
    answers = [3]
    #Check if the input is in the list of answers. If this is the case:
    if questionAnswer in answers:
        clear_output()
        display(writtenQuestion)
        print("You answered: " + str(questionAnswer) )
        print(correct)
    #Otherwise, if no input has been given, do nothing:
    elif (questionAnswer == 0):
        None
    #Lastly, if the answer is wrong, give the user a hint:
    else:
        clear_output()
        display(writtenQuestion)
        display(question)
        display(checkButton)
        print(hint)
#writtenQuestion is the question the user is asked.
writtenQuestion = Latex("Find the principle root for: $(x^2-6x+9)^{1/3}$")
#hint is the hint the user is given when the answer is incorrect.
hint = 'Not quite right. \nRemember, the process to find the principle root is:\n\
1. Set f(x) equal to 0 so that f(x) = 0,\n\
2. Cube BOTH sides so that |x| = 0,\n\
3. Solve the expression for the POSITIVE case of the absolute value function.'
#correct is the message output for when the user gets the question correct.
correct = "Well done!\n\
1. Set f(x) equal to 0, ((x**2 - 6x + 9)**(1/3) = 0),\n\
2. Cube BOTH sides so that |x**2 - 6x + 9| = 0,\n\
3. Solve the expression for the POSITIVE case, (x = 3)"
display(writtenQuestion)
display(question)
display(checkButton)
checkButton.on_click(checkAnswer)
# -
# ## Problem Set 2
#
# The next problems involve interacting with plots which already have functions graphed on them. In each function of the form:
#
# <h3 align='center'>$y = (a-bx)^{\frac{1}{n}}$</h3>
#
# It is your job to find the right unknown parameters, **(a, b, n)**. This can be done by adjusting the sliders provided and matching the function graphically. This exercise will provide intuition on how adjusting the values in a function manipulates a graph.
# +
"""
interactive plot where students have to match parameters a, b and n in the function f(x) = (a-bx)^(1/n)
if & else statements are required to adjust the x-linspace such that python does not constantly return errors when
an invalid (complex) value is encountered under the exponent
"""
# Target curve: y = (6 - 3x)^(1/3); the student must find a=6, b=3, n=3.
def slider(a,b,n):
    """Plot the student's guess (a-bx)^(1/n) (magenta) over the hidden target
    curve (dashed blue); celebrate when (a, b, n) matches the target."""
    if b != 0:
        i = float(a)/float(b)
    else:
        i = 0
        a = 0 # when b = 0, set a = 0 to avoid all a < 0. Note at the bottom of each figure
    # Restrict x so that (a - b*x) stays non-negative under the fractional power.
    if b > 0:
        x = np.linspace(-10,i,500)
    else:
        x = np.linspace(i,10,500)
    f = (a-b*x)**(1./n)
    # Domain of the fixed target curve (6 - 3x >= 0 for x <= 2).
    xx = np.linspace(-10,2,200)
    plt.figure(1,figsize = (10,7))
    # NOTE(review): `hold` is unused — leftover from the old hold API.
    hold = True
    plt.plot(x,f,'m-',linewidth = 3)
    plt.plot(xx,(6-3*xx)**(1./3.),'b--', linewidth = 2, label = r'$y = (a-bx)^{\frac{1}{n}}$')
    #If the answer is correct,
    if a == 6 and b == 3 and n == 3:
        plt.text(1,2,'VERY GOOD!', fontsize = 25, fontweight = 'bold',color = 'r')
        plt.plot(x,f,'m-',linewidth = 3, label = r'$y = (6-3x)^{\frac{1}{3}}$')
    plt.xlabel('$x$',fontsize = 14)
    plt.ylabel('$y$',fontsize = 14)
    plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
    plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
    plt.grid(alpha = 0.7)
    plt.xticks(np.arange(-10,11))
    plt.ylim([-2,5])
    plt.xlim([-10,10])
    plt.legend(loc = 'best', fontsize = 18)
    plt.show()
sl = interactive(slider, n=widgets.IntSlider(value = 2, min = 2,max = 5,step = 1, continuous_update = False),
                 a=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False),
                 b=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False))
display(sl)
display(Latex("*If you're seeing no change, make sure b is NOT set to 0.*"))
# +
# Target curve: y = (-9 + 4x)^(1/2); the student must find a=-9, b=-4, n=2.
def slider(a,b,n):
    """Plot the student's guess (a-bx)^(1/n) (magenta) over the hidden target
    curve (dashed blue); celebrate when (a, b, n) matches the target."""
    if b != 0:
        i = float(a)/float(b)
    else:
        i = 0
        a = 0
    # Restrict x so that (a - b*x) stays non-negative under the fractional power.
    if b > 0:
        x = np.linspace(-10,i,500)
    else:
        x = np.linspace(i,10,500)
    f = (a-b*x)**(1./n)
    # Domain of the fixed target curve (-9 + 4x >= 0 for x >= 9/4).
    xx = np.linspace(9./4,10,200)
    plt.figure(1,figsize = (10,7))
    # NOTE(review): `hold` is unused — leftover from the old hold API.
    hold = True
    plt.plot(x,f,'m-',linewidth = 3)
    plt.plot(xx,(-9+4*xx)**(1./2.),'b--', linewidth = 2, label = r'$y = (a-bx)^{\frac{1}{n}}$')
    #If the answer is correct,
    if a == -9 and b == -4 and n == 2:
        plt.text(-7,2,'VERY GOOD!', fontsize = 25, fontweight = 'bold',color = 'r')
        plt.plot(x,f,'m-',linewidth = 3, label = r'$y = (-9+4x)^{\frac{1}{2}}$')
    plt.xlabel('$x$',fontsize = 14)
    plt.ylabel('$y$',fontsize = 14)
    plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
    plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
    plt.grid(alpha = 0.7)
    plt.xticks(np.arange(-10,11))
    plt.ylim([-2,5])
    plt.xlim([-10,10])
    plt.legend(loc = 'best', fontsize = 18)
    plt.show()
sl = interactive(slider, n=widgets.IntSlider(value = 2, min = 2,max = 5,step = 1, continuous_update = False),
                 a=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False),
                 b=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False))
display(sl)
display(Latex("*If you're seeing no change, make sure b is NOT set to 0.*"))
# +
# Target curve: y = (8 + 2x)^(1/5); the student must find a=8, b=-2, n=5.
def slider(a,b,n):
    """Plot the student's guess (a-bx)^(1/n) (magenta) over the hidden target
    curve (dashed blue); celebrate when (a, b, n) matches the target."""
    if b != 0:
        i = float(a)/float(b)
    else:
        i = 0
        a = 0
    # Restrict x so that (a - b*x) stays non-negative under the fractional power.
    if b > 0:
        x = np.linspace(-10,i,500)
    else:
        x = np.linspace(i,10,500)
    f = (a-b*x)**(1./n)
    # Domain of the fixed target curve (8 + 2x >= 0 for x >= -4).
    xx = np.linspace(-4,10,200)
    plt.figure(1,figsize = (10,7))
    # NOTE(review): `hold` is unused — leftover from the old hold API.
    hold = True
    plt.plot(x,f,'m-',linewidth = 3)
    plt.plot(xx,(8+2*xx)**(1./5.),'b--', linewidth = 2, label = r'$y = (a-bx)^{\frac{1}{n}}$')
    #If the answer is correct,
    if a == 8 and b == -2 and n == 5:
        plt.text(-8,3,'VERY GOOD!', fontsize = 25, fontweight = 'bold',color = 'r')
        plt.plot(x,f,'m-',linewidth = 3, label = r'$y = (8+2x)^{\frac{1}{5}}$')
    plt.xlabel('$x$',fontsize = 14)
    plt.ylabel('$y$',fontsize = 14)
    plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
    plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
    plt.grid(alpha = 0.7)
    plt.xticks(np.arange(-10,11))
    plt.ylim([-2,5])
    plt.xlim([-10,10])
    plt.legend(loc = 'best', fontsize = 18)
    plt.show()
sl = interactive(slider, n=widgets.IntSlider(value = 2, min = 2,max = 5,step = 1, continuous_update = False),
                 a=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False),
                 b=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False))
display(sl)
display(Latex("*If you're seeing no change, make sure b is NOT set to 0.*"))
# +
# Target curve: y = (5 - x)^(-1/4); the student must find a=5, b=1, n=4.
# The NEGATIVE exponent requires a strictly positive base, hence the extra
# branches and the 1e-5 offsets that keep x away from the asymptote a/b.
def slider(a,b,n):
    """Plot the student's guess (a-bx)^(-1/n) (magenta) over the hidden target
    curve (dashed blue); celebrate when (a, b, n) matches the target.
    NOTE(review): when b == 0 the guess degenerates to the single point
    x = 1 with b forced to -1 — presumably intentional to avoid complex
    values; confirm the intended behaviour."""
    if b != 0 and a != 0:
        i = float(a)/float(b)
    elif b == 0:
        a = 0
        x = 1
    else:
        i = 0
    # Keep x strictly inside the domain (a - b*x > 0) for the negative power.
    if b > 0:
        x = np.linspace(-11,i-1e-5,500)
    elif b < 0:
        x = np.linspace(i+1e-5,11,500)
    elif b == 0:
        b = -1
    f = (a-b*x)**(-1./n)
    # Domain of the fixed target curve (5 - x > 0 for x < 5).
    xx = np.linspace(-10,5-1e-5,300)
    plt.figure(1,figsize = (10,7))
    # NOTE(review): `hold` is unused — leftover from the old hold API.
    hold = True
    plt.plot(x,f,'m-',linewidth = 3)
    plt.plot(xx,(5-xx)**(-1./4.),'b--', linewidth = 2, label = r'$y = (a-bx)^{\frac{1}{n}}$')
    #If the answer is correct,
    if a == 5 and b == 1 and n == 4:
        plt.text(-7,2,'VERY GOOD!', fontsize = 25, fontweight = 'bold',color = 'r')
        plt.plot(x,f,'m-',linewidth = 3, label = r'$y = (5-x)^{\frac{-1}{4}}$')
    plt.xlabel('$x$',fontsize = 14)
    plt.ylabel('$y$',fontsize = 14)
    plt.plot([-10,10],[0,0],'k-',alpha = 1,linewidth = 1)
    plt.plot([0,0],[-10,10],'k-',alpha = 1,linewidth = 1)
    plt.grid(alpha = 0.7)
    plt.xticks(np.arange(-10,11))
    plt.ylim([-2,5])
    plt.xlim([-10,10])
    plt.legend(loc = 'best', fontsize = 18)
    plt.show()
sl = interactive(slider, n=widgets.IntSlider(value = 2, min = 2,max = 5,step = 1, continuous_update = False),
                 a=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False),
                 b=widgets.IntSlider(value = 0, min = -10,max = 10,step = 1, continuous_update = False))
display(sl)
display(Latex("*If you're seeing no change, make sure b is NOT set to 0.*"))
# -
# # The Spiral of Theodorus
# https://en.wikipedia.org/wiki/Spiral_of_Theodorus
#
# The *Spiral of Theodorus or Pythagorean Spiral* is a geometrical construction of right angle triangles. It's a very interesting and fun application of square roots! So, let's look at an image of the spiral first and then we can look at the mathematics of how the square root gets involved.
#
# <center><img src="./images/Spiral_image.png"></center>
# So what exactly is happening here? Well, as you may have noticed all the triangles that form this design are *right angle triangles*.
#
# Starting from the innermost triangle, you can see that it has two side lengths equal to 1 with a hypotenuse equal to $\sqrt{2}$.
#
# Moving to the next triangle, we see it is again another right angle triangle, rotated now by some angle $\phi$. It now has a base length of $\sqrt{2}$, a hypotenuse of $\sqrt{3}$, and the remaining side is still 1. Can you see the pattern here? Each of the $n^{\rm th}$ triangle has its outermost side length fixed at 1. The base and the hypotenuse of these triangles are $\sqrt{n}$ and $\sqrt{n+1}$ respectively, for $n > 0$.
#
# We can also calculate what the angle $\phi$ is for the $n^{\rm th}$ triangle:
#
# <h3 align='center'>$\phi = \arctan\Big(\frac{1}{\sqrt{n}}\Big)$</h3>
#
# This is true because $\tan\phi = \frac{\rm opposite}{\rm adjacent}$, and our side opposite $\phi$ is fixed at length 1, while the adjacent side changes to $\sqrt{n}$ for the $n^{\rm th}$ triangle.
#
# From these simple facts, one can construct a code that creates this fun visual application of square roots! Feel free to look at ``Spiral_Of_Theodorus.py`` to see how the spiral was created. Below is an interactive widget where you can change how many triangles are formed in the spiral.
# +
# #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 11:03:28 2018
@author: jonathan
"""
import numpy as np
import matplotlib.pyplot as plt
from math import atan, sqrt, cos, sin
# Palette cycled through for the triangles of the spiral.
col = ['#e02727','#e06e27','#e0ca27','#a9e027','#57e027','#27e07e','#27e0bc','#27cde0','#2789e0','#2750e0','#2c27e0','#6827e0','#8427e0','#b527e0','#e027de','#e0279f','#e02766']
c = len(col)
def Spiral(n):
    """Draw the Spiral of Theodorus with n - 1 right triangles.

    Triangle k has legs sqrt(k) and 1 and hypotenuse sqrt(k + 1); each
    triangle is rotated by phi_k = arctan(1/sqrt(k)) relative to the last,
    with the cumulative angle tracked in `phi`.
    """
    col_counter = 0
    # NOTE: the loop variable below shadows the parameter n; N is fixed first.
    N = np.arange(1,n)
    phi = 0
    plt.figure(1,figsize = (12,9))
    # NOTE(review): `hold` is unused — leftover from the old hold API.
    hold = True
    for n in N:
        phi_n = atan(1./sqrt(n))
        phi += phi_n
        if n == 1:
            # First triangle: legs along the axes, apex at (1, 1).
            plt.plot([0,1],[0,0],linewidth = 1.5,color = col[col_counter])
            plt.plot([1,1],[0,1],linewidth = 1.5,color = col[col_counter])
            r = sqrt(n + 1)
            x = r*cos(phi)
            y = r*sin(phi)
            plt.plot([0,x],[0,y],linewidth = 1.5,color = col[col_counter])
            string = str((r'$\sqrt{' + str(n + 1) + '}$'))
            plt.text(x/1.4,y/1.4,string)
            X = [0,1,x]
            Y = [0,0,y]
            plt.fill(X,Y,color = col[col_counter],alpha = 0.45)
            last_x = 1
            last_y = 1
        else:
            # Hypotenuse endpoint of triangle n at radius sqrt(n + 1).
            r = sqrt(n + 1)
            x = r*cos(phi)
            y = r*sin(phi)
            plt.plot([0,x], [0,y], linewidth = 1.5,color = col[col_counter])
            # Outer side of length 1, from the previous apex to the new one.
            plt.plot([last_x,x], [last_y,y],color = col[col_counter])
            string = str((r'$\sqrt{' + str(n + 1) + '}$'))
            plt.text(x/1.4,y/1.4,string,fontsize = 13)
            X = [0,last_x,x]
            Y = [0,last_y,y]
            plt.fill(X,Y,color = col[col_counter],alpha = 0.45)
            last_x = x
            last_y = y
        # Cycle through the colour palette.
        col_counter += 1
        if col_counter > c-1:
            col_counter = 0
sl = interactive(Spiral, n=widgets.IntSlider(value = 17, min = 2,max = 100,step = 1, continuous_update = False))
display(sl)
# -
# # Conclusion
#
# In this notebook, we covered the *fundamentals of radical functions* and their *domains of existence*. You should be able to recognize that any function underneath a radical requires its values to be *positive* and that we find this by solving for the root of the function inside the radical. It is important to differentiate that functions of the form:
#
# <h3 align='left'> a) $\ f(g(x)) = g(x)^{\frac{1}{n}}$ </h3>
#
# <h3 align='left'> b) $\ f(g(x)) = g(x)^{\frac{-1}{n}}$ </h3>
#
# In $a)$, the domain of existence is values of $g(x) \geq 0$, while in $b)$, the domain of existence is values of $g(x) > 0$. This is because *with a negative exponent, $g(x) = 0$ does not exist*.
#
# We also covered the most common radical function, *the square root*, and showed how this connects to the absolute value function. It can also be utilized to find the roots of certain quadratic functions of the form:
#
# <h3 align='center'>$f(x) = (ax-b)^2$</h3>
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _sources/curriculum-notebooks/Mathematics/AnalyzingRadicalFunctions/analyzing-radical-functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import subprocess
import time
from os import path
import camhd_motion_analysis as ma  # NOTE(review): unused in this chunk — confirm before removing
# +
# metadata_repo mirrors the movie tree and holds the optical-flow region JSON;
# movie_repo holds the raw CamHD movie files under the same relative path.
metadata_repo = "/home/aaron/workspace/camhd_analysis/CamHD_motion_metadata"
movie_repo = "/home/aaron/canine/workspace/camhd_analysis/test_data"
movie = "/RS03ASHS/PN03B/06-CAMHDA301/2016/03/01/CAMHDA301-20160301T000000Z.mov"
# Regions file lives next to the movie's relative path with the extension
# replaced by "_optical_flow_regions.json".
regions_filename = metadata_repo + (path.splitext(movie)[0]) + "_optical_flow_regions.json"
movie_filename = movie_repo + movie
out_dir = "CAMHDA301-20160301T000000Z"
# +
# Load the list of regions detected for this movie from the metadata JSON.
with open(regions_filename,'r') as infile:
    j = json.load( infile )
regions = j['regions']
# -
# Extract each region of the movie into its own .mkv clip with ffmpeg.
import os  # local import: the setup cell only brings in os.path
# Frame rate used to convert frame-number bounds into timestamps.
# NOTE(review): 29.97 fps is assumed to match the CamHD source movie — confirm.
FPS = 29.97
# Create the output directory up front; ffmpeg cannot create it and would
# otherwise fail to open every out_file.
os.makedirs(out_dir, exist_ok=True)
for region in regions:
    bounds = region['bounds']
    # Convert the region's frame bounds to HH:MM:SS offsets for ffmpeg.
    start_time = time.strftime("%H:%M:%S", time.gmtime(bounds[0] / FPS))
    duration = time.strftime("%H:%M:%S", time.gmtime((bounds[1] - bounds[0]) / FPS))
    print(bounds, start_time, duration)
    out_file = "%s/region_%08d_%08d_%s.mkv" % (out_dir, bounds[0], bounds[1], region['type'])
    # -y overwrites an existing clip; -ss before -i performs fast input seeking.
    result = subprocess.run(['ffmpeg', '-y', '-ss', start_time, '-i', movie_filename,
                             '-t', duration, out_file])
    # Surface failures instead of silently moving on to the next region.
    if result.returncode != 0:
        print("ffmpeg exited with code %d for %s" % (result.returncode, out_file))
| cpp/analysis/Static Section Extractor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/semishen/ML100Days/blob/master/Day_041_HW.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="jpTcZrqUyrmi" colab_type="text"
# ## 作業
#
#
#
# 閱讀以下兩篇文獻,了解決策樹原理,並試著回答後續的問題
# - [決策樹 (Decision Tree) - 中文](https://medium.com/@yehjames/%E8%B3%87%E6%96%99%E5%88%86%E6%9E%90-%E6%A9%9F%E5%99%A8%E5%AD%B8%E7%BF%92-%E7%AC%AC3-5%E8%AC%9B-%E6%B1%BA%E7%AD%96%E6%A8%B9-decision-tree-%E4%BB%A5%E5%8F%8A%E9%9A%A8%E6%A9%9F%E6%A3%AE%E6%9E%97-random-forest-%E4%BB%8B%E7%B4%B9-7079b0ddfbda)
# - [how decision tree works - 英文](http://dataaspirant.com/2017/01/30/how-decision-tree-algorithm-works/)
#
# <br/>
#
# ### Q1: 在分類問題中,若沒有任何限制,決策樹有辦法在訓練時將 training loss 完全降成 0 嗎?
# ### A1: 理論上是可以的,不過會讓模型的複雜度大幅提升,導致過擬合與計算量爆炸。
#
# <br/>
#
# ### Q2: 決策樹只能用在分類問題嗎?還是可以用來解決回歸問題?
# ### A2: 決策樹中可以使用回歸樹或提升樹解決回歸問題。
# + id="CKxE1Uqfyrmj" colab_type="code" colab={}
| Day_041_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Set Up
# The first 5 lines are importing libraries that will be needed later in the notebook. The next lines are setting up the connection to the google service account.
#
# # Getting a Google Service Account
# Here is another great tutorial on using Google Sheets and in the begining it shows the steps to create a google service account to use: https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html.
#
# After setting up the service account you have to share the google sheet with the service account so that it has permission to access it. Then all you have to do is add you client_secret.json file so that the service account can be authorized.
#
# # Drive Folder
# The drive folder were the sheets discussed here can be found at: https://drive.google.com/drive/folders/1FoTM8DRPcfbevmKnmUQN1-LPvE4oE9hJ?usp=sharing.
#
# The Google Sheets that end with 'Orig' is how the Google sheet looked before I ran this notebook and the Google Sheets that end with 'Calculations' is what it looks like after I have ran this notebook.
# +
import pandas as pd
import numpy as np
import csv
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scopes granting the service account access to Sheets and Drive.
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
# client_secret.json is the service-account key file (see the notes above);
# the target spreadsheets must be shared with this service account.
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
# -
# # Create Pandas Dataframes
# In the next cell I will create two pandas dataframes each containing one of the two google sheets that I will connect to.
#
# The first thing to do is to open the Google Sheets so that they can be manipulated. After the sheets are I opened I used the 'get_all_values()' function to get all of the data from that Google sheet. Now the 'get_all_values()' function returns a list of lists which is not my prefered data structure for doing math operations on.
#
# I decided to create a dataframe out of each of those list of lists. I set the columns of the dataframe to the first list in the list, and then all the other lists were set as the data.
#
# The last thing I do in this cell is print out one of the finished dataframes.
# +
# open the google sheets
pendulum_1 = client.open('pendulum1GoodMeasurementsCalculations').sheet1
pendulum_2 = client.open('pendulum2GoodMeasurementsCalculations').sheet1
#read in the data from the spreadsheet
# get_all_values() returns a list of rows, each row a list of cell strings.
pendulum_1_data = pendulum_1.get_all_values()
pendulum_2_data = pendulum_2.get_all_values()
# make a pandas dataframe out of the data
# The first row is the header; the remaining rows become the data.
pendulum_1_df = pd.DataFrame(pendulum_1_data[1:], columns = pendulum_1_data[0])
pendulum_2_df = pd.DataFrame(pendulum_2_data[1:], columns = pendulum_2_data[0])
# print out the data from one of the sheets as an example
pendulum_2_df
# -
# # Convert Strings to Numeric Values
# For some reason the default data type of values read in from Google Sheets are strings. I can not do math operations on strings so the next cell converts the columns that I need to work with to numeric values.
# +
# Convert the Time and Counts columns to numeric values.
# gspread returns every cell as a string, so the columns used in the
# calculations below must be converted before any arithmetic.
pendulum_2_df['Time'] = pd.to_numeric(pendulum_2_df['Time'])
pendulum_2_df['Counts'] = pd.to_numeric(pendulum_2_df['Counts'])
pendulum_1_df['Time'] = pd.to_numeric(pendulum_1_df['Time'])
pendulum_1_df['Counts'] = pd.to_numeric(pendulum_1_df['Counts'])
# -
# # Do My Calculations
# This data was originally for a lab I did in my last year of university, and the following cell is just copied from the notebook I used for it.
#
# The lab was Kater's Pendulum and for that lab my lab partners and I had to count the number of times a pendulum passed in front of a sensor while timing how long that took. The first calculation is the period of each of the trials that were done.
#
# After getting the period for each trial I calculated the standard deviation and the mean of those values.
#
# Finally I printed out those values.
# +
# Period of each trial: elapsed time divided by the number of full swings.
# The division of Counts by 2 presumably reflects two sensor counts per
# full period -- TODO confirm against the lab setup.
pendulum_1_df['Period'] = pendulum_1_df['Time'] / (pendulum_1_df['Counts'] / 2)
pendulum_2_df['Period'] = pendulum_2_df['Time'] / (pendulum_2_df['Counts'] / 2)
# Spread of the period distribution for each pendulum.
std_period1 = pendulum_1_df["Period"].std()
std_period2 = pendulum_2_df["Period"].std()
# Central value of the period distribution for each pendulum.
mean_period1 = pendulum_1_df["Period"].mean()
mean_period2 = pendulum_2_df["Period"].mean()
# Report each mean period and its standard error (std / sqrt(50) trials).
print(f"Period1: {mean_period1}")
print(f"Period2: {mean_period2}")
print(f"Period1 error: {std_period1/np.sqrt(50)}")
print(f"Period2 error: {std_period2/np.sqrt(50)}")
# -
# # Get a List of New Values
# In the following cell I simply took the column that I want to add to Google sheets and made it into a list.
# +
# convert the Period columns to plain Python lists so they can be written
# back to the Google Sheets one cell at a time below
period_1 = pendulum_1_df['Period'].tolist()
period_2 = pendulum_2_df['Period'].tolist()
print(period_1)
# -
# # Updating Google Sheets
# In the next two cells I update the google sheets with the new 'Period' column. I used the 'update_cell()' function to accomplish this.
# add the period column to the pendulum 1 Google Sheet
# (column 7, header in row 1, data starting at row 2)
# NOTE(review): update_cell issues one API request per cell; for larger
# sheets a single batch update would be much faster and avoid rate limits.
pendulum_1.update_cell(1, 7, 'Period')
for row_index, curr_period in enumerate(period_1):
    pendulum_1.update_cell(row_index + 2, 7, curr_period)
# add the period column to the pendulum 2 Google Sheet
pendulum_2.update_cell(1, 7, 'Period')
for row_index, curr_period in enumerate(period_2):
    pendulum_2.update_cell(row_index + 2, 7, curr_period)
# # Adding Mean and Error
# To finish off I added the mean and the error of the period distributions to the end of their respective google sheets.
# +
# Add the mean and error in mean calculations to the google sheets.
# Rows 52/53 assume exactly 50 data rows plus one header row -- TODO
# confirm this matches the sheet layout if the number of trials changes.
pendulum_1.update_cell(52, 1, 'Period Mean')
pendulum_1.update_cell(52, 7, mean_period1)
pendulum_1.update_cell(53, 1, 'Error in Mean')
pendulum_1.update_cell(53, 7, std_period1/np.sqrt(50))
pendulum_2.update_cell(52, 1, 'Period Mean')
pendulum_2.update_cell(52, 7, mean_period2)
pendulum_2.update_cell(53, 1, 'Error in Mean')
pendulum_2.update_cell(53, 7, std_period2/np.sqrt(50))
# -
| GoogleSheetsExample/goodPendulumMeausurementAnlysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FTE/BTE Experiment for Aircraft & Birdsnap
#
# ---
#
# This experiment investigates the ability of progressive learning in transferring knowledge across different datasets. Two datasets, the [FGVC-Aircraft-2013b](https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/), an image dataset of aircrafts, and [Birdsnap](http://thomasberg.org/), an image dataset of birds, have been used. The information of the datasets has been listed below.
#
# | Dataset |# images | # labels | # images per label (minimum) |
# | --------------- |:-----------------:| :-------:|---------------------------:|
# | fgvc-aircraft-2013b | 10000 | 100 | 100 |
# | birdSnap | 49829 | 500 | 69 (100 for most species) |
#
# Before the experiment, we uniform the images by padding them into squares and then by resizing them to $32\times32$ (pixels). The code for dataset preprocessing and the resized images can be found [here](https://github.com/chenyugoal/Dataset_Preprocessing).
#
# Considering the difference between the two datasets in the number of images, we choose a proportion of each dataset to use so that the number of classes and the number of images per class are equal.
import numpy as np
from joblib import Parallel, delayed
# ### Load tasks
#
# The processed dataset can be downloaded [here](https://github.com/chenyugoal/Dataset_Preprocessing).
#
# After running the blocks of code below, we will get the splitted tasks. We have 10 tasks in total, with first 5 set up from Aircraft and the last 5 set up from Birdsnap. Each task has 20 labels to classify. For each label, there are 90 samples.
#
# Therefore, the total number of samples is:
#
# $10\times20\times90=18000$
# +
# Absolute paths to the preprocessed .npy arrays (images X, labels y).
# NOTE(review): these are machine-specific Windows paths; consider reading
# a base directory from an environment variable so the notebook is portable.
path_aircraft_x_all = (
    "F:/Programming/Python/NDD/Dataset_Preprocessing/data/aircraft_x_all.npy"
)
path_aircraft_y_all = (
    "F:/Programming/Python/NDD/Dataset_Preprocessing/data/aircraft_y_all.npy"
)
path_birdsnap_x_all = (
    "F:/Programming/Python/NDD/Dataset_Preprocessing/data/birdsnap_x_all.npy"
)
path_birdsnap_y_all = (
    "F:/Programming/Python/NDD/Dataset_Preprocessing/data/birdsnap_y_all.npy"
)
# +
from functions.fte_bte_aircraft_bird_functions import load_tasks

# Split the two datasets into the train/test task sequences described
# above (10 tasks: 5 from Aircraft, then 5 from Birdsnap).
train_x_task, test_x_task, train_y_task, test_y_task = load_tasks(
    path_aircraft_x_all, path_aircraft_y_all, path_birdsnap_x_all, path_birdsnap_y_all
)
# -
# ### Sample images
#
# Let's take a look at images from Aircraft and Birdsnap by running the block below.
# +
from functions.fte_bte_aircraft_bird_functions import show_image

# Display sample training images from each task for a visual sanity check.
show_image(train_x_task)
# -
# ### Run progressive learning
#
# Here we provide two options of implementations of progressive learning:
#
# - omnidirectional forest (Odif), which uses uncertainty forests as the base representer
# - omnidirectional networks (Odin), which uses a deep network as the base representer.
#
# Use `odif` for omnidirectional forest and `odin` for omnidirectional networks.
# +
from functions.fte_bte_aircraft_bird_functions import single_experiment

model = "odif"  # Choose 'odif' or 'odin'
ntrees = 10  # Number of trees
num_repetition = 30

# Run the full task sequence num_repetition times on 6 worker processes
# and average the per-task accuracies over the repetitions.
accuracy_list = Parallel(n_jobs=6)(
    delayed(single_experiment)(
        train_x_task, test_x_task, train_y_task, test_y_task, ntrees, model
    )
    for _ in range(num_repetition)
)
accuracy_all_avg = np.average(accuracy_list, axis=0)
# -
# ### Calculate and plot transfer efficiency
# +
from functions.fte_bte_aircraft_bird_functions import calculate_results

# Derive error, backward/forward transfer efficiency (bte/fte) and overall
# transfer efficiency (te) from the averaged accuracies.
err, bte, fte, te = calculate_results(accuracy_all_avg)
# +
from functions.fte_bte_aircraft_bird_functions import plot_all

plot_all(err, bte, fte, te)
| docs/experiments/fte_bte_aircraft_bird.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### 1. If you have a classification problem with multiple labels, how does the neural network architecture change?
# ##### Ans: Have a logistic layer for each label, and send the outputs of the logistic layer to a softmax layer
# #### 2. If you have thousands of classes, computing the cross-entropy loss can be very slow. Which of these is a way to help address that problem?
# ##### Ans: Use a noise-contrastive loss function
| Coursera/Art and Science of Machine Learning/Week-2/Quiz/Multi-class-Neural-Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Research on the pass-by-value and pass-by-reference topic
# > Tests covering Python's simple (immutable) and compound (mutable) variable types
#
# +
'''
Demonstration of Python assignment semantics: rebinding immutable values
(int, float, str) versus aliasing mutable containers (list, dict), and the
difference between shallow copies and copy.deepcopy.
'''
# int
# Rebinding `a` to a new int leaves `b` pointing at the old object.
print('################# Test for int #################')
a = 4
print('a=',a)
b = a
print('a=', a, 'b=', b)
a = 5
print('a=', a, 'b=', b)
print('################# end of test #################\n')
# float/double
# Same as ints: floats are immutable, so reassignment never affects `b`.
print('################# Test for float/double #################')
a = 4.1
print('a=',a)
b = a
print('a=', a, 'b=', b)
a = 5.2
print('a=', a, 'b=', b)
print('################# end of test #################\n')
# string
# Strings are immutable too; `'update!' + a` builds a brand-new string.
print('################# Test for string #################')
a = 'this is a test string'
print('a=',a)
b = a
print('a=', a, 'b=', b)
a = 'update!' + a
print('a=', a, 'b=', b)
print('################# end of test #################\n')
# list
# `b = a` aliases the SAME list, so mutating `a` is visible through `b`.
# `a[:]` and `a.copy()` both create shallow copies that are unaffected.
print('################# Test for list #################')
a = ['a','b','c']
print('type of a is:', type(a), 'a=', a)
b = a
c = a[:]
d = a.copy()
print('a=', a, 'b=', b)
a.append('d')
print('a=', a, 'b=', b, 'c=', c, 'd=', d)
print('################# end of test #################\n')
import copy
# dict
# `b` aliases the dict; copy.deepcopy gives an independent copy, so only
# `a` and `b` show the key added afterwards.
print('################# Test for dict #################')
a = {'name': 'lucy', 'age:': '30'}
print('a=',a)
b = a
c = copy.deepcopy(a)
print('a=', a, 'b=', b)
a['sex'] = 'female'
print('a=', a, 'b=', b, 'c=', c)
print('################# end of test #################\n')
| python_trainning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] render=true
# # Use decision optimization to determine Cloud balancing.
#
# This tutorial includes everything you need to set up decision optimization engines, build mathematical programming models, and a solve a capacitated facility location problem to do server load balancing.
#
#
# When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.
#
# >This notebook is part of **[Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)**
# >
# >It requires either an [installation of CPLEX Optimizers](http://ibmdecisionoptimization.github.io/docplex-doc/getting_started.html) or it can be run on [IBM Watson Studio Cloud](https://www.ibm.com/cloud/watson-studio/>) (Sign up for a [free IBM Cloud account](https://dataplatform.cloud.ibm.com/registration/stepone?context=wdp&apps=all>)
# and you can start using Watson Studio Cloud right away).
#
#
# Table of contents:
#
# - [The business problem](#The-business-problem:--Games-Scheduling-in-the-National-Football-League)
# * [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)
# * [Use decision optimization](#Use-decision-optimization)
# * [Step 1: Import the library](#Step-1:-Import-the-library)
# - [Step 2: Model the Data](#Step-2:-Model-the-data)
# * [Step 3: Prepare the data](#Step-3:-Prepare-the-data)
# - [Step 4: Set up the prescriptive model](#Step-4:-Set-up-the-prescriptive-model)
# * [Define the decision variables](#Define-the-decision-variables)
# * [Express the business constraints](#Express-the-business-constraints)
# * [Express the objective](#Express-the-objective)
# * [Solve with Decision Optimization](#Solve-with-Decision-Optimization)
# * [Step 5: Investigate the solution and run an example analysis](#Step-5:-Investigate-the-solution-and-then-run-an-example-analysis)
# * [Summary](#Summary)
#
# + [markdown] render=true
# ## The business problem: Capacitated Facility Location.
#
#
# * The description of the problem can be found here: http://blog.yhat.com/posts/how-yhat-does-cloud-balancing.html
# + [markdown] render=true
# ## How decision optimization can help
#
# * Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes.
#
# * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
#
# * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
# <br/>
#
# <u>With prescriptive analytics, you can:</u>
#
# * Automate the complex decisions and trade-offs to better manage your limited resources.
# * Take advantage of a future opportunity or mitigate a future risk.
# * Proactively update recommendations based on changing events.
# * Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
#
#
# -
# ## Use decision optimization
# ### Step 1: Import the library
#
# Run the following code to import the Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming (docplex.mp) and Constraint Programming (docplex.cp).
import sys
try:
    import docplex.mp
except ImportError:
    # Narrowed from a bare `except:` so that unrelated interrupts
    # (KeyboardInterrupt, SystemExit) are not masked as a missing
    # dependency; only a failed import triggers the install hint.
    raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
# + [markdown] render=true
# ### Step 2: Model the data
# In this scenario, the data is simple and is delivered in the json format under the Optimization github.
# -
from collections import namedtuple

# A user record: id, number of running processes, number of sleeping
# processes, and the server currently hosting the user.
class TUser(namedtuple("TUser", ["id", "running", "sleeping", "current_server"])):
    def __str__(self):
        # Display a user by its id only.
        return self.id

# Python 2 / Python 3 compatibility shims for StringIO and urlopen.
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
try:
    from urllib2 import urlopen
except ImportError:
    from urllib.request import urlopen
# +
import csv

# Download the users CSV from GitHub and parse each row into a tuple of
# (id, running, sleeping, current_server); the two counts become ints.
data_url = "https://github.com/vberaudi/utwt/blob/master/users.csv?raw=true"
xld = urlopen(data_url).read()
xlds = StringIO(xld.decode('utf-8'))
reader = csv.reader(xlds)
users = [(row[0], int(row[1]), int(row[2]), row[3]) for row in reader]
# + [markdown] render=true
# ### Step 3: Prepare the data
#
# The raw CSV rows are wrapped in `TUser` named tuples, and the list of
# candidate servers is derived from the servers the users currently occupy.
#
# A per-server capacity limit on running processes is also defined here.
# +
# Capacity limit: no server may host more than 50 running processes.
max_processes_per_server = 50

# Wrap the raw tuples in TUser records and derive the set of candidate
# servers from the servers users currently occupy.
users = [TUser(*user_row) for user_row in users]
servers = list({t.current_server for t in users})
# + [markdown] render=true
# ### Step 4: Set up the prescriptive model
# -
from docplex.mp.environment import Environment
# Print the local solving environment (CPLEX version, platform, ...).
env = Environment()
env.print_information()
# #### Create the DOcplex model
# The model contains all the business constraints and defines the objective.
# +
from docplex.mp.model import Model

mdl = Model("truck")
# -
# #### Define the decision variables
# +
# One binary per server: 1 if the server stays active, 0 if shut down.
active_var_by_server = mdl.binary_var_dict(servers, name='isActive')

def user_server_pair_namer(u_s):
    """Build a readable variable name '<user>_to_<server>' for a pair."""
    u, s = u_s
    return '%s_to_%s' % (u.id, s)

# One binary per (user, server) pair: 1 if the user is assigned there.
assign_user_to_server_vars = mdl.binary_var_matrix(users, servers, user_server_pair_namer)

# Integer variable bounding the sleeping workload of every server.
max_sleeping_workload = mdl.integer_var(name="max_sleeping_processes")
# -
def _is_migration(user, server):
""" Returns True if server is not the user's current
Used in setup of constraints.
"""
return server != user.current_server
# + [markdown] render=true
# #### Express the business constraints
# -
# Capacity: the running processes assigned to each server must not exceed
# max_processes_per_server.
mdl.add_constraints(
    mdl.sum(assign_user_to_server_vars[u, s] * u.running for u in users) <= max_processes_per_server
    for s in servers)
mdl.print_information()
# each assignment var <u, s> is <= active_server(s)
# (a user can only be placed on a server that remains active)
for s in servers:
    for u in users:
        ct_name = 'ct_assign_to_active_{0!s}_{1!s}'.format(u, s)
        mdl.add_constraint(assign_user_to_server_vars[u, s] <= active_var_by_server[s], ct_name)
# sum of assignment vars for (u, all s in servers) == 1
# (every user is assigned to exactly one server)
for u in users:
    ct_name = 'ct_unique_server_%s' % (u[0])
    mdl.add_constraint(mdl.sum((assign_user_to_server_vars[u, s] for s in servers)) == 1.0, ct_name)
mdl.print_information()
# +
# KPI 1: how many servers remain active.
number_of_active_servers = mdl.sum((active_var_by_server[svr] for svr in servers))
mdl.add_kpi(number_of_active_servers, "Number of active servers")

# KPI 2: how many users end up on a server other than their current one.
number_of_migrations = mdl.sum(
    assign_user_to_server_vars[u, s] for u in users for s in servers if _is_migration(u, s))
mdl.add_kpi(number_of_migrations, "Total number of migrations")


# KPI 3: max_sleeping_workload is forced to be an upper bound on the
# sleeping processes of every server, so minimizing it balances that load.
for s in servers:
    ct_name = 'ct_define_max_sleeping_%s' % s
    mdl.add_constraint(
        mdl.sum(
            assign_user_to_server_vars[u, s] * u.sleeping for u in users) <= max_sleeping_workload,
        ct_name)
mdl.add_kpi(max_sleeping_workload, "Max sleeping workload")
mdl.print_information()
# + [markdown] render=true
# #### Express the objective
# +
# Set objective function: primary goal is to minimize the number of active
# servers; the remaining KPIs are handled by the lexicographic solve below.
mdl.minimize(number_of_active_servers)
mdl.print_information()
# + [markdown] render=true
# ### Solve with Decision Optimization
#
# You will get the best solution found after n seconds, due to a time limit parameter.
#
# +
# build an ordered sequence of goals: first minimize active servers, then
# migrations, then the max sleeping workload.
# NOTE(review): kpi_by_name presumably matches KPI display names by
# substring ("servers" -> "Number of active servers") -- confirm against
# the docplex documentation.
ordered_kpi_keywords = ["servers", "migrations", "sleeping"]
ordered_goals = [mdl.kpi_by_name(k) for k in ordered_kpi_keywords]

mdl.solve_lexicographic(ordered_goals)
mdl.report()
# + [markdown] render=true
# ### Step 5: Investigate the solution and then run an example analysis
# +
# Servers whose activation variable solved to 1.
# NOTE(review): solution_value is a float; comparing with == 1 works for
# binaries in practice, but a tolerance test (>= 0.5) would be safer.
active_servers = sorted([s for s in servers if active_var_by_server[s].solution_value == 1])
print("Active Servers: {}".format(active_servers))

print("*** User assignment ***")
# Report the chosen server for each user and whether it is a migration.
for (u, s) in sorted(assign_user_to_server_vars):
    if assign_user_to_server_vars[(u, s)].solution_value == 1:
        print("{} uses {}, migration: {}".format(u, s, "yes" if _is_migration(u, s) else "no"))
print("*** Servers sleeping processes ***")
# Total sleeping workload actually placed on each active server.
for s in active_servers:
    sleeping = sum(assign_user_to_server_vars[u, s].solution_value * u.sleeping for u in users)
    print("Server: {} #sleeping={}".format(s, sleeping))
# -
# ## Summary
#
#
# You learned how to set up and use IBM Decision Optimization CPLEX Modeling for Python to formulate a Constraint Programming model and solve it with IBM Decision Optimization on Cloud.
# + [markdown] render=true
# #### References
# * [Decision Optimization CPLEX Modeling for Python documentation](http://ibmdecisionoptimization.github.io/docplex-doc/)
# * [Decision Optimization on Cloud](https://developer.ibm.com/docloud/)
# * Need help with DOcplex or to report a bug? Please go [here](https://stackoverflow.com/questions/tagged/docplex).
# * Contact us at <EMAIL>.
#
# -
# Copyright © 2017-2019 IBM. IPLA licensed Sample Materials.
| examples/mp/jupyter/load_balancing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.13 64-bit (''ml_basics'': conda)'
# name: python3
# ---
# ---
# title: "Anomaly detection with PCA"
# description: "Anomaly detection with PCA and python"
# lead: "Anomaly detection with PCA and python"
# date: 2021-09-06T08:49:31+00:00
# lastmod: 2021-09-06T08:49:31+00:00
# draft: false
# images: []
# menu:
# machinelearning:
# parent: "ml_basics"
# weight: 730
# toc: true
# ---
# ## Introduction
#
# Outlier detection with Principal Component Analysis (PCA) is an unsupervised strategy for identifying outliers when the data are unlabeled. Although this strategy makes use of PCA, it does not directly use its output as a way to detect anomalies, but uses the reconstruction error produced by reversing dimensionality reduction. The reconstruction error as a strategy to detect anomalies is based on the following idea: dimensionality reduction methods allow the observations to be projected into a space of lower dimension than the original space, while trying to preserve as much information as possible. The way they manage to minimize the overall loss of information is by finding a new space in which the majority of observations can be well represented.
#
# The PCA method creates a function that maps the position that each observation occupies in the original space to the position it occupies in the newly generated space. This mapping works in both directions, so it is also possible to go from the new space to the original space. Only those observations that have been well projected will be able to return to the position they occupied in the original space with high accuracy.
#
# Since the search for that new space has been guided by the majority of observations, it will be the observations closest to the average that can be best projected and consequently best reconstructed. The anomalous observations, on the contrary, will be badly projected and their reconstruction will be worse. It is this reconstruction error (squared) that can be used to identify anomalies.
#
# For more details on how PCA works and its application in Python, see Principal Component Analysis with Python.
#
# ## Packages
# +
# Data processing
# ==============================================================================
import numpy as np
import pandas as pd
from mat4py import loadmat
# Plots
# ==============================================================================
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
style.use('ggplot') or plt.style.use('ggplot')
# Preprocessing and modeling
# ==============================================================================
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from tqdm import tqdm
# Warnings configuration
# ==============================================================================
import warnings
warnings.filterwarnings('ignore')
# -
# ## Data
#
# The data used in this paper were obtained from Outlier Detection DataSets (ODDS), a repository of data commonly used to compare the ability of different algorithms to identify outliers. _<NAME> (2016). [ODDS Library](http://odds.cs.stonybrook.edu). Stony Brook, NY: Stony Brook University, Department of Computer Science._
#
# All of these data sets are labeled, it is known whether or not the observations are outliers (y-variable). Although the methods described in the paper are unsupervised, i.e., they do not make use of the response variable, knowing the true classification allows evaluation of their ability to correctly identify anomalies.
#
# Link to [Cardiotocography data set](http://odds.cs.stonybrook.edu/cardiotocogrpahy-dataset/):
#
# * Number of observations: 1831
# * Number of variables: 21
# * Number of outliers: 176 (9.6%)
# * y: 1 = outliers, 0 = inliers
# * Remarks: all variables are centered and scaled (mean 0, sd 1).
# * Reference: _<NAME> and <NAME>, "Theoretical foundations and algorithms for outlier ensembles". ACM SIGKDD Explorations Newsletter, vol. 17, no. 1, pp. 24-47, 2015. <NAME> and <NAME>. LODES: Local density meets spectral outlier detection. SIAM Conference on Data Mining, 2016._
#
# Link to [speech dataset](http://odds.cs.stonybrook.edu/speech-dataset/):
#
# * Number of observations: 3686.
# * Number of variables: 400
# * Number of outliers: 61 (1.65%).
# * y: 1 = outliers, 0 = inliers
# * Reference: _Learing Outlier Ensembles: The Best of Both Worlds - Supervised and Unsupervised. <NAME>, <NAME>, and <NAME>, KDD ODD2 Workshop, 2014._
#
# Link to [Shuttle dataset](http://odds.cs.stonybrook.edu/shuttle-dataset/):
#
# * Number of observations: 49097.
# * Number of variables: 9
# * Number of outliers: 3511 (7%).
# * y: 1 = outliers, 0 = inliers.
# * Reference: _<NAME>, <NAME> and <NAME>. "Outlier detection using active learning." Proceedings of the 12th ACM SIGKDD international conference on knowledge discovery and data mining. ACM, 2006._
#
# The data are available in matlab (.mat) format. The `loadmat()` function of the `mat4py 0.1.0` package is used to read its contents.
# Data loading
# ==============================================================================
# Read the Cardiotocography dataset from the MATLAB file: X holds the 21
# features, y the outlier labels (1 = outlier, 0 = inlier).
datos = loadmat(filename='cardio.mat')
datos_X = pd.DataFrame(datos['X'])
datos_X.columns = ["col_" + str(i) for i in datos_X.columns]
datos_y = pd.DataFrame(datos['y'])
datos_y = datos_y.to_numpy().flatten()
# ## PCA
#
# Principal Component Analysis (PCA) is a statistical method that simplifies the complexity of sample spaces with multiple dimensions while preserving their information. Suppose there is a sample with $n$ individuals each with $p$ variables $( X_1 , X_2 , ..., X_p )$, i.e. the space has $p$ dimensions. PCA allows to find a number of underlying factors, $(z<p)$ that explain approximately the same as the original $p$ variables. Where previously $p$ values were needed to characterize each individual, now $z$ values suffice. Each of these $z$ new variables is called a principal component.
#
# Each principal component $( Z_i )$ is obtained by linear combination of the original variables. They can be understood as new variables obtained by combining the original variables in a certain way. The first principal component of a group of variables $( X_1 , X_2 , ..., X_p )$ is the normalized linear combination of these variables that has the highest variance. Once the first component $( Z_1 )$ has been calculated, the second component $( Z_2 )$ is calculated by repeating the same process, but adding the condition that the linear combination cannot be correlated with the first component. The process is repeated iteratively until all possible components are calculated (min(n-1, p)) or until it is decided to stop the process.
#
# The main limitation of PCA as a dimensionality reduction method is that **it only considers linear combinations of the original variables**, which means that it is not able to capture other types of relationships.
| content/machinelearning/ml_basics/anomaly_detection_pca.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from matplotlib import pyplot as plt
from os import sys
from ipywidgets import interact, IntSlider
sys.path.append('../')
import gridemic
# Build the epidemic model on a 50x50 grid with transmission (tau/eta),
# tracing/detection probabilities, and a fixed daily testing capacity.
SEIIRT = gridemic.Model(seed_random = 1, N = 50, tauW = 0.15, etaW = 0.15,
                        tauS = 0.15, etaS = 0.15, prob_trace = 0.75, prob_detect=0.75,
                        num_tests = int(1e3))
basic_reproduction_number = SEIIRT.reproduction_number()
# Seed the simulation with initial infectious individuals.
SEIIRT.add_infectious()
# +
t_final = 200
figures = []
# population[t, :] stores the daily compartment counts in the order
# S, E, I_w, I_s, R, detected cases.
population = np.zeros((t_final + 1, 6))
while SEIIRT.time <= t_final:
    # Snapshot the grid state for the interactive day-by-day viewer.
    fig = plt.figure(figsize=(12, 9))
    ax = fig.gca()
    ax = SEIIRT.visualize_population(ax)
    ax.set_title(f'Day {SEIIRT.time}')
    figures.append(fig)
    plt.close()
    # Tally each disease compartment for the current day.
    population[SEIIRT.time, 0] = np.sum(SEIIRT.disease_state==0) # S
    population[SEIIRT.time, 1] = np.sum(SEIIRT.disease_state==1) # E
    population[SEIIRT.time, 2] = np.sum(SEIIRT.disease_state==2) # I_w
    population[SEIIRT.time, 3] = np.sum(SEIIRT.disease_state==3) # I_s
    population[SEIIRT.time, 4] = np.sum(SEIIRT.disease_state==4) # I_R
    population[SEIIRT.time, 5] = (np.sum(SEIIRT.testing_state==3)
                                  + np.sum(SEIIRT.testing_state==4)) # cases
    # Advance the model by one day.
    SEIIRT.evolve()
def show_population(day):
    """Return the stored figure for the given simulation day (slider callback)."""
    return figures[day]

# Interactive slider to browse the per-day snapshots.
interact(show_population, day = IntSlider(min = 0, max = t_final, step = 1, value = 0))
# Plot each compartment's time series over the full simulation.
fig_pop = plt.figure(figsize = (6, 6))
ax = fig_pop.gca()
ax.plot(population[:, 0], label='$S$')
ax.plot(population[:, 1], label='$E$')
ax.plot(population[:, 2], label='$I_w$')
ax.plot(population[:, 3], label='$I_s$')
ax.plot(population[:, 4], label='$R$')
ax.plot(population[:, 5], label='$C$')
ax.set_xlabel('Days')
ax.set_ylabel('Number of people')
ax.legend()
| examples/SEIIRT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/EduardoML7200/daa_2021_1/blob/master/07Octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="COZPCCyIShol"
# # Búsqueda lineal
# Dado un conjunto de datos no ordenados, la búsqueda lineal consiste en recorrer el conjunto de datos desde el inicio al final, moviéndose de uno en uno hasta encontrar el elemento o llegar al final del conjunto.
#
# datos = [ 4, 18, 47, 2, 34, 14, 78, 12, 48, 21, 31, 19, 1, 3, 5]
#
# #Búsqueda Binaria
# Funciona sobre un conjunto de datos lineal ordenado, consiste en dividir el conjunto en mitades y buscar en esa mitad, si el elemento buscado no está en la mitad, preguntas si el elemento esta a la derecha o a la izquierda.
# Haces la lista igual a la mitad correspondiente y repites el proceso.
#
# L = [1, 2, 3, 4, 5, 12, 14, 18, 19, 21, 31, 34, 47, 48, 78]
#
# DER = LONGITUD ( L ) -1
#
# IZQ = 0
#
# MID = 0 -> apuntara a la mitad del segmento de busqueda
#
# 1. Hacer DER
# 2. Hacer IZQ = 0
# 3. Si IZQ > DER hay un error en datos
# 4. Calcular MID = int( ( IZQ + DER ) / 2 )
# 5. Mientras L[MID] != buscado hacer
# 6. Preguntar L[MID] > buscado
# hacer DER = MID
# de lo contrario
# hacer IZQ = MID
# preguntar (DER - IZQ ) % 2
# MID = ( IZQ + (( DER - IZQ ) / 2 ) + 1
# de lo contrario
# MID = IZQ + (( DER - IZQ ) / 2)
# 7. return MID
#
#
#
#
#
# + id="OCtyeUsbSbIo" outputId="478a2d19-caa8-4bba-851b-3852fd619091" colab={"base_uri": "https://localhost:8080/"}
"""
Busqueda lineal regresa la posicion del elemento "Buscado" si se encuentra dentro de la lista. Regresa -1 si el elemento buscado no existe dentro de ella
"""
def busqueda_lineal(L, buscado):
    """Linear search.

    Return the index of `buscado` in `L`, or -1 if it is not present.
    Also prints how many element comparisons were performed.
    """
    indice = -1
    contador = 0
    # enumerate() is the idiomatic replacement for indexing with
    # range(len(L)); behaviour (including the comparison count) is the same.
    for idx, valor in enumerate(L):
        contador += 1
        if valor == buscado:
            indice = idx
            break
    print(f"numero de comparaciones realizadas = { contador }")
    return indice
"""
Busqueda binaria
"""
def busqueda_binaria(L, buscado):
    """Binary search on the sorted list `L`.

    Return the position of `buscado`, or -1 when it is absent.
    The previous implementation looped forever when the element was
    missing (and raised IndexError on an empty list) because the bounds
    never excluded the tested midpoint; the classic inclusive-bounds
    halving below fixes both problems while returning the same index
    for elements that are present.
    """
    IZQ = 0
    DER = len(L) - 1
    while IZQ <= DER:
        MID = (IZQ + DER) // 2
        if L[MID] == buscado:
            return MID
        if L[MID] < buscado:
            # Target lies strictly to the right of MID.
            IZQ = MID + 1
        else:
            # Target lies strictly to the left of MID.
            DER = MID - 1
    return -1
def main():
    """Interactive demo: search for a user-supplied value linearly
    (unsorted and sorted) and then with binary search."""
    # Start from an unsorted data set.
    valores = [4, 18, 47, 2, 34, 14, 78, 12, 48, 21, 31, 19, 1, 3, 5]
    objetivo = int(input("Que valor deseas buscar?"))
    print("Resultado:", busqueda_lineal(valores, objetivo))
    print("Busqueda lineal en una lista ordenada")
    # Repeat the linear search on the sorted data.
    valores.sort()
    print(valores)
    print("Resultado:", busqueda_lineal(valores, objetivo))
    # Binary search requires the sorted list.
    posicion = busqueda_binaria(valores, objetivo)
    print(f"El elemento {objetivo} esta en la posicion {posicion} de la lista")


main()
| 07Octubre.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Install notes for this notebook's third-party dependencies:
#pip install cufflinks
#pip install plotly
#pip install numpy
#pip install pandas
import numpy as np
import pandas as pd
import plotly as pl
import cufflinks as cf
import plotly.offline as po
# %matplotlib inline
# -

# Configure plotly/cufflinks for offline rendering inside the notebook
po.init_notebook_mode(connected=True)
cf.go_offline()

# 100 rows x 5 columns of uniform random data for the scatter demos
df= pd.DataFrame(np.random.rand(100,5) , columns=["a","b","c","d","e"])
# Small mixed-type frame; not used below — presumably kept for other demos
df2 = pd.DataFrame({"x":["a", "b", "c", "d", "e"], "y":[1,2,3,4,5], "z":[6,7,8,4,3]})
# Earlier variants kept for reference:
#df.iplot(kind="scatter", x="a", y="b")
#df.iplot(kind="scatter", x="a", y="b", mode="markers")
# Interactive point plot of column "a" vs "b"
df.iplot(kind="scatter", x="a", y="b", mode="markers", size=7)
# Pairwise scatter matrix of all five columns (cufflinks helper)
df.scatter_matrix()
| Plotly and cufflinks/point plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Description
#
# Some tests on simulating brownian motion evolution
# ## brownian motion model:
#
# > dX(t) = sigma * d * B(t)
#
# * sigma = standard deviation
# * B = random noise
# * t = time
import dendropy
from scipy.stats import norm
# +
def brownian(x0, n, dt, delta):
    """Take n Brownian-motion steps of duration dt starting from x0.

    Each increment is drawn from N(0, delta**2 * dt), i.e. with standard
    deviation delta*sqrt(dt), matching the model dX(t) = sigma * dB(t)
    described above.  (The previous version passed delta**2*dt — the
    *variance* — as numpy's `scale`, which expects a standard deviation.)

    Returns the endpoint of the walk (a float).
    """
    for _ in range(n):
        x0 += np.random.normal(scale=delta * np.sqrt(dt))
    return x0

brownian(0.5, 10, 0.5, 0.25)
# +
import random
import dendropy

def process_node(node, start=1.0):
    """Recursively assign a Brownian-motion trait value to every node.

    The root (no parent) receives `start`; every other node draws from a
    normal distribution centred on its parent's value with standard
    deviation equal to the connecting edge length, so variance grows with
    branch length.  Prints "taxon : value" for labelled nodes.
    """
    if node.parent_node is None:
        node.value = start
    else:
        # one Gaussian step per branch; edge length acts as the step's std
        node.value = random.gauss(node.parent_node.value, node.edge.length)
    for child in node.child_nodes():
        process_node(child)
    if node.taxon is not None:
        print("%s : %s" % (node.taxon, node.value))

# Simulate a 10-taxon birth-death tree and evolve the trait down from the root
mle = dendropy.treesim.birth_death(birth_rate=1, death_rate=0.5, ntax=10)
process_node(mle.seed_node)
# +
from math import sqrt
from scipy.stats import norm
import numpy as np
def brownian(x0, n, dt, delta, out=None):
    """\
    Generate an instance of Brownian motion (i.e. the Wiener process):

        X(t) = X(0) + N(0, delta**2 * t; 0, t)

    where N(a,b; t0, t1) is a normally distributed random variable with mean a
    and variance b, and values on disjoint time intervals are independent.

    Arguments
    ---------
    x0 : float or numpy array (or anything numpy.asarray accepts).
        The initial condition(s) (i.e. position(s)) of the Brownian motion.
    n : int
        The number of steps to take.
    dt : float
        The time step.
    delta : float
        delta determines the "speed" of the Brownian motion. The random
        variable of the position at time t, X(t), has a normal distribution
        whose mean is the position at time t=0 and whose variance is
        delta**2*t.
    out : numpy array or None
        If given, the array the result is written into; if None (default),
        a new array is allocated.  (The previous version referenced `out`
        without ever defining it, so every call raised NameError.)

    Returns
    -------
    A numpy array of floats with shape `x0.shape + (n,)`.
    Note that the initial value `x0` is not included in the returned array.
    """
    x0 = np.asarray(x0)
    # For each element of x0, draw n Gaussian increments with standard
    # deviation delta*sqrt(dt) (i.e. variance delta**2*dt).
    r = np.random.normal(size=x0.shape + (n,), scale=delta*np.sqrt(dt))
    # If `out` was not given, create an output array.
    if out is None:
        out = np.empty(r.shape)
    # Brownian motion is the cumulative sum of independent increments.
    np.cumsum(r, axis=-1, out=out)
    # Add the initial condition.
    out += np.expand_dims(x0, axis=-1)
    return out
# NOTE(review): Python 2 syntax (xrange + print statement); this cell will not
# run under Python 3 — it prints ten single-step walks from 0.5.
for i in xrange(10):
    print brownian(0.5, 1, 1, 0.25)
# +
import random
import dendropy

def process_node(node, start=1.0):
    """Recursive trait evolution using the array-based `brownian` helper.

    The root receives `start`; each child takes one Brownian step whose
    time span (`dt`) is the branch length, then the value is clamped to
    the interval [0, 1].  Prints "taxon : value" for labelled nodes.
    """
    if node.parent_node is None:
        node.value = start
    else:
        # single Brownian step along this branch
        x = brownian(node.parent_node.value,
                     n = 1,
                     dt = node.edge.length,
                     delta = 0.25)
        x = float(x[-1])
        # clamp the trait to [0, 1]
        x = x if x >=0 else 0
        x = x if x <= 1 else 1
        node.value = x
    for child in node.child_nodes():
        process_node(child)
    if node.taxon is not None:
        print("%s : %s" % (node.taxon, node.value))

# Simulate a 10-taxon birth-death tree and evolve the trait over it
mle = dendropy.treesim.birth_death(birth_rate=1, death_rate=0.5, ntax=10)
process_node(mle.seed_node)
# -
# ## brownian motion and purely random
#
# * function composed of both brownian motion and purely random selection
# * Idea from paper: "How to measure and test phylogenetic signal"
# * http://onlinelibrary.wiley.com/doi/10.1111/j.2041-210X.2012.00196.x/epdf
#
# * a ratio parameter determines how much of the brownian motion vs random continuous value is use
# * range of 0-1, 0 = random, 1 = BM
# * BD_value * ratio + random_value * (1-ratio)
#
# +
import numpy as np
import scipy.stats as stats
import dendropy
def sim_trait(node, start=0, sigma=0.1, ratio=0.5, verbose=False):
    """Recursively simulate a trait as a weighted mix of Brownian motion
    and a purely random draw (idea from "How to measure and test
    phylogenetic signal", Methods in Ecology and Evolution).

    ratio -- blend weight in [0, 1]: 0 = purely random, 1 = purely Brownian.
    The root (no parent) is assigned `start`; every other node gets
    BM * ratio + rnd * (1 - ratio).
    """
    if node.parent_node is None:
        node.value = start
    else:
        # Brownian component: centred on the parent's value
        BM = np.random.normal(loc=node.parent_node.value, scale=sigma)
        #rnd = np.random.uniform(minVal, maxVal)
        # Random component: centred on the global start value
        rnd = np.random.normal(loc=start, scale=sigma)
        node.value = BM * ratio + rnd * (1 - ratio)
        #print([BM, rnd, node.value])
        #node.value = node.value if node.value >= minVal else minVal
        #node.value = node.value if node.value <= maxVal else maxVal
    for child in node.child_nodes():
        sim_trait(child, start=start, sigma=sigma,
                  ratio=ratio, verbose=verbose)
    if verbose and node.taxon is not None:
        print('{} : {}'.format(node.taxon, node.value))

# Demo: simulate on a 10-taxon birth-death tree and draw it as ASCII art
mle = dendropy.treesim.birth_death(birth_rate=1, death_rate=0.5, ntax=10)
sim_trait(mle.seed_node, verbose=True)
mle.print_plot(display_width=70)
# -
# ## 2nd attempt
# +
from math import sqrt
from scipy.stats import norm
import numpy as np
def brownian(x0, n, dt, delta, out=None):
    """\
    Generate an instance of Brownian motion (i.e. the Wiener process):

        X(t) = X(0) + N(0, delta**2 * t; 0, t)

    where N(a,b; t0, t1) is a normally distributed random variable with mean a
    and variance b, and values on disjoint time intervals are independent.

    Arguments
    ---------
    x0 : float or numpy array (or anything numpy.asarray accepts).
        The initial condition(s) (i.e. position(s)) of the Brownian motion.
    n : int
        The number of steps to take.
    dt : float
        The time step.
    delta : float
        delta determines the "speed" of the Brownian motion. The random
        variable of the position at time t, X(t), has a normal distribution
        whose mean is the position at time t=0 and whose variance is
        delta**2*t.
    out : numpy array or None
        If given, the array the result is written into; if None (default),
        a new array is allocated.  (The previous version referenced `out`
        without ever defining it, so every call raised NameError.)

    Returns
    -------
    A numpy array of floats with shape `x0.shape + (n,)`.
    Note that the initial value `x0` is not included in the returned array.
    """
    x0 = np.asarray(x0)
    # For each element of x0, draw n Gaussian increments with standard
    # deviation delta*sqrt(dt) (i.e. variance delta**2*dt).
    r = np.random.normal(size=x0.shape + (n,), scale=delta*np.sqrt(dt))
    # If `out` was not given, create an output array.
    if out is None:
        out = np.empty(r.shape)
    # Brownian motion is the cumulative sum of independent increments.
    np.cumsum(r, axis=-1, out=out)
    # Add the initial condition.
    out += np.expand_dims(x0, axis=-1)
    return out
# NOTE(review): Python 2 syntax (xrange + print statement); this cell will not
# run under Python 3 — it prints ten single-step walks from 0.5.
for i in xrange(10):
    print brownian(0.5, 1, 1, 0.25)
# +
import numpy as np
import dendropy
def sim_traits(tree, start=0, sigma=0.1, weight=0.5, verbose=False):
"""Trait simulation as detailed in:
author = {<NAME>, <NAME>,
Bruno and <NAME> Jombart,
Thibaut and <NAME>},
title = {How to measure and test phylogenetic signal},
journal = {Methods in Ecology and Evolution}
Args:
tree -- dendropy tree object
start -- starting value for continuous character evolution
sigma -- sigma use for drawing from a normal distribution
weight -- weight parameter for random vs Brownian motion
range: 0-1; 0 = purely random; 1 = purely Brownian
verbose -- verbose output
"""
ntaxa = len(tree.nodes())
# simulate brownian motion
BM = np.random.normal(loc=0, scale=sigma, size=ntaxa)
BM = np.cumsum(BM) + start
# random values
rnd = np.random.permutation(BM)
# making weighted sums
ws = weight * BM + (1-weight) * rnd
# z-scaling weighted sums
ws = (ws - np.mean(ws)) / np.std(ws)
for i, node in enumerate(tree.preorder_node_iter()):
node.value = ws[i]
if verbose and node.taxon is not None:
print('{} : {}'.format(node.taxon, node.value))
# Demo 1: default blend (weight=0.5 — half Brownian, half random)
mle = dendropy.treesim.birth_death(birth_rate=1, death_rate=0.5, ntax=10)
sim_traits(mle, verbose=True)
mle.print_plot(display_width=70)
# -

# Demo 2: purely Brownian trait (weight=1)
mle = dendropy.treesim.birth_death(birth_rate=1, death_rate=0.5, ntax=10)
sim_traits(mle, weight=1, verbose=True)
mle.print_plot(display_width=70)

# Demo 3: purely random trait (weight=0)
mle = dendropy.treesim.birth_death(birth_rate=1, death_rate=0.5, ntax=10)
sim_traits(mle, weight=0, verbose=True)
mle.print_plot(display_width=70)
# # Dendropy sandbox
# API exploration cell: `tree.find_node` only displays the bound method, it
# is never called here.
tree = dendropy.treesim.birth_death(birth_rate=1, death_rate=0.5, ntax=10)
tree.find_node
# +
# Empty continuous-character matrix; `extend` was being inspected with `?`
chrMtx = dendropy.ContinuousCharacterMatrix()
# chrMtx.extend?
# -
| ipynb/theory/BM_simulations.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Octave
% language: octave
% name: octave
% ---
% # <center>Gaussian normal equation</center>
% ## <center>Linear regression</center>
% For 14 days we measure our 5km running results in seconds. Additionally, every 7th day we run an extra 5km. We would like to fit a linear regression to the data. What will our model predict for the 15th day?
% <table>
% <tr>
% <td>t_i (Days) | 1| 2 | 3 | 4 | 5 | 6 | 7 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 14 | 15</td>
% </tr>
% <br>
% <tr>
% <td>f_i (sec)| 1528 | 1504 | 1488 | 1432 | 1528 | 1392 | 1368| 1392 | 1320 | 1316 | 1388 | 1288 | 1352 | 1232 | 1200 | 1244 | ? </td>
% </tr>
% </table>
% Day index: days 7 and 14 appear twice (extra 5km run on those days)
t = [1:7 7 8:14 14 ];
f = [1528 1504 1488 1432 1528 1392 1368 1392 1320 1316 1328 1288 1352 1232 1200 1244];
% Raw measurements as magenta stars
plot(t,f, 'm*')
title('5km running results')
xlabel('Day')
ylabel('Sec')
% +
% Gaussian normal equations for the linear model F(t) = x(1) + x(2)*t:
%   M*x = b with M = [n sum(t); sum(t) sum(t.^2)] and b = [sum(f); sum(t.*f)]
M = [length(t) sum(t); sum(t) sum(t.^2)]
b = [sum(f) t*f']'
x = M\b
%% The prediction for the 15th day
predicted_value = x(1)+x(2)*15
% +
% Data, predicted value and the linear regression on the same figure
% Dense grid from day 1 to day 15 for drawing the fitted line
fine_t = linspace(1,15,100);
plot(t,f, 'm*')
hold on
% Day-15 prediction marked with a blue circle
plot(15,predicted_value,'bo')
plot(fine_t, x(1)+x(2)*fine_t)
title('5km running results')
xlabel('Day')
ylabel('Sec')
legend('Data', 'Predicted value', 'Linear regression')
% -
%
% ## <center>Polinomial model</center>
% +
format long
% Design matrices for the quadratic (A_2) and cubic (A_3) polynomial models
A_2 = [ones(length(t),1) t' (t.^2)'];
A_3 = [ones(length(t),1) t' (t.^2)' (t.^3)'];
%% The Gaussian normal equation A^T*A*x=A^T*f
x_2 = (A_2'*A_2)\(A_2'*f')
x_3 = (A_3'*A_3)\(A_3'*f')
% -
% Data with the linear, quadratic and cubic fits on the same figure
plot(t,f, 'm*')
hold on
% Fixed: `finom_t` was undefined (Hungarian leftover) — the grid variable is `fine_t`
plot(fine_t, x(1)+x(2)*fine_t) % Linear regression
plot(fine_t, x_2(1)+x_2(2)*(fine_t)+x_2(3)*(fine_t).^2) % Second order
plot(fine_t, x_3(1)+x_3(2)*(fine_t)+x_3(3)*(fine_t).^2+x_3(4)*(fine_t).^3) % Third order
title('5km running results')
xlabel('Day')
ylabel('Sec')
legend('Data', 'Linear regression','Second order', 'Third order')
% Predicted values for day 15 (fine_t(end) == 15)
lin_reg = predicted_value
second_order = x_2(1)+x_2(2)*(fine_t(end))+x_2(3)*(fine_t(end)).^2
third_order = x_3(1)+x_3(2)*(fine_t(end))+x_3(3)*(fine_t(end)).^2+x_3(4)*(fine_t(end)).^3
% Relevant example:
%
% + <a href="https://www.youtube.com/watch?v=njKP3FqW3Sk&t=2834s" target="_blank">Fitting</a>
%
% <br><br><br>
% ## <center>Trigonometric model</center>
%
%
% <b>Example 1.</b> Determine the coefficients of the model
%
% <br>
% $$F(t)=x_1+x_2\cos(\pi t)+x_3\sin(\pi t)$$
% <br>
% in the least square sense for the data
% <br>
%
% <table>
% <tr>
% <td>t_i | 0| 0.5 | 1 | 1.5 | 2 | 2.5</td>
% </tr>
% <br>
% <tr>
% <td>f_i | 1 | -2 | -2.5 | -0.5 | 1.25 | -1.5 </td>
% </tr>
% </table>
% +
format
t = [0 0.5 1 1.5 2 2.5];
f = [1 -2 -2.5 -0.5 1.25 -1.5];
% Design matrix for F(t) = x(1) + x(2)*cos(pi*t) + x(3)*sin(pi*t)
A = [ones(length(t),1) cos(pi*t)' sin(pi*t)']
A_rank = rank(A)
% Least-squares coefficients via the Gaussian normal equations
x = (A'*A)\(A'*f')
tt = linspace(0,2.5,100);
plot(t,f,'mo',tt,x(1)+x(2)*cos(pi*tt)+x(3)*sin(pi*tt))
legend('Data','Trigonometric model')
% -
% <b>Example 2.</b> Determine the coefficients of the model
%
% <br>
% $$F(t)=x_1+x_2\cos(\pi t)+x_3\sin(\pi t)$$
% <br>
% in the least square sense for the data
% <br>
%
% <table>
% <tr>
% <td>t_i | 0| 0.5 | 2 | 2.5</td>
% </tr>
% <br>
% <tr>
% <td>f_i | 1 | -2 | 1.25 | -1.5 </td>
% </tr>
% </table>
% +
format long
t = [0 0.5 2 2.5];
f = [1 -2 1.25 -1.5];
A = [ones(length(t),1) cos(pi*t)' sin(pi*t)']
A_rank = rank(A) % It is not the maximal one
% Rank-deficient: at these sample points cos(pi*t)+sin(pi*t) equals the ones
% column, so the sine column is dropped to make the model identifiable
A = [ones(length(t),1) cos(pi*t)']
x = (A'*A)\(A'*f')
tt = linspace(0,2.5,100);
plot(t,f,'mo',tt,x(1)+x(2)*cos(pi*tt))
legend('Data','Trigonometric model')
% -
| Lecture/Block#3.ipynb |