code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv
# language: python
# name: venv
# ---
# Locate the Ghostscript shared library that camelot needs on Windows.
# The DLL name encodes the pointer width of the interpreter:
# 32-bit -> gsdll32.dll, 64-bit -> gsdll64.dll.
from ctypes.util import find_library
import ctypes
find_library("".join(("gsdll", str(ctypes.sizeof(ctypes.c_voidp) * 8), ".dll")))
# +
# Extract raw text from a PDF with PyMuPDF (fitz) and count word frequencies.
import fitz
import pandas as pd
from collections import Counter  # NOTE(review): imported but unused in this cell
doc = fitz.open('CISC367_Final_Paper (1).pdf')
# Concatenate the plain text of every page into one string.
text = "".join(page.get_text("text") for page in doc)
words = pd.Series(text.split())
# 30 most frequent whitespace-separated tokens in the paper.
words.value_counts().head(30)
# -
# Parse the first table out of a PDF with camelot (requires Ghostscript, see above).
import camelot
tables = camelot.read_pdf('calendar.pdf')
# camelot returns a TableList; each entry exposes the parsed table as a DataFrame.
df = tables[0].df
df
# ### Reddit Image
# Fetch the JSON listing of r/badcode via Reddit's public .json endpoint.
# A custom User-agent is required, otherwise Reddit throttles the default one.
import requests
data = requests.get("https://www.reddit.com/r/badcode/.json", headers = {'User-agent': 'your bot 0.1'}).json()
import json
# Cache the raw response locally for reproducibility.
with open("reddit.json", "w") as json_file:
    json.dump(data,json_file)
# Collect every string field of the first 25 posts that mentions a .png image.
# NOTE(review): assumes the listing contains at least 25 posts — confirm.
pic_list = []
for i in range(25):
    index = data["data"]["children"][i]["data"]
    for val in index:
        if isinstance(index[val],str):
            if ".png" in index[val]:
                pic_list.append(index[val])
print(pic_list)
print(len(pic_list))
# OCR each collected image with Tesseract.
import pytesseract
from PIL import Image
import io as io
all_text = []
for url in pic_list:
    response = requests.get(url)
    # Decode the downloaded bytes into a PIL image without touching disk.
    img = Image.open(io.BytesIO(response.content))
    text = pytesseract.image_to_string(img)
    all_text.append(text)
print("done")
# Merge the per-image OCR results into one corpus.
# "".join runs in linear time, unlike repeated `+=` concatenation in a loop.
str_text = "".join(all_text)
# Word-frequency table over the whole OCR corpus.
words = pd.Series(str_text.split())
print(words.value_counts())
# Sentiment analysis of the OCR'd corpus with TextBlob.
from textblob import TextBlob
blob = TextBlob(str_text)
# 30 most common words according to TextBlob's tokeniser.
pd.Series(blob.word_counts).sort_values().tail(30)
# Bug fix: the original rebuilt the blob from `text` — the leftover loop
# variable holding only the *last* image's OCR output — before scoring.
# Score the sentiment of the full corpus instead.
print(blob.sentiment.polarity)
# ### Face Recognition
# +
import numpy as np
import cv2
# Haar-cascade detectors shipped with OpenCV (XML files expected in the CWD).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
img = cv2.imread('pic3.jpeg')
# Cascades operate on single-channel images.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# scaleFactor=1.3, minNeighbors=5
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
    # Blue rectangle around each detected face.
    img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
    # Search for eyes only inside the face region.
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex,ey,ew,eh) in eyes:
        # Green rectangle per eye, drawn into the face ROI view of `img`.
        cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# Display the annotated image until a key is pressed.
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
#  "Answer")
| UncoventionalData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Instructions
#
# Implement multi output cross entropy loss in pytorch.
#
# Throughout this whole problem we use multioutput models:
# * predicting 4 localization coordinates
# * predicting 4 keypoint coordinates + whale id and callosity pattern
# * predicting whale id and callosity pattern
#
# In order for that to work your loss function needs to cooperate.
# Remember that for the simple single output models the following function will work:
#
# ```python
# import torch.nn.functional as F
# single_output_loss = F.nll_loss(output, target)
# ```
#
# # Your Solution
# Your solution function should be called solution. In this case we leave it for consistency but you don't need to do anything with it.
#
# CONFIG is a dictionary with all parameters that you want to pass to your solution function.
def solution(outputs, targets):
    """Multi-output negative log-likelihood (cross entropy) loss.

    Applies ``F.nll_loss`` to each (output, target) pair independently and
    sums the per-output losses into a single scalar, so one backward pass
    propagates gradients through every head of a multi-output model.

    Args:
        outputs: list of torch.autograd.Variables containing model outputs
            (log-probabilities, e.g. produced by ``log_softmax``)
        targets: list of torch.autograd.Variables containing targets for each
            output (class indices)
    Returns:
        loss_value: torch.autograd.Variable object holding the summed loss
    """
    # Local import keeps this notebook cell self-contained.
    import torch.nn.functional as F
    loss_value = sum(F.nll_loss(output, target)
                     for output, target in zip(outputs, targets))
    return loss_value
| resources/whales/tasks/task5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Criminology in Portugal (2011)
#
# ## Introduction
#
# > In this _case study_, we will analyse the **_crimes that occurred_** in **_Portugal_** during the civil year of **_2011_**. We will analyse all the _categories_ or _natures_ of these **_crimes_**, _building some statistics and filtering the data related to them_.
#
# > It will be applied some _filtering_ and made some _analysis_ on the data related to **_Portugal_** as _country_, like the following:
# * _Crimes by **Nature/Category**_
# * _Crimes by **Geographical Zone**_
# * _Crimes by **Region/City** (only considering the 5 most populated **regions/cities** in **Portugal**)_
# * _Conclusions_
#
# > It will be applied some _filtering_ and made some _analysis_ on the data related to the **_5 biggest/more populated regions/cities_** (_Metropolitan Area of Lisbon_, _North_, _Center_, _Metropolitan Area of Porto_, and _Algarve_) of **_Portugal_**, like the following:
# * **_Metropolitan Area of Lisbon_**
# * _Crimes by **Nature/Category**_
# * _Crimes by **Locality/Village**_
# * _Conclusions_
# * **_North_**
# * _Crimes by **Nature/Category**_
# * _Crimes by **Locality/Village**_
# * _Conclusions_
# * **_Center_**
# * _Crimes by **Nature/Category**_
# * _Crimes by **Locality/Village**_
# * _Conclusions_
# * **_Metropolitan Area of Porto_**
# * _Crimes by **Nature/Category**_
# * _Crimes by **Locality/Village**_
# * _Conclusions_
# * **_Algarve_**
# * _Crimes by **Nature/Category**_
# * _Crimes by **Locality/Village**_
# * _Conclusions_
# +
# Importing pandas library
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Crime statistics for Portugal (2011) from INE, one row per geographic zone.
# header=1 skips the dataset's banner row.
crimes_by_geozone_2011 = pd.read_csv("datasets/ine.pt/2011/dataset-crimes-portugal-2011-by-geozone-2.csv" , header=1)
# The first (unnamed) column holds the zone identifier; promote it to the index.
crimes_by_geozone_2011 = crimes_by_geozone_2011.rename(columns={'Unnamed: 0': 'Zona Geográfica'})
crimes_by_geozone_2011 = crimes_by_geozone_2011.set_index("Zona Geográfica", drop = True)
# -
# #### Data Available in the Dataset
#
# > All the data available and used for this _study case_ can be found in the following _hyperlink_:
# * [dataset-crimes-portugal-2011-by-geozone-2.csv](datasets/ine.pt/2011/dataset-crimes-portugal-2011-by-geozone-2.csv)
#
# ##### Note:
# > If you pretend to see all the data available and used for this _study case_, uncomment the following line.
# +
# Just for debug
#crimes_by_geozone_2011
# -
# ## Starting the Study Case
# ### Criminology in **_Metropolitan Area of Lisbon_** (**_2011_**)
# #### Analysing the **_crimes occurred_** in **_Metropolitan Area of Lisbon_**, during **_2011_**
# * The total of **_crime occurrences_** in **_Metropolitan Area of Lisbon_**, during **_2011_**, filtered by _category or nature of the crime_:
# +
# Row for the Metropolitan Area of Lisbon: a Series of 2011 crime counts by category.
crimes_lisbon_2011 = crimes_by_geozone_2011.loc["170: Área Metropolitana de Lisboa", : ]
crimes_lisbon_2011
# -
# * The total number of **_crime occurrences_** in **_Metropolitan Area of Lisbon_**, during **_2011_**, filtered by _category or nature of the crime_ (_organised as a Data Frame_):
# +
# Promote the Series to a one-row DataFrame; keep only the category columns 1..7.
crimes_lisbon_2011 = pd.DataFrame(crimes_lisbon_2011).T
crimes_lisbon_2011 = crimes_lisbon_2011.iloc[:,1:8]
crimes_lisbon_2011
# +
# Just for debug
#crimes_lisbon_2011.columns
# +
# Columns 3 and 6 are zeroed out — presumably non-numeric placeholders in the
# raw CSV; TODO confirm against the source dataset.
crimes_lisbon_2011.values[0,3] = 0
crimes_lisbon_2011.values[0,6] = 0
# Coerce the remaining counts (read in as strings) to integers.
crimes_lisbon_2011.values[0,0] = int(crimes_lisbon_2011.values[0,0])
crimes_lisbon_2011.values[0,1] = int(float(crimes_lisbon_2011.values[0,1]))
crimes_lisbon_2011.values[0,2] = int(crimes_lisbon_2011.values[0,2])
crimes_lisbon_2011.values[0,4] = int(float(crimes_lisbon_2011.values[0,4]))
crimes_lisbon_2011.values[0,5] = int(float(crimes_lisbon_2011.values[0,5]))
# Just for debug
#crimes_lisbon_2011.values
# +
# Just for debug
#crimes_lisbon_2011
# -
# * The total number of **_crime occurrences_** in **_Metropolitan Area of Lisbon_**, during **_2011_**, filtered by _category or nature of the crime_ (_organised as a Data Frame_ and excluding some _redundant fields and data_):
# +
# Drop low-volume/redundant categories before charting.
del crimes_lisbon_2011['Crimes contra a identidade cultural e integridade pessoal']
del crimes_lisbon_2011['Crimes contra animais de companhia']
crimes_lisbon_2011
# +
# Category labels for the chart axes.
crimes_lisbon_2011_categories = crimes_lisbon_2011.columns.tolist()
# Just for debug
#crimes_lisbon_2011_categories
# +
# Matching occurrence counts (the single data row).
crimes_lisbon_2011_values = crimes_lisbon_2011.values[0].tolist()
# Just for debug
#crimes_lisbon_2011_values
# -
# * A _plot_ of a representation of the total of **_crime occurrences_** in **_Metropolitan Area of Lisbon_**, during **_2011_**, filtered by _category or nature of the crime_ (_organised as a Data Frame_ and excluding some _redundant fields and data_):
# +
# Bar chart: occurrences per crime category.
plt.bar(crimes_lisbon_2011_categories, crimes_lisbon_2011_values)
plt.xticks(crimes_lisbon_2011_categories, rotation='vertical')
plt.xlabel('\nCrime Category/Nature\n')
plt.ylabel('\nNum. Occurrences\n')
plt.title('Crimes in Metropolitan Area of Lisbon, during 2011 (by Crime Category/Nature) - Bars Chart\n')
print('\n')
plt.show()
# +
# Pie chart: percentage share per crime category.
plt.pie(crimes_lisbon_2011_values, labels=crimes_lisbon_2011_categories, autopct='%.2f%%')
plt.title('Crimes in Metropolitan Area of Lisbon, during 2011 (by Crime Category/Nature) - Pie Chart\n\n')
# Equal aspect ratio keeps the pie circular.
plt.axis('equal')
print('\n')
plt.show()
# -
# * The total number of **_crime occurrences_** in all the **_localities/villages_** of the **_Metropolitan Area of Lisbon_**, during **_2011_**, filtered by _category or nature of the crime_ (_organised as a Data Frame_):
# +
# Slice all localities of the Metropolitan Area of Lisbon; label-based .loc
# slicing is inclusive of both endpoints.
crimes_lisbon_2011_by_locality = crimes_by_geozone_2011.loc["1701502: Alcochete":"1701114: Vila Franca de Xira", : ]
crimes_lisbon_2011_by_locality
# -
# * The total number of **_crime occurrences_** in all the **_localities/villages_** of the **_Metropolitan Area of Lisbon_**, during **_2011_**, filtered by _category or nature of the crime_ (_organised as a Data Frame_ and excluding some _redundant fields and data_):
# +
# Drop redundant/low-volume categories before ranking localities.
del crimes_lisbon_2011_by_locality['Crimes de homicídio voluntário consumado']
del crimes_lisbon_2011_by_locality['Crimes contra a identidade cultural e integridade pessoal']
del crimes_lisbon_2011_by_locality['Crimes contra animais de companhia']
crimes_lisbon_2011_by_locality
# +
# Six localities with the highest overall crime totals.
top_6_crimes_lisbon_2011_by_locality = crimes_lisbon_2011_by_locality.sort_values(by='Total', ascending=False).head(6)
top_6_crimes_lisbon_2011_by_locality
# +
top_6_crimes_lisbon_2011_by_locality_total = top_6_crimes_lisbon_2011_by_locality.loc[:,"Total"]
top_6_crimes_lisbon_2011_by_locality_total
# +
# One-row frame: localities as columns, totals as the single row.
top_6_crimes_lisbon_2011_by_locality_total = pd.DataFrame(top_6_crimes_lisbon_2011_by_locality_total).T
top_6_crimes_lisbon_2011_by_locality_total = top_6_crimes_lisbon_2011_by_locality_total.iloc[:,0:6]
top_6_crimes_lisbon_2011_by_locality_total
# +
top_6_crimes_lisbon_2011_by_locality_total_localities = top_6_crimes_lisbon_2011_by_locality_total.columns.tolist()
# Just for debug
#top_6_crimes_lisbon_2011_by_locality_total_localities
# +
top_6_crimes_lisbon_2011_by_locality_total_values = top_6_crimes_lisbon_2011_by_locality_total.values[0].tolist()
# Just for debug
#top_6_crimes_lisbon_2011_by_locality_total_values
# +
# Bar chart of the top-6 localities by total occurrences.
plt.bar(top_6_crimes_lisbon_2011_by_locality_total_localities, top_6_crimes_lisbon_2011_by_locality_total_values)
plt.xticks(top_6_crimes_lisbon_2011_by_locality_total_localities, rotation='vertical')
plt.xlabel('\nLocality/Village')
plt.ylabel('\nNum. Occurrences\n')
plt.title('Crimes in Metropolitan Area of Lisbon, during 2011 (by Locality/Village in Top 6) - Bars Chart\n')
print('\n')
plt.show()
# +
# Pie chart: percentage share of each top-6 locality.
plt.pie(top_6_crimes_lisbon_2011_by_locality_total_values, labels=top_6_crimes_lisbon_2011_by_locality_total_localities, autopct='%.2f%%')
plt.title('Crimes in Metropolitan Area of Lisbon, during 2011 (by Locality/Village in Top 6) - Pie Chart\n\n')
plt.axis('equal')
print('\n')
plt.show()
# -
# #### Conclusion of the **_crimes occurred_** in **_Metropolitan Area of Lisbon_**, during **_2011_**
# * After studying all the perspectives about the **_crimes occurred_** in **_Metropolitan Area of Lisbon_**, during **_2011_**, it's possible to conclude the following:
#
# * a) The most of the **_crimes_** occurred was against:
#
# > 1) The **_country's patrimony_** (**68.52%**)
#
# > 2) The **_people_**, at general (**20.35%**)
#
# > 3) The **_life in society_** (**9.32%**)
#
#
#
# Thank you, and I hope you enjoy it!
#
# Sincerely,
# > <NAME>.
| criminology-portugal/Criminology in Portugal - 2011.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# load libraries
import networkx as nx
import random
import matplotlib.pyplot as plt
# # PageRank
# In the first step we create a network (here an Erdős–Rényi network).
# generate graph
N=60
G=nx.erdos_renyi_graph(N,0.1)
# We can plot this.
# plot graph
# 200 spring-layout iterations give a well-settled embedding.
pos = nx.spring_layout(G, iterations=200)
nx.draw(G, pos=pos, with_labels=True)
# Now we implement the "random-walk PageRank" from the talk. First we define
# the "source" node, chosen uniformly at random from the set of vertices.
# pick source node
# random.randrange samples a uniform integer directly instead of
# materialising the whole [0, ..., N-1] list just to pick one element.
source = random.randrange(G.number_of_nodes())
# Now we initialise the dictionary "dict_count" (see talk).
# initialize "visit" dictionary
dict_count = dict.fromkeys(range(G.number_of_nodes()), 0)
# The starting vertex counts as visited once.
dict_count[source] = 1
# Next we walk through the network ("random walk") and record every visit of
# the individual vertices in "dict_count".
# walk through the network and store all visits
iterations = 1000
for _ in range(iterations):
    neighbors = list(G.neighbors(source))
    if neighbors:
        # Follow a uniformly random incident edge.
        source = random.choice(neighbors)
    else:
        # Dangling vertex: teleport to a uniformly random vertex.
        source = random.randrange(G.number_of_nodes())
    dict_count[source] += 1
# Now we can plot the results (dark red: large PageRank, white/light red: small PageRank).
# plot network with PageRank coloring
# Visit frequencies (count / number of steps) approximate the PageRank scores.
node_color = [dict_count[node]/iterations for node in range(N)]
nx.draw(G, pos=pos, with_labels=True, node_color=node_color, cmap=plt.cm.Reds)
# node with largest PageRank
print(max(dict_count, key=dict_count.get), max(node_color))
| PageRank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **ESG - Post Medium**
#
# ***
# ## Growth in searches on the term "ESG"
# * **Google trends**
# * Download the csv with data from 01/01/2011 to 31/12/2020 with searches for the term "ESG" in Brazil and worldwide.
#
# ***
# ## Visualization of portfolio composition
# * **Web scraping of the composition of the portfolio on the B3 website**
# * Ibovespa - [IBOV](http://www.b3.com.br/pt_br/market-data-e-indices/indices/indices-amplos/ibovespa.html)
# * Indice de Sustentabilidade Empresarial - [ISE](http://www.b3.com.br/pt_br/market-data-e-indices/indices/indices-de-sustentabilidade/indice-de-sustentabilidade-empresarial-ise.html)
# * Indice Carbono Eficiente - [ICO2](http://www.b3.com.br/pt_br/market-data-e-indices/indices/indices-de-sustentabilidade/indice-carbono-eficiente-ico2.html)
#
# ***
# ## Download daily quotation data from the indices to visualize the return over time
# * **Investing (API) - [investpy](https://pypi.org/project/investpy/)**
# * **Plot line graph for better visualization**
#
# ***
# ### Importations
import pandas as pd
pd.set_option('display.min_rows', 50)
pd.set_option('display.max_rows', 200)
import investpy as inv
import matplotlib.pyplot as plt
# ### Growth in searches on the term "ESG"
# Upload csv data downloaded from google trends
# Upload csv data downloaded from google trends
# header=1 skips Google Trends' banner row.
trends_world = pd.read_csv('esg_mundo_2011-2020.csv', sep=',', header=1)
trends_world.columns = ['Day', 'Search for the term ESG']
trends_world['Day'] = pd.to_datetime(trends_world['Day'])
trends_world['Search for the term ESG'] = trends_world[
    'Search for the term ESG'].astype(float)
trends_world.head()
# +
# Generating chart
trends_world.plot('Day',
                  'Search for the term ESG',
                  figsize=(25, 8),
                  title='Search the term "ESG" in the world')
plt.savefig('trends_world.png')
plt.show()
# -
# Upload csv data downloaded from google trends
trends_brasil = pd.read_csv('esg_brasil_2011-2020.csv', sep=',', header=1)
trends_brasil.columns = ['Day', 'Search for the term ESG']
trends_brasil['Day'] = pd.to_datetime(trends_brasil['Day'])
trends_brasil['Search for the term ESG'] = trends_brasil[
    'Search for the term ESG'].astype(float)
trends_brasil.head()
# + code_folding=[]
# Generating chart
trends_brasil.plot('Day',
                   'Search for the term ESG',
                   figsize=(25, 8),
                   title='Search for the term "ESG" in Brazil')
plt.savefig('trends_brasil.png')
plt.show()
# -
# ### Visualization of portfolio composition
# Function for accessing indexes
def buscar_carteira_teorica(indice):
    """Scrape B3's theoretical-portfolio page for the given index ticker.

    Returns a DataFrame indexed by 'Código' (ticker); the trailing summary
    row emitted by the page is discarded.
    """
    base = 'http://bvmf.bmfbovespa.com.br/indices/ResumoCarteiraTeorica.aspx?Indice={}&idioma=pt-br'
    url = base.format(indice.upper())
    # Brazilian number formatting: ',' decimal separator, '.' thousands.
    tables = pd.read_html(url, decimal=',', thousands='.', index_col='Código')
    return tables[0][:-1]
# IBOVESPA
IBOV = buscar_carteira_teorica('IBOV')
IBOV.index.names = ['tickers']
IBOV.columns = [
    'Company', 'segment', 'theoretical_quantity', 'percentage_share'
]
# Largest index weights first.
IBOV.sort_values('percentage_share', ascending=False).head()
# + code_folding=[]
# INDICE DE SUSTENTABILIDADE EMPRESARIAL
ISE = buscar_carteira_teorica('ISE')
ISE.index.names = ['tickers']
ISE.columns = [
    'Company', 'segment', 'theoretical_quantity', 'percentage_share'
]
ISE.sort_values('percentage_share', ascending=False).head()
# -
# INDICE DE CARBONO EFICIENTE
ICO2 = buscar_carteira_teorica('ICO2')
ICO2.index.names = ['tickers']
ICO2.columns = [
    'Company', 'segment', 'theoretical_quantity', 'percentage_share'
]
ICO2.sort_values('percentage_share', ascending=False).head()
# ### Download daily quotation data from the indices to visualize the return over time
# Index list
# All Brazilian indices known to Investing.com.
indices_list = inv.get_indices_list('Brazil')
indices_list
# Index overview
indices_overview = inv.get_indices_overview('Brazil')
indices_overview
# +
# Dataset
# Daily closing prices of the three indices over 2011-2020.
close_index = ['Bovespa', 'Corporate Sustainability', 'Carbon Efficient']
mydata = pd.DataFrame()
for c in close_index:
    mydata[c] = inv.get_index_historical_data(c,
                                              country='brazil',
                                              from_date='01/01/2011',
                                              to_date='31/12/2020')["Close"]
mydata.head()
# -
# Normalized return to base 100
# Divide by the first row so all three series start at 100.
(mydata / mydata.iloc[0] * 100).plot(figsize=(25, 8), title='Normalized return to base 100')
plt.savefig('index_return-2011_2020.png')
plt.show()
| ESG-Post_Medium.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Using CellTypist for cell type classification
# This notebook showcases the cell type classification for scRNA-seq query data by retrieving the most likely cell type labels from either the built-in CellTypist models or the user-trained custom models.
# -
# Only the main steps and key parameters are introduced in this notebook. Refer to detailed [Usage](https://github.com/Teichlab/celltypist#usage) if you want to learn more.
# ## Install CellTypist
# + tags=[]
# !pip install celltypist
# -
import scanpy as sc
import celltypist
from celltypist import models
# ## Download a scRNA-seq dataset of 2,000 immune cells
# + tags=[]
# Read the demo AnnData (downloaded from backup_url on first run, then cached).
adata_2000 = sc.read('celltypist_demo_folder/demo_2000_cells.h5ad', backup_url = 'https://celltypist.cog.sanger.ac.uk/Notebook_demo_data/demo_2000_cells.h5ad')
# -
# This dataset includes 2,000 cells and 18,950 genes collected from different studies, thereby showing the practical applicability of CellTypist.
# (cells, genes)
adata_2000.shape
# + [markdown] tags=[]
# The expression matrix (`adata_2000.X`) is pre-processed (and required) as log1p normalised expression to 10,000 counts per cell (this matrix can be alternatively stashed in `.raw.X`).
# + tags=[]
# Per-cell totals after undoing log1p — expected to be ~10,000 if the matrix
# is normalised as required; TODO confirm for your own data.
adata_2000.X.expm1().sum(axis = 1)
# -
# Some pre-assigned cell type labels are also in the data, which will be compared to the predicted labels from CellTypist later.
# + tags=[]
adata_2000.obs
# -
# ## Assign cell type labels using a CellTypist built-in model
# In this section, we show the procedure of transferring cell type labels from built-in models to the query dataset.
# Download the latest CellTypist models.
# + tags=[]
# Enabling `force_update = True` will overwrite existing (old) models.
# NOTE: this hits the network to fetch the latest model files.
models.download_models(force_update = True)
# -
# All models are stored in `models.models_path`.
models.models_path
# Get an overview of the models and what they represent.
# + tags=[]
models.models_description()
# -
# Choose the model you want to employ, for example, the model with all tissues combined containing low-hierarchy (high-resolution) cell types/subtypes.
# Indeed, the `model` argument defaults to `Immune_All_Low.pkl`.
model = models.Model.load(model = 'Immune_All_Low.pkl')
# This model contains 91 cell states.
# + tags=[]
# Every cell state this classifier can assign.
model.cell_types
# + [markdown] tags=[]
# Some model meta-information.
# + tags=[]
# Free-text meta-information about the model.
model.description
# + [markdown] tags=[]
# Transfer cell type labels from this model to the query dataset.
# + tags=[]
# Not run; predict cell identities using this loaded model.
#predictions = celltypist.annotate(adata_2000, model = model, majority_voting = True)
# Alternatively, just specify the model name (recommended as this ensures the model is intact every time it is loaded).
predictions = celltypist.annotate(adata_2000, model = 'Immune_All_Low.pkl', majority_voting = True)
# -
# By default (`majority_voting = False`), CellTypist will infer the identity of each query cell independently. This leads to raw predicted cell type labels, and usually finishes within seconds or minutes depending on the size of the query data. You can also turn on the majority-voting classifier (`majority_voting = True`), which refines cell identities within local subclusters after an over-clustering approach at the cost of increased runtime.
# + [markdown] tags=[]
# The results include both predicted cell type labels (`predicted_labels`), over-clustering result (`over_clustering`), and predicted labels after majority voting in local subclusters (`majority_voting`). Note in the `predicted_labels`, each query cell gets its inferred label by choosing the most probable cell type among all possible cell types in the given model.
# + tags=[]
# Per-cell raw predictions (plus majority-voting columns when enabled).
predictions.predicted_labels
# -
# Transform the prediction result into an `AnnData`.
# Get an `AnnData` with predicted labels embedded into the cell metadata columns.
adata = predictions.to_adata()
# Compared to `adata_2000`, the new `adata` has additional prediction information in `adata.obs` (`predicted_labels`, `over_clustering`, `majority_voting` and `conf_score`). Of note, all these columns can be prefixed with a specific string by setting `prefix` in `to_adata`.
# + tags=[]
adata.obs
# -
# In addition to this meta information added, the neighborhood graph constructed during over-clustering is also stored in the `adata`
# (If a pre-calculated neighborhood graph is already present in the `AnnData`, this graph construction step will be skipped).
# This graph can be used to derive the cell embeddings, such as the UMAP coordinates.
# If the UMAP or any cell embeddings are already available in the `AnnData`, skip this command.
# Computes UMAP coordinates from the neighborhood graph stored during annotation.
sc.tl.umap(adata)
# Visualise the prediction results.
# + tags=[]
sc.pl.umap(adata, color = ['cell_type', 'predicted_labels', 'majority_voting'], legend_loc = 'on data')
# -
# Actually, you may not need to explicitly convert `predictions` output by `celltypist.annotate` into an `AnnData` as above. A more useful way is to use the visualisation function `celltypist.dotplot`, which quantitatively compares the CellTypist prediction result (e.g. `majority_voting` here) with the cell types pre-defined in the `AnnData` (here `cell_type`). You can also change the value of `use_as_prediction` to `predicted_labels` to compare the raw prediction result with the pre-defined cell types.
# Dot plot decomposing each pre-defined cell type (columns) into CellTypist predictions (rows).
celltypist.dotplot(predictions, use_as_reference = 'cell_type', use_as_prediction = 'majority_voting')
# For each pre-defined cell type (each column from the dot plot), this plot shows how it can be 'decomposed' into different cell types predicted by CellTypist (rows).
# ## Assign cell type labels using a custom model
# In this section, we show the procedure of generating a custom model and transferring labels from the model to the query data.
# + [markdown] tags=[]
# Use previously downloaded dataset of 2,000 immune cells as the training set.
# -
# Training set: the 2,000-cell demo data downloaded earlier (served from the local cache).
adata_2000 = sc.read('celltypist_demo_folder/demo_2000_cells.h5ad', backup_url = 'https://celltypist.cog.sanger.ac.uk/Notebook_demo_data/demo_2000_cells.h5ad')
# Download another scRNA-seq dataset of 400 immune cells as a query.
adata_400 = sc.read('celltypist_demo_folder/demo_400_cells.h5ad', backup_url = 'https://celltypist.cog.sanger.ac.uk/Notebook_demo_data/demo_400_cells.h5ad')
# + [markdown] tags=[]
# Derive a custom model by training the data using the `celltypist.train` function.
# + tags=[]
# The `cell_type` in `adata_2000.obs` will be used as cell type labels for training.
# Trains a logistic-regression classifier on the expression matrix.
new_model = celltypist.train(adata_2000, labels = 'cell_type')
# -
# By default, data is trained using a traditional logistic regression classifier. This classifier is well suited to datasets of small or intermediate sizes (as an empirical estimate, <= 100k cells), and usually leads to an unbiased probability range with less parameter tuning. Among the training parameters, three important ones are `solver` which (if not specified by the user) is selected based on the size of the input data by CellTypist, `C` which sets the inverse of L2 regularisation strength, and `max_iter` which controls the maximum number of iterations before reaching the minimum of the cost function. Other (hyper)parameters from [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) are also applicable in the `train` function.
# When the dimensions of the input data are large, training may take longer time even with CPU parallelisation (achieved by the `n_jobs` argument). To reduce the training time as well as to add some randomness to the classifier's solution, a stochastic gradient descent (SGD) logistic regression classifier can be enabled by `use_SGD = True`.
# +
# For illustration purpose; below is not run for this small training data.
#new_model = celltypist.train(adata_2000, labels = 'cell_type', use_SGD = True)
# -
# A logistic regression classifier with SGD learning reduces the training burden dramatically and has a comparable performance versus a traditional logistic regression classifier. A minor caveat is that more careful model parameter tuning may be needed if you want to utilise the probability values from the model for scoring cell types in the prediction step (the selection of the most likely cell type for each query cell in this notebook is not influenced however). Among the training parameters, two important ones are `alpha` which sets the L2 regularisation strength and `max_iter` which controls the maximum number of iterations. Other (hyper)parameters from [SGDClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html) are also applicable in the `train` function.
# When the training data contains a huge number of cells (for example >500k cells) or more randomness in selecting cells for training is needed, you may consider using the mini-batch version of the SGD logistic regression classifier by specifying `use_SGD = True` and `mini_batch = True`. As a result, in each epoch (default to 10 epochs, `epochs = 10`), cells are binned into equal-sized (the size is default to 1000, `batch_size = 1000`) random batches, and are trained in a batch-by-batch manner (default to 100 batches, `batch_number = 100`).
# + tags=[]
# For illustration purpose; below is not run for this small training data.
#new_model = celltypist.train(adata_2000, labels = 'cell_type', use_SGD = True, mini_batch = True)
# -
# This custom model can be manipulated as with other CellTypist built-in models. First, save this model locally.
# Save the model.
# Serialised as a pickle next to the demo data.
new_model.write('celltypist_demo_folder/model_from_immune2000.pkl')
# + [markdown] tags=[]
# You can load this model by `models.Model.load`.
# -
# Round-trip the custom model from disk, as with any built-in model.
new_model = models.Model.load('celltypist_demo_folder/model_from_immune2000.pkl')
# Next, we use this model to predict the query dataset of 400 immune cells.
# + tags=[]
# Not run; predict the identity of each input cell with the new model.
#predictions = celltypist.annotate(adata_400, model = new_model, majority_voting = True)
# Alternatively, just specify the model path (recommended as this ensures the model is intact every time it is loaded).
predictions = celltypist.annotate(adata_400, model = 'celltypist_demo_folder/model_from_immune2000.pkl', majority_voting = True)
# -
# Embed predictions into an AnnData and compute a UMAP for visualisation.
adata = predictions.to_adata()
sc.tl.umap(adata)
# + tags=[]
sc.pl.umap(adata, color = ['cell_type', 'predicted_labels', 'majority_voting'], legend_loc = 'on data')
# -
# Compare predictions against the pre-defined labels of the query data.
celltypist.dotplot(predictions, use_as_reference = 'cell_type', use_as_prediction = 'majority_voting')
# ## Examine expression of cell type-driving genes
# Each model can be examined in terms of the driving genes for each cell type. Note these genes are only dependent on the model, say, the training dataset.
# + tags=[]
# Any model can be inspected.
# Here we load the previously saved model trained from 2,000 immune cells.
model = models.Model.load(model = 'celltypist_demo_folder/model_from_immune2000.pkl')
# -
# Cell states known to this custom model.
model.cell_types
# Extract the matrix of gene weights across cell types.
# + tags=[]
# Gene-weight matrix of the classifier: one row per cell type, one column per gene.
weights = model.classifier.coef_
weights.shape
# + [markdown] tags=[]
# Top three driving genes of `Mast cells`.
# + tags=[]
# Row of weights for 'Mast cells', then the indices of its three largest weights.
mast_cell_weights = weights[model.cell_types == 'Mast cells']
# argpartition(-3) puts the top-3 (unordered) weights at the end of the flattened row.
top_3_genes = model.features[mast_cell_weights.argpartition(-3, axis = None)[-3:]]
top_3_genes
# + tags=[]
# Check expression of the three genes in the training set.
sc.pl.violin(adata_2000, top_3_genes, groupby = 'cell_type', rotation = 90)
# -
# Check expression of the three genes in the query set.
# Here we use `majority_voting` from CellTypist as the cell type labels for this dataset.
sc.pl.violin(adata_400, top_3_genes, groupby = 'majority_voting', rotation = 90)
| docs/notebook/celltypist_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inferring cosmology power spectrum parameters from 2D fields
# (by <NAME> and <NAME>)
#
# A useful example to consider is the inference of the parameter values controlling the shape of the power spectrum used to generate 2D Gaussian random fields. This is interesting because we can analytically calculate the Fisher information to check our convergence and it has aspects which are very similar to cosmological field analysis. This example is split into two parts, first the analytic comparison when the power spectrum is of the form
# $$P(k) = Ak^{-B}$$
# and a second part where we use a jax implementation of a cosmology library (`pip install jax-cosmo`) to infer cosmological parameters from 2D Gaussian random fields.
#
# In this example we will also show how inception blocks can be implemented in a stax-like neural model.
#
# ## Power law example - $P(k) = Ak^{-B}$
#
# The likelihood for an $N_{\rm pix}\times N_{\rm pix}$ Gaussian field, $\boldsymbol{\delta}$, can be explicitly written down for the Fourier transformed data, $\boldsymbol{\Delta}$ as
# $$\mathcal{L}(\boldsymbol{\Delta}|\boldsymbol{\theta}) = \frac{1}{(2\pi)^{N_{\rm pix}^2 / 2} |P({\bf k}, \boldsymbol{\theta})|^{1/2}}\exp{\left(-\frac{1}{2}\boldsymbol{\Delta}\left(P({\bf k}, \boldsymbol{\theta})\right)^{-1}\boldsymbol{\Delta}\right)}$$
# Since the Fisher information can be calculated from the expectation value of the second derivative of the score, i.e. the log likelihood
# $${\bf F}_{\alpha\beta} = - \left.\left\langle\frac{\partial^2\ln\mathcal{L}(\Delta|\boldsymbol{\theta})}{\partial\theta_\alpha\partial\theta_\beta}\right\rangle\right|_{\boldsymbol{\theta}=\boldsymbol{\theta}^\textrm{fid}}$$
# Then we know that analytically the Fisher information must be
# $${\bf F}_{\alpha\beta} = \frac{1}{2} {\rm Tr} \left(\frac{\partial P({\bf k}, \boldsymbol{\theta})}{\partial\theta_\alpha}\left(P({\bf k}, \boldsymbol{\theta})\right)^{-1}\frac{\partial P({\bf k}, \boldsymbol{\theta})}{\partial\theta_\beta}\left(P({\bf k}, \boldsymbol{\theta})\right)^{-1}\right)$$
# where $\alpha$ and $\beta$ label the parameters $A$ and $B$ in the power spectrum. As each $k$-mode is uncoupled for this power law form then the derivatives are
# $$\begin{align}
# \frac{\partial P({\bf k}, \boldsymbol{\theta})}{\partial A} = &~k^{-B}\\
# \frac{\partial P({\bf k}, \boldsymbol{\theta})}{\partial B} = & -Ak^{-B}\ln k.\\
# \end{align}$$
# We can set up these functions. Note that for large fields there can be an issue with numerical overflow and as such we will use jax in 64 bit mode.
from jax.config import config
config.update('jax_enable_x64', True)
import imnn
import imnn.lfi
import jax
import jax.numpy as np
import jax_cosmo as jc
import matplotlib.pyplot as plt
import tensorflow_probability
from jax.experimental import optimizers
from jax.experimental import stax
tfp = tensorflow_probability.substrates.jax
rng = jax.random.PRNGKey(0)
# First we'll set up our $P(k)$
def P(k, A=1, B=0):
    """Power-law power spectrum P(k) = A * k^(-B).

    The defaults (A=1, B=0) give a flat, unit-amplitude spectrum.
    """
    amplitude, slope = A, B
    return amplitude * k ** (-slope)
# And the log likelihood is then
def log_likelihood(k, A, B, Δ, N):
    """Exact Gaussian-field log likelihood evaluated on a grid of (A, B).

    Parameters
    ----------
    k : np.ndarray
        grid of |k| modes for the field
    A : np.ndarray
        1D array of power-law amplitudes to evaluate
    B : np.ndarray
        1D array of power-law slopes to evaluate (same length as A)
    Δ : np.ndarray
        Fourier transform of the data field
    N : int
        number of pixels per side of the field

    Returns
    -------
    np.ndarray
        log likelihood for each (A, B) pair (complex dtype; callers take
        the real part)

    Notes
    -----
    Only the unique modes Δ[1:N//2, 1:N//2] are kept, since the Fourier
    transform of a real field is Hermitian. The original dead code that
    built a `nrm` array with the removed `jax.ops.index_update` API and
    then overwrote it with `nrm = 1` has been deleted — it had no effect
    on the result and would crash under modern JAX.
    """
    Δ = Δ[1:N // 2, 1:N // 2].flatten()
    k = k[1:N // 2, 1:N // 2].flatten()

    def fn(_A, _B):
        # Each retained mode is independent, so the covariance is diagonal
        # with variance P(k): the inverse and log-determinant are elementwise.
        powers = P(k, A=_A, B=_B)
        invC = 1. / powers
        exponent = - 0.5 * np.sum(np.conj(Δ) * invC * Δ)
        norm = -0.5 * np.sum(np.log(powers)) - 0.5 * len(Δ) * np.log(np.pi * 2.)
        return (exponent + norm)
    return jax.vmap(fn)(A, B)
# and the Fisher information matrix is given by
def fisher(θ, k, N):
    """Analytic Fisher information for the power-law spectrum P(k) = A k^{-B}.

    Because each k-mode is independent the covariance and its parameter
    derivatives are diagonal, so the trace formula
    F_ab = 0.5 Tr(∂_a C · C⁻¹ · ∂_b C · C⁻¹) reduces to elementwise sums.
    This avoids materialising the O(n²) dense diagonal matrices the naive
    implementation built with `np.diag`.

    Parameters
    ----------
    θ : np.ndarray
        fiducial parameter values (A, B)
    k : np.ndarray
        grid of |k| modes; only the unique modes k[1:N//2, 1:N//2] are used
    N : int
        number of pixels per side of the field

    Returns
    -------
    np.ndarray
        the symmetric 2x2 Fisher information matrix over (A, B)
    """
    A, B = θ
    k = k[1:N // 2, 1:N // 2].flatten()
    Pk = P(k, A, B)
    # diagonal of (∂P/∂A) C⁻¹ and (∂P/∂B) C⁻¹
    d_A = (k ** -B) / Pk
    d_B = -np.log(k)  # (-A k^{-B} ln k) / (A k^{-B})
    F_AA = 0.5 * np.sum(d_A * d_A)
    F_AB = 0.5 * np.sum(d_A * d_B)
    F_BB = 0.5 * np.sum(d_B * d_B)
    # the matrix is symmetric by construction: F_BA == F_AB
    return np.array([[F_AA, F_AB], [F_AB, F_BB]])
# Notice that we only take the unique modes to calculate the Fisher information. To actually set up our 2D $k$-modes we stack values from $0\to N_i/2 + 1$ to values from $-N_i/2+1\to0$ and normalising them by $2\pi/N_i$ where $N_i$ is the number of $k$-modes in each axis. If we choose that our fields have $128\times128$ pixels then we can calculate $k$ as
# +
N = 128
shape = (N, N)
# FFT frequencies along each axis: 0..N/2 followed by -N/2+1..-1,
# scaled to physical wavenumbers by 2π/N
_axis_freqs = [
    np.hstack((np.arange(0, n // 2 + 1),
               np.arange(-n // 2 + 1, 0))) * 2 * np.pi / n
    for n in shape]
# |k| on the full 2D grid: root of the sum of squared per-axis frequencies
k = np.sqrt(sum(f ** 2. for f in np.meshgrid(*_axis_freqs)))
# -
# For a fiducial $A^\textrm{fid}=1$ and $B^\textrm{fid}=1/2$ we get a Fisher information matrix equal to
# +
# Fiducial parameter values (A, B) at which the Fisher matrix is evaluated
θ_fid = np.array([1., 0.5], dtype=np.float32)
n_params = θ_fid.shape[0]
# Analytic Fisher information — the benchmark the IMNN should recover
F = fisher(θ_fid, k, N)
print(f"F = {F}")
print(f"det(F) = {np.linalg.det(F)}")
# -
# ### 2D Gaussian random field simulator in jax
#
# To create a 2D Gaussian random field we can follow these steps:
#
# 1. Generate a $(N_\textrm{pix}\times N_\textrm{pix})$ white noise field $\varphi$ such that $\langle \varphi_k \varphi_{-k} \rangle' = 1$
#
# 2. Fourier Transform $\varphi$ to real space: $R_{\rm white}({\bf x}) \rightarrow R_{\rm white}({\bf k})$
# Note that NumPy's DFT Fourier convention is:
# $$\phi_{ab}^{\bf k} = \sum_{c,d = 0}^{N-1} \exp{(-i x_c k_a - i x_d k_b) \phi^{\bf x}_{cd}}$$
# $$\phi_{ab}^{\bf x} = \frac{1}{N^2}\sum_{c,d = 0}^{N-1} \exp{(-i x_c k_a - i x_d k_b) \phi^{\bf k}_{cd}}$$
#
# 3. Scale white noise $R_{\rm white}({\bf k})$ by the chosen power spectrum evaluated over a field of $k$ values:
# $$R_P({\bf k}) = P^{1/2}(k) R_{\rm white}({\bf k}) $$
# Here we need to ensure that this array of amplitudes are Hermitian, e.g. $\phi^{* {\bf k}}_{a(N/2 + b)} = \phi^{{\bf k}}_{a(N/2 - b)}$. This is accomplished by choosing indices $k_a = k_b = \frac{2\pi}{N} (0, \dots, N/2, -N/2+1, \dots, -1)$ (as above) and then evaluating the square root of the outer product of the meshgrid between the two: $k = \sqrt{k^2_a + k^2_b}$. We can then evaluate $P^{1/2}(k)$.
#
# 4. Fourier Transform $R_{P}({\bf k})$ to real space: $R_P({\bf x}) = \int d^d \tilde{k} e^{i{\bf k} \cdot {\bf x}} R_p({\bf k})$
# $$R_{ab}^{\bf x} = \frac{1}{N^2}\sum_{c,d = 0}^{N-1} \exp{(-i x_c k_a - i x_d k_b) R^{\bf k}_{cd}}$$
#
# We are going to build a broadcastable jax simulator which takes in a variety of different shaped parameter arrays and vmaps over them until a single parameter pair is passed. This is very efficient for performing the ABC, for example. We're also going to make our simulator so that it could have additive foregrounds (although we won't use them in this example) as well as a generator for log normal fields where the $P(k)$ for the Fourier modes are transformed as
# $$P(k)\to\ln(1+P(k))$$
# and rescaled by the volume of the simulation before generating the field, and then the field is transformed as
# $$\phi\to \exp\left(\phi - \frac{\langle\phi\phi\rangle}{2}\right) - 1$$
def simulator(rng, θ, simulator_args, foregrounds=None):
    """Generate 2D Gaussian (or log-normal) random fields with spectrum P(k).

    Broadcasts over the parameters: θ may be a tuple (A, B) of scalars, a
    1D array holding one (A, B) pair, or arrays of many A and/or B values,
    in which case the simulator vmaps itself over the batch dimension(s)
    with an independent key per simulation.

    Parameters
    ----------
    rng : jax PRNG key
    θ : tuple or np.ndarray
        power spectrum parameter values (A, B)
    simulator_args : dict
        must contain "shape", "L", "k", "vol_norm", "log_normal",
        "N_scale" and "squeeze" (see the notebook text above)
    foregrounds : np.ndarray or None
        optional stack of additive foreground maps; one is chosen at
        random and added to the field

    Returns
    -------
    np.ndarray
        float32 field(s); shape gains leading (1, 1) axes when
        simulator_args["squeeze"] is False
    """
    def fn(rng, A, B):
        dim = len(simulator_args["shape"])
        L = simulator_args["L"]
        if np.isscalar(L):
            L = [L] * int(dim)
        Lk = ()
        shape = ()
        for i, _shape in enumerate(simulator_args["shape"]):
            Lk += (_shape / L[i],)
            # pad even axes by one pixel so the field can be symmetrised
            # (Hermitian reality condition) and then cut back down
            if _shape % 2 == 0:
                shape += (_shape + 1,)
            else:
                shape += (_shape,)
        k = simulator_args["k"]
        k_shape = k.shape
        k = k.flatten()[1:]
        tpl = ()
        for _d in range(dim):
            tpl += (_d,)
        V = np.prod(np.array(L))
        scale = V**(1. / dim)
        fft_norm = np.prod(np.array(Lk))
        rng, key = jax.random.split(rng)
        # NOTE(review): the same `key` draws both magnitudes and phases,
        # so the two are not independent streams — confirm this is intended
        mag = jax.random.normal(
            key, shape=shape)
        pha = 2. * np.pi * jax.random.uniform(
            key, shape=shape)
        # now make hermitian field (reality condition)
        revidx = (slice(None, None, -1),) * dim
        mag = (mag + mag[revidx]) / np.sqrt(2)
        pha = (pha - pha[revidx]) / 2 + np.pi
        dk = mag * (np.cos(pha) + 1j * np.sin(pha))
        cutidx = (slice(None, -1),) * dim
        dk = dk[cutidx]
        # per-mode amplitudes; the k=0 (DC) mode is given zero power
        powers = np.concatenate(
            (np.zeros(1),
             np.sqrt(P(k, A=A, B=B)))).reshape(k_shape)
        if simulator_args['vol_norm']:
            powers /= V
        if simulator_args["log_normal"]:
            # transform the spectrum as P(k) -> ln(1 + P(k)) in real space
            powers = np.real(
                np.fft.ifftshift(
                    np.fft.ifftn(
                        powers)
                    * fft_norm) * V)
            powers = np.log(1. + powers)
            powers = np.abs(np.fft.fftn(powers))
        fourier_field = powers * dk
        # jax.ops.index_update was removed from modern JAX; .at[].set() is
        # the supported equivalent with identical indexing semantics.
        # NOTE(review): an integer index array of [0]*dim zeroes the whole
        # first row of modes, not just the DC element — confirm intended.
        fourier_field = fourier_field.at[
            np.zeros(dim, dtype=int)].set(np.zeros((1,)))
        if simulator_args["log_normal"]:
            field = np.real(np.fft.ifftn(fourier_field)) * fft_norm * np.sqrt(V)
            sg = np.var(field)
            field = np.exp(field - sg / 2.) - 1.
        else:
            field = np.real(np.fft.ifftn(fourier_field) * fft_norm * np.sqrt(V)**2)
        if simulator_args["N_scale"]:
            field *= scale
        if foregrounds is not None:
            rng, key = jax.random.split(key)
            # choose one foreground map at random and add it to the field
            foreground = foregrounds[
                jax.random.randint(
                    key,
                    minval=0,
                    maxval=foregrounds.shape[0],
                    shape=())]
            field = np.expand_dims(field + foreground, (0,))
        if not simulator_args["squeeze"]:
            field = np.expand_dims(field, (0, 1))
        return np.array(field, dtype='float32')
    if isinstance(θ, tuple):
        A, B = θ
    else:
        A = np.take(θ, 0, axis=-1)
        B = np.take(θ, 1, axis=-1)
    if A.shape == B.shape:
        if len(A.shape) == 0:
            # a single (A, B) pair -> one simulation
            return fn(rng, A, B)
        else:
            # batch of pairs -> one fresh key per simulation
            keys = jax.random.split(rng, num=A.shape[0] + 1)
            rng = keys[0]
            keys = keys[1:]
            return jax.vmap(
                lambda key, A, B: simulator(
                    key, (A, B), simulator_args=simulator_args))(
                keys, A, B)
    else:
        if len(A.shape) > 0:
            # batch over A at fixed B
            keys = jax.random.split(rng, num=A.shape[0] + 1)
            rng = keys[0]
            keys = keys[1:]
            return jax.vmap(
                lambda key, A: simulator(
                    key, (A, B), simulator_args=simulator_args))(
                keys, A)
        elif len(B.shape) > 0:
            # batch over B at fixed A
            keys = jax.random.split(rng, num=B.shape[0])
            return jax.vmap(
                lambda key, B: simulator(
                    key, (A, B), simulator_args=simulator_args))(
                keys, B)
# We can now set the simulator arguments, i.e. the $k$-modes to evaluate, the length of the side of a box, the shape of the box and whether to normalise via the volume and squeeze the output dimensions
simulator_args = dict(
k=k,
L=N,
shape=shape,
vol_norm=True,
N_scale=True,
squeeze=True,
log_normal=False)
# Now we can simulate some target data at, for example, $A^\textrm{target}=0.7$ and $B^\textrm{target}=0.8$:
# +
θ_target = np.array([0.7, 0.8])
rng, key = jax.random.split(rng)
δ_target = simulator(key, θ_target, simulator_args=simulator_args)
plt.imshow(δ_target)
plt.colorbar();
# -
# We can now define our prior distribution (in this case a uniform distribution over $A$ and $B$) with values between 0.1 and 1.25 for both parameters
# Independent uniform priors on A and B, each over [0.1, 1.25]
prior = tfp.distributions.Blockwise(
    [tfp.distributions.Uniform(low=low, high=high)
     for low, high in zip([0.1, 0.1], [1.25, 1.25])])
# Blockwise does not expose per-parameter bounds, so attach them by hand —
# presumably imnn.lfi reads `low`/`high` for its grid ranges; confirm
prior.low = np.array([0.1, 0.1])
prior.high = np.array([1.25, 1.25])
# To evaluate the likelihood of this field we can now use (dividing the target $\delta$ by $N$ to remove added scaling)
LFI = imnn.lfi.LikelihoodFreeInference(
prior=prior,
gridsize=100)
A, B = np.meshgrid(*LFI.ranges)
LFI.n_targets=1
LFI.put_marginals(
jax.nn.softmax(
np.real(
log_likelihood(
k,
A.ravel(),
B.ravel(),
np.fft.fftn(δ_target / N),
N)
),axis=0
).reshape((100, 100)).T[np.newaxis]);
LFI.marginal_plot(
known=θ_target,
label="Analytic likelihood",
axis_labels=["A", "B"]);
# ## Training an IMNN
#
# Now let's train an IMNN to summarise such Gaussian random fields to see how much information we can extract and what sort of constraints we can get. We will use 5000 simulations to estimate the covariance and use all of their derivatives, and we'll summarise the whole random Gaussian field by 2 summaries.
# +
n_s = 5000
n_d = n_s
n_summaries = 2
# -
# We're going to use a fully convolutional inception network built using stax with some custom designed blocks. The inception block itself is implemented as
def InceptBlock(filters, strides, do_5x5=True, do_3x3=True):
    """InceptNet convolutional striding block.

    filters: tuple (f1, f2, f3):
        f1: channels of the 1x1 branch
        f2: channels of the 1x1 -> 3x3 branch (also the maxpool branch)
        f3: channels of the 1x1 -> 5x5 branch
    strides: stride applied by each branch's final convolution
    do_3x3 / do_5x5: include the 3x3 branch, and additionally the 5x5
        branch (the 5x5 branch is only built when both flags are set)
    """
    filters1, filters2, filters3 = filters
    # every variant starts with the plain strided 1x1 branch
    branches = [stax.serial(
        stax.Conv(filters1, (1, 1), strides, padding="SAME"))]
    if do_3x3:
        branches.append(stax.serial(
            stax.Conv(filters2, (1, 1), strides=None, padding="SAME"),
            stax.Conv(filters2, (3, 3), strides, padding="SAME")))
        if do_5x5:
            branches.append(stax.serial(
                stax.Conv(filters3, (1, 1), strides=None, padding="SAME"),
                stax.Conv(filters3, (5, 5), strides, padding="SAME")))
    # ... and always ends with the strided maxpool branch
    branches.append(stax.serial(
        stax.MaxPool((3, 3), padding="SAME"),
        stax.Conv(filters2, (1, 1), strides, padding="SAME")))
    return stax.serial(
        stax.FanOut(len(branches)),
        stax.parallel(*branches),
        stax.FanInConcat(),
        stax.LeakyRelu)
# We'll also want to make sure that the output of the network is the correct shape, for which we'll introduce a Reshaping layer
def Reshape(shape):
    """Stax layer that reshapes its input to ``shape``.

    The layer has no parameters; its output shape is fixed at
    construction time.
    """
    def init_fun(rng, input_shape):
        return shape, ()

    def apply_fun(params, inputs, **kwargs):
        return np.reshape(inputs, shape)

    return init_fun, apply_fun
# Now we can build the network, with kernel sizes of 4 in each direction in each layer
# +
fs = 64
model = stax.serial(
InceptBlock((fs, fs, fs), strides=(4, 4)),
InceptBlock((fs, fs, fs), strides=(4, 4)),
InceptBlock((fs, fs, fs), strides=(4, 4)),
InceptBlock((fs, fs, fs), strides=(2, 2), do_5x5=False, do_3x3=False),
stax.Conv(n_summaries, (1, 1), strides=(1, 1), padding="SAME"),
stax.Flatten,
Reshape((n_summaries,)))
# -
# We'll also grab an adam optimiser from jax.experimental.optimizers
optimiser = optimizers.adam(step_size=1e-3)
# Note that due to the form of the network we'll want to have simulations that have a "channel" dimension, which we can set up by not allowing for squeezing in the simulator.
# ### Initialise IMNN
#
# Finally we can initialise the IMNN, letting the IMNN module decide what type of IMNN subclass will be used (we'll be using SimulatorIMNN)
rng, key = jax.random.split(rng)
# Let imnn.IMNN pick the subclass (SimulatorIMNN here, since a simulator
# is supplied) and initialise the network weights with `key`; the
# simulator is wrapped so fields keep their (1, 1) channel axes
IMNN = imnn.IMNN(
    n_s=n_s,
    n_d=n_d,
    n_params=n_params,
    n_summaries=n_summaries,
    input_shape=(1, 1) + shape,
    θ_fid=θ_fid,
    model=model,
    optimiser=optimiser,
    key_or_state=key,
    simulator=lambda rng, θ: simulator(
        rng, θ, simulator_args={
            **simulator_args,
            **{"squeeze": False}}))
# And finally we can fit the IMNN (we'll use generic regularisation parameters of $\lambda=10$ and $\epsilon=0.1$) and allow early stopping to determine the end of fitting.
# %%time
rng, key = jax.random.split(rng)
IMNN.fit(λ=10., ϵ=0.1, rng=key, print_rate=None, min_iterations=500, best=True)
IMNN.plot(expected_detF=np.linalg.det(F));
np.linalg.det(IMNN.F) / np.linalg.det(F)
# After nearly 1400 iterations of fitting we obtain (at the last iteration) over 97% of the information. The maximum value of the log determinant of the Fisher information obtained by the IMNN is slightly over the analytic value because it is estimated over a limited set which might accidentally have more information due to the stochastic realisation. For this reason we choose the Fisher information at the last iterations rather than the best fit.
# ## Inference
#
# We can now attempt to do inference of some target data using the IMNN. The first thing we should do is make a Gaussian approximation using a parameter estimate from the IMNN and the Fisher information reached at the end of fitting. Note that since the fiducial parameter values are far from the "target" that this estimate of the Fisher information as the covariance will likely be misleading.
GA = imnn.lfi.GaussianApproximation(
parameter_estimates=IMNN.get_estimate(np.expand_dims(δ_target, (0, 1, 2))),
invF=np.expand_dims(np.linalg.inv(IMNN.F), 0),
prior=prior,
gridsize=100)
GA.marginal_plot(
known=θ_target,
label="Gaussian approximation",
axis_labels=["A", "B"],
colours="C1");
# And finally we can do an approximate Bayesian computation
ABC = imnn.lfi.ApproximateBayesianComputation(
target_data=np.expand_dims(δ_target, (0, 1, 2)),
prior=prior,
simulator=lambda rng, θ: simulator(rng, θ, simulator_args={**simulator_args, **{'squeeze':False}}),
compressor=IMNN.get_estimate,
gridsize=100,
F=np.expand_dims(IMNN.F, 0))
rng, key = jax.random.split(rng)
ABC(ϵ=1., rng=key, n_samples=10000, min_accepted=1000,
smoothing=1, max_iterations=1000);
# +
ax = LFI.marginal_plot(
known=θ_target,
label="Analytic likelihood",
axis_labels=["A", "B"])
GA.marginal_plot(
ax=ax,
label="Gaussian approximation",
colours="C1",
axis_labels=["A", "B"])
ABC.marginal_plot(
ax=ax,
label="Approximate Bayesian computation",
colours="C2");
plt.show()
# -
# # Cosmological parameter inference of log normal fields
#
# As a more realistic example of cosmological parameter inference from dark matter fields, albeit it one where we do not (yet) know the amount of information in the field, we can create a log normal field from a power spectrum generated with cosmological parameters.
#
# For example lets say that our fiducial cosmology has $\Omega_c=0.40$ and $\sigma_8=0.75$, using `jax-cosmo` we can set
cosmo_params = jc.Planck15(Omega_c=0.40, sigma8=0.75)
θ_fid = np.array(
[cosmo_params.Omega_c,
cosmo_params.sigma8],
dtype=np.float32)
# Our new $P(k)$ is simply the linear matter power spectrum defined as
def P(k, A=0.40, B=0.75):
    """Linear matter power spectrum at modes k for Omega_c = A, sigma8 = B."""
    return jc.power.linear_matter_power(
        jc.Planck15(Omega_c=A, sigma8=B), k)
simulator_args = dict(
k=k,
L=250,
shape=shape,
vol_norm=True,
N_scale=False,
squeeze=True,
log_normal=True)
# Since our lognormal field simulator *and* power spectra code are differentiable via `Jax`, we can simulate a *differentiable* universe. We'll pull out a nice function to visualize fiducial example data and its derivatives with respect to the cosmological parameters.
# +
from imnn.utils import value_and_jacrev, value_and_jacfwd
simulation, simulation_gradient = value_and_jacfwd(
simulator, argnums=1)(
rng, θ_fid, simulator_args=simulator_args)
# +
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig,ax = plt.subplots(nrows=1, ncols=3, figsize=(12,15))
im1 = ax[0].imshow(np.squeeze(simulation),
extent=(0, 1, 0, 1))
ax[0].title.set_text(r'example fiducial $\rm d$')
divider = make_axes_locatable(ax[0])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im1, cax=cax, orientation='vertical')
im1 = ax[1].imshow(np.squeeze(simulation_gradient).T[0].T,
extent=(0, 1, 0, 1))
ax[1].title.set_text(r'$\nabla_{\Omega_m} \rm d$')
divider = make_axes_locatable(ax[1])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im1, cax=cax, orientation='vertical')
im1 = ax[2].imshow(np.squeeze(simulation_gradient).T[1].T,
extent=(0, 1, 0, 1))
ax[2].title.set_text(r'$\nabla_{\sigma_8} \rm d$')
divider = make_axes_locatable(ax[2])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im1, cax=cax, orientation='vertical')
for a in ax:
a.set_xticks([])
a.set_yticks([])
# -
# We'll now make the target universe that we observe generated with realistic parameters - $\Omega_c=0.35$ and $\sigma_8=0.8$
# +
θ_target = np.array([0.35, 0.8])
rng, key = jax.random.split(rng)
δ_target = simulator(
key, θ_target, simulator_args=simulator_args,
)
plt.imshow(δ_target)
plt.colorbar();
# -
# We can now train an IMNN as before
rng, key = jax.random.split(rng)
IMNN = imnn.IMNN(
n_s=n_s,
n_d=n_d,
n_params=n_params,
n_summaries=n_summaries,
input_shape=(1, 1) + shape,
θ_fid=θ_fid,
model=model,
optimiser=optimiser,
key_or_state=key,
simulator=lambda rng, θ: simulator(
rng, θ,
simulator_args={**simulator_args,
**{"squeeze": False}}))
# %%time
rng, key = jax.random.split(rng)
IMNN.fit(λ=10., ϵ=0.1, rng=key, print_rate=None, min_iterations=500, best=False)
IMNN.plot(expected_detF=None);
# And finally we can do our inference. We'll first set the prior distribution
prior = tfp.distributions.Blockwise(
[tfp.distributions.Uniform(low=low, high=high)
for low, high in zip([0., 0.], [1.5, 1.5])])
prior.low = np.array([0., 0.])
prior.high = np.array([1.5, 1.5])
# And make the Gaussian approximation using the Fisher information
GA = imnn.lfi.GaussianApproximation(
parameter_estimates=IMNN.get_estimate(np.expand_dims(δ_target, (0, 1, 2))),
invF=np.expand_dims(np.linalg.inv(IMNN.F), 0),
prior=prior,
gridsize=100)
# And then run the ABC
ABC = imnn.lfi.ApproximateBayesianComputation(
target_data=np.expand_dims(δ_target, (0, 1, 2)),
prior=prior,
simulator=lambda rng, θ: simulator(rng, θ, simulator_args={**simulator_args, **{'squeeze':False}}),
compressor=IMNN.get_estimate,
gridsize=100,
F=np.expand_dims(IMNN.F, 0))
rng, key = jax.random.split(rng)
ABC(ϵ=0.1, rng=key, n_samples=10000, min_accepted=1000,
smoothing=1., max_iterations=5000);
# And then we can plot the constraints obtained using the IMNN and LFI
ax = GA.marginal_plot(
known=θ_target,
label="Gaussian approximation",
axis_labels=[r"$\Omega_c$", r"$\sigma_8$"],
colours="C1")
ABC.marginal_plot(
ax=ax,
label="Approximate Bayesian computation",
colours="C2");
# examples/2d_field_inference.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Demo - LISA Horizon Distance
# This demo shows how to use ``LEGWORK`` to compute the horizon distance for a collection of sources.
# + tags=["hide_input"]
# %matplotlib inline
# -
import legwork as lw
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
# + tags=["hide_input"]
# %config InlineBackend.figure_format = 'retina'
plt.rc('font', family='serif')
plt.rcParams['text.usetex'] = False
fs = 24
# update various fontsizes to match
params = {'figure.figsize': (12, 8),
'legend.fontsize': fs,
'axes.labelsize': fs,
'xtick.labelsize': 0.9 * fs,
'ytick.labelsize': 0.9 * fs,
'axes.linewidth': 1.1,
'xtick.major.size': 7,
'xtick.minor.size': 4,
'ytick.major.size': 7,
'ytick.minor.size': 4}
plt.rcParams.update(params)
# -
# ## Horizon distance of circular binaries
# The horizon distance for a source is the maximum distance at which the SNR of a source is still above some detectable threshold. The horizon distance can be computed from the SNR as follows since it is inversely proportional to the distance.
# \begin{equation}
# D_{\rm hor} = \frac{\rho(D)}{\rho_{\rm detect}} \cdot D,
# \label{eq:snr_to_hor_dist}
# \end{equation}
# Where $\rho(D)$ is the SNR at some distance $D$ and $\rho_{\rm detect}$ is the SNR above which we consider a source detectable.
#
# Let's start doing this by creating a grid of chirp masses and orbital frequencies and creating a Source class from them.
# +
# create a list of masses and frequencies (log-spaced over the LISA band)
m_c_grid = np.logspace(-1, np.log10(50), 500) * u.Msun
f_orb_grid = np.logspace(np.log10(4e-5), np.log10(3e-1), 400) * u.Hz
# turn the two lists into grids
MC, FORB = np.meshgrid(m_c_grid, f_orb_grid)
# flatten grids
m_c, f_orb = MC.flatten(), FORB.flatten()
# convert chirp mass to individual masses for source class
# (inverts M_c = (m1 m2)^{3/5} / (m1 + m2)^{1/5} for mass ratio q = m2/m1)
q = 1.0
m_1 = m_c / q**(3/5) * (1 + q)**(1/5)
m_2 = m_1 * q
# use a fixed distance and circular binaries
dist = np.repeat(1, len(m_c)) * u.kpc
ecc = np.zeros(len(m_c))
# create the source class
sources = lw.source.Source(m_1=m_1, m_2=m_2, dist=dist, f_orb=f_orb, ecc=ecc, gw_lum_tol=1e-3)
# -
# Next, we can use `LEGWORK` to compute their merger times and SNRs for the contours.
# calculate merger times and then SNR
sources.get_merger_time()
sources.get_snr(verbose=True)
# We flattened the grid to fit into the Source class but now we can reshape the output to match the original grid.
# reshape the output into grids
t_merge_grid = sources.t_merge.reshape(MC.shape)
snr_grid = sources.snr.reshape(MC.shape)
# Now we can define a couple of functions for formatting the time, distance and galaxy name contours.
# + tags=[]
def fmt_time(x):
    """Format a merger time ``x`` (in years) as a contour label.

    The special value 4 marks the t_merge = T_obs contour; otherwise the
    largest unit that keeps the value >= 1 is chosen.
    """
    if x == 4:
        return r"$t_{\rm merge} = T_{\rm obs}$"
    for threshold, unit in ((1e9, "Gyr"), (1e6, "Myr"), (1e3, "kyr"), (1, "yr")):
        if x >= threshold:
            return "{0:1.0f} {1}".format(x / threshold, unit)
    if x >= 1 / 12:
        return "{0:1.0f} month".format(x * 12)
    return "{0:1.0f} week".format(x * 52)
def fmt_dist(x):
    """Format a distance ``x`` (in parsecs) with the largest sensible unit."""
    for threshold, unit in ((1e9, "Gpc"), (1e6, "Mpc"), (1e3, "kpc")):
        if x >= threshold:
            return "{0:1.0f} {1}".format(x / threshold, unit)
    return "{0:1.0f} pc".format(x)
def fmt_name(x):
    """Map a log10(distance / kpc) contour level to a named landmark.

    Returns None for levels that match no landmark.
    """
    landmarks = ((8, "MW Centre"),
                 (50, "SMC/LMC"),
                 (800, "Andromeda"),
                 (40000, "GW170817"))
    for distance, name in landmarks:
        if x == np.log10(distance):
            return name
# -
# Finally, we put it all together to create a contour plot with all of the information.
# +
# create a square figure plus some space for a colourbar
size = 12
cbar_space = 2
fig, ax = plt.subplots(figsize=(size + cbar_space, size))
# set up scales early so contour labels show up nicely
ax.set_xscale("log")
ax.set_yscale("log")
# set axes labels and lims
ax.set_xlabel(r"Orbital Frequency, $f_{\rm orb} \, [\rm Hz]$")
ax.set_ylabel(r"Chirp Mass, $\mathcal{M}_c \, [\rm M_{\odot}]$")
ax.set_xlim(4e-5, 3e-1)
# calculate the horizon distance
snr_threshold = 7
horizon_distance = (snr_grid / snr_threshold * 1 * u.kpc).to(u.kpc)
# set up the contour levels
distance_levels = np.arange(-3, 6 + 0.5, 0.5)
distance_tick_levels = distance_levels[::2]
# plot the contours for horizon distance
distance_cont = ax.contourf(FORB, MC, np.log10(horizon_distance.value), levels=distance_levels)
# hide edges that show up in rendered PDFs
for c in distance_cont.collections:
c.set_edgecolor("face")
# create a colour with custom formatted labels
cbar = fig.colorbar(distance_cont, ax=ax, pad=0.02, ticks=distance_tick_levels, fraction=cbar_space / (size + cbar_space))
cbar.ax.set_yticklabels([fmt_dist(np.power(10, distance_tick_levels + 3)[i]) for i in range(len(distance_tick_levels))])
cbar.set_label(r"Horizon Distance", fontsize=fs)
cbar.ax.tick_params(axis="both", which="major", labelsize=0.7 * fs)
# annotate the colourbar with some named distances
named_distances = np.log10([8, 50, 800, 40000])
for name in named_distances:
cbar.ax.axhline(name, color="white", linestyle="dotted")
# plot the same names as contours
named_cont = ax.contour(FORB, MC, np.log10(horizon_distance.value), levels=named_distances,
colors="white", alpha=0.8, linestyles="dotted")
ax.clabel(named_cont, named_cont.levels, fmt=fmt_name, use_clabeltext=True, fontsize=0.7*fs,
manual=[(1.1e-3, 2e-1), (4e-3, 2.2e-1), (4e-3,1e0), (3e-3, 1.2e1)])
# add a line for when the merger time becomes less than the inspiral time
time_cont = ax.contour(FORB, MC, t_merge_grid.to(u.yr).value, levels=[4],
colors="black", linewidths=2, linestyles="dotted") #[1/52, 1/12, 4, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10]
ax.clabel(time_cont, time_cont.levels, fmt=fmt_time, fontsize=0.7*fs, use_clabeltext=True, manual=[(2.5e-2, 5e0)])
# plot a series of lines and annotations for average DCO masses
for m_1, m_2, dco in [(0.6, 0.6, "WDWD"), (1.4, 1.4, "NSNS"),
(10, 1.4, "BHNS"), (10, 10, "BHBH"), (30, 30, "BHBH")]:
# find chirp mass
m_c_val = lw.utils.chirp_mass(m_1, m_2)
# plot lines before and after bbox
ax.plot([4e-5, 4.7e-5], [m_c_val, m_c_val],
color="black", lw=0.75, zorder=1, linestyle="--")
ax.plot([1.2e-4, 1e0], [m_c_val, m_c_val],
color="black", lw=0.75, zorder=1, linestyle="--")
# plot name and bbox, then masses below in smaller font
ax.annotate(dco + "\n", xy=(7.5e-5, m_c_val), ha="center", va="center", fontsize=0.7*fs,
bbox=dict(boxstyle="round", fc="white", ec="white", alpha=0.25))
ax.annotate(r"${{{}}} + {{{}}}$".format(m_1, m_2),
xy=(7.5e-5, m_c_val * 0.95), ha="center", va="top", fontsize=0.5*fs)
# ensure that everyone knows this only applies for circular sources
ax.annotate(r"$e = 0.0$", xy=(0.03, 0.04), xycoords="axes fraction", fontsize=0.8*fs,
bbox=dict(boxstyle="round", fc="white", ec="white", alpha=0.25))
ax.set_facecolor(plt.get_cmap("viridis")(0.0))
plt.show()
# docs/demos/HorizonDistance.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Recipe Builder Actions Overview
#
# ### Saving a File Cell
# If you wish to save the contents of a cell, simply run it. The `%%writefile` command at the top of the cell will write the contents of the cell to the file named at the top of the cell. You should run the cells manually when applicable. However, **pressing any of the actions at the top will automatically run all file cells relevant to the action**.
#
# ### Training and Scoring
# Press the associated buttons at the top in order to run training or scoring. The training output will be shown below the `evaluator.py` cell and scoring output will be shown below the `datasaver.py` cell. You must run training at least once before you can run scoring. You may delete the output cell(s). Running training the first time or after changing `requirements.txt` will be slower since the dependencies for the recipe need to be installed, but subsequent runs will be significantly faster. If you wish to see the hidden output add `debug` to the end of the output cell and re-run it.
#
# ### Creating the Recipe
# When you are done editing the recipe and satisfied with the training/scoring output, you can create a recipe from the notebook by pressing `Create Recipe`. You must run scoring at least once before you can create the recipe. After pressing it, you will see a progress bar showing how much time is left for the build to finish. If the recipe creation is successful the progress bar will be replaced by an external link that you can click to navigate to the created recipe.
#
#
# ## Caution!
# * **Do not delete any of the file cells**
# * **Do not edit the `%%writefile` line at the top of the file cells**
# * **Do not create recipes in different notebooks at the same time**
#
# ---
# #### **Requirements File** (Optional)
# Add additional libraries you wish to use in the recipe to the cell below. You can specify the version number if necessary. The file cell below is a **commented out example**.
# + id="requirements.txt" tags=["requirements.txt"]
# pandas=0.22.0
# numpy
# -
# Search here for additional libraries https://anaconda.org/. This is the list of main **libraries already in use**:
# `python=3.5.2` `scikit-learn` `pandas` `numpy` `data_access_sdk_python`
# **Warning: libraries or specific versions you add may be incompatible with the above libraries**.
# ---
# #### **Configuration Files**
# List any hyperparamters you wish to use. Specify the dataset(s) and schema(s) that are needed for training/scoring. To find the dataset ids go to the **Data tab** in Adobe Experience Platform or view the **Datasets** folder in the **Notebooks Data tab** on the left. You can also find schema id in the **Notebooks Data tab** under the **Schemas** folder. Each configuration will only be used for its corresponding action. `ACP_DSW_TRAINING_XDM_SCHEMA` and `ACP_DSW_SCORING_RESULTS_XDM_SCHEMA` will only be used after the recipe has been created.
# ##### Training Configuration
# + id="training.conf" tags=["training.conf"]
{
"trainingDataSetId": "<replace>",
"ACP_DSW_TRAINING_XDM_SCHEMA": "<replace>",
"tenant_id": "<replace>",
"num_recommendations": "5",
"sampling_fraction": "0.5"
}
# -
# ##### Scoring Configuration
# + id="scoring.conf" tags=["scoring.conf"]
{
"scoringDataSetId": "<replace>",
"scoringResultsDataSetId": "<replace>",
"ACP_DSW_SCORING_RESULTS_XDM_SCHEMA": "<replace>",
"tenant_id": "<replace>"
}
# -
# **The following configuration parameters are automatically set for you when you train/score:**
# `ML_FRAMEWORK_IMS_USER_CLIENT_ID` `ML_FRAMEWORK_IMS_TOKEN` `ML_FRAMEWORK_IMS_ML_TOKEN` `ML_FRAMEWORK_IMS_TENANT_ID` `saveData`
# ---
# #### **Training Data Loader File**
# Implement the `load` function to load and prepare the training data.
# + id="trainingdataloader.py" tags=["trainingdataloader.py"]
import numpy as np
import pandas as pd
from data_access_sdk_python.reader import DataSetReader
def load(configProperties):
    """Read the configured training dataset from the platform and return it."""
    print("Training Data Load Start")
    print(configProperties)

    # authenticated reader for the data access SDK
    reader = DataSetReader(
        client_id=configProperties['ML_FRAMEWORK_IMS_USER_CLIENT_ID'],
        user_token=configProperties['ML_FRAMEWORK_IMS_TOKEN'],
        service_token=configProperties['ML_FRAMEWORK_IMS_ML_TOKEN'])
    train_data = reader.load(
        data_set_id=configProperties['trainingDataSetId'],
        ims_org=configProperties['ML_FRAMEWORK_IMS_TENANT_ID'])

    print("Training Data Load Finish")
    return train_data
# -
# ---
# #### **Scoring Data Loader File**
# Implement the `load` function to load and prepare the scoring data.
# + id="scoringdataloader.py" tags=["scoringdataloader.py"]
import numpy as np
import pandas as pd
from data_access_sdk_python.reader import DataSetReader
def load(configProperties):
    """Read the configured scoring dataset from the platform and return it."""
    print("Scoring Data Load Start")

    # authenticated reader for the data access SDK
    reader = DataSetReader(
        client_id=configProperties['ML_FRAMEWORK_IMS_USER_CLIENT_ID'],
        user_token=configProperties['ML_FRAMEWORK_IMS_TOKEN'],
        service_token=configProperties['ML_FRAMEWORK_IMS_ML_TOKEN'])
    scoring_data = reader.load(
        data_set_id=configProperties['scoringDataSetId'],
        ims_org=configProperties['ML_FRAMEWORK_IMS_TENANT_ID'])

    print("Scoring Data Load Finish")
    return scoring_data
# -
# ---
# #### **Pipeline File**
# Implement the `train` function and return the trained model. Implement the `score` function to return a prediction made on the scoring data.
# + id="pipeline.py" tags=["pipeline.py"]
import pandas as pd
import numpy as np
from collections import Counter
class PopularityBasedRecommendationModel():
    """Recommend the globally most popular items to every user.

    ``fit`` counts item interactions and keeps the top ``num_to_recommend``
    items; ``predict`` attaches that same '#'-joined list to every user
    present in the scoring data.
    """

    def __init__(self, num_to_recommend, configProperties):
        self.num_to_recommend = num_to_recommend
        self.recommendations = ['dummy']
        tenant_id = configProperties['tenant_id']
        # tenant-namespaced XDM column names
        self.user_id_column = '_%s.userId' % tenant_id
        self.recommendations_column = '_%s.recommendations' % tenant_id
        self.item_id_column = '_%s.itemId' % tenant_id

    def fit(self, df):
        # drop rows with a missing item id before counting popularity
        interactions = df[df[self.item_id_column].notnull()]
        counts = Counter(list(interactions[self.item_id_column].values))
        self.recommendations = [
            item for item, freq in counts.most_common(self.num_to_recommend)]

    def predict(self, df):
        # drop rows with a missing item id
        valid = df[df[self.item_id_column].notnull()]
        # one row per user, with their interactions joined as a temp column
        per_user = valid.groupby(self.user_id_column).agg(
            {self.item_id_column: lambda x: ','.join(x)})\
            .rename(columns={self.item_id_column: 'interactions'}).reset_index()
        # every user receives the same popularity-based list
        per_user[self.recommendations_column] = '#'.join(self.recommendations)
        return per_user.drop(['interactions'], axis=1)
def train(configProperties, data):
    """Fit a popularity recommender on `data` and return the trained model.

    Reads 'num_recommendations' (list length) from `configProperties`.
    """
    print("Train Start")
    # Extract fields from configProperties.
    top_n = int(configProperties['num_recommendations'])
    # Fit model.
    print('in train')
    print(configProperties)
    model = PopularityBasedRecommendationModel(top_n, configProperties)
    model.fit(data)
    print("Train Complete")
    return model
def score(configProperties, data, model):
    """Run the trained `model` against the scoring `data` and return its output."""
    print("Score Start")
    prediction = model.predict(data)
    print("Score Complete")
    return prediction
# -
# ---
# #### **Evaluator File**
# Implement the `split` function to partition the training data and the `evaluate` function to the return the validation metrics you wish to see. Training output will be shown below this file cell.
# + id="evaluator.py" tags=["evaluator.py"]
from ml.runtime.python.Interfaces.AbstractEvaluator import AbstractEvaluator
from data_access_sdk_python.reader import DataSetReader
import numpy as np
import pandas as pd
class Evaluator(AbstractEvaluator):
    """Partitions the training data and computes recall/precision for the recommender."""
    def split(self, configProperties={}, dataframe=None):
        """Return (train, test) partitions of `dataframe`.

        NOTE(review): no real split is performed — both partitions are shallow
        slice-copies of the full dataframe, so the model is evaluated on the
        same data it trained on. Mutable default arguments are a Python
        pitfall but are kept for interface compatibility.
        """
        #########################################
        # Load Data
        #########################################
        train = dataframe[:]
        test = dataframe[:]
        return train, test
    def evaluate(self, data=[], model={}, configProperties={}):
        """Score `model` on `data`; return per-metric dicts for Recall and Precision.

        Recall is a per-user hit indicator (1 if any recommended item appears in
        the user's interactions). "Precision" adds 0.5 for the first hit, 0.25
        for the second, and so on.
        """
        print ("Evaluation evaluate triggered")
        tenant_id=configProperties['tenant_id']
        # Tenant-prefixed XDM column names.
        self.user_id_column = '_%s.userId' % tenant_id
        self.recommendations_column = '_%s.recommendations' % tenant_id
        self.item_id_column = '_%s.itemId' % tenant_id
        # remove columns having none
        data = data[data[self.item_id_column].notnull()]
        # One row per user with all interacted items '#'-joined — the same
        # separator the model uses for recommendations, so the splits align.
        data_grouped_by_user = data.groupby(self.user_id_column).agg(
            {self.item_id_column: lambda x: '#'.join(x)})\
            .rename(columns={self.item_id_column:'interactions'}).reset_index()
        data_recommendations = model.predict(data)
        # Inner-join interactions with recommendations per user.
        merged_df = pd.merge(data_grouped_by_user, data_recommendations, on=[self.user_id_column]).reset_index()
        def compute_recall(row):
            # 1 if at least one recommended item was actually interacted with.
            set_interactions = set(row['interactions'].split('#'))
            set_recommendations = set(row[self.recommendations_column].split('#'))
            inters = set_interactions.intersection(set_recommendations)
            if len(inters) > 0:
                return 1
            return 0
        def compute_precision(row):
            # Geometrically decaying credit: each successive hit is worth half
            # of the previous hit's weight (0.5, 0.25, ...).
            set_interactions = set(row['interactions'].split('#'))
            list_recommendations = row[self.recommendations_column].split('#')
            score = 0
            weight = 0.5
            for rec in list_recommendations:
                if rec in set_interactions:
                    score = score + weight
                    weight = weight / 2
            return score
        merged_df['recall'] = merged_df.apply(lambda row: compute_recall(row), axis=1)
        merged_df['precision'] = merged_df.apply(lambda row: compute_precision(row), axis=1)
        recall = merged_df['recall'].mean()
        precision = merged_df['precision'].mean()
        # Metric payload format expected by the ML framework.
        metric = [{"name": "Recall", "value": recall, "valueType": "double"},
                  {"name": "Precision", "value": precision, "valueType": "double"}]
        print(metric)
        return metric
# -
# ---
# #### **Data Saver File**
# Implement the `save` function for saving your prediction. Scoring output will be added below this cell.
# + id="datasaver.py" tags=["datasaver.py"]
from data_access_sdk_python.writer import DataSetWriter
from functools import reduce
import json
def save(configProperties, prediction):
    """Write the `prediction` DataFrame back to the platform as a new batch."""
    print(prediction)
    # Build a platform writer from the framework-provided credentials.
    writer = DataSetWriter(
        client_id=configProperties['ML_FRAMEWORK_IMS_USER_CLIENT_ID'],
        user_token=configProperties['ML_FRAMEWORK_IMS_TOKEN'],
        service_token=configProperties['ML_FRAMEWORK_IMS_ML_TOKEN'])
    batch_id = writer.write(
        data_set_id=configProperties['scoringResultsDataSetId'],
        dataframe=prediction,
        ims_org=configProperties['ML_FRAMEWORK_IMS_TENANT_ID'])
    print("Data written successfully to platform:",batch_id)
| Summit/2019/resources/Notebooks-Wed/recommendations-popularity-recipe-generic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Forest Fires : Prediction of Forest Fires using the demographic and weather data of a forest.
# ## Forest fire prediction constitutes a significant component of forest fire management. It plays an important role in resource allocation, mitigation, and recovery efforts.
# ## Here below, I have visualized the effects of variation of a particular feature on forest fire and compared the loss of different models after training them.
# +
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
import random
# importing sklearn libraries
from sklearn import neural_network, linear_model, preprocessing, svm, tree
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.naive_bayes import GaussianNB
# importing keras libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
import warnings
# suppressing the warning on the usage of the Linear Regression model
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# -
# ### Loading the dataset
forest_fires = pd.read_csv('forest_fires.csv')
forest_fires
# ### Converting the labels under month and day to integers
# NOTE(review): Series.replace with inplace=True on an attribute access is
# deprecated in recent pandas — confirm the pandas version before upgrading.
forest_fires.month.replace(('jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec'),(1,2,3,4,5,6,7,8,9,10,11,12), inplace=True)
forest_fires.day.replace(('mon','tue','wed','thu','fri','sat','sun'),(1,2,3,4,5,6,7), inplace=True)
# ### Statistical analysis of dataset
forest_fires.describe()
# ### Correlation analysis for the dataset
forest_fires.corr()
# ### Extracting features from the dataset
# +
x_values = list(forest_fires['X'])
y_values = list(forest_fires['Y'])
# Pair each record's spatial coordinates as an [x, y] location.
loc_values = [[x, y] for x, y in zip(x_values, y_values)]
# +
# Pull every remaining feature column out as a plain Python list.
month_values = list(forest_fires['month'])
day_values = list(forest_fires['day'])
ffmc_values = list(forest_fires['FFMC'])
dmc_values = list(forest_fires['DMC'])
dc_values = list(forest_fires['DC'])
isi_values = list(forest_fires['ISI'])
temp_values = list(forest_fires['temp'])
rh_values = list(forest_fires['RH'])
wind_values = list(forest_fires['wind'])
rain_values = list(forest_fires['rain'])
area_values = list(forest_fires['area'])
# +
# Assemble the 12-feature input vector for every record; zip keeps the
# parallel columns aligned row by row.
attribute_list = [list(row) for row in zip(
    x_values, y_values, month_values, day_values, ffmc_values, dmc_values,
    dc_values, isi_values, temp_values, rh_values, wind_values, rain_values)]
# -
# ### Counting the instances of location points in dataset
def count_points(x_points, y_points, scaling_factor, locations=None):
    """Return, for each (x, y) pair, the number of records at that location, scaled.

    Used to size scatter-plot markers by location frequency.

    Parameters
    ----------
    x_points, y_points : parallel sequences of coordinates to look up.
    scaling_factor : multiplier applied to each raw count.
    locations : optional list of [x, y] pairs to count occurrences in;
        defaults to the module-level `loc_values` built from the dataset.

    Returns
    -------
    list : count * scaling_factor for each input point (0 if never seen).
    """
    # Local import: this notebook section does not import Counter at top level.
    from collections import Counter
    if locations is None:
        locations = loc_values
    # Count every location once up front — O(n) instead of the original
    # O(n^2) nested scan over loc_values for every input point.
    counts = Counter(tuple(point) for point in locations)
    return [counts[(x, y)] * scaling_factor
            for x, y in zip(x_points, y_points)]
# ### Histogram plotting function for dataset
def histogram_plot(dataset, title):
    """Draw a frame-less histogram of `dataset` with the given `title`."""
    plt.figure(figsize=(8, 6))
    axis = plt.subplot()
    # Hide the box around the plot and keep only bottom/left tick marks.
    for side in ("top", "bottom", "right", "left"):
        axis.spines[side].set_visible(False)
    axis.get_xaxis().tick_bottom()
    axis.get_yaxis().tick_left()
    plt.title(title, fontsize=22)
    plt.hist(dataset, edgecolor='black', linewidth=1.2)
# ### Scatter plot for the locations
# +
plt.figure(figsize=(8, 6))
ax = plt.subplot()
# Frame-less style: hide all four spines, keep bottom/left ticks only.
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.title("Fire location plot", fontsize = 22)
# Marker size encodes how many records share each (X, Y) grid cell.
plt.scatter(x_values, y_values, s = count_points(x_values, y_values, 25), alpha = 0.3)
plt.show()
# -
# ### Plotting the distribution of values for the dataset
# One histogram per feature, in the dataset's column order.
for values, plot_title in [
        (month_values, "Month distribution"),
        (day_values, "Day distribution"),
        (ffmc_values, "FFMC distribution"),
        (dmc_values, "DMC distribution"),
        (dc_values, "DC distribution"),
        (isi_values, "ISI distribution"),
        (temp_values, "Temperature distribution"),
        (rh_values, "RH distribution"),
        (wind_values, "Wind distribution"),
        (rain_values, "Rain distribution"),
        (area_values, "Burned area distribution"),
]:
    histogram_plot(values, title = plot_title)
    plt.show()
# ### Percentage of dataset with 'burned area' > 0
# +
# Count how many records actually burned any area at all.
total_count = len(area_values)
positive_data_count = sum(1 for value in area_values if value > 0)
print("The number of data records with 'burned area' > 0 are " + str(positive_data_count) + " and the total number of records are " + str(total_count) + ".")
print("The percentage value is " + str(positive_data_count/total_count * 100) + ".")
# -
# ## Gaining insights with learning models
# ### Splitting the available data / setting the initial parameters
train_x, test_x, train_y, test_y = train_test_split(attribute_list, area_values, test_size=0.2, random_state = 9)  # 80/20 split, fixed seed for reproducibility
mse_values = []  # per-model mean squared errors, in run order
variance_score = []  # per-model R^2 scores, in run order
# ### Printing the actual vs predicted values
def print_values(test, predicted):
    """Print actual vs. predicted target values side by side (4-decimal format).

    FIX: the original body ignored its parameters and read the module-level
    globals `test_y` / `predicted_y` instead, so passing other data had no
    effect.
    """
    print("The actual output and the predicted output are:")
    for actual, guess in zip(test, predicted):
        print('%.4f' % actual, " ", '%.4f' % guess)
# Each cell below fits one regressor on the raw-target split, prints MSE and
# R^2 ("variance score"), and appends both to the metric collectors.
# ### Linear regression model
# +
linear_regression = linear_model.LinearRegression()
linear_regression.fit(train_x, train_y)
predicted_y = linear_regression.predict(test_x)
print('Coefficients: \n', linear_regression.coef_)
print("\nMean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Decision tree model
# +
# NOTE(review): 'presort' was deprecated and then removed in scikit-learn
# 0.24 — drop the argument when upgrading sklearn.
decision_tree = tree.DecisionTreeRegressor(presort = True)
decision_tree.fit(train_x, train_y)
predicted_y = decision_tree.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### MLP model
# +
mlp = neural_network.MLPRegressor(hidden_layer_sizes = (150,50,50), activation = "tanh", solver = "sgd", learning_rate = "adaptive")
mlp.fit(train_x, train_y)
predicted_y = mlp.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### SVM model
# +
svm_model = svm.SVR()
svm_model.fit(train_x, train_y)
predicted_y = svm_model.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Random forest model
# +
random_forest = RandomForestRegressor()
random_forest.fit(train_x, train_y)
predicted_y = random_forest.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Bayesian ridge model
# +
bayesian_ridge = linear_model.BayesianRidge()
bayesian_ridge.fit(train_x, train_y)
predicted_y = bayesian_ridge.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Lasso model
# +
lasso_model = linear_model.Lasso()
lasso_model.fit(train_x, train_y)
predicted_y = lasso_model.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Function for generating the graph
def generate_plot(title, ticks, dataset, color_number):
    """Bar-plot `dataset` against categorical `ticks` in a frame-less style.

    `color_number` indexes a fixed three-color palette.
    """
    palette = ["slateblue", "mediumseagreen", "tomato"]
    plt.figure(figsize=(8, 6))
    axis = plt.subplot()
    # Hide the box around the plot and keep only bottom/left tick marks.
    for side in ("top", "bottom", "right", "left"):
        axis.spines[side].set_visible(False)
    axis.get_xaxis().tick_bottom()
    axis.get_yaxis().tick_left()
    plt.xticks(np.arange(len(ticks)), ticks, fontsize=10, rotation=30)
    plt.title(title, fontsize=22)
    plt.bar(ticks, dataset, linewidth=1.2, color=palette[color_number])
# ### Mean squared error graph
ticks = ["Linear Regression", "Decision Tree", "MLP", "SVM", "Random Forest", "Bayesion Ridge", "Lasso"]
generate_plot("Plot of MSE values", ticks, mse_values, 0)
# ### Variance score graph
generate_plot("Plot of Variance scores", ticks, variance_score, 1)
# ## Applying Log-Transformation to the 'burned area' variable
# We can see that the errors in the prediction of burned areas from the given dataset are very high in the above mentioned models. A reason for this could be the high skewness of the 'Burned Area' variable towards zero.
# log1p transform: compresses the heavy right tail while keeping zeros at zero.
area_values = list(np.log(np.array(area_values) + 1))
histogram_plot(area_values, title = "Burned area distribution")
# ## Applying learning models on the processed data
# ### Setting the initial parameters
mse_values = []
variance_score = []
# FIX: re-split so the next round of models actually trains and is evaluated
# on the log-transformed target. Previously train_y/test_y still held the raw
# areas from the first split, making round 2 identical to round 1.
train_x, test_x, train_y, test_y = train_test_split(attribute_list, area_values, test_size=0.2, random_state = 9)
# Second modelling round, intended for the log-transformed target.
# NOTE(review): verify that train_y/test_y were re-created after the log
# transform above — otherwise these models still see the raw target.
# ### Linear regression model
# +
linear_regression = linear_model.LinearRegression()
linear_regression.fit(train_x, train_y)
predicted_y = linear_regression.predict(test_x)
print('Coefficients: \n', linear_regression.coef_)
print("\nMean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Decision tree model
# +
# NOTE(review): 'presort' was removed in scikit-learn 0.24 — drop it there.
decision_tree = tree.DecisionTreeRegressor(presort = True)
decision_tree.fit(train_x, train_y)
predicted_y = decision_tree.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### MLP model
# +
# NOTE(review): hidden sizes (150,30,50) differ from the other rounds'
# (150,50,50) — confirm whether this was intentional.
mlp = neural_network.MLPRegressor(hidden_layer_sizes = (150,30,50), activation = "tanh", solver = "sgd", learning_rate = "adaptive")
mlp.fit(train_x, train_y)
predicted_y = mlp.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### SVM model
# +
svm_model = svm.SVR()
svm_model.fit(train_x, train_y)
predicted_y = svm_model.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Random forest model
# +
random_forest = RandomForestRegressor()
random_forest.fit(train_x, train_y)
predicted_y = random_forest.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Bayesian ridge model
# +
bayesian_ridge = linear_model.BayesianRidge()
bayesian_ridge.fit(train_x, train_y)
predicted_y = bayesian_ridge.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Lasso model
# +
lasso_model = linear_model.Lasso()
lasso_model.fit(train_x, train_y)
predicted_y = lasso_model.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Mean squared error graph
ticks = ["Linear Regression", "Decision Tree", "MLP", "SVM", "Random Forest", "Bayesion Ridge", "Lasso"]
generate_plot("Plot of MSE values", ticks, mse_values, 0)
# ### Variance score graph
generate_plot("Plot of Variance scores", ticks, variance_score, 1)
# ## Normalisation of all data
# +
def _unit_norm(values):
    """L2-normalize a single feature column; returns the scaled 1-D vector."""
    return preprocessing.normalize([values])[0]
# Scale every column (including the target) by its global L2 norm.
n_x_values = _unit_norm(x_values)
n_y_values = _unit_norm(y_values)
n_month_values = _unit_norm(month_values)
n_day_values = _unit_norm(day_values)
n_ffmc_values = _unit_norm(ffmc_values)
n_dmc_values = _unit_norm(dmc_values)
n_dc_values = _unit_norm(dc_values)
n_isi_values = _unit_norm(isi_values)
n_temp_values = _unit_norm(temp_values)
n_rh_values = _unit_norm(rh_values)
n_wind_values = _unit_norm(wind_values)
n_rain_values = _unit_norm(rain_values)
n_area_values = _unit_norm(area_values)
# Rebuild the per-record 12-feature vectors from the normalised columns.
n_attribute_list = [list(row) for row in zip(
    n_x_values, n_y_values, n_month_values, n_day_values, n_ffmc_values,
    n_dmc_values, n_dc_values, n_isi_values, n_temp_values, n_rh_values,
    n_wind_values, n_rain_values)]
# -
# ## Applying learning models on the normalised data
# ### Setting the initial parameters
mse_values = []  # reset metric collectors for the normalised-data round
variance_score = []
# ### Splitting the available data
train_x, test_x, train_y, test_y = train_test_split(n_attribute_list, n_area_values, test_size=0.3, random_state = 9)  # note: 70/30 here vs. 80/20 earlier
# ### Linear regression model
# Third round: same regressors, now on the normalised features and target.
# +
linear_regression = linear_model.LinearRegression()
linear_regression.fit(train_x, train_y)
predicted_y = linear_regression.predict(test_x)
print('Coefficients: \n', linear_regression.coef_)
print("\nMean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Decision tree model
# +
# NOTE(review): 'presort' was removed in scikit-learn 0.24 — drop it there.
decision_tree = tree.DecisionTreeRegressor(presort = True)
decision_tree.fit(train_x, train_y)
predicted_y = decision_tree.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### MLP model
# +
mlp = neural_network.MLPRegressor(hidden_layer_sizes = (150,50,50), activation = "tanh", solver = "sgd", learning_rate = "adaptive")
mlp.fit(train_x, train_y)
predicted_y = mlp.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### SVM model
# +
svm_model = svm.SVR()
svm_model.fit(train_x, train_y)
predicted_y = svm_model.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Random forest model
# +
random_forest = RandomForestRegressor()
random_forest.fit(train_x, train_y)
predicted_y = random_forest.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Bayesian ridge model
# +
bayesian_ridge = linear_model.BayesianRidge()
bayesian_ridge.fit(train_x, train_y)
predicted_y = bayesian_ridge.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ### Lasso model
# +
lasso_model = linear_model.Lasso()
lasso_model.fit(train_x, train_y)
predicted_y = lasso_model.predict(test_x)
print("Mean squared error: ", mean_squared_error(test_y, predicted_y))
print('Variance score: %.2f' % r2_score(test_y, predicted_y))
mse_values.append(mean_squared_error(test_y, predicted_y))
variance_score.append(r2_score(test_y, predicted_y))
#print_values(test_y, predicted_y)
# -
# ## Visualising the results
# ### Mean squared error graph
ticks = ["Linear Regression", "Decision Tree", "MLP", "SVM", "Random Forest", "Bayesion Ridge", "Lasso"]
generate_plot("Plot of MSE values", ticks, mse_values, 0)
# ### Variance score graph
generate_plot("Plot of Variance scores", ticks, variance_score, 1)
# The next step is to try out binary classification for this problem.
# ## Converting the target values to binary classes
# +
# Label each record: 1 if any area burned, 0 otherwise.
# (area_values may already be log1p-transformed; log1p(0) == 0, so the
# zero test still identifies "no burn" records.)
# Also removes the original's unused `count` variable and replaces the
# append loop with a comprehension.
binary_area_values = [0 if value == 0 else 1 for value in area_values]
# -
# ## Gaining insights with classification algorithm
# ### Setting the initial parameters
accuracy_values = []  # per-classifier accuracy (percent), in run order
# ### Splitting the available data
train_x, test_x, train_y, test_y = train_test_split(attribute_list, binary_area_values, test_size=0.15, random_state = 4)
# Each cell below fits one binary classifier (burn / no burn), prints its
# predictions and accuracy, and records the accuracy percentage.
# ### SGD model
# +
sgd = linear_model.SGDClassifier()
sgd.fit(train_x, train_y)
predicted_y = sgd.predict(test_x)
print("The predicted values are:", predicted_y)
print("The accuracy score is " + str(accuracy_score(test_y, predicted_y) * 100) + ".")
accuracy_values.append(accuracy_score(test_y, predicted_y) * 100)
# -
# ### Decision tree model
# +
decision_tree = tree.DecisionTreeClassifier()
decision_tree.fit(train_x, train_y)
predicted_y = decision_tree.predict(test_x)
print("The predicted values are:", predicted_y)
print("The accuracy score is " + str(accuracy_score(test_y, predicted_y) * 100) + ".")
accuracy_values.append(accuracy_score(test_y, predicted_y) * 100)
# -
# ### Naive bayes model
# +
naive_bayes = GaussianNB()
naive_bayes.fit(train_x, train_y)
predicted_y = naive_bayes.predict(test_x)
print("The predicted values are:", predicted_y)
print("The accuracy score is " + str(accuracy_score(test_y, predicted_y) * 100) + ".")
accuracy_values.append(accuracy_score(test_y, predicted_y) * 100)
# -
# ### SVM model
# +
# NOTE: gamma only affects rbf/poly/sigmoid kernels; it is ignored with
# kernel='linear'.
svm_model = svm.SVC(kernel='linear', gamma=100)
svm_model.fit(train_x, train_y)
predicted_y = svm_model.predict(test_x)
print("The predicted values are:", predicted_y)
print("The accuracy score is " + str(accuracy_score(test_y, predicted_y) * 100) + ".")
accuracy_values.append(accuracy_score(test_y, predicted_y) * 100)
# -
# ### Random forest model
# +
random_forest = RandomForestClassifier()
random_forest.fit(train_x, train_y)
predicted_y = random_forest.predict(test_x)
print("The predicted values are:", predicted_y)
print("The accuracy score is " + str(accuracy_score(test_y, predicted_y) * 100) + ".")
accuracy_values.append(accuracy_score(test_y, predicted_y) * 100)
# -
# ## Visualising the results
# ### Accuracy score graph
ticks = ["SGD", "Decision tree", "Naive bayes", "SVM", "Random Forest"]
generate_plot("Plot of accuracy scores", ticks, accuracy_values, 2)
# ## Artificial Neural Network - Implementation
# +
# Convert the classification split into NumPy arrays for Keras.
train_x_a = np.array(train_x)
test_x_a = np.array(test_x)
test_y_a = np.array(test_y)
train_y_temp = np.array(train_y)  # kept for parity with the original; unused afterwards
# Re-binarize the training target (already 0/1 after the classification split).
train_y_cl = np.array([1 if label > 0 else 0 for label in train_y])
print("Length of Training data is : "+str(len(train_x_a))+" and Test data is : " + str(len(test_x_a)))
# -
# ### Creating the Keras neural network model
# 12 -> 12 -> 12 -> 1 fully-connected binary classifier.
model = Sequential()
model.add(Dense(12, activation='tanh',input_shape=(12,)))
model.add(Dense(12, activation='tanh',input_shape=(12,)))  # input_shape on a non-first layer is ignored by Keras
model.add(Dense(1, activation='sigmoid'))
# ### Training, Fitting and Evaluating the model
# +
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(train_x_a,train_y_cl, epochs=100, batch_size=8)
# Evaluate the model
scores = model.evaluate(test_x_a, test_y_a)
# scores[1] is the accuracy metric declared at compile time.
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# -
# ## Artificial Neural Network - Regression Model
# ### Defining base model
def baseline_model():
    """Build and compile a single-hidden-layer regression network (12 -> 12 -> 1).

    Used as the model factory for KerasRegressor during cross-validation.
    """
    net = Sequential()
    net.add(Dense(12, input_dim=12, kernel_initializer='normal', activation='relu'))
    net.add(Dense(1, kernel_initializer='normal'))
    net.compile(loss='mean_squared_error', optimizer='adam')
    return net
# ### Creating sequential model
# Same architecture as baseline_model(), built inline for direct training.
model = Sequential()
model.add(Dense(12, input_dim=12, kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
# ### Compiling & training the model
# +
# compile model
model.compile(loss='mean_squared_error', optimizer='adam')
# train model
# NOTE(review): train_y here still holds the binary labels from the
# classification split — confirm a continuous regression target was intended.
history = model.fit(np.array(train_x), np.array(train_y), epochs=150, batch_size=4, verbose=1)
# -
# ### Make Predictions
predicted = model.predict( np.array(test_x),batch_size=None, verbose=0, steps=1)
# ### Evaluate model with standardized dataset
# NOTE(review): the Keras sklearn wrapper expects 'epochs'; 'nb_epoch' is the
# legacy Keras 1.x spelling — verify it is honored by this Keras version.
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=5, verbose=1)
seed = 14
np.random.seed(seed)  # fix NumPy RNG for reproducible weight init/folds
# ### Model Validation - 10 fold validation
# FIX: KFold only honors random_state when shuffle=True; with the default
# shuffle=False the seed was ignored, and recent scikit-learn versions raise
# a ValueError for this combination.
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
# cross_val_score clones and re-fits the estimator on each of the 10 folds.
results = cross_val_score(estimator, np.array(test_x), np.array(test_y), cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
# ### Checking the MSE and variance scores
# Evaluate the directly-trained network's predictions on the held-out split.
print("Mean squared error: ", mean_squared_error(test_y, predicted))
print('Variance score: %.2f' % r2_score(test_y, predicted))
| Rishabh_Singhal_16NA10019.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Table of Contents
# 1. [Introduction](#introduction)
# 2. [Environment](#environment)
# 1. [Imports](#imports)
# 2. [User-defined inputs](#inputs)
# 3. [Data Download](#download)
# 1. [Data preprocessing](#preprocessing)
# 2. [Download Atmospheric Variables](#atm_vars)
# 3. [Download Precipitation](#precipitation)
# # Download data for the analysis in the paper: <a name="introduction"></a>
# ### [Extreme precipitation events in the Mediterranean: Spatiotemporal characteristics and connection to large-scale atmospheric flow patterns](https://rmets.onlinelibrary.wiley.com/doi/10.1002/joc.6985)
#
# ---
# Author: <NAME>\
# Email: <EMAIL>; <EMAIL>
#
# ---
# This works uses ERA5 data for quantifying the connections of localized extreme precipitation to large-scale patterns. The main variables that are used are:
# 1. Mean Sea Level Pressure
# 2. Temperature at 850hPa
# 3. Geopotential height at 500hPa
# 4. Total Precipitation
#
# Moreover, there are some additional variables tested that are related to moisture/water content. These variables are:
# 1. Specific humidity at 850hPa
# 2. Water Vapour Flux (eastwards and northwards components)
#
# The downloading is done via the MARS data storage facility of ECMWF. Information about how to download data is available at [this](https://confluence.ecmwf.int/display/CKB/How+to+download+data+via+the+ECMWF+WebAPI) link.
# In case of no access to MARS, the data are also available from Copernicus CDS (https://cds.climate.copernicus.eu/#!/home). Downloading data from CDS requires non-trivial amendments in this script.
# # Environment<a name="environment"></a>
# Load the required packages and get the user-defined inputs.
# The downloading was done in a Linux machine with 8 CPUs and 32 GB RAM. The total duration was about 8 hours. Downloading variables takes about 6 hours per variable if single CPU is used.
# ## Imports<a name="imports"></a>
# Import the required packages (full package or specific functions).
# +
import multiprocessing # parallel processing
import tqdm # timing
from datetime import datetime
from itertools import groupby
from pathlib import Path # creation of dictionaries
import numpy as np
import pandas as pd
import metview as mv # metview package for downloading the data from MARS
# -
# ## User-defined inputs <a name="inputs"></a>
# Define the main folder where the data will be stored.
dir_loc = 'Data/'
Path(dir_loc).mkdir(parents=True, exist_ok=True) # generate the subfolder for storing the results
# Define the inputs for the spatiotemporal coverage, and grid resolution.
# +
dates_generated_all = pd.date_range(start = '19790101', end = '20191231').strftime('%Y%m%d').to_list() # used dates
area_precipt = [47, -8, 29, 38] # coordinates as in N/W/S/E of the subdomain of interest (Mediterranean domain)
grid_precipt = [.25, .25] # grid resolution in degrees
area_atm_var = [80, -90, 10, 50] # [70, -60, 10, 80] # extended area compared to Precipitation data
grid_atm_var = [1, 1] # coarser resolution compared to Precipitation, as the interest is on large-scale patterns
# -
# Define variables to be downloaded. The required information should be given in a list and include the following, in the exact order:
# 1. **levelist**: level of interest, e.g. 0 for surface parameters, 500 for 500 hPa
# 2. **levtype**: leveltype of interest, e.g. pressure levels ('pl'), surface ('sfc')
# 3. **atm_var**: paramater of interest (e.g. the SLP is flagged as 151.128 at *MARS*)
# 4. **file_name**: name of the file to save the data
#
# The above information is used on the function to retrieve data from *MARS* with the *metview* package.
# Each entry is consumed positionally by atm_subset() via the download loop further below.
data_inputs = [[0, 'sfc', '151.128', 'D1_Mean_SLP'], # SLP data
               [500, 'pl', '129.128', 'D1_Mean_Z500'], # Z500 data
               [850, 'pl', '130.128', 'D1_Mean_T850'], # T850 data
               [850, 'pl', 'q', 'D1_Mean_Q850'], # Q850 data (not used for the main analysis)
               [0, 'sfc', '71.162', 'D1_Mean_WVFeast'], # Water Vapour Flux east data (not used for main analysis)
               [0, 'sfc', '72.162', 'D1_Mean_WVFnorth'], # Water Vapour Flux north data (not used for main analysis)
              ]
# # Data Download<a name="download"></a>
# ## Data preprocessing<a name="preprocessing"></a>
InitializationTime = datetime.now()
# +
# dates are chunked per year-month for efficient download, since MARS uses this subsetting for storing the data
dates_atm_vars = [list(v) for l, v in groupby(dates_generated_all[:], lambda x: x[:6])]
# repeat the last value of each chunk to the next one, since daily precip data need info from previous day as well!
dates_precipit = [dates_atm_vars[i][-1:] + dates_atm_vars[i+1] for i in range(len(dates_atm_vars)-1)] # from 2nd chunk
dates_precipit.insert(0, dates_atm_vars[0]) # append the 1st chunk so all the dates are now complete
# create a slighly larger extend so that the interpolation of precip data on the edges of the domain works better
# (N and E bounds at positions 0 and 3 grow by 2 degrees; W and S bounds shrink by 2)
Area_precipt_ext = [coord+2 if i in [0, 3] else coord-2 for i, coord in enumerate(area_precipt)]
# -
# -
# ## Download mean daily data of atmospheric variables<a name="atm_vars"></a>
# **For some reason, the multiprocessing that is used for speading up the process, works only if the atmospheric variables data are downloaded first, and then the precipitation data**. There is no understanding of how and why this issue occurs, but at least data are correct and there are no wrong outputs from the downloading process.
def atm_subset(input_data):
    '''
    Function for downloading data of atmospheric variables from MARS and calculating daily mean values.

    :param input_data: list of exactly 4 items, in this order:
        - levelist: level of interest, e.g. 0 for surface parameters, 500 for 500 hPa
        - levtype: leveltype of interest, e.g. pressure levels ('pl'), surface ('sfc')
        - atm_var: paramater of interest (e.g. the SLP is flagged as 151.128 at MARS)
        - dates_subset: the subset of dates (YYYYMMDD strings) to be downloaded
    :return: mv.Fieldset with one daily-mean field per date in dates_subset
    '''
    # NOTE(review): the original placed the text above *after* the unpacking line,
    # making it a no-op string expression instead of a real docstring.
    levelist, levtype, atm_var, dates_subset = input_data # inputs to be a list of 4 in specific order!
    # retrieve all hourly analysis fields for the requested dates from MARS
    fc_all = mv.retrieve(Class = 'ea', # class of data, e.g. ERA5 ('ea')
                         stream = 'oper', # stream of interest, e.g. Ensemble ('enfo'), Deterministic ('oper')
                         expver = 1, # experiment's version, e.g. Operational (1), Research (xxxx[A-Z/0-9])
                         type = 'an', # type of data, e.g. Analysis ('an')
                         param = atm_var,
                         levtype = levtype,
                         levelist = levelist,
                         date = dates_subset,
                         time = list(range(0,24)), # all hourly timesteps
                         area = area_atm_var,
                         grid = grid_atm_var,
                        )
    Daily_sub = mv.Fieldset() # mv container for the daily means of dates_subset
    fields = mv.grib_get(fc_all, ['date']) # get the 'date' field of every downloaded GRIB message
    for day_i in dates_subset: # loop through the whole list of unique dates_subset
        used_indices = list(np.where(np.array(fields) == day_i)[0]) # indices that belong to the day of interest
        used_indices = np.array(used_indices, dtype='float64') # convert to float64 for using it at mv object
        daily_subset = fc_all[used_indices] # subset and keep only the fields of the day of interest
        Daily_sub.append(mv.mean(daily_subset)) # calculate the daily mean and append it
    return Daily_sub
# Download data per variable in a dictionary and name the keys, based on the variable name, e.g. for "D1_Mean_SLP", keep the "SLP" for the key.
#
# Note that for optimizing the downloading in MARS, it is preferable to loop through dates and download all variables, instead of looping through variables and downloading the dates ([find out more](https://confluence.ecmwf.int/display/WEBAPI/Retrieval+efficiency)). The latter is used in this script for making it simpler.
# +
Times = len(dates_atm_vars) # number of year-month chunks
AtmVar = {} # variable short name (e.g. 'SLP') -> full mv.Fieldset of daily means
for var in data_inputs:
    levelist, levtype, atm_var, file_name = var
    # one (level, levtype, param, dates-chunk) tuple per chunk, as expected by atm_subset
    Inputs = list(zip([levelist]*Times, [levtype]*Times, [atm_var]*Times, dates_atm_vars))
    pool_atmvar = multiprocessing.Pool() # object for multiprocessing
    # imap preserves chunk order, so the concatenation below stays chronological
    Daily = list(tqdm.tqdm(pool_atmvar.imap(atm_subset, Inputs),
                           total=Times, position=0, leave=True)) # list of mv.Fieldsets
    pool_atmvar.close()
    del(pool_atmvar)
    for i in range(1, Times): # concatenate all Fieldsets to the first one
        Daily[0].append(Daily[i])
    Daily = Daily[0] # keep the full set of the atmospheric variable data
    mv.write(dir_loc + file_name + '.grb', Daily) # save data
    # key by the suffix of the file name, e.g. 'D1_Mean_SLP' -> 'SLP'
    AtmVar[var[-1].split('_')[-1]] = Daily
del(Times, var, levelist, levtype, atm_var, file_name, Inputs, Daily, i)
# -
# ## Download total daily precipitation<a name="precipitation"></a>
def precip_subset(dates_subset):
    ''' Function for downloading precipitation data from MARS and calculating daily total values.

    :param dates_subset: list of YYYYMMDD strings; the FIRST date only supplies the
        previous-day forecast steps, so one daily field is produced per REMAINING date.
    :return: mv.Fieldset with one daily-total precipitation field per date (skipping the first)
    '''
    fc_all = mv.retrieve(Class = 'ea', # class of data, e.g. ERA5 ('ea')
                         stream = 'oper', # stream of interest, e.g. Ensemble ('enfo'), Deterministic ('oper')
                         expver = 1, # experiment's version, e.g. Operational (1), Research (xxxx[A-Z/0-9])
                         type = 'fc', # type of data, e.g. Forecast ('fc'), Analysis ('an')
                         param = 'tp', # used paramater: Total Precipitation ('tp' = '228.128')
                         levtype = 'sfc',
                         levelist = 0,
                         date = dates_subset, # use the subset of dates
                         time = [6, 18], # time steps of interest (forecast fields only at 06:00 & 18:00)
                         step = list(range(7,19)), # precipitation is calculated from short-range forecasted data
                         grid = grid_precipt,
                         area = Area_precipt_ext,
                         interpolation = '"--interpolation=grid-box-average"' # first-order conservative remapping
                        )
    Daily_sub = mv.Fieldset() # create the mv object for storing the daily values for the dates_subset
    for i_day in range(len(dates_subset) - 1): # loop through the whole list of unique dates_subset
        # downloaded data are in the sequence: day_i 06:00 steps 7-18 (12 steps), 18:00 steps 7-18 (12 steps)
        start_indice = 12 + 24*i_day # data for daily accumulation start at 18:00 step 7 of previous day
        end_indice = 12 + 24*(i_day+1) # data end at 06:00 step 12 of current day (24 hourly steps in total)
        sub = mv.sum(fc_all[start_indice : end_indice]) # total daily precipitation
        sub = mv.grib_set(sub, ['date', int(dates_subset[i_day + 1])]) # replace the date field with the correct date
        Daily_sub.append(sub) # append to Daily_sub metview object
    return Daily_sub
# +
pool_precip = multiprocessing.Pool() # object for multiprocessing for creating a list of mv.Fieldsets
# imap preserves chunk order, keeping the concatenation chronological
Precip = list(tqdm.tqdm(pool_precip.imap(precip_subset, dates_precipit),
                        total=len(dates_precipit), position=0, leave=True))
pool_precip.close()
for i in range(1, len(Precip)): # concatenate all Fieldsets to the first one
    Precip[0].append(Precip[i])
Precip = Precip[0] # keep the full set of the precipitation data
Precip = Precip*1000 # convert to mm (ERA5 'tp' comes in metres)
Precip = mv.read(data=Precip, area=area_precipt) # crop to the actual area of interest
mv.write(dir_loc + 'D1_Total_Precipitation.grb', Precip) # Save the daily total precipitation file
del(pool_precip, i)
# -
print('Downloading completed in:', datetime.now() - InitializationTime, ' HR:MN:SC.')
del(InitializationTime)
| Scripts/Script1 Data Download.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
from mma import *
import gudhi as gd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from multipers import *
from joblib import Parallel, delayed
from multiprocessing import Pool, Manager, cpu_count
from sklearn.neighbors import KernelDensity
from joblib import parallel_backend
from numpy.polynomial.polynomial import polyfit
from benchmark_functions import *
from pandas import read_csv
from gudhi.point_cloud.timedelay import TimeDelayEmbedding
import seaborn as sns
# ## Retrieves the datasets from UCR
def get_dataset(dataset = "Coffee"):
    """Load the training split of a UCR time-series dataset as a numpy array.

    The file is expected at ./UCR/<dataset>/<dataset>_TRAIN.tsv; the first
    column (the class label) is used as the index and therefore dropped.
    """
    tsv_file = "./UCR/{0}/{0}_TRAIN.tsv".format(dataset)
    frame = read_csv(tsv_file, delimiter='\t', header=None, index_col=0)
    return np.array(frame)
data = get_dataset()
# time-delay embedding turns each 1-D series into a point cloud
tde = TimeDelayEmbedding().transform(data)
len(tde)
# ## A first example : Coffee. Time vs number of simplices
# ### Parameters
# benchmarks the time to compute coffee series
ntries = 1 # Numbers of tries of each series (to make a std, but that's with a parallel computing)
num = 10 # Numbers of lines in set_of_nlines
nlines = 300 # Number of lines at each tries
max_edge_length = 0.1 # Rips threshold
gaussian_var=0.3 # kde bandwidth
set_of_nlines = np.linspace(start=10, stop=300, num = num).astype(int) # set of number of lines
# ### Benchmark computation
times = np.empty(len(tde))
std = np.empty((len(tde), len(set_of_nlines))) # NOTE(review): allocated but never filled in this cell
num_simplices = np.empty(len(tde))
for i in tqdm(range(len(tde))):
    # density_persistence_benchmark returns (time, std, simplex count); std is discarded here
    times[i], _, num_simplices[i] = density_persistence_benchmark(
        tde[i],nlines, ntries = ntries, gaussian_var=gaussian_var, max_edge_length=max_edge_length,
        filtration="rips", max_dimension=2)
# ### Plot of this benchmark
# +
# Fit runtime as a function of the number of simplices.
coeffs = np.polyfit(num_simplices, times, 2) # polynomial fit of degree 2 (the old comment said degree 1)
poly = np.poly1d(coeffs)
yfit = lambda x: poly(x)
plt.scatter(num_simplices, times)
linspace_ = np.linspace(start=min(num_simplices), stop=max(num_simplices), num=100)
plt.plot(linspace_, yfit(linspace_), c='r', label = "linear regression : {}".format(poly))
plt.legend()
plt.xlabel("Number of simplices")
plt.ylabel("Time to compute, in seconds")
# Save AFTER setting the axis labels so they actually appear in the exported SVG
# (the original called savefig first, producing unlabeled figures).
plt.savefig("UCR_Coffee_nlines_" +str(nlines)+".svg")
plt.show()
# Same data on log-log axes: the slope of this degree-1 fit estimates the
# polynomial order of the runtime.
poly_log = np.poly1d(np.polyfit(np.log10(num_simplices), np.log10(times), 1))
yfit_log = lambda x: np.power(10, poly_log(np.log10(x)))
plt.scatter(num_simplices, times)
linspace_ = np.linspace(start=min(num_simplices), stop=max(num_simplices), num=100)
plt.loglog(linspace_, yfit_log(linspace_), c='r', label = "linear regression : {}".format(poly_log))
plt.legend()
plt.xlabel("Number of simplices")
plt.ylabel("Time to compute, in seconds")
plt.savefig("UCR_Coffee_nlines_loglog_" +str(nlines)+".svg")
plt.show()
# -
# ## Another example : Wine dataset. Time vs number of simplices
data = get_dataset("Wine")
tde = TimeDelayEmbedding().transform(data)
print(len(tde))
# benchmarks the time to compute Wine series
ntries = 1 # tries per series
num = 10 # number of entries in a set_of_nlines sweep
nlines = 300 # number of lines per try
max_edge_length = 0.1 # Rips threshold
gaussian_var=0.3 # kde bandwidth
max_alpha_square=0.1 # alpha-complex threshold
# Compare simplex counts of the two filtrations on the first embedded series.
simplex_tree = gd.RipsComplex(points=tde[0],max_edge_length= max_edge_length).create_simplex_tree(max_dimension=2)
print(simplex_tree.num_simplices())
simplex_tree = gd.AlphaComplex(points=tde[0]).create_simplex_tree(max_alpha_square=max_alpha_square)
print(simplex_tree.num_simplices())
times = np.empty(len(tde))
num_simplices = np.empty(len(tde))
for i in tqdm(range(len(tde))):
    # (time, std, simplex count) per series; std is discarded
    times[i], _, num_simplices[i] = density_persistence_benchmark(
        tde[i],nlines, ntries = ntries, gaussian_var=gaussian_var, max_edge_length=max_edge_length,
        filtration="alpha", max_dimension=2, max_alpha_square=max_alpha_square)
# Degree-1 fit of runtime vs simplex count.
coeffs = np.polyfit(num_simplices, times, 1)
poly = np.poly1d(coeffs)
yfit = lambda x: poly(x)
plt.scatter(num_simplices, times)
linspace_ = np.linspace(start=min(num_simplices), stop=max(num_simplices), num=100)
plt.plot(linspace_, yfit(linspace_), c='r', label = "Linear regression : {}".format(poly))
plt.legend()
plt.xlabel("Number of simplices")
plt.ylabel("Time to compute, in seconds")
# Save AFTER setting the axis labels so they appear in the exported SVG
# (the original called savefig first, producing an unlabeled figure).
plt.savefig("UCR_Wine_nlines_" +str(nlines)+".svg")
plt.show()
| Benchmark_UCR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os.path
sys.path.insert(1, r'H:\DAV_Desktop\Users\Prescott\code')
from fizzy import checktime
# -
# NOTE(review): joining from a bare drive letter ('H:') yields a *drive-relative*
# path on Windows ('H:DAV_Desktop\\...') that resolves against the drive's current
# directory; anchor at the drive root instead (matches the sys.path entry above).
pth = os.path.join('H:\\', 'DAV_Desktop', 'Users', 'andrew', '140', 'batchbuffer')
print(pth)
checktime(pth, start='2020-01-14 15:00')
| scripts/timer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import networkx as nx
import math
# Full OMIM table: one row per phenotype/gene entry (tab separated).
data = pd.read_csv('../data_omim/full_omim_table.txt', delimiter = "\t")
data
#matrix = loadmat('../data_omim/MimMiner_Exp_AC_T_TXCS_basedonACMESH_filt_RW.mat')
# MimMiner text-mining similarity matrix between MIM numbers (tab separated);
# presumably the header row starts with the first MIM id '100050' -- see lookups below.
matrix = pd.read_csv('../data_omim/MimMiner_Exp_AC_T_TXCS_basedonACMESH_filt_RW.txt', delimiter = "\t")
matrix
# Memo caches for the lookup helpers below: node -> row index, node -> full matrix row.
my_ids_dictionary = dict()
my_rows_dictionary = dict()
def get_id_of_node(node):
    """Return the row index of *node* in the MimMiner similarity matrix.

    The node is looked up in the '100050' column (the first MIM id, which
    serves as the column of row identifiers -- TODO confirm against the file
    layout). Successful lookups are memoized in my_ids_dictionary.
    Returns None when the node is not present (misses are not cached,
    matching the original behavior).
    """
    if node in my_ids_dictionary:
        return my_ids_dictionary[node]
    matches = matrix.loc[matrix['100050'] == node]
    if matches.empty:
        return None  # explicit instead of the original's implicit fall-through
    row_id = matches.index[0]  # same value iterrows() yielded as row[0]
    my_ids_dictionary[node] = row_id
    return row_id
def get_similarity_score(node1, node2):
    """Return the MimMiner similarity score between two MIM numbers.

    Returns None when either node cannot be located in the matrix. The +2
    offset skips leading columns of the row before the per-node values --
    TODO confirm against the matrix layout. node1's full row is memoized
    in my_rows_dictionary.
    """
    node2_id = get_id_of_node(node2)
    if node2_id is None:  # original compared with '!= None'
        return None
    if node1 not in my_rows_dictionary:
        matches = matrix.loc[matrix['100050'] == node1]
        if matches.empty:
            return None  # node1 absent: original fell through to None here too
        my_rows_dictionary[node1] = matches.iloc[0]  # the Series iterrows() yielded as row[1]
    return my_rows_dictionary[node1][node2_id + 2]
# Sanity check: second id is not a valid MIM number, so this prints None.
print(get_similarity_score(21412, 612124124367))
len(data)
G_diseases = nx.Graph()
# O(n^2) pass over all pairs of OMIM rows; 'i > j' keeps each unordered pair once.
for i, row in enumerate(data.iterrows()):
    for j, row2 in enumerate(data.iterrows()):
        if i > j and not math.isnan(row[1]['phenotypeMimNumber']) and not math.isnan(row2[1]['phenotypeMimNumber']):
            phenotypeMimNumber1 = int(row[1]['phenotypeMimNumber'])
            phenotypeMimNumber2 = int(row2[1]['phenotypeMimNumber'])
            similarity_score = get_similarity_score(phenotypeMimNumber1, phenotypeMimNumber2)
            # truthiness check: a score of exactly 0.0 (or None) adds no edge
            if similarity_score:
                G_diseases.add_node(phenotypeMimNumber1)
                G_diseases.add_node(phenotypeMimNumber2)
                G_diseases.add_edge(phenotypeMimNumber1, phenotypeMimNumber2, weight=similarity_score)
            #print(similarity_score)
    if i % 10 == 0:
        print(i / len(data))  # crude progress fraction
nx.write_pajek(G_diseases, "../networks/disease_disease_network.net")
# +
#G_diseases.nodes(0)
# -
| RWHNDR method/scripts/construct_disease_disease_network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install git+https://github.com/v3io/v3io-gputils
# !rm -f /User/horovod/cats_dogs.hd5
HOROVOD_JOB_NAME = "horovod-cats-n-dogs"
# +
from v3io_gputils.mpijob import MpiJob
# MpiJob args: job name, docker image, [training script, dataset dir, output dir].
# NOTE(review): the dataset path previously read 'image-classification-horvod'
# (missing an 'o') while the sibling paths spell 'horovod' -- corrected here;
# verify against the actual directory layout on the shared volume.
job = MpiJob(HOROVOD_JOB_NAME, 'iguaziodocker/horovod:0.1.1', ['/User/demos/gpu-demos/image-classification-horovod/hvd_dogncat.py',
             '/User/demos/gpu-demos/image-classification-horovod/cats_and_dogs_filtered',
             '/User/demos/gpu-demos/image-classification-horovod/horovod'])
job.replicas(8).gpus(1)  # 8 workers, 1 GPU each
job.submit()
# +
# !kubectl get pods | grep $HOROVOD_JOB_NAME
# -
# !kubectl get mpijob $HOROVOD_JOB_NAME -o yaml
job.delete()
| demos/gpu-demos/image-classification-horovod/02-training-with-horovod-cats-n-dogs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# # This is a research notebook to try to find correlation between hyperwave and volume
# !pip install --upgrade pip
# !pip install quandl
# +
import time
from datetime import datetime, date, time, timedelta
import json
import requests
import os.path as path
from scipy.spatial import ConvexHull
import quandl
# NOTE(review): hard-coded API credentials are committed here (Quandl key and
# plotly key below). Rotate them and load from environment/config instead.
quandl.ApiConfig.api_key = "cEofBzyzyihN3fj62kp4"
import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
plotly.tools.set_credentials_file(username='davzucky', api_key='aZw7LRJOSDcPJyIk2G0U')
# This is to avoid warning when dividing by zero
np.seterr(divide='ignore', invalid='ignore')
# -
path.abspath('.')
# mydata = pd.DataFrame(quandl.get("FRED/GDP",returns="numpy", collapse="weekly",qopts = { 'columns': ['ticker', 'date', 'close', 'open', 'low', 'high'] },))
# mydata['diff'] = mydata['open'] - mydata['close']
# +
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# display(mydata)
# +
# f = mydata
# # display(f['open'])
# f = f.set_index('date')
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# display(weekly_data )
# -
# ## This is the place where you setup the symbol and phases
# +
# Catalogue of instruments: each dict gives the display name, which loader to
# use ('data_source' keys into the `sources` registry below), and the symbol
# that loader understands. '<NAME>' placeholders presumably come from an
# anonymization pass -- restore the original labels if needed.
data_3DSystemsCorp ={
    'name':'3D SYSTEMS CORP',
    'data_source':'quandl',
    'symbol':'DDD',
}
data_Valeant = {
    'name':'Baush health Company. Ex Valeant',
    'data_source':'quandl',
    'symbol':'VRX',
}
data_Amazon = {
    'name':'Amazon',
    'data_source':'investopedia',
    'symbol':'AMZN',
}
data_Square = {
    'name':'Square',
    'data_source':'investopedia',
    'symbol':'SQ',
}
data_Netflix= {
    'name':'Netflix',
    'data_source':'investopedia',
    'symbol':'NFLX',
}
data_Apple = {
    'name':'Apple',
    'data_source':'investopedia',
    'symbol':'AAPL',
}
data_Intuit = {
    'name':'Intuit',
    'data_source':'investopedia',
    'symbol':'INTU',
}
data_Gilead_Sciences = {
    'name':'Gilead Sciences',
    'data_source':'investopedia',
    'symbol':'GILD',
}
data_Abiomed = {
    'name':'Abiomed',
    'data_source':'investopedia',
    'symbol':'ABMD',
}
data_Alphabet = {
    'name':'Alphabet',
    'data_source':'investopedia',
    'symbol':'GOOGL',
}
data_BTCUSD = {
    'name':'BTCUSD',
    'data_source':'CryptoCompare',
    'symbol':'BTC-USD',
}
data_DowJones = {
    'name':'<NAME>',
    'data_source':'investopedia',
    'symbol':'^DJI',
}
data_Chevron = {
    'name':'Chevron',
    'data_source':'investopedia',
    'symbol':'CVX',
}
data_DowJones_1920_1933 = {
    'name':'<NAME> 1920-1933',
    'data_source':'LocalData',
    'symbol':'DowJones_1920_1933',
}
# NOTE(review): 'data_DowJones' is defined twice -- this assignment silently
# overwrites the investopedia-based one above. Rename one of them if both
# data sources are meant to stay selectable.
data_DowJones = {
    'name':'<NAME>',
    'data_source':'LocalData',
    'symbol':'DowJones_Full',
}
data_10Y_Treasury_Note = {
    'name':'10y_usd_treasury_note',
    'data_source':'LocalData',
    'symbol':'10y_usd_treasury_note',
}
data_Caterpilar = {
    'name':'Caterpilar',
    'data_source':'investopedia',
    'symbol':'CAT',
}
data_DR_Horton = {
    'name':'<NAME>',
    'data_source':'investopedia',
    'symbol':'DHI',
}
# +
# Select which instrument the rest of the notebook analyses.
company_setup = data_DR_Horton
name = company_setup ['name']
data_source = company_setup ['data_source']
symbol = company_setup ['symbol']
# constant used for other computation
root_date = datetime(1800, 1, 6)
# -
# # Helper function
# This section contain helper function that are here to load and clean the raw data
# +
def get_nb_weeks(row, base_date):
    """Number of whole weeks between base_date and the row's 'date' value."""
    elapsed_days = (row["date"] - base_date).days
    return int(elapsed_days / 7)

def add_weekid_and_price_is_closing_up(df, base_date):
    """Annotate *df* with a week id and an up-close flag, drop volume, sort by date.

    - 'is_price_closing_up': True where the close is strictly above the previous close.
    - 'weekId': whole weeks elapsed since base_date.
    """
    df['is_price_closing_up'] = df.close > df.close.shift()
    df['weekId'] = df.apply(lambda record: get_nb_weeks(record, base_date), axis=1)
    if "volume" in list(df.columns.values):
        df = df.drop("volume", axis=1)
    return df.sort_values(by="date")
class Investopedia_Loader():
    """Scrape weekly/daily historical prices for a symbol from Investopedia."""

    def __init__(self, symbol, timeframe='weekly'):
        self._symbol = symbol
        self._timeframe = timeframe

    def _clean_data(self, df):
        """Normalize the scraped table: lower-case OHLCV columns, datetime 'date' index."""
        df.loc[:,('date')] = pd.to_datetime(df.loc[:,('Date')])
        renamed = df.rename(columns={'Adj. Close':'close', 'Low':'low', 'Open':'open', 'High':'high','Volume':'volume'})
        renamed = renamed.set_index('date')
        renamed['date'] = renamed.index  # keep 'date' available as a plain column too
        return renamed

    def _fetch_data(self):
        """Download the historical-prices HTML table and drop incomplete rows."""
        url_symbol = "https://www.investopedia.com/markets/api/partial/historical/?Symbol={}&Type=Historical+Prices&Timeframe={}&StartDate=Jan+01%2C+1900".format(self._symbol, self._timeframe)
        tables = pd.read_html(url_symbol, header=0, parse_dates=True)
        return tables[0].dropna()

    def get_dataframe(self):
        """Fetch and clean the price history in one call."""
        return self._clean_data(self._fetch_data())
class Quandl_Loader():
    """Fetch WIKI/PRICES daily bars from Quandl, optionally resampled to weekly."""

    def __init__(self, symbol, timeframe='weekly'):
        self._symbol = symbol
        self._timeframe = timeframe

    def _aggregate_ticker_weekly(self, df):
        """Collapse daily bars into Monday-anchored weekly bars.

        The weekly close is the last Friday close, re-anchored onto the same
        Monday bins used by the other columns.
        """
        parts = [df.open.resample('W-MON').last(),
                 df.close.resample('W-FRI').last().resample('W-MON').last(),
                 df.high.resample('W-MON').max(),
                 df.low.resample('W-MON').min(),
                 df.volume.resample('W-MON').sum()]
        weekly = pd.concat(parts, axis=1)
        weekly['date'] = weekly.index
        return weekly

    def _fetch_daily_data(self):
        """Pull the full daily history for the symbol from the WIKI/PRICES table."""
        daily = quandl.get_table('WIKI/PRICES',
                                 ticker = [self._symbol],
                                 qopts = { 'columns': ['ticker', 'date', 'close', 'open', 'low', 'high', 'volume'] },
                                 date = { 'gte': '1900-01-01'},
                                 paginate=True) #, 'lte': '2016-12-31'
        daily = daily.set_index('date')
        daily['date'] = daily.index
        return daily

    def get_dataframe(self):
        """Return cleaned daily or weekly bars depending on the configured timeframe."""
        daily = self._fetch_daily_data()
        if self._timeframe == 'daily':
            return daily.dropna()
        return self._aggregate_ticker_weekly(daily).dropna()
class CryptoCompare_Loader():
    """Download daily OHLCV candles for a crypto pair (e.g. 'BTC-USD') from CryptoCompare."""

    def __init__(self, symbol, timeframe='weekly'):
        self._symbol = symbol
        self._timeframe = timeframe

    def _aggregate_ticker_weekly(self, df):
        """Collapse daily candles into Monday-anchored weekly candles.

        Crypto trades 7 days, so the weekly close is the last Sunday close,
        re-anchored onto the Monday bins used by the other columns.
        """
        parts = [df.open.resample('W-MON').last(),
                 df.close.resample('W-SUN').last().resample('W-MON').last(),
                 df.high.resample('W-MON').max(),
                 df.low.resample('W-MON').min(),
                 df.volume.resample('W-MON').sum()]
        weekly = pd.concat(parts, axis=1)
        weekly['date'] = weekly.index
        return weekly

    def _fetch_daily_data(self):
        """Fetch the full daily history for the pair and normalize columns/index."""
        base_ccy, quote_ccy = self._symbol.split('-')
        url = "https://min-api.cryptocompare.com/data/histoday?fsym={}&tsym={}&allData=true&aggregate=3&e=CCCAGG".format( \
            base_ccy, \
            quote_ccy)
        payload = requests.get(url).json()
        daily = pd.DataFrame(payload["Data"])
        daily['date'] = pd.to_datetime(daily['time'],unit='s')
        daily = daily.rename( columns={"volumeto": "volume"})  # quote-currency volume
        daily = daily.set_index('date')
        daily['date'] = daily.index
        return daily

    def get_dataframe(self):
        """Return cleaned daily or weekly candles depending on the configured timeframe."""
        daily = self._fetch_daily_data()
        if self._timeframe == 'daily':
            return daily.dropna()
        return self._aggregate_ticker_weekly(daily).dropna()
class LocalData_Loader():
    """Read OHLC history from a local CSV at ./data/<symbol>.csv."""

    def __init__(self, symbol, timeframe='weekly'):
        self._file_name = "{}.csv".format(symbol)
        self._timeframe = timeframe  # kept for interface parity; the CSV is used as-is

    def get_dataframe(self):
        """Load the CSV, lower-case the headers, and index by the parsed 'date' column."""
        csv_path = path.join(path.abspath('.'), 'data', self._file_name)
        frame = pd.read_csv(csv_path, header=0, parse_dates=True)
        frame = frame.rename(columns={col: col.lower() for col in frame.columns})
        frame.loc[:,('date')] = pd.to_datetime(frame.loc[:,('date')])
        frame = frame.set_index('date')
        frame['date'] = frame.index
        return frame
# https://min-api.cryptocompare.com/data/histoday?fsym=BTC&tsym=USD&allData=true&aggregate=3&e=CCCAGG
# Loader registry: keys must match the 'data_source' field of the data_* dicts above.
sources = {
    "investopedia": Investopedia_Loader,
    "quandl": Quandl_Loader,
    "CryptoCompare": CryptoCompare_Loader,
    "LocalData": LocalData_Loader
}
def get_historical_data(symbol, source, base_date, timeframe='weekly'):
    """Load price history via the loader registered for *source*, indexed by weekId.

    The frame is annotated by add_weekid_and_price_is_closing_up, re-indexed
    on 'weekId', and keeps 'weekId' as a plain column as well.
    """
    loader = sources[source](symbol, timeframe)
    history = add_weekid_and_price_is_closing_up(loader.get_dataframe(), base_date)
    history = history.reset_index(drop=True).set_index('weekId')
    history['weekId'] = history.index
    return history
# +
# url = "https://min-api.cryptocompare.com/data/histoday?fsym=BTC&tsym=USD&allData=true&aggregate=3&e=CCCAGG"
# df = pd.read_json(url)
# df = pd.DataFrame(df.locations.values.tolist())['Data']
# display(df)
# import json
# import pandas as pd
# import requests
# r = requests.get(url)
# # print r.json() #
# array = json.dumps(r.json())
# # print(data["Data"])
# data = json.loads(array)
# df = pd.DataFrame(data["Data"])
# df['date'] = pd.to_datetime(df['time'],unit='s')
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# display(df)
# -
# ## Function that allow to calculate the weekid from a start date
# +
def get_weekId_max_price(df):
    """Return the weekId of the row with the highest close.

    Uses idxmax (index *label*) rather than argmax: on modern pandas
    Series.argmax returns the positional index, so df.loc[...] on it is
    wrong whenever the index is not a default RangeIndex.
    """
    return df.loc[df['close'].idxmax()]['weekId']
def get_weekId_min_price(df):
    """Return the weekId of the row with the lowest close (idxmin, see above)."""
    return df.loc[df['close'].idxmin()]['weekId']
# +
# def get_weekId_cartesian_product(df):
# df_from = df.loc[:,('weekId',"close")].rename(index=str, columns={"weekId": "weekId_from", "close": "close_from"})
# df_to = df.loc[:,('weekId',"close")].rename(index=str, columns={"weekId": "weekId_to", "close": "close_to"})
# df_cartesian = df_from.assign(foo=1).merge(df_to.assign(foo=1)).drop('foo', 1)
# return df_cartesian [(df_cartesian .weekId_from < df_cartesian .weekId_to)]
# -
# # From here we start fetching the data
# Above was only about setting up some basic function
# +
# df_daily_price_raw = get_raw_historical_data(symbol, 'Daily')
# Weekly history for the configured company, indexed by weekId.
# NOTE(review): 'Weekly' (capitalized) never equals the loaders' lowercase
# 'daily' check, so the weekly aggregation path is taken either way.
df_weekly_price = get_historical_data(symbol, data_source, root_date, 'Weekly')
# df_weekly_price = add_weekid_and_price_is_closing_up(df_weekly_price_raw)
# Restrict the series to the run-up window: from the pre-peak minimum close
# up to the all-time maximum close.
max_price_weekId = get_weekId_max_price(df_weekly_price)
df_weekly_price_until_max = df_weekly_price [(df_weekly_price.weekId <= max_price_weekId)]
min_price_before_max_weekId = get_weekId_min_price(df_weekly_price_until_max)
df_weekly_price_until_max = df_weekly_price_until_max [(df_weekly_price_until_max.weekId >= min_price_before_max_weekId)]
# weekId_close_from_to = get_weekId_cartesian_product(df_weekly_price_until_max)
# -
print(max_price_weekId)
print(min_price_before_max_weekId)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(df_weekly_price_until_max)
# +
# print(max_price_weekId)
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# display(df_weekly_price)
# df_weekly_price.ix[:df_weekly_price_until_max]
# -
# +
def get_line_slope_and_origine(df, x1_col_name = 'x1',
                               y1_col_name = 'y1',
                               x2_col_name = 'x2',
                               y2_col_name = 'y2',
                               m_col_name = 'm',
                               b_col_name = 'b'):
    """Add slope and intercept columns for the line through (x1, y1)-(x2, y2).

    Mutates and returns *df*. Vertical segments (x1 == x2) divide by zero
    and produce inf/NaN values in the slope column.
    """
    rise = df[y1_col_name] - df[y2_col_name]
    run = df[x1_col_name] - df[x2_col_name]
    df[m_col_name] = rise / run
    df[b_col_name] = df[y1_col_name] - df[x1_col_name] * df[m_col_name]
    return df
def get_normalize_column(df, column_name = 'close'):
    """Append a '<column>_normalize' column with the column min-max scaled to [0, 1].

    Mutates and returns *df*. A constant column divides by zero and yields
    inf/NaN values in the new column.
    """
    target = '{}_normalize'.format(column_name)
    values = df[column_name]
    lo = values.min()
    hi = values.max()
    df[target] = (values - lo) / (hi - lo)
    return df
def get_mean_square_error(y_true, y_pred):
    """Mean *signed* residual between truth and prediction.

    NOTE(review): despite the name, residuals are not squared (the squared
    version was deliberately disabled), so positive and negative errors
    cancel. Confirm this is the intended score before renaming.
    """
    residuals = y_true - y_pred
    return np.sum(residuals) / len(y_true)
def nb_is_lower(y_true, y_pred):
    """Count how many entries of y_true are at or below y_pred."""
    return len(y_true[y_true <= y_pred])
def get_y(x, m, b):
    """Evaluate the line y = m*x + b."""
    return m * x + b
def calculate_mean_square_error(row, df):
    """Mean signed error of the line (row['m'], row['b']) against df['close'] over df['weekId']."""
    predicted = get_y(df['weekId'], row['m'], row['b'])
    return get_mean_square_error(df['close'], predicted)
def nb_cut_price_low(row, df):
    """Number of weeks whose low touches or falls below the line defined by *row*."""
    predicted = get_y(df['weekId'], row['m'], row['b'])
    return nb_is_lower(df['low'], predicted)
# def normalize_column(df, column_name = 'close'):
def get_hull_tangent(df_input, x_column_name = 'weekId', y_column_name = 'close'):
    """Build the convex-hull segments of the (weekId, close) cloud, one row per segment.

    Adds 'close_normalize'/'weekId_normalize' columns to df_input (mutates it),
    then returns a DataFrame with endpoint coordinates (raw, normalized, and as
    dates relative to the module-level root_date), slope/intercept in both raw
    and normalized space, plus per-segment scoring columns.
    """
    df_input = get_normalize_column(df_input, 'close')
    df_input= get_normalize_column(df_input, 'weekId')
    hull = ConvexHull(df_input[[x_column_name, y_column_name]].dropna())
    # each simplex is a pair of positional vertex indices; order them (min, max)
    hull_results = [ [min(pair[0], pair[1]), max(pair[0], pair[1])] for pair in hull.simplices]
    data_from_to = [{"x1": df_input['weekId'].iloc[pair[0]], \
                     "x1_date": root_date + timedelta(weeks = df_input['weekId'].iloc[pair[0]].item()), \
                     "x1_normalize": df_input['weekId_normalize'].iloc[pair[0]], \
                     "y1": df_input['close'].iloc[pair[0]], \
                     "y1_normalize": df_input['close_normalize'].iloc[pair[0]], \
                     "x2": df_input['weekId'].iloc[pair[1]], \
                     "x2_date": root_date + timedelta(weeks = df_input['weekId'].iloc[pair[1]].item()), \
                     "x2_normalize": df_input['weekId_normalize'].iloc[pair[1]], \
                     "y2": df_input['close'].iloc[pair[1]], \
                     "y2_normalize": df_input['close_normalize'].iloc[pair[1]]} for pair in hull_results]
    df = pd.DataFrame(data_from_to)
    # slope/intercept in raw coordinates ('m', 'b') and normalized coordinates
    df = get_line_slope_and_origine(df)
    df = get_line_slope_and_origine(df, \
                                    x1_col_name = 'x1_normalize', \
                                    y1_col_name = 'y1_normalize', \
                                    x2_col_name = 'x2_normalize', \
                                    y2_col_name = 'y2_normalize', \
                                    m_col_name = 'm_normalize', \
                                    b_col_name = 'b_normalize')
    df['angle'] = np.rad2deg(np.arctan2(df['m'], 1))
    df['angle_normalize'] = np.rad2deg(np.arctan2(df['m_normalize'], 1))
    df['weeks'] = np.abs(df['x1'] - df['x2'])  # segment length in weeks
    # mean signed error / low-cut count of each segment's line vs the full price series
    df['mean_error'] = df.apply(lambda row: calculate_mean_square_error (row, df_input),axis=1)
    df['nb_is_lower'] = df.apply(lambda row: nb_cut_price_low(row, df_input),axis=1)
    df['ratio_error_cut'] = df['mean_error'] / df['nb_is_lower']
    df['ratio_slope_y1_normalize'] = df['y1_normalize']/df['m_normalize']
    df['ratio_slope_y2_normalize'] = df['y2_normalize']/df['m_normalize']
    return df
# def calculate_line_standard(df):
# df = get_line_slope_and_origine(df)
# df['angle'] = np.rad2deg(np.arctan2(df['m'], 1))
# df['days'] = np.abs(df['x1'] - df['x2'])
# df['mean_square_error'] = df.apply(lambda row: calculate_mean_square_error (row, df),axis=1)
# df['nb_is_lower'] = df.apply(lambda row: nb_cut_price_low(row, df),axis=1)
# df['ratio_error_cut'] = df['mean_square_error'] / df['nb_is_lower']
# df['ratio_slope_y1_normalize'] = df['y1_normalize']/df['m']
# df['ratio_slope_y2_normalize'] = df['y2_normalize']/df['m']
# return df
# def calculate_line_normalize(df):
# df = get_line_slope_and_origine(df, \
# x1_col_name = 'x1_normalize', \
# y1_col_name = 'y1_normalize', \
# x2_col_name = 'x2_normalize', \
# y2_col_name = 'y2_normalize')
# df['angle'] = np.rad2deg(np.arctan2(df['m'], 1))
# df['days'] = np.abs(df['x1'] - df['x2'])
# df['mean_square_error'] = df.apply(lambda row: calculate_mean_square_error (row, df),axis=1)
# df['nb_is_lower'] = df.apply(lambda row: nb_cut_price_low(row, df),axis=1)
# df['ratio_error_cut'] = df['mean_square_error'] / df['nb_is_lower']
# df['ratio_slope_y1_normalize'] = df['y1_normalize']/df['m']
# df['ratio_slope_y2_normalize'] = df['y2_normalize']/df['m']
# return df
# def get_hull_tangent_standard(df):
# df = get_normalize_column(df, 'close')
# df = get_normalize_column(df, 'weekId')
# return calculate_line_standard(get_hull_tangent(df))
# def get_hull_tangent_normalize(df):
# df = get_normalize_column(df, 'close')
# df = get_normalize_column(df, 'weekId')
# return calculate_line_normalize(get_hull_tangent(df, 'weekId_normalize', 'close_normalize'))
def get_upper_path(df):
    """Follow hull segments from the first row, chaining x2 -> next x1, until the
    segment ending at the global maximum-price week; return the visited x values.

    NOTE(review): relies on the module-level max_price_weekId computed above, and
    expects df ordered so row 0 starts the upper path with a continuation row for
    every x2 -- otherwise .iloc[0] on an empty selection raises.
    """
    row = df.iloc[0]
    upper_path = [row['x1'], row['x2']]
    while(row['x2'] != max_price_weekId):
        row = df.loc[df['x1'] == row['x2']].head().iloc[0]
        upper_path.append(row['x2'])
    return upper_path
def delete_path(df, path):
    """Return a copy of *df* without the segments along *path*.

    *path* is a sequence of x-coordinates; each consecutive (start, stop)
    pair identifies one segment row (x1 == start, x2 == stop) to drop.
    The result is sorted by x1 ascending, then y2 descending.
    """
    remaining = df.copy(True)
    for start, stop in zip(path, path[1:]):
        keep = ~((remaining.x1 == start) & (remaining.x2 == stop))
        remaining = remaining[keep]
    return remaining.sort_values(['x1','y2'], ascending=[True, False])
def delete_upper_path(df):
return delete_path(df, get_upper_path(df.sort_values(['x1','x2'], ascending=[True, False])))
def delete_shorter_path(df):
    """Of the two topmost hull paths, remove whichever spans fewer points."""
    ordered = df.sort_values(['x1', 'x2'], ascending=[True, False])
    first = get_upper_path(ordered)
    second = get_upper_path(ordered.iloc[1:])
    shorter = first if len(first) < len(second) else second
    return delete_path(ordered, shorter)
# +
# hull_results = [ [min(pair[0], pair[1]), max(pair[0], pair[1])] for pair in hull.simplices]
# data_from_to = [{"x1": df_weekly_price_until_max['weekId'].iloc[pair[0]], \
# "y1": df_weekly_price_until_max['close'].iloc[pair[0]], \
# "x2": df_weekly_price_until_max['weekId'].iloc[pair[1]], \
# "y2": df_weekly_price_until_max['close'].iloc[pair[1]]} for pair in hull_results]
# df = pd.DataFrame(data_from_to)
# -
def graph_hyperwave(df_data, df_hyperwave):
    """Build a plotly figure dict: the normalized close price as a line trace
    plus one straight line shape per hull segment in df_hyperwave.

    Returns the {'data': ..., 'layout': ...} dict expected by py.iplot.
    """
    price_trace = go.Scatter(
        x = df_data['weekId'].values,
        y = df_data['close_normalize'].values,
        mode = 'lines',
        name = 'lines'
    )
    # One line shape per hull segment (x1, y1) -> (x2, y2).
    shapes = []
    for _, seg in df_hyperwave.iterrows():
        shapes.append({
            'type': 'line',
            'x0': seg['x1'],
            'y0': seg['y1'],
            'x1': seg['x2'],
            'y1': seg['y2'],
            'line': {
                'color': 'rgb(55, 128, 191)',
                'width': 3,
            }
        })
    layout = {'shapes': shapes}
    return {
        'data': [price_trace],
        'layout': layout,
    }
# py.iplot(fig, filename='shapes-lines')
# Build the hull-tangent table for the pre-max price window, display it in
# full, and plot it. get_hull_tangent, df_weekly_price_until_max and py are
# defined in earlier notebook cells.
df_hull_tangent = get_hull_tangent(df_weekly_price_until_max)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(df_hull_tangent.sort_values(['x1','x2'], ascending=[True, False]))
py.iplot(graph_hyperwave(df_weekly_price_until_max,df_hull_tangent), filename='hyperwave')
# df_hull_tangent = delete_upper_path(df_hull_tangent)
df_hull_tangent.sort_values(['x1','x2'], ascending=[True, False])
# +
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# display(df_weekly_price_until_max)
# df =delete_upper_path(df_weekly_price_until_max)
# display(df)
class hyperwave:
    # Detects 'hyperwave' phase structure (phases 1 to 4) in a weekly price
    # DataFrame, using convex-hull tangent segments produced by the
    # module-level get_hull_tangent() helper defined in an earlier cell.
    #
    # NOTE(review): the pandas calls below use Series.argmax()/argmin() as if
    # they return index *labels* (old-pandas behaviour, today's idxmax/idxmin)
    # -- confirm the pandas version this was written against.
    def __init__(self, \
            min_m = 0.5, \
            phase2_weeks_find_max = 156, \
            phase_grow_factor = 2, \
            phase4_min_weeks = 15, \
            phase4_validation_previous_high = 1.3):
        # min_m: minimum slope threshold (stored; not used in this chunk).
        # phase2_weeks_find_max: lookback window (weeks) for the pre-wave high.
        # phase_grow_factor: slope ratio that starts a new phase when grouping.
        # phase4_min_weeks: minimum duration for a standalone trailing phase.
        # phase4_validation_previous_high: factor a phase high may exceed the
        #     previous phase's high before the two groups are merged.
        self.phase2_weeks_find_max = phase2_weeks_find_max
        self.phase_grow_factor = phase_grow_factor
        self.min_m = min_m
        self.phase4_min_weeks= phase4_min_weeks
        self.phase4_validation_previous_high = phase4_validation_previous_high
    def get_hyperwave(self, df):
        """Run the full detection pipeline on df (expects 'weekId'/'close').

        Returns (df_hull_hyperwave, hw_phases_temp, hyperwave): the hull
        segments of the bounded wave, the grouped segment indices per phase,
        and a dict mapping phase number -> chosen segment attributes.
        """
        # Step 1 - Get the raw Hull from max and min raw data
        df_min_to_max = self._borne_raw_data_between_max_to_min( df )
        max_weekId = df_min_to_max.loc[ df_min_to_max.loc[:,('weekId')].argmax(), 'weekId']
        df_post_max = df.loc[max_weekId:]
        df_hull = self._order_and_reset_index( self._delete_above_path( get_hull_tangent( df_min_to_max )))
        hw_phases_first_round = self._group_hyperwave_phase_1_to_4(df_hull, df)
        # Step 2 - Find max Price prior of start hyperwave
        # Keep at most the last three groups; negative index counts from the end.
        first_phase_id = min(len(hw_phases_first_round), 3) * -1
        phase_2 = hw_phases_first_round [first_phase_id ]
        min_week = self._get_phase_start_week( df_hull, phase_2 )
        max_price_weeks_before_start_week = self._get_max_price_week_before(df, min_week)
        hw_start_weekId = self._get_weekId_first_price_greater_than( df_min_to_max, \
            min_week, \
            max_price_weeks_before_start_week )
        # Step 3 - Get new Hull for the borned hyperwave raw data
        df_hyperwave_raw_data = df_min_to_max[(df_min_to_max.weekId >= hw_start_weekId )]
        df_hull_hyperwave = self._order_and_reset_index( \
            self._delete_above_path( get_hull_tangent( df_hyperwave_raw_data )))
        hw_phases_temp = self._group_hyperwave_phase_1_to_4(df_hull_hyperwave, df)
        # NOTE(review): with a negative stop this slice keeps everything
        # *before* the last |max_nb_phases| groups (and is empty when there
        # are exactly 3) -- confirm [max_nb_phases:] was not intended.
        max_nb_phases = min(len(hw_phases_temp), 3) * -1
        hw_phases_temp = hw_phases_temp[:max_nb_phases]
        print(hw_phases_temp)
        hyperwave = {}
        phase_id = 1
        # Phases are numbered from 2; phase 1 is synthesised afterwards.
        for phase in hw_phases_temp:
            phase_id = phase_id + 1
            # Pick the segment with the smallest error/cut ratio in the group.
            df_phase = df_hull_hyperwave.loc[df_hull_hyperwave.loc[phase].loc[:,('ratio_error_cut')].argmin()]
            hyperwave[ phase_id ] = df_phase[self._get_columns_not_normalize(df_phase)].to_dict()
        if len(hyperwave) >= 1:
            # NOTE(review): assumes key 2 exists whenever the dict is non-empty.
            hyperwave[1] = self._get_phase1(hyperwave[2], max_price_weeks_before_start_week)
        for (phase_id, phase) in hyperwave.items():
            phase["is-broken"] = self._is_price_below_line(df_post_max, phase['m'], phase['b'] )
        return (df_hull_hyperwave, hw_phases_temp, hyperwave )
    # def df_result_row_to_dictionary(df_result):
    def _is_price_below_line(self, df, m, b):
        # Flags whether any post-max close falls below the line y = m*x + b.
        # NOTE(review): DataFrame.any() returns a per-column Series, not a
        # scalar bool -- confirm callers expect that. Also mutates df in place.
        df['phase_line_week_price'] = df["weekId"] * m + b
        return df[df["close"] < df["phase_line_week_price"]].any()
        # return df.any(axis='is_price_below')
    def _get_phase1(self, dic_phase2, price_break):
        # Phase 1 is a synthetic horizontal base line at the pre-wave high.
        dic_phase1 = dic_phase2.copy()
        dic_phase1['angle'] = 0
        dic_phase1['b'] = price_break
        dic_phase1['index'] = 0
        dic_phase1['m'] = 0
        dic_phase1['mean_error'] = 0
        dic_phase1['nb_is_lower'] = 0
        dic_phase1['ratio_error_cut'] = 0
        dic_phase1['weeks'] = 0
        return dic_phase1
    def _get_columns_not_normalize(self, df):
        # Labels of df's first axis that are not *_normalize columns.
        return [c for c in df.axes[0] if "normalize" not in c]
    def _group_hyperwave_phase_1_to_4(self, df_result, df_raw):
        """Group consecutive positive-slope hull segments into phases.

        A new phase starts when a segment's slope reaches phase_grow_factor
        times the reference slope; trailing groups that are too short or that
        overshoot the previous phase's high are merged back into their
        predecessor.
        """
        filtered_hw = df_result[ (df_result.m_normalize > 0) ]
        current_phase_m = filtered_hw.iloc[0].m_normalize
        hw_phases_temp = []
        hw_current_phase = [filtered_hw.index[0]]
        # NOTE(review): .loc[2:] is *label*-based, so rows labelled 0 and 1
        # are skipped -- only sound after _order_and_reset_index.
        for index, row in filtered_hw.loc[2:].iterrows():
            if row.m_normalize < current_phase_m * self.phase_grow_factor:
                hw_current_phase.append(index)
            else:
                hw_phases_temp.append(hw_current_phase)
                hw_current_phase = [index]
            # NOTE(review): source indentation was ambiguous -- confirm this
            # update runs every iteration rather than only on a phase change.
            current_phase_m = row.m_normalize
        hw_phases_temp.append(hw_current_phase)
        # if len(hw_phases_temp) == 3:
        #     return hw_phases_temp
        # Merge trailing groups that are too short or spike above the previous
        # phase's high by more than the validation factor.
        for i in np.arange(len(hw_phases_temp)-1, 1, -1):
            phase = hw_phases_temp[i]
            current_phase_max = self._get_max_value_phase(phase, df_result, df_raw)
            previous_phase_max = self._get_max_value_phase(hw_phases_temp[i - 1], df_result, df_raw)
            if self._sum_group_weeks( filtered_hw, phase) < self.phase4_min_weeks \
                    or current_phase_max > previous_phase_max * self.phase4_validation_previous_high:
                hw_phases_temp.remove( phase )
                hw_phases_temp[ i - 1 ].extend( phase )
        display(hw_phases_temp)
        return hw_phases_temp
    def _get_max_phase_max(self, phase, df_result, df):
        # NOTE(review): exact duplicate of _get_max_value_phase -- candidate
        # for removal.
        df_phase = df_result.loc[phase]
        weekId_min = df_phase.loc[df_phase.loc[:,('x1')].argmin(), 'x1']
        weekId_max = df_phase.loc[df_phase.loc[:,('x2')].argmax(), 'x2']
        return self._get_max_value_between(df, weekId_min, weekId_max)
    def _get_max_value_phase(self, phase, df_result, df):
        # Highest close over the week span covered by the phase's segments.
        df_phase = df_result.loc[phase]
        weekId_min = df_phase.loc[df_phase.loc[:,('x1')].argmin(), 'x1']
        weekId_max = df_phase.loc[df_phase.loc[:,('x2')].argmax(), 'x2']
        return self._get_max_value_between(df, weekId_min, weekId_max)
    def _get_max_value_between(self, df, weekId_min, weekId_max):
        # Label-based (inclusive) slice of the raw data, then the max close.
        df_phase = df.loc[weekId_min:weekId_max]
        return df_phase.loc[ df_phase.loc[:,('close')].argmax(), 'close']
    def _order_and_reset_index(self, df):
        # Sort segments left-to-right (ties: larger x2 first), renumber 0..n.
        return df.sort_values(['x1','x2'], ascending=[True, False]) \
            .reset_index()
    def _sum_group_weeks(self, df, group):
        # Total duration in weeks of the segments listed in `group`.
        return df.loc[group].sum()['weeks']
    def _get_weekId_first_price_greater_than(self, df, min_week_id, max_price):
        # Earliest week at/after min_week_id whose close exceeds max_price.
        df_week_greater_than = df[(df.weekId >= min_week_id)]
        df_val_price_greater_than_max = df_week_greater_than[(df_week_greater_than.close > max_price)]
        return df_val_price_greater_than_max.loc[ df_val_price_greater_than_max.loc[:,('weekId')].argmin()]['weekId']
    def _get_phase_start_week(self, df_result, phase_lines):
        # First week covered by any segment of the phase.
        return min(df_result.iloc[phase_lines]['x1'])
    def _delete_above_path(self, df):
        # As we are using Hull to find the external phase of the graph. The positive mean_error as the way up
        # whereas the negative are the way down
        return df[(df.mean_error >= 0)]
    def _delete_below_path(self, df):
        # As we are using Hull to find the external phase of the graph. The positive mean_error as the way up
        # whereas the negative are the way down
        return df[(df.mean_error < 0)]
    def _borne_raw_data_between_max_to_min(self, df):
        # Born the dataframe from with all the value before weekId of Max and from them find the min to born the other side
        max_price_weekId = self._get_weekId_max_price(df)
        df_until_max = df.loc[:max_price_weekId]
        min_price_weekId = self._get_weekId_min_price( df_until_max )
        df_min_to_max = df_until_max.loc[min_price_weekId:]
        return df_min_to_max
    def _get_weekId_max_price(self, df):
        # Week label of the all-time-high close.
        return df.loc[ df.loc[:,('close')].argmax(), 'weekId']
    def _get_weekId_min_price(self, df):
        # Week label of the lowest close.
        return df.loc[ df.loc[:,('close')].argmin(), 'weekId']
    def _get_max_price(self, df, column_name = 'close'):
        # Highest value of column_name within df.
        return df.loc[ df.loc[:,(column_name)].argmax()][column_name]
    def _get_max_price_week_before(self, df, weekId):
        # Highest close over the phase2_weeks_find_max weeks ending at weekId.
        last_n_weeks_Items = df[(df.weekId <= weekId)].tail(self.phase2_weeks_find_max)
        max_price = self._get_max_price( last_n_weeks_Items )
        return max_price
# +
# Run the detector on the full weekly price series and inspect the results.
hw = hyperwave()
# NOTE(review): this rebinds the name `hyperwave` to the result dict,
# shadowing the class for the rest of the notebook -- confirm intended.
(df_result, phases, hyperwave) = hw.get_hyperwave(df_weekly_price)
# display(df_result)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(df_result)
display(phases)
display(hyperwave)
# display(df_result.loc[2:])
# hw._get_hw_start_week(df_result)
# +
# Quick look at a label-sliced subset (df.loc stops are inclusive).
df = df_weekly_price.loc[:11055]
display(df.index)
display(df)
# df.index.idxmax()
# -
# +
# Manual walk-through of the pipeline: find the pre-wave high, bound the raw
# data at the hyperwave start, rebuild the hull and plot the result.
df = df_weekly_price_until_max
max_weekId = 9689
nb_tail_items = 156
def get_max_price(df, column_name = 'close'):
    # Highest value of `column_name`; written against old pandas where
    # Series.argmax returned the index label (today's idxmax).
    return df.loc[ df.loc[:,(column_name)].argmax()][column_name]
# weekId_max_price = get_weekId_max_price(last_n_weeks_Items)
# max_price_last_n_weeks = last_n_weeks_Items[(last_n_weeks_Items.weekId == weekId_max_price )].iloc[0]
# Highest close over the nb_tail_items weeks leading up to max_weekId.
last_n_weeks_Items = df[(df.weekId <= max_weekId)].tail(nb_tail_items)
max_price = get_max_price(last_n_weeks_Items )
print( max_price)
df_week_greater_than = df[(df.weekId >= max_weekId)]
# df.loc[ df.loc[:,('close')].argmax()]['weekId']
# First later week whose close beats the pre-wave high = hyperwave start.
df_val_price_greater_than_max = df_week_greater_than[(df_week_greater_than.close > max_price)]
weekId_start_hyperwave = df_val_price_greater_than_max.loc[ df_val_price_greater_than_max.loc[:,('weekId')].argmin()]['weekId']
df_week_greater_than = df[(df.weekId >= weekId_start_hyperwave )]
df = get_normalize_column(df_week_greater_than, 'close')
df = get_normalize_column(df, 'weekId')
df_hyperwave = get_hull_tangent(df,"weekId_normalize", "close_normalize").sort_values(['x1','y2'], ascending=[True, False])
df_hyperwave = delete_shorter_path(df_hyperwave)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(df_hyperwave)
py.iplot(graph_hyperwave(df_weekly_price_until_max,df_hyperwave), filename='hyperwave')
# py.iplot(fig, filename='shapes-lines')
# +
# display(df_hyperwave.sort_values(['x1','y2'], ascending=[True, False]))
# print(max_price_weekId)
# print(get_upper_path(df_hyperwave))
# Compare the full tangent table against its lower path; summarize spread.
df_low_path = delete_upper_path(df_hull_tangent)
display(df_hull_tangent)
display(df_low_path )
print(df_low_path.std())
print(df_low_path.mean()) #(axis=['m'])
print(df_low_path.median())
# +
# def get_line_slope(row):
# return (row.close_from - row.close_to) / (row.weekId_from - row.weekId_to)
# weekId_close_from_to['slope'] = weekId_close_from_to.apply (lambda row: get_line_slope(row),axis=1)
# weekId_close_from_to
# # m = -0.110000
# # b = 1092.930000
# # m = 15.82
# # b = -177675.62
# def is_phase_below(m, b):
# # m = row.m
# # b = row.b
# df = df_weekly_price
# df_weekly_close = df[["weekId", "close"]]
# df_weekly_close['ln_y'] = (df_weekly_close.weekId * m) + b
# df_weekly_close['is_below'] = df_weekly_close.ln_y.le(df_weekly_close.close) | np.isclose(df_weekly_close['ln_y'], df_weekly_close['close'])
# return df_weekly_close['is_below'].all()
# weekId_close_from_to['is_below'] = weekId_close_from_to.apply (lambda row: is_phase_below(row, df_weekly_price),axis=1)
# is_phase_below()
# weekId_close_from_to['r'] = weekId_close_from_to(weekId_close_from_to['m'], weekId_close_from_to['b'] )
# weekId_close_from_to
# import numpy as np
# import pandas as pd
# np.seterr(divide='ignore', invalid='ignore')
# df = df_weekly_price_until_max[:15].rename(columns={'weekId':'x', 'close': 'y'})
# data = {'x':[1,2,3,4],'y':[5,6,10,12]}
# df = pd.DataFrame(data)
# calculate m and b from y = mx = b
# df_m = (df['y'].values - df['y'].values[:, None]) / (df['x'].values - df['x'].values[:, None])
# df_b = df['y'].values - (df['x'].values * df_m)
# import itertools
# nb_partition = int((df['x'].values.shape[0] / 10)) + 1
# x_values_split = np.array_split(df['x'].values, nb_partition)
# y_values_split = np.array_split(df['y'].values, nb_partition)
# def get_y_values(x_value, m, b):
# x_lenght = x_values.shape[0]
# cube_shape = (x_lenght, 1, 1)
# x_cube = np.reshape(x_values, cube_shape)
# return (x_cube * m) + b
# def get_is_line_above_close(x_values, y_values, m, b):
# y_lenght = y_values.shape[0]
# cube_shape = (y_lenght, 1, 1)
# y_cube = np.reshape(y_values, cube_shape)
# return get_y_values(x_values,m, b) < y_cube
# result = [get_y_vlowalues(x_values, df_m, df_b) for (x_values, y_values) in itertools.zip_longest(x_values_split, y_values_split )]
# x_lenght = x_values.shape[0]
# cube_shape = (x_lenght, 1, 1)
# x_cube = np.reshape(x_values, cube_shape)
# y_cube = np.reshape(y_values, cube_shape)
# cube_y_value_calculated = (x_cube * df_m) + df_b
# x_values = x_values_split[0]
# y_values = y_values_split[0]
# x_lenght = x_values.shape[0]
# cube_shape = (x_lenght, 1, 1)
# https://www.google.com/search?q=hindsight+meaning&ie=utf-8&oe=utf-8&client=firefox-b-ab
# x_cube = np.reshape(x_values, cube_shape)
# y_cube = np.reshape(y_values, cube_shape)
# | hyperwave_find_phases.ipynb
# +
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Discrete tomography in Google CP Solver.
Problem from http://eclipse.crosscoreop.com/examples/tomo.ecl.txt
'''
This is a little 'tomography' problem, taken from an old issue
of Scientific American.
A matrix which contains zeroes and ones gets "x-rayed" vertically and
horizontally, giving the total number of ones in each row and column.
The problem is to reconstruct the contents of the matrix from this
information. Sample run:
?- go.
0 0 7 1 6 3 4 5 2 7 0 0
0
0
8 * * * * * * * *
2 * *
6 * * * * * *
4 * * * *
5 * * * * *
3 * * *
7 * * * * * * *
0
0
Eclipse solution by <NAME>, IC-Parc
'''
Compare with the following models:
* Comet: http://www.hakank.org/comet/discrete_tomography.co
* Gecode: http://www.hakank.org/gecode/discrete_tomography.cpp
* MiniZinc: http://www.hakank.org/minizinc/tomography.mzn
* Tailor/Essence': http://www.hakank.org/tailor/tomography.eprime
* SICStus: http://hakank.org/sicstus/discrete_tomography.pl
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
# Create the solver.
solver = pywrapcp.Solver("n-queens")
#
# data
#
# NOTE(review): row_sums is read before it is ever assigned in this file --
# the original script populated it from a command-line argument; as written
# this raises NameError unless an earlier cell defines it.
if row_sums == "":
    print("Using default problem instance")
    row_sums = [0, 0, 8, 2, 6, 4, 5, 3, 7, 0, 0]
    col_sums = [0, 0, 7, 1, 6, 3, 4, 5, 2, 7, 0, 0]
r = len(row_sums)
c = len(col_sums)
# declare variables: one 0/1 IntVar per grid cell
x = []
for i in range(r):
    t = []
    for j in range(c):
        t.append(solver.IntVar(0, 1, "x[%i,%i]" % (i, j)))
    x.append(t)
x_flat = [x[i][j] for i in range(r) for j in range(c)]
#
# constraints
#
# Each row must sum to its x-ray count.
[
    solver.Add(solver.Sum([x[i][j]
        for j in range(c)]) == row_sums[i])
    for i in range(r)
]
# Each column must sum to its x-ray count.
[
    solver.Add(solver.Sum([x[i][j]
        for i in range(r)]) == col_sums[j])
    for j in range(c)
]
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x_flat)
# db: DecisionBuilder
db = solver.Phase(x_flat, solver.INT_VAR_SIMPLE, solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
# NOTE(review): print_solution is defined further down the file; fine with
# the original notebook cell order, but a top-to-bottom script run would fail.
while solver.NextSolution():
    print_solution(x, r, c, row_sums, col_sums)
    print()
    num_solutions += 1
solver.EndSearch()
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
#
# Print solution
#
def print_solution(x, rows, cols, row_sums, col_sums):
    """Render the solved grid as text.

    First line: the column sums. Then one line per row: the row sum followed
    by '#' for cells solved to 1 and '.' for cells solved to 0. Output is
    byte-identical to the original loop-of-prints version.
    """
    header = [" "] + [str(col_sums[j]) for j in range(cols)]
    print(" ".join(header) + " ")
    for i in range(rows):
        markers = ["#" if x[i][j].Value() == 1 else "." for j in range(cols)]
        print(" ".join([str(row_sums[i])] + markers) + " ")
#
# Read a problem instance from a file
#
def read_problem(file):
    """Read a tomography instance from a file.

    The file holds two comma-separated lines: the row sums and the column
    sums.

    Args:
        file: path to the instance file.

    Returns:
        [row_sums, col_sums] as two lists of ints.
    """
    # Context manager guarantees the handle is closed (the original leaked it).
    with open(file, "r") as f:
        row_sums = [int(r) for r in f.readline().rstrip().split(",")]
        col_sums = [int(c) for c in f.readline().rstrip().split(",")]
    return [row_sums, col_sums]
# | examples/notebook/contrib/discrete_tomography.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using Monte-Carlo Method to Compute $\pi$ #
#
# One method to estimate the value of $\pi$ (3.141592...) is by using the Monte Carlo method. Assuming we have a circle of radius r=1, enclosed by a 2 × 2 square. The area of the circle is $\pi r^2=\pi$, the area of the square is 4. If we divide the area of the circle, by the area of the square we get $\pi /4$.
#
# We then generate a large number of **uniformly distributed** random points and plot them on the graph. These points can be in any position within the square i.e. between (0,0) and (1,1). If they fall within the circle, they are coloured red, otherwise they are coloured blue. We keep track of the total number of points, and the number of points that are inside the circle. If we divide the number of points within the circle, Ninner by the total number of points, Ntotal, we should get a value that is an approximation of the ratio of the areas we calculated above, $\pi/4$.
#
#
#
# first import the necessary libraries
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ## random number generators
# In any Monte-Carlo method, a key step is to generate the random numbers that suit the purpose. Often times, the results of the Monte-Carlo simulation depend critically on the quality of the randomw number. <br>
#
# Numpy has a variety of random number generators. Let's take a quick look.
#
# ### uniformly distributed random numbers
# +
# %%time
# 10**7 uniform [0, 1) samples; the histogram should be flat.
RN = np.random.random(int(1e7))
print(RN.size)
fig,ax = plt.subplots()
# NOTE(review): density='True' passes a string where matplotlib expects a
# bool; it is truthy so it acts like density=True -- confirm intended.
_=ax.hist(RN,bins='auto',density='True')
# -
# ### normally distributed random numbers
# +
# 10**7 standard-normal samples; the histogram should be bell-shaped.
RN = np.random.randn(int(1e7))
print(RN.size)
fig,ax = plt.subplots()
_=ax.hist(RN,bins='auto',density='True')
# -
# ## use Monte Carlo method to compute pi
# %%time
def PI_Monte_Carlo(Ntotal, plot_result = False):
    """Estimate pi by sampling Ntotal uniform points in the unit square.

    A point (x, y) lies inside the quarter circle iff x^2 + y^2 < 1, and
    the inside fraction approximates pi/4.

    Args:
        Ntotal: number of random points to draw (must be >= 1).
        plot_result: if True, scatter-plot the points with matplotlib
            (red inside the quarter circle, blue outside).

    Returns:
        float: 4 * (fraction of points inside the unit circle).
    """
    # Fix: the original printed debug output on every call, including the
    # entire Ntotal-element boolean mask; it also had a dead `Ninside = 0`.
    x = np.random.random(Ntotal)
    y = np.random.random(Ntotal)
    r = np.sqrt(x**2 + y**2)
    inside = r < 1.0
    Ninside = np.sum(inside)
    Pinside = Ninside / Ntotal
    if plot_result:
        fig, ax = plt.subplots()
        ax.scatter(x[inside], y[inside], c='r')
        ax.scatter(x[~inside], y[~inside], c='b')
    return 4.0 * Pinside
# One run with 100k points, plotted; report the percent error vs numpy's pi.
pi_est = PI_Monte_Carlo(100000,plot_result=True)
err = (pi_est-np.pi)/np.pi*100.0
print('estimated Pi and err',pi_est,err)
# numpy sums booleans as 0/1, which is what the inside-count relies on.
T=[True,False,True]
np.sum(T)
# ### Also Check this out:
# In our calculation of $\pi$, we draw a figure with circle inscribed in a square with all of your random dot by using different colors for the dots inside and outside the circle.
# +
import matplotlib.pyplot as plt
# NOTE(review): `random` is imported but numpy's RNG is used below instead.
from random import random
inside = 0
n = 1000
x_in = []
y_in = []
x_out = []
y_out = []
# Sample n points uniformly in [-1, 1]^2 and bucket them by the unit circle.
for i in range(n):
    x = 2*np.random.rand() -1
    y = 2*np.random.rand() -1
    if x**2+y**2 <= 1:
        inside += 1
        x_in.append(x)
        y_in.append(y)
    else:
        x_out.append(x)
        y_out.append(y)
# Area ratio (circle / enclosing square) is pi/4, so pi ~= 4 * inside / n.
pi = 4*inside/n
print("The estimated value of pi is:",pi)
#to draw circle
theta = np.linspace(0, 2*np.pi,100)
r = 1
x1 = r*np.cos(theta)
x2 = r*np.sin(theta)
plt.figure(figsize=(8,8))
plt.plot(x1,x2,"black")
#plotting the hit points
plt.scatter(x_in, y_in, color='g', marker='s')
plt.scatter(x_out, y_out, color='r', marker='s')
#axes limit
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.axvline(x = 0, color='blue',lw=3)
plt.axhline(y = 0, color='blue',lw=3)
# | Compute_pi_MonteCarlo.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="HFBqlpldEMZT" colab_type="code" outputId="3ec79934-3b3b-4469-ef47-90ea9b6c99c5" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %tensorflow_version 1.x
import tensorflow as tf
# Fail fast unless Colab allocated a GPU runtime.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# + id="rvBxsqLPCwsF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f26330d2-a7d4-4f94-feae-763bdae9ee41"
tf.__version__
# + id="CPRQpM8opr6j" colab_type="code" colab={}
# Mount Google Drive so datasets and checkpoints persist across sessions.
from google.colab import drive
drive.mount('/content/drive/')
# + id="yDbmvsw-u9eZ" colab_type="code" colab={}
import os
import re
import numpy as np
import pandas as pd
import json
import scipy.stats as stats
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional
from tensorflow.keras.models import Model, Sequential
#import transformers as ppb
import torch
import warnings
warnings.filterwarnings('ignore')
# + id="ukmub-oerbtS" colab_type="code" colab={}
# Work from the shared project directory on Drive.
os.chdir("/content/drive/My Drive/MIDS/W266/")
# !ls
# + [markdown] id="IiDBKTaUzPky" colab_type="text"
# # Load data
# + [markdown] id="l7KQsxMRzUK0" colab_type="text"
# Dataset from: http://diego.asu.edu/Publications/ADRMine.html
# + id="9xc0QkMcviai" colab_type="code" outputId="326cb445-fd10-46bc-c7dd-d1d7d3f619e7" colab={"base_uri": "https://localhost:8080/", "height": 289}
#load data
# lineterminator='\n' guards against stray carriage returns inside tweets.
data = pd.read_csv('binary_tweet_dataset/binary_tweets_valid.csv', lineterminator='\n')
data.drop(columns=["index"], inplace=True)
data.head()
# + id="noSRAZ89UU28" colab_type="code" colab={}
#remove newline characters from tweets
def preprocess(s):
    """Replace every newline character in a tweet with a single space."""
    return s.replace('\n', ' ')
data['tweet'] = data['tweet'].apply(lambda x: preprocess(x))
# + id="LG9jyAf1886Z" colab_type="code" colab={}
# 60/20/20 train/dev/test split, stratified on the label column `id`.
split_data, test_data = train_test_split(data[['tweet','id']], test_size=0.2, random_state=0, stratify=data["id"])
train_data, dev_data = train_test_split(split_data, test_size=0.25, random_state=0, stratify=split_data["id"])
# + id="VBfneep0CkKd" colab_type="code" outputId="8552657c-aeac-465e-fd7c-533bbaafa616" colab={"base_uri": "https://localhost:8080/", "height": 34}
train_data.shape[0], dev_data.shape[0], test_data.shape[0]
# + id="BrUcOFS_ChxX" colab_type="code" colab={}
# Write headerless TSVs in the layout BERT's run_classifier.py expects.
train_data.to_csv('./binary_tweet_dataset/train.tsv', sep='\t', index=False, header=False)
dev_data.to_csv('./binary_tweet_dataset/dev.tsv', sep='\t', index=False, header=False)
test_data.to_csv('./binary_tweet_dataset/test.tsv', sep='\t', index=False, header=False)
# + [markdown] id="54dmtrFEzaeN" colab_type="text"
# # Fine tune with BERT
#
# + [markdown] id="3o9VVRhYzl2a" colab_type="text"
# Classification code adapted from: https://github.com/google-research/bert
# + id="ztR-Ti7n7pJe" colab_type="code" colab={}
# Paths to the pretrained BERT checkpoint, the task data and the output dir.
BERT_BASE_DIR="/content/drive/My Drive/MIDS/W266/W266/model/uncased_L-12_H-768_A-12"
DATA_DIR="/content/drive/My Drive/MIDS/W266/binary_tweet_dataset/undersampled"
TRAINED_CLASSIFIER="/content/drive/My Drive/MIDS/W266/trained_models/binary/bert_uncased_binary_undersampled"
# + id="xjly0BFTAukA" colab_type="code" colab={}
if not os.path.exists(TRAINED_CLASSIFIER):
    os.makedirs(TRAINED_CLASSIFIER)
# + id="2HHAEAPKS3rW" colab_type="code" colab={}
# !ls "$BERT_BASE_DIR"
# + id="Dx2HAys2EoCo" colab_type="code" colab={}
# !python ./bert/run_classifier.py \
# --task_name=tweet \
# --do_train=true \
# --do_eval=true \
# --do_predict=true \
# --data_dir="$DATA_DIR" \
# --vocab_file="$BERT_BASE_DIR/vocab.txt" \
# --bert_config_file="$BERT_BASE_DIR/bert_config.json" \
# --init_checkpoint="$BERT_BASE_DIR/model.ckpt-100000" \
# --max_seq_length=128 \
# --train_batch_size=32 \
# --learning_rate=2e-5 \
# --num_train_epochs=3.0 \
# --output_dir="$TRAINED_CLASSIFIER"
# + id="pMEqd3L7GRBQ" colab_type="code" colab={}
# Gold labels for the held-out test split (column 1 of the headerless TSV).
test_data = pd.read_csv(DATA_DIR+"/test.tsv", delimiter='\t', header=None, lineterminator='\n')
test_y = test_data[1]
# + id="1WtPJzCPE_Z-" colab_type="code" colab={}
# run_classifier.py writes per-class probabilities; threshold positive at 0.5.
test_results = pd.read_csv(TRAINED_CLASSIFIER+"/test_results.tsv", delimiter='\t', header=None)
test_prob = test_results[1]
test_pred = [1 if x >= 0.5 else 0 for x in test_prob]
# + id="en0MabCtGEJO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c5cfdbf9-f02e-42a4-813e-ac1747c72562"
#test performance
print(f"accuracy: {np.round(accuracy_score(test_pred, test_y),3)}")
print(f"f1-score: {np.round(f1_score(test_pred, test_y),3)}")
# + id="oCgYw5HTGVkE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="345098d5-3973-439a-e47a-da3e7d63b9f0"
# !ls "$TRAINED_CLASSIFIER"
# + [markdown] id="CbK8LAAHz8GJ" colab_type="text"
# # Extract features from fine tuned BERT models
# + id="UxgcYDEYHZuB" colab_type="code" colab={}
# Ensure the output directory for extracted BERT features exists.
EXTRACTED_FEATURES = TRAINED_CLASSIFIER+'/extracted_features'
if not os.path.exists(EXTRACTED_FEATURES):
    os.makedirs(EXTRACTED_FEATURES)
# + id="wCxgxKb1h4qv" colab_type="code" colab={}
# !python ./bert/extract_features.py \
# --input_file="$DATA_DIR/dev.tsv" \
# --output_file="$EXTRACTED_FEATURES/dev_features.json" \
# --vocab_file="$BERT_BASE_DIR/vocab.txt" \
# --bert_config_file="$BERT_BASE_DIR/bert_config.json" \
# --init_checkpoint="$TRAINED_CLASSIFIER/model.ckpt-78" \
# --layers=-1,-2,-3,-4 \
# --max_seq_length=128 \
# --batch_size=8
# + id="fg0cSUlg9xvr" colab_type="code" colab={}
def parse_embeddings(line, layer=0):
    """Parse one json line produced by BERT's extract_features.py.

    Args:
        line: json string with a "features" entry per token, each holding
            the token text and the stored layer activations.
        layer: which stored layer to read (0 corresponds to the last layer
            -1, 1 to the second-to-last layer -2, etc).

    Returns:
        tokens: list of token strings for the example.
        embeds: ndarray of shape (number of tokens, embedding length).
    """
    features = pd.DataFrame(json.loads(line))["features"]
    tokens = []
    vectors = []
    for feat in features:
        tokens.append(feat["token"])
        vectors.append(np.array(feat["layers"][layer]["values"]))
    return (tokens, np.array(vectors))
# + id="q8AzKAn8L3dE" colab_type="code" colab={}
def get_features(filepath):
    """Read a BERT feature-extraction json file and parse every example.

    Returns (all_tokens, all_embeds): per-example token lists and
    (num_tokens, embed_dim) arrays, in file order. Always reads the last
    stored layer (index 0).
    """
    with open(filepath) as handle:
        raw_lines = handle.readlines()
    all_tokens = []
    all_embeds = []
    for raw in raw_lines:
        tokens, embeds = parse_embeddings(raw, 0)
        all_tokens.append(tokens)
        all_embeds.append(embeds)
    return (all_tokens, all_embeds)
# + id="qZH_REHuL5RT" colab_type="code" colab={}
# Re-point the paths (fresh runtime) and parse the extracted feature files.
EXTRACTED_FEATURES = "/content/drive/My Drive/MIDS/W266/trained_models/binary/bert_uncased_binary_undersampled/extracted_features"
DATA_DIR="/content/drive/My Drive/MIDS/W266/binary_tweet_dataset/undersampled"
# + id="TJtUu51fMULx" colab_type="code" colab={}
train_tokens, train_embeds = get_features(EXTRACTED_FEATURES+"/train_features.json")
# + id="-Ljv8xHwnVWR" colab_type="code" colab={}
dev_tokens, dev_embeds = get_features(EXTRACTED_FEATURES+"/dev_features.json")
# + id="u1CMNWUSnVyf" colab_type="code" colab={}
test_tokens, test_embeds = get_features(EXTRACTED_FEATURES+"/test_features.json")
# + id="OEKleJLY9-lz" colab_type="code" colab={}
# Labels plus two feature views per split: the full (tokens, 768) matrices
# and the first-token ([CLS]) vector alone.
train_data = pd.read_csv(DATA_DIR+"/train.tsv", delimiter="\t", header=None, lineterminator='\n')
train_y = train_data[1]
train_x = train_embeds
train_x0 = np.array([x[0] for x in train_embeds])
# + id="lBcwDWOjtTMH" colab_type="code" colab={}
dev_data = pd.read_csv(DATA_DIR+"/dev.tsv", delimiter="\t", header=None, lineterminator='\n')
dev_y = dev_data[1]
dev_x = dev_embeds
dev_x0 = np.array([x[0] for x in dev_embeds])
# + id="UDoe0t24ujui" colab_type="code" colab={}
#combine train and dev sets
# NOTE(review): train_x holds variable-length arrays, so np.append here
# presumably yields an object array -- confirm downstream code expects that.
train_dev_y = np.append(train_data[1], dev_data[1])
train_dev_x = np.append(train_x, dev_x, axis=0)
train_dev_x0 = np.append(train_x0, dev_x0, axis=0)
# + id="dT8SvPTfwW1L" colab_type="code" outputId="c495debf-174d-45de-aa74-4ed71a5ce437" colab={"base_uri": "https://localhost:8080/", "height": 102}
#training data shapes
print(f"shape of y: {train_dev_y.shape}")
print(f"shape of x (cls token embedding): {train_dev_x0.shape}")
print(f"shape of x (embeddings): {train_dev_x.shape}")
print(f"shape of embeddings of first example: {train_dev_x[0].shape}")
print(f"shape of embeddings of second example: {train_dev_x[1].shape}")
# + id="nTHM3wk--ACU" colab_type="code" colab={}
test_data = pd.read_csv(DATA_DIR+"/test.tsv", delimiter="\t", header=None, lineterminator='\n')
test_y = test_data[1]
test_x = np.array(test_embeds)
test_x0 = np.array([x[0] for x in test_embeds])
# + id="VRn31FVjxFL-" colab_type="code" outputId="3138d51b-7d6e-41b0-cc7e-2af1173552e2" colab={"base_uri": "https://localhost:8080/", "height": 102}
#test data shapes
print(f"shape of y: {test_y.shape}")
print(f"shape of x (cls token embedding: {test_x0.shape}")
print(f"shape of x (embeddings): {test_x.shape}")
print(f"shape of embeddings of first example: {test_x[0].shape}")
print(f"shape of embeddings of second example: {test_x[1].shape}")
# + [markdown] id="9pMpiz360iGz" colab_type="text"
# ### Front pad embeddings to max token length
# + id="1G5KwcUa98Mt" colab_type="code" outputId="4d0b3fdd-c06b-4bbe-b073-5e0dd9448f3b" colab={"base_uri": "https://localhost:8080/", "height": 316}
# Token-length distribution for train+dev; informs the padding length choice.
train_token_len = [len(x) for x in train_tokens]
train_token_len.extend([len(x) for x in dev_tokens])
plt.hist(train_token_len)
plt.xlabel("number of tokens")
plt.ylabel("frequency")
print(f"95 percentile of token lengths for training data: {np.percentile(train_token_len, 95)}")
print(f"max token length for training data: {np.max(train_token_len)}")
# + id="AsUSygUX982B" colab_type="code" outputId="9541c43c-ef11-4a9f-b<PASSWORD>" colab={"base_uri": "https://localhost:8080/", "height": 313}
# Same distribution for the test split.
test_token_len = [len(x) for x in test_tokens]
plt.hist(test_token_len)
plt.xlabel("number of tokens")
plt.ylabel("frequency")
print(f"95 percentile of token lengths for test data: {np.percentile(test_token_len, 95)}")
print(f"max token length for test data: {np.max(test_token_len)}")
# + id="ujx_9tRSyqWE" colab_type="code" colab={}
def pad_embeddings(example, max_len):
    """Front-pad a (tokens, 768) embedding matrix with zero rows so the
    result has exactly max_len rows. Assumes len(example) <= max_len."""
    n_missing = max_len - example.shape[0]
    zero_rows = np.zeros((n_missing, 768))
    return np.append(zero_rows, example, axis=0)
# + id="mOOHApXCzSdU" colab_type="code" colab={}
# Front-pad every example to the max token length seen in training (102).
max_len = 102
train_dev_x_pad = np.array([pad_embeddings(x, max_len) for x in train_dev_x])
# + id="cxRtqEVa50xg" colab_type="code" colab={}
test_x_pad = np.array([pad_embeddings(x, max_len) for x in test_x])
# + id="59pU3Kwzy43l" colab_type="code" outputId="322efc5e-75de-4945-f19c-7838acb453cc" colab={"base_uri": "https://localhost:8080/", "height": 34}
train_dev_x_pad.shape
# + id="25R4oxkp55Ra" colab_type="code" outputId="624408ac-36e6-4439-d2b8-2a45eb98f96c" colab={"base_uri": "https://localhost:8080/", "height": 34}
test_x_pad.shape
# + [markdown] id="S0eT2VrW0TKI" colab_type="text"
# ### CNN with BERT extracted features
# + id="_qWClwoe9GzG" colab_type="code" outputId="2675d5a0-3b54-4164-cc26-061727cbe993" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#CNN: parallel 1-D convolutions over the padded BERT token embeddings, one
# branch per kernel size, max-pooled, concatenated, then a sigmoid classifier.
# (The unused `embed_dim = 5` variable was removed; the input feature size is
# fixed by the Input layer below.)
num_filters = [10, 10, 10, 10]
kernel_sizes = [5, 10, 15, 20]
dense_layer_dims = []   # extra hidden Dense layers between concat and output (none by default)
dropout_rate = 0.8      # only used if the commented-out Dropout line is re-enabled
input_layer = tf.keras.layers.Input(shape=(102, 768), name="input")
conv_layers_for_all_kernel_sizes = []
for kernel_size, filters in zip(kernel_sizes, num_filters):
    conv_layer = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(input_layer)
    conv_layer = keras.layers.GlobalMaxPooling1D()(conv_layer)
    conv_layers_for_all_kernel_sizes.append(conv_layer)
# Concat the feature maps from each different size.
layer = keras.layers.concatenate(conv_layers_for_all_kernel_sizes, axis=1)
#layer = keras.layers.Dropout(rate=dropout_rate, name='dropout')(layer)
for dim in dense_layer_dims:
    layer = keras.layers.Dense(dim, activation='relu')(layer)
classification_output = keras.layers.Dense(1, activation='sigmoid', name='sigmoid')(layer)
cnn_model = keras.Model(input_layer, classification_output)
cnn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
cnn_model.reset_states()
# validation_data passed as a tuple (the documented Keras form) rather than a list.
cnn_model.fit(train_dev_x_pad, train_dev_y, validation_data=(test_x_pad, test_y), epochs=10)
cnn_model.summary()
# + id="b3Ui_eKHDxeJ" colab_type="code" outputId="5dfc80bb-6b1e-4d5f-857e-b5131d21b4f5" colab={"base_uri": "https://localhost:8080/", "height": 51}
#test performance on the held-out test set
test_prob = cnn_model.predict(test_x_pad)
test_pred = (test_prob >= 0.5).astype(int)
print(f"accuracy: {np.round(accuracy_score(test_pred, test_y),3)}")
print(f"f1-score: {np.round(f1_score(test_pred, test_y),3)}")
# + id="2tjsaEs2D1jX" colab_type="code" outputId="79d423bf-cc15-4413-d956-9490d101ed85" colab={"base_uri": "https://localhost:8080/", "height": 111}
# Confusion matrix as percentages of all test examples.
cm = confusion_matrix(test_pred, test_y)
cm_pct = pd.DataFrame(cm / cm.sum() * 100, columns=["pred neg", "pred pos"], index=["true neg", "true pos"])
cm_pct.round(2).astype(str).add('%')
# + [markdown] id="H8jaLU8P0y0W" colab_type="text"
# ### LSTM with BERT extracted features
# + id="xZfTVRgm-CUJ" colab_type="code" outputId="5e387521-53b8-4aa2-9116-35b073a306c2" colab={"base_uri": "https://localhost:8080/", "height": 612}
#LSTM over the padded BERT token embeddings, then a sigmoid classifier.
# The input shape is already fixed by the Input layer, so the `input_shape`
# argument previously passed to LSTM was ignored -- and it was also in the
# wrong order, (768, 102) instead of (102, 768). It has been removed.
input_layer = tf.keras.layers.Input(shape=(102, 768), name="input")
lstm_output = LSTM(100, name="lstm")(input_layer)
classification_output = Dense(1, activation="sigmoid", name="sigmoid")(lstm_output)
lstm_model = Model(input_layer, classification_output)
lstm_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
lstm_model.reset_states()
lstm_model.fit(train_dev_x_pad, train_dev_y, epochs=10)
lstm_model.summary()
# + id="8tXl0r2d8Qvw" colab_type="code" outputId="840de69e-f79a-4352-893d-2bc6618027d8" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Threshold the predicted probabilities at 0.5 and score against the test labels.
test_prob = lstm_model.predict(test_x_pad)
test_pred = pd.Series((test_prob.flatten() >= 0.5).astype(int))
print(f"accuracy: {np.round(accuracy_score(test_pred, test_y),3)}")
print(f"f1-score: {np.round(f1_score(test_pred, test_y),3)}")
# + id="KtgmtlbE9yHG" colab_type="code" outputId="b76a44b1-8890-448d-b2e4-21be46c90563" colab={"base_uri": "https://localhost:8080/", "height": 111}
cm = confusion_matrix(test_pred, test_y)
cm_pct = pd.DataFrame(cm / cm.sum() * 100, columns=["pred neg", "pred pos"], index=["true neg", "true pos"])
cm_pct.round(2).astype(str).add('%')
# + [markdown] id="rprn2EAG1FrF" colab_type="text"
# Error analysis
# + id="ouupUX5_Xg07" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 80} outputId="aff0759b-8d85-4091-b139-89e0d1b18f12"
# Collect ground truth and predictions side by side, then show the mismatches.
results = pd.concat([test_y, test_pred], axis=1)
results.columns = ["test_y", "test_pred"]
misclassified = results["test_y"] != results["test_pred"]
results[misclassified]
# + id="lwptq2o8YXVC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="fcb6c63d-0d8c-4a7b-bf12-f94e85703483"
# Inspect the raw text of example 826 -- presumably one of the
# misclassified rows above; confirm the index against the output.
test_data[0][826]
# + [markdown] id="VLTFtSdA07Nz" colab_type="text"
# ### Logistic Regression with BERT extracted features for CLS token
# + id="HX_PKM0OQX76" colab_type="code" outputId="1654d0ad-7f19-4ab4-cbc1-7b54e2110927" colab={"base_uri": "https://localhost:8080/", "height": 34}
#logistic regression with a hyper-parameter grid search
# The default "lbfgs" solver only supports the "l2" penalty, so the original
# grid's "l1" and "elasticnet" candidates failed during fitting (GridSearchCV
# scores them as NaN and effectively only explores "l2"). The "saga" solver
# supports all three penalties; "elasticnet" additionally requires l1_ratio,
# so the grid is split into two compatible sub-grids.
param_grid = [
    {"C": np.linspace(0.0001, 100, 30),
     "penalty": ["l1", "l2"],
     "class_weight": ["balanced", None]},
    {"C": np.linspace(0.0001, 100, 30),
     "penalty": ["elasticnet"],
     "l1_ratio": [0.5],
     "class_weight": ["balanced", None]},
]
lr = LogisticRegression(random_state=0, solver="saga", max_iter=1000)
search = GridSearchCV(lr, param_grid, cv=3, scoring='f1')
search.fit(train_dev_x0, train_dev_y)
search.best_params_
# + id="rr7SYCLxJQli" colab_type="code" outputId="37545dcf-f5dc-42d5-f3c2-e8b4fce78c43" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Evaluate the best grid-search estimator on the test split.
test_pred = search.predict(test_x0)
print(f"accuracy: {np.round(accuracy_score(test_pred, test_y),3)}")
print(f"f1-score: {np.round(f1_score(test_pred, test_y),3)}")
# + id="ZlR0EO75KDze" colab_type="code" outputId="1a70eacf-0aec-4e00-8809-0486a63ddc3c" colab={"base_uri": "https://localhost:8080/", "height": 111}
cm = confusion_matrix(test_pred, test_y)
cm_pct = pd.DataFrame(cm / cm.sum() * 100, columns=["pred neg", "pred pos"], index=["true neg", "true pos"])
cm_pct.round(2).astype(str).add('%')
| task2_adr/ADR_task_modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
#
# **If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**
#
# This notebook was generated for TensorFlow 2.6.
# + [markdown] colab_type="text"
# # Introduction to Keras and TensorFlow
# + [markdown] colab_type="text"
# ## What's TensorFlow?
# + [markdown] colab_type="text"
# ## What's Keras?
# + [markdown] colab_type="text"
# ## Keras and TensorFlow: a brief history
# + [markdown] colab_type="text"
# ## Setting up a deep-learning workspace
# + [markdown] colab_type="text"
# ### Jupyter notebooks: the preferred way to run deep-learning experiments
# + [markdown] colab_type="text"
# ### Using Colaboratory
# + [markdown] colab_type="text"
# #### First steps with Colaboratory
# + [markdown] colab_type="text"
# #### Installing packages with `pip`
# + [markdown] colab_type="text"
# #### Using the GPU runtime
# + [markdown] colab_type="text"
# ## First steps with TensorFlow
# + [markdown] colab_type="text"
# #### Constant tensors and Variables
# + [markdown] colab_type="text"
# **All-ones or all-zeros tensors**
# + colab_type="code"
import tensorflow as tf
# A constant 2x1 tensor filled with ones.
x = tf.ones(shape=(2, 1))
print(x)
# + colab_type="code"
# A constant 2x1 tensor filled with zeros.
x = tf.zeros(shape=(2, 1))
print(x)
# + [markdown] colab_type="text"
# **Random tensors**
# + colab_type="code"
# 3x1 tensor of samples from a normal distribution (mean 0, stddev 1).
x = tf.random.normal(shape=(3, 1), mean=0., stddev=1.)
print(x)
# + colab_type="code"
# 3x1 tensor of samples drawn uniformly from [0, 1).
x = tf.random.uniform(shape=(3, 1), minval=0., maxval=1.)
print(x)
# + [markdown] colab_type="text"
# **NumPy arrays are assignable**
# + colab_type="code"
import numpy as np

# Unlike constant TensorFlow tensors, NumPy arrays can be modified in place.
x = np.ones((2, 2))
x[0, 0] = 0.
# + [markdown] colab_type="text"
# **Creating a Variable**
# + colab_type="code"
# A tf.Variable holds mutable state (e.g. model weights), unlike constant tensors.
v = tf.Variable(initial_value=tf.random.normal(shape=(3, 1)))
print(v)
# + [markdown] colab_type="text"
# **Assigning a value to a Variable**
# + colab_type="code"
# Replace the entire contents of the variable.
v.assign(tf.ones((3, 1)))
# + [markdown] colab_type="text"
# **Assigning a value to a subset of a Variable**
# + colab_type="code"
# Update a single entry in place.
v[0, 0].assign(3.)
# + [markdown] colab_type="text"
# **Using assign_add**
# + colab_type="code"
# In-place increment: the Variable analogue of +=.
v.assign_add(tf.ones((3, 1)))
# + [markdown] colab_type="text"
# #### Tensor operations: doing math in TensorFlow
# + [markdown] colab_type="text"
# **A few basic math operations**
# + colab_type="code"
a = tf.ones((2, 2))
b = tf.square(a)     # elementwise square
c = tf.sqrt(a)       # elementwise square root
d = b + c            # elementwise addition
e = tf.matmul(a, b)  # matrix product
e *= d               # elementwise multiplication
# + [markdown] colab_type="text"
# #### A second look at the `GradientTape` API
# + [markdown] colab_type="text"
# **Using the GradientTape**
# + colab_type="code"
# Variables are tracked by the tape automatically.
input_var = tf.Variable(initial_value=3.)
with tf.GradientTape() as tape:
    result = tf.square(input_var)
gradient = tape.gradient(result, input_var)  # d(x^2)/dx = 2x
# + [markdown] colab_type="text"
# **Using the GradientTape with constant tensor inputs**
# + colab_type="code"
input_const = tf.constant(3.)
with tf.GradientTape() as tape:
    # Constant tensors must be watched explicitly.
    tape.watch(input_const)
    result = tf.square(input_const)
gradient = tape.gradient(result, input_const)
# + [markdown] colab_type="text"
# **Using nested gradient tapes to compute second-order gradients**
# + colab_type="code"
time = tf.Variable(0.)
with tf.GradientTape() as outer_tape:
    with tf.GradientTape() as inner_tape:
        position = 4.9 * time ** 2
    # First derivative of position w.r.t. time: speed.
    speed = inner_tape.gradient(position, time)
# Second derivative: acceleration.
acceleration = outer_tape.gradient(speed, time)
# + [markdown] colab_type="text"
# #### An end-to-end example: a linear classifier in pure TensorFlow
# + [markdown] colab_type="text"
# **Generating two classes of random points in a 2D plane**
# + colab_type="code"
num_samples_per_class = 1000
# Both classes share the same covariance; only the means differ, so the two
# point clouds are (mostly) linearly separable.
class_cov = [[1, 0.5], [0.5, 1]]
negative_samples = np.random.multivariate_normal(
    mean=[0, 3], cov=class_cov, size=num_samples_per_class)
positive_samples = np.random.multivariate_normal(
    mean=[3, 0], cov=class_cov, size=num_samples_per_class)
# + [markdown] colab_type="text"
# **Stacking the two classes into an array with shape (2000, 2)**
# + colab_type="code"
inputs = np.vstack((negative_samples, positive_samples)).astype(np.float32)
# + [markdown] colab_type="text"
# **Generating the corresponding targets (0 and 1)**
# + colab_type="code"
# Labels follow the stacking order: first all zeros, then all ones.
targets = np.concatenate(
    (np.zeros((num_samples_per_class, 1), dtype="float32"),
     np.ones((num_samples_per_class, 1), dtype="float32")),
    axis=0,
)
# + [markdown] colab_type="text"
# **Plotting the two point classes**
# + colab_type="code"
import matplotlib.pyplot as plt
# Color each point by its class label (0 or 1).
plt.scatter(inputs[:, 0], inputs[:, 1], c=targets[:, 0])
plt.show()
# + [markdown] colab_type="text"
# **Creating the linear classifier variables**
# + colab_type="code"
input_dim = 2   # two input features per sample
output_dim = 1  # one prediction score per sample
W = tf.Variable(initial_value=tf.random.uniform(shape=(input_dim, output_dim)))
b = tf.Variable(initial_value=tf.zeros(shape=(output_dim,)))
# + [markdown] colab_type="text"
# **The forward pass function**
# + colab_type="code"
def model(inputs):
    # Affine transform: prediction = inputs @ W + b.
    return tf.matmul(inputs, W) + b
# + [markdown] colab_type="text"
# **The mean squared error loss function**
# + colab_type="code"
def square_loss(targets, predictions):
    """Mean squared error between targets and predictions."""
    return tf.reduce_mean(tf.square(targets - predictions))
# + [markdown] colab_type="text"
# **The training step function**
# + colab_type="code"
learning_rate = 0.1

def training_step(inputs, targets):
    """Run one full-batch gradient-descent step and return the loss.

    Consistency fix: square_loss is declared as (targets, predictions), but it
    was previously called with the arguments swapped. Harmless for a squared
    difference, yet corrected to match the declared signature.
    """
    with tf.GradientTape() as tape:
        predictions = model(inputs)
        loss = square_loss(targets, predictions)
    grad_loss_wrt_W, grad_loss_wrt_b = tape.gradient(loss, [W, b])
    # Manual SGD update: move W and b against their gradients.
    W.assign_sub(grad_loss_wrt_W * learning_rate)
    b.assign_sub(grad_loss_wrt_b * learning_rate)
    return loss
# + [markdown] colab_type="text"
# **The batch training loop**
# + colab_type="code"
# Full-batch training: each step uses all the samples at once.
for step in range(40):
    loss = training_step(inputs, targets)
    print(f"Loss at step {step}: {loss:.4f}")
# + colab_type="code"
predictions = model(inputs)
# Color the points by their thresholded prediction to visualize the learned split.
plt.scatter(inputs[:, 0], inputs[:, 1], c=predictions[:, 0] > 0.5)
plt.show()
# + colab_type="code"
x = np.linspace(-1, 4, 100)
# Decision boundary: solve W[0]*x + W[1]*y + b = 0.5 for y.
y = - W[0] / W[1] * x + (0.5 - b) / W[1]
plt.plot(x, y, "-r")
plt.scatter(inputs[:, 0], inputs[:, 1], c=predictions[:, 0] > 0.5)
# + [markdown] colab_type="text"
# ## Anatomy of a neural network: understanding core Keras APIs
# + [markdown] colab_type="text"
# ### Layers: the building blocks of deep learning
# + [markdown] colab_type="text"
# #### The base `Layer` class in Keras
# + colab_type="code"
from tensorflow import keras

class SimpleDense(keras.layers.Layer):
    """A minimal reimplementation of a fully connected (Dense) layer."""

    def __init__(self, units, activation=None):
        super().__init__()
        self.units = units            # number of output units
        self.activation = activation  # optional elementwise activation function

    def build(self, input_shape):
        # Called lazily on first use, once the input feature size is known.
        input_dim = input_shape[-1]
        self.W = self.add_weight(shape=(input_dim, self.units),
                                 initializer="random_normal")
        self.b = self.add_weight(shape=(self.units,),
                                 initializer="zeros")

    def call(self, inputs):
        # Forward pass: affine transform followed by the optional activation.
        y = tf.matmul(inputs, self.W) + self.b
        if self.activation is not None:
            y = self.activation(y)
        return y
# + colab_type="code"
# Instantiate the layer and call it on a batch of 2 dummy samples of size 784.
my_dense = SimpleDense(units=32, activation=tf.nn.relu)
input_tensor = tf.ones(shape=(2, 784))
output_tensor = my_dense(input_tensor)
print(output_tensor.shape)  # (2, 32)
# + [markdown] colab_type="text"
# #### Automatic shape inference: building layers on the fly
# + colab_type="code"
from tensorflow.keras import layers
# The input size is inferred automatically on first call; only units are given.
layer = layers.Dense(32, activation="relu")
# + colab_type="code"
from tensorflow.keras import models
from tensorflow.keras import layers
# A stack of built-in Dense layers.
model = models.Sequential([
    layers.Dense(32, activation="relu"),
    layers.Dense(32)
])
# + colab_type="code"
# The same Sequential pattern works with the custom SimpleDense layer.
model = keras.Sequential([
    SimpleDense(32, activation="relu"),
    SimpleDense(64, activation="relu"),
    SimpleDense(32, activation="relu"),
    SimpleDense(10, activation="softmax")
])
# + [markdown] colab_type="text"
# ### From layers to models
# + [markdown] colab_type="text"
# ### The "compile" step: configuring the learning process
# + colab_type="code"
model = keras.Sequential([keras.layers.Dense(1)])
# Configure training with string shortcuts...
model.compile(optimizer="rmsprop",
              loss="mean_squared_error",
              metrics=["accuracy"])
# + colab_type="code"
# ...or with explicit objects (needed when passing custom arguments).
model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.MeanSquaredError(),
              metrics=[keras.metrics.BinaryAccuracy()])
# + [markdown] colab_type="text"
# ### Picking a loss function
# + [markdown] colab_type="text"
# ### Understanding the `fit` method
# + [markdown] colab_type="text"
# **Calling `fit` with NumPy data**
# + colab_type="code"
# Train on the toy point dataset generated earlier.
history = model.fit(
    inputs,          # training samples
    targets,         # corresponding labels
    epochs=5,        # number of passes over the data
    batch_size=128   # samples per gradient update
)
# + colab_type="code"
# Per-epoch record of loss and metric values.
history.history
# + [markdown] colab_type="text"
# ### Monitoring loss & metrics on validation data
# + [markdown] colab_type="text"
# **Using the validation data argument**
# + colab_type="code"
model = keras.Sequential([keras.layers.Dense(1)])
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=0.1),
              loss=keras.losses.MeanSquaredError(),
              metrics=[keras.metrics.BinaryAccuracy()])
# Shuffle before splitting so both splits contain samples of both classes.
indices_permutation = np.random.permutation(len(inputs))
shuffled_inputs = inputs[indices_permutation]
shuffled_targets = targets[indices_permutation]
# Hold out 30% of the data for validation.
num_validation_samples = int(0.3 * len(inputs))
val_inputs = shuffled_inputs[:num_validation_samples]
val_targets = shuffled_targets[:num_validation_samples]
training_inputs = shuffled_inputs[num_validation_samples:]
training_targets = shuffled_targets[num_validation_samples:]
model.fit(
    training_inputs,
    training_targets,
    epochs=5,
    batch_size=16,
    validation_data=(val_inputs, val_targets)  # evaluated after each epoch
)
# + [markdown] colab_type="text"
# ### Inference: using a model after training
# + colab_type="code"
# predict() processes the data in batches and returns a NumPy array.
predictions = model.predict(val_inputs, batch_size=128)
print(predictions[:10])
# + [markdown] colab_type="text"
# ## Chapter summary
| chapter03_introduction-to-keras-and-tf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# All the IPython Notebooks in this lecture series by Dr. <NAME> are available @ **[GitHub](https://github.com/milaan9/02_Python_Datatypes/tree/main/002_Python_String_Methods)**
# </i></small></small>
# # Python String `lower()`
#
# The string **`lower()`** method converts all uppercase characters in a string into lowercase characters and returns it.
#
# **Syntax**:
#
# ```python
# string.lower()
# ```
# + [markdown] heading_collapsed=true
# ## `lower()` Parameters
#
# The **`lower()`** method doesn't take any parameters.
# -
# ## Return Value from `lower()`
#
# The **`lower()`** method returns the lowercased string from the given string. It converts all uppercase characters to lowercase.
#
# If no uppercase characters exist, it returns the original string.
# +
# Example 1: Convert a string to lowercase

# example string -- renamed from "string" to avoid shadowing the stdlib
# `string` module
text = "THIS SHOULD BE LOWERCASE!"
print(text.lower())

# string with numbers
# all alphabets should be lowercase
text = "Th!s Sh0uLd B3 L0w3rCas3!"
print(text.lower())
# +
# Example 2: How lower() is used in a program?

# first string (snake_case per PEP 8, was camelCase)
first_string = "PYTHON IS AWESOME!"

# second string
second_string = "PyThOn Is AwEsOmE!"

# Compare case-insensitively by lowercasing both sides; the redundant
# parentheses around the if-condition were dropped.
if first_string.lower() == second_string.lower():
    print("The strings are same.")
else:
    print("The strings are not same.")
# -
# >**Note:** If you want to convert a string to uppercase, use **[upper()](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String_Methods/026_Python_String_upper%28%29.ipynb)**. You can also use **[swapcase()](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String_Methods/027_Python_String_swapcase%28%29.ipynb)** to swap between lowercase and uppercase.
| 002_Python_String_Methods/025_Python_String_lower().ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > **How to run this notebook (command-line)?**
# 1. Install the `ReinventCommunity` environment:
# `conda env create -f environment.yml`
# 2. Activate the environment:
# `conda activate ReinventCommunity`
# 3. Execute `jupyter`:
# `jupyter notebook`
# 4. Copy the link to a browser
#
#
# # `REINVENT 3.0`: Automated Curriculum Learning Demo
#
# The aim of this notebook is to illustrate how `REINVENT` can be used for the _de novo_ design of molecules in a *Curriculum Learning* (CL) setup. The general idea of CL is to decompose a Production Objective (target properties for the proposed molecules) into simpler sequential Curriculum Objectives to accelerate convergence:
#
# 
# The purpose of the Curriculum Objectives is to guide the REINVENT agent to areas of chemical space that satisfy the Production Objective. The order of Curriculum Objectives is user-defined and each Objective (Curriculum and Production) can consist of any REINVENT scoring function component(s), e.g., Tanimoto similarity. Progression through Curriculum Objectives is controlled by a threshold (can be specified for different Objectives) which the agent must achieve.
#
# In the following section, we will show how to set up an `Automated Curriculum Learning` `REINVENT` run that gradually generates compounds possessing a target scaffold (which is not present in the training set of the provided prior) that represents a difficult task for a standard `REINVENT` run. The target scaffold is dihydro-pyrazoloquinazoline and is a known active scaffold against 3-phosphoinositide-dependent protein kinase-1 (PDK1). The reference paper is:
#
# **<NAME>.; <NAME>. Structure-Based Optimization of Potent PDK1 Inhibitors. Bioorg. Med. Chem. Lett. 2010, 20 (14), 4095–4099. https://doi.org/10.1016/j.bmcl.2010.05.070.**
#
#
# ## 1. Setting the Paths
# _Please update the following code block such that it reflects your system's installation and execute it._
# +
# load dependencies
import os
import re
import json
import tempfile

# --------- change these path variables as required
reinvent_dir = os.path.expanduser("~/Desktop/Reinvent")
reinvent_env = os.path.expanduser("~/miniconda3/envs/reinvent.v3.0")
output_dir = os.path.expanduser("~/Desktop/REINVENT_AutoCL_demo")

# --------- do not change
# get the notebook's root path (keep any value set by a previous run)
try: ipynb_path
except NameError: ipynb_path = os.getcwd()

# if required, generate a folder to store the results
# (os.makedirs with exist_ok=True replaces the try/except FileExistsError
# idiom and also creates any missing parent directories)
os.makedirs(output_dir, exist_ok=True)
# -
# ## 2. Setting the Configuration
# In the cells below we will build a nested dictionary object that will be eventually converted to JSON file which in turn will be interpreted by `REINVENT`.
# You can find this file in your `output_dir` location.
# ### A) Declare the Run Type
# initialize the dictionary that will be serialized to the REINVENT JSON config
configuration = {}
configuration["version"] = 3                       # we are going to use REINVENT's newest release
configuration["run_type"] = "curriculum_learning"  # other run types: "sampling",
                                                   # "reinforcement_learning",
                                                   # "transfer_learning",
                                                   # "scoring" and "create_model"
# ### B) Sort out the logging details
# This includes `result_folder` path where the results will be produced.
#
# Also: `REINVENT` can send custom log messages to a remote location. We have retained this capability in the code. if the `recipient` value differs from `"local"`, `REINVENT` will attempt to POST the data to the specified `recipient`.
# add block to specify whether to run locally or not and
# where to store the results and logging
configuration["logging"] = dict(
    sender="http://0.0.0.1",  # only relevant if "recipient" is set to "remote"
    recipient="local",        # either local logging or a remote REST-interface
    logging_frequency=100,    # log every x-th steps
    logging_path=os.path.join(output_dir, "progress.log"),  # load this folder in tensorboard
    result_folder=os.path.join(output_dir, "results"),      # will hold the compounds (SMILES) and summaries
    job_name="Automated Curriculum Learning Demo",          # arbitrary job name for identification
    job_id="Demo",            # only relevant if "recipient" is set to a specific REST endpoint
)
# Create `parameters` field:
# +
# add the "parameters" block
configuration["parameters"] = {}
# First add the paths to the Prior, Agent, and set the curriculum type to automated
configuration["parameters"]["prior"] = os.path.join(ipynb_path, "models/random.prior.new")
configuration["parameters"]["agent"] = os.path.join(ipynb_path, "models/random.prior.new")
configuration["parameters"]["curriculum_type"] = "automated"
# -
# ### C) Specify the Curriculum Strategy
# Overview of important `REINVENT` parameters:
# * **Diversity Filter**: If the agent becomes very focussed, it tends to produce the similar molecules over and over (because they return high scores). To enrich different scaffolds, we can activate the diversity filter, which will "bin" the molecules into groups (scaffolds). Once a given bin is full, all other molecules with the same scaffold will be penalized score-wise, effectively "pushing" the agent out of a local minimum in the score landscape thus enriching diversity.
# * **Inception**: Sometimes agents "linger around" for a while before they (by chance) happen to pick up a trace and generate interesting compounds. To speed up this very early exploration, we can *incept* a couple of promising molecules as list of `SMILES`. Inception also allows storing of molecules up to `memory_size` which correspond to the highest scoring molecules which can be replayed back to the agent to keep it on track.
#
# These 2 parameters are relevant for any `REINVENT` experiment but there are additional features and considerations for a Curriculum Learning experiment. Curriculum Learning is split into a Curriculum Phase and Production Phase. During the Curriculum Phase, Curriculum Objectives are used to guide the agent to favourable chemical space. During the Production Phase, the Production Objective is activated and the agent samples for a pre-defined number of epochs, presumably generating favourable molecules.
# 1) A separate and distinct **Diversity Filter** can be specified in the Curriculum and Production Phases. This is particularly relevant as perhaps a **Diversity Filter** is not desired during the Curriculum Phase since the goal is to guide the agent to favourable chemical space and not necessarily generate molecules that satisfy the Production Objective (target objective). Conversely, once the Production Phase starts, initializing a **Diversity Filter** can ensure agent sampling of diverse minima to balance exploration and exploitation (Setting up a **Diversity Filter** for the Production Phase will be shown in the "Specify the Production Strategy" section)
#
# 2) A separate and distinct **Inception** can also be specified in the Curriculum and Production Phases. A relevant use case is that any stored molecules in the **Inception** memory during the Curriculum Phase may not be relevant in the Production Phase. In this case, initializing a new **Inception** will clear the memory.
#
# The below cell block will set up the Curriculum Strategy which provides all the parameters necessary in the Curriculum Phase.
# set up the Curriculum Strategy
#
# The six Curriculum Objectives only differ in their component name and target
# substructure SMILES, so they are generated from a single (name, SMILES)
# table instead of six near-identical nested dictionaries. The resulting
# configuration dictionary is identical to the hand-written version.
_CURRICULUM_SUBSTRUCTURES = [
    ("Pyrimidine", "[c]1[c][c]n[c]n1"),
    ("H-Bonding Ring", "[c]1[c][c]nc(n1)[N]"),
    ("H-Bonding Ring with Phenyl", "[c]1[c][c]c([c][c]1)[N]c2n[c][c][c]n2"),
    ("Double Ring", "[c]1[c][c]c([c][c]1)[N]c2n[c]c3c(n2)-[c][c][C][C]3"),
    ("Triple Ring", "[c]1[c][c]c([c][c]1)[N]c2n[c]c3c(n2)-c4c([c]n[n]4)[C][C]3"),
    ("Full Substructure", "[*]NC(=O)c1nn([*])c2c1CCc3cnc(Nc4ccccc4)nc23"),
]


def _make_curriculum_objective(name, smiles, score_threshold=0.8):
    """Build one Curriculum Objective that rewards matching `smiles`.

    The agent must reach an average score >= `score_threshold` before the
    next Curriculum Objective is activated.
    """
    return {
        "scoring_function": {
            "name": "custom_product",  # our default (alternative: "custom_sum")
            "parallel": False,
            "parameters": [{
                "component_type": "matching_substructure",  # enforce the match to a given substructure
                "name": name,                               # arbitrary name for the component
                "specific_parameters": {
                    "smiles": [
                        smiles                              # a match with this substructure is required
                    ]
                },
                "weight": 1}]                               # the weight of the component (default: 1)
        },
        "score_threshold": score_threshold
    }


configuration["parameters"]["curriculum_strategy"] = {
    "name": "user_defined",      # denotes that the order of Curriculum Objectives is defined by the user
    "max_num_iterations": 1500,  # total number of epochs for the Curriculum Phase; if the last
                                 # Curriculum Objective is not satisfied by then (agent score
                                 # >= threshold), the run stops
    "batch_size": 128,           # specifies how many molecules are generated per epoch
    "learning_rate": 0.0001,     # sets how strongly the agent is influenced by each epoch
    "sigma": 128,                # used to calculate the "augmented likelihood", see publication
    "diversity_filter": {
        "name": "NoFilter",      # other options are: "IdenticalTopologicalScaffold",
                                 # "IdenticalMurckoScaffold", and "ScaffoldSimilarity"
        "bucket_size": 25,       # the bin size; penalization will start once this is exceeded
        "minscore": 0.4,         # the minimum total score to be considered for binning
        "minsimilarity": 0.4     # the minimum similarity to be placed into the same bin
    },
    "inception": {
        "smiles": [],            # fill in a list of SMILES here that can be used (or leave empty)
        "memory_size": 100,      # sets how many molecules are to be remembered
        "sample_size": 10        # how many are to be sampled each epoch from the memory for experience replay
    },
    # Curriculum Objectives: sequentially activated scoring functions, one per
    # target substructure, each gated by the default 0.8 score threshold.
    "curriculum_objectives": [
        _make_curriculum_objective(name, smiles)
        for name, smiles in _CURRICULUM_SUBSTRUCTURES
    ],
}
# ### D) Specify the Production Strategy
#
# The Production Strategy provides the parameters to be used in the Production Phase. Here, the Production Objective (target objective) is activated. A new **Diversity Filter** and **Inception** can be initialized. We keep the Curriculum Phase **Inception** here as the last Curriculum Objective is the same as the Production Objective. Moreover, we continue to use **NoFilter** so the agent is not penalized for sampling the same scaffold as our only goal in this tutorial is to generate the target scaffold. Using a **Diversity Filter** would penalize the agent and eventually give compounds possessing the target scaffold a score of 0.
# set up the Production Strategy (NOTE: the original comment said "Curriculum
# Strategy"; this block configures the Production Phase)
configuration["parameters"]["production_strategy"] = {
    "name": "standard",
    "retain_inception": True,  # option to retain the inception from the Curriculum Phase
                               # retain it here since the last Curriculum Objective is the same as
                               # Production Objective. Previous top compounds will be relevant

    "n_steps": 100,            # number of epochs to run the Production Phase
    "batch_size": 128,         # specifies how many molecules are generated per epoch
    "learning_rate": 0.0001,   # sets how strongly the agent is influenced by each epoch
    "sigma": 128,              # used to calculate the "augmented likelihood", see publication
    "diversity_filter": {
        "name": "NoFilter",    # other options are: "IdenticalTopologicalScaffold",
                               # "IdenticalMurckoScaffold"", and "ScaffoldSimilarity"
        "bucket_size": 25,     # the bin size; penalization will start once this is exceeded
        "minscore": 0.4,       # the minimum total score to be considered for binning
        "minsimilarity": 0.4   # the minimum similarity to be placed into the same bin
    },
    "inception": {
        "smiles": [],          # fill in a list of SMILES here that can be used (or leave empty)
        "memory_size": 100,    # sets how many molecules are to be remembered
        "sample_size": 10      # how many are to be sampled each epoch from the memory for experience replay
    },
    # the Production Objective contains the final scoring function to be activated
    # here, it is the same scoring function as the last Curriculum Objective
    # as we want to continue sampling the target substructure
    "scoring_function": {
        "name": "custom_product",
        "parallel": False,
        "parameters": [{
            "component_type": "matching_substructure",
            "name": "Full Substructure",
            "specific_parameters": {
                "smiles": [
                    "[*]NC(=O)c1nn([*])c2c1CCc3cnc(Nc4ccccc4)nc23"
                ]
            },
            "weight": 1}]
    }
}
# # Write Out the Configuration
# We now have successfully filled the dictionary and will write it out as a `JSON` file in the output directory. Please have a look at the file before proceeding in order to see how the paths have been inserted where required and the `dict` -> `JSON` translations (e.g. `True` to `true`) have taken place.
# Serialize the assembled configuration dictionary to the output directory.
configuration_JSON_path = os.path.join(output_dir, "AutoCL_config.json")
with open(configuration_JSON_path, 'w') as config_file:
    config_file.write(json.dumps(configuration, indent=4))
# ## 4. Run `REINVENT`
# Now it is time to execute `REINVENT` locally. Note, that depending on the number of epochs (steps) and the execution time of the scoring function components, this might take a while. The "matching_substructure" component should be fairly quick, and the total runtime should be under 15 minutes.
#
# **Note**: Sometimes, `REINVENT` will be unsuccessful in generating the desired substructure in this demo as there is stochasticity involved in the sampling process. The substructure was not present in the training set for the prior and thus represents a challenging task.
#
# The command-line execution looks like this:
# ```
# # activate envionment
# conda activate reinvent.v3.0
#
# # execute REINVENT
# python <your_path>/input.py <config>.json
# ```
# +
# %%capture captured_err_stream --no-stderr
# execute REINVENT from the command-line
# !{reinvent_env}/bin/python {reinvent_dir}/input.py {configuration_JSON_path}
# -
# ## Analyse The Results
# In order to analyze the run, we can use `tensorboard`:
#
# ```
# # go to the root folder of the output
# # cd <your_path>/REINVENT_AutoCL_demo
#
# # make sure you have activated the proper environment
# conda activate reinvent.v3.0
#
# # start tensorboard
# tensorboard --logdir progress.log
#
#
# ```
#
# Then copy the link provided to a browser window, e.g. "http://workstation.url.com:6006/". The following figures are example plots - remember, that there is always some randomness involved so replicate runs will follow different training progressions.
#
# **Note: There is a chance that the curriculum learning run does not find the target scaffold. The target scaffold is not present in the prior and thus may not be found in 1500 epochs (as enforced in the configuration `JSON`)**
#
# In `tensorboard` you can monitor the individual scoring function components. By analyzing the average score plot, we can see that the agent gradually constructs the target scaffold. The plot is annotated with the substructures. We further observe that the `Fraction_valid_SMILES` was high throughout.
#
# 
#
# There is also an "Images" tab available in `tensorboard` that lets you browse through the compounds generated in an easy way. In the molecules, the target scaffold is highlighted in red (if present). Also, the total scores are given per molecule. Below is what was observed for epoch 1060. The generated compounds feature the target scaffold and therefore all possess the maximum score of 1.000.
#
# 
#
# Finally, the scaffold memories are `CSV` files containing all the compounds collected with each Curriculum Objective or Production Objective activated. All Curriculum Objective scaffold memories are identified by a number suffix starting from 0 (denoting the first Curriculum Objective). The scaffold memory for the Production Objective is `scaffold_memory.csv`. The files are saved at:
#
# `<your_path>/REINVENT_AutoCL_demo/results`
| notebooks/Automated_Curriculum_Learning_Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Model predictive speed and steering control
#
# code:
#
# https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py
#
# This is a path tracking simulation using model predictive control (MPC).
#
# The MPC controller controls vehicle speed and steering based on a linearized vehicle model.
#
# This code uses cvxpy as an optimization modeling tool.
#
# - [Welcome to CVXPY 1\.0 — CVXPY 1\.0\.6 documentation](http://www.cvxpy.org/)
# # MPC modeling
#
# State vector is:
# $$ z = [x, y, v,\phi]$$ x: x-position, y:y-position, v:velocity, φ: yaw angle
#
# Input vector is:
# $$ u = [a, \delta]$$ a: acceleration, δ: steering angle
#
#
# The MPC controller minimizes this cost function for path tracking:
#
# $$min\ Q_f(z_{T,ref}-z_{T})^2+Q\Sigma({z_{t,ref}-z_{t}})^2+R\Sigma{u_t}^2+R_d\Sigma({u_{t+1}-u_{t}})^2$$
#
# z_ref comes from the target path and speed.
# subject to:
# - Linearized vehicle model
# $$z_{t+1}=Az_t+Bu+C$$
# - Maximum steering speed
# $$|u_{t+1}-u_{t}|<du_{max}$$
# - Maximum steering angle
# $$|u_{t}|<u_{max}$$
# - Initial state
# $$z_0 = z_{0,ob}$$
# - Maximum and minimum speed
# $$v_{min} < v_t < v_{max}$$
# - Maximum and minimum input
# $$u_{min} < u_t < u_{max}$$
#
# This is implemented at
#
# https://github.com/AtsushiSakai/PythonRobotics/blob/f51a73f47cb922a12659f8ce2d544c347a2a8156/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py#L247-L301
# # Vehicle model linearization
#
#
#
# Vehicle model is
# $$ \dot{x} = vcos(\phi)$$
# $$ \dot{y} = vsin(\phi)$$
# $$ \dot{v} = a$$
# $$ \dot{\phi} = \frac{vtan(\delta)}{L}$$
#
#
#
#
# ODE is
#
# $$ \dot{z} =\frac{\partial }{\partial z} z = f(z, u) = A'z+B'u$$
#
#
# where
#
# \begin{equation*}
# A' =
# \begin{bmatrix}
# \frac{\partial }{\partial x}vcos(\phi) &
# \frac{\partial }{\partial y}vcos(\phi) &
# \frac{\partial }{\partial v}vcos(\phi) &
# \frac{\partial }{\partial \phi}vcos(\phi)\\
# \frac{\partial }{\partial x}vsin(\phi) &
# \frac{\partial }{\partial y}vsin(\phi) &
# \frac{\partial }{\partial v}vsin(\phi) &
# \frac{\partial }{\partial \phi}vsin(\phi)\\
# \frac{\partial }{\partial x}a&
# \frac{\partial }{\partial y}a&
# \frac{\partial }{\partial v}a&
# \frac{\partial }{\partial \phi}a\\
# \frac{\partial }{\partial x}\frac{vtan(\delta)}{L}&
# \frac{\partial }{\partial y}\frac{vtan(\delta)}{L}&
# \frac{\partial }{\partial v}\frac{vtan(\delta)}{L}&
# \frac{\partial }{\partial \phi}\frac{vtan(\delta)}{L}\\
# \end{bmatrix}
# \\
# =
# \begin{bmatrix}
# 0 & 0 & cos(\bar{\phi}) & -\bar{v}sin(\bar{\phi})\\
# 0 & 0 & sin(\bar{\phi}) & \bar{v}cos(\bar{\phi}) \\
# 0 & 0 & 0 & 0 \\
# 0 & 0 &\frac{tan(\bar{\delta})}{L} & 0 \\
# \end{bmatrix}
# \end{equation*}
#
#
# \begin{equation*}
# B' =
# \begin{bmatrix}
# \frac{\partial }{\partial a}vcos(\phi) &
# \frac{\partial }{\partial \delta}vcos(\phi)\\
# \frac{\partial }{\partial a}vsin(\phi) &
# \frac{\partial }{\partial \delta}vsin(\phi)\\
# \frac{\partial }{\partial a}a &
# \frac{\partial }{\partial \delta}a\\
# \frac{\partial }{\partial a}\frac{vtan(\delta)}{L} &
# \frac{\partial }{\partial \delta}\frac{vtan(\delta)}{L}\\
# \end{bmatrix}
# \\
# =
# \begin{bmatrix}
# 0 & 0 \\
# 0 & 0 \\
# 1 & 0 \\
# 0 & \frac{\bar{v}}{Lcos^2(\bar{\delta})} \\
# \end{bmatrix}
# \end{equation*}
#
#
# You can get a discrete-time model with forward Euler discretization with sampling time dt.
#
# $$z_{k+1}=z_k+f(z_k,u_k)dt$$
#
# Using a first-order Taylor expansion around zbar and ubar
# $$z_{k+1}=z_k+(f(\bar{z},\bar{u})+A'z_k+B'u_k-A'\bar{z}-B'\bar{u})dt$$
#
# $$z_{k+1}=(I + dtA')z_k+(dtB')u_k + (f(\bar{z},\bar{u})-A'\bar{z}-B'\bar{u})dt$$
#
# So,
#
# $$z_{k+1}=Az_k+Bu_k +C$$
#
# where,
#
# \begin{equation*}
# A = (I + dtA')\\
# =
# \begin{bmatrix}
# 1 & 0 & cos(\bar{\phi})dt & -\bar{v}sin(\bar{\phi})dt\\
# 0 & 1 & sin(\bar{\phi})dt & \bar{v}cos(\bar{\phi})dt \\
# 0 & 0 & 1 & 0 \\
# 0 & 0 &\frac{tan(\bar{\delta})}{L}dt & 1 \\
# \end{bmatrix}
# \end{equation*}
# \begin{equation*}
# B = dtB'\\
# =
# \begin{bmatrix}
# 0 & 0 \\
# 0 & 0 \\
# dt & 0 \\
# 0 & \frac{\bar{v}}{Lcos^2(\bar{\delta})}dt \\
# \end{bmatrix}
# \end{equation*}
# \begin{equation*}
# C = (f(\bar{z},\bar{u})-A'\bar{z}-B'\bar{u})dt\\
# = dt(
# \begin{bmatrix}
# \bar{v}cos(\bar{\phi})\\
# \bar{v}sin(\bar{\phi}) \\
# \bar{a}\\
# \frac{\bar{v}tan(\bar{\delta})}{L}\\
# \end{bmatrix}
# -
# \begin{bmatrix}
# \bar{v}cos(\bar{\phi})-\bar{v}sin(\bar{\phi})\bar{\phi}\\
# \bar{v}sin(\bar{\phi})+\bar{v}cos(\bar{\phi})\bar{\phi}\\
# 0\\
# \frac{\bar{v}tan(\bar{\delta})}{L}\\
# \end{bmatrix}
# -
# \begin{bmatrix}
# 0\\
# 0 \\
# \bar{a}\\
# \frac{\bar{v}\bar{\delta}}{Lcos^2(\bar{\delta})}\\
# \end{bmatrix}
# )\\
# =
# \begin{bmatrix}
# \bar{v}sin(\bar{\phi})\bar{\phi}dt\\
# -\bar{v}cos(\bar{\phi})\bar{\phi}dt\\
# 0\\
# -\frac{\bar{v}\bar{\delta}}{Lcos^2(\bar{\delta})}dt\\
# \end{bmatrix}
# \end{equation*}
# This equation is implemented at
#
# https://github.com/AtsushiSakai/PythonRobotics/blob/eb6d1cbe6fc90c7be9210bf153b3a04f177cc138/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py#L80-L102
# # Reference
#
# - [Vehicle Dynamics and Control \| Rajesh Rajamani \| Springer](http://www.springer.com/us/book/9781461414322)
#
# - [MPC Course Material \- MPC Lab @ UC\-Berkeley](http://www.mpc.berkeley.edu/mpc-course-material)
#
| PathTracking/model_predictive_speed_and_steer_control/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow]
# language: python
# name: conda-env-tensorflow-py
# ---
# +
# 自动计算cell的计算时间
# %load_ext autotime
# %matplotlib inline
# %config InlineBackend.figure_format='svg' #矢量图设置,让绘图更清晰
# + language="bash"
#
# # 增加更新
# git add *.ipynb *.md
#
# git remote -v
#
# git commit -m '更新 ch14 #4 Aug 18, 2021'
#
# git push origin master
# +
# Select which GPU TensorFlow may use (prevents it from grabbing all devices).
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")

if gpus:
    gpu0 = gpus[0]  # if several GPUs are present, use only GPU 0
    tf.config.experimental.set_memory_growth(gpu0, True)  # allocate GPU memory on demand instead of all at once
    # Alternatively, cap GPU memory at a fixed amount (e.g. 4 GB):
    #tf.config.experimental.set_virtual_device_configuration(gpu0,
    #    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
    tf.config.set_visible_devices([gpu0],"GPU")
# -
# # 使用卷积神经网络的深度计算机视觉
# 尽管 IBM 的深蓝超级计算机早在 1996 年就击败了国际象棋世界冠军加里·卡斯帕罗夫,但直到最近,计算机才能够可靠地执行看似微不足道的任务,例如检测图片中的小狗或识别口语。 为什么这些任务对我们人类来说如此轻松? 答案在于,感知主要发生在我们的意识领域之外,在我们大脑中专门的视觉、听觉和其他感官模块内。 当感官信息到达我们的意识时,它已经被高级特征所装饰; 例如,当你看一张可爱的小狗的照片时,你不能选择不看小狗,不注意它的可爱。 你也无法解释你是如何认出一只可爱的小狗的; 这对你来说很明显。 因此,我们不能相信我们的主观经验:感知根本不是微不足道的,要理解它,我们必须看看感官模块是如何工作的。
# 卷积神经网络 (CNN) 起源于对大脑视觉皮层的研究,自 1980 年代以来一直用于图像识别。 在过去的几年里,由于计算能力的增加、可用训练数据的数量以及第 11 章中介绍的训练深度网络的技巧,CNN 已经在一些复杂的视觉任务上取得了超人的表现。 它们为图像搜索服务、自动驾驶汽车、自动视频分类系统等提供支持。 此外,CNN 不仅限于视觉感知:它们在许多其他任务上也很成功,例如语音识别和自然语言处理。 但是,我们现在将专注于可视化应用程序。
# 在本章中,我们将探讨 CNN 的来源、它们的构建块是什么样的,以及如何使用 TensorFlow 和 Keras 实现它们。 然后我们将讨论一些最好的 CNN 架构,以及其他视觉任务,包括对象检测(对图像中的多个对象进行分类并在它们周围放置边界框)和语义分割(根据对象的类别对每个像素进行分类) 属于)。
# ## 视觉皮层的架构
# <NAME> 和 <NAME> 在 1958 年和 1959 年(以及几年后在猴子身上)对猫进行了一系列实验,对视觉皮层的结构提供了重要的见解(作者获得了诺贝尔生理学或医学奖) 1981 年为他们的工作)。特别是,他们发现视觉皮层中的许多神经元有一个小的局部感受野,这意味着它们只对位于视野有限区域内的视觉刺激做出反应(见图 14-1,其中 5 个局部感受野)神经元用虚线圆圈表示)。不同神经元的感受野可能重叠,它们一起平铺整个视野。
#
# 此外,作者表明,一些神经元仅对水平线的图像作出反应,而其他神经元仅对不同方向的线作出反应(两个神经元可能具有相同的感受野,但对不同的线方向作出反应)。他们还注意到一些神经元具有更大的感受野,它们对更复杂的模式做出反应,这些模式是较低级别模式的组合。这些观察导致了这样的想法:高级神经元基于相邻的低级神经元的输出(在图 14-1 中,请注意每个神经元仅连接到前一层的几个神经元)。这种强大的架构能够检测视野任何区域的各种复杂模式。
# 
# 这些对视觉皮层的研究启发了新认知机,1980 年引入,逐渐演变成我们现在所说的卷积神经网络。 一个重要的里程碑是 Yann LeCun 等人 1998 年的一篇论文。 引入了著名的 LeNet-5 架构,被银行广泛用于识别手写支票号码。 这个架构有一些你已经知道的构建块,比如全连接层和 sigmoid 激活函数,但它也引入了两个新的构建块:卷积层和池化层。 现在让我们看看它们。
# > 为什么不简单地使用具有全连接层的深度神经网络来执行图像识别任务? 不幸的是,尽管这对于小图像(例如 MNIST)来说效果很好,但由于需要大量参数,它对于较大的图像会失效。 例如,一张 100 × 100 像素的图像有 10,000 个像素,如果第一层只有 1,000 个神经元(这已经严重限制了传输到下一层的信息量),这意味着总共有 1000 万个连接。 这只是第一层。 CNN 使用部分连接的层和权重共享来解决这个问题。
# ## 卷积层
# CNN 最重要的构建块是卷积层:第一个卷积层中的神经元并没有连接到输入图像中的每个像素(就像它们在前几章讨论的层中一样),而仅连接到它们的接受像素 字段(见图 14-2)。 反过来,第二个卷积层中的每个神经元只连接到位于第一层小矩形内的神经元。 这种架构允许网络在第一个隐藏层专注于小的低级特征,然后在下一个隐藏层将它们组装成更大的更高级特征,依此类推。 这种层次结构在现实世界的图像中很常见,这也是 CNN 在图像识别方面表现如此出色的原因之一。
# 
# > 到目前为止,我们看到的所有多层神经网络都有由一长串神经元组成的层,我们必须将输入图像展平为 1D,然后再将它们输入神经网络。 在 CNN 中,每一层都以 2D 形式表示,这使得将神经元与其相应的输入相匹配变得更容易。
# 位于给定层第 i 行、第 j 列的神经元连接到位于第 i 行到 i + fh – 1、第 j 列到 j + fw – 1 行的前一层神经元的输出,其中 fh 和 fw 是 感受野的高度和宽度(见图 14 3)。 为了使图层具有与前一层相同的高度和宽度,通常在输入周围添加零,如图所示。 这称为零填充。
# 
# 还可以通过将感受野隔开来将一个大的输入层连接到一个小得多的层,如图 14-4 所示。 这大大降低了模型的计算复杂度。 从一个感受野到下一个感受野的转变称为步幅。 在图中,一个 5 × 7 的输入层(加上零填充)连接到一个 3 × 4 的层,使用 3 × 3 的感受野和 2 的步幅(在这个例子中,两个方向的步幅相同,但它 不必如此)。 位于上层第 i 行、第 j 列的神经元连接到位于第 $i × s_h$ 至 $i × s_h + f_h – 1$、第 $j × s_w$ 至 $j × s_w + f_w$ 列的上一层神经元的输出 – 1,其中 $s_h$ 和 $s_w$ 是垂直和水平步幅。
# 
# ### 卷积核
# 神经元的权重可以表示为感受野大小的小图像。 例如,图 14-5 显示了两组可能的权重,称为过滤器(或卷积核)。 第一个表示为一个黑色方块,中间有一条垂直的白线(它是一个 7 × 7 的矩阵,除了中央一列全是 1 之外,全是 0); 使用这些权重的神经元将忽略其感受野中除中央垂直线之外的所有内容(因为所有输入都将乘以 0,除了位于中央垂直线上的输入)。 第二个过滤器是一个黑色方块,中间有一条水平白线。 再一次,使用这些权重的神经元将忽略其感受野中除中央水平线之外的所有内容。
# 现在,如果一层中的所有神经元都使用相同的垂直线滤波器(和相同的偏置项),并且您将图 14-5 所示的输入图像(底部图像)提供给网络,则该层将输出左上角的图像 . 请注意,垂直白线得到增强,而其余部分变得模糊。 类似地,如果所有神经元都使用相同的水平线过滤器,则您会得到右上方的图像; 请注意,水平白线得到增强,而其余部分变得模糊。 因此,一个充满神经元的层使用相同的过滤器输出一个特征图,它突出显示图像中激活过滤器最多的区域。 当然,您不必手动定义过滤器:相反,在训练期间,卷积层将自动学习对其任务最有用的过滤器,并且上面的层将学习将它们组合成更复杂的模式。
# 
# ### 堆叠多个特征图
# 到现在为止,为了简单起见,我已经将每个卷积层的输出表示为一个 2D 层,但实际上一个卷积层有多个过滤器(您决定多少)并且每个过滤器输出一个特征图,因此更准确地表示 在 3D 中(见图 14-6)。 它在每个特征图中每个像素有一个神经元,并且给定特征图中的所有神经元共享相同的参数(即相同的权重和偏置项)。 不同特征图中的神经元使用不同的参数。 神经元的感受野与前面描述的相同,但它扩展到所有先前层的特征图。 简而言之,卷积层同时将多个可训练滤波器应用于其输入,使其能够在其输入的任何位置检测多个特征。
# 特征图中的所有神经元共享相同的参数这一事实大大减少了模型中的参数数量。 一旦 CNN 学会了在一个位置识别模式,它就可以在任何其他位置识别它。 相比之下,一旦常规 DNN 学会了在一个位置识别模式,它只能在该特定位置识别它。
# 输入图像也由多个子层组成:每个颜色通道一个。 通常有三种:红色、绿色和蓝色 (RGB)。 灰度图像只有一个通道,但有些图像可能有更多通道——例如,捕获额外光频率(如红外线)的卫星图像。
# 
# 具体来说,位于给定卷积层 l 中特征图 k 的第 i 行第 j 列的神经元连接到前一层 $l – 1$ 中位于第 $i × s_h$ 到 $i × s_h + f_h$ 行的神经元的输出 – 1 列 $j × s_w$ 到 $j × s_w + f_w – 1$,跨越所有特征图(在第 $l – 1$ 层)。 请注意,位于同一行 i 和列 j 但在不同特征图中的所有神经元都连接到前一层中完全相同的神经元的输出。
# 公式 14-1 在一个大的数学公式中总结了前面的解释:它显示了如何计算卷积层中给定神经元的输出。 由于所有不同的索引,它有点难看,但它所做的只是计算所有输入的加权和,加上偏差项。
# 
# 在这个等式中:
#
# * $z_{i, j, k}$ 是位于卷积层(第l 层)的特征图k 中第i 行第j 列的神经元的输出。
# * 如前所述,$s_h$ 和$s_w$ 是垂直和水平步长,$f_h$ 和$f_w$ 是感受野的高度和宽度,$f_{n'}$ 是前一层(层$l – 1$)中的特征图的数量。
# * $x_{i', j', k'}$ 是位于第 $l – 1$ 层,第 $i'$ 行,第 $j'$ 列,特征图 $k'$(如果前一层是输入层,则为通道 $k'$)中神经元的输出。
# * $b_k$ 是特征图k(在第l 层)的偏置项。 您可以将其视为调整特征图 k 整体亮度的旋钮。
# * $w_{u, v, k′ ,k}$ 是第 $l$ 层的特征图 $k$ 中的任何神经元与其位于第 $u$ 行、第 $v$ 列(相对于神经元的感受野)和特征图 $k′$ 的输入之间的连接权重。
# ### TensorFlow 实现
# 在 TensorFlow 中,每个输入图像通常表示为形状 [高度、宽度、通道] 的 3D 张量。 小批量表示为形状 [小批量大小、高度、宽度、通道] 的 4D 张量。 卷积层的权重表示为形状为 [$f_h, f_w, f_{n'}, f_n$] 的 4D 张量。 卷积层的偏置项简单地表示为形状为 [$f_n$] 的一维张量。
# 我们来看一个简单的例子。 下面的代码使用 Scikit-Learn 的 `load_sample_image()` 加载两个示例图像(加载两个彩色图像,一个是中国寺庙,另一个是花),然后创建两个过滤器并将它们应用于两个图像,最后 它显示生成的特征图之一。 请注意,您必须通过 `pip` 安装 `Pillow` 包才能使用 `load_sample_image()`。
# +
from sklearn.datasets import load_sample_image
# Load two sample RGB images; dividing by 255 rescales the uint8 pixel
# intensities (0-255) to floats in the 0-1 range.
china = load_sample_image("china.jpg") / 255
flower = load_sample_image("flower.jpg") / 255
# -
china.shape
# +
import matplotlib.pyplot as plt
import numpy as np
#plt.imshow(china)
# Stack both images into one 4D batch: (batch_size, height, width, channels).
images = np.array([china, flower])
batch_size, height, width, channels = images.shape
# -
images.shape
# +
# Create 2 filters of shape (f_h, f_w, channels, n_filters) = (7, 7, 3, 2),
# all zeros except for a line of ones.
filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters[:, 3, :, 0] = 1 # vertical line
filters[3, :, :, 1] = 1 # horizontal line
# Apply both filters with stride 1 and zero ("SAME") padding, then show the
# second feature map (horizontal-line detector) of the first image.
outputs = tf.nn.conv2d(images, filters, strides=1, padding="SAME")
plt.imshow(outputs[0, :, :, 1], cmap="gray") # plot 1st image's 2nd feature map
plt.show()
# -
# 让我们来看看这段代码:
# * 每个颜色通道的像素强度表示为从 0 到 255 的一个字节,因此我们只需通过除以 255 来缩放这些特征,以获得范围从 0 到 1 的浮点数。
# * 然后我们创建两个 7 × 7 过滤器(一个中间有一条垂直的白线,另一个中间有一条水平的白线)。
# * 我们使用 tf.nn.conv2d() 函数将它们应用于两个图像,该函数是 TensorFlow 的低级深度学习 API 的一部分。 在这个例子中,我们使用零填充(padding="SAME")和 1 的步幅。
# * 最后,我们绘制生成的特征图之一(类似于图 14-5 中的右上角图像)。
# tf.nn.conv2d() 行值得更多解释:
# * 图像是输入小批量(4D 张量,如前所述)。
# * 过滤器是要应用的一组过滤器(也是 4D 张量,如前所述)。
# * strides 等于1,但它也可以是具有四个元素的一维数组,其中两个中心元素是垂直和水平步长($s_h$ 和$s_w$)。第一个和最后一个元素当前必须等于 1。它们有一天可能用于指定批处理步幅(跳过某些实例)和通道步幅(跳过一些前一层的特征图或通道)。
# * padding
# - 必须是“SAME”或“VALID”:——如果设置为“SAME”,卷积层在必要时使用零填充。输出大小设置为输入神经元的数量除以步幅,四舍五入。例如,如果输入大小为 13,步长为 5(见图 14-7),则输出大小为 3(即 13 / 5 = 2.6,四舍五入为 3)。然后在输入周围尽可能均匀地添加零,根据需要当 strides=1 时,层的输出将具有与其输入相同的空间维度(宽度和高度),因此名称相同。
# - 如果设置为“VALID”,则卷积层不使用零填充,可能会忽略输入图像底部和右侧的一些行和列,具体取决于步幅,如图 14-7 所示(为简单起见,仅 此处显示的是水平维度,但当然相同的逻辑适用于垂直维度)。 这意味着每个神经元的感受野都严格位于输入内部的有效位置(它不会越界),因此名称有效。
# 在这个例子中,我们手动定义了过滤器,但在真正的 CNN 中,您通常会将过滤器定义为可训练变量,以便神经网络可以了解哪些过滤器效果最好,如前所述。 使用 keras.layers.Conv2D 层,而不是手动创建变量:
# Fix: `keras` is first imported much later in this notebook, so running
# this cell in order raised a NameError -- import it here so the cell is
# self-contained.
from tensorflow import keras

# A convolutional layer with 32 filters of size 3x3, stride 1, "same"
# (zero) padding, and ReLU activation.
conv = keras.layers.Conv2D(filters=32,
                           kernel_size=3,
                           strides=1,
                           padding="same",
                           activation="relu")
# 这段代码创建了一个包含 32 个过滤器的 Conv2D 层,每个过滤器为 3 × 3,使用 1 步长(水平和垂直)和“相同”填充,并将 ReLU 激活函数应用于其输出。 如您所见,卷积层有很多超参数:您必须选择过滤器的数量、高度和宽度、步幅和填充类型。 与往常一样,您可以使用交叉验证来找到正确的超参数值,但这非常耗时。 我们稍后将讨论常见的 CNN 架构,让您了解哪些超参数值在实践中效果最佳。
# ### 内存要求
# CNN 的另一个问题是卷积层需要大量 RAM。在训练期间尤其如此,因为反向传播的反向传播需要在正向传播期间计算的所有中间值。
#
# 例如,考虑一个具有 5 × 5 滤波器的卷积层,输出 200 个大小为 150 × 100、步幅为 1 和“相同”填充的特征图。如果输入是 150 × 100 RGB 图像(三个通道),则参数数量为 (5 × 5 × 3 + 1) × 200 = 15,200(+1 对应于偏差项),相比之下相当小7 然而,200 个特征图中的每一个都包含 150 × 100 个神经元,每个神经元都需要计算其 5 × 5 × 3 = 75 个输入的加权和:总共 2.25 亿个浮点数乘法。不像全连接层那么糟糕,但仍然是计算密集型的。此外,如果特征图使用 32 位浮点数表示,那么卷积层的输出将占用 200 × 150 × 100 × 32 = 9600 万位(12 MB)的 RAM。这只是一个例子——如果训练批处理包含 100 个实例,那么这一层将使用 1.2 GB 的 RAM!
# 在推理期间(即对新实例进行预测时),只要计算下一层,就可以释放一层占用的 RAM,因此您只需要两个连续层所需的 RAM。 但是在训练期间,前向传递期间计算的所有内容都需要保留以供反向传递使用,因此所需的 RAM 量(至少)是所有层所需的 RAM 总量。
# 如果训练因内存不足错误而崩溃,您可以尝试减小小批量大小。 或者,您可以尝试使用步幅降低维度,或删除几层。 或者您可以尝试使用 16 位浮点数而不是 32 位浮点数。 或者您可以将 CNN 分布在多个设备上。
# 现在让我们看看 CNN 的第二个常见构建块:池化层。
# ## 池化层
# 一旦你理解了卷积层的工作原理,池化层就很容易掌握了。 他们的目标是对输入图像进行二次采样(即缩小),以减少计算负载、内存使用和参数数量(从而限制过拟合的风险)。
# 就像在卷积层中一样,池化层中的每个神经元都连接到前一层中有限数量的神经元的输出,这些神经元位于一个小的矩形感受野内。 您必须像以前一样定义它的大小、步幅和填充类型。 然而,池化神经元没有权重。 它所做的只是使用聚合函数(例如最大值或平均值)聚合输入。 图 14-8 显示了一个最大池化层,它是最常见的池化层类型。 在这个例子中,我们使用一个 2 × 2 的池化内核,步长为 2,没有填充。 只有每个感受野中的最大输入值才能进入下一层,而其他输入则被丢弃。 例如,在图 14-8 左下方的感受野中,输入值为 1、5、3、2,因此只有最大值 5 传播到下一层。 由于步幅为 2,输出图像具有输入图像的一半高度和一半宽度(由于我们不使用填充,因此向下取整)。
# 
# 池化层通常独立地作用于每个输入通道,因此输出深度与输入深度相同。
# 除了减少计算、内存使用和参数数量之外,最大池化层还为小翻译引入了一定程度的不变性,如图 14-9 所示。这里我们假设亮像素的值低于暗像素,我们考虑三个图像(A、B、C)通过一个最大池化层,内核大小为 2 × 2,步长为 2。图像 B 和 C 是相同的如图像 A,但向右移动了一个和两个像素。如您所见,图像 A 和 B 的最大池化层的输出是相同的。这就是平移不变性的意思。对于图像 C,输出是不同的:它向右移动一个像素(但仍然有 75% 的不变性)。通过在 CNN 中每隔几层插入一个最大池化层,就有可能在更大范围内获得某种程度的平移不变性。此外,最大池化提供了少量的旋转不变性和轻微的尺度不变性。这种不变性(即使它是有限的)在预测不应该依赖于这些细节的情况下很有用,例如在分类任务中。
# 
# 然而,最大池化也有一些缺点。 首先,它显然是非常具有破坏性的:即使使用很小的 2 × 2 内核和 2 步长,输出在两个方向上都会小两倍(因此它的面积会小四倍),只需丢弃 75% 的输入 值。 在某些应用程序中,不变性是不可取的。 以语义分割(根据像素所属的对象对图像中的每个像素进行分类的任务,我们将在本章稍后探讨):显然,如果输入图像向右平移一个像素,则输出 也应该向右平移一个像素。 这种情况下的目标是等方差,而不是不变性:输入的微小变化应该导致输出的相应微小变化。
# ### TensorFlow 实现
# 在 TensorFlow 中实现最大池化层非常简单。 以下代码使用 2 × 2 内核创建最大池化层。 步幅默认为内核大小,因此该层将使用步幅为 2(水平和垂直)。 默认情况下,它使用“有效”填充(即,根本没有填充):
max_pool = keras.layers.MaxPool2D(pool_size=2)
# 要创建平均池化层,只需使用 AvgPool2D 而不是 MaxPool2D。 正如您所料,它的工作原理与最大池化层完全相同,只是它计算平均值而不是最大值。 平均池化层曾经非常流行,但现在人们大多使用最大池化层,因为它们通常表现更好。 这似乎令人惊讶,因为计算平均值通常比计算最大值丢失的信息少。 但另一方面,最大池化只保留最强的特征,去掉所有无意义的特征,因此下一层得到更清晰的信号。 此外,最大池化提供比平均池化更强的平移不变性,并且它需要的计算量略少。
# 请注意,最大池化和平均池化可以沿深度维度而不是空间维度执行,尽管这并不常见。 这可以让 CNN 学会对各种特征保持不变。 例如,它可以学习多个过滤器,每个过滤器检测相同模式的不同旋转(例如手写数字;见图 14-10),并且深度最大池化层将确保输出是相同的,无论 回转。 CNN 可以类似地学会对其他任何事物保持不变:厚度、亮度、偏斜、颜色等。
# 
# Keras 不包含深度最大池化层,但 TensorFlow 的低级深度学习 API 包含:只需使用 tf.nn.max_pool() 函数,并将内核大小和步幅指定为 4 个元组(即大小为 4 的元组) . 每个的前三个值应该是 1:这表示沿着批次、高度和宽度维度的内核大小和步幅应该是 1。最后一个值应该是沿着深度维度你想要的内核大小和步幅——例如 , 3(这必须是输入深度的除数;如果前一层输出 20 个特征图将不起作用,因为 20 不是 3 的倍数):
# Depthwise max pooling via the low-level API: kernel and stride of 3 along
# the channel axis only (the leading 1s leave batch, height, and width
# untouched); 3 must divide the input depth.
# Fix: tf.nn.max_pool requires the padding string in upper case ("VALID" or
# "SAME"); the lower-case "valid" accepted by Keras layers raises an error here.
output = tf.nn.max_pool(images,
                        ksize=(1, 1, 1, 3),
                        strides=(1, 1, 1, 3),
                        padding="VALID")
# 如果您想将其作为层包含在 Keras 模型中,请将其包装在 Lambda 层中(或创建自定义 Keras 层):
# Wrap the depthwise max pool in a Lambda layer so it can be used inside a
# Keras model.
# Fix: tf.nn.max_pool requires upper-case padding ("VALID"), not "valid".
depth_pool = keras.layers.Lambda(lambda X: tf.nn.max_pool(X, ksize=(1, 1, 1, 3),
                                                          strides=(1, 1, 1, 3),
                                                          padding="VALID"))
# 您在现代架构中经常会看到的最后一种池化层是全局平均池化层。 它的工作方式非常不同:它所做的只是计算每个整个特征图的平均值(它就像一个使用与输入具有相同空间维度的池化内核的平均池化层)。 这意味着它只为每个特征图和每个实例输出一个数字。 虽然这当然是极具破坏性的(特征图中的大部分信息都丢失了),但它可以用作输出层,我们将在本章后面看到。 要创建这样的层,只需使用 keras.layers.GlobalAvgPool2D 类:
global_avg_pool = keras.layers.GlobalAvgPool2D()
# 它相当于这个简单的 Lambda 层,它计算空间维度(高度和宽度)的平均值:
global_avg_pool = keras.layers.Lambda(lambda X: tf.reduce_mean(X, axis=[1, 2]))
# 现在您已经了解了创建卷积神经网络的所有构建块。 让我们看看如何组装它们。
# ## CNN架构
# 典型的 CNN 架构堆叠了几个卷积层(每个卷积层后通常跟一个 ReLU 层),然后是一个池化层,然后是另外几个卷积层(+ReLU),然后是另一个池化层,依此类推。 随着图像在网络中前进,图像变得越来越小,但由于卷积层(见图 14-11),它通常也会变得越来越深(即具有更多的特征图)。 在堆栈的顶部,添加了一个常规的前馈神经网络,由几个全连接层(+ReLU)组成,最后一层输出预测(例如,输出估计类概率的 softmax 层)。
# 
# > 一个常见的错误是使用过大的卷积核。 例如,不使用具有 5 × 5 内核的卷积层,而是将具有 3 × 3 内核的两层堆叠起来:它会使用更少的参数和更少的计算,并且通常会表现得更好。 一个例外是第一个卷积层:它通常可以有一个大内核(例如,5 × 5),通常步幅为 2 或更多:这将减少图像的空间维度而不会丢失太多信息,并且由于 输入图像一般只有三个通道,成本不会太高。
# 以下是如何实现一个简单的 CNN 来处理 Fashion MNIST 数据集(在第 10 章中介绍):
# +
from tensorflow import keras

# A basic CNN for Fashion MNIST: three conv/pool stages with the filter
# count doubling after each pooling step (64 -> 128 -> 256), followed by a
# dense classification head with dropout.
model = keras.models.Sequential([
    # 28x28 grayscale input; a large 7x7 kernel is acceptable for the first
    # layer because the input is small and has a single channel.
    keras.layers.Conv2D(64, 7, activation="relu", padding="same",
                        input_shape=[28, 28, 1]),
    keras.layers.MaxPooling2D(2),  # halves each spatial dimension
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    # Dense layers need a 1D feature vector per instance, so flatten first.
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dropout(0.5),  # 50% dropout to reduce overfitting
    keras.layers.Dense(64, activation="relu"),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(10, activation="softmax")  # one unit per class
])
# -
model.summary()
# 让我们来看看这个模型:
# * 第一层使用 64 个相当大的过滤器 (7 × 7) 但没有步幅,因为输入图像不是很大。它还设置 input_shape=[28, 28, 1],因为图像是 28 × 28 像素,具有单个颜色通道(即灰度)。
# * 接下来我们有一个最大池化层,它使用池大小为 2,因此它将每个空间维度除以因子 2。
# * 然后我们重复两次相同的结构:两个卷积层,然后是一个最大池化层。对于较大的图像,我们可以多次重复此结构(重复次数是您可以调整的超参数)。
# * 请注意,当我们将 CNN 向上爬向输出层时,过滤器的数量会增加(最初是 64,然后是 128,然后是 256):它的增长是有意义的,因为低级特征的数量通常相当低(例如,小圆圈、水平线),但是有许多不同的方法可以将它们组合成更高级别的特征。将每个池化层后的过滤器数量加倍是一种常见的做法:由于池化层将每个空间维度除以因子 2,因此我们可以将下一层中的特征图数量加倍,而不必担心数量会爆炸参数、内存使用或计算负载。
# * 接下来是全连接网络,由两个隐藏密集层和一个密集输出层组成。请注意,我们必须展平其输入,因为密集网络需要每个实例的一维特征数组。我们还添加了两个 dropout 层,每个 dropout 层的 dropout 率为 50%,以减少过拟合。
# 这个 CNN 在测试集上达到了 92% 以上的准确率。它不是最先进的,但它非常好,显然比我们在第 10 章中使用密集网络实现的要好得多。
#
# 多年来,已经开发了这种基本架构的变体,从而在该领域取得了惊人的进步。这种进步的一个很好的衡量标准是诸如 ILSVRC ImageNet 挑战赛等比赛中的错误率。在这场比赛中,图像分类的前五名错误率在短短六年内从超过 26% 下降到不到 2.3%。前五名错误率是系统的前五名预测不包括正确答案的测试图像的数量。图像很大(256 像素高),有 1,000 个类别,其中一些非常微妙(尝试区分 120 个狗品种)。查看获奖作品的演变是了解 CNN 如何工作的好方法。
#
# 我们将首先看看经典的 LeNet-5 架构(1998 年),然后是 ILSVRC 挑战赛的三位获胜者:AlexNet(2012 年)、GoogLeNet(2014 年)和 ResNet(2015 年)。
# ### LeNet-5
# LeNet-5 架构 10 可能是最广为人知的 CNN 架构。 如前所述,它由 Y<NAME>un 于 1998 年创建,并已广泛用于手写数字识别(MNIST)。 它由表 14-1 所示的层组成。
# 
# 还有一些额外的细节需要注意:
# * MNIST 图像为 28 × 28 像素,但在输入网络之前将它们补零至 32 × 32 像素并进行归一化处理。网络的其余部分不使用任何填充,这就是为什么随着图像在网络中前进,尺寸不断缩小。
# * 平均池化层比平常稍微复杂一些:每个神经元计算其输入的平均值,然后将结果乘以一个可学习系数(每个映射一个)并添加一个可学习的偏差项(同样,每个映射一个),然后最后应用激活函数。
# * C3 映射中的大多数神经元仅连接到三个或四个 S2 映射(而不是所有六个 S2 映射)中的神经元。有关详细信息,请参阅原始论文 10 中的表 1(第 8 页)。
# * 输出层有点特殊:不是计算输入和权重向量的矩阵乘法,而是每个神经元输出其输入向量与其权重向量之间的欧几里得距离的平方。每个输出测量图像属于特定数字类别的程度。现在首选交叉熵成本函数,因为它会更多地惩罚错误的预测,产生更大的梯度并更快地收敛。
# Yann LeCun 的网站提供了 LeNet-5 分类数字的精彩演示。
# ### AlexNet
# AlexNet CNN 架构 11 在 2012 年 ImageNet ILSVRC 挑战赛中大获全胜:它实现了 17% 的前五名错误率,而第二名的错误率仅为 26%! 它由 <NAME>(因此得名)、<NAME> 和 <NAME> 开发。 它与 LeNet-5 类似,只是更大更深,并且它是第一个将卷积层直接堆叠在一起,而不是在每个卷积层的顶部堆叠池化层。 表 14-2 展示了这种架构。
# 
# 为了减少过拟合,作者使用了两种正则化技术。 首先,他们在训练期间对 F9 和 F10 层的输出应用了 dropout(在第 11 章中介绍),dropout 率为 50%。 其次,他们通过以各种偏移量随机移动训练图像、水平翻转它们并改变光照条件来执行数据增强。
# **数据增强**
#
# ---
# 数据增强通过生成每个训练实例的许多真实变体来人为地增加训练集的大小。这减少了过度拟合,使其成为一种正则化技术。生成的实例应该尽可能真实:理想情况下,给定来自增强训练集的图像,人类不应该能够判断它是否被增强。简单地添加白噪声无济于事;修改应该是可学习的(白噪声不是)。
#
# 例如,您可以将训练集中的每张图片略微移动、旋转和调整大小,并将结果图片添加到训练集中(见图 14-12)。这迫使模型更能容忍图片中对象的位置、方向和大小的变化。对于更能容忍不同光照条件的模型,您可以类似地生成许多具有不同对比度的图像。一般情况下,也可以水平翻转图片(文字和其他不对称对象除外)。通过组合这些转换,您可以大大增加训练集的大小。
# 
# ---
# AlexNet 还在 C1 和 C3 层的 ReLU 步骤之后立即使用竞争归一化步骤,称为局部响应归一化 (LRN):最强烈激活的神经元抑制位于相邻特征图中相同位置的其他神经元(这种竞争性激活已被观察到 在生物神经元中)。 这鼓励不同的特征图进行专业化,将它们分开并迫使它们探索更广泛的特征,最终提高泛化能力。 公式 14-2 显示了如何应用 LRN。
# 
# 在这个等式中:
# * $b_i$ 是位于特征图 $i$ 中某行 $u$ 和 $v$ 列的神经元的归一化输出(注意,在此等式中,我们只考虑位于该行和该列的神经元,因此未显示 $u$ 和 $v$)。
# * $a_i$ 是在 ReLU 步骤之后但在归一化之前激活该神经元。
# * $k, \alpha, \beta $ 和 $r$ 是超参数。 $k$ 称为偏差,$r$ 称为深度半径。
# * $f_n$ 是特征图的数量。
# 例如,如果 r = 2 并且一个神经元具有很强的激活,它将抑制位于其自身上方和下方的特征图中的神经元的激活。
#
# 在 AlexNet 中,超参数设置如下:r = 2、α = 0.00002、β = 0.75 和 k = 1。可以使用 tf.nn.local_response_normalization() 函数(您可以将其包装在 Lambda 如果你想在 Keras 模型中使用它)。
#
# AlexNet 的变体 ZF Net12 由 <NAME> 和 <NAME> 开发,并赢得了 2013 年 ILSVRC 挑战赛。 它本质上是带有一些调整过的超参数(特征图的数量、内核大小、步幅等)的 AlexNet。
# ### GoogLeNet
# GoogLeNet 架构由 <NAME> 等人开发。 来自 Google Research,它通过将前五名错误率降低到 7% 以下赢得了 ILSVRC 2014 挑战。 如此出色的性能很大程度上来自于网络比以前的 CNN 更深的事实(如图 14-14 所示)。 这是通过称为初始模块的子网络实现的,它允许 GoogLeNet 比以前的架构更有效地使用参数:GoogLeNet 的参数实际上比 AlexNet 少 10 倍(大约 600 万而不是 6000 万)。
# 图 14-13 展示了一个 Inception 模块的架构。 符号“3 × 3 + 1(S)”表示该层使用 3 × 3 内核、步长 1 和“相同”填充。 输入信号首先被复制并馈送到四个不同的层。 所有卷积层都使用 ReLU 激活函数。 请注意,第二组卷积层使用不同的内核大小(1 × 1、3 × 3 和 5 × 5),允许它们捕获不同尺度的模式。 另请注意,每一层都使用 1 步长和“相同”填充(甚至是最大池化层),因此它们的输出都具有与输入相同的高度和宽度。 这使得在最终深度连接层中沿深度维度连接所有输出成为可能(即,堆叠来自所有四个顶部卷积层的特征图)。 这个连接层可以在 TensorFlow 中使用 tf.concat() 操作实现,axis=3(轴是深度)。
# 
# 您可能想知道为什么 Inception 模块具有 1 × 1 内核的卷积层。 这些层肯定不能捕捉任何特征,因为它们一次只看一个像素? 事实上,这些层有三个目的:
# * 虽然它们不能捕捉空间模式,但它们可以捕捉沿深度维度的模式。
# * 它们被配置为输出比输入更少的特征图,因此它们充当瓶颈层,这意味着它们降低了维度。 这降低了计算成本和参数数量,加快了训练速度并提高了泛化能力。
# * 每对卷积层([1 × 1, 3 × 3] 和 [1 × 1, 5 × 5])就像一个强大的卷积层,能够捕捉更复杂的模式。 事实上,这对卷积层不是在图像上扫描一个简单的线性分类器(就像单个卷积层那样),而是在图像上扫描一个两层神经网络。
# 简而言之,您可以将整个 inception 模块视为类固醇上的卷积层,能够输出捕获各种尺度复杂模式的特征图。
# > 每个卷积层的卷积核数是一个超参数。 不幸的是,这意味着您需要为添加的每个初始层调整 6 个超参数
# 现在让我们看看 GoogLeNet CNN 的架构(见图 14-14)。 每个卷积层和每个池化层输出的特征图数量显示在内核大小之前。 该架构非常深,必须用三列表示,但 GoogLeNet 实际上是一个高堆栈,包括九个初始模块(带有陀螺的盒子)。 inception 模块中的六个数字代表模块中每个卷积层输出的特征图数量(顺序与图 14-13 相同)。 请注意,所有卷积层都使用 ReLU 激活函数。
# 
# 让我们通过这个网络:
# * 前两层将图像的高度和宽度除以 4(因此其面积除以 16),以减少计算负载。第一层使用大内核大小,以便保留大部分信息。
# * 然后局部响应归一化层确保前面的层学习各种各样的特征(如前所述)。
# * 后面有两个卷积层,第一个像瓶颈层。如前所述,您可以将这一对视为单个更智能的卷积层。
# * 同样,局部响应归一化层可确保前面的层捕获多种模式。
# * 接下来,最大池化层将图像高度和宽度减少 2,再次加快计算速度。
# * 然后是九个初始模块的高堆栈,与几个最大池化层交错以降低维度并加速网络。
# * 接下来,全局平均池化层输出每个特征图的均值:这会丢弃任何剩余的空间信息,这很好,因为那时没有太多空间信息。实际上,GoogLeNet 输入图像通常预期为 224 × 224 像素,因此在 5 个最大池化层之后,每个层将高度和宽度除以 2,特征图下降到 7 × 7。此外,这是一个分类任务,而不是本地化,所以对象在哪里并不重要。由于这一层带来的降维,在 CNN 的顶部不需要有几个全连接层(就像在 AlexNet 中那样),这大大减少了网络中的参数数量并限制了过拟合的风险。
# * 最后一层是不言自明的:用于正则化的 dropout,然后是一个具有 1,000 个单元的全连接层(因为有 1,000 个类)和一个 softmax 激活函数来输出估计的类概率。
# 该图略有简化:原始的 GoogLeNet 架构还包括插入在第三个和第六个初始模块顶部的两个辅助分类器。 它们都由一个平均池化层、一个卷积层、两个全连接层和一个 softmax 激活层组成。 在训练期间,他们的损失(按比例缩小 70%)被添加到整体损失中。 目标是解决梯度消失问题并规范网络。
#
# 然而,后来证明它们的影响相对较小。 谷歌研究人员后来提出了 GoogLeNet 架构的几种变体,包括 Inception-v3 和 Inception-v4,使用稍微不同的 Inception 模块并达到更好的性能。
# ### VGGNet
# ILSVRC 2014 挑战赛的亚军是 VGGNet,它由牛津大学视觉几何组 (VGG) 研究实验室的 <NAME> 和 <NAME> 开发。 它有一个非常简单和经典的架构,有 2 或 3 个卷积层和一个池化层,然后又是 2 或 3 个卷积层和一个池化层,依此类推(总共只有 16 或 19 个卷积层,具体取决于 VGG 变体),加上具有 2 个隐藏层和输出层的最终密集网络。 它只使用了 3 × 3 个过滤器,但使用了许多过滤器。
# ### ResNet
# 何开明等。 使用残差网络(或 ResNet)赢得了 ILSVRC 2015 挑战,其前五名错误率低于 3.6%,令人震惊。 获胜的变体使用了由 152 层组成的极深 CNN(其他变体有 34、50 和 101 层)。 它印证了大趋势:模型越来越深,参数越来越少。 能够训练这样一个深度网络的关键是使用跳过连接(也称为快捷连接):输入层的信号也被添加到位于堆栈更高一点的层的输出中。 让我们看看为什么这很有用。
# 训练神经网络时,目标是使其建模目标函数 h(x)。 如果将输入 x 添加到网络的输出(即添加跳过连接),则网络将被迫建模 f(x) = h(x) – x 而不是 h(x)。 这称为残差学习(见图 14-15)。
# 
# 初始化常规神经网络时,其权重接近于零,因此网络仅输出接近于零的值。 如果添加跳过连接,生成的网络只会输出其输入的副本; 换句话说,它最初对恒等函数进行建模。 如果目标函数非常接近恒等函数(通常是这种情况),这将大大加快训练速度。
#
# 此外,如果添加许多跳过连接,即使有几个层尚未开始学习,网络也可以开始取得进展(见图 14-16)。 由于跳过连接,信号可以轻松地穿过整个网络。 深度残差网络可以看作是一堆残差单元(RU),其中每个残差单元都是一个带有跳跃连接的小型神经网络。
# 
# 现在让我们看看 ResNet 的架构(见图 14-17)。 这是令人惊讶的简单。 它的开始和结束与 GoogLeNet 完全一样(除了没有 dropout 层),中间只是一堆非常深的简单残差单元。 每个残差单元由两个卷积层组成(没有池化层!),使用 3 × 3 内核和保留空间维度(步长 1,“相同”填充),使用批量归一化 (BN) 和 ReLU 激活。
# 
# 请注意,特征图的数量每几个残差单元加倍,同时它们的高度和宽度减半(使用步幅为 2 的卷积层)。 发生这种情况时,输入不能直接添加到残差单元的输出中,因为它们的形状不同(例如,这个问题影响了图 14-17 中虚线箭头表示的跳过连接)。 为了解决这个问题,输入通过一个 1 × 1 的卷积层,步长为 2,输出特征图的数量正确(见图 14-18)。
# 
# ResNet-34 是具有 34 层(仅计算卷积层和全连接层)17 的 ResNet,包含 3 个残差单元,输出 64 个特征图、128 个图的 RU、256 个图的 6 个 RU 和 512 个图的 3 个 RU。 我们将在本章后面实现这个架构。
#
# 比这更深的 ResNet,例如 ResNet-152,使用略有不同的残差单元。 它们不是两个具有 256 个特征图的 3 × 3 卷积层,而是使用三个卷积层:首先是一个只有 64 个特征图(少 4 倍)的 1 × 1 卷积层,它充当瓶颈层(如前所述) ),然后是具有 64 个特征图的 3 × 3 层,最后是具有 256 个特征图(4 乘以 64)的另一个 1 × 1 卷积层,用于恢复原始深度。 ResNet-152 包含 3 个这样的 RU,输出 256 个地图,然后是 8 个具有 512 个地图的 RU,高达 36 个具有 1,024 个地图的 RU,最后是 3 个具有 2,048 个地图的 RU。
# > Google 的 Inception-v4 架构融合了 GoogLe-Net 和 ResNet 的思想,在 ImageNet 分类上实现了接近 3% 的前五错误率。
# ### Xception
# GoogLeNet 架构的另一个变体值得注意:Xception19(代表 Extreme Inception)由 <NAME>(Keras 的作者)在 2016 年提出,它在一个巨大的视觉任务(3.5 亿张图像和 17,000类)。就像 Inception-v4 一样,它融合了 GoogLeNet 和 ResNet 的思想,但它用一种称为深度可分离卷积层(或简称可分离卷积层)的特殊类型层代替了 inception 模块。这些层以前曾在一些 CNN 架构中使用过,但它们不像 Xception 架构那样处于中心位置。虽然常规卷积层使用过滤器尝试同时捕获空间模式(例如,椭圆形)和跨通道模式(例如,嘴 + 鼻子 + 眼睛 = 脸),但可分离的卷积层强烈假设空间模式和交叉- 通道模式可以单独建模(见图 14-19)。因此,它由两部分组成:第一部分为每个输入特征图应用单个空间过滤器,然后第二部分专门用于跨通道模式——它只是一个具有 1 × 1 过滤器的常规卷积层。
# 
# 由于可分离卷积层的每个输入通道只有一个空间过滤器,因此您应该避免在通道太少的层之后使用它们,例如输入层(当然,这就是图 14-19 所表示的,但这只是为了说明目的) . 出于这个原因,Xception 架构从 2 个常规卷积层开始,但架构的其余部分仅使用可分离卷积(总共 34 个),加上一些最大池化层和通常的最终层(一个全局平均池化层和一个 密集输出层)。
# 您可能想知道为什么 Xception 被认为是 GoogLeNet 的一个变体,因为它根本不包含 Inception 模块。 好吧,正如我们之前所讨论的,初始模块包含带有 1 × 1 过滤器的卷积层:这些过滤器专门用于跨通道模式。 然而,位于它们之上的卷积层是常规卷积层,它们寻找空间和跨通道模式。 因此,您可以将初始模块视为常规卷积层(联合考虑空间模式和跨通道模式)和可分离卷积层(分别考虑它们)之间的中间体。 在实践中,似乎可分离的卷积层通常表现更好。
# > 可分离的卷积层比普通卷积层使用更少的参数、更少的内存和更少的计算,并且通常它们甚至表现得更好,所以你应该考虑默认使用它们(除了很少通道的层之后)。
# ILSVRC 2016 挑战赛由香港中文大学的 CUImage 团队赢得。 他们使用了许多不同技术的集合,包括称为 GBD-Net 的复杂对象检测系统,以实现低于 3% 的前五名错误率。 尽管这个结果无疑令人印象深刻,但解决方案的复杂性与 ResNets 的简单性形成了对比。 此外,一年后,另一个相当简单的架构表现得更好,正如我们现在将看到的。
# ### SENet
# ILSVRC 2017 挑战赛的获胜架构是 Squeeze-and Excitation Network (SENet)。 这种架构扩展了现有架构,例如初始网络和 ResNets,并提高了它们的性能。 这让 SENet 以惊人的 2.25% 前五错误率赢得了比赛! Inception 网络和 ResNet 的扩展版本分别称为 SE-Inception 和 SE-ResNet。 提升来自这样一个事实,即 SENet 向原始架构中的每个单元(即每个初始模块或每个残差单元)添加了一个称为 SE 块的小型神经网络,如图 14-20 所示。
# 
# SE 块分析它所连接的单元的输出,只关注深度维度(它不寻找任何空间模式),并了解哪些特征通常最活跃。 然后使用此信息重新校准特征图,如图 14-21 所示。 例如,SE 块可以了解到嘴巴、鼻子和眼睛通常在图片中一起出现:如果您看到嘴巴和鼻子,您应该也看到眼睛。 所以如果块在嘴巴和鼻子特征图中看到强烈的激活,但在眼睛特征图中只有轻微激活,它会提升眼睛特征图(更准确地说,它会减少不相关的特征图)。 如果眼睛与其他东西有些混淆,则此特征图重新校准将有助于解决歧义。
# 
# SE 块仅由三层组成:全局平均池化层、使用 ReLU 激活函数的隐藏密集层和使用 sigmoid 激活函数的密集输出层(见图 14-22)。
# 
# 如前所述,全局平均池化层计算每个特征图的平均激活:例如,如果它的输入包含 256 个特征图,它将输出 256 个数字,代表每个过滤器的整体响应水平。下一层是“挤压”发生的地方:这一层的神经元明显少于 256 个——通常比特征图的数量(例如,16 个神经元)少 16 倍,因此 256 个数字被压缩成一个小向量(例如,16方面)。这是特征响应分布的低维向量表示(即嵌入)。这个瓶颈步骤迫使 SE 块学习特征组合的一般表示(当我们在第 17 章讨论自动编码器时,我们将再次看到这个原理的作用)。最后,输出层采用嵌入并输出一个重新校准向量,每个特征图包含一个数字(例如,256),每个数字在 0 和 1 之间。然后将特征图乘以这个重新校准向量,因此不相关的特征(低重新校准分数)被缩小,而相关特征(重新校准分数接近 1)被保留。
# ## 使用 Keras 实现 ResNet-34 CNN
# 到目前为止描述的大多数 CNN 架构都相当容易实现(尽管通常你会加载一个预训练的网络,正如我们将看到的)。 为了说明这个过程,让我们使用 Keras 从头开始实现一个 ResNet-34。 首先,让我们创建一个 ResidualUnit 层:
from tensorflow import keras
from tensorflow.keras.utils import plot_model


class ResidualUnit(keras.layers.Layer):
    """A ResNet residual unit: two 3x3 conv/BN stages plus a skip path.

    When ``strides > 1`` the main path downsamples, so the skip path gets
    its own strided 1x1 convolution (plus BN) to make the shapes match;
    otherwise the skip path is the identity.
    """

    def __init__(self, filters, strides=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        self.activation = keras.activations.get(activation)

        def make_conv(kernel, stride):
            # Bias is omitted because BatchNormalization follows each conv.
            return keras.layers.Conv2D(filters, kernel, strides=stride,
                                       padding="same", use_bias=False)

        # Main path: conv/BN/activation, then conv/BN.
        self.main_layers = [
            make_conv(3, strides),
            keras.layers.BatchNormalization(),
            self.activation,
            make_conv(3, 1),
            keras.layers.BatchNormalization(),
        ]
        # Skip path: empty (identity) unless downsampling is required.
        self.skip_layers = []
        if strides > 1:
            self.skip_layers = [
                make_conv(1, strides),
                keras.layers.BatchNormalization(),
            ]

    def call(self, inputs):
        """Run both paths, add them, and apply the activation."""
        main = inputs
        for layer in self.main_layers:
            main = layer(main)
        shortcut = inputs
        for layer in self.skip_layers:
            shortcut = layer(shortcut)
        return self.activation(main + shortcut)
# 如您所见,此代码与图 14-18 非常匹配。 在构造函数中,我们创建了所有需要的层:主层是图右侧的层,而跳过层是左侧的层(仅当步幅大于 1 时才需要)。 然后在 call() 方法中,我们让输入通过主层和跳过层(如果有),然后我们添加两个输出并应用激活函数。
#
# 接下来,我们可以使用 Sequential 模型构建 ResNet-34,因为它实际上只是一个很长的层序列(现在我们有了 ResidualUnit 类,我们可以将每个残差单元视为单个层):
# +
# Build ResNet-34 as a plain Sequential stack of ResidualUnit layers.
model = keras.models.Sequential()
# Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max pool.
model.add(keras.layers.Conv2D(64, 7, strides=2, input_shape=[224, 224, 3],
                              padding="same", use_bias=False))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("relu"))
model.add(keras.layers.MaxPool2D(pool_size=3, strides=2, padding="same"))
prev_filters = 64
# Body: 3 RUs with 64 filters, 4 with 128, 6 with 256, 3 with 512.
# Use stride 2 (downsampling) exactly when the filter count increases.
for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:
    strides = 1 if filters == prev_filters else 2
    model.add(ResidualUnit(filters, strides=strides))
    prev_filters = filters
# Head: global average pooling, flatten, softmax over 10 classes.
model.add(keras.layers.GlobalAvgPool2D())
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(10, activation="softmax"))
# -
# 这段代码中唯一稍微有点棘手的部分是将 ResidualUnit 层添加到模型的循环:如前所述,前 3 个 RU 有 64 个过滤器,然后接下来的 4 个 RU 有 128 个过滤器,依此类推。 然后,当过滤器的数量与前一个 RU 中的相同时,我们将步幅设置为 1,否则我们将其设置为 2。然后我们添加 ResidualUnit,最后我们更新 prev_filters。
#
# 令人惊讶的是,在不到 40 行代码中,我们就可以构建赢得 ILSVRC 2015 挑战的模型! 这展示了 ResNet 模型的优雅和 Keras API 的表现力。 实现其他 CNN 架构并不难。 然而,Keras 内置了几个这样的架构,那么为什么不改用它们呢?
# ## 使用来自 Keras 的预训练模型
# 通常,您不必手动实现 GoogLeNet 或 ResNet 等标准模型,因为预训练网络可以通过 keras.applications 包中的一行代码轻松获得。 例如,您可以使用以下代码行加载在 ImageNet 上预训练的 ResNet-50 模型:
model = keras.applications.resnet50.ResNet50(weights="imagenet")
# 就这样! 这将创建一个 ResNet-50 模型并下载在 ImageNet 数据集上预训练的权重。 要使用它,您首先需要确保图像具有正确的大小。ResNet-50 模型需要 224 × 224 像素的图像(其他模型可能需要其他尺寸,例如 299 × 299),所以让我们使用 TensorFlow 的 tf.image.resize() 函数来调整我们之前加载的图像的大小:
images_resized = tf.image.resize(images, [224, 224])
# tf.image.resize() 不会保留纵横比。 如果这是一个问题,请在调整大小之前尝试将图像裁剪为适当的纵横比。 两种操作都可以使用 tf.image.crop_and_resize() 一次性完成。
# 预训练模型假设图像以特定方式进行了预处理。 在某些情况下,他们可能希望输入从 0 缩放到 1,或从 –1 缩放到 1,依此类推。 每个模型都提供了一个 preprocess_input() 函数,您可以使用它来预处理您的图像。 这些函数假设像素值的范围为 0 到 255,因此我们必须将它们乘以 255(因为之前我们将它们缩放到 0-1 范围):
inputs = keras.applications.resnet50.preprocess_input(images_resized * 255)
# 现在我们可以使用预训练模型进行预测:
Y_proba = model.predict(inputs)
# 像往常一样,输出 Y_proba 是一个矩阵,每个图像一行,每个类别一列(在这种情况下,有 1,000 个类别)。 如果要显示前 K 个预测,包括类名和每个预测类的估计概率,请使用 decode_predictions() 函数。 对于每张图像,它返回一个包含前 K 个预测的数组,其中每个预测表示为一个包含类标识符、23 其名称和相应置信度分数的数组:
# Decode the top-3 ImageNet predictions for each image and print them as
# "class_id - name  confidence%". Uses enumerate over the decoded results
# (one entry per image) and f-strings instead of range(len(...)) + .format.
top_K = keras.applications.resnet50.decode_predictions(Y_proba, top=3)
for image_index, predictions in enumerate(top_K):
    print(f"Image #{image_index}")
    for class_id, name, y_proba in predictions:
        print(f"  {class_id} - {name:12s} {y_proba * 100:.2f}%")
    print()
# 输出如下所示:
# 
# 正确的类(修道院和雏菊)出现在两个图像的前三个结果中。 考虑到模型必须从 1,000 个类中进行选择,这很好。
# 如您所见,使用预训练模型创建一个非常好的图像分类器非常容易。 keras.applications 中提供了其他视觉模型,包括几个 ResNet 变体、GoogLeNet 变体(如 Inception-v3 和 Xception)、VGGNet 变体以及 MobileNet 和 MobileNetV2(用于移动应用程序的轻量级模型)。
#
# 但是,如果您想对不属于 ImageNet 的图像类别使用图像分类器怎么办? 在这种情况下,您仍然可以从预训练模型中受益以执行迁移学习。
# ## 用于迁移学习的预训练模型
# 如果你想建立一个图像分类器但你没有足够的训练数据,那么重用一个预训练模型的较低层通常是个好主意,正如我们在第 11 章中讨论的那样。 例如,让我们训练一个模型来分类 鲜花图片,重用预训练的 Xception 模型。 首先,让我们使用 TensorFlow Datasets 加载数据集(参见第 13 章):
# +
import tensorflow_datasets as tfds
# with_info=True also returns a DatasetInfo object with split sizes and labels.
dataset, info = tfds.load("tf_flowers", as_supervised=True, with_info=True)
dataset_size = info.splits["train"].num_examples  # 3670
class_names = info.features["label"].names  # ["dandelion", "daisy", ...]
n_classes = info.features["label"].num_classes  # 5
# -
# 请注意,您可以通过设置 with_info=True 来获取有关数据集的信息。 在这里,我们获得了数据集大小和类的名称。 不幸的是,只有一个“训练”数据集,没有测试集或验证集,所以我们需要拆分训练集。 TF Datasets 项目为此提供了一个 API。 例如,让我们将数据集的前 10% 用于测试,接下来的 15% 用于验证,剩余的 75% 用于训练:
# +
# Split the single "train" split into test/valid/train (10%/15%/75%).
# FIX: subsplit is a method call; the original `...subsplit=[10, 15, 75]` was a
# chained assignment that bound the raw list to all three names instead of
# producing three Split objects.
# (In TFDS >= 3.0 subsplit was removed entirely; the modern equivalent is
# split=["train[:10%]", "train[10%:25%]", "train[25%:]"].)
test_split, valid_split, train_split = tfds.Split.TRAIN.subsplit([10, 15, 75])
test_set = tfds.load("tf_flowers", split=test_split, as_supervised=True)
valid_set = tfds.load("tf_flowers", split=valid_split, as_supervised=True)
train_set = tfds.load("tf_flowers", split=train_split, as_supervised=True)
# -
# 接下来我们必须预处理图像。 CNN 需要 224 × 224 的图像,因此我们需要调整它们的大小。 我们还需要通过 Xception 的 preprocess_input() 函数运行图像:
def preprocess(image, label):
    """Resize an image to the 224x224 input size Xception expects and apply
    the model's own pixel preprocessing; the label passes through unchanged."""
    resized = tf.image.resize(image, [224, 224])
    return keras.applications.xception.preprocess_input(resized), label
# 让我们将此预处理函数应用于所有三个数据集,对训练集进行混洗,并为所有数据集添加批处理和预取:
# +
# Shuffle only the training data, then map/batch/prefetch all three splits.
batch_size = 32
train_set = train_set.shuffle(1000)
train_set = train_set.map(preprocess).batch(batch_size).prefetch(1)
valid_set = valid_set.map(preprocess).batch(batch_size).prefetch(1)
test_set = test_set.map(preprocess).batch(batch_size).prefetch(1)
# -
# 如果要执行一些数据增强,请更改训练集的预处理函数,向训练图像添加一些随机变换。 例如,使用 tf.image.random_crop() 随机裁剪图像,使用 tf.image.random_flip_left_right() 随机水平翻转图像,等等(请参阅笔记本的“迁移学习的预训练模型”部分以了解 一个例子)。
# > keras.preprocessing.image.ImageDataGenerator 类可以轻松地从磁盘加载图像并以各种方式扩充它们:您可以移动每个图像、旋转它、重新缩放它、水平或垂直翻转它、剪切它或应用任何转换函数 你想要它。 这对于简单的项目来说非常方便。 然而,构建 tf.data 管道有许多优点:它可以从任何来源高效(例如并行)读取图像,而不仅仅是本地磁盘; 您可以随意操作数据集; 如果你编写了一个基于 tf.image 操作的预处理函数,这个函数可以在 tf.data 管道和你将部署到生产的模型中使用(参见第 19 章)。
# 接下来让我们加载在 ImageNet 上预训练的 Xception 模型。 我们通过设置 include_top=False 排除网络的顶部:这排除了全局平均池化层和密集输出层。 然后我们根据基础模型的输出添加我们自己的全局平均池化层,然后是一个密集输出层,每个类一个单元,使用 softmax 激活函数。 最后,我们创建 Keras 模型:
# Load Xception pretrained on ImageNet without its classification head
# (include_top=False drops the global-average-pooling and Dense layers),
# then attach a fresh pooling layer and a softmax head for our 5 classes.
base_model = keras.applications.xception.Xception(weights="imagenet",
                                                  include_top=False)
avg = keras.layers.GlobalAveragePooling2D()(base_model.output)
output = keras.layers.Dense(n_classes, activation="softmax")(avg)
model = keras.Model(inputs=base_model.input, outputs=output)
# 如第 11 章所述,冻结预训练层的权重通常是个好主意,至少在训练开始时:
# Freeze every pretrained layer so only the new head trains at first.
# (Setting base_model.trainable=False would not work here because the model
# reuses base_model's layers directly, not the base_model object itself.)
for layer in base_model.layers:
    layer.trainable = False
# > 由于我们的模型直接使用基础模型的层,而不是 base_model 对象本身,因此设置 base_model.trainable=False 将不起作用。
# 最后,我们可以编译模型并开始训练:
# SGD with momentum and simple learning-rate decay; `lr` is the legacy Keras
# alias for learning_rate, matching the TF/Keras version this notebook targets.
optimizer = keras.optimizers.SGD(lr=0.2, momentum=0.9, decay=0.01)
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=optimizer,
              metrics=["accuracy"])
# Train only the new top layers (the base layers were frozen above).
history = model.fit(train_set, epochs=5, validation_data=valid_set)
# 这将非常缓慢,除非您有 GPU。 如果你不这样做,那么你应该在 Colab 中运行本章的笔记本,使用 GPU 运行时(它是免费的!)。 请参阅 https://github.com/ageron/handson-ml2 上的说明。
# 在对模型进行几个 epoch 训练后,其验证准确率应达到 75-80% 左右,并且不再取得太大进展。 这意味着顶层现在已经训练得很好,所以我们准备解冻所有层(或者你可以尝试只解冻顶层)并继续训练(不要忘记在你冻结或解冻层时编译模型 )。 这次我们使用低得多的学习率来避免损坏预训练的权重:
# +
# Unfreeze the whole base model for fine-tuning.
for layer in base_model.layers:
    layer.trainable = True
# Recompile (required after changing `trainable`) with a much lower learning
# rate to avoid destroying the pretrained weights. The `...` are book
# placeholders: fill in the same compile/fit arguments as the previous cell.
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=0.001)
model.compile(...)
history = model.fit(...)
# -
# 这需要一段时间,但这个模型在测试集上应该达到 95% 左右的准确率。 有了它,你就可以开始训练惊人的图像分类器了! 但计算机视觉不仅仅是分类。 例如,如果您还想知道花在图片中的位置怎么办? 现在让我们看看这个。
# ## 分类和定位
# 定位图片中的对象可以表示为回归任务,如第 10 章所述:要预测对象周围的边界框,常用的方法是预测对象中心的水平和垂直坐标,以及其高度 和宽度。 这意味着我们有四个数字要预测。 它不需要对模型进行太多更改; 我们只需要添加具有四个单元的第二个密集输出层(通常在全局平均池化层的顶部),并且可以使用 MSE 损失进行训练:
# +
# Two-headed network: a softmax classification head plus a 4-unit regression
# head (box center x/y, height, width) sharing the same Xception backbone.
base_model = keras.applications.xception.Xception(weights="imagenet",
                                                  include_top=False)
avg = keras.layers.GlobalAveragePooling2D()(base_model.output)
class_output = keras.layers.Dense(n_classes, activation="softmax")(avg)
loc_output = keras.layers.Dense(4)(avg)
model = keras.Model(inputs=base_model.input,
                    outputs=[class_output,
                             loc_output])
# One loss per output; loss_weights sets how much each contributes to the total.
model.compile(loss=["sparse_categorical_crossentropy", "mse"],
              loss_weights=[0.8, 0.2],  # depends on what you care most about
              optimizer=optimizer, metrics=["accuracy"])
# -
# 但是现在我们遇到了一个问题:鲜花数据集没有围绕鲜花的边界框。所以,我们需要自己添加它们。这通常是机器学习项目中最困难和最昂贵的部分之一:获取标签。花时间寻找合适的工具是个好主意。要使用边界框注释图像,您可能需要使用开源图像标记工具,例如 VGG Image Annotator、LabelImg、OpenLabeler 或 ImgLab,或者可能是商业工具,例如 LabelBox 或 Supervisely。如果您有大量要注释的图像,您可能还需要考虑众包平台,例如 Amazon Mechanical Turk。但是,搭建众包平台,准备表格发给工人,监督他们,确保他们生产的边界框的质量是好的,所以要确保它是值得的。努力。如果要标记的图像只有几千张,并且您不打算经常这样做,那么最好自己做。阿德里亚娜·科瓦什卡等人。写了一篇关于计算机视觉众包的非常实用的论文。我建议你检查一下,即使你不打算使用众包。
# 假设您已经获得了花数据集中每个图像的边界框(现在我们假设每个图像有一个边界框)。 然后,您需要创建一个数据集,其项目将是一批经过预处理的图像及其类标签和边界框。 每个项目都应该是形式(图像,(class_labels,bounding_boxes))的元组。 然后你就可以训练你的模型了!
# 边界框应该标准化,以便水平和垂直坐标以及高度和宽度都在 0 到 1 的范围内。此外,通常预测高度和宽度的平方根而不是高度和宽度 直接:这样,大边界框的 10 像素错误不会像小边界框的 10 像素错误那样受到惩罚。
# MSE 通常可以很好地作为训练模型的成本函数,但它不是评估模型预测边界框的好坏指标。 最常见的度量是联合交集 (IoU):预测边界框和目标边界框之间的重叠区域除以它们的联合区域(见图 14-23)。 在 tf.keras 中,它由 tf.keras.metrics.MeanIoU 类实现。
# 
# 对单个对象进行分类和定位很不错,但是如果图像包含多个对象怎么办(就像在花数据集中经常发生的情况一样)?
# ## 物体检测
# 对图像中的多个对象进行分类和定位的任务称为对象检测。直到几年前,一种常见的方法是采用经过训练的 CNN 对单个对象进行分类和定位,然后将其滑过图像,如图 14-24 所示。在这个例子中,图像被切割成一个 6 × 8 的网格,我们展示了一个 CNN(黑色粗矩形)在所有 3 × 3 区域上滑动。当 CNN 查看图像的左上角时,它检测到最左侧玫瑰的一部分,然后当它第一次向右移动一步时再次检测到同一朵玫瑰。下一步,它开始检测最上面的玫瑰的一部分,然后再向右移动一步,它再次检测到它。然后,您将继续在整个图像中滑动 CNN,查看所有 3 × 3 区域。此外,由于对象可以有不同的大小,您还可以在不同大小的区域之间滑动 CNN。例如,一旦您完成了 3 × 3 区域,您可能还想在所有 4 × 4 区域上滑动 CNN。
# 
# 这种技术相当简单,但正如您所见,它会在略有不同的位置多次检测同一个对象。然后将需要一些后处理来摆脱所有不必要的边界框。一种常见的方法称为非最大抑制。以下是您的操作方法:
# 1. 首先,您需要向 CNN 添加额外的对象性输出,以估计图像中确实存在花的概率(或者,您可以添加“无花”类,但这通常不起作用好)。它必须使用 sigmoid 激活函数,您可以使用二元交叉熵损失来训练它。然后去掉所有对象性分数低于某个阈值的边界框:这将删除所有实际上不包含花的边界框。
# 2. 找到objectness score最高的bounding box,去掉所有与其重叠的bounding box(例如IoU大于60%)。例如,在图 14-24 中,具有最大对象性分数的边界框是最顶部玫瑰上的厚边界框(对象性分数由边界框的厚度表示)。同一朵玫瑰上的另一个边界框与最大边界框重叠很多,因此我们将去掉它。
# 3. 重复第二步,直到没有更多的边界框要摆脱。
# 这种简单的物体检测方法效果很好,但它需要多次运行 CNN,所以速度很慢。 幸运的是,有一种更快的方法可以在图像上滑动 CNN:使用完全卷积网络 (FCN)。
# ### 全卷积网络
# FCN 的想法在 2015 年由 <NAME> 等人发表的一篇论文中首次引入,用于语义分割(根据图像所属的对象类别对图像中的每个像素进行分类的任务)。作者指出,您可以用卷积层替换 CNN 顶部的密集层。为了理解这一点,让我们看一个例子:假设一个具有 200 个神经元的密集层位于输出 100 个特征图的卷积层之上,每个特征图的大小为 7 × 7(这是特征图的大小,而不是内核大小)。每个神经元将计算来自卷积层的所有 100 × 7 × 7 激活的加权和(加上偏置项)。现在让我们看看如果我们使用 200 个过滤器(每个过滤器的大小为 7 × 7)和“有效”填充将密集层替换为卷积层会发生什么。该层将输出 200 个特征图,每个 1 × 1(因为内核恰好是输入特征图的大小,并且我们使用了“有效”填充)。换句话说,它会输出 200 个数字,就像密集层一样;如果您仔细观察卷积层执行的计算,您会注意到这些数字与密集层产生的数字完全相同。唯一的区别是密集层的输出是形状为 [batch size, 200] 的张量,而卷积层将输出形状为 [batch size, 1, 1, 200] 的张量。
# > 要将密集层转换为卷积层,卷积层中的过滤器数量必须等于密集层中的单元数,过滤器大小必须等于输入特征图的大小,并且必须使用 “valid”填充。 步幅可以设置为 1 或更多,我们很快就会看到。
# 为什么这很重要? 好吧,虽然密集层需要特定的输入大小(因为它每个输入特征有一个权重),但卷积层会很乐意处理任何大小的图像(但是,它确实希望其输入具有特定数量的通道,
# 为什么这很重要? 好吧,虽然密集层需要特定的输入大小(因为它每个输入特征有一个权重),但卷积层会很乐意处理任何大小的图像(但是,它确实希望其输入具有特定数量的通道,因为每个 内核为每个输入通道包含一组不同的权重)。 由于 FCN 仅包含卷积层(和池化层,它们具有相同的属性),因此可以在任何大小的图像上进行训练和执行!
# 例如,假设我们已经训练了一个用于花卉分类和定位的 CNN。 它在 224 × 224 图像上训练,并输出 10 个数字:输出 0 到 4 通过 softmax 激活函数发送,这给出了类概率(每个类一个); 输出 5 通过逻辑激活函数发送,这给出了对象性分数; 输出 6 到 9 不使用任何激活函数,它们表示边界框的中心坐标,以及它的高度和宽度。 我们现在可以将其密集层转换为卷积层。 事实上,我们甚至不需要重新训练它; 我们可以将权重从密集层复制到卷积层! 或者,我们可以在训练之前将 CNN 转换为 FCN。
# 现在假设输出层(也称为瓶颈层)之前的最后一个卷积层在网络输入 224 × 224 图像时输出 7 × 7 特征图(见图 14-25 左侧)。如果我们向 FCN 提供 448 × 448 图像(参见图 14 25 的右侧),瓶颈层现在将输出 14 × 14 特征图。 27 由于密集输出层被使用 10 个大小过滤器的卷积层取代7 × 7,具有“有效”填充和步长 1,输出将由 10 个特征图组成,每个特征图的大小为 8 × 8(因为 14 – 7 + 1 = 8)。换句话说,FCN 将只处理整个图像一次,它会输出一个 8 × 8 的网格,其中每个单元格包含 10 个数字(5 个类别概率、1 个对象得分和 4 个边界框坐标)。这就像使用原始 CNN 并使用每行 8 步和每列 8 步在图像上滑动一样。为了可视化这一点,想象一下将原始图像切成 14 × 14 的网格,然后在这个网格上滑动一个 7 × 7 的窗口;窗口将有 8 × 8 = 64 个可能的位置,因此有 8 × 8 个预测。然而,FCN 方法效率更高,因为网络只查看图像一次。事实上,You Only Look Once (YOLO) 是一种非常流行的对象检测架构的名称,我们将在接下来介绍。
# 
# ### 你只看一次(YOLO)
# YOLO 是 Joseph Redmon 等人提出的一种极其快速和准确的物体检测架构。 它在 2015 年的一篇论文中提出,随后在 2016 年 (YOLOv2) 和 2018 年 (YOLOv3) 中进行了改进。 正如 Redmon 的演示中所见,它非常快,可以在视频上实时运行。
#
# YOLOv3 的架构与我们刚刚讨论的架构非常相似,但有一些重要的区别:
# * 它为每个网格单元输出五个边界框(而不是一个),并且每个边界框都带有一个对象性分数。 它还为每个网格单元输出 20 个类别概率,因为它是在包含 20 个类别的 PASCAL VOC 数据集上训练的。 每个网格单元总共有 45 个数字:5 个边界框,每个边界框有 4 个坐标,加上 5 个对象分数,加上 20 个类别概率。
# * YOLOv3 不是预测边界框中心的绝对坐标,而是预测相对于网格单元坐标的偏移量,其中 (0, 0) 表示该单元格的左上角,(1, 1) 表示右下角。 对于每个网格单元,YOLOv3 被训练为仅预测中心位于该单元的边界框(但边界框本身通常远远超出网格单元)。 YOLOv3 将逻辑激活函数应用于边界框坐标以确保它们保持在 0 到 1 的范围内。
# * 在训练神经网络之前,YOLOv3 找到五个有代表性的边界框维度,称为锚框(或边界框先验)。它通过将 K-Means 算法(参见第 9 章)应用于训练集边界框的高度和宽度来实现这一点。例如,如果训练图像包含许多行人,那么其中一个锚框可能具有典型行人的尺寸。然后当神经网络预测每个网格单元有五个边界框时,它实际上预测了每个锚框的重新缩放比例。例如,假设一个锚框高 100 像素,宽 50 像素,并且网络预测,例如,垂直缩放因子为 1.5,水平缩放因子为 0.9(对于网格单元之一)。这将产生大小为 150 × 45 像素的预测边界框。更准确地说,对于每个网格单元和每个锚框,网络预测垂直和水平缩放因子的对数。拥有这些先验使网络更有可能预测适当维度的边界框,并且它还可以加快训练速度,因为它将更快地了解合理的边界框是什么样子。
# * 网络使用不同尺度的图像进行训练:在训练过程中每隔几批,网络随机选择一个新的图像维度(从 330 × 330 到 608 × 608 像素)。这允许网络学习检测不同尺度的对象。此外,它还可以在不同尺度上使用 YOLOv3:较小尺度的准确度较低,但比较大尺度更快,因此您可以为您的用例选择正确的权衡。
# 还有一些您可能感兴趣的创新,例如使用跳过连接来恢复在 CNN 中丢失的一些空间分辨率(当我们查看语义分割时,我们将很快讨论这个问题)。 在 2016 年的论文中,作者介绍了使用层次分类的 YOLO9000 模型:该模型预测一个称为 WordTree 的视觉层次结构中每个节点的概率。 这使得网络可以高可信度地预测图像代表一只狗,即使它不确定是什么特定类型的狗。 我鼓励您继续阅读所有三篇论文:它们读起来非常愉快,并且它们提供了如何逐步改进深度学习系统的极好例子。
# **平均平均精度 (mAP)**
#
# ---
# 对象检测任务中使用的一个非常常见的指标是平均平均精度 (mAP)。 “平均平均值”听起来有点多余,不是吗? 为了理解这个指标,让我们回到我们在第 3 章中讨论的两个分类指标:准确率和召回率。 记住权衡:召回率越高,精度越低。 您可以在精度/召回曲线中对此进行可视化(参见图 3-5)。 要将这条曲线总结为一个数字,我们可以计算其曲线下面积 (AUC)。 但请注意,精确率/召回率曲线可能包含一些部分,当召回率增加时,精确率实际上会上升,尤其是在召回率较低的情况下(您可以在图 3-5 的左上角看到这一点)。 这是 mAP 指标的动机之一。
# 假设分类器在 10% 的召回率下有 90% 的准确率,但在 20% 的召回率下有 96% 的准确率。 这里真的没有权衡:在 20% 的召回率而不是 10% 的召回率下使用分类器更有意义,因为您将获得更高的召回率和更高的精度。 因此,我们不应着眼于 10% 召回率时的精度,而应该着眼于分类器在至少 10% 召回率下可以提供的最大精度。 它将是 96%,而不是 90%。 因此,获得模型性能的一个公平概念的一种方法是计算您可以获得的最大精度,至少为 0%,然后是 10%,20%,依此类推,直到 100%,然后计算平均值 这些最大精度。 这称为平均精度 (AP) 指标。 现在当有两个以上的类时,我们可以计算每个类的 AP,然后计算平均 AP(mAP)。 仅此而已!
# 在物体检测系统中,还有一个额外的复杂度:如果系统检测到正确的类别,但在错误的位置(即边界框完全关闭)怎么办? 当然,我们不应将此视为积极的预测。 一种方法是定义一个 IOU 阈值:例如,我们可以认为只有当 IOU 大于 0.5 并且预测的类别是正确的时,预测才是正确的。 相应的 mAP 通常记为 mAP@0.5(或 mAP@50%,有时只是 AP50)。 在一些比赛中(例如 PASCAL VOC 挑战赛),就是这样做的。 在其他比赛中(例如 COCO 比赛),mAP 是针对不同的 IOU 阈值(0.50、0.55、0.60、……、0.95)计算的,最终指标是所有这些 mAP 的平均值(注意 AP@[.50:. 95] 或 AP@[.50:0.05:.95])。 是的,这是一个平均平均值。
# ---
# GitHub 上提供了几个使用 TensorFlow 构建的 YOLO 实现。 特别是,查看 Zihao Zang 的 TensorFlow 2 实现。 TensorFlow 模型项目中提供了其他对象检测模型,其中许多具有预训练权重; 有的甚至已经移植到了TF Hub,比如SSD31和Faster RCNN,这两款都非常流行。 SSD也是一种“单发”检测模型,类似于YOLO。 Faster R-CNN 更复杂:图像首先通过 CNN,然后将输出传递给区域建议网络 (RPN),该网络提出最有可能包含对象的边界框,并为每个边界运行一个分类器 框,基于 CNN 的裁剪输出。
# 检测系统的选择取决于许多因素:速度、准确性、可用的预训练模型、训练时间、复杂性等。论文包含指标表,但测试环境存在很大的可变性,技术也在不断发展,因此 很快,很难做出一个对大多数人有用且有效期超过几个月的公平比较。
#
# 因此,我们可以通过在对象周围绘制边界框来定位对象。 很好! 但也许你想要更精确一点。 让我们看看如何下降到像素级别。
# ## 语义分割
# 在语义分割中,每个像素根据其所属对象的类别(例如,道路、汽车、行人、建筑物等)进行分类,如图 14-26 所示。注意同一类的不同对象是不区分的。例如,分割图像右侧的所有自行车最终都是一大块像素。该任务的主要难点在于,当图像通过常规 CNN 时,它们逐渐失去其空间分辨率(由于步长大于 1 的层);所以,一个普通的 CNN 最终可能会知道图像左下角的某个地方有一个人,但它不会比这更精确。
#
# 就像对象检测一样,有许多不同的方法可以解决这个问题,有些方法非常复杂。然而,<NAME> 等人在 2015 年的论文中提出了一个相当简单的解决方案。我们之前讨论过。作者首先采用预训练的 CNN 并将其转换为 FCN。 CNN 将 32 的总步长应用于输入图像(即,如果将所有步长加起来大于 1),这意味着最后一层输出的特征图比输入图像小 32 倍。这显然太粗糙了,所以他们添加了一个上采样层,将分辨率乘以 32。
# 
# 有几种可用于上采样(增加图像大小)的解决方案,例如双线性插值,但仅适用于 ×4 或 ×8。 相反,他们使用转置卷积层:它相当于首先通过插入空行和列(全零)来拉伸图像,然后执行常规卷积(见图 14-27)。 或者,有些人更喜欢将其视为使用分数步长的常规卷积层(例如,图 14-27 中的 1/2)。 转置卷积层可以初始化为执行接近线性插值的操作,但由于它是一个可训练层,它会在训练过程中学习做得更好。 在 tf.keras 中,可以使用 Conv2DTranspose 层。
# 
# > 在转置卷积层中,步幅定义了输入将被拉伸的程度,而不是滤波器步长的大小,因此步幅越大,输出越大(与卷积层或池化层不同)。
# *TensorFlow 卷积运算*
#
# ---
# TensorFlow 还提供了一些其他类型的卷积层:
# * keras.layers.Conv1D。
# 为一维输入创建卷积层,例如时间序列或文本(字母或单词的序列),我们将在第 15 章中看到。
# * keras.layers.Conv3D
# 为 3D 输入创建卷积层,例如 3D PET 扫描。
# * dilation_rate
# 将任何卷积层的 dilation_rate 超参数设置为 2 或更大的值会创建一个 à-trous 卷积层(“à trous”是法语中“有孔”的意思)。这等效于使用带有通过插入零行和零列(即孔)而扩张的过滤器的常规卷积层。例如,一个等于 [[1,2,3]] 的 1 × 3 过滤器可能会以 4 的扩张率进行扩张,从而得到一个 [[1, 0, 0, 0, 2, 0, 0 的扩张过滤器, 0, 3]]。这让卷积层在没有计算成本和不使用额外参数的情况下拥有更大的感受野。
# * tf.nn.depthwise_conv2d()
# 可用于创建深度卷积层(但您需要自己创建变量)。它独立地将每个过滤器应用于每个单独的输入通道。因此,如果有 $f_n$ 个过滤器和 $f_{n'}$ 个输入通道,那么这将输出 $f_n × f_{n'}$ 个特征图。
# ---
# 这个解决方案是好的,但仍然太不精确。 为了做得更好,作者添加了来自较低层的跳过连接:例如,他们将输出图像上采样了 2 倍(而不是 32),并且他们添加了具有这种双分辨率的较低层的输出。 然后他们将结果上采样 16 倍,导致总上采样因子为 32(见图 14-28)。 这恢复了早期池化层中丢失的一些空间分辨率。 在他们最好的架构中,他们使用了第二个类似的跳过连接来从更低的层恢复更精细的细节。 简而言之,原始CNN的输出经过以下额外步骤:upscale ×2,添加较低层(适当比例)的输出,upscale ×2,添加更低层的输出,最后upscale ×8. 甚至可以放大到超过原始图像的大小:这可以用来增加图像的分辨率,这是一种称为超分辨率的技术。
# 
# 再一次,许多 GitHub 存储库提供了语义分割的 TensorFlow 实现(目前为 TensorFlow 1),您甚至可以在 TensorFlow 模型项目中找到预训练的实例分割模型。 实例分割类似于语义分割,但不是将同一类的所有对象合并成一个大块,而是将每个对象与其他对象区分开来(例如,它识别每辆自行车)。 目前,TensorFlow Models 项目中可用的实例分割模型基于 Mask R-CNN 架构,该架构在 2017 年的一篇论文中提出:它通过为每个边界框额外生成一个像素掩码来扩展 Faster R-CNN 模型。 因此,您不仅会在每个对象周围获得一个边界框,以及一组估计的类概率,而且还会得到一个像素掩码,用于定位边界框中属于该对象的像素。
# 如您所见,深度计算机视觉领域广阔且发展迅速,每年都会出现各种基于卷积神经网络的架构。短短几年内取得的进展令人震惊,研究人员现在专注于越来越难的问题,例如对抗性学习(它试图使网络对旨在愚弄它的图像更具抵抗力)、可解释性(理解为什么网络进行特定的分类)、逼真的图像生成(我们将在第 17 章中讨论)和单次学习(一个系统,它可以在看到一个物体一次后就识别它)。有些人甚至探索了全新的架构,例如 <NAME> 的胶囊网络(我在几个视频中介绍了它们,并在笔记本中提供了相应的代码)。现在进入下一章,我们将研究如何使用循环神经网络和卷积神经网络处理时序数据,例如时间序列。
| ch14.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Load libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from pandas import read_csv
from pandas import set_option
from matplotlib import pyplot
from pandas import read_csv
from pandas import set_option
from matplotlib import pyplot as plt
import seaborn
HOME_PATH = ''  # home path of the project (prefix for all file paths below)
FILENAME = 'D_Contraceptive_Method_Data.csv'  # dataset file to load
# ## 1. Load the dataset
# Load the contraceptive-method dataset into a DataFrame.
dataset = pd.read_csv(HOME_PATH + FILENAME)
dataset
# ## 2. Analyze data
# Columns that hold categorical (coded) values rather than true numerics.
categorical_cols = [
    'wife_education',
    'husband_education',
    'wife_religion',
    'wife_working',
    'husband_occupation',
    'standard_of_living_index',
    'media_exposure',
    'contraceptive_method_used',
]
categorical_cols
# Dimensions of the dataset (rows, columns).
dataset.shape
# Data types of each attribute.
dataset.dtypes
# Peek at the first rows of the data.
dataset.head(20)
# Summarize the distribution of each attribute.
# FIX: use the fully-qualified option key — the bare 'precision' alias was
# deprecated and removed in modern pandas, while 'display.precision' works
# on every version.
set_option('display.precision', 2)
dataset.describe()
# ## 3. Data visualization
# One box plot per column; categorical columns are converted to their integer
# category codes first so matplotlib can plot them.
for col in dataset.columns :
    # Multiple box plots on one Axes
    data = dataset[col]
    if col in categorical_cols :
        data = data.astype("category").cat.codes
    fig, ax = plt.subplots()
    ax.boxplot(data)
    ax.set_title(col)
# One histogram per column, with the same categorical-to-codes conversion.
for col in dataset.columns :
    # Multiple box plots on one Axes
    data = dataset[col]
    if col in categorical_cols :
        data = data.astype("category").cat.codes
    fig, ax = plt.subplots()
    ax.hist(data, density=False, histtype='bar')
    ax.set_title(col)
# Correlation matrix: absolute Pearson correlations shown as a heatmap,
# with the upper triangle masked so each pair appears once.
# FIX: 'display.precision' is the fully-qualified option key; the bare
# 'precision' alias was removed in modern pandas.
set_option('display.precision', 2)
pyplot.figure(figsize=(20,10))
cors = abs(dataset.corr(method='pearson'))
seaborn.heatmap(cors, mask=np.triu(np.ones_like(cors, dtype=bool)), cmap='Blues', vmin=0, vmax=1, annot=True)
pyplot.show()
# ## 4. Edit data
# Check every column for missing values and report counts where present.
for col in dataset.columns :
    if not dataset[col].isnull().values.any() :
        print(col, ':', 'NO NaN values')
    else :
        # FIX: corrected the message typo ("finded" -> "found").
        print(col, ':', 'NaN values found')
        print('Number of NaN values: ', dataset[col].isnull().sum())
#quick look at the breakdown of class values
# Cast each categorical column to pandas 'category' dtype, then print the
# count of rows per category value.
for col in categorical_cols :
    dataset[col] = dataset[col].astype('category')
    print('###########################')
    print(dataset.groupby(col).size())
# ## 5. Data split (train and test)
from sklearn.model_selection import train_test_split
# Split the row *indices* (not the data itself) 80/20 into train/test,
# with a fixed seed for reproducibility.
idx_train, idx_test = train_test_split(dataset.index.tolist(), train_size=0.8, random_state=42, shuffle=True)
print('Train data length: ', len(idx_train))
print('Test data length: ', len(idx_test))
print('Total data length: ', len(idx_train) + len(idx_test))
# Select train data and save locally.
# FIX: renamed diabetes_* -> train_data/test_data: this notebook analyzes the
# contraceptive-method dataset, so the old names were copy-paste leftovers
# from another dataset's notebook.
train_data = dataset.loc[idx_train]
train_data.to_csv(HOME_PATH + 'TRAIN DATASETS/D_ContraceptiveMethod_Real_Train.csv', index=False)
# Select test data and save locally.
test_data = dataset.loc[idx_test]
test_data.to_csv(HOME_PATH + 'TEST DATASETS/D_ContraceptiveMethod_Real_Test.csv', index=False)
print('Train data size: ', train_data.shape)
print('Test data length: ', test_data.shape)
| notebooks/Dataset D - Contraceptive Method Choice/EDA and Data Split Dataset D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # The Sparks Foundation - GRIP - Data Science and Business Analytics -
# # January 2022
#
# # Task 1 :- Prediction Using Supervised ML
#
# # Author : <NAME>
# Importing required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# ## Reading data from url
# Load the hours-vs-scores dataset straight from its public URL.
student_score = pd.read_csv("https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv")
student_score.head()
student_score.shape #shape of dataset
student_score.describe().T #statistical values. T is used for transpose
#distplot lets you show a histogram with a line on it. This can be shown in all kinds of variations.
# NOTE(review): sns.distplot is deprecated since seaborn 0.11 and removed in
# 0.14; sns.histplot(..., kde=True) is the modern equivalent — confirm the
# installed seaborn version supports it.
sns.distplot(student_score['Hours'],label='hour',color='green')
#distplot lets you show a histogram with a line on it. This can be shown in all kinds of variations.
sns.distplot(student_score['Scores'],label='Scores',color='green' )
# Let's plot our data points on 2-D graph to eyeball our dataset and see if we can manually find any relationship between the data. We can create the plot with the following script:
# Plotting the distribution of scores: hours studied vs percentage score,
# one dot per student.
student_score.plot(x='Hours', y='Scores', style='.', color= 'red')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
# ### The next step is to divide the data into "attributes" (inputs) and "labels" (outputs).
# Features: every column but the last (Hours); labels: the Scores column.
x = student_score.iloc[:, :-1].values
y = student_score.iloc[:, 1].values
y.shape, x.shape
#split this data into training and test sets (90/10, fixed seed)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y,test_size=0.1, random_state=0)
# ### Training the Algorithm
#
# We have split our data into training and testing sets, and now is finally the time to train our algorithm.
from sklearn.linear_model import LinearRegression
# Ordinary least-squares linear regression fit on the training split.
reg = LinearRegression()
reg.fit(x_train, y_train)
# ### Plotting the regression line
# +
# Plotting the regression line: y = coef * x + intercept over all x values.
line = reg.coef_ * x + reg.intercept_
# Scatter of the raw data with the fitted line overlaid.
plt.scatter(x, y, color='red', marker='.')
plt.plot(x, line, color= 'red');
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
# -
# ### Making Predictions
y_pred=reg.predict(x_test) # predicted test-set values
# Side-by-side comparison of actual vs predicted scores.
actual_predicted=pd.DataFrame({'Actual':y_test,'Predicted':y_pred})
actual_predicted
# Plot actual vs. predicted scores on the test set.
# FIX: use a scatter for the actual points — x_test is not sorted, so
# plt.plot would connect unordered points into a meaningless zig-zag line.
# (The prediction line is unaffected: all its points lie on the fitted line.)
plt.scatter(x_test,y_test,label="test")
plt.plot(x_test,y_pred,color="red",label="predict")
plt.xlabel("Hours Studied")
plt.ylabel("Percentage Score")
leg=plt.legend()
plt.show()
# ### You can also test with your own data
# Predict scores for hand-picked study-hour values
# (2-D list: one row per sample, one feature per row).
hours = [[2],[5.4],[8],[9]]
own_pred = reg.predict(hours)
print("No of Hours studied = {}".format(hours))
print("Predicted Score = {}".format(own_pred))
# ### Evaluating the model
#
# The final step is to evaluate the performance of algorithm. This step is particularly important to compare how well different algorithms perform on a particular dataset. For simplicity here, we have chosen the mean square error. There are many such metrics.
from sklearn import metrics
# Mean absolute error between actual and predicted test-set scores.
print('Mean Absolute Error:',
      metrics.mean_absolute_error(y_test, y_pred))
| The Sparks Foundation Task 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Identify fraud from Enron Email
# ## Context
#
# In 2000, Enron was one of the largest companies in the United States. By 2002, it had collapsed into bankruptcy due to widespread corporate fraud. In the resulting Federal investigation, a significant amount of typically confidential information entered into the public record, including tens of thousands of emails and detailed financial data for top executives. In this project, you will play detective, and put your new skills to use by building a person of interest identifier based on financial and email data made public as a result of the Enron scandal. To assist you in your detective work, we've combined this data with a hand-generated list of persons of interest in the fraud case, which means individuals who were indicted, reached a settlement or plea deal with the government, or testified in exchange for prosecution immunity.
# ### The Dataset
#
# As preprocessing to this project, we've combined the Enron email and financial data into a dictionary, where each key-value pair in the dictionary corresponds to one person. The dictionary key is the person's name, and the value is another dictionary, which contains the names of all the features and their values for that person. The features in the data fall into three major types, namely financial features, email features and POI labels.
#
# financial features: ['salary', 'deferral_payments', 'total_payments', 'loan_advances', 'bonus', 'restricted_stock_deferred', 'deferred_income', 'total_stock_value', 'expenses', 'exercised_stock_options', 'other', 'long_term_incentive', 'restricted_stock', 'director_fees'] (all units are in US dollars)
#
# email features: ['to_messages', 'email_address', 'from_poi_to_this_person', 'from_messages', 'from_this_person_to_poi', 'shared_receipt_with_poi'] (units are generally number of emails messages; notable exception is ‘email_address’, which is a text string)
#
# POI label: [‘poi’] (boolean, represented as integer)
# ### Understanding the Dataset and Question
#
# - Data Exploration (related lesson: "Datasets and Questions"):
#
# Student response addresses the most important characteristics of the dataset and uses these characteristics to inform their analysis. Important characteristics include:
# total number of data points,
# allocation across classes (POI/non-POI),
# number of features used,
# are there features with many missing values?,etc
#
# - Outlier Investigation (related lesson: "Outliers"):
# Student response identifies outlier(s) in the financial data, and explains how they are removed or otherwise handled.
#
# ### Libraries
#
# In this project I will use the libraries sys, pickle, pandas, numpy, matplot and pprint and the external file tester.
#
# I will use pickle to load the data; pandas and numpy to data manipulation, matplot to plot our visualizations and tester to test our model.
# +
import sys
import pickle
import pandas as pd
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pprint import pprint
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import tester
# %matplotlib inline
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
# -
# ### Importing Dataset
#
# I will import the dataset that is in a final_project_dataset.pkl file.
# Importing Dataset.
# FIX: open in binary mode ("rb") — pickle files are binary, this works on
# Python 2 and is required on Python 3 — and use a context manager so the
# file handle is closed (the original left it open).
with open("../final_project/final_project_dataset.pkl", "rb") as pkl_file:
    data_dict = pickle.load(pkl_file)
# ### Data Exploration
# In this part I will perform initial data analysis.
# First, we will see a instance of this data in order to get a sense of the data features.
# First person's feature dict, to get a sense of the available features.
# FIX: wrap in list(...) — on Python 3 dict.values() returns a view that does
# not support indexing, so the original .values()[0] is Python-2-only.
list(data_dict.values())[0]
# There are 21 features some are financial features and others are email features. We can also see that we will need to handle with the NaN values. Let see the features.
# Number of features and their names for one person.
# FIX: list(...) keeps this working on Python 3, where dict views do not
# support indexing (the original .values()[0] / .keys() display were Py2-only).
first_person = list(data_dict.values())[0]
len(first_person.keys()), list(first_person.keys())
# Lets tranform to a pandas data_frame for a better data manipulation,
# Convert the nested dict to a DataFrame: one row per person, one column per feature.
eron_data = pd.DataFrame.from_dict(data_dict, orient = 'index')
# Lets see how many people are poi in our dataset.
# +
# total number of data points, allocation across classes (POI/non-POI)
# number of features used, are there features with many missing values?
# total number of data points,
print("Number of People Analyzed: ",len(data_dict))
# allocation across classes (POI/non-POI)
eron_data.groupby('poi').count()['salary']
# -
# Bar chart of POI vs non-POI counts.
# NOTE(review): sns.despine() runs before countplot creates its axes, so it
# likely has no effect on the plot below — verify.
sns.set_context("talk")
sns.set_style("whitegrid")
sns.despine(left=True)
sns.countplot(x='poi',data=eron_data)
# Lets treat our missing data. In order to do so we will need to first see how many NaN are on our dataset by each feature.
def PercentageMissin(Dataset):
    """Return a one-row DataFrame ('% of missing') with the percentage of
    'NaN' string markers per column of *Dataset*.

    Missing values in this dataset are encoded as the literal string 'NaN',
    so we compare against that string rather than using isnull().

    Raises:
        TypeError: if *Dataset* is not a pandas DataFrame.
    """
    # Guard clause instead of wrapping the whole body in an if/else.
    if not isinstance(Dataset, pd.DataFrame):
        raise TypeError("can only be used with panda dataframe")
    adict = {}  # column name -> percentage of 'NaN' markers in that column
    for feature in Dataset.columns:
        # Vectorized count of the 'NaN' markers (replaces the per-element loop).
        nan_count = (Dataset[feature] == 'NaN').sum()
        adict[feature] = (nan_count * 100) / len(Dataset[feature])
    return pd.DataFrame(adict, index=['% of missing'], columns=adict.keys())
# Rank columns by their share of missing ('NaN') values, highest first.
print("Ranking of Missing Data")
PercentageMissin(eron_data).mean().sort_values(ascending=False)
# Column groupings used by the cleaning and consistency checks below.
payment_fields = [
    'poi', 'salary', 'bonus', 'long_term_incentive', 'deferred_income',
    'deferral_payments', 'loan_advances', 'other', 'expenses',
    'director_fees', 'total_payments',
]
stock_fields = [
    'poi', 'exercised_stock_options', 'restricted_stock',
    'restricted_stock_deferred', 'total_stock_value',
]
email_fields = [
    'to_messages', 'from_messages', 'from_poi_to_this_person',
    'from_this_person_to_poi', 'shared_receipt_with_poi',
]
# Financial 'NaN' markers mean "no payment/stock", so replace them with 0;
# then split the frame into POI and non-POI subsets for separate imputation.
eron_data.loc[:,payment_fields] = eron_data.loc[:,payment_fields].replace('NaN', 0)
eron_data.loc[:,stock_fields] = eron_data.loc[:,stock_fields].replace('NaN',0)
eron_poi = eron_data[eron_data.poi == 1]
eron_non_poi = eron_data[eron_data.poi == 0]
# +
from sklearn.preprocessing import Imputer
# Impute missing email counts with the group mean, separately for POI and
# non-POI, then recombine the two groups.
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22; SimpleImputer is the replacement on newer versions.
imp = Imputer(missing_values='NaN', strategy = 'mean', axis=0)
eron_poi.loc[:, email_fields] = imp.fit_transform(eron_poi.loc[:,email_fields]);
eron_non_poi.loc[:, email_fields] = imp.fit_transform(eron_non_poi.loc[:,email_fields]);
eron_data = eron_poi.append(eron_non_poi)
# -
# Confirm that no missing values remain after imputation.
eron_data.info()
# There are no more missing data.
eron_data.head(20)['poi']
# ### Outliers
#
# In this part we will find the outliers and remove them.
# Rows whose individual payment fields do not sum to 'total_payments'
# (data-entry errors to be corrected below).
eron_data[eron_data[payment_fields[1:-1]].sum(axis='columns') != eron_data['total_payments']][payment_fields + ['total_payments']]
# Same consistency check for the stock fields vs 'total_stock_value'.
eron_data[eron_data[stock_fields[1:-1]].sum(axis='columns') != eron_data['total_stock_value']][stock_fields+['total_stock_value']]
# We see that only Robert Belfer and Sanjay Bhatnagar had their data of total_payments and total_stock different from the payments_fields and stock_fields aggregate sum. Now looking at the Enron pdf we can see that the values in the dataset were wrong. So we will now correct them.
# Treating <NAME>.
# +
# ROBERT BELFER: re-enter the correct figures from the insider-pay PDF
# (his values were shifted by one column in the source spreadsheet).
# FIX: the original cell wrote to the misspelled columns 'deffered_income'
# and 'defferral_payments', which silently CREATED new columns instead of
# correcting the real 'deferred_income' / 'deferral_payments' values.
eron_data.loc['BELFER ROBERT','deferred_income'] = -102500
eron_data.loc['BELFER ROBERT','deferral_payments'] = 0
eron_data.loc['BELFER ROBERT','expenses'] = 3285
eron_data.loc['BELFER ROBERT','director_fees'] = 102500
eron_data.loc['BELFER ROBERT','total_payments'] = 3285
eron_data.loc['BELFER ROBERT','exercised_stock_options'] = 0
eron_data.loc['BELFER ROBERT','restricted_stock'] = 44093
eron_data.loc['BELFER ROBERT','restricted_stock_deferred'] = -44093
eron_data.loc['BELFER ROBERT','total_stock_value'] = 0
eron_data.loc['BELFER ROBERT'][payment_fields+stock_fields]
# -
# Treating BHATNAGAR SANJAY.
# +
# BHATNAGAR SANJAY: re-enter the correct figures from the insider-pay PDF
# (his values were shifted by one column in the source spreadsheet).
eron_data.loc['BHATNAGAR SANJAY','other'] = 0
eron_data.loc['BHATNAGAR SANJAY','expenses'] = 137864
eron_data.loc['BHATNAGAR SANJAY','director_fees'] = 0
eron_data.loc['BHATNAGAR SANJAY','total_payments'] = 137864
eron_data.loc['BHATNAGAR SANJAY','exercised_stock_options'] = 15456290
eron_data.loc['BHATNAGAR SANJAY','restricted_stock'] = 2604490
eron_data.loc['BHATNAGAR SANJAY','restricted_stock_deferred'] = -2604490
eron_data.loc['BHATNAGAR SANJAY','total_stock_value'] = 15456290
eron_data.loc['BHATNAGAR SANJAY'][payment_fields+stock_fields]
# -
# Re-run both consistency checks; they should now return empty frames.
eron_data[eron_data[payment_fields[1:-1]].sum(axis='columns') != eron_data['total_payments']][payment_fields]
eron_data[eron_data[stock_fields[1:-1]].sum(axis='columns') != eron_data['total_stock_value']][stock_fields]
# Fill all the incorrect data. Now lets see in the rows and their outliers and see if they are an oulier.
# ### Removing Outliers
# According to the pdf and the data_dict we have some aggregate row. They are "Total" and "THE TRAVEL AGENCY IN THE PARK". So lets remove them.
# Drop the two non-person aggregate rows carried over from the spreadsheet.
eron_data = eron_data.drop(["TOTAL","THE TRAVEL AGENCY IN THE PARK"])
# Lets see those with rows with more outliers. To do so we will see the rows that stays less than 25% of the data
# and those with greather than 75%.
# Per-column upper fence (median + 1.5*IQR), then count how many columns of
# each row exceed their fence; show the 7 rows with the most outliers.
# NOTE(review): the conventional Tukey fence is Q3 + 1.5*IQR, not
# median + 1.5*IQR — confirm this threshold is intentional before reuse.
outliers = eron_data.quantile(.5) + 1.5 * (eron_data.quantile(.75)-eron_data.quantile(.25))
outlier_pd = pd.DataFrame((eron_data[1:] > outliers[1:]).sum(axis = 1), columns = ['# of outliers']).\
    sort_values('# of outliers', ascending = [0]).head(7)
outlier_pd
outlier_pd.join(eron_data)[['# of outliers','poi']]
# We will remove those with more outliers that are not poi.
# Drop the four non-POI rows that had the most outliers, and keep data_dict
# in sync by deleting the same four people.
# FIX: the last three del keys had been redacted to '<NAME>' in this copy of
# the notebook (which would raise KeyError); restored from the drop list on
# the line above.
eron_data = eron_data.drop(['FREVERT MARK A','WHALLEY LAWRENCE G','LAVORATO JOHN J','KEAN STEVEN J'])
del data_dict['FREVERT MARK A']
del data_dict['WHALLEY LAWRENCE G']
del data_dict['LAVORATO JOHN J']
del data_dict['KEAN STEVEN J']
# Lets see if we have some rows with just NaN fields.
# Count zero-valued (formerly-'NaN') fields per row to spot all-empty people.
nan_pd =pd.DataFrame((eron_data == 0).astype(int).sum(axis=1), columns = ['# of NaN']).\
    sort_values('# of NaN', ascending = [0]).head(7)
nan_pd
# Inspect the emptiest rows, then drop LOCKHART EUGENE E (all values missing).
data_dict['LOCKHART EUGENE E']
# NOTE(review): the key below was redacted to '<NAME>' in this copy of the
# notebook; the intended person cannot be recovered from this cell alone.
data_dict['<NAME>']
eron_data = eron_data.drop(['LOCKHART EUGENE E'])
del data_dict['LOCKHART EUGENE E']
# ### Select Features
# First thing that strike my attention was the number of missing data in loan_advances,director_fees, restricted_stock_deferred, deferral_payments. It seems we have just a few of those and I if it wll be usefull to our model.
# Inspect loan_advances: how many non-zero entries, and are they POIs?
loan_advances_df = eron_data[eron_data.loan_advances != 0][['loan_advances','poi']]
print("There are ",len(loan_advances_df)," points in our dataset.")
loan_advances_df
# Since we have only four points in our dataset and only one is a POI, I will not consider this feature in our feature list.
# Inspect director_fees: how many non-zero entries, and are they POIs?
director_fees_df = eron_data[eron_data.director_fees != 0][['director_fees','poi']]
print("There are ",len(director_fees_df)," points in our dataset.")
director_fees_df
# From 146 values we have only 17 entries and from those all 17 are false to poi parameter. So I will also not consider in my analysis.
# Inspect restricted_stock_deferred: how many non-zero entries, and are they POIs?
restricted_stock_deferred_df = eron_data[eron_data.restricted_stock_deferred != 0][['restricted_stock_deferred','poi']]
print("There are ",len(restricted_stock_deferred_df)," points in our dataset.")
restricted_stock_deferred_df
# The same happened with restricted_store_deferred as with the director_fee. From 146 values we have only 18 entries and from those all are false to poi parameter. So I will also not consider in my analysis.
# Inspect deferral_payments: non-zero entries with their POI flag.
deferral_payments_df = eron_data[eron_data.deferral_payments != 0][['deferral_payments','poi']]
print("There are ",len(deferral_payments_df)," points in our dataset.")
deferral_payments_df
# How many of those non-zero entries are POIs vs non-POIs?
deferral_payments_df.groupby('poi').count()
# We found some poi in deferral payments!! There were 5 poi out of 39 people in this feature. Let see if plotting the values we found some more information about this points.
# Scatter of deferral_payments by POI flag (fit_reg=False: no regression line).
sns.lmplot('poi','deferral_payments',data=deferral_payments_df,fit_reg=False)
# We also noticed that ther are no much difference between the values of poi and non poi for the deferral payments. Most of the poi felt almost like in the middle of the data.
# The others features contain almost 50% of the data so I will use them in my analysis. Other thing is the email_address is unique amoung the people, so I will not use these variable.
# The features loan_advances, director_fees, restricted_stock_deferred and email_address will not be used.
# +
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
# FIX: the original list contained 'shared_receipt_with_poi' twice, which
# duplicated that column when subsetting eron_data below; the duplicate has
# been removed (the feature itself is kept once).
features_list = [
    'poi', 'to_messages', 'from_messages', 'from_poi_to_this_person', 'from_this_person_to_poi',
    'salary', 'deferral_payments', 'other', 'total_payments', 'bonus',
    'total_stock_value', 'shared_receipt_with_poi', 'long_term_incentive',
    'exercised_stock_options', 'deferred_income', 'expenses', 'restricted_stock']
# In the featureFormat it will treat the missing values giving them a 0 value
eron_data = eron_data[features_list]
data = eron_data
# data = featureFormat(data_dict, features_list)
# -
# #### Question:
# Summarize for us the goal of this project and how machine learning is useful in trying to accomplish it. As part of your answer, give some background on the dataset and how it can be used to answer the project question. Were there any outliers in the data when you got it, and how did you handle those? [relevant rubric items: “data exploration”, “outlier investigation”]
# #### Answer:
# The goal of this project is to succesfully identify the POIs of the Eron scandal. In order to do it I will use machine learning classify algorithm, that can throught the data identify patterns of the POI. In this project I will use the dataset contaning some financial data and email data.
# ### Optimize Feature Selection/Engineering:
# - Create new features (related lesson: "Feature Selection"):
#
# At least one new feature is implemented. Justification for that feature is provided in the written response. The effect of that feature on final algorithm performance is tested or its strength is compared to other features in feature selection. The student is not required to include their new feature in their final feature set.
#
# - Intelligently select features (related lesson: "Feature Selection"):
#
# Univariate or recursive feature selection is deployed, or features are selected by hand (different combinations of features are attempted, and the performance is documented for each one). Features that are selected are reported and the number of features selected is justified. For an algorithm that supports getting the feature importances (e.g. decision tree) or feature scores (e.g. SelectKBest), those are documented as well.
#
# - Properly scale features (related lesson: "Feature Scaling")
#
# If algorithm calls for scaled features, feature scaling is deployed.
#
# +
## Auxiliar Functions
def convert_dataframe_into_dataset(df):
"""
Convert Pandas DataFarme to Dataset.
"""
scaled_df = df.copy()
scaled_df.iloc[:,1:] = scale(scaled_df.iloc[:,1:])
my_dataset = scaled_df.to_dict(orient='index')
return my_dataset
def test_model_tester(clf, my_dataset, features_list):
    """Dump the classifier/dataset via the project ``tester`` and run its evaluation.

    Bug fix: the parameter was previously named ``featurs_list`` (typo), so the
    body silently used the *global* ``features_list`` instead of the argument
    the caller passed. All visible call sites pass positionally, so renaming
    the parameter is safe and makes the argument actually take effect.
    """
    tester.dump_classifier_and_data(clf, my_dataset, features_list)
    return tester.main()
def divide_dataset_into_features_labels(my_dataset, features_list):
    """Split the dataset dict into [labels, features] via the project helpers."""
    formatted = featureFormat(my_dataset, features_list, sort_keys=True)
    target, inputs = targetFeatureSplit(formatted)
    return [target, inputs]
class DecisionTree:
    """Convenience wrapper: builds a DecisionTreeClassifier, fits it on the
    train split and predicts the test split in a single constructor call."""

    def __init__(self, features_train, features_test, labels_train):
        """Create the classifier, fit on the train split, predict the test split."""
        self.clf = DecisionTreeClassifier()
        self.fit(features_train, labels_train)
        self.predict(features_test)

    def fit(self, features, labels):
        """Fit the wrapped classifier; returns the fitted estimator."""
        return self.clf.fit(features, labels)

    def predict(self, features_test):
        """Return the wrapped classifier's predictions for `features_test`."""
        return self.clf.predict(features_test)
# -
# Scale the engineered DataFrame, convert it to the dict format expected by
# featureFormat/tester, split 70/30 and score a baseline decision tree.
my_dataset = convert_dataframe_into_dataset(eron_data)
[labels, features] = divide_dataset_into_features_labels(my_dataset,features_list)
features_train, features_test, labels_train, labels_test = train_test_split(features,\
    labels, test_size=0.3, random_state=42)
clf = DecisionTree(features_train, features_test, labels_train)
print(test_model_tester(clf,my_dataset,features_list));
# ###### Creating new features
# Sanity checks: rows where the per-person POI message counts exceed the totals.
eron_data[eron_data['to_messages'] < eron_data['from_poi_to_this_person']][['from_poi_to_this_person', 'to_messages']]
eron_data[eron_data['from_messages'] < eron_data['from_this_person_to_poi']][['from_this_person_to_poi', 'from_messages']]
# Engineer a new feature: fraction of each person's traffic exchanged with POIs.
eron_data['fraction_messages_to_poi'] =eron_data['from_this_person_to_poi']/eron_data['from_messages']
eron_data['fraction_messages_from_poi'] = eron_data['from_poi_to_this_person']/eron_data['to_messages']
print(eron_data.loc['BANNANTINE <NAME>'])
# +
# Register the engineered features in features_list (idempotent).
new_features = ["fraction_messages_from_poi","fraction_messages_to_poi"]
for feature_name in new_features:
    if feature_name not in features_list:
        features_list.append(feature_name)
# +
### plot new features: POI (red stars) vs non-POI (blue) on the two fractions
fraction_list = ['poi','fraction_messages_from_poi','fraction_messages_to_poi']
data_fraction = eron_data[fraction_list]
data_fraction_poi = data_fraction[data_fraction['poi'] == 1]
data_fraction_non_poi = data_fraction[data_fraction['poi'] == 0]
ax = data_fraction_poi.plot(kind='scatter', x='fraction_messages_from_poi', y='fraction_messages_to_poi',
                            color='DarkRed', label='POI', marker="*");
data_fraction_non_poi.plot(kind='scatter', x='fraction_messages_from_poi', y='fraction_messages_to_poi',
                           color='DarkBlue', label='Non POI',ax=ax);
plt.ylabel("From this person to Poi")
plt.xlabel('From Poi to this person')
plt.show()
# -
# Rebuild the dataset now that the engineered features are included.
my_dataset = convert_dataframe_into_dataset(eron_data)
[labels, features] = divide_dataset_into_features_labels(my_dataset,features_list)
## Test our data set and see the performace of it.
# NOTE(review): features_train/labels_train here are still the split from the
# earlier cell (computed before the new features) — confirm this is intended.
clf = DecisionTree(features_train, features_test, labels_train)
print(test_model_tester(clf,my_dataset,features_list));
# Get my dataset and also perform feature scaling
# ### Feature selection
print("Initial data shape:", data.shape)
print("Feature_list:", features_list)
features = np.array(features)
data.shape,features.shape
data_pd = pd.DataFrame(data=data)
corr = data_pd.corr()
corr.shape
# +
# Feature selection - selectionkbest, select percentile,lasso regression
# Sweep k = 5, 7, 8, 9 with SelectKBest and compare tester metrics.
# NOTE(review): test_model_tester evaluates on my_dataset/features_list (the
# full feature set), not on the k selected columns — confirm the comparison
# between k values is meaningful.
from sklearn.feature_selection import SelectPercentile,SelectKBest,chi2
selector = SelectKBest(k=5)
features_5 = selector.fit_transform(features,labels)
features_train, features_test, labels_train, labels_test = train_test_split(features_5,\
    labels, test_size=0.3, random_state=42)
clf = DecisionTree(features_train, features_test, labels_train)
print(test_model_tester(clf,my_dataset,features_list));
# +
from sklearn.feature_selection import SelectPercentile,SelectKBest,chi2
selector = SelectKBest(k=7)
features_7 = selector.fit_transform(features,labels)
features_train, features_test, labels_train, labels_test = train_test_split(features_7,\
    labels, test_size=0.3, random_state=42)
clf = DecisionTree(features_train, features_test, labels_train)
print(test_model_tester(clf,my_dataset,features_list));
# +
from sklearn.feature_selection import SelectPercentile,SelectKBest,chi2
selector = SelectKBest(k=8)
features_8 = selector.fit_transform(features,labels)
features_train, features_test, labels_train, labels_test = train_test_split(features_8,\
    labels, test_size=0.3, random_state=42)
clf = DecisionTree(features_train, features_test, labels_train)
print(test_model_tester(clf,my_dataset,features_list));
# +
from sklearn.feature_selection import SelectPercentile,SelectKBest,chi2
selector = SelectKBest(k=9)
features_9 = selector.fit_transform(features,labels)
features_train, features_test, labels_train, labels_test = train_test_split(features_9,\
    labels, test_size=0.3, random_state=42)
clf = DecisionTree(features_train, features_test, labels_train)
print(test_model_tester(clf,my_dataset,features_list));
# -
# I tested k = 5 to 9 and got that the best results was with 5. So I decided to use a k = 5.
# +
# Feature selection - selectionkbest, select percentile,lasso regression
# Refit SelectKBest with the chosen k=5 and inspect the per-feature scores.
from sklearn.feature_selection import SelectPercentile,SelectKBest,chi2
selector = SelectKBest(k=5)
features_5 = selector.fit_transform(features,labels)
scores_KB = selector.scores_
print("Final shape of parameters",features.shape)
print("Feature scores:",selector.scores_)
# +
import numpy as np
# Tabulate and rank the scores (features_list[1:] skips the 'poi' label).
scores = selector.scores_
d = {'features': features_list[1:], 'score': scores}
df = pd.DataFrame(data=d)
df.sort_values(['score'],ascending=[False])
# -
ax = df.plot.bar(xticks=df.index)
ax.set_xticklabels(df.features)
# Keep the 'poi' label plus the top-5 scoring features.
features_list = ['poi','bonus','salary','fraction_messages_to_poi','total_stock_value','exercised_stock_options']
# I used SelectBestK with k = 5. Then I stayed with the top 5 features: ['poi','bonus','salary','fraction_messages_to_poi','total_stock_value','exercised_stock_options'].
# #### Question:
# What features did you end up using in your POI identifier, and what selection process did you use to pick them? Did you have to do any scaling? Why or why not? As part of the assignment, you should attempt to engineer your own feature that does not come ready-made in the dataset -- explain what feature you tried to make, and the rationale behind it. (You do not necessarily have to use it in the final analysis, only engineer and test it.) In your feature selection step, if you used an algorithm like a decision tree, please also give the feature importances of the features that you use, and if you used an automated feature selection function like SelectKBest, please report the feature scores and reasons for your choice of parameter values. [relevant rubric items: “create new features”, “intelligently select features”, “properly scale features”]
#
#
# #### Answer
#
# First I created two new variables two variables one for the 'fraction_messages_to_poi' and other for 'fraction_messages_from_poi'. By creating them I noticed that I had a better model performace in all our metrics. The accuracy,precision and recall improved. Accuracy went from 82.3% to 89.9%. Precision went from 33.9% to 62.6% . And Recall went from 34.55% to 60.3%.
#
# Then I scaled the features to reduce their range. This was most useful for the financial features, where we have large numbers and a wide range of values.
#
# Then selected the features using SelectKBest with differents k (k = 5,7,8,9) and I noticed that the top 5 features were above 18% so I selected to keep with the k = 5. And the best paramenters I got was ['poi','bonus','salary','fraction_messages_to_poi','total_stock_value','exercised_stock_options'].
# ## Pick an Algorithm
#
# - Pick an algorithm (related lessons: "Naive Bayes" through "Choose Your Own Algorithm")
#
# At least two different algorithms are attempted and their performance is compared, with the best performing one used in the final analysis.
#
# Let's first separate between train and test data set.
# Train and test
from sklearn.model_selection import train_test_split
# NOTE(review): `features` is still the full (pre-selection) matrix even though
# features_list was narrowed above — confirm this is the intended input.
features_train, features_test, labels_train, labels_test = \
    train_test_split(features, labels, test_size=0.3, random_state=42)
#Decision Tree baseline on the new split.
clf = DecisionTree(features_train, features_test, labels_train)
print(test_model_tester(clf,my_dataset,features_list));
# Then we will peak an algorithm.
# I will try to fit with:
# - Knn
# - Decision Tree
# - Random Forest
# - Ada Boost
# k-nearest neighbours (k=3) baseline.
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(features_train,labels_train)
predict = clf.predict(features_test)
test_model_tester(clf,my_dataset,features_list)
# AdaBoost ensemble with 110 weak learners.
from sklearn.ensemble import AdaBoostClassifier
clf = AdaBoostClassifier(n_estimators=110)
clf.fit(features_train,labels_train)
predict = clf.predict(features_test)
test_model_tester(clf,my_dataset,features_list)
# Random forest with 100 trees.
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(features_train,labels_train)
predict = clf.predict(features_test)
test_model_tester(clf,my_dataset,features_list)
# #### Question:
# What algorithm did you end up using? What other one(s) did you try? How did model performance differ between algorithms? [relevant rubric item: “pick an algorithm”]
# #### Answer:
# I tried to use knn, naive bayes, decision trees and some ensembles classifiers like adaboost and random forest. I noticed that the perfomace differ from one model to another and with the decision tree it had the best peformace, with Accuracy: 0.85000 Precision: 0.47283 Recall: 0.43500. I also noticed that with AdaBoost I had a great metrics results as well.
# ## Tune an Algorithm
#
# - Discuss parameter tuning and its importance.:
#
# Response addresses what it means to perform parameter tuning and why it is important.
#
# - Tune the algorithm (related lesson: "Validation"):
#
# At least one important parameter tuned with at least 3 settings investigated systematically, or any of the following are true:
#
# GridSearchCV used for parameter tuning
# Several parameters tuned
# Parameter tuning incorporated into algorithm selection (i.e. parameters tuned for more than one algorithm, and best algorithm-tune combination selected for final analysis).
#
#
# - Algorithm Performance
#
# When tester.py is used to evaluate performance, precision and recall are both at least 0.3.
#
# - Validation Strategy (related lesson "Validation")
#
# Performance of the final algorithm selected is assessed by splitting the data into training and testing sets or through the use of cross validation, noting the specific type of validation performed.
#
# +
from sklearn.model_selection import StratifiedKFold,GridSearchCV
def init_cross_validation(train_reduced, labels):
    """Return a 5-fold stratified CV splitter for use with GridSearchCV."""
    splitter = StratifiedKFold(n_splits=5)
    splitter.get_n_splits(train_reduced, labels)
    return splitter
def generate_grid_search_model(model, parameters, train_reduced, labels):
    """Grid-search `parameters` for `model` with stratified 5-fold CV.

    Prints the best score and best parameter combination, and returns the
    fitted GridSearchCV object (use .best_estimator_ / .best_params_ on it).

    Cleanup: the previous version reassigned the `model` and `parameters`
    locals after the search (`model = grid_search`, `parameters =
    grid_search.best_params_`) — dead stores that obscured what is returned.
    Behavior is unchanged: the fitted GridSearchCV is returned either way.
    """
    cross_validation = init_cross_validation(train_reduced, labels)
    grid_search = GridSearchCV(model,
                               scoring='accuracy',
                               param_grid=parameters,
                               cv=cross_validation)
    grid_search.fit(train_reduced, labels)
    print('Best score: {}'.format(grid_search.best_score_))
    print('Best parameters: {}'.format(grid_search.best_params_))
    return grid_search
# -
def create_decision_tree(train_reduced, labels, run_grid_search=False):
    """Build and fit a DecisionTreeClassifier.

    With run_grid_search=True the split/leaf hyper-parameters are tuned via
    GridSearchCV; otherwise a fixed parameter set is used.

    Bug fix: the fixed-parameter dict previously used *bare names* as keys
    (``class_weight:None, criterion:'gini', ...``), which raised NameError
    the moment the ``else`` branch ran. The keys are now string literals.
    """
    if run_grid_search:
        parameter_grid = {
            'min_samples_split': [2, 3, 4, 10],
            'min_samples_leaf': [1, 3, 10]
        }
        clf = DecisionTreeClassifier()
        clf = generate_grid_search_model(clf, parameter_grid, train_reduced, labels)
    else:
        # NOTE(review): min_impurity_split and presort were removed in newer
        # scikit-learn releases; keep them only if the pinned version supports
        # them — confirm against the environment's sklearn version.
        parameters = {
            'class_weight': None,
            'criterion': 'gini',
            'max_depth': None,
            'max_features': None,
            'max_leaf_nodes': None,
            'min_impurity_split': 1e-07,
            'min_samples_leaf': 1,
            'min_samples_split': 2,
            'min_weight_fraction_leaf': 0.0,
            'presort': False,
            'random_state': None,
            'splitter': 'best'
        }
        clf = DecisionTreeClassifier(**parameters)
        clf.fit(train_reduced, labels)
    return clf
def create_AdaBoost(train_reduced, labels, run_grid_search=False):
    """Build and fit an AdaBoostClassifier.

    With run_grid_search=True the number of estimators is tuned via
    GridSearchCV; otherwise a fixed n_estimators=130 model is fitted.
    """
    if not run_grid_search:
        clf = AdaBoostClassifier(**{'n_estimators': 130})
        clf.fit(train_reduced, labels)
        return clf
    parameter_grid = {
        'n_estimators': [100,110,120,130,150,170]
    }
    return generate_grid_search_model(AdaBoostClassifier(), parameter_grid,
                                      train_reduced, labels)
# Tune both candidates with grid search and score them with the project tester.
clfAda = create_AdaBoost(features,labels,run_grid_search=True)
test_model_tester(clfAda,my_dataset,features_list)
clfDT = create_decision_tree(features,labels,run_grid_search=True)
test_model_tester(clfDT,my_dataset,features_list)
# #### Question
# What does it mean to tune the parameters of an algorithm, and what can happen if you don’t do this well? How did you tune the parameters of your particular algorithm? What parameters did you tune? (Some algorithms do not have parameters that you need to tune -- if this is the case for the one you picked, identify and briefly explain how you would have done it for the model that was not your final choice or a different model that does utilize parameter tuning, e.g. a decision tree classifier). [relevant rubric items: “discuss parameter tuning”, “tune the algorithm”]
# #### Answer:
# Tuning the algorithm means the way to optimize the paameters that impact the model in order to perform it best. I tuned my particular algorithm for the Decision Tree and got {'max_features': 'sqrt', 'min_samples_split': 3, 'min_samples_leaf': 1} parameters.
# For my Adaboost model I tuned the n_estimators. And the final result was with {'n_estimators': 130} parameter.
# ### Validate and Evaluate
# - Usage of Evaluation Metrics (related lesson: "Evaluation Metrics")
#
# At least two appropriate metrics are used to evaluate algorithm performance (e.g. precision and recall), and the student articulates what those metrics measure in context of the project task.
#
# - Discuss validation and its importance.
#
# Response addresses what validation is and why it is important.
#
# Evaluate the tuned decision-tree model with the project's tester script.
test_model_tester(clfDT,my_dataset,features_list)
# #### Question
# What is validation, and what’s a classic mistake you can make if you do it wrong? How did you validate your analysis? [relevant rubric items: “discuss validation”, “validation strategy”]
# #### Answer:
# Validation is the process of splitting your dataset into separate sets (for example into train and test sets, or into small chunks of data). A classic mistake is to skip this split and use the entire dataset to train your model. I validated my model using cross-validation, which means dividing the dataset into small chunks, using most of them to train and one chunk to test, and then repeating the process. I also used stratification, which keeps the percentage of the target class as close as possible to the one in the complete dataset.
#
# To validate my model I used cross validation method with n_iter=1000, test_size=0.1, random_state=42. That means, 1000 interations, 0.1 is the test size (15 rows in each test) and intializing in random_state 42.
# #### Question:
#
# Give at least 2 evaluation metrics and your average performance for each of them. Explain an interpretation of your metrics that says something human-understandable about your algorithm’s performance. [relevant rubric item: “usage of evaluation metrics”]
# #### Answer:
# In order to evaluate my model I used the accuracy, recall and prediction score.
#
# - Accuracy is the metric for the pecentage of correctness of the model. That means the porcentage of correct poi that the model could itentify.
#
# - Recall is the fraction that have been retrieved over the total amount of relevant instances. That means the percentage of the number of correct poi detected by the model divided by the total of actual poi in the analysis.
# - Precision is the fraction of relevant instances among the retrieved instances. That means the percentage of the number of correct poi detected by model divived by the total of poi detected.
#
# In my final model I got Accuracy Score: 84.2% Precision Score: 40.7% Recall Score: 37.4 %
#
# ## Exporting Model
# +
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
# Exports my_classifier.pkl / my_dataset.pkl / my_feature_list.pkl via tester.
tester.dump_classifier_and_data(clfDT, my_dataset, features_list)
| final_project/Identify Fraud from Eron Email.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Spiral Traverse
# Write a function that takes in an n X m two-dimensional array and returns an array of the elements in spiral order.
#
# For instance, given
# ```
# [
# [1, 2, 3, 4],
# [12, 13, 14, 5],
# [11, 16, 15, 6],
# [10, 9, 8, 7]
# ]
# ```
# The function should return
# ```
# [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
# ```
# #### Iterative solution
# +
def spiral_traverse(A):
    """Return the elements of the 2-D array `A` in clockwise spiral order.

    Complexity: O(n) time and O(n) space, where n is the total number of
    elements in the two-dimensional array.
    """
    # Shrinking window: [top, bottom] rows x [left, right] columns.
    top, bottom = 0, len(A) - 1
    left, right = 0, len(A[0]) - 1
    result = []
    while top <= bottom and left <= right:
        # Top edge, left -> right.
        result.extend(A[top][c] for c in range(left, right + 1))
        # Right edge, top -> bottom (corner already taken by the top edge).
        result.extend(A[r][right] for r in range(top + 1, bottom + 1))
        # Bottom edge, right -> left — skipped when it coincides with the
        # top edge (single remaining row), to avoid double counting.
        if top != bottom:
            result.extend(A[bottom][c] for c in range(right - 1, left - 1, -1))
        # Left edge, bottom -> top — skipped when it coincides with the
        # right edge (single remaining column).
        if left != right:
            result.extend(A[r][left] for r in range(bottom - 1, top, -1))
        top += 1
        bottom -= 1
        left += 1
        right -= 1
    return result
# -
# Sanity check with the 4x4 example from the prompt (expects 1..16 in order).
A = [
    [1, 2, 3, 4],
    [12, 13, 14, 5],
    [11, 16, 15, 6],
    [10, 9, 8, 7],
]
spiral_traverse(A)
# #### Recursive solution
# +
def spiral_traversal(array):
    """Recursive spiral traversal: elements of `array` in clockwise order.

    Complexity: O(n) time and O(n) space, where n is the total number of
    elements in the array. The work is delegated to `spiral_fill`, which
    peels off one outer ring per recursive call.
    """
    collected = []
    spiral_fill(array, 0, len(array) - 1, 0, len(array[0]) - 1, collected)
    return collected
def spiral_fill(A, start_row, end_row, start_col, end_col, output):
    """Append the outermost ring of A[start_row..end_row][start_col..end_col]
    to `output` in clockwise order, then recurse on the inner sub-matrix."""
    if start_row > end_row or start_col > end_col:
        return
    # Top edge, left -> right.
    for c in range(start_col, end_col + 1):
        output.append(A[start_row][c])
    # Right edge, top -> bottom (corner already taken).
    for r in range(start_row + 1, end_row + 1):
        output.append(A[r][end_col])
    # Bottom edge, right -> left — only if distinct from the top edge.
    if start_row != end_row:
        for c in range(end_col - 1, start_col - 1, -1):
            output.append(A[end_row][c])
    # Left edge, bottom -> top — only if distinct from the right edge.
    if start_col != end_col:
        for r in range(end_row - 1, start_row, -1):
            output.append(A[r][start_col])
    return spiral_fill(A, start_row + 1, end_row - 1, start_col + 1, end_col - 1, output)
# -
# Verify the *recursive* implementation on a 6x4 matrix.
B = [
    [2, 4, 6, 8],
    [32, 34, 36, 10],
    [30, 48, 38, 12],
    [28, 46, 40, 14],
    [26, 44, 42, 16],
    [24, 22, 20, 18]
]
# Bug fix: this cell sits under the "Recursive solution" heading but
# previously called the iterative spiral_traverse() instead.
spiral_traversal(B)
| 2D-arrays/spiral_traverse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Maj 2015, <NAME> in <NAME>
# +
# Import the required modules
from sympy import *
init_printing()  # pretty-print sympy expressions in the notebook
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Vprašanje 1: Na sliki (vir: <NAME>: Dinamika, meh. nihanja..., 2014) je prikazan trikotnik s stranicami dolžine $a$, $b$, debelino $h$ in gostoto $\rho$.
# <img src="../fig/Slavic 2014 str 242 slika 5.4.png" width=200>
# V simbolni obliki določite masni vztrajnostni moment glede na prikazano os $y$:
# $$J_{yy}=\int_0^b y^2\,\rho\,h\,(a-a/b\,y)\,dy.$$
# Upoštevajte tudi: $m=a\,b\,h\,\rho/2$. Za izmišljene vrednosti izračunajte numerični rezultat.
a, b, h, rho, y, m = symbols('a, b, h, rho, y, m')
# Mass moment of inertia about y: J_yy = int_0^b y^2 * rho * h * (a - a/b*y) dy
Jyy = (y**2 * rho * h * (a-a/b*y)).integrate((y, 0, b))
Jyy
# Substitute the mass m = a*b*h*rho/2 into the expanded result:
Jyy.expand().subs(rho*a*b*h/2, m)
# Numeric evaluation with example values:
podatki = {'a': 1, 'b': 2, 'rho': 7800, 'h': 0.01}
Jyy.subs(podatki)
# Vprašanje 2: Izračunajte integral tudi numerično. Uporabite ``scipy.integrate`` in integrirajte glede na pravila: trapezno, Simpsonovo 1/3. Rezultat primerjajte tudi z Gaussovo kvadraturo. Raziščite natančnost in hitrost metod.
# Definirajmo najprej funkcjo, ki jo je treba integrirati:
def f(y, p=podatki):
    """Integrand of the J_yy integral: y**2 * rho * h * (a - a/b * y).

    y: integration variable (scalar or numpy array — it is called with a
       linspace array below)
    p: dict with the data: a, b, rho, h (defaults to the module-level
       `podatki`, bound at definition time)
    """
    rho, h = p['rho'], p['h']
    a, b = p['a'], p['b']
    return y**2 * rho * h * (a - a / b * y)
from scipy import integrate
# To get a feel for the function being integrated, plot it first.
# +
# Define the integration limits
podatki['ma'] = 0
podatki['mb'] = 2
N = 100
y_d = np.linspace(podatki['ma'], podatki['mb'], N)
f_d = f(y_d, podatki)
plt.plot(y_d, f_d);
# -
# #%%timeit
# Trapezoidal rule on the sampled values (uniform spacing dy).
# NOTE: trapz/simps are renamed trapezoid/simpson in newer SciPy releases.
dy = y_d[1]-y_d[0]
integrate.trapz(f_d, dx=dy)
# #%%timeit
# Simpson's 1/3 rule on the same samples.
integrate.simps(f_d, dx=dy)
# #%%timeit
# Adaptive quadrature; `podatki` is forwarded to f as its second argument.
integrate.quad(f, 0, podatki['b'], args=podatki)
# Vprašnje 3: Preštudirajte ``scipy.special.legendre``, ki vam vrne objekt ``orthopoly1d``. Ta objekt ima metodo ``weights``, ki vrne seznam ``[x, w, mu0]`` vrednosti, ki jih uporabimo pri Gaussovi kvadraturi. (Če tukaj vsega ne razumete, ne skrbite preveč, bo asistent pokazal/komentiral). Opazite lahko, da smo vrednosti izpeljali na predavanjih!
from scipy import special
# Prepare a Legendre polynomial of the chosen degree (try several degrees):
poli = special.legendre(3)
# Inspect the nodes, weights (and mu0) used by Gauss-Legendre quadrature:
poli.weights
# Vprašanje 4: S pomočjo zgoraj pridobljenih uteži in vozlišč izračunajte integral s pomočjo Gaussove kvadrature: $\sum_iw_i\,f(x_i)$. Pazite na transformacijo mej:
# $$x=\frac{b+a}{2}+\frac{b-a}{2}\xi$$
# $$\int_a^bf(x)\,dx\approx\frac{b-a}{2}\sum_i A_i\,f(x_i).$$
# Partial result: weighted integrand values at the Gauss nodes mapped from
# [-1, 1] onto [ma, mb] via x = (b+a)/2 + (b-a)/2 * xi.
aa = [w*f(((podatki['mb']-podatki['ma'])*x + podatki['mb']+podatki['ma'])/2, podatki) for x, w, mu0 in poli.weights]
aa
# Apply the interval scaling (b-a)/2 and sum to obtain the integral:
(podatki['mb']-podatki['ma'])/2*np.sum(aa)
# Newton-Cotes weights for 4 equally spaced points, for comparison.
integrate.newton_cotes(rn=3)
| pypinm-master/vprasanja za razmislek/Vaja 11 - polovica.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="aZ1lg-rIZJak"
# #0. Default Setting
# + [markdown] id="uJkdy_8RZGjc"
# Executed in Colab environment.
#
# * ML Framework
# - Python 3.7.10
# - Pytorch 1.8.1
#
# * Hardware
# - RAM: 12.7G
# - CPU: Intel(R) Xeon(R) CPU @ 2.20GHz (1core)
#
# Assumed that data exists like below.
# you also need a etri openapi key.
#
# ```
# /content/gdrive/My Drive/data
# ├── 1_bert_download_001_bert_morp_pytorch.zip
# ```
#
# Project Tree (directory only)
# ```
# /content/KoBertSum
# ├── bert_data
# ├── json_data
# ├── logs
# ├── models
# ├── raw_data
# ├── results
# ├── src
# │ ├── models
# │ ├── others
# │ └── prepro
# └── urls
# ```
# + [markdown] id="3bYOoWzjYSip"
# #1. Install dependency packages
# + id="7p9HcD3e8Ryo" colab={"base_uri": "https://localhost:8080/"} outputId="0d132884-504f-4029-ef41-d68e812e7b27"
# install bheinzerling's pyrouge
# !git clone https://github.com/bheinzerling/pyrouge
# %cd pyrouge
# !python setup.py -q install
# install missing dependency
# !apt install -q libxml-parser-perl
# %cd pyrouge
# !git clone https://github.com/andersjo/pyrouge.git rouge
# !pyrouge_set_rouge_path '/content/pyrouge/rouge/tools/ROUGE-1.5.5'
# %cd /content/pyrouge/rouge/tools/ROUGE-1.5.5/data
# !mv WordNet-2.0.exc.db WordNet-2.0.exc.db.orig
# !perl WordNet-2.0-Exceptions/buildExeptionDB.pl ./WordNet-2.0-Exceptions ./smart_common_words.txt ./WordNet-2.0.exc.db
# + id="fkQhs-DW_GK9" colab={"base_uri": "https://localhost:8080/"} outputId="e2a96d5f-72e8-4eea-ee94-a8e0c24eff50"
# 기타 패키지 설치
# !pip install -q pytorch_pretrained_bert
# !pip install -q tensorboardX
# !pip install -q jupyter-dash==0.3.0rc1 dash-bootstrap-components transformers
# + [markdown] id="6Uuo_VZyYvbP"
# #Google Drive Mount
# + colab={"base_uri": "https://localhost:8080/"} id="gqy8CkL4qMNW" outputId="2591a30b-ea94-49e2-953a-871f6fa7bf3e"
# Mount Google Drive so the notebook can read the pre-downloaded BERT archive.
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="eNHS_RluhGcv"
# #2. BERT Model download
# + id="iy23A3EaQc97"
import os
os.chdir('/content')
# + colab={"base_uri": "https://localhost:8080/"} id="dPAwphgFm1Cc" outputId="19f9c880-84bc-4deb-91ba-4fb709e747d9"
# Clone the KoBertSum package.
# !git clone -q https://github.com/raqoon886/KoBertSum.git
# + id="ZJSyUZLagMYV"
# Download the fine-tuned BERT checkpoint (fill in your own gdown id).
# !gdown --id "your fine-tuned bert model" -O /content/KoBertSum/models/model_step_1000.pt
# + id="fnShxFgZqV_f" colab={"base_uri": "https://localhost:8080/"} outputId="bea26035-7239-47e8-faad-93324c008bf6"
# Fetch the etri-bert model archive stored in Drive's data folder and unzip it.
# !cp "/content/gdrive/My Drive/data/1_bert_download_001_bert_morp_pytorch.zip" "1_bert_download_001_bert_morp_pytorch.zip"
# !unzip -q "1_bert_download_001_bert_morp_pytorch.zip"
# + id="wv5f3XOLpZ7o"
# Change into the BertSum source directory.
os.chdir('/content/KoBertSum/src')
# + [markdown] id="d3Uzgqx3cLy0"
# #3. BERT forward propagation workflow
# + id="M3m9X2IApKrt"
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import numpy as np
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from tensorboardX import SummaryWriter
from models.reporter import ReportMgr
from models.stats import Statistics
from others.logging import logger
# from models.trainer import build_trainer
# build_trainer의 dependency package pyrouge.utils가 import되지 않아 직접 셀에 삽입
from others.logging import logger, init_logger
import easydict
# Emulated command-line arguments for the BertSum code. easydict exposes the
# dict entries as attributes (args.encoder, args.lr, ...), so the original
# argparse-driven modules can be reused unchanged inside the notebook.
args = easydict.EasyDict({
    "encoder": 'classifier',
    "mode": 'summary',
    "bert_data_path": '/content/bert_sample/korean',
    "model_path": '../models/bert_classifier',
    "bert_model": '/content/001_bert_morp_pytorch',
    "result_path": '../results/korean',
    "temp_dir": '.',
    "bert_config_path": '/content/001_bert_morp_pytorch/bert_config.json',
    "batch_size": 1000,
    "use_interval": True,
    "hidden_size": 128,
    "ff_size": 512,
    "heads": 4,
    "inter_layers": 2,
    "rnn_size": 512,
    "param_init": 0,
    "param_init_glorot": True,
    "dropout": 0.1,
    "optim": 'adam',
    "lr": 2e-3,
    "report_every": 1,
    "save_checkpoint_steps": 5,
    "block_trigram": True,
    "recall_eval": False,
    "accum_count": 1,
    "world_size": 1,
    "visible_gpus": '-1',  # '-1' selects CPU in build_trainer below
    "gpu_ranks": '0',
    "log_file": '../logs/bert_classifier',
    "test_from": '/content/KoBertSum/models/model_step_1000.pt'
})
def build_trainer(args, device_id, model,
                  optim):
    """
    Simplify `Trainer` creation based on user `opt`s*
    Args:
        opt (:obj:`Namespace`): user options (usually from argument parsing)
        model (:obj:`onmt.models.NMTModel`): the model to train
        fields (dict): dict of fields
        optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
        data_type (str): string describing the type of data
            e.g. "text", "img", "audio"
        model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
            used to save the model
    """
    # NOTE(review): `device` is computed but never used below.
    device = "cpu" if args.visible_gpus == '-1' else "cuda"

    grad_accum_count = args.accum_count
    n_gpu = args.world_size
    if device_id >= 0:
        gpu_rank = int(args.gpu_ranks[device_id])
    else:
        # Negative device_id means CPU-only: no GPUs, rank 0.
        gpu_rank = 0
        n_gpu = 0
    print('gpu_rank %d' % gpu_rank)

    # TensorBoard reporting goes next to the model checkpoints.
    tensorboard_log_dir = args.model_path
    writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
    report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)

    trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)

    # print(tr)
    if (model):
        # NOTE(review): _tally_parameters is not defined in this notebook —
        # it comes from the original BertSum train.py. Confirm it is in scope
        # before calling build_trainer with a model, or this raises NameError.
        n_params = _tally_parameters(model)
        logger.info('* number of parameters: %d' % n_params)

    return trainer
class Trainer(object):
    """
    Class that controls the training process.
    Args:
        model(:py:class:`onmt.models.model.NMTModel`): translation model
            to train
        train_loss(:obj:`onmt.utils.loss.LossComputeBase`):
            training loss computation
        valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):
            training loss computation
        optim(:obj:`onmt.utils.optimizers.Optimizer`):
            the optimizer responsible for update
        trunc_size(int): length of truncated back propagation through time
        shard_size(int): compute loss in shards of this size for efficiency
        data_type(string): type of the source input: [text|img|audio]
        norm_method(string): normalization methods: [sents|tokens]
        grad_accum_count(int): accumulate gradients this many times.
        report_manager(:obj:`onmt.utils.ReportMgrBase`):
            the object that creates reports, or None
        model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is
            used to save a checkpoint.
            Thus nothing will be saved if this parameter is None
    """

    def __init__(self, args, model, optim,
                 grad_accum_count=1, n_gpu=1, gpu_rank=1,
                 report_manager=None):
        # Basic attributes.
        self.args = args
        self.save_checkpoint_steps = args.save_checkpoint_steps
        self.model = model
        self.optim = optim
        self.grad_accum_count = grad_accum_count
        self.n_gpu = n_gpu
        self.gpu_rank = gpu_rank
        self.report_manager = report_manager

        # Per-sentence binary cross-entropy; reduction='none' so the padding
        # mask can be applied before summing (see _gradient_accumulation).
        self.loss = torch.nn.BCELoss(reduction='none')
        assert grad_accum_count > 0
        # Set model in training mode.
        if (model):
            self.model.train()
    def summary(self, test_iter, step, cal_lead=False, cal_oracle=False):
        """Run extractive inference over `test_iter`.

        Returns per-document sentence indices ranked by predicted score
        (highest first, via argsort of the negated scores).

        test_iter: data iterator yielding batches with src/segs/clss/mask fields
        step: checkpoint step (unused here; kept for interface parity)
        cal_lead: if True, select the leading sentences instead of the model
        cal_oracle: if True, select the gold-labelled sentences
        """

        def _get_ngrams(n, text):
            # Set of all n-grams (as tuples) of the token list `text`.
            ngram_set = set()
            text_length = len(text)
            max_index_ngram_start = text_length - n
            for i in range(max_index_ngram_start + 1):
                ngram_set.add(tuple(text[i:i + n]))
            return ngram_set

        def _block_tri(c, p):
            # Trigram blocking: True if candidate `c` shares a trigram with
            # any already-selected sentence in `p`. (Unused in this method.)
            tri_c = _get_ngrams(3, c.split())
            for s in p:
                tri_s = _get_ngrams(3, s.split())
                if len(tri_c.intersection(tri_s)) > 0:
                    return True
            return False

        if (not cal_lead and not cal_oracle):
            self.model.eval()
        stats = Statistics()

        with torch.no_grad():
            for batch in test_iter:
                src = batch.src
                labels = batch.labels
                segs = batch.segs
                clss = batch.clss
                mask = batch.mask
                mask_cls = batch.mask_cls

                gold = []
                pred = []

                if (cal_lead):
                    selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size
                elif (cal_oracle):
                    selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in
                                    range(batch.batch_size)]
                else:
                    sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)

                    # loss = self.loss(sent_scores, labels.float())
                    # loss = (loss * mask.float()).sum()
                    # batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
                    # stats.update(batch_stats)

                    sent_scores = sent_scores + mask.float()
                    sent_scores = sent_scores.cpu().data.numpy()
                    selected_ids = np.argsort(-sent_scores, 1)
                # selected_ids = np.sort(selected_ids,1)
                # NOTE(review): returns after the first batch — confirm this is
                # intended when test_iter yields more than one batch.
                return selected_ids
    def _gradient_accumulation(self, true_batchs, normalization, total_stats,
                               report_stats):
        """One optimizer update over `true_batchs`, with optional gradient
        accumulation (controlled by self.grad_accum_count)."""
        # Accumulating: clear gradients once for the whole group of batches.
        if self.grad_accum_count > 1:
            self.model.zero_grad()

        for batch in true_batchs:
            # Not accumulating: clear gradients before every batch.
            if self.grad_accum_count == 1:
                self.model.zero_grad()

            src = batch.src
            labels = batch.labels
            segs = batch.segs
            clss = batch.clss
            mask = batch.mask
            mask_cls = batch.mask_cls

            sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)

            # Masked BCE: zero out padded sentence positions before summing.
            loss = self.loss(sent_scores, labels.float())
            loss = (loss * mask.float()).sum()
            (loss / loss.numel()).backward()
            # loss.div(float(normalization)).backward()

            batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)

            total_stats.update(batch_stats)
            report_stats.update(batch_stats)

            # 4. Update the parameters and statistics.
            if self.grad_accum_count == 1:
                # Multi GPU gradient gather
                if self.n_gpu > 1:
                    grads = [p.grad.data for p in self.model.parameters()
                             if p.requires_grad
                             and p.grad is not None]
                    distributed.all_reduce_and_rescale_tensors(
                        grads, float(1))
                self.optim.step()

        # in case of multi step gradient accumulation,
        # update only after accum batches
        if self.grad_accum_count > 1:
            if self.n_gpu > 1:
                grads = [p.grad.data for p in self.model.parameters()
                         if p.requires_grad
                         and p.grad is not None]
                distributed.all_reduce_and_rescale_tensors(
                    grads, float(1))
            self.optim.step()
def _save(self, step):
    """Serialize the model weights, run args, and optimizer to a checkpoint.

    The file is named ``model_step_<step>.pt`` inside ``args.model_path``;
    an already-existing file for the same step is left untouched.
    Returns the checkpoint dict together with its path.
    """
    checkpoint = {
        'model': self.model.state_dict(),
        'opt': self.args,
        'optim': self.optim,
    }
    checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)
    logger.info("Saving checkpoint %s" % checkpoint_path)
    if not os.path.exists(checkpoint_path):
        torch.save(checkpoint, checkpoint_path)
    return checkpoint, checkpoint_path
def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return Statistics.all_gather_stats(stat)
return stat
def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1)
def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats)
def _maybe_save(self, step):
"""
Save the model if a model saver is set
"""
if self.model_saver is not None:
self.model_saver.maybe_save(step)
def summary(args, b_list, device_id, pt, step):
    """Load a Summarizer checkpoint and run extractive summarization on `b_list`.

    args: global argparse namespace; b_list: preprocessed batch dicts;
    device_id: GPU id (-1 for CPU); pt: explicit checkpoint path (falls back
    to args.test_from when empty); step: step value forwarded to the trainer.
    Returns the trainer's selected sentence ids.
    """
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    # Restore only the architecture flags from the checkpoint so the rebuilt
    # model matches the saved weights (model_flags is defined at module level).
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)
    config = BertConfig.from_json_file(args.bert_config_path)
    model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
    model.load_cp(checkpoint)
    model.eval()
    test_iter =data_loader.Dataloader(args, _lazy_dataset_loader(b_list),
                                      args.batch_size, device,
                                      shuffle=False, is_test=True)
    trainer = build_trainer(args, device_id, model, None)
    result = trainer.summary(test_iter,step)
    return result
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
return n_params
# Notebook-level setup: parse GPU ranks, pin visible devices, init logging.
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
# Architecture flags restored from a checkpoint when reloading (see summary()).
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
# + [markdown] id="xji5mhCxcWcg"
# #4. Input data morp-tokenization workflow
# + [markdown] id="Diau6__Vhhuo"
# ##your openapi_key
# + id="moouXLWzclio"
openapi_key = ''  # ETRI Open API access key; must be filled in before running.
# + [markdown] id="ifDaOjcThpEr"
# ##workflow
# + id="UAnJ4R8b0hYd"
import argparse
import collections
import gc
import json
import os
import sys
import time
from glob import glob

import six
import urllib3
def do_lang(openapi_key, text):
    """Run ETRI WiseNLU morphological analysis ("morp") on `text`.

    Returns the analysis as a single string of "lemma/type" tokens separated
    by spaces, or a string starting with "openapi error - " when the API
    reports a failure.  Exits the process on an invalid access key.
    """
    openApiURL = "http://aiopen.etri.re.kr:8000/WiseNLU"
    requestJson = {
        "access_key": openapi_key,
        "argument": {"text": text, "analysis_code": "morp"},
    }
    http = urllib3.PoolManager()
    response = http.request(
        "POST", openApiURL,
        headers={"Content-Type": "application/json; charset=UTF-8"},
        body=json.dumps(requestJson))
    # Decode the payload once (the original decoded it twice).
    json_data = json.loads(response.data.decode('utf-8'))
    if json_data["result"] == -1:
        json_reason = json_data["reason"]
        if "Invalid Access Key" in json_reason:
            logger.info(json_reason)
            logger.info("Please check the openapi access key.")
            # `sys` was used here without being imported (NameError);
            # fixed by adding `import sys` to the import block.
            sys.exit()
        return "openapi error - " + json_reason
    # Flatten every morpheme of every sentence into "lemma/type " tokens.
    json_return_obj = json_data["return_object"]
    return_result = ""
    for json_morp in json_return_obj["sentence"]:
        for morp in json_morp["morp"]:
            return_result = return_result + str(morp["lemma"]) + "/" + str(morp["type"]) + " "
    return return_result
class BertData():
    """Converts morp-tokenized sentences into BERT summarizer model inputs."""

    def __init__(self, vocab_file_path):
        self.tokenizer = Tokenizer(vocab_file_path)
        # Cache the ids of the special tokens used during preprocessing.
        self.sep_vid = self.tokenizer.vocab['[SEP]']
        self.cls_vid = self.tokenizer.vocab['[CLS]']
        self.pad_vid = self.tokenizer.vocab['[PAD]']

    def preprocess(self, src):
        """Build (token ids, labels, segment ids, cls positions, texts, target).

        Returns None when `src` is empty or has fewer than 3 non-empty
        sentences; labels and target text are always None here.
        """
        if not src:
            return None
        original_src_txt = [''.join(s) for s in src]
        keep = [i for i, s in enumerate(src) if len(s) > 0]
        # Truncate each sentence and cap the total number of sentences.
        sentences = [src[i][:20000] for i in keep][:10000]
        if len(sentences) < 3:
            return None
        sentence_texts = [''.join(sent) for sent in sentences]
        # Insert [SEP]/[CLS] between sentences, then wrap the whole sequence.
        joined = ' [SEP] [CLS] '.join(sentence_texts)
        subtokens = joined.split(' ')[:510]
        subtokens = ['[CLS]'] + subtokens + ['[SEP]']
        token_ids = self.tokenizer.convert_tokens_to_ids(subtokens)
        # Segment ids alternate 0/1 per sentence span (delimited by [SEP]).
        sep_positions = [-1] + [i for i, t in enumerate(token_ids) if t == self.sep_vid]
        span_lengths = [sep_positions[i] - sep_positions[i - 1]
                        for i in range(1, len(sep_positions))]
        segments_ids = []
        for i, length in enumerate(span_lengths):
            segments_ids += length * [i % 2]
        cls_ids = [i for i, t in enumerate(token_ids) if t == self.cls_vid]
        src_txt = [original_src_txt[i] for i in keep]
        return token_ids, None, segments_ids, cls_ids, src_txt, None
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Accepts str (returned unchanged) or bytes (decoded as UTF-8, ignoring
    invalid sequences); any other type raises ValueError.  The Python-2
    branches of the original (via `six`) were dead code on Python 3 and
    have been removed.
    """
    if isinstance(text, str):
        return text
    elif isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    else:
        raise ValueError("Unsupported string type: %s" % (type(text)))
class Tokenizer(object):
    """Vocabulary-file-backed tokenizer mapping tokens to integer ids."""

    def __init__(self, vocab_file_path):
        """Loads a vocabulary file into an ordered token -> id dictionary."""
        self.vocab_file_path = vocab_file_path
        vocab = collections.OrderedDict()
        index = 0
        with open(self.vocab_file_path, "r", encoding='utf-8') as reader:
            while True:
                token = convert_to_unicode(reader.readline())
                if not token:
                    break
                ### joonho.lim @ 2019-03-15
                # Skip metadata lines that are not real vocabulary entries.
                if token.find('n_iters=') == 0 or token.find('max_length=') == 0:
                    continue
                # Keep only the first tab-separated field, drop surrounding
                # underscores and whitespace.
                token = token.split('\t')[0].strip('_')
                token = token.strip()
                vocab[token] = index
                index += 1
        self.vocab = vocab

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab.

        Unknown tokens fall back to id 1.  Raises ValueError when the
        resulting sequence exceeds the model's maximum length.
        """
        ids = []
        for token in tokens:
            try:
                ids.append(self.vocab[token])
            except KeyError:  # was a bare `except:`, which hid real errors
                ids.append(1)
        if len(ids) > 10000:
            raise ValueError(
                "Token indices sequence length is longer than the specified maximum "
                " sequence length for this BERT model ({} > {}). Running this"
                " sequence through BERT will result in indexing errors".format(len(ids), 10000)
            )
        return ids
def _lazy_dataset_loader(pt_file):
dataset = pt_file
yield dataset
def News_to_input(text, openapi_key,
                  vocab_file_path='/content/001_bert_morp_pytorch/vocab.korean_morp.list'):
    """Morp-tokenize raw news text and package it as a one-element batch list.

    `vocab_file_path` was previously hard-coded inside the function; it is
    now a parameter with the same value as its default, so existing callers
    are unaffected.
    """
    analyzed = do_lang(openapi_key, text)
    # ' ./SF ' marks sentence-final periods in the morp output; the trailing
    # fragment after the last period is dropped.
    sentences = analyzed.split(' ./SF ')[:-1]
    bertdata = BertData(vocab_file_path)
    tmp = bertdata.preprocess(sentences)
    b_data_dict = {"src": tmp[0],
                   "labels": [0, 1, 2],
                   "segs": tmp[2],
                   "clss": tmp[3],
                   "src_txt": tmp[4],
                   "tgt_txt": 'hehe'}
    return [b_data_dict]
# + [markdown] id="6nhubPG1c5zt"
# #5. html for SummaryBot
# + id="U0HisA9GdU95"
import time
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from jupyter_dash import JupyterDash
from transformers import AutoModelWithLMHead, AutoTokenizer
import torch
def textbox(text, box="other"):
    """Render one chat message as a styled bootstrap card.

    box="self" right-aligns the message (user side), box="other" left-aligns
    it (bot side); any other value raises ValueError.
    """
    style = {
        "max-width": "55%",
        "width": "max-content",
        "padding": "10px 15px",
        "border-radius": "25px",
    }
    if box == "self":
        style.update({"margin-left": "auto", "margin-right": 0})
        color, inverse = "primary", True
    elif box == "other":
        style.update({"margin-left": 0, "margin-right": "auto"})
        color, inverse = "light", False
    else:
        raise ValueError("Incorrect option for `box`.")
    return dbc.Card(text, style=style, body=True, color=color, inverse=inverse)
# Scrollable pane that displays the chat history.
conversation = html.Div(
    style={
        "width": "80%",
        "max-width": "800px",
        "height": "70vh",
        "margin": "auto",
        "overflow-y": "auto",
    },
    id="display-conversation",
)
# Text input plus submit button for the user's message.
controls = dbc.InputGroup(
    style={"width": "80%", "max-width": "800px", "margin": "auto"},
    children=[
        dbc.Input(id="user-input", placeholder="Write to the chatbot...", type="text"),
        dbc.InputGroupAddon(dbc.Button("Submit", id="submit"), addon_type="append",),
    ],
)
# Define app
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
# Define Layout
# dcc.Store holds the '<token>'-separated conversation string client-side.
app.layout = dbc.Container(
    fluid=True,
    style={'background-image': 'url(https://img1.daumcdn.net/thumb/R1280x0/?scode=mtistory2&fname=http%3A%2F%2Fcfile21.uf.tistory.com%2Fimage%2F99A30D4B5CB15385210EA0)'},
    children=[
        html.H1("뉴스뚝딱"),
        html.Hr(),
        dcc.Store(id="store-conversation", data=""),
        conversation,
        controls
    ],
)
@app.callback(
    Output("display-conversation", "children"), [Input("store-conversation", "data")]
)
def update_display(chat_history):
    """Rebuild the chat view: even entries are user messages, odd are bot replies."""
    boxes = []
    for idx, message in enumerate(chat_history.split('<token>')):
        side = "self" if idx % 2 == 0 else "other"
        boxes.append(textbox(message, box=side))
    return boxes
@app.callback(
    [Output("store-conversation", "data"), Output("user-input", "value")],
    [Input("submit", "n_clicks"), Input("user-input", "n_submit")],
    [State("user-input", "value"), State("store-conversation", "data")],
)
def run_chatbot(n_clicks, n_submit, user_input, chat_history):
    """Summarize the submitted news text and append the result to the chat.

    Returns the updated '<token>'-separated conversation string and clears
    the input box.
    """
    # NOTE(review): Dash typically initializes n_clicks to None, so this
    # `== 0` guard may never fire — confirm against the Dash version used.
    if n_clicks == 0:
        return "", ""
    if user_input is None or user_input == "":
        return chat_history, ""
    # Morp-tokenize and package the text, then run the extractive summarizer.
    bot_input_ids = News_to_input(chat_history + user_input, openapi_key)
    chat_history_ids = summary(args, bot_input_ids, -1, '', None)
    # Keep the first three selected sentence indices (highest-scoring first,
    # per the argsort in the summarizer).
    pred_lst = list(chat_history_ids[0][:3])
    final_text = ''
    for i,a in enumerate(user_input.split('. ')):
        if i in pred_lst:
            final_text = final_text+a+'. '
    chat_history = user_input + '<token>' +final_text
    return chat_history, ""
# + [markdown] id="UW0GrA7Mg5kr"
# #6. RUN!
# + colab={"base_uri": "https://localhost:8080/", "height": 51} id="i_dGcr31dBdp" outputId="27f634ca-568e-49a0-e60e-bc1e6fb643d8"
# Launch the Dash app in an external browser tab.
app.run_server(mode='external')
| Newsdata_summarybot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# <h3 style='color:pink'>Exercise: GPU performance for fashion mnist dataset</h3>
# You need to write code wherever you see `your code goes here` comment. You are going to do image classification for fashion mnist dataset and then you will benchmark the performance of GPU vs CPU for 1 hidden layer and then for 5 hidden layers. You will eventually fill out this table with your performance benchmark numbers
#
#
# | Hidden_Layer | CPU | GPU |
# |:------|:------|:------|
# | 1 | 8.06s | 8.85s |
# | 5 | 19.8s | 14.8s |
# +
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# +
# Download the Fashion-MNIST dataset (train/test image and label arrays).
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# -
# Human-readable names for the 10 integer class labels.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images.shape
plt.imshow(train_images[0])
train_labels[0]
class_names[train_labels[0]]
# Preview the first five training images with their class names.
plt.figure(figsize=(3,3))
for i in range(5):
    plt.imshow(train_images[i])
    plt.xlabel(class_names[train_labels[i]])
    plt.show()
# +
# Scale pixel values from [0, 255] down to [0, 1].
train_images_scaled = train_images / 255.0
test_images_scaled = test_images / 255.0
train_images_scaled.shape
# -
def get_model(hidden_layers=1):
    """Build and compile a dense Fashion-MNIST classifier.

    The network flattens 28x28 inputs, stacks `hidden_layers` Dense(500,
    relu) layers, and ends in a 10-unit sigmoid output, compiled with the
    adam optimizer and sparse categorical cross-entropy loss.
    """
    layers = [keras.layers.Flatten(input_shape=(28, 28,))]
    for _ in range(hidden_layers):
        layers.append(keras.layers.Dense(500, activation="relu"))
    layers.append(keras.layers.Dense(10, activation="sigmoid"))
    model = keras.Sequential(layers)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model = get_model(1)
model.fit(train_images_scaled, train_labels, epochs=5)
# Predict the class of test image 52 and compare against its true label.
np.argmax(model.predict(test_images_scaled)[52])
test_labels[52]
# List the compute devices (CPU/GPU) TensorFlow can see.
tf.config.experimental.list_physical_devices()
# <h4 style="color:pink">5 Epochs performance comparison for 1 hidden layer</h4>
# %%timeit -n1 -r1
# Time 5 epochs of the 1-hidden-layer model on CPU.
with tf.device('/CPU:0'):
    # your code goes here
    cpu_model = get_model()
    cpu_model.fit(train_images_scaled, train_labels, epochs=5)
# %%timeit -n1 -r1
# Time 5 epochs of the 1-hidden-layer model on GPU.
with tf.device('/GPU:0'):
    # your code goes here
    gpu_model = get_model()
    gpu_model.fit(train_images_scaled, train_labels, epochs=5)
# <h4 style="color:pink">5 Epochs performance comparison with 5 hidden layers</h4>
# %%timeit -n1 -r1
# Time 5 epochs of the 5-hidden-layer model on CPU.
with tf.device('/CPU:0'):
    # your code here
    cpu_model = get_model(5)
    cpu_model.fit(train_images_scaled, train_labels, epochs=5)
# %%timeit -n1 -r1
# Time 5 epochs of the 5-hidden-layer model on GPU.
with tf.device('/GPU:0'):
    # your code here
    gpu_model = get_model(5)
    gpu_model.fit(train_images_scaled, train_labels, epochs=5)
| Python Tutorial Tensorflow/8_Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <center>
# <img src="./images/adsp_logo.png">
# </center>
#
# ### Prof. Dr. -Ing. <NAME> <br> Jupyter Notebook: <NAME>
#
# + [markdown] slideshow={"slide_type": "-"}
# # Lloyd-Max Quantizer
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/n2xuCE2dKeo" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + [markdown] slideshow={"slide_type": "-"}
# **Idea:** Wouldn't it be helpful if we choose our **quantization steps smaller** where **signal samples appear most often**, to reduce the quantization error there, and make the quantization step size (and also the error) larger, where there are only a few samples?
#
# This is the idea behind the Lloyd-Max quantizer (see also the Book: <NAME>, P. Noll: “Digital coding of waveforms“).
#
# **Observe** that this is not quite the same as for $\mu$-law companding. There, the **small** values get the smallest quantization step sizes, here, the **most likely** values get the smallest quantization step sizes.
#
# This is a type of non-uniform quantizer, which is adapted to the signal's pdf. It basically minimizes the expectation of the quantization error power (the expectation of the squared error), given the pdf of the signal to quantize.
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/3TBS2vnBxow" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + [markdown] slideshow={"slide_type": "-"}
# Let's call our Quantisation function Q(x) (this is quantization followed by reverse quantization). You can also think of non-uniform quantization as first applying this non-linear function and then to use uniform quantization. Then the expectation of our quantization power is:
#
# $$
# D=E((x-Q(x))^2)
# $$
#
# Observe that we use the square here, and not for instance the magnitude of the error, because the square leads to an easier solution for minimum, which we would like to find.
#
# Our **goal** is to **minimize this expectation** of the quantisation error power D.
# Starting with the pdf of our signal, the result should be our quantisation intervals and reconstruction values. Since we now assume non-uniform intervals, we need to give those intervals and their reconstruction values names, which can be see in the following graphic:
#
# <center>
# <img src="./images/lloyd_max.png" width='600'>
# </center>
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/1Fr1Qo2-nEk" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + [markdown] slideshow={"slide_type": "-"}
# The encoder knows the $b_k$, and decides in which interval (what we called $\Delta$ before) the sample lies, and assigns the interval index k to it, as before (remember: only the index k is transmitted to the decoder). The decoder takes this index, and assigns the reconstructed value $y_k$ to it, also as before.
#
#
# We call $b_k$ the decision boundaries, in the A/D converter or encoder (each interval gets an index as before), and on the decoding side we have the $y_k$ as the reconstruction values for each index from the encoding side. <br>
# In the multidimensional case, they are also called a “**codeword**”.
#
# So using these definitions, and the pdf our the measured **probability distribution** of our signal p(x), we can re-write our equation for the error power or distortion:
#
# $$ \large
# D=E((x-Q(x))^2)=\int_{-\infty} ^ \infty (x-Q(x))^2 p(x) dx
# $$
#
# we can now subdivide the integral over the quantisation intervals, assuming we have M quantization intervals, by just adding the quantization error power of all the quantisation intervals (see also: Wikipedia: quantization (signal processing)):
#
# $$ \large
# D=\sum _ {k=1} ^ {M }\int _ {b_{k-1}} ^ {b_k} (x-y_k)^2 p(x) dx$$
#
# We would now like to have the minimum of this expression for the decision boundaries $b_k$ and the reconstruction values $y_k$. Hence we need to take the first derivative of the distortion D with respect to those variables and obtain the zero point.
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/4wLah9Agrnw" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + [markdown] slideshow={"slide_type": "-"}
# Let's start with the decision boundaries $b_k$:
#
# $$\large
# \frac{\partial D} {\partial {b_k}}=0$$
#
# To obtain this derivative, we could first solve the integral, over 2 neighbouring quantisation intervals, because each decision boundary $b_k$ appears in two intervals (one where it is the upper boundary, and one where it is the lower boundary).
#
# $$ \large
# D_k=\int _ {b_k }^ {b_{k+1}} (x-y_{k+1})^2 p(x) dx+ \int _ {b_{k-1}} ^ {b_{k}} (x-y_{k})^2 p(x) dx
# $$
#
# Here we cannot really get a closed form solution for a general probability function p(x). Hence, to simplify matters, we make the **assumption** that p(x) is **approximately constant** over our 2 neighbouring quantisation intervals. This means we assume that our quantisation intervals are small in comparison with the changes of p(x)!
#
# **We need to keep this assumption in mind, because the derived algorithm is based on this assumption!**
#
# Hence we can set:
#
# $$p(x)=p$$
#
# Using this simplification we can now solve this integral:
#
# $$ \large
# \frac{D_k} {p}= \frac{(b_k-y_k)^3}{ 3} - \frac{(b_{k-1}-y_k)^3} {3 }+ \frac{(b_{k+1}-y_{k+1})^3} { 3} -\frac{(b_{k}-y_{k+1})^3} {3} $$
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Solving using Sympy
# + slideshow={"slide_type": "-"}
# Imports
from sympy import Eq, symbols, Integral, Derivative, simplify, solve
# Define Symbols
# Quantizer variables: reconstruction values y_k, y_{k+1}; decision
# boundaries b_{k-1}, b_k, b_{k+1}; and the (locally constant) pdf value p.
x, yk, yk1, bk, bkp1, bkm1, p = symbols('x y_k y_{k+1} b_k b_{k+1} b_{k-1} p', real=True)
# + slideshow={"slide_type": "-"}
# Dk Integral
# Distortion contribution of the two intervals adjacent to boundary b_k.
Dk = Integral((x-yk1)**2*p,(x,bk,bkp1)) + Integral((x-yk)**2*p,(x,bkm1,bk))
display(Dk)
simplify(Dk.doit())
# + [markdown] slideshow={"slide_type": "-"}
# Since we now have a closed form solution, we can easily take the derivative with respect to $b_k$ (which only influences $D_k$ in $D$, hence we can drop the k in the derivative):
#
# $$ \large
# \frac{\partial D/p} {\partial {b_k}} = (b_k -y_k)^2 -(b_k -y_{k+1})^2$$
# + slideshow={"slide_type": "-"}
# Derivative of the (pdf-normalized) distortion with respect to boundary b_k.
display(Derivative(Dk/p,bk))
simplify(Derivative(Dk/p,bk).doit())
# + [markdown] slideshow={"slide_type": "-"}
# We can set this then to zero, and observing that $y_{k+1}>b_k$ (see above image), we can take the positive square root of both sides:
#
# $$ \large
# (b_k -y_k)^2 -(b_k -y_{k+1})^2=0
# $$
#
# $$ \large
# (b_k -y_k) =( y_{k+1} - b_k)
# $$
#
# $$ \large
# b_k= \frac{y_{k+1}+
# y_k} { 2}$$
#
# This means that we put our decision boundaries right in the middle of two reconstruction values. But remember, this is only optimal if we assume that the signal's pdf is roughly constant over the 2 quantisation intervals! This approach is also called the "**nearest neighbour**", because any signal value or data point is always quantized to the **nearest reconstruction value**. This is one important result of this strategy.
# + slideshow={"slide_type": "-"}
# Solve dD/db_k = 0: the boundary b_k is the midpoint of y_k and y_{k+1}.
Eq_bk = Eq(simplify(Derivative(Dk/p,bk).doit()))
display(Eq_bk)
display(Eq(bk,solve(Eq_bk,bk)[0]))
# + [markdown] slideshow={"slide_type": "subslide"}
# Now we have the decision boundaries, but we still need the reconstruction values $y_k$. To obtain them, we can again take the derivative of D, and set it to zero. Here we cannot start with an assumption of a uniform pdf, because we would like to have a dependency on a non-uniform pdf. We could make this assumption before, because we only assumed it for the (small) quantisation intervals. This can be true in practice also for non-uniform pdf's, if we have enough quantisation intervals.
#
# But to still have the dependency on the pdf, for the reconstruction values $y_k$ we have to start at the beginning, take the derivative of the original formulation of D.
#
# $$ \large
# D=\sum_{k=1} ^M \int _{b_{k-1}}^ {b_k} (x-y_k)^2 p(x) dx$$
#
#
# Here we have the pdf p(x) and the reconstruction values (codewords) $y_k$. Now we start with taking the derivative with respect to the reconstruction value $y_k$ and set it to 0:
#
# $$ \large
# \frac{\partial D} {\partial {y_k}}=-\sum_ {k=1} ^ {M} \int_{b_{k-1}} ^{b_k} 2 \cdot (x-y_k) p(x) dx = 0
# $$
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/DTeqd_PFbQc" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + slideshow={"slide_type": "-"}
from sympy import Function, Sum, Indexed
# p(x): the general (non-constant) signal pdf; M intervals indexed by k.
p_x = Function('p')(x)
M, k = symbols('M k', real=True, positive=True)
# + slideshow={"slide_type": "-"}
# Total distortion D summed over all M quantization intervals.
D = Sum(Integral((x-yk)**2*p_x,(x,bkm1,bk)),(k,1,M))
display(D)
display(Derivative(D,yk))
# + [markdown] slideshow={"slide_type": "-"}
# Since the $y_k$ is only in 1 interval, the sum disappears:
#
# $$ \large
# \frac{\partial D}{\partial {y_k}}=- \int _ {b_{k-1}}^{b_k} 2 \cdot (x-y_k) p(x) dx = 0
# $$
#
# + slideshow={"slide_type": "-"}
# Derivative for the single interval containing y_k (the sum drops out).
display(Derivative(Integral((x-yk)**2*p_x,(x,bkm1,bk)),yk))
display(simplify(Derivative(Integral((x-yk)**2*p_x,(x,bkm1,bk)),yk).doit()))
# + [markdown] slideshow={"slide_type": "-"}
# Since we have a sum, we can split this integral in 2 parts (and remove the - sign):
#
#
# $$ \large
# \int _ {b_{k-1}}^{b_k} 2 \cdot x p(x) dx -\int _{b_{k-1}} ^ {b_k} 2 \cdot y_k p(x) dx = 0
# $$
#
# $$ \large
# \int _ {b_{k-1}} ^ {b_k} x \cdot p(x) dx -y_k \cdot \int_ {b_{k-1}} ^ {b_k} p(x) dx = 0
# $$
#
# Hence we get the result
#
# $$ \large
# y_k = \frac{\int _ {b_{k-1}}^ {b_k} x \cdot p(x) dx} {\int _{b_{k-1}} ^{b_k} p(x) dx}
# $$
# + slideshow={"slide_type": "-"}
# Setting the derivative to zero and solving: y_k is the centroid
# (conditional expectation) of x over [b_{k-1}, b_k].
display(Eq(-2*(Integral(x*p_x,(x,bkm1,bk)) - Integral(yk*p_x,(x,bkm1,bk)))))
Eq_yk=Eq(-2*(Integral(x*p_x,(x,bkm1,bk))),-2*yk*Integral(p_x,(x,bkm1,bk)))
display(Eq_yk)
Eq(yk,solve(Eq_yk,yk)[0])
# + [markdown] slideshow={"slide_type": "-"}
# Observe that we now got a result without making any assumptions on p(x).
#
# This can be interpreted as a **conditional expectation** of our signal value over the quantization interval (given the signal is in this interval), or also its “**centroid**” as reconstruction value (codeword).
#
# - The value in the numerator can be seen as the expectation value of our signal in the interval.
# - The denominator can be seen as the probability of that signal being in that interval.
#
# Hence it can be interpreted as: Given the signal is inside the interval, this is its average or expected value.
#
# Since the decision boundaries $b_k$ depend on the reconstruction values $y_k$, and the $y_k$ in turn depend on the $b_k$, we need to come up with a way to compute them. The approach for this is an **iterative algorithm**:
#
# <ol>
# <li>Decide on M, start (initialize the iteration) with a <b> random </b> assignment of M <b>reconstruction values </b> (codewords) $y_k$</li>
# <li>Using the reconstruction values $y_k$, compute the <b>boundary values</b> $b_k$ as mid-points between 2 reconstruction values / codewords (<b>nearest neighbour rule</b>)</li>
# <li>Using the pdf of our signal and the boundary values $b_k$, update, <b>compute new reconstruction values (codewords) $y_k$ as centroids or conditional expectation over the quantisation areas between $b_k$ and $b_{k-1}$</b></li>
# <li>Go to 2) until update is sufficiently small (< epsilon).</li>
# </ol>
#
# This algorithm usually converges (it finds an equilibrium and doesn't change anymore), and it results in the minimum distortion D.
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/nPk1vHD6S8s" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example 1 for Max-Lloyd Iteration
# + [markdown] slideshow={"slide_type": "-"}
# Assume we have a signal x between $0\leq x \leq 1$, uniformly distributed (p(x)=1 on this interval) and we want to have 2 reconstruction values/ codewords $y_k$, and hence 3 boundaries $b_k$ (where $b_0=0$ and $b_2=1$), we need to find only $b_1$.
#
# 1) **Random initialization:** $y_1=0.3$, $y_2=0.8$<br>
# 2) **Nearest neighbour:** $b_1=(0.3+0.8)/2=0.55$<br>
# 3) **Conditional expectation:** <br>
#
# $$ \large
# y_k = \frac{\int_{b_{k-1}}^ {b_k} x \cdot p(x) dx} {\int_ {b_{k-1}}^{b_k} p(x) dx}$$
#
# now we use that $p(x)=1$.
#
# $$
# y_1 = \frac{\int_{0} ^{0.55} x dx} {\int _ {0} ^ {0.55} 1 dx}=\frac{0.55^2 /2}{ 0.55 }= 0.275
# $$
# <br>
# $$
# y_2 = \frac{\int_{0.55} ^{1} x dx} {\int _ {0.55} ^ {1} 1 dx}=\frac{1/2-0.55^2/2}{1- 0.55 }= 0.775
# $$<br>
#
# 4) Go to 2), **nearest neighbour:**
# $b_1=(0.275+0.775)/2=0.525$
#
# 3) **Conditional expectation:**<br>
#
# $y_1 = \frac{0.525^2 /2}{ 0.525} = 0.26250$
#
# $y_2 =\frac{1/2-0.525^2 / 2} {1- 0.525} = 0.76250$
#
# and so on until it doesn't change much any more. This should converge to $y_1=0.25$, $y_2=0.75$, and $b_1=0.5$.
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/5rNDlO5xYv0" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + slideshow={"slide_type": "-"}
import numpy as np
from scipy.integrate import quad
# Algorithm
def b_k(y):
    """Nearest-neighbour rule: the boundary is the midpoint of the two codewords."""
    return y.sum() * 0.5
def y_k(b):
    """Centroids (conditional expectations) of [0, b] and [b, 1] for the uniform pdf p(x)=1."""
    lower = quad(lambda x: x, 0, b)[0] / quad(lambda x: 1, 0, b)[0]
    upper = quad(lambda x: x, b, 1)[0] / quad(lambda x: 1, b, 1)[0]
    return np.array([lower, upper])
# + slideshow={"slide_type": "-"}
y = np.array([0.3,0.8])   # random initialization of the reconstruction values
it = 100                  # maximum number of iterations
epsilon=1e-5              # convergence threshold on the boundary update
b=0
for i in range(it):
    b_old=b
    b=b_k(y)              # nearest-neighbour step: boundary = midpoint
    if i>0 and (b_old-b)<=epsilon:
        print('Iteration:',i+1)
        print('[y1 y2]:',y)
        print('b:',b)
        break
    y=y_k(b)              # centroid step: conditional expectations
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example 2
# + [markdown] slideshow={"slide_type": "-"}
# Like above, but now with a **non-uniform**, Laplacian pdf: $p(x)=e^{-0.5\cdot \mid x \mid}$
#
# 1) **Random initialization:** $y_1=0.3$ ,$y_2=0.8$ <br>
# 2) **Nearest neighbour:** $b_1=(0.3+0.8)/2=0.55$<br>
# 3) **Conditional expectation:**
#
# $$ \large
# y_k=\frac{\int _{b_{k-1}} ^{b_k} x \cdot p(x)dx}{\int _{b_{k-1}}^{b_k} p(x)dx}
# $$
#
# + [markdown] slideshow={"slide_type": "-"}
# Now we need Python to compute the numerator integral, for $y_1$:
#
# $$ \large
# \int _0^{b_1} x \cdot p(x)dx= \int_0 ^{0.55} x \cdot e^{-0.5 \cdot \mid(x)\mid } dx
# $$
# + hide_input=true language="html"
# <iframe width="560" height="315" src="https://www.youtube.com/embed/8FFMFzZvXl0" frameborder="0" allow="accelerometer; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# + slideshow={"slide_type": "-"}
# Numerator
# Integral of x * p(x) over [0, 0.55] for the Laplacian pdf exp(-0.5*|x|).
Num,Nerr=quad(lambda x: x*np.exp(-0.5*abs(x)),0,0.55)
Num
# + [markdown] slideshow={"slide_type": "-"}
# For the denominator integral we get:
# $$ \large
# \int_0 ^{0.55} p(x)dx $$, hence:
# + slideshow={"slide_type": "-"}
# Denominator
# Probability mass of [0, 0.55] under the (unnormalized) Laplacian pdf.
Den,Derr=quad(lambda x: np.exp(-0.5*abs(x)),0,0.55)
Den
# + [markdown] slideshow={"slide_type": "-"}
# and hence we obtain,
#
# $$y_1= \frac {Num}{ Den} = \frac{0.12618 }{0.48086} = 0.26240$$
# -
# For $y_2$ we get:
# + slideshow={"slide_type": "-"}
# Same centroid computation for the second interval [0.55, 1].
Num,Nerr=quad(lambda x: x*np.exp(-0.5*abs(x)),0.55,1)
print ("Num = ",Num)
Den,Derr=quad(lambda x: np.exp(-0.5*abs(x)),0.55,1)
print ("Den = ",Den)
print(Num/Den)
# + [markdown] slideshow={"slide_type": "-"}
# Hence $y_2= 0.7665$.
# Go back from here to step 2 until convergence.
# + slideshow={"slide_type": "-"}
def b_k(y):
    """Nearest-neighbour rule: boundary at the midpoint of the two reconstruction values."""
    return y.sum() * 0.5
def y_k(b):
    """Centroids of [0, b] and [b, 1] under the Laplacian pdf exp(-0.5*|x|)."""
    pdf = lambda x: np.exp(-0.5 * abs(x))
    lower = quad(lambda x: x * pdf(x), 0, b)[0] / quad(pdf, 0, b)[0]
    upper = quad(lambda x: x * pdf(x), b, 1)[0] / quad(pdf, b, 1)[0]
    return np.array([lower, upper])
# + slideshow={"slide_type": "-"}
y = np.array([0.3,0.8])   # random initialization of the reconstruction values
it = 100                  # maximum number of iterations
epsilon=1e-9              # (tighter) convergence threshold on the boundary
b=0
for i in range(it):
    b_old=b
    b=b_k(y)              # nearest-neighbour step: boundary = midpoint
    if i>0 and (b_old-b)<=epsilon:
        print('Iteration:',i+1)
        print('[y1 y2]:',y)
        print('b:',b)
        break
    y=y_k(b)              # centroid step under the Laplacian pdf
| ADSP_04_LloydMax.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: solver-demo
# language: python
# name: solver-demo
# ---
# # Solver Benchmark - Finding All Solutions
#
# We analyze how fast various solvers are in listing all solutions for a simple AND and a simple OR formula.
# Solvers might not natively support listing all solutions, but instead rather present a single one.
# However, one can simply add the negation of the previous solution as another constraint and run the solver again.
#
# Besides benchmarking the solvers, this notebook also demonstrates their API in general.
# +
import time
def benchmark(name, function, param1, param2):
    """Benchmark a function with two parameters, printing its result and runtime."""
    print('--' + name + ' approach--')
    started = time.perf_counter()
    print('Number of models: ' + str(function(param1, param2)))
    elapsed = time.perf_counter() - started
    print('Time: ' + str(round(elapsed, 2)) + ' s')
# -
# ## Z3
# The [`Z3` library](https://github.com/Z3Prover/z3/wiki) offers SMT solving as well as optimization for various programming languages.
# The solver returns only one valid solution.
# We try multiple ways to find/count all satisfying interpretations (models):
# - Enumeration-based: Enumerate all possible assignments and check if they satisfy all assertions. This is possible in `Z3` with
# - conditional checking, which temporarily adds the literals corresponding to the assignment as further constraints and checks the resulting model.
# - substitution of the variables with their values, followed by simplification.
# - [Solver-based](https://stackoverflow.com/questions/13395391/z3-finding-all-satisfying-models): Find the first model with the solver. Add the negation of its assignment as another constraint and re-run the solver. Thus, the solver will find a different solution. Repeat until unsatisfiable.
# +
from z3 import *
def count_models_with_solver(solver, variables):
    """Count all models by iterative solving, blocking each found assignment."""
    solver.push()  # blocking clauses added below are rolled back at the end
    n_found = 0
    while solver.check() == sat:
        n_found += 1
        model = solver.model()
        # Forbid the current assignment: at least one variable must flip.
        blocking_clause = Or([Not(v) if is_true(model[v]) else v for v in variables])
        solver.add(blocking_clause)
    solver.pop()  # restore the solver to its original assertion set
    return n_found
import itertools
# Fastest enumeration: conditional checking.
# Fastest enumeration: conditional checking.
def count_models_by_enumeration(solver, variables):
    """Count models by conditionally checking every literal combination."""
    literal_pairs = [(v, Not(v)) for v in variables]
    # solver.check(assignment) tests the assignment without adding it permanently.
    return sum(1 for assignment in itertools.product(*literal_pairs)
               if solver.check(assignment) == sat)
# Creating the assignment as a separate step is slower.
# Creating the assignment as a separate step is slower.
def count_models_by_enumeration2(solver, variables):
    """Like count_models_by_enumeration, but builds each literal list explicitly."""
    n_sat = 0
    for bits in itertools.product([False, True], repeat=len(variables)):
        literals = [v if bit else Not(v) for v, bit in zip(variables, bits)]
        if solver.check(literals) == sat:
            n_sat += 1
    return n_sat
# Using simplication instead of conditional checking is even slower.
# Using simplification instead of conditional checking is even slower.
def count_models_by_enumeration3(solver, variables):
    """Count models by substituting each assignment into all assertions and simplifying."""
    n_sat = 0
    truth_values = [BoolVal(False), BoolVal(True)]
    for assignment in itertools.product(truth_values, repeat=len(variables)):
        substitution = list(zip(variables, assignment))
        # A model iff no assertion simplifies to false (all() short-circuits
        # on the first false assertion, like the original break).
        if all(not is_false(simplify(substitute(a, substitution)))
               for a in solver.assertions()):
            n_sat += 1
    return n_sat
# -
# We try both approaches with a small propositional formula with 10 variables, using an AND constraint as well as an OR constraint.
# +
from z3 import *
# Ten boolean variables x0..x9.
x = Bools(' '.join('x' + str(i) for i in range(10)))
solver = Solver()
print('## OR formula ##')
# Or(x) has 2^10 - 1 = 1023 models (only the all-false assignment fails).
solver.add(Or(x))
benchmark('Solver-based', count_models_with_solver, solver, x)
benchmark('Enumeration-based (conditional check, direct assignment)', count_models_by_enumeration, solver, x)
benchmark('Enumeration-based (conditional check, separate assignment)', count_models_by_enumeration2, solver, x)
benchmark('Enumeration-based (substitute + simplify)', count_models_by_enumeration3, solver, x)
print('\n## AND formula ##')
solver.reset()
# And(x) has exactly one model (all variables true).
solver.add(And(x))
benchmark('Solver-based', count_models_with_solver, solver, x)
benchmark('Enumeration-based (conditional check, direct assignment)', count_models_by_enumeration, solver, x)
benchmark('Enumeration-based (conditional check, separate assignment)', count_models_by_enumeration2, solver, x)
benchmark('Enumeration-based (substitute + simplify)', count_models_by_enumeration3, solver, x)
# -
# The enumeration-based approach has to evaluate the same number of values for AND and OR formulas, i.e., all value combinations.
# The conditional-checking approach still seems to benefit from problems with fewer solutions, though not as strong as the solver-based approach.
# Overall, there is no clear winner: Depending on the number of solutions, solving or enumerating might be better.
# ## Google OR Tools
#
# Google provides a framework for combinatorial white-box optimization problems, including constraint [solving](https://developers.google.com/optimization/cp/cp_solver) and [optimization](https://developers.google.com/optimization/cp/integer_opt_cp).
# Besides the [Python API](https://developers.google.com/optimization/reference/python/sat/python/cp_model), C++, Java and C# are supported.
# Creating an enumeration-based solution is more difficult than with `Z3`, as we cannot simply copy models or make conditional evaluations (temporary assignments).
# Thus, we refrain from implementing such a solution.
# However, as a nice alternative, iterating over all valid solutions is supported natively.
# +
from ortools.sat.python import cp_model
def count_models_with_solver(model, variables):
    """Count all satisfying assignments by repeatedly solving `model`.

    NOTE: mutates `model` by adding one blocking clause per solution found
    (cp_model offers no native way to copy a model).
    """
    solver = cp_model.CpSolver()
    solutions = 0
    # CP-SAT reports OPTIMAL for a fully solved model without an objective and
    # FEASIBLE only for early stops, so accept both statuses here; checking
    # only FEASIBLE (as before) can terminate immediately and under-count.
    while solver.Solve(model) in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        solutions = solutions + 1
        # Invert at least one variable to get a different solution:
        model.AddBoolOr([x.Not() if solver.Value(x) == 1 else x for x in variables])
    return solutions
class Solution_Counter(cp_model.CpSolverSolutionCallback):
    """CP-SAT callback that simply counts how many solutions are reported."""
    def __init__(self):
        super(Solution_Counter, self).__init__()
        self.__solution_count = 0
    def on_solution_callback(self):
        # Invoked by the solver once per solution found.
        self.__solution_count += 1
    def solution_count(self):
        """Return the number of solutions seen so far."""
        return self.__solution_count
def count_models_natively(model, counter_callback):
    """Enumerate every solution of `model`; the callback accumulates the count."""
    cp_solver = cp_model.CpSolver()
    cp_solver.SearchForAllSolutions(model=model, callback=counter_callback)
    return counter_callback.solution_count()
# +
from ortools.sat.python import cp_model
print('## OR formula ##')
model = cp_model.CpModel()
x = [model.NewBoolVar('x' + str(i)) for i in range(10)]
model.AddBoolOr(x)  # 2^10 - 1 = 1023 models
benchmark('Solver-based', count_models_with_solver, model, x)
model = cp_model.CpModel() # solver-based approach changes model, therefore re-creation
x = [model.NewBoolVar('x' + str(i)) for i in range(10)]
model.AddBoolOr(x)
benchmark('Native', count_models_natively, model, Solution_Counter())
print('\n## AND formula ##')
model = cp_model.CpModel()
x = [model.NewBoolVar('x' + str(i)) for i in range(10)]
model.AddBoolAnd(x)  # exactly one model (all variables true)
benchmark('Solver-based', count_models_with_solver, model, x)
# Re-created again because the solver-based run mutated the model:
model = cp_model.CpModel()
x = [model.NewBoolVar('x' + str(i)) for i in range(10)]
model.AddBoolAnd(x)
benchmark('Native', count_models_natively, model, Solution_Counter())
# -
# -
# The native approach beats the solver-based approach.
# Compared to `Z3`, the native approach with `OR Tools` is about as fast as `Z3` (solver-based) for AND formulas and slightly faster than `Z3` (enumeration-based) for OR formulas.
# ## pySMT
#
# [pySMT](https://pysmt.readthedocs.io/en/latest/index.html) is a wrapper for [various solvers](https://github.com/pysmt/pysmt#solvers-support) supporting the *SMT-Lib* format, including `Z3`.
# The solvers have to be [installed separately](https://pysmt.readthedocs.io/en/latest/getting_started.html#installation).
# +
from pysmt.shortcuts import *
def count_models_with_solver(solver, variables):
    """Count all models via iterative solving with solution-blocking assertions."""
    solver.push()  # blocking assertions below are rolled back at the end
    found = 0
    while solver.solve():
        found += 1
        # Forbid the model just found: at least one variable must flip
        # ("Iff" == "<-->").
        same_assignment = And([Iff(v, solver.get_value(v)) for v in variables])
        solver.add_assertion(Not(same_assignment))
    solver.pop()  # restore solver to previous state
    return found
import itertools
# Fastest enumeration by conditional checking.
# Fastest enumeration by conditional checking.
def count_models_by_enumeration(solver, variables):
    """Count models by conditionally solving every possible literal combination."""
    literal_pairs = [(v, Not(v)) for v in variables]
    # solver.solve(assignment) does not add the assignment permanently.
    return sum(1 for assignment in itertools.product(*literal_pairs)
               if solver.solve(assignment))
# Slower enumeration by substitution and simplification.
# Slower enumeration by substitution and simplification.
def count_models_by_enumeration2(conditions, variables):
    """Count models by substituting each assignment into `conditions` and simplifying."""
    n_models = 0
    truth_values = [Bool(False), Bool(True)]
    for assignment in itertools.product(truth_values, repeat=len(variables)):
        mapping = dict(zip(variables, assignment))
        # A model iff no condition simplifies to false (all() short-circuits
        # on the first false condition, like the original break).
        if all(not substitute(c, mapping).simplify().is_false() for c in conditions):
            n_models += 1
    return n_models
# -
# Here, we use the MathSAT solver.
# +
from pysmt.shortcuts import *
# Ten boolean symbols x0..x9.
x = [Symbol('x' + str(i)) for i in range(10)]
solver = Solver(name='msat') # could also use 'z3'
print('## OR formula ##')
solver.add_assertion(Or(x))  # 2^10 - 1 = 1023 models
benchmark('Solver-based', count_models_with_solver, solver, x)
benchmark('Enumeration-based (conditional check)', count_models_by_enumeration, solver, x)
benchmark('Enumeration-based (substitute + simplify)', count_models_by_enumeration2, [Or(x)], x)
print('\n## AND formula ##')
solver.reset_assertions()
solver.add_assertion(And(x))  # exactly one model (all variables true)
benchmark('Solver-based', count_models_with_solver, solver, x)
benchmark('Enumeration-based (conditional check)', count_models_by_enumeration, solver, x)
benchmark('Enumeration-based (substitute + simplify)', count_models_by_enumeration2, [And(x)], x)
# -
# We don't get a performance advantage compared to `Z3`.
# ## PicoSAT
#
# A SAT solver implemented in C with a very simple [Python interface](https://github.com/ContinuumIO/pycosat).
# It offers solving and iterative solving for pure SAT formulas in CNF.
# The formula has to be provided as a list of clauses.
# Each clause is a list of non-zero integers indicating the involved variables.
# Negative numbers represent negated variables.
# +
from pycosat import itersolve
def count_models_natively(formula, dummyParam):
    """Count all satisfying assignments of a CNF `formula` with picosat.

    `formula` is a list of clauses; each clause is a list of non-zero ints
    (negative = negated variable). `dummyParam` only exists so the signature
    fits the two-parameter benchmark() helper.
    """
    # Use the `itersolve` name imported in this cell; the previous
    # `pycosat.itersolve` call only worked because a later cell happened to
    # run `import pycosat` first.
    iterator = itersolve(formula)
    return sum(1 for _ in iterator) # might be more efficient than turning into list
# +
import pycosat
print('## OR formula ##')
# A single clause containing all ten variables: x1 OR ... OR x10.
orFormula = [[i for i in range(1, 11)]]
benchmark('Native', count_models_natively, orFormula, None)
print('\n## AND formula ##')
# Ten unit clauses: x1 AND ... AND x10.
andFormula = [[i] for i in range(1, 11)]
benchmark('Native', count_models_natively, andFormula, None)
# -
# A very fast solver, though the input requirements are strongly limiting.
# The solver can also solve bigger OR clauses, e.g. with 20 variables, instantly, but iterating over the solutions then becomes the bottleneck.
# ## python-constraint
#
# [python-constraint](https://labix.org/python-constraint) is directly targeted at constraint solving.
# However, the pre-defined constraint types do not include logical constraints, which is no problem for simple scenarios (there are good arithmetic alternatives), but might become tricky when combining constraints.
# It is possible to define your own constraints.
# +
from constraint import *
# Constraints have to be classes or functions
# Constraints have to be classes or functions
class ExcludeSolutionConstraint(Constraint):
    """Custom constraint that rejects exactly one previously found assignment."""
    def __init__(self, assignments):
        self._solution = assignments  # the complete assignment to forbid
    def __call__(self, variables, domains, assignments, forwardcheck=False):
        # Satisfied as soon as the candidate differs in at least one variable.
        return assignments != self._solution
def count_models_with_solver(problem, variables):
    """Count all solutions by repeatedly solving and excluding each found one.

    NOTE: mutates `problem` by adding one ExcludeSolutionConstraint per
    solution (the library offers no native way to copy a Problem).
    """
    solutions = 0
    solution = problem.getSolution()
    while solution is not None:  # idiomatic form of `not solution is None`
        solutions = solutions + 1
        problem.addConstraint(ExcludeSolutionConstraint(solution))
        solution = problem.getSolution()
    return solutions
def count_models_natively(problem, dummyParam):
    """Return the number of solutions found by the library's own enumeration."""
    all_solutions = problem.getSolutions()
    return len(all_solutions)
# +
from constraint import *
# Variable names x0..x9; each variable gets the domain {0, 1}.
x = ['x' + str(i) for i in range(10)]
print('## OR formula ##')
problem = Problem()
problem.addVariables(x, domain = [0,1])
# OR over 0/1 variables == "the sum is at least 1".
problem.addConstraint(MinSumConstraint(1))
benchmark('Native', count_models_natively, problem, None)
print('\n## AND formula ##')
problem = Problem()
problem.addVariables(x, domain = [0,1])
# AND over 0/1 variables == "every variable is 1", i.e. the sum equals len(x).
problem.addConstraint(ExactSumConstraint(len(x)))
benchmark('Native', count_models_natively, problem, None)
benchmark('Solver-based', count_models_with_solver, problem, x)
# -
# ## Dummy Arithmetic Enumerator
#
# To check how fast the enumeration loop is in general (without the solver overhead for conditional checking or simplification), we define some methods which each check one particular logical formula in an arithmetic way.
# This is based on the observation that `AND` could be replaced with `min() == 1`, `OR` with `max() == 1` and `NOT(x)` with `1 - x`.
# +
import itertools
import numpy as np
def count_models_by_enumeration_or(numVariables, dummyParam):
    """Count assignments satisfying the OR of all variables, by full enumeration."""
    # An assignment satisfies the OR iff some variable is true, i.e. max(...) == 1.
    return sum(1 for bits in itertools.product([False, True], repeat=numVariables)
               if max(bits) == 1)
# Slower solution using vector operations: create assignment matrix
# (rows are assignments, columns are variables), evaluate row-wise
# and aggregate result
def count_models_by_enumeration_or2(numVariables, dummyParam):
    """Vectorized OR count: build the full assignment matrix and reduce it."""
    assignments = np.array(list(itertools.product([False, True], repeat=numVariables)))
    # Row-wise OR (max over columns), then count the satisfied rows.
    return assignments.max(axis=1).sum()
# Slower solution with manual int-to-binary conversion
# (using numpy array instead of list is even slower)
# Slower solution with manual int-to-binary conversion
# (using numpy array instead of list is even slower)
def count_models_by_enumeration_or3(numVariables, dummyParam):
    """Count OR models by decoding each integer 0..2^n-1 into variable bits."""
    satisfied = 0
    for code in range(2 ** numVariables):
        bits = code
        for _ in range(numVariables):
            if bits % 2 == 1:
                satisfied += 1
                break  # early abandoning: one true variable makes the OR true
            bits = bits // 2
    return satisfied
def count_models_by_enumeration_and(numVariables, dummyParam):
    """Count assignments satisfying the AND of all variables (exactly one exists)."""
    # min(...) == 1 holds only for the all-true assignment.
    return sum(min(bits) == 1
               for bits in itertools.product([False, True], repeat=numVariables))
# +
print('## OR formula ##')
benchmark('Enumeration-based (10 variables)', count_models_by_enumeration_or, 10, None)  # 1023 models
benchmark('Enumeration-based (20 variables)', count_models_by_enumeration_or, 20, None)  # 2^20 - 1 models
print('## AND formula ##')
benchmark('Enumeration-based (10 variables)', count_models_by_enumeration_and, 10, None)  # 1 model
benchmark('Enumeration-based (20 variables)', count_models_by_enumeration_and, 20, None)  # 1 model
# -
# This iterative arithmetic approach is comparatively fast, but for each 10 new variables, the processing time will still increase $2^{10} = 1024$ times.
# We can't prevent exponential growth ...
# An alternative approach which uses vectorized evaluation of assignments still has to create huge assignment matrices.
# This takes time and also consumes a lot of memory.
# Plus, it only moves the starting point, the growth still happens ...
# ## Flexible Enumerator
#
# The dummy enumerator above does not allow to build own logical expressions, it is tailored to hard-coded formulas.
# Actually, it is not hard to create a configurable enumerator, which we do now.
# First, we define several classes which allow to formulate nested constraints.
# Next, we adapt our enumeration method as already used above.
# +
# "interface" / super-class
class Expression:
    """Abstract base class; each sub-class implements is_true()."""
    def is_true(self):
        # The previous `pass` silently returned None, so a sub-class that
        # forgot to override is_true() would look permanently "false".
        # Fail loudly instead.
        raise NotImplementedError('sub-classes must implement is_true()')
class Variable(Expression):
    """A boolean variable; its current assignment lives in `value`."""
    def __init__(self):
        self.value = False  # default assignment
    def is_true(self):
        return self.value
class Not(Expression):
    """Logical negation of one wrapped expression."""
    def __init__(self, expression):
        self.__expression = expression
    def is_true(self):
        inner = self.__expression.is_true()
        return not inner
class And(Expression):
    """Logical conjunction over a list of sub-expressions."""
    def __init__(self, expressions):
        self.__expressions = expressions
    def is_true(self):
        # all() short-circuits on the first false operand, like the
        # original explicit loop.
        return all(e.is_true() for e in self.__expressions)
class Or(Expression):
    """Logical disjunction over a list of sub-expressions."""
    def __init__(self, expressions):
        self.__expressions = expressions
    def is_true(self):
        # any() short-circuits on the first true operand, like the
        # original explicit loop.
        return any(e.is_true() for e in self.__expressions)
import itertools
class Problem:
    """A set of boolean variables plus constraints; counts models by brute force."""
    def __init__(self, variables):
        self.__variables = variables
        self.__constraints = []  # several constraints allowed, combined by AND
    # Add an Expression as constraint
    def add_constraint(self, constraint):
        self.__constraints.append(constraint)
    def count_models_by_enumeration(self):
        """Try every variable assignment; count those satisfying all constraints."""
        n_models = 0
        for bits in itertools.product([False, True], repeat=len(self.__variables)):
            # Write the candidate assignment into the variables ...
            for variable, bit in zip(self.__variables, bits):
                variable.value = bit
            # ... and accept it iff every constraint evaluates to true
            # (all() short-circuits like the original break).
            n_models += all(c.is_true() for c in self.__constraints)
        return n_models
def count_model_dispatch(problem, dummyParam):
    """Adapter so benchmark() can call the Problem method through a plain function."""
    return problem.count_models_by_enumeration()
# +
print('## OR formula ##')
x = [Variable() for i in range(10)]
problem = Problem(variables=x)
problem.add_constraint(Or(x))  # 2^10 - 1 = 1023 models
benchmark('Enumeration-based (10 variables)', count_model_dispatch, problem, None)
x = [Variable() for i in range(20)]
problem = Problem(variables=x)
problem.add_constraint(Or(x))
benchmark('Enumeration-based (20 variables)', count_model_dispatch, problem, None)
print('## AND formula ##')
x = [Variable() for i in range(10)]
problem = Problem(variables=x)
problem.add_constraint(And(x))  # exactly one model (all variables true)
benchmark('Enumeration-based (10 variables)', count_model_dispatch, problem, None)
x = [Variable() for i in range(20)]
problem = Problem(variables=x)
problem.add_constraint(And(x))
benchmark('Enumeration-based (20 variables)', count_model_dispatch, problem, None)
# -
# We see some overhead compared to the arithmetic enumerator, but are still faster than any complete enumeration based on a solver.
# We cannot beat iterative solving for formulas which only have a few models.
| Solver_Enumeration_Benchmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Lambda, MaxPool2D, BatchNormalization
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import RMSprop
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import xml.etree.ElementTree as ET
import sklearn
import itertools
import cv2
import scipy
import os
import csv
import matplotlib.pyplot as plt
# %matplotlib inline
from tqdm import tqdm
from tensorflow.python.client import device_lib
# Show the devices TensorFlow can see and check whether a GPU is among them.
print(device_lib.list_local_devices())
import tensorflow as tf
tf.test.gpu_device_name()  # presumably '' means no GPU available — TODO confirm
# Class label mappings: 4-way cell type and binary nuclear morphology.
class1 = {1:'NEUTROPHIL',2:'EOSINOPHIL',3:'MONOCYTE',4:'LYMPHOCYTE'}
class2 = {0:'Mononuclear',1:'Polynuclear'}
tree_path = 'datasets/dataset-master/Annotations'
image_path = 'datasets/dataset-master/JPEGImages'
#Sample image generation
image = cv2.imread(image_path+'/BloodImage_00002.jpg')
tree = ET.parse(tree_path+'/BloodImage_00002.xml')
try:
    # cv2.imread returns None when the file is missing; accessing .shape then
    # raises AttributeError, which the except clause turns into a message.
    image.shape
    # Fixed: the format string had no placeholder, so the shape was discarded.
    print("checked for shape {}".format(image.shape))
except AttributeError:
    print("shape not found")
# Walk the annotation XML and draw one labelled bounding box per object/part.
for elem in tree.iter():
    if 'object' in elem.tag or 'part' in elem.tag:
        for attr in list(elem):
            if 'name' in attr.tag:
                name = attr.text
            if 'bndbox' in attr.tag:
                # Collect the four box coordinates, rounded to whole pixels.
                for dim in list(attr):
                    if 'xmin' in dim.tag:
                        xmin = (round(float(dim.text)))
                    if 'ymin' in dim.tag:
                        ymin = (round(float(dim.text)))
                    if 'xmax' in dim.tag:
                        xmax = (round(float(dim.text)))
                    if 'ymax' in dim.tag:
                        ymax = (round(float(dim.text)))
                # Colour by label prefix (colours are BGR triples for cv2):
                # "R" -> green, "W" -> red, "P" -> blue.
                if name[0] == "R":
                    cv2.rectangle(image, (xmin, ymin),
                                  (xmax, ymax), (0, 255, 0), 1)
                    cv2.putText(image, name, (xmin + 10, ymin + 15), cv2.FONT_HERSHEY_DUPLEX, 1e-3 * image.shape[0], (0, 255, 0), 1)
                if name[0] == "W":
                    cv2.rectangle(image, (xmin, ymin),(xmax, ymax), (0, 0, 255), 1)
                    cv2.putText(image, name, (xmin + 10, ymin + 15),
                                cv2.FONT_HERSHEY_DUPLEX, 1e-3 * image.shape[0], (0, 0, 255), 1)
                if name[0] == "P":
                    cv2.rectangle(image, (xmin, ymin),
                                  (xmax, ymax), (255, 0, 0), 1)
                    cv2.putText(image, name, (xmin + 10, ymin + 15),cv2.FONT_HERSHEY_DUPLEX, 1e-3 * image.shape[0], (255, 0, 0), 1)
plt.figure(figsize=(20,20))
plt.imshow(image)
plt.show()
def gen_det_rec(classes, dataset_dir, ratio=1):
    """List and sanity-check the image/annotation file pairs.

    `ratio` must lie in [0, 1].
    NOTE(review): `classes` and `dataset_dir` are currently unused and the
    function returns nothing — presumably unfinished; verify before relying
    on it.
    """
    assert ratio <= 1 and ratio >= 0
    tree_path = 'datasets/dataset-master/Annotations'
    image_path = 'datasets/dataset-master/JPEGImages'
    image_names = os.listdir(image_path)
    image_names.sort()
    tree_names = os.listdir(tree_path)
    tree_names.sort()
    # Fixed NameError: the list above is `image_names`, not `img_names`.
    file_number = len(image_names)
    assert file_number == len(tree_names),"#labels != file_num"
    # assert file_num==len(label_names)
# assert file_num==len(label_names)
# Load the per-image labels and plot the class distribution.
df1 = pd.read_csv('datasets/dataset-master/labels.csv')
df1 = df1.drop(columns=['Unnamed: 0']).dropna()
df1
#reader = csv.reader(open('/dataset-master/labels.csv'))
# skip the header
# Keep only single-label rows (drop entries listing multiple categories).
y3 = df1[~df1["Category"].str.contains(",", na=False)]['Category']
y3
encoder = LabelEncoder()
encoder.fit(y3)
encoded_y = encoder.transform(y3)
counts = np.bincount(encoded_y)
print(counts)
fig, ax = plt.subplots()
plt.bar(list(range(5)), counts)
# NOTE(review): set_xticklabels without a matching set_xticks may misalign
# the labels with the bars — verify the rendered figure.
ax.set_xticklabels(('', 'Basophil', 'Eosinophil', 'Lymphocyte', 'Monocyte', 'Neutrophil'))
ax.set_ylabel('Number of Cells')
# +
#Load data from folder
from tqdm import tqdm
def get_data(folder):
    """Load and resize every image under `folder` into label-paired arrays.

    Returns:
        X: array of images resized with cubic interpolation (dsize=(60, 80)).
        y: 5-way cell-type labels (see `class1`; 5 = anything else).
        z: binary labels (1 = polynuclear, 0 = mononuclear/other, see `class2`).
    """
    # (cell-type label, polynuclear flag) per folder name; unknown names -> (5, 0).
    label_map = {
        'NEUTROPHIL': (1, 1),
        'EOSINOPHIL': (2, 1),
        'MONOCYTE': (3, 0),
        'LYMPHOCYTE': (4, 0),
    }
    images = []
    type_labels = []
    nuclear_labels = []
    for wbc_type in os.listdir(folder):
        if wbc_type.startswith('.'):
            continue  # skip hidden entries such as .DS_Store
        label, label2 = label_map.get(wbc_type, (5, 0))
        for img_filename in tqdm(os.listdir(folder + wbc_type)):
            img_file = cv2.imread(folder + wbc_type + '/' + img_filename)
            if img_file is not None:  # unreadable files are silently skipped
                resized = cv2.resize(img_file, dsize=(60,80), interpolation=cv2.INTER_CUBIC)
                images.append(np.asarray(resized))
                type_labels.append(label)
                nuclear_labels.append(label2)
    return np.asarray(images), np.asarray(type_labels), np.asarray(nuclear_labels)
X_train, y_train, z_train = get_data('datasets/dataset2-master/images/TRAIN/')
X_test, y_test, z_test = get_data('datasets/dataset2-master/images/TEST/')
# One Hot Encoding of vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0])
# 5 classes for the cell type (y), 2 for mononuclear/polynuclear (z).
y_trainHot = to_categorical(y_train, num_classes = 5)
y_testHot = to_categorical(y_test, num_classes = 5)
z_trainHot = to_categorical(z_train, num_classes = 2)
z_testHot = to_categorical(z_test, num_classes = 2)
print(class1)
print(class2)
# -
#Plot RGB pixel intensities
def plotHistogram(a):
    """Show image `a` next to per-channel pixel-intensity histograms."""
    plt.figure(figsize=(10,5))
    plt.subplot(1,2,1)
    plt.imshow(a)
    plt.axis('off')
    histo = plt.subplot(1,2,2)
    histo.set_ylabel('Count')
    histo.set_xlabel('Pixel Intensity')
    n_bins = 30
    # One translucent histogram per colour channel (channels 0/1/2 drawn
    # in red/green/blue, as in the original).
    for channel_index, colour in enumerate(('r', 'g', 'b')):
        plt.hist(a[:, :, channel_index].flatten(), bins=n_bins, lw=0, color=colour, alpha=0.5)
plotHistogram(X_train[1])
# Scale pixel values from [0, 255] into [0, 1].
X_train=np.array(X_train)
X_train=X_train/255.0
X_test=np.array(X_test)
X_test=X_test/255.0
plotHistogram(X_train[1])
# +
# Functions for Plotting Learning Curves and Confusion Matrix
class MetricsCheckpoint(Callback):
    """Keras callback that records all epoch metrics and saves them to `savepath`."""
    def __init__(self, savepath):
        super(MetricsCheckpoint, self).__init__()
        self.savepath = savepath
        self.history = {}  # metric name -> list of per-epoch values
    def on_epoch_end(self, epoch, logs=None):
        # Append each metric reported this epoch, then persist the whole
        # history dict as an .npy file.
        for metric_name, metric_value in logs.items():
            self.history.setdefault(metric_name, []).append(metric_value)
        np.save(self.savepath, self.history)
def plotKerasLearningCurve():
    """Plot accuracy learning curves from the metrics dict saved in 'logs.npy'."""
    plt.figure(figsize=(10,5))
    # allow_pickle=True is required to reload a dict stored via np.save:
    # NumPy >= 1.16.3 disables pickle loading by default and would raise here.
    metrics = np.load('logs.npy', allow_pickle=True)[()]
    filt = ['acc'] # try to add 'loss' to see the loss learning curve
    for k in filter(lambda x : np.any([kk in x for kk in filt]), metrics.keys()):
        l = np.array(metrics[k])
        # Red for training curves, blue for validation curves.
        plt.plot(l, c= 'r' if 'val' not in k else 'b', label='val' if 'val' in k else 'train')
        # Mark the best epoch (min for loss-like metrics, max otherwise).
        x = np.argmin(l) if 'loss' in k else np.argmax(l)
        y = l[x]
        plt.scatter(x,y, lw=0, alpha=0.25, s=100, c='r' if 'val' not in k else 'b')
        plt.text(x, y, '{} = {:.4f}'.format(x,y), size='15', color= 'r' if 'val' not in k else 'b')
    plt.legend(loc=4)
    plt.axis([0, None, None, None]);
    plt.grid()
    plt.xlabel('Number of epochs')
    plt.ylabel('Accuracy')
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.figure(figsize = (5,5))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    if normalize:
        # Convert raw counts to per-true-class fractions (each row sums to 1).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # White text on dark cells, black on light cells, for readability.
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def plotLearningCurve(history):
    """Plot accuracy and loss curves from a Keras History and save them as PNGs."""
    plt.figure(figsize=(8,8))
    plt.subplot(1,2,1)
    # NOTE(review): 'acc'/'val_acc' are the pre-TF2 Keras metric keys; newer
    # versions use 'accuracy'/'val_accuracy' — confirm the installed version.
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epochs')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('./analysis.png')
    #plt.clf()
    # summarize history for loss
    plt.subplot(1,2,2)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('./loss_curve.png')
# -
def runKerasCNNAugment(a, b, c, d, e=1, epochs=1, map_characters=None):
    """Train a small CNN with data augmentation and report its performance.

    Args:
        a, b: training images and one-hot labels.
        c, d: test images and one-hot labels.
        e: convolution stride of the first layer (default 1).
        epochs: number of training epochs (default 1, so the existing
            5-argument call keeps working — previously it raised TypeError).
        map_characters: optional {class_index: name} mapping for report and
            confusion-matrix labels; defaults to numeric indices. (The
            original referenced an undefined `dict_characters`, a NameError.)
    """
    batch_size = 128
    num_classes = len(b[0])
    if map_characters is None:
        map_characters = {i: str(i) for i in range(num_classes)}
    # img_rows, img_cols = a.shape[1],a.shape[2]
    img_rows, img_cols = 60, 80
    input_shape = (img_rows, img_cols, 3)
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape, strides=e))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    # Light augmentation: small rotations/shifts plus horizontal flips.
    augmented_images = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=5,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    history = model.fit_generator(augmented_images.flow(a, b, batch_size=42),
                                  steps_per_epoch=len(a) / 42, epochs=epochs,
                                  validation_data=[c, d],
                                  callbacks=[MetricsCheckpoint('logs')])
    score = model.evaluate(c, d, verbose=0)
    print('\nKeras CNN #1C - accuracy:', score[1], '\n')
    y_pred = model.predict(c)
    print('\n', sklearn.metrics.classification_report(
        np.where(d > 0)[1], np.argmax(y_pred, axis=1),
        target_names=list(map_characters.values())), sep='')
    Y_pred_classes = np.argmax(y_pred, axis=1)
    Y_true = np.argmax(d, axis=1)
    plotKerasLearningCurve()
    plt.show()
    plotLearningCurve(history)
    plt.show()
    confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
    plot_confusion_matrix(confusion_mtx, classes=list(map_characters.values()))
    plt.show()
# NOTE(review): this passes 5 positional args to a function defined with 6
# parameters (a,b,c,d,e,epochs) — as written the call raises TypeError, and
# the trailing 1 lands in the stride parameter `e`, not `epochs`. Verify intent.
runKerasCNNAugment(X_train,y_trainHot,X_test,y_testHot,1)
| archives/phase1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spampinato mice retina mea252ch pair recording - 1
#
# ## Part 1) Cleaning the ground-truth data
#
# In this set of notebooks, the dataset comes from paired juxtacellular/extracellular recordings from mice retina _in vitro_.
# The MEA has 252 channels.
#
# The official publication of this open dataset can be found at the following address:
# https://zenodo.org/record/1205233#.W9mq1HWLTIF
#
# These datasets were used by <NAME> al in the following "spyking circus" paper:
# https://elifesciences.org/articles/34518
#
#
# After inspecting the juxta-cellular data, we found that some recordings don't have a good enough quality to be considered as "ground truth". To be "ground truth", a unit is required to be stable in the detection, peak signal-to-noise ratio (SNR) and amplitude.
#
# At the end of our quality assessment, some files are removed for the main study shown in [**"spampinato-mice-retina-mea252ch-pair-recording-part2"**](https://spikeinterface.github.io/blog/spampinato-mice-retina-mea252ch-pair-recording-part2/).
#
#
# ### Quality assessment details
#
# First, we have to run the script `detect_ground_truth_spike_on_juxta.py`.
#
# This script:
# * unzips the downloaded data
# * runs a juxta cellular detection
# * generates figure to manually check juxtacellular quality
# * computes the peak SNR on the max channel of the MEA.
#
# Before running the script, we need:
# * to create a folder **basedir**
# * to create a subfolder **basedir/original_files** that contains all downloaded archives (20160415_patch2.tar.gz, ...)
#
# Then we can run the script `detect_ground_truth_spike_on_juxta.py`
#
# After we can:
# * inspect the explanatory figures in each folder.
#
#
# Author: [<NAME>](https://github.com/samuelgarcia), CRNL, Lyon
# # Criterium to keep or remove a file
#
# Having a very reliable ground truth is crucial, as all the following spike sorting performance metrics are designed on the hypothesis the ground truth is **indeed ground truth**.
#
# In the following script we choose a high threshold value for peak detection: **thresh = med + 8\*mad**, where:
# * **med** is the median of the signal (the baseline),
# * **mad** is the median absolute deviation (a robust std estimation),
# * 8 is a quite high relative threshold that ensures the absence of false positive.
#
#
# Two main criteria were used to keep a recording:
# * the distribution of the peak values of the juxtacelullar action potentials must have a Gaussian distribution:
# * a truncated Gaussian suggests that false negative (misses) corrupt the "ground truth",
# * a multi-modal distribution suggests either that an amplitude drift occured or that two (or more) cells were present.
#
#
# # List of accepted recording (8)
#
#
# ```python
# '20160415_patch2',
# '20170803_patch1',
# '20160426_patch3',
# '20170725_patch1',
# '20170621_patch1',
# '20160426_patch2',
# '20170728_patch2',
# '20170713_patch1',
#
# ```
#
# # List of rejected recording (11)
#
# ```python
# '20170706_patch2'
# '20170629_patch2'
# '20170622_patch2'
# '20170726_patch1'
# '20170706_patch1'
# '20170706_patch3'
# '20170627_patch1'
# '20170630_patch1'
# '20170629_patch3'
# '20170623_patch1'
# '20170622_patch1'
#
# ```
#
# (Some reader may think that we are too strict, but we prefer to be strict to ensure safe final results.
# Feel free to modify this list as you prefer using your own criteria.)
# +
import matplotlib
import os, shutil
import zipfile, tarfile
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# path
# Root of the downloaded zenodo dataset; adjust to your own machine.
basedir = '/media/samuel/dataspikesorting/DataSpikeSortingHD2/Pierre/zenodo/'
recording_folder = basedir + 'original_files/'  # the downloaded .tar.gz archives
ground_truth_folder = basedir + 'ground_truth/'  # output: detected juxta spike indexes
# %matplotlib notebook
# -
# # Step 1 : Re-detect properly juxtacellular peaks
# this tridesclous utils are imported only for juxta detection to keep this script simple
from tridesclous.peakdetector import detect_peaks_in_chunk
from tridesclous.tools import median_mad
from tridesclous.waveformtools import extract_chunks
rec_names = ['20170629_patch3', '20170728_patch2', '20170630_patch1', '20160426_patch2', '20170621_patch1',
'20170627_patch1', '20170706_patch3', '20170706_patch1', '20170726_patch1', '20170725_patch1',
'20160426_patch3', '20170622_patch1', '20170623_patch1', '20170622_patch2', '20170629_patch2',
'20170713_patch1', '20160415_patch2', '20170706_patch2', '20170803_patch1']
# ## Unzip all
# this unzip all files into recording_folder
# Extract every recording archive into its own folder under `recording_folder`.
# Already-extracted recordings (target folder exists) are skipped, so the cell
# can safely be re-run.
for rec_name in rec_names:
    filename = recording_folder + rec_name + '.tar.gz'
    if os.path.exists(recording_folder+rec_name) and os.path.isdir(recording_folder+rec_name):
        continue
    print('unzip', rec_name)
    # FIX: the original opened the tarfile without ever closing it, leaking one
    # file handle per archive; the context manager guarantees it is closed.
    with tarfile.open(filename, mode='r|gz') as t:
        t.extractall(recording_folder+rec_name)
# ## Detect ground-truth spikes on juxta
# +
# Detect ground-truth spikes on the juxtacellular channel of every recording,
# save the spike indexes plus diagnostic figures, and collect per-recording
# statistics (spike count, best channel, peak amplitude, SNR) into `gt_info`.
if not os.path.exists(ground_truth_folder):
    os.mkdir(ground_truth_folder)
gt_info = pd.DataFrame(index=rec_names)
for rec_name in rec_names:
    print('detect_juxta: ', rec_name)
    # get juxta signal
    # NOTE(review): if several files end with 'juxta.raw' the LAST match wins;
    # each recording is presumably expected to contain exactly one - confirm.
    dirname = recording_folder + rec_name + '/'
    for f in os.listdir(dirname):
        if f.endswith('juxta.raw'):
            juxta_filename = dirname + f
    juxta_sig = np.memmap(juxta_filename, dtype='float32')
    # get mea signals (any other .raw file in the folder)
    for f in os.listdir(dirname):
        if f.endswith('.raw') and not f.endswith('juxta.raw'):
            mea_filename = dirname + f
    # the binary header size is stored as 'padding = N' in the companion .txt
    with open(mea_filename.replace('.raw', '.txt'), mode='r') as f:
        offset = int(re.findall('padding = (\d+)', f.read())[0])
    mea_sigs = np.memmap(mea_filename, dtype='uint16', offset=offset).reshape(-1, 256)
    print(1)
    # select only the 252 mea channels (see PRB file)
    mea_sigs = mea_sigs[:, list(range(126)) + list(range(128,254))]
    print(2)
    gt_folder = ground_truth_folder + rec_name + '/'
    os.mkdir(gt_folder)
    # detect spikes: robust threshold at median + 8 * MAD, negative peaks
    med, mad = median_mad(juxta_sig)
    print(3)
    thresh = med + 8*mad
    gt_indexes = detect_peaks_in_chunk(juxta_sig[:, None], k=10,thresh=thresh, peak_sign='-')
    gt_indexes = gt_indexes.astype('int64')
    gt_indexes.tofile(gt_folder+'juxta_peak_indexes.raw')
    print(4)
    # save some figures for visual checking
    sr = 20000.
    times = np.arange(juxta_sig.size) / sr
    fig, ax = plt.subplots()
    ax.plot(times, juxta_sig)
    ax.plot(times[gt_indexes], juxta_sig[gt_indexes], ls='None', color='r', marker='o')
    ax.set_xlim(0, 10)
    ax.axhline(-thresh, color='k', ls='--')
    ax.set_title('juxta detection - ' + rec_name)
    fig.savefig(gt_folder+'juxta detection.png')
    # histogram of detected peak amplitudes (used later for the accept/reject QC)
    fig, ax = plt.subplots()
    count, bins = np.histogram(juxta_sig[gt_indexes], bins=np.arange(np.min(juxta_sig[gt_indexes]), 0, 0.5))
    ax.plot(bins[:-1], count)
    ax.axvline(-thresh, color='k', ls='--')
    ax.set_title('juxta peak amplitude - ' + rec_name)
    fig.savefig(gt_folder+'juxta peak amplitude.png')
    print(5)
    # extract waveforms with only 150 peaks to minimize RAM
    n_left, n_right = -45, 60
    some_gt_indexes = np.random.choice(gt_indexes, size=150)
    waveforms = extract_chunks(mea_sigs, some_gt_indexes+n_left, n_right-n_left)
    wf_median, wf_mad = median_mad(waveforms, axis=0)
    print(6)
    # get on which channel the max is and the value
    max_on_channel = np.argmin(np.min(wf_median, axis=0), axis=0)
    # get the MAD (robust STD) on the mea signal
    # this estimates the SNR
    mea_median, mea_mad = median_mad(mea_sigs[:, max_on_channel] , axis=0)
    baseline = mea_median
    print(7)
    peak_value = np.min(wf_median[:, max_on_channel])
    peak_value = peak_value- baseline
    peak_snr = np.abs(peak_value/mea_mad)
    # everything in DataFrame
    gt_info.at[rec_name, 'nb_spike'] = gt_indexes.size
    gt_info.at[rec_name, 'max_on_channel'] = max_on_channel
    gt_info.at[rec_name, 'peak_value'] = peak_value
    gt_info.at[rec_name, 'peak_snr'] = peak_snr
    gt_info.at[rec_name, 'noise_mad'] = mea_mad
    # median waveform figures (flattened across channels, and per channel)
    fig, ax = plt.subplots()
    ax.plot(wf_median.T.flatten())
    fig.savefig(gt_folder+'GT waveforms flatten.png')
    fig, ax = plt.subplots()
    ax.plot(wf_median)
    ax.axvline(-n_left)
    fig.savefig(gt_folder+'GT waveforms.png')
    print(8)
gt_info.to_excel(ground_truth_folder+'gt_info.xlsx')
# -
# # Step2 : Check juxtacellular quality
# +
# 2 simple functions
def get_juxta_filename(rec_name):
    """Return the path of the juxtacellular '*juxta.raw' file of one recording.

    Parameters
    ----------
    rec_name : str
        Name of the recording folder inside `recording_folder`.

    Raises
    ------
    FileNotFoundError
        If the recording folder contains no file ending in 'juxta.raw'.
        (The original version fell through and raised a confusing
        NameError on the return line in that case.)
    """
    dirname = recording_folder + rec_name + '/'
    for f in os.listdir(dirname):
        if f.endswith('juxta.raw'):
            # return the first match; each recording is expected to
            # contain a single juxtacellular file
            return dirname + f
    raise FileNotFoundError('no *juxta.raw file found in ' + dirname)
def plot_juxta_amplitude(rec_name):
    """Plot juxtacellular peak amplitudes of one recording for visual QC.

    Top panel: histogram of ground-truth peak amplitudes with the detection
    threshold (red dashed), the baseline median (black) and 1..5 MAD bands
    (grey). Bottom panel: amplitude versus spike index, to spot drifts.
    """
    juxta_filename = get_juxta_filename(rec_name)
    juxta_sig = np.memmap(juxta_filename, dtype='float32')
    # robust baseline and spread: median and MAD (1.4826 scales MAD to a std)
    med = np.median(juxta_sig)
    mad = np.median(np.abs(juxta_sig-med))*1.4826
    thresh = med + 8*mad
    # load the ground-truth peak indexes saved by the detection step
    # (FIX: removed a dead assignment that first bound `gt_indexes` to a
    # wrong path string before immediately overwriting it)
    gt_indexes = np.fromfile(ground_truth_folder + rec_name + '/juxta_peak_indexes.raw', dtype='int64')
    gt_amplitudes = juxta_sig[gt_indexes]
    fig, axs = plt.subplots(nrows=2)
    count, bins = np.histogram(gt_amplitudes, bins=np.arange(np.min(juxta_sig[gt_indexes]), 0, 0.5))
    ax = axs[0]
    ax.plot(bins[:-1], count)
    ax.axvline(-thresh, color='r', ls='--')
    ax.axvline(med, color='k', ls='-')
    for i in range(1,6):
        ax.axvspan(med - i * mad, med + i * mad, color='k', alpha=0.05)
    fig.suptitle('juxta peak amplitude - ' + rec_name)
    ax = axs[1]
    ax.plot(gt_indexes, gt_amplitudes, ls='None', marker='o')
    ax.axhline(-thresh, color='r', ls='--')
    for i in range(1,6):
        ax.axhspan(med - i * mad, med + i * mad, color='k', alpha=0.05)
# -
# # Why some recordings are are not kept?
#
#
# In the following figures:
# * the black vertical line is the baseline (median) of juxta-cellular trace,
# * the grey areas represent 1, 2, 3, 4, 5 MAD (robust STD),
# * the red line is the detection threshold.
# ## Figure for 20170706_patch2
#
# For this cell too few events are detected.
plot_juxta_amplitude('20170706_patch2')
# ## Figure for 20170629_patch2
#
# Here the peak amplitude distribution crosses the detection threshold.
# Missed events are obvious in the middle part of recording.
plot_juxta_amplitude('20170629_patch2')
# ## Figure for 20170622_patch2
#
# Here the peak amplitude distribution crosses the detection threhold and too few events got detected.
plot_juxta_amplitude('20170622_patch2')
# ## Figure for 20170726_patch1
#
# Here again the peak amplitude distribution crosses the detection threshold.
# Some spikes are clearly missed at the beginning.
plot_juxta_amplitude('20170726_patch1')
# ## Figure for 20170706_patch1
#
# Obvious missing spikes.
plot_juxta_amplitude('20170706_patch1')
# ## Figure for 20170706_patch3
#
# Obvious missing spikes.
plot_juxta_amplitude('20170706_patch3')
# ## Figure for 20170627_patch1
#
# Obvious missing spikes
plot_juxta_amplitude('20170627_patch1')
# ## Figure for 20170630_patch1
#
# Suspicion of missing spikes at the beggining and at the end of recording.
plot_juxta_amplitude('20170630_patch1')
# ## Figure for 20170629_patch3
#
# Obvious missing spikes.
plot_juxta_amplitude('20170629_patch3')
# ## Figure for 20170623_patch1 : NO
#
# Here the amplitude distribution right tail is too close to the detection threshold and there is a suspicion of missed spikes.
plot_juxta_amplitude('20170623_patch1')
# # List of clean ground truth
# ## Figure for 20170713_patch1 : OK, but boundary
#
# We see here two clear peaks in the distribution suggesting that there could be an electrode movement.
plot_juxta_amplitude('20170713_patch1')
# ## Figure for 20160415_patch2 : OK
#
# A ground truth unit we can trust!!
plot_juxta_amplitude('20160415_patch2')
# ## Figure for 20170803_patch1 : OK
plot_juxta_amplitude('20170803_patch1')
# ## Figure for 20160426_patch3 : OK
#
# A ground truth we can trust!!
plot_juxta_amplitude('20160426_patch3')
# ## Figure for 20170725_patch1 : OK but boundary
#
# A ground truth we can trust, but there is a suspicious change in amplitude in the middle of recording.
plot_juxta_amplitude('20170725_patch1')
# ## Figure for 20170621_patch1 : OK
plot_juxta_amplitude('20170621_patch1')
# ## Figure for 20160426_patch2 : OK
plot_juxta_amplitude('20160426_patch2')
# ## Figure for 20170728_patch2 : OK
#
# Ok but some movement at the end...
plot_juxta_amplitude('20170728_patch2')
# # Conclusion
#
# * 11 out of 19 files have been removed for further ground-truth analysis.
# * 8 out of 19 files are kept for ground-truth analysis.
#
# For paired recording ground truth, the ground truth itself has to be carefully verified.
#
# The original ground spike index provided on 19 files by the authors are not trustable for a fair spike sorting comparison.
| posts/spampinato-mice-retina-mea252ch-pair-recording-part1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import string
import os
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import cv2
from PIL import Image
from scipy import misc
from skimage import io
data_path = "C:/Users/varun/Downloads/samples"
# Keep the file name of every PNG/JPG sample; the name itself encodes the
# captcha text (one character per position, e.g. "2b827.png").
labels = [fname for fname in os.listdir(data_path)
          if fname.endswith((".png", ".jpg"))]
n_samples = len(labels)
labels
cropped_images = []
cropped_images_labels = []
# Each captcha image holds 5 characters. Slice one fixed 50x20 pixel window
# per character position and pair it with the matching character of the
# file name (which is the label string).
char_windows = [(30, 50), (50, 70), (70, 90), (95, 115), (115, 135)]
for idx in range(n_samples):
    #image=cv2.imread('samples/'+str(labels[idx]))
    image = io.imread('samples/' + str(labels[idx]), as_gray=True)
    for pos, (left, right) in enumerate(char_windows):
        cropped_images.append(image[0:50, left:right])
        cropped_images_labels.append(labels[idx][pos])
len(cropped_images)
len(cropped_images_labels)
print(cropped_images_labels[10],cropped_images_labels[11],cropped_images_labels[12],cropped_images_labels[13],cropped_images_labels[14])
plt.imshow(cropped_images[10])
plt.imshow(cropped_images[11])
plt.imshow(cropped_images[12])
plt.imshow(cropped_images[13])
plt.imshow(cropped_images[14])
set(cropped_images_labels)
# BUG FIX: the original loop did `image = image/255.0`, which only rebinds the
# loop variable and never writes back - no image was actually normalised.
# Write the scaled array back into the list so pixel values end up in [0, 1].
for _i, image in enumerate(cropped_images):
    cropped_images[_i] = image / 255.0
image.size
image.shape
label_characters=string.ascii_lowercase+str(string.digits)
len(label_characters)
cropped_images_tensor=tf.convert_to_tensor(cropped_images)
cropped_images_tensor.shape
# Input: one 50x20 single-channel character crop.
inp_shape=(50,20,1)
# Small CNN: two conv/pool stages, one hidden dense layer, and a 36-way
# softmax head (sized for 26 letters + 10 digits, although only 19 classes
# actually occur in this dataset's label encoding).
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=inp_shape),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    #tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(36, activation='softmax')
])
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Encode each character label as an integer class id. The dict reproduces the
# original 19-branch if-chain exactly; note the mapping is NOT alphabetical
# ('x'->0, 'y'->1, digits keep their face value, '9' never occurs). Characters
# without an entry are left unchanged, exactly like the original chain.
_CHAR_TO_IDX = {
    '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
    'b': 10, 'c': 11, 'd': 12, 'e': 13, 'f': 14, 'g': 15,
    'm': 16, 'n': 17, 'p': 18, 'w': 19, 'x': 0, 'y': 1,
}
for n, i in enumerate(cropped_images_labels):
    cropped_images_labels[n] = _CHAR_TO_IDX.get(i, i)
cropped_images_labels=np.array(cropped_images_labels)
cropped_images_tensor=np.array(cropped_images_tensor)
cropped_images_tensor=cropped_images_tensor.reshape((5325,50,20,1))
cropped_images_tensor.shape
history = model.fit(cropped_images_tensor, cropped_images_labels, epochs=30, batch_size=32)
test_image_1='test1.png'
test_image_1=io.imread((test_image_1))
plt.imshow(test_image_1)
test_image_2='test2.png'
test_image_2=io.imread((test_image_2))
plt.imshow(test_image_2)
test_image_3='test3.png'
test_image_3=io.imread((test_image_3))
plt.imshow(test_image_3)
test_image_4='test4.png'
test_image_4=io.imread((test_image_4))
plt.imshow(test_image_4)
test_image_5='test5.png'
test_image_5=io.imread((test_image_5))
plt.imshow(test_image_5)
test_image_1='test1.png'
test_image_2='test2.png'
test_image_3='test3.png'
test_image_4='test4.png'
test_image_5='test5.png'
test_images=[test_image_1,test_image_2,test_image_3,test_image_4,test_image_5]
cropped_test_images=[]
for i in test_images:
image=io.imread((i), as_gray=True)
crop_test_image_1=image[0:50, 30:50]
crop_test_image_2=image[0:50, 50:70]
crop_test_image_3=image[0:50, 70:90]
crop_test_image_4=image[0:50, 95:115]
crop_test_image_5=image[0:50, 115:135]
cropped_test_images.append(crop_test_image_1)
cropped_test_images.append(crop_test_image_2)
cropped_test_images.append(crop_test_image_3)
cropped_test_images.append(crop_test_image_4)
cropped_test_images.append(crop_test_image_5)
cropped_test_images_tensor=tf.convert_to_tensor(cropped_test_images)
cropped_test_images_tensor=np.array(cropped_test_images_tensor)
cropped_test_images_tensor=cropped_test_images_tensor.reshape((25, 50, 20,1))
yhat=model.predict(cropped_test_images_tensor)
yhat.shape
# Decode the softmax outputs back to characters: argmax -> class id -> char.
# _IDX_TO_CHAR is the exact inverse of the training label encoding; ids with
# no character (9, 20..35) are skipped, exactly like the original if-chain.
_IDX_TO_CHAR = {
    2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8',
    10: 'b', 11: 'c', 12: 'd', 13: 'e', 14: 'f', 15: 'g',
    16: 'm', 17: 'n', 18: 'p', 19: 'w', 0: 'x', 1: 'y',
}
# predicted class id of every cropped character
d = [np.argmax(row) for row in yhat]
# predicted character of every crop that maps to a known class
e = [_IDX_TO_CHAR[i] for i in d if i in _IDX_TO_CHAR]
s = ''
a = s.join(e)
test_label_1=a[:5]
test_label_2=a[5:10]
test_label_3=a[10:15]
test_label_4=a[15:20]
test_label_5=a[20:25]
print('The captcha text is')
print('Test Label 1:',test_label_1)
print('Test Label 2:',test_label_2)
print('Test Label 3:',test_label_3)
print('Test Label 4:',test_label_4)
print('Test Label 5:',test_label_5)
| ocr_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import logging
from IPython.display import clear_output
logging.basicConfig(level=logging.INFO)
modulepath = os.path.abspath(os.path.join(os.getcwd(), '../'))
sys.path.insert(0, modulepath)
from interact import System
# -
# # Working with a contact DataFrame
#
# ## 1.0 Creating a contact DataFrame
# +
# Loading a single RCSB Protein Data Bank (pdb) file from disk, together with
# its MOL2 companion (atom types / bonds) for the same structure.
pdb = os.path.join(modulepath, 'tests/files/dnmt.pdb')
mol2 = os.path.join(modulepath, 'tests/files/dnmt.mol2')
molsys = System(pdb, mol2file=mol2)
for frame, fn in molsys.iter_frames(auto_chunk=False):
    # Select everything but water (residue name 'HOH')
    sel = frame[frame['resName'] != 'HOH']
    # Build pairwise-distance matrix
    # (return value unused; presumably computed for its side effect - confirm)
    sel.distances()
    # Covalent bonds within residues 412 and 423.
    # NOTE(review): covalent_bonds() appears to return a frame with
    # ('source', ...) / ('target', ...) multi-index columns - confirm.
    dt412 = sel[sel['resSeq'] == 412].covalent_bonds()
    da423 = sel[sel['resSeq'] == 423].covalent_bonds()
    # Get donors: an N or O covalently bonded to an H, in either bond direction
    don412 = dt412[((dt412[('source', 'element')].isin(('N', 'O'))) & (dt412[('target', 'element')] == 'H')) |
                   ((dt412[('target', 'element')].isin(('N', 'O'))) & (dt412[('source', 'element')] == 'H'))]
    don423 = da423[((da423[('source', 'element')].isin(('N', 'O'))) & (da423[('target', 'element')] == 'H')) |
                   ((da423[('target', 'element')].isin(('N', 'O'))) & (da423[('source', 'element')] == 'H'))]
    print(don423)
# -
| examples/basic2_contact_frames.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow 2.3 on Python 3.6 (CUDA 10.1)
# language: python
# name: python3
# ---
# + [markdown] id="6cqSHc1dtswv"
# # GRU 감성 분류기
# + [markdown] id="nUriko_vtswz"
# 이 노트북에서 GRU를 사용해 감성에 따라 IMDB 영화 리뷰를 분류합니다.
# + [markdown] id="Dut515Mktswz"
# [](https://colab.research.google.com/github/rickiepark/dl-illustrated/blob/master/notebooks/11-8.gru_sentiment_classifier.ipynb)
# + [markdown] id="ytasvvFCtswz"
# #### 라이브러리 적재
# + id="kwYqJi-Dtsw0"
from tensorflow import keras
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Embedding, SpatialDropout1D
from tensorflow.keras.layers import GRU # new!
from tensorflow.keras.callbacks import ModelCheckpoint
import os
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="uz2xTMfytsw0"
# #### 하이퍼파라미터 설정
# + id="uGerl956tsw0"
# output directory
output_dir = 'model_output/gru'
# training
epochs = 4
batch_size = 128
# vector-space embedding
n_dim = 64
n_unique_words = 10000
max_review_length = 100 # lowered because gradients vanish over long sequences
pad_type = trunc_type = 'pre'
drop_embed = 0.2
# GRU layer architecture
n_gru = 256
drop_gru = 0.2
# dense layer architecture (unused, see model-building cell)
# n_dense = 256
# dropout = 0.2
# + [markdown] id="QqXbhKmYtsw0"
# #### 데이터 적재
# + id="XvyIYYyNtsw1" colab={"base_uri": "https://localhost:8080/"} outputId="e4545d1e-c50b-4ac0-f097-3eacb5386713"
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words) # n_words_to_skip 삭제
# + [markdown] id="AZapBitHtsw1"
# #### 데이터 전처리
# + id="Z4xXXN87tsw1"
x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
# + [markdown] id="cxqcJFOktsw1"
# #### 신경망 만들기
# + id="1SFI8EQ9tsw1"
model = Sequential()
# token embedding -> spatial dropout -> single GRU -> sigmoid binary output
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(SpatialDropout1D(drop_embed))
model.add(GRU(n_gru, dropout=drop_gru))
# model.add(Dense(n_dense, activation='relu')) # a dense layer is generally not stacked on top for NLP
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid'))
# + id="4i1-J1yMtsw1" colab={"base_uri": "https://localhost:8080/"} outputId="bf5a9fc4-2995-4f67-82bd-dd3c24d63523"
model.summary()
# + [markdown] id="5BO9NAEKtsw2"
# #### 모델 설정
# + id="W3sAYfGytsw2"
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# + id="NJgshp9stsw3"
modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# + [markdown] id="4jago2KLtsw3"
# #### 훈련!
# + id="nt5hekxNtsw3" colab={"base_uri": "https://localhost:8080/"} outputId="7c513a2b-ce27-4f31-b442-50bd07055317"
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])
# + [markdown] id="ljdWoWJFtsw3"
# #### 평가
# + id="L-8dITmUtsw3"
model.load_weights(output_dir+"/weights.02.hdf5")
# + id="PqlFabHvtsw4"
y_hat = model.predict(x_valid)
# + id="OBHEVWKxtsw4" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="e1847405-7077-4a30-a9b5-7985c4e98804"
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
# + id="8ds8Pwpktsw4" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="4b0892ab-d139-4b16-ce1c-c1f2c3afa81f"
"{:0.2f}".format(roc_auc_score(y_valid, y_hat)*100.0)
| notebooks/11-8.gru_sentiment_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Solution Notebook
# ## Problem: Implement quick sort.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Pythonic-Code](#Pythonic-Code)
# * [Unit Test](#Unit-Test)
# ## Constraints
#
# * Is a naive solution sufficient (ie not in-place)?
# * Yes
# * Are duplicates allowed?
# * Yes
# * Can we assume the input is valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * None -> Exception
# * Empty input -> []
# * One element -> [element]
# * Two or more elements
# ## Algorithm
#
# Wikipedia's animation:
# 
#
# * Set pivot to the middle element in the data
# * For each element:
# * If current element is the pivot, continue
# * If the element is less than the pivot, add to left array
# * Else, add to right array
# * Recursively apply quicksort to the left array
# * Recursively apply quicksort to the right array
# * Merge the left array + pivot + right array
#
# Complexity:
# * Time: O(n log(n)) average, best, O(n^2) worst
# * Space: O(n)
#
# Misc:
#
# * More sophisticated implementations are in-place, although they still take up recursion depth space
# * Most implementations are not stable
#
# See [Quicksort on wikipedia](https://en.wikipedia.org/wiki/Quicksort):
#
# Typically, quicksort is significantly faster in practice than other Θ(nlogn) algorithms, because its inner loop can be efficiently implemented on most architectures [presumably because it has good cache locality], and in most real-world data, it is possible to make design choices which minimize the probability of requiring quadratic time.
#
# See: [Quicksort vs merge sort](http://stackoverflow.com/questions/70402/why-is-quicksort-better-than-mergesort)
# ## Code
# +
from __future__ import division
class QuickSort(object):
    """Naive (not in-place) quicksort.

    ``sort`` validates the input and delegates to the recursive ``_sort``,
    which partitions the data around the value of the middle element.
    """

    def sort(self, data):
        """Return a sorted copy of *data*; raise TypeError if data is None."""
        if data is None:
            raise TypeError('data cannot be None')
        return self._sort(data)

    def _sort(self, data):
        # Zero or one element: already sorted.
        if len(data) < 2:
            return data
        pivot_value = data[len(data) // 2]
        # Three-way partition around the pivot value.
        smaller = [item for item in data if item < pivot_value]
        same = [item for item in data if item == pivot_value]
        larger = [item for item in data if item > pivot_value]
        # Recursively sort each side and stitch the pieces back together.
        return self._sort(smaller) + same + self._sort(larger)
# -
# ## Unit Test
#
#
# +
# %%writefile test_quick_sort.py
import unittest
class TestQuickSort(unittest.TestCase):
    """Exercise QuickSort on None, empty, single-element and general input."""

    def test_quick_sort(self):
        sorter = QuickSort()
        print('None input')
        self.assertRaises(TypeError, sorter.sort, None)
        print('Empty input')
        self.assertEqual(sorter.sort([]), [])
        print('One element')
        self.assertEqual(sorter.sort([5]), [5])
        print('Two or more elements')
        values = [5, 1, 7, 2, 6, -3, 5, 7, -1]
        self.assertEqual(sorter.sort(values), sorted(values))
        print('Success: test_quick_sort\n')


def main():
    """Entry point used by the trailing `%run -i` notebook cell."""
    suite = TestQuickSort()
    suite.test_quick_sort()


if __name__ == '__main__':
    main()
# -
# %run -i test_quick_sort.py
| sorting_searching/quick_sort/quick_sort_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:altair]
# language: python
# name: conda-env-altair-py
# ---
# ## ERDDAP Moonflower Daily Plots
# +
from erddapy import ERDDAP
import pandas as pd
import datetime
# for secondary/derived parameters
from metpy.units import units
import metpy.calc as mpcalc
# +
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import cmocean
from matplotlib.dates import YearLocator, WeekdayLocator, MonthLocator, DayLocator, HourLocator, DateFormatter
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from matplotlib.ticker import NullFormatter # useful for `logit` scale
# -
### specify primary bulk figure parameters
fontsize = 20
labelsize = 16
#plotstyle = 'seaborn'
max_xticks = 10
plt.style.use('seaborn-ticks')
# TrueType (Type 42) fonts and text-as-text SVG keep the exported figures
# editable in external tools (Illustrator/Inkscape) instead of Type 3 paths.
mpl.rcParams['svg.fonttype'] = 'none'
mpl.rcParams['ps.fonttype'] = 42 #truetype/type2 fonts instead of type3
mpl.rcParams['pdf.fonttype'] = 42 #truetype/type2 fonts instead of type3
mpl.rcParams['axes.grid'] = False
mpl.rcParams['axes.edgecolor'] = 'black'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.labelcolor'] = 'black'
# FIX: 'grid.linestyle' was assigned twice in a row in the original; once is enough.
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['xtick.major.size'] = 4
mpl.rcParams['xtick.minor.size'] = 1
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 4
mpl.rcParams['ytick.minor.size'] = 1
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['xtick.direction'] = 'out'
mpl.rcParams['ytick.color'] = 'black'
mpl.rcParams['xtick.color'] = 'black'
# local ERDDAP server (Raspberry Pi on the LAN)
server_url = 'http://raspberrypi.local:8080/erddap'
# +
# All tabledap dataset ids to pull: Tempest weather station, several
# ThingSpeak channels, the office Raspberry Pi logger and the PurpleAir sensor.
alldatasets=['tempest_moonflower_wx',
             'channel_1314759_thingspeak',
             'channel_1027974_thingspeak',
             'channel_1037066_thingspeak',
             'channel_1047747_thingspeak',
             'channel_843357_thingspeak',
             'channel_rpi',
             'MFPurpleAir_Primary_RT']
# absolute time window: last 21 days, aligned to 00:00 UTC
constraints = {
    'time>=': (datetime.datetime.utcnow()-datetime.timedelta(days=21)).strftime('%Y-%m-%dT00:00:00Z'),
    'time<=': datetime.datetime.utcnow().strftime('%Y-%m-%dT00:00:00Z'),
}
#>0.9.0
# relative-constraint syntax (requires erddapy > 0.9.0)
relative_constraints = {'time>': 'now-1days'}
df_all = {}
for dataset_id in alldatasets:
    try:
        d = ERDDAP(server=server_url,
                   protocol='tabledap',
                   response='csv'
                   )
        d.dataset_id=dataset_id
        d.constraints=constraints
        d.relative_constraints=relative_constraints
        print(f'url - {d.get_download_url()} + \'&time>now-1hour\'')
    except:
        # NOTE(review): bare except hides the real error; consider catching
        # Exception and logging it.
        print('Failed to generate url {}'.format(dataset_id))
    try:
        df_m = d.to_pandas(
            index_col='time (UTC)',
            parse_dates=True,
            skiprows=(1,) # units information can be dropped.
        )
        df_m.sort_index(inplace=True)
        # keep only the variable name, dropping the units suffix of each column
        df_m.columns = [x[1].split()[0] for x in enumerate(df_m.columns)]
    except:
        print(f"something failed in data download {dataset_id}")
        pass
    #stats are all utc driven - but we really want local daily values
    if not dataset_id in ['channel_rpi']:
        df_m=df_m.tz_convert('US/Pacific')
    else:
        pass
        # NOTE(review): the rpi channel's timestamps are presumably already
        # local; confirm this convert/strip/relocalize sequence is intentional.
        df_m=df_m.tz_convert('US/Pacific')
        df_m=df_m.tz_convert(None).tz_localize(tz='US/Pacific')
    df_all.update({dataset_id:df_m})
# calculations of various parameters... metpy?
# HDD/CDD, dewpointTemp
# +
import altair as alt
### own colormap
import palettable
alt.data_transformers.disable_max_rows()
# -
# PurpleAir reports temperature in Fahrenheit; derive a Celsius column so it
# can be compared with the other (metric) sensors.
df_all['MFPurpleAir_Primary_RT']['Temperature_C'] =( df_all['MFPurpleAir_Primary_RT']['Temperature_F']- 32)*5/9
dfsub=None
# Merge every sensor into one frame with descriptive column names and
# average everything onto a common 15-minute grid.
dfsub = pd.concat([df_all['tempest_moonflower_wx'].rename(columns={'temperature': 'Tempest_Temperature', 'humidity': 'Tempest_Humidity'}),
                   df_all['channel_1314759_thingspeak'].rename(columns={'temperature': 'GreenHouseTemp', 'RH_Percent': 'GreenHouseHumidity'}),
                   df_all['channel_1027974_thingspeak'].rename(columns={'temperature': 'TysonsRoomTemp'}),
                   df_all['channel_843357_thingspeak'].rename(columns={'Barotemperature': 'ShopTemp'}),
                   df_all['channel_1037066_thingspeak'].rename(columns={'temperature': 'CellarTemp', 'RH_Percent': 'CellarHumidity'}),
                   df_all['channel_1047747_thingspeak'].rename(columns={'temperature_internal': 'DuckBarnTemp_Internal',
                                                                        'temperature_external': 'DuckBarnTemp_External',
                                                                        'RH_Percent': 'Duckbarn_Humidity'}),
                   df_all['channel_rpi'].rename(columns={'temperature': 'OfficeRoomTemp', 'humidity': 'OfficeRoom_Humidity'}),
                   #df_all['MFPurpleAir_Primary_RT'].rename(columns={'Temperature_C': 'PA_Temp_C', 'Humidity': 'PA_Humidity'})
                   ]).resample('15T').mean()
# +
# Interactive time-series of all temperature sensors: clicking a legend entry
# highlights that series (legend-bound single selection).
selector = alt.selection_single(
    fields=['key'],
    empty='all',
    bind='legend'
)
area1 = alt.Chart(dfsub.reset_index()).transform_fold(
    ['DuckBarnTemp_Internal','DuckBarnTemp_External','TysonsRoomTemp','duckTd','ShopTemp',
     'CellarTemp','OfficeRoomTemp','Tempest_Temperature','PA_Temp_C','GreenHouseTemp']
).mark_line(clip=True
).encode(
    alt.X('time (UTC):T'),
    alt.Y('value:Q'),
    alt.Color('key:N'),
    # non-selected series become fully transparent
    opacity=alt.condition(selector, alt.value(1), alt.value(0))
).add_selection(
    selector
).properties(
    width=750,
    height=150
).interactive()
area1
# + jupyter={"source_hidden": true}
f1 = alt.Chart(dfsub.reset_index()).mark_rect().encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day - inside duck barn'),
alt.Y('monthdate(time (UTC)):O', title='date'),
alt.Color('DuckBarnTemp_Internal:Q', title='temperature (C)', scale=alt.Scale(range=palettable.cmocean.sequential.Thermal_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','DuckBarnTemp_Internal:Q']
).properties(
width=900,
height=200
)
f2 = f1.encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day - outside duck barn'),
alt.Color('DuckBarnTemp_External:Q', title='temperature (C)', scale=alt.Scale(range=palettable.cmocean.sequential.Thermal_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','DuckBarnTemp_External:Q']
)
f3 = f1.encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day - Cellar'),
alt.Color('CellarTemp:Q', title='temperature (C)', scale=alt.Scale(range=palettable.cmocean.sequential.Thermal_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','CellarTemp:Q']
)
f4 = f1.encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day - Shop'),
alt.Color('ShopTemp:Q', title='temperature (C)', scale=alt.Scale(range=palettable.cmocean.sequential.Thermal_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','ShopTemp:Q']
)
f5 = f1.encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day - TysonsRoom'),
alt.Color('TysonsRoomTemp:Q', title='temperature (C)', scale=alt.Scale(range=palettable.cmocean.sequential.Thermal_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','TysonsRoomTemp:Q']
)
f6 = f1.encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day - Tempest Temp'),
alt.Color('Tempest_Temperature:Q', title='temperature (C)', scale=alt.Scale(range=palettable.cmocean.sequential.Thermal_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','Tempest_Temperature:Q']
)
f7 = f1.encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day - Office Temp'),
alt.Color('OfficeRoomTemp:Q', title='temperature (C)', scale=alt.Scale(range=palettable.cmocean.sequential.Thermal_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','OfficeRoomTemp:Q']
)
(f1 & f2 & f3 & f4 & f5 & f6 & f7)
# +
selector = alt.selection_single(
fields=['key'],
empty='all',
bind='legend'
)
area1 = alt.Chart(dfsub.reset_index()).transform_fold(
['Duckbarn_Humidity','CellarHumidity','OfficeRoom_Humidity','Tempest_Humidity','PA_Humidity','GreenHouseHumidity']
).mark_line(clip=True
).encode(
alt.X('time (UTC):T'),
alt.Y('value:Q'),
alt.Color('key:N'),
opacity=alt.condition(selector, alt.value(1), alt.value(0))
).add_selection(
selector
).properties(
width=750,
height=150
).interactive()
area1
# +
f3 = alt.Chart(dfsub.reset_index()).mark_rect().encode(
alt.X('hoursminutes(time (UTC)):O', title='hour of day'),
alt.Y('monthdate(time (UTC)):O', title='date'),
alt.Color('Duckbarn_Humidity:Q', title='humidity (%)', scale=alt.Scale(domain=(50,100),range=palettable.cmocean.sequential.Algae_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','Duckbarn_Humidity:Q']
).properties(
width=900,
height=200
)
f4 = f3.encode(
alt.Color('CellarHumidity:Q', title='humidity (%)', scale=alt.Scale(domain=(50,100),range=palettable.cmocean.sequential.Algae_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','CellarHumidity:Q']
)
f5 = f3.encode(
alt.Color('OfficeRoom_Humidity:Q', title='humidity (%)', scale=alt.Scale(domain=(50,100),range=palettable.cmocean.sequential.Algae_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','OfficeRoom_Humidity:Q']
)
f6 = f3.encode(
alt.Color('Tempest_Humidity:Q', title='humidity (%)', scale=alt.Scale(domain=(50,100),range=palettable.cmocean.sequential.Algae_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','Tempest_Humidity:Q']
)
f7 = f3.encode(
alt.Color('PA_Humidity:Q', title='humidity (%)', scale=alt.Scale(domain=(50,100),range=palettable.cmocean.sequential.Algae_20.hex_colors)),
tooltip=['hoursminutes(time (UTC)):O','monthdate(time (UTC)):O','PA_Humidity:Q']
)
f3 & f4 & f5 & f6 & f7
# -
# ## 1:1 Plots
#
# data at 5?15?30m intervals.
# Use Tempest as PRIMARY Baseline for outdoor instruments
| swbell/ThingSpeak_Wx/MoonFlowerPlots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Fitting a Bernoulli mixture model with known number of groups
# Learning algorithm: Maximum likelihood using Expectation Maximization (EM) algorithm
# +
import os, pickle
import numpy as np
# NOTE: `sum` here shadows the builtin with numpy's sum; kept because the
# notebook relies on it below (e.g. summing arrays elementwise).
from numpy import log, sum, exp, prod
from numpy.random import beta, binomial, dirichlet, uniform, gamma, seed, multinomial, gumbel, rand
# The `imp` module is deprecated and removed in Python 3.12;
# importlib.reload is the supported replacement with the same signature.
from importlib import reload
import matplotlib.pyplot as plt
from copy import deepcopy
# Machine-specific working directory so `bernmix` resolves as a local package.
# (The original string had a stray third backslash — "Documents\\\Github" —
# which produced a doubled path separator.)
os.chdir("C:\\Users\\Alexander\\Documents\\Github\\bmm_mix")
from bernmix.utils import bmm_utils as bmm
# -
# Simulate data from data generating process:
# +
# Draw ground-truth parameters and simulate data from the Bernoulli mixture.
#seed(12)

N = 10**4  # number of observations
K = 3      # number of mixture components
D = 50     # dimensions / number of features

# True states of unknown parameters:
alphas = gamma(shape=5, size=K)  # shape parameter for the Dirichlet draw below
p_true = dirichlet(alpha = alphas, size = 1)[0]  # mixture weights (sum to 1)
theta_true = beta(a = .7, b = .9, size = K*D).reshape(D,K)  # success probabilities for Bernoulli distributions
# +
X, Z = bmm.sample_bmm(N, p_true, theta_true)  # Generate data from mixture model
print(Z.shape)  # component assignments
print(X.shape)  # binary observations (N x D)
# -
# Set initial values for parameters:
# Run EM algorithm:
# +
D = X.shape[1]  # re-derive the feature count from the simulated data
alphas = gamma(shape=5, size=K)  # shape parameters for the random init below
p_0 = dirichlet(alpha = alphas, size = 1)[0]  # random initial mixture weights
#p_0 = np.array([1/K]*K) # flat prior alternative
theta_0 = beta(a = .7, b = 1.1, size = K*D).reshape(D,K)  # random initial Bernoulli parameters
# Fit by EM; returns the loglikelihood trace and the final ML estimates.
logli, p_em, theta_em = bmm.mixture_EM(X = X, p_0 = p_0, theta_0 = theta_0, n_iter = 200, stopcrit = 10**(-3))
# -
# Plot loglikelihood function:
# +
# Convergence diagnostic: the EM loglikelihood should be non-decreasing
# (assuming mixture_EM implements standard EM — confirm in bmm_utils).
burn_in = 5  # leave out burn-in period for nicer plotting
plt.plot(logli[burn_in:], 'b--')
plt.title("Convergence check")
plt.xlabel('iterations')
plt.ylabel('loglike.')
plt.show()
# -
# Compare estimates with true parameters (labels may be permuted — mixture
# components are only identified up to relabelling):
print(p_em)
print(p_true)
print(theta_em)
print(theta_true)
| .ipynb_checkpoints/EM_for_BMM-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## AmExpert 2021 – Machine Learning Hackathon
# + [markdown] heading_collapsed=true
# ### Problem Statement
# * A mid-sized private bank that includes a variety of banking products, such as savings accounts, current accounts, investment products, credit products, and home loans.
# * The task is to predict the next set of products (up to 3 products) for a set of customers (test data) based on their demographics and current product holdings.
# + [markdown] hidden=true
# **Project Flow**
# * Importing the data
# * EDA
# * Feature Engineering / Data Preparation
# * Splitting the data to train and test
# * Feature Scaling
# * Model Building
# * Model Evaluation
# * Prediction on Given Test Set
# -
# ### Importing the libraries
# +
# Required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from ast import literal_eval  # parses the stringified product-holding lists in the CSVs
# Data Preprocessing
from sklearn.preprocessing import LabelEncoder, MultiLabelBinarizer
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold
# Model building
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# NOTE(review): mixing standalone `keras.*` with `tensorflow.keras` imports
# can load two different Keras installations — prefer a single namespace.
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import EarlyStopping
# Model Evaluation
from sklearn.metrics import multilabel_confusion_matrix, precision_score, average_precision_score
from sklearn.metrics import accuracy_score, f1_score, hamming_loss
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2
# (ConfusionMatrixDisplay replaces it) — this import pins an older sklearn.
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings("ignore")  # silence library warnings in notebook output
# + [markdown] heading_collapsed=true
# ### Data Exploration
# + hidden=true
# Loading the train & test datasets
# convert Target Variables to list while loading the csv using literal_eval
# (the product-holding columns are stored as stringified Python lists).
train_df = pd.read_csv('train_go05W65.csv', converters={"Product_Holding_B1": literal_eval, "Product_Holding_B2": literal_eval})
test_df = pd.read_csv('test_VkM91FT.csv', converters={"Product_Holding_B1": literal_eval})
sample_submission = pd.read_csv('sample_submission_kF044ur.csv')
# + hidden=true
# Training set
train_df.head()
# + hidden=true
# Testing set
test_df.head()
# + hidden=true
train_df.info() # There are no null values in the dataset
# + hidden=true
train_df.describe() # There are no outliers as the mean & median are almost similar
# + hidden=true
# Analysis of categorical data
train_df.describe(include = 'object')
# + [markdown] heading_collapsed=true
# ### EDA
# + hidden=true
# Visualising the Gender Count (bar chart + share pie)
fig, ax = plt.subplots(1, 2, figsize=(10, 8))
sns.countplot(train_df['Gender'], ax=ax[0])
ax[1].pie(train_df['Gender'].value_counts(), labels=['Male', 'Female'], autopct="%.0f%%")
fig.show()
# + hidden=true
# Visualising the City_Category
fig, ax = plt.subplots(1,2, figsize = (10,8))
sns.countplot(train_df['City_Category'], ax=ax[0])
ax[1].pie(train_df['City_Category'].value_counts(), labels=['C1', 'C2'], autopct="%.0f%%")
fig.show()
# + hidden=true
# Visualising the Customer_Category
fig, ax = plt.subplots(1,2, figsize = (10,8))
sns.countplot(train_df['Customer_Category'], ax=ax[0])
ax[1].pie(train_df['Customer_Category'].value_counts(), labels=['S1', 'S2', 'S3'], autopct="%.0f%%")
fig.show()
# + hidden=true
# Visualising the Is_Active flag
fig, ax = plt.subplots(1,2, figsize = (10,8))
sns.countplot(train_df['Is_Active'], ax=ax[0])
ax[1].pie(train_df['Is_Active'].value_counts(), labels=['0','1'], autopct="%.0f%%")
fig.show()
# + hidden=true
# Plot the Age histogram
sns.histplot(data=train_df, x='Age', kde=True)
# + hidden=true
# Plot the Vintage histogram
sns.histplot(data=train_df, x='Vintage', kde=True)
plt.show()
# + hidden=true
# Group by Gender and plot the mean Age
# NOTE(review): pandas >= 2.0 requires mean(numeric_only=True) on frames with
# mixed dtypes — confirm the pinned pandas version.
gender_data = train_df.groupby('Gender').mean().reset_index()
sns.barplot(x=gender_data['Gender'], y=gender_data['Age'])
# + [markdown] hidden=true
# **Both genders have a similar mean Age.**
# + hidden=true
# Group by Gender and plot the mean Vintage
sns.barplot(x=gender_data['Gender'], y=gender_data['Vintage'])
# + [markdown] hidden=true
# **Both genders show a similar mean Vintage.**
# + hidden=true
# Insights across City Category
train_df.groupby('City_Category').mean()
# + [markdown] hidden=true
# **Almost equal distributions of Age, Vintage and Is_Active observed across both the cities.**
# + hidden=true
# Insights across Customer_Category
train_df.groupby('Customer_Category').mean()
# + [markdown] hidden=true
# **Almost equal distributions of Age, Vintage and Is_Active observed across all Customer Categories.**
# + hidden=true
# Using Multi label Binarizer for converting Product_Holding_B1 column into
# one indicator column per product.
mlb = MultiLabelBinarizer(sparse_output=False)
out_train = mlb.fit_transform(train_df['Product_Holding_B1'])
# + hidden=true
# Adding the converted columns to train dataset
train_df = train_df.join(pd.DataFrame(out_train, columns=mlb.classes_))
train_df.head()
# + hidden=true
# Products 13, 16 & 17 are bought more
plt.figure(figsize=(20,10))
sns.barplot(x=mlb.classes_, y=train_df[mlb.classes_].sum())
# + hidden=true
# Count of products held per customer.
# NOTE(review): iloc[:, 9:] assumes the first 9 columns are the raw features
# and everything after is the joined product indicators — verify.
train_df.iloc[:, 9:].sum(axis=1).value_counts()
# + hidden=true
# We can see that combination of 1,2 & 3 products are bought more by the customers
plt.figure(figsize=(12, 5))
sns.barplot(x=[1, 2, 3, 4, 5, 6, 7, 8],y=train_df.iloc[:, 9:].sum(axis=1).value_counts())
# + hidden=true
# Target variable
# Converting Product_Holding_B2 column into numeric
mlb2 = MultiLabelBinarizer(sparse_output=False)
out_train_y = mlb2.fit_transform(train_df['Product_Holding_B2'])
# + hidden=true
# Creating different dataframe for target variables
target = pd.DataFrame(out_train_y, columns = mlb2.classes_)
target.head()
# + hidden=true
# Product 13, 16 & 17 are highly bought
plt.figure(figsize=(16,10))
sns.barplot(x=mlb2.classes_, y=target[mlb2.classes_].sum())
# + [markdown] heading_collapsed=true
# ### Feature engineering/Data Preparation
# + hidden=true
# Columns with object datatype
columns_cat = train_df.select_dtypes(include='object')
columns_cat.head()
# + hidden=true
# Converting categorical columns to numeric (one-hot, dropping one level each)
train_df = pd.get_dummies(train_df, columns=['Gender', 'City_Category', 'Customer_Category'], drop_first=True)
# + hidden=true
# Removing columns which are not required (IDs carry no signal)
train_df = train_df.drop('Customer_ID', axis=1)
test_df = test_df.drop('Customer_ID', axis=1)
# + hidden=true
# Removing Product_Holding_B1 & Product_Holding_B2 columns
# (their binarized versions were already joined in / split into `target`)
train_df = train_df.drop('Product_Holding_B1', axis=1)
train_df = train_df.drop('Product_Holding_B2', axis=1)
# + hidden=true
train_df.head()
# + hidden=true
test_df.head()
# + hidden=true
# Preparing test dataset
# Converting categorical columns to numeric
test_df = pd.get_dummies(test_df, columns=['Gender','City_Category','Customer_Category'], drop_first= True)
# Converting Product_Holding_B1 column into numeric
# NOTE(review): this fits a *new* MultiLabelBinarizer on the test set instead
# of reusing the one fitted on train — if any product is absent from the test
# set the train/test column layouts will diverge. Confirm intent.
mlb = MultiLabelBinarizer(sparse_output=False)
test_df = test_df.join(pd.DataFrame(mlb.fit_transform(test_df['Product_Holding_B1']), columns = mlb.classes_))
# Removing Product_Holding_B1 columns
test_df = test_df.drop('Product_Holding_B1', axis=1)
# + hidden=true
test_df.head()
# + [markdown] heading_collapsed=true
# ### Spliting the data
# + hidden=true
# Creating X & Y variables
# X: engineered features (including the binarized current holdings);
# y: multi-label target built from Product_Holding_B2.
X= train_df
y = target
# + hidden=true
# Split the data into 80% train & 20% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)
# + hidden=true
# Checking shape of train & test
print("X_train = {}".format(X_train.shape))
print("X_test = {}".format(X_test.shape))
print("y_train = {}".format(y_train.shape))
print("y_test = {}".format(y_test.shape))
# + [markdown] heading_collapsed=true
# ### Scaling the data
# + hidden=true
# Scaling continuous columns (Age & Vintage) of train & test by using Normalization
# NOTE(review): this scales train_df/test_df *after* X_train/X_test were
# created above; train_test_split returns copies, so the matrices the models
# are actually fit on remain unscaled — confirm whether that is intended.
col_scale = ['Age', 'Vintage']
for col in col_scale:
    # Fit the min/max on the training column, apply the same mapping to test.
    scale = MinMaxScaler()
    train_df[col] = scale.fit_transform(train_df[col].values.reshape(-1, 1))
    test_df[col] = scale.transform(test_df[col].values.reshape(-1, 1))
# -
# ### Model Building
# ##### Logistic
# + code_folding=[]
# Model Building: one independent logistic regression per target label.
lr = LogisticRegression()
model_lr = MultiOutputClassifier(lr)
model_lr.fit(X_train, y_train)
# -
# Make Predictions and compute the precision at K
y_pred_prob_lr = model_lr.predict_proba(X_test)  # per-label probability pairs
y_pred_lr = model_lr.predict(X_test)             # hard 0/1 label matrix
# Metrics
print('Accuracy : {}'.format(accuracy_score(y_test, y_pred_lr)))
print('Hamming loss : {}'.format(hamming_loss(y_test, y_pred_lr)))
print('Average precision score : {}'.format(average_precision_score(y_test, y_pred_lr, average='weighted')))
# ##### XGBoost
# Model Building: one XGBoost classifier per target label.
# NOTE(review): `eta` is XGBoost's alias for `learning_rate`; both are set
# here (0.01 vs 0.1) — confirm which value is actually intended.
xg = XGBClassifier(n_estimators=100,
                   max_depth=5,
                   random_state=10,
                   eta=0.01,
                   gamma=0.5,
                   learning_rate=0.1)
model_xg = MultiOutputClassifier(estimator=xg)
model_xg.fit(X_train, y_train)
# Make Predictions and compute the precision at K
y_pred_prob_xg = model_xg.predict_proba(X_test)
y_pred_xg = model_xg.predict(X_test)
# Metrics
print('Accuracy : {}'.format(accuracy_score(y_test, y_pred_xg)))
print('Hamming loss : {}'.format(hamming_loss(y_test, y_pred_xg)))
print('Average precision score : {}'.format(average_precision_score(y_test, y_pred_xg, average='weighted')))
# ##### Catboost
# Model Building: one CatBoost classifier per target label.
# iterations=10 keeps training quick for this comparison run.
cat = CatBoostClassifier(iterations=10, random_state=10)
model_cat = MultiOutputClassifier(estimator=cat)
model_cat.fit(X_train, y_train)
# Make Predictions and compute the precision at K
y_pred_prob_cat = model_cat.predict_proba(X_test)
y_pred_cat = model_cat.predict(X_test)
# Metrics
print('Accuracy : {}'.format(accuracy_score(y_test, y_pred_cat)))
print('Hamming loss : {}'.format(hamming_loss(y_test, y_pred_cat)))
print('Average precision score : {}'.format(average_precision_score(y_test, y_pred_cat, average='weighted')))
# ##### ANN Model
# +
# Neural Network (NN)
# Creating the Model: 32 -> 16 -> 8 dense ReLU layers, with one sigmoid
# output per target label (independent per-label probabilities).
model = Sequential()
model.add(Dense(units=32, activation='relu',
                kernel_initializer='uniform', input_shape=[X_train.shape[1]]))
model.add(Dense(units=16, activation='relu', kernel_initializer='normal'))
model.add(Dense(units=8, activation='relu', kernel_initializer='normal'))
model.add(Dense(units=target.shape[1], activation='sigmoid'))
# +
# Model optimization.
# Loss : binary cross-entropy treats each output label as its own
#        Bernoulli problem — the natural choice for multi-label targets.
# Optimizer : Using Adam for a faster learning.
# Compile the model and train it with the training data.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(X_train,
                    y_train,
                    validation_data=(X_test, y_test),
                    epochs=200,
                    verbose=0,
                    batch_size=32)
# +
# Plot the results of training (accuracy and loss per epoch).
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['loss'], label='loss')
plt.xlabel('epoch number')
plt.ylabel('quantity')
plt.legend()
plt.show()
# Plot the results of testing (validation curves per epoch).
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.plot(history.history['val_loss'], label='val_loss')
plt.xlabel('epoch number')
plt.ylabel('quantity')
plt.legend()
plt.show()
# -
# Make Predictions and compute the precision at K
# NOTE(review): Sequential.predict_proba is deprecated/removed in recent
# TensorFlow releases — model.predict returns the same sigmoid scores here.
y_pred_prob_ANN = model.predict_proba(X_test)
y_pred_ANN = model.predict(X_test)
# Metrics
# Accuracy/hamming loss are left disabled: the network emits continuous
# scores, so those metrics would first need a 0/1 threshold.
#print('Accuracy : {}'.format(accuracy_score(y_test, y_pred_ANN)))
#print('Hamming loss : {}'.format(hamming_loss(y_test, y_pred_ANN)))
print('Average precision score : {}'.format(average_precision_score(y_test, y_pred_ANN, average='weighted')))
# +
#compare_score =
# -
# **Going ahead with ANN model as it is giving highest Average precision score.**
# ### Prediction
# #### Predictions on training
# +
# creation of dictionary for predicting Y variable
# (iterating a DataFrame yields its column names, so this maps
#  output-column index -> product label)
int_char = dict(enumerate(y_train))

prediction = []
for row in range(len(X_test)):
    # np.argsort ranks ascending, so the last entries are the highest probabilities
    rank = np.argsort(y_pred_prob_ANN[row])
    # -> pick the top 3 probabilities
    # (the original `([i for i in rank])[-3:]` was a redundant copy whose
    #  comprehension variable shadowed the outer loop index)
    top_3 = rank[-3:]
    prediction.append([int_char[col] for col in top_3])
# -
prediction
# #### Predictions on test data
#### Predictions on test data
pred_test = model.predict_proba(test_df)
prediction_test = []
for row in range(len(test_df)):
    # np.argsort ranks ascending, so the last entries are the highest probabilities
    rank = np.argsort(pred_test[row])
    # -> pick the top 3 probabilities
    # (same cleanup as the training-set loop: drop the redundant list copy
    #  and the shadowed comprehension variable)
    top_3 = rank[-3:]
    prediction_test.append([int_char[col] for col in top_3])
prediction_test
| Hackathon - American Express - Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 012
#
#
# H 行 W 列のマス目があり、上から i 行目・左から j 列目のマスを (i, j) と表します。
#
# 最初すべてのマスは白いです。次の Q 個のクエリを順に処理してください。
#
# 《type[i] = 1 のとき》
#
# ・整数 x, y が与えられる
#
# ・元々白かったマス (x, y) が赤く塗られる
#
# 《type[i] = 2 のとき》
#
# ・整数 xa, ya, xb, yb が与えられる
#
# ・マス (xa, ya) からマス (xb, yb) まで上下左右に隣り合うマスを介して移動し、赤マスのみを通って辿り着ける場合は "Yes"、そうでなければ "No" と出力する。
#
# 【制約】
#
# ・1 ≦ H, W ≦ 2000
#
# ・1 ≦ Q ≦ 100000
#
# ・1 ≦ type[i] ≦ 2
#
# ・type[i] = 1 のとき、1 ≦ x ≦ H、1 ≦ y ≦ W
#
# ・type[i] = 2 のとき、1 ≦ xa, xb ≦ H、1 ≦ ya, yb ≦ W
#
#
# ### 入力形式
# H W
# Q
#
# (1 個目のクエリ)
#
# (2 個目のクエリ)
#
# :
#
# (Q 個目のクエリ)
#
#
# ### クエリについて
# T[i] = 1 の場合
#
# 1 x y
#
# T[i] = 2 の場合
#
# 2 xa ya xb yb
#
# といった形で入力が与えられます。
# +
# 入力例 1
3 3
10
1 2 2
1 1 1
2 1 1 2 2
1 3 2
2 1 1 2 2
2 2 2 3 2
1 2 3
1 2 1
2 1 1 2 2
2 1 1 3 3
# 出力例 1
No
No
Yes
Yes
No
# +
# 入力例 2
1 1
3
2 1 1 1 1
1 1 1
2 1 1 1 1
# 出力例 2
No
Yes
※そもそもマス (xa, ya) や (xb, yb) が塗られていない場合に注意してください。
# -
| 012_problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1.0 Import Function
# !pip install yfinance
# ## 2.0 Setup
from META_TOOLBOX import *   # metaheuristics toolbox (supplies FA_ALGORITHM_0001, GAMMA_ASSEMBLY, META_PLOT_* used below)
import BOLSA_NEW as BOVESPA  # Bovespa/B3 market-data helpers
# Tickers and the one-year window of adjusted prices to download.
ACOES = ['ITSA4.SA', 'VALE3.SA', 'GGBR4.SA', 'LREN3.SA', 'JBSS3.SA']
DATA_INICIAL = '2019-02-01'
DATA_FINAL = '2020-02-01'
DADOS_BOLSA ={'ID ACAO':ACOES, 'DATA INICIAL':DATA_INICIAL, 'DATA FINAL':DATA_FINAL}
DADOS_GERAIS, RETORNOS, COVARIANCIA = BOVESPA.DADOS_BOLSA_PRECO_AJUSTADO(DADOS_BOLSA)
# Heatmap of the covariance matrix of returns.
OPCOES_GRAF={'ANNOT':True, 'LINEWIDTHS':20, 'FMT':'.4'}
BOVESPA.BOLSA_PLOT_001(COVARIANCIA,OPCOES_GRAF)
# Firefly-algorithm settings: 20 repetitions x 50 iterations with 20
# fireflies; one portfolio weight per asset, bounded to [0, 1].
N_REP = 20
N_ITER = 50
N_POP = 20
D = 5
X_L = [0.00] * D
X_U = [1.00] * D
M = 2
GAMMA = GAMMA_ASSEMBLY(X_L, X_U, D, M)
# Extra data forwarded to the objective function on every evaluation.
NULL_DIC = {'COVARIANCIA': COVARIANCIA, 'RETORNOS DIÁRIOS': RETORNOS}
SETUP_FA = {
    'N_REP': N_REP,
    'N_ITER': N_ITER,
    'N_POP': N_POP,
    'D': D,
    'X_L': X_L,
    'X_U': X_U,
    'BETA_0': 0.98,
    'ALPHA_MIN': 0.25,
    'ALPHA_MAX': 1.00,
    'THETA': 0.95,
    'GAMMA': GAMMA,
    'NULL_DIC': NULL_DIC
}
# OBJ. Function
def OF_FUNCTION(X, NULL_DIC):
    """Markowitz portfolio objective with a quadratic budget penalty.

    X        : candidate portfolio weights, one entry per asset (bounded to
               [0, 1] by the optimizer's X_L/X_U setup).
    NULL_DIC : dict carrying the covariance matrix ('COVARIANCIA') and the
               daily returns ('RETORNOS DIÁRIOS').
    Returns the penalized objective value (lower is better).
    """
    DADOS_COV = NULL_DIC['COVARIANCIA']
    DADOS_RETORNO = NULL_DIC['RETORNOS DIÁRIOS']
    LAMBDA = 0.50  # risk/return trade-off passed to FO_MARKOWITZ
    OF = BOVESPA.FO_MARKOWITZ(X, DADOS_COV, DADOS_RETORNO, LAMBDA)
    # Budget-constraint violation: the weights should sum to one.
    # (np.abs(sum(X)) - 1 equals sum(X) - 1 for the non-negative weights
    # used here; the original form is preserved for exactness.)
    H = np.abs(sum(X)) - 1
    # The original loop added the identical penalty once per dimension
    # without using the loop index; the single expression below makes that
    # multiplicity explicit while producing the same total.
    OF += len(X) * (H ** 2) * 1E6
    return OF
# ## 4.0 Example
# Run the Firefly Algorithm: N_REP independent repetitions of the optimizer.
[RESULTS_REP, BEST_REP, AVERAGE_REP, WORST_REP, STATUS] = FA_ALGORITHM_0001(OF_FUNCTION, SETUP_FA)
# Index of the repetition that achieved the best objective value.
BEST_REP_ID = STATUS[0]
BEST_REP_ID
# Convergence histories of the winning repetition.
BEST = BEST_REP[BEST_REP_ID]
AVERAGE = AVERAGE_REP[BEST_REP_ID]
WORST = WORST_REP[BEST_REP_ID]
# Plot 1: objective value and fitness vs. number of evaluations.
# NOTE(review): the '$W (kN) $' axis label looks copied from a structural
# optimization example — for this portfolio objective it is misleading.
PLOT_SETUP = {
    'NAME': 'WANDER-OF',
    'WIDTH': 0.40,
    'HEIGHT': 0.20,
    'DPI': 600,
    'EXTENSION': '.svg',
    'COLOR OF': '#000000',
    'MARKER OF': 's',
    'COLOR FIT': '#000000',
    'MARKER FIT': 's',
    'MARKER SIZE': 6,
    'LINE WIDTH': 4,
    'LINE STYLE': '--',
    'OF AXIS LABEL': '$W (kN) $',
    'X AXIS LABEL': 'Number of objective function evaluations',
    'LABELS SIZE': 14,
    'LABELS COLOR': '#000000',
    'X AXIS SIZE': 14,
    'Y AXIS SIZE': 14,
    'AXISES COLOR': '#000000',
    'ON GRID?': True,
    'Y LOG': True,
    'X LOG': True,
}
DATASET = {'X': BEST['NEOF'], 'OF': BEST['OF'], 'FIT': BEST['FIT']}
META_PLOT_001(DATASET, PLOT_SETUP)
# Plot 2: single-series convergence curve (objective only).
PLOT_SETUP = {
    'NAME': 'WANDER-OF',
    'WIDTH': 0.40,
    'HEIGHT': 0.20,
    'DPI': 600,
    'EXTENSION': '.svg',
    'COLOR': '#00BFFF',
    'MARKER': 's',
    'MARKER SIZE': 6,
    'LINE WIDTH': 4,
    'LINE STYLE': '--',
    'Y AXIS LABEL': '$Euller$',
    'X AXIS LABEL': 'Number of objective function evaluations',
    'LABELS SIZE': 14,
    'LABELS COLOR': '#000000',
    'X AXIS SIZE': 14,
    'Y AXIS SIZE': 14,
    'AXISES COLOR': '#000000',
    'ON GRID?': True,
    'Y LOG': True,
    'X LOG': True,
}
DATASET = {'X': BEST['NEOF'], 'Y': BEST['OF']}
META_PLOT_002(DATASET, PLOT_SETUP)
# Plot 3: best / average / worst convergence curves on one figure.
PLOT_SETUP = {
    'NAME': 'WANDER-OF',
    'WIDTH': 0.40,
    'HEIGHT': 0.20,
    'DPI': 600,
    'EXTENSION': '.svg',
    'COLOR BEST': '#00008B',
    'COLOR WORST': '#000000',
    'COLOR AVERAGE': '#ffcbdb',
    'MARKER': 's',
    'MARKER SIZE': 6,
    'LINE WIDTH': 4,
    'LINE STYLE': '--',
    'Y AXIS LABEL': '$W (kN) $',
    'X AXIS LABEL': 'Number of objective function evaluations',
    'LABELS SIZE': 14,
    'LABELS COLOR': '#000000',
    'X AXIS SIZE': 14,
    'Y AXIS SIZE': 14,
    'AXISES COLOR': '#000000',
    'ON GRID?': True,
    'LOC LEGEND': 'upper right',
    'SIZE LEGEND': 12,
    'Y LOG': True,
    'X LOG': True
}
DATASET = {'X': BEST['NEOF'], 'BEST': BEST['OF'], 'AVERAGE': AVERAGE['OF'], 'WORST': WORST['OF']}
META_PLOT_003(DATASET, PLOT_SETUP)
# Plot 4: histogram of the best objective value across all repetitions.
PLOT_SETUP = {
    'NAME': 'WANDER-OF',
    'WIDTH': 0.40,
    'HEIGHT': 0.20,
    'DPI': 600,
    'EXTENSION': '.svg',
    'MARKER': 's',
    'X AXIS LABEL': 'OF values',
    'X AXIS SIZE': 14,
    'Y AXIS SIZE': 14,
    'LABELS SIZE': 14,
    'LABELS COLOR': '#000000',
    'COLOR': '#000000',
    'AXISES COLOR': '#000000',
    'BINS': 20,
    'KDE': False,
}
DATASET = {'NUMBER OF REPETITIONS': N_REP, 'NUMBER OF ITERATIONS': N_ITER, 'OF OR FIT': 'OF', 'BEST': BEST_REP}
META_PLOT_004(DATASET, PLOT_SETUP)
| Obsoletos II/FA example2 - bolsa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Working with Probability Distributions
#
# `deeplenstronomy` has several built-in probability distributions directly callable from the configuration file. Sometimes these are enough, and sometimes not. If you find you need more flexibility than the built-in distributions, you can supply any distribution you want as a text file. This notebook will review the standard way of using the built-in distributions and then demonstrate the text file method.
#
# ## Using built-in probability distributions
# Built-in probability distributions shipped with deeplenstronomy.
from deeplenstronomy import distributions
# List the public (non-underscore) names in the distributions module.
[x for x in dir(distributions) if not x.startswith('_')]
# The standard way of using one of these distributions is to use the `DISTRIBUTION` keyword within the configuration file. Let's look at a quick example.
import deeplenstronomy.deeplenstronomy as dl
# ! cat data/demo.yaml
# At present, all of the parameters are set to constant values. This configuration file offers no variance in the resulting dataset. As an example, let's play with drawing the `exposure_time` and `magnitude` of `GALAXY_2` from distributions.
#
# We'll replace `exposure_time: 90` with
# ```
# exposure_time:
# DISTRIBUTION:
# NAME: uniform
# PARAMETERS:
# minimum: 30.0
# maximum: 300.0
# ```
# to draw the `exposure_time` from a uniform distribution on the interval [30.0, 300.0]. Similarly, we can replace `magnitude: 21.5` with
# ```
# magnitude:
# DISTRIBUTION:
# NAME: normal
# PARAMETERS:
# mean: 20.0
# std: 1.0
# ```
# to draw the `magnitude` of `GALAXY_2` from a normal distribution with mean 20.0 and standard deviation 1.0.
# I have put these updates in a new config file called "demo_distributions.yaml", removed configurations 2-4 for efficiency, and increased the number of images to simulate to better characterize the distributions.
# ! cat data/demo_distributions.yaml
# Build the simulated dataset from the modified configuration file.
dataset = dl.make_dataset("data/demo_distributions.yaml", verbose=True)
# We can verify that the distributions were used in the data set by inspecting the metadata:
exp_times = dataset.CONFIGURATION_1_metadata['exposure_time-g'].values
magnitudes = dataset.CONFIGURATION_1_metadata['PLANE_2-OBJECT_1-LIGHT_PROFILE_1-magnitude-g'].values
import matplotlib.pyplot as plt
# +
# Side-by-side histograms: exposure times (expected uniform) and
# GALAXY_2 magnitudes (expected normal).
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.hist(exp_times)
ax1.set_xlabel("Exposure Time (s)")
ax2.hist(magnitudes)
ax2.set_xlabel("GALAXY_2 Magnitude")
plt.show()
# -
# And we can see the uniform distribution of exposure time and the normal distribution of magnitude have been recovered. With the exception of a few safe-guarded parameters, any single parameter can be sampled from an underlying built-in distribution using the method above.
# ## Correlations and Non-Standard Distributions
#
# If you would like to create correlations between parameters or utilize your own empirical distribution in the construction of your dataset, you can make use of `deeplenstronomy`'s `USERDIST` feature. To use this feature, you add an entry to the configuration file that looks like this:
#
# ```
# DISTRIBUTIONS:
# USERDIST_1:
# FILENAME: data/seeing.txt
# MODE: interpolate
# STEP: 20
# USERDIST_2:
# FILENAME: data/Rsersic_magnitude.txt
# MODE: sample
# ```
#
# The `DISTRIBUTIONS` section goes at the same level of the yaml file as the `DATASET`, `COSMOLOGY`, `IMAGE` etc. sections. Let's dive into what each of the parts of that entry mean.
#
# `USERDIST_#`
#
# - Each `USERDIST` entry represents an independent probability distribution you want to add into your dataset. They must be indexed as 1, 2, 3, ... for `deeplenstronomy` to track them properly.
#
# `FILENAME`
#
# - This parameter specifies the text file that contains an empirical version of the probability distribution. More details on the structure of this file is available in the next subsection.
#
# `MODE`
#
# - This tells `deeplenstronomy` how you want to use the probability distribution you supplied. The options are `interpolate` and `sample`.
#
# - `interpolate` will use the points in your distribution to form the basis of a grid that will be sampled during dataset generation.
#
# - `STEP` is only used if you use the `interpolate` mode, and it is the number of points in each dimension of the grid that is sampled. The number of elements in the grid is the value of `STEP` raised to the power of the number of dimensions in your distribution. This can easily exceed the available memory in your computer if you have high-dimensional distributions and a large `STEP` value. If `STEP` is not specified, the default of 10 will be used.
#
# - `sample` will draw only from the raw points specified and not perform any interpolation. This mode is recommended if you have a highly-dimensional distribution with a lot of structure in each dimension for memory considerations.
#
# `PARAMS`
#
# - This parameter is optional and only used if you'd like to use the same text file for multiple parts of a simulation. See the "Using The Same Distribution for Multiple Sets of Parameters" subheading below for more information.
# ### Writing probability distribution files
#
# Let's inspect "seeing.txt" to learn how to work with a one-dimensional distribution.
# ! cat data/seeing.txt
# These files are whitespace-separated, use the parameter name as a column header, and specify the probability weight associated with each point in parameter space. The weights are defined relative to each other and do not need to sum to one. There is also no requirement of regular spacing in the distributions, though it may lead to more accurate interpolations.
#
# At present, the supplied seeing distribution will be applied to all bands and all configurations in the dataset. If, for example, we only wanted the distribution to apply to the $g$-band seeing, the column name would be changed to `seeing-g`. If we wanted the distribution to only apply to `CONFIGURATION_1`, then we could use `CONFIGURATION_1-seeing` as the column name. And if we only want to target the $g$ -band seeing in `CONFIGURATION_1`, then we would use `CONFIGURATION_1-seeing-g`.
# We can verify this distribution was put into the dataset by plotting the raw text file distribution over the simulated seeing values:
import numpy as np
import pandas as pd
# +
# NOTE(review): delim_whitespace is deprecated in newer pandas —
# sep=r'\s+' is the forward-compatible spelling.
df = pd.read_csv('data/seeing.txt', delim_whitespace=True)
# just normalizing weights to an AUC of 1.0 using a rectangular sum
# NOTE(review): this expression multiplies by df['seeing'].values rather than
# the weights themselves — confirm the intended rectangle rule
# (bin width x weight) before trusting the normalization.
norm_const = sum((df['seeing'].values.max() - df['seeing'].values.min()) / len(df) * df['WEIGHT'].values.mean() * df['seeing'].values)
# Overlay the raw text-file distribution on the simulated seeing histogram.
plt.figure()
plt.scatter(df['seeing'].values, df['WEIGHT'].values / norm_const, color='black', label='Raw Distribution Points')
plt.hist(dataset.CONFIGURATION_1_metadata['seeing-g'].values, density=True, histtype='step', lw=3, color='red', label="Simulated Values")
plt.xlabel("Seeing (arcsec)")
plt.legend(loc='upper left')
plt.show()
# -
# Let's now use this feature to input a correlation, and let's plan to use the `sample` mode instead of the `interpolate` mode.
# ! head data/Rsersic_magnitude.txt
# Here we're using a distribution in the text file to draw the size of the galaxy measured in the $i$-band (`PLANE_1-OBJECT_1-LIGHT_PROFILE_1-R_sersic-i`) with the magnitude measured in all bands (`PLANE_1-OBJECT_1-LIGHT_PROFILE_1-magnitude`). We'll compare the raw distribution from the text file to the simulated metadata parameters:
df = pd.read_csv('data/Rsersic_magnitude.txt', delim_whitespace=True)
# +
# Compare the raw 2-D user distribution (left, colour = weight) against the
# joint histogram of the simulated metadata (right).
r_sersic = dataset.CONFIGURATION_1_metadata['PLANE_1-OBJECT_1-LIGHT_PROFILE_1-R_sersic-i'].values
magnitudes = dataset.CONFIGURATION_1_metadata['PLANE_1-OBJECT_1-LIGHT_PROFILE_1-magnitude-g'].values
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(df['PLANE_1-OBJECT_1-LIGHT_PROFILE_1-R_sersic-i'].values,
            df['PLANE_1-OBJECT_1-LIGHT_PROFILE_1-magnitude'].values,
            c=df['WEIGHT'].values)
ax1.set_xlabel("GALAXY_1 R_sersic i-band (arcsec)")
ax1.set_ylabel("GALAXY_1 magnitude g-band")
ax1.set_title("Raw Distribution")
ax2.hist2d(r_sersic, magnitudes,
           bins=[np.linspace(r_sersic.min(), r_sersic.max(), 10),
                 np.linspace(magnitudes.min(), magnitudes.max(), 10)])
ax2.set_xlabel("GALAXY_1 R_sersic i-band (arcsec)")
ax2.set_ylabel("GALAXY_1 magnitude g-band")
ax2.set_title("Simulated Dataset")
fig.tight_layout()
fig.show()
# -
# Notice that in the left plot, points are assigned a color based on the weight in the text file, while in the right plot we have a histogram counting the number of simulated images with a particular parameter value combination.
# ### How am I supposed to know what to put as the column names in my text files?
#
# Good question.
#
# The column names are certainly scary to look at, but `deeplenstronomy` has a functionality to help you out. Let's revisit the example of trying to correlate the $g$-band `magnitude` of a galaxy with the $i$-band `R_sersic`. Now that we've simulated a dataset, we can `search` for USERDIST column names:
dataset.search('magnitude')
# The `dataset.search()` function returns all possible USERDIST column names containing the parameter of interest. The returned object is a dictionary where the keys are the object names in the `SPECIES` section and the values are the possible USERDIST column names.
#
# Looking at the output, and knowing you are concerned with the object named `LENS` would point you directly to the column name `CONFIGURATION_1-PLANE_1-OBJECT_1-LIGHT_PROFILE_1-magnitude-g`, where we could leave off the `CONFIGURATION_1` part to apply the USERDIST to all configurations. In this case, there is only one CONFIGURATION, so the prefix doesn't matter.
#
# We can repeat the process for `R_sersic`:
dataset.search('R_sersic')
# ### Using The Same Distribution for Multiple Sets of Parameters
#
# In some cases or workflows you may wish to use the same distribution file for two types of objects. For example, two nearly-identical configurations, such as a lensing event alone and a lensing event with a foreground star, may use the same distribution for the lens galaxy.
#
# In this case, simply add the `PARAMS` argument below `FILENAME` for a given entry in the `DISTRIBUTIONS` section. The argument must be a list, with entries corresponding to the desired parameters (as they would appear in a distribution file header). Currently, the accompanying file must also contain a header.
#
# As an example, let's take the example userdist above:
# ! head data/Rsersic_magnitude.txt
# Suppose we hypothetically wanted this distribution to be used for the `SOURCE` object in `CONFIGURATION_1` of the example file. We can add another entry in the `DISTRIBUTIONS` section like so:
#
# ```
# DISTRIBUTIONS:
# USERDIST_1:
# FILENAME: data/seeing.txt
# MODE: interpolate
# STEP: 20
# USERDIST_2:
# FILENAME: data/Rsersic_magnitude.txt
# MODE: sample
# USERDIST_3:
# FILENAME: data/Rsersic_magnitude.txt
# MODE: sample
# PARAMS: [PLANE_2-OBJECT_1-LIGHT_PROFILE_1-R_sersic-i, PLANE_2-OBJECT_1-LIGHT_PROFILE_1-magnitude, WEIGHT]
# ```
| Notebooks/UserDistributions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (Tutorials)
# language: python
# name: pycharm-38c7cf03
# ---
# +
#####################################################################
# This notebook is authored by: <NAME> #
# Date: May 2022 #
# If you use this code or the results from this work please cite: #
# Machine learning the trilinear and light-quark Yukawa couplings #
# from Higgs pair kinematic shapes #
# <NAME>, <NAME>, <NAME>, <NAME> #
# and <NAME> #
# arXiv:2205.XXXXX (https://arxiv.org/abs/2005.XXXXX) #
#####################################################################
#####################################################################
# Plot Single and Di-Higgs cross sections #
#####################################################################
import numpy as np
import pandas as pd
from matplotlib.ticker import NullFormatter, LogLocator
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from matplotlib import rc
# Render all figure text with LaTeX (requires a TeX installation with amsmath).
rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
## ***************************************************************************
## * RC param *
## ***************************************************************************
# Inward-pointing ticks on all four sides, with larger major ticks and
# publication-sized tick labels.
plt.rcParams['xtick.top'] = True
plt.rcParams['xtick.major.size'] = 10
plt.rcParams['xtick.minor.size'] = 5
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.right'] = True
plt.rcParams['ytick.major.size'] = 10
plt.rcParams['ytick.minor.size'] = 5
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.labelsize'] = 23
plt.rcParams['ytick.labelsize'] = 23
plt.rcParams['font.family'] = 'monospace'
## ***************************************************************************
LambdaNP2 = 1e+3**2
v4 = 246.**4
mh2 = 125.1**2
kltoCH = lambda x : LambdaNP2/v4*mh2*0.5*(1-x)
# +
LambdaNP = 1e+3 #GeV
v = 246.
mh = 125.1
mass = {
'ku':2.2e-3,
'kd':4.7e-3,
'ks':95e-3,
'kc':1.275,
}
def kqtoCqH(kq,op):
if op=='ku'or op=='kd'or op=='ks' or op=='kc':
return LambdaNP**2/v**3*(np.sqrt(2.0)*mass[op]*(1-kq))
else:
return kq
def CqHtokq(CqH,op):
if op=='ku'or op=='kd'or op=='ks' or op=='kc':
return -(CqH/np.sqrt(2)/LambdaNP**2 *v**3/mass[op])+1
else:
return CqH
def xinverse(x):
return kqtoCqH(x,'kd')
def xforward(x):
return CqHtokq(x,'kd')
def xinverse2(x):
return kqtoCqH(x,'ku')
def xforward2(x):
return CqHtokq(x,'ku')
######################################################################
# + tags=[]
# all in pb
uuh = lambda x: 283.1877 * x**2
ddh = lambda x: 178.110 * x**2
uuhh = lambda x: 0.5813103 * x**2
ddhh= lambda x: 0.3499139 * x**2
ggFh = 56.36
ggFhh= 36.92e-3
inthu = np.sqrt(56.36/283.1877)
inthd = np.sqrt(56.36/178.110)
inthhu = np.sqrt(36.92e-3/0.5813103)
inthhd = np.sqrt(36.92e-3/0.3499139)
intkhu = CqHtokq(inthu,'ku')
intkhd = CqHtokq(inthd,'kd')
intkhhu = CqHtokq(inthhu,'ku')
intkhhd = CqHtokq(inthhd,'kd')
fig, ax = plt.subplots(1, figsize=(8, 7))
colpastil = ['#937eba','#72bbd0','#f09494','#62a071']
X = np.linspace(0., 1.0, 1000)
plt.axhline(y=ggFh, color='k', linewidth=2.5, alpha=0.9, linestyle='solid')
plt.axhline(y=ggFhh, color='#002859', linewidth=2.5, alpha=0.9, linestyle='dashed')
plt.plot(X, uuh(X), color=colpastil[0], linewidth=3.5, alpha=1)
plt.plot(X, uuhh(X), color=colpastil[1], linewidth=3.5, alpha=1)
plt.plot(X, ddh(X), color=colpastil[2], linewidth=3.5, alpha=1, linestyle=(0, (3, 1, 1, 1)))
plt.plot(X, ddhh(X), color=colpastil[3], linewidth=3.5, alpha=1, linestyle=(0, (3, 1, 1, 1)))
plt.plot(inthu, ggFh, 'o', markersize=10, color=colpastil[0], mfc='white', markeredgewidth=2)
plt.plot(inthhu, ggFhh, 'o', markersize=10, color=colpastil[1], mfc='white', markeredgewidth=2)
plt.plot(inthd, ggFh, 'o', markersize=10, color=colpastil[2], mfc='white', markeredgewidth=2)
plt.plot(inthhd, ggFhh, 'o', markersize=10, color=colpastil[3], mfc='white', markeredgewidth=2)
# labels = [r'$u\bar u \to h$ ($C_{u\phi}$)', r'$ u \bar u \to hh$ $(C_{u\phi}) $', r'$d \bar d \to h$ ($C_{d\phi}$)',r'$d \bar d \to hh$ ($C_{d\phi}$)']
labels = [r'$u\bar u \to h$', r'$ u \bar u \to hh$', r'$d \bar d \to h$',r'$d \bar d \to hh$']
line0 = Line2D([0], [0], color=colpastil[0], linewidth=4, linestyle='-', solid_capstyle='round', markersize=10, alpha=0.9)
line1 = Line2D([0], [0], color=colpastil[1], linewidth=4, linestyle='-', solid_capstyle='round', markersize=10, alpha=0.9)
line2 = Line2D([0], [0], color=colpastil[2], linewidth=4, linestyle=(0, (3, 1, 1, 1)), solid_capstyle='round', markersize=10, alpha=0.9)
line3 = Line2D([0], [0], color=colpastil[3], linewidth=4, linestyle=(0, (3, 1, 1, 1)), solid_capstyle='round', markersize=10, alpha=0.9)
leg = plt.legend(handles=[line0, line1, line2,line3], labels=labels,
loc='lower right', prop={'size': 14}, fancybox=True, framealpha=1, columnspacing=1,
ncol=1, bbox_to_anchor=(0.98, 0.02))
plt.xlim((0.001, 1.))
plt.ylim(5.0e-5, 1200.0)
plt.xlabel(r'$C_{q\phi}/\Lambda^2$', fontsize=22)
plt.ylabel(r'$\sigma$ $\mathrm{[pb]}$', fontsize=22)
plt.semilogy()
locmajy = LogLocator(base=10,numticks=100)
locminy = LogLocator(base=10,subs=np.arange(2, 10) * .1,numticks=100) # subs=(0.2,0.4,0.6,0.8)
ax.yaxis.set_major_locator(locmajy)
ax.yaxis.set_minor_locator(locminy)
ax.yaxis.set_minor_formatter(NullFormatter())
plt.tick_params(axis="x", labelsize=22)
plt.tick_params(axis="y", labelsize=22)
ax.xaxis.set_minor_locator(AutoMinorLocator())
plt.annotate(r'$\sqrt{s}=14$ $\mathrm{TeV}$', xy=(0.2, 1e-4), xycoords='data', horizontalalignment='center',
verticalalignment='bottom', fontsize=22, fontweight='900', zorder=100, color='#474747')
plt.annotate(r'$gg\rightarrow h$', xy=(0.2, 0.65e2), xycoords='data', horizontalalignment='center',
verticalalignment='bottom', fontsize=20, fontweight='900', zorder=100, color='#474747')
plt.annotate(r'$gg\rightarrow hh$', xy=(0.55, 1.6e-2), xycoords='data', horizontalalignment='center',
verticalalignment='center', fontsize=20, fontweight='900', zorder=100, color='#474747')
plt.annotate(r'$q\bar q\rightarrow hh$', xy=(0.85, 0.9), xycoords='data', horizontalalignment='center',
verticalalignment='center', fontsize=20, fontweight='900', zorder=100, color='#474747', rotation=4)
plt.annotate(r'$q\bar q\rightarrow h$', xy=(0.8, 3.5e2), xycoords='data', horizontalalignment='center',
verticalalignment='center', fontsize=20, fontweight='900', zorder=100, color='#474747', rotation=4)
plt.annotate(r'$\kappa_u='+str(round(intkhu))+r'$', xy=(inthu, ggFh), xycoords='data', horizontalalignment='center',
verticalalignment='bottom', fontsize=18, fontweight='900', zorder=100, color='#474747', rotation=10)
plt.annotate(r'$\kappa_u='+str(round(intkhhu))+r'$', xy=(inthhu, ggFhh*0.85), xycoords='data', horizontalalignment='center',
verticalalignment='bottom', fontsize=18, fontweight='900', zorder=100, color='#474747', rotation=15)
plt.annotate(r'$\kappa_d='+str(round(intkhd))+r'$', xy=(inthd, ggFh*0.9), xycoords='data', horizontalalignment='center',
verticalalignment='top', fontsize=18, fontweight='900', zorder=100, color='#474747', rotation=10)
plt.annotate(r'$\kappa_d='+str(round(intkhhd))+r'$', xy=(inthhd, ggFhh*0.9), xycoords='data', horizontalalignment='center',
verticalalignment='top', fontsize=18, fontweight='900', zorder=100, color='#474747', rotation=15)
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=True, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off)
)
secax = ax.secondary_xaxis('top', functions=(xforward, xinverse))
secax.xaxis.set_minor_locator(AutoMinorLocator())
secax.set_xlabel(r"$\kappa_d$", fontsize=22,labelpad=10)
secax2 = ax.secondary_xaxis(1.2,functions=(xforward2, xinverse2),)
secax2.xaxis.set_minor_locator(AutoMinorLocator())
secax2.set_xlabel(r"$\kappa_u$", fontsize=22,labelpad=10)
plt.grid(linestyle=':')
plt.tight_layout()
#
plt.savefig('../plots/pph_hh_14Tev.pdf', dpi=300)
plt.show()
# -
| plotting-routines/single-vs-di-Higgs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # CODE TO PERFORM SIMPLE LINEAR REGRESSION ON FUEL CONSUMPTION DATASET
# # Dr. Ryan @STEMplicity
# 
#
#
# # PROBLEM STATEMENT
# - You have been hired as a consultant to a major Automotive Manufacturer and you have been tasked to develop a model to predict the impact of increasing the vehicle horsepower (HP) on fuel economy (Mileage Per Gallon (MPG)). You gathered the data:
# - Data set:
#     - Independent variable X: Vehicle Horse Power
#     - Dependent variable Y: Mileage Per Gallon (MPG)
# # STEP #1: LIBRARIES IMPORT
#
# # STEP #2: IMPORT DATASET
# # STEP#3: VISUALIZE DATASET
# # STEP#4: CREATE TESTING AND TRAINING DATASET
# # STEP#5: TRAIN THE MODEL
# # STEP#6: TEST THE MODEL
# # EXCELLENT JOB! NOW YOU BECAME EXPERT IN SIMPLE LINEAR REGRESSION
| Machine Learning/1) Regression(all reg course)/1.Simple Linear Regression/Simple Linear Regression - Fuel Consumption Project Questions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Improving Registration via Registration:<br>Semiautomatic Landmark Localization <a href="https://mybinder.org/v2/gh/InsightSoftwareConsortium/SimpleITK-Notebooks/master?filepath=Python%2F67_Registration_Semiautomatic_Homework.ipynb"><img style="float: right;" src="https://mybinder.org/badge_logo.svg"></a>
#
# This notebook is intentionally missing code, an example of a <font color="red">homework</font> assignment using SimpleITK.
#
# Localization of anatomical landmarks or fiducial markers in medical images is a common task, both for initializing intensity based registration between two images and for registration between the image space and physical space in computer assisted interventions.
#
# In this notebook our goal is to rigidly register two images using manually localized point pairs. You will then improve the initial result by improving the point localization in the moving image via registration between each of the landmark regions in the fixed and moving images.
#
# ### Manual Localization
#
# * Advantages: identification, coarse localization, of the landmarks or fiducials is extremely <a href="https://en.wikipedia.org/wiki/Robust_statistics">robust</a>. Humans readily identify salient features in the presence of noise and under a variety of spatial transformations, including large deformations.
# * Disadvantages: exhibits low <a href="https://en.wikipedia.org/wiki/Accuracy_and_precision">accuracy and precision</a>.
#
# ### Automatic Localization
#
# * Advantages: highly precise, and with a good coarse localization it is also highly accurate.
# * Disadvantages: prone to failure in the presence of noise and requires knowledge of the possible spatial transformations the landmark may undergo.
#
# ### Semiautomatic Localization
# A Combination of manual and automatic components to obtain a robust (human contribution), accurate and precise (machine contribution) localization.
# +
# To use interactive plots (mouse clicks, zooming, panning) we use the notebook back end. We want our graphs
# to be embedded in the notebook, inline mode, this combination is defined by the magic "%matplotlib notebook".
# %matplotlib notebook
import numpy as np
import SimpleITK as sitk
import registration_utilities as ru
# %run update_path_to_download_script
from downloaddata import fetch_data as fdata
import gui
# -
# ## Load Data
#
# We will be working with the training data from the Retrospective Image Registration Evaluation (<a href="http://www.insight-journal.org/rire/">RIRE</a>) project. This data consists of a CT and MR of the same patient with a known rigid transformation between the two. We create a dense random point set in the CT image's coordinate system and transform it to the MR coordinate system. This set of points serves as our reference data for registration evaluation.
#
# To ensure that your semi-automatic localization approach can deal with clinical data you should evaluate it using the data as is, and rotated. We test the extreme case where the data is rotated by 180$^o$ (boolean variable "rotate") so that in one scan the patient is in supine and in the other in prone position.
#
# We will start by loading our data and looking at the distances between corresponding points prior to registration, illustrates the spatial variability of the errors.
# +
fixed_image = sitk.ReadImage(fdata("training_001_ct.mha"), sitk.sitkFloat32)
moving_image = sitk.ReadImage(fdata("training_001_mr_T1.mha"), sitk.sitkFloat32)
fixed_fiducial_points, moving_fiducial_points = ru.load_RIRE_ground_truth(
fdata("ct_T1.standard")
)
# In the original data both images have the same orientation (patient in supine), the approach should also work when
# images have different orientation. In the extreme they have a 180^o rotation between them.
rotate = True
if rotate:
rotation_center = moving_image.TransformContinuousIndexToPhysicalPoint(
[(index - 1) / 2.0 for index in moving_image.GetSize()]
)
transform_moving = sitk.Euler3DTransform(rotation_center, 0, 0, np.pi, (0, 0, 0))
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(moving_image)
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform_moving)
moving_image = resample.Execute(moving_image)
for i, p in enumerate(moving_fiducial_points):
moving_fiducial_points[i] = transform_moving.TransformPoint(p)
# Compute the rigid transformation defined by the two point sets. Flatten the tuple lists
# representing the points. The LandmarkBasedTransformInitializer expects the point coordinates
# in one flat list [x1, y1, z1, x2, y2, z2...].
fixed_fiducial_points_flat = [c for p in fixed_fiducial_points for c in p]
moving_fiducial_points_flat = [c for p in moving_fiducial_points for c in p]
reference_transform = sitk.LandmarkBasedTransformInitializer(
sitk.VersorRigid3DTransform(),
fixed_fiducial_points_flat,
moving_fiducial_points_flat,
)
# Generate a reference dataset from the reference transformation
# (corresponding points in the fixed and moving images).
fixed_points = ru.generate_random_pointset(image=fixed_image, num_points=100)
moving_points = [reference_transform.TransformPoint(p) for p in fixed_points]
# Compute the TRE prior to registration.
pre_errors_mean, pre_errors_std, _, pre_errors_max, pre_errors = ru.registration_errors(
sitk.Euler3DTransform(), fixed_points, moving_points, display_errors=True
)
print(
f"Before registration, errors (TRE) in millimeters, mean(std): {pre_errors_mean:.2f}({pre_errors_std:.2f}), max: {pre_errors_max:.2f}"
)
# -
# ## Manual Landmark Localization
#
# We now localize N(>=3) landmarks in the two images. Note that you can zoom and pan the images, just remember to change the interaction mode from "edit" to "view".
#
# NOTE: In edit mode, the GUI will force you to enter corresponding points by disabling the option for consecutively localizing multiple (>2) points in the same image. In view mode, point localization is disabled which is useful for zooming/panning (in edit mode zooming/panning will also localize points due to the mouse button click).
point_acquisition_interface = gui.RegistrationPointDataAquisition(
fixed_image, moving_image, fixed_window_level=(215, 50)
)
# ## Registration (manual landmark localization)
#
# Evaluate the quality of the manual localization by registering the two images, and then comparing the registration errors using the known reference data.
# fixed_image_points, moving_image_points = point_acquisition_interface.get_points()
fixed_image_points = [
(156.48434676356158, 201.92274575468412, 68.0),
(194.25413436597393, 98.55771047484492, 32.0),
(128.94523819661913, 96.18284152323203, 32.0),
]
moving_image_points = [
(141.46826904042848, 156.97653126727528, 48.0),
(113.70102381552435, 251.76553994455645, 8.0),
(180.69457220262115, 251.76553994455645, 8.0),
]
# +
fixed_image_points_flat = [c for p in fixed_image_points for c in p]
moving_image_points_flat = [c for p in moving_image_points for c in p]
manual_localized_transformation = sitk.VersorRigid3DTransform(
sitk.LandmarkBasedTransformInitializer(
sitk.VersorRigid3DTransform(), fixed_image_points_flat, moving_image_points_flat
)
)
(
manual_errors_mean,
manual_errors_std,
manual_errors_min,
manual_errors_max,
_,
) = ru.registration_errors(
manual_localized_transformation, fixed_points, moving_points, display_errors=True
)
print(
f"After registration (manual point localization), errors (TRE) in millimeters, mean(std): {manual_errors_mean:.2f}({manual_errors_std:.2f}), max: {manual_errors_max:.2f}"
)
# -
# We can also evaluate the registration qualitatively by using a linked cursor approach via the same GUI we used to localize corresponding points. This time the points will be added in pairs.
gui.RegistrationPointDataAquisition(
fixed_image,
moving_image,
fixed_window_level=(215, 50),
known_transformation=manual_localized_transformation,
)
# ## <font color="red">Homework:</font> semiautomatic landmark localization
#
# You will now improve the localization of the fixed landmarks in the moving image using intensity based registration. This registration is performed independently for each landmark.
#
# The output of the following cell is expected to be a list of tuples (3D) called <b>updated_moving_image_points</b>.
#
# Hint: You need to initialize the intensity based registration in a way that takes into account that the images may have a significant rotation between them (up to 180$^o$).
updated_moving_image_points = moving_image_points
# ## Registration (semiautomatic landmark localization)
#
# Evaluate the quality of the semiautomatic localization by registering the two images, and then comparing the registration errors using the known reference data.
# +
updated_moving_image_points_flat = [c for p in updated_moving_image_points for c in p]
semi_automatic_transform = sitk.VersorRigid3DTransform(
sitk.LandmarkBasedTransformInitializer(
sitk.VersorRigid3DTransform(),
fixed_image_points_flat,
updated_moving_image_points_flat,
)
)
(
semi_automatic_errors_mean,
semi_automatic_errors_std,
_,
semi_automatic_errors_max,
_,
) = ru.registration_errors(
semi_automatic_transform,
fixed_points,
moving_points,
display_errors=True,
min_err=manual_errors_min,
max_err=manual_errors_max,
)
print(
f"After registration (semiautomatic point localization), errors (TRE) in millimeters, mean(std): {semi_automatic_errors_mean:.2f}({semi_automatic_errors_std:.2f}), max: {semi_automatic_errors_max:.2f}"
)
# -
gui.RegistrationPointDataAquisition(
fixed_image,
moving_image,
fixed_window_level=(215, 50),
known_transformation=semi_automatic_transform,
)
# ## <font color="red">Answer</font> the following questions:
#
# 1. Is semiautomatic localization more precise than manual localization? Answer this question using Fiducial Registration Error. Repeat the manual and semiautomatic localizations multiple times and save the FREs to file. Plot the histograms of these errors (see matplotlib.pyplot.hist). Which method is more precise? Is this statistically significant? (see scipy.stats.ttest_rel).
# * Evaluate the variability, precision, of manual localization of point pairs using the reference transformation. The distribution of $\|p_{moving\_fiducial} - T(p_{fixed\_fiducial})\|$.
# * Evaluate the variability, precision, of the semiautomatic localization of point pairs using the reference transformation. The distribution of $\|p_{moving\_updated\_fiducial} - T(p_{fixed\_fiducial})\|$.
#
# 2. Avoid the temptation to <a href="https://en.wikipedia.org/wiki/Overfitting">overfit</a>: When we only have pairs of manually localized points we may be tempted to use all of the point pairs and estimate a transformation that has more degrees of freedom. In our case, an affine transformation instead of a rigid one. To illustrate the problem with this approach you will manually localize four points in the two images. Estimate a rigid and an affine transformation using these points (change the transform type given as input to LandmarkBasedTransformInitializer).<br><br>
# Now compute the FRE associated with the two transformations and the TRE (using the fixed_points and moving_points data). Did the use of more degrees of freedom improve the registration results (smaller TREs)?
| Python/67_Registration_Semiautomatic_Homework.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
# +
# coding=utf-8
file_path = '/Users/li/workshop/MyRepository/DeepNaturalLanguageProcessing/DeepNLP/language_model/anna/data/anna.txt'
with open(file_path, 'r') as f:
text = f.read()
vocab = set(text)
# print(vocab)
# 字符数字映射
vocab_to_int = {c: i for i, c in enumerate(vocab)}
# print(vocab_to_int)
# 数字字符映射
int_to_vocab = dict(enumerate(vocab))
# print(int_to_vocab)
# 对文本进行编码
encode = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
print(text[:100])
print('encode\n', encode[:100])
# +
def generate_bath(arr, batch_size, seq_length):
num_steps = batch_size * seq_length
# print('num_steps', num_steps)
n_iters = int(len(arr) / num_steps)
# print('num_iters', n_iters)
arr = arr[: num_steps * n_iters]
# print('arr_b', arr[:,:15])
# 重塑
arr = arr.reshape((batch_size, -1))
print('arr_a\n',arr[:,:15])
# print(arr.shape[1])
for n in range(0, arr.shape[1], seq_length):
x = arr[:, n:n + seq_length]
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
yield x, y
batches = generate_bath(encode, 10, 50)
x, y = next(batches)
print('x.shape', np.shape(x))
print('x', x[:10, :10])
print('y.shape', np.shape(y))
print('y', y[:10, :10])
# -
def build_inputs(num_seqs, num_steps):
'''
构建输入层
num_seqs: 每个batch中的序列个数
num_steps: 每个序列包含的字符数
'''
inputs = tf.placeholder(tf.int32, shape=(num_seqs, num_steps), name='inputs')
targets = tf.placeholder(tf.int32, shape=(num_seqs, num_steps), name='targets')
# 加入keep_prob
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
'''
构建lstm层
keep_prob
lstm_size: lstm隐层中结点数目
num_layers: lstm的隐层数目
batch_size: batch_size
'''
# 构建一个基本lstm单元
cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# 添加dropout
# drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# 堆叠
# cell = tf.contrib.rnn.MultiRNNCell([drop for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
def build_output(lstm_output, in_size, out_size):
'''
构造输出层
lstm_output: lstm层的输出结果
in_size: lstm输出层重塑后的size
out_size: softmax层的size
'''
# 将lstm的输出按照列concate,例如[[1,2,3],[7,8,9]],
# tf.concat的结果是[1,2,3,7,8,9]
seq_output = tf.concat(lstm_output, axis=1) # tf.concat(concat_dim, values)
# reshape
x = tf.reshape(seq_output, [-1, in_size])
# 将lstm层与softmax层全连接
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# 计算logits
logits = tf.matmul(x, softmax_w) + softmax_b
# softmax层返回概率分布
out = tf.nn.softmax(logits, name='predictions')
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
'''
根据logits和targets计算损失
logits: 全连接层的输出结果(不经过softmax)
targets: targets
lstm_size
num_classes: vocab_size
'''
# One-hot编码
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
'''
构造Optimizer
loss: 损失
learning_rate: 学习率
'''
# 使用clipping gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# 如果sampling是True,则采用SGD
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# 输入层
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# LSTM层
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
# 对输入进行one-hot编码
x_one_hot = tf.one_hot(self.inputs, num_classes)
# 运行RNN
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# 预测结果
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss 和 optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
# +
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# 每n轮进行一次变量保存
save_every_n = 200
# -
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in generate_bath(encode, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
# control the print lines
if counter % 100 == 0:
print('轮数: {}/{}... '.format(e + 1, epochs),
'训练步数: {}... '.format(counter),
'训练误差: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end - start)))
if (counter % save_every_n == 0):
saver.save(sess, "/Users/li/workshop/MyRepository/DeepNaturalLanguageProcessing/DeepNLP/anna/checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "/Users/li/workshop/MyRepository/DeepNaturalLanguageProcessing/DeepNLP/anna/checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
def pick_top_n(preds, vocab_size, top_n=5):
"""
从预测结果中选取前top_n个最可能的字符
preds: 预测结果
vocab_size
top_n
"""
p = np.squeeze(preds)
# 将除了top_n个预测值的位置都置为0
p[np.argsort(p)[:-top_n]] = 0
# 归一化概率
p = p / np.sum(p)
# 随机选取一个字符
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
"""
生成新文本
checkpoint: 某一轮迭代的参数文件
n_sample: 新闻本的字符长度
lstm_size: 隐层结点数
vocab_size
prime: 起始文本
"""
# 将输入的单词转换为单个字符组成的list
samples = [c for c in prime]
# sampling=True意味着batch的size=1 x 1
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
# 加载模型参数,恢复训练
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
# 输入单个字符
x[0, 0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
# 添加字符到samples中
samples.append(int_to_vocab[c])
# 不断生成字符,直到达到指定数目
for i in range(n_samples):
x[0, 0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
# 选用最终的训练参数作为输入进行文本生成
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="The")
print(samp)
| LanguageModel/anna/anna.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://colab.research.google.com/github/krasserm/bayesian-machine-learning/blob/master/latent_variable_models_part_2.ipynb)
try:
    # Use Tensorflow 2.x
    # %tensorflow_version 2.x
    # Check if notebook is running in Google Colab.
    import google.colab
except ImportError:
    # Not running in Colab; narrow except avoids swallowing unrelated
    # errors (the original bare `except:` also caught KeyboardInterrupt etc.).
    pass
# # Latent variable models - part 2: Stochastic variational inference and variational autoencoders
#
# [Part 1](latent_variable_models_part_1.ipynb) of this article series introduced a latent variable model with discrete latent variables, the Gaussian mixture model (GMM), and an algorithm to fit this model to data, the EM algorithm. Part 2 covers a latent variable model with continuous latent variables for modeling more complex data, like natural images for example, and a Bayesian inference technique that can be used in conjunction with stochastic optimization algorithms.
#
# Consider a natural image of size $100 \times 100$ with a single channel. This image is a point in $10.000$-dimensional space. Natural images are usually not uniformly distributed in this space but reside on a much lower-dimensional manifold within this high-dimensional space. The lower dimensionality of the manifold is related to the limited degrees of freedom in these images e.g. only a limited number of pixel value combinations are actually perceived as natural images.
#
# Modeling natural images with latent variable models whose continuous latent variables represent locations on the manifold can be a useful approach that is also discussed here. As in part 1, a model with one latent variable $\mathbf{t}_i$ per observation $\mathbf{x}_i$ is used but now the latent variables are continuous rather than discrete variables. Therefore, summations over latent variable states are now replaced by integrals and these are often intractable for more complex models.
#
# Observations i.e. images $\mathbf{X} = \left\{ \mathbf{x}_1, \ldots, \mathbf{x}_N \right\}$ are again described with a probabilistic model $p(\mathbf{x} \lvert \boldsymbol{\theta})$. Goal is to maximize the data likelihood $p(\mathbf{X} \lvert \boldsymbol{\theta})$ w.r.t. $\boldsymbol{\theta}$ and to obtain approximate posterior distributions over continuous latent variables. The joint distribution over an observed variable $\mathbf{x}$ and a latent variable $\mathbf{t}$ is defined as the product of the conditional distribution over $\mathbf{x}$ given $\mathbf{t}$ and the prior distribution over $\mathbf{t}$.
#
# $$
# p(\mathbf{x}, \mathbf{t} \lvert \boldsymbol{\theta}) = p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta}) p(\mathbf{t} \lvert \boldsymbol{\theta})
# \tag{1}
# $$
#
# We obtain the marginal distribution over x by integrating over t.
#
# $$
# p(\mathbf{x} \lvert \boldsymbol{\theta}) = \int p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta}) p(\mathbf{t} \lvert \boldsymbol{\theta}) d\mathbf{t}
# \tag{2}
# $$
#
# This integral is usually intractable for even moderately complex conditional probabilities $p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta})$ and consequently also the true posterior.
#
# $$
# p(\mathbf{t} \lvert \mathbf{x}, \boldsymbol{\theta}) = {{p(\mathbf{x} \lvert \mathbf{t}, \boldsymbol{\theta}) p(\mathbf{t} \lvert \boldsymbol{\theta})} \over {p(\mathbf{x} \lvert \boldsymbol{\theta})}}
# \tag{3}
# $$
#
# This means that the E-step of the EM algorithm becomes intractable. Recall from part 1 that the lower bound of the log marginal likelihood is given by
#
# $$
# \mathcal{L}(\boldsymbol{\theta}, q) = \log p(\mathbf{X} \lvert \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{T} \lvert \mathbf{X}) \mid\mid p(\mathbf{T} \lvert \mathbf{X}, \boldsymbol{\theta}))
# \tag{4}
# $$
#
# In the E-step, the lower bound is maximized w.r.t. $q$ and $\boldsymbol{\theta}$ is held fixed. If the true posterior is tractable, we can set $q$ to the true posterior so that the KL divergence becomes $0$ which maximizes the lower bound for the current value of $\boldsymbol{\theta}$. If the true posterior is intractable approximations must be used.
#
# Here, we will use *stochastic variational inference*, a Bayesian inference method that also scales to large datasets<sup>[1]</sup>. Numerous other approximate inference approaches exist but these are not discussed here to keep the article focused.
#
# ## Stochastic variational inference
#
# The field of mathematics that covers the optimization of a functional w.r.t. a function, like ${\mathrm{argmax}}_q \mathcal{L}(\boldsymbol{\theta}, q)$ in our example, is the [calculus of variations](https://en.wikipedia.org/wiki/Calculus_of_variations), hence the name *variational inference*. In this context, $q$ is called a *variational distribution* and $\mathcal{L}(\boldsymbol{\theta}, q)$ a *variational lower bound*.
#
# We will approximate the true posterior with a parametric variational distribution $q(\mathbf{t} \lvert \mathbf{x}, \boldsymbol{\phi})$ and try to find a value of $\boldsymbol{\phi}$ that minimizes the KL divergence between this distribution and the true posterior. Using $q(\mathbf{t} \lvert \mathbf{x}, \boldsymbol{\phi})$ we can formulate the variational lower bound for a single observation $\mathbf{x}_i$ as
#
# $$
# \begin{align*}
# \mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) &=
# \log p(\mathbf{x}_i \lvert \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\theta})) \\ &=
# \log p(\mathbf{x}_i \lvert \boldsymbol{\theta}) - \int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log {{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \over {p(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\theta})}} d\mathbf{t}_i \\ &=
# \int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log {{p(\mathbf{x}_i \lvert \boldsymbol{\theta}) p(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\theta})} \over {q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})}} d\mathbf{t}_i \\ &=
# \int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log {{p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) p(\mathbf{t}_i \lvert \boldsymbol{\theta})} \over {q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})}} d\mathbf{t}_i \\ &=
# \int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) d\mathbf{t}_i - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta})) \\ &=
# \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta}))
# \end{align*}
# \tag{5}
# $$
#
# We assume that the integral $\int q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) d\mathbf{t}_i$ is intractable but we can choose a functional form of $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ from which we can easily sample so that the expectation of $\log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ w.r.t. to $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ can be approximated with $L$ samples from $q$.
#
# $$
# \mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) \approx {1 \over L} \sum_{l=1}^L \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta}) - \mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta}))
# \tag{6}
# $$
#
# where $\mathbf{t}_{i,l} \sim q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$. We will also choose the functional form of $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ and $p(\mathbf{t}_i \lvert \boldsymbol{\theta})$ such that integration of the KL divergence can be done analytically, hence, no samples are needed to evaluate the KL divergence. With these choices, an approximate evaluation of the variational lower bound is possible. But in order to optimize the lower bound w.r.t. $\boldsymbol{\theta}$ and $\boldsymbol{\phi}$ we need to approximate the gradients w.r.t. these parameters.
#
# ### Stochastic gradients
#
# We first assume that the analytical expression of the KL divergence, the second term on the RHS of Eq. $(5)$, is differentiable w.r.t. $\boldsymbol{\phi}$ and $\boldsymbol{\theta}$ so that deterministic gradients can be computed. The gradient of the first term on the RHS of Eq. $(5)$ w.r.t. $\boldsymbol{\theta}$ is
#
# $$
# \nabla_{\boldsymbol{\theta}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) =
# \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \nabla_{\boldsymbol{\theta}} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})
# \tag{7}
# $$
#
# Here, $\nabla_{\boldsymbol{\theta}}$ can be moved inside the expectation as $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ doesn't depend on $\boldsymbol{\theta}$. Assuming that $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ is differentiable w.r.t. $\boldsymbol{\theta}$, unbiased estimates of the gradient can be obtained by sampling from $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$.
#
# $$
# \nabla_{\boldsymbol{\theta}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) \approx
# {1 \over L} \sum_{l=1}^L \nabla_{\boldsymbol{\theta}} \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta})
# \tag{8}
# $$
#
# We will later implement $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ as neural network and use Tensorflow to compute $\nabla_{\boldsymbol{\theta}} \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta})$. The gradient w.r.t. $\boldsymbol{\phi}$ is a bit more tricky as $\nabla_{\boldsymbol{\phi}}$ cannot be moved inside the expectation because $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ depends on $\boldsymbol{\phi}$. But if we can decompose $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ into an auxiliary distribution $p(\boldsymbol\epsilon)$ that doesn't depend on $\boldsymbol{\phi}$ and a deterministic, differentiable function $g(\boldsymbol\epsilon, \mathbf{x}, \boldsymbol{\phi})$ where $\mathbf{t}_i = g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi})$ and $\boldsymbol\epsilon \sim p(\boldsymbol\epsilon)$ then we can re-formulate the gradient w.r.t. $\boldsymbol{\phi}$ as
#
# $$
# \begin{align*}
# \nabla_{\boldsymbol{\phi}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) &=
# \nabla_{\boldsymbol{\phi}} \mathbb{E}_{p(\boldsymbol\epsilon)} \log p(\mathbf{x}_i \lvert g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi}), \boldsymbol{\theta}) \\ &=
# \mathbb{E}_{p(\boldsymbol\epsilon)} \nabla_{\boldsymbol{\phi}} \log p(\mathbf{x}_i \lvert g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi}), \boldsymbol{\theta})
# \tag{9}
# \end{align*}
# $$
#
# Unbiased estimates of the gradient w.r.t. $\boldsymbol{\phi}$ can then be obtained by sampling from $p(\boldsymbol\epsilon)$.
#
# $$
# \nabla_{\boldsymbol{\phi}} \mathbb{E}_{q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})} \log p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta}) \approx
# {1 \over L} \sum_{l=1}^L \nabla_{\boldsymbol{\phi}} \log p(\mathbf{x}_i \lvert \mathbf{t}_{i,l}, \boldsymbol{\theta})
# \tag{10}
# $$
#
#
# where $\mathbf{t}_{i,l} = g(\boldsymbol\epsilon_l, \mathbf{x}_i, \boldsymbol{\phi})$ and $\boldsymbol\epsilon_l \sim p(\boldsymbol\epsilon)$. This so-called *reparameterization trick* can be applied to a wide range of probability distributions, including Gaussian distributions. Furthermore, stochastic gradients w.r.t. $\boldsymbol{\phi}$ obtained with this trick have much smaller variance than those obtained with alternative approaches (not shown here).
#
# ### Mini-batches
#
# The above approximations for the variational lower bound and its gradients have been formulated for a single training example $\mathbf{x}_i$ but this can be easily extended to mini-batches $\mathbf{X}^M = \left\{ \mathbf{x}_1, \ldots, \mathbf{x}_M \right\}$ with $M$ random samples from a dataset $\mathbf{X}$ of $N$ i.i.d. observations. The lower bound of the full dataset $\mathcal{L}(\boldsymbol{\theta}, q; \mathbf{X})$ can then be approximated as
#
# $$
# \begin{align*}
# \mathcal{L}(\boldsymbol{\theta}, q; \mathbf{X}) &\approx
# {N \over M} \sum_{i=1}^M \mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) \\ &=
# \mathcal{L}^M(\boldsymbol{\theta}, q; \mathbf{X}^M)
# \tag{11}
# \end{align*}
# $$
#
# Gradients of $\mathcal{L}^M(\boldsymbol{\theta}, q; \mathbf{X}^M)$ can be obtained as described above together with averaging over the mini-batch and used in combination with optimizers like Adam, for example, to update the parameters of the latent variable model. Sampling from the variational distribution $q$ and usage of mini-batches leads to noisy gradients, hence the term *stochastic variational inference*.
#
# If $M$ is sufficiently large, for example $M = 100$, then $L$ can be even set to $1$ i.e. a single sample from the variational distribution per training example is sufficient to get a good gradient estimate on average.
#
# ## Variational autoencoder
#
# From the perspective of a generative model, $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ is a probabilistic *encoder* because it generates a *latent code* $\mathbf{t}_i$ for input image $\mathbf{x}_i$ and $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ is a probabilistic *decoder* because it generates or reconstructs an image $\mathbf{x}_i$ from latent code $\mathbf{t}_i$. Optimizing the variational lower bound w.r.t. parameters $\boldsymbol{\theta}$ and $\boldsymbol{\phi}$ can therefore be regarded as training a probabilistic autoencoder or *variational autoencoder* (VAE)<sup>[1]</sup>.
#
# In this context, the first term on the RHS of Eq. $(5)$ can be interpreted as expected negative *reconstruction error*. The second term is a *regularization term* that encourages the variational distribution to be close to the prior over latent variables. If the regularization term is omitted, the variational distribution would collapse to a delta function and the variational auto-encoder would degenerate to a "usual" deterministic autoencoder.
#
# ### Implementation
#
# For implementing a variational autoencoder, we make the following choices:
#
# - The variational distribution $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ is a multivariate Gaussian $\mathcal{N}(\mathbf{t}_i \lvert \boldsymbol\mu(\mathbf{x}_i, \boldsymbol{\phi}), \boldsymbol\sigma^2(\mathbf{x}_i, \boldsymbol{\phi}))$ with a diagonal covariance matrix where mean vector $\boldsymbol\mu$ and the covariance diagonal $\boldsymbol\sigma^2$ are functions of $\mathbf{x}_i$ and $\boldsymbol{\phi}$. These functions are implemented as neural network and learned during optimization of the lower bound w.r.t. $\boldsymbol{\phi}$. After reparameterization, samples from $q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi})$ are obtained via the deterministic function $g(\boldsymbol\epsilon, \mathbf{x}_i, \boldsymbol{\phi}) = \boldsymbol\mu(\mathbf{x}_i, \boldsymbol{\phi}) + \boldsymbol\sigma(\mathbf{x}_i, \boldsymbol{\phi}) \odot \boldsymbol\epsilon$ and an auxiliary distribution $p(\boldsymbol\epsilon) = \mathcal{N}(\boldsymbol\epsilon \lvert \mathbf{0}, \mathbf{I})$.
#
# - The conditional distribution $p(\mathbf{x}_i \lvert \mathbf{t}_i, \boldsymbol{\theta})$ is a multivariate Bernoulli distribution $\text{Ber}(\mathbf{x}_i \lvert \mathbf{k}(\mathbf{t}_i, \boldsymbol{\theta}))$ where parameter $\mathbf{k}$ is a function of $\mathbf{t}_i$ and $\boldsymbol{\theta}$. This distribution models the binary training data i.e. monochrome (= binarized) MNIST images in our example. Function $\mathbf{k}$ computes for each pixel its expected value. It is also implemented as neural network and learned during optimization of the lower bound w.r.t. $\boldsymbol{\theta}$. Taking the (negative) logarithm of $\text{Ber}(\mathbf{x}_i \lvert \mathbf{k}(\mathbf{t}_i, \boldsymbol{\theta}))$ gives a sum over pixel-wise binary cross entropies as shown in Eq. $(12)$
#
# - Prior $p(\mathbf{t}_i \lvert \boldsymbol{\theta})$ is a multivariate Gaussian distribution $\mathcal{N}(\mathbf{t}_i \lvert \mathbf{0}, \mathbf{I})$ with zero mean and unit covariance matrix. With the chosen functional forms of the prior and the variational distribution $q$, $\mathrm{KL}(q(\mathbf{t}_i \lvert \mathbf{x}_i, \boldsymbol{\phi}) \mid\mid p(\mathbf{t}_i \lvert \boldsymbol{\theta}))$ can be integrated analytically to $-{1 \over 2} \sum_{d=1}^D (1 + \log \sigma_{i,d}^2 - \mu_{i,d}^2 - \sigma_{i,d}^2)$ where $D$ is the dimensionality of the latent space and $\mu_{i,d}$ and $\sigma_{i,d}$ is the $d$-th element of $\boldsymbol\mu(\mathbf{x}_i, \boldsymbol{\phi})$ and $\boldsymbol\sigma(\mathbf{x}_i, \boldsymbol{\phi})$, respectively.
#
# Using these choices and setting $L = 1$, the variational lower bound for a single image $\mathbf{x}_i$ can be approximated as
#
# $$
# \mathcal{L}(\boldsymbol{\theta}, q; \mathbf{x}_i) \approx
# - \sum_c \left(x_{i,c} \log k_{i,c} + (1 - x_{i,c}) \log (1 - k_{i,c})\right) + {1 \over 2} \sum_d (1 + \log \sigma_{i,d}^2 - \mu_{i,d}^2 - \sigma_{i,d}^2)
# \tag{12}
# $$
#
# where $x_{i,c}$ is the value of pixel $c$ in image $\mathbf{x}_i$ and $k_{i,c}$ its expected value. The negative value of the lower bound is used as loss during training. The following figure outlines the architecture of the variational autoencoder.
#
# 
#
# The definitions of the encoder and decoder neural networks were taken from \[2\]. Here, the encoder computes the logarithm of the variance, instead of the variance directly, for reasons of numerical stability.
# +
from tensorflow.keras import layers
from tensorflow.keras.models import Model
def create_encoder(latent_dim):
    """
    Creates a convolutional encoder for MNIST images.

    Args:
        latent_dim: dimensionality of latent space.

    Returns:
        Keras model mapping a (28, 28, 1) image to the mean and
        log variance of the variational distribution q.
    """
    # Fixed `encoder_iput` -> `encoder_input` (local-name typo).
    encoder_input = layers.Input(shape=(28, 28, 1))
    x = layers.Conv2D(32, 3, padding='same', activation='relu')(encoder_input)
    # Strided convolution downsamples 28x28 -> 14x14.
    x = layers.Conv2D(64, 3, padding='same', activation='relu', strides=(2, 2))(x)
    x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)
    x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)
    x = layers.Flatten()(x)
    x = layers.Dense(32, activation='relu')(x)
    # The encoder outputs the log variance (not the variance directly)
    # for numerical stability, as noted in the text above.
    q_mean = layers.Dense(latent_dim)(x)
    q_log_var = layers.Dense(latent_dim)(x)
    return Model(encoder_input, [q_mean, q_log_var], name='encoder')
def create_decoder(latent_dim):
    """
    Creates a convolutional decoder for MNIST images.

    Args:
        latent_dim: dimensionality of latent space.
    """
    decoder_input = layers.Input(shape=(latent_dim,))
    # 12544 = 14 * 14 * 64, reshaped below into a feature map.
    x = layers.Dense(12544, activation='relu')(decoder_input)
    x = layers.Reshape((14, 14, 64))(x)
    # Transposed convolution upsamples 14x14 -> 28x28.
    x = layers.Conv2DTranspose(32, 3, padding='same', activation='relu', strides=(2, 2))(x)
    # Sigmoid output: expected pixel values (Bernoulli probabilities k).
    k = layers.Conv2D(1, 3, padding='same', activation='sigmoid')(x)
    return Model(decoder_input, k, name='decoder')
# -
# These definitions are used to implement a `VariationalAutoencoder` model class.
# +
import tensorflow as tf
class VariationalAutoencoder(Model):
    """Keras VAE composed of the convolutional encoder/decoder above."""

    def __init__(self, latent_dim=2):
        """
        Creates a variational autoencoder Keras model.

        Args:
            latent_dim: dimensionality of latent space.
        """
        super().__init__()
        self.latent_dim = latent_dim
        self.encoder = create_encoder(latent_dim)
        self.decoder = create_decoder(latent_dim)

    def encode(self, x):
        """
        Computes variational distribution q statistics from
        input image x.

        Args:
            x: input image, shape (M, 28, 28, 1).

        Returns:
            Mean, shape (M, latent_dim), and log variance,
            shape (M, latent_dim), of multivariate Gaussian
            distribution q.
        """
        q_mean, q_log_var = self.encoder(x)
        return q_mean, q_log_var

    def sample(self, q_mean, q_log_var):
        """
        Samples latent code from variational distribution q.

        Args:
            q_mean: mean of q, shape (M, latent_dim).
            q_log_var: log variance of q, shape (M, latent_dim).

        Returns:
            Latent code sample, shape (M, latent_dim).
        """
        # Reparameterization trick: t = mean + sigma * eps, eps ~ N(0, I);
        # exp(0.5 * log_var) recovers the standard deviation sigma.
        eps = tf.random.normal(shape=q_mean.shape)
        return q_mean + tf.exp(q_log_var * .5) * eps

    def decode(self, t):
        """
        Computes expected pixel values (= probabilities k) from
        latent code t.

        Args:
            t: latent code, shape (M, latent_dim).

        Returns:
            Probabilities k of multivariate Bernoulli
            distribution p, shape (M, 28, 28, 1).
        """
        k = self.decoder(t)
        return k

    def call(self, x):
        """
        Computes expected pixel values (= probabilities k) of a
        reconstruction of input image x.

        Args:
            x: input image, shape (M, 28, 28, 1).

        Returns:
            Probabilities k of multivariate Bernoulli
            distribution p, shape (M, 28, 28, 1).
        """
        # Full encode -> sample -> decode pass (stochastic reconstruction).
        q_mean, q_log_var = self.encode(x)
        t = self.sample(q_mean, q_log_var)
        return self.decode(t)
# -
# The `variational_lower_bound` function is implemented using Eq. $(12)$ and Eq. $(11)$ but instead of estimating the lower bound for the full dataset it is normalized by the dataset size $N$.
# +
from tensorflow.keras.losses import binary_crossentropy
def variational_lower_bound(model, x):
    """
    Computes normalized variational lower bound.

    Args:
        model: VariationalAutoencoder instance.
        x: input images, shape (M, 28, 28, 1)

    Returns:
        Variational lower bound averaged over M
        samples in batch and normalized by dataset
        size N.
    """
    q_mean, q_log_var = model.encode(x)
    t = model.sample(q_mean, q_log_var)
    x_rc = model.decode(t)
    # Expected negative reconstruction error (sum of pixel-wise
    # binary cross entropies, Eq. 12, first term).
    rc_error = -tf.reduce_sum(binary_crossentropy(x, x_rc), axis=[1, 2])
    # Regularization term (KL divergence, Eq. 12, second term; analytic
    # for a diagonal Gaussian q against a standard-normal prior).
    kl_div = 0.5 * tf.reduce_sum(1 + q_log_var \
                                 - tf.square(q_mean) \
                                 - tf.exp(q_log_var), axis=-1)
    # Average over mini-batch (of size M)
    return tf.reduce_mean(rc_error + kl_div)
# -
# The training procedure uses the negative value of the variational lower bound as loss to compute stochastic gradient estimates. These are used by the `optimizer` to update model parameters $\boldsymbol\theta$ and $\boldsymbol\phi$. The normalized variational lower bound of the test set is computed at the end of each epoch and printed.
# +
@tf.function
def train_step(model, optimizer, x):
    """Trains VAE on mini-batch x using optimizer.

    Args:
        model: VariationalAutoencoder instance.
        optimizer: tf.keras optimizer applying the gradient update.
        x: mini-batch of input images, shape (M, 28, 28, 1).

    Returns:
        Negative variational lower bound (the loss) for the batch.
    """
    with tf.GradientTape() as tape:
        # Compute neg. variational lower bound as loss
        loss = -variational_lower_bound(model, x)
    # Compute gradients from neg. variational lower bound
    gradients = tape.gradient(loss, model.trainable_variables)
    # Apply gradients to model parameters theta and phi
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
def train(model, optimizer, ds_train, ds_test, epochs):
    """Trains VAE on training dataset ds_train using
    optimizer for given number of epochs.

    Args:
        model: VariationalAutoencoder instance.
        optimizer: tf.keras optimizer used for parameter updates.
        ds_train: training dataset yielding image batches.
        ds_test: test dataset yielding image batches.
        epochs: number of training epochs.
    """
    for epoch in range(1, epochs + 1):
        for x in ds_train:
            train_step(model, optimizer, x)
        # Evaluate the normalized VLB on the test set after each epoch.
        vlb_mean = tf.keras.metrics.Mean()
        for x in ds_test:
            vlb_mean(variational_lower_bound(model, x))
        vlb = vlb_mean.result()
        print(f'Epoch: {epoch:02d}, Test set VLB: {vlb:.2f}')
# -
# Since the data are modelled with a multivariate Bernoulli distribution, the MNIST images are first binarized to monochrome images so that their pixel values are either 0 or 1. The training batch size is set to 100 to get reliable stochastic gradient estimates.
# +
from tensorflow.keras.datasets import mnist
# y_test is kept for coloring the latent-space scatter plot below.
(x_train, _), (x_test, y_test) = mnist.load_data()
# Binarize pixels to {0, 1} so they match the Bernoulli model.
x_train = (x_train > 127.5).astype('float32') # binarize
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = (x_test > 127.5).astype('float32') # binarize
x_test = x_test.reshape(-1, 28, 28, 1)
# Batch size 100 gives reliable stochastic gradient estimates (see text).
batch_size = 100
ds_train = tf.data.Dataset.from_tensor_slices(x_train).shuffle(x_train.shape[0]).batch(batch_size)
ds_test = tf.data.Dataset.from_tensor_slices(x_test).shuffle(x_test.shape[0]).batch(batch_size)
# -
# We choose a two-dimensional latent space so that it can be easily visualized. Training the variational autoencoder with `RMSProp` as optimizer at a learning rate of `1e-3` for 20 epochs gives already reasonable results. This takes a few minutes on a single GPU.
# Two-dimensional latent space so it can be visualized directly.
vae = VariationalAutoencoder(latent_dim=2)
# `lr` is deprecated in tf.keras optimizers; `learning_rate` is the
# supported keyword with identical semantics.
opt = tf.keras.optimizers.RMSprop(learning_rate=1e-3)

train(model=vae,
      optimizer=opt,
      ds_train=ds_train,
      ds_test=ds_test,
      epochs=20)
# The following figure shows the locations of test set images in latent space. Here, the mean vectors of the variational distributions are plotted. The latent space is organized by structural similarity of digits i.e. structurally similar digits have a smaller distance in latent space than structurally dissimilar digits. For example, digits 4 and 9 usually differ only by a horizontal bar or curve at the top of the image and are therefore in proximity.
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Compute mean vectors of variational distributions (= latent code locations)
q_test_mean, _ = vae.encode(x_test)
# Use a discrete colormap
cmap = plt.get_cmap('viridis', 10)
# Plot latent code locations colored by the digit value on input images
im = plt.scatter(q_test_mean[:, 0], q_test_mean[:, 1], c=y_test, cmap=cmap,
                 vmin=-0.5, vmax=9.5, marker='x', s=0.2)
# vmin/vmax offset by 0.5 centers each colorbar tick on its digit class.
plt.colorbar(im, ticks=range(10));
# -
# When we sample locations in latent space (with density proportional to the prior density over latent variables) and decode these locations we can get a nice overview how MNIST digits are organized by structural similarity in latent space. Each digit is plotted with its expected pixel values k instead of using a sample from the corresponding multivariate Bernoulli distribution.
# +
import numpy as np
from scipy.stats import norm
# Number of samples per latent space dimension
samples_per_dim = 20
# Size of plotted digits
digit_size = 28
# Sampling grid coordinates. Grid point density is
# proportional to density of latent variable prior:
# the inverse Gaussian CDF (ppf) maps a uniform grid onto the prior.
grid_x = norm.ppf(np.linspace(0.05, 0.95, samples_per_dim))
grid_y = norm.ppf(np.linspace(0.05, 0.95, samples_per_dim))
figure = np.zeros((digit_size * samples_per_dim,
                   digit_size * samples_per_dim))
for i, x in enumerate(grid_x):
    for j, y in enumerate(grid_y):
        # Decode latent location (x, y) into expected pixel values k
        # and place the digit tile into the composite image.
        t_ij = np.array([[x, y]])
        x_ij = vae.decode(t_ij)
        digit = x_ij.numpy().reshape(digit_size, digit_size)
        figure[j * digit_size: (j + 1) * digit_size,
               i * digit_size: (i + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r');
# -
# ## References
#
# \[1\] Diederik P. Kingma, Max Welling. [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114).
# \[2\] François Chollet. [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
| latent_variable_models_part_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="BjOWQM_KATxd"
# **What Does The Data Reveal About Dating? (Dighum100 project)**
#
# This jupyter notebook studies a data set retrieved from Kaggle that recorded responses of participants at a series of speed-dating events. Specifically, I will be investigating the correlation between major of choice and preferred attributes of a partner. I will use exploratory data analysis to answer the following questions:
#
# * Which majors care most about attractiveness?
# * Which majors care most about intelligence?
# * Is there evidence that certain majors systematically prefer a given attribute?
#
# **Process/Method:**
#
# 1. Import relevant packages for project
# 2. Import csv dataset and create data frame using pandas
# 3. Utilize numpy to run regressions to determine correlations
# 4. Utilize matplotlib to create visualizations of analysis
#
#
#
#
#
# + [markdown] id="ui3A2-UE5DQd"
# The cell below is used to import all packages that will be called upon in this notebook.
# + colab={"base_uri": "https://localhost:8080/"} id="kqgRJdSLFNx9" executionInfo={"status": "ok", "timestamp": 1622748798713, "user_tz": 420, "elapsed": 1342, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12126947298807884464"}} outputId="85d89a07-2ad0-4006-92e9-4d61c64bac34"
#Import relevant packages for project
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datascience import *
# %matplotlib inline
# + [markdown] id="-8S-gKF14N2s"
# Upload dataset into the files folder of this colab notebook (found on the left side of the screen), then use pandas to create dataframe.
# + colab={"base_uri": "https://localhost:8080/", "height": 694} id="8l3n--qhHjWf" executionInfo={"status": "ok", "timestamp": 1622750123692, "user_tz": 420, "elapsed": 471, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12126947298807884464"}} outputId="214efe6d-7e71-4632-d74d-3b53bd2a9514"
# Create a dataframe using pandas.
# NOTE(review): encoding='unicode_escape' is presumably a workaround for
# non-UTF-8 bytes in the CSV — confirm against the source file.
dating = pd.read_csv('Speed Dating Data.csv', encoding= 'unicode_escape')
dating
# + [markdown] id="J9qnoJek5ok9"
# Next, we will run a series of regressions below to identifying correlations between majors and attributes.
# + id="EEUSJ6lc56FE"
# TODO: numpy will be used for the regressions here.
# (The original cell contained the bare text "numpy used for regression",
# which is a SyntaxError; converted to a comment so the notebook runs.)
# + [markdown] id="VyszsnqK59Iw"
# Lastly, use matplot to create visualizations.
# + id="NCyNCyTy6Hbp"
# TODO: matplotlib will be used for the visualizations here.
# (The original cell contained the bare text "matplot used for visualizations",
# which is a SyntaxError; converted to a comment so the notebook runs.)
| jupyter notebook dighum100 week 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Jt1IHW8KJJmf"
# #Libreries import
# + id="HXCjXrtz19MK" executionInfo={"status": "ok", "timestamp": 1619015599472, "user_tz": -120, "elapsed": 2029, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
import pandas as pd
import numpy as np
import time
import concurrent.futures
import math
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] id="Xy5xLbZRJExu"
# #Dataset import
# + id="IP6GLtL_pQ-M" executionInfo={"status": "ok", "timestamp": 1619015603467, "user_tz": -120, "elapsed": 6016, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# !pip install -U -q PyDrive
# + id="H_Vas2Ovpowk" executionInfo={"status": "ok", "timestamp": 1619015603468, "user_tz": -120, "elapsed": 6012, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# + id="GyXbPTmmpqAd" executionInfo={"status": "ok", "timestamp": 1619015614539, "user_tz": -120, "elapsed": 17078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Authenticate the Colab user and build an authorized PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# + id="HfLihr06pq_V" executionInfo={"status": "ok", "timestamp": 1619015620836, "user_tz": -120, "elapsed": 23370, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Fetch the dataset CSV from Google Drive by file id, then load it.
# 'Unnamed: 0' is the CSV's saved index column, dropped after reading.
drive.CreateFile({'id':'1fWSZsu_sn5N0SBsmEJX-CnDYAsAKRdbu'}).GetContentFile('dataset2_X_billboard_popularity_2.0.csv')
df = pd.read_csv("dataset2_X_billboard_popularity_2.0.csv").drop('Unnamed: 0',axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="CYNSh1D-2gWW" executionInfo={"status": "ok", "timestamp": 1619015637285, "user_tz": -120, "elapsed": 970, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="e802d05f-d6c7-4a42-e0d9-c97ee4f81918"
df.info()
# + [markdown] id="vwYEU7VPKJOU"
# #Hyperparameters tuning
# + [markdown] id="VD4zR0BjKRb_"
#
#
# 1. Month encoding
# * cos,sin
# * int
# * categorical
# 2. Target
# * hit
# * weeks
# * int
# * y^n con 0<n<1
# * log(1+y)
# * categorical
# 3. New features
# * sì
# * intorno ?
# * no
# 4. Modello Machine Learning
# 5. Modello Deep Learning
# 6. Approccio
# * Past hits
# * Present hits
# * Future hits
#
#
# + [markdown] id="U3S2g0X7g9RH"
# ##weeks encoding
# + id="XOnBQmymghxm" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1619015722106, "user_tz": -120, "elapsed": 2261, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="d96733ae-c51d-4d0f-ff14-c9e9af6814fb"
# Visualize the log(1 + weeks * k) target transform over the observed
# 'weeks' range (0..86) to see how it compresses large values.
x = list(range(0,87))
# (removed a dead `y = []` that was immediately overwritten by the comprehension)
y = [np.log(1+i*1000) for i in x]
plt.figure(figsize=(40,10))
plt.xticks(list(range(1,87)))
plt.xlim(0,87)
plt.plot(x,y)
# Step between the first two encoded values vs. the whole remaining span:
# shows the transform's steep early growth and later flattening.
print("y[1] - y[0] = " + str(y[1] - y[0]))
print("y[86] - y[1] = " + str(y[86] - y[1]))
# + [markdown] id="-OgVPPFohLCV"
# ##PARAMETERS
# + id="-dlpzIjrKOLT" executionInfo={"status": "ok", "timestamp": 1619015722107, "user_tz": -120, "elapsed": 2255, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Weight k in the 'weeks' log-encoding: weeks_enc = log(1 + weeks * k).
weeks_encoding_parameter = 1000
new_features_params = (1,0) # (years before, years after) window around each release year
# Candidate (years before, years after) windows for the *_new_i feature sets.
new_features_params_array = [(0,0), (1,0), (4,0), (9,0)]
# If True, convert 'loudness' from dB (log scale) to linear before normalizing.
linear_loudness = False # True
# + [markdown] id="JGKGEWuYJTAL"
# #Functions
# + id="1E5er83wJbzU" executionInfo={"status": "ok", "timestamp": 1619015722107, "user_tz": -120, "elapsed": 2250, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
def get_new_features(row,df_mean,audio_features,categorical_features):
    """Distance-from-the-yearly-norm features for one song row.

    # Arguments
        row: DataFrame row with 'year_YYYY', 'id' and all feature columns.
        df_mean: per-year reference table: a 'year' column plus, for each
            audio feature, its windowed mean and, for each categorical
            feature, its windowed mode.
        audio_features: list of continuous feature column names.
        categorical_features: list of categorical feature column names.
    # Returns
        list: squared deviation from the yearly mean for each audio feature,
        then a 0/1 flag (0 = matches the yearly mode) for each categorical one.
    """
    new_features = []
    # Hoist the (loop-invariant) reference row for this song's year.
    year_row = df_mean[df_mean.year == row.year_YYYY]
    for x in audio_features:
        x_mean = year_row[x].iloc[0]
        new_features.append(np.power((row[x] - x_mean),2))
    for w in categorical_features:
        w_mode = year_row[w].iloc[0]
        # BUGFIX: the original compared the mode VALUE against the feature
        # NAME string (`w_mode == w`), which is never true, so the flag was
        # always 1. Compare the row's own value against the yearly mode.
        if row[w] == w_mode:
            new_features.append(0)
        else:
            new_features.append(1)
    print("Completed for id:" + str(row.id) + ", year:" + str(row.year_YYYY))
    return new_features
# + id="oTgyIbQliCoT" executionInfo={"status": "ok", "timestamp": 1619015722108, "user_tz": -120, "elapsed": 2246, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
def log_weeks(col, n):
    """Log-compress a weeks count: return log(1 + col * n)."""
    scaled = 1 + col * n
    return np.log(scaled)
# + id="K-_b44m1Xy86" executionInfo={"status": "ok", "timestamp": 1619015722108, "user_tz": -120, "elapsed": 2241, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
def get_season(date):
    """Map a datetime to a season code: 1=winter, 2=spring, 3=summer, 4=autumn.

    Season boundaries are built as day-level date ranges within the date's
    own year; anything outside the three explicit windows is winter.
    """
    year = str(date.year)
    windows = [
        (2, pd.date_range(start='21/03/'+year, end='20/06/'+year)),  # spring
        (3, pd.date_range(start='21/06/'+year, end='22/09/'+year)),  # summer
        (4, pd.date_range(start='23/09/'+year, end='20/12/'+year)),  # autumn
    ]
    for code, window in windows:
        if date in window:
            return code
    return 1  # winter
# + [markdown] id="Z_-Bgk3ZJdoQ"
# #Pre-processing
# + id="bLoiFaeXVrl2" executionInfo={"status": "ok", "timestamp": 1619015722485, "user_tz": -120, "elapsed": 2611, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Cast the 'hit', 'weeks', 'key' and 'mode' columns to int.
for x in ['hit', 'weeks', 'key', 'mode']:
    df[x] = df[x].apply(int)
# + id="TZ-O8CaAEGMJ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619015722485, "user_tz": -120, "elapsed": 2606, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="18017bc0-1a8d-4e71-e3b0-fa7f156bc638"
df.id.count()
# + id="2smCHP5DgJbu" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1619015722486, "user_tz": -120, "elapsed": 2601, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="3e00d6d2-4426-46d8-fa4d-c1e37d199c36"
df.head()
# + [markdown] id="vqjqfV2EhwyN"
# **FEATURES CONTINUE**
#
# Acustiche:
# * valence
# * acousticness
# * danceability
# * duration_ms --> normalizzare
# * energy
# * instrumentalness
# * liveness
# * loudness (--> convertire in lineare?) --> normalizzare
# * speechiness
# * tempo --> normalizzare
#
# --> aggiungere new_feature per ognuna --> normalizzare
#
#
# Temporali:
# * year --> normalizzare
# * month --> estrarre da 'release_date' --> codificare in cos,sin --> normalizzare
# * season --> estrarre da 'release_date' --> one-hot encoding
#
#
# Bonus:
# * past_popularity --> normalizzare
# * [popularity --> eliminare]
#
# -- -- -- -- -- -- -- --
#
#
#
# **FEATURES CATEGORICHE**
#
# * explicit --> one-hot encoding
# * key --> one-hot encoding
# * mode --> one-hot encoding
#
# --> aggiungere new_feature per ognuna --> one-hot encoding
#
#
#
# **TARGET**
#
# * hit
# * weeks --> codificare come log(1+y) oppure y^n con 0<n<1
#
# + id="1ojVEYydBwAv" executionInfo={"status": "ok", "timestamp": 1619015722487, "user_tz": -120, "elapsed": 2596, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Column groups used throughout the feature engineering below.
audio_features = ['valence','acousticness','danceability','duration_ms','energy','instrumentalness','liveness','loudness','speechiness','tempo']
categorical_features = ['explicit','key','mode']
# + id="X5JAJfxPoxvH" executionInfo={"status": "ok", "timestamp": 1619015722952, "user_tz": -120, "elapsed": 3056, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Parse 'release_date' into datetimes (YYYY-MM-DD; exact=False tolerates extra text).
df.release_date = pd.to_datetime(df.release_date,format="%Y-%m-%d",exact=False)
# Extract a 'month' feature from 'release_date'.
month_array = df.release_date.apply(lambda x: x.month)
df.insert(2,'month', month_array)
# Cyclic (cos, sin) encoding of 'month' so December and January end up close.
# NOTE(review): the period used is df['month'].max(), which equals 12 only if
# December actually occurs in the data — confirm.
df['cos(month)'] = np.cos(2 * math.pi * df['month'] / df['month'].max())
df['sin(month)'] = np.sin(2 * math.pi * df['month'] / df['month'].max())
# + id="QfxrNf93WGjh" executionInfo={"status": "ok", "timestamp": 1619015802084, "user_tz": -120, "elapsed": 82183, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Derive the 'season' feature (1..4) from 'release_date'.
season_array = df.release_date.apply(get_season)
df.insert(3,'season',season_array)
# + id="xOSWesPx9jsl" executionInfo={"status": "ok", "timestamp": 1619015802090, "user_tz": -120, "elapsed": 82183, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Optionally convert 'loudness' from dB (logarithmic) to linear: 10**(dB/10).
if (linear_loudness == True):
    df.loudness = 10**(df.loudness/10)
# + id="xNX5OSRpYgOm" executionInfo={"status": "ok", "timestamp": 1619015802091, "user_tz": -120, "elapsed": 82179, "user": {"displayName": "<NAME>cchi", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Drop the 'popularity' column (presumably to avoid target leakage — only
# the past-popularity features are kept; confirm).
df = df.drop('popularity',axis=1)
# + id="E61VqOfYdxOA" executionInfo={"status": "ok", "timestamp": 1619015802091, "user_tz": -120, "elapsed": 82174, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# MIN-MAX NORMALIZATION
# Keep an un-normalized copy of 'year' ...
df['year_YYYY'] = df['year']
# ... and of 'month'.
df['month_mm'] = df['month']
to_norm = ['year', 'month', 'duration_ms','loudness','tempo','cos(month)','sin(month)', 'past_pop_n_hit', 'past_pop_n_weeks']
df[to_norm] = (df[to_norm] - df[to_norm].min())/(df[to_norm].max()-df[to_norm].min())
# + id="80b0sQhUvCri" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1619015802092, "user_tz": -120, "elapsed": 82169, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="a4f672e2-e233-402e-ae7f-7224806ee079"
df.head()
# + [markdown] id="9vPGIMvEKlUh"
# # NEW FEATURES
# + id="XXduAmSCFvF2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619020559248, "user_tz": -120, "elapsed": 4839320, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="7c1840a8-8d3b-47c0-b625-f476ea4c1d87"
# For each candidate window in new_features_params_array, build a per-year
# reference table (mean of the audio features, mode of the categorical ones,
# computed over the window of years around each year) and derive
# distance-from-the-norm "*_new_i" features from it via get_new_features.
years = df.year_YYYY.unique() # NB: uses 'year_YYYY', the un-normalized year column
for i, params in enumerate(new_features_params_array):
    mean_features = []
    for year in years:
        # Window start year (clamped to the data range).
        year_start = max(year - params[0], years.min())
        # Window end year (clamped to the data range).
        year_end = min(year + params[1], years.max())
        # Slice the rows whose year falls inside the window.
        mask_1 = df.year_YYYY <= year_end
        mask_2 = df.year_YYYY >= year_start
        sub_df = df[mask_1 & mask_2]
        mean_features_row = [year]
        # Mean of each continuous audio feature over the window ...
        for x in audio_features:
            mean_features_row.append(sub_df[x].mean())
        # ... and mode of each categorical feature.
        for w in categorical_features:
            mean_features_row.append(sub_df[w].mode()[0])
        mean_features.append(mean_features_row)
    df_mean = pd.DataFrame(mean_features,columns=['year']+audio_features+categorical_features)
    new_features_list = ['valence_new_'+str(i),'acousticness_new_'+str(i),'danceability_new_'+str(i),'duration_ms_new_'+str(i),'energy_new_'+str(i),'instrumentalness_new_'+str(i),'liveness_new_'+str(i),'loudness_new_'+str(i),'speechiness_new_'+str(i),'tempo_new_'+str(i),'explicit_new_'+str(i),'key_new_'+str(i),'mode_new_'+str(i)]
    df[new_features_list] = df.apply(get_new_features, args=(df_mean,audio_features,categorical_features), result_type='expand', axis=1)
    # Min-max normalize the new continuous features.
    to_norm = ['valence_new_'+str(i),'acousticness_new_'+str(i),'danceability_new_'+str(i),'duration_ms_new_'+str(i),'energy_new_'+str(i),'instrumentalness_new_'+str(i),'liveness_new_'+str(i),'loudness_new_'+str(i),'speechiness_new_'+str(i),'tempo_new_'+str(i)]
    df[to_norm] = (df[to_norm] - df[to_norm].min())/(df[to_norm].max()-df[to_norm].min())
    # Cast the new categorical indicator columns to int.
    for x in ['explicit_new_'+str(i), 'key_new_'+str(i), 'mode_new_'+str(i)]:
        df[x] = df[x].apply(int)
# + id="sySFkByBWqDb" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1619020559255, "user_tz": -120, "elapsed": 4839322, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="ea64ca86-1d35-4c9c-cbb0-0f4a902f11da"
df.head()
# + [markdown] id="wwMJ1gYWTSHS"
# #Codifica 'weeks'
# + id="oytuwM5ZToJF" executionInfo={"status": "ok", "timestamp": 1619020559255, "user_tz": -120, "elapsed": 4839317, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
df['weeks_enc'] = df.weeks.apply(log_weeks,args=[weeks_encoding_parameter])
# + [markdown] id="Cy-ECxLLxB_A"
# #Export (per analysis/visualisation)
# + id="Upd--X5QxKRl" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619022261044, "user_tz": -120, "elapsed": 6541098, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="b0337958-d144-4ccd-ade7-e377cf9a85af"
# Export the dataset (before one-hot encoding) for analysis/visualisation.
from google.colab import drive
# Mount Google Drive into the Colab runtime.
drive.mount('/content/drive',force_remount=True)
df.to_csv('/content/drive/My Drive/Colab Notebooks/datasets/dataset_final_4.0_no_ohe.csv')
# + [markdown] id="NG9nb-eapJMR"
# #One-Hot-Encoding
# + id="g8is3ACvpFIr" executionInfo={"status": "ok", "timestamp": 1619022261045, "user_tz": -120, "elapsed": 6541093, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# One-hot encode the multi-valued categoricals ('explicit' and 'mode' are
# presumably already binary and are left as-is — confirm).
to_oh_encode = ['key', 'season']
df = pd.get_dummies(df,columns=to_oh_encode)
# + [markdown] id="YC3ywCS9dld5"
# #Re-order
# + id="y6MIcFo6p5ud" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1619022261046, "user_tz": -120, "elapsed": 6541087, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="a057bf8a-99a6-4405-f91d-6ea01a46d0c8"
df.head()
# + id="rIBUsxcIqGAh" executionInfo={"status": "ok", "timestamp": 1619022261047, "user_tz": -120, "elapsed": 6541078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
# Reorder columns: identifiers, time features, audio features, the four
# windowed "_new_i" feature sets, categoricals, seasons, then the targets.
# The list is generated programmatically; the resulting order is identical
# to the previous hand-written literal.
_audio = ['valence', 'acousticness', 'danceability', 'duration_ms', 'energy',
          'instrumentalness', 'liveness', 'loudness', 'speechiness', 'tempo']
_cols = ['id', 'name', 'artists', 'release_date', 'year_YYYY', 'month_mm',
         'month', 'year', 'cos(month)', 'sin(month)']
_cols += _audio
for _i in range(4):
    _cols += [_f + '_new_' + str(_i) for _f in _audio]
_cols += ['explicit'] + ['key_' + str(_k) for _k in range(12)] + ['mode']
for _i in range(4):
    _cols += ['explicit_new_' + str(_i), 'key_new_' + str(_i), 'mode_new_' + str(_i)]
_cols += ['season_' + str(_s) for _s in range(1, 5)]
_cols += ['hit', 'weeks', 'weeks_enc', 'past_pop_n_hit', 'past_pop_n_weeks']
df_ordered = df[_cols]
# + id="gQbVMNUhrEw5" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1619022261601, "user_tz": -120, "elapsed": 6541626, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}} outputId="07724eea-fc6b-422d-fc61-b0288af08c8f"
df_ordered.head()
# + [markdown] id="MYes-dZRDHW0"
# #Export
# + id="tcj0RLyArFoJ" executionInfo={"status": "ok", "timestamp": 1619022276326, "user_tz": -120, "elapsed": 6556344, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhsE3N_0rvi4i7PTUy9okHWPKo4CtX-Vq2O3tR8K8M=s64", "userId": "14558454588425913231"}}
df_ordered.to_csv('/content/drive/My Drive/Colab Notebooks/datasets/dataset_final_4.0.csv')
| feature_extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2nd Order Optimization
#
# ## Newtons Method
#
# ### Part 2
# ## Newton's method optimization
# +
from sympy import *
from sympy.parsing import sympy_parser as spp
import numpy as np
import matplotlib.pyplot as plt
# -
# ### Hyperparams
# +
# Plot window and optimization hyper-parameters.
plot_from = -7.0
plot_to = 7.0
plot_step = 0.1
target_precision = 0.3  # stop when within this distance of the known optimum
sd_iters = 100000       # max steepest-descent iterations
display_freq = sd_iters//10  # progress print interval
# Optimization variables as sympy symbols (x1, x2).
m = Matrix(symbols('x1 x2'))
# -
def dfdx(x, g):
    """Evaluate the symbolic gradient list `g` at the point `x`.

    # Arguments
        x: sequence with the numeric values for (x1, x2).
        g: list of sympy expressions in the global symbols m = (x1, x2).
    # Returns
        list of floats: each gradient component evaluated at x.
    """
    # Iterate the expressions directly instead of indexing via range(len(g)).
    return [float(gi.subs(m[0], x[0]).subs(m[1], x[1])) for gi in g]
def sd(obj,x_start,x_result,alpha=0.0002):
    '''
    Steepest Descent - 1st Order Optimization

    Iterates x <- x - alpha * grad(f)(x) from x_start until within
    target_precision of x_result (the known optimum) or sd_iters steps,
    then plots the path in green on the current figure.

    obj: sympy expression in the global symbols m = (x1, x2).
    x_start: starting point [x1, x2].
    x_result: np.array with the known optimum (used only for the stop test).
    alpha: fixed step size (no line search).
    '''
    print('Steepest Descent - Start')
    # Symbolic gradient of the objective.
    g = [diff(obj,i) for i in m]
    # Initialize the list of iterates with the start point.
    xs = [[0.0,0.0]]
    xs[0] = x_start
    #Get gradient at start location (dx/df or grad(f))
    iter_s = 0
    while np.linalg.norm(xs[-1] - x_result) > target_precision:
        if(iter_s % display_freq == 0):
            print ('Steepest Descent - Distance: ',np.linalg.norm(xs[-1] - x_result))
        # Numeric gradient at the current iterate.
        gs = dfdx(xs[iter_s],g)
        #Compute search direction and magnitude (dx)
        #With dx = -grad but no line searching
        xs.append(xs[iter_s] - np.dot(alpha,gs))
        if(iter_s % display_freq == 0):
            print('Last xs:',xs[-1])
        iter_s += 1
        if iter_s > sd_iters:
            break  # give up after sd_iters steps
    print('Steepest Descent - Result distance: ', np.linalg.norm(xs[-1] - x_result))
    xs = np.array(xs)
    # Plot the descent path in green.
    plt.plot(xs[:,0],xs[:,1],'g-o')
def nm(obj,x_start,x_result):
    '''
    Newton's method - 2nd order optimization

    Iterates x <- x - H(x)^-1 * grad(f)(x) from x_start until within
    target_precision of x_result, then plots the path in black.

    obj: sympy expression in the global symbols m = (x1, x2).
    x_start: starting point [x1, x2].
    x_result: np.array with the known optimum (used only for the stop test).

    NOTE(review): unlike sd(), there is no max-iteration guard — this loops
    forever if the iteration does not converge.
    '''
    print ('Newton\'s method - Start')
    #Gradient
    g = [diff(obj,i) for i in m]
    #Hessian Matrix
    H = Matrix([[diff(g[j],m[i]) for i in range(len(m))] for j in range(len(g))])
    # Symbolic inverse, computed once; the substitution below plugs in the
    # current iterate for any symbols it still contains.
    H_inv = H.inv()
    xn = [[0,0]] #Newton's method result global for comparison
    xn[0] = x_start
    iter_n = 0
    while np.linalg.norm(xn[-1] - x_result) > target_precision:
        print ('Newton\'s method - Distance: ',np.linalg.norm(xn[-1] - x_result))
        # Numeric gradient at the current iterate (as a sympy Matrix).
        gn = Matrix(dfdx(xn[iter_n],g))
        delta_xn = -H_inv * gn
        delta_xn = delta_xn.subs(m[0],xn[iter_n][0]).subs(m[1],xn[iter_n][1])
        #Transform to numpy.ndarray to avoid np.linalg.norm error with 'Float' data type
        aux = []
        for elem in Matrix(xn[iter_n]) + delta_xn:
            aux.append(elem)
        aux = np.array(aux).astype(np.float64)
        xn.append(aux)
        iter_n += 1
    print ('Newton\'s method - Result Distance: ',np.linalg.norm(xn[-1] - x_result))
    xn = np.array(xn)
    # Plot the Newton path in black.
    plt.plot(xn[:,0],xn[:,1],'k-o')
# ### Test & Plot
def _run_and_plot(obj_src, x_start, x_result, sd_alpha):
    """Contour-plot an objective and overlay both optimizers' paths.

    Shared driver extracted from the formerly duplicated quadratic and
    Rosenbrock runners.

    # Arguments
        obj_src: objective as a sympy-parsable string in variables x1, x2.
        x_start: starting point [x1, x2] for both optimizers.
        x_result: np.array with the known optimum (distance reporting only).
        sd_alpha: fixed step size for steepest descent.
    """
    obj = spp.parse_expr(obj_src)
    # Design variables at mesh points.
    i1 = np.arange(plot_from,plot_to,plot_step)
    i2 = np.arange(plot_from,plot_to,plot_step)
    x1_mesh,x2_mesh = np.meshgrid(i1,i2)
    # Evaluate the objective on the mesh by renaming variables and eval'ing;
    # acceptable here because the expression strings are trusted literals.
    f_str = obj.__str__().replace('x1','x1_mesh').replace('x2','x2_mesh')
    f_mesh = eval(f_str)
    # Create contour plot.
    plt.figure()
    plt.imshow(f_mesh,cmap='Paired',origin='lower',extent=[plot_from - 20, plot_to + 20, plot_from - 20, plot_to + 2])
    plt.colorbar()
    # Add some text.
    plt.title('f(x) = ' + str(obj))
    plt.xlabel('x1')
    plt.ylabel('x2')
    # Newton's method (black) vs steepest descent (green).
    nm(obj,x_start,x_result)
    sd(obj,x_start,x_result,alpha=sd_alpha)
    plt.show()

def run_and_plot_quadratic():
    """Convex quadratic test problem; minimum at (0, 0)."""
    _run_and_plot('x1**2 - 2* x1 * x2 + 4 * x2 ** 2', [-4.0,6.0], np.array([0.0,0.0]), sd_alpha=0.05)

def run_and_plot_rosenbrock():
    """Rosenbrock banana function; minimum at (1, 1)."""
    _run_and_plot('(1-x1)**2 + 100 * (x2 - x1**2)**2', [-4.0,-5.0], np.array([1,1]), sd_alpha=0.0002)
# ### Test
# Run both demos: Newton's method ('k-o', black) vs steepest descent ('g-o', green).
run_and_plot_quadratic()
run_and_plot_rosenbrock()
| Labs/Resources/NewtonsMethod/newtons_method_optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Gromov-Wasserstein example
#
# This example is designed to show how to use the Gromov-Wasserstein distance
# computation in POT.
#
# +
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import scipy as sp
import numpy as np
import matplotlib.pylab as pl
from mpl_toolkits.mplot3d import Axes3D # noqa
import ot
# -
# ## Sample two Gaussian distributions (2D and 3D)
#
# The Gromov-Wasserstein distance allows to compute distances with samples that
# do not belong to the same metric space. For demonstration purpose, we sample
# two Gaussian distributions in 2- and 3-dimensional spaces.
#
#
# +
n_samples = 30  # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
mu_t = np.array([4, 4, 4])
cov_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# 2D source samples drawn from N(mu_s, cov_s).
xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s)
# 3D target samples: standard normals transformed by sqrtm(cov_t), shifted by mu_t.
P = sp.linalg.sqrtm(cov_t)
xt = np.random.randn(n_samples, 3).dot(P) + mu_t
# -
# ## Plotting the distributions
#
#
# Source (2D) and target (3D) point clouds side by side.
fig = pl.figure()
ax1 = fig.add_subplot(121)
ax1.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
ax2 = fig.add_subplot(122, projection='3d')
ax2.scatter(xt[:, 0], xt[:, 1], xt[:, 2], color='r')
pl.show()
# ## Compute distance kernels, normalize them and then display
#
#
# +
# Intra-domain pairwise Euclidean distance matrices, rescaled to [0, 1].
C1 = sp.spatial.distance.cdist(xs, xs)
C2 = sp.spatial.distance.cdist(xt, xt)
C1 /= C1.max()
C2 /= C2.max()
pl.figure()
pl.subplot(121)
pl.imshow(C1)
pl.subplot(122)
pl.imshow(C2)
pl.show()
# -
# ## Compute Gromov-Wasserstein plans and distance
#
#
# +
# Uniform marginals over the samples.
p = ot.unif(n_samples)
q = ot.unif(n_samples)
# Exact and entropically-regularized Gromov-Wasserstein couplings.
gw0, log0 = ot.gromov.gromov_wasserstein(
    C1, C2, p, q, 'square_loss', verbose=True, log=True)
gw, log = ot.gromov.entropic_gromov_wasserstein(
    C1, C2, p, q, 'square_loss', epsilon=5e-4, log=True, verbose=True)
print('Gromov-Wasserstein distances: ' + str(log0['gw_dist']))
print('Entropic Gromov-Wasserstein distances: ' + str(log['gw_dist']))
# Side-by-side heatmaps of the two transport plans.
pl.figure(1, (10, 5))
pl.subplot(1, 2, 1)
pl.imshow(gw0, cmap='jet')
pl.title('Gromov Wasserstein')
pl.subplot(1, 2, 2)
pl.imshow(gw, cmap='jet')
pl.title('Entropic Gromov Wasserstein')
pl.show()
# -
# ## Compute GW with a scalable stochastic method with any loss function
#
#
# +
def loss(x, y):
    """Element-wise absolute difference; works on scalars and numpy arrays."""
    diff = x - y
    return np.abs(diff)
# Stochastic estimators: pointwise and sampled Gromov-Wasserstein with the
# custom absolute-difference loss.
pgw, plog = ot.gromov.pointwise_gromov_wasserstein(C1, C2, p, q, loss, max_iter=100,
                                                   log=True)
sgw, slog = ot.gromov.sampled_gromov_wasserstein(C1, C2, p, q, loss, epsilon=0.1, max_iter=100,
                                                 log=True)
print('Pointwise Gromov-Wasserstein distance estimated: ' + str(plog['gw_dist_estimated']))
print('Variance estimated: ' + str(plog['gw_dist_std']))
print('Sampled Gromov-Wasserstein distance: ' + str(slog['gw_dist_estimated']))
print('Variance estimated: ' + str(slog['gw_dist_std']))
# The pointwise plan comes back as a sparse matrix, hence .toarray() for display.
pl.figure(1, (10, 5))
pl.subplot(1, 2, 1)
pl.imshow(pgw.toarray(), cmap='jet')
pl.title('Pointwise Gromov Wasserstein')
pl.subplot(1, 2, 2)
pl.imshow(sgw, cmap='jet')
pl.title('Sampled Gromov Wasserstein')
pl.show()
| _downloads/705e002e8bade030ed468515173797f7/plot_gromov.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="2JXWocLnz38N"
# You might need to install this on your system:
#
# apt-get install python3-opencv git
# + colab={"base_uri": "https://localhost:8080/"} id="PQKpflNl7m63" outputId="faf13e6a-5527-4377-bbf7-b369db89a36c"
# Clone/update the k-neural-api repo and pip-install it.
# NOTE(review): as plain Python this cell is not runnable — the if/else
# bodies are commented-out notebook shell magics ('# !...'); it relies on
# the notebook environment executing the '!' lines. Left as-is.
import os
#"""
# # !rm k -r
if not os.path.isdir('k'):
# !git clone -b development12 https://github.com/joaopauloschuler/k-neural-api.git k
else:
# !cd k && git pull
#"""
# !cd k && pip install .
# + colab={"base_uri": "https://localhost:8080/"} id="2FWmCCX96ndE" outputId="60bfcfe1-200d-4f03-b368-83c7d8c68a1f"
# Project (cai / k-neural-api) and framework imports; print the framework
# versions for reproducibility.
import cai.layers
import cai.datasets
import cai.models
import cai.efficientnet
import numpy as np
from tensorflow import keras
from tensorflow.keras import mixed_precision
import gc
import multiprocessing
import random
import tensorflow as tf
print("Tensorflow version:", tf.version.VERSION)
print("Keras version:", keras.__version__)
from sklearn.metrics import classification_report
# + id="v5C5xGzHD9lu"
from tensorflow.python.profiler.model_analyzer import profile
from tensorflow.python.profiler.option_builder import ProfileOptionBuilder
def get_flops(model):
    """Estimate multiply-accumulate operations for one forward pass of `model`.

    Wraps model.call in a tf.function with a batch-size-1 input signature,
    profiles the resulting concrete graph, and returns the profiler's float
    op count halved.
    """
    single_input = tf.TensorSpec(shape=(1,) + model.input_shape[1:])
    traced_call = tf.function(model.call, input_signature=[single_input])
    concrete_graph = traced_call.get_concrete_function().graph
    graph_info = profile(concrete_graph,
                         options=ProfileOptionBuilder.float_operation())
    # `profile` counts multiply and accumulate as two separate flops; halve
    # to report the total number of multiply-accumulate ops.
    return graph_info.total_float_ops // 2
# + id="ezaCDhF_cdnP"
# mixed_precision.set_global_policy('mixed_float16')
# + [markdown] id="fl4NUmiLLs2J"
# # Create folder structure with CIFAR-10 as png files.
# + id="E7hD0zwbcKIq"
# Export CIFAR-10 as a folder tree of PNGs (train/ and test/) so Keras
# ImageDataGenerator.flow_from_directory can consume it.
verbose=True
root_folder = 'cifar10-as-png'
data_dir = root_folder + '/train';
test_dir = root_folder + '/test';
if not os.path.isdir(root_folder):
    os.mkdir(root_folder)
# Load CIFAR-10 via cai, then write both splits as class-foldered PNG files.
x_train, y_train, x_test, y_test = cai.datasets.load_dataset(tf.keras.datasets.cifar10, verbose=verbose, lab=False, bipolar=False)
cai.datasets.save_dataset_as_png(cai.datasets.fix_bad_tfkeras_channel_order(x_train)*255, y_train, dest_folder_name=data_dir)
cai.datasets.save_dataset_as_png(cai.datasets.fix_bad_tfkeras_channel_order(x_test)*255, y_test, dest_folder_name=test_dir)
# + id="-6WYfTY_h8zn"
# Training hyper-parameters; images are resized to 224x224 by the generators.
num_classes = 10
batch_size = 32
epochs = 50
target_size_x = 224
target_size_y = 224
seed = 12
# + id="qN8LdqheVEkj"
# Augmented generator for train/validation (10% validation split) and a
# plain rescaling generator for the test set.
train_datagen = cai.util.create_image_generator(validation_split=0.1, rotation_range=20, width_shift_range=0.3, height_shift_range=0.3, channel_shift_range=0.0, rescale=1./255)
test_datagen = cai.util.create_image_generator_no_augmentation(rescale=1./255)
# NOTE(review): max() means "at least 8 workers" even on machines with fewer
# cores; if the intent was to cap at 8, this should be min() — confirm.
cpus_num = max([multiprocessing.cpu_count(), 8])
def cyclical_adv_lrscheduler25(epoch):
    """CAI cyclical learning-rate schedule with a 25-epoch cycle.

    Warms up linearly for the first 7 epochs of each cycle, then decays
    geometrically (factor 0.85 per epoch) from four times the base rate.

    # Arguments
        epoch: integer with current epoch count.
    # Returns
        float with desired learning rate.
    """
    base_learning = 0.001
    phase = epoch % 25
    if phase < 7:
        # Warm-up: 1.0x, 1.5x, 2.0x, ... times the base rate.
        return base_learning * (1 + 0.5*phase)
    # Decay: start from 4x base and shrink by 15% each epoch.
    return (base_learning * 4) * ( 0.85**(phase-7) )
# + id="8A7b9F3TVQUG"
def work_on_keffnet(show_model=False, run_fit=False, test_results=False, calc_f1=False):
    """Build, train and/or evaluate EfficientNetB0 variants on CIFAR-10.

    Loops over kType in [-1, 2, 13]: -1 selects the baseline EfficientNetB0,
    other values select kEfficientNetB0 with that kType. Depending on flags:
    show_model prints summaries and flop counts; run_fit trains with the
    augmented generators while checkpointing the best val_accuracy model;
    test_results reloads the best checkpoint and evaluates it on the test
    folder; calc_f1 (implies test_results) additionally prints a per-class
    classification report.
    """
    monitor='val_accuracy'
    if (calc_f1):
        test_results=True
    if (show_model):
        # Concrete shape so summary()/get_flops see fixed dimensions.
        input_shape = (target_size_x, target_size_y, 3)
    else:
        # Size-agnostic input: accept any spatial resolution.
        input_shape = (None, None, 3)
    for kType in [-1, 2, 13]:
        basefilename = 'JP30B27-keffnet-CIFAR10-'+str(kType)
        best_result_file_name = basefilename+'-best_result.hdf5'
        print('Running: '+basefilename)
        if kType == -1:
            # Baseline EfficientNetB0.
            model = cai.efficientnet.EfficientNetB0(
                include_top=True,
                input_shape=input_shape,
                classes=num_classes)
        else:
            # kEffNet variant for the given kType.
            model = cai.efficientnet.kEfficientNetB0(
                include_top=True,
                input_shape=input_shape,
                classes=num_classes,
                kType=kType)
        #opt = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
        optimizer = keras.optimizers.RMSprop()
        # Loss scaling for mixed-precision numerical stability.
        optimizer = mixed_precision.LossScaleOptimizer(optimizer)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=optimizer,
            metrics=['accuracy'])
        if (show_model):
            model.summary(line_length=180)
            print('model flops:',get_flops(model))
        # Checkpoint the best model by validation accuracy after each epoch.
        save_best = keras.callbacks.ModelCheckpoint(
            filepath=best_result_file_name,
            monitor=monitor,
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='max',
            save_freq='epoch')
        if (run_fit):
            train_flow = train_datagen.flow_from_directory(
                directory=data_dir,
                subset="training",
                target_size=(target_size_x, target_size_y),
                color_mode="rgb",
                batch_size=batch_size,
                class_mode="categorical",
                shuffle=True,
                seed=seed
            )
            validation_flow = train_datagen.flow_from_directory(
                directory=data_dir,
                subset="validation",
                target_size=(target_size_x, target_size_y),
                color_mode="rgb",
                batch_size=batch_size,
                class_mode="categorical",
                shuffle=True,
                seed=seed
            )
            # NOTE(review): batch_size passed to fit() is ignored for
            # generator inputs; batching is controlled by the flows above.
            history = model.fit(
                x = train_flow,
                epochs=epochs,
                batch_size=batch_size,
                validation_data=validation_flow,
                callbacks=[save_best, tf.keras.callbacks.LearningRateScheduler(cyclical_adv_lrscheduler25)],
                workers=cpus_num,
                max_queue_size=128
            )
        if (test_results):
            test_flow = test_datagen.flow_from_directory(
                directory=test_dir,
                target_size=(target_size_x, target_size_y),
                color_mode="rgb",
                batch_size=batch_size,
                class_mode="categorical",
                shuffle=True,
                seed=seed
            )
            print('Best Model Results: '+best_result_file_name)
            # Reload the best checkpoint saved during training.
            model = cai.models.load_kereas_model(best_result_file_name)
            evaluated = model.evaluate(
                x=test_flow,
                batch_size=batch_size,
                use_multiprocessing=False,
                workers=cpus_num
            )
            for metric, name in zip(evaluated,["loss","acc"]):
                print(name,metric)
            if (calc_f1):
                # NOTE(review): predicts on the raw x_test arrays (32x32)
                # while the generators feed 224x224 images; the model accepts
                # any size, but confirm this resolution mismatch is intended.
                pred_y = model.predict(x_test)
                print("Predicted Shape:", pred_y.shape)
                pred_classes_y = np.array(list(np.argmax(pred_y, axis=1)))
                test_classes_y = np.array(list(np.argmax(y_test, axis=1)))
                print("Pred classes shape:",pred_classes_y.shape)
                print("Test classes shape:",test_classes_y.shape)
                report = classification_report(test_classes_y, pred_classes_y, digits=4)
                print(report)
        print('Finished: '+basefilename)
# + [markdown] id="HRSxTd5GeU5p"
# # Show Models
# + colab={"base_uri": "https://localhost:8080/"} id="EfjO1XESTCrY" outputId="ba7a7100-8a7d-4847-bd64-04c06bf4f6a9"
work_on_keffnet(show_model=True, run_fit=False, test_results=False)
# + [markdown] id="x7RjCRzmxhce"
# # Fitting
# + colab={"base_uri": "https://localhost:8080/"} id="edbu3-Y6THos" outputId="6b46d760-22c3-490b-e119-6fc5dc79221a"
work_on_keffnet(show_model=False, run_fit=True, test_results=True)
# + [markdown] id="qm50d5uZxkvA"
# # Test Results
# + id="sGjwYVi6TNN_" outputId="ae854b54-cefd-42e5-c1a9-604ae98a7e6b"
work_on_keffnet(show_model=False, run_fit=False, test_results=True)
# + id="qYPjSr31VdXG"
| raw/kEffNet/CIFAR-10/JP30B27_Baseline_kEffNet_kType_2_and_13.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, plot_roc_curve
from sklearn.metrics import make_scorer, precision_recall_fscore_support
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# -
# Load the BRCA PAM50 gene-expression table (samples x genes, plus a
# "Subtype" label column) straight from GitHub.
path = 'https://raw.githubusercontent.com/s-a-nersisyan/HSE_bioinformatics_2021/master/seminar15/BRCA_pam50.tsv'
df = pd.read_csv(path, sep="\t", index_col=0)
# Features: all columns except the last; target: the "Subtype" column.
X = df.iloc[:, :-1].to_numpy()
y = df["Subtype"].to_numpy()
def make_model(X, y, params=None):
    """Tune a KNN classifier on (X, y) with a repeated stratified grid
    search, then refit the best configuration on a stratified train
    split and print a classification report for the held-out split.

    Parameters
    ----------
    X : feature matrix.
    y : class labels.
    params : optional grid for GridSearchCV; a default KNN grid
        (n_neighbors, weights, p) is used when falsy.
    """
    if not params:
        params = {
            "n_neighbors": list(range(1, 20)),
            "weights": ["uniform", "distance"],
            "p": [1, 2],
        }
    search = GridSearchCV(
        KNeighborsClassifier(),
        params,
        scoring=make_scorer(accuracy_score),
        cv=RepeatedStratifiedKFold(n_repeats=10, n_splits=20),
    )
    search.fit(X, y)
    print('best params:', search.best_params_)
    print('best score:', search.best_score_)
    print('\n')
    # Refit a fresh estimator with the winning hyper-parameters.
    best_knn = KNeighborsClassifier().set_params(**search.best_params_)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, stratify=y, test_size=0.35, random_state=17
    )
    best_knn.fit(X_train, y_train)
    print(classification_report(y_test, best_knn.predict(X_test)))
# Baseline: tune/evaluate KNN on the raw feature matrix.
print(X.shape)
make_model(X, y)
# Same experiment after projecting the data onto 2 PCA components.
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
print(X_pca.shape)
make_model(X_pca, y)
# And after a 2-D t-SNE embedding (non-deterministic unless seeded).
tsne = TSNE(n_components=2)
X_tsne = tsne.fit_transform(X)
print(X_tsne.shape)
make_model(X_tsne, y)
| bonus/19.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://csdms.colorado.edu/wiki/ESPIn2020"><img style="float: center; width: 75%" src="../../media/ESPIn.png"></a>
# ## Diffusion
# ### Introduction
#
# The diffusion equation can be used to represent a great deal of natural
# and environmental processes. It was introduced by Fourier in 1822 to
# calculate the distribution of the temperature in materials and has later
# been applied by Fick to material science. The mathematical expression
# that we will derive can be used to model e.g. heat transfer in the
# earth's crust, soil evolution, transport of contaminant in an aquifer or
# in the atmosphere, erosion of mountain ranges, the evolution of glaciers
# and many other phenomena. But before describing the equation directly,
# we will investigate what diffusion actually means.
#
# Note: Lecture notes on diffusion are partly based on Prof. Dr. <NAME>'s course on geophysical processes.
# @author: <NAME>
#
# #### Diffusion, what does it mean?
# The following movie illustrates Brownian motion, we can
# see that the equation to derive for diffusion must enable us to
# represent the movement of molecules from a high concentration zone to a
# zone of low concentration (Movie 1):
#
# [](https://www.youtube.com/watch?v=UhL9OsRSKO8 "Brownian motion")
#
# **Movie 1:** Brownian motion causes food dye molecules to move throughout the water
#
# If we simplify this into a graph, we would get the following:
# <img src="../../media/Diff_Fig1.png" style="width:3in;height:2in" />
#
# **Figure 1**: Diffusion is the movement of molecules from a high
# concentration zone to a zone of low concentration due to random
# processes. C represents the concentration; X is the horizontal distance
# and q is the net particle flow.
#
# Due to diffusion, the particles move from the black zone to the grey
# zone. This can be explained by the fact that each particle can move at
# any moment in any direction, over a given distance. In one dimension, a
# particle can move to the left or to the right with equal probability,
# and this as well in the gray region as the black region. However, at the
# transition from the black zone to the grey zone, the probability of
# seeing particles move from left to right is much larger than the
# opposite (because there are much more black particles). This causes a
# particle transfer that depends on the difference of concentration $\Delta C$ and
# the distance that the particle must travel $\Delta x$, where $\Delta C$ is the
# difference of concentration in a transition zone of length $\Delta x$.
# Therefore, we can see that the flow of particles (i.e. the number of
# particles passing through per unit surface and time (in 2D, mol m<sup>-1</sup>
# s<sup>-1</sup>) will depend on the concentration gradient. Over time,
# the concentration changes as illustrated in Figure 2.
#
# <img src="../../media/Diff_Fig2.png" style="width:3in;height:2in" />
#
# **Figure 2:** Concentration changes over time due to diffusion
#
# ### Exercise 1: modeling the random movement of particles
#
# Start by importing some libraries we will use
import numpy as np
import matplotlib.pyplot as plt
import random
import time
# The goal of the first exercise is to replicate the process of diffusion by modeling the random movement of particles.
#
# First, create a vector (called `xp`) that contains 100000 particles, having a random value between -20 and 20.
# Represent this graphically using a histogram (use the `plt.hist()` function) and using the number of bins `xbins` (see below) in which you calculate the frequency (that is, the number of particles that is in each bin). Complete the code block below:
# +
xbins = np.arange(-200,200,2) # The number of bins in which you calculate the frequency (i.e. the number of particles that is in each bin.)
# -
# Now, create a loop in which:
# - each particle moves over a random distance (positive or negative). This distance varies from -2 to 2. Use the `random` function.
# - Run the loop 100 times (`nbT` =100). You will need two for loops (also referred to as nested for loops).
# - Plot the resulting distribution every 200 iterations (using the code you coded above). Fix the limits of the y axes (`plt.ylim()`) to better understand how the distribution changes with time.
# - Extra: can you find a way to check how long it takes to execute the loop (a timer)?
#
# The code should have the following structure:
# ~~~
# nbT = ...
# for t ...:
# for i ...:
# xp[i] ...
# if t%49==0:
# plt.figure()
# plt.hist(xp, bins=xbins)
# plt.title('Time is: ' +str(t))
# plt.xlabel('Horizontal distance')
# plt.ylabel('Number of particles')
# plt.ylim((0, 10e3))
# plt.show()
#
# ~~~
# As you notice, this code is very slow. Vectorize your problem using numpy arrays to speed up the calculation and get rid of the inner for loop (you can keep the loop taking care of time). Change `nbT` to 10000 iterations
# Now that you have an efficient solution, answer the following questions:
#
# 1. Describe the evolution of the particles. How does the shape of the histogram evolve?
# 2. Why does the evolution of the histogram slow down over time?
# 3. What happens if the number of particles is reduced to 1000?
# 4. What happens if the distance of the displacement now varies randomly between -10 and 10? Why is this the case?
#
#
# ## Derivation of the diffusion equation
#
# In the previous exercise, we modeled a particle transfer assuming a random particle shift. The solution showed that the change in particle distribution depends on the concentration difference $\Delta C$ and the distance the particle must travel $\Delta x$, where $\Delta C$ is the difference in concentration in the transition zone of length $\Delta x$ (Figure 2).
#
# From these observations, we can thus conclude that particle flow, i.e. the number of particles passing through the side of an infinitesimal block per unit of time $\mathrm{(mol \: m^{-1} s^{-1})}$, will depend on the concentration gradient (Figure 2).
#
# We can therefore say that the flux, $q$ is defined by:
#
# $$q = -D\frac{\Delta C}{\Delta x} \label{eq:1} \tag{1}$$
#
#
# where $D$ corresponds to the diffusion coefficient $\mathrm{(m^{2} s^{-1})}$, or the diffusivity. *C* represents the concentration or the number of elements in a 2-dimensional infinitesimal block $\mathrm{(mol \: m^{-2})}$. The diffusion coefficient will vary from one problem to another and defines the speed of particle transfer. Now, we would also like to know how the *concentration* changes during the calculations. Let's take an infinitesimal block with an incoming flow, and an outgoing flow (Figure 4).
#
# <img src="../../media/Diff_Fig4.png" style="width:3in;height:2in" />
#
# **Figure 4:** Infinitesimal block with an incoming flow, and an
# outgoing flow.
#
# As the concentration varies in the *X* direction, the flow will be
# different at the input and at the output of the block. The difference of
# the number of particles in the block can therefore be derived from the
# flux:
#
# $$ \Delta (number \: of \: particles) = (q_x -q_{x+dx}\Delta Y \Delta t)\label{eq:A1} \tag{A1}$$
#
# Note that the term $\Delta t$ appears because the flux dimensions are in $\mathrm{(mol \: m^{-1} s^{-1})}$ and that $\Delta (number \: of \: particles)$ is in mol.
#
# We also know that the concentration change over an infinitesimally small
# unit of time corresponds to the change in the number of particles for a
# given volume:
#
# $$ \Delta C= \frac{\Delta (number \: of \: particles) }{\Delta X \Delta Y} \label{eq:A2} \tag{A2}$$
#
# which gives:
#
# $$ \Delta (number \: of \: particles) = \Delta C \Delta X \Delta Y \label{eq:A3} \tag{A3}$$
#
# By combining Equation (\ref{eq:A1}) and (\ref{eq:A3}), we can write:
#
# $$ (q_x -q_{x+dx}) \Delta Y \Delta t = \Delta C \Delta X \Delta Y \label{eq:A4} \tag{A4}$$
#
# $$ (q_x -q_{x+dx}) \Delta t = \Delta C \Delta X \label{eq:A5} \tag{A5}$$
#
#
# $$ \frac{\Delta C}{\Delta t} = \frac{q_x -q_{x+dx}}{\Delta X} \label{eq:A6} \tag{A6}$$
#
# Using the definition of a differential equation:
#
# $$ \frac{\delta q}{\delta x} = \frac{(q_{x+dx} -q_x)}{\Delta X} \label{eq:A7} \tag{A7}$$
#
# We obtain the following equation (note the use of the $\partial$ symbol: we solve a PDE):
#
# $$ \frac{\partial C}{\partial t} = -\frac{\partial q}{\partial x} \label{eq:2} \tag{2}$$
# By combining Eqs.(\ref{eq:1}) and (\ref{eq:2}), we finally obtain the heat
# equation:
#
# $$ \frac{\partial C}{\partial t} = D\frac{\partial^2 C}{\partial x^2} \label{eq:3} \tag{3}$$
#
# which depends only on the curvature (i.e. the second derivative) of the concentration and the diffusion constant. Therefore, it is sufficient to know the diffusion coefficient $D$ (which can be measured) and to measure the curvature to estimate the change in concentration over time.
# ### Exercise 2: Change in concentration due to diffusion
#
# You will now solve the diffusion equation and write a code that allows us to solve this equation. The change in concentration will be calculated over a distance $Lx$. There are two ways to do this, we can either calculate the second derivative directly (i.e. the curvature), or do it in two steps by calculating the flux (i.e. the first derivative of the concentration) and then the derivative of the flux. We will use the second method because it is easier to calculate a first derivative than a second derivative.
#
# To be able to do the calculation we also need an initial condition (i.e. the starting concentration) and boundary conditions (i.e. the concentration in $x = 0$ and $x = Lx$). Finally, you will have to choose a time step that is small enough. The [Von Neumann stability analysis](https://en.wikipedia.org/wiki/Von_Neumann_stability_analysis) prescribes that $\Delta t$ must be smaller than $\frac{\Delta X^2}{2D}$.
#
# Now, try to solve the diffusion equation through discretization of ([Eq. 3](#mjx-eqn-eq:3)).
# Make the following assumptions:
# 1. The initial condition:
# - $C(x<=\frac{Lx}{2}) = 500 \: \mathrm{(mol \: m^{-2})}$
# - $C(x>\frac{Lx}{2}) = 0 \: \mathrm{(mol \: m^{-2})}$
# - $Lx = 30 \: \mathrm{m}$ or $Lx = 300 \: \mathrm{m}$
# - $D = 100 \: \mathrm{(m^{2} s^{-1})}$
# - $\Delta x = 0.1 \: \mathrm{m}$
#
# 2. Assumptions regarding the boundary conditions:
# - $C(x=0) = 500 \: \mathrm{(mol \: m^{-2})}$
# - $C(x=Lx) = 0 \: \mathrm{(mol \: m^{-2})}$
#
# The code to solve this exercise must have the following structure:
# ~~~
# #physics
# D =
# Lx =
# time =
#
# #numerical properties
# dx =
# x = np.arange(...)
# nx =
# nt =
# nout =
#
# # initial condition
# # Choose an initial condition where C = C1 when x <= (Lx/2) and C = 0 when x > (Lx/2)
# C1 =
# C2 =
# C =
# C[x<=Lx/2] =
# C[x>Lx/2] =
#
# # Plot the initial concentration
# # plt.figure()
# # plt.plot(x,C)
# # plt.title('Initial condition')
# # plt.show()
#
# # impose a condition on the time step (Von Neumann stability criterion)
# dt = dx*dx/D/2.1
# print(dt)
#
# #model run: solve the heat equation and plot the result.
#
# # - make a time loop
# for t in range(0,nt):
# # - in this loop, first calculate the flux by discretizing equation (1),
# # try to use vectorized code (eg. using numPy diff statement)
# q =
#
# # - Update the new concentration (Eq. 2, without changing the boundary values)
# # Careful: which nodes do you have to update now?
# C[...] =
#
# # - plot intermediate results, but only for every 100 time steps
# if t%100==0:
# #plt.figure()
# plt.plot(x,C)
# plt.title('Time is: ' +str(t))
#
# plt.show()
# ~~~
# Now, try to answer the following questions:
#
# 1. What is the shape of the concentration in equilibrium?
# 2. How long does it take to reach equilibrium?
# 3. Let *Lx* vary between 30 and 300, and *D* between 20 and 500. How does the time change to arrive at equilibrium according to *L* and *D*?
#
# To be able to answer the questions, you can modify your code to assume a condition on the concentration that defines when the solution will have reached a state of equilibrium. To implement this, replace the loop 'for' with a while loop:
# ~~~
# Cp=C
# it=0
# diff=1e6
#
# while ...: #(diff > 1e-4)
# it +=1
# #update the time
# #calculate the flow with the discretized equation (eq. 1)
# #calculate the new concentration (eq. 2)(without changing the BC's)
#
# #check if the solution changes
# diff = # sum of absolute difference between Cp and C
# Cp = C
# #plotting (only every 10000 iterations)
# ~~~
# ## Practice your skills: Eyjafjallajokull- Part 1
#
# <img src="../../media/Eyjafjallajokull.jpg" />
#
# **Figure** "On March 2010 Eyjafjallajokull volcano in Iceland exploded into life, spewing lava, magma, rock and clouds of ash into the sky above it. The disaster grounded airlines, stranding holidaymakers and business passengers across Europe and North America. While many could only watch the crisis unfolding in hotels and airports, photographer <NAME> chose to fly into the epicentre on a mission to record one of nature's most deadly phenomena" The Telegraph (c); Picture: <NAME> / BARCROFT
#
# Exercise: 1-D diffusion
# The Eyjafjallajökull volcano is located at the Southern Iceland coast and 2000 km from Brussels. Consider a one-dimensional case with a domain length of 5000 km. The volcano itself is situated at 2220 km from the left boundary of the simulation domain. Brussels is situated at 4220 km from the left boundary of the simulation domain. Choose a spatial resolution of 20 km. In the next couple of steps, you will calculate the time required to obtain a specific ash concentration above Brussels.
#
# <img src="../../media/Situation1D.png" />
#
# **Figure:** 1D situation sketch
#
# Solve the spread of ash using the diffusion equation (Eq. 3)
#
# Define the model parameters:
# - set the diffusivity to 25 km$^2$/h
# - define the model domain: total length is 5000 km, spatial resolution is 20 km
# - calculate the location (index in the np array) of the volcano and of Brussels and call the variables respectively `ind_vol` and `ind_Bru`
# - Set the initial conditions (C): at the volcano the concentration is 100 ppm, over the rest of the domain the concentration is 0 ppm.
# - The Eyjafjallajökull volcano produced ashes almost continuously during a couple of weeks. Start from the initial condition above but now add 100 ppm ashes per hour to the volcano grid cell as a source term.
# - Assume Dirichlet boundary conditions (0 ppm at 0 km and 0 ppm at 5000 km, the right edge of the domain)
# - Plot the initial concentration, also indicate the location of Brussels on the plot (HINT use `plt.scatter()`)
# - Calculate and print out the time step (dt) using the CFL criterion
#
# The code must have the following structure:
#
# ~~~
# #physics
# D =
# Lx =
# time =
#
# #numerical properties
# dx =
# x =
# nx =
# nt =
# nout =
#
# # Location of volcano and Brussels
# ind_vol=
# ind_Bru=
#
# C_ini =
# C_rate =
# Cstart = 0
# Cend = 0
# C =np.zeros(x.shape)
#
# C[0] = Cstart
# C[ind_vol] = C_ini
# C[-1] = Cend
#
# plt.figure()
# plt.plot(x,C)
# plt.scatter(x[ind_Bru],C[ind_Bru],c='r')
#
# dt = dx*dx/D/2.5
# print('dt is: ' + str(dt) + 'hours')
#
# it = 0
# plt.figure()
# ~~~
# - After how many hours do we get 5 ppm ash aerosols in Brussels?
# - Is this realistic?
#
# Use the solution derived before to solve this question using a while loop. Plot the output every 100 iterations.
#
# The code must have the following structure:
#
# ~~~
#
# it =0
#
# while ...:
# it...
# q =...
# C...
#
# # Source term
# C[ind_vol] += C_rate*dt
#
# # Boundary conditions
# C[0] = Cstart
# C[-1] = Cend
#
#
# if ...:
# plt.plot(x,C)
# plt.scatter(x[ind_Bru],C[ind_Bru],c='r')
# plt.title('Time is: ' + str(it*dt) + ' sec')
# plt.show()
#
# print('Concentration reached after: ' + str(int(it*dt)) + ' hours')
# print('or : ' + str(int(it*dt/24)) + ' days')
# ~~~
| lessons/python/ESPIN-05 Models Part 1 (Diffusion).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="k5siNEaHVLCq" colab_type="code" colab={}
# #! pip install datadotworld
# #! pip install datadotworld[pandas]
# + id="Z7CidxAUVtSN" colab_type="code" colab={}
# # !dw configure
# + id="caFBCYaIUGtb" colab_type="code" colab={} cellView="both"
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw
# + id="Riz1759KV-oL" colab_type="code" colab={}
# drive.mount("/content/drive")
# + id="WN_YuBQAWCrN" colab_type="code" colab={}
# cd "/content/drive/My Drive/Colab Notebooks/dataworkshop/matrix_one"
# + id="SZt1AZjfXGcf" colab_type="code" colab={}
# !mkdir data
# + id="Xl3_Sq5UXKk0" colab_type="code" colab={}
# !echo 'data' > .gitignore
# + id="wi-8imobXuHI" colab_type="code" colab={}
# !git add .gitignore
# + id="eH2gcAVuXw_D" colab_type="code" colab={}
data = dw.load_dataset('datafiniti/mens-shoe-prices')
# + id="T-pzsAPEX9yS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="9c50ac29-58f9-4d4a-8eae-baf3ceba6220" executionInfo={"status": "ok", "timestamp": 1581493834694, "user_tz": -60, "elapsed": 1951, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# Extract the shoe-prices table from the data.world dataset.
# NOTE(review): '7004_1' is presumably the dataset's table key — verify
# against the dataset listing if it changes upstream.
df = data.dataframes['7004_1']
df.shape
# + id="EDTYbAGxYBPn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 547} outputId="176ee50b-bb9d-4caa-c9d5-8b0fb28ea5f7" executionInfo={"status": "ok", "timestamp": 1581493854258, "user_tz": -60, "elapsed": 704, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
df.sample(5)
# + id="VtyN2ySLYRy7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="e6fb8d4b-ca8a-4043-b9d9-363401cdeec5" executionInfo={"status": "ok", "timestamp": 1581493889203, "user_tz": -60, "elapsed": 788, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
df.columns
# + id="FQMTiwZxYaSs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="c14331c9-3504-447a-b917-57b57901bf79" executionInfo={"status": "ok", "timestamp": 1581493905913, "user_tz": -60, "elapsed": 706, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
df.prices_currency.unique()
# + id="hJLiWoe_YeZt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="479eefc7-cc70-488f-a0c4-aef1d0480e27" executionInfo={"status": "ok", "timestamp": 1581493985497, "user_tz": -60, "elapsed": 821, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
df.prices_currency.value_counts(normalize=True)
# + id="ZS7ASbZ8YqH7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="231c2d7c-a03f-4bdb-c761-e8dacf2657c1" executionInfo={"status": "ok", "timestamp": 1581494089947, "user_tz": -60, "elapsed": 704, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# Keep only rows priced in USD; .copy() avoids SettingWithCopy warnings
# when the subset is modified later.
df_usd = df[ df.prices_currency == 'USD' ].copy()
df_usd.shape
# + id="y2xfDbOAZLVn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="769aed78-10a1-4c0c-86a6-8e54a2c9ccfa" executionInfo={"status": "ok", "timestamp": 1581494395852, "user_tz": -60, "elapsed": 975, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# Convert the minimum-price column to float and show its distribution.
# Fix: the `np.float` alias was deprecated in NumPy 1.20 and removed in
# NumPy 1.24 (it was only an alias for the builtin `float` anyway), so
# `astype(np.float)` raises AttributeError on modern NumPy.
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
df_usd['prices_amountmin'].hist()
# + id="z5jdAm2naV8c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="17b8818a-894f-494d-8488-70e24c69d6f8" executionInfo={"status": "ok", "timestamp": 1581494554382, "user_tz": -60, "elapsed": 617, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# 99th percentile of the price column, used below as an outlier cutoff.
filter_max = np.percentile(df_usd['prices_amountmin'], 99)
filter_max
# + id="Se_QZSXjauSb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="721d3d7f-21d2-440e-b162-9118647d7d0b" executionInfo={"status": "ok", "timestamp": 1581494749909, "user_tz": -60, "elapsed": 911, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# Drop the top 1% of prices and re-plot the histogram at finer binning.
df_usd_filter = df_usd[ df_usd['prices_amountmin'] < filter_max]
df_usd_filter.prices_amountmin.hist(bins=100)
# + id="z-dK700jbsXl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="998aa15b-9e5b-44d0-c401-323e3cbc0007" executionInfo={"status": "ok", "timestamp": 1581494957862, "user_tz": -60, "elapsed": 1922, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# ls
# + id="DYpXe4iYce5j" colab_type="code" colab={}
# !git add day3.ipynb
# + id="-N8wp4yoc1rl" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "<NAME>"
# + id="xDLK7DvGcn_D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="1f7c6e67-5c92-4ae3-8b5a-a45ac2c9add9" executionInfo={"status": "ok", "timestamp": 1581495844462, "user_tz": -60, "elapsed": 3953, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# !git commit -m "Read Men's Shoe Prices dataset from data.world"
# + id="oDKNRJJWdzG5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="d6b7b018-896f-4e57-aaa7-c9f53d62b6e0" executionInfo={"status": "ok", "timestamp": 1581495905539, "user_tz": -60, "elapsed": 1878, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# !git push -u origin master
# + id="gxMPSRSWd2s0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 190} outputId="581af24e-79fb-4ab2-c57e-cbbd798bbafa" executionInfo={"status": "ok", "timestamp": 1581495880504, "user_tz": -60, "elapsed": 3787, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDW36NzrsfsvgSw3Tp8832PIT932MkHcVTTpOXvvg=s64", "userId": "10838063464060661544"}}
# !git pull
| matrix_one/matrix_day3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://stackoverflow.com/questions/11874767/real-time-plotting-in-while-loop-with-matplotlib
# +
# %matplotlib inline
import matplotlib.pylab as plt
import pandas as pd
import numpy as np
import time
from IPython import display
# %matplotlib inline
# Shared datetime index for every random series plotted below.
i = pd.date_range('2013-1-1',periods=100,freq='s')
# Re-draw a fresh random series once per second until interrupted.
# display/clear_output(wait=True) gives a crude animation in %matplotlib
# inline mode; Ctrl-C (KeyboardInterrupt) exits the loop cleanly.
while True:
    try:
        plt.plot(pd.Series(data=np.random.randn(100), index=i))
        display.display(plt.gcf())
        display.clear_output(wait=True)
        time.sleep(1)
    except KeyboardInterrupt:
        break
# +
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import time
def pltsin(ax, colors=('b',)):
    """Draw (or refresh) random y-data on *ax* for live plotting.

    On the first call one line per entry in *colors* is created; on
    subsequent calls the existing lines' data is updated in place and
    the canvas is redrawn.

    Parameters
    ----------
    ax : matplotlib Axes to draw on.
    colors : iterable of matplotlib color codes, one per line to create
        on the first call. (Default changed from the mutable ``['b']``
        to a tuple — behavior is identical, it is only iterated.)
    """
    x = np.linspace(0, 1, 100)
    if ax.lines:
        # Subsequent calls: keep the artists, just swap in fresh data.
        for line in ax.lines:
            line.set_xdata(x)
            y = np.random.random(size=(100, 1))
            line.set_ydata(y)
    else:
        # First call: create one line per requested color.
        for color in colors:
            y = np.random.random(size=(100, 1))
            ax.plot(x, y, color)
    # Fix: redraw via the axes' own figure instead of relying on a
    # module-level global `fig` existing at call time.
    ax.figure.canvas.draw()
# Set up a single axes with fixed [0, 1] limits so the animation does
# not rescale between frames.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_xlim(0,1)
ax.set_ylim(0,1)
# Animate 50 frames, one per second, with a blue and a red line.
for f in range(50):
    pltsin(ax, ['b', 'r'])
    time.sleep(1)
# -
| matplotlib/Real time plotting 2-11-2017.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# 
# <center><em>Copyright! This material is protected, please do not copy or distribute. by:<NAME></em></center>
# ***
# <h1 align="center">Udemy course : Python Bootcamp for Data Science 2021 Numpy Pandas & Seaborn</h1>
#
# ***
# ## 9.7 Filtering Outliers
# First we import numoy and pandas libraries:
# + hide_input=false
import numpy as np
import pandas as pd
# -
# Here we read a dataframe from a csv file using the function **pd.read_csv()**:
# + hide_input=false
data = pd.read_csv('data/ex2.csv')
data.head(15)
# -
# We can use the function **describe()** to display the common statistics for this dataframe:
# + hide_input=false
data.describe()
# -
# We can filter values in the Cards column that are greater than five using a boolean mask:
# + hide_input=false
data[data.Cards> 5]
# -
# Here we can change the values of these outliers using the function **loc[ ]**:
# + hide_input=false
# Overwrite the two out-of-range Cards values (rows 4 and 12) with
# plausible in-range values.
data.loc[4,['Cards']] = 4
data.loc[12,['Cards']] = 5
# -
# Here we check for outliers ( cards > 5) using boolean expression:
# + hide_input=false
(data.Cards > 5)
# -
# Or much easier, we can add the function **any()** to return a single boolean value:
# + hide_input=false
(data.Cards > 5).any()
# -
# Lets read the data frame again:
# + hide_input=false
data = pd.read_csv('data/ex2.csv')
# -
# Alternatively, we can replace outliers with missing values (NaN):
# + hide_input=false
# Alternative strategy: mark the same two outliers as missing (NaN)
# instead of guessing replacement values.
data.loc[4,['Cards']] = np.nan
data.loc[12,['Cards']] = np.nan
# + hide_input=false
data.head(15)
# -
# ***
#
# <h1 align="center">Thank You</h1>
#
# ***
| 9.7 Filtering Outliers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Solution(object):
    """LeetCode 242: Valid Anagram."""

    def isAnagram(self, s, t):
        """Return True if *t* is an anagram of *s*.

        Two strings are anagrams when they contain exactly the same
        characters with the same multiplicities, so sorting both and
        comparing is sufficient. O(n log n) time, O(n) space.

        :type s: str
        :type t: str
        :rtype: bool
        """
        # sorted() replaces the manual list()/sort() dance and the
        # redundant `if ...: return True else: return False` pattern.
        return sorted(s) == sorted(t)
# Quick sanity check: "anagram" vs "nagaram" should be True.
s = Solution()
s.isAnagram("anagram","nagaram")
| algorithms/242-Valid-Anagram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(0, "/Users/rluger/src/starry-dev/")
import starry
npts = 5000
cmap = plt.get_cmap("plasma")
def PlotFisherMatrix(ydeg=4, udeg=0, vmin=-6, axis=[0, 1, 0], u=None, title=None, **kwargs):
    """Plot the log-scaled Fisher information matrix of a starry design.

    Left panel: the full matrix F = A^T A (A is the starry design
    matrix) on a log10 color scale; right panel: its diagonal.

    Parameters
    ----------
    ydeg : int, spherical harmonic degree of the map.
    udeg : int, limb-darkening degree.
    vmin : float, floor (in log10 of |F|/max|F|) for the color scale.
    axis : rotation axis assigned to the starry map.
    u : limb-darkening coefficients; applied only when ``udeg > 0``.
    title : figure title.
    **kwargs : forwarded to ``Map.X`` (e.g. theta, xo, yo, ro).
    """
    # Fix: renamed the local from `map`, which shadowed the builtin.
    sky_map = starry.Map(ydeg=ydeg, udeg=udeg)
    sky_map.axis = axis
    if udeg > 0 and u is not None:
        sky_map[1:] = u
    A = sky_map.X(**kwargs)
    # Fisher information for unit noise is A^T A.
    F = np.dot(A.T, A)
    # Normalize by the largest entry, take log10, and clip at vmin.
    Z = np.log10(np.abs(F / np.max(F)))
    Z[Z < vmin] = vmin
    fig, ax = plt.subplots(1, 2, figsize=(16, 6))
    fig.subplots_adjust(wspace=0.05)
    ax[0].set_xticks([])
    ax[0].set_yticks([])
    vmax = 0.1
    im = ax[0].imshow(Z, vmin=vmin, vmax=vmax, cmap="plasma")
    plt.colorbar(im, ax=ax[0])
    # Dashed lines delimit blocks of fixed spherical harmonic degree l
    # (each degree spans columns l**2 .. (l+1)**2 - 1).
    for l in range(sky_map.ydeg + 1):
        ax[0].axvline(l ** 2 - 0.5, color="w", ls="--", lw=1)
        ax[0].axhline(l ** 2 - 0.5, color="w", ls="--", lw=1)
    # Right panel: scatter of the (log) diagonal, colored consistently
    # with the image's colormap.
    c = cmap((np.diag(Z) - vmin) / (vmax - vmin))
    ax[1].scatter(np.arange(F.shape[0]), np.diag(Z), c=c)
    ax[1].set_ylim(vmin, vmax)
    ax[1].set_xlim(-0.5, F.shape[0] - 0.5)
    ax[1].set_xticks([])
    for l in range(sky_map.ydeg + 1):
        ax[1].axvline(l ** 2 - 0.5, color="k", ls="--", lw=1)
    fig.suptitle(title, fontsize=24)
# Phase curve over a full rotation, rotation axis in the sky plane.
theta = np.linspace(0, 360, npts)
PlotFisherMatrix(theta=theta, title="Phase curve, edge on")
# Same phase curve, but with an inclined rotation axis.
theta = np.linspace(0, 360, npts)
PlotFisherMatrix(theta=theta, axis=[1, 1, 1], title="Phase curve, inclined")
# Inclined phase curve with quadratic limb darkening.
theta = np.linspace(0, 360, npts)
udeg = 2
u = [0.4, 0.26]
PlotFisherMatrix(theta=theta, udeg=udeg, u=u, axis=[1, 1, 1], title="Phase curve, inclined, limb darkened")
# Transit of a small occultor (ro=0.01) at impact parameter 0.5.
xo = np.linspace(-1.1, 1.1, npts)
yo = 0.5
ro = 0.01
PlotFisherMatrix(xo=xo, yo=yo, ro=ro, title="Small planet transiting")
# Same geometry with a larger occultor (ro=0.1).
xo = np.linspace(-1.1, 1.1, npts)
yo = 0.5
ro = 0.1
PlotFisherMatrix(xo=xo, yo=yo, ro=ro, title="Large planet transiting")
# Secondary eclipse: a huge occultor (ro=10) covering the body,
# low impact parameter.
xo = np.linspace(-11, -9, npts)
yo = 0.1
ro = 10
PlotFisherMatrix(xo=xo, yo=yo, ro=ro, title="Secondary eclipse, low impact parameter")
# Secondary eclipse at high impact parameter (grazing geometry).
xo = np.linspace(-11, -9, npts)
yo = 5
ro = 10
PlotFisherMatrix(xo=xo, yo=yo, ro=ro, title="Secondary eclipse, high impact parameter")
| FisherInformation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Yt_A6nW4nnWl"
# ## Neural networks models: practicals
#
# In this notebook you will practice simple neural networks models for classification.
# We will be using the `breast cancer` dataset:
#
# - binary classification problem: breast cancer diagnosis, `0`: `malignant`, `1`: `benign`
# - EDA: look at the data
# - split between the training and the test sets
# - number of hidden layers
# - number of nodes within layers
# - type of activation functions in the hidden layers
# - number of epochs
# - number of features to include in the model
# - etc.
#
# Let's start by importing some basic libraries and the data:
# + id="zec9A2Dzm6QO"
## import libraries
import numpy as np
import tensorflow as tf
import pandas as pd
import sklearn.datasets
import matplotlib.pyplot as plt
# + [markdown] id="vs_c5og5oklW"
#
# ## Breast cancer data
#
# Now, it's up to you to continue: write here your code!! (plus text chunks for explanations)
# + id="fKOxluEnn7Kz"
# Load the breast cancer dataset (target: 0 = malignant, 1 = benign).
from sklearn.datasets import load_breast_cancer
bcancer = load_breast_cancer()
y = bcancer.target  # class labels
X = bcancer.data    # feature matrix
y.shape             # displayed by the notebook
# + id="Hrqq466ezgjs"
# Class balance: number of samples of each label.
from collections import Counter
print(Counter(y))
# + id="VpLvqpI2qSaG"
# Full textual description of the dataset.
print(bcancer.DESCR)
# + [markdown] id="QOwqolFYL5-e"
# ### Explore the data
# + id="jQ4nrTezz9ub"
bcancer.data = pd.DataFrame(bcancer.data, columns=bcancer.feature_names) #converting numpy array -> pandas DataFrame
bcancer.target = pd.Series(bcancer.target)  # converting numpy array -> pandas Series
# + id="6Nf7wTpW1LXY"
# Keep all columns as features; target stays the label Series.
features = bcancer.data.iloc[:,:]
target = bcancer.target
features  # displayed by the notebook
# + id="7J4XGVk7r2JX"
#we want the train/validation split to preserve the class proportions
from sklearn.model_selection import StratifiedShuffleSplit
#StratifiedShuffleSplit configured for a single split with 20% of the data
#held out as the validation set (sklearn calls it "test"); random_state=0
#makes the split reproducible
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
#with n_splits=1 the generator returned by .split() yields exactly one
#(train indices, validation indices) pair, so we can take it with next()
train_index, val_index = next(sss.split(features, target))
X_train = features.iloc[train_index, :]
X_val = features.iloc[val_index, :]
y_train = target[train_index]
y_val = target[val_index]
#sanity check: shapes and types of the resulting objects
print("Training features size: ", X_train.shape)
print("Test features size: ", X_val.shape)
print("Training targets size: ", y_train.shape)
print("Test targets size: ", y_val.shape)
print("Type of the training features object: ", type(X_train))
print("Type of the training targets object: ", type(y_train))
# + id="9pkijw8ksdF5"
## # Configuration options
input_shape = (X_train.shape[1],) ## tuple that specifies the number of features
hidden_nodes = 16                      ## width of the single hidden layer
hidden_activation = 'relu'             ## activation of the hidden layer
output_activation = 'sigmoid'          ## output in (0, 1), thresholded at 0.5 below
loss_function = 'binary_crossentropy'  ## standard loss for binary classification
optimizer_used = 'sgd' ##stochastic gradient descent
num_epochs = 100                       ## training epochs
print(input_shape)
# + id="Fg89DFRBsil8"
from keras.models import Sequential
from keras.layers import Dense ## a "dense" layer is a layer where all the data coming in are connected
#to all nodes.
# binary classification shallow neural network model in Keras:
# one hidden Dense layer plus a single sigmoid output unit
model = Sequential()
model.add(Dense(units=hidden_nodes, input_shape=input_shape, activation=hidden_activation))
model.add(Dense(1, activation=output_activation))
#the model is declared, but we still need to compile it to actually
#build all the data structures
model.compile(optimizer=optimizer_used, loss=loss_function)
# + id="9tyCBVxlstZ1"
print(model.summary())
# + id="d9vFz7tqs3Nt"
# Train on the (still un-normalized) features; verbose=0 silences the log.
history = model.fit(X_train, y_train, epochs=num_epochs, validation_data=(X_val, y_val), verbose=0)
# + id="ACO_-lrytDMJ"
def plot_loss_history(h, title):
    """Plot the train and validation loss curves stored in a fit history."""
    for key, label in (('loss', 'Train loss'), ('val_loss', 'Validation loss')):
        plt.plot(h.history[key], label=label)
    plt.xlabel('Epochs')
    plt.title(title)
    plt.legend()
    plt.show()
plot_loss_history(history, 'Logistic ({} epochs)'.format(num_epochs))
# + id="0h2U3HKstfZ6"
from sklearn.metrics import confusion_matrix
# Predicted probabilities for the validation set (sigmoid output in (0, 1)).
predictions = model.predict(X_val)
# Threshold at 0.5; in this dataset target 1 = benign, 0 = malignant.
predicted_labels = np.where(predictions > 0.5, "benign", "malignant")
target_labels = y_val.to_numpy().reshape((len(y_val),1))
target_labels = np.where(target_labels > 0.5, "benign", "malignant")
# labels fixes the row/column order: malignant first.
con_mat_df = confusion_matrix(target_labels, predicted_labels, labels=["malignant","benign"])
print(con_mat_df)
# + [markdown] id="0uMJ7ZGHtM2C"
# ### Data normalization
# + id="KCtHCXavMhh-"
features.head()
# + id="-ztEYiPBtPeb"
#getting an idea about features averages, sd
avg = X_train.mean()  # per-column mean, computed on the TRAIN set only
std = X_train.std()   # per-column standard deviation (train set only)
print('Feature means')
print(avg)
print('\nFeature standard deviations')
print(std)
# + id="d9VfgbRptZDc"
#IMPORTANT: normalizing features using the same weights for both
#train and validation set (which are computed ON THE TRAIN SET),
#so no validation information leaks into the preprocessing
X_train = (X_train - avg)/std
X_val = (X_val - avg)/std
# + id="JHsFr2xj2Mi7"
X_train  # displayed by the notebook
# + id="xxDQZsmd2ZfH"
## # Configuration options (same settings as before, repeated so this
## # normalized-features section can be re-run on its own)
input_shape = (X_train.shape[1],) ## tuple that specifies the number of features
hidden_nodes = 16
hidden_activation = 'relu'
output_activation = 'sigmoid'
loss_function = 'binary_crossentropy'
optimizer_used = 'sgd' ##stochastic gradient descent
num_epochs = 100
# + id="5gJwSNCF2scZ"
#we are building a "sequential" model, meaning that the data will
#flow like INPUT -> ELABORATION -> OUTPUT.
from keras.models import Sequential
#a "dense" layer is a layer where all the data coming in are connected
#to all nodes.
from keras.layers import Dense
# binary classification shallow neural network model in Keras
# (a fresh model, this time trained on the normalized features)
model = Sequential()
model.add(Dense(units=hidden_nodes, input_shape=input_shape, activation=hidden_activation))
model.add(Dense(1, activation=output_activation))
#the model is declared, but we still need to compile it to actually
#build all the data structures
model.compile(optimizer=optimizer_used, loss=loss_function)
model.summary()
# + id="C2zBp1ej2fOT"
history = model.fit(X_train, y_train, epochs=num_epochs, validation_data=(X_val, y_val), verbose=0)
# + id="TRS6zVke21PH"
plot_loss_history(history, 'Logistic ({} epochs)'.format(num_epochs))
# + id="WEZLODEO27vU"
# Confusion matrix on the validation set (normalized-features model).
# BUG FIX: in this dataset target 1 = benign and 0 = malignant, so a sigmoid
# output > 0.5 means "no-cancer" (benign), not "cancer". The original mapping
# was inverted and labeled the benign cases as "cancer".
predictions = model.predict(X_val)
predicted_labels = np.where(predictions > 0.5, "no-cancer", "cancer")
target_labels = y_val.to_numpy().reshape((len(y_val),1))
target_labels = np.where(target_labels > 0.5, "no-cancer", "cancer")
con_mat_df = confusion_matrix(target_labels, predicted_labels, labels=["cancer","no-cancer"])
print(con_mat_df)
# + id="EpXslUr9sfTO"
# Train the SAME model for another 100 epochs: fit() does not reset the
# weights, so this continues from where the previous 100-epoch run stopped.
history2 = model.fit(X_train, y_train, epochs=100, validation_data=(X_val, y_val), verbose=0)
# + id="0iN8RHwWsQCr"
#putting together the whole history
history.history['loss'] += history2.history['loss']
history.history['val_loss'] += history2.history['val_loss']
#and plotting again
# BUG FIX: the combined history holds 100 + 100 = 200 epochs, not 500.
plot_loss_history(history, 'Logistic (200 epochs)')
# + id="yhL1mZAb3JNb"
# Confusion matrix after the additional training epochs.
# BUG FIX: in this dataset target 1 = benign and 0 = malignant, so a sigmoid
# output > 0.5 means "no-cancer" (benign), not "cancer". The original mapping
# was inverted and labeled the benign cases as "cancer".
predictions = model.predict(X_val)
predicted_labels = np.where(predictions > 0.5, "no-cancer", "cancer")
target_labels = y_val.to_numpy().reshape((len(y_val),1))
target_labels = np.where(target_labels > 0.5, "no-cancer", "cancer")
con_mat_df = confusion_matrix(target_labels, predicted_labels, labels=["cancer","no-cancer"])
print(con_mat_df)
# + id="uDDXxNOh3Obb"
# Deeper model: two hidden layers of hidden_nodes units each.
model = Sequential()
model.add(Dense(units=hidden_nodes, input_shape=input_shape, activation=hidden_activation))
# Only the first layer needs input_shape: Keras infers every later layer's
# input size from the previous layer's output. (The original also passed
# input_shape here, which Keras ignores for non-first layers.)
model.add(Dense(units=hidden_nodes, activation=hidden_activation))
model.add(Dense(1, activation=output_activation))
#the model is declared, but we still need to compile it to actually
#build all the data structures
model.compile(optimizer=optimizer_used, loss=loss_function)
model.summary()
# + id="ZtC-mQFe3agS"
history = model.fit(X_train, y_train, epochs=num_epochs, validation_data=(X_val, y_val), verbose=0)
# + id="cIdfl23B3eiM"
plot_loss_history(history, 'Breast cancer data (100 epochs)')
# + id="Z0HrDNLE3kuf"
# Confusion matrix for the two-hidden-layer model.
# BUG FIX: in this dataset target 1 = benign and 0 = malignant, so a sigmoid
# output > 0.5 means "no-cancer" (benign), not "cancer". The original mapping
# was inverted and labeled the benign cases as "cancer".
predictions = model.predict(X_val)
predicted_labels = np.where(predictions > 0.5, "no-cancer", "cancer")
target_labels = y_val.to_numpy().reshape((len(y_val),1))
target_labels = np.where(target_labels > 0.5, "no-cancer", "cancer")
con_mat_df = confusion_matrix(target_labels, predicted_labels, labels=["cancer","no-cancer"])
print(con_mat_df)
# + id="aW7OQRgwr55s"
import seaborn as sn
# Heatmap rendering of the last confusion matrix (annot shows the counts).
figure = plt.figure(figsize=(8, 8))
sn.heatmap(con_mat_df, annot=True,cmap=plt.cm.Blues)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
| lab_day2/day2_code04d_neural_networks_exercise [SOLVED].ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 ('data_science_py395')
# language: python
# name: python3
# ---
from utils import *
# BUG FIX: `from utils import *` binds the names defined inside utils
# (Dog, Pet, Cat, ...) but NOT the module name `utils` itself, so the
# original calls `utils.Dog(...)` raised NameError. Call Dog directly.
cachorrita = Dog("Labrador", "Hembra")
cachorrita.species
cachorrita.raza
cachorrita.sex
cachorrita.name
cachorrito = Dog("Bulldog", "Macho", name="Pepe")
cachorrito
cachorrito.name
cachorrito.present()
cachorrito.baptize("Mario")
cachorrito.name
mascota = Pet("Hembra", "miel")
gato = Cat("Hembra" , "miel")
gato.present()
| 02_PYTHON/week08/lab/clases_mascotas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_boston
from sklearn.metrics import mean_squared_error
# -
# <h1>Regression Models Demos</h1>
def real_function():
    """Sample 20 points from the line y = 2x + 3 on [-3, 5] with Gaussian noise.

    Returns a tuple (x, y) of numpy arrays. The noise is drawn from the
    global numpy RNG, so results differ between calls unless a seed is set.
    """
    n = 20
    x = np.linspace(-3, 5, n)
    noise = np.random.normal(loc = 0, scale = 2, size = n)
    y = (2 * x + 3) + noise
    return x, y
# Draw one noisy sample from the underlying line.
x, y = real_function()
x, y
# +
plt.scatter(x, y)
plt.xlabel('x')
plt.ylabel('y', rotation=0)
plt.show()
# -
# Try a few hand-picked candidate lines and show the MSE cost of each.
y_predicted = 3 * x + 0
for y_predicted in [3 * x, -4 * x - 2, np.zeros(len(x)), np.zeros(len(x)) + 5, 15 * x - 10]:
    plt.scatter(x, y)
    plt.plot(x, y_predicted, c='r')
    plt.xlabel('x')
    plt.ylabel('y', rotation=0)
    mse = np.mean((y_predicted - y) ** 2)  # mean squared error of this candidate
    plt.title(f'Cost: {mse}')
    plt.show()
def perform_gradient_descent(x, y, a, b, learning_rate):
    """Take one gradient-descent step on the MSE cost of the line a*x + b.

    x, y are numpy arrays of the same length; a, b are the current slope
    and intercept. Returns the updated (a, b) pair.
    """
    residuals = y - (a * x + b)
    grad_a = -2 / len(x) * np.sum(x * residuals)
    grad_b = -2 / len(y) * np.sum(residuals)
    return (a - learning_rate * grad_a, b - learning_rate * grad_b)
# One manual step starting from (a, b) = (3, 0) with a small learning rate.
perform_gradient_descent(x, y, 3, 0, 0.001)
# +
# Run 200 gradient-descent iterations from (0, 0), plotting the current fit
# and its MSE cost every 10 steps.
a = 0
b = 0
learning_rate = 0.1
for i in range(200):
    a, b = perform_gradient_descent(x, y, a, b, learning_rate)
    if i % 10 == 0:
        plt.scatter(x, y)
        y_predicted = a * x + b
        plt.plot(x, y_predicted, c='r')
        plt.xlabel('x')
        plt.ylabel('y', rotation=0)
        mse = np.mean((y_predicted - y) ** 2)
        plt.title(f'Cost: {mse}')
        plt.show()
print(a, b)  # final fitted slope and intercept
# -
# Fit the same line with sklearn; x must be 2-D: (n_samples, n_features).
model = LinearRegression()
model.fit(x.reshape(-1, 1), y)
# second variant - x.reshape(len(x), 1)
model.coef_, model.intercept_
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2, so this section requires an older sklearn (or another dataset).
print(load_boston().DESCR)
data = load_boston().data
targets = load_boston().target
data.shape
targets.shape
boston_model = LinearRegression()
boston_model.fit(data, targets)
boston_model.coef_
predicted_targets = boston_model.predict(data)
# RMSE computed on the training data itself (no train/test split here).
np.sqrt(mean_squared_error(targets, predicted_targets))
plt.hist(targets, bins='fd', alpha = 0.7)
plt.hist(predicted_targets, bins='fd', alpha = 0.7)
plt.show()
boston_model.score(data, targets) # preview for next course (Machine Learning)
abs_differences = np.abs(predicted_targets - targets)
plt.hist(abs_differences, bins='fd')
plt.show()
rel_differences = np.abs(predicted_targets - targets) / targets
plt.hist(rel_differences, bins='fd')
plt.show()
plt.scatter(targets, abs_differences, s = 2)
plt.show()
| regression_models/Regression Models Demos.ipynb |
# +
# Based on
# https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_classification.ipynb
# (MIT License)
from __future__ import absolute_import, division, print_function
# Import keras, installing tensorflow on the fly if it is missing
# (the "# %pip" line is a notebook magic, run only on the except path).
try:
    from tensorflow import keras
except:
    # %pip install tensorflow
    from tensorflow import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "figures"
def savefig(fname):
    """Save the current matplotlib figure under the figures/ directory."""
    plt.savefig(os.path.join(figdir, fname))
# print(tf.__version__)
np.random.seed(0)
# Download (on first use) and load the CIFAR-10 train/test split.
data = keras.datasets.cifar10
(train_images, train_labels), (test_images, test_labels) = data.load_data()
# print(np.shape(train_images))
# print(np.shape(test_images))
# For CIFAR:
# (50000, 32, 32, 3)
# (10000, 32, 32, 3)
class_names = ["plane", "car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
# Show the first 25 training images in a 5x5 grid with their class names.
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    y = train_labels[i][0]  # each label is a length-1 array; take the scalar
    plt.xlabel(class_names[y])
savefig("cifar10-data.pdf")
plt.show()
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit ('PythonSoftwareFoundation.Python.3.8_qbz5n2kfra8p0')
# name: python3810jvsc74a57bd021d078d886b85fede069205476867ce54a60b54e2bdc890629a20862db136af1
# ---
# + [markdown] id="5SaYFF_Ovuh_"
# # Python
# ## intro 101
# Lab 1 - Qué va a crear
# En este desafío, creará un contador de calorías que solicita al usuario lo siguiente:
#
# * La fecha actual (en cualquier formato)
# * Calorías ingeridas en el desayuno
# * Calorías ingeridas en la comida
# * Calorías ingeridas en la cena
# * Calorías ingeridas en tentempiés
# * Después, el programa sumará todas las calorías y les dará formato de mensaje.
#
#
#
# + id="z-0NBe6yuwQT" colab={"base_uri": "https://localhost:8080/"} outputId="dfde9502-1437-472b-8259-018df1ba4d94"
# Calorie counter: ask for the date, then the calories of each meal, and
# report the daily total (user-facing prompts kept in Spanish).
print("¿Qué día es hoy?")
# + colab={"base_uri": "https://localhost:8080/"} id="C20v6atFhFg_" outputId="6aa99abf-b229-4d68-b755-e9371d674f29"
date = input()
# + colab={"base_uri": "https://localhost:8080/"} id="sHEctM-ehYn9" outputId="3debfcf2-386f-4d62-9869-9336ace94b15"
print("¿Cuántas calorías ingieres en el desayuno?")
# + colab={"base_uri": "https://localhost:8080/"} id="SyaTZC9ahosE" outputId="9d7e7eb8-666b-455b-88a4-8eb9a4e77c93"
Kal_desayuno = int(input())  # breakfast calories (int() raises on non-numeric input)
# + colab={"base_uri": "https://localhost:8080/"} id="0bnJVBa5iXO6" outputId="9fb4d2a2-333e-4c0c-e207-ac8737e1efc3"
print("¿Cuántas calorías ingieres en el comida?")
# + colab={"base_uri": "https://localhost:8080/"} id="5bWJfHhyitiw" outputId="00405811-2b3f-4078-855c-a748f4a24c66"
Kal_comida = int(input())  # lunch calories
# + colab={"base_uri": "https://localhost:8080/"} id="l69bQZm1ip44" outputId="8bdbe157-f6e5-4f8d-b1eb-2d074066c41a"
print("¿Cuántas calorías ingieres en el cena?")
# + colab={"base_uri": "https://localhost:8080/"} id="adyJzdSKjDDB" outputId="ee4bf267-20ac-4dd7-df46-3bfd64190d4b"
Kal_cena = int(input())  # dinner calories
# + colab={"base_uri": "https://localhost:8080/"} id="c5UJ0-SfisVw" outputId="fe247805-7163-4e49-99fa-8abae5bd0ac1"
print("¿Cuántas calorías ingieres en el tentempié?")
# + colab={"base_uri": "https://localhost:8080/"} id="dmt5BWMZjDQQ" outputId="3888e56d-b7e8-4a10-be49-a5dc40deae85"
Kal_tentempié = int(input())  # snack calories
# + id="C03AHSfckMeE"
# Daily total across the four meals.
Total_Kal = Kal_desayuno + Kal_comida + Kal_cena + Kal_tentempié
# + colab={"base_uri": "https://localhost:8080/"} id="luL0mcpllDZR" outputId="7f4b000d-4bcc-405d-e970-1efb447b2250"
print(Total_Kal)
# + colab={"base_uri": "https://localhost:8080/"} id="bmrfZWCRjhBO" outputId="6d6adf43-a2b7-4300-8352-e909b7b41887"
print("La suma de todas las calorías consumidas el ", date, "es de: ",Total_Kal)
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="O07H_DChiJia" outputId="65397fec-21e6-4d7d-a6f7-8cc3f41fffd9"
# Bar chart of calories per meal.
x = ["Desayuno", "Comida", "Cena", "tentempié"]
y = [Kal_desayuno, Kal_comida, Kal_cena, Kal_tentempié]
plt.bar(x,y)
# BUG FIX: the original ended with `plt.show` (a bare function reference that
# is never called); it must be invoked to render the figure outside inline
# notebook backends.
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="H5N8XSjGtUMr" outputId="07fa57ca-3814-4e82-bc21-f1e671835c6e"
# Compare the day's total against the recommended 500-600 range.
if (Total_Kal > 600):
    print("Cuidado! Consumes más calorías de las recomendadas por la OMS")
elif (Total_Kal <= 600 and Total_Kal >= 500):
    # BUG FIX: the original used `or` here, which is true for every number,
    # so the final `else` branch could never be reached.
    print("Bien! estás dentro de la media recomendada por la OMS")
else:
    print("Cuidado! Consumes menos calorías de las recomendadas por la OMS")
| Lab01v3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="z_oecfGdOnZi"
# Programación
# ===
# + [markdown] id="C8mqfeQIOnZk"
# #### Contenido
# + [markdown] id="UlfCksoFOnZk"
# > * [Estructuras de Control](#Estructuras-de-Control)
# * [Estructura if](#Estructura-if)
# * [ Estructura for](#Estructura-for)
# * [Comandos break y continue](#Comandos-break-y-continue)
# * [Estructura while](#Estructura-while)
# * [Comando else en estructuras for y while](#Comando-else-en-estructuras-for-y-while)
# > * [Funciones de usuario](#Funciones-de-usuario)
# > * [Estructuras de datos](#Estructuras-de-datos)
# * [Listas](#Listas)
# * [Funciones filter y map](#Funciones-filter-y--map)
# * [Desempaquetado de listas](#Desempaquetado-de-listas)
# * [Uso de listas como stacks (pilas)](#Uso-de-listas-como-stacks)
# * [Uso de listas como colas (queue)](#Uso-de-listas-como-colas)
# * [List comprenhensions](#List-comprenhensions)
# * [Comando del](#Comando-del)
# * [Tuplas y secuencias](#Tuplas-y-secuencias)
# * [Conjuntos](#Conjuntos)
# * [Diccionarios](#Diccionarios)
# * [Comparación de secuencias y otros tipos de datos](#Comparación-de-secuencias-y-otros-tipos-de-datos)
# > * [Impresión con formato](#Impresión-con-formato)
# + [markdown] id="qsbR7WC6OnZl"
# # Estructuras de Control
# + [markdown] id="3p_F4hsIOnZl"
# ## Estructura `if`
# + [markdown] id="TAAk5IrYOnZl"
# [Contenido](#Contenido)
# + [markdown] id="9WF4_MNCOnZl"
# La identación usando espacios en blanco es el mecanismo definido en Python para delimitar el cuerpo asociado a las estructuras de control y a las funciones de usuario.
#
# En el siguiente ejemplo, la estructura `else if` es reemplazada comunmente por la palabra reservada `elif`. El uso del caracter `:` es obligatorio.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5942, "status": "ok", "timestamp": 1646284957910, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="rt470032OnZm" outputId="6aa031f2-255e-444b-e3e0-7e743c4ed139"
# this code is much harder to read (deliberately bad example:
# nested if/else instead of a flat elif chain)
x = int(input("Please enter an integer: "))
if x < 0:
    x = 0
    print('Negative changed to zero')
else:
    if x == 0:
        print('Zero')
    else:
        if x == 1:
            print('Single')
        else:
            print('More')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5392, "status": "ok", "timestamp": 1646284963289, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="uSXzGMzUOnZn" outputId="fec5cc3f-1fc3-41a8-e32c-8869e7847a6f"
# much more readable code: the same logic flattened with elif
x = int(input("Please enter an integer: "))
if x < 0:
    x = 0
    print('Negative changed to zero')
elif x == 0:
    print('Zero')
elif x == 1:
    print('Single')
else:
    print('More')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 58, "status": "ok", "timestamp": 1646284963290, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="nt9uO9qfOnZo" outputId="1da98161-0756-41e7-f907-4fd692820ed8"
x
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 48, "status": "ok", "timestamp": 1646284963291, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="7p9ssPfpOnZo" outputId="8e07a683-5380-447f-d2c3-2f40cb155562"
# conditional (ternary) expression: 1 when x == 0, otherwise 0
x1 = 1 if x == 0 else 0
x1
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 41, "status": "ok", "timestamp": 1646284963292, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="6IwH1ttgOnZo" outputId="a187142a-8bbd-4634-eb3d-4462d07d9063"
# indicator of x being positive
x2 = 1 if x > 0 else 0
x2
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1646284963293, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="T9gkwP0UOnZp" outputId="14bb61ff-d6ac-4a6d-d6dc-8ea001d1db22"
# a small list used to demonstrate slice assignment and del
v = [1,2,3,4]
v
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 33, "status": "ok", "timestamp": 1646284963293, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="X5DF3O5hOnZp" outputId="62030b59-9b12-4d3e-dc00-344815ffd334"
# assigning an empty list to a slice removes those elements (here index 2)
v[2:3] = []
v
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646284963294, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Nwb0JUJ9OnZp" outputId="66b8f78b-e548-4612-f696-e5a2da254fb3"
# del removes the element at the given index in place
del v[2]
v
# + [markdown] id="4iio2YaqOnZq"
# **Ejercicio.** Escriba una función en Python que implemente la siguiente función matemática:
#
# $$ \text{absval}(x) = \left\{
# \begin{array}{rr}
# x & \text{if } x > 0 \\
# 0 & \text{if } x = 0 \\
# -x & \text{if } x < 0
# \end{array}
# \right. $$
# + [markdown] id="vZRVcI0dOnZq"
# **Ejercicio.** Escriba la función $f(n)$ definida como:
# $$ f(n) = \left\{
# \begin{array}{lr}
# 1 & \text{if } n = 0 \\
# n * f(n - 1) & \text{if } n > 0
# \end{array}
# \right. $$
# + [markdown] id="Oat1DYMxOnZq"
# Mediante el uso de funciones recursivas es posible implementar tanto cálculos recursivos como iterativos, tal como se ejemplifica a continuación. Suponga que se desea calcular la suma de los primeros $n$ números naturales, definida como:
# + [markdown] id="O9HkChd_OnZq"
# $$f(n) = 1 + 2 + 3 + ... + (n-2) + (n-1) + n $$
# + [markdown] id="_Z9VHnW6OnZr"
# Agrupando los primeros $(n-1)$ términos se obtiene que:
# + [markdown] id="yXJjc-IxOnZr"
# $$ f(n) = [1 + 2 + 3 + ...+ (n-2) + (n-1)] + n $$
# + [markdown] id="q_36eP0yOnZr"
# Pero la cantidad entre corchetes es $f(n-1)$, es decir:
# + [markdown] id="tx3EeT08OnZr"
# $$ f(n) = f(n-1) + n$$
# + [markdown] id="gzaqRcyFOnZr"
# y a su vez:
# + [markdown] id="i1fqzEUeOnZr"
# $$ f(n-1) = f(n-2) + (n-1)$$
# + [markdown] id="oSq2PwmzOnZr"
# El proceso puede repetirse hasta que $n=0$, es decir, $f(0) = 0$. Por consiguiente, la función puede definirse como:
# + [markdown] id="4PoaeangOnZs"
# $$ f(n) = \left\{
# \begin{array}{lr}
# 0 & \text{if } n = 0 \\
# n + f(n - 1) & \text{if } n > 0
# \end{array}
# \right. $$
# + [markdown] id="FxbRCEONOnZs"
# **Ejercicio.** Implemente la función anterior en Python.
# + [markdown] id="Bf1bSay1OnZs"
# **Ejercicio.** Escriba la función `listLength(x)` que calcula la longitud de la lista `x`. _Ayuda_: esta función puede definirse como `listLength(x) = 1 + listLenth(x')` donde `x'` es la lista original `x` sin el primer elemento. La recursión se detiene cuando `x` es la lista vacía.
#
# ```
# listLength([1, 2, 3, 4]) ==> 4
# listLength([['a', 'b', 'c'], [1, 2, 3]]) ==> 2
# ```
# + [markdown] id="L6MR9ObEOnZt"
# **Ejercicio.** Escriba la función `listDeepLength(x)` que calcula la totalidad de elementos que contiene `x`, por ejemplo:
#
# ```
# listDeepLength([1, 2, 3, 4]) ==> 4
# listDeepLength([['a', 'b', 'c'], [1, 2, 3]]) ==> 7
# ```
# + [markdown] id="Qz71nrkIOnZt"
# **Ejercicio.** Escriba la función `list2(x)` que devuelve la lista `x` con sus elementos elevados al cuadrado.
# + [markdown] id="ZhfkctilOnZt"
# **Ejercicio.** Escriba la función `listExpand(x)` que 'rompe' las listas que son elementos de la lista `x`, es decir:
#
# ```
# listExpand([1, 2, 3, 4]) ==> [1, 2, 3, 4]
# listExpand([['a', ['b', 'c']], [1, 2, 3]]) ==> ['a', 'b', 'c', 1, 2, 3]
# ```
# + [markdown] id="26RJl_X2OnZt"
# **Ejercicio.** Escriba la función `listUpper(x)` que recibe la lista de strings `x` y devuelve una lista de strings en que la primera letra de cada string está en mayúsculas.
# + [markdown] id="T77cBcMOOnZt"
# **Ejercicio.** Escriba la función `listFilter(f, x)` donde `f` es una función booleana que se le aplica a cada elemento de la lista `x`. `listFilter` devuelve la lista conformada por los elementos de `x` para los cuales `f` devuelve verdadero.
# + [markdown] id="847PBXcMOnZu"
# **Ejercicio.** Escriba la función `sequence(n)` que devuelve una lista conformada por los enteros desde cero hasta `(n-1)`.
# + [markdown] id="3pg-sE45OnZu"
# ## Estructura `for`
# + [markdown] id="5vurI-xJOnZu"
# [Contenido](#Contenido)
# + [markdown] id="fQne4LxoOnZu"
# El comando `for` permite iterar sobre los elementos de una lista.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 418, "status": "ok", "timestamp": 1646285005073, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="BWfYg3XdOnZu" outputId="e730330c-c81b-43bf-b2a2-35f1b9023788"
# iterate directly over the elements of a list
words = ['cat', 'window', 'door', 'abcdefg']
for w in words:
    print(w, len(w))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 447, "status": "ok", "timestamp": 1646285006123, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="NwiHSzOOOnZu" outputId="bcb05d22-2b31-4a2b-9e1d-ff4e70547f40"
for w in words[:]: # words[:] creates a new list, distinct from the one referenced by words
    if len(w) > 6:
        words.insert(0, w)  # safe: we iterate over the copy while mutating the original
words
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 50, "status": "ok", "timestamp": 1646285006124, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="yRRijO_sOnZv" outputId="9a510ddf-a2d0-4d71-83c9-4cf39cfccac1"
# append adds a single element at the end of the list
words.append('final')
words
# + [markdown] id="zZ4-spVYOnZv"
# La función `range(n)` devuelve un objeto cuyos elementos son los enteros consecutivos desde `0` hasta `n-1`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 47, "status": "ok", "timestamp": 1646285006124, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="2cJN_1aXOnZv" outputId="a2046600-6a83-4dc0-8711-0734abba2887"
# range(n) yields the consecutive integers 0 .. n-1
for i in range(5):
    print(i)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 45, "status": "ok", "timestamp": 1646285006125, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Hq8TvFG8OnZv" outputId="b0493f2f-995d-46a9-81fb-cb5b64c5f487"
range(5)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 42, "status": "ok", "timestamp": 1646285006125, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="t3VhYiysOnZv" outputId="e9f7a72c-816c-45bc-c04f-f3cf8b362148"
list(range(5)) # un rango puede convertirse en una lista
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 39, "status": "ok", "timestamp": 1646285006125, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="v4txw2u-OnZw" outputId="caffcb7c-147f-484f-c00c-447053f24d52"
# range(start, stop): the integers from 5 up to (but not including) 10
for i in range(5, 10):
    print(i, end = ', ') # the end argument makes print emit ', ' instead of a newline
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1646285006126, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Jwh-xrjyOnZw" outputId="9940bb1a-ab71-479e-c4fc-0b68e240786b"
# range(start, stop, step): 0, 3, 6, 9
for i in range(0, 10, 3):
    print(i, end = ', ')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 35, "status": "ok", "timestamp": 1646285006126, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="KzjQTP6nOnZw" outputId="fa30d1fd-409e-43d6-b498-ee82f367fd5d"
# a negative step counts downwards: -10, -40, -70
for i in range(-10, -100, -30):
    print(i, end = ', ')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285006126, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="T2lk13VoOnZw" outputId="63f28d84-324e-45fd-bb5e-1003ac5ae534"
# sep sets the separator inserted between print's arguments
print('hola', 'soy', 'Pauli', sep=' --- ')
# + [markdown] id="epyQeK7YOnZw"
# En el comando `for` existen dos formas para obtener un elemento de una lista y su posición. La primera es generar los indices usando `range` y con ellos obtener los elementos:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285006127, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="lPO7RFpQOnZx" outputId="3d4bd734-b563-4ae7-d5e8-e0f9d992dff5"
# index-based iteration: generate the indices with range, then subscript
a = ['a', 'b', 'c', 'd', 'e']
for i in range(len(a)):
    print(i, a[i])
# + [markdown] id="dnnyOhSZOnZx"
# La segunda forma es enumerar los elementos de la lista usando la función `enumerate`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285006127, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="SipLYareOnZx" outputId="e60e2196-3fa1-4810-8ef7-97c29598fd4a"
# enumerate pairs each element with its index
a = ['a', 'b', 'c', 'd', 'e']
for (i, x) in enumerate(a):
    print(i, x)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1646285006128, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="4_aF5ys0OnZx" outputId="2a132b89-5a49-4d0b-81e6-64617aae5d9a"
# a tuple of mixed types
t = (1, 'Pauli', 'Bi')
t
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 24, "status": "ok", "timestamp": 1646285006128, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="de8XVT8KOnZx" outputId="5cbd10a1-1d25-4979-cc5e-27eac0c11a5a"
# tuple unpacking into three variables; sep='\t' prints them tab-separated
n, m, d = t
print(n,m,d,sep='\t')
# + [markdown] id="mmTHIWIWOnZx"
# **Ejercicio.** Escriba la función `stringsLength(x)` que recibe la lista de strings `x` y devuelve una lista de enteros con las longitudes de los strings. Haga una función usando un ciclo `for` y otra usando recursión.
# + [markdown] id="xYj947UQOnZy"
# **Ejercicio.** Escriba la función `listLength(x)` que calcula la longitud de la lista `x` usando un ciclo `for`. _Ayuda_: esta función puede definirse como un ciclo `for` que recorre los elementos de la lista `x` y le suma 1 a un contador por cada elemento.
# + [markdown] id="DU3tP1qeOnZy"
# **Ejercicio.** Escriba la función `list2(x)` que devuelve la lista `x` con sus elementos elevados al cuadrado. Use un ciclo `for` en vez de recursión para su implementación.
# + [markdown] id="l6k4i25DOnZy"
# **Ejercicio.** Escriba la función `listUpper(x)` que recibe la lista de strings `x` y devuelve una lista de strings en que la primera letra de cada string está en mayúsculas. Use un ciclo `for` para su implementación en vez de recursión.
# + [markdown] id="krxmbDjEOnZy"
# **Ejercicio.** Escriba la función `listFilter(f, x)` donde `f` es una función booleana que se le aplica a cada elemento de la lista `x`. `listFilter` devuelve la lista conformada por los elementos de `x` para los cuales `f` devuelve verdadero. Use un ciclo `for` en vez de recursión.
# + [markdown] id="OEATF93nOnZy"
# **Ejercicio.** Escriba la función `listSort(x)` que ordena los elementos de la lista `x` usando el método de la burbuja.
# + [markdown] id="RWo6uBNbOnZy"
# ## Comandos `break` y `continue`
# + [markdown] id="tKS8r8biOnZy"
# [Contenido](#Contenido)
# + [markdown] id="9nRD5c2xOnZy"
# El comando `continue` causa que se ejecute una nueva iteración del ciclo `for` sin pasar por el resto del código que hace parte del cuerpo del ciclo `for`. El comando `break` causa la salida del cuerpo del ciclo `for`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 21, "status": "ok", "timestamp": 1646285006128, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="hFCcKM0VOnZz" outputId="46549eb1-253f-4963-f610-e02135396f06"
for n in range(1, 10):
    if n < 4:
        continue  # skip the rest of the body and start the next iteration
    print(n) # only reached when n >= 4
    if n > 6:
        break # leave the loop as soon as n > 6
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285006131, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Gbk-Zwg0OnZz" outputId="9bda26f9-4399-4b8f-d33d-b68496bf0da7"
# try/except demo: 10/0 raises ZeroDivisionError, so 'Fun' never prints and
# the bare except swallows the error (an anti-pattern outside of teaching demos)
try:
    x = 10/0
    print('Fun')
except:
    pass
    #print('Fallo')
print('Termino')
# + executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285006132, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="1lLwajPROnZz"
def fun1():
    """Placeholder function with an intentionally empty body."""
# + [markdown] id="lNvQmfC3OnZz"
# ## Estructura `while`
# + [markdown] id="UG6Lh-7ROnZz"
# [Contenido](#Contenido)
# + [markdown] id="bn7bEXZpOnZz"
# El comando `while` permite iterar mientras se cumpla una condición. Al igual que en un ciclo `for`, el código perteneciente al cuerpo del `while` se identifica por indentación.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 21, "status": "ok", "timestamp": 1646285006132, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="GIu7-49pOnZ0" outputId="69f2b017-0021-4003-a22b-aaa36260ca8e"
# Count from 0 to 4 with a while loop.
n = 0
while n < 5: # body runs as long as n < 5 holds
    print(n)
    n = n + 1
print('fin')
# + [markdown] id="bTnzltRAOnZ0"
# **Ejercicio.** Escriba la función `stringsLength(x)` que recibe la lista de strings `x` y devuelve una lista de enteros con las longitudes de los strings. Implemente su función usando un ciclo `while`.
# + [markdown] id="wyEJekC8OnZ0"
# **Ejercicio.** Escriba la función `list2(x)` que devuelve la lista `x` con sus elementos elevados al cuadrado. Use un ciclo `while` en vez de un ciclo `for` o recursión para su implementación.
# + [markdown] id="5XeIRidUOnZ0"
# **Ejercicio.** Escriba la función `listUpper(x)` que recibe la lista de strings `x` y devuelve una lista de strings en que la primera letra de cada string está en mayúsculas. Use un ciclo `while` para su implementación en vez de un ciclo `for` o de recursión.
# + [markdown] id="I6zhlDPDOnZ0"
# ## Comando `else` en estructuras `for` y `while`
# + [markdown] id="X_NGYzs4OnZ0"
# [Contenido](#Contenido)
# + [markdown] id="Jttj_qcgOnZ1"
# Los ciclos creados usando los comandos `for` y `while` pueden contener un comando `else`. En el caso de los ciclos `for`, el cuerpo del `else` se ejecuta cuando se termina el ciclo; para los ciclos `while`, el cuerpo del `else` se ejecuta cuando el condicional se hace falso.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 21, "status": "ok", "timestamp": 1646285006133, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="9y1LiFxTOnZ1" outputId="3fcefe6a-eac2-47c1-dd6c-4b87eb40ed37"
n = 0
while n < 5: # body runs as long as n < 5 holds
    print(n)
    n = n + 1
else:
    # runs because the loop ended with its condition becoming false
    print('cuerpo else')
print('fin')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1646285006133, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="VHNSliOnOnZ1" outputId="748b5a04-930a-4663-ad09-f209ccb7685a"
for n in range(5): # iterates n = 0..4
    print(n)
    n = n + 1 # NOTE(review): no effect on iteration; `for` rebinds n each pass
else:
    # runs because the loop completed without hitting a `break`
    print('cuerpo else')
print('fin')
# + [markdown] id="TAImdKKjOnZ1"
# # Funciones de usuario
# + [markdown] id="QBZGv0eDOnZ1"
# [Contenido](#Contenido)
# + [markdown] id="gA4uNhtNOnZ1"
# Las funciones son definidas mediante la palabra reservada `def`. En el siguiente ejemplo se presenta una función que calcula la serie de Fibonacci.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 19, "status": "ok", "timestamp": 1646285006133, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="P_ywYuJQOnZ1" outputId="cb91b955-1a6d-4534-e5b6-6ecd39d4931f"
def fib(n):
    """Print the Fibonacci terms that are strictly less than n, space separated."""
    prev, curr = 0, 1  # seed values of the sequence
    while prev < n:
        print(prev, end=' ')
        prev, curr = curr, prev + curr
    print()  # trailing newline

# Invoke the function
fib(2000)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 18, "status": "ok", "timestamp": 1646285006133, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="vJaAeXdXOnZ2" outputId="3ed4d5e5-4d2f-4eda-ec98-5de206295675"
fib # the function itself is an object.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 18, "status": "ok", "timestamp": 1646285006134, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="7FisyPkFOnZ2" outputId="af8f74c0-cebd-4fc2-daf6-eeba6743d754"
help(fib) # show fib's help (rendered from its docstring)
# + executionInfo={"elapsed": 17, "status": "ok", "timestamp": 1646285006134, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="njIEhOTSOnZ2"
f = fib # store the function object in the variable f
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 517, "status": "ok", "timestamp": 1646285006634, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="R1p1OjU4OnZ2" outputId="775463a4-59c7-43de-e1ce-590effe5bce0"
f(100) # Fibonacci terms less than 100
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 46, "status": "ok", "timestamp": 1646285006635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="WoDhluWeOnZ2" outputId="d7db1b94-0f4e-4061-ff6e-3784c83104b9"
# Instead of printing, collect the series terms in a list and return it.
def fib2(n):
    """Return a list with the Fibonacci terms that are strictly less than n."""
    terms = []  # accumulator for the series
    prev, curr = 0, 1
    while prev < n:
        terms.append(prev)  # push the current term onto the list
        prev, curr = curr, prev + curr
    return terms

f100 = fib2(100) # call the function
f100 # show the result
# + [markdown] id="UheVEdNFOnZ3"
# **Ejercicio.** Escriba la función recursiva de la serie de Fibonacci definida como:
# + [markdown] id="0YQqoyGrOnZ3"
# $$ f(n) = \left\{
# \begin{array}{lr}
# 0 & \text{if } n = 0 \\
# 1 & \text{if } n = 1 \\
# f(n-1) + f(n-2) & \text{if } n > 1
# \end{array}
# \right. $$
# + [markdown] id="KGnXEPhpOnZ3"
# El valor por defecto de los argumentos (en este caso `L=[]` para el siguiente ejemplo) se evalúa solamente la primera vez; entonces, la primera vez que se invoca `f` se hace `L=[]` (lista vacía), pero esto no ocurre en las llamadas posteriores.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 45, "status": "ok", "timestamp": 1646285006635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="TeA1wLJfOnZ3" outputId="b207b209-6820-4978-b227-b51dde4a5604"
# Pitfall demo: the default list is created once, at definition time,
# so every call without L appends into the SAME list object.
def f(a, L=[]):
    L.append(a)
    return L
print(f(1))
print(f(2))
print(f(3))
# + [markdown] id="Xq0fng3eOnZ3"
# Para solucionar este comportamiento, se hace `L=None` y en el cuerpo se hace `L=[]`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 44, "status": "ok", "timestamp": 1646285006635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="aHSR0wK7OnZ3" outputId="664c58b2-03e3-4931-e7c0-b2b8f42b82a9"
def f(a, L=None):
    """Append a to L and return it; a fresh list is created when L is omitted."""
    L = [] if L is None else L
    L.append(a)
    return L

print(f(1))
print(f(2))
print(f(3))
# + [markdown] id="vEf20nEPOnZ4"
# Las funciones pueden ser invocadas con una cantidad variable de argumentos. En el siguiente ejemplo, los argumentos son guardados como una tupla en la variable `args`. Con el `*` se indica cual es el argumento que guarda la tupla.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 44, "status": "ok", "timestamp": 1646285006636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="tr2xCVCfOnZ4" outputId="8d0c7aab-3dbd-4f81-a08c-13302aba4438"
def f(*args): # simply prints the tuple of arguments it was called with
    print(args)
f(1, 2, 3)
# + [markdown] id="2mmQlZ8UOnZ4"
# En el siguiente ejemplo, se está dando un valor por defecto al argumento `c`, tal que cuando la función es invocada, la variable `c` toma el valor especificado.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 42, "status": "ok", "timestamp": 1646285006636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="YvMJ2k59OnZ4" outputId="3cc65002-13ab-41fd-ae30-65c733e4e288"
def f(a, *b, c = 'hola'):
    print(a)
    print(b)
    print(c)
f(1, 2, 3, 4, 5) # the 5 is packed into b; it is NOT assigned to c
# + [markdown] id="xD9uvMDeOnZ4"
# Note que a diferencia del caso anterior, en el cual se hacía la llamada `f(1, 2, 3, 4, 5)`, en el siguiente ejemplo se hace explícita la asignación a la variable `c`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 41, "status": "ok", "timestamp": 1646285006636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="1Mtow4y7OnZ4" outputId="ad504cad-4b35-468f-a571-6d864c4d9274"
f(1, 2, 3, 4, c=5) # c must be passed explicitly as a keyword argument.
# + [markdown] id="7yQeHt7cOnZ4"
# **Ejercicio.** Escriba la función `magicsum` que puede recibir un número variable de argumentos. Los argumentos son números o listas de números. La función devuelve la suma de sus argumentos.
#
# ```
# magicsum([1, 2, 3]) ==> 6
# magicsum([1, 2, 3], 4) ==> 10
# magicsum(4, [1, 2, 3]) ==> 10
# magicsum(1, 2, 3, 4) ==> 10
# ```
# + [markdown] id="X-vLgtt7OnZ5"
# Python permite la definición de funciones anónimas (que no tienen nombre) usando la palabra reservada `lambda`. En el siguiente ejemplo, se define la función `incr` la cual incrementa en la unidad su argumento.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 41, "status": "ok", "timestamp": 1646285006637, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="JIvWTEEvOnZ5" outputId="700d0fa7-ee4c-4d77-d04b-132ec0e47c0d"
def incr(x):
    """Return x incremented by one."""
    return x + 1

incr(1)
# + [markdown] id="HZSVIOSDOnZ5"
# Esto es equivalente a asignar una función anónima a una variable:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 39, "status": "ok", "timestamp": 1646285006637, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Sl_Q1AvuOnZ5" outputId="2eda66b7-f08e-425c-d46b-d9370f40ba01"
incr0 = lambda x,y: x + y + 1 # anonymous two-argument function: returns x + y + 1
incr0(x=1,y=6)
# + [markdown] id="KEfo-I-POnZ5"
# En el código anterior, el código `lambda x,y:` indica que hay una función anónima con dos argumentos llamados `x` y `y`. El código `x + y + 1` es lo que retorna la función.
# + [markdown] id="UYS8Rvf2OnZ5"
# No es necesario realizar la asignación de la función anónima a una variable; la función anónima puede ser usada directamente, tal como se ilustra en el siguiente ejemplo.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1646285006637, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="eJTwhMaiOnZ5" outputId="d9616cf2-3350-4ae2-9847-545a545e892e"
(lambda x:x + 1)(2) # define an anonymous increment function and call it immediately
# + [markdown] id="pJZHoX0lOnZ6"
# **Ejercicio.** Escriba una función anónima que reciba dos argumentos y devuelva su suma.
# + [markdown] id="81e0CO0zOnZ6"
# Las funciones pueden retornar funciones, tal como es el caso presentado a continuación donde `return` devuelve una función anónima. Note que el valor de `n` persiste, tal que la función `f` suma `42` a su argumento y `g` suma `1` a su argumento.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1646285006638, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="TVwidT88OnZ6" outputId="c147eaf0-bc55-4236-b563-12208b3a6a4d"
def make_incrementor(n):
    """Return a function that adds n to its argument (n is captured in a closure)."""
    def _increment(x):
        return x + n
    return _increment

f = make_incrementor(42)
f(0)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 35, "status": "ok", "timestamp": 1646285006638, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="QW26_EJ1OnZ6" outputId="c097b78d-4efd-4942-d4ef-1197dca6d97e"
f(1) # 1 + 42
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1646285006638, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="jiCWx0NvOnZ6" outputId="1d21aa7c-f736-42d0-93fa-b429ced00018"
g = make_incrementor(1) # g adds 1 to its argument
g(1)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1646285006639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="D6f_1mVmOnZ7" outputId="51348914-e62e-4968-88aa-6d8aced25613"
f = make_incrementor(10) # f now adds 10
x = 0
for i in range(1,6):
    x = f(i) # x keeps only the last value: f(5) = 15
x
# + [markdown] id="drm-sOX1OnZ7"
# Cuando una estructura de control (`if`, `for`, `while`, ...) o una función no tiene código asignado a su cuerpo se usa el comando `pass`.
# + executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285006639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="1FFyfCvzOnZ7"
# A function whose body is only a docstring plus `pass`.
def my_function():
    """Do nothing, but document it.
    No, really, it doesn't do anything.
    """
    pass
# + [markdown] id="zd4N9X_DOnZ7"
# Por último, el string que sigue al nombre de la función se usa para su documentación. Este puede ser accedido directamente por el comando `help` o mediante `print`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285006639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="rVR4TC09OnZ7" outputId="a48aab85-c68a-42b7-9047-94b354437e76"
print(my_function.__doc__) # the docstring is stored in the __doc__ attribute
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285006639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="FJ6dSwSDOnZ7" outputId="491d2454-4717-429f-ec93-fd203d21177e"
help(my_function) # help() renders the same docstring
# -
# ### Argumentos predeterminados
# Los argumentos predeterminados se utilizan cuando necesita preestablecer el valor de los parámetros de entrada de una función, de esta manera si estos no se pasan en la invocación, la función se ejecuta con sus argumentos por defecto.
def add(a=0, b=0):
    """Return a + b; both arguments default to 0."""
    return a + b

print(add(10, 6))
print(add())
# ### Alias para nombres de funciones
# Si cree que el nombre de una función es demasiado largo y no vale la pena escribirlo cada vez, puede crear un alias usando un nuevo nombre, sin alterar la función original.
# Create an alias for the add() function
a = add
print(a(9, 8))
# + [markdown] id="g664XogJOnZ7"
# # Estructuras de datos
# + [markdown] id="41ZD3PlkOnZ8"
# ## Listas
# + [markdown] id="e9TSU9YvOnZ8"
# [Contenido](#Contenido)
# + [markdown] id="7vDXf7p7OnZ8"
# Las principales funciones de las listas son las siguientes:
#
# * `list.`**`append`**`(`*`x`*`)` -- agrega el elemento `x` al final de la lista.
# * `list.`**`extend`**`(`*`L`*`)` -- agrega la lista `L` al final de la lista.
# * `list.`**`insert`**`(`*`i`*`,`*`x`*`)` -- inserta el elemento `x` en la posición `i`.
# * `list.`**`remove`**`(`*`x`*`)` -- remueve el elemento `x`.
# * `list.`**`pop`**`([`*`j`*`])` -- remueve el elemento en la posición `j` de la lista.
# * `list.`**`clear`**`()` -- borra la lista; elimina todos los elementos de la lista.
# * `list.`**`index`**`(`*`x`*`)` -- devuelve el indice del elemento `x`
# * `list.`**`count`**`(`*`x`*`)` -- Cuenta las veces que aparece el elemento `x`.
# * `list.`**`sort`**`(`*`key=None`*`, `*`reverse=False`*`)` -- ordena los elementos de la lista.
# * `list.`**`copy`**`()` -- crea una copia de la lista.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285006640, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="n3PGgc3KOnZ8" outputId="a7121693-c3f3-4d5d-b508-4d178582a0d5"
a = [1, 2, 3, 4, 5] # create the list
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 29, "status": "ok", "timestamp": 1646285006640, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="ACkyJRUIOnZ8" outputId="7a5b8441-8156-4d60-fde4-d381081b6e26"
a.append(6) # add the element at the end
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285006640, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="YUcTBYVsOnZ8" outputId="c1ed0d63-a3da-4294-9d12-5e4e8bb7f7c8"
b = [7, 8, 9] # create a new list
a.extend(b) # concatenate the new list at the end of the previous one
a
# + executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285006641, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="fO1wEVJwOnZ8"
b.extend([8]) # extend with a one-element list (equivalent to b.append(8))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285006641, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="8HxQcPjAOnZ9" outputId="e15d453c-35af-4a63-b562-7d1ca1dd1d95"
b # show b: the extend above modified it in place
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 25, "status": "ok", "timestamp": 1646285006641, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="A1XpyfeMOnZ9" outputId="02f9328e-c0e5-4c20-ab1a-a2a3501be789"
a.insert(0, 'a') # lists may hold elements of different types
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 24, "status": "ok", "timestamp": 1646285006641, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="99BYcEyiOnZ9" outputId="3b7adafa-f24c-44a2-f908-d99233c1d9bc"
x = ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] # count how many times each element appears
print(x.count('a'), x.count('b'), x.count('c'), x.count('d'))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1646285006642, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="iLum3r1sOnZ9" outputId="12d879f8-64bd-4159-bf5f-a562b779e022"
x.remove('c') # removes only the first occurrence of the element
x
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285006642, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="zkWmtlmjOnZ9" outputId="74f17d76-5dd4-4d6d-c541-3cb43dbfd5c0"
a.reverse() # reverse the list in place
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1646285006642, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="_Z7TXAXNOnZ9" outputId="3d887812-ec41-42bc-b9b5-4eab2579f018"
v = [2, 4, 1, 5, 3]
v.sort() # sort the list in place
v
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1646285006643, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="k_5oZ3poOnZ-" outputId="88ab28cc-d6a7-4d9d-ddb8-91c1c3268b66"
sorted([2, 4, 1, 5, 3]) # returns a sorted copy of the original list
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 19, "status": "ok", "timestamp": 1646285006643, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="bcTKGCExOnZ-" outputId="39afb2ac-7aa6-4f18-884b-4b32a6dd1c42"
v.pop() # remove the last element of the list (right end).
v
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 18, "status": "ok", "timestamp": 1646285006643, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="oEMM--OBOnZ-" outputId="5294b1bf-47b1-49d1-91a8-adb8d6bd83c3"
a.pop(2) # remove the element at position 2 of the list
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 443, "status": "ok", "timestamp": 1646285007070, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="GG5pE0tjOnZ-" outputId="596374ca-742f-42c3-b6d8-26397993a0cf"
x = list(range(10)) # convert a range object into a list
x
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 33, "status": "ok", "timestamp": 1646285007070, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="3NJpJsYnOnZ-" outputId="80919405-6f0a-4fdb-8a11-a381cb79a668"
y = x # `y` points to the same list in memory as `x`.
y
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 33, "status": "ok", "timestamp": 1646285007071, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="CJQyb2GVOnZ-" outputId="c3d6bac6-2357-4293-d34c-0ca678dadb40"
y.pop() # removing an element through `y` also removes it from `x`
x
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285007071, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="9tr7mgakOnZ_" outputId="82479599-9296-48e7-f476-87123f44cec0"
x = list(range(10))
y = x.copy() # use `copy()` to avoid the aliasing problem above
x.pop()
y
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285007071, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="82PBQlk0OnZ_" outputId="22b8e9ee-3842-42c0-ff42-23a45821da01"
x # x lost its last element; the copy y kept all ten
# + [markdown] id="cJIqXcYqOnZ_"
# ### Funciones `filter` y `map`
# + [markdown] id="qSTQbFIEOnZ_"
# [Contenido](#Contenido)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285007072, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="3qVaWvS7OnZ_" outputId="2135eda7-6cc8-4fad-afb5-43fc8c8ddbbb"
# Keep the elements greater than 4
# BAD: manual loop-and-append
a = [3, 4, 5]
b = []
for i in a:
    if i > 4:
        b.append(i)
print(b)
# + executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285007072, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="fOdIVjAlOnZ_"
# GOOD: list comprehension
a = [3, 4, 5]
b = [i for i in a if i > 4]
# Or: the filter built-in (returns a lazy filter object)
b = filter(lambda x: x > 4, a)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285007072, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="yrklK2xwOnaA" outputId="519526eb-28c8-474d-d283-5cf6fa3b03ee"
list(b) # materialize the filter object into a list
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285007073, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="l5VQdSRGOnaA" outputId="e4fea0b0-c575-4134-cc6c-daf4a244f018"
# Add 3 to every element of a list
# BAD: index-based loop
a = [3, 4, 5]
for i in range(len(a)):
    a[i] += 3
print(a)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285007073, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="D6tPqaiFOnaA" outputId="9223d409-b647-4a77-f1e4-b289394a2582"
# GOOD: list comprehension
a = [3, 4, 5]
a = [i + 3 for i in a]
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285007073, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="oyJnrjjXOnaB" outputId="b43de777-1972-4045-c2a8-682283fead8c"
# GOOD: the map built-in (returns a lazy map object)
a = [3, 4, 5]
a = map(lambda i: i + 3, a)
list(a)
# + [markdown] id="zwvHPv3LOnaB"
# ### Desempaquetado de listas
# + [markdown] id="L4BQU_t3OnaB"
# [Contenido](#Contenido)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285007074, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="DbA36qzmOnaB" outputId="a68d6d2e-8a60-4b5a-f2d6-3a701abeef23"
a, *rest = [1, 2, 3, 4, 5] # a = 1, rest = [2, 3, 4, 5]
print(rest)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 29, "status": "ok", "timestamp": 1646285007074, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="855OLT9jOnaB" outputId="b304d66f-11ef-4e56-a8a0-cd44dfd1847c"
a, *middle, c = [1, 2, 3, 4, 5, 6] # a = 1, middle = [2, 3, 4, 5], c = 6
print(middle)
# + [markdown] id="PuKP1-1NOnaE"
# ### Uso de listas como stacks
# + [markdown] id="m6c1_rJlOnaF"
# [Contenido](#Contenido)
# + [markdown] id="A-lKb99rOnaF"
# Una lista es una estructura de datos LIFO (Last In First Out): el último elemento en entrar es el primer elemento en salir. Para simular este funcionamiento se usan las funciones `append` y `pop`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 29, "status": "ok", "timestamp": 1646285007075, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="zr89ykd0OnaF" outputId="434ce82e-caed-404b-9c49-28261c50f499"
stack = [3, 4, 5] # create a list with elements
stack.append(6) # push 6 onto the end of the list
stack.append(7) # push 7 onto the end of the list
stack
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285007075, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="vk_Hc8dYOnaF" outputId="17657fe9-571f-41d4-f041-8396838c2ee4"
stack.pop() # pop returns 7 first: the last element in is the first one out.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285007075, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="BC1LdNPNOnaF" outputId="6d573de3-406c-442f-b329-c6526dc3e588"
stack # the 7 is gone
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285007076, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="mpv0VwimOnaF" outputId="4bb4eb73-23a2-4f22-c73e-870c86c6de59"
stack.pop() # pops the 6
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285007076, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="G3DU7boHOnaG" outputId="8659c75d-6fd3-4214-f3c3-fb1b36b53b81"
stack.pop() # pops the 5
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1646285007076, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="aZA4mqnxOnaG" outputId="5a20b06a-2a93-439c-966c-8b85eec3c330"
stack # only the original 3 and 4 remain
# + [markdown] id="JlsRz9svOnaG"
# ### Uso de listas como colas
# + [markdown] id="X5A4rxn5OnaG"
# [Contenido](#Contenido)
# + [markdown] id="Q1R6pWpbOnaG"
# Esta estructura de datos simula la cola de un supermercado. Los elementos entran por la cola y salen por la cabeza (popleft -- pop() por el inicio de la lista). Para ello, se debe importar la función `deque` de la librería `collections`.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 25, "status": "ok", "timestamp": 1646285007077, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="XfYR7HklOnaG" outputId="e468d806-3807-4381-96ee-026e0c66f72b"
from collections import deque
queue = deque("abc")  # build a queue seeded with the elements 'a', 'b', 'c'
queue.append("d")     # 'd' enters at the right end
queue.append("e")     # 'e' enters at the right end
queue.popleft()       # 'a' leaves from the left end (FIFO)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 24, "status": "ok", "timestamp": 1646285007077, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="iHIptoN_OnaH" outputId="c15d6b7d-5b6b-4d42-ec1d-93ee5cba24d4"
queue.popleft() # the `b` leaves from the left end
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1646285007077, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="4j84lqjROnaH" outputId="4e5eaf8d-cfa2-448a-9b9c-d03e4387a2b3"
queue # display the remaining contents of the queue
# + [markdown] id="eEhLv7YDOnaH"
# ### List comprehensions
# + [markdown] id="r2FrfEhqOnaH"
# [Contenido](#Contenido)
# + [markdown] id="cKe4iXiOOnaH"
# Este es un mecanismo para definir la composición de una lista de forma compacta. En el siguiente ejemplo se crea una lista con los cuadrados de los números del 0 al 9.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1646285007078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="LSXsrURGOnaH" outputId="e2f9127e-b413-4cce-db2b-8be0538d90c2"
# build the list of squares of 0..9 with an explicit loop and append
squares = []
for n in range(10):
    squares.append(n ** 2)
squares
# + [markdown] id="mFgHFanrOnaH"
# Un mecanismo más simple consiste en aplicar la función `lambda x:x**2` a los elementos de la lista `[0, ..., 9]`. Para que la función anónima sea aplicada a cada elemento de la lista se usa la función `map`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1646285007078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Tr9HTJFuOnaH" outputId="e83f7a68-c4c7-44f0-d449-60bca2a7b82c"
# equivalent: apply an anonymous squaring function to 0..9 with map
squares = list(map(lambda n: n * n, range(10)))
squares
# + [markdown] id="eFz-71DxOnaI"
# Un mecanismo mucho más simple es integrar la definición de los elementos de la lista en el ciclo `for`:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1646285007079, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Y3fHoY-yOnaI" outputId="5ebbc090-a3bf-4b33-853a-bd97426fdd99"
# the most compact form: a list comprehension
squares = [n * n for n in range(10)]
squares
# + [markdown] id="l4thpSGoOnaI"
# En una comprehension, se pueden anidar ciclos `for` tal como se muestra a continuación. Adicionalmente, se pueden agregar condiciones usando la palabra reservada `if`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285007079, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="iY3JjBBPOnaI" outputId="2f6c6a73-15fe-4def-d5f4-5e826ac1af74"
[(a, b) for a in [1, 2, 3] for b in [3, 1, 4] if a != b]  # all unequal pairs from the two lists
# + [markdown] id="s3f15yjGOnaI"
# Note que el código anterior es mucho más simple que el equivalente usando ciclos `for` anidados como se ilustra en el siguiente código.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285007079, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="DTlSN_uFOnaI" outputId="2344f543-0e00-4145-e02d-bc23299c2ff6"
# the same result written with explicit nested loops
combs = []
for first in [1, 2, 3]:
    for second in [3, 1, 4]:
        if first != second:
            combs.append((first, second))
combs
# + [markdown] id="tYxxE78-OnaJ"
# Ya que este es un mecanismo para crear listas, también puede ser aplicado sobre elementos del tipo string. En el siguiente ejemplo, la función `strip` elimina los espacios en blanco al principio y al final de la cadena de caracteres.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285007080, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="VfEBGSgrOnaJ" outputId="d68c5886-7c5e-4a82-d0c7-d53de168a156"
# call a method on every element of a list
x = [' a a', ' a a ', 'a a ']
list(map(str.strip, x)) # remove the whitespace that surrounds each string
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 21, "status": "ok", "timestamp": 1646285007080, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="r-EDSTRcOnaJ" outputId="f1f82419-1f5a-48dc-dc7a-d9a2e0be415c"
[f'linea {n}' for n in range(1, 6)]  # build five numbered line labels
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 21, "status": "ok", "timestamp": 1646285007081, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="RYNlVaLsOnaJ" outputId="0c3e93b4-f0df-453e-c3c0-4e2e3f4136d5"
# print each generated label on its own line
for x in [f'linea {i}' for i in range(1, 6)]:
    print(x)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 473, "status": "ok", "timestamp": 1646285007534, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="s3oSK-4rOnaJ" outputId="960126be-b998-424d-fd4d-8eb3f15e5372"
# numbers from 1 to 20 whose decimal form contains a '1'
import re
[s for s in map(str, range(1, 21)) if re.search('1', s)]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1646285007534, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="CNCnFeKROnaJ" outputId="30c6515b-8a66-428b-c316-209e7b4c680e"
# how many numbers from 1 to 20 contain a '1'
import re
sum(1 for x in range(1, 21) if re.search('1', str(x)))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1646285007535, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="676SBVhEOnaK" outputId="8943ccf4-e57e-4f23-cbbc-d959d8d0c86c"
# extract the characters at positions 2, 3 and 4 of each string
x = ["123456790", "abcdefghi", "jklmnopqr"]
[s[2:5] for s in x]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 42, "status": "ok", "timestamp": 1646285007536, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="DQwtlvf0OnaK" outputId="6c82ac59-8f9b-4c68-dc05-5db1975e9319"
# extract the characters enclosed between `[` and `]`.
x = ["-->[456]<--",
     "-->[def]<--",
     "-->[nop]<--",
     "-------->[123456]<---------"]
[s[s.index('[') + 1:s.index(']')] for s in x]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 40, "status": "ok", "timestamp": 1646285007536, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="luJsCDO8OnaK" outputId="b4577639-bea4-4485-d0c5-e69c67ce8ce0"
# extract the second word of each line
x = ["Bash is a Unix shell and command language",
     "written by <NAME> for the ",
     "GNU Project as a free software",
     "replacement for the Bourne shell."]
[line.split(' ')[1] for line in x]
# + [markdown] id="PCwQWVPpOnaK"
# A continuación se presentan varios ejemplos de iteración sobre strings.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 39, "status": "ok", "timestamp": 1646285007537, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="IwKT-pgSOnaK" outputId="10f316f3-bf5e-4bcc-90b0-b2acd8cad022"
# building a string by iteration -- BAD
nums = ""
for n in range(20):
    nums += str(n) # slow and inefficient: each += may copy the whole string
print(nums)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1646285007537, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="rJFlH65GOnaK" outputId="ed00d27e-2e0b-4a03-ad0e-d4c141de8def"
# building a string by iteration -- GOOD
nums = []
for n in range(20):
    nums.append(str(n))
print("".join(nums)) # more efficient: join assembles the string in one pass
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1646285007538, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="yPMap7JyOnaL" outputId="21db5004-2343-4c3a-9d63-d498e02220ec"
# building a string by iteration -- BEST
nums = [str(n) for n in range(20)] # list comprehension plus join
print("".join(nums))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 35, "status": "ok", "timestamp": 1646285007538, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="EVd9XyrCOnaL" outputId="1ddfe43d-e7c3-4f3a-f10a-250aa4be4698"
# tuples can be built in a comprehension as well
[(n, n ** 2) for n in range(6)]
# + [markdown] id="3xHZY5d1OnaL"
# En el siguiente ejemplo, el primer `for` recorre los elementos de la lista `vec` mientras que el segundo ciclo `for` recorre las componentes de dicho elemento.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 33, "status": "ok", "timestamp": 1646285007538, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="YBBVm7ueOnaL" outputId="68d42e2d-c8cb-43f6-c368-82fb987bb6ce"
# flatten a list of lists: the outer loop walks the sublists, the inner one their items
vec = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[item for sublist in vec for item in sublist]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285007539, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="sbJTUj2pOnaL" outputId="2ac16196-038f-4613-afa2-4610e6cb6e17"
from math import pi # another example
[str(round(pi, digits)) for digits in range(1, 6)]  # pi rendered with 1..5 decimals
# + executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285007539, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="e42D6YVBOnaL"
# se simula una matriz como una lista cuyos elementos son listas de números.
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285007540, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="BREm0keHOnaL" outputId="6ab34367-c984-4d6d-b241-4ce41c58c076"
# compute the transpose with a nested comprehension
[[row[col] for row in matrix] for col in range(4)]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285007540, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="sif4VLtKOnaM" outputId="705dfbb1-8e29-4c11-ef4e-22f153311a4a"
# this is the equivalent traditional code.
transposed = []
for i in range(4):
    transposed.append([row[i] for row in matrix]) # one column of `matrix` becomes one row
transposed
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285007541, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="ZFg2YAifOnaM" outputId="9ba3cb72-f1a5-4962-bdab-d1f77dabea73"
# another equivalent version, fully unrolled.
transposed = []
for i in range(4):
    # the following 3 lines implement the nested listcomp
    transposed_row = []
    for row in matrix:
        transposed_row.append(row[i])
    transposed.append(transposed_row)
transposed
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 29, "status": "ok", "timestamp": 1646285007541, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="8qKmJED7OnaM" outputId="ddf8fa42-09ac-4c06-8e59-a78d7ec9ca24"
# yet another way: zip(*matrix) unpacks the rows and regroups them by column
list(zip(*matrix))
# + [markdown] id="4Ckh4XDUOnaM"
# ### Comando `del`
# + [markdown] id="BYj8NzztOnaM"
# [Contenido](#Contenido)
# + [markdown] id="PTJvm1OuOnaM"
# El comando `del` permite borrar elementos de una lista.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 29, "status": "ok", "timestamp": 1646285007542, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="3GJsu98POnaM" outputId="71b15433-ce4d-4374-d73b-ac8df06fc6ae"
a = [-1, 1, 66.25, 333, 333, 1234.5]
del a[0] # remove the first element by position
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285007542, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="08U9TzuQOnaN" outputId="50c312e1-cf87-4621-cf49-e402088f4169"
del a[2:4] # remove a slice of elements
a
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1646285007542, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="c5zqQ5m7OnaN" outputId="e1bd20a0-f07b-4efe-a688-d73c3b511a6a"
del a[:] # remove every element, leaving an empty list
a
# + executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1646285007543, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="7edm1zX1OnaN"
del a # unbind the name entirely; using `a` afterwards raises NameError
# + [markdown] id="jwIVWN04OnaN"
# ## Tuplas y secuencias
# + [markdown] id="Kg4FOsCuOnaN"
# [Contenido](#Contenido)
# + [markdown] id="ndj6PmkTOnaN"
# Una tupla es una secuencia de elementos separados por comas.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 357, "status": "ok", "timestamp": 1646285047104, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="umyv_qZpOnaO" outputId="37fd1855-d6e7-448d-b7b9-4d6b2839ebc0"
t = 12345, 54321, 'hello!' # a tuple literal: the parentheses are optional
t[0] # tuples support indexing, just like lists
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1646285047414, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="GU3iY6pVOnaO" outputId="dd1c2fbb-8611-4788-93c1-2e6d3feaf4ad"
t # a tuple is distinguished from a list because it prints between parentheses.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1646285047414, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Fu0_3v46OnaO" outputId="be9edb92-67e0-4c40-a955-ff3c28f4df43"
# Tuples can be nested.
u = t, (1, 2, 3, 4, 5)
u
# + executionInfo={"elapsed": 527, "status": "ok", "timestamp": 1646285047937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Vlir6HWsOnaO"
x, y, z = t # tuple unpacking: each element is bound to a name.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 63, "status": "ok", "timestamp": 1646285047937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="UfrMMghZOnaO" outputId="afc7cae6-8e30-4665-8a03-35b28328a43d"
x # first element of t
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 61, "status": "ok", "timestamp": 1646285047938, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="PyLnR0TOOnaO" outputId="fafda6da-e670-46a0-ef1f-2bdebf489222"
y # second element of t
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 46, "status": "ok", "timestamp": 1646285047938, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="MrUz0FCJOnaP" outputId="bc0be791-4750-42e8-f1d6-bc0e20c47883"
z # third element of t
# + [markdown] id="rlKyHOgDOnaP"
# ## Conjuntos
# + [markdown] id="I2fStNykOnaP"
# [Contenido](#Contenido)
# + [markdown] id="VRiyzCQEOnaP"
# Un conjunto es una estructura de datos cuyos elementos no se repiten.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1646285047939, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="IN4y574gOnaP" outputId="a5b3c4e0-a676-4031-aa0c-1281d4133f89"
basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
print(basket) # the duplicated elements were removed.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285047939, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="-O87ozvDOnaP" outputId="7111dafe-19fa-43b9-dd0b-3b85ef15b949"
'orange' in basket # `in` tests whether the element belongs to the set
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285047939, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="MfwsqByKOnaP" outputId="ba84517f-35bb-4bf0-db5c-c1514727249c"
'crabgrass' in basket # membership test for an element that is not present
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646285047940, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="k7dUTS7uOnaP" outputId="22500eb6-0485-48d6-a8aa-e2dd16988bea"
a = set('abracadabra') # the string is decomposed into its letters.
b = set('alacazam')
a # unique letters in the first string.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 29, "status": "ok", "timestamp": 1646285047940, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="EXjxgKQqOnaQ" outputId="0a8928d5-08bc-404a-bbff-ceb8d1c56c80"
b # unique letters in 'alacazam'
# + [markdown] id="njINIF54OnaQ"
# A continuación se presentan las principales operaciones entre conjuntos.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285047940, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="kqJkgXL1OnaQ" outputId="b73b8934-b84c-41c0-a7e4-add476cfeb7a"
a - b # letters in a but not in b (difference)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285047941, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="DpA8Z-NnOnaQ" outputId="7ed76dda-9da0-4215-ed4d-738c69d8b58f"
a | b # letters in a or in b (OR, union)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285047941, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="QTko27ZvOnaQ" outputId="c65791c4-ec1a-4e0f-a5d9-e9f2c88723e4"
a & b # letters in both a and b (intersection)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1646285047941, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="8t6KvC6mOnaQ" outputId="a7a762c6-7a43-4931-a95d-246f5a0f9835"
a ^ b # letters in a or in b but not in both (symmetric difference)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1646285047942, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="3Qh7W7TfOnaQ" outputId="0f08c39e-aa16-4e63-e006-f6b59c5ac91d"
# set comprehensions work too
a = {letter for letter in 'abracadabra' if letter not in 'abc'} # letters of `abracadabra` that are not in `abc`
a
# + [markdown] id="nsj6H4GJOnaR"
# ## Diccionarios
# + [markdown] id="SiGWWqwfOnaR"
# [Contenido](#Contenido)
# + [markdown] id="36LWry1nOnaR"
# Un diccionario es una estructura de datos en que cada elemento contiene una clave y un valor. Los diccionarios pueden crearse usando `{` y `}`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 24, "status": "ok", "timestamp": 1646285047942, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="4d4FYNZCOnaR" outputId="a33f3a86-4262-4892-c1a0-532f55b08cef"
x = {'b': 2, 'a': 1} # 'a' and 'b' are the keys; 1 and 2 are the values
x['c'] = 3 # a new entry is added to the dictionary.
x
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 24, "status": "ok", "timestamp": 1646285047943, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="GBn6gYtLOnaR" outputId="f88a2a31-26ee-4c67-ed01-d9ddab24d9c2"
x['b'] # look up the value associated with the key 'b'
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1646285047943, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="-10bKd4aOnaR" outputId="68ce4488-f050-4d6a-aa65-4d2e23156341"
del x['b'] # the entry is removed.
x['d'] = 4 # a new entry is added.
x
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285047943, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="NNjNwXpqOnaR" outputId="955af8da-8628-48a3-a56f-f3570a7c897c"
list(x.keys()) # the `keys` method returns a view of the keys.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1646285047944, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="W9v57PvbOnaR" outputId="1f69668e-4b54-4871-e418-f2744b27ceea"
sorted(x.keys()) # the keys in sorted order.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1646285047944, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="O-HnxAfTOnaS" outputId="6b74b627-a740-45ab-cba7-1d7264d38839"
'a' in x # membership tests check against the keys
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285047945, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="uvOf5SsXOnaS" outputId="052daa0d-40f4-4d63-bf6f-af347834fd70"
'a' not in x # negated key-membership test
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285047952, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="aoy0hDxYOnaS" outputId="9ff5e540-4491-40dd-9adf-cb2e732db89d"
# a dictionary can also be created with `dict` from a list of (key, value) tuples.
dict([('d', 4), ('a', 1), ('b', 2)])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285047952, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="mSmJizbeOnaS" outputId="71a30bb8-1a90-42b4-bfae-a87da355fd8a"
# dictionaries can also be built with comprehensions
{n: n * n for n in (2, 4, 6)}
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285047953, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="_bnRCdN3OnaS" outputId="d7cd1aa5-aeab-4dbe-ea6b-2f89d7a335eb"
dict(c=3, b=2, a=1) # building the dictionary with keyword arguments (`=`)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1646285047954, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="ILPwTie4OnaS" outputId="3e220e13-e803-4d41-c2e3-8b5f75611eb9"
x.copy() # returns a shallow copy of the dictionary
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285047954, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="PpMZMvfVOnaS" outputId="8a1948ac-8c8e-48be-c4dd-846461caf964"
x.items() # returns a view of the (key, value) pairs in the dictionary.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1646285047954, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="aEYFHM9_OnaT" outputId="76eec875-6fcf-40b2-c195-1697cda67695"
list(x.items()) # materialize the items view as a list of tuples
# + [markdown] id="pYHZFqoNOnaT"
# ## Comparación de secuencias y otros tipos de datos
# + [markdown] id="sP5A7YJYOnaT"
# [Contenido](#Contenido)
# + [markdown] id="djg6p3NvOnaT"
# A continuación se presentan varios ejemplos de comparación entre secuencias.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1646285047955, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="rPIJKaffOnaT" outputId="0fb88db7-bb31-401f-d9f5-e23bd57378b3"
(1, 2, 3) < (1, 2, 4) # lexicographic: the first differing items decide
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 44, "status": "ok", "timestamp": 1646285048340, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="TyqwDuJKOnaT" outputId="adfaa46f-5d6e-4e1e-eb58-b6a21b2e9d1b"
[1, 2, 3] < [1, 2, 4] # lists compare the same way as tuples
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 44, "status": "ok", "timestamp": 1646285048341, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="SDoDt4MEOnaT" outputId="79c6fb78-f663-4e6b-e611-0fc5bf034a8a"
'ABC' < 'C' < 'Pascal' < 'Python' # strings compare character by character
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1646285048341, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="pbmkzWE1OnaT" outputId="6d38ddcc-2e11-469f-946c-9554439f73c6"
(1, 2, 3, 4) < (1, 2, 4) # 3 < 4 decides before the lengths are compared
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1646285048342, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="BR5kCSreOnaU" outputId="d51dbc89-456f-4a58-9d46-c8b25a1dab30"
(1, 2) < (1, 2, -1) # equal prefix: the shorter sequence is the smaller one
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 42, "status": "ok", "timestamp": 1646285048343, "user": {"displayName": "Alexander <NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="s8NMdJQLOnaU" outputId="b9a0109e-bdf5-4eb7-dbe2-f6dd3a0d41e4"
(1, 2, 3) == (1.0, 2.0, 3.0)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 41, "status": "ok", "timestamp": 1646285048343, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="1aGemgy3OnaU" outputId="409a8f8c-8468-40a4-a0ab-c9b46d55ed4a"
(1, 2, ('aa', 'ab')) < (1, 2, ('abc', 'a'), 4)
# + [markdown] id="GKBSXvFpOnaU"
# # Impresión con formato
# + [markdown] id="XkgrtYD3OnaU"
# [Contenido](#Contenido)
# + [markdown] id="O6dMwYApOnaU"
# La función `repr()` produce la representación de un objeto en forma de string para su impresión. La función `str()` convierte un objeto a una cadena de caracteres.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 41, "status": "ok", "timestamp": 1646285048344, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="0as3rW2wOnaU" outputId="ef7a8a22-196b-4c21-bec4-9a65176dcf95"
s = 'Hola, mundo.'
print(s) # note que aca lo imprime sin las comillas
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 40, "status": "ok", "timestamp": 1646285048344, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="4q89NUY2OnaU" outputId="a69d00fd-a34c-44c6-b22b-43fb9863ef86"
str(s) # imprime las comillas
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 39, "status": "ok", "timestamp": 1646285048345, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="9tW_2mDnOnaV" outputId="1f93caed-d937-4ee9-9a64-33f62c090673"
repr(s) # note las comillas dobles
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 38, "status": "ok", "timestamp": 1646285048345, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="KK55eK2qOnaV" outputId="42799b4b-438c-4a54-d80f-fd071ea40070"
print(repr(s))
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 38, "status": "ok", "timestamp": 1646285048346, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="CiomJIjkOnaV" outputId="10b34eb4-3faf-4330-ca88-94b3db47155f"
a = 1
str(a) # convierte al 1 de número a string.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 38, "status": "ok", "timestamp": 1646285048347, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="IKlKNM6rOnaV" outputId="23930e5f-1da3-4565-b761-5ab1b2033b98"
str(1/7)
# + [markdown] id="mj9l8HPMOnaV"
# La función `repr()` es usualmente usada para impresión, tal como se ilustra en el siguiente ejemplo.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 36, "status": "ok", "timestamp": 1646285048347, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="lCdPXSlEOnaV" outputId="00499581-6509-46ca-82f2-02e5fcd0e16c"
x = 10 * 3.25
y = 200 * 200
s = 'El valor de "x" es ' + repr(x) + ', y el de "y" es ' + repr(y) + '...'
print(s)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 36, "status": "ok", "timestamp": 1646285048348, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="KoWN6zKWOnaW" outputId="0052dc8e-16b8-4da8-adb3-77855c717052"
hello = 'hola, <NAME>\n'
hellos = repr(hello) # agrega comillas
print(hellos)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 35, "status": "ok", "timestamp": 1646285048348, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Rwj9gosvOnaW" outputId="2602cfd0-85ee-4d32-a960-db1893dca4f1"
hello = 'hola, <NAME>\\n'
print(hello)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 35, "status": "ok", "timestamp": 1646285048349, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="x2DcPMkSOnaW" outputId="20b0cd69-2224-436e-cba2-cde023f1873c"
repr((x, y, ('a', 'b'))) # la función `repr()` recibe cualquier objeto de Python
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1646285048349, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="-nfkXDQAOnaW" outputId="e9cd68e5-d993-4e45-9f16-63f053a64284"
# Print a right-aligned table of x, x**2 and x**3 for x in 1..10.
for x in range(1, 11):
    print(repr(x).rjust(2), repr(x*x).rjust(3), end=' ') # prints the first two columns
    print(repr(x*x*x).rjust(4)) # prints the third column
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1646285048350, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="FCEZkawBOnaW" outputId="b9776748-e465-464d-9eee-eee96fd14535"
# alternative form: str.format field widths produce the same right-aligned table
for x in range(1, 11):
    print('{0:2d} {1:3d} {2:4d}'.format(x, x*x, x*x*x))
# + [markdown] id="J_9x5LfnOnaW"
# La función `zfill()` permite rellenar un número con ceros a la izquierda.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1646285048350, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="w9LxiId1OnaW" outputId="af6a3353-72f6-4822-9054-0a103648a5cd"
'12'.zfill(5) # el número debe tener 5 caracteres de longitud en total
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1646285048351, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="U0wbg71tOnaX" outputId="aa789da2-e9b2-4f7d-ac02-b44815b8c73e"
'-3.14'.zfill(7) # el número debe tener 7 caracteres de longitud en total, teniendo en cuenta el signo -
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1646285048352, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="2qobBC90OnaX" outputId="8d69369d-3704-4681-e1d2-275b819d9bfb"
'3.14159265359'.zfill(5) # si la cadena ya excede la longitud, zfill() no la recorta.
# + [markdown] id="pyyj_NAhOnaX"
# La función `print()` admite argumentos para la impresión. En su forma más simple, los argumentos son impresos en orden y su lugar se indica mediante los caracteres `{}`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 33, "status": "ok", "timestamp": 1646285048352, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="eDA3arDBOnaX" outputId="1a6c8d02-9ef4-425e-a94c-6c66f5778bef"
print('Este es el argumento {} y este el "{}"'.format('-1-', '-2-'))
# + [markdown] id="Yhr0l93mOnaX"
# En los ejemplos anteriores, los argumentos se imprimen por posición: el primer argumento de format se imprime en el primer `{}` y asi sucesivamente. Python permite numerar o dar nombre a los argumentos para su impresión.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 33, "status": "ok", "timestamp": 1646285048353, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="1QG2KycOOnaX" outputId="de9e72e1-153e-453e-f42e-8c4b781d5d16"
print('{0} y {1}'.format('-0-', '-1-')) # {0} es '-0-' y {1} es '-1-'
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285048353, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="ag8vgLPwOnaX" outputId="ac2df836-6f3a-4f7d-bbc3-f75e2a95520d"
print('{1} y {0}'.format('-0-', '-1-')) # {0} es '-0-' y {1} es '-1-'
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285048354, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="v_kavD04OnaX" outputId="40033a84-3ba5-43ee-e4cb-93237ac0248a"
print('{arg0} y {arg1}.'.format(arg1='-1-', arg0='-0-')) # se da nombre a los argumentos
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285048354, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="Kwlx3jwPOnaY" outputId="734088d8-1228-4c18-d256-3380224d307a"
print('{0}, {1}, y {a}.'.format('-0-', '-1-', a='-2-'))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646285048355, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="uepJ4wzkOnaY" outputId="ec1a8662-7692-4084-a720-21130c633b00"
x = '-0-'
print('Este es el argumento {}.'.format(x))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285048355, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="s94kzMpKOnaY" outputId="30724257-2edb-4cf0-a364-8c62679015fd"
print('Este es el argumento {!r}.'.format(x)) # aca agrega comillas alrededor de x
# + [markdown] id="fAkwbxHbOnaY"
# El formato `{0:.3f}`indica lo siguiente: `0` es el número del argumento; `.3f` indica un número en punto flotante (un real) con tres decimales después del punto.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285048355, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="cy5RLEGcOnaY" outputId="f6974ac5-6464-432b-b8cb-b1c2665f7e89"
import math
print('Valor de PI con tres decimales: {0:.3f}.'.format(math.pi))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646285048356, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="nh7ZeFhfOnaY" outputId="1d11e618-8d5f-40e7-9621-966549da9abe"
print('{} ---- {}'.format('hola mundo', 1.23456789))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1646285048634, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="tsrcQXqDOnaY" outputId="9bc9b8fb-387a-458c-c4bb-a2242652e40e"
print('{0:15s} ---- {1:8.2f}'.format('hola mundo', 1.23456789))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1646285048635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="9v7ktCnkOnaZ" outputId="4da4425c-7232-44a5-aaae-35ae4c3c2c96"
print('{0:>15s} ---- {1:8.2f}'.format('hola mundo', 1.23456789))
# + [markdown] id="JbBpA5VQOnaZ"
# En el siguiente ejemplo se ilustra la impresión de un diccionario. El formato `{0:10}` indica que el argumento `0` tiene diez caracteres de longitud; la cadena de caracteres se alinea por la izquierda. El formato `{1:10d}` señala que el argumento 1 es entero ('d') y tiene diez caracteres de longitud; se hace la alineación por la derecha.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1646285048635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="8fjzWXvCOnaZ" outputId="1b6c77a5-2db2-4958-da59-56676e585629"
z = {'a': 100, 'b': 101, 'c': 102}
for x, y in z.items():
print('{0:10} ---> {1:10d}'.format(x, y))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1646285048635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="EHrEwYk-OnaZ" outputId="52150232-ed63-4ce8-e8a1-0ea7f9aec603"
# este ejemplo muestra como imprimir un diccionario.
z = {'a': 100, 'b': 101, 'c': 102}
print('a: {0[a]:d}; '
'b: {0[b]:d}; '
'c: {0[c]:d}'.format(z))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1646285048636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="TG_nQLi3OnaZ" outputId="42739cf8-fe6b-40c2-ef7e-dbd61888702f"
# otra forma de imprimir un diccionario.
z = {'a': 100, 'b': 101, 'c': 102}
print('a: {a:d}; b: {b:d}; c: {c:d}'.format(**z))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1646285048636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="JaRYMYeMOnaZ" outputId="f69fc64f-b514-4630-98bd-4113621f0da3"
h = 'hola mundo'
n = 1.23456789
print(f'{h} ---- {n}')
# + [markdown] id="eL3-aBEzOnaZ"
# Por compatibilidad con versiones anteriores, Python conserva la impresión usando el operador `%`. En el siguiente ejemplo, la especificación `%5.3f` indica que se imprimirá un número real de cinco caracteres de longitud y tres posiciones decimales.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1646285048636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi5bptZVpT8uPV-rsu1k3GPYeG1CXzkWxwYtJ_160M=s64", "userId": "06106219622026670571"}, "user_tz": 300} id="MpMHq3MyOnaa" outputId="4242a667-0760-49d6-dadb-5dacc2961934"
import math
print('--> %5.3f <--' % math.pi)
# + [markdown] id="ZFYiTMYhOnaa"
# [Contenido](#Contenido)
| 03-fundamentos-programacion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FASHION CLASS CLASSIFICATION
# ## 1. DATA IMPORT
# ### Load libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
# ### Load dataset
df_train = pd.read_csv('input/fashion-mnist_train.csv',sep=',')
df_test = pd.read_csv('input/fashion-mnist_test.csv', sep = ',')
# ## 2. DATA VISUALIZING
# ### Display dataframe
# 784 indicates 28x28 pixels and 1 column for label:
df_train.head()
df_test.head()
# Let's check how many data are in train and test datasets:
df_train.shape
df_test.shape
# Let's create training and testing arrays:
training = np.array(df_train, dtype = 'float32')
testing = np.array(df_test, dtype = 'float32')
# ### Display random images
# Below we are going to plot a grid of default figures by defining grid dimensions 15x15. Subplots return figure object and axes object. We can use the axes object to plot specific figures at various locations. Next, 15x15 matrix is flattened into 225 array and length of training dataset is set.
# A random number from 0 to n_training is selected and random figure is plotted accordingly.
# +
# Plot a 15x15 grid of randomly chosen training images with their class label as title.
W_grid = 15
L_grid = 15
fig, axes = plt.subplots(L_grid, W_grid, figsize = (17,17))
# Flatten the 15x15 axes matrix into a 225-element array so we can index it linearly.
axes = axes.ravel()
n_training = len(training)
for i in np.arange(0, W_grid * L_grid):
    # Pick a random row; column 0 is the label, columns 1..784 are the 28x28 pixels.
    index = np.random.randint(0, n_training)
    axes[i].imshow( training[index,1:].reshape((28,28)) )
    axes[i].set_title(training[index,0], fontsize = 8)
    axes[i].axis('off')
plt.subplots_adjust(hspace=0.4)
# Classes:
# 0 => T-shirt/top
# 1 => Trouser
# 2 => Pullover
# 3 => Dress
# 4 => Coat
# 5 => Sandal
# 6 => Shirt
# 7 => Sneaker
# 8 => Bag
# 9 => Ankle boot
# -
# ## 3. MODEL TRAINING
# ### Prepare train, validation and test datasets
# Training and testing datasets are prepared:
# +
# Scale pixel values to [0, 1]; column 0 is the class label, the remaining 784 columns are pixels.
X_train = training[:, 1:]/255
y_train = training[:, 0]
X_test = testing[:, 1:]/255
y_test = testing[:, 0]
# -
# Let's create validation dataset. A validation dataset can be used during training, it is used to help the model to generalize. We would like to simply avoid overfitting.
# +
from sklearn.model_selection import train_test_split
# Hold out 20% of the training data for validation; the fixed seed keeps the split reproducible.
X_train, X_validate, y_train, y_validate = train_test_split(X_train, y_train, test_size=0.2, random_state=12345)
# -
# Now we are going to take our training, testing and validating data and put in a form that will fit to our convolutional neural network, so we will need to reshape our data so that they are in a form (28,28,1)
X_train.shape
X_test.shape
X_validate.shape
# Reshape flat 784-pixel rows into (28, 28, 1) single-channel images for the CNN input.
X_train = X_train.reshape(X_train.shape[0], *(28,28,1))
X_test = X_test.reshape(X_test.shape[0], *(28,28,1))
X_validate = X_validate.reshape(X_validate.shape[0], *(28,28,1))
X_train.shape
X_test.shape
X_validate.shape
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
# ### Create neural network model
cnn_model = Sequential()
# After creating our model, we are going to add our layers:
# - convolutional layer, with 32 kernels of size 3x3, with input shape equal to our image shape and RELU activation function
# - pooling layer, with pool size 2x2
# - dropout layer
# - flattening layer to flat our features into one single array
# - dense layer, with dimension equal to 32 and RELU activation function
# - dense layer, with dimension equal to 10 (as we have 10 classes) and sigmoid activation function
# +
# In Keras 2 the old positional form Conv2D(32, 3, 3) is parsed as
# (filters=32, kernel_size=3, strides=3), silently striding by 3.
# The kernel size must be passed explicitly to get a 3x3 kernel with stride 1.
cnn_model.add(Conv2D(32, (3, 3), input_shape = (28, 28, 1), activation = 'relu'))
cnn_model.add(MaxPooling2D(pool_size = (2, 2)))
cnn_model.add(Dropout(0.25))
cnn_model.add(Flatten())
# `output_dim` was removed in Keras 2; the first positional argument is `units`.
cnn_model.add(Dense(32, activation = 'relu'))
# NOTE(review): softmax is the conventional activation for 10 mutually exclusive
# classes; sigmoid is kept here to match the author's description above — confirm.
cnn_model.add(Dense(10, activation = 'sigmoid'))
# -
# Now we are going to train our network. As loss function we are going to use sparse_categorical_crossentropy (sparse because our classes are mutually exclusive, categorical and not binary because we have 10 classes). We will choose Adam optimizer cause it is computationally efficient and has little memory requirements - it is quite popular to start with this kind of optimizer as it applies well for many deep learning problems. Accuracy metrics will be used.
# ### Compile model
# `lr` is deprecated (removed in newer Keras); the supported name is `learning_rate`.
cnn_model.compile(loss = 'sparse_categorical_crossentropy', optimizer = Adam(learning_rate=0.001), metrics = ['accuracy'])
# ### Train model
# Number of epochs in this case will be 10. An epoch is a measure of the number of times all of the training vectors are used once to update the weights. The greater number of epochs is, the smaller the error is.
# +
# Train for a fixed number of epochs; batches of 512 flattened into the CNN input shape.
epochs = 10
# `nb_epoch` was renamed to `epochs` in Keras 2 and later removed entirely.
history = cnn_model.fit(X_train,
                        y_train,
                        batch_size = 512,
                        epochs = epochs,
                        verbose = 1,
                        validation_data = (X_validate, y_validate))
# -
# With 10 epochs used, we reached accuracy 89.55% which is pretty good. We might get better accuracy by adding more epochs but for this case we are going to stay with 10 due to the time it takes to calculate it.
# ## 4. MODEL EVALUATING
# ### Test data
# At first, we will run a method evaluate using our testing data.
# Evaluate on the held-out test set; `evaluate` returns [loss, accuracy] per the compiled metrics.
evaluation = cnn_model.evaluate(X_test, y_test)
print('Loss: {:.3f}'.format(evaluation[0]))
print('Test Accuracy: {:.3f}'.format(evaluation[1]))
# Let's get the predictions for the test data:
# `Sequential.predict_classes` was removed in TensorFlow >= 2.6; the replacement is
# taking the argmax over the per-class probabilities returned by `predict`.
predicted_classes = np.argmax(cnn_model.predict(X_test), axis=1)
# Now let's print sample 25 images to see how our model performs (prediction class vs. true class):
# +
# Show a 5x5 grid of test images with the predicted vs. true class in each title.
L = 5
W = 5
fig, axes = plt.subplots(L, W, figsize=(12,12))
axes = axes.ravel()
for i in np.arange(0, L * W):
    # X_test rows were reshaped to (28, 28, 1); drop back to 2-D for imshow.
    axes[i].imshow(X_test[i].reshape(28,28))
    axes[i].set_title("Prediction Class = {:0.1f}\n True Class = {:0.1f}".format(predicted_classes[i], y_test[i]))
    axes[i].axis('off')
plt.subplots_adjust(wspace=0.5)
# -
# ### Print heatmap
# +
from sklearn.metrics import confusion_matrix
# 10x10 confusion matrix: rows are true classes, columns are predicted classes.
cm = confusion_matrix(y_test, predicted_classes)
plt.figure(figsize=(14,10))
# `annot=True` writes the raw counts into each heatmap cell.
ax = sns.heatmap(cm, annot=True, fmt='d')
# Workaround for the matplotlib 3.1.1 bug that clipped the top/bottom heatmap rows.
# NOTE(review): harmless but unnecessary on fixed matplotlib versions — confirm.
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
# -
# In order to get the total value of correctly classified samples, the numbers on the confusion matrix diagonal should be added. We can observe, however, that many samples haven't been classified correctly.
# ### Print classification report
# +
from sklearn.metrics import classification_report
# Per-class precision/recall/F1 with human-readable class names.
num_classes = 10
target_names = ['Class: {}'.format(i) for i in range(num_classes)]
print(classification_report(y_test, predicted_classes, target_names=target_names))
# -
# The classification report shows how many samples of each specific class have been classified correctly. The most common misclassification was between classes 0 and 6 (T-shirt misclassified as shirt). We got the best results for classes 1, 5 and 8 (trousers, sandals, bags).
| Fashion_Class_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Investigating the opportunities
#
# - Research for a protein database which has OMIM ids
# - In pathway databases OMIM is missing
# - Human Protein Reference Database
# - UniProt
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Why UniProt?
#
# - OMIM id is available for proteins
# - More than 500,000 proteins
# - It is easy to access and use (txt)
# - Other databases has connection to UniProt Ids
# - Provides an up-to-date, comprehensive body of protein information
# - Continuously updated for new data every four weeks
# + [markdown] slideshow={"slide_type": "subslide"}
# # Source file
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Novelty
#
# - Similar task was addressed by others (offline)
# - The databases mostly focus on mapping gene to disease associations
# - Protein disease associations can be more interesting in drug design
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Database structure
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data model implementation
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + slideshow={"slide_type": "slide"}
# %%time
from biodb_team_3.db_manager import Manager
m = Manager()
m.populate_db()
# + [markdown] slideshow={"slide_type": "slide"}
# - 5689 associations between 5689 diseases and 3815 proteins!
# - Easy to query from both ends, with single or batch inputs!
# - Advanced queries for identifying related diseases or associated proteins!
# - Outputs deliverable as strings or pickled list objects!
# + [markdown] slideshow={"slide_type": "slide"}
# # What you can do with it apart from the two above
# ## How to install:
# One sh file to run and all software will be installed
# -
# !sh install.sh ../biodb.db  # shell command; the bare line was a Python SyntaxError in this cell
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## How to update:
#
# It is easy to update the software data by calling uniprot
# -
from biodb_team_3.db_manager import Manager
m =Manager()
m.populate_db()
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## How to use:
# -
m.query_for_protein_single("101400")
m.query_for_disease_single("Q15672")
m.get_omim_id_with_uniprot_id(['P21802','Q15672'])
m.get_uniprot_id_with_omim_id(['614592','101600'])
m.get_shared_associated_proteins_by_omim_ids(["101400", "123100", "180750", "601622"])
m.get_associated_disease_with_omim_id("101400")
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Summary
#
# - The package allow us to investigate the Uniprot Ids and OMIM Ids relationship
#
# - User friendly
#
# - Provide link between pathways and disease
#
# - This relation can be interesting for drug design
#
# - Easy to query from both ends
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Future improvements
#
# - Parsing the whole Uniprot xml raw data file and extract much more information
#
# - Eliminating the reliance to external packages and develop our own
# either API handles or local database tables which contain all of the information
#
# - Implementing additional algorithms for walking the association search space
| present/dejan_prez_update2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Compute global mean/std/min/max over all LUNA training images (for normalization).
import numpy as np
import os
# NOTE(review): `tqdm.tqdm_notebook` is deprecated; `from tqdm.notebook import tqdm`
# is the modern spelling — confirm before changing.
from tqdm import tqdm_notebook as tq
loadPath = '../data/train/images/'
trFiles = os.listdir(loadPath)
print(len(trFiles))
# Peek at one file to confirm the per-image array shape.
temp = np.load(loadPath+trFiles[0])
print(temp.shape)
# Stack every image into one array; assumes each .npy file is 512x512 — TODO confirm.
# NOTE(review): default float64 dtype means ~2 MB per image; for large file counts
# a streaming/two-pass computation would use far less memory.
totalData = np.zeros((len(trFiles),512,512))
for n in tq(range(len(trFiles))):
    totalData[n] = np.load(loadPath+trFiles[n])
print('Mean:',totalData.mean(),'Std:',totalData.std())
print('Min:',totalData.min(),'Max:',totalData.max())
| data_prep/LUNA_mean_std.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="7oJF8jEXZP5v"
# ### Molecule solubility prediction with basic GNN model
#
# Water (or any other kind of) solubility is the property of the whole molecule, therefore we will have a simple binary clasification problem for the whole molecule. <br> <br>
#
# ### References
# PyG [examples](https://github.com/pyg-team/pytorch_geometric/tree/master/examples) on GNN <br>
# Textbook: Deep Learning for the Life Sciences: Applying Deep Learning to Genomics, Microscopy, Drug Discovery, and More
# + id="pxmMjZ240wob"
# !python -c "import torch; print(torch.__version__)"
# !python -c "import torch; print(torch.version.cuda)"
# !python --version
# !nvidia-smi
# + id="YnRp-W6y5K1f"
pytorch_version = f"torch-{torch.__version__}.html"
# !pip install --no-index torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cu111.html
# !pip install --no-index torch-sparse -f https://pytorch-geometric.com/whl/torch-1.10.0+cu111.html
# !pip install --no-index torch-cluster -f https://pytorch-geometric.com/whl/torch-1.10.0+cu111.html
# !pip install --no-index torch-spline-conv -f https://pytorch-geometric.com/whl/torch-1.10.0+cu111.html
# !pip install torch-geometric
# !pip -q install rdkit-pypi
# + id="5oDwYVMTh3N2"
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, TopKPooling, global_mean_pool
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
from torch_geometric.data import DataLoader
from torch_geometric.datasets import MoleculeNet
import pandas as pd
import warnings
import seaborn as sns
import matplotlib.pyplot as plt
import rdkit
from rdkit import Chem
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem.Draw import MolsToGridImage
# + [markdown] id="cSTDAc0jZTON"
# ESOL [dataset](https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/delaney-processed.csv) will be used
#
# [This dataset is created based on MoleculeNet.org](http://ww82.moleculenet.ai/).
# + id="kL8_Pkmt07WM" colab={"base_uri": "https://localhost:8080/"} outputId="8210073d-3c71-4721-99a5-7ce85265fbf7"
# Load the ESOL dataset
data = MoleculeNet(root=".", name="ESOL")
data
# + id="-bGynPDD2pOp" colab={"base_uri": "https://localhost:8080/"} outputId="30d830cf-6123-4fe9-ecee-906aad6aabbe"
# Summarize the ESOL dataset: feature/target dimensions and a sample graph.
print("Dataset type: ", type(data))
print("Dataset features: ", data.num_features)
print("Dataset target: ", data.num_classes)
# `data.len` without parentheses printed the bound method object, not the length.
print("Dataset length: ", len(data))
print("Dataset sample: ", data[0])
print("Sample nodes: ", data[0].num_nodes)
print("Sample edges: ", data[0].num_edges)
# + id="mvOSFVE8tyyd" colab={"base_uri": "https://localhost:8080/"} outputId="2a903fa5-8668-4f67-d6fe-ccc233fe463d"
# Shape of nodes is made of [num_nodes, num_node_features]
data[0].x
# + id="eJgV2WdKuqky" colab={"base_uri": "https://localhost:8080/"} outputId="d79a53fe-715b-4930-81a4-a3737e683d8f"
# Shape of edges is [2, num_edges]
data[0].edge_index.t()
# + id="JLLbZhpBvAnG" colab={"base_uri": "https://localhost:8080/"} outputId="60596c52-accf-471f-cb09-8ec3b9d5d683"
data[0].y
# + id="Qvn0uNqksE5q" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="4d275523-b50b-4b4d-ad61-9dfa7c4bcc35"
data[150]["smiles"]
# + id="df-9E4kbgOuU" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="d7cc62cc-629a-4fe1-bd87-e62d66616876"
molecule = Chem.MolFromSmiles(data[150]["smiles"])
molecule
# + id="KwoLnhIBsN_t" colab={"base_uri": "https://localhost:8080/"} outputId="8ac440d9-c34e-46e1-9a7b-25e9fa27798c"
type(molecule)
# + [markdown] id="EoqT0XdzU9K1"
# We implement GNN in form of basic GCN and do aggregation only from 4 hop neighbourhood
# + id="45raJzPsjhU7" colab={"base_uri": "https://localhost:8080/"} outputId="99b638cc-d534-412c-cb4c-ac287d4cea7c"
embedding_size = 64

class GCN(torch.nn.Module):
    """Five-layer GCN for graph-level regression on ESOL.

    Node embeddings are pooled with both global max- and mean-pooling,
    concatenated, and mapped to a single scalar by a linear head.
    """

    def __init__(self):
        # Init parent
        super(GCN, self).__init__()
        # Fixed seed so the weight initialisation is reproducible.
        torch.manual_seed(42)

        # GCN layers: an input projection plus four message-passing hops.
        self.initial_conv = GCNConv(data.num_features, embedding_size)
        self.conv1 = GCNConv(embedding_size, embedding_size)
        self.conv2 = GCNConv(embedding_size, embedding_size)
        self.conv3 = GCNConv(embedding_size, embedding_size)
        self.conv4 = GCNConv(embedding_size, embedding_size)

        # Output layer: *2 because max- and mean-pooled vectors are concatenated.
        self.out = Linear(embedding_size*2, 1)

    def forward(self, x, edge_index, batch_index):
        """Return (prediction, pooled_embedding) for a batch of graphs."""
        # Starting GCN layer.
        hidden = self.initial_conv(x, edge_index)
        # torch.tanh replaces the deprecated F.tanh alias.
        hidden = torch.tanh(hidden)

        # Other GCN layers.
        hidden = self.conv1(hidden, edge_index)
        hidden = torch.tanh(hidden)
        hidden = self.conv2(hidden, edge_index)
        hidden = torch.tanh(hidden)
        hidden = self.conv3(hidden, edge_index)
        hidden = torch.tanh(hidden)
        hidden = self.conv4(hidden, edge_index)
        hidden = torch.tanh(hidden)

        # Global pooling: concatenate max- and mean-pooled graph embeddings.
        hidden = torch.cat([gmp(hidden, batch_index),
                            gap(hidden, batch_index)], dim=1)

        # Linear output layer.
        out = self.out(hidden)
        return out, hidden

model = GCN()
print(model)
print("Number of parameters: ", sum(p.numel() for p in model.parameters()))
# + [markdown] id="vf_KuYZmnRgn"
# Training
# + id="xDra7-J-k0EB" colab={"base_uri": "https://localhost:8080/"} outputId="b4e39113-170d-4262-f41a-15834164abbf"
warnings.filterwarnings("ignore")

# MSE loss; the training loop takes sqrt() of it to train on RMSE.
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0007)

# Use the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

data_size = len(data)
NUM_GRAPHS_PER_BATCH = 64
# 80/20 train/test split by dataset position (the split itself is not shuffled,
# only the order of batches within each loader).
loader = DataLoader(data[:int(data_size * 0.8)],
                    batch_size=NUM_GRAPHS_PER_BATCH, shuffle=True)
test_loader = DataLoader(data[int(data_size * 0.8):],
                         batch_size=NUM_GRAPHS_PER_BATCH, shuffle=True)
def train(data):
    """Run one epoch over the global training `loader`.

    Note: the `data` parameter is unused (and shadows the module-level
    dataset); batches actually come from the global `loader`.

    Returns:
        (loss, embedding) from the *last* batch of the epoch only.
    """
    for batch in loader:
        # Move the batch to the target device.
        # NOTE(review): the return value is not reassigned, so this assumes
        # PyG's Batch.to moves tensors in place — confirm for the PyG version used.
        batch.to(device)
        optimizer.zero_grad()
        pred, embedding = model(batch.x.float(), batch.edge_index, batch.batch)
        # RMSE: square root of the MSE loss.
        loss = torch.sqrt(loss_fn(pred, batch.y))
        loss.backward()
        optimizer.step()
    return loss, embedding
print("Starting training...")
losses = []
for epoch in range(2000):
    loss, h = train(data)
    # Detach before storing: keeping the live loss tensor would retain the
    # whole autograd graph of every epoch and steadily grow memory usage.
    losses.append(loss.detach())
    if epoch % 100 == 0:
        print(f"Epoch {epoch} | Train Loss {loss}")
# + id="Z3Eh6uL15AYX" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="6e0386f5-f012-4910-c5e3-10306a8affe1"
# Plotting training loss.
losses_float = [float(loss.cpu().detach().numpy()) for loss in losses]
# range() is sufficient; the previous enumerate() discarded the values anyway.
indices = list(range(len(losses_float)))
plt.plot(indices, losses_float, c='c');
# + id="mpnH7cf03Uxi" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="990e7eb6-e3ca-4d27-a74a-ddfa82cc5c1e"
# Predict on one batch of the held-out test loader (no gradients needed).
test_batch = next(iter(test_loader))
with torch.no_grad():
    test_batch.to(device)
    pred, embed = model(test_batch.x.float(), test_batch.edge_index, test_batch.batch)
    df = pd.DataFrame()
    df["y_real"] = test_batch.y.tolist()
    df["y_pred"] = pred.tolist()
# Each entry is a one-element list; unwrap to scalars.
df["y_real"] = df["y_real"].apply(lambda row: row[0])
df["y_pred"] = df["y_pred"].apply(lambda row: row[0])
# Draw the molecules actually contained in this (shuffled) test batch.
# Indexing `data` from 0 would pair the predictions with unrelated
# training-set molecules.
molecules = [Chem.MolFromSmiles(smiles) for smiles in test_batch["smiles"]]
legends = [f"true/pred = {df['y_real'][i]:.2f}/{df['y_pred'][i]:.2f}" for i in range(len(df['y_real']))]
MolsToGridImage(molecules, molsPerRow=4, legends=legends)
| Pytorch/Molecule_Solubility.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Datatypes
# *How do we represent everyday data in machines?*
# We could...Find a way to represent everything as strings.. but...
# - How will the computer know what to do with the data?
# - How will it differentiate between different data?
#
# **Think about it!**
# In our hypothetical programming language, we write `output hello + world` and `output 5 + 7`. The plus sign has very different meanings for each. We cannot treat words as numbers, and numbers as words.
#
# We now write `output true and false`. Do we expect the language to output `true and false`, or treat it like binary and output `false`? What if we really expect the language to "take it literally" and output `true and false`?
#
# **Python is smart**
# When we define variables, we do not need to specify the type of the data. This makes the language less cumbersome, yet it gives the illusion that there are no types in Python.
#
# *In Python, we have*
# - `int` --> Integer.
# - `str` --> String. Literally, for storing a *string* of alphabetical characters.
# - `bool` --> Boolean. For storing true/false values (1/0)
# - `float` --> Float. Literally, for storing a number that *floats* between integers
#
# There are more datatypes, but these are all needed to start for now. Eventually, we can make our own datatypes!
# ## Operations on `int`s and `float`s
# Valid integer and float operations: `+`, `-`, `*`, `/` (and more to discover!)
# Integer arithmetic; reassignment updates the bound name.
num = 2 + 100
num += 25  # equivalent to num = num + 25
print(num)
3 * 100
3 / 100
num2 = 10 + 9
print(num2)
# Sadly, 10+9 does not equal 21. Predict what will happen in the next cell.
0.1 + 0.2 == 0.3
# **Wait, what?**
# Does this mean either math or Python is broken? What does this signify? Let's examine this in more detail...
a = 0.3
print(a)
b = 0.1 + 0.2
print(b)
# **What's with the zeros?** And why does `0.1 + 0.2` produce a float other than `0.3`?
# Floats work well in most cases, but beware when trying to equate 2 floats - many errors have occurred in software before because of such cases.
# +
floaty = 100/3
print(floaty)  # true division (/) always produces a float
notfloaty = 100 // 3
print(notfloaty)  # floor division (//) discards the fractional part
# -
# -
# ## Operations on `str`ings
"hello" + "world"
"hello" + " " + "world"
'hello ' + 'world'
# Subtraction is not defined for strings — this line raises a TypeError (intentional demo).
"hello" - "world"
# Multiplying a string by an integer repeats it.
3*"hello"
# ## Operations on `bool`eans
# bool is a subclass of int, so arithmetic treats True as 1 and False as 0.
True + False
False - True
True and False
# `and` binds tighter than `or`: this evaluates as False or (True and True).
False or True and True
True and False or True
not True and False or False and not True
not not not not False or not not True and False or True or True and False and (True or False and not True)
| 19T2/2_review/datatypes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CUDA UFuncs and memory management
#
# In this notebook you will learn how to GPU accelerate element-wise NumPy array functions by compiling [NumPy Universal functions \(or ufuncs\)](https://docs.scipy.org/doc/numpy-1.15.1/reference/ufuncs.html) for the GPU, along with some techniques for efficiently moving data between a CPU host and GPU device.
# ## Objectives for this Section
#
# By the time you complete this section you will be able to:
#
# - GPU accelerate NumPy ufuncs.
# - GPU accelerate hand-written vectorized functions.
# - Optimize data transfers between the CPU host and GPU device.
# ## Review of NumPy Universal Functions (ufuncs)
#
# NumPy has the concept of universal functions ("ufuncs"), which are functions that can take NumPy arrays of varying dimensions, or scalars, and operate on them element-by-element.
#
# As an example we'll use the NumPy `add` ufunc to demonstrate the basic ufunc mechanism:
# +
import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([10, 20, 30, 40])
# Element-wise addition of two equally-shaped arrays.
np.add(a, b)
# -
# Ufuncs also can combine scalars with arrays:
np.add(a, 100)
# Arrays of different, but compatible dimensions can also be combined. The lower dimensional array will be replicated to match the dimensionality of the higher dimensional array.
# +
c = np.arange(16).reshape(4, 4)
print('c:', c)
np.add(b, c)
# -
# -
# ## Making ufuncs for the GPU
#
# Numba has the ability to create *compiled* ufuncs. You simply implement a scalar function to be performed on all the inputs, decorate it with `@vectorize`, and Numba will figure out the broadcast rules for you.
# Generating a ufunc that uses CUDA requires giving an **explicit type signature** and setting the `target` attribute. The type signature argument describes what types to use both for the ufuncs arguments and return value:
# ```python
# 'return_value_type(argument1_value_type, argument2_value_type, ...)'
# ```
#
# See the Numba docs for more on [available types](https://numba.readthedocs.io/en/stable/reference/types.html), as well as for additional information on [writing ufuncs with more than one signature](https://numba.readthedocs.io/en/stable/user/vectorize.html)
#
# This example defines a ufunc that expects two `int64` values and returns an `int64` value, and is compiled for a CUDA device:
# +
from numba import vectorize

# Element-wise add compiled for the GPU: the signature takes two int64
# scalars and returns an int64; target='cuda' builds a CUDA kernel.
@vectorize(['int64(int64, int64)'], target='cuda')
def add_ufunc(x, y):
    return x + y
# -
add_ufunc(a, b)
# A lot of things just happened! Numba just automatically:
#
# * Compiled a CUDA kernel to execute the ufunc operation in parallel over all the input elements.
# * Allocated GPU memory for the inputs and the output.
# * Copied the input data to the GPU.
# * Executed the CUDA kernel (GPU function) with the correct kernel dimensions given the input sizes.
# * Copied the result back from the GPU to the CPU.
# * Returned the result as a NumPy array on the host.
#
# Compared to an implementation in C, the above is remarkably more concise.
#
# You might be wondering how fast our simple example is on the GPU? Let's see:
# %timeit np.add(b, c) # NumPy on CPU
# %timeit add_ufunc(b, c) # Numba on GPU
# Wow, the GPU is *a lot slower* than the CPU?? For the time being this is to be expected because we have (deliberately) misused the GPU in several ways in this example:
#
# * **Our inputs are too small**: the GPU achieves performance through parallelism, operating on thousands of values at once. Our test inputs have only 4 and 16 integers, respectively. We need a much larger array to even keep the GPU busy.
# * **Our calculation is too simple**: Sending a calculation to the GPU involves quite a bit of overhead compared to calling a function on the CPU. If our calculation does not involve enough math operations (often called "arithmetic intensity"), then the GPU will spend most of its time waiting for data to move around.
# * **We copy the data to and from the GPU**: While in some scenarios, paying the cost of copying data to and from the GPU can be worth it for a single function, often it will be preferable to run several GPU operations in sequence. In those cases, it makes sense to send data to the GPU and keep it there until all of our processing is complete.
# * **Our data types are larger than necessary**: Our example uses `int64` when we probably don't need it. Scalar code using data types that are 32 and 64-bit run basically the same speed on the CPU, and for integer types the difference may not be drastic, but 64-bit floating point data types have a significant performance cost on the GPU. Basic arithmetic on 64-bit floats can be anywhere from 2x (Pascal-architecture Tesla) to 24x (Maxwell-architecture GeForce) slower than 32-bit floats. NumPy defaults to 64-bit data types when creating arrays, so it is important to set the [`dtype`](https://docs.scipy.org/doc/numpy-1.14.0/reference/arrays.dtypes.html) attribute or use the [`ndarray.astype()`](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.ndarray.astype.html) method to pick 32-bit types when you need them.
#
#
# Given the above, let's try an example that is faster on the GPU by performing an operation with much greater arithmetic intensity, on a much larger input, and using a 32-bit data type:
# +
import math  # Note that for the CUDA target, we need to use the scalar functions from the math module, not NumPy

SQRT_2PI = np.float32((2*math.pi)**0.5)  # Precompute this constant as a float32. Numba will inline it at compile time.

@vectorize(['float32(float32, float32, float32)'], target='cuda')
def gaussian_pdf(x, mean, sigma):
    '''Compute the value of a Gaussian probability density function at x with given mean and sigma.'''
    return math.exp(-0.5 * ((x - mean) / sigma)**2) / (sigma * SQRT_2PI)
# +
import numpy as np

# Evaluate the Gaussian a million times!
x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
mean = np.float32(0.0)
sigma = np.float32(1.0)

# Quick test on a single element just to make sure it works
gaussian_pdf(x[0], 0.0, 1.0)
# -
import scipy.stats # for definition of gaussian distribution, so we can compare CPU to GPU time
norm_pdf = scipy.stats.norm
# Benchmark SciPy's CPU implementation against the CUDA ufunc.
# %timeit norm_pdf.pdf(x, loc=mean, scale=sigma)
# %timeit gaussian_pdf(x, mean, sigma)
# That's a pretty large improvement, even including the overhead of copying all the data to and from the GPU. Ufuncs that use special functions (`exp`, `sin`, `cos`, etc) on large data sets run especially well on the GPU.
# ## CUDA Device Functions
#
# Ufuncs are great, but you should not have to cram all of your logic into a single function body. `@njit` can be used to decorate a helper function that can be utilized by GPU-accelerated ufuncs:
# +
from numba import njit

# Helper: convert polar (rho, theta) to Cartesian (x, y). Usable from
# inside the GPU ufunc below.
@njit
def polar_to_cartesian(rho, theta):
    x = rho * math.cos(theta)
    y = rho * math.sin(theta)
    return x, y

# Euclidean distance between two points given in polar coordinates.
@vectorize(['float32(float32, float32, float32, float32)'], target='cuda')
def polar_distance(rho1, theta1, rho2, theta2):
    x1, y1 = polar_to_cartesian(rho1, theta1)  # We can use device functions inside our GPU ufuncs
    x2, y2 = polar_to_cartesian(rho2, theta2)
    return ((x1 - x2)**2 + (y1 - y2)**2)**0.5
# -
# One million random point pairs in polar coordinates (float32 for GPU speed).
n = 1000000
rho1 = np.random.uniform(0.5, 1.5, size=n).astype(np.float32)
theta1 = np.random.uniform(-np.pi, np.pi, size=n).astype(np.float32)
rho2 = np.random.uniform(0.5, 1.5, size=n).astype(np.float32)
theta2 = np.random.uniform(-np.pi, np.pi, size=n).astype(np.float32)
polar_distance(rho1, theta1, rho2, theta2)
# Note that the CUDA compiler aggressively inlines device functions, so there is generally no overhead for function calls. Similarly, the "tuple" returned by `polar_to_cartesian` is not actually created as a Python object, but represented temporarily as a struct, which is then optimized away by the compiler.
# ### Exercise: GPU Accelerate a Function
#
# Let's build a "zero suppression" function. A common operation when working with waveforms is to force all sample values below a certain absolute magnitude to be zero, as a way to eliminate low amplitude noise. Let's make some sample data:
# +
# This allows us to plot right here in the notebook
# %matplotlib inline

# Hacking up a noisy pulse train
from matplotlib import pyplot as plt

n = 100000
noise = np.random.normal(size=n) * 3
# Sine pulses shifted down and clipped at zero, so only positive lobes remain.
pulses = np.maximum(np.sin(np.arange(n) / (n / 23)) - 0.3, 0.0)
# Scale the pulses and add noise; int16 mimics raw digitiser samples.
waveform = ((pulses * 300) + noise).astype(np.int16)
plt.plot(waveform)
# -
# Now decorate this `zero_suppress` function to run as a vectorized ufunc on the CUDA device.
def zero_suppress(waveform_value, threshold):
    """Return 0 for samples below `threshold`, otherwise the sample unchanged.

    (Exercise: decorate with @vectorize(..., target='cuda') to run on the GPU.)
    """
    return 0 if waveform_value < threshold else waveform_value
# This will throw an error until you successfully vectorize the `zero_suppress` function above.
# The noise on the baseline should disappear when zero_suppress is implemented
# NOTE: the plain Python function raises on a whole NumPy array because the
# `if` tests the truth value of the array; the @vectorize version fixes this.
plt.plot(zero_suppress(waveform, 15))
# ## Managing GPU Memory
#
# During the benchmarking above, we used NumPy arrays on the CPU as inputs and outputs. If you want to reduce the impact of host-to-device/device-to-host bandwidth, it is best to copy data to the GPU explicitly and leave it there to amortize the cost over multiple function calls. In addition, allocating device memory can be relatively slow, so allocating GPU arrays once and refilling them with data from the host can also be a performance improvement.
#
# To demonstrate, let's create our example addition ufunc again:
# Recreate the addition ufunc, this time for float32 inputs.
@vectorize(['float32(float32, float32)'], target='cuda')
def add_ufunc(x, y):
    return x + y

n = 100000
x = np.arange(n).astype(np.float32)
y = 2 * x
# %timeit add_ufunc(x, y)  # Baseline performance with host arrays
# The `numba.cuda` module includes a function that will copy host data to the GPU and return a CUDA device array:
# +
from numba import cuda

# Copy the host arrays to the GPU once; subsequent kernel calls reuse them.
x_device = cuda.to_device(x)
y_device = cuda.to_device(y)

print(x_device)
print(x_device.shape)
print(x_device.dtype)
# -
# Device arrays can be passed to CUDA functions just like NumPy arrays, but without the copy overhead:
# %timeit add_ufunc(x_device, y_device)
# Because `x_device` and `y_device` are already on the device, this benchmark is much faster.
#
# That's a big performance improvement already, but we are still allocating a device array for the output of the ufunc and copying it back to the host. We can create an output array with the [`numba.cuda.device_array()`](https://numba.readthedocs.io/en/stable/cuda-reference/memory.html#numba.cuda.device_array) function:
# Pre-allocate a device-side output buffer so the ufunc writes in place.
out_device = cuda.device_array(shape=(n,), dtype=np.float32)  # does not initialize the contents, like np.empty()
# And then we can use a special `out` keyword argument to the ufunc to specify the output buffer:
# %timeit add_ufunc(x_device, y_device, out=out_device)
# This call to `add_ufunc` does not involve any data transfers between the host and device and therefore runs the fastest. If and when we want to bring a device array back to the host memory, we can use the `copy_to_host()` method:
out_host = out_device.copy_to_host()
print(out_host[:10])
# Numba provides additional methods for managing device memory and data transfer. See the [CUDA Memory Management documentation](https://numba.pydata.org/numba-doc/dev/cuda/memory.html) for full details.
# ### Exercise: Optimize Memory Movement
#
# Given these ufuncs:
# +
import math

# Pulse-train generator as a CUDA ufunc (same waveform as the earlier example).
@vectorize(['float32(float32, float32, float32)'], target='cuda')
def make_pulses(i, period, amplitude):
    return max(math.sin(i / period) - 0.3, 0.0) * amplitude

n = 100000
noise = (np.random.normal(size=n) * 3).astype(np.float32)
t = np.arange(n, dtype=np.float32)
period = n / 23
# -
# Convert this code to use device allocations so that there are only host<->device copies at the beginning and end and benchmark performance change:
pulses = make_pulses(t, period, 100.0)
waveform = add_ufunc(pulses, noise)
# %matplotlib inline
from matplotlib import pyplot as plt
plt.plot(waveform)
# ## Summary
#
# Now that you have completed this session you are able to:
#
# - GPU accelerate NumPy ufuncs
# - GPU accelerate hand-written vectorized functions
# - Optimize memory transfers between the CPU host and GPU device
# ## Appendix: Generalized Ufuncs
#
# Ufuncs apply a function on scalar values of an array. Generalized Ufuncs (or *gufuncs*) can operate on inputs that are sub-arrays of an input array.
#
# To build a gufunc, we use the `@guvectorize` decorator. This decorator needs several things:
#
# * A list of signatures. Signatures are similar to ufunc signatures, but the dimension of each argument also needs to be given using a comma-separated list of colons.
# * A layout specification. This is a string that gives the relationships between the shapes of the inputs and outputs. Input shapes are given before the `->`, and outputs after it.
# * The `target` kwarg, if the gufunc is to run on a CUDA GPU.
#
# Instead of returning an output, the output for a gufunc is passed in.
#
# The following example computes the moving mean of sub-arrays of a 2D matrix:
# +
from numba import guvectorize, float64, int64

# Moving mean example
# Layout '(n),()->(n)': for each length-n row of `a` plus a scalar window
# width, produce a length-n output row. gufuncs write results into `out`.
@guvectorize([(float64[:], int64[:], float64[:])], '(n),()->(n)', target='cuda')
def move_mean(a, window_arr, out):
    window_width = window_arr[0]
    asum = 0.0
    count = 0
    # Warm-up: grow the window until it holds window_width samples.
    for i in range(window_width):
        asum += a[i]
        count += 1
        out[i] = asum / count
    # Steady state: slide the fixed-width window along the row.
    for i in range(window_width, len(a)):
        asum += a[i] - a[i - window_width]
        out[i] = asum / count

arr = np.arange(20, dtype=np.float64).reshape(2, 10)
move_mean(arr, 3)
# -
# -
# Further reading on gufuncs:
#
# * [Generalized Universal Function API documentation](http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html)
# * [Numba guvectorize decorator documentation](https://numba.readthedocs.io/en/stable/user/vectorize.html#the-guvectorize-decorator)
# * [Numba CUDA guvectorize documentation](https://numba.readthedocs.io/en/stable/cuda/ufunc.html#generalized-cuda-ufuncs)
| session-1/exercises/cuda-ufuncs-and-memory-management.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Part 2 - Plotting element cross sections
#
# As shown in Part 1, OpenMC is able to plot neutron interaction cross sections for specific isotopes. However, we can also do the same for elements.
#
# This python notebook allows users to plot neutron interaction cross sections for specific elements using OpenMC.
#
# To plot elemental cross sections, the cross sections of each stable isotope of the element are combined.
# Embedded lecture video accompanying this notebook section.
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/ELZNeIdSuMY" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# This first code block plots the (n,2n) neutron multiplication cross section for all elements.
# +
import plotly.graph_objects as go

# the create plot function contains all the OpenMC routines for accessing the cross sections
from plotting_utils import create_element_plot

# Elements whose (n,2n) cross sections are candidates for plotting.
elements_of_interest = [
    'Ag', 'Al', 'Ar', 'As', 'Au', 'B', 'Ba', 'Be', 'Bi', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cl',
    'Co', 'Cr', 'Cs', 'Cu', 'Dy', 'Er', 'Eu', 'F', 'Fe', 'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf',
    'Hg', 'Ho', 'I', 'In', 'Ir', 'K', 'Kr', 'La', 'Li', 'Lu', 'Mg', 'Mn', 'Mo', 'N', 'Na',
    'Nb', 'Nd', 'Ni', 'O', 'P', 'Pa', 'Pb', 'Pd', 'Po', 'Pr', 'Rb',
    'Re', 'Rh', 'Rn', 'Ru', 'Sb', 'Sc', 'Se', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Tb',
    'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'V', 'W', 'Xe', 'Y', 'Zn', 'Zr'
]

# Neutron multiplication reaction of interest.
reaction_of_interest = '(n,2n)'

# we could plot all the elements but that would take a long time so we just plot the first 15
number_of_elements_to_plot = 15

create_element_plot(
    elements=elements_of_interest[:number_of_elements_to_plot],
    reaction=reaction_of_interest
)
# -
# Tritium production is another important reaction in fusion as it affects the rate at which tritium can be bred. When designing breeder blankets we need to use materials which maximise both neutron multiplication AND tritium production.
#
# The next code block plots the (n,Xt) tritium production reaction for all elements.
# +
# Same element list, with Li moved to the front so it is included in the
# first-15 slice that gets plotted.
elements_of_interest = [
    'Li', 'Ag', 'Al', 'Ar', 'As', 'Au', 'B', 'Ba', 'Be', 'Bi', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cl',
    'Co', 'Cr', 'Cs', 'Cu', 'Dy', 'Er', 'Eu', 'F', 'Fe', 'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf',
    'Hg', 'Ho', 'I', 'In', 'Ir', 'K', 'Kr', 'La', 'Lu', 'Mg', 'Mn', 'Mo', 'N', 'Na',
    'Nb', 'Nd', 'Ni', 'O', 'P', 'Pa', 'Pb', 'Pd', 'Po', 'Pr', 'Rb',
    'Re', 'Rh', 'Rn', 'Ru', 'Sb', 'Sc', 'Se', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Tb',
    'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'V', 'W', 'Xe', 'Y', 'Zn', 'Zr'
]

reaction_of_interest = '(n,Xt)' # The X is a wild card / catch all

# we could plot all the elements but that would take a long time so we just plot the first 15
number_of_elements_to_plot = 15

create_element_plot(
    elements=elements_of_interest[:number_of_elements_to_plot],
    reaction=reaction_of_interest
)
# -
# Lithium is the typical candidate tritium breeder material used in D-T fusion reactor designs.
#
# The graph shows that Lithium has a high (n,Xt) cross section for low energy neutrons which decreases as neutron energy increases.
# **Learning Outcomes for Part 2:**
# - OpenMC can be used to plot interaction cross sections for specific elements.
# - Tritium production is an important reaction to consider when selecting a breeder material.
# - Lithium is a good material for tritium production.
| tasks/task_01_cross_sections/2_element_xs_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.015306, "end_time": "2021-10-14T15:01:59.211948", "exception": false, "start_time": "2021-10-14T15:01:59.196642", "status": "completed"} tags=[]
# To run this example locally, execute: `ploomber examples -n spec-api-r`.
#
# To start a free, hosted JupyterLab: [](https://mybinder.org/v2/gh/ploomber/binder-env/main?urlpath=git-pull%3Frepo%3Dhttps%253A%252F%252Fgithub.com%252Fploomber%252Fprojects%26urlpath%3Dlab%252Ftree%252Fprojects%252Fspec-api-r%252FREADME.ipynb%26branch%3Dmaster)
#
# Found an issue? [Let us know.](https://github.com/ploomber/projects/issues/new?title=spec-api-r%20issue)
#
# Have questions? [Ask us anything on Slack.](http://community.ploomber.io/)
#
# + [markdown] papermill={"duration": 0.01649, "end_time": "2021-10-14T15:01:59.242431", "exception": false, "start_time": "2021-10-14T15:01:59.225941", "status": "completed"} tags=[]
# # R pipeline
#
# R pipeline.
#
# **Note:** If using conda (`environment.yml`), R will be installed and configured. If using pip (`requirements.txt`), you must install R and [configure it yourself]( https://github.com/IRkernel/IRkernel).
# + [markdown] papermill={"duration": 0.016309, "end_time": "2021-10-14T15:01:59.278450", "exception": false, "start_time": "2021-10-14T15:01:59.262141", "status": "completed"} tags=[]
# ## Pipeline description
#
# This pipeline contains three tasks. The last task generates a plot. To get the
# pipeline description:
# + papermill={"duration": 3.431763, "end_time": "2021-10-14T15:02:02.725239", "exception": false, "start_time": "2021-10-14T15:01:59.293476", "status": "completed"} tags=[] language="bash"
# ploomber status
# + [markdown] papermill={"duration": 0.015869, "end_time": "2021-10-14T15:02:02.756645", "exception": false, "start_time": "2021-10-14T15:02:02.740776", "status": "completed"} tags=[]
# ## Build the pipeline from the command line
# + papermill={"duration": 9.472908, "end_time": "2021-10-14T15:02:12.245226", "exception": false, "start_time": "2021-10-14T15:02:02.772318", "status": "completed"} tags=[] language="bash"
# mkdir output
# ploomber build
# + [markdown] papermill={"duration": 0.018243, "end_time": "2021-10-14T15:02:12.282449", "exception": false, "start_time": "2021-10-14T15:02:12.264206", "status": "completed"} tags=[]
# Output stored in the ``output/`` directory.
| spec-api-r/README.ipynb |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Interactie in Elm
# + [markdown] slideshow={"slide_type": "slide"}
# Een functioneel programma heeft geen "toestand", hoe kun je dan interactie beschrijven?
#
# -> splitsen in *functionele onderdelen* en de rest: toestand (memory) en i/o
#
# + [markdown] slideshow={"slide_type": "slide"}
# Vgl. eindige automaat (FSM): overgang, gegeven input en toestand, bepaalt volgende toestand en output.
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# FSM kun je beschrijven met:
#
# 
#
# * input "computer" (symbolen, events); update/next state **functie**; state (memory); view/output **functie**
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ```elm
# import Playground exposing (..)
#
# main = game view update (0,0)
#
# view computer (x,y) =
# [ square blue 40 |> move x y ]
#
# update computer (x,y) =
# ( x + toX computer.keyboard
# , y + toY computer.keyboard
# )
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# Zie: [Try-Elm](https://elm-lang.org/examples/keyboard) enz.
#
# Met Playground kun je eenvoudige browser-games maken.
| presentaties/.ipynb_checkpoints/5-interactie-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from gurobipy import * # import the optimize solver Gurobi

number_of_item = 2 # Set the number limit of manufacturing item type

m = Model() # Import and create the model

# Set the input Parameter:
leather_required = [3, 4] # kgs of leather used to produce item type
selling_profit = [250, 400] # Profit for each item sold
leather_available = 2000 # Total kgs of leather available for production for the month

# Set the Variable list: Number of item type to be produced by Croscill Home and set the variable nx to integer
nx = []
for i in range(number_of_item):
    nx.append(m.addVar(vtype=GRB.INTEGER, name='nx{}'.format(i + 1)))

# Set the Maximize Objective: Total profit
m.setObjective(quicksum([selling_profit[i]*nx[i] for i in range(len(nx))]), GRB.MAXIMIZE)
# +
# Set Non Negative decision variable
# NOTE(review): Gurobi variables default to a lower bound of 0, so these
# constraints are redundant (though harmless) — confirm against addVar docs.
c1 = []
for i in range(len(nx)):
    c1.append(m.addConstr(nx[i] >= 0))

# Set Minimum number of Purses is at least 2*Handbags
c2 = m.addConstr(nx[0] >= 2*nx[1])

# Set total leather availability
c3 = m.addConstr(quicksum([leather_required[i]*nx[i] for i in range(len(nx))]) <= leather_available)
# -
# Run the optimize solver
m.optimize()

# Get the Optimal Solution for X
m.printAttr('X')

# Get the Optimal Objective Value
m.ObjVal
| assets/python/Ex2[Handbags]_s.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import os
import pickle
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dense,Dropout,Flatten,Reshape
from tensorflow.keras.optimizers import SGD
# -
# Fix the RNG seeds for reproducible runs.
np.random.seed(seed=1)
tf.random.set_seed(1)
#from tensorflow.keras.preprocessing.image import ImageDataGenerator
from matplotlib.image import imread
#filepath = input("please enter the file path : ")
# Hard-coded dataset directory (Windows path); adjust per machine.
filepath = r'C://Users//riaz//Desktop//dataset//'
# %pwd
len(os.listdir(filepath))
os.listdir(filepath)[0]
#img = imread()
# Preview the first image found in the dataset directory.
imgPath = filepath + os.listdir(filepath)[0]
plt.imshow(plt.imread(imgPath))
a = plt.imread(imgPath)
'''w = []
h = []
target_size = (128,128,0)
for images in os.listdir(filepath):
img = imread(filepath + images)
d1,d2,c = img.shape
w.append(d1)
h.append(d2)
plt.subplot(1,2,1)
sns.countplot(x=w)
plt.subplot(1,2,2)
sns.countplot(x=h)
#sns.jointplot(x=w,y=h)
'''
input_size = (128, 128)

# Load every image in `filepath`, resized to 128x128 and scaled to [0, 1].
X = []
for filename in os.listdir(filepath):
    imgPath = filepath + filename
    img = image.load_img(imgPath, target_size=input_size)
    # here we divide by 255 to scale the image and pass it as array
    X.append((1/255)*np.asarray(img))
print('completed loading files from ... ' + filepath)
# image_gen = ImageDataGenerator(rotation_range=20,width_shift_range=0.1,height_shift_range=0.1,rescale=1/255,shear_range=0.1,zoom_range=0.1,
# horizontal_flip=True, fill_mode='nearest')
# Persist the loaded arrays so later runs can skip the image decoding.
with open('X_data.pkl', 'wb') as picklefile:
    pickle.dump(X, picklefile)
# The original cell ended with a stray lowercase `x`, which raised
# NameError — the loaded data lives in the capital-X variable.
X
| unsupervised ml/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ECBelarmino/OOP-58001/blob/main/OOP_In_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="L1XUTrB5kxtv"
# # **Creating a Class**
# + id="EDHa9sk0k4Kh"
# Minimal class: `pass` makes an empty body legal.
class MyClass:
    pass
#to create a class without method
# + id="VGibKnb5lZWj"
# Redefines MyClass, now with a single class attribute.
class MyClass:
    x = 20
# + [markdown] id="3b-ub6hDlxZA"
# # **Demonstration**
# + colab={"base_uri": "https://localhost:8080/"} id="pDcWVOK6l6Io" outputId="046f1dfe-84fe-458c-eaf4-459b48cb4fe7"
class Car:
    """A car described by its name and color."""

    def __init__(self, name, color):
        self.name = name
        self.color = color

    def description(self):
        """Return a sentence describing this car."""
        return f"the {self.name} car is {self.color}"

    def display(self):
        """Print the description with a lead-in phrase."""
        print("We can say that", self.description())


obj1 = Car("Honda", "blue")
obj1.display()
# + colab={"base_uri": "https://localhost:8080/"} id="ogSL0wagnprX" outputId="88717f48-aa3b-4383-d236-190b4c30dffc"
# Modifying an Object Property
obj1.name = "Mitsubishi"
print(obj1.name)
# + id="qC8KoTeun0NX"
# Deleting the object
del obj1.color
# + [markdown] id="k1QNOEhwoP8l"
# # **Application 1**
# + [markdown] id="G853NrHRopIn"
# Write a program to compute the area and perimeter of a rectangle.
# + colab={"base_uri": "https://localhost:8080/"} id="pxDYgWVQoviy" outputId="e77e6122-4c3f-4950-e19c-72d43cd11185"
class Rectangle:
    """Rectangle defined by its length and width."""

    def __init__(self, length, width):
        self.length = length
        self.width = width

    def Area(self):
        """Return length * width."""
        return self.width * self.length

    def Perimeter(self):
        """Return the total length of all four sides."""
        return 2 * self.length + 2 * self.width

    def display(self):
        """Print both computed measurements."""
        print("The area of the rectangle is", self.Area())
        print("The perimeter of the rectangle is", self.Perimeter())


pol = Rectangle(7, 4.5)
pol.display()
# + [markdown] id="sI9HDZ6TqYgl"
# # **Application 2**
# + [markdown] id="paMOwGqNqdWV"
# Write a program to display a class name OOP_58001 with your student number and full name.
# + colab={"base_uri": "https://localhost:8080/"} id="7fDacy20qmmr" outputId="c6189fce-fc28-4bf7-9be8-3024d2878259"
class OOP_58001:
    """Holds a student's name parts and student number for display."""

    def __init__(self, lname, fname, number):
        self.lname = lname
        self.fname = fname
        self.number = number

    def studname(self):
        """Return first name concatenated with last name (no separator added)."""
        return self.fname + self.lname

    def studnum(self):
        """Return the student number string."""
        return self.number

    def display(self):
        """Print the student's full name and number."""
        print("My name is", self.studname())
        print("My student number is", self.studnum())


student = OOP_58001 ("Belarmino", "<NAME> ", "202110020")
student.display()
| OOP_In_Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import salem
from salem import get_demo_file, DataLevels, GoogleVisibleMap, Map
import matplotlib.pyplot as plt
plt.style.use('seaborn')
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 18
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# -
#Read the data:
catches_df = pd.read_csv("catches_24_7_2021.csv")
print("There are {} rows originally in the data".format(len(catches_df)))
catches_df.head()
catches_df['Community Group'] = catches_df['Community Group'].str.replace("Predator Free Port Hills ","")
def remove_nans(df, col):
    """Drop rows where *col* is NaN, report how many were removed, and return the cleaned frame."""
    n_before = len(df)
    cleaned = df.dropna(subset=[col])
    print(f"{n_before - len(cleaned)} rows with Nan {col} values removed")
    return cleaned
# Normalise the suburb name, then drop rows missing any of the key columns;
# `.unique()` calls let the notebook eyeball the remaining category values.
catches_df.replace("Mt Pleasant", "Mount Pleasant", inplace=True)
catches_df = remove_nans(catches_df,'Community Group')
catches_df['Community Group'].unique()
catches_df = remove_nans(catches_df,'Bait')
catches_df['Bait'].unique()
catches_df = remove_nans(catches_df,'Species caught')
catches_df['Species caught'].unique()
catches_df = remove_nans(catches_df,'Trap type')
catches_df['Trap type'].unique()
catches_df = remove_nans(catches_df,'Number/Code')
print("There are {} unique trap IDs".format(len(catches_df['Number/Code'].unique())))
print("There are {} rows remaining in the data".format(len(catches_df)))
# +
def plot_bar_by_category(df, category, color="#DD9933", title="", filename=None):
    """
    Produce a horizontal bar graph of catch counts broken down by *category*,
    labelling each bar with its percentage of all rows in *df*.

    Parameters
    ----------
    df : pandas.DataFrame
        Catch data, one row per catch.
    category : str
        Column of *df* to group by (e.g. "Bait", "Trap type").
    color : str
        Bar fill colour (hex string).
    title : str
        Figure title.
    filename : str or None
        If given, the figure is also saved to this path.
    """
    cat_counts = df[category].value_counts()
    # BUG FIX: the original used plt.figure(2, ...), which reuses figure
    # number 2 on every call — successive calls stacked overlapping axes into
    # the same figure (and figsize was ignored after the first call).
    fig = plt.figure(figsize=(12, 8))
    ax = fig.subplots(1)
    ax.barh(cat_counts.keys(), cat_counts.to_numpy(), color=color)
    for p in ax.patches:
        width = p.get_width()
        height = p.get_height()
        x, y = p.get_xy()
        # Annotate each bar with its share of the total row count.
        ax.annotate("{:.1f}%".format(width / len(df) * 100),
                    (x + width + 10, y + height / 2 - 0.15), ha='left')
    plt.xlabel("Catches")
    plt.ylabel(category)
    plt.title(title)
    if filename is not None:
        plt.tight_layout()
        plt.savefig(filename)
    plt.show()
# -
plot_bar_by_category(catches_df, "Community Group", color="#DD9933", title="Figure 1. Total Catches by Group",
filename="plots/fig1_catches_by_group_bar")
plot_bar_by_category(catches_df, "Species caught", color="#9922AA", title="Figure 2. Total Catches by Species",
filename="plots/fig2_catches_by_species_bar")
plot_bar_by_category(catches_df, "Trap type", color="#22AADD", title="Figure 3. Total Catches by Trap Type",
filename="plots/fig3_catches_by_type_bar")
plot_bar_by_category(catches_df, "Bait", color="#33AA22", title="Figure 4. Total Catches by Bait",
filename="plots/fig4_catches_by_bait_bar")
# Add datetime column:
catches_df['Datetime'] = pd.to_datetime(catches_df['Date'])
# Write to HDF5
catches_df.to_hdf('catches.h5',key='catches',mode='w')
| Predator Free Port Hills - Data Exploration and Cleanup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# 사용자 정의 Dataset, Dataloader, Transforms 작성하기
# ==========================================================
#
# **저자** : <NAME> <https://chsasank.github.io>
# **번역** : 정윤성 <https://github.com/Yunseong-Jeong>
#
# 머신러닝 문제를 푸는 과정에서 데이터를 준비하는데 많은 노력이 필요합니다.
# PyTorch는 데이터를 불러오는 과정을 쉽게해주고, 또 잘 사용한다면 코드의 가독성도 보다 높여줄 수 있는 도구들을
# 제공합니다. 이 튜토리얼에서 일반적이지 않은 데이터셋으로부터 데이터를 읽어오고
# 전처리하고 증가하는 방법을 알아보겠습니다.
#
# 이번 튜토리얼을 진행하기 위해 아래 패키지들을 설치해주세요.
#
# - ``scikit-image``: 이미지 I/O 와 변형을 위해 필요합니다.
# - ``pandas``: CSV 파일 파싱을 보다 쉽게 해줍니다.
#
#
#
#
# +
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# 경고 메시지 무시하기
import warnings
warnings.filterwarnings("ignore")
plt.ion() # 반응형 모드
# -
# 다룰 데이터셋은 아래 조건과 같은 랜드마크(landmark)가 있는 얼굴 사진입니다.
#
# .. figure:: /_static/img/landmarked_face2.png
# :width: 400
#
# 각각의 얼굴에 68개의 서로다른 중요 포인트들이 존재합니다.
#
# <div class="alert alert-info"><h4>Note</h4><p>이 `링크 <https://download.pytorch.org/tutorial/faces.zip>`_ 를 통해 데이터셋을 다운로드 해주세요.
# 다운로드한 데이터셋은 'data/faces/'에 위치해야 합니다.
# 이 데이터셋은 ImageNet에서 '얼굴'이라는 태그를 가진 몇몇 이미지들에 대해
# `dlib의 pose estimation <https://blog.dlib.net/2014/08/real-time-face-pose-estimation.html>`_ 을
# 적용한 데이터셋입니다.</p></div>
#
#
# 데이터셋은 아래와 같은 특징을 가진 CSV 파일이 포함되어 있습니다.
#
# ::
#
# image_name,part_0_x,part_0_y,part_1_x,part_1_y,part_2_x, ... ,part_67_x,part_67_y
# 0805personali01.jpg,27,83,27,98, ... 84,134
# 1084239450_e76e00b7e7.jpg,70,236,71,257, ... ,128,312
#
# 이제 CSV 파일을 불러와서 (N, 2) 배열안에 있는 랜드마크들을 잡아보겠습니다.
# N은 랜드마크(landmarks)의 개수입니다.
#
#
# +
# Load the landmark annotations and inspect one example row (row 65).
landmarks_frame = pd.read_csv('data/faces/face_landmarks.csv')
n = 65
img_name = landmarks_frame.iloc[n, 0]
landmarks = landmarks_frame.iloc[n, 1:]
landmarks = np.asarray(landmarks)
# Reshape the flat (x0, y0, x1, y1, ...) row into an (N, 2) float array.
landmarks = landmarks.astype('float').reshape(-1, 2)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
# -
# 이미지와 랜드마크(landmark)를 보여주는 간단한 함수를 작성해보고,
# 실제로 적용해보겠습니다.
#
#
#
# +
def show_landmarks(image, landmarks):
    """Show an image with its landmark points overlaid as red dots."""
    # The original body carried a second bare triple-quoted string after the
    # docstring (a Korean duplicate); it was a no-op expression statement and
    # has been folded into the single docstring above.
    plt.imshow(image)
    plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
    plt.pause(0.001)  # pause briefly so the figure window refreshes


plt.figure()
show_landmarks(io.imread(os.path.join('data/faces/', img_name)),
               landmarks)
plt.show()
# -
# Dataset 클래스
# ----------------
#
# ``torch.utils.data.Dataset`` 은 데이터셋을 나타내는 추상클래스입니다.
# 여러분의 데이터셋은 ``Dataset`` 에 상속하고 아래와 같이 오버라이드 해야합니다.
#
# - ``len(dataset)`` 에서 호출되는 ``__len__`` 은 데이터셋의 크기를 리턴해야합니다.
# - ``dataset[i]`` 에서 호출되는 ``__getitem__`` 은
# $i$\ 번째 샘플을 찾는데 사용됩니다.
#
# 이제 데이터셋 클래스를 만들어보도록 하겠습니다.
# ``__init__`` 을 사용해서 CSV 파일 안에 있는 데이터를 읽지만,
# ``__getitem__`` 을 이용해서 이미지의 판독을 합니다.
# 이 방법은 모든 이미지를 메모리에 저장하지 않고 필요할때마다 읽기 때문에
# 메모리를 효율적으로 사용합니다.
#
# 데이터셋의 샘플은 ``{'image': image, 'landmarks': landmarks}`` 의 사전 형태를 갖습니다.
# 선택적 인자인 ``transform`` 을 통해 필요한 전처리 과정을 샘플에 적용할 수 있습니다.
# 다음 장에서 전이 ``transform`` 의 유용성에 대해 알아보겠습니다.
#
#
#
class FaceLandmarksDataset(Dataset):
    """Face Landmarks dataset.

    The CSV of landmark coordinates is read once up front, but each image is
    loaded lazily in __getitem__, so the whole image set never has to fit in
    memory at once.
    """
    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the CSV file with annotations.
            root_dir (string): Directory containing all the images.
            transform (callable, optional): Optional transform applied to each sample.
        """
        self.landmarks_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform
    def __len__(self):
        # One sample per CSV row.
        return len(self.landmarks_frame)
    def __getitem__(self, idx):
        # Accept tensor indices (e.g. from a sampler) as plain ints/lists.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir,
                                self.landmarks_frame.iloc[idx, 0])
        image = io.imread(img_name)
        landmarks = self.landmarks_frame.iloc[idx, 1:]
        # Reshape the flat (x0, y0, x1, y1, ...) row into an (N, 2) float array.
        landmarks = np.array([landmarks])
        landmarks = landmarks.astype('float').reshape(-1, 2)
        sample = {'image': image, 'landmarks': landmarks}
        if self.transform:
            sample = self.transform(sample)
        return sample
# 클래스를 인스턴스화 하고 데이터 샘플을 통해서 반복해봅시다.
# 첫번째 4개의 샘플의 크기를 출력 하고, 샘플들의 랜드마크(landmarks)를 보여줄 것 입니다.
#
#
#
# +
face_dataset = FaceLandmarksDataset(csv_file='data/faces/face_landmarks.csv',
root_dir='data/faces/')
fig = plt.figure()
for i in range(len(face_dataset)):
sample = face_dataset[i]
print(i, sample['image'].shape, sample['landmarks'].shape)
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
show_landmarks(**sample)
if i == 3:
plt.show()
break
# -
# Transforms
# ----------
#
# 위에서 볼 수 있었던 한가지 문제점은 샘플들이 다 같은 사이즈가 아니라는 것입니다.
# 대부분의 신경망(neural networks)은 고정된 크기의 이미지라고 가정합니다.
# 그러므로 우리는 신경망에 주기 전에 처리할 과정을 작성해야 합니다.
#
# 3가지의 transforms 을 만들어 봅시다:
# - ``Rescale``: 이미지의 크기를 조절합니다.
# - ``RandomCrop``: 이미지를 무작위로 자릅니다.
# 이것을 data augmentation이라 합니다.
# - ``ToTensor``: numpy 이미지에서 torch 이미지로 변경합니다.
# (축변환이 필요합니다)
#
# 간단한 함수대신에 호출 할 수 있는 클래스로 작성 합니다.
# 이렇게 한다면, 클래스가 호출 될 때마다 전이(Transform)의 매개변수가 전달 되지 않아도 됩니다.
# 이와 같이, ``__call__`` 함수를 구현해야 합니다.
# 필요하다면, ``__init__`` 함수도 구현해야 합니다. 다음과 같이 전이(transform)를 사용할 수 있습니다.
#
# ::
#
# tsfm = Transform(params)
# transformed_sample = tsfm(sample)
#
# 아래에서는 이미지와 랜드마크(landmark)들을 어떻게 적용하는지 살펴보도록 하겠습니다.
#
#
# +
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If a tuple, that
            exact (height, width) is used; if an int, the smaller edge is
            resized to output_size while preserving the aspect ratio.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # int: scale the shorter side to output_size, keep aspect ratio.
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        new_h, new_w = int(new_h), int(new_w)
        img = transform.resize(image, (new_h, new_w))
        # Landmarks are (x, y) pairs: scale x by the width ratio and y by
        # the height ratio.
        landmarks = landmarks * [new_w / w, new_h / h]
        return {'image': img, 'landmarks': landmarks}
class RandomCrop(object):
    """Crop the image in a sample at a random location.

    Args:
        output_size (tuple or int): Desired output size. If an int, a
            square crop of that size is made.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        # BUG FIX: use an exclusive upper bound of (h - new_h + 1) so that
        #  * np.random.randint no longer raises ValueError when the crop size
        #    equals the image size (randint(0, 0) is invalid), and
        #  * the crop can actually reach the bottom/right edge of the image.
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)
        image = image[top: top + new_h,
                      left: left + new_w]
        # Shift landmark (x, y) coordinates into the crop's frame.
        landmarks = landmarks - [left, top]
        return {'image': image, 'landmarks': landmarks}
class ToTensor(object):
    """Convert the numpy arrays in a sample into torch tensors."""

    def __call__(self, sample):
        # numpy stores images as H x W x C while torch expects C x H x W,
        # so move the colour axis to the front before converting.
        chw_image = sample['image'].transpose((2, 0, 1))
        return {
            'image': torch.from_numpy(chw_image),
            'landmarks': torch.from_numpy(sample['landmarks']),
        }
# -
# Compose transforms
# ~~~~~~~~~~~~~~~~~~
#
# 이제, 샘플에 전이(transform)를 적용해 봅시다.
#
# 이미지의 가장 짧은 측면을 256개로 rescale하고,
# 그후에 무작위로 224개를 자른다고 가정합시다.
# 다시말해, ``Rescale`` 과 ``RandomCrop`` 을 사용해봅시다.
#
# ``torchvision.transforms.Compose`` 는 위의 두작업을 하는 간단한 호출할 수 있는 클래스입니다.
#
#
#
# +
scale = Rescale(256)
crop = RandomCrop(128)
composed = transforms.Compose([Rescale(256),
RandomCrop(224)])
# Apply each of the above transforms on sample.
fig = plt.figure()
sample = face_dataset[65]
for i, tsfrm in enumerate([scale, crop, composed]):
transformed_sample = tsfrm(sample)
ax = plt.subplot(1, 3, i + 1)
plt.tight_layout()
ax.set_title(type(tsfrm).__name__)
show_landmarks(**transformed_sample)
plt.show()
# -
# 데이터셋을 이용한 반복작업
# -----------------------------
#
# 전이(transform)를 적용한 dataset을 만들기위해서 만들었던것을 다 집어 넣어 봅시다.
#
# 요약하자면, 데이터셋은 다음과 같이 샘플링 됩니다.
#
# - 이미지는 파일 전체를 메모리에 올리지않고 필요할때마다 불러와서 읽습니다.
# - 그 후에 읽은 이미지에 Transform을 적용합니다.
# - transfroms 중 하나가 랜덤이기 때문에, 데이터는 샘플링때 증가합니다.
#
#
# 우리는 이제 이전에 사용하던 것 처럼 ``for i in range`` 를 사용해서
# 생성된 데이터셋을 반복 작업에 사용할 수 있습니다.
#
#
#
# +
transformed_dataset = FaceLandmarksDataset(csv_file='data/faces/face_landmarks.csv',
root_dir='data/faces/',
transform=transforms.Compose([
Rescale(256),
RandomCrop(224),
ToTensor()
]))
for i in range(len(transformed_dataset)):
sample = transformed_dataset[i]
print(i, sample['image'].size(), sample['landmarks'].size())
if i == 3:
break
# -
# 그러나, 데이터 상에서 반복하는 ``for`` 문은 많은 특징(features)를 놓칠 수 있습니다.
# 특히, 아래와 같은 것을 놓칠 수 있습니다:
#
# - 데이터를 묶는 과정
# - 데이터를 섞는 과정
# - 병렬처리 과정에서 ``multiprocessing`` 을 사용할때 데이터를 불러오는 것
#
# ``torch.utils.data.DataLoder`` 는 위와 같은 기능을 모두 제공해주는 반복자(iterator)입니다.
# 사용되는 매개변수(Parameters)는 명확해야 합니다.
# ``collate_fn`` 는 흥미로운 매개변수(Parameters) 중 하나입니다.
# ``collate_fn`` 을 이용하여 샘플들을 정확하게 배치하는 방법을 명시할 수 있습니다.
# 그러나, 대부분의 경우에 대해서 정확하게 작동해야 합니다.
#
#
# +
dataloader = DataLoader(transformed_dataset, batch_size=4,
shuffle=True, num_workers=4)
# Helper that visualises one batch of (image, landmarks) samples.
def show_landmarks_batch(sample_batched):
    """Show image with landmarks for a batch of samples.

    The batch is tiled into one grid image; each sample's landmarks are
    shifted right by its position in the grid before plotting.
    """
    images_batch, landmarks_batch = \
            sample_batched['image'], sample_batched['landmarks']
    batch_size = len(images_batch)
    im_size = images_batch.size(2)
    # make_grid pads each tile with a 2-pixel border by default; landmark
    # coordinates must be offset by the same amount.
    grid_border_size = 2
    grid = utils.make_grid(images_batch)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))
    for i in range(batch_size):
        plt.scatter(landmarks_batch[i, :, 0].numpy() + i * im_size + (i + 1) * grid_border_size,
                    landmarks_batch[i, :, 1].numpy() + grid_border_size,
                    s=10, marker='.', c='r')
        plt.title('Batch from dataloader')
for i_batch, sample_batched in enumerate(dataloader):
print(i_batch, sample_batched['image'].size(),
sample_batched['landmarks'].size())
# observe 4th batch and stop.
if i_batch == 3:
plt.figure()
show_landmarks_batch(sample_batched)
plt.axis('off')
plt.ioff()
plt.show()
break
# -
# Afterword: torchvision
# ----------------------
#
# 이번 튜토리얼에서는, 데이터셋 작성과 사용, 전이(transforms), 데이터를 불러오는 방법에 대해서 알아봤습니다.
# ``torchvision`` 패키지는 몇몇의 일반적인 데이터셋과 전이(transforms)들을 제공합니다.
# 클래스들을 따로 작성하지 않아도 될 것입니다.
# torchvision에서의 사용가능한 일반적인 데이터셋 중 하나는 ``ImageFolder`` 입니다.
# 이것은 다음과 같은 방식으로 구성되어 있다고 가정합니다: ::
#
# root/ants/xxx.png
# root/ants/xxy.jpeg
# root/ants/xxz.png
# .
# .
# .
# root/bees/123.jpg
# root/bees/nsdf3.png
# root/bees/asd932_.png
#
# 여기서'ants', 'bees'는 class labels입니다.
# 비슷하게, ``RandomHorizontalFlip`` , ``Scale`` 과 같이 ``PIL.Image`` 에서 작동하는
# 일반적인 전이(transforms)도 사용가능합니다. 이와 같이 데이터로더(dataloader)를 사용할 수 있습니다: ::
#
# import torch
# from torchvision import transforms, datasets
#
# data_transform = transforms.Compose([
# transforms.RandomSizedCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
# hymenoptera_dataset = datasets.ImageFolder(root='hymenoptera_data/train',
# transform=data_transform)
# dataset_loader = torch.utils.data.DataLoader(hymenoptera_dataset,
# batch_size=4, shuffle=True,
# num_workers=4)
#
# training code에 대한 예시를 알고 싶다면,
# :doc:`transfer_learning_tutorial` 문서를 참고해주세요
#
#
| docs/_downloads/f498e3bcd9b6159ecfb1a07d6551287d/data_loading_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/CrisHarsche/DS_Python/blob/main/Pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="FwOZh0-I2ax4"
# Demonstração Pandas - Disciplina Introdução à Ciências de Dados - Univesp
# + [markdown] id="3Ix3XygJ2oSD"
# Prof. Dr. <NAME>
# + id="3jyIVR8v2xmC"
import pandas as pd
import numpy as np
# + [markdown] id="L0LKTVYe2_P-"
# Series
# + id="E01SoVmE3Cnm"
s1=pd.Series([1,2,-5,0])
# + colab={"base_uri": "https://localhost:8080/"} id="VGXwrl7J3I5J" outputId="ef995717-389b-4d14-d911-811dcecd5344"
s1
# + colab={"base_uri": "https://localhost:8080/"} id="5kR2wFaL3KS1" outputId="c5e6699a-c9b8-4ac7-ca85-21e5a59cb827"
s1.values
# + colab={"base_uri": "https://localhost:8080/"} id="Hy02FAiD3QL-" outputId="73cd5a9f-1f53-43d3-bdc4-07f9ae0583bf"
s1.index
# + id="V9_BirfJ3Sss"
s2=pd.Series([1,2,-5,0], index=['a', 'b', 'c', 'd'])
# + colab={"base_uri": "https://localhost:8080/"} id="SJ131ixj3lk0" outputId="5bfbf05a-cc76-4236-92f9-c93c068fde6e"
s2
# + colab={"base_uri": "https://localhost:8080/"} id="rVI8Gs8538WP" outputId="2f10be22-cd2d-4e46-d6f5-efd2d9e01788"
s2.index
# + id="nIAZrDmf4KsG"
s2['a'] =1000
# + colab={"base_uri": "https://localhost:8080/"} id="nELew9G34RaU" outputId="75e4ffaf-bd3d-4011-a7a1-3cfd8a3d12cb"
s2
# + [markdown] id="lbDVN6O94XFT"
# Comparação
# + colab={"base_uri": "https://localhost:8080/"} id="PJTQTBUN4SKF" outputId="e2a7638e-d64a-4bc3-badf-4b463f4b40ac"
s2[s2>0]
# + [markdown] id="zH10Rb465RCY"
# Algebra
# + colab={"base_uri": "https://localhost:8080/"} id="UkYZpKrz5OoP" outputId="4ab746fb-1602-4699-eb85-c196a25e02de"
s2*2
# + colab={"base_uri": "https://localhost:8080/"} id="IksFLawU5UUj" outputId="de6e9f86-570c-4678-d452-8e67dc177ddb"
s2
# + colab={"base_uri": "https://localhost:8080/"} id="c-zdKFmk5Wbq" outputId="5b5bdd26-16ef-489b-c4cb-f7cfb5a3e0b7"
s2.isnull()
# + [markdown] id="KF_H_Nse5dlU"
# Dataframe
# + id="YDW2J2005ayV"
dados={'estado':['SP', 'MG', 'PR', 'SP', 'MG', 'PR'], 'ano':[2019, 2019, 2019, 2020, 2020, 2020], 'pop':[45.9, 21.2,16.9,46.6,21.4,17.3]}
df1=pd.DataFrame(dados)
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="hkLKXqEn6ROH" outputId="cbd4fbf7-2331-43dd-e3dd-3b11e0de756f"
df1
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="CZoRsn0G6Sjm" outputId="17ad0bf9-34da-46e3-9394-ae1f38eb7f1b"
df1.head(2)
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="55ff-Jtr6mBL" outputId="e2321771-c083-491d-c7e0-0f782a997e8b"
df1.tail(2)
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="cy1BwjUf6qk8" outputId="094f089e-59bd-45e8-87ac-34fd76d5ca53"
df1.sample(2)
# + [markdown] id="1nzSJYi-7Zhq"
# Novo DF a partir do anterior
# + id="sHBOBfQ96zVE"
df2=pd.DataFrame(dados, columns=['ano', 'estado', 'pop'])
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="WnjLsxp27su_" outputId="bef44555-a820-4120-f32e-efba8a59c43d"
df2
# + colab={"base_uri": "https://localhost:8080/"} id="JBVcvU7G7uBz" outputId="4dd77a66-66b2-4fd9-c8a3-4fea27d3add4"
df2['estado']
# + colab={"base_uri": "https://localhost:8080/"} id="5PEi-hmW70c8" outputId="b563c8d3-fc2e-42c8-81b5-85db828fb9f5"
df2.ano
# + colab={"base_uri": "https://localhost:8080/"} id="r2y6Gfc574iQ" outputId="fafef994-1740-4175-b0b5-f30cbb79c36a"
df2.dtypes
# + [markdown] id="0K36G2VP8EJc"
# Atribuir valores
# + id="CWYcdIk87_ch"
df2['estimativa']=50
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="06_wdh4-8Jsr" outputId="0db5472f-2890-4ebe-960a-40d975531b9c"
df2
# + id="rd-PYABY8Kd7"
df2['estimativa']=np.arange(6)
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="XPK-jGSv8TU8" outputId="d6db3970-80b0-401c-9410-a59f9cf06f40"
df2
# + id="fiBq0gPq8dWN"
df3=df2
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="FvHNokFe8glr" outputId="4b8c0cc3-7208-4fd5-eec3-f706fd83261c"
df3
# + id="eO9bbyO58hgF"
df3=df2['ano']
# + colab={"base_uri": "https://localhost:8080/"} id="3iQyYxB48pLT" outputId="60d2b2ac-491a-42b0-b08c-10b914c6c56f"
df3
# + id="gOxp0lea8q08"
| Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 解压训练/测试样本和标签文件
# +
import os
import gzip
# Decompress the four MNIST IDX archives (train/test images and labels)
# into raw byte strings for the manual header parsing below.
mnist_dir = "./data" # folder containing the downloaded MNIST archives
X_train_fpath = os.path.join(mnist_dir, "train-images-idx3-ubyte.gz")
Y_train_fpath = os.path.join(mnist_dir, "train-labels-idx1-ubyte.gz")
X_test_fpath = os.path.join(mnist_dir, "t10k-images-idx3-ubyte.gz")
Y_test_fpath = os.path.join(mnist_dir, "t10k-labels-idx1-ubyte.gz")
with gzip.open(X_train_fpath, 'rb') as f:
    X_train_content = f.read()
with gzip.open(Y_train_fpath, 'rb') as f:
    Y_train_content = f.read()
with gzip.open(X_test_fpath, 'rb') as f:
    X_test_content = f.read()
with gzip.open(Y_test_fpath, 'rb') as f:
    Y_test_content = f.read()
# -
# # 获取训练样本和标签文件信息
# +
# Parse the IDX headers by hand: bytes 0-3 hold the magic number, bytes 4-7
# the item count, and (for image files) bytes 8-15 the row/column dimensions,
# all big-endian — hence the .hex() / int(..., 16) conversions.
X_train_bytes = len(X_train_content) # size in bytes of the decompressed training image file
X_train_magic_number = X_train_content[:4].hex() # magic number 0x00000803 (2051)
X_train_images = int(X_train_content[4:8].hex(), 16) # number of images: 60,000
Y_train_bytes = len(Y_train_content) # size in bytes of the decompressed training label file
Y_train_magic_number = Y_train_content[:4].hex() # magic number 0x00000801 (2049)
Y_train_labels = int(Y_train_content[4:8].hex(), 16) # number of labels: 60,000, each in [0, 9]
num_rows = int(X_train_content[8:12].hex(), 16) # image height: 28
num_cols = int(X_train_content[12:16].hex(),16) # image width: 28
num_pixels = num_rows * num_cols # pixels per image: 784
print(X_train_bytes, Y_train_bytes)
print(X_train_magic_number, int(X_train_magic_number,16), Y_train_magic_number, int(Y_train_magic_number,16))
print(X_train_images, Y_train_labels)
print(num_rows)
print(num_cols)
print(num_pixels)
# -
# # 查看训练样本和标签
# +
import random
import numpy as np
import matplotlib.pyplot as plt
X_train = []
Y_train = []
for i in range(16, X_train_bytes, num_pixels):
# 读取784个字节并转换为numpy数组形式,再reshape为28*28
ndata = np.frombuffer(X_train_content[i:i+num_pixels], dtype=np.uint8).reshape(28,28,1)
X_train.append(ndata)
for i in range(8, Y_train_bytes):
Y_train.append(Y_train_content[i])
X_train = np.array(X_train)
Y_train = np.array(Y_train, dtype=np.int64)
# 随机挑选8个看一下
choices = random.sample(range(X_train_images), 8)
labels = Y_train[choices]
print(choices)
for i in range(8):
plt.subplot(2, 4, i+1)
plt.imshow(X_train[choices[i]], cmap='gray')
plt.title("{i}".format(i=labels[i]))
plt.show()
# -
# # 将训练集划分为训练集和验集
# +
# # !conda install scikit-learn # 报错,找不到sklearn模块时取消注释
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.3, random_state=123)
print(X_train.shape, X_val.shape)
print(Y_train.shape, Y_val.shape)
# -
# # 定义网络
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small CNN for 28x28 single-channel digit classification (10 classes).

    Layer output sizes follow N = (W - F + 2P) / S + 1.
    """
    def __init__(self):
        super().__init__()
        # N = (W - F + 2P) / S + 1
        self.conv1 = nn.Conv2d(1, 32, 3, 1)   # [32, 26, 26]
        self.conv2 = nn.Conv2d(32, 64, 3, 1)  # [64, 24, 24]
        # max_pool2d -> [64, 12, 12]
        self.dropout1 = nn.Dropout(0.25)
        self.fc1 = nn.Linear(64 * 12 * 12, 128)  # [128]
        self.dropout2 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(128, 10)  # [10]

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, (2, 2))
        x = self.dropout1(x)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = self.dropout2(x)
        # BUG FIX: the original applied ReLU to the final layer's output.
        # Clamping logits to be non-negative before CrossEntropyLoss makes
        # many classes indistinguishable and hurts training; return the raw
        # logits instead.
        return self.fc2(x)

    def num_flat_features(self, x):
        """Number of features when x is flattened, excluding the batch dim."""
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
net = Net()
print(net)
# -
# # 定义数据集
# https://pytorch.org/tutorials/beginner/basics/data_tutorial.html?highlight=dataset
# +
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
class MyDataset(Dataset):
    """Minimal map-style dataset wrapping parallel sample/label sequences.

    Optional callables `transform` / `target_transform` are applied to the
    sample and the label respectively on each access.
    """

    def __init__(self, X, Y, transform=None, target_transform=None):
        self.X = X
        self.Y = Y
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        sample, label = self.X[idx], self.Y[idx]
        if self.transform:
            sample = self.transform(sample)
        if self.target_transform:
            label = self.target_transform(label)
        return sample, label
transform = transforms.ToTensor()
datasets = {
'train': MyDataset(X_train, Y_train, transform),
'val': MyDataset(X_val, Y_val, transform)
}
dataset_sizes = {
'train': len(datasets['train']),
'val': len(datasets['val'])
}
batch_size = 128
dataloaders = {
'train': DataLoader(datasets['train'], batch_size=batch_size, shuffle=True),
'val': DataLoader(datasets['val'], batch_size=batch_size, shuffle=False)
}
# -
# # 训练
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
# +
import time
import copy
from torch.optim import Adam, lr_scheduler
# Select GPU when available; CUDA_LAUNCH_BLOCKING makes CUDA errors surface
# synchronously, which gives accurate stack traces while debugging.
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=1e-3)
epochs = 10
since = time.time()
# Track the best validation weights so the final model is the best epoch,
# not merely the last one.
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(epochs):
    print('Epoch {}/{}'.format(epoch, epochs - 1))
    print('-' * 10)
    # Each epoch runs a training pass followed by a validation pass.
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()
        else:
            model.eval()
        running_loss = 0.0
        running_corrects = 0
        for inputs, labels in dataloaders[phase]:
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            # forward; gradients are tracked only during the training phase
            with torch.set_grad_enabled(phase == 'train'):
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)
                # backward
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
            # statistics: loss.item() is the batch mean, so re-weight by batch size
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
        epoch_loss = running_loss / dataset_sizes[phase]
        epoch_acc = running_corrects.double() / dataset_sizes[phase]
        print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
        # Snapshot the weights whenever validation accuracy improves.
        if phase == 'val' and epoch_acc > best_acc:
            best_acc = epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())
    print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# -
# # 测试
# +
X_test_bytes = len(X_test_content)
Y_test_bytes = len(Y_test_content)
X_test = []
Y_test = []
for i in range(16, X_test_bytes, num_pixels):
ndata = np.frombuffer(X_test_content[i:i+num_pixels], dtype=np.uint8).reshape(28,28,1)
X_test.append(ndata)
for i in range(8, Y_test_bytes):
Y_test.append(Y_test_content[i])
X_test = np.array(X_test)
Y_test = np.array(Y_test, dtype=np.int64)
testset = MyDataset(X_test, Y_test, transform)
test_dataloader = DataLoader(testset, batch_size=batch_size, shuffle=False)
total = len(testset)
corrects = 0
# 读取最佳权重
model.load_state_dict(best_model_wts)
model.eval()
with torch.no_grad():
for inputs, labels in test_dataloader:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
corrects += torch.sum(preds == labels.data)
print("Test Acc: {:.4f}".format(corrects.double() / total))
# -
# # 保存模型
path = "mnist.pt"
torch.save(best_model_wts, path)
| code/2.HelloWorld/hello_world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="O-82JAxpWHys" outputId="f916b215-6a48-428d-b3f2-d3515c8b1782"
from google.colab import drive
drive.mount('/proj')
# + colab={"base_uri": "https://localhost:8080/"} id="lwpbOzCBWqjJ" outputId="b9358d00-0d28-4ac1-82cb-0c9a3cbadb01"
# !pip install -q -U umap-learn[plot] hdbscan tensorflow-addons #opencv-python==4.5.1.48
# + id="6OnwBWQ-XK38"
#Imports
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
import io
import numpy as np
###UMAP seems to take a while to import due to a pynndescent dependency (which uses numba).
import umap
import umap.plot
import hdbscan
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import seaborn as sns
import shutil
from pathlib import Path
from sklearn.neighbors import KNeighborsClassifier
# + id="Hw-u4KdTX9Ns"
CHECKPOINT_DIR="/proj/MyDrive/158780_Project_Dolphin_Computer_Vision/models/triplet_loss_dolphin/training/"
SEED = 100
VAL_PROP = 0.15
DIM=128
CHANNELS=3
data_dir = "/content/final_pigmentation_catalogue_2016"
test_dir = "/content/test"
# + id="Tuf_PA-DEPKv"
shutil.rmtree(data_dir, ignore_errors=True)
# + colab={"base_uri": "https://localhost:8080/"} id="h-vLGLnhYE1s" outputId="064f349d-ede4-4d01-ee78-1d65ed1322fe"
# !unzip /proj/MyDrive/158780_Project_Dolphin_Computer_Vision/images/final_pigmentation_catalogue_2016.zip -d /content
# + [markdown] id="6pXlbfgD9Qry"
# We will build the stratified testing set with 10% of the dataset per class. As far as I'm aware, keras can't be used for this.
# + id="rTX8t16N6Z3L"
# Build a stratified held-out test set: move ~10% of each class's images
# into /content/test/<class>.
shutil.rmtree(test_dir, ignore_errors=True)
for dir in Path(data_dir).glob('*'):
  class_test_dir = f"/content/test/{dir.name}"
  Path(class_test_dir).mkdir(parents = True, exist_ok = True)
  images = list(dir.glob('*.png'))
  # 10% of this class, rounded to the nearest whole image.
  test_prop = round(len(images) * 0.1)
  for i in range(test_prop):
    test_file = random.choice(images)
    # Re-draw if this file was already moved in a previous iteration.
    # NOTE(review): `images` is never updated after a move, so already-moved
    # files remain candidates; with very small classes this re-draw loop can
    # spin for a while — verify.
    while os.path.exists(f"{class_test_dir}/{test_file.name}"): test_file = random.choice(images)
    shutil.move(str(test_file), class_test_dir)
# + colab={"base_uri": "https://localhost:8080/"} id="GwMjLmzFDNYz" outputId="af1027b1-d2d3-4aa6-9ce2-a2335ff23b7f"
# !ls /content/test/0004
# + [markdown] id="YW3T1LXbcqux"
# We'll use transfer learning with a ResNet50 backbone with an L2 normaliser and online image augmentation. We load ResNet50, inialised with imagenet weights. Critically, the final dense layer has no activiation function - we don't want to perform any non-linear transformations prior to nornalisation.
#
# We use a 2048 unit dense layer, using Leaky ReLu as its activation function. This is followed by batch normalisation for regularisation. Next, a 1024 unit dense layer with ,batch normalisation, a final 256 unit dense layer with no activation function, and L2 normalisation.
# + id="8FwIC8qmaVil"
batch_size=128
from tensorflow.keras.applications.resnet import ResNet50
base_model = ResNet50(include_top=False, pooling='avg', weights='imagenet')
base_model.trainable = False
model = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal", input_shape=(DIM,DIM,CHANNELS)),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.15),
tf.keras.layers.experimental.preprocessing.RandomZoom(0.15),
tf.keras.layers.experimental.preprocessing.RandomTranslation((-.15, .15), (-.15, .15), fill_mode='reflect',interpolation='bilinear'),
tf.keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=(DIM,DIM,CHANNELS)),
base_model,
tf.keras.layers.Dense(2048, activation=tf.keras.layers.LeakyReLU(alpha=0.3)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(1024, activation=tf.keras.layers.LeakyReLU(alpha=0.3)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(256, activation=None),
tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
])
# + colab={"base_uri": "https://localhost:8080/"} id="0EPBfrWcodNq" outputId="f008d611-be04-4490-d9b3-674d71eade32"
model.build(input_shape=(DIM,DIM,CHANNELS))
model.summary()
# + [markdown] id="0Gh3dlFPcnvl"
# We load the dataset, visualise the classes, and apply both pre-fetching and caching for performance.
# + colab={"base_uri": "https://localhost:8080/"} id="tfUkMTBUwRJe" outputId="e409288c-3943-46ee-d872-d8d3a83ace51"
def build_data(batch_size):
    """Construct the four image datasets used by this notebook.

    Returns (train, val, full, test) `tf.data.Dataset`s built from the
    directory trees at `data_dir` / `test_dir`. Train/val share the same
    seeded `VAL_PROP` split; `full` and `test` keep the library's default
    batch size. Relies on the module-level DIM, SEED, VAL_PROP, data_dir
    and test_dir.
    """
    # Keyword arguments common to every dataset (class_names, follow_links
    # and interpolation are left at their defaults, matching the originals).
    shared_kwargs = dict(
        labels="inferred",
        label_mode="int",
        color_mode="rgb",
        image_size=(DIM, DIM),
        shuffle=True,
        seed=SEED,
    )
    train_dataset = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        batch_size=batch_size,
        validation_split=VAL_PROP,
        subset="training",
        interpolation="bilinear",
        **shared_kwargs,
    )
    val_dataset = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        batch_size=batch_size,
        validation_split=VAL_PROP,
        subset="validation",
        **shared_kwargs,
    )
    full_dataset = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        **shared_kwargs,
    )
    test_dataset = tf.keras.preprocessing.image_dataset_from_directory(
        test_dir,
        **shared_kwargs,
    )
    return train_dataset, val_dataset, full_dataset, test_dataset
def prefetch_data(batch_size):
    """Build the datasets and wrap them with cache/shuffle/prefetch.

    Validation data is cached and prefetched but deliberately not shuffled.
    """
    train_ds, val_ds, full_ds, test_ds = build_data(batch_size)
    autotune = tf.data.AUTOTUNE
    train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=autotune)
    val_ds = val_ds.cache().prefetch(buffer_size=autotune)
    full_ds = full_ds.cache().shuffle(1000).prefetch(buffer_size=autotune)
    test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=autotune)
    return train_ds, val_ds, full_ds, test_ds
# Build un-prefetched datasets first so `.class_names` is still accessible.
train_dataset, val_dataset, full_dataset, test_dataset = build_data(68)
# + colab={"base_uri": "https://localhost:8080/", "height": 399} id="VODR1haNwW7p" outputId="bbbbb036-636a-4c24-f8e2-11cac33bf9f5"
import matplotlib.pyplot as plt
class_names = train_dataset.class_names
# Show the first six images of one batch with their class labels.
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
# + colab={"base_uri": "https://localhost:8080/"} id="h56CGG0Twnf_" outputId="6e3bf4d4-27c2-458c-baaa-b66b2ceba709"
# Rebind to the cached/prefetched pipeline used for actual training.
train_dataset, val_dataset, full_dataset, test_dataset = prefetch_data(68)
# + [markdown] id="PFZpa-UOd3YI"
# We first perform online learning with triplet hard loss and a soft margin of 2.
# + id="rKE2IBbsKIG1"
# Adamax at 1e-3 with hard-mining triplet loss; soft=True uses the smooth
# log(1+exp(.)) formulation (the margin argument is then not used as a cutoff).
model.compile(
    optimizer=tf.keras.optimizers.Adamax(0.001),
    loss=tfa.losses.TripletHardLoss(2, soft = True)
)
# + id="66jr6kj2RmFh"
def scheduler(epoch, lr):
    """Learning-rate schedule: hold the LR for the first 10 epochs, then
    decay it by a factor of exp(-0.1) every epoch thereafter."""
    if epoch >= 10:
        return lr * tf.math.exp(-0.1)
    return lr
# + colab={"base_uri": "https://localhost:8080/"} id="plHC1V4IgZyP" outputId="c4fd9d60-9240-4c49-bfc8-481f3bf83983"
# Train with early stopping (best weights restored), best-only weight
# checkpoints, and both a fixed schedule and plateau-based LR reduction.
history = model.fit(
    train_dataset,
    validation_data = val_dataset,
    epochs = 50,
    callbacks = [
        tf.keras.callbacks.LearningRateScheduler(scheduler),
        tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', restore_best_weights = True, patience = 30),
        tf.keras.callbacks.ModelCheckpoint(f"{CHECKPOINT_DIR}/training_semi_resnet_aug", monitor= 'val_loss', save_best_only = True, mode = "min", save_weights_only = True),
        tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=10, mode="min", min_delta=0.0001, min_lr = 0)
    ]
)
# + [markdown] id="wptw0En_2hyo"
# We visualise the training results.
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="GZCkagHdw7Bz" outputId="514465e9-5e5a-40e4-fee7-3b00400f02c8"
def plot_loss(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    train_curve = history.history['loss']
    valid_curve = history.history['val_loss']
    xs = range(len(train_curve))
    plt.plot(xs, train_curve, label='Training Loss')
    plt.plot(xs, valid_curve, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()
plot_loss(history)
# + [markdown] id="rmVG4Z5b2oNu"
# Not bad, but we consistently reach a loss plateau.
# + id="bDtc4cIZxpHi"
def fit_model(epochs, batch_size):
    """Resume training from the best checkpoint with a fresh data pipeline.

    Rebuilds the cached/prefetched datasets at `batch_size`, restores the
    best weights saved so far, and continues fitting with the same callback
    stack used in the initial run. Returns the Keras History.
    """
    train_dataset, val_dataset, full_dataset, test_dataset = prefetch_data(batch_size)
    model.load_weights(f"{CHECKPOINT_DIR}/training_semi_resnet_aug")
    training_callbacks = [
        tf.keras.callbacks.LearningRateScheduler(scheduler),
        tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', restore_best_weights = True, patience = 30),
        tf.keras.callbacks.ModelCheckpoint(f"{CHECKPOINT_DIR}/training_semi_resnet_aug", monitor= 'val_loss', save_best_only = True, mode = "min", save_weights_only = True),
        tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=10, mode="min", min_delta=0.0001, min_lr = 0),
    ]
    return model.fit(
        train_dataset,
        validation_data = val_dataset,
        epochs = epochs,
        callbacks = training_callbacks,
    )
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="s9kXhyiGyk_4" outputId="76ed1cc0-a194-4da3-b312-de2c5458d79e"
# Continue training from the checkpoint at batch size 64.
history = fit_model(50, 64)
plot_loss(history)
# + [markdown] id="tQT4PD8AKnjG"
# It seems we've hit a loss plateau. We'll try decreasing the learning rate.
#
# + id="I1wLzTSmWMEQ"
def compile_model(lr, loss):
    """Re-compile the global `model` with an Adamax optimizer at the given
    learning rate and the supplied loss function."""
    optimizer = tf.keras.optimizers.Adamax(lr)
    model.compile(optimizer=optimizer, loss=loss)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="4_6gS0v_NxJp" outputId="92ce2cdc-d67f-4c56-89dc-dd391937db7a"
# Switch to hard (non-soft) triplet loss with margin 2 at a tiny LR.
compile_model(1e-07, tfa.losses.TripletHardLoss(2, soft = False))
history = fit_model(50, 128)
plot_loss(history)
# + [markdown] id="mlVWjG61y3qm"
# Again, we've reached a plateau.
# + [markdown] id="MET8tyKC1lkQ"
# We'll switch back to semi-hard loss
# + [markdown] id="9sCPjwwZxqyb"
# So, we will generate the embeddings, visualise them, and use a KNN classifier.
# + colab={"base_uri": "https://localhost:8080/"} id="wAUoSwKyzfXn" outputId="0bbf8f14-1238-4149-f1b8-a96a0d68f79c"
# Produce 256-d L2-normalised embeddings for the full and test sets.
train_embeddings = model.predict(full_dataset)
test_embeddings = model.predict(test_dataset)
train_embeddings.shape
# + [markdown] id="wv40JpwtNbpD"
# We retrieve the class labels.
# + colab={"base_uri": "https://localhost:8080/"} id="a7E8iN1eMHgV" outputId="594ebc23-ebc1-4220-aaf3-606f8a503dee"
# NOTE(review): these datasets were built with shuffle=True and wrapped in
# .shuffle(1000); unless iteration order is identical between the predict()
# pass above and this pass, y_train/y_test may not align row-for-row with
# the embeddings — verify before trusting downstream metrics.
y_train = np.concatenate([y for x, y in full_dataset], axis=0)
y_test = np.concatenate([y for x, y in test_dataset], axis=0)
y_train[0:100]
# + [markdown] id="wr-vr0t9Ty2c"
# Next, we plot the results.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="Vymd_lLdN56D" outputId="4a4c672f-30ef-41c6-e817-28796b3a2d8c"
# Supervised UMAP projection of the embeddings (labels passed to fit),
# then density-based clustering on the 2-D projection.
mapper=umap.UMAP( n_neighbors = 100, min_dist= 0.1).fit(train_embeddings, y_train)
cluster = hdbscan.HDBSCAN(
    algorithm ='best',
    approx_min_span_tree = True,
    gen_min_span_tree = False,
    leaf_size = 40,
    metric='euclidean',
    min_cluster_size = 25,
    min_samples = 25,
    p = None
).fit(mapper.embedding_)
plt.title("Clustered UMAP visualisation.")
# Colour points by HDBSCAN cluster id (-1 marks noise points).
scatter = plt.scatter(
    mapper.embedding_[:, 0],
    mapper.embedding_[:, 1],
    c = cluster.labels_,
    s = 0.1,
)
plt.legend(
    handles = scatter.legend_elements()[0],
    labels = pd.Series(cluster.labels_.tolist()).astype("string").sort_values().unique(),
    loc="right",
    bbox_to_anchor=(1.2, 0.5)
)
plt.xlabel("D1")
plt.ylabel("D2")
plt.show();
# + [markdown] id="kfbw22vJUOVF"
# Not so good. The clusters are not well formed.
# + colab={"base_uri": "https://localhost:8080/"} id="FZTTN8DgRoq9" outputId="c4e38923-6f86-41a0-ba6e-5f4190eee6cd"
# 100-neighbour KNN in the UMAP space; score on the projected test embeddings.
knn=KNeighborsClassifier(100)
knn.fit(mapper.embedding_, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="XxGpCevpTfsi" outputId="8b06d47d-384c-4327-be32-94044ebf811a"
knn.score(mapper.transform(test_embeddings), y_test)
# + [markdown] id="3z0FykwMUTaJ"
# The KNN classifier has not done a good job.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="asqbnozyUlzr" outputId="3fadf98c-c5c8-4062-81d2-4a8e63e9b1ac"
# %%shell
jupyter nbconvert --to html /content/4_3_triplet_loss_dolphin_resnet_aug.ipynb
| notebooks/5_3_triplet_loss_dolphin_resnet_aug.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Unit 8: Hybrid Recommender Model using both Collaborative Filtering and Content-based Filtering using a Factorization Machine
# In this section, we combine CF and CBF.
#
# Therefore, we simply add the one-hot-encoded user and item IDs to the data. Thus, the model is capable of factorizing the similarities in rating and features for rating prediction. This combination is called hybrid as it combines two recommenders.
# +
from collections import OrderedDict
import itertools
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pyfm import pylibfm
from scipy import sparse
from sklearn.metrics import mean_squared_error, mean_absolute_error
# -
from recsys_training.data import Dataset, genres
from recsys_training.evaluation import get_relevant_items
from recsys_training.utils import get_sparsity
ml100k_ratings_filepath = '../../data/raw/ml-100k/u.data'
ml100k_item_filepath = '../../data/raw/ml-100k/u.item'
ml100k_user_filepath = '../../data/raw/ml-100k/u.user'
# ## Load Data
# Load MovieLens-100k ratings and split into train/test with a fixed seed.
data = Dataset(ml100k_ratings_filepath)
data.rating_split(seed=42)
user_ratings = data.get_user_ratings()
# Pipe-separated raw metadata files; column names supplied manually.
item_feat = pd.read_csv(ml100k_item_filepath, sep='|', header=None,
                        names=['item', 'title', 'release', 'video_release', 'imdb_url']+genres,
                        engine='python')
user_feat = pd.read_csv(ml100k_user_filepath, sep='|', header=None,
                        names=['user', 'age', 'gender', 'occupation', 'zip'])
# ## User and Item Content (Features)
# ### Preprocessing
# #### Items
# We keep the following information for items:
# * release year
# * genres
def min_max_scale(val, bounds):
    """Linearly scale `val` into [0, 1] given `bounds` = {'min': …, 'max': …}.

    Returns 0.0 when the range is degenerate (max == min) instead of
    raising ZeroDivisionError, so constant columns scale safely.
    """
    span = bounds['max'] - bounds['min']
    if span == 0:
        return 0.0
    return (val - bounds['min']) / span
# +
# Infer the release year
idxs = item_feat[item_feat['release'].notnull()].index
item_feat.loc[idxs, 'release_year'] = item_feat.loc[idxs, 'release'].str.split('-')
# val[2]: assumes the release string is 'DD-MMM-YYYY' so the year is the
# third component — TODO confirm against the raw u.item file.
item_feat.loc[idxs, 'release_year'] = item_feat.loc[idxs, 'release_year'].apply(lambda val: val[2]).astype(int)
# Impute median release year value for the items with missing release year
top_year = item_feat.loc[idxs, 'release_year'].astype(int).describe()['50%']
idx = item_feat[item_feat['release'].isnull()].index
item_feat.loc[idx, 'release_year'] = top_year
# Min-max scale the release year
item_year_bounds = {'min': item_feat['release_year'].min(),
                    'max': item_feat['release_year'].max()}
item_feat['release_year'] = item_feat['release_year'].apply(
    lambda year: min_max_scale(year, item_year_bounds))
# Drop other columns
item_feat.drop(['title', 'release', 'video_release', 'imdb_url'], axis=1, inplace=True)
# -
# #### users
# We keep the following information for users:
# * age
# * gender
# * occupation
# * zip-code
# +
# Min-max scale the age
user_age_bounds = {'min': user_feat['age'].min(),
                   'max': user_feat['age'].max()}
user_feat['age'] = user_feat['age'].apply(lambda age: min_max_scale(age, user_age_bounds))
# Transform gender characters to numerical values (categories)
genders = sorted(user_feat['gender'].unique())
user_gender_map = dict(zip(genders, range(len(genders))))
user_feat['gender'] = user_feat['gender'].map(user_gender_map)
# Transform occupation strings to numerical values (categories)
occupations = sorted(user_feat['occupation'].unique())
user_occupation_map = dict(zip(occupations, range(len(occupations))))
user_feat['occupation'] = user_feat['occupation'].map(user_occupation_map)
# Transform the zip codes to categories keeping the first three digits and impute for missing
# Non-numeric zips are replaced with a sentinel before the integer division.
idxs = user_feat[~user_feat['zip'].str.isnumeric()].index
user_feat.loc[idxs, 'zip'] = '00000'
zip_digits_to_cut = 3
user_feat['zip'] = user_feat['zip'].apply(lambda val: int(val) // 10 ** zip_digits_to_cut)
# -
# In addition, we infer profiles by combining item information with rating data for each user to get features that represent the users' preferred genres and film age
def user_profiler(group):
    """Summarise one user's liked items: mean genre indicator per genre plus
    mean/std/median of the (scaled) release year."""
    genre_profile = group[genres].mean()
    year_profile = group['release_year'].describe()[['mean', 'std', '50%']]
    return pd.concat((genre_profile, year_profile), axis=0)
def get_user_profiles(ratings: pd.DataFrame,
                      item_feat: pd.DataFrame,
                      min_rating: float = 4.0) -> pd.DataFrame:
    """Build per-user preference profiles from positively rated items.

    Keeps only ratings >= `min_rating`, joins item features, and aggregates
    them per user via `user_profiler`. The '50%' percentile column is
    renamed to 'median' in the result.
    """
    liked = ratings.loc[ratings.rating >= min_rating, ['user', 'item']]
    liked = liked.merge(item_feat, on='item', how='left').drop(['item'], axis=1)
    profiles = liked.groupby('user').apply(user_profiler).reset_index()
    return profiles.rename(columns={'50%': 'median'})
# Finally, we join the original user information with their profiles' information and one-hot-encode categorical information
# +
# Join learned profiles onto raw user features; one-hot-encode the
# categorical occupation/zip columns.
profiles = get_user_profiles(data.train_ratings, item_feat)
user_feat = user_feat.merge(profiles, on='user', how='left')
occupation_1H = pd.get_dummies(user_feat['occupation'], prefix='occupation')
zip_1H = pd.get_dummies(user_feat['zip'], prefix='zip')
user_feat.drop(['occupation', 'zip', ], axis=1, inplace=True)
user_feat = pd.concat([user_feat, occupation_1H, zip_1H], axis=1)
# Users with no positive training ratings get all-zero profile columns.
user_feat.fillna(0, inplace=True)
# -
# We remove the user/item id columns and replace the current dataframe indices with their values
# +
user_feat.index = user_feat['user'].values
user_feat.drop('user', axis=1, inplace=True)
item_feat.index = item_feat['item'].values
item_feat.drop('item', axis=1, inplace=True)
# -
# ## Factorization Machine for a Hybrid Recommender
# [<NAME>: Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
#
# [pyFM - Factorization Machines in Python](https://github.com/coreylynch/pyFM)
# #### Create Feature Matrices
# fetch content information for all observed user-item rating combinations
# Row-align content-based features with each observed (user, item) rating
# by indexing the feature frames with the rating tables' id columns.
user_cb_feat_train = user_feat.loc[data.train_ratings.user.values].values
user_cb_feat_test = user_feat.loc[data.test_ratings.user.values].values
item_cb_feat_train = item_feat.loc[data.train_ratings.item.values].values
item_cb_feat_test = item_feat.loc[data.test_ratings.item.values].values
# 
#
# **Task:** Implement additional arrays for user and item IDs and adjust the design matrices `X_train` and `X_test` accordingly.
def one_hot_encode_ids(ids: np.ndarray, length: int) -> np.ndarray:
    """Return a dense one-hot matrix of shape (len(ids), length).

    Row i holds a single 1.0 in column ids[i]; every other entry is 0.0.
    IDs must already be 0-based.
    """
    n_rows = ids.shape[0]
    encoded = np.zeros((n_rows, length))
    encoded[np.arange(n_rows), ids] = 1
    return encoded
# Subtract 1 to turn 1-base-indexed into 0-base-indexed IDs for 0-base-indexed array
user_cf_feat_train = one_hot_encode_ids(data.train_ratings.user.values-1, data.n_users)
user_cf_feat_test = one_hot_encode_ids(data.test_ratings.user.values-1, data.n_users)
item_cf_feat_train = one_hot_encode_ids(data.train_ratings.item.values-1, data.n_items)
item_cf_feat_test = one_hot_encode_ids(data.test_ratings.item.values-1, data.n_items)
# concatenate user and item content information to form design matrices
# and convert to sparse matrix in Compressed Sparse Row (CSR) format
# Column layout: [user CB | item CB | user one-hot | item one-hot] — any code
# that builds prediction inputs must reproduce this exact layout.
X_train = np.concatenate((user_cb_feat_train, item_cb_feat_train,
                          user_cf_feat_train, item_cf_feat_train), axis=1)
X_train = sparse.csr_matrix(X_train)
X_test = np.concatenate((user_cb_feat_test, item_cb_feat_test,
                         user_cf_feat_test, item_cf_feat_test), axis=1)
X_test = sparse.csr_matrix(X_test)
X_train
# Sparsity of Training Data
get_sparsity(X_train)
X_test
# Sparsity of Test Data
get_sparsity(X_test)
# #### Create Target Matrices for Rating Predictions
# pyFM's SGD expects float targets for regression.
y_train = data.train_ratings.rating.values.astype(float)
y_test = data.test_ratings.rating.values
# #### Train Factorization Machine for Rating Prediction as Regressor using pyFM
n_epochs = 30  # number of full stochastic passes through the training data
k = 16
random_seed = 28
fm_hybrid = pylibfm.FM(num_factors=k,
                       num_iter=n_epochs,
                       verbose=True,
                       task="regression",
                       initial_learning_rate=0.001,
                       learning_rate_schedule="optimal",
                       seed=random_seed)
fm_hybrid.fit(X_train, y_train)
# ## Evaluation on Test Set
# Rating-prediction error on the held-out test ratings.
y_pred = fm_hybrid.predict(X_test)
# $MSE$
mean_squared_error(y_test, y_pred)
# $MAE$
mean_absolute_error(y_test, y_pred)
def get_prediction(fm: object, user: int, user_feat: pd.DataFrame, item_feat: pd.DataFrame,
                   items: np.array = None, remove_known_pos: bool = True) -> Dict[int, Dict[str, float]]:
    """Predict ratings for `user` over `items` with the hybrid FM.

    If `items` is None, candidates default to all items (minus the user's
    known positives when `remove_known_pos`). Returns {item: {'pred': score}}
    ordered by descending predicted rating.
    """
    if items is None:
        if remove_known_pos:
            # Predict from unobserved items
            known_items = np.array(list(user_ratings[user].keys()))
            items = np.setdiff1d(data.items, known_items)
        else:
            items = np.array(data.items)
    if type(items) == np.int64:
        items = np.array([items])
    n_items = len(items)
    single_user_cb_feat = user_feat.loc[user].values.reshape(1, -1).repeat(n_items, axis=0)
    all_items_cb_feat = item_feat.loc[items].values
    # BUGFIX: the FM was trained on [user CB | item CB | user one-hot | item
    # one-hot] design matrices; predicting with only the CB columns silently
    # drops the collaborative part and yields wrong scores. Rebuild the full
    # layout here (IDs are 1-based, hence the -1 shift).
    single_user_cf_feat = one_hot_encode_ids(np.full(n_items, user - 1), data.n_users)
    all_items_cf_feat = one_hot_encode_ids(np.asarray(items) - 1, data.n_items)
    input_data = np.concatenate((single_user_cb_feat, all_items_cb_feat,
                                 single_user_cf_feat, all_items_cf_feat), axis=1)
    input_data = sparse.csr_matrix(input_data)
    preds = fm.predict(input_data)
    # Sort candidates by descending predicted rating.
    sorting = np.argsort(preds)[::-1]
    preds = {item: {'pred': pred} for item, pred in
             zip(items[sorting], preds[sorting])}
    return preds
# Spot-check: top predictions for user 1.
predictions = get_prediction(fm_hybrid, 1, user_feat, item_feat)
list(predictions.items())[:10]
def get_recommendations(fm_cb: object,
                        user: int,
                        N: int,
                        user_feat: pd.DataFrame,
                        item_feat: pd.DataFrame,
                        remove_known_pos: bool = True) -> List[Tuple[int, Dict[str, float]]]:
    """Return the top-N (item, prediction) pairs for `user`.

    Relies on `get_prediction` returning items ordered by descending score,
    so the first N entries are the recommendations.
    """
    predictions = get_prediction(fm_cb, user, user_feat, item_feat,
                                 remove_known_pos=remove_known_pos)
    return list(itertools.islice(predictions.items(), N))
get_recommendations(fm_hybrid, 1, N=10, user_feat=user_feat, item_feat=item_feat)
# ## Evaluation
# Precision@N: fraction of each user's top-N recommendations that appear in
# their held-out relevant items.
N = 10
relevant_items = get_relevant_items(data.test_ratings)
# +
users = relevant_items.keys()
prec_at_N = dict.fromkeys(data.users)
for user in users:
    recommendations = get_recommendations(fm_hybrid, user, N,
                                          user_feat=user_feat, item_feat=item_feat)
    recommendations = [val[0] for val in recommendations]
    hits = np.intersect1d(recommendations,
                          relevant_items[user])
    prec_at_N[user] = len(hits)/N
# -
recommendations
# Mean precision@N over users that have relevant test items.
np.mean([val for val in prec_at_N.values() if val is not None])
| notebooks/solutions/8_s_hybrid_fm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3.8
# language: python
# name: python3.8
# ---
# %load_ext autoreload
# %autoreload 2
# !cd `pwd`
# Convert each page of the source PDF to a PNG in the work directory.
from fusus.pdf import pdf2png
name = "Affifi1"
source = f"../_local/source/{name}/{name.lower()}.pdf"
dest = f"../ur/{name}/in"
pdf2png(source, dest, silent=False)
| legacy/notebooks/splitPdf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Load the data into pandas and do some analysis
#I'm using an old python, because the tensorflow wheels aren't available for 3.9+
# !python --version
# Ran this before starting the notebook
# !pip install -r ../requirements.txt --use-deprecated=legacy-resolver > /dev/null
# # Load data set into pyarrow and pandas.
# This will let us do some analysis on the data and perhaps some feature engineering. First, we just want to make sure we're logging the data properly.
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import tensorflow as tf
import pyarrow.compute as pc
# Uncomment the following to disable GPU
#tf.config.set_visible_devices([], 'GPU')
tf.config.get_visible_devices()
# Load the fan-control feature log (a directory-backed parquet dataset).
ds = pq.ParquetDataset("/var/log/fancontrol/featurelog")
table = ds.read()
# +
#print(table)
# -
table.num_rows
# Result unused ('dummy'); presumably kept to exercise/verify sorting by
# timestamp — TODO confirm whether this can be removed.
dummy = pc.sort_indices(table, sort_keys=[('timestamp', 'descending')])
ptable = table.to_pandas()
print(ptable)
max(ptable['power'])
import matplotlib.pyplot as plt
plt.scatter(ptable['fan_rpm'], ptable['label'])
# 'temp' rows are sequences of sensor readings; reduce to max / mean per row.
max_temp = ptable['temp'].apply(lambda a: max(a))
plt.scatter(max_temp, ptable['label'])
from statistics import mean
mean_temp = ptable['temp'].apply(lambda a: mean(a))
plt.scatter(mean_temp, ptable['label'])
# The above is exactly what we would expect since we set the fan speed, aka label, based on the current mean temperature.
plt.scatter(mean_temp, max_temp)
# As expected we see a high correlation between mean and max temperature.
def clamp(num, min_value, max_value):
    """Restrict `num` to [min_value, max_value] (upper bound applied first)."""
    capped = min(num, max_value)
    return max(capped, min_value)
# First power reading per row, clamped to a plausible wattage range.
power = ptable['power'].apply(lambda a: clamp(a[0], 0.0, 99.0))
import statistics
print(statistics.mean(power))
print(statistics.median(power))
print(statistics.variance(power))
plt.scatter(power, mean_temp)
# This shows there is some correlation between current wattage and mean measured temperature. Which one might expect. :)
# Lets see the distribution of idleness. Presumably we have a lot of samples of when the CPU is cold and not doing much.
# We'll have to either weigh our samples or resample the data if we want to use this data for training an ML model.
cpu_idle = ptable['cpu_idle'].apply(lambda a: mean(a))
import seaborn as sns
# Log-scaled histogram so the rare busy samples remain visible.
sns.displot(cpu_idle, binwidth=0.05, log=True)
# Our idleness assumption appears to be backed up by reality.
#
# Let's see how strong the correlation is between idleness and temperature.
# And idleness and power.
plt.scatter(cpu_idle, mean_temp)
plt.scatter(cpu_idle, power)
# There is an inverse correlation as one would expect, but it doesn't appear to be all that strong.
| features/Explore the data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="brGRB_MqrA2O"
# ###In this notebook, I have implemented BERT. Tensorflow tutorial has been followed and suitable changes have been implemented
#
# Note: I do not recommend using BERT in this case since, the data is limited and BERT models are computationally expensive, Colab GPUs have memory limits which restricts optimal training pipeline
# + [markdown] id="vX1Y-O2hrKl4"
# ##Step 1: Load the data and import necessary packages
# + id="VnRwifzin2kQ"
# !cp drive/My\ Drive/finaldata.csv /content
# + id="nG-KxfB5oV7p"
import pandas as pd
import numpy as np
import os
# + id="rjvi76cvoZ8x"
data = pd.read_csv('finaldata.csv')
# + colab={"base_uri": "https://localhost:8080/"} id="02Tm2zSw53Y4" outputId="1d8610c9-efb2-40df-f914-d2cc9fd3c407"
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/", "height": 163} id="11HGhAfqocMS" outputId="8b91d6f6-f155-49b0-fbb2-39a8ec04f86a"
data.head()
# + id="iouUOup0oclH"
# Drop the stray CSV index column.
data.drop('Unnamed: 0',axis = 1,inplace = True)
# + [markdown] id="ZEWopYpTrP4I"
# ##Step 2: Augment Data and 80:20 split into training and validation sets
# + id="mpKSNhP0qrws"
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
# + id="6Qkj82S9nL_s"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding,Dropout,Dense,LSTM,Bidirectional
from tensorflow.keras.optimizers import Adamax
from tensorflow.keras.callbacks import ModelCheckpoint,ReduceLROnPlateau
# + id="oqCype7XrF1l"
# Encode string category labels as integer ids.
le = LabelEncoder()
data['Category'] = le.fit_transform(data['Category'])
# + id="w29KJj3SrPwY"
# Stratified split preserves the per-class proportions.
trainx,testx,trainy,testy = train_test_split(data['Description'],data['Category'],stratify = data['Category'])
# + [markdown] id="33oISNXorl6l"
# ##Step 3:One hot encode target labels
# + id="luTsDQ-M7Gbp"
from tensorflow.keras.utils import to_categorical
train_labels = to_categorical(trainy)
test_labels = to_categorical(testy)
# + [markdown] id="lS40suLgsQvy"
# ##Step 4: Install required libraries for BERT, as mentioned in the tutorial
# + colab={"base_uri": "https://localhost:8080/"} id="gyWpy6HIm_Wg" outputId="bf573c06-113a-41ca-8cfa-33dc18955bb9"
# !pip install -q tensorflow-text
# !pip install -q tf-models-official
# + id="AErVaC2Xm_Zh"
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from official.nlp import optimization
# + [markdown] id="bRypoNNBsWi4"
# ##Step 5: Select desired BERT model and load the model to runtime
# + id="S4kyY_K9n3q-"
bert_model_name = 'bert_en_uncased_L-12_H-768_A-12' #@param ["bert_en_uncased_L-12_H-768_A-12", "bert_en_cased_L-12_H-768_A-12", "bert_multi_cased_L-12_H-768_A-12", "small_bert/bert_en_uncased_L-2_H-128_A-2", "small_bert/bert_en_uncased_L-2_H-256_A-4", "small_bert/bert_en_uncased_L-2_H-512_A-8", "small_bert/bert_en_uncased_L-2_H-768_A-12", "small_bert/bert_en_uncased_L-4_H-128_A-2", "small_bert/bert_en_uncased_L-4_H-256_A-4", "small_bert/bert_en_uncased_L-4_H-512_A-8", "small_bert/bert_en_uncased_L-4_H-768_A-12", "small_bert/bert_en_uncased_L-6_H-128_A-2", "small_bert/bert_en_uncased_L-6_H-256_A-4", "small_bert/bert_en_uncased_L-6_H-512_A-8", "small_bert/bert_en_uncased_L-6_H-768_A-12", "small_bert/bert_en_uncased_L-8_H-128_A-2", "small_bert/bert_en_uncased_L-8_H-256_A-4", "small_bert/bert_en_uncased_L-8_H-512_A-8", "small_bert/bert_en_uncased_L-8_H-768_A-12", "small_bert/bert_en_uncased_L-10_H-128_A-2", "small_bert/bert_en_uncased_L-10_H-256_A-4", "small_bert/bert_en_uncased_L-10_H-512_A-8", "small_bert/bert_en_uncased_L-10_H-768_A-12", "small_bert/bert_en_uncased_L-12_H-128_A-2", "small_bert/bert_en_uncased_L-12_H-256_A-4", "small_bert/bert_en_uncased_L-12_H-512_A-8", "small_bert/bert_en_uncased_L-12_H-768_A-12", "albert_en_base", "electra_small", "electra_base", "experts_pubmed", "experts_wiki_books", "talking-heads_base"]
# Maps only cover the one model actually used; picking any other dropdown
# value would raise KeyError below.
map_name_to_handle = {
    'bert_en_uncased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3'}
map_model_to_preprocess = {
    'bert_en_uncased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'}
tfhub_handle_encoder = map_name_to_handle[bert_model_name]
tfhub_handle_preprocess = map_model_to_preprocess[bert_model_name]
# + id="JzloSYCom_cd"
bert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess)
# + id="ufykzpfBm_fp"
bert_model = hub.KerasLayer(tfhub_handle_encoder)
# + [markdown] id="l3-sXa1ysd3-"
# ##Step 6: Define the keras model and add dense layer for inference
# + id="jX24a0SMm_je"
# Functional model: raw strings -> BERT preprocessing -> fine-tunable BERT
# encoder -> dropout -> 22-way classifier emitting logits (no softmax).
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
encoder_inputs = preprocessing_layer(text_input)
encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
net = outputs['pooled_output']
net = tf.keras.layers.Dropout(0.1)(net)
net = tf.keras.layers.Dense(22, activation=None, name='classifier')(net)
# + id="gF2qg8aJm_ma"
model = tf.keras.Model(text_input, net)
# + [markdown] id="ZmmwUvunsupj"
# Defining loss, accuracy metric and optimizer (BERT uses Adamw optimizer)
# + id="h4VDxMTRm_oz"
# from_logits=True because the classifier head has no activation.
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
metrics = tf.keras.metrics.CategoricalAccuracy()
# + id="qnQaZq0wm_rS"
epochs = 5
num_train_steps = 100 * epochs
num_warmup_steps = int(0.1*num_train_steps)
init_lr = 3e-5
optimizer = optimization.create_optimizer(init_lr=init_lr,
                                          num_train_steps=num_train_steps,
                                          num_warmup_steps=num_warmup_steps,
                                          optimizer_type='adamw')
# + [markdown] id="j54dZhxNs3MH"
# ##Step 7: Compile the model and fit on preprocessed data
# + id="1l3EUFHEm_th"
model.compile(optimizer=optimizer,
              loss=loss,
              metrics=metrics)
# + colab={"base_uri": "https://localhost:8080/"} id="WGDrDPpxpTYz" outputId="12fb42d7-982f-4402-867f-d8665697bfd6"
# NOTE(review): the optimizer's decay schedule was sized for epochs=5
# (num_train_steps=500) but fit() runs 10 epochs — confirm this mismatch
# is intentional (LR will have fully decayed partway through training).
hist = model.fit(trainx,train_labels,validation_data = (testx,test_labels),batch_size=32,epochs = 10)
# + [markdown] id="kabMIBvruTqP"
# Save the model to Google Drive
# + colab={"base_uri": "https://localhost:8080/"} id="wECQo5Qn7NQ6" outputId="2c3462ab-583f-4cb1-a463-a766c71b26be"
model.save('bert')
# + id="eGJBvIt88JYT"
# !cp -r bert drive/My\ Drive
# + [markdown] id="NSYSVO_WuYAc"
# ##Step 8: Generate the predictions for the validation set
# + id="Zu5g5-dUNl8-"
preds = list()
# + id="sFm-Tpb9Nv9y"
from tqdm.notebook import tqdm
# + id="4UrGN0aZN4dB"
testcases = testx.reset_index(drop = True)
# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["c4512e7f400c4e3585a2a22eafbf72ee", "85c454ac7ccd4d3ebb57061cd6022665", "b10915789678438481f5f0625233da10", "7ff780df287c460c9ed5dcfb927190d9", "3dec434725a442b98044b77a9d11ca44", "67a31bbe556c4fd7b3256d6e03d94ca9", "<KEY>", "2a1dbec8a19b4bceb4036d40e9e5653a"]} id="g7PF1T92NMb0" outputId="45d851d6-2be1-441d-f2ee-a8cf69604dc4"
# One forward pass per example: softmax over logits, take the argmax class.
# (Batching the whole series in one model.predict call would be much faster.)
for i in tqdm(range(len(testcases))):
    preds.append(tf.nn.softmax(model(tf.constant([testcases[i]]))).numpy().argmax(1)[0])
# + id="e53k7hxyw80r"
from sklearn.metrics import classification_report
# + [markdown] id="K4mvtLgZueka"
# ##Results and Model Evaluation
# + [markdown] id="pPZ_JAJAui6Q"
# It can be seen that the model achieves 88% macro average, much higher than the LSTM model
# + id="171qNVyT4FR-"
n = data['Category'].unique().max()+1
# + colab={"base_uri": "https://localhost:8080/"} id="y1xxsUSz3p0U" outputId="2fd17a88-4bbd-4ea2-8118-6ee549102131"
# Recover the original string names for each encoded class id.
label_names = []
for i in range(n):
    label_names.append(le.inverse_transform(np.array(i).reshape(-1,1))[0])
# + colab={"base_uri": "https://localhost:8080/"} id="d1ONoBtO37qd" outputId="1820088b-a0b3-4e08-c68c-9a2562a7eb05"
print(classification_report(testy,preds,target_names=label_names))
# + id="6JOH_2svxoZC"
from sklearn.metrics import classification_report,confusion_matrix
# + [markdown] id="IegLHJlmu0wA"
# ###Not plotting ROC Curve since, BERT does not support probability prediction (Working on getting the same after completing the task)
# + id="FWiyHwj84tYs"
from sklearn.metrics import roc_auc_score,roc_curve
# + id="YXNOSwIM5s0v"
# Placeholders for per-class ROC data; populated nowhere in this notebook.
fpr,tpr,thresh = dict(),dict(),dict()
# + [markdown] id="qKlreTH4vN5f"
# ####Plotting the confusion matrix for the validation set, minor misclassifications can be observed
# + id="yjavn9Mq6RKT"
import matplotlib.pyplot as plt
# + id="i5OdhFuU6QGG"
cm = confusion_matrix(testy.values,preds)
# + id="WO_wzK-a6q1Y"
import seaborn as sn
# + colab={"base_uri": "https://localhost:8080/", "height": 772} id="veM7abmH6tk_" outputId="c0c47610-4101-43d1-dfb7-a226a6389a3d"
plt.figure(figsize=(10,10))
sn.heatmap(cm,cmap = 'Blues',annot = True,fmt = '.1f',xticklabels = label_names,yticklabels=label_names)
# + [markdown] id="9mJcJOX8vWHl"
# ####Plotting model loss and accuracy, it can be seen that the BERT model achieves optimal performance after 1 epoch itself and further, does not start to overfit on training data. Instead, consistent loss is maintained
# + id="St54s5av7Gb8"
acc = hist.history['categorical_accuracy']
val_acc = hist.history['val_categorical_accuracy']
loss = hist.history['loss']
val_loss = hist.history['val_loss']
epochs = range(1, len(acc) + 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="jA8jwyVd7Zrs" outputId="c7bcd1ed-8eec-4fb0-ea54-731d784f3a58"
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss at 10 epochs')
plt.legend()
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="uTyhQgC_7biq" outputId="d47a7669-4c1c-4f6f-9aed-cf0ffe9b7081"
plt.figure()
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy at 10 epochs')
plt.legend()
# + [markdown] id="vRijxXPo75Qv"
# ###Conclusion:
# The BERT model achieves optimal state after 1 epoch itself although, it takes almost 8 minutes for each epoch. The accuracy achieved is higher than the LSTM model
# However, the model would likely perform better if more data were used
# + [markdown] id="hkD1TrI0xb4u"
# ###References:
# 1. BERT Tutorial: https://www.tensorflow.org/tutorials/text/classify_text_with_bert
#
# + id="xNkdW7wFxqju"
| Approach_2_Deep_Learning_BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#msticpy---Event-Timeline" data-toc-modified-id="msticpy---Event-Timeline-1">msticpy - Event Timeline</a></span></li><li><span><a href="#Discrete-Event-Timelines" data-toc-modified-id="Discrete-Event-Timelines-2">Discrete Event Timelines</a></span><ul class="toc-item"><li><span><a href="#Plotting-a-simple-timeline" data-toc-modified-id="Plotting-a-simple-timeline-2.1">Plotting a simple timeline</a></span></li><li><span><a href="#More-Advanced-Timelines" data-toc-modified-id="More-Advanced-Timelines-2.2">More Advanced Timelines</a></span><ul class="toc-item"><li><span><a href="#Grouping-Series-From-a-Single-DataFrame" data-toc-modified-id="Grouping-Series-From-a-Single-DataFrame-2.2.1">Grouping Series From a Single DataFrame</a></span></li></ul></li><li><span><a href="#Displaying-a-reference-line" data-toc-modified-id="Displaying-a-reference-line-2.3">Displaying a reference line</a></span></li><li><span><a href="#Plotting-series-from-different-data-sets" data-toc-modified-id="Plotting-series-from-different-data-sets-2.4">Plotting series from different data sets</a></span></li></ul></li><li><span><a href="#Plotting-Series-with-Scalar-Values" data-toc-modified-id="Plotting-Series-with-Scalar-Values-3">Plotting Series with Scalar Values</a></span><ul class="toc-item"><li><span><a href="#Documentation-for-display_timeline_values" data-toc-modified-id="Documentation-for-display_timeline_values-3.1">Documentation for display_timeline_values</a></span></li></ul></li><li><span><a href="#Exporting-Plots-as-PNGs" data-toc-modified-id="Exporting-Plots-as-PNGs-4">Exporting Plots as PNGs</a></span></li></ul></div>
# -
# # msticpy - Event Timeline
#
# This notebook demonstrates the use of the timeline displays built using the [Bokeh library](https://bokeh.pydata.org).
#
# You must have msticpy installed:
# ```
# # !pip install --upgrade msticpy
# ```
#
# There are two display types:
# - Discrete event series - this plots multiple series of events as discrete glyphs
# - Event value series - this plots a scalar value of the events using glyphs, bars or a traditional line graph (or some combination).
# +
# Imports
import sys
import warnings
# Abort early with a clear message if the Python runtime is too old.
from msticpy.common.utility import check_py_version
MIN_REQ_PYTHON = (3,6)
check_py_version(MIN_REQ_PYTHON)
from IPython import get_ipython
from IPython.display import display, HTML, Markdown
import ipywidgets as widgets
import pandas as pd
# Widen pandas display limits for inspecting event data inline.
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_colwidth', 100)
from msticpy.nbtools import *
from msticpy.sectools import *
WIDGET_DEFAULTS = {'layout': widgets.Layout(width='95%'),
                   'style': {'description_width': 'initial'}}
# -
# # Discrete Event Timelines
#
# ## Plotting a simple timeline
# nbdisplay.display_timeline
# ```
# Display a timeline of events.
#
# Parameters
# ----------
# data : Union[dict, pd.DataFrame]
# Either
# dict of data sets to plot on the timeline with the following structure.
#
# Key: str
# Name of data set to be displayed in legend
# Value: dict
# containing
#
# data: pd.DataFrame
# Data to plot
# time_column: str, optional
# Name of the timestamp column
# (defaults to `time_column` function parameter)
# source_columns: list[str], optional
# List of source columns to use in tooltips
# (defaults to `source_columns` function parameter)
# color: str, optional
# Color of datapoints for this data
# (defaults to autogenerating colors)
#
# Or
# DataFrame as a single data set or grouped into individual
# plot series using the `group_by` parameter
# time_column : str, optional
# Name of the timestamp column
# (the default is 'TimeGenerated')
# source_columns : list, optional
# List of default source columns to use in tooltips
# (the default is None)
# ```
# +
# Load the Windows process-creation events, parsing the timestamp column
# so the timeline can plot on real datetimes.
processes_on_host = pd.read_csv(
    'data/processes_on_host.csv',
    parse_dates=["TimeGenerated"],
    infer_datetime_format=True,
)
# At a minimum we need to pass a dataframe with data
nbdisplay.display_timeline(processes_on_host)
# -
# The Bokeh graph is interactive and has the following features:
# - Tooltip display for each event marker as you hover over it
# - Toolbar with the following tools (most are toggles enabling or disabling the tool):
# - Panning
# - Select zoom
# - Mouse wheel zoom
# - Reset to default view
# - Save image to PNG
# - Hover tool
#
# Additionally an interactive timeline navigation bar is displayed below the main graph. You can change the timespan shown on the main graph by dragging or resizing the selected area on this navigation bar.
#
# **Note**:
# - the tooltips work on the Windows process data shown above because of a legacy fallback built into the code.
# Usually you need to specify the `source_columns` parameter explicitly to have
# the hover tooltips populated correctly.
# ## More Advanced Timelines
# `display_timeline` also takes a number of optional parameters that give you more flexibility to show multiple data series and change the way the graph appears.
# ```
# Other Parameters
# ----------------
# title : str, optional
# Title to display (the default is None)
# alert : SecurityAlert, optional
# Add a reference line/label using the alert time (the default is None)
# ref_event : Any, optional
# Add a reference line/label using the alert time (the default is None)
# ref_time : datetime, optional
# Add a reference line/label using `ref_time` (the default is None)
# group_by : str
# (where `data` is a DataFrame)
# The column to group timelines on
# sort_by : str
# (where `data` is a DataFrame)
# The column to order timelines on
# legend: str, optional
# left, right or inline
# (the default is None/no legend)
# yaxis : bool, optional
# Whether to show the yaxis and labels (default is False)
# range_tool : bool, optional
# Show the the range slider tool (default is True)
# height : int, optional
# The height of the plot figure
# (the default is auto-calculated height)
# width : int, optional
# The width of the plot figure (the default is 900)
# color : str
# Default series color (default is "navy")
# ```
#
# ### Grouping Series From a Single DataFrame
#
# One plotted series per distinct Account value; tooltip shows process names.
nbdisplay.display_timeline(
    processes_on_host,
    group_by="Account",
    source_columns=["NewProcessName", "ParentProcessName"],
    legend="left",
);
# We can use the group_by parameter to specify a column on which to split individually plotted series.
#
# Specifying a legend, we can see the value of each series group. The legend is interactive - click on a series name to
# hide/show the data. The legend can be placed inside of the chart (`legend="inline"`) or to the left or right.
#
# Alternatively we can enable the yaxis - although this is not guaranteed to show all values of the groups.
#
# **Note**:
# - the tooltips work on the Windows process data shown above because of a legacy fallback built into the code. Usually you need to specify the `source_columns` parameter explicitly to have the hover tooltips populated correctly.
# - the trailing semicolon just stops Jupyter showing the return value from the function. It isn't mandatory
# Same grouped plot, but with the y-axis labels enabled instead of a legend.
nbdisplay.display_timeline(
    processes_on_host,
    group_by="Account",
    source_columns=["NewProcessName", "ParentProcessName"],
    legend="none",
    yaxis=True,
    ygrid=True,
);
# +
# Load the host logon events with parsed timestamps.
host_logons = pd.read_csv(
    'data/host_logons.csv',
    parse_dates=["TimeGenerated"],
    infer_datetime_format=True,
)

# Same tooltip columns for both plots below.
logon_tooltips = ["Account", "TargetLogonId", "LogonType"]

# The same data plotted twice: once grouped by account, once by logon type.
nbdisplay.display_timeline(
    host_logons,
    title="Logons by Account name",
    group_by="Account",
    source_columns=logon_tooltips,
    legend="left",
    height=200,
);
nbdisplay.display_timeline(
    host_logons,
    title="Logons by logon type",
    group_by="LogonType",
    source_columns=logon_tooltips,
    legend="left",
    height=200,
    range_tool=False,
    ygrid=True,
);
# -
# ## Displaying a reference line
# If you have a single item (e.g. an alert) that you want to show as a reference point on the graph you can pass a datetime value, or any object that has a TimeGenerated or StartTimeUtc property.
#
# If the object doesn't have one of these, just pass the property as the ref_time parameter.
# +
# Pick one random process event to act as a stand-in "alert"; its time is
# used to draw the reference marker on the plot.
fake_alert = processes_on_host.sample().iloc[0]

nbdisplay.display_timeline(
    host_logons,
    title="Processes with marker",
    group_by="LogonType",
    source_columns=["Account", "TargetLogonId", "LogonType"],
    alert=fake_alert,
    legend="left",
);
# -
# ## Plotting series from different data sets
# When you want to plot data sets with different schema on the same plot it is difficult to put them in a single DataFrame.
# To do this we need to assemble the different data sets into a dictionary and pass that to the `display_timeline`
#
# The dictionary has this format:
#
# Key: str
# Name of data set to be displayed in legend
#
# Value: dict, the value holds the settings for each data series:
#
# data: pd.DataFrame
# Data to plot
# time_column: str, optional
# Name of the timestamp column
# (defaults to `time_column` function parameter)
# source_columns: list[str], optional
# List of source columns to use in tooltips
# (defaults to `source_columns` function parameter)
# color: str, optional
# Color of datapoints for this data
# (defaults to autogenerating colors)
#
# +
# Each dict entry becomes one legend series; the key is the display name.
proc_series = {
    "data": processes_on_host,
    "source_columns": ["NewProcessName", "Account"],
}
logon_series = {
    "data": host_logons,
    "source_columns": ["Account", "TargetLogonId", "LogonType"],
}
procs_and_logons = {"Processes": proc_series, "Logons": logon_series}

nbdisplay.display_timeline(
    data=procs_and_logons,
    title="Logons and Processes",
    legend="left",
    yaxis=False,
);
# -
# # Plotting Series with Scalar Values
# Often you may want to see a scalar value plotted with the series.
#
# The example below uses `display_timeline_values` to plot network flow data using the total flows recorded between a pair of IP addresses.
#
# Note that the majority of parameters are the same as `display_timeline` but include a mandatory `y` parameter which indicates which value you want to plot on the y (vertical) axis.
# +
# Load Azure network flow records; several timestamp columns need parsing.
az_net_flows_df = pd.read_csv(
    'data/az_net_flows.csv',
    parse_dates=["TimeGenerated", "FlowStartTime", "FlowEndTime"],
    infer_datetime_format=True,
)

flow_tooltip_cols = [
    "FlowType",
    "AllExtIPs",
    "L7Protocol",
    "FlowDirection",
    "TotalAllowedFlows",
]

# `y` selects the scalar column plotted on the vertical axis.
flow_plot = nbdisplay.display_timeline_values(
    data=az_net_flows_df,
    group_by="L7Protocol",
    source_columns=flow_tooltip_cols,
    time_column="FlowStartTime",
    y="TotalAllowedFlows",
    legend="right",
    height=500,
);
# -
# By default the plot uses vertical bars show the values but you can use any combination of vbar, circle and line, using the `kind` parameter. You specify the plot types as a list of strings (all lowercase).
#
# **Notes**
# - including "circle" in the plot kinds makes it easier to see the hover value
# - the line plot can be a bit misleading since it will plot lines between adjacent data points of the same series implying that there is a gradual change in the value being plotted - even though there may be no data between the times of these adjacent points. For this reason using vbar is often a more accurate view.
# vbar shows magnitude clearly; adding circle makes hover targets easier to hit.
flow_plot = nbdisplay.display_timeline_values(
    data=az_net_flows_df,
    group_by="L7Protocol",
    source_columns=[
        "FlowType",
        "AllExtIPs",
        "L7Protocol",
        "FlowDirection",
        "TotalAllowedFlows",
    ],
    time_column="FlowStartTime",
    y="TotalAllowedFlows",
    legend="right",
    height=500,
    kind=["vbar", "circle"],
);
# line+circle on just the http flows — demonstrates how the line glyph can
# imply a smooth change across gaps where there is actually no data.
nbdisplay.display_timeline_values(
    data=az_net_flows_df[az_net_flows_df["L7Protocol"] == "http"],
    group_by="L7Protocol",
    title="Line plot can be misleading",
    source_columns=[
        "FlowType",
        "AllExtIPs",
        "L7Protocol",
        "FlowDirection",
        "TotalAllowedFlows",
    ],
    time_column="FlowStartTime",
    y="TotalAllowedFlows",
    legend="right",
    height=300,
    kind=["line", "circle"],
    range_tool=False,
);
# Same http-only data with vbar+circle: the gaps in the data stay visible.
nbdisplay.display_timeline_values(
    data=az_net_flows_df[az_net_flows_df["L7Protocol"] == "http"],
    group_by="L7Protocol",
    title="Vbar and circle show zero gaps in data",
    source_columns=[
        "FlowType",
        "AllExtIPs",
        "L7Protocol",
        "FlowDirection",
        "TotalAllowedFlows",
    ],
    time_column="FlowStartTime",
    y="TotalAllowedFlows",
    legend="right",
    height=300,
    kind=["vbar", "circle"],
    range_tool=False,
);
# ## Documentation for display_timeline_values
# ```
# nbdisplay.display_timeline_values(
# data: pandas.core.frame.DataFrame,
# y: str,
# time_column: str = 'TimeGenerated',
# source_columns: list = None,
# **kwargs,
# ) -> figure
#
# Display a timeline of events.
#
# Parameters
# ----------
# data : pd.DataFrame
# DataFrame as a single data set or grouped into individual
# plot series using the `group_by` parameter
# time_column : str, optional
# Name of the timestamp column
# (the default is 'TimeGenerated')
# y : str
# The column name holding the value to plot vertically
# source_columns : list, optional
# List of default source columns to use in tooltips
# (the default is None)
#
# Other Parameters
# ----------------
# x : str, optional
# alias of `time_column`
# title : str, optional
# Title to display (the default is None)
# ref_event : Any, optional
# Add a reference line/label using the alert time (the default is None)
# ref_time : datetime, optional
# Add a reference line/label using `ref_time` (the default is None)
# group_by : str
# (where `data` is a DataFrame)
# The column to group timelines on
# sort_by : str
# (where `data` is a DataFrame)
# The column to order timelines on
# legend: str, optional
# left, right or inline
# (the default is None/no legend)
# yaxis : bool, optional
# Whether to show the yaxis and labels
# range_tool : bool, optional
# Show the the range slider tool (default is True)
# height : int, optional
# The height of the plot figure
# (the default is auto-calculated height)
# width : int, optional
# The width of the plot figure (the default is 900)
# color : str
# Default series color (default is "navy"). This is overridden by
# automatic color assignments if plotting a grouped chart
# kind : Union[str, List[str]]
# one or more glyph types to plot., optional
# Supported types are "circle", "line" and "vbar" (default is "vbar")
#
# Returns
# -------
# figure
# The bokeh plot figure.
# ```
# # Exporting Plots as PNGs
# To use bokeh.io image export functions you need selenium, phantomjs and pillow installed:
#
# `conda install -c bokeh selenium phantomjs pillow`
#
# or
#
# `pip install selenium pillow`
# `npm install -g phantomjs-prebuilt`
#
# For phantomjs see https://phantomjs.org/download.html.
#
# Once the prerequisites are installed you can create a plot and save the return value to a variable.
# Then export the plot using `export_png` function.
# ```python
# from bokeh.io import export_png
# from IPython.display import Image
#
# # Create a plot
# flow_plot = nbdisplay.display_timeline_values(data=az_net_flows_df,
# group_by="L7Protocol",
# source_columns=["FlowType",
# "AllExtIPs",
# "L7Protocol",
# "FlowDirection",
# "TotalAllowedFlows"],
# time_column="FlowStartTime",
# y="TotalAllowedFlows",
# legend="right",
# height=500,
# kind=["vbar", "circle"]
# );
#
# # Export
# file_name = "plot.png"
# export_png(flow_plot, filename=file_name)
#
# # Read it and show it
# display(Markdown(f"## Here is our saved plot: {file_name}"))
# Image(filename=file_name)
# ```
| docs/notebooks/EventTimeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "Hello PySpark!"
#
# > Get up and running fast with a local pyspark installation, and learn the essentials of working with dataframes at scale.
#
# - hide: false
# - toc: false
# - comments: true
# - categories: [PySpark]
# - image: images/guiones_wave.jpeg
#
# 
# Well, you guessed it: it's time for us to learn PySpark!
#
# I know, I know, I can hear you screaming into your pillow. Indeed we just spent all that time converting from R and learning python and why the hell do we need yet another API for working with dataframes?
#
# That's a totally fair question.
#
# So what happens when we're working on something in the real world, where datasets get large in a hurry, and we suddenly have a dataframe that no longer fits into memory?
# We need a way for our computations and datasets to scale across multiple nodes in a distributed system without having to get too fussy about all the distributed compute details.
#
# Enter PySpark.
#
# I think it's fair to think of PySpark as a python package for working with arbitrarily large dataframes, i.e., it's like pandas but scalable.
# It's built on top of [Apache Spark](https://spark.apache.org/), a unified analytics engine for large-scale data processing.
# [PySpark](https://spark.apache.org/docs/latest/api/python/) is essentially a way to access the functionality of spark via python code.
# While there are other high-level interfaces to Spark (such as Java, Scala, and R), for data scientists who are already working extensively with python, PySpark will be the natural interface of choice.
# PySpark also has great integration with [SQL](https://spark.apache.org/docs/latest/sql-programming-guide.html), and it has a companion machine learning library called [MLlib](https://spark.apache.org/mllib/) that's more or less a scalable scikit-learn (maybe we can cover it in a future post).
#
# So, here's the plan.
# First we're going to get set up to run PySpark locally in a jupyter notebook on our laptop.
# This is my preferred environment for interactively playing with PySpark and learning the ropes.
# Then we're going to get up and running in PySpark as quickly as possible by reviewing the most essential functionality for working with dataframes and comparing it to how we would do things in pandas.
# Once we're comfortable running PySpark on the laptop, it's going to be much easier to jump onto a distributed cluster and run PySpark at scale.
#
# Let's do this.
# ## How to Run PySpark in a Jupyter Notebook on Your Laptop
#
# Ok, I'm going to walk us through how to get things installed on a Mac or Linux machine where we're using homebrew and conda to manage virtual environments.
# If you have a different setup, your favorite search engine will help you get PySpark set up locally.
# ### Install Spark
#
# Most of the Spark sourcecode is written in Scala, so first we install Scala.
#
# ```
# $ brew install scala
# ```
#
# Install Spark.
#
# ```
# $ brew install apache-spark
# ```
#
# Check where Spark is installed.
# ```
# $ brew info apache-spark
# apache-spark: stable 3.1.1, HEAD
# Engine for large-scale data processing
# https://spark.apache.org/
# /usr/local/Cellar/apache-spark/3.1.2 (1,361 files, 242.6MB) *
# ...
# ```
#
# Set the Spark home environment variable to the path returned by `brew info` with `/libexec` appended to the end.
# Don't forget to add the export to your `.zshrc` file too.
#
# ```
# $ export SPARK_HOME=/usr/local/Cellar/apache-spark/3.1.2/libexec
# ```
#
# Test the installation by starting the Spark shell.
#
# ```
# $ spark-shell
# ...
# Welcome to
# ____ __
# / __/__ ___ _____/ /__
# _\ \/ _ \/ _ `/ __/ '_/
# /___/ .__/\_,_/_/ /_/\_\ version 3.1.1
# /_/
#
# Using Scala version 2.12.10 (OpenJDK 64-Bit Server VM, Java 14.0.1)
# Type in expressions to have them evaluated.
# Type :help for more information.
#
# scala>
# ```
#
# If you get the `scala>` prompt, then you've successfully installed Spark on your laptop!
# ### Install PySpark
#
# Use conda to install the PySpark python package.
# As usual, it's advisable to do this in a new virtual environment.
#
#
# ```
# $ conda install pyspark
# ```
#
# You should be able to launch an interactive PySpark REPL by saying pyspark.
#
# ```
# $ pyspark
# ...
# Welcome to
# ____ __
# / __/__ ___ _____/ /__
# _\ \/ _ \/ _ `/ __/ '_/
# /__ / .__/\_,_/_/ /_/\_\ version 3.1.2
# /_/
#
# Using Python version 3.8.3 (default, Jul 2 2020 11:26:31)
# Spark context Web UI available at http://192.168.100.47:4041
# Spark context available as 'sc' (master = local[*], app id = local-1624127229929).
# SparkSession available as 'spark'.
# >>>
# ```
#
# This time we get a familiar python `>>>` prompt.
# This is an interactive shell where we can easily experiment with PySpark.
# Feel free to run the example code in this post here in the PySpark shell, or, if you prefer a notebook, read on and we'll get set up to run PySpark in a jupyter notebook.
# ### The Spark Session Object
#
# You may have noticed that when we launched that PySpark interactive shell, it told us that something called `SparkSession` was available as `'spark'`.
# So basically, what's happening here is that when we launch the pyspark shell, it instantiates an object called `spark` which is an instance of class `pyspark.sql.session.SparkSession`.
# The spark session object is going to be our entry point for all kinds of PySpark functionality, i.e., we're going to be saying things like `spark.this()` and `spark.that()` to make stuff happen.
#
# The PySpark interactive shell is kind enough to instantiate one of these spark session objects for us automatically.
# However, when we're using another interface to PySpark (like say a jupyter notebook running a python kernel), we'll have to make a spark session object for ourselves.
# ### Create a PySpark Session in a Jupyter Notebook
#
# There are a few ways to run PySpark in jupyter which you can read about [here](https://www.datacamp.com/community/tutorials/apache-spark-python).
#
# For derping around with PySpark on your laptop, I think the best way is to instantiate a spark session from a jupyter notebook running on a regular python kernel.
# The method we'll use involves running a standard jupyter notebook session with a python kernel and using the findspark package to initialize the spark session.
# So, first install the findspark package.
#
# ```
# $ conda install findspark
# ```
#
# Launch jupyter as usual.
#
# ```
# $ jupyter notebook
# ```
#
#
# Go ahead and fire up a new notebook using a regular python 3 kernel.
# Once you land inside the notebook, there are a couple things we need to do to get a spark session instantiated.
# You can think of this as boilerplate code that we need to run in the first cell of a notebook where we're going to use PySpark.
# +
# Boilerplate: locate the local Spark installation and start a session.
import findspark

# findspark.init() must run BEFORE pyspark is imported: it locates the
# SPARK_HOME installation and adds it to sys.path so the import below can
# succeed even when pyspark is not installed in the active environment.
findspark.init()

import pyspark
from pyspark.sql import SparkSession

# getOrCreate() reuses an existing session if one is already running.
spark = SparkSession.builder.appName('My Spark App').getOrCreate()
# -
# First we're running findspark's `init()` method to find our Spark installation. If you run into errors here,
# make sure you got the `SPARK_HOME` environment variable correctly set in the install instructions above.
# Then we instantiate a spark session as `spark`.
# Once you run this, you're ready to rock and roll with PySpark in your jupyter notebook.
# > Note: Spark provides a handy web UI that you can use for monitoring and debugging. Once you instantiate the spark session You can open the UI in your web browser at [http://localhost:4040/jobs/](http://localhost:4040/jobs/).
# ## PySpark Concepts
#
# PySpark provides two main abstractions for data: the RDD and the dataframe.
# **RDD**'s are just a distributed list of objects; we won't go into details about them in this post.
# For us, the key object in PySpark is the **dataframe**.
#
# While PySpark dataframes expose much of the functionality you would expect from a library for tabular data manipulation, they behave a little differently from pandas dataframes, both syntactically and under-the-hood.
# There are a couple of key concepts that will help explain these idiosyncracies.
#
# **Immutability** - Pyspark RDD's and dataframes are immutable. This means that if you change an object, e.g. by adding a column to a dataframe, PySpark returns a reference to a new dataframe; it does not modify the existing dataframe. This is kind of nice, because we don't have to worry about that whole [view versus copy](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy) nonsense that happens in pandas.
#
# **Lazy Evaluation** - Lazy evaluation means that when we start manipulating a dataframe, PySpark won't actually perform any of the computations until we explicitly ask for the result. This is nice because it potentially allows PySpark to do fancy optimizations before executing a sequence of operations. It's also confusing at first, because PySpark will seem to blaze through complex operations and then take forever to print a few rows of the dataframe.
# ## PySpark Dataframe Essentials
# ### Creating a PySpark dataframe with `createDataFrame()`
#
# The first thing we'll need is a way to make dataframes.
# [`createDataFrame()`](https://spark.apache.org/docs/3.1.1/api/python/reference/api/pyspark.sql.SparkSession.createDataFrame.html) allows us to create PySpark dataframes from python objects like nested lists or pandas dataframes.
# Notice that `createDataFrame()` is a method of the spark session class, so we'll call it from our spark session `spark` by saying `spark.createDataFrame()`.
# +
# create pyspark dataframe from nested lists
# (each inner list is one row; schema supplies the column names)
zodiac_rows = [
    [2022, "tiger"],
    [2023, "rabbit"],
    [2024, "dragon"],
]
my_df = spark.createDataFrame(data=zodiac_rows, schema=['year', 'animal'])
# -
# -
# Let's read the seaborn tips dataset into a pandas dataframe and then use it to create a PySpark dataframe.
# +
import pandas as pd

# load tips dataset into a pandas dataframe
TIPS_URL = 'https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv'
pandas_df = pd.read_csv(TIPS_URL)

# create pyspark dataframe from a pandas dataframe
pyspark_df = spark.createDataFrame(pandas_df)
# -
# > Note: In real life when we're running PySpark on a large-scale distributed system, we would not generally want to use python lists or pandas dataframes to load data into PySpark. Ideally we would want to read data directly from where it is stored on HDFS, e.g. by reading [parquet files](https://spark.apache.org/docs/latest/sql-data-sources-parquet.html), or by querying directly from a hive database using [spark sql](https://spark.apache.org/docs/latest/sql-programming-guide.html).
# ### Peeking at a dataframe's contents
#
# The default print method for the PySpark dataframe will just give you the schema.
# Default repr shows only the schema; no computation is triggered
pyspark_df
# If we want to peek at some of the data, we'll need to use the `show()` method, which is analogous to the pandas `head()`.
# Remember that `show()` will cause PySpark to execute any operations that it's been lazily waiting to evaluate, so sometimes it can take a while to run.
# +
# show the first few rows of the dataframe
# (show() is an action: it forces any lazily queued operations to execute)
pyspark_df.show(5)
# -
# We thus encounter our first rude awakening.
# PySpark's default representation of dataframes in the notebook isn't as pretty as that of pandas.
# But no one ever said it would be pretty, they just said it would be scalable.
# You can also use the `printSchema()` method for a nice vertical representation of the schema.
# +
# show the dataframe schema
# (printSchema() prints column names and inferred types, one per line)
pyspark_df.printSchema()
# -
# ### Select columns by name
#
# You can select specific columns from a dataframe using the [`select()`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.select.html) method.
# You can pass either a list of names, or pass names as arguments.
# +
#hide_output
# select() returns a new dataframe with only the named columns;
# the original dataframe is unchanged (immutability)
# select some of the columns
pyspark_df.select('total_bill', 'tip')
# select columns in a list
pyspark_df.select(['day', 'time', 'total_bill'])
# -
# ### Filter rows based on column values
#
# Analogous to the `WHERE` clause in SQL, and the `query()` method in pandas, PySpark provides a [`filter()`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.filter.html) method which returns only the rows that meet the specified conditions.
# Its argument is a string specifying the condition to be met for rows to be included in the result. You specify the condition as an expression involving the column names and comparison operators like <, >, <=, >=, == (equal), and != (not equal). You can specify compound expressions using `and` and `or`, and you can even do a SQL-like `in` to check if the column value matches any items in a list.
# +
#hide_output
# filter() takes a SQL-like condition string and returns the matching rows
## compare a column to a value
pyspark_df.filter('total_bill > 20')
# compare two columns with arithmetic
pyspark_df.filter('tip > 0.15 * total_bill')
# check equality with a string value
pyspark_df.filter('sex == "Male"')
# check equality with any of several possible values
pyspark_df.filter('day in ("Sat", "Sun")')
# use "and"
pyspark_df.filter('day == "Fri" and time == "Lunch"')
# -
# If you're into boolean indexing with the brackets, PySpark does support that too, but I encourage you to use `filter()` instead.
# Check out my rant about [why you shouldn't use boolean indexing](https://blog.mattbowers.dev/8020-pandas-tutorial#Select--rows-based-on-their-values-with-query()) for the details.
# The TLDR is that `filter()` requires less typing, makes your code more readable and portable, and it allows you to chain method calls together using dot chains.
#
# Here's the boolean indexing equivalent of the last example from above.
#hide_output
# using boolean indexing
# each comparison must be parenthesized: & binds more tightly than ==
pyspark_df[(pyspark_df.day == 'Fri') & (pyspark_df.time == 'Lunch')]
# I know, it looks horrendous, but not as horrendous as the error message you'll get if you forget the parentheses. :smiley:
# ### Add new columns to a dataframe
#
# You can add new columns which are functions of the existing columns with the [`withColumn()`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.withColumn.html) method.
# +
#hide_output
# pyspark.sql.functions holds column-expression helpers (col, when, log, ...)
import pyspark.sql.functions as f
# add a new column using col() to reference other columns
pyspark_df.withColumn('tip_percent', f.col('tip') / f.col('total_bill'))
# -
# Notice that we've imported the [`pyspark.sql.functions`]([pyspark.sql.functions](https://spark.apache.org/docs/2.4.0/api/python/pyspark.sql.html#module-pyspark.sql.functions)) module. This module contains lots of useful functions that we'll be using all over the place, so it's probably a good idea to go ahead and import it whenever you're using PySpark.
# BTW, it seems like folks usually import this module as `f` or `F`.
# In this example we're using the `col()` function, which allows us to refer to columns in our dataframe using string representations of the column names.
#
# You could also achieve the same result using the dot to reference the other columns, but this requires us to type the dataframe name over and over again, which makes it harder to reuse this code on different dataframes or in [dot chains](https://blog.mattbowers.dev/8020-pandas-tutorial#Chain-transformations-together-with-the-dot-chain).
#hide_output
# add a new column using the dot to reference other columns (less recommended)
# — ties the expression to this specific dataframe variable name
pyspark_df.withColumn('tip_percent', pyspark_df.tip / pyspark_df.total_bill)
# If you want to apply numerical transformations like exponents or logs, use the built-in functions in the `pyspark.sql.functions` module.
# +
#hide_output
# log
# f.log() computes the natural log of the column values
pyspark_df.withColumn('log_bill', f.log(f.col('total_bill')))
# exponent
# f.pow() raises the column values to the given power
pyspark_df.withColumn('bill_squared', f.pow(f.col('total_bill'), 2))
# -
# You can implement conditional assignment like SQL's `CASE WHEN` construct using the `when()` function and the `otherwise()` method.
# +
#hide_output
# conditional assignment (like CASE WHEN): when(condition, value).otherwise(default)
pyspark_df.withColumn('is_male', f.when(f.col('sex') == 'Male', True).otherwise(False))
# chain multiple when() conditions; otherwise() supplies the fall-through value
pyspark_df.withColumn('bill_size',
                      f.when(f.col('total_bill') < 10, 'small')
                      .when(f.col('total_bill') < 20, 'medium')
                      .otherwise('large')
                     )
# -
# Remember that since PySpark dataframes are immutable, calling `withColumn()` on a dataframe returns a new dataframe.
# If you want to persist the result, you'll need to make an assignment.
#
# ```
# pyspark_df = pyspark_df.withColumn(...)
# ```
# ### Group by and aggregate
#
# PySpark provides a `groupBy()` method similar to the pandas `groupby()`.
# Just like in pandas, we can call methods like `count()` and `mean()` on our grouped dataframe, and we also have a more flexible `agg()` method that allows us to specify column-aggregation mappings.
# +
# group by and count the rows in each 'time' group
pyspark_df.groupBy('time').count().show()
# +
# group by and specify column-aggregation mappings with agg()
pyspark_df.groupBy('time').agg({'total_bill': 'mean', 'tip': 'max'}).show()
# -
# If you want to get fancier with your aggregations, it might just be easier to express them using hive syntax. Read on to find out how.
# ### Run Hive SQL on dataframes
#
# One of the mind-blowing features of PySpark is that it
# allows you to write hive SQL queries on your dataframes.
# To take a PySpark dataframe into the SQL world, use the `createOrReplaceTempView()` method.
# This method takes one string argument which will be the dataframe's name in the SQL world.
# Then you can use `spark.sql()` to run a query.
# The result is returned as a PySpark dataframe.
# +
# put pyspark dataframe in SQL world and query it
# (registers the dataframe as a temp view named 'tips')
pyspark_df.createOrReplaceTempView('tips')
# spark.sql() returns the query result as a new PySpark dataframe
spark.sql('select * from tips').show(5)
# -
# This is awesome for a couple of reasons. First, it allows us to easily express any transformations in hive syntax.
# If you're like me and you've already been using hive, this will dramatically reduce the PySpark learning curve, because when in doubt, you can always bump a dataframe into the SQL world and simply use hive to do what you need.
# Second, if you have a hive deployment, PySpark's SQL world also has access to all of your hive tables.
# This means you can write queries involving both hive tables and your PySpark dataframes.
# It also means you can run hive commands, like inserting into a table, directly from PySpark.
# Let's do some aggregations that might be a little trickier to do using the PySpark built-in functions.
# +
# run hive query and save result to dataframe
tip_stats_by_time = spark.sql("""
select
time
, count(*) as n
, avg(tip) as avg_tip
, percentile_approx(tip, 0.5) as med_tip
, avg(case when tip > 3 then 1 else 0 end) as pct_tip_gt_3
from
tips
group by 1
""")
tip_stats_by_time.show()
# -
# ## Visualization with PySpark
#
# There aren't any tools for visualization included in PySpark.
# But that's no problem, because we can just use the `toPandas()` method on a PySpark dataframe to pull data back into pandas.
# Once we have a pandas dataframe, we can happily build visualizations as usual.
# Of course, if your PySpark dataframe is huge, you wouldn't want to use `toPandas()` directly, because PySpark will attempt to read the entire contents of its huge dataframe into memory.
# Instead, it's best to use PySpark to generate aggregations of your data for plotting or to pull only a sample of your full data into pandas.
# +
# read aggregated pyspark dataframe into pandas for plotting
plot_pdf = tip_stats_by_time.toPandas()
# bar chart comparing mean vs. approximate-median tip per time group
plot_pdf.plot.bar(x='time', y=['avg_tip', 'med_tip']);
# -
# ## Wrapping Up
#
# So that's a wrap on our crash course in working with PySpark.
# You now have a good idea of what pyspark is and how to get started manipulating dataframes with it.
# Stay tuned for a future post on PySpark's companion ML library MLlib.
# In the meantime, may no dataframe be too large for you ever again.
| _notebooks/2021-06-22-hello-pyspark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python2.7
# language: python
# name: py2localspark
# ---
# + [markdown] deletable=true editable=true
# # Gas Network Optimization
#
# Table of contents:
#
# * [Description of the business problem and the solution provided in this notebook](#Description-of-the-business-problem-and-the-solution-provided-in-this-notebook)
# * [Description of the mathematical optimization model](#Description-of-the-mathematical-optimization-model)
# * [Decision optimization](#Decision-optimization)
# * [Step 1: Initialization and data loading](#Step-1:-Initialization-and-data-loading)
# * [Step 2: Load parameters](#Step-2:-Load-parameters)
# * [Step 3: Model the data](#Step-3:-Model-the-data)
# * [Visualize input gas network and extensions](#Visualize-input-gas-network-and-extensions)
# * [Step 4: Set up the optimization model](#Step-4:-Set-up-the-optimization-model)
# * [Define the optimization variables](#Define-the-optimization-variables)
# * [Express the physical constraints](#Express-the-physical-constraints)
# * [Express the business objective](#Express-the-business-objective)
# * [Step 5: Solve model and save the solution](#Step-5:-Solve-model-and-save-the-solution)
# * [Step 6: Investigate the solution](#Step-6:-Investigate-the-solution)
# ****
# + [markdown] deletable=true editable=true
# ### Description of the business problem and the solution provided in this notebook
#
# A gas transmission system operator faces many different transportation issues. In this notebook we focus on the topology optimization problem. This problem is as follows:
#
# * Given is a set of different gas transportation scenarios. A scenario defines gas quantities for each node that are either fed in to the network or taken out. The business question is: Which pipelines to select from a set of extension candidates and to add to the network in order to facilitate the transportation of gas quantities specified by the different scenarios. The selected extensions should be cost efficient due to construction costs of typically more than 1 Mio.€ per kilometer pipeline.
# * This planning process might be carried out manually by gas transmission system operators as follows: In a first step an adequate set of pipelines is added manually to the network model within an appropriate simulation software. In a second step, for each scenario, this software is used to check whether gas quantities according to the scenario can be transported through the network. If the transportation of these quantities is feasible for every scenario then the selected pipelines are a reasonable suggestion for extending the network. Otherwise the selection needs to be adapted.
# * Due to this manual approach the computation of an optimal solution for only one single scenario as well as handling multiple scenarios is typically a challenging issue for gas transmission system operators.
#
# This notebook implements a mathematical optimization model which enables an automated solution approach for the topology optimization problem. The user can specify a list of input scenarios, the details of the gas transportation network and a list of extension candidates. As a result an optimal selection of the extension candidates is computed such that all gas scenarios are feasible in the network after adding the candidates to the network. Note that it is also possible to provide a single transportation scenario instead of multiple scenarios.
# + [markdown] deletable=true editable=true
# ### Description of the mathematical optimization model
#
# In the following the mathematical optimization model is explained in detail:
#
# * In order to operate a gas network a pressure value must be assigned to each node in the network. The pressure difference at the end nodes of a pipeline induces a flow through the pipeline. The relation of the pressure values $p$ at the end nodes $v,w$ of a pipe and the gas flow $q$ through the pipe can roughly be described by the so-called weymouth equation $\alpha \, q|q| = p_v^2 - p_w^2$ for a pipeline specific constant $\alpha$. In this notebook we use an approximation of this equation as follows: $\alpha \, q = p_v^2 - p_w^2$. Modeling the squared pressure values $p^2$ by $\pi$ we express the approximation of the weymouth constraints by
# $$\alpha_a \, q_a - (\pi_v - \pi_w) = 0 \quad a \in A_{orig}$$
# The set $A_{orig}$ denotes the arc set of the input gas network. The squared pressure value at node $v$ is modeled by $\pi_v$ and the flow through a pipeline $a$ by $q_a$.
#
# * Typically there are limits for the pressure because a pipeline would burst if a pressure value exceeds a certain limit. These pressure limits imply a bound of the overall transportation capacity of the gas network. In the model pressure bounds are added to express these pressure limits. Also the flow though pipelines can be bounded. The following constraints are used for expressing these bounds:
# \begin{alignat}{2}
# \underline \pi_v \le \pi_v &\le \overline \pi_v &&\quad &v \in V \\
# \underline q_a \le q_a &\le \overline q_a &&\quad &a \in A
# \end{alignat}
#
#
# * Extension candidates are either build and the weymouth equation applies to them or they are not part of the network and the flow of the extension pipe equals zero. We use a decision variable $x_a$ for an extension pipe $a$ as follows:
# \begin{alignat}{2}
# x_a = 1 \Rightarrow \alpha_a q_a - (\pi_v - \pi_w) &= 0 &&\quad &a \in A_{ext} \\
# x_a = 0 \Rightarrow q_a &= 0 &&\quad &a\in A_{ext}
# \end{alignat}
# The set $A_{ext}$ denotes the set of extension candidates.
#
# * The flow through all pipelines must correspond to the specified transportation scenario $s$. At the nodes we have a flow conservation constraint:
# $$\sum_{a \in \delta^+(v)} q_a - \sum_{a \in \delta^-(v)} q_a = s_v \quad v \in V$$
#
# * The goal is to compute an optimal selection of the extensions, so investment costs have to be minimal:
# $$\min \sum_{a \in A_{ext}} c_a x_a$$
#
# This model is implemented in [Step 4: Set up the optimization model](#Step-4:-Set-up-the-optimization-model).
# + [markdown] deletable=true editable=true
# ## Decision optimization
# + [markdown] deletable=true editable=true
# ## Step 1: Initialization and data loading
#
# #### Reset kernel
# Reset the Jupyter kernel: clears all variables and functions.<br>
# This supports code refactoring. If not, old variables/functions are still present in the kernel and you may not see an exception when these old definitions are still used in the current code.<br>
# Same can be done through resetting the kernel, but that is a very slow process.<br>
# The magic command '%reset' can also be called through an API. The flag '-f' avoids the manual confirmation of the reset.
# + deletable=true editable=true
#dd-ignore
#Clear state: is faster than resetting the kernel
# from IPython import get_ipython
# get_ipython().magic('reset -sf')
# %reset -f
# + [markdown] deletable=true editable=true
# #### Imports
# + deletable=true editable=true
import pandas as pd
# + deletable=true editable=true
#dd-ignore
# dd_scenario is the Decision Optimization scenario API (notebook runs only)
from dd_scenario import *
#Create a client for the scenario repository...
client = Client()
# + [markdown] deletable=true editable=true
# #### Loads scenario data from scenario repository. Only used in notebook run.
# + deletable=true editable=true
#dd-ignore
#Get 'Gas Opt' decision...
dd_model_builder = client.get_model_builder(name="Gas_Network_Optimization")
#Get scenario 'Scenario 1'...
scenario = dd_model_builder.get_scenario(name="Transport Scenario 1+2 (scale by 1.75)")
#Load all input data as a map { data_name: data_frame }
inputs = scenario.get_tables_data(category='input')
# This will hold all outputs as a map { data_name: data_frame }
outputs = {}
# we use a lock to access ``outputs``. This allows solve() calls to
# be aborted without a race condition in data writing
import threading
output_lock = threading.Lock()
# + [markdown] deletable=true editable=true
# Extract input data
# + deletable=true editable=true
# Pull the individual tables out of the scenario inputs.
gasnetworkNodes = inputs['gasnetwork_nodes']
gasnetworkPipes = inputs['gasnetwork_pipes']
gasnetworkExtensions = inputs['gasnetwork_extensions']
# Every input table whose name mentions "scenario" is a transport scenario.
gasnetworkScenarios = [inputs[name] for name in inputs.keys() if "scenario" in name]
# + [markdown] deletable=true editable=true
# ## Step 2: Load parameters
# + [markdown] deletable=true editable=true
# Read parameters from file Parameter
# + deletable=true editable=true
#Parameter table (assumes 'param' and 'value' pairs); fall back to an
#empty table when no parameters were shipped with the scenario.
if 'Parameter' in inputs:
    params = inputs['Parameter'].set_index(['param'])
else:
    params = pd.DataFrame(columns=['param', 'value']).set_index('param')
params
# + [markdown] deletable=true editable=true
# Define the scaling value, each scenario will be scaled by this value. A value of 1.0 means that the gas quantities specified in the scenarios set are considered without modification.
# + deletable=true editable=true
# Scenario scaling factor; 1.0 keeps the specified quantities unchanged
# (a feasible setting that needs no network extensions).
scale = float(params.loc['scenarioScale'].value) if 'scenarioScale' in params.index else 1.0
# + [markdown] deletable=true editable=true
# ## Step 3: Model the data
#
# * a data store is used for storing the gas network and the scenarios
# * all nodes are read from file gasnetwork_nodes
# * all pipes are read from file gasnetwork_pipes
# * all extension candidates are read from file gasnetwork_extensions
# * several scenarios are read from several files gasnetwork_scn*
# + deletable=true editable=true
import networkx as nx
from math import pow, sqrt, log
class DataStore:
    """
    Class DataStore.

    Stores the gas network, the extension candidates and the transport
    scenarios. A directed networkx graph holds the network together with the
    candidates; node and arc attributes store the provided network
    information. Several access functions are provided for reading the data.

    Fix applied: the loader methods mixed Python-2 ``print x`` statements with
    parenthesized prints; all prints are now parenthesized, which produces
    identical output on Python 2 and also works on Python 3.
    """

    def __init__(self, gasnetworkNodes, gasnetworkPipes, gasnetworkExtensions, gasnetworkScenarios, scale, inputs):
        assert isinstance(scale, float)
        self.scale = scale        # scaling factor applied to scenario quantities
        self.G = nx.DiGraph()     # original pipes + extension candidates
        print("\nThe following gas network and extension candidates are loaded:\n")
        self.__readNodes(gasnetworkNodes)
        self.__readPipes(gasnetworkPipes)
        self.__readExtensionCandidates(gasnetworkExtensions)
        self.__readScenarios(gasnetworkScenarios)

    def __readNodes(self, gasnetworkNodes):
        """Add one graph node per row of the nodes table (with position and pressure bounds)."""
        inputDataNodes = gasnetworkNodes
        assert inputDataNodes['name'].dtype == object
        assert inputDataNodes['x'].dtype == float
        assert inputDataNodes['y'].dtype == float
        assert inputDataNodes['pressureMin'].dtype == float
        assert inputDataNodes['pressureMax'].dtype == float
        assert inputDataNodes['isSource'].dtype == int
        for index, row in inputDataNodes.iterrows():
            self.G.add_node(row['name'],
                            pos=(row['x'], row['y']),
                            pressureMin=row['pressureMin'],
                            pressureMax=row['pressureMax'],
                            isSource=row['isSource'])
        print("\nNodes:\n")
        print(inputDataNodes)

    def __readPipes(self, gasnetworkPipes):
        """Add one directed arc per pipeline of the original network (isOriginal=True)."""
        inputDataPipes = gasnetworkPipes
        assert inputDataPipes['from'].dtype == object
        assert inputDataPipes['to'].dtype == object
        assert inputDataPipes['flowMin'].dtype == int
        assert inputDataPipes['flowMax'].dtype == int
        assert inputDataPipes['length'].dtype == float
        assert inputDataPipes['diameter'].dtype == int
        assert inputDataPipes['name'].dtype == object
        for index, row in inputDataPipes.iterrows():
            weymouthConst = self.__computeWeymouthConst(row['length'], row['diameter'])
            self.G.add_edge(row['from'],
                            row['to'],
                            flowMin=row['flowMin'],
                            flowMax=row['flowMax'],
                            length=row['length'],
                            diameter=row['diameter'],
                            name=row['name'],
                            weymouth=weymouthConst,
                            isOriginal=True)
        print("\nPipes:\n")
        print(inputDataPipes)

    def __readExtensionCandidates(self, gasnetworkExtensions):
        """Add one directed arc per extension candidate (isOriginal=False, with build cost)."""
        extensions = gasnetworkExtensions
        assert extensions['from'].dtype == object
        assert extensions['to'].dtype == object
        assert extensions['length'].dtype == float
        assert extensions['diameter'].dtype == float
        assert extensions['cost'].dtype == float
        assert extensions['name'].dtype == object
        for index, row in extensions.iterrows():
            weymouthConst = self.__computeWeymouthConst(row['length'], row['diameter'])
            # candidates get a wide flow range; whether they carry flow at all
            # is decided by the optimization model's binary build variables
            self.G.add_edge(row['from'],
                            row['to'],
                            flowMin=-1000000,
                            flowMax=1000000,
                            length=row['length'],
                            diameter=row['diameter'],
                            name=row['name'],
                            weymouth=weymouthConst,
                            cost=row['cost'],
                            isOriginal=False)
        print("\nExtension candidates:\n")
        print(extensions)

    def __readScenarios(self, gasnetworkScenarios):
        """Store each scenario table as a dict {node name: in/out quantity}."""
        self.scnList = []
        for scn in gasnetworkScenarios:
            scenario = {}
            assert scn['name'].dtype == object
            assert scn['value'].dtype == float
            for index, row in scn.iterrows():
                scenario[row['name']] = row['value']
            self.scnList.append(scenario)

    def __computeWeymouthConst(self, length, diameter):
        """Compute the pipe-specific constant used in the linearized weymouth equation.

        The constant depends on pipe length (km) and diameter (mm) plus fixed
        gas properties; the result is truncated to two decimals.
        """
        length = float(length)      # in km
        diameter = float(diameter)  # in mm
        roughness = 0.05
        temperature = 281.15
        density = 0.616
        zValue = 0.8
        c = diameter
        c = pow(c, 5) * pow((2 * log(3.7 * c / roughness) / log(10)), 2) * 96.074830e-15
        c = c / (zValue * temperature * length * density)
        c = float(int(c * 100.0)) / 100.0  # truncate to two decimals
        return c

    # ---- read access helpers -------------------------------------------------
    def getScnList(self):
        return self.scnList
    def getNodes(self):
        return self.G.nodes()
    def getArcs(self):
        return self.G.edges()
    def getOrigArcs(self):
        # arcs of the input network only
        isOriginal = nx.get_edge_attributes(self.G, 'isOriginal')
        return [arc for arc in self.G.edges() if isOriginal[arc]]
    def getExtArcs(self):
        # extension candidates only
        isOriginal = nx.get_edge_attributes(self.G, 'isOriginal')
        return [arc for arc in self.G.edges() if isOriginal[arc] == False]
    def getFromNode(self, arc):
        return arc[0]
    def getToNode(self, arc):
        return arc[1]
    def isSource(self, node):
        isSource = nx.get_node_attributes(self.G, 'isSource')
        return bool(isSource[node])
    def getOutArcs(self, node):
        return self.G.out_edges(node)
    def getInArcs(self, node):
        return self.G.in_edges(node)
    def getNodePressureMax(self, node):
        pressureMax = nx.get_node_attributes(self.G, 'pressureMax')
        return float(pressureMax[node])
    def getNodePressureMin(self, node):
        pressureMin = nx.get_node_attributes(self.G, 'pressureMin')
        return float(pressureMin[node])
    def getArcFlowMax(self, arc):
        flowMax = nx.get_edge_attributes(self.G, 'flowMax')
        return float(flowMax[arc])
    def getArcFlowMin(self, arc):
        flowMin = nx.get_edge_attributes(self.G, 'flowMin')
        return float(flowMin[arc])
    def getArcCost(self, arc):
        # only extension arcs carry a 'cost' attribute
        cost = nx.get_edge_attributes(self.G, 'cost')
        return float(cost[arc])
    def getWeymouthConst(self, arc):
        weymouth = nx.get_edge_attributes(self.G, 'weymouth')
        return float(weymouth[arc])
    def getArcLength(self, arc):
        length = nx.get_edge_attributes(self.G, 'length')
        return float(length[arc])
    def getArcDiameter(self, arc):
        diameter = nx.get_edge_attributes(self.G, 'diameter')
        return float(diameter[arc])
    def getArcName(self, arc):
        name = nx.get_edge_attributes(self.G, 'name')
        return name[arc]
    def getGraphCopy(self):
        return self.G.copy()
    def getScnScale(self):
        return self.scale
# + deletable=true editable=true
# Build the data store from the loaded tables; prints a network summary.
ds = DataStore(gasnetworkNodes, gasnetworkPipes, gasnetworkExtensions, gasnetworkScenarios, scale, inputs)
# + [markdown] deletable=true editable=true
# #### Visualize input gas network and extensions
# + deletable=true editable=true
#dd-ignore
import matplotlib.pyplot as plt
print("Original gas network:\n")
# Draw only the original pipes: copy the graph and drop extension candidates.
H = ds.getGraphCopy()
for arc in ds.getExtArcs():
    H.remove_edge(*arc)
nx.draw(H,
        nx.get_node_attributes(H, 'pos'),
        arrows=False)
plt.show()
print("Original gas network with extension candidates:\n")
# Draw the original network again, then overlay the extension candidates in
# blue via an explicit edgelist.
# NOTE(review): the candidates were removed from H above; this relies on
# draw_networkx_edges drawing edges not present in H -- confirm.
H = ds.getGraphCopy()
for arc in ds.getExtArcs():
    H.remove_edge(*arc)
nx.draw(H,
        nx.get_node_attributes(H, 'pos'),
        arrows=False)
nx.draw_networkx_edges(H,
                       nx.get_node_attributes(H, 'pos'),
                       edgelist=ds.getExtArcs(),
                       arrows=False,
                       edge_color='b',
                       width=3)
plt.show()
# + [markdown] deletable=true editable=true
# ### Step 4: Set up the optimization model
# + deletable=true editable=true
from docplex.mp.model import Model
# CPLEX MIP model for the topology optimization problem
mdl = Model(name="Gas_Network_Optimization")
# + [markdown] deletable=true editable=true
# #### Define the optimization variables
#
# Define the decision variables for deciding whether an extension candidate is build
# + deletable=true editable=true
# Binary build decision per extension candidate: x[arc] == 1 means "build it".
x = {}
for arc in ds.getExtArcs():
    x[arc] = mdl.binary_var(name="x(%s,%s)" % (ds.getFromNode(arc), ds.getToNode(arc)))
# + [markdown] deletable=true editable=true
# For each scenario define the squared pressure variables for each node and the flow variables for each arc
# + deletable=true editable=true
# Per scenario: squared-pressure variable pi[node, scenario] and flow
# variable q[arc, scenario].
pi = {}
q = {}
for index in range(len(ds.getScnList())):
    for node in ds.getNodes():
        # bounds are the squared pressure limits; int() truncates the squares
        # (presumably intentional coarsening of the bounds -- TODO confirm)
        pi[node,index] = mdl.continuous_var(name="p(%s,%s)" % (node,index),
                                            ub=int(pow(ds.getNodePressureMax(node), 2)),
                                            lb=int(pow(ds.getNodePressureMin(node), 2)))
    for arc in ds.getArcs():
        q[arc,index] = mdl.continuous_var(name="q(%s,%s,%s)" % (ds.getFromNode(arc), ds.getToNode(arc), index),
                                          ub=ds.getArcFlowMax(arc),
                                          lb=ds.getArcFlowMin(arc))
# + [markdown] deletable=true editable=true
# #### Express the physical constraints
# + [markdown] deletable=true editable=true
# Add flow conservation constraints for every scenario
# + deletable=true editable=true
# Flow conservation: net outflow at each node equals the (scaled) scenario
# quantity for every scenario.
for index, scn in enumerate(ds.getScnList()):
    for node in ds.getNodes():
        scale = ds.getScnScale()
        mdl.add_constraint(mdl.sum(q[arc,index] for arc in ds.getOutArcs(node)) -
                           mdl.sum(q[arc,index] for arc in ds.getInArcs(node)) == scn[node] * scale)
# + [markdown] deletable=true editable=true
# Add approximation of weymouth constraints for every arc in every scenario stating that arc flow is induced by the squared pressure difference at the end nodes
# + deletable=true editable=true
# Linearized weymouth equation on every original pipe, for every scenario:
# q = weymouth * (pi_from - pi_to)
for index in range(len(ds.getScnList())):
    for arc in ds.getOrigArcs():
        mdl.add_constraint(q[arc,index] -
                           ds.getWeymouthConst(arc) * pi[ds.getFromNode(arc),index] +
                           ds.getWeymouthConst(arc) * pi[ds.getToNode(arc),index] == 0)
# + [markdown] deletable=true editable=true
# Add approximation of weymouth constraints on extension candidates using indicator constraints. Also ensure that the flow equals zero if the arc is not build
# + deletable=true editable=true
# Indicator constraints on the extension candidates:
#   x[arc] == 1  ->  linearized weymouth equation holds on the new pipe
#   x[arc] == 0  ->  flow through the (unbuilt) pipe is zero
for index in range(len(ds.getScnList())):
    for arc in ds.getExtArcs():
        mdl.add_indicator(x[arc],
                          q[arc,index] -
                          ds.getWeymouthConst(arc) * pi[ds.getFromNode(arc),index] +
                          ds.getWeymouthConst(arc) * pi[ds.getToNode(arc),index] == 0,
                          1)
        mdl.add_indicator(x[arc], q[arc,index] == 0, 0)
# + [markdown] deletable=true editable=true
# #### Express the business objective
# + [markdown] deletable=true editable=true
# Set the objective function which is minimizing the investment costs for the new pipes
# + deletable=true editable=true
# Objective: minimize total construction cost of the selected candidates.
mdl.minimize(mdl.sum(ds.getArcCost(arc) * x[arc] for arc in ds.getExtArcs()))
# + [markdown] deletable=true editable=true
# Set objective as KPI for the model
# + deletable=true editable=true
# Register the total investment cost as a model KPI so it is reported with the run.
total_cost = mdl.sum(ds.getArcCost(arc) * x[arc] for arc in ds.getExtArcs())
mdl.add_kpi(total_cost, "Total Cost")
# + [markdown] deletable=true editable=true
# ## Step 5: Solve model and save the solution
# + deletable=true editable=true
from docplex.mp import sdetails  # NOTE(review): apparently unused import -- confirm before removing
print("\nSolving model....\n")
# Solve the MIP; msol is falsy when no feasible solution exists.
msol = mdl.solve(log_output=True)
print("Solving done after %.2f" % mdl.get_solve_details().time + " seconds.\n")
# + [markdown] deletable=true editable=true
# Store KPIs
# + deletable=true editable=true
# Evaluate every model KPI and store (name, value) pairs in a dataframe.
all_kpis = [(kp.name, kp.compute()) for kp in mdl.iter_kpis()]
df_kpis = pd.DataFrame(all_kpis, columns=['kpi', 'value'])
# + [markdown] deletable=true editable=true
# Save the extensions that should be build
# + deletable=true editable=true
# Collect one row per extension candidate and build the result table with a
# single DataFrame constructor. (Replaces the former row-by-row
# DataFrame.append pattern, which is deprecated in modern pandas and
# quadratic in the number of candidates.)
ext_columns = ['Name', 'From', 'To', 'Length', 'Diameter', 'Cost', 'Build']
ext_rows = []
if msol:
    for arc in ds.getExtArcs():
        name = ds.getArcName(arc)
        # 'Build' is 1 exactly when the binary decision variable selects the candidate
        ext_rows.append([name,
                         ds.getFromNode(arc),
                         ds.getToNode(arc),
                         ds.getArcLength(arc),
                         ds.getArcDiameter(arc),
                         ds.getArcCost(arc),
                         int(msol[x[arc]])])
extensions = pd.DataFrame(ext_rows, columns=ext_columns)
outputs['extensions_to_build'] = extensions
# + [markdown] deletable=true editable=true
# ## Step 6: Investigate the solution
#
# Visualize the result of the optimization
# + deletable=true editable=true
#dd-ignore
# Visualize the optimization result: built extension pipes and the
# per-scenario pressure distribution over the network.
if msol:
    # Retrieve solution values
    pressure = {}   # (node, scenario index) -> pressure (sqrt of squared-pressure var)
    origflow = {}   # (arc, scenario index)  -> flow on original pipes
    extflow = {}    # (arc, scenario index)  -> flow on built extension pipes
    for index in list(range(len(ds.getScnList()))):
        for node in ds.getNodes():
            pressure[node, index] = sqrt(msol[pi[node, index]])
        for arc in ds.getOrigArcs():
            origflow[arc, index] = msol[q[arc, index]]
        for arc in ds.getExtArcs():
            if msol[x[arc]] > 0.5:  # binary variable: candidate was built
                extflow[arc, index] = msol[q[arc, index]]
    # NOTE(review): reports the solver's best bound rather than the objective
    # value of the returned solution -- confirm intended.
    print("Investment cost: " + str(mdl.get_solve_details().best_bound) + " Mio. €\n")
    # Show built arcs
    print("Number of built pipes: " + str(len([arc for arc in ds.getExtArcs() if msol[x[arc]] > 0.5])) + "\n")
    print("Visualization of built pipes:\n")
    for arc in [arc for arc in ds.getExtArcs() if msol[x[arc]] > 0.5]:
        # parenthesized for Python 2/3 compatibility (was a Python-2 print statement)
        print(ds.getArcName(arc) + " " + str(arc))
    # Draw the original network plus the built candidates in blue.
    H = ds.getGraphCopy()
    for arc in [arc for arc in ds.getExtArcs() if msol[x[arc]] < 0.5]:
        H.remove_edge(*arc)
    nx.draw(H,
            nx.get_node_attributes(H, 'pos'),
            arrows=False)
    nx.draw_networkx_edges(H,
                           nx.get_node_attributes(H, 'pos'),
                           edgelist=[arc for arc in ds.getExtArcs() if msol[x[arc]] > 0.5],
                           arrows=False,
                           edge_color='b',
                           width=3)
    plt.show()
    # Show scenario pressure distribution
    for index in list(range(len(ds.getScnList()))):
        print("Solution visualization of scenario %s" % (index + 1) + "\n")
        H = ds.getGraphCopy()
        for arc in [arc for arc in ds.getExtArcs() if msol[x[arc]] < 0.5]:
            H.remove_edge(*arc)
        # Node colors encode squared pressure on a rainbow scale (red = high).
        nx.draw(H,
                nx.get_node_attributes(H, 'pos'),
                arrows=False,
                node_color=[pow(pressure[node, index], 2) for node in H.nodes()],
                cmap='rainbow',
                vmin=0,
                vmax=pow(max(ds.getNodePressureMax(node) for node in H.nodes()), 2))
        plt.show()
else:
    print("\n No solution found, specified gas quantities cannot be transported. The provided extension candidates are not sufficient.\n")
# + [markdown] deletable=true editable=true
# #### Comments
#
# The node colors are selected according to a rainbow color scheme, i.e., high pressure is marked in red while low pressure is marked blue. Modifying the scenario scaling, or the input scenarios, or the set of extension candidates in section [Step 2: Load parameters](#Step-2:-Load-parameters) yields different results. These parameters allow the experts at transmission system operators to automatically compute reasonable and cost-efficient gas network extensions.
# + [markdown] deletable=true editable=true
# ### Authors
#
# Dr. <NAME> is a Senior Data Science Engineer with Data Science Elite team (IBM Analytics) where he specializes in Decision Support solutions.
| jupyter/.ipynb_checkpoints/Gas_Network_Optimization.jupyter-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Stochastic Processes: <br>Data Analysis and Computer Simulation
# <br>
#
#
# # Stochastic processes in the real world
# <br>
#
#
# # 3. A Stochastic Dealer Model II
# <br>
# + [markdown] slideshow={"slide_type": "slide"}
# # 3.1. Preparation
# + slideshow={"slide_type": "-"}
% matplotlib inline
import numpy as np # import numpy library as np
import math # use mathematical functions defined by the C standard
import matplotlib.pyplot as plt # import pyplot library as plt
import pandas as pd # import pandas library as pd
from datetime import datetime
from pandas_datareader import data as pdr
from pandas_datareader import wb as pwb
plt.style.use('ggplot') # use "ggplot" style for graphs
# shared matplotlib defaults for every figure in this notebook
pltparams = {'legend.fontsize':16,'axes.labelsize':20,'axes.titlesize':20,
             'xtick.labelsize':12,'ytick.labelsize':12,'figure.figsize':(7.5,7.5),}
plt.rcParams.update(pltparams)
# + slideshow={"slide_type": "slide"}
# Logarithmic return of price time series
def logreturn(St,tau=1):
return np.log(St[tau:])-np.log(St[0:-tau]) # Eq.(J2) : G_tau(t) = log(S(t+tau)) - log(S(t))
# normalize data to have unit variance (<(x - <x>)^2> = 1)
def normalized(data):
return ((data)/np.sqrt(np.var(data)))
# compute normalized probability distribution function
# compute normalized probability distribution function
def pdf(data, bins=50):
    """Histogram-based PDF estimate.

    Drops NaNs, builds a density-normalized histogram and returns
    (bin centers, densities) restricted to the non-empty bins.
    """
    clean = data[~np.isnan(data)]
    hist, edges = np.histogram(clean, bins=bins, density=True)
    centers = 0.5 * (edges[:-1] + edges[1:])
    occupied = hist > 0.0
    return centers[occupied], hist[occupied]
# add logarithmic return data to pandas DataFrame data using the 'Adjusted Close' stock price for each day
def computeReturn(data, name, tau):
data[name]=pd.Series(normalized(logreturn(data['Adj Close'].values, tau)),index=data.index[:-tau])
# -
# Download ~20 years of Toyota (ticker 7203) daily prices from Yahoo Finance
# and add the 1-day normalized log-return column.
end_time = datetime.now()
start_time = datetime(end_time.year - 20, end_time.month, end_time.day)
toyota = pdr.DataReader('7203','yahoo',start_time,end_time) # import toyota stock
computeReturn(toyota, 'Return d1', 1)
# +
# ONLY execute this cell if the PREVIOUS cell returns an error
def read_yahoo_data(fname):
    """Load a Yahoo-Finance CSV export, indexed by its first (date) column.

    Cells containing the literal string "null" are treated as missing and
    the affected rows are dropped.
    """
    frame = pd.read_csv(fname, index_col=0, na_values="null")
    return frame.dropna()
# Offline fallback: load a previously downloaded Yahoo CSV and recompute returns.
toyota = read_yahoo_data('./yahoo_finance/TM.csv')
computeReturn(toyota, 'Return d1', 1)
# + [markdown] cell_style="center" slideshow={"slide_type": "slide"}
# # 3.2. The Dealer Model
# - <NAME>, <NAME>, <NAME> and <NAME>, <i>Physical Revew E</i> <b>79</b>, 051120 (2009).
#
# <!--
# <img src="fig/dealers0.svg" width=700>
# -->
# 
#
# - Transaction criterion
# $$
# \lvert p_i(t) - p_j(t) \rvert \ge L\tag{L1}
# $$
# - Market price of transaction
# $$
# P = \frac{1}{2}\left(p_1 + p_2\right) \tag{L2}
# $$
# - Logarithmic price return
# $$
# G_\tau(t) \equiv \log{P(t+\tau)} - \log{P(t)} \tag{L3}
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# # 3.3. The dealer model with memory (model 2)
#
# - To improve the model, Yamada et al. (PRE 79, 051120, 2009) added the effect of "trend-following" predictions.
#
# - Dynamics is captured by a random walk with a drift/memory term
#
# $$
# p_i(t + \Delta t) = p_i(t) + d\langle\Delta P\rangle_M \Delta t + c f_i(t), \qquad i=1,2\tag{L4}
# $$
#
# $$
# f_i(t) = \begin{cases}
# +\Delta p & \mathrm{prob.} 1/2 \\
# -\Delta p & \mathrm{prob.} 1/2\tag{L5}
# \end{cases}
# $$
# - The constant $d$ determines whether the dealer is a "trend-follower" ($d>0$) or a "contrarian" ($d < 0$)
#
# - The added term represents a moving average over the previous price changes
#
# $$
# \langle\Delta P\rangle_M = \frac{2}{M (M+1)}\sum_{k=0}^{M-1}(M-k)\Delta P(n-k)\tag{L6}
# $$
#
# $$
# \Delta P(n) = P(n) - P(n-1) : \textrm{Market price change at the n-th tick}
# $$
#
# - $\langle\Delta P\rangle_M$ is constant during the Random-Walk process, it is only updated at the transaction events
# + [markdown] slideshow={"slide_type": "slide"}
# # 3.3. Dealer model as a 2D random walk
# - The dealer model can again be understood as a standard 2D Random walk with absorbing boundaries.
# - Introduce the price difference $D(t)$ and average $A(t)$
# $$
# D(t) = p_1(t) - p_2(t) \tag{L7}
# $$
# $$
# A(t) = \frac{1}{2}\big(p_1(t) + p_2(t)\big)\tag{L8}
# $$
# - Dynamics of $D$ and $A$ describe a 2D random walk
# $$
# D(t+\Delta t) = D(t) + \begin{cases}
# +2 c \Delta p & \textrm{probability 1/4} \\
# 0 & \textrm{probability 1/2} \\
# -2 c\Delta p& \textrm{probability 1/4}
# \end{cases} \tag{L9} \\
# $$
# $$
# A(t+\Delta t) = A(t) + d\langle\Delta P\rangle_M \Delta t + \begin{cases}
# +c\Delta p & \textrm{probability 1/4}\\
# 0 & \textrm{probability 1/2}\\
# -c\Delta p &\textrm{probability 1/4}
# \end{cases}\tag{L10}
# $$
# - When $D(t) = \pm L$ a transaction occurs and the random walk ends, the "particle" is absorbed by the boundary.
# + slideshow={"slide_type": "slide"}
params={'L':0.01,'c':0.01,'dp':0.01,'dt':0.01**2, 'd':1.25, 'M':1} # define model parameters

def model2RW(params, p0, deltapm):
    """Run the 2D random walk of the dealer model until one transaction occurs.

    Returns (D(t), A(t) - A(0)): the price-difference path and the average
    mid-price path centered at its starting value.
    """
    prices = np.array([p0[0], p0[1]])    # dealers' mid-prices p_1, p_2
    step = params['c'] * params['dp']    # random-walk step size c*dp
    trend = params['d'] * params['dt']   # trend coefficient d*dt
    diff_path = [prices[0] - prices[1]]  # D(t) samples
    avg_path = [np.average(prices)]      # A(t) samples
    # walk until the spread reaches the transaction threshold L, Eq. (L1)
    while np.abs(prices[0] - prices[1]) < params['L']:
        prices = prices + np.random.choice([-step, step], size=2)  # Eq. (L4) noise
        prices = prices + trend * deltapm                          # Eq. (L4) trend-following term
        diff_path.append(prices[0] - prices[1])
        avg_path.append(np.average(prices))
    return np.array(diff_path), np.array(avg_path) - avg_path[0]
# + slideshow={"slide_type": "slide"}
# Visualize one transaction of the dealer model in the (A, D) plane:
# Model 1 (no trend term, deltapm=0) vs Model 2 (deltapm=0.003).
fig,ax=plt.subplots(figsize=(7.5,7.5),subplot_kw={'xlabel':r'$A(t) = \frac{1}{2}\left(p_1(t) + p_2(t)\right) - p_0$', 'ylabel':r'$D(t) = p_1(t) - p_2(t)$'})
p0 = [100.25, 100.25]  # both dealers start at the same mid-price
params={'L':0.01,'c':0.01,'dp':0.01,'dt':0.01**2, 'd':1.25, 'M':1} # define model parameters
for deltapm,lbl in zip([0, 0.003], ['Model 1', 'Model 2']):
    np.random.seed(123456)  # same seed for both models so the trajectories are comparable
    Dt,At = model2RW(params, p0, deltapm)
    ax.plot(At,Dt,alpha=0.8,label=lbl) #plot random walk trajectory
    ax.plot(At[-1],Dt[-1],marker='o',color='k', markersize=10) #last point (absorbed at D = +/-L)
    print(lbl+' : number of steps = ',len(At),', price change = ', At[-1])
ax.plot(0, 0, marker='s', color='k', markersize=10) # starting position
ax.plot([-0.01,0.03],[params['L'],params['L']],color='k') #top absorbing boundary P=L
ax.plot([-0.01,0.03],[-params['L'],-params['L']],color='k') #bottom absorbing boundary P=-L
ax.set_ylim([-0.012, 0.012])
ax.set_xlim([-0.01, 0.01])
ax.legend(loc=4,framealpha=0.8)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # 3.4. Perform simulations
# + code_folding=[15] slideshow={"slide_type": "-"}
params={'L':0.01,'c':0.01,'dp':0.01,'dt':0.01**2, 'd':1.25, 'M':1} # define model parameters
def model2(params, p0, numt):
    """Simulate `numt` transactions (ticks) of the trend-following dealer model.

    Parameters
    ----------
    params : dict
        Spread threshold 'L', step scale 'c', step size 'dp', time step 'dt',
        trend strength 'd' and memory length 'M' (ticks in the running average).
    p0 : sequence of two floats
        Initial mid-prices of the two dealers.
    numt : int
        Number of transactions to simulate.

    Returns
    -------
    (ticktime, mktprice) : tuple of ndarrays
        Integer simulation time of each transaction and the market price P(n).
    """
    def avgprice(dpn):  # weighted running average of past price changes, Eq.(L6)
        M = len(dpn)
        weights = np.array(range(1, M + 1)) * 2.0 / (M * (M + 1))
        return weights.dot(dpn)
    mktprice = np.zeros(numt)    # market price P(n)
    dmktprice = np.zeros(numt)   # price change dP(n), input of the running average
    # FIX: np.int was deprecated and removed in NumPy 1.24 -- use the builtin int
    ticktime = np.zeros(numt, dtype=int)  # transaction (tick) times
    price = np.array([p0[0], p0[1]])      # dealers' mid-prices (p1, p2)
    time, tick = 0, 0                     # real time t and tick counter n
    deltapm = 0.0                         # trend term d*<dP>_M*dt for the current walk
    cdp = params['c']*params['dp']        # random step size
    ddt = params['d']*params['dt']        # amplitude of the trend term
    while tick < numt:  # loop over ticks
        while np.abs(price[0]-price[1]) < params['L']:  # transaction criterion Eq.(L1)
            price = price + deltapm + np.random.choice([-cdp, cdp], size=2)  # random walk step Eq.(L4)
            time += 1  # update real time
        price[:] = np.average(price)  # both mid-prices jump to the new market price Eq.(L2)
        mktprice[tick] = price[0]     # save market price
        dmktprice[tick] = mktprice[tick] - mktprice[np.max([0, tick-1])]  # dP(n) = P(n) - P(n-1)
        ticktime[tick] = time         # save transaction time
        tick += 1
        tick0 = np.max([0, tick - params['M']])  # start tick of the running-average window
        deltapm = avgprice(dmktprice[tick0:tick])*ddt  # updated trend term for the next walk
    return ticktime, mktprice
# -
# - A simulation is performed if you run the cell below, but depending on your computer power it may take quite long time until it finishes with properly creating the simulation data "model2.txt".
# - You may skip this cell and use pre-calculated simulation data "model2.txt" which can be downloaded from our website to continue further data analyses.
np.random.seed(0)  # fixed seed so the saved dataset is reproducible
ticktime2,mktprice2 = model2(params, [100.25, 100.25], 5000)  # 5000 transactions, both dealers start at 100.25
np.savetxt('model2.txt',np.transpose([ticktime2, mktprice2]))  # two columns: tick time, market price
# + [markdown] slideshow={"slide_type": "slide"}
# # 3.5. Analyses
# + slideshow={"slide_type": "-"}
# Load both simulated data sets and compare Model 1 (no trend) with Model 2.
ticktime,mktprice=np.loadtxt('model1.txt',unpack=True) # read saved data from file
ticktime2,mktprice2=np.loadtxt('model2.txt', unpack=True)
# NOTE(review): `normalized` and `logreturn` are helpers defined earlier in the
# course material -- presumably unit-variance scaling and log price differences; verify upstream.
timeinterval=normalized((ticktime[1:]-ticktime[0:-1])*params['dt']) # compute time difference between subsequent trades
timeinterval2=normalized((ticktime2[1:]-ticktime2[0:-1])*params['dt'])
dprice=normalized(logreturn(mktprice,1)) # compute logarithmic return of the price over one tick, normalized to have unit variance (Eq. J2)
dprice2=normalized(logreturn(mktprice2,1))
# Three panels: market price, one-tick returns, transaction intervals.
fig,[ax,bx,cx]=plt.subplots(figsize=(18,6),ncols=3,subplot_kw={'xlabel':r'Time n (ticks)'})
ax.plot(mktprice2, lw=2, label='Model 2')
ax.plot(mktprice, alpha=0.5, lw=2, label='Model 1')
ax.legend()
ax.set_ylim(98,102)
ax.set_ylabel(r'Market price $P_n$')
bx.plot(dprice2, lw=2)
bx.plot(dprice, alpha=0.5, lw=2)
bx.set_ylabel(r'Price return $G_1$')
cx.plot(timeinterval2, lw=2)
cx.plot(timeinterval, alpha=0.5, lw=2)
cx.set_ylabel(r'Transaction interval')
fig.tight_layout() # get nice spacing between plots
plt.show()
# + slideshow={"slide_type": "slide"}
# Distribution of absolute returns: Model 2 vs real Toyota data, with exponential
# and power-law guide lines on log-log axes.
fig,ax,=plt.subplots(figsize=(15,7.5),subplot_kw={'xlabel':r'Absolute price return $|G_1|$', 'ylabel':r'Probability density'})
for data,lbl in zip([dprice2, toyota['Return d1']], ['Model 2', 'Toyota']):
    edges, hist = pdf(np.abs(data), bins=50)  # `pdf` helper defined earlier -- presumably bin centers and density; verify upstream
    ax.plot(edges, hist, label=lbl, lw=2)
x = np.linspace(0.1, 10)
ax.plot(x, 2*np.exp(-1.5*x),lw=4,color='gray',ls='--',alpha=0.5,label=r'Exponential')
ax.plot(x, 0.3*x**(-3), lw=4, color='k', ls='--', alpha=0.5, label=r'Power Law $\propto x^{-3}$')
ax.set_xlim(5e-1, 2e1)
ax.set_ylim(5e-4, 1)
ax.legend()
ax.semilogx()  # log-log scale makes the power-law tail a straight line
ax.semilogy()
plt.show()
# + code_folding=[24] slideshow={"slide_type": "slide"}
params={'L':0.01,'c':0.01,'dp':0.01,'dt':0.01**2, 'd':1.00, 'M':10} # define model parameters
def model2t(params, p0, numt):
    """Simulate `numt` ticks of the dealer model with a time-dependent trend coefficient.

    The d-coefficient switches regime every 1000 ticks:
    contrarian (d<0), random walk (d=0), trend-follower (d>0), and finally an
    adaptive regime whose sign follows the running average of price changes.

    Returns (ticktime, mktprice) as in `model2`.
    """
    def avgprice(dpn):  # weighted running average of past price changes, Eq.(L6)
        M = len(dpn)
        weights = np.array(range(1, M + 1)) * 2.0 / (M * (M + 1))
        return weights.dot(dpn)
    def dtime(i, dpm):  # return the time-varying d-coefficient for tick i
        if i <= 1000:       # contrarians
            return -params['d']
        elif i <= 2000:     # random walkers: no memory
            return 0.0
        elif i <= 3000:     # trend-followers
            return params['d']
        elif dpm >= 0.0:    # trend-followers while the running average rises
            return params['d']
        else:               # contrarians while it falls
            return -params['d']
    mktprice = np.zeros(numt)    # market price P(n)
    dmktprice = np.zeros(numt)   # price change dP(n) for the running average
    # FIX: np.int was deprecated and removed in NumPy 1.24 -- use the builtin int
    ticktime = np.zeros(numt, dtype=int)  # transaction (tick) times
    price = np.array([p0[0], p0[1]])      # dealers' mid-prices (p1, p2)
    time, tick = 0, 0                     # real time t and tick counter n
    deltapm = 0.0                         # trend term d*<dP>_M*dt for the current walk
    cdp = params['c']*params['dp']        # random step size
    while tick < numt:  # loop over ticks
        ddt = dtime(tick, deltapm)*params['dt']  # trend amplitude (now time dependent)
        while np.abs(price[0]-price[1]) < params['L']:  # transaction criterion Eq.(L1)
            price = price + deltapm + np.random.choice([-cdp, cdp], size=2)  # random walk step Eq.(L4)
            time += 1  # update real time
        price[:] = np.average(price)  # both mid-prices jump to the new market price Eq.(L2)
        mktprice[tick] = price[0]
        dmktprice[tick] = mktprice[tick] - mktprice[np.max([0, tick-1])]  # dP(n) = P(n) - P(n-1)
        ticktime[tick] = time
        tick += 1
        tick0 = np.max([0, tick - params['M']])  # window start for the running average
        deltapm = avgprice(dmktprice[tick0:tick])*ddt  # trend term for the next walk
    return ticktime, mktprice
# -
# - A simulation is performed if you run the cell below, but depending on your computer power it may take quite long time until it finishes with properly creating the simulation data "model2t.txt".
# - You may skip this cell and use pre-calculated simulation data "model2t.txt" which can be downloaded from our website to continue further data analyses.
np.random.seed(0)  # fixed seed for a reproducible dataset
ticktime2t,mktprice2t = model2t(params, [100.25, 100.25], 4001)  # 4001 ticks: four 1000-tick d-regimes
np.savetxt('model2t.txt',np.transpose([ticktime2t, mktprice2t]))
# + slideshow={"slide_type": "slide"}
ticktime2t,mktprice2t=np.loadtxt('model2t.txt',unpack=True) # read saved data from file
# Plot price and one-tick returns for the four consecutive d-regimes of model2t.
fig,[ax,bx]=plt.subplots(figsize=(15,7.5),nrows=2, sharex=True)
for i,lbl in zip(range(4), [r'$d=-1$', r'$d=0$', r'$d=+1$', r'$d=d(\left\langle P\right\rangle_M)$']):
    n0,n1 = i*1000, (i+1)*1000  # tick window of this regime
    dprice=normalized(logreturn(mktprice2t[n0:n1],1))
    ax.plot(range(n0,n1), mktprice2t[n0:n1])
    ax.plot([n0,n0],[100,102], color='gray')  # vertical separator between regimes
    ax.text(n0+500, 100.05, lbl, fontsize=22)
    bx.plot([n0,n0],[-6,6],color='gray')
    bx.plot(range(n0+1,n1), dprice)
ax.set_ylim(100,101)
bx.set_ylim(-6,6)
ax.set_ylabel(r'Market price $P_n$')
bx.set_ylabel(r'Price Return $G_1$')
bx.set_xlabel(r'Time $n$ (ticks)')
fig.tight_layout()
plt.show()
# -
| edx-stochastic-data-analysis/downloaded_files/06_1/06/009x_63.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Advanced Python Concepts
#
# In this tutorial, a few advanced concepts (not class related) are introduced. This includes
#
# - packing and unpacking
# - context manager
# - decorator and factories
# - exceptions
import time
# ## Packing and unpacking of values
#
# Using `*` or `**` we can pack/unpack list-like objects and dict-like objects, respectively. They act as a "removal" of the parentheses when situated on the right and as an "adder" of parentheses when situated on the left of the assignment operator (`=`).
#
# Let's play around...
a, c, *b = [3, 4, 4.5, 5, 6]  # starred target packs the leftover elements: b == [4.5, 5, 6]
b
# As can be seen, b catches now all the remaining elements in a list. Interesting to see is also the special case if no element is left.
d1, d2, *d3, d4 = [1, 2, 3] # nothing left for d3
d3
# This is simply an empty list. However, this has the advantage that we know that it is _always_ a list.
a = [3, 4, 5]
# Multiple unpackings can be added together (however, the other way around does not work: multiple _packings_ are not possible as it is ill-defined which variable would get how many elements).
d, e, f, g, h, i = *a, *b  # six targets <- three elements of a plus three of b
# Now we should be able to understand the `*args` and `**kwargs` for functions. Let's look at it:
def func(*args, **kwargs):
    """Show packing: positionals land in the tuple ``args``, keywords in the dict ``kwargs``."""
    print('args are {}'.format(args))
    print("kwargs are {}".format(kwargs))

mykwargs = {'a': 5, 'b': 3}
myargs = [1, 3, 4]
func(*myargs, *mykwargs)  # note: * on a dict unpacks its *keys*, so 'a' and 'b' arrive positionally
func(5, a=4)
# +
# play around with it!
# -
# ## Context manager
#
# A context manager is an object that responds to a `with` statement. It may return something. The basic idea is that some action is performed when entering a context and again when exiting it.
#
# ```
# with context as var:
# # do something
# ```
# translates to
# ```
# # execute context entering code
# var = return_from_context_entering_code
# # do something
# # execute context leaving code
# ```
#
# The great advantage here is that the "leaving code" is automatically executed whenever we step out of the context!
#
# This proved to be incredibly useful when operations have cleanup code that we need to execute yet that is tedious to write manually and can be forgotten.
#
# ### Using `yield`
#
# One way to create a context manager is to have a function that has a `yield`.
#
# _What is `yield`?_: It's like a return, except that the execution stops at the `yield`, lets other code execute and, at some point, **continues** again where the yield was. Examples are:
# - iterator: a function that yields elements. Everytime it is called, it is supposed to yield an element and then continue from there
# - asynchronous programing: it stops and waits until something else is finished
# - in the context manager, as we will see
# +
import contextlib
@contextlib.contextmanager
def printer(x):
    """Yield *x* unchanged, announcing entry and exit of the context."""
    print('we just entered the context manager and will yield %s' % (x,))
    yield x
    print('Finishing the context manager, exiting')
# -
with printer(5) as number:  # `number` receives whatever the context manager yields
    print(f"we're inside, with number={number}")
print("left manager")  # by now the exit code of printer() has already run
# #### Where is this useful
#
# Basically with stateful objects. This includes anything that can be set and changed (mutable objects).
with open('tmp.txt', 'w') as textfile:  # the file is closed automatically when the block is left
    textfile.write('asdf')
# The implementation roughly looks like this:
# +
import contextlib
@contextlib.contextmanager
def myopen(f, mode):
    """Rough re-implementation of ``open`` as a context manager.

    The close happens in a ``finally`` block so the file handle is released
    even when the ``with`` body raises an exception (the naive
    yield-then-close version would leak it).
    """
    opened = open(f, mode)
    try:
        yield opened
    finally:
        opened.close()
# -
# **Exercise**: create a context manager that _temporarily_ sets a `'value'` key to 42 of a dict and switches it back to the old value on exit
testdict = {'value': 11, 'name': 'the answer'}
# to be invoked like this
#
# ```python
# with manager(testdict) as obj:
# # here the value is 42
# # here the value is 11
# ```
# +
# SOLUTION
@contextlib.contextmanager
def func(x):
    """Bare-bones context manager: no setup or teardown, just hand back *x*."""
    yield x

with func(5) as var1:
    print('inside', var1, sep='\n')
# -
@contextlib.contextmanager
def set_answer(obj):
    """Temporarily set ``obj['value']`` to 42, restoring the old state on exit.

    The restore runs in a ``finally`` block, so the original value comes back
    even if the ``with`` body raises.  If the key was absent on entry it is
    removed again instead of being set to ``None``.
    """
    _missing = object()  # sentinel: distinguishes "key absent" from a stored None
    old_value = obj.get('value', _missing)
    obj['value'] = 42
    try:
        yield obj
    finally:
        if old_value is _missing:
            del obj['value']
        else:
            obj['value'] = old_value
# ## Using a class
#
# Instead of using the `yield`, we can have advanced control over the enter and exit methods by creating a class and implementing the two methods `__enter__` and `__exit__`
class MyContext:
    """Class-based context manager demo: enter returns x squared, exit records 42."""

    def __init__(self, x):
        self.x = x

    def __enter__(self):
        print('entered')
        return self.x ** 2

    def __exit__(self, type_, value, traceback):
        # Receives exception info (or None, None, None); we ignore it here.
        self.x = 42
        print('exited')

with MyContext(5) as x:
    print(x)  # prints 25 -- the squared value returned by __enter__
# While a class is way more powerful and offers ways to catch exceptions and more in the exit, ususally the functional way is enough and should then be preferred. If it doesn't give you enough flexibility, remember the class, look it up and figure out all the things needed.
# ## Decorators and factories
#
# Sometimes we can't write a function fully by hand but want to create it programatically. This pattern is called a "factory". To achieve this, instead of having a function that returns an integer (an object), a list (an object), a dict (an object) or an array (an object), we return a function (an object). We see that the concept of Python, "everything is an object", starts being very useful here.
def make_power_func(power):
    """Function factory: the returned callable raises its argument to *power*."""
    return lambda x: x ** power

pow3 = make_power_func(3)
pow3(2)
def make_power_func(power):
    def func(x):
        return x ** power
    # Closures are late-binding: `func` reads `power` when it is *called*, so
    # the rebinding below makes every returned function compute x ** 42.
    power = 42
    return func
pow3 = make_power_func(3)
pow3(2)  # -> 2 ** 42, not 8 (see the late-binding comment above)
# +
# Exercise: test it here
# -
# Another example is to create a timing wrapper. **Exercise**: create a timing function that can be used as follows
#
# ```
# timed_pow3 = time_func(pow3)
# timed_pow3(...)
# ```
# HINT, scetch of solution
# ```python
# def time_func(func):
# def new_func(...):
# print('start')
# func(...)
# print('stop')
# return new_func
# ```
# SOLUTION
def timed_func(func):
    """Decorator: wrap *func* so each call prints its arguments and wall-clock duration.

    FIX: the wrapper now returns *func*'s result -- the original discarded it,
    so any wrapped function effectively returned None.
    """
    def wrapped_func(*args, **kwargs):
        print(args)
        print(kwargs)
        start = time.time()
        result = func(*args, **kwargs)  # keep the result: a wrapper must be transparent
        end = time.time()
        print(f'time needed: {end - start}')
        return result
    return wrapped_func
def add_notime(x, y):
    # Plain addition; wrapped below to demonstrate manual decoration.
    return x + y
add_timed = timed_func(add_notime)  # manual equivalent of the @timed_func decorator syntax
import time
add_timed(y=4, x=5)  # keyword arguments travel through **kwargs unchanged
# +
# test it here
# -
# ### Decorator
#
# There is another way, just syntactical sugar, to make this automatic: a decorator. It is invoked as below
@timed_func
def add(x, y):
    # Decorator syntax: equivalent to `add = timed_func(add)` right after the def.
    return x + y
# Again, as for the contextmanager, we can also use a class here to give more flexibility and create a decorator that takes _arguments_.
# ## Exceptions
#
# Exceptions are used to stop the execution at a certain point and surface to higher stacks in the code, e.g. to go up in the call stack. A typical use-case is when an error is encountered, such as the wrong type of object is given.
# Exceptions can also be caught in a `try ... except ...` block in order to handle the exception.
#
# There are a few built-in exceptions, the most common ones are:
# - `TypeError`: object has the wrong type, e.g. string instead of float
# - `ValueError`: the value of the object is illegal, e.g. negative but should be positive
# - `RuntimeError`: if a function is illegally executed or a status is wrong. E.g. if an object first has to be loaded before it gets parsed. It covers any error that does not fall into an other category.
# - `KeyError`, `IndexError`: if a key or index is not available, e.g. in a `dict` or `list`
#
# An Exception can manually be raised by
# + tags=["raises-exception"]
raise TypeError("Has to be int, not str")
# -
# Note that it is often convenient to create an instance such as in the example above where the first argument is the message (as we see in the raised Exception above), but we can also raise an exception by only using the class itself
# + tags=["raises-exception"]
raise TypeError
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Custom Exception
#
# In Python, exceptions are simply a class. And as such, we can inherit from it and create our own exception.
#
# **Attention**: inherit from `Exception` or subclasses of it such as `TypeError`, `ValueError`, but NEVER from `BaseException`.
# -
class MyError(Exception):
    """Custom exception for the demos below; derives from Exception, never BaseException."""
    pass
# + tags=["raises-exception"]
raise MyError("Hello world")
# + [markdown] pycharm={"name": "#%% md\n"}
# An exception can also be created by inheriting from an already existing exception if it is more specific and provides hints on the nature of the exception.
# -
class NegativeValueError(ValueError):
    """More specific ValueError signalling an illegal negative value."""
    pass
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Catching exceptions
#
# An exception can be caught in a `try..except` block. This works as follows:
# - if an exception is raised in the `try` block, the next `except` is invoked
# - it is tested whether the raised exception is of type subclass of the exception type specified to be caught. For example, `except TypeError` checks if the raised error is of type `TypeError` or a subclass of it.
# - if that is not the case, it goes to the next `except` statement (yes, there can be multiple)
# - ... more below
# -
try:
    raise NegativeValueError("Negative value encountered")
except ValueError as error:  # matches subclasses too: NegativeValueError IS-A ValueError
    print(f"Caught {error}")
# + [markdown] pycharm={"name": "#%% md\n"}
# By using the `as` keyword, the error that is raised is assigned to a variable. We can inspect the error now if we want or, as above, just print it.
# + [markdown] pycharm={"name": "#%% md\n"}
# If no error is specified, _any_ error is caught (this should NOT be used, except for special cases
# -
try:
    raise TypeError
# Anti-pattern, do NOT use in general!
except: # any exception if not specified -- this even swallows SystemExit and KeyboardInterrupt
    pass
# + tags=["raises-exception"]
try:
    raise TypeError("Type was wrong, unfortunately")
except TypeError as error: # first matching handler wins; later handlers are skipped
    print(f'caught TypeError: {error}')
    raise  # bare raise re-raises the active exception unchanged
except ValueError as error:
    print(f'caugth ValueError: {error}')
# + [markdown] pycharm={"name": "#%% md\n"}
# To continue from above: after the last `except`, an `else` statement is looked for. The `else` is executed if _no_ exception was raised.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
try:
    print('no error raised')
    # raise TypeError("Type was wrong, unfortunately")
except TypeError as error: # any exception
    print(f'caught Type {error}')
except ValueError as error:
    print(f'caugth Value: {error}')
else:
    print("No error")  # runs only when the try block raised nothing
print("Executed after block")
# -
# ...and finally, after the else, a `finally` block is looked for. This is *guaranteed* to be executed! Whether an exception is raised, whether it is caught or not, whether there is an `else` or not, the `finally` is _always_ executed.
#
# Therefore it is suitable for any cleanup code such as closing files, removing temporary files and more.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} tags=["raises-exception"]
try:
    # pass
    # raise TypeError("Type was wrong, unfortunately")
    raise RuntimeError("Type was wrong, unfortunately")  # matches none of the handlers below
except TypeError as error: # any exception
    print(f'caught Type {error}')
except ValueError as error:
    print(f'caugth Value: {error}')
else:
    print("No error")
finally: # POWERFUL! Guaranteed to be executed, caught or not
    print('Finally run')
print("Executed when passed")  # not reached here: the RuntimeError propagates after finally
# -
# Note that in the above example, the error was _not_ caught! All the other statements could also be omitted and only a `try...finally` block can be created.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} tags=["raises-exception"]
try:
    raise ValueError
finally:  # runs before the uncaught ValueError propagates to the caller
    print('raised')
# -
# ### pitfall "guaranteed execution"
#
# As the `finally` is guaranteed to be executed, this can have an odd effect: possible return statements can be ignored _before the finally_ **IF** the `finally` also has a return statement. The logic says here that the `finally` return _must_ be executed, as it is guaranteed to be executed.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
def func(x):
    """Demonstrate that a ``return`` inside ``finally`` overrides any earlier return.

    Whichever path runs -- the exception is caught (x == 5) or the ``else``
    branch -- the ``finally`` block's ``return 11`` always wins, so this
    function returns 11 for every input.
    """
    try:
        if x == 5:
            raise RuntimeError('called inside func')
    except RuntimeError as error:
        return error  # would return the exception object ... if not for finally
    else:
        print('else before 42')
        return 42  # would be the result ... if not for finally
        print('after else 42')  # unreachable: follows a return
    finally:
        print("cleaned up")
        return 11  # guaranteed to execute, so it overrides the returns above
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
func(6)
# -
# ## Exceptions as control-flow
#
# We are used to control-flow elements such as `if...elif...else` blocks. However, exceptions _can_ also be used for this. They do not replace other ways of control-flow, however there are sometimes situations in which they provide a golden solution to steer the execution.
#
# As an example, consider an `add` function that _sometimes_ can add three elements - which is, for the sake of a good example, favorable as more performant (real world cases of this exist in larger scale, but too complicated to explain here) - and sometimes not. Also assume that the `add` function is called again maybe inside.
# A solution is to execute `add` with three elements. If an error is raised, we catch it (the specific one), and run the function again with two arguments and add the third argument by calling `add` again.
#
# Note that this also solves the problem if `add` is called deeper nested again: we don't care _where_ it is called, we just try again with only two numbers. The advantage is that we don't need to check the output of the function; this will always be a number (and not a `None` or something like this).
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
def add(a, b, c=None):
    """Add two numbers; a third argument raises MyError (three-way add "unsupported")."""
    if c is None:
        return a + b
    raise MyError
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"} tags=["raises-exception"]
add(1, 2, 3)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
try:
    result = add(1, 2, 3)  # the three-argument call raises MyError ...
except MyError as error:
    result = add(add(1, 2), 3)  # ... so fall back to two nested two-way adds
result
| advanced-python/11AdvancedPython.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
# ## Importing required libraries for creating graph object
# +
from scipy.io import mmread
from Base import Graph
from IPython.display import display
import pandas as pd
from copy import deepcopy
#visualization libraries, not required as such for main algorithm
from pyvis.network import Network
import networkx as nx
from matplotlib import pyplot as plt
# -
# ## Initialising and creating instances of graph object using different *.mtx files
# +
# karate = mmread('soc-karate.mtx')
# webedu = mmread('web-edu.mtx')
# internet = mmread('tech-internet-as.mtx')
# Load the three sparse adjacency matrices (small / medium / large test graphs).
karate = mmread('../assets/S_soc-karate.mtx')
webedu = mmread('../assets/M_web-edu.mtx')
internet = mmread('../assets/L_tech-internet-as.mtx')
# -
# Build Graph wrappers: G1 straight from the file, G2/G3 from the scipy sparse matrices.
G1 = Graph(mtxfilepath='../assets/S_soc-karate.mtx')
G2 = Graph(sparse=webedu)
G3 = Graph(sparse=internet)
print(("-"*50)+"Graphs objects created"+("-"*50))
# +
# Summary table: node count, edge count and connectivity of each graph.
graphData = [['soc-karate.mtx', G1.graph.number_of_nodes(), G1.graph.number_of_edges(), G1.is_connected()],
             ['web-edu.mtx', G2.graph.number_of_nodes(), G2.graph.number_of_edges(), G2.is_connected()],
             ['tech-internet-as.mtx', G3.graph.number_of_nodes(), G3.graph.number_of_edges(), G3.is_connected()]]
display(pd.DataFrame(graphData, columns=["Name", "Size", 'Edges', "connected"]))
# -
# ## Finding Centralities
# +
# EGO centrality
# print(G.ego_centrality_node(4))
# print("ego graph made")
# +
# Finding lfvc node
# Local Fiedler vector centrality (lfvc) of node 0 on the two smaller graphs.
lfvc1 = G1.lfvc_node(0)
lfvc2 = G2.lfvc_node(0)
# lfvc3 = G3.lfvc_node(0)
print(lfvc1)
print(lfvc2)
# print(lfvc3)
# -
# Finding nodes of interest
graphData = [['soc-karate.mtx', G1.nodes_of_interest()],
             ['web-edu.mtx', G2.nodes_of_interest()],
             ['tech-internet-as.mtx', G3.nodes_of_interest()]]
display(pd.DataFrame(graphData, columns=["Name", "Nodes of interest: "]))
# +
# Finding Centralities of smallest size graph, i.e. soc-karate
print("soc-karate :")
dc1 = G1.degree_centrality()
cc1 = G1.closeness_centrality()
bc1 = G1.betweenness_centrality()
ec1 = G1.eigenvector_centrality()
clc1 = G1.clustering_coefficient_node(0)
lfvc_val = G1.lfvc()
nhc1 = G1.neighbourhood_hopset(0,2)
data = [['lfvc', lfvc_val],
        ['degree centrality', dc1],
        ['closeness centrality', cc1],
        ['betweenness centrality', bc1],
        ['eigenvector centrality', ec1],
        ['neighbouring hopset', nhc1],
        ['Clusters of node 1', clc1]]
display(pd.DataFrame(data, columns=["Centrality", "value"]))
# -
# ## Finding nodes of interest
# Cache the nodes of interest of each graph for the per-node centrality loops below.
nodes_interest1 = G1.nodes_of_interest()
nodes_interest2 = G2.nodes_of_interest()
nodes_interest3 = G3.nodes_of_interest()
# ## Centralities at nodes of interest
# +
# Finding Centralities of medium size graph, i.e. web-edu
# Per-node centralities of the medium graph, evaluated only at the nodes of interest
# (full-graph centralities would be expensive at this size).
print("web-edu :")
for i in nodes_interest2:
    print("\nNode ", i)
    cc2 = G2.closeness_centrality_node(i)
    clc2 = G2.clustering_coefficient_node(i)
    ec2 = G2.ego_centrality_node(i)
    lfvc_val2 = G2.lfvc_node(i)
    nhc2 = G2.neighbourhood_hopset(i,2)
    eig_c2 = G2.eigenvector_centrality_node(i)
    data = [['lfvc', lfvc_val2],
            ['closeness centrality', cc2],
            ['Clusters of node 1', clc2],
            ['neighbouring hopset', nhc2],
            ['ego centrality', ec2],
            ['eigenvector centrality', eig_c2]]
    display(pd.DataFrame(data, columns=["Centrality", "value"]))
# +
# Same per-node centralities for the largest graph, tech-internet-as.
print("tech-internet-as :")
for i in nodes_interest3:
    print("\nNode ", i)
    cc3 = G3.closeness_centrality_node(i)
    clc3 = G3.clustering_coefficient_node(i)
    ec3 = G3.ego_centrality_node(i)
    lfvc_val3 = G3.lfvc_node(i)
    nhc3 = G3.neighbourhood_hopset(i,2)
    eig_c3 = G3.eigenvector_centrality_node(i)
    data = [['lfvc', lfvc_val3],
            ['closeness centrality', cc3],
            ['Clusters of node 1', clc3],
            ['neighbouring hopset', nhc3],
            ['ego centrality', ec3],
            ['eigenvector centrality', eig_c3]]
    display(pd.DataFrame(data, columns=["Centrality", "value"]))
# dc3 = G3.degree_centrality()
# cc3 = G3.closeness_centrality()
# bc3 = G3.betweenness_centrality()
# eig_c3 = G3.eigenvector_centrality()
# clc3 = G3.clustering_coefficient_node(0)
# lfvc_val3 = G3.lfvc_node(0)
# nhc3 = G3.neighbourhood_hopset(0,2)
# print(("-"*100))
# print("lfvc")
# print(lfvc_val3)
# data = [[1, 'lfvc', lfvc_val3],
# [2, 'degree centrality', len(dc3)],
# [3, 'closeness centrality', len(cc3)],
# [4, 'betweenness centrality', len(bc3)],
# [5, 'eigenvector centrality', len(eig_c3)],
# [6, 'neighbouring hopset', nhc3],
# [7, 'Clusters of node 1', clc3]]
# print(tabulate(data, headers=["#", "Centrality", "len of array/value"]))
# -
# ## Deep community detection using greedy lfvc
# Run greedy deep-community detection (lfvc objective) on the medium graph.
filename = 'webedu_nt'
g_obj = G2
# FIX: the annotation was `tuple()` -- a tuple *instance*, not a type hint; use the type itself.
dc: tuple = g_obj.greedy_community_detection(q=50, function='node_lfvc')
# ## Display community
# +
def community_visualizer_matplot(dc, G, layout=nx.kamada_kawai_layout):
    """Draw G with community members (nodes contained in ``dc``) green, the rest red.

    ``layout`` is any networkx layout function; kamada-kawai by default.
    """
    graph_nx = G.graph
    positions = layout(graph_nx)  # swap in another layout function if needed
    palette = []
    for node in range(len(graph_nx.nodes)):
        palette.append('green' if node in dc else 'red')
    nx.draw_networkx(graph_nx, positions, node_color=palette, node_size=300, width=0.3)
    plt.show()
def community_visualizer_pyvis(file,dc,G_obj):
    """Write an interactive pyvis HTML view of the community to *file*.

    Nodes in dc[0] are colored blue, remaining community nodes (dc[1]) green,
    all others red.
    """
    G = G_obj.graph
    nt = Network(height='100%', width='100%', bgcolor='#94b8b8', font_color='black')
    # nt = Network(height='750px', width='100%')
    colors = []
    for i in G.nodes:
        if(i in dc[0]):
            colors.append('blue')
        elif(i in dc[1]):
            colors.append('green')
        else:
            colors.append('red')
    nt.add_nodes([i for i in G.nodes],color = colors)
    for n1,n2 in G.edges:
        nt.add_edge(int(n1),int(n2))
    # print(nt)
    # Physics tuned for large graphs, then frozen so the initial layout is kept.
    nt.barnes_hut(gravity=-80000, central_gravity=0.3, spring_length=250, spring_strength=0.001, damping=0.09, overlap=0)
    nt.toggle_physics(False)
    nt.inherit_edge_colors(False)
    nt.show_buttons(filter_=['physics']) #make =True for all buttons
    nt.show(file)
# community_visualizer_matplot(dc,G1,nx.spring_layout)
community_visualizer_pyvis('../assets/'+filename+'.html', dc, g_obj)
# -
# # Community reduction
# Reducing the community and representing it using a single node in the graph
# +
def reduce_community_graph(dc, g_obj: Graph):
    """Collapse the detected community into a single representative node.

    Parameters
    ----------
    dc : tuple of sets
        ``dc[0]`` is the boundary set (om), ``dc[1]`` the full community.
    g_obj : Graph
        Graph wrapper; its underlying networkx graph is copied, not mutated.

    Returns
    -------
    tuple
        ``((om, om | {new_node}), Graph)`` -- reduced membership sets plus a new
        Graph in which the community's interior is replaced by ``new_node``.
    """
    n_g: nx.Graph = deepcopy(g_obj.graph)  # work on a copy; keep the original intact
    om: set = dc[0]                        # boundary nodes stay in the graph
    nu: set = dc[1].difference(om)         # interior nodes to be collapsed
    # FIX: `max(nodes) + 1` is always a fresh id; the old `max(nodes) + len(nu)`
    # collides with the existing max node when `nu` happens to be empty.
    nnode = max(g_obj.graph.nodes) + 1
    n_g.add_node(nnode)
    for y in om:                           # connect the representative to every
        for x in nu:                       # boundary node adjacent to the interior
            if n_g.has_edge(y, x):
                n_g.add_edge(y, nnode, weight=1)
                break                      # one edge per boundary node is enough
    n_g.remove_nodes_from(nu)              # drop the collapsed interior nodes
    n_nu = om.union([nnode])
    return ((om, n_nu), Graph(nx_graph=n_g))
rc, ng_obj = reduce_community_graph(dc, g_obj)
community_visualizer_pyvis('../assets/'+filename+'_rc.html', rc, ng_obj)
# -
# ## Computing centralities of representative node
# +
# The representative node is the single element of n_nu that is not in om.
nnode = list(rc[1].difference(rc[0]))[0]
print("\nNode ", nnode)
cc2 = ng_obj.closeness_centrality_node(nnode)
clc2 = ng_obj.clustering_coefficient_node(nnode)
ec2 = ng_obj.ego_centrality_node(nnode)
lfvc_val2 = ng_obj.lfvc_node(nnode)
nhc2 = ng_obj.neighbourhood_hopset(nnode,2)
eig_c2 = ng_obj.eigenvector_centrality_node(nnode)
data = [['lfvc', lfvc_val2],
        ['closeness centrality', cc2],
        ['Clusters of node 1', clc2],
        ['neighbouring hopset', nhc2],
        ['ego centrality', ec2],
        ['eigenvector centrality', eig_c2]]
display(pd.DataFrame(data, columns=["Centrality", "value"]))
| py-src/community_detect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# ### Code for running scatterbrain
# * this part focuses on cleaning the data and running the pretrained T2T-ViT model to test performance
# + colab={"base_uri": "https://localhost:8080/"} id="OXcxc-5_EZZk" outputId="d66165a2-bac6-490b-d66d-6ace257cfd05"
# # !git clone https://github.com/HazyResearch/pixelfly.git
# + id="8IGy3tY0EtPH"
# import os
# os.ch
# + colab={"base_uri": "https://localhost:8080/"} id="QrbKa6EiFuMR" outputId="0f5ac542-9f85-41d2-a3d9-fea35bfa2080"
# %cd drive/MyDrive/pixelfly-master/
# + colab={"base_uri": "https://localhost:8080/"} id="vFT1GFDVThAI" outputId="25eec270-6bf2-4ab3-c8b8-5cb4910dbe9f"
# !pwd
# + id="70qXvK5HThVI"
# !mkdir -p checkpoints/t2tvit
# %cd checkpoints/t2tvit
# + colab={"base_uri": "https://localhost:8080/"} id="dExBl-sjT08H" outputId="9976c4e0-bec3-44be-f246-f12137af36f8"
# !wget https://github.com/yitu-opensource/T2T-ViT/releases/download/main/81.7_T2T_ViTt_14.pth.tar
# + id="PRyWd1uKT7en"
# !python scripts/convert_checkpoint_t2t_vit.py checkpoints/t2tvit/81.7_T2T_ViTt_14.pth.tar
# + [markdown] id="p8qTtvXw__Ge"
#
# + colab={"base_uri": "https://localhost:8080/"} id="ipXZ76C0T-Lx" outputId="e7169cba-d3b3-423c-c96a-fddded966944"
# !python run.py experiment=imagenet-t2tvit-eval.yaml model/t2tattn_cfg=full datamodule.data_dir=../../../../data/ eval.ckpt=../../../../checkpoints/t2tvit/81.7_T2T_ViTt_14.pth.tar
# + id="J53Ba_fEUhw7"
# !python run.py experiment=imagenet-t2tvit-eval.yaml model/t2tattn_cfg=full datamodule.data_dir=../../../../data/imagenet12 eval.ckpt=../../../../checkpoints/t2tvit/81.7_T2T_ViTt_14.pth.tar
# + colab={"base_uri": "https://localhost:8080/"} id="FHcOBhu8UC27" outputId="3538620c-8d10-421e-b326-8adc8e3c2c67"
# !pip install -U python-dotenv
# !pip install hydra-core==1.1.1
# !pip install hydra-colorlog==1.1.0
# !pip install hydra-optuna-sweeper==1.1.0
# !pip install omegaconf
# !pip install lightning-bolts==0.4.0
# !pip install pytorch-lightning
# !pip install rich
# !pip install einops
# !pip install timm
# !pip install torchvision
# !pip install torchtext
# !pip install pytest
# !pip install munch
# !pip install decorator
# !pip install fs
# !pip install click==7.1.2
# !pip install transformers==4.12.0
# !pip install datasets==1.14.0
# !pip install wandb
# !pip install tensorboard
# !pip install seqeval
# !pip install psutil
# !pip install sacrebleu
# !pip install rouge-score
# !pip install tensorflow_datasets
# !pip install h5py
# !pip install triton
# !pip install pytorch-block-sparse
# + colab={"base_uri": "https://localhost:8080/"} id="ohgAhau5UWGY" outputId="adb30cc5-8352-4c6e-83a1-1638bf8c58dc"
# %cd checkpoints/t2tvit
# !wget https://github.com/yitu-opensource/T2T-ViT/releases/download/main/cifar10_t2t-vit_14_98.3.pth
# + [markdown] id="LgSI3Rur6VzH"
#
# + id="6_OoqZtDAKru"
# !python scripts/convert_checkpoint_t2t_vit2.3py checkpoints/t2tvit/cifar10_t2t-vit_14_98.3.pth
# + colab={"base_uri": "https://localhost:8080/"} id="cn6TVUetfhMX" outputId="daf29262-d58d-4f92-e4b5-becb92f2a786"
# %cd ../
# + colab={"base_uri": "https://localhost:8080/"} id="EwZ5756ZBkef" outputId="ca3d1048-f960-44a6-da00-033c160a4030"
# !python run.py experiment=cifar10-t2tvit-eval.yaml model/t2tattn_cfg=full datamodule.data_dir=../../../../data/cifar10/cifar10 eval.ckpt=../../../../checkpoints/t2tvit/81.7_T2T_ViTt_14.pth.tar
# + colab={"base_uri": "https://localhost:8080/"} id="jbQuAQBOJGva" outputId="9ec41fa0-a9cf-40c1-d159-af21c6b24309"
# %cd data/cifar10/train/truck/
import cv2
# Load one CIFAR-10 training image (truck class, per the %cd above).
# NOTE(review): cv2.imread returns None when the file is missing, which would
# make resize raise — assumes '5000.png' exists in the current directory.
img = cv2.imread('5000.png')
# Upscale the small CIFAR image to 224x224 — presumably the model's expected
# input resolution; TODO confirm against the eval config.
img_fin = cv2.resize(img, (224, 224))
# + id="H7WrD3NgOU3V"
# """Convert T2T-ViT checkpoints to be compatible with our rewrite
# """
# import re
# import sys
# import shutil
# from pathlib import Path
# import numpy as np
# import torch
# for file_name in sys.argv[1:]:
# path = Path(file_name).expanduser()
# if not str(path).endswith('.og'): # Back up original checkpoint
# path_og = Path(str(path) + '.og')
# shutil.copy2(path, path_og)
# state_dict = torch.load(path, map_location='cpu')
# # T2T-ViT checkpoint is nested in the key 'state_dict_ema'
# if state_dict.keys() == {'state_dict_ema'}:
# state_dict = state_dict['state_dict_ema']
# # Replace the names of some of the submodules
# def key_mapping(key):
# if key == 'pos_embed':
# return 'pos_embed.pe'
# elif key.startswith('tokens_to_token.'):
# return re.sub('^tokens_to_token.', 'patch_embed.', key)
# else:
# return key
# state_dict = {key_mapping(k): v for k, v in state_dict.items()}
# torch.save(state_dict, path)
# + colab={"base_uri": "https://localhost:8080/"} id="JfCDZArCVGmQ" outputId="bb2e8591-58ec-459c-e595-5154aee687da"
# import zipfile
# with zipfile.ZipFile("archive.zip","r") as zip_ref:
# # %cd data
# # !tar -xvf "archive.zip" -C "/cifar10_/"
# !tar -xvf "decathlon-1.0-data-imagenet.tar"
# + id="Dlc3rZTYVHKJ"
# !unzip "archive.zip"
# + id="Enb4VK22VUp0"
import torch
# Inspect the backed-up ('.og') original T2T-ViT checkpoint.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# checkpoint files. Loads onto the default device; pass map_location='cpu'
# if no GPU is available.
model = torch.load("81.7_T2T_ViTt_14.pth.tar.og")
# Display the raw checkpoint contents (notebook cell output).
model
# + id="6mwk53QHbQhr"
from google.colab import drive
# Mount Google Drive at /content/drive (Colab-only; prompts for authorization).
drive.mount('/content/drive')
# + id="FHl5uj3bbvYc"
import timm
# List every model architecture name registered with timm (notebook cell output).
timm.list_models()
# + colab={"base_uri": "https://localhost:8080/"} id="sBHhNfKTBX28" outputId="e1b8c4ef-643f-4106-d228-a9590d54e0dd"
# !python run.py experiment=imagenet-t2tvit-eval.yaml model/t2tattn_cfg=full datamodule.data_dir=../../../../data/imagenet12 eval.ckpt=../../../../checkpoints/t2tvit/81.7_T2T_ViTt_14.pth.tar
# + id="0g_SYvsKP3Z5"
| Scatterbrain Run 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Multi-Armed Bandits
# Suppose you are at your favorite casino and there are $k$-different slot machines. Each slot machine outputs a reward according to some unknown distribution. In the multi-armed bandit problem, an agent (or a player) has $k$ different actions with an associated expected reward value. Suppose $A_t=a$ denotes the action taken at time $t$, and an associated reward $R_t$. The value of action $a$ is denoted by:
# $$q(a) = \mathbb{E}[R_t|A_t=a] $$
# Since the distribution is unknown to the agent, an estimate of $q(a)$, which will be denoted by $Q(a)$, must be constructed as the agent plays rounds.
#
# ### Goal
# The goal of any gambler is to minimize regret, and similarly an agent faced with a multi-armed bandit problem wants to minimize regret. Regret $\rho$ after $T$ rounds is defined as follows:
#
# $$\rho=Tq^{\star}-\sum_{t=1}^T R_t $$
#
# where $q^{\star} = \max_a q(a)$. The regret metric accounts for the disparity between the maximal expected reward overall $q(a)$ and the accumulated rewards.
#
# ### Approach
#
# A plausible approach could be a greedy approach in which the action is decided via the following policy:
#
# $$A_t=\text{argmax}_aQ(a)$$
#
# Such a strategy does not allow for exploration of different actions and also, the agent becomes dependent on initial conditions. Instead we allow the agent to make a random selection among the $k$ different actions with probability $\varepsilon$. Such algorithms are dubbed $\varepsilon$-greedy algorithms
#
# ### Estimating $Q(a)$
#
# The initial estimate of $Q(a)$ can be initialized to encode prior beliefs. Since $Q(a)$ is an estimate of $q(a)$, it makes sense that the sample mean of the rewards be a sufficient estimator of $q(a)$. Convergence of the estimator is guaranteed by the law of large numbers and the Cesàro mean. An efficient streaming algorithm for estimating the sample mean is given by:
#
# $$N(A) \gets N(a)+1 $$
#
# $$Q(A) \gets Q(a) + \frac{1}{N(a)} (R-Q(a)) $$
#
# where $N(A)$ is number of occurrences of action A.
#
# ### $\varepsilon$-Greedy Algorithm
#
# 1. Initialize for $k=1$ to $a$:
# 2. $Q(A)\gets 0$
# 3. $N(A)\gets 0$
# 2. for $t=1$ to $T$ rounds:
#
# A.$A_t\gets \begin{cases} \text{argmax}_a Q(a) \quad \text{with probability } 1-\varepsilon \\
# \text{a random action} \quad \text{with probability } \varepsilon \end{cases}$
#
# B.$R_t \gets \text{bandit}(A)$
#
# C.$N(A) \gets N(a)+1 $
#
# D.$Q(A) \gets Q(a) + \frac{1}{N(a)} (R_t-Q(a)) $
#
#
# ### Applications
#
# Multi-Armed bandits appear in the applications such as A/B testing, portfolio design, and etc. For more information, see references [2] and [3].
#
# ### Experiments
#
# For the experiments below, the bandit will have $k=10$ actions. The bandit will output a reward with the following distribution $\mathcal{N}(\frac{a}{10},1)$ for the $a^{\text{th}}$ action.
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
class EpsilonGreedy_Strategy(object):
    """Epsilon-greedy action selection for a k-armed bandit.

    Keeps an incremental sample-mean estimate Q of each action's value and
    a count N of how often each action was taken. With probability 1-eps
    the greedy (highest-Q) action is chosen; otherwise a uniformly random
    action is explored.
    """
    def __init__(self, k, eps):
        self.k = k               # number of arms/actions
        self.Q = np.zeros(k)     # running estimate of q(a) per action
        self.N = np.zeros(k)     # times each action has been taken
        self.eps = eps           # exploration probability
    def update(self, R, a):
        """Fold reward R for action a into the streaming sample mean."""
        self.N[a] += 1
        self.Q[a] = self.Q[a] + (1. / self.N[a]) * (R - self.Q[a])
    def choose_action(self):
        """Return the greedy action with prob. 1-eps, else a random action."""
        if np.random.rand() < 1 - self.eps:
            return self.Q.argmax()
        else:
            # BUG FIX: the original read the module-level global ``k`` here,
            # not the instance's own arm count (NameError / wrong range if
            # the global differs). Use self.k.
            return np.random.randint(0, self.k)
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    k = 10
    # True mean reward of arm a is a/10, so the last arm is optimal.
    R = (np.array(range(k))) / float(k)
    def bandit_k(a, R):
        """Sample a reward for action a: N(R[a], 1)."""
        return np.random.randn() + R[a]
    base_eps = float(1)
    T = int(1e6)     # rounds per run
    runs = 7         # one run per epsilon value: 1, 0.1, ..., 1e-6
    A_player = []    # actions taken in each run
    R_player = []    # rewards received in each run
    # `range` (not the Python-2-only `xrange`) keeps this runnable on
    # Python 3; iteration semantics are identical.
    for r in range(0, runs):
        A_player.append(np.zeros(T))
        R_player.append(np.zeros(T))
        eps = base_eps / np.power(10, r)
        player = EpsilonGreedy_Strategy(k, eps)
        for i in range(0, T):
            A = player.choose_action()
            A_player[r][i] = A
            R_out = bandit_k(A, R)
            R_player[r][i] = R_out
            player.update(R_out, A)
# +
# Visualize all runs. `xrange` is Python-2-only; `range` behaves the same
# here and also runs on Python 3.
plt.figure(figsize=(15,10))
# Histogram of actions chosen per epsilon; fading alpha keeps the
# overlaid histograms distinguishable.
for r in range(runs):
    eps=base_eps/np.power(10,r)
    plt.hist(A_player[r],bins=10,label=r'$\varepsilon='+str(eps)+'$',alpha=1./np.sqrt(r+1),range=[0,9],align='mid')
plt.xlabel(r'Actions $A_t$')
plt.ylabel(r'Counts')
plt.title('Histogram of Actions Taken by Agent')
plt.grid()
plt.legend()
plt.show()
# Cumulative reward per run (log-scaled iteration axis).
plt.figure(figsize=(15,10))
for r in range(runs):
    eps=base_eps/np.power(10,r)
    tot_R=R_player[r].cumsum()
    plt.semilogx(tot_R,label=r'$\varepsilon='+str(eps)+'$')
plt.xlabel(r'Iterations $t$')
plt.ylabel(r'Reward $R_t$')
plt.title('Total Rewards Collected by Agent')
plt.grid()
plt.legend()
plt.show()
# Running average reward per run.
plt.figure(figsize=(15,10))
for r in range(runs):
    eps=base_eps/np.power(10,r)
    tot_R=R_player[r].cumsum()
    avg_R=tot_R/(np.arange(1,T+1)*1.)
    plt.semilogx(avg_R,label=r'$\varepsilon='+str(eps)+'$')
plt.xlabel(r'Iterations $t$')
plt.legend()
plt.grid()
plt.ylabel(r'Average Reward $\bar{R}_t$')
plt.title('Average Rewards Collected by Agent')
plt.show()
# -
# ### Conclusions:
#
# It can be seen that as $\varepsilon \rightarrow 0$ and $\varepsilon>0$, the agent explores less and thus converges to the optimal selection slowly. Meanwhile if $\varepsilon \rightarrow 1$, the agent's strategy becomes random guessing. Ultimately, agents with moderate values for $\varepsilon$ converge to the optimal action rather quickly.
# ### References:
#
# 1. Sutton, Richard S., and Andrew G. Barto. Reinforcement Learning: An Introduction, Second Edition
# 2. https://support.google.com/analytics/answer/2844870?hl=en
# 3. https://en.wikipedia.org/wiki/Multi-armed_bandit
| Machine Learning/Multiarm bandits/eps_bandits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # TITANIC SOLUTION
#
# ### A BEGINNER'S GUIDE
# ## Loading Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set() # setting seaborn default for plots
# ## Loading Datasets
#
# Loading train and test dataset
# Kaggle Titanic CSVs; assumes both files sit next to the notebook.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# ## Looking into the training dataset
#
# Printing first 5 rows of the train dataset.
train.head()
# Below is a brief information about each columns of the dataset:
#
# 1. **PassengerId:** An unique index for passenger rows. It starts from 1 for first row and increments by 1 for every new rows.
#
# 2. **Survived:** Shows if the passenger survived or not. 1 stands for survived and 0 stands for not survived.
#
# 3. **Pclass:** Ticket class. 1 stands for First class ticket. 2 stands for Second class ticket. 3 stands for Third class ticket.
#
# 4. **Name:** Passenger's name. Name also contain title. "Mr" for man. "Mrs" for woman. "Miss" for girl. "Master" for boy.
#
# 5. **Sex:** Passenger's sex. It's either Male or Female.
#
# 6. **Age:** Passenger's age. "NaN" values in this column indicates that the age of that particular passenger has not been recorded.
#
# 7. **SibSp:** Number of siblings or spouses travelling with each passenger.
# 8. **Parch:** Number of parents or children travelling with each passenger.
# 9. **Ticket:** Ticket number.
# 10. **Fare:** How much money the passenger has paid for the travel journey.
# 11. **Cabin:** Cabin number of the passenger. "NaN" values in this column indicates that the cabin number of that particular passenger has not been recorded.
# 12. **Embarked:** Port from where the particular passenger was embarked/boarded.
# **Total rows and columns**
train.shape
# We can see that there are 891 rows and 12 columns in our training dataset.
# **Describing training dataset**
#
# *describe()* method can show different values like count, mean, standard deviation, etc. of numeric data types.
train.describe()
# *describe(include = ['O'])* will show the descriptive statistics of object data types.
train.describe(include=['O'])
# This shows that there are duplicate *Ticket number* and *Cabins* shared. The highest number of duplicate ticket number is "CA. 2343". It has been repeated 7 times. Similarly, the highest number of people using the same cabin is 4. They are using cabin number "C23 C25 C27".
#
# We also see that 644 people were embarked from port "S".
#
# Among 891 rows, 577 were Male and the rest were Female.
# We use *info()* method to see more information of our train dataset.
train.info()
# We can see that *Age* value is missing for many rows.
#
# Out of 891 rows, the *Age* value is present only in 714 rows.
#
# Similarly, *Cabin* values are also missing in many rows. Only 204 out of 891 rows have *Cabin* values.
train.isnull().sum()
# There are 177 rows with missing *Age*, 687 rows with missing *Cabin* and 2 rows with missing *Embarked* information.
# ## Looking into the testing dataset
#
# Test data has 418 rows and 11 columns.
#
# > Train data rows = 891
# >
# > Test data rows = 418
# >
# > Total rows = 891+418 = 1309
#
# We can see that around 2/3 of total data is set as Train data and around 1/3 of total data is set as Test data.
test.shape
# *Survived* column is not present in Test data.
# We have to train our classifier using the Train data and generate predictions (*Survived*) on Test data.
test.head()
test.info()
# There are missing entries for *Age* in Test dataset as well.
#
# Out of 418 rows in Test dataset, only 332 rows have *Age* value.
#
# *Cabin* values are also missing in many rows. Only 91 rows out of 418 have values for *Cabin* column.
test.isnull().sum()
# There are 86 rows with missing *Age*, 327 rows with missing *Cabin* and 1 row with missing *Fare* information.
# ## Relationship between Features and Survival
#
# In this section, we analyze relationship between different features with respect to *Survival*. We see how different feature values show different survival chance. We also plot different kinds of diagrams to **visualize** our data and findings.
# +
# Split the training set by outcome and report the class balance.
survived = train[train['Survived'] == 1]
not_survived = train[train['Survived'] == 0]
n_total = len(train)
n_survived = len(survived)
n_not_survived = len(not_survived)
print ("Survived: %i (%.1f%%)"%(n_survived, float(n_survived)/n_total*100.0))
print ("Not Survived: %i (%.1f%%)"%(n_not_survived, float(n_not_survived)/n_total*100.0))
print ("Total: %i"%n_total)
# -
# ### Pclass vs. Survival
#
# Higher class passengers have better survival chance.
train.Pclass.value_counts()
train.groupby('Pclass').Survived.value_counts()
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean()
#train.groupby('Pclass').Survived.mean().plot(kind='bar')
sns.barplot(x='Pclass', y='Survived', data=train)
# ### Sex vs. Survival
#
# Females have better survival chance.
train.Sex.value_counts()
train.groupby('Sex').Survived.value_counts()
train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()
#train.groupby('Sex').Survived.mean().plot(kind='bar')
sns.barplot(x='Sex', y='Survived', data=train)
# ### Pclass & Sex vs. Survival
# Below, we just find out how many males and females are there in each *Pclass*. We then plot a stacked bar diagram with that information. We found that there are more males among the 3rd Pclass passengers.
# +
# Count passengers per (Pclass, Sex) cell.
tab = pd.crosstab(train['Pclass'], train['Sex'])
print (tab)
# Normalize each row to proportions and draw a stacked bar per Pclass.
tab.div(tab.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('Pclass')
plt.ylabel('Percentage')
# -
sns.factorplot('Sex', 'Survived', hue='Pclass', size=4, aspect=2, data=train)
# From the above plot, it can be seen that:
# - Women from 1st and 2nd Pclass have almost 100% survival chance.
# - Men from 2nd and 3rd Pclass have only around 10% survival chance.
# ### Pclass, Sex & Embarked vs. Survival
sns.factorplot(x='Pclass', y='Survived', hue='Sex', col='Embarked', data=train)
# From the above plot, it can be seen that:
# - Almost all females from Pclass 1 and 2 survived.
# - Females dying were mostly from 3rd Pclass.
# - Males from Pclass 1 only have slightly higher survival chance than Pclass 2 and 3.
# ### Embarked vs. Survived
train.Embarked.value_counts()
train.groupby('Embarked').Survived.value_counts()
train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
#train.groupby('Embarked').Survived.mean().plot(kind='bar')
sns.barplot(x='Embarked', y='Survived', data=train)
# ### Parch vs. Survival
train.Parch.value_counts()
train.groupby('Parch').Survived.value_counts()
train[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean()
#train.groupby('Parch').Survived.mean().plot(kind='bar')
sns.barplot(x='Parch', y='Survived', ci=None, data=train) # ci=None will hide the error bar
# ### SibSp vs. Survival
train.SibSp.value_counts()
train.groupby('SibSp').Survived.value_counts()
train[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean()
#train.groupby('SibSp').Survived.mean().plot(kind='bar')
sns.barplot(x='SibSp', y='Survived', ci=None, data=train) # ci=None will hide the error bar
# ### Age vs. Survival
# +
# Three side-by-side violin plots of Age, split by Survived, conditioned on
# Embarked / Pclass / Sex respectively.
fig = plt.figure(figsize=(15,5))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
sns.violinplot(x="Embarked", y="Age", hue="Survived", data=train, split=True, ax=ax1)
sns.violinplot(x="Pclass", y="Age", hue="Survived", data=train, split=True, ax=ax2)
sns.violinplot(x="Sex", y="Age", hue="Survived", data=train, split=True, ax=ax3)
# -
# From *Pclass* violinplot, we can see that:
# - 1st Pclass has very few children as compared to other two classes.
# - 1st Plcass has more old people as compared to other two classes.
# - Almost all children (between age 0 to 10) of 2nd Pclass survived.
# - Most children of 3rd Pclass survived.
# - Younger people of 1st Pclass survived as compared to its older people.
#
# From *Sex* violinplot, we can see that:
# - Most male children (between age 0 to 14) survived.
# - Females with age between 18 to 40 have better survival chance.
# +
total_survived = train[train['Survived']==1]
total_not_survived = train[train['Survived']==0]
male_survived = train[(train['Survived']==1) & (train['Sex']=="male")]
female_survived = train[(train['Survived']==1) & (train['Sex']=="female")]
male_not_survived = train[(train['Survived']==0) & (train['Sex']=="male")]
female_not_survived = train[(train['Survived']==0) & (train['Sex']=="female")]
plt.figure(figsize=[15,5])
plt.subplot(111)
sns.distplot(total_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='blue')
sns.distplot(total_not_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='red', axlabel='Age')
plt.figure(figsize=[15,5])
plt.subplot(121)
sns.distplot(female_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='blue')
sns.distplot(female_not_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='red', axlabel='Female Age')
plt.subplot(122)
sns.distplot(male_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='blue')
sns.distplot(male_not_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='red', axlabel='Male Age')
# -
# From the above figures, we can see that:
# - Combining both male and female, we can see that children with age between 0 to 5 have better chance of survival.
# - Females with age between "18 to 40" and "50 and above" have higher chance of survival.
# - Males with age between 0 to 14 have better chance of survival.
# ### Correlating Features
# Heatmap of Correlation between different features:
#
# >Positive numbers = Positive correlation, i.e. increase in one feature will increase the other feature & vice-versa.
# >
# >Negative numbers = Negative correlation, i.e. increase in one feature will decrease the other feature & vice-versa.
#
# In our case, we focus on which features have strong positive or negative correlation with the *Survived* feature.
plt.figure(figsize=(15,6))
sns.heatmap(train.drop('PassengerId',axis=1).corr(), vmax=0.6, square=True, annot=True)
# ## Feature Extraction
#
# In this section, we select the appropriate features to train our classifier. Here, we create new features based on existing features. We also convert categorical features into numeric form.
# ### Name Feature
#
# Let's first extract titles from *Name* column.
# +
train_test_data = [train, test] # combining train and test dataset

for dataset in train_test_data:
    # Pull the honorific ("Mr", "Mrs", "Miss", ...) out of names like
    # "Braund, Mr. Owen Harris". expand=False keeps the result a Series:
    # without it, modern pandas returns a DataFrame from str.extract and
    # this column assignment misbehaves. Raw string avoids the invalid
    # escape-sequence warning for '\.'.
    dataset['Title'] = dataset.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
# -
train.head()
# As you can see above, we have added a new column named *Title* in the Train dataset with the *Title* present in the particular passenger name.
pd.crosstab(train['Title'], train['Sex'])
# The number of passengers with each *Title* is shown above.
#
# We now replace some less common titles with the name "Other".
# +
# Collapse rare titles into "Other" and normalize French/abbreviated
# variants onto the common English ones.
for dataset in train_test_data:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', \
    'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

# Mean survival rate per consolidated title (notebook cell output).
train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
# -
# After that, we convert the categorical *Title* values into numeric form.
# Encode Title as an ordinal; unmapped/missing titles become 0.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Other": 5}
for dataset in train_test_data:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train.head()
# ### Sex Feature
#
# We convert the categorical value of *Sex* into numeric. We represent 1 as female and 0 as male.
# Encode Sex numerically: female -> 1, male -> 0.
for dataset in train_test_data:
    dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
train.head()
# ### Embarked Feature
# There are empty values for some rows for *Embarked* column. The empty values are represented as "nan" in below list.
train.Embarked.unique()
# Let's check the number of passengers for each *Embarked* category.
train.Embarked.value_counts()
# We find that category "S" has maximum passengers. Hence, we replace "nan" values with "S".
# Impute missing Embarked with the mode, "S" (Southampton).
for dataset in train_test_data:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')
train.head()
# We now convert the categorical value of *Embarked* into numeric. We represent 0 as S, 1 as C and 2 as Q.
# Encode Embarked numerically: S -> 0, C -> 1, Q -> 2.
for dataset in train_test_data:
    #print(dataset.Embarked.unique())
    dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train.head()
# ### Age Feature
# We first fill the NULL values of *Age* with a random number between (mean_age - std_age) and (mean_age + std_age).
#
# We then create a new column named *AgeBand*. This categorizes age into 5 different age range.
# +
for dataset in train_test_data:
    # Impute missing ages with uniform random integers drawn from
    # [mean - std, mean + std) of the observed ages.
    age_avg = dataset['Age'].mean()
    age_std = dataset['Age'].std()
    age_null_count = dataset['Age'].isnull().sum()
    age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)
    # FIX: the original chained assignment dataset['Age'][mask] = ... raises
    # SettingWithCopyWarning and silently fails to write under pandas
    # copy-on-write; .loc assigns in place on the frame itself.
    dataset.loc[dataset['Age'].isnull(), 'Age'] = age_null_random_list
    dataset['Age'] = dataset['Age'].astype(int)

# Bucket ages into 5 equal-width bands and show survival rate per band.
train['AgeBand'] = pd.cut(train['Age'], 5)
print (train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean())
# -
train.head()
# Now, we map *Age* according to *AgeBand*.
# Replace Age with its band index (0..4), using the AgeBand cut points.
for dataset in train_test_data:
    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train.head()
# ### Fare Feature
# Replace missing *Fare* values with the median of *Fare*.
# Impute missing Fare with the training-set median (only test has a gap).
for dataset in train_test_data:
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
# Create *FareBand*. We divide the *Fare* into 4 category range.
# Quartile-based fare bands; show survival rate per band.
train['FareBand'] = pd.qcut(train['Fare'], 4)
print (train[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean())
train.head()
# Map *Fare* according to *FareBand*
# Replace Fare with its quartile index (0..3) using the FareBand edges.
for dataset in train_test_data:
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
train.head()
# ### SibSp & Parch Feature
#
# Combining *SibSp* & *Parch* feature, we create a new feature named *FamilySize*.
# +
# FamilySize = siblings/spouses + parents/children + the passenger themself.
for dataset in train_test_data:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1

print (train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean())
# -
# The above data shows that:
#
# - Having *FamilySize* upto 4 (from 2 to 4) has better survival chance.
# - *FamilySize = 1*, i.e. travelling alone has less survival chance.
# - Large *FamilySize* (size of 5 and above) also have less survival chance.
# Let's create a new feature named *IsAlone*. This feature is used to check how is the survival chance while travelling alone as compared to travelling with family.
# +
# Binary flag: 1 when the passenger travels with no family members.
for dataset in train_test_data:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1

print (train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean())
# -
# This shows that travelling alone has only 30% survival chance.
train.head(1)
test.head(1)
# ## Feature Selection
# We drop unnecessary columns/features and keep only the useful ones for our experiment. Column *PassengerId* is only dropped from Train set because we need *PassengerId* in Test set while creating Submission file to Kaggle.
# Drop raw columns that were replaced by engineered features. PassengerId is
# kept in `test` because the Kaggle submission file needs it.
features_drop = ['Name', 'SibSp', 'Parch', 'Ticket', 'Cabin', 'FamilySize']
train = train.drop(features_drop, axis=1)
test = test.drop(features_drop, axis=1)
train = train.drop(['PassengerId', 'AgeBand', 'FareBand'], axis=1)
train.head()
test.head()
# We are done with Feature Selection/Engineering. Now, we are ready to train a classifier with our feature set.
# ## Classification & Accuracy
# Define training and testing set
# +
# Features/label split; X_test keeps only feature columns for prediction.
X_train = train.drop('Survived', axis=1)
y_train = train['Survived']
X_test = test.drop("PassengerId", axis=1).copy()
X_train.shape, y_train.shape, X_test.shape
# -
# There are many classifying algorithms present. Among them, we choose the following *Classification* algorithms for our problem:
#
# - Logistic Regression
# - Support Vector Machines (SVC)
# - Linear SVC
# - k-Nearest Neighbor (KNN)
# - Decision Tree
# - Random Forest
# - Naive Bayes (GaussianNB)
# - Perceptron
# - Stochastic Gradient Descent (SGD)
#
# Here's the training and testing procedure:
#
# > First, we train these classifiers with our training data.
# >
# > After that, using the trained classifier, we predict the *Survival* outcome of test data.
# >
# > Finally, we calculate the accuracy score (in percentange) of the trained classifier.
#
# ***Please note:*** that the accuracy score is generated based on our training dataset.
# Importing Classifier Modules
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
# ### Logistic Regression
#
# [Logistic regression](https://en.wikipedia.org/wiki/Logistic_regression), or logit regression, or logit model is a regression model where the dependent variable (DV) is categorical. This article covers the case of a binary dependent variable—that is, where it can take only two values, "0" and "1", which represent outcomes such as pass/fail, win/lose, alive/dead or healthy/sick. Cases where the dependent variable has more than two outcome categories may be analysed in multinomial logistic regression, or, if the multiple categories are ordered, in ordinal logistic regression.
# Fit logistic regression; the score below is TRAINING accuracy (no held-out
# labels exist for the Kaggle test set), so it is an optimistic estimate.
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_pred_log_reg = clf.predict(X_test)
acc_log_reg = round( clf.score(X_train, y_train) * 100, 2)
print (str(acc_log_reg) + ' percent')
# ### Support Vector Machine (SVM)
#
# [Support Vector Machine (SVM)](https://en.wikipedia.org/wiki/Support_vector_machine) model is a Supervised Learning model used for classification and regression analysis. It is a representation of the examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall.
#
# In addition to performing linear classification, SVMs can efficiently perform a non-linear classification using what is called the kernel trick, implicitly mapping their inputs into high-dimensional feature spaces. Suppose some given data points each belong to one of two classes, and the goal is to decide which class a new data point will be in. In the case of support vector machines, a data point is viewed as a $p$-dimensional vector (a list of $p$ numbers), and we want to know whether we can separate such points with a $(p-1)$-dimensional hyperplane.
#
# When data are not labeled, supervised learning is not possible, and an unsupervised learning approach is required, which attempts to find natural clustering of the data to groups, and then map new data to these formed groups. The clustering algorithm which provides an improvement to the support vector machines is called **support vector clustering** and is often used in industrial applications either when data are not labeled or when only some data are labeled as a preprocessing for a classification pass.
#
# In the below code, [SVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) stands for Support Vector Classification.
# RBF-kernel SVM with default hyperparameters; training accuracy.
clf = SVC()
clf.fit(X_train, y_train)
y_pred_svc = clf.predict(X_test)
acc_svc = round(clf.score(X_train, y_train) * 100, 2)
print (acc_svc)
# ### Linear SVM
#
# Linear SVM is a SVM model with linear kernel.
#
# In the below code, [LinearSVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html) stands for Linear Support Vector Classification.
# Linear-kernel SVM; training accuracy.
clf = LinearSVC()
clf.fit(X_train, y_train)
y_pred_linear_svc = clf.predict(X_test)
acc_linear_svc = round(clf.score(X_train, y_train) * 100, 2)
print (acc_linear_svc)
# ### $k$-Nearest Neighbors
#
# [$k$-nearest neighbors algorithm (k-NN)](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) is one of the simplest machine learning algorithms and is used for classification and regression. In both cases, the input consists of the $k$ closest training examples in the feature space. The output depends on whether $k$-NN is used for classification or regression:
#
# - In *$k$-NN classification*, the output is a class membership. An object is classified by a majority vote of its neighbors, with the object being assigned to the class most common among its $k$ nearest neighbors ($k$ is a positive integer, typically small). If $k = 1$, then the object is simply assigned to the class of that single nearest neighbor.
#
#
# - In *$k$-NN regression*, the output is the property value for the object. This value is the average of the values of its $k$ nearest neighbors.
# 3-nearest-neighbors classifier; training accuracy.
clf = KNeighborsClassifier(n_neighbors = 3)
clf.fit(X_train, y_train)
y_pred_knn = clf.predict(X_test)
acc_knn = round(clf.score(X_train, y_train) * 100, 2)
print (acc_knn)
# ### Decision Tree
#
# A [decision tree](https://en.wikipedia.org/wiki/Decision_tree) is a flowchart-like structure in which each internal node represents a "test" on an attribute (e.g. whether a coin flip comes up heads or tails), each branch represents the outcome of the test, and each leaf node represents a class label (decision taken after computing all attributes). The paths from root to leaf represent classification rules.
# Unpruned decision tree — can fit the training data almost perfectly, so
# its training accuracy overstates generalization.
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_pred_decision_tree = clf.predict(X_test)
acc_decision_tree = round(clf.score(X_train, y_train) * 100, 2)
print (acc_decision_tree)
# ### Random Forest
#
# [Random forests](https://en.wikipedia.org/wiki/Random_forest) or **random decision forests** are an **ensemble learning method** for classification, regression and other tasks, that operate by constructing a multitude of decision trees at training time and outputting the class that is the mode of the classes (classification) or mean prediction (regression) of the individual trees. Random decision forests correct for *decision trees' habit of overfitting to their training set*.
#
# [Ensemble methods](https://en.wikipedia.org/wiki/Ensemble_learning) use multiple learning algorithms to obtain better predictive performance than could be obtained from any of the constituent learning algorithms alone.
# 100-tree random forest; training accuracy (still optimistic, though the
# ensemble reduces single-tree overfitting).
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred_random_forest = clf.predict(X_test)
acc_random_forest = round(clf.score(X_train, y_train) * 100, 2)
print (acc_random_forest)
# ### Gaussian Naive Bayes
#
# [Naive Bayes classifiers](https://en.wikipedia.org/wiki/Naive_Bayes_classifier) are a family of simple probabilistic classifiers based on applying Bayes' theorem with strong (naive) independence assumptions between the features.
#
# [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem) (alternatively **Bayes' law** or **Bayes' rule**) describes the probability of an event, based on prior knowledge of conditions that might be related to the event. For example, if cancer is related to age, then, using Bayes' theorem, a person's age can be used to more accurately assess the probability that they have cancer, compared to the assessment of the probability of cancer made without knowledge of the person's age.
#
# Naive Bayes is a simple technique for constructing classifiers: models that assign class labels to problem instances, represented as vectors of feature values, where the class labels are drawn from some finite set. It is not a single algorithm for training such classifiers, but a family of algorithms based on a common principle: all naive Bayes classifiers assume that the value of a particular feature is independent of the value of any other feature, given the class variable. For example, a fruit may be considered to be an apple if it is red, round, and about 10 cm in diameter. A naive Bayes classifier considers each of these features to contribute independently to the probability that this fruit is an apple, regardless of any possible correlations between the color, roundness, and diameter features.
# Gaussian naive Bayes; training accuracy.
clf = GaussianNB()
clf.fit(X_train, y_train)
y_pred_gnb = clf.predict(X_test)
acc_gnb = round(clf.score(X_train, y_train) * 100, 2)
print (acc_gnb)
# ### Perceptron
#
# [Perceptron](https://en.wikipedia.org/wiki/Perceptron) is a type of linear classifier, i.e. a classification algorithm that makes its predictions based on a linear predictor function combining a set of weights with the feature vector.
# Linear perceptron. max_iter=5 is very low — presumably to silence the
# sklearn deprecation-era defaults; convergence is not guaranteed.
clf = Perceptron(max_iter=5, tol=None)
clf.fit(X_train, y_train)
y_pred_perceptron = clf.predict(X_test)
acc_perceptron = round(clf.score(X_train, y_train) * 100, 2)
print (acc_perceptron)
# ### Stochastic Gradient Descent (SGD)
#
# [Stochastic gradient descent](https://en.wikipedia.org/wiki/Stochastic_gradient_descent) (often shortened in **SGD**), also known as incremental gradient descent, is a stochastic approximation of the gradient descent optimization method for minimizing an objective function that is written as a sum of differentiable functions. In other words, SGD tries to find minima or maxima by iteration.
# Linear model trained by stochastic gradient descent; like the perceptron
# cell, max_iter=5 is low and results vary run to run (no fixed seed).
clf = SGDClassifier(max_iter=5, tol=None)
clf.fit(X_train, y_train)
y_pred_sgd = clf.predict(X_test)
acc_sgd = round(clf.score(X_train, y_train) * 100, 2)
print (acc_sgd)
# ## Confusion Matrix
#
# A [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix), also known as an error matrix, is a specific table layout that allows visualization of the performance of an algorithm. Each row of the matrix represents the instances in a predicted class while each column represents the instances in an actual class (or vice versa). The name stems from the fact that it makes it easy to see if the system is confusing two classes (i.e. commonly mislabelling one as another).
#
# In predictive analytics, a table of confusion (sometimes also called a confusion matrix), is a table with two rows and two columns that reports the number of false positives, false negatives, true positives, and true negatives. This allows more detailed analysis than mere proportion of correct classifications (accuracy). Accuracy is not a reliable metric for the real performance of a classifier, because it will yield misleading results if the data set is unbalanced (that is, when the numbers of observations in different classes vary greatly). For example, if there were 95 cats and only 5 dogs in the data set, a particular classifier might classify all the observations as cats. The overall accuracy would be 95%, but in more detail the classifier would have a 100% recognition rate for the cat class but a 0% recognition rate for the dog class.
#
# Here's another guide explaining [Confusion Matrix with example](http://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/).
#
# $\begin{matrix} & Predicted Positive & Predicted Negative \\ Actual Positive & TP & FN \\ Actual Negative & FP & TN \end{matrix}$
#
# In our (Titanic problem) case:
#
# >**True Positive:** The classifier predicted *Survived* **and** the passenger actually *Survived*.
# >
# >**True Negative:** The classifier predicted *Not Survived* **and** the passenger actually *Not Survived*.
# >
# >**False Positive:** The classifier predicted *Survived* **but** the passenger actually *Not Survived*.
# >
# >**False Negative:** The classifier predicted *Not Survived* **but** the passenger actually *Survived*.
# In the example code below, we plot a confusion matrix for the prediction of ***Random Forest Classifier*** on our training dataset. This shows how many entries are correctly and incorrectly predicted by our classifer.
# +
from sklearn.metrics import confusion_matrix
import itertools
# Refit a Random Forest and score it on the TRAINING set, so the matrix
# below shows training fit rather than generalization performance.
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred_random_forest_training_set = clf.predict(X_train)
acc_random_forest = round(clf.score(X_train, y_train) * 100, 2)
# %.2f keeps the two decimals computed above (%i silently truncated them).
print ("Accuracy: %.2f %% \n"%acc_random_forest)
class_names = ['Survived', 'Not Survived']
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_train, y_pred_random_forest_training_set)
np.set_printoptions(precision=2)
print ('Confusion Matrix in Numbers')
print (cnf_matrix)
print ('')
# Normalize each row by its total: entries become per-actual-class rates.
cnf_matrix_percent = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print ('Confusion Matrix in Percentage')
print (cnf_matrix_percent)
print ('')
# sklearn orders classes ascending, so row/column 0 corresponds to label 0
# (Not Survived) and row/column 1 to label 1 (Survived).  The original
# lists put 'Survived' first, mislabelling the heatmap axes.
true_class_names = ['True Not Survived', 'True Survived']
predicted_class_names = ['Predicted Not Survived', 'Predicted Survived']
df_cnf_matrix = pd.DataFrame(cnf_matrix,
                             index = true_class_names,
                             columns = predicted_class_names)
df_cnf_matrix_percent = pd.DataFrame(cnf_matrix_percent,
                                     index = true_class_names,
                                     columns = predicted_class_names)
# Raw counts on the left, row-normalized rates on the right.
plt.figure(figsize = (15,5))
plt.subplot(121)
sns.heatmap(df_cnf_matrix, annot=True, fmt='d')
plt.subplot(122)
sns.heatmap(df_cnf_matrix_percent, annot=True)
# -
# ## Comparing Models
#
# Let's compare the accuracy score of all the classifier models used above.
# +
# Pair each classifier's name with its training accuracy score.
model_scores = [
    ('Logistic Regression', acc_log_reg),
    ('Support Vector Machines', acc_svc),
    ('Linear SVC', acc_linear_svc),
    ('KNN', acc_knn),
    ('Decision Tree', acc_decision_tree),
    ('Random Forest', acc_random_forest),
    ('Naive Bayes', acc_gnb),
    ('Perceptron', acc_perceptron),
    ('Stochastic Gradient Decent', acc_sgd),
]
models = pd.DataFrame(model_scores, columns=['Model', 'Score'])
# Display the models ranked from best to worst accuracy.
models.sort_values(by='Score', ascending=False)
# -
# From the above table, we can see that *Decision Tree* and *Random Forest* classifiers have the highest accuracy score.
#
# Among these two, we choose *Random Forest* classifier as it has the ability to limit overfitting as compared to *Decision Tree* classifier.
# ## Create Submission File to Kaggle
# Preview the test set, then write the Kaggle submission file from the
# Random Forest predictions (y_pred_random_forest, computed earlier).
test.head()
submission = pd.DataFrame({
    "PassengerId": test["PassengerId"],
    "Survived": y_pred_random_forest
})
# index=False: Kaggle expects only the two columns, no row index.
submission.to_csv('submission.csv', index=False)
# ## References
#
# This notebook is created by learning from the following notebooks:
#
# - [A Journey through Titanic](https://www.kaggle.com/omarelgabry/a-journey-through-titanic)
# - [Titanic Data Science Solutions](https://www.kaggle.com/startupsci/titanic-data-science-solutions)
# - [Pytanic](https://www.kaggle.com/headsortails/pytanic)
# - [Titanic best working Classifier](https://www.kaggle.com/sinakhorami/titanic-best-working-classifier)
# - [My approach to Titanic competition](https://www.kaggle.com/rafalplis/my-approach-to-titanic-competition)
| Titanic: Machine Learning from Disaster/Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Diffusion of ions across a membrane
# In this notebook, we construct a bond graph model of ion diffusion across the membrane. The model accounts for the interplay between chemical and electrical energy in such systems.
# +
import BondGraphTools as bgt
import numpy as np
from matplotlib import pyplot as plt
from general import mpl_settings, save_figure
from plot_options import set_color_palette
# -
# Apply the project's shared matplotlib settings and colour palette.
mpl_settings()
set_color_palette()
# Assuming that the diffusion of a charged ion across the membrane follows the barrier model, ion diffusion can be represented by the bond graph below.
#
# <img src="electrodiffusion.png" alt="drawing" width="400"/>
#
# We use a function to construct this bond graph in the code below.
# +
# Physical constants
R = 8.314    # universal gas constant (J/mol/K)
T = 310.0    # temperature (K); 310 K = 37 C, body temperature
F = 96485.   # Faraday constant (C/mol)
def ion_pore(z,conc_ex,conc_in,Ke=1,Ki=1,r=1e-7):
    """Build a bond graph model of one ion species diffusing through a
    membrane pore, coupling chemical and electrical energy domains.

    Parameters
    ----------
    z : int
        Ionic charge (valence), e.g. +1 for Na+/K+, -1 for Cl-.
    conc_ex, conc_in : float
        Extracellular and intracellular ion concentrations.
    Ke, Ki : float, optional
        Thermodynamic constants multiplying the concentrations inside
        the chemical-potential logarithms (default 1).
    r : float, optional
        Rate parameter of the Re:pore reaction component (default 1e-7).

    Returns
    -------
    The assembled BondGraphTools model.
    """
    model = bgt.new(name="Ion pore")
    # Se:Ie -- fixed chemical potential of the extracellular ion, RT*ln(Ke*ce)
    ion_ex = bgt.new("Se",name="Ie",
        value=R*T*np.log(Ke*conc_ex))
    # Se:Ii -- fixed chemical potential of the intracellular ion, RT*ln(Ki*ci)
    ion_in = bgt.new("Se",name="Ii",
        value=R*T*np.log(Ki*conc_in))
    # 1-junctions summing the forward and reverse affinities at the pore
    flow_f = bgt.new("1",name="f")
    flow_r = bgt.new("1",name="r")
    # Re:pore -- biochemical reaction component representing transport
    r_pore = bgt.new("Re",name="pore",
        library="BioChem",
        value={"r":r,"R":R,"T":T})
    # C:mem -- membrane capacitance storing electrical energy (value 1.0)
    membrane = bgt.new("C",name="mem",value=1.0)
    potential_mem = bgt.new("0",name="mem")
    # TF:F -- transformer coupling electrical and chemical domains via
    # Faraday's constant
    TF_F = bgt.new("TF",name="F",value=F)
    # TF:-z/2
    TF_z1 = bgt.new("TF",name="zf",value=-z/2)
    # TF:z/2 -- together with TF:-z/2 this splits the zFV electrical
    # contribution symmetrically between forward and reverse affinities
    TF_z2 = bgt.new("TF",name="zr",value=z/2)
    bgt.add(model,ion_ex,ion_in,flow_f,flow_r,
        r_pore,membrane,potential_mem,
        TF_F,TF_z1,TF_z2)
    # Chemical path: external source -> forward junction -> pore ->
    # reverse junction -> internal source.
    bgt.connect(ion_ex,flow_f)
    bgt.connect(flow_f,r_pore)
    bgt.connect(r_pore,flow_r)
    bgt.connect(flow_r,ion_in)
    # Electrical path: membrane potential couples into both junctions.
    # NOTE: the (component, port) numbering below fixes transformer
    # orientation -- do not reorder these connections.
    bgt.connect(potential_mem,(TF_F,1))
    bgt.connect((TF_F,0),membrane)
    bgt.connect(potential_mem,(TF_z1,0))
    bgt.connect((TF_z1,1),flow_f)
    bgt.connect(flow_r,(TF_z2,1))
    bgt.connect((TF_z2,0),potential_mem)
    return model
# -
# Models of transport for sodium, potassium and chloride ions can be constructed by passing physiological concentrations and ionic charges $z$ into the function.
# +
# Physiological ion concentrations, converted from mM to mol/L.
# NOTE: Ke/Ki here are potassium concentrations; they coincidentally
# share names with (but are unrelated to) ion_pore's Ke/Ki parameters.
Nae = 155/1000   # extracellular Na+
Nai = 19/1000    # intracellular Na+
zNa = 1
Ke = 5/1000      # extracellular K+
Ki = 136/1000    # intracellular K+
zK = 1
Cle = 112/1000   # extracellular Cl-
Cli = 78/1000    # intracellular Cl-
zCl = -1
# One pore model per ion species.
Na_model = ion_pore(z=zNa,conc_ex=Nae,conc_in=Nai)
K_model = ion_pore(z=zK,conc_ex=Ke,conc_in=Ki)
Cl_model = ion_pore(z=zCl,conc_ex=Cle,conc_in=Cli)
# -
# In the code below, the effect of membrane potential on reaction affinity is plotted. The equilibrium points correspond to the Nernst potentials.
# +
def affinity(V, z, Ie, Ii, R=8.314, T=310.0, F=96485.):
    """Reaction affinity (J/mol) for ion transport across the membrane.

    A = RT*ln(Ie/Ii) - zFV: the chemical driving force from the
    concentration ratio minus the electrical work of moving charge z
    across potential V.  The affinity is zero at the Nernst potential.

    Parameters
    ----------
    V : float or ndarray
        Membrane potential (V).
    z : int
        Ionic charge (valence).
    Ie, Ii : float
        Extracellular and intracellular concentrations (same units).
    R, T, F : float, optional
        Gas constant (J/mol/K), temperature (K) and Faraday constant
        (C/mol).  Defaults match the module-level constants, so
        existing callers are unaffected.
    """
    return R*T*np.log(Ie/Ii) - z*F*V
def nernst(z, Ie, Ii, R=8.314, T=310.0, F=96485.):
    """Nernst (equilibrium) potential in volts: (RT/zF)*ln(Ie/Ii).

    Parameters
    ----------
    z : int
        Ionic charge (valence).
    Ie, Ii : float
        Extracellular and intracellular concentrations (same units).
    R, T, F : float, optional
        Gas constant (J/mol/K), temperature (K) and Faraday constant
        (C/mol).  Defaults match the module-level constants, so
        existing callers are unaffected.
    """
    return (R*T/z/F)*np.log(Ie/Ii)
# Sweep membrane potential from -100 mV to +100 mV in 1 mV steps.
array_V = np.arange(-0.1,0.1,0.001)
# Affinity curves for each species over the voltage sweep.
A_Na = np.array([affinity(V,zNa,Nae,Nai) for V in array_V])
A_K = np.array([affinity(V,zK,Ke,Ki) for V in array_V])
A_Cl = np.array([affinity(V,zCl,Cle,Cli) for V in array_V])
# Nernst potentials, converted from V to mV for plotting.
V_Na = 1000*nernst(zNa,Nae,Nai)
V_K = 1000*nernst(zK,Ke,Ki)
V_Cl = 1000*nernst(zCl,Cle,Cli)
fig_affinity,ax = plt.subplots()
# Reference lines: zero affinity, and each Nernst potential.
ax.axhline(0, color='lightgray', linewidth=1.5)
ax.axvline(V_Na, color='lightgray', linewidth=1.5)
ax.axvline(V_K, color='lightgray', linewidth=1.5)
ax.axvline(V_Cl, color='lightgray', linewidth=1.5)
# Plot in mV / (kJ/mol) units.
plt.plot(1000*array_V,A_Na/1000,label="Na")
plt.plot(1000*array_V,A_K/1000,label="K")
plt.plot(1000*array_V,A_Cl/1000,label="Cl")
plt.xlabel("Voltage (mV)")
plt.ylabel("Affinity (kJ/mol)")
# Mark the equilibrium (zero-affinity) point of each curve.
plt.plot(V_Na,0,".",color="C0",markersize=20,markeredgewidth=1.5,markeredgecolor="w")
plt.plot(V_K,0,".",color="C1",markersize=20,markeredgewidth=1.5,markeredgecolor="w")
plt.plot(V_Cl,0,".",color="C2",markersize=20,markeredgewidth=1.5,markeredgecolor="w")
# NOTE(review): the annotation text hard-codes the rounded Nernst values
# (56, -88, -10 mV); update these strings if the concentrations change.
ax.annotate("$\mathregular{V_{Na} = 56mV}$",xy=(V_Na,15),
    fontsize=12,color="C0",horizontalalignment='center')
ax.annotate("$\mathregular{V_{K} = -88mV}$",xy=(V_K,15),
    fontsize=12,color="C1",horizontalalignment='center')
ax.annotate("$\mathregular{V_{Cl} = -10mV}$",xy=(V_Cl,15),
    fontsize=12,color="C2",horizontalalignment='center')
plt.legend(bbox_to_anchor=(1.05, 1.0))
plt.show()
# -
# Simulations of the model are run below. The final voltages correspond to the Nernst potentials once again.
# +
# Simulate each pore model from an uncharged membrane; the single state
# variable is the membrane C component's charge.
tspan = (0.0,1000.0)
x0 = [0.0]
# NOTE(review): V_Na/V_K/V_Cl are reused here for full state
# trajectories, shadowing the scalar Nernst potentials computed above.
t,V_Na = bgt.simulate(Na_model,tspan,x0)
t,V_K = bgt.simulate(K_model,tspan,x0)
t,V_Cl = bgt.simulate(Cl_model,tspan,x0)
fig_flux = plt.figure()
# Scale from V to mV for plotting.
plt.plot(t,1000*V_Na)
plt.plot(t,1000*V_K)
plt.plot(t,1000*V_Cl)
# NOTE(review): axis says ms while tspan is (0, 1000) -- confirm the
# simulation's time unit matches this label.
plt.xlabel("Time (ms)")
plt.ylabel("Membrane potential (mV)")
plt.legend(["Na","K","Cl"],bbox_to_anchor=(1.05, 1.0))
plt.show()
# -
# The code below saves the figures in this notebook
# Persist both figures to the notebook's output directory.
save_figure(fig_affinity,"output/ion_affinity")
save_figure(fig_flux,"output/ion_flux")
| Ion diffusion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Smoke Alarm install model
# ### this notebook represents the current smoke alarm install model
import pandas as pd
import os
import sys
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
# +
# import stored methods
# Resolve the repository root (two levels up from this notebook) and the
# paths of the data-cleaning scripts.
path = Path.cwd().parent.parent
LoadAndCleanACS = path /'src' /'data'/ 'LoadAndCleanACS.py'
LoadAndCleanARCP = path /'src' /'data'/ 'LoadAndCleanARCP.py'
# -
# Run methods to obtain clean datasets
# (the %run magics execute the cleaning scripts in this namespace)
# %run $LoadAndCleanACS
# %run $LoadAndCleanARCP
# to allow for all variables to be displayed in jupyter
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
def StandardizeColumnNames(df):
    """
    Standardize column names in place: lowercase everything and replace
    separator characters (', ', '-', '/', '(', ')', ' ') with underscores.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose columns are renamed (mutated in place).

    Returns
    -------
    pandas.DataFrame
        The same frame, returned for convenience.
    """
    df.columns = map(str.lower, df.columns)
    # regex=False is required: '(' and ')' are regex metacharacters, and
    # pandas versions where str.replace defaults to regex=True raise
    # re.error on them.  ', ' must be replaced before the bare ' '.
    for sep in (', ', '-', '/', '(', ')', ' '):
        df.columns = df.columns.str.replace(sep, '_', regex=False)
    #print(df.columns)
    return df
# ### Data
# Input and output locations for the project data.
input_loc = path /'Data'/ 'Master Project Data'
output_loc = path /'Data'/ 'processed'
arc_path = input_loc / 'ARC Preparedness Data.csv'
# Read GEOID and Zip as strings so leading zeros are preserved.
arc = pd.read_csv(arc_path,
    dtype = {'GEOID': str, 'Zip': str})
arc = StandardizeColumnNames(arc)
arc.dropna(inplace = True)
# trim geoid leading safety marks -- the first two characters are
# presumably a CSV quoting artifact, leaving a state-led GEOID; verify
# against the raw file.
arc['geoid'] = arc['geoid'].str[2:]
arc.head()
# ## EDA
#
# - remove all houses that don't have a previous smoke detector record
# - Determine the median number of house visist
# - Visualize visit distribution
# - use ACS data to determine % of blocks visited
# - determine % blocks visited with >15 visits
#block level
# Distribution of the number of home visits per census block.
counts = arc['geoid'].value_counts()
counts_median = counts.median()
counts.describe()
# ## EDA - Geographic Level
# repeat block Level analysis at various levels
# county
# GEOID prefix lengths: state = 2 chars, county = 5 (state + 3-digit
# county FIPS code), matching geo_level_dict in the model function
# below.  The original slice [:7] mixed in two tract digits and
# over-counted "counties".
county_counts = arc['geoid'].str[:5].value_counts()
print('County Level')
print(county_counts.describe())
# state
state_counts = arc['geoid'].str[:2].value_counts()
print('\n State Level')
print(state_counts.describe())
# Summary stats of the raw alarm counts per visited home.
print(arc['pre_existing_alarms'].describe())
print(arc['pre_existing_alarms_tested_and_working'].describe())
# ## Confidence Interval Motivation
#
# A commonly used formula for a binomial confidence interval relies on approximating the distribution of error about a binomially-distributed observation, ${\displaystyle {\hat {p}}}$, with a normal distribution. This approximation is based on the central limit theorem and is unreliable when the sample size is small or the success probability is close to 0 or 1.
#
# Using the normal approximation, the success probability p is estimated as
#
# ${\displaystyle {\hat {p}}\pm z{\sqrt {\frac {{\hat {p}}\left(1-{\hat {p}}\right)}{n}}},}$
#
# Source https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
def CreateConfidenceIntervals(num_surveys, percentage, z=1.960):
    """Half-width of a normal-approximation binomial confidence interval.

    Parameters
    ----------
    num_surveys : int or array-like
        Number of observations (n) behind each percentage.
    percentage : float or array-like
        Observed success percentage on a 0-100 scale.
    z : float, optional
        Critical value; the default 1.960 gives a 95% interval.

    Returns
    -------
    Half-width in percentage points, so the interval is
    percentage +/- the returned value.  The normal approximation is
    unreliable for small n or percentages near 0 or 100.
    """
    return z * np.sqrt((percentage * (100 - percentage)) / num_surveys)
# ### Feature Engineering
# Create Binary variables out of the smoke_alarms_present and smoke_alarms_tested_and_working variables
#
# We'll then create a new dataset with the aggregated number and percentage of working smoke detectors in each census geography
def CreateSingleLevelSmokeAlarmModel(df, geo_level):
    """Aggregate ARC home-visit records to one row per census geography.

    Parameters
    ----------
    df : pandas.DataFrame
        ARC preparedness data with 'geoid', 'pre_existing_alarms' and
        'pre_existing_alarms_tested_and_working' columns.  NOTE: mutated
        in place (geoid truncated, alarm columns binarized), so callers
        pass arc.copy().
    geo_level : str
        Census geography to aggregate on: 'State', 'County', 'Tract'
        or 'Block'.

    Returns
    -------
    pandas.DataFrame indexed by truncated geoid with columns:
        num_surveys             - total number of surveys conducted
        detectors_found_total   - homes with at least one smoke detector
        detectors_found_prc     - percentage of homes with a detector
        detectors_found_CI      - 95% CI half-width for that percentage
        detectors_working_total - homes with a tested, working detector
        detectors_working_prc   - percentage with a working detector
        detectors_working_CI    - 95% CI half-width for that percentage
    """
    # Number of leading GEOID characters identifying each geography.
    geo_level_dict = {'State': 2, 'County': 5, 'Tract': 11, 'Block': 12}
    df['geoid'] = df['geoid'].str[:geo_level_dict[geo_level]]

    # Binarize the alarm counts: 0 -> no detectors, 1 -> one or more.
    df['pre_existing_alarms'] = df['pre_existing_alarms'].where(
        df['pre_existing_alarms'] < 1, other=1)
    df['pre_existing_alarms_tested_and_working'] = \
        df['pre_existing_alarms_tested_and_working'].where(
            df['pre_existing_alarms_tested_and_working'] < 1, other=1)

    def prc(x):
        # Percentage of surveyed homes in the group flagged 1.
        return np.sum(x) / np.size(x) * 100

    # Named aggregation pins each output column to its function.  The
    # original passed a *set* of functions and renamed columns
    # positionally; set iteration order is arbitrary, so the labels
    # could be attached to the wrong statistics.
    detectors = df.groupby('geoid')['pre_existing_alarms'].agg(
        num_surveys='size',
        detectors_found_total='sum',
        detectors_found_prc=prc)
    detectors['detectors_found_prc'] = detectors['detectors_found_prc'].round(2)

    d2 = df.groupby('geoid')['pre_existing_alarms_tested_and_working'].agg(
        detectors_working_total='sum',
        detectors_working_prc=prc)
    d2['detectors_working_prc'] = d2['detectors_working_prc'].round(2)

    detectors = detectors.merge(d2, how='left', on='geoid')

    detectors['detectors_found_CI'] = CreateConfidenceIntervals(
        detectors['num_surveys'].values,
        detectors['detectors_found_prc'].values)
    detectors['detectors_working_CI'] = CreateConfidenceIntervals(
        detectors['num_surveys'].values,
        detectors['detectors_working_prc'].values)

    # Final column order expected by downstream cells.
    column_order = ['num_surveys',
                    'detectors_found_total',
                    'detectors_found_prc',
                    'detectors_found_CI',
                    'detectors_working_total',
                    'detectors_working_prc',
                    'detectors_working_CI']
    return detectors[column_order]
# Aggregate at each census geography level.  Copies are passed because
# CreateSingleLevelSmokeAlarmModel mutates its input frame.
arc_state = CreateSingleLevelSmokeAlarmModel(arc.copy(),'State')
arc_state.head()
arc_county = CreateSingleLevelSmokeAlarmModel(arc.copy(),'County')
arc_county.describe()
arc_tract = CreateSingleLevelSmokeAlarmModel(arc.copy(),'Tract')
arc_tract.describe()
arc_block = CreateSingleLevelSmokeAlarmModel(arc.copy(),'Block')
arc_block.describe()
# Distribution of survey counts per tract, on a log scale (the raw
# counts are heavily right-skewed).  The original axis labels were the
# placeholders "X"/"Y".
plt.figure()
plt.hist(np.log(arc_tract['num_surveys']),bins=6)
plt.xlabel("log(number of surveys)")
plt.ylabel("Number of tracts")
plt.xticks(np.arange(8))
plt.show()
# Same distribution at the block level.
plt.figure()
plt.hist(np.log(arc_block['num_surveys']),bins=6)
plt.xlabel("log(number of surveys)")
plt.ylabel("Number of blocks")
plt.xticks(np.arange(8))
plt.show()
# Scratch cell: presumably previewing the tick positions used above.
np.arange(8)
# Fraction of blocks with more than 30 surveys -- roughly the sample
# size where the normal-approximation CI becomes reasonable.
arc_block[arc_block['num_surveys'] > 30].shape[0]/ arc_block.shape[0]
# +
blockGEOID = []
EstimateMean = []
EstimateCI = []
EstimateGeography = []
for index, row in arc_block.head(n=2).iterrows():
# append block GEOID-Data
if row['num_surveys'] > 30:
| Code/Models/Smoke_Alarm_Model.ipynb |