code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Import the project utility module first: it extends sys.path so the
# dense_correspondence packages below become importable.
import dense_correspondence_manipulation.utils.utils as utils
utils.add_dense_correspondence_to_python_path()
from dense_correspondence.training.training import *
import sys
import logging
#utils.set_default_cuda_visible_devices()
utils.set_cuda_visible_devices([0]) # use this to manually set CUDA_VISIBLE_DEVICES
from dense_correspondence.training.training import DenseCorrespondenceTraining
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset
# Show INFO-level training progress messages in the notebook output.
logging.basicConfig(level=logging.INFO)
from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluation
# -
# ## Load the configuration for training
# +
# Dataset configuration: which scenes/objects the correspondence network trains on.
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                               'dataset', 'composite', 'caterpillar_upright.yaml')
config = utils.getDictFromYamlFilename(config_filename)
# Training hyperparameters loaded from YAML, then overridden below.
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                                 'training', 'training.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)
# Output directory is relative to the pdc data directory (resolved later).
logging_dir = "trained_models/tutorials"
num_iterations = 3500
d = 3 # the descriptor dimension
name = "caterpillar_%d" %(d)
# Override the YAML defaults with the run-specific values chosen above.
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
train_config["training"]["num_iterations"] = num_iterations
# Stage toggles consumed by the cells below.
TRAIN = True
EVALUATE = True
# -
# ## Train the network
#
# This should take about ~12-15 minutes with a GTX 1080 Ti
# +
# All of the saved data for this network will be located in the
# code/data/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
    # NOTE: Python 2 print statements -- this notebook runs on a python2 kernel.
    print "training descriptor of dimension %d" %(d)
    train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
    train.run()
    print "finished training descriptor of dimension %d" %(d)
# -
# ## Evaluate the network quantitatively
#
# This should take ~5 minutes.
# +
# Resolve the model folder to an absolute path inside the pdc data directory.
model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_data_relative_path_to_absolute_path(model_folder)
if EVALUATE:
    DCE = DenseCorrespondenceEvaluation
    num_image_pairs = 100  # number of image pairs sampled for the quantitative evaluation
    DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)
# See `evaluation_quantitative_tutorial.ipynb` for a better place to display the plots.
print(model_folder)
| dense_correspondence/training/training_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (nlp_new)
# language: python
# name: nlp_new
# ---
import scipy
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Mean and (population) variance of a small sample.
a = np.array([1,2,3,4,5,6])
np.mean(a), np.var(a)
# Two perfectly anti-correlated sequences: y decreases exactly as x increases.
x = np.array([1,2,3,4,5,6,7,8,9])
x
y = np.array([9,8,7,6,5,4,3,2,1])
y
# 2x2 covariance matrix of (x, y); the off-diagonal entries are negative here.
np.cov(x,y)
# Pearson correlation matrix; corr(x, y) is exactly -1 for these sequences.
np.corrcoef(x,y)
# ## Calculating Covariance from a Random Variable
# x: 100 uniform draws from [0, 1); y: 100 evenly spaced points over [0, 100].
x = np.random.random([100])
y = np.linspace(0,100,100)
x.shape, y.shape
plt.scatter(x,y)
# Sample statistics of the uniform draws (expected: mean ~0.5, variance ~1/12).
np.mean(x), np.var(x)
# BUG FIX: the original called bare `randn()`, which is a NameError -- only
# `np` is imported in this notebook. Use the fully-qualified NumPy function;
# with no arguments it returns a single float drawn from the standard normal.
x = np.random.randn()
# One sample from a Gaussian with mean 0.03 and standard deviation 0.5.
mu, sigma = 0.03, 0.5
s = np.random.normal(mu, sigma, 1)
s  # display the 1-element sample array
# Regenerate x and construct a y that depends on x plus Gaussian noise.
x = np.random.random([100])
# NOTE(review): np.random.normal(mu, sigma, 1) draws a SINGLE noise value
# that is broadcast over all 100 points, i.e. every y is shifted by the same
# constant. If independent per-point noise was intended, the size argument
# should be 100 -- confirm against the author's intent.
y = x + np.random.normal(mu, sigma, 1)
x
y
plt.scatter(x,y)
| notebooks/3.causality_review.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# ===========================================================
# Plot single trial activity, grouped by ROI and sorted by RT
# ===========================================================
#
# This will produce what is sometimes called an event related
# potential / field (ERP/ERF) image.
#
# The EEGLAB example file - containing an experiment with button press responses
# to simple visual stimuli - is read in and response times are calculated.
# ROIs are determined by the channel types (in 10/20 channel notation,
# even channels are right, odd are left, and 'z' are central). The
# median and the Global Field Power within each channel group is calculated,
# and the trials are plotted, sorted by response time.
#
#
# +
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import mne
from mne.datasets import testing
from mne import Epochs, io, pick_types
from mne.event import define_target_events
# Echo the example's module docstring when run as a script.
print(__doc__)
# -
# Load EEGLAB example data (a small EEG dataset)
#
#
# +
# Download (if needed) and locate the MNE testing dataset on disk.
data_path = testing.data_path()
fname = data_path + "/EEGLAB/test_raw.set"
montage = data_path + "/EEGLAB/test_chans.locs"
event_id = {"rt": 1, "square": 2} # must be specified for str events
eog = {"FPz", "EOG1", "EOG2"}  # channels to treat as EOG rather than EEG
raw = io.eeglab.read_raw_eeglab(fname, eog=eog, montage=montage,
                                event_id=event_id)
picks = pick_types(raw.info, eeg=True)  # indices of the EEG channels only
events = mne.find_events(raw)
# -
# Create Epochs
#
#
# +
# define target events:
# 1. find response times: distance between "square" and "rt" events
# 2. extract A. "square" events B. followed by a button press within 700 msec
tmax = .7
sfreq = raw.info["sfreq"]  # sampling frequency, needed to convert samples to time
reference_id, target_id = 2, 1  # "square" stimulus followed by "rt" button press
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
                                       tmin=0., tmax=tmax, new_id=2)
# Epoch around the retained "square" events; rts holds the per-trial response times.
epochs = Epochs(raw, events=new_events, tmax=tmax + .1,
                event_id={"square": 2}, picks=picks)
# -
# Plot
#
#
# +
# Plotting parameters: order trials from fastest to slowest response time.
order = rts.argsort()

# Group channel indices into regions of interest. In 10/20 notation the
# trailing character of a channel name encodes its position: 'z' is the
# midline, odd digits are left-hemisphere, even digits are right-hemisphere.
rois = dict()
for idx, ch_name in enumerate(epochs.ch_names):
    position_code = ch_name[-1]
    if position_code == "z":
        region = "Midline"
    elif int(position_code) % 2:
        region = "Left"
    else:
        region = "Right"
    rois.setdefault(region, list()).append(idx)

# Draw one ERP image per aggregation method (global field power / median),
# overlaying the per-trial response times converted from ms to seconds.
for metric in ('gfp', 'median'):
    epochs.plot_image(group_by=rois, order=order,
                      overlay_times=rts / 1000., sigma=1.5,
                      combine=metric,
                      ts_args=dict(vlines=[0, rts.mean() / 1000.]))
| 0.15/_downloads/plot_roi_erpimage_by_rt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preprocessing Data
#
# We will be using the [Rotten Tomatoes movie reviews dataset](https://www.kaggle.com/c/sentiment-analysis-on-movie-reviews/data).
# +
from kaggle.api.kaggle_api_extended import KaggleApi
import zipfile
import os
# Authenticate against Kaggle (reads credentials from ~/.kaggle/kaggle.json).
api = KaggleApi()
api.authenticate()
# Download both competition files, unzip each in place, then delete the zip.
for file in ["train.tsv", "test.tsv"]:
    api.competition_download_file(
        "sentiment-analysis-on-movie-reviews", f"{file}.zip", path="./"
    )
    with zipfile.ZipFile(f"{file}.zip", "r") as zip_ref:
        zip_ref.extractall("./")
    os.remove(f"{file}.zip")
# -
# ---
#
# ## Preparing Data
#
# We will start by reading the data into a Pandas Dataframe using the `read_csv` function. Because we're working with *.tsv* (*tab separated values*) files we need to specify that we will be taking tab characters as the delimiters:
# +
import pandas as pd
# The competition data is tab-separated, hence sep="\t".
df = pd.read_csv("train.tsv", sep="\t")
df.head()
# -
# The *Phrase* column contains all of our text data that we will be processing. We can also see that there are many copies through *segments* of the same answer (note that the *SentenceId* value for each of these copies is identical). We can reduce the amount of noise in our dataset by removing these duplicates.
# +
# Optional de-duplication by sentence -- currently disabled, so all phrase
# segments are kept.
# df = df.drop_duplicates(subset=['SentenceId'], keep='first')
df.head()
# -
# Let's check the distribution of sentiment classes across our data.
df["Sentiment"].value_counts().plot(kind="bar")
# We will be tokenizing this text to create two input tensors; our input IDs, and attention mask.
#
# We will contain our tensors within two numpy arrays, which will be of dimensions `len(df) * 512` - the `512` is the sequence length of our tokenized sequences for BERT, and `len(df)` the number of samples in our dataset.
# +
import numpy as np
# BERT's maximum input length; every review is padded/truncated to this.
seq_len = 512
num_samples = len(df)
num_samples, seq_len
# -
# Now we can begin tokenizing with a `BertTokenizer`, like so:
# +
from transformers import BertTokenizer
# initialize tokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
# tokenize - this time returning Numpy tensors
tokens = tokenizer(
    df["Phrase"].tolist(),
    max_length=seq_len,
    truncation=True,          # cut sequences longer than max_length
    padding="max_length",     # pad shorter sequences up to max_length
    add_special_tokens=True,  # prepend [CLS] / append [SEP]
    return_tensors="np",
)
# -
# Which returns us three numpy arrays - *input_ids*, *token_type_ids*, and *attention_mask*.
tokens.keys()
# Preview the first ten rows of each tokenizer output array.
print(
    "Input IDs:",
    tokens["input_ids"][:10],
    "\nToken Type IDs:",
    tokens["token_type_ids"][:10],
    "\n\nAttention Mask:",
    tokens["attention_mask"][:10],
)
# And now we save them to file as Numpy binary files:
# (only input IDs and attention mask are persisted; token_type_ids are not
# saved here)
with open("movie-xids.npy", "wb") as f:
    np.save(f, tokens["input_ids"])
with open("movie-xmask.npy", "wb") as f:
    np.save(f, tokens["attention_mask"])
# Now that we have them on file, we can delete the in-memory arrays to free up memory.
del tokens
# Our input tensors are prepared, but we haven't touched our target data yet. So, let's move onto that.
#
# Presently our target data is a set of integer values (representing sentiment classes) in the *Sentiment* column of our dataframe `df`. We need to extract these values and *one-hot* encode them into another numpy array, which will have the dimensions `len(df) * number of label classes`. Again, we will initialize a numpy zero array beforehand, but we won't populate it row by row - we will use some fancy indexing techniques instead.
# Extract the integer sentiment labels (values 0..4) from the dataframe.
arr = df["Sentiment"].values
# Allocate a zero matrix with one row per sample and one column per class;
# arr.max() + 1 columns cover every label from 0 up to the maximum, so the
# five classes [0, 1, 2, 3, 4] give 4 + 1 == 5 columns.
n_classes = arr.max() + 1
labels = np.zeros((num_samples, n_classes))
labels.shape
# Fancy indexing writes a single 1 into each row at the column given by that
# row's label, turning the zero matrix into a one-hot encoding:
# +
rows = np.arange(num_samples)
labels[rows, arr] = 1
labels
# -
# Persist the one-hot label matrix as a NumPy binary file, alongside the
# token arrays saved earlier.
with open("movie-labels.npy", "wb") as f:
    np.save(f, labels)
# + pycharm={"name": "#%%\n"}
| course/project_build_tf_sentiment_model/00_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <h1 align="center">Fundamentos de Programación</h1>
# <h1 align="center">Módulo 01: Introducción</h1>
# <h1 align="center">2021/02</h1>
# <h1 align="center">MEDELLÍN - COLOMBIA </h1>
# -
# <table>
# <tr align=left><td><img align=left src="https://github.com/carlosalvarezh/Fundamentos_Programacion/blob/main/images/CC-BY.png?raw=true">
# <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license.(c) <NAME></td>
# </table>
# ***
# ***Docente:*** <NAME>, I.C. D.Sc.
#
# ***e-mail:*** <EMAIL>
#
# ***skype:*** carlos.alberto.alvarez.henao
#
# ***Linkedin:*** https://www.linkedin.com/in/carlosalvarez5/
#
# ***github:*** https://github.com/carlosalvarezh/Fundamentos_Programacion
#
# ***Herramienta:*** [Jupyter Notebook](http://jupyter.org/)
#
# ***Kernel:*** Python 3.8
# ***
# <a id='TOC'></a>
# + [markdown] toc=true
# <h1>Tabla de Contenidos<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Presentación-del-Curso---Introducción" data-toc-modified-id="Presentación-del-Curso---Introducción-1"><span class="toc-item-num">1 </span>Presentación del Curso - Introducción</a></span><ul class="toc-item"><li><span><a href="#TODOS-DEBERÍAMOS-SABER-PROGRAMAR...-TODOS!" data-toc-modified-id="TODOS-DEBERÍAMOS-SABER-PROGRAMAR...-TODOS!-1.1"><span class="toc-item-num">1.1 </span>TODOS DEBERÍAMOS SABER PROGRAMAR... TODOS!</a></span></li><li><span><a href="#Objetivo-General:" data-toc-modified-id="Objetivo-General:-1.2"><span class="toc-item-num">1.2 </span>Objetivo General:</a></span></li><li><span><a href="#Objetivos-específicos-del-curso" data-toc-modified-id="Objetivos-específicos-del-curso-1.3"><span class="toc-item-num">1.3 </span>Objetivos específicos del curso</a></span></li><li><span><a href="#Competencias:" data-toc-modified-id="Competencias:-1.4"><span class="toc-item-num">1.4 </span>Competencias:</a></span></li><li><span><a href="#Público-Objetivo:" data-toc-modified-id="Público-Objetivo:-1.5"><span class="toc-item-num">1.5 </span>Público Objetivo:</a></span></li><li><span><a href="#Contenido-programático" data-toc-modified-id="Contenido-programático-1.6"><span class="toc-item-num">1.6 </span>Contenido programático</a></span></li><li><span><a href="#Evaluación" data-toc-modified-id="Evaluación-1.7"><span class="toc-item-num">1.7 </span>Evaluación</a></span></li><li><span><a href="#Metodología" data-toc-modified-id="Metodología-1.8"><span class="toc-item-num">1.8 </span>Metodología</a></span></li></ul></li><li><span><a href="#Recursos-de-programación" data-toc-modified-id="Recursos-de-programación-2"><span class="toc-item-num">2 </span>Recursos de programación</a></span><ul class="toc-item"><li><span><a href="#Por-qué-usar-Python?" 
data-toc-modified-id="Por-qué-usar-Python?-2.1"><span class="toc-item-num">2.1 </span>Por qué usar Python?</a></span></li><li><span><a href="#Con-Python-se-pueden-hacer:" data-toc-modified-id="Con-Python-se-pueden-hacer:-2.2"><span class="toc-item-num">2.2 </span>Con Python se pueden hacer:</a></span></li><li><span><a href="#Algunas-Empresas-que-usan-Python:" data-toc-modified-id="Algunas-Empresas-que-usan-Python:-2.3"><span class="toc-item-num">2.3 </span>Algunas Empresas que usan Python:</a></span></li><li><span><a href="#Lenguajes-de-Programación-más-usados-a-nivel-mundial:" data-toc-modified-id="Lenguajes-de-Programación-más-usados-a-nivel-mundial:-2.4"><span class="toc-item-num">2.4 </span>Lenguajes de Programación más usados a nivel mundial:</a></span></li><li><span><a href="#Por-dónde-empiezo?" data-toc-modified-id="Por-dónde-empiezo?-2.5"><span class="toc-item-num">2.5 </span>Por dónde empiezo?</a></span></li><li><span><a href="#De-dónde-obtengo-información-para-aprender-a-programar?" 
data-toc-modified-id="De-dónde-obtengo-información-para-aprender-a-programar?-2.6"><span class="toc-item-num">2.6 </span>De dónde obtengo información para aprender a programar?</a></span><ul class="toc-item"><li><span><a href="#Algoritmia" data-toc-modified-id="Algoritmia-2.6.1"><span class="toc-item-num">2.6.1 </span>Algoritmia</a></span></li><li><span><a href="#Python-como-lenguaje-de-programación-(uno-entre-muchos)" data-toc-modified-id="Python-como-lenguaje-de-programación-(uno-entre-muchos)-2.6.2"><span class="toc-item-num">2.6.2 </span>Python como lenguaje de programación (uno entre muchos)</a></span></li><li><span><a href="#MooCs" data-toc-modified-id="MooCs-2.6.3"><span class="toc-item-num">2.6.3 </span>MooCs</a></span></li><li><span><a href="#Tutoriales-on-line:" data-toc-modified-id="Tutoriales-on-line:-2.6.4"><span class="toc-item-num">2.6.4 </span>Tutoriales on-line:</a></span></li><li><span><a href="#Apps-para-Smartphones-y-Tablets" data-toc-modified-id="Apps-para-Smartphones-y-Tablets-2.6.5"><span class="toc-item-num">2.6.5 </span>Apps para Smartphones y Tablets</a></span></li><li><span><a href="#Repositorios" data-toc-modified-id="Repositorios-2.6.6"><span class="toc-item-num">2.6.6 </span>Repositorios</a></span></li></ul></li></ul></li></ul></div>
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Presentación del Curso - Introducción
# + [markdown] slideshow={"slide_type": "slide"}
# ### TODOS DEBERÍAMOS SABER PROGRAMAR... TODOS!
# + slideshow={"slide_type": "subslide"}
# Helpers for embedding rich media (videos, HTML, iframes) in the notebook.
from IPython.display import YouTubeVideo, HTML, IFrame
YouTubeVideo('Y1HHBXDL9bg', width=640, height=480)
# -
# ### Objetivo General:
# + [markdown] slideshow={"slide_type": "subslide"}
# - Desarrollar habilidades de pensamiento de orden superior, como el razonamiento abstracto, el pensamiento crítico y la resolución de problemas, con base en los conceptos de la computación.
# -
# ### Objetivos específicos del curso
# - Abstraer la información relevante de un problema cotidiano y proponer una estrategia metódica de solución.
#
#
# - Desarrollar algoritmos y programas simples en Python.
#
#
# - Entender y aplicar conceptos básicos de ciencias de la computación, tales como: representación de datos, algoritmia, funciones, patrones, entre otros.
# ### Competencias:
# + [markdown] slideshow={"slide_type": "fragment"}
# Tras participar del proceso cognoscitivo y práctico, se espera que el alumno este en capacidad de:
# Entender la sintaxis de lenguaje `Python` al leer fuentes de otros programadores.
# Crear(Escribir) Scripts en `Python`, para crear diferentes tipos de aplicaciones como tipo consola(terminal),`TUI`, `GUI` y páginas Web básicas.
# -
# ### Público Objetivo:
# + [markdown] slideshow={"slide_type": "fragment"}
# Todas las personas que deseen aprender un lenguaje de programación, múltiples propósitos y diferentes tipos de aplicaciones.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Contenido programático
# + [markdown] slideshow={"slide_type": "subslide"}
# |**Semana** | **Fecha** |**Contenido** |**Observación**|
# |:-----:|:-------------:|-----------|----------------------|
# |01 |19/07/2021|Descripción del curso y Pacto pedagógico| |
# |02|26-27/07/2021|Introducción a la programación y algoritmia||
# |03|02-03/08/2021|Introducción a plataformas virtuales de programación. Conceptos básicos de los tipos de variables en Python, entrada y salida de datos.||
# |04|09-10/08/2021|Condicionales||
# |05|16-17/08/2021|Funciones y modulos (concepto de funciones, creación de funciones, y tipos de retorno)|Taller 01 (10%)|
# |06|23-24/08/2021|Ciclos parte 1||
# |07|30-31/08/2021|Manejo de cadenas (concatenación y manipulación de textos)||
# |08|06-07/09/2021|Arreglos (agregación, modificación, acceso, borrado, y operaciones con arreglos)|Asamblea Estudiantíl|
# |09|13-14/09/2021|Ciclos parte 2 y ejercicios.||
# |- |20-25/09/2021||Semana de receso académico|
# |10|27-28/09/2021|Diccionarios y tuplas (agregación, modificación, acceso, borrado, y operaciones con diccionarios)|Taller 02 (20%)|
# |11|04-05/10/2021|Ficheros (operaciones con ficheros y manipulación de ficheros)||
# |12|11-12/10/2021|Análisis de datos y ejercicios (ordenamiento y busqueda)||
# |13|18-19/10/2021|Ecosistema Python: Bibliotecas científicas (Numpy)|Taller 03 (20%)|
# |14|25-26/10/2021|Ecosistema Python: Visualización (Matplotlib)|Finalización Ejercicios VPL (20%)|
# |15|01-02/11/2021|Ecosistema Python: Análisis de Datos (Pandas)|Proyecto entrega 1 (10%) - Reporte 70%|
# |16|08-09/11/2021|Cierre: Presentación proyectos||
# ||15-20/11/2021||Semana de Receso académico para preparación de finales|
# ||22-27/11/2021||Semana de finales|
# ||01/12/2021||Cierre académico - Reporte del 100%|
# -
# ### Evaluación
# - Ejercicios en Moodle VPL (20%)
#
#
# - Talleres (50% )
# - Taller 01: (10%)
# - Taller 02: (20%)
# - Taller 03: (20%)
#
#
# - 2 entregables (30%)
# - Estructuras de programación (10%)
# - Pandas (20%)
# ### Metodología
# La modalidad del curso es ***presencial*** sin embargo, entendiendo la actual situación por la que atravesamos, los estudiantes que tengan dificultad de asistir presencialmente en el campus de la Universidad lo podrán hacer a través de TEAMS. ***Las clases no serán grabadas***.
#
# Aunque la asistencia a clases en la Universidad es voluntaria, se tomará asistencia por cuestiones de controles internos. Se recomienda asistir a todas las clases y aprovechar los espacios de discusión en tiempo real.
#
# Las notas son responsabilidad del estudiante, NO DEL PROFESOR. Al final del semestre por favor evitar enviar correos del tipo:
#
# - “por favor colocarme un trabajo adicional”
#
# - “me van a quitar la beca”
#
# - “el promedio me va a quedar muy bajito”
#
# - “aunque la pierda que me quede altica”
#
#
#
# ***Clases magistrales:*** Desde la primera clase el estudiante contará con la totalidad del material de estudio, por lo que será su responsabilidad repasar las clases anteriores y actual antes de cada clase.
#
#
# ***Clases prácticas donde se desarrollan talleres:*** terminar todos los talleres y ejercicios de las clases anteriores para ir al día con el curso
#
#
# ***Asesorías:*** primero tratar de utilizar el canal de dudas de Teams. Luego llevar dudas puntuales a la asesoría. Evite estudiar y aprender empleando Youtube.
#
# <span style='background :yellow' > ***Por cada hora de clase un estudiante debería dedicar, AL MENOS, 2 horas de estudio fuera del aula.*** </span>
# [Volver a la Tabla de Contenido](#TOC)
# ## Recursos de programación
# <p float="center">
# <img src="https://github.com/carlosalvarezh/FundamentosProgramacion/blob/main/images/Sem01Img01.jpg?raw=true" width="500" />
# </p>
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Por qué usar Python?
# + [markdown] slideshow={"slide_type": "fragment"}
# `Python` es un lenguaje de programación multiplataforma, algunas de sus ventajas son: ayuda a mejorar la productividad ya que permite trabajar mucho más rápido que cualquier otro lenguaje, es versátil y soporta programación orientada a objetos, cuenta con una gran extensión de bibliotecas para un mejor uso, es un lenguaje de programación sostenible lo cual permite leer y modificar de una manera fácil los programas.
#
# - Lenguaje de programación de alto nivel creado por [Guido van Rossum](https://gvanrossum.github.io/ "Guido van Rossum's home page") a finales de los 80's.
#
#
# - Actualmente ocupa los primeros lugares como uno de los lenguajes de programación más ampliamente usados a nivel mundial. [TIOBE index enero/2021](https://www.tiobe.com/tiobe-index/ "tiobe.com") y [PYPL PopularitY of Programming Language](https://pypl.github.io/PYPL.html "PYPL")
#
#
# - Una de las mayores ventajas es su "simplicidad": En general, se requiere de muchas menos líneas de programación para ejecutar una determinada tarea que en otros lenguajes (`C/C++`, `java`, $\ldots$.).
#
#
# - Viene con una amplia colección de bibliotecas que permiten extender las capacidades del lenguaje.
#
#
# - [Este](https://www.youtube.com/watch?v=Og847HVwRSI&feature=emb_logo "Most popular programming languages 1965 - 2019") es un video muy interesante que muestra la evolución de los lenguajes de programación más usados a nivel mundial desde 1965 y hasta 2019. Ampliamente recomendable su visualización:
# -
# ### Con Python se pueden hacer:
# + [markdown] slideshow={"slide_type": "subslide"}
# - Aplicaciones Web.
#
# - Análisis de Datos.
#
# - Aprendizajes de Maquina.
#
# - Visión Artificial.
#
# - Robótica, programar "pequeñas" computadoras(Raspberry Pi).
#
# - Juegos.
#
# - Web Scraping.
#
# - Automatizar(Scripts)
# -
# ### Algunas Empresas que usan Python:
# <img src="https://github.com/carlosalvarezh/FundamentosProgramacion/blob/main/images/Sem01Img02.PNG?raw=true" width="500" />
# </p>
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Lenguajes de Programación más usados a nivel mundial:
# -
# En el siguiente video se podrá ver como ha sido la evolución de los lenguajes de programación más populares en la historia
# Embed the "most popular programming languages over time" history video.
YouTubeVideo('Og847HVwRSI', width=640, height=480)
# ### Por dónde empiezo?
# + [markdown] slideshow={"slide_type": "slide"}
# Lo primero es elegir una herramienta de trabajo sobre la cual realizar el código. A continuación me permito compartirles solo algunas herramientas de esas que he tenido la oportunidad de revisar y me parecen apropiadas para un curso de este nivel. No son las únicas, por el contrario, existen muchas otras más, quizás mejores, dependiendo del interés particular de desarrollo de cada quién.
# + [markdown] slideshow={"slide_type": "slide"}
# Algunos Aplicativos usados para programar, enfocándonos luego en `Python`, son:
#
#
# - **[VPL - Laboratorio de programación virtual para Moodle](https://contenidosint.org/login/index.php "VPL- Virtual Programming Lab"):** es un módulo de actividades que gestiona las asignaciones de programación y cuyas características destacadas son: Editar el código fuente de los programas en el navegador. Ejecutar programas de forma interactiva en el navegador. Ejecutar pruebas para revisar los programas. Buscar similitudes entre archivos. Establecer restricciones de edición y evitar el pegado de texto externo.
#
#
# - **[PSeInt](http://pseint.sourceforge.net/ "PSeInt"):** "*PSeInt es una herramienta para asistir a un estudiante en sus primeros pasos en programación. Mediante un simple e intuitivo pseudolenguaje en español (complementado con un editor de diagramas de flujo), le permite centrar su atención en los conceptos fundamentales de la algoritmia computacional, minimizando las dificultades propias de un lenguaje y proporcionando un entorno de trabajo con numerosas ayudas y recursos didácticos*".
#
#
# - **[Anaconda](https://www.anaconda.com/ "Anaconda")**: suite de trabajo para desarrollo de programas en [Python](https://www.python.org/ "Python.org") o [R](https://www.r-project.org/ "R") para proyectos de computación científica, que incluye el Notebook [Jupyter](http://jupyter.org/ "Jupyter"), plataforma en la que están hechas estas notas del curso, y el IDE [Spyder](https://www.spyder-ide.org/ "Spyder"), entre otras herramientas.
#
#
# - **[Google Colab](https://colab.research.google.com/ "Google Colaboratory"):** *Colaboratory* (también conocido como *Colab*) es un entorno de *notebook Jupyter* gratuito que se ejecuta en la nube y almacena sus notebooks en *Google Drive*. *Colab* fue originalmente un proyecto interno de *Google* y a partir de octubre de 2019 *Colab* solo le permite crear notebooks con núcleos `Python 2` y `Python 3`; sin embargo, si tiene una computadora portátil con kernels `ir` o `swift`, funcionará, ya que tanto `R` como `Swift` están instalados en el contenedor. Otros núcleos como `Julia` aún no son compatibles.
#
#
# **IDE** (*Integrated Development Environment*) o *Entorno de Desarrollo Integrado*, es una aplicación informática que permite la integración de varios servicios que permiten el desarrollo de un software o programa de computador.
#
# Normalmente consiste de:
#
# - Editor de Código fuente
#
# - Depurador
#
# A seguir enunciaremos algunos IDE ampliamente usados en la industria:
#
# - **[PyCharm](https://www.jetbrains.com/pycharm-edu/ "PyCharm"):** es un entorno de desarrollo integrado (*IDE*) utilizado en la programación de computadoras, específicamente para el lenguaje `Python`. Es desarrollado por la compañía checa [JetBrains](https://www.jetbrains.com/ "JetBrains"). Proporciona análisis de código, un depurador gráfico, un probador de unidad integrado, integración con sistemas de control de versiones (*VCS*) y admite el desarrollo web con *Django*, así como Data Science con *Anaconda*.
#
#
# - **[repl.it](https://www.repl.it "repl.it"):** es una start-up basada en San Francisco y un *IDE* en línea. Su nombre proviene de "[read – eval – print loop](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop "repl")". Es compatible [*+50* lenguajes de programación](https://repl.it/languages "repl.it languages"): *JavaScript*, *Python*, *PHP*, *C/C++*, *Fortran* o incluso *QBasic*, entre muchos otros, así como de frameworks populares como Django, Ruby on Rails y Sinatra. Y si quieres lastimarte y escribir en Brainfuck, también puedes hacerlo.
#
#
# - **[Visual Studio Code](https://code.visualstudio.com/ "Visual Studio Code"):** es un editor de código fuente gratuito creado por Microsoft para Windows, Linux y macOS. Las características incluyen soporte para depuración, resaltado de sintaxis, finalización inteligente de código, fragmentos, refactorización de código y Git integrado
#
#
# También se puede desarrollar código empleando un bloc de notas y grabando el archivo del código creado con extensión `.py` que será reconocido por el compilador del lenguaje `python`. Adicionalmente a los bloc de notas "clásico" del windows, se tiene:
#
# - **[Notepad++](http://notepad-plus-plus.org/ "Notepad"):** es un editor de código fuente (ojo, no ejecuta!!! solo edita!!!). Presenta resaltado de sintaxis, plegado de código y autocompletado limitado para lenguajes de programación, scripting y marcado, pero no terminación de código inteligente o verificación de sintaxis. Como tal, puede resaltar correctamente el código escrito en un esquema compatible, pero no se puede verificar si la sintaxis es internamente sólida o compilable. A partir de la versión 7.6.3, *Notepad++* puede resaltar los elementos sintácticos de *+70 lenguajes de programación*. Solo se cuenta para trabajar en el SO *Windows*.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### De dónde obtengo información para aprender a programar?
# -
# #### Algoritmia
# + [markdown] slideshow={"slide_type": "slide"}
# Algunos libros introductorios en [Algoritmia](https://es.wikiversity.org/wiki/Algoritmia_y_programaci%C3%B3n_b%C3%A1sica/Aspectos_introductorios "Algoritmia"), que no emplean ningún lenguaje en particular
#
# - [<NAME>](http://latinoamerica.cengage.com/ls/introduccion-a-la-programacion-logica-y-diseno-7a-ed/ "<NAME>. Introducción a la programación Lógica y Diseño"). Introducción a la programación Lógica y Diseño (en físico, $$).
#
#
# - [Pinales y Velázquez](http://www.uaa.mx/direcciones/dgdv/editorial/docs/algoritmos.pdf "Pinales y Velázquez. Algoritmos Resueltos con Diagramas de Flujo y Pseudocódigo"). Algoritmos Resueltos con Diagramas de Flujo y Pseudocódigo (en pdf, libre descarga).
# -
# #### Python como lenguaje de programación (uno entre muchos)
# + [markdown] slideshow={"slide_type": "slide"}
# - [Python.org](https://www.python.org/ "Python.org"): Página oficial del lenguaje de programación Python (inglés)
#
#
# - [<NAME>. Think Python. 2a Ed. Green Tea Press.](http://greenteapress.com/wp/think-python-2e/ "<NAME>. Think Python"): Libro en PDF de libre descarga (inglés).
# -
# #### MooCs
# **MooC** es la abreviatura de **Curso masivo abierto en línea** (*Massive open online Course*, en inglés) . En términos simples, es un curso de estudio puesto a disposición por Internet para una gran cantidad de personas. Algunas de las compañías de cursos en línea más comunes que compiten por este nuevo negocio educativo son: [Udemy](https://www.udemy.com/ "Udemy"), [Edx](https://www.edx.org/ "Edx"), [Coursera](https://www.coursera.org/ "Coursera") y [DataCamp](https://www.datacamp.com/ "DataCamp"), entre muchos otros.
#
# Los *MooC* generalmente tienen fechas específicas de inicio y finalización. Los interesados miran conferencias de video en línea, completan las tareas y reciben la calificación de inmediato la mayor parte del tiempo. Con el auge de la globalización y la tecnología, la accesibilidad a la educación superior ha aumentado dramáticamente.
#
# Los MooC son generalmente gratuitos, y los cursos ofrecidos en línea a menudo provienen de algunas de las universidades más prestigiosas como *Harvard*, *MIT* y *Stanford*. Cada año, los *MooC* se vuelven más populares. Hay más cursos disponibles y la calidad de la educación en línea aumenta. Los MooC se caracterizan por su masividad, apertura y conectividad. Utilizan estrategias similares a las redes sociales, conectando a las masas con los beneficios del aprendizaje.
#
# Pero... cuál de todos escoger? en [este enlace](https://coolcollegehelpers.com/udemy-vs-edx-coursera-udacity/ "Udemy vs Edx vs Coursera vs Udacity") pueden encontrar una excelente descripción de cada uno de ellos y encontrar cuál se adapta más a sus necesidades de capacitación, o en [este](https://medium.com/@adiyagil/udemy-vs-coursera-vs-lynda-the-ultimate-comparison-70586665dca5 "Ultimate comparison") también, y [aquí](https://www.thecrowder.com/online-courses-sites?utm_source=me&utm_medium=organic&utm_campaign=education&utm_content=Udemy-VS-Coursera-VS-Lynda&ref=me-organic-education-Udemy-VS-Coursera-VS-Lynda "Online Courses Sites") encontrará un cuadro comparativo y acceso directo a algunas de esas plataformas.
# #### Tutoriales on-line:
# + [markdown] slideshow={"slide_type": "slide"}
# Algunas páginas que ofrecen cursos on-line en diferentes áreas de las *TICs* (inglés/español). Algunos son libres ("gratis"), pero si necesitas un certificado debes pagar.
#
# - **[Python Course](https://www.python-course.eu/index.php):** Excelente página para aprender `Python` a nivel básico, intermedio y/o avanzado (inglés)
#
#
# - **[SocraticaStudios](https://www.youtube.com/user/SocraticaStudios "Socratica")** Canal de [Youtube](https://www.youtube.com/ "Youtube") con algunos recursos en español.
# -
# #### Apps para Smartphones y Tablets
# + [markdown] slideshow={"slide_type": "slide"}
# - **[Sololearn](https://www.sololearn.com/Course/Python/)**
# -
# #### Repositorios
# + [markdown] slideshow={"slide_type": "slide"}
# En los repositorios de software encontrarán abundante información de códigos ya creados que les permitirán avanzar en el conocimiento de los diferentes esquemas de programación, así como ver cómo es que otros programan y tienen (posiblemente) buenas prácticas de programación que te servirán para tener unos códigos muy eficientes y limpios.
#
# - **[Github](https://github.com/ "Github")**
# -
# [Volver a la Tabla de Contenido](#TOC)
| Modulo01_Introduccion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# (COMPPARALELOGPUSSMC)=
# # 5.5 Cómputo en paralelo usando GPUs en un sistema de memoria compartida (SMC)
# ```{admonition} Notas para contenedor de docker:
#
# Comando de docker para ejecución de la nota de forma local:
#
# nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.
#
# `docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion_2 -p 8888:8888 -p 8787:8787 -d palmoreck/jupyterlab_optimizacion_2:3.0.0`
#
# password para jupyterlab: `<PASSWORD>`
#
# Detener el contenedor de docker:
#
# `docker stop jupyterlab_optimizacion_2`
#
# Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion_2:3.0.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion_2).
#
# ```
# ---
# Nota generada a partir de [liga](https://www.dropbox.com/s/yjijtfuky3s5dfz/2.5.Compute_Unified_Device_Architecture.pdf?dl=0).
# ```{admonition} Al final de esta nota el y la lectora:
# :class: tip
#
# * Aprenderá un poco de historia y arquitectura de la GPU.
#
# * Se familiarizará con la sintaxis de *CUDA-C* para cómputo en la GPU con ejemplos sencillos y los relacionará con el modelo de programación CUDA.
#
# * Utilizará el paquete *CuPy* de *Python* para cómputo en la GPU.
#
# ```
# Se presentan códigos y sus ejecuciones en una máquina `p2.xlarge` de la nube de [AWS](https://aws.amazon.com/). Se utilizó la AMI:
#
# ```
# opt2-aws-cuda-toolkit-cupy-08-05-2021
# ```
# de la región `us-east-1` (Virginia) para reproducibilidad de resultados. Tal AMI se construyó a partir de una AMI `ubuntu 20.04 - ami-042e8287309f5df03` con el [script_cuda_and_tools.sh](https://github.com/palmoreck/scripts_for_useful_tools_installations/blob/main/AWS/ubuntu_20.04/optimizacion_2/script_cuda_and_tools.sh)
# ````{admonition} Comentario
#
# Si se utiliza la *AMI* `opt2-aws-cuda-toolkit-cupy-08-05-2021` colocar en `User data` el siguiente *script*:
#
# ```bash
#
# # #!/bin/bash
# ##variables:
# region=us-east-1 #make sure instance is in Virginia
# name_instance=CUDA
# USER=ubuntu
# ##System update
# apt-get update -yq
# ##Tag instance
# INSTANCE_ID=$(curl -s http://instance-data/latest/meta-data/instance-id)
# PUBLIC_IP=$(curl -s http://instance-data/latest/meta-data/public-ipv4)
# sudo -H -u $USER bash -c "/home/$USER/.local/bin/aws ec2 create-tags --resources $INSTANCE_ID --tag Key=Name,Value=$name_instance-$PUBLIC_IP --region=$region"
# sudo -H -u $USER bash -c "cd / && /home/$USER/.local/bin/jupyter lab --ip=0.0.0.0 --no-browser --config=/home/$USER/.jupyter/jupyter_notebook_config.py &"
#
# ```
#
# ````
# La máquina `p2.xlarge` tiene las siguientes características:
# + tags=["output_scroll"] language="bash"
# lscpu
# + tags=["output_scroll"] language="bash"
# sudo lshw -C memory
# + language="bash"
# uname -ar #r for kernel, a for all
# -
# ```{admonition} Observación
# :class: tip
#
# En la celda anterior se utilizó el comando de *magic* `%%bash`. Algunos comandos de *magic* los podemos utilizar también con `import`. Ver [ipython-magics](https://ipython.readthedocs.io/en/stable/interactive/magics.html#)
#
# ```
# ## *Compute Unified Device Architecture* (CUDA)
# ### Un poco de historia...
# ```{margin}
#
# GPGPU es un término que se utilizó para referirse a la programación en unidades de procesamiento gráfico de forma general. Hoy en día se conoce simplemente como *GPU programming*. Ver [General-purpose computing on graphics processing units](https://en.wikipedia.org/wiki/General-purpose_computing_on_graphics_processing_units).
#
# ```
# La industria de videojuegos impulsó el desarrollo de las tarjetas gráficas a una velocidad sin precedente a partir del año 1999 para incrementar el nivel de detalle visual en los juegos de video. Alrededor del 2003 se planteó la posibilidad de utilizar las unidades de procesamiento gráfico para procesamiento en paralelo relacionado con aplicaciones distintas al ambiente de gráficas. A partir del 2006 la empresa [NVIDIA](https://www.nvidia.com/en-us/about-nvidia/) introdujo CUDA, una plataforma GPGPU y un modelo de programación que facilita el procesamiento en paralelo en las GPU's.
# Desde el 2006, las tarjetas gráficas muestran una brecha significativa con las unidades de procesamiento CPU's. Ver por ejemplo las gráficas que *NVIDIA* publica año tras año y que están relacionadas con el número de operaciones en punto flotante por segundo (FLOPS) y la transferencia de datos en la memoria RAM de la GPU: [gráficas cpu vs gpu en imágenes de google](https://www.google.com/search?q=plot+gflops+gpu+cpu+nvidia&tbm=isch&ved=2ahUKEwjKk7Le_bzwAhUUaKwKHX9-AP8Q2-cCegQIABAA&oq=plot+gflops+gpu+cpu+nvidia&gs_lcp=CgNpbWcQA1C_W1i_W2DhXGgAcAB4AIABX4gBX5IBATGYAQCgAQGqAQtnd3Mtd2l6LWltZ8ABAQ&sclient=img&ei=xAiYYMqhL5TQsQX__IH4Dw).
# ```{margin}
#
# La GPU y la CPU están conectadas por una interconexión de nombre [PCI](https://en.wikipedia.org/wiki/Conventional_PCI).
#
# ```
# Hoy en día se continúa el desarrollo de GPU's con mayor RAM, con mayor capacidad de cómputo y mejor conectividad con la CPU. Estos avances han permitido resolver problemas con mayor exactitud que los resueltos con las CPU's, por ejemplo en el terreno de *deep learning* en reconocimiento de imágenes. Ver [ImageNet Classification with Deep Convolutional Neural Networks](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf), [2012: A Breakthrough Year for Deep Learning](https://medium.com/limitlessai/2012-a-breakthrough-year-for-deep-learning-2a31a6796e73).
#
#
# ```{admonition} Observación
# :class: tip
#
# Para más avances ver [NVIDIA Turing Architecture In-Depth](https://devblogs.nvidia.com/nvidia-turing-architecture-in-depth/), [samsung-amd-rdna-gpu-2021](https://wccftech.com/samsung-amd-rdna-gpu-2021/), [playstation-5-specifications-revealed-but-design-is-still-a-mystery](https://www.theguardian.com/games/2020/mar/19/playstation-5-specifications-revealed-but-design-is-still-a-mystery), [xbox-series-x-tech](https://news.xbox.com/en-us/2020/03/16/xbox-series-x-tech/) y recientemente [IBM Supercomputer Summit Attacks Coronavirus…](https://www.ibm.com/blogs/nordic-msp/ibm-supercomputer-summit-attacks-coronavirus/).
#
# ```
# ```{margin}
#
# Recuérdese la [taxonomía de Flynn](https://en.wikipedia.org/wiki/Flynn%27s_taxonomy).
#
# ```
# La arquitectura en la que podemos ubicar a las GPU's es en la de un sistema MIMD y SIMD. De hecho es [SIMT: Single Instruction, Multiple Threads](https://en.wikipedia.org/wiki/Single_instruction,_multiple_threads) en un modelo de sistema de memoria compartida pues "los *threads* en un *warp* leen la misma instrucción para ser ejecutada".
# ```{admonition} Definición
#
# Un *warp* en el contexto de GPU *programming* es un conjunto de *threads*. Equivale a $32$ *threads*.
#
# ```
# ### ¿Diferencia con la CPU multicore?
#
# <img src="https://dl.dropboxusercontent.com/s/k11qub01w4nvksi/CPU_multicore.png?dl=0" heigth="500" width="500">
#
# **GPU**
#
# <img src="https://dl.dropboxusercontent.com/s/lw9kia12qhwp95r/GPU.png?dl=0" heigth="500" width="500">
# ```{admonition} Observación
# :class: tip
#
# Obsérvese en el dibujo anterior la diferencia en tamaño del caché en la CPU y GPU. También la unidad de control es más pequeña en la GPU.
#
#
# ```
# ```{margin}
#
# Una máquina *quad core* soporta cuatro threads en cada *core*.
#
# ```
# A diferencia de una máquina *multicore* o multi CPU's con la habilidad de lanzar en un instante de tiempo unos cuantos *threads*, la GPU puede lanzar cientos o miles de threads en un instante siendo cada core *heavily multithreaded*. Sí hay restricciones en el número de threads que se pueden lanzar en un instante pues las tarjetas gráficas tienen diferentes características (modelo) y arquitecturas, pero la diferencia con la CPU es grande. Por ejemplo, la serie **GT 200** (2009) en un instante puede lanzar 30,720 threads con sus 240 *cores*. Ver [GeForce_200_series](https://en.wikipedia.org/wiki/GeForce_200_series), [List of NVIDIA GPU's](https://en.wikipedia.org/wiki/List_of_Nvidia_graphics_processing_units).
# Ver [How Graphics Cards Work](https://computer.howstuffworks.com/graphics-card1.htm) y [How Microprocessors Work](https://computer.howstuffworks.com/microprocessor.htm) para más información.
# ### ¿Otras compañías producen tarjetas gráficas?
# Sí, ver por ejemplo la lista de GPU's de [Advanced Micro Devices](https://en.wikipedia.org/wiki/List_of_AMD_graphics_processing_units).
# ### ¿Si tengo una tarjeta gráfica de AMD puedo correr un programa de CUDA?
# No es posible pero algunas alternativas son:
#
# * [OpenCl](https://www.khronos.org/opencl/)
#
# * [OpenACC](https://www.openacc.org/about)
# ### ¿Si tengo una tarjeta gráfica de NVIDIA un poco antigua puedo correr un programa de CUDA?
# Las GPU's producidas por NVIDIA desde 2006 son capaces de correr programas basados en ***CUDA C***. La cuestión sería revisar qué *compute capability* tiene tu tarjeta. Ver [Compute Capabilities](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities) para las características que tienen las tarjetas más actuales.
# ### ¿Qué es *CUDA C*?
# Es una extensión al lenguaje *C* de programación en el que se utiliza una nueva sintaxis para procesamiento en la GPU. Contiene también una librería *runtime* que define funciones que se ejecutan desde el ***host*** por ejemplo para alojar y desalojar memoria en el ***device***, transferir datos entre la memoria *host* y la memoria *device* o manejar múltiples *devices*. La librería *runtime* está hecha encima de una API de *C* de bajo nivel llamada [NVIDIA CUDA Driver API](https://docs.nvidia.com/cuda/cuda-driver-api/index.html) la cual es accesible desde el código. Para información de la API de la librería runtime ver [NVIDIA CUDA Runtime API](https://docs.nvidia.com/cuda/cuda-runtime-api/index.html).
# ```{admonition} Comentario
#
# La transferencia de datos entre la memoria del *host* a *device* o viceversa constituye un *bottleneck* fuerte.
#
# ```
# ### ¿A qué se refiere la terminología de *host* y *device*?
# *Host* es la máquina *multicore* CPU y *device* es la GPU. Una máquina puede tener múltiples GPU's por lo que tendrá múltiples *devices*.
# ### Tengo una tarjeta NVIDIA CUDA *capable* ¿qué debo realizar primero?
# Realizar instalaciones dependiendo de tu sistema operativo. Ver [instalación](https://github.com/palmoreck/programming-languages/tree/master/C/extensiones_a_C/CUDA/instalacion) donde además se encontrará información para instalación de [nvidia-docker](https://github.com/NVIDIA/nvidia-docker).
# ### Instalé lo necesario y al ejecutar en la terminal `nvcc -V` obtengo la versión... ¿cómo puedo probar mi instalación?
# 1)Obteniendo información del *NVIDIA driver* ejecutando en la terminal el comando `nvidia-smi`.
# + language="bash"
# nvidia-smi
# + tags=["output_scroll"] language="bash"
# nvidia-smi -a #a for all
# -
# Para más información del comando `nvidia-smi` ver [results-for-the-nvidia-smi-command-in-a-terminal](https://askubuntu.com/questions/1220144/can-somebody-explain-the-results-for-the-nvidia-smi-command-in-a-terminal) y [nvidia-smi-367.38](https://developer.download.nvidia.com/compute/DCGM/docs/nvidia-smi-367.38.pdf).
# ```{admonition} Comentarios
#
# * Ejecutando `nvidia-smi -l 1` nos da información cada segundo.
#
# * Una herramienta que nos ayuda al monitoreo de uso de la(s) GPU(s) es [nvtop](https://github.com/Syllo/nvtop).
#
# ```
# 2)Compilando y ejecutando el siguiente programa de *CUDA C*:
# +
# %%file hello_world.cu
#include<stdio.h>
// Kernel executed on the GPU (device): every launched CUDA thread runs this
// body and prints the id of its block and its own id within the block.
__global__ void func(void){
printf("Hello world! del bloque %d del thread %d\n", blockIdx.x, threadIdx.x);
}
// Host entry point: launches 2 blocks of 3 threads each (6 kernel executions).
int main(void){
func<<<2,3>>>(); // asynchronous kernel launch from the host
cudaDeviceSynchronize(); // block the CPU thread until the kernel finishes
printf("Hello world! del cpu thread\n");
return 0;
}
# -
# ```{admonition} Comentario
#
# La sintaxis `<<<2,3>>>` refiere que serán lanzados 2 bloques de 3 *threads* cada uno.
#
#
# ```
# Compilamos con `nvcc`.
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 hello_world.cu -o hello_world.out
# -
# ```{admonition} Comentarios
#
# * `nvcc` es un *wrapper* para el compilador de programas escritos en *C*.
#
# * En ocasiones para tener funcionalidad de un determinado [compute capability](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities) se especifica la *flag* de `-arch=sm_11` en la línea de `nvcc`. En este caso se le indica al compilador que compile el programa para un *compute capability* de $1.1$. Ver [run a kernel using the larger grid size support offered](https://stackoverflow.com/questions/16954931/cuda-5-0-cudagetdeviceproperties-strange-grid-size-or-a-bug-in-my-code).
#
# * Para la versión 11 de CUDA se requiere explícitamente indicar la arquitectura y código para la compilación. Ver [cuda-11-kernel-doesnt-run](https://stackoverflow.com/questions/63675040/cuda-11-kernel-doesnt-run), [cuda-how-to-use-arch-and-code-and-sm-vs-compute](https://stackoverflow.com/questions/35656294/cuda-how-to-use-arch-and-code-and-sm-vs-compute/35657430#35657430), [cuda-compute-capability-requirements](https://stackoverflow.com/questions/28932864/cuda-compute-capability-requirements/28933055#28933055), [what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api](https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api).
#
# ```
# Ejecutamos.
# + language="bash"
# ./hello_world.out
# -
# 3)Haciendo un query a la GPU para ver qué características tiene (lo siguiente es posible ejecutar sólo si se instaló el [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit)):
# + language="bash"
# cd /usr/local/cuda/samples/1_Utilities/deviceQuery/ && sudo make
# /usr/local/cuda/samples/1_Utilities/deviceQuery/deviceQuery
# -
# ### ¿Por qué usar CUDA y *CUDA-C* o más general cómputo en la GPU?
# * NVIDIA como se mencionó al inicio de la nota fue de las primeras compañías en utilizar la GPU para tareas no relacionadas con el área de gráficos, ha colaborado en el avance del conocimiento de las GPU's y desarrollo de algoritmos y tarjetas gráficas. Otra compañía es [Khronos_Group](https://en.wikipedia.org/wiki/Khronos_Group) por ejemplo, quien actualmente desarrolla [OpenCl](https://www.khronos.org/opencl/).
# ```{margin}
#
# *Deep learning* se ha utilizado para resolver problemas en *machine learning* típicos. Ejemplos de esto son la clasificación de imágenes, de sonidos o análisis de textos. Ver por ejemplo [Practical text analysis using deep learning](https://medium.com/@michael.fire/practical-text-analysis-using-deep-learning-5fb0744efdf9).
#
# ```
# * El cómputo en la GPU constituye hoy en día una alternativa fuerte a la implementación de modelos de *machine learning* ampliamente utilizada por la comunidad científica, también para cómputo matricial y *deep learning*.
#
# * Sí hay publicaciones científicas para la implementación de *deep learning* en las CPU's, ver por ejemplo el *paper* reciente de [SLIDE](https://www.cs.rice.edu/~as143/Papers/SLIDE_MLSys.pdf) cuyo repo de *github* es [HashingDeepLearning](https://github.com/keroro824/HashingDeepLearning). Tal *paper* plantea una discusión a realizar con la frase:
#
# *...change in the state-of-the-art algorithms can render specialized hardware less effective in the future*.
#
# Ver por ejemplo [Tensor Cores](https://developer.nvidia.com/tensor-cores), [NVIDIA TENSOR CORES, The Next Generation of Deep Learning](https://www.nvidia.com/en-us/data-center/tensorcore/), [The most powerful computers on the planet: SUMMIT](https://www.ibm.com/thought-leadership/summit-supercomputer/) como ejemplos de hardware especializado para aprendizaje con *Tensorflow*.
# ```{admonition} Observación
# :class: tip
#
# *Summit powered by 9,126 IBM Power9 CPUs and over 27,000 NVIDIA V100 Tensor Core GPUS, is able to do 200 quadrillion calculations per second...* [IBM Supercomputer Summit Attacks Coronavirus…](https://www.ibm.com/blogs/nordic-msp/ibm-supercomputer-summit-attacks-coronavirus/).
#
# ```
# Sin embargo, por falta de implementaciones algorítmicas en la *CPU* se han adoptado implementaciones de *deep learning* utilizando GPU's:
#
#
# *...However, for the case of DL, this investment is justified due to the lack of significant progress in the algorithmic alternatives for years.*
# ```{admonition} Comentario
#
# Revisar también las entradas [An algorithm could make CPUs a cheap way to train AI](https://www.engadget.com/2020/03/03/rice-university-slide-cpu-gpu-machine-learning/) y [Deep learning rethink overcomes major obstacle in AI industry](https://www.sciencedaily.com/releases/2020/03/200305135041.htm).
#
# ```
# ## [CUDA-C](https://docs.nvidia.com/cuda/cuda-c-programming-guide/)
# Consiste en extensiones al lenguaje C y en una *runtime library*.
# ### *Kernel*
# * En *CUDA C* se define una función que se ejecuta en el ***device*** y que se le nombra ***kernel***. El *kernel* inicia con la sintaxis:
#
# ```C
# __global__ void mifun(int param){
# ...
# }
#
# ```
#
# * Siempre es tipo `void` (no hay `return`).
#
# * El llamado al *kernel* se realiza desde el ***host*** y con una sintaxis en la que se define el número de *threads*, nombrados ***CUDA threads*** (que son distintos a los *CPU threads*), y bloques, nombrados ***CUDA blocks***, que serán utilizados para la ejecución del *kernel*. La sintaxis que se utiliza es `<<< >>>` y en la primera entrada se coloca el número de *CUDA blocks* y en la segunda entrada el número de *CUDA threads*. Por ejemplo para lanzar N bloques de 5 *threads*.
#
#
# ```C
# __global__ void mifun(int param){
# ...
# }
#
# int main(){
# int par=0;
# mifun<<<N,5>>>(par);
# }
#
# ```
# ### Ejemplo
# `hello_world_simple.cu`
# %%file hello_world_simple.cu
#include<stdio.h>
// Empty kernel: launched on the device but performs no work.
__global__ void func(void){
}
int main(void){
func<<<1,1>>>(); // launch 1 block with 1 thread; kernel body is empty
printf("Hello world!\n"); // printed by the CPU thread
return 0;
}
# Compilación:
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall hello_world_simple.cu -o hello_world_simple.out
# -
# Ejecución:
# + language="bash"
# ./hello_world_simple.out
# -
# ````{admonition} Comentarios
#
# * La función `main` se ejecuta en la CPU.
#
# * `func` es un *kernel* y es ejecutada por los *CUDA threads* en el *device*. Obsérvese que tal función inicia con la sintaxis `__global__`. En este caso el *CUDA thread* que fue lanzado no realiza ninguna acción pues el cuerpo del kernel está vacío.
#
# * El *kernel* sólo puede tener un `return` tipo *void*: `__global__ void func` por lo que el *kernel* debe regresar sus resultados a través de sus argumentos.
#
# * La extensión del archivo debe ser `.cu` aunque esto puede modificarse al compilar con `nvcc`:
#
# ```bash
#
# nvcc -x cu hello_world.c -o hello_world.out
#
# ```
#
# ````
# ### ¿Bloques de threads?
# Los *CUDA threads* son divididos en *CUDA blocks* y éstos se encuentran en un *grid*. En el lanzamiento del *kernel* se debe especificar al hardware cuántos *CUDA blocks* tendrá nuestro *grid* y cuántos *CUDA threads* estarán en cada bloque.
# ### Ejemplo
# ```{margin}
#
# `func<<<2,3>>>();` representa 2 bloques de 3 *threads* cada uno.
#
# ```
# %%file hello_world_2.cu
#include<stdio.h>
// Kernel: each CUDA thread prints its block id and its thread id within the block.
__global__ void func(void){
printf("Hello world! del bloque %d del thread %d\n", blockIdx.x, threadIdx.x);
}
int main(void){
func<<<2,3>>>(); // 2 blocks of 3 threads each
cudaDeviceSynchronize(); // wait on the CPU until all device printf's complete
//printf("Hello world! del cpu thread\n");
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall hello_world_2.cu -o hello_world_2.out
# + language="bash"
# ./hello_world_2.out
# -
# **En lo que continúa de la nota el nombre *thread* hará referencia a *CUDA thread* y el nombre bloque a *CUDA block*.**
# ```{admonition} Comentarios
#
# * El llamado a la ejecución del *kernel* se realizó en el *host* y se lanzaron $2$ bloques cada uno con $3$ *threads*.
#
# * Se utiliza la función [cudaDeviceSynchronize](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g10e20b05a95f638a4071a655503df25d) para que el *cpu-thread* espere la finalización de la ejecución del *kernel*.
#
# * En el ejemplo anterior, las variables `blockIdx` y `threadIdx` hacen referencia a los **id**'s que tienen los bloques y los *threads*. El *id* del bloque dentro del *grid* y el *id* del thread dentro del bloque. La parte `.x` de las variables: `blockIdx.x` y `threadIdx.x` refieren a la **primera coordenada** del bloque en el *grid* y a la **primera coordenada** del *thread* en el bloque.
#
# * La elección del número de bloques en un *grid* o el número de *threads* en un bloque no corresponde a alguna disposición del *hardware*. Esto es, si se lanza un *kernel* con `<<< 1, 3 >>>` no implica que la GPU tenga en su *hardware* un bloque o 3 *threads*. Asimismo, las coordenadas que se obtienen vía `blockIdx` o `threadIdx` son meras abstracciones, no corresponden a algún ordenamiento en el hardware de la GPU.
#
# * Todos los *threads* de un bloque ejecutan el *kernel* por lo que se tienen tantas copias del kernel como número de bloques sean lanzados. Esto es una muestra de que la GPU sigue el modelo *Single Instruction Multiple Threads [(SIMT)](https://en.wikipedia.org/wiki/Single_instruction,_multiple_threads)*.
#
# ```
# ### ¿Grid's y bloques 3-dimensionales?
# En el *device* podemos definir el *grid* de bloques y el bloque de *threads* utilizando el tipo de dato `dim3` el cual también es parte de *CUDA C*.
# ### Ejemplo
# ```{margin}
#
# `dim3 dimGrid(1,2,1);` representa 2 bloques en el *grid*.
#
# `dim3 dimBlock(1,1,3);` representa 3 *threads* por bloque.
#
# ```
# %%file hello_world_3.cu
#include<stdio.h>
// Kernel: prints the second grid coordinate of the block (blockIdx.y) and the
// third coordinate of the thread inside its block (threadIdx.z).
__global__ void func(void){
printf("Hello world! del bloque %d del thread %d\n", blockIdx.y, threadIdx.z);
}
int main(void){
dim3 dimGrid(1,2,1); // grid of 1x2x1 = 2 blocks
dim3 dimBlock(1,1,3); // each block has 1x1x3 = 3 threads
func<<<dimGrid,dimBlock>>>();
cudaDeviceSynchronize(); // wait for device output before the host printf
printf("Hello world! del cpu thread\n");
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall hello_world_3.cu -o hello_world_3.out
# + language="bash"
# ./hello_world_3.out
# -
# ### Ejemplo
# ```{margin}
#
# `dim3 dimGrid(1,1,1);` representa 1 bloque en el *grid*.
#
# `dim3 dimBlock(1,3,1);` representa 3 *threads* por bloque.
#
# ```
# %%file thread_idxs.cu
#include<stdio.h>
// Kernel: the thread at position (0,0,0) of its block prints the block id;
// then every thread prints its three coordinates inside the block.
__global__ void func(void){
if(threadIdx.x==0 && threadIdx.y==0 && threadIdx.z==0){
printf("blockIdx.x:%d\n",blockIdx.x);
}
printf("thread idx.x:%d\n",threadIdx.x);
printf("thread idx.y:%d\n",threadIdx.y);
printf("thread idx.z:%d\n",threadIdx.z);
}
int main(void){
dim3 dimGrid(1,1,1); // a single block in the grid
dim3 dimBlock(1,3,1); // 3 threads per block, laid out along the y axis
func<<<dimGrid,dimBlock>>>();
cudaDeviceSynchronize(); // ensure kernel printf output completes before exit
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 thread_idxs.cu -o thread_idxs.out
# + language="bash"
# ./thread_idxs.out
# -
# ### Ejemplo
# ```{margin}
#
# `dim3 dimGrid(1,2,2);` representa 4 bloques en el *grid*.
#
# `dim3 dimBlock(1,1,1);` representa 1 *thread* por bloque.
#
# ```
# +
# %%file block_idxs.cu
#include<stdio.h>
// Kernel: every thread prints the three grid coordinates of its block.
__global__ void func(void){
printf("blockIdx.x:%d\n",blockIdx.x);
printf("blockIdx.y:%d\n",blockIdx.y);
printf("blockIdx.z:%d\n",blockIdx.z);
}
int main(void){
dim3 dimGrid(1,2,2); // 1x2x2 = 4 blocks in the grid
dim3 dimBlock(1,1,1); // a single thread per block
func<<<dimGrid,dimBlock>>>();
cudaDeviceSynchronize(); // wait for all device printf's to finish
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall block_idxs.cu -o block_idxs.out
# + language="bash"
# ./block_idxs.out
# -
# ### Ejemplo
# Podemos usar la variable `blockDim` para cada coordenada `x, y` o `z` y obtener la dimensión de los bloques.
# ```{margin}
#
# `dim3 dimGrid(2,2,2);` representa 8 bloques en el *grid*.
#
# `dim3 dimBlock(3,1,2);` representa 6 *threads* por bloque.
#
# ```
# +
# %%file block_dims.cu
#include<stdio.h>
// Kernel: only the (0,0,0) thread of blocks with blockIdx.z==1 prints the
// block dimensions, read from the built-in blockDim variable.
__global__ void func(void){
if(threadIdx.x==0 && threadIdx.y==0 && threadIdx.z==0 && blockIdx.z==1){
printf("blockDim.x:%d\n",blockDim.x);
printf("blockDim.y:%d\n",blockDim.y);
printf("blockDim.z:%d\n",blockDim.z);
}
}
int main(void){
dim3 dimGrid(2,2,2); // 2x2x2 = 8 blocks in the grid
dim3 dimBlock(3,1,2); // 3x1x2 = 6 threads per block
func<<<dimGrid,dimBlock>>>();
cudaDeviceSynchronize(); // ensure device output completes before process exit
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall block_dims.cu -o block_dims.out
# + language="bash"
# ./block_dims.out
# -
# ### Alojamiento de memoria en el *device*
# Para alojar memoria en el *device* se utiliza el llamado a [cudaMalloc](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1g37d37965bfb4803b6d4e59ff26856356) y para transferir datos del *host* al *device* o viceversa se llama a la función [cudaMemcpy](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1gc263dbe6574220cc776b45438fc351e8) con respectivos parámetros como `cudaMemcpyHostToDevice` o `cudaMemcpyDeviceToHost`.
#
# Para desalojar memoria del *device* se utiliza el llamado a [cudaFree](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1ga042655cbbf3408f01061652a075e094).
# ### Ejemplo
# **N bloques de 1 thread**
# ```{margin}
#
# `dim3 dimGrid(N,1,1);` representa N bloques en el *grid*.
#
# `dim3 dimBlock(1,1,1);` representa 1 *thread* por bloque.
#
# `<<<dimGrid,dimBlock>>>` N bloques de 1 *thread*.
#
# ```
# %%file vector_sum.cu
#include<stdio.h>
#define N 10 // number of entries in each vector
// Kernel: N blocks of 1 thread each; every block adds one pair of entries,
// selected by its blockIdx.x.
__global__ void vect_sum(int *a, int *b, int *c){
int block_id_x = blockIdx.x;
if(block_id_x<N) //we assume N is less than maximum number of blocks
//that can be launched
c[block_id_x] = a[block_id_x]+b[block_id_x];
}
int main(void){
int a[N], b[N],c[N]; // host arrays
int *device_a, *device_b, *device_c; // device addresses, filled in by cudaMalloc
int i;
dim3 dimGrid(N,1,1); // N blocks in the grid
dim3 dimBlock(1,1,1); // 1 thread per block
//allocation in device
cudaMalloc((void **)&device_a, sizeof(int)*N);
cudaMalloc((void **)&device_b, sizeof(int)*N);
cudaMalloc((void **)&device_c, sizeof(int)*N);
//dummy data
for(i=0;i<N;i++){
a[i]=i;
b[i]=i*i;
}
//making copies of a, b arrays to GPU
cudaMemcpy(device_a,a,N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_b,b,N*sizeof(int), cudaMemcpyHostToDevice);
vect_sum<<<dimGrid,dimBlock>>>(device_a,device_b,device_c);
cudaDeviceSynchronize(); // wait until the kernel has finished
//copy result to c array
cudaMemcpy(c,device_c,N*sizeof(int),cudaMemcpyDeviceToHost);
for(i=0;i<N;i++)
printf("%d+%d = %d\n",a[i],b[i],c[i]);
cudaFree(device_a); // release device memory
cudaFree(device_b);
cudaFree(device_c);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall vector_sum.cu -o vector_sum.out
# + language="bash"
# ./vector_sum.out
# -
# ````{admonition} Comentarios
#
# * El *statement*:
#
# ```C
# int *device_a, *device_b, *device_c;
# ```
#
# en sintaxis de *C* define apuntadores que refieren a una dirección de memoria. En el contexto de la *GPU programming* estos apuntadores no apuntan a una dirección de memoria en el *device*. Aunque NVIDIA añadió el *feature* de [Unified Memory](https://devblogs.nvidia.com/unified-memory-cuda-beginners/) (un espacio de memoria accesible para el *host* y el *device*) aquí no se está usando tal *feature*. Más bien se están utilizando los apuntadores anteriores para apuntar a un [struct](https://en.wikipedia.org/wiki/Struct_(C_programming_language)) de *C* en el que uno de sus tipos de datos es una dirección de memoria en el *device*.
#
# * El uso de `(void **)` en el *statement* `cudaMalloc((void **)&device_a, sizeof(int)*N);` es por la definición de la función `cudaMalloc`.
#
# * En el programa anterior se coloca en comentario que se asume que $N$ el número de datos en el arreglo es menor al número de bloques que es posible lanzar. Esto como veremos más adelante es importante considerar pues aunque en un *device* se pueden lanzar muchos bloques y muchos *threads*, se tienen límites en el número de éstos que es posible lanzar.
#
#
# ````
# ### ¿Perfilamiento en CUDA?
# Al instalar el *CUDA toolkit* en sus máquinas se instala la línea de comando [nvprof](https://docs.nvidia.com/cuda/profiler-users-guide/index.html) para perfilamiento.
# + language="bash"
# source ~/.profile
# nvprof --normalized-time-unit s ./vector_sum.out
# -
# ```{admonition} Comentarios
#
# * Las unidades en las que se reporta son s: second, ms: millisecond, us: microsecond, ns: nanosecond.
#
# * En la documentación de NVIDIA se menciona que `nvprof` será reemplazada próximamente por [NVIDIA Nsight Compute](https://developer.nvidia.com/nsight-compute) y [NVIDIA Nsight Systems](https://developer.nvidia.com/nsight-systems).
#
# ```
# En el ejemplo anterior se lanzaron $N$ bloques con $1$ *thread* cada uno y a continuación se lanza $1$ bloque con $N$ *threads*.
# ```{margin}
#
# `dim3 dimGrid(1,1,1);` representa 1 bloque en el *grid*.
#
# `dim3 dimBlock(N,1,1);` representa N *threads* por bloque.
#
# `<<<dimGrid,dimBlock>>>` 1 bloque con N *threads*.
#
# ```
# %%file vector_sum_2.cu
#include<stdio.h>
#define N 10 // number of entries in each vector
// Kernel: a single block of N threads; each thread adds one pair of entries,
// selected by its threadIdx.x within the block.
__global__ void vect_sum(int *a, int *b, int *c){
int thread_id_x = threadIdx.x;
if(thread_id_x<N)
c[thread_id_x] = a[thread_id_x]+b[thread_id_x];
}
int main(void){
int *device_a, *device_b, *device_c;
int i;
dim3 dimGrid(1,1,1); // 1 block in the grid
dim3 dimBlock(N,1,1); // N threads in that block
//allocation in device with Unified Memory
cudaMallocManaged(&device_a, sizeof(int)*N); // memory accessible from host and device
cudaMallocManaged(&device_b, sizeof(int)*N);
cudaMallocManaged(&device_c, sizeof(int)*N);
//dummy data
for(i=0;i<N;i++){
device_a[i]=i;
device_b[i]=i*i;
}
vect_sum<<<dimGrid,dimBlock>>>(device_a,device_b,device_c);
cudaDeviceSynchronize(); // required before the host reads the managed result
for(i=0;i<N;i++)
printf("%d+%d = %d\n",device_a[i],device_b[i],device_c[i]);
cudaFree(device_a); // cudaFree also releases managed allocations
cudaFree(device_b);
cudaFree(device_c);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall vector_sum_2.cu -o vector_sum_2.out
# + language="bash"
# source ~/.profile
# nvprof --normalized-time-unit s ./vector_sum_2.out
# -
# ```{admonition} Comentarios
#
# * El programa anterior utiliza la [Unified Memory](https://devblogs.nvidia.com/unified-memory-cuda-beginners/) con la función [cudaMallocManaged](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__HIGHLEVEL.html#group__CUDART__HIGHLEVEL_1gcf6b9b1019e73c5bc2b39b39fe90816e). La *Unified Memory* es un *feature* que se añadió a CUDA desde las arquitecturas de **Kepler** y **Maxwell** pero que ha ido mejorando (por ejemplo añadiendo [page faulting](https://en.wikipedia.org/wiki/Page_fault) and [migration](https://www.kernel.org/doc/html/latest/vm/page_migration.html)) en las arquitecturas siguientes a la de *Kepler*: la arquitectura Pascal y Volta. Por esto en el *output* anterior de *nvprof* aparece una sección de *page fault*.
#
# * Al igual que antes, en el programa anterior se asume que $N$ el número de datos en el arreglo es menor al número de *threads* que es posible lanzar. Esto como veremos más adelante es importante considerar pues aunque en el *device* se pueden lanzar muchos bloques y muchos *threads*, se tienen límites en el número de éstos que es posible lanzar.
#
# ```
# ### ¿Tenemos que inicializar los datos en la CPU y copiarlos hacia la GPU?
# En realidad no tenemos que realizarlo para el ejemplo de `vector_sum_2.cu`.
# +
# %%file vector_sum_3.cu
#include<stdio.h>
#define N 10
//Fill a and b with dummy data directly on the device, one element per thread:
//a[i]=i and b[i]=i*i for i=0,...,N-1.
//Bounds guard added for consistency with vect_sum below: with the current
//launch (blockDim.x == N) every thread passes, but the guard keeps the kernel
//safe if it is ever launched with more threads than elements.
__global__ void fill_arrays(int *a, int *b){
int thread_id_x = threadIdx.x;
if(thread_id_x<N){
a[thread_id_x]=thread_id_x;
b[thread_id_x]=thread_id_x*thread_id_x;
}
}
//Element-wise sum c[i] = a[i] + b[i], one element per thread.
//The guard skips threads with index >= N (here blockDim.x == N, so all pass).
__global__ void vect_sum(int *a, int *b, int *c){
int thread_id_x = threadIdx.x;
if(thread_id_x<N)
c[thread_id_x] = a[thread_id_x]+b[thread_id_x];
}
//Driver: allocates three arrays in Unified Memory, fills a and b on the
//device with fill_arrays, sums them with vect_sum, and prints the result
//from the host. No explicit host<->device copies are needed thanks to
//cudaMallocManaged.
int main(void){
int *device_a, *device_b, *device_c;
int i;
//single block of N threads: one thread per array element
dim3 dimGrid(1,1,1);
dim3 dimBlock(N,1,1);
//allocating using Unified Memory in device
cudaMallocManaged(&device_a, sizeof(int)*N);
cudaMallocManaged(&device_b, sizeof(int)*N);
cudaMallocManaged(&device_c, sizeof(int)*N);
fill_arrays<<<dimGrid,dimBlock>>>(device_a,device_b);
//kernel launches are asynchronous: wait before launching the dependent kernel
cudaDeviceSynchronize();
vect_sum<<<dimGrid,dimBlock>>>(device_a,device_b,device_c);
//wait for the sum to finish before the host reads the managed memory
cudaDeviceSynchronize();
for(i=0;i<N;i++)
printf("%d+%d = %d\n",device_a[i],device_b[i],device_c[i]);
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall vector_sum_3.cu -o vector_sum_3.out
# + language="bash"
# source ~/.profile
# nvprof --normalized-time-unit s ./vector_sum_3.out
# -
# ## Arquitectura de una GPU y límites en número de *threads* y bloques que podemos lanzar en el *kernel*
# Un *device* está compuesto por arreglos de **streaming multiprocessors SM's** (también denotados como MP's) y en cada *SM* encontramos un número (determinado por la arquitectura del device) de **streaming processors SP's** que comparten el caché y unidades de control (que están dentro de cada SM):
# <img src="https://dl.dropboxusercontent.com/s/oxx55upoayfmliw/SMS_CUDA.png?dl=0" height="700" width="700">
#
# Ver [Hardware model: streamingmultiprocessor](https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-hw-model).
# En el dibujo anterior se muestran las SM's en color rojo y los SP's en morado. Hay dos SM's por cada bloque anaranjado y ocho SP's por cada SM. Así, una GPU es una máquina *multicore*. Aunque cada SM ejecuta las instrucciones de forma independiente a otra SM, comparten la **memoria global**.
# Los bloques de *threads* son **asignados a cada SM por el *CUDA runtime system***, el cual puede asignar más de un bloque a una SM pero hay un límite de bloques que pueden ser asignados a cada SM. Ver [maximum number of blocks per multiprocessor](https://stackoverflow.com/questions/22520209/programmatically-retrieve-maximum-number-of-blocks-per-multiprocessor).
# ```{admonition} Comentarios
#
# * Por ejemplo para el modelo *GT200* el máximo número de bloques que podían asignarse a cada SM eran de $8$ bloques. Tal modelo tenía $30$ SM's lo que resultaban en $240$ bloques que en un instante podían asignarse al *device* para su ejecución simultánea (asignándose en cualquier orden en alguna SM disponible). Por supuesto que un *grid* podía contener más de $240$ bloques en este modelo y en este caso el *CUDA runtime system* lleva una lista de bloques que va asignando a cada SM y conforme cada SM terminan la ejecución, nuevos bloques son asignados a tales SM que finalizaron. Para visualizar esta situación, considérese una simplificación de lo anterior en donde se tiene un *device* con $2$ SM's y con un *kernel* se han lanzado $6$ bloques. El *CUDA runtime system* ha asignado $3$ bloques a cada SM, entonces se tiene un dibujo como el siguiente:
#
#
# <img src="https://dl.dropboxusercontent.com/s/p0nu72ofmdjtck8/kernel_launch_example.png?dl=0" height="600" width="600">
#
#
# * Los bloques asignados a una SM comparten recursos (por ejemplo memoria) y su ejecución es independiente entre ellos, no es posible sincronizar al bloque 1 con el bloque 0. También no es posible sincronizar a los *threads* de diferentes SM's pero sí es posible sincronizar a los *threads* dentro de un mismo bloque.
#
# ```
# ### ¿Qué otros límites puedo encontrar en mi(s) device(s) de mi sistema?
# Para responder lo anterior se puede utilizar el siguiente programa que está basado en [how-query-device-properties-and-handle-errors-cuda-cc](https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/) y [cudaDeviceProp Struct Reference](https://docs.nvidia.com/cuda/cuda-runtime-api/structcudaDeviceProp.html).
# +
# %%file device_properties.cu
#include<stdio.h>
//Enumerate every CUDA device in the system and print its main properties:
//compute capability, clock rate, memory sizes and the per-SM / per-block
//limits on threads, warps, block dimensions and grid dimensions.
int main(void){
cudaDeviceProp properties;
int count;
int i;
cudaGetDeviceCount(&count);
for(i=0;i<count;i++){
printf("----------------------\n");
//fills `properties` with the attributes of device i
cudaGetDeviceProperties(&properties, i);
printf("----device %d ----\n",i);
printf("Device Name: %s\n", properties.name);
printf("Compute capability: %d.%d\n", properties.major, properties.minor);
printf("Clock rate: %d\n", properties.clockRate);
printf("Unified memory: %d\n", properties.unifiedAddressing);
printf(" ---Memory Information for device %d (results on bytes)---\n", i);
printf("Total global mem: %ld\n", properties.totalGlobalMem);
printf("Total constant Mem: %ld\n", properties.totalConstMem);
printf("Shared memory per thread block: %ld\n", properties.sharedMemPerBlock);
printf("Shared memory per SM: %ld\n",properties.sharedMemPerMultiprocessor );
printf(" ---MP Information for device %d ---\n", i);
printf("SM count: %d\n", properties.multiProcessorCount);
printf("Threads in warp: %d\n", properties.warpSize);
printf("Max threads per SM: %d\n", properties.maxThreadsPerMultiProcessor);
//derived value: warps per SM = threads per SM / warp size
printf("Max warps per SM: %d\n",properties.maxThreadsPerMultiProcessor/properties.warpSize);
printf("Max threads per block: %d\n", properties.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", properties.maxThreadsDim[0], properties.maxThreadsDim[1], properties.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", properties.maxGridSize[0], properties.maxGridSize[1], properties.maxGridSize[2]);
}
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc --compiler-options -Wall device_properties.cu -o device_properties.out
# + language="bash"
# ./device_properties.out
# -
# ```{admonition} Comentarios
#
# * También en la documentación oficial de NVIDIA dentro de [compute-capabilities](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities) se pueden revisar los valores anteriores y muchos más.
#
# * En un *device* encontramos diferentes tipos de memoria: global, constante, *shared* y *texture*. En esta nota únicamente trabajamos con la memoria global.
#
# * Tenemos funciones en CUDA para poder comunicar/coordinar a los *threads* en un bloque por medio de la *shared memory*. Ver por ejemplo [Using Shared Memory in CUDA C/C++](https://devblogs.nvidia.com/using-shared-memory-cuda-cc/) para un pequeño *post* del $2013$ sobre *shared memory*.
#
# * Los bloques de *threads* que son asignados a una SM son divididos en ***warps*** que es la unidad de ***thread scheduling*** que tiene el *CUDA run time system*. El *output* anterior indica que son divisiones de $32$ *threads*.
#
# * El *thread scheduling* se puede pensar a la funcionalidad que tiene el *hardware* del *device* para seleccionar una instrucción del programa y asginar su ejecución por los *threads* en un *warp* ([SIMT](https://en.wikipedia.org/wiki/Single_instruction,_multiple_threads)). Otro ejemplo es tener una instrucción que indica que se debe realizar lectura o escritura, entonces el *hardware* del *device* utiliza un *warp* de threads para tal operación mientras selecciona un *warp* de *threads* distinto para seleccionar otra instrucción diferente a la de I/O.
#
# * El número máximo de *threads* que pueden iniciarse de forma simultánea o en un instante por SM es de $2048$ o bien $2048/32 = 64$ warps.
#
# * El *output* anterior muestra los límites para número de bloques en las tres dimensiones de un *grid* y el número de *threads* en las tres dimensiones en un bloque.
#
# * Un bloque puede tener como máximo $1024$ *threads* en cualquier configuración: por ejemplo $(1024,1,1), (32,1,32), (4,4,64)$.
#
# * Por los puntos anteriores si lanzamos bloques de $1024$ *threads* entonces sólo $2$ bloques pueden residir en una SM en un instante. Con esta configuración alcanzaríamos $1024/32=32$ *warps* por cada bloque y como lanzamos $2$ bloques alcanzaríamos $64$ *warps* (que es el máximo de *warps* por SM que podemos tener en un instante). Otra configuración para alcanzar el máximo número de *warps* en un instante, es considerar $4$ bloques de $512$ *threads* pues tendríamos $512/32=16$ *warps* por bloque y en total serían $16*4$ (*warps* $\times$ bloques) $=64$ *warps*. Entre los datos que hay que elegir en los programas de *CUDA C* se encuentran las configuraciones en el número de *threads* y el número de bloques a lanzar. La idea es alcanzar o rebasar el máximo número de *warps* en cada SM que soporta nuestro *device* en un instante.
#
# * Por ejemplo para el dibujo en el que se asumió que el *CUDA runtime system* había asignado $3$ bloques a cada SM, se tendría una división de cada bloque en un *warp* de $32$ *threads* como sigue:
#
#
# <img src="https://dl.dropboxusercontent.com/s/yngq4r66i2nk5mg/warp_division.png?dl=0" height="600" width="600">
#
# ```
# ### *Grid Configuration Choices*?
# Los programas de *CUDA C* tienen la opción de elegir el número de *threads* y de *bloques* a ser lanzados. En la referencia *Parallel Computing for Data Science. With Examples in R, C++ and CUDA* de Norman Matloff se enlistan algunas consideraciones para elegir tales parámetros:
#
# * *Given that scheduling is done on a warp basis, block size should be a multiple of the warp size (32).*
#
# * *One wants to utilize all the SMs. If one sets the block size too large, not all will be used, as a block cannot be split across SM's.*
#
# * *..., barrier synchronization can be done effectively only at the block level. The larger the block, the more the barrier delay, so one might want smaller blocks.*
#
# * *On the other hand, if one is using shared memory, this can only be done at the block level, and efficient use may indicate using a larger block.*
#
# * *Two threads doing unrelated work, or the same work but with many if/elses, would cause a lot of thread divergence if they were in the same block. In some cases, it may be known in advance which threads will do the "ifs" and which will do the "elses", in which case they should be placed in different blocks if possible.*
#
# * *A commonly-cited rule of thumb is to have between $128$ and $256$ *threads* per block.*
# ### Ejemplo regla compuesta del rectángulo
# En el uso de CUDA se recomienda que:
#
# * *Users* escriban código de *CUDA C* simple.
#
# * Utilicen las librerías ya hechas por NVIDIA o terceros para mantener simplicidad y eficiencia en el código.
#
# Lo anterior para disminuir el tiempo y la cantidad de código que *users* tengan que hacer (o rehacer) y puesto que dominar la programación de *CUDA C* requiere una buena inversión de tiempo.
#
# Así, tenemos a [Thrust](https://docs.nvidia.com/cuda/thrust/index.html) una *template library* basada en la [Standard Template Library (STL)](https://en.wikipedia.org/wiki/Standard_Template_Library) de C++ construída por NVIDIA que de acuerdo a su documentación:
#
# *Thrust provides a rich collection of data parallel primitives such as scan, sort, and reduce, which can be composed together to implement complex algorithms with concise, readable source code. By describing your computation in terms of these high-level abstractions you provide Thrust with the freedom to select the most efficient implementation automatically. As a result, Thrust can be utilized in rapid prototyping of CUDA applications, where programmer productivity matters most, as well as in production, where robustness and absolute performance are crucial.*
#
#
# *Thrust* tiene la opción de utilizarse con [OpenMP](https://www.openmp.org/), [Thread Building Blocks (TBB)](https://www.threadingbuildingblocks.org/intel-tbb-tutorial) y con *CUDA C++*. Ver por ejemplo [Device Backends](https://github.com/thrust/thrust/wiki/Device-Backends) para conocer cómo cambiar entre *OpenMP* y *CUDA C++*, lo cual se realiza en la compilación y **¡sin hacer cambios en el código!**.
# ```{admonition} Comentarios
#
# * Al *software* que aprovecha el *feature* anterior de los sistemas computacionales (por ejemplo cambiar entre *OpenMP* y *CUDA C++*) se les nombra [Heterogeneous computing](https://en.wikipedia.org/wiki/Heterogeneous_computing).
#
# * Si se instala el *CUDA toolkit*, los *headers* en la librería template de `Thrust` estarán disponibles para su uso.
#
#
# ```
# En el siguiente ejemplo de la regla del rectángulo compuesta se utiliza:
#
# * [Reductions](https://docs.nvidia.com/cuda/thrust/index.html#reductions)
#
# * Los *headers*:
#
# * [thrust/execution_policy](https://thrust.github.io/doc/structthrust_1_1device__execution__policy.html),
#
# * [thrust/reduce](https://thrust.github.io/doc/group__reductions_ga43eea9a000f912716189687306884fc7.html#ga43eea9a000f912716189687306884fc7).
#
#
# Se hace explícito el uso de la política de ejecucion [thrust::device](https://thrust.github.io/doc/group__execution__policies_ga78249cb3aa4239b64e65aaf6e82ac2f8.html).
#
# Referencias para el programa siguiente se encuentran en [thrust inside user written kernels](https://stackoverflow.com/questions/5510715/thrust-inside-user-written-kernels) y [cuda how to sum all elements of an array into one number within the gpu](https://stackoverflow.com/questions/42525713/cuda-how-to-sum-all-elements-of-an-array-into-one-number-within-the-gpu).
# **Primero utilicemos $n=10^3$ subintervalos.**
# +
# %%file Rcf.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
__global__ void Rcf(double *data, double a, double h_hat, int n, double *sum_res ) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
//each thread evaluates the integrand exp(-x^2) at its own mid-point node
if(threadIdx.x<=n-1){
x=a+(threadIdx.x+1/2.0)*h_hat;
data[threadIdx.x]=std::exp(-std::pow(x,2));
}
//BUG FIX: barrier so every thread of this (single) block has finished
//writing data[] before thread 0 starts the reduction; without it thread 0
//could reduce while sibling threads are still writing (race condition).
__syncthreads();
if(threadIdx.x==0){
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Approximates the integral of exp(-x^2) on [0,1] with n=1e3 subintervals
//using a single block of n threads, then compares against the reference
//value `obj` and reports the relative error and the kernel wall time.
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n=1e3;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
double time_spent;
clock_t begin,end;
cudaMalloc((void **)&d_data,sizeof(double)*n);
cudaMalloc((void**)&d_sum,sizeof(double));
h_hat=(b-a)/n;
begin=clock();
//one block, one thread per subinterval (valid because n <= 1024)
Rcf<<<1,n>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end=clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
//the kernel sums the f(x_i); scale by the subinterval width on the host
sum_res=h_hat*sum_res;
cudaFree(d_data) ;
cudaFree(d_sum) ;
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf.cu -o Rcf.out
# + language="bash"
# ./Rcf.out
# + language="bash"
# source ~/.profile
# nvprof --normalized-time-unit s ./Rcf.out
# -
# **Incrementemos a $n=1025$ subintervalos.**
# +
# %%file Rcf2.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
//Same kernel as in Rcf.cu; kept unchanged on purpose: this example is
//launched with 1025 threads in one block to demonstrate a failed launch.
__global__ void Rcf(double *data, double a, double h_hat, int n, double *sum_res ) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
//each thread evaluates the integrand at its own node
if(threadIdx.x<=n-1){
x=a+(threadIdx.x+1/2.0)*h_hat;
data[threadIdx.x]=std::exp(-std::pow(x,2));
}
//NOTE(review): no __syncthreads() before the reduction — thread 0 may read
//data[] before sibling threads finish writing; verify/add a barrier.
if(threadIdx.x==0){
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Deliberately broken example: n=1025 exceeds the 1024 threads-per-block
//limit, so the <<<1,n>>> launch below fails, d_sum is never written, and
//the program reports 100% relative error (this is the point of the cell).
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
//1025 > max threads per block (1024): the kernel launch will fail
int n=1025;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
double time_spent;
clock_t begin,end;
cudaMalloc((void **)&d_data,sizeof(double)*n);
cudaMalloc((void**)&d_sum,sizeof(double));
h_hat=(b-a)/n;
begin=clock();
//invalid configuration: no error is checked here, so the failure is silent
Rcf<<<1,n>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end=clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
sum_res=h_hat*sum_res;
cudaFree(d_data) ;
cudaFree(d_sum) ;
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf2.cu -o Rcf2.out
# + language="bash"
# ./Rcf2.out
# -
# ```{admonition} Observación
# :class: tip
#
# Obsérvese el error relativo de $100\%$: el lanzamiento del *kernel* con $1025$ *threads* en un solo bloque excede el máximo de $1024$ *threads* por bloque, por lo que el *kernel* nunca se ejecuta.
#
# ```
# **¿Cómo lo arreglamos?**
# +
# %%file Rcf3.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
//Teaching variant for n=1025 with two blocks of 1024 threads: threads cover
//nodes 0..1023, and thread 0 additionally evaluates node blockDim.x (=1024)
//before reducing. blockIdx is not used, so both blocks perform the same
//(redundant but value-identical) writes and both thread-0's run the reduce.
__global__ void Rcf(double *data, double a, double h_hat, int n, double *sum_res) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
int stride=0;
if(threadIdx.x<=n-1){
x=a+(threadIdx.x+1/2.0)*h_hat;
data[threadIdx.x]=std::exp(-std::pow(x,2));
}
if(threadIdx.x==0){
//thread 0 covers the one node (index 1024) beyond blockDim.x threads
stride=blockDim.x;
x=a+(threadIdx.x+stride+1/2.0)*h_hat;
data[threadIdx.x+stride]=std::exp(-std::pow(x,2));
//NOTE(review): no barrier guarantees the other threads/blocks have
//finished writing data[] before this reduction — verify.
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Fixes the Rcf2.cu failure by launching 2 blocks x 1024 threads for the
//n=1025 subintervals (2048 threads launched, 1025 used).
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n_threads_per_block=1024;
int n_blocks=2;
int n=1025;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
double time_spent;
clock_t begin,end;
cudaMalloc((void **)&d_data,sizeof(double)*n);
cudaMalloc((void**)&d_sum,sizeof(double));
h_hat=(b-a)/n;
begin=clock();
Rcf<<<n_blocks,n_threads_per_block>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end=clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
//kernel returns sum of f(x_i); scale by subinterval width on the host
sum_res=h_hat*sum_res;
cudaFree(d_data) ;
cudaFree(d_sum) ;
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf3.cu -o Rcf3.out
# + language="bash"
# ./Rcf3.out
# -
# Pero en la propuesta anterior lanzamos $2*1024$ (bloques $\times$ número de *threads*) $=2048$ *threads* y sólo ocupamos $1025$ *threads*. Entonces podemos cambiar el código anterior para aprovechar los $2048$ *threads* como sigue:
# +
# %%file Rcf4.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
__global__ void Rcf(double *data, double a, double h_hat, int n, double *sum_res) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval, distributing the n nodes over ALL launched threads with a
grid-stride loop.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
//FIX: the previous version strode by blockDim.x starting at threadIdx.x,
//ignoring blockIdx.x — every block redundantly recomputed every node. A
//grid-stride loop (start = global index, stride = total threads) gives
//each thread a distinct set of nodes while producing identical output.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
int i;
for(i=idx;i<=n-1;i+=stride){
x=a+(i+1/2.0)*h_hat;
data[i]=std::exp(-std::pow(x,2));
}
if(idx==0){
//NOTE(review): no grid-wide barrier precedes this reduction — verify
//that data[] written by other blocks is visible here.
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Uses every launched thread: n equals the total thread count
//(2 blocks x 1024 threads = 2048 subintervals).
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n_threads_per_block=1024;
int n_blocks=2;
//one subinterval per launched thread
int n=n_threads_per_block*n_blocks;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
double time_spent;
clock_t begin,end;
cudaMalloc((void **)&d_data,sizeof(double)*n);
cudaMalloc((void**)&d_sum,sizeof(double));
h_hat=(b-a)/n;
begin=clock();
Rcf<<<n_blocks,n_threads_per_block>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end=clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
//kernel returns sum of f(x_i); scale by subinterval width on the host
sum_res=h_hat*sum_res;
cudaFree(d_data) ;
cudaFree(d_sum) ;
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf4.cu -o Rcf4.out
# + language="bash"
# ./Rcf4.out
# -
# **Y podemos no utilizar el ciclo *for***.
# ```{margin}
#
# Para una visualización sobre la construcción del índice en el kernel utilizando `blockDim.x*blockIdx.x + threadIdx.x` ver [An Even Easier Introduction to CUDA](https://devblogs.nvidia.com/even-easier-introduction-cuda/).
#
# ```
# +
# %%file Rcf5.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
//Loop-free variant: each thread computes the global index
//blockIdx.x*blockDim.x + threadIdx.x and handles exactly one node.
__global__ void Rcf(double *data, double a, double h_hat, int n, double *sum_res ) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
int idx;
//global thread index across the whole grid
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx<=n-1){
x=a+(idx+1/2.0)*h_hat;
data[idx]=std::exp(-std::pow(x,2));
}
if(idx==0){
//NOTE(review): no grid-wide barrier precedes this reduction — verify
//that data[] written by other blocks is visible here.
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Same configuration as Rcf4.cu (2 blocks x 1024 threads, one node per
//thread) but using the loop-free kernel above.
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n_threads_per_block=1024;
int n_blocks=2;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
int n=n_blocks*n_threads_per_block;//number of subintervals
double time_spent;
clock_t begin,end;
cudaMalloc((void **)&d_data,sizeof(double)*n);
cudaMalloc((void**)&d_sum,sizeof(double));
h_hat=(b-a)/n;
begin = clock();
Rcf<<<n_blocks,n_threads_per_block>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
//kernel returns sum of f(x_i); scale by subinterval width on the host
sum_res=h_hat*sum_res;
cudaFree(d_data) ;
cudaFree(d_sum) ;
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf5.cu -o Rcf5.out
# + language="bash"
# ./Rcf5.out
# + language="bash"
# source ~/.profile
# nvprof --normalized-time-unit s ./Rcf5.out
# -
# **Utilicemos más nodos.**
# Para el siguiente código, incrementamos el número de bloques.
# +
# %%file Rcf6.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
//One node per thread, indexed by the global thread id (same kernel as in
//Rcf5.cu; only the launch configuration in main changes).
__global__ void Rcf(double *data, double a, double h_hat, int n, double *sum_res ) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
int idx;
//global thread index across the whole grid
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx<=n-1){
x=a+(idx+1/2.0)*h_hat;
data[idx]=std::exp(-std::pow(x,2));
}
if(idx==0){
//NOTE(review): no grid-wide barrier precedes this reduction — verify
//that data[] written by other blocks is visible here.
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Scales the node count with the hardware: queries the device's SM count and
//launches 256 blocks per SM, with n = total threads = one node per thread.
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n_threads_per_block=1024;
int n_blocks=0;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
int n=0;
double time_spent;
clock_t begin,end;
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
//we choose a multiple of the number of SMs.
n_blocks = 256 * properties.multiProcessorCount;
n = n_blocks*n_threads_per_block;
cudaMalloc((void **)&d_data,sizeof(double)*n);
cudaMalloc((void**)&d_sum,sizeof(double));
h_hat=(b-a)/n;
begin = clock();
Rcf<<<n_blocks,n_threads_per_block>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
//kernel returns sum of f(x_i); scale by subinterval width on the host
sum_res=h_hat*sum_res;
cudaFree(d_data) ;
cudaFree(d_sum) ;
printf("Número de subintervalos: %d\n", n);
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf6.cu -o Rcf6.out
# -
# ```{admonition} Observación
# :class: tip
#
# Mientras se ejecuta la siguiente celda se sugiere en la terminal ejecutar en la línea de comando `nvtop`.
#
# ```
# + language="bash"
# ./Rcf6.out
# + language="bash"
# source ~/.profile
# nvprof --normalized-time-unit s ./Rcf6.out
# -
# **Incrementamos el número de subintervalos.**
# +
# %%file Rcf7.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
//One node per thread, indexed by the global thread id (same kernel as
//Rcf5.cu/Rcf6.cu; only the launch configuration in main changes).
__global__ void Rcf(double *data, double a, double h_hat, int n, double *sum_res ) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
int idx;
//global thread index across the whole grid
idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx<=n-1){
x=a+(idx+1/2.0)*h_hat;
data[idx]=std::exp(-std::pow(x,2));
}
if(idx==0){
//NOTE(review): no grid-wide barrier precedes this reduction — verify
//that data[] written by other blocks is visible here.
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Larger problem: 1500 blocks per SM with 512 threads per block;
//n = total threads = one node per thread.
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n_threads_per_block=512;
int n_blocks=0;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
int n=0;
double time_spent;
clock_t begin,end;
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
//blocks scale with the SM count of device 0
n_blocks = 1500 * properties.multiProcessorCount;
n = n_blocks*n_threads_per_block;
cudaMalloc((void **)&d_data,sizeof(double)*n);
cudaMalloc((void**)&d_sum,sizeof(double));
h_hat=(b-a)/n;
begin = clock();
Rcf<<<n_blocks,n_threads_per_block>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
//kernel returns sum of f(x_i); scale by subinterval width on the host
sum_res=h_hat*sum_res;
cudaFree(d_data) ;
cudaFree(d_sum) ;
printf("Número de subintervalos: %d\n", n);
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf7.cu -o Rcf7.out
# -
# ```{admonition} Observación
# :class: tip
#
# Mientras se ejecuta la siguiente celda se sugiere en la terminal ejecutar en la línea de comando `nvtop`.
#
# ```
# + language="bash"
# ./Rcf7.out
# + language="bash"
# source ~/.profile
# nvprof --normalized-time-unit s ./Rcf7.out
# -
# **Incrementamos el número de subintervalos.**
# (RCF8CU)=
# `Rcf8.cu`
# +
# %%file Rcf8.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
__global__ void Rcf(double *data, double a, double h_hat, long int n, double *sum_res ) {
/*
Compute numerical approximation using rectangle or mid-point method in
an interval, using a grid-stride loop so any launch configuration can
cover all n nodes.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
data (double): array that will hold values evaluated in function
a (double): left point of interval
h_hat (double): width of subinterval
n (long int): number of subintervals
sum_res (double): pointer to result
Returns:
sum_res (double): pointer to result
*/
double x=0.0;
int idx;
int num_threads=gridDim.x * blockDim.x;
int stride = num_threads;
long int i;
idx = blockIdx.x * blockDim.x + threadIdx.x;
//grid-stride loop: thread idx handles nodes idx, idx+stride, idx+2*stride,...
//BUG FIX: the loop body previously indexed with idx instead of the loop
//variable i. That was masked here because main launches exactly n threads
//(the loop runs once with i==idx), but with fewer threads than n it would
//skip nodes and overwrite data[idx] repeatedly. i is long int to match n.
for(i=idx; i<=n-1; i+=stride){
x=a+(i+1/2.0)*h_hat;
data[i]=std::exp(-std::pow(x,2));
}
if(idx==0){
//NOTE(review): no grid-wide barrier precedes this reduction — verify
//that data[] written by other blocks is visible here.
*sum_res = thrust::reduce(thrust::device, data , data + n, (double)0, thrust::plus<double>());
}
}
//Report CUDA API failures: prints the human-readable error string to stderr
//when `result` is not cudaSuccess. Returns the status unchanged so calls
//can be wrapped inline, e.g. check_error(cudaMalloc(...)).
cudaError_t check_error(cudaError_t result) {
if (result == cudaSuccess) {
return result;
}
fprintf(stderr, "Error: %s\n", cudaGetErrorString(result));
return result;
}
//Very large problem: 100000 blocks per SM x 1024 threads, n = total threads.
//n and n_blocks are long int since the products exceed what comfortably
//fits in int; allocations are wrapped in check_error because sizeof(double)*n
//may exceed available device memory.
int main(int argc, char *argv[]){
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n_threads_per_block=1024;
long int n_blocks=0;
//reference value of the integral, used to compute the relative error
double obj=0.7468241328124271;
long int n=0;
double time_spent;
clock_t begin,end;
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
//blocks scale with the SM count of device 0
n_blocks = 100000 * properties.multiProcessorCount;
n = n_blocks*n_threads_per_block;
dim3 dimGrid(n_blocks,1,1);
dim3 dimBlock(n_threads_per_block,1,1);
//check_error reports allocation failures instead of failing silently
check_error(cudaMalloc((void **)&d_data,sizeof(double)*n));
check_error(cudaMalloc((void**)&d_sum,sizeof(double)));
h_hat=(b-a)/n;
begin = clock();
Rcf<<<dimGrid,dimBlock>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
check_error(cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost));
//kernel returns sum of f(x_i); scale by subinterval width on the host
sum_res=h_hat*sum_res;
cudaFree(d_data);
cudaFree(d_sum);
printf("Número de subintervalos: %ld\n", n);
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf8.cu -o Rcf8.out
# + language="bash"
# ./Rcf8.out
# -
# ```{admonition} Observación
# :class: tip
#
# En la programación con CUDA-C es importante checar posibles errores de alojamiento de memoria. Una forma es con los tipos [cudaError_t](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1gf599e5b8b829ce7db0f5216928f6ecb6) y `cudaSuccess` . Ver [why-do-i-have-insufficient-buffer-space-when-i-put-allocation-code-in-a-functi](https://stackoverflow.com/questions/58902166/why-do-i-have-insufficient-buffer-space-when-i-put-allocation-code-in-a-functi).
#
# ```
# **Incrementamos el número de subintervalos.**
# +
# %%file Rcf9.cu
#include<stdio.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
__global__ void Rcf(double *data, double a, double h_hat, long int n, double *sum_res ) {
    /*
    Compute numerical approximation using rectangle or mid-point method in
    an interval.
    Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
    Args:
        data (double *): device array that will hold the integrand evaluated at each node
        a (double): left point of interval
        h_hat (double): width of subinterval
        n (long int): number of subintervals
        sum_res (double *): pointer to result (written only by global thread 0)
    */
    double x = 0.0;
    /* Use long int index arithmetic: with the launch configuration in main,
       n approaches 2e9, which overflows 32-bit int indices. */
    long int idx = (long int)blockIdx.x * blockDim.x + threadIdx.x;
    long int stride = (long int)gridDim.x * blockDim.x;
    long int i;
    /* Grid-stride loop over the nodes. BUG FIX: the original body indexed
       with idx instead of the loop variable i, so iterations after the first
       recomputed the same node; that was only harmless because main launches
       exactly n threads. Correct grid-stride indexing uses i. */
    for (i = idx; i <= n - 1; i += stride) {
        x = a + (i + 1 / 2.0) * h_hat;
        data[i] = std::exp(-std::pow(x, 2));
    }
    /* NOTE(review): there is no grid-wide synchronization before this
       sequential reduction by thread 0, so it assumes every block has
       finished writing data[] — CUDA does not guarantee that within one
       kernel launch. Confirm, or move the thrust::reduce to the host side. */
    if (idx == 0) {
        *sum_res = thrust::reduce(thrust::device, data, data + n, (double)0, thrust::plus<double>());
    }
}
cudaError_t check_error(cudaError_t result) {
    /* Report a failed CUDA API status to stderr; always pass the status through
       unchanged so call sites can keep chaining it. */
    if (result == cudaSuccess) {
        return result;
    }
    fprintf(stderr, "Error: %s\n", cudaGetErrorString(result));
    return result;
}
int main(int argc, char *argv[]){
/* Host driver: same as Rcf8.cu but with a 1.5x larger grid (150000 blocks per SM factor). */
double sum_res=0.0;
double *d_data;
double *d_sum;
double a=0.0, b=1.0;
double h_hat;
int n_threads_per_block=1024;
long int n_blocks=0;
double obj=0.7468241328124271; /* reference value of the integral, used for the relative-error report */
long int n=0;
double time_spent;
clock_t begin,end;
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
/* Scale the grid with the SM count; n = total threads = number of subintervals.
   NOTE(review): at this factor n approaches 2e9 — the kernel's index types
   must be wide enough (long int) to avoid 32-bit overflow. */
n_blocks = 150000 * properties.multiProcessorCount;
n = n_blocks*n_threads_per_block;
dim3 dimGrid(n_blocks,1,1);
dim3 dimBlock(n_threads_per_block,1,1);
check_error(cudaMalloc((void **)&d_data,sizeof(double)*n));
check_error(cudaMalloc((void**)&d_sum,sizeof(double)));
h_hat=(b-a)/n;
/* clock() measures host CPU time; cudaDeviceSynchronize() makes the interval cover the kernel. */
begin = clock();
Rcf<<<dimGrid,dimBlock>>>(d_data, a,h_hat,n,d_sum);
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
check_error(cudaMemcpy(&sum_res, d_sum, sizeof(double), cudaMemcpyDeviceToHost));
sum_res=h_hat*sum_res; /* kernel accumulates f(x_i); multiply by the subinterval width here */
cudaFree(d_data);
cudaFree(d_sum);
printf("Número de subintervalos: %ld\n", n);
printf("Integral de %f a %f = %1.15e\n", a,b,sum_res);
printf("Error relativo de la solución: %1.15e\n", fabs(sum_res-obj)/fabs(obj));
printf("Tiempo de cálculo en la gpu %.5f\n", time_spent);
return 0;
}
# + language="bash"
# source ~/.profile
# nvcc -gencode arch=compute_37,code=sm_37 --compiler-options -Wall Rcf9.cu -o Rcf9.out
# + language="bash"
# ./Rcf9.out
# -
# ```{admonition} Ejercicio
# :class: tip
#
# Implementar la regla de Simpson compuesta con *CUDA-C* en una máquina de AWS con las mismas características que la que se presenta en esta nota y medir tiempo de ejecución.
#
# ```
# ## [CuPy](https://github.com/cupy/cupy)
# *NumPy-like API accelerated with CUDA. CuPy is an implementation of NumPy-compatible multi-dimensional array on CUDA. CuPy consists of the core multi-dimensional array class, cupy.ndarray, and many functions on it. It supports a subset of numpy.ndarray interface.*
# ```{margin}
#
# Ver [Basics of CuPy](https://docs-cupy.chainer.org/en/stable/tutorial/basic.html).
#
# ```
# Un subconjunto de funciones del paquete *NumPy* de *Python* están implementadas en *CuPy* vía la clase [cupy.ndarray](https://docs-cupy.chainer.org/en/stable/reference/generated/cupy.ndarray.html#cupy.ndarray) la cual es compatible en la GPU con la clase [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html#numpy.ndarray) que utiliza la CPU.
# ### *Arrays*
import cupy as cp
import numpy as np
# Created with cp.array, so this array lives in GPU (device) memory.
x_gpu = cp.array([1, 2, 3])
# Y el *array* $1$-dimensional anterior está alojado en la GPU.
#
# Podemos obtener información del *array* anterior utilizando algunos métodos y atributos.
print('x_gpu.ndim:',x_gpu.ndim)
print('x_gpu.shape:',x_gpu.shape)
print('x_gpu.size:',x_gpu.size)
print('x_gpu.dtype:',x_gpu.dtype)
# Accedemos con corchetes a sus componentes:
print('primer elemento', x_gpu[0])
print('último elemento', x_gpu[-1])
print('segundo elemento', x_gpu[1])
print('penúltimo elemento', x_gpu[-2])
print('del primero al 2º elemento incluyendo este último', x_gpu[:2])
print('del 2º al último elemento sin incluir el 2º', x_gpu[2:])
# A diferencia de *NumPy* que nos devuelve un error al ejecutar.
x_cpu = np.array([1,2,3])
# + tags=["raises-exception"]
# NumPy raises IndexError for the out-of-bounds fancy index [3] (cell is tagged raises-exception).
print(x_cpu[[3]])
# -
# Con *CuPy* se reciclan los índices.
# NOTE(review): index 3 is out of bounds for this 3-element array; the markdown
# above says CuPy wraps ("recicla") such indices — confirm against the CuPy
# version in use before relying on it.
print(x_gpu[[3]])
# Otra forma de generar *arrays* en *NumPy* es con la función [arange](https://docs.cupy.dev/en/stable/reference/generated/cupy.arange.html) o [random](https://docs.cupy.dev/en/stable/reference/random.html) para un *array* pseudo aleatorio.
print(cp.arange(3))
# Seed the device RNG so the pseudo-random output is reproducible.
cp.random.seed(2000)
print(cp.random.rand(4))
# ***Array*'s dos dimensionales.**
A = cp.array([[1,2,3],[4,5,6]])
print(A)
print('A.ndim:', A.ndim)
print('A.shape:', A.shape)
print('A.size:', A.size)
print('A.dtype', A.dtype)
# Accedemos con corchetes a sus componentes
print('elemento en la posición (0,0):', A[0][0])
print('elemento en la posición (1,2):', A[1][2])
#also with:
print('elemento en la posición (0,0):', A[0,0])
print('elemento en la posición (1,2):', A[1,2])
print('primer columna:', A[:,0])
print('tercer columna:', A[:,2])
print('segundo renglón:', A[1,:])
# Funciones `arange` o `random`.
print(cp.arange(6).reshape(2,3))
print(cp.arange(0,1.2,.2).reshape(3,2))
cp.random.seed(2000)
print(cp.random.rand(2,4))
# ### Operaciones en el álgebra lineal con CuPy
# ### Producto escalar-vector, suma y punto entre vectores
v1 = cp.array([6,-3,4])
v2 = cp.array([4,5,0])
scalar = -1/2
print(scalar*v1)
print(v1.dot(v2))
print(v1+v2)
# ### Producto matriz vector *point-wise*
# A is reassigned for each demo below; only the latest value is used.
A = cp.array([[2,5,0],[3,6,6],[-6,4,-1],[5,4,9]])
print(A)
v = cp.array([-2,1,4])
print(v)
# Elementwise (broadcasted) product, not a matrix-vector product.
print(A*v)
# ### Producto matriz-vector
A = cp.array([[2,5,0],[3,6,6],[-6,4,-1],[5,4,9]])
print(A)
# ```{admonition} Observación
# :class: tip
#
# Obsérvese que las clases de los objetos deben ser del mismo tipo.
#
# ```
v = np.array([-2,1,4])
print(v)
# + tags=["raises-exception"]
# Mixing a CuPy matrix with a NumPy vector fails (cell is tagged raises-exception).
print(A.dot(v))
# -
v = cp.array([-2,1,4])
print(v)
print(A.dot(v))
print(A@v)
v = cp.array([7,0,-3,2])
print(v)
print(v@A)
# ### Suma y producto matriz-matriz pointwise
A = cp.array([[2,5,0],[3,6,6],[-6,4,-1],[5,4,9]])
print(A)
B = cp.array([[2,-2,3],[1,-1,5],[0,-2,1],[0,0,-3]])
print(B)
print(A+B)
print(A*B)
# ### Producto matriz-matriz
A = cp.array([[2,5,0],[3,6,6],[-6,4,-1],[5,4,9]])
print(A)
B = cp.array([[2,-2,3],[1,-1,5],[0,-2,1]])
print(B)
print(A@B)
# ### Algunas operaciones básicas del álgebra lineal
# ### Norma de vectores
v = cp.array([1,2,3])
print(v)
print(cp.linalg.norm(v))
# ### Norma de matrices
A = cp.array([[2,5,0],[3,6,6],[-6,4,-1]])
print(A)
print(cp.linalg.norm(A))
# ### Resolver sistema de ecuaciones lineales
A = cp.array([[8, -6, 2], [-4, 11, -7], [4, -7, 6]])
b = cp.array([28,-40,33])
print('A:')
print(A)
print('b:')
print(b)
# Solve A x = b on the GPU, then verify by recomputing A @ x below.
x = cp.linalg.solve(A,b)
print('x:')
print(x)
print('Verificando resultado Ax = b')
print('b:')
print(b)
print('Ax:')
print(A@x)
# ### Transferencia de datos del *host* al *device* o viceversa
x_cpu = np.array([1, 2, 3])
x_gpu = cp.asarray(x_cpu) # move the data to the current device.
print(x_gpu)
print(type(x_gpu))
x_gpu = cp.array([1, 2, 3]) # create an array in the current device
x_cpu = cp.asnumpy(x_gpu) # move the array to the host.
# Y estas funciones pueden utilizarse para realizar operaciones dependiendo del tipo de *array*.
y_cpu = np.array([5,6,7])
# + tags=["raises-exception"]
# Adding a device array to a host array fails (cell is tagged raises-exception);
# operands must live on the same side, as shown in the two fixes below.
print(x_gpu + y_cpu)
# -
print(x_gpu + cp.asarray(y_cpu))
print(cp.asnumpy(x_gpu) + y_cpu )
# ### Función ejecutada dependiendo de que sean *array*'s de *NumPy* o *CuPy*
# Es posible ejecutar una función dependiendo de sus argumentos con el módulo [get_array_module](https://docs-cupy.chainer.org/en/stable/reference/generated/cupy.get_array_module.html#cupy.get_array_module).
def fun(x):
    """Evaluate exp(-x) + cos(sin(-|x|)) elementwise on *x*.

    Dispatches to NumPy or CuPy depending on where *x* lives, via
    cupy.get_array_module, so the same code serves host and device arrays.
    """
    module = cp.get_array_module(x)
    decay = module.exp(-x)
    wave = module.cos(module.sin(-abs(x)))
    return decay + wave
print(fun(x_gpu))
print(fun(x_cpu))
# ### Ejemplo regla compuesta del rectángulo
f_cp = lambda x: cp.exp(-x**2)
def Rcf_cupy(f,a,b,n):
    """Approximate the integral of *f* over [a, b] with the composite
    rectangle (mid-point) rule on n subintervals, using CuPy arrays.

    The nodes are the midpoints x_i = a + (i + 1/2) * h_hat for
    i = 0, ..., n-1 with h_hat = (b - a) / n.

    Args:
        f (callable): vectorized integrand; must accept a CuPy array.
        a (float): left endpoint of the interval.
        b (float): right endpoint of the interval.
        n (int): number of subintervals.

    Returns:
        0-d cupy.ndarray: numerical approximation to the integral of f
        over [a, b].
    """
    width = (b-a)/n
    # n+1 equally spaced edges; averaging consecutive edges gives the n midpoints.
    edges = cp.linspace(a, b, n+1)
    midpoints = (edges[:-1]+edges[1:])/2
    return width*cp.sum(f(midpoints))
# +
import math
import time
from pytest import approx
from scipy.integrate import quad
# -
n = 10**7
a = 0
b = 1
f=lambda x: math.exp(-x**2) #using math library
# quad provides a high-accuracy reference value for validating the GPU result.
obj, err = quad(f, a, b)
res_cupy = Rcf_cupy(f_cp, a, b,n)
# .get() copies the 0-d CuPy result back to the host before comparing.
print(res_cupy.get() == approx(obj))
from cupyx.time import repeat
# Benchmark the CuPy implementation over n_repeat runs.
print(repeat(Rcf_cupy, (f_cp,a,b,n), n_repeat=10))
# Ver [performance](https://docs.cupy.dev/en/stable/user_guide/performance.html).
# ```{admonition} Observación
# :class: tip
#
# Obsérvese que se utiliza mayor cantidad de memoria por *CuPy* que utilizando la implementación con *CUDA-C* {ref}`Rcf8.cu <RCF8CU>`.
#
# ```
n = 10**9  # deliberately oversized run — the cell below is tagged raises-exception
# + tags=["raises-exception"]
print(repeat(Rcf_cupy, (f_cp,a,b,n), n_repeat=10))
# -
# ```{admonition} Ejercicio
# :class: tip
#
# Implementar la regla de Simpson compuesta con *CuPy* en una máquina de AWS con las mismas características que la que se presenta en esta nota y medir tiempo de ejecución.
#
# ```
# ## Referencias de interés
# Para más sobre *Unified Memory* revisar:
#
# * [Even easier introduction to cuda](https://devblogs.nvidia.com/even-easier-introduction-cuda/)
#
# * [Unified memory cuda beginners](https://devblogs.nvidia.com/unified-memory-cuda-beginners/)
#
# Es importante el manejo de errores por ejemplo en el alojamiento de memoria en la GPU. En este caso es útil revisar:
#
# * [How to Query Device Properties and Handle Errors in CUDA C/C++](https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/)
#
# En las siguientes preguntas encontramos a personas desarrolladoras de CUDA que las resuelven y resultan muy útiles para continuar con el aprendizaje de *CUDA C*. Por ejemplo:
#
# * [Parallel reduction over one axis](https://stackoverflow.com/questions/51526082/cuda-parallel-reduction-over-one-axis)
#
# Otros sistemas de software para el [Heterogeneous computing](https://en.wikipedia.org/wiki/Heterogeneous_computing) son:
#
# * [OpenCl](https://en.wikipedia.org/wiki/OpenCL). Ver [NVIDIA OpenCL SDK Code Samples](https://developer.nvidia.com/opencl) para ejemplos con NVIDIA GPU's.
#
# * [Rth-org/Rth](https://github.com/Rth-org/Rth) y más reciente [matloff/Rth](https://github.com/matloff/Rth). Ver también [rdrr.io matloff/Rth](https://rdrr.io/github/matloff/Rth/f/README.md).
#
# Es posible escribir *kernels* con *CuPy*. Ver por ejemplo: [User-Defined Kernels](https://docs-cupy.chainer.org/en/stable/tutorial/kernel.html).
#
#
# Otro paquete para uso de Python+GPU para cómputo matricial es:
#
# * [PyCUDA](https://github.com/inducer/pycuda/) y ver [PyCUDA en el repo de la clase](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/tree/master/Python/PyCUDA) para más información.
#
# Un paquete para uso de pandas+GPU:
#
# * [Rapids](https://github.com/rapidsai), [cudf](https://github.com/rapidsai/cudf)
#
# Ver [optional-libraries](https://docs-cupy.chainer.org/en/stable/install.html#optional-libraries) para librerías que pueden ser utilizadas con CuPy.
#
# Un paquete de *R* para uso de GPU: [gputools: cran](https://rdrr.io/cran/gputools/).
# ```{admonition} Ejercicios
# :class: tip
#
# 1.Resuelve los ejercicios y preguntas de la nota.
# ```
#
# **Preguntas de comprensión:**
#
# 1)¿Qué factores han determinado un mejor *performance* de una GPU vs una CPU? (contrasta los diseños de una CPU vs una GPU).
#
# 2)¿Dentro de qué modelo de arquitectura de máquinas se ubica a la GPU dentro de la taxonomía de Flynn? (tip: tal modelo se le puede comparar con el modelo **Single Program Multiple Data (SPMD)**)
#
# 3)¿Qué significan las siglas CUDA y detalla qué es CUDA?.
#
# 4)¿Qué es y en qué consiste CUDA C?
#
# 5)¿Qué es un *kernel*?
#
# 6)¿Qué pieza de CUDA se encarga de asignar los bloques de *cuda-threads* a las SM’s?
#
# 7)¿Qué características (recursos compartidos, dimensiones, forma de agendar la ejecución en *threads*) tienen los bloques que se asignan a una SM al lanzarse y ejecutarse un *kernel*?
#
# 8)¿Qué es un *warp*?
#
# 9)Menciona los tipos de memorias que existen en las GPU’s.
#
# 10)Supón que tienes una tarjeta GT200 cuyas características son:
#
# * Máximo número de *threads* que soporta una SM en un mismo instante en el tiempo: 1024
# * Máximo número de *threads* en un bloque: 512
# * Máximo número de bloques por SM: 8
# * Número de SM’s que tiene esta GPU: 30
#
# Responde:
#
# a)¿Cuál es la máxima cantidad de *threads* que puede soportar esta GPU en un mismo instante en el tiempo?
#
# b)¿Cuál es la máxima cantidad de *warps* por SM que puede soportar esta GPU en un mismo instante en el tiempo?
#
# c)¿Cuáles configuraciones de bloques y *threads* siguientes aprovechan la máxima cantidad de *warps* en una SM de esta GPU para un mismo instante en el tiempo?
#
# 1.Una configuración del tipo: bloques de 64 *threads* y 16 bloques.
#
# 2.Una configuración del tipo: bloques de 1024 *threads* y 1 bloque.
#
# 3.Una configuración del tipo: bloques de 256 *threads* y 4 bloques.
#
# 4.Una configuración del tipo: bloques de 512 *threads* y 8 bloques.
#
# \*Debes considerar las restricciones/características de la GPU dadas para responder pues algunas configuraciones infringen las mismas. No estamos considerando *registers* o *shared memory*.
#
# **Referencias:**
#
# 1. <NAME>, Parallel Computing for Data Science. With Examples in R, C++ and CUDA, 2014.
#
# 2. <NAME>, <NAME>, Programming Massively Parallel Processors: A Hands-on Approach, <NAME>ann, 2010.
#
# 3. NVIDIA,CUDA Programming Guide, NVIDIA Corporation, 2007.
#
# 4. <NAME>, <NAME>, The C Programming Language, Prentice Hall Software Series, 1988
#
# 5. [C/extensiones_a_C/CUDA/](https://github.com/palmoreck/programming-languages/tree/master/C/extensiones_a_C/CUDA)
| libro_optimizacion/temas/V.optimizacion_de_codigo/5.5/Computo_en_paralelo_usando_GPUS_en_SMC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Housing Rental Analysis for San Francisco
#
# In this challenge, your job is to use your data visualization skills, including aggregation, interactive visualizations, and geospatial analysis, to find properties in the San Francisco market that are viable investment opportunities.
#
# ## Instructions
#
# Use the `san_francisco_housing.ipynb` notebook to visualize and analyze the real-estate data.
#
# Note that this assignment requires you to create a visualization by using hvPlot and GeoViews. Additionally, you need to read the `sfo_neighborhoods_census_data.csv` file from the `Resources` folder into the notebook and create the DataFrame that you’ll use in the analysis.
#
# The main task in this Challenge is to visualize and analyze the real-estate data in your Jupyter notebook. Use the `san_francisco_housing.ipynb` notebook to complete the following tasks:
#
# * Calculate and plot the housing units per year.
#
# * Calculate and plot the average prices per square foot.
#
# * Compare the average prices by neighborhood.
#
# * Build an interactive neighborhood map.
#
# * Compose your data story.
#
# ### Calculate and Plot the Housing Units per Year
#
# For this part of the assignment, use numerical and visual aggregation to calculate the number of housing units per year, and then visualize the results as a bar chart. To do so, complete the following steps:
#
# 1. Use the `groupby` function to group the data by year. Aggregate the results by the `mean` of the groups.
#
# 2. Use the `hvplot` function to plot the `housing_units_by_year` DataFrame as a bar chart. Make the x-axis represent the `year` and the y-axis represent the `housing_units`.
#
# 3. Style and format the line plot to ensure a professionally styled visualization.
#
# 4. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 5. Answer the following question:
#
# * What’s the overall trend in housing units over the period that you’re analyzing?
#
# ### Calculate and Plot the Average Sale Prices per Square Foot
#
# For this part of the assignment, use numerical and visual aggregation to calculate the average prices per square foot, and then visualize the results as a bar chart. To do so, complete the following steps:
#
# 1. Group the data by year, and then average the results. What’s the lowest gross rent that’s reported for the years that the DataFrame includes?
#
# 2. Create a new DataFrame named `prices_square_foot_by_year` by filtering out the “housing_units” column. The new DataFrame should include the averages per year for only the sale price per square foot and the gross rent.
#
# 3. Use hvPlot to plot the `prices_square_foot_by_year` DataFrame as a line plot.
#
# > **Hint** This single plot will include lines for both `sale_price_sqr_foot` and `gross_rent`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use both the `prices_square_foot_by_year` DataFrame and interactive plots to answer the following questions:
#
# * Did any year experience a drop in the average sale price per square foot compared to the previous year?
#
# * If so, did the gross rent increase or decrease during that year?
#
# ### Compare the Average Sale Prices by Neighborhood
#
# For this part of the assignment, use interactive visualizations and widgets to explore the average sale price per square foot by neighborhood. To do so, complete the following steps:
#
# 1. Create a new DataFrame that groups the original DataFrame by year and neighborhood. Aggregate the results by the `mean` of the groups.
#
# 2. Filter out the “housing_units” column to create a DataFrame that includes only the `sale_price_sqr_foot` and `gross_rent` averages per year.
#
# 3. Create an interactive line plot with hvPlot that visualizes both `sale_price_sqr_foot` and `gross_rent`. Set the x-axis parameter to the year (`x="year"`). Use the `groupby` parameter to create an interactive widget for `neighborhood`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use the interactive visualization to answer the following question:
#
# * For the Anza Vista neighborhood, is the average sale price per square foot for 2016 more or less than the price that’s listed for 2012?
#
# ### Build an Interactive Neighborhood Map
#
# For this part of the assignment, explore the geospatial relationships in the data by using interactive visualizations with hvPlot and GeoViews. To build your map, use the `sfo_data_df` DataFrame (created during the initial import), which includes the neighborhood location data with the average prices. To do all this, complete the following steps:
#
# 1. Read the `neighborhood_coordinates.csv` file from the `Resources` folder into the notebook, and create a DataFrame named `neighborhood_locations_df`. Be sure to set the `index_col` of the DataFrame as “Neighborhood”.
#
# 2. Using the original `sfo_data_df` Dataframe, create a DataFrame named `all_neighborhood_info_df` that groups the data by neighborhood. Aggregate the results by the `mean` of the group.
#
# 3. Review the two code cells that concatenate the `neighborhood_locations_df` DataFrame with the `all_neighborhood_info_df` DataFrame. Note that the first cell uses the [Pandas concat function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) to create a DataFrame named `all_neighborhoods_df`. The second cell cleans the data and sets the “Neighborhood” column. Be sure to run these cells to create the `all_neighborhoods_df` DataFrame, which you’ll need to create the geospatial visualization.
#
# 4. Using hvPlot with GeoViews enabled, create a `points` plot for the `all_neighborhoods_df` DataFrame. Be sure to do the following:
#
# * Set the `geo` parameter to True.
# * Set the `size` parameter to “sale_price_sqr_foot”.
# * Set the `color` parameter to “gross_rent”.
# * Set the `frame_width` parameter to 700.
# * Set the `frame_height` parameter to 500.
# * Include a descriptive title.
#
# Note that your resulting plot should appear similar to the following image:
#
# 
#
# 5. Use the interactive map to answer the following question:
#
# * Which neighborhood has the highest gross rent, and which has the highest sale price per square foot?
#
# ### Compose Your Data Story
#
# Based on the visualizations that you created, answer the following questions:
#
# * How does the trend in rental income growth compare to the trend in sales prices? Does this same trend hold true for all the neighborhoods across San Francisco?
#
# * What insights can you share with your company about the potential one-click, buy-and-rent strategy that they're pursuing? Do neighborhoods exist that you would suggest for investment, and why?
# Imports the required libraries and dependencies
import pandas as pd
import hvplot.pandas
from pathlib import Path
# ## Import the data
# +
# Reads San Francisco (SFO) Neighborhood Census Data into a data frame
sfo_data_df = pd.read_csv(
    Path("Resources/sfo_neighborhoods_census_data.csv"))
# Displays the first 5 and last 5 rows of the SFO Neighborhood dataframe
display(sfo_data_df.head())
display(sfo_data_df.tail())
# -
# ---
# ## Calculate and Plot the Housing Units per Year
#
# For this part of the assignment, use numerical and visual aggregation to calculate the number of housing units per year, and then visualize the results as a bar chart. To do so, complete the following steps:
#
# 1. Use the `groupby` function to group the data by year. Aggregate the results by the `mean` of the groups.
#
# 2. Use the `hvplot` function to plot the `housing_units_by_year` DataFrame as a bar chart. Make the x-axis represent the `year` and the y-axis represent the `housing_units`.
#
# 3. Style and format the line plot to ensure a professionally styled visualization.
#
# 4. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 5. Answer the following question:
#
# * What’s the overall trend in housing units over the period that you’re analyzing?
#
#
# ### Step 1: Use the `groupby` function to group the data by year. Aggregate the results by the `mean` of the groups.
# +
# Creates a numerical aggregation that groups the data by the year and then averages the results.
# numeric_only=True restricts the mean to numeric columns (it drops the string
# "neighborhood" column), matching the implicit behavior of older pandas;
# modern pandas raises a TypeError without it.
housing_units_by_year = sfo_data_df.groupby("year").mean(numeric_only=True)
# Reviews the DataFrame
housing_units_by_year.head()
# -
# ### Step 2: Use the `hvplot` function to plot the `housing_units_by_year` DataFrame as a bar chart. Make the x-axis represent the `year` and the y-axis represent the `housing_units`.
#
# ### Step 3: Style and format the line plot to ensure a professionally styled visualization.
# Creates a visual aggregation explore the housing units by year
# ylim narrows the y-axis range so the modest year-over-year growth is visible.
housing_units_by_year.hvplot.bar(
    yformatter='%.0f',
    x="year",
    y="housing_units",
    xlabel="Year",
    ylabel="Housing Units",
    ylim=(370000,385000),
    title="Average Housing Units by Year (in thousands) ")
# ### Step 5: Answer the following question:
# **Question:** What is the overall trend in housing_units over the period being analyzed?
#
# **Answer:** # The overall trend is that the average number of housing units in San Francisco is steadily increasing year after year.
# ---
# ## Calculate and Plot the Average Sale Prices per Square Foot
#
# For this part of the assignment, use numerical and visual aggregation to calculate the average prices per square foot, and then visualize the results as a bar chart. To do so, complete the following steps:
#
# 1. Group the data by year, and then average the results. What’s the lowest gross rent that’s reported for the years that the DataFrame includes?
#
# 2. Create a new DataFrame named `prices_square_foot_by_year` by filtering out the “housing_units” column. The new DataFrame should include the averages per year for only the sale price per square foot and the gross rent.
#
# 3. Use hvPlot to plot the `prices_square_foot_by_year` DataFrame as a line plot.
#
# > **Hint** This single plot will include lines for both `sale_price_sqr_foot` and `gross_rent`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use both the `prices_square_foot_by_year` DataFrame and interactive plots to answer the following questions:
#
# * Did any year experience a drop in the average sale price per square foot compared to the previous year?
#
# * If so, did the gross rent increase or decrease during that year?
#
#
# ### Step 1: Group the data by year, and then average the results.
# +
# Creates a numerical aggregation by grouping the data by year and averaging the results
# numeric_only=True restricts the mean to numeric columns (it drops the string
# "neighborhood" column), matching the implicit behavior of older pandas;
# modern pandas raises a TypeError without it.
prices_square_foot_by_year = sfo_data_df.groupby('year').mean(numeric_only=True)
# Reviews the resulting DataFrame
display(prices_square_foot_by_year.head())
# -
# **Question:** What is the lowest gross rent reported for the years included in the DataFrame?
#
# **Answer:** # The lowest gross rent in the dataframe is 1239 in 2010.
# ### Step 2: Create a new DataFrame named `prices_square_foot_by_year` by filtering out the “housing_units” column. The new DataFrame should include the averages per year for only the sale price per square foot and the gross rent.
# +
# Filters out the housing_units column, creating a new DataFrame
# Keeps only sale_price_sqr_foot and gross_rent averages per year
# NOTE(review): the result is assigned back to the same name, so re-running
# this cell raises KeyError (the column is already gone).
prices_square_foot_by_year = prices_square_foot_by_year.drop(columns="housing_units")
# Reviews the DataFrame
display(prices_square_foot_by_year)
# -
# -
# ### Step 3: Use hvPlot to plot the `prices_square_foot_by_year` DataFrame as a line plot.
#
# > **Hint** This single plot will include lines for both `sale_price_sqr_foot` and `gross_rent`
#
# ### Step 4: Style and format the line plot to ensure a professionally styled visualization.
#
# Plots prices_square_foot_by_year.
# Included labels for the x- and y-axes, and a title.
# (Fix: corrected the typo "San Fransico" in the user-facing chart title.)
prices_square_foot_by_year.hvplot(
    ylabel="Price in USD",
    xlabel="Year",
    group_label=("Legend"),
    title="Avg. Gross Rent & Sale Price per Sq. Ft. in San Francisco")
# ### Step 6: Use both the `prices_square_foot_by_year` DataFrame and interactive plots to answer the following questions:
# **Question:** Did any year experience a drop in the average sale price per square foot compared to the previous year?
#
# **Answer:** # There was a slight drop in price per square foot in 2011 when compared to 2010.
# **Question:** If so, did the gross rent increase or decrease during that year?
#
# **Answer:** # The gross rent continued to increase during 2011.
# ---
# ## Compare the Average Sale Prices by Neighborhood
#
# For this part of the assignment, use interactive visualizations and widgets to explore the average sale price per square foot by neighborhood. To do so, complete the following steps:
#
# 1. Create a new DataFrame that groups the original DataFrame by year and neighborhood. Aggregate the results by the `mean` of the groups.
#
# 2. Filter out the “housing_units” column to create a DataFrame that includes only the `sale_price_sqr_foot` and `gross_rent` averages per year.
#
# 3. Create an interactive line plot with hvPlot that visualizes both `sale_price_sqr_foot` and `gross_rent`. Set the x-axis parameter to the year (`x="year"`). Use the `groupby` parameter to create an interactive widget for `neighborhood`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use the interactive visualization to answer the following question:
#
# * For the Anza Vista neighborhood, is the average sale price per square foot for 2016 more or less than the price that’s listed for 2012?
#
# ### Step 1: Create a new DataFrame that groups the original DataFrame by year and neighborhood. Aggregate the results by the `mean` of the groups.
# +
# Groups by year and neighborhood and then create a new dataframe of the mean values
# numeric_only=True restricts the mean to numeric columns, matching the
# implicit behavior of older pandas; modern pandas raises a TypeError without it.
prices_by_year_by_neighborhood = sfo_data_df.groupby(["year","neighborhood"]).mean(numeric_only=True)
# Reviews the DataFrame
prices_by_year_by_neighborhood
# -
# -
# ### Step 2: Filter out the “housing_units” column to create a DataFrame that includes only the `sale_price_sqr_foot` and `gross_rent` averages per year.
# +
# Filters out the housing_units
# NOTE(review): the result is assigned back to the same name, so re-running
# this cell raises KeyError (the column is already gone).
prices_by_year_by_neighborhood = prices_by_year_by_neighborhood.drop(columns="housing_units")
# Reviews the first and last five rows of the DataFrame
display(prices_by_year_by_neighborhood.head())
display(prices_by_year_by_neighborhood.tail())
# -
# -
# ### Step 3: Create an interactive line plot with hvPlot that visualizes both `sale_price_sqr_foot` and `gross_rent`. Set the x-axis parameter to the year (`x="year"`). Use the `groupby` parameter to create an interactive widget for `neighborhood`.
#
# ### Step 4: Style and format the line plot to ensure a professionally styled visualization.
# Uses hvplot to create an interactive line plot of the average price per square foot
# The plot has a dropdown selector for the neighborhood
# groupby="neighborhood" is what produces the interactive widget; both
# remaining columns (sale_price_sqr_foot, gross_rent) are drawn as lines.
prices_by_year_by_neighborhood.hvplot(
    x="year",
    groupby="neighborhood")
# ### Step 6: Use the interactive visualization to answer the following question:
# **Question:** For the Anza Vista neighborhood, is the average sale price per square foot for 2016 more or less than the price that’s listed for 2012?
#
# **Answer:** # The average sale price per square foot in 2016 is less than the price listed for 2012.
# ---
# ## Build an Interactive Neighborhood Map
#
# For this part of the assignment, explore the geospatial relationships in the data by using interactive visualizations with hvPlot and GeoViews. To build your map, use the `sfo_data_df` DataFrame (created during the initial import), which includes the neighborhood location data with the average prices. To do all this, complete the following steps:
#
# 1. Read the `neighborhood_coordinates.csv` file from the `Resources` folder into the notebook, and create a DataFrame named `neighborhood_locations_df`. Be sure to set the `index_col` of the DataFrame as “Neighborhood”.
#
# 2. Using the original `sfo_data_df` Dataframe, create a DataFrame named `all_neighborhood_info_df` that groups the data by neighborhood. Aggregate the results by the `mean` of the group.
#
# 3. Review the two code cells that concatenate the `neighborhood_locations_df` DataFrame with the `all_neighborhood_info_df` DataFrame. Note that the first cell uses the [Pandas concat function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) to create a DataFrame named `all_neighborhoods_df`. The second cell cleans the data and sets the “Neighborhood” column. Be sure to run these cells to create the `all_neighborhoods_df` DataFrame, which you’ll need to create the geospatial visualization.
#
# 4. Using hvPlot with GeoViews enabled, create a `points` plot for the `all_neighborhoods_df` DataFrame. Be sure to do the following:
#
# * Set the `size` parameter to “sale_price_sqr_foot”.
#
# * Set the `color` parameter to “gross_rent”.
#
# * Set the `size_max` parameter to “25”.
#
# * Set the `zoom` parameter to “11”.
#
# Note that your resulting plot should appear similar to the following image:
#
# 
#
# 5. Use the interactive map to answer the following question:
#
# * Which neighborhood has the highest gross rent, and which has the highest sale price per square foot?
#
# ### Step 1: Read the `neighborhood_coordinates.csv` file from the `Resources` folder into the notebook, and create a DataFrame named `neighborhood_locations_df`. Be sure to set the `index_col` of the DataFrame as “Neighborhood”.
# +
# Loads neighborhoods coordinates data.
# NOTE(review): the assignment text says `neighborhood_coordinates.csv` but the
# file actually read is `neighborhoods_coordinates.csv` (plural) — confirm
# which name exists in Resources/.
# Indexing by "Neighborhood" lets the later concat align on neighborhood name.
neighborhood_locations_df = pd.read_csv(
    Path("Resources/neighborhoods_coordinates.csv"),
    index_col="Neighborhood")
# Reviews the DataFrame
neighborhood_locations_df
# -
# ### Step 2: Using the original `sfo_data_df` Dataframe, create a DataFrame named `all_neighborhood_info_df` that groups the data by neighborhood. Aggregate the results by the `mean` of the group.
# Calculates the mean values for each neighborhood: one row per neighborhood,
# averaging across years.  "year" is dropped because its mean is meaningless.
# NOTE(review): pandas >= 2.0 requires numeric_only=True in mean() if
# sfo_data_df has non-numeric columns besides the group key — confirm version.
all_neighborhood_info_df = sfo_data_df.groupby("neighborhood").mean().drop(columns="year")
# Reviews the resulting DataFrame
display(all_neighborhood_info_df)
# ### Step 3: Review the two code cells that concatenate the `neighborhood_locations_df` DataFrame with the `all_neighborhood_info_df` DataFrame.
#
# Note that the first cell uses the [Pandas concat function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) to create a DataFrame named `all_neighborhoods_df`.
#
# The second cell cleans the data and sets the “Neighborhood” column.
#
# Be sure to run these cells to create the `all_neighborhoods_df` DataFrame, which you’ll need to create the geospatial visualization.
# +
# Uses the Pandas "concat" function to join the
# neighborhood_locations_df and the all_neighborhood_info_df DataFrame.
# The axis of the concatenation is "columns", so rows are aligned on the
# index (the neighborhood name); neighborhoods present in only one of the
# two frames end up with NaNs, which the next cell removes.
# The concat function automatically combines columns with
# identical information, while keeping the additional columns.
all_neighborhoods_df = pd.concat(
    [neighborhood_locations_df, all_neighborhood_info_df],
    axis="columns",
    sort=False
)
# Review the resulting DataFrame
display(all_neighborhoods_df.head())
display(all_neighborhoods_df.tail())
# +
# Calls the dropna function to remove any neighborhoods that do not have data
# (rows that only appeared in one of the two concatenated frames);
# drop_duplicates guards against any repeated rows.
all_neighborhoods_df = all_neighborhoods_df.reset_index().dropna().drop_duplicates()
# Renames the "index" column as "Neighborhood" for use in the Visualization.
# (reset_index produced a column literally named "index" — presumably the
# concatenated index lost its name; confirm if the rename is a no-op.)
all_neighborhoods_df = all_neighborhoods_df.rename(columns={"index": "Neighborhood"})
# Reviews the resulting DataFrame
display(all_neighborhoods_df.head())
display(all_neighborhoods_df.tail())
# -
# ### Step 4: Using hvPlot with GeoViews enabled, create a `points` plot for the `all_neighborhoods_df` DataFrame. Be sure to do the following:
#
# * Set the `geo` parameter to True.
# * Set the `size` parameter to “sale_price_sqr_foot”.
# * Set the `color` parameter to “gross_rent”.
# * Set the `frame_width` parameter to 700.
# * Set the `frame_height` parameter to 500.
# * Include a descriptive title.
# Creates a plot to analyze neighborhood info.
# Fixes to the original cell:
#   * removed `point="Neighborhood"` — not a valid hvplot keyword (the
#     neighborhood name is already surfaced via `hover_cols`);
#   * the axis labels were swapped: the x dimension is 'Lon' (longitude)
#     and the y dimension is 'Lat' (latitude).
all_neighborhoods_df.hvplot.points(
    'Lon',
    'Lat',
    geo=True,
    size="sale_price_sqr_foot",
    color="gross_rent",
    xlabel='Longitude',
    ylabel='Latitude',
    hover_cols="Neighborhood",
    frame_width=700,
    frame_height=500,
    tiles='OSM',
    title= 'San Fransico Housing Costs Grouped by Neighborhood')
# ### Step 5: Use the interactive map to answer the following question:
# **Question:** Which neighborhood has the highest gross rent, and which has the highest sale price per square foot?
#
# **Answer:** # Westwood Park has the highest gross rent. Miraloma park has the highest sale price per square foot.
# ## Compose Your Data Story
#
# Based on the visualizations that you have created, compose a data story that synthesizes your analysis by answering the following questions:
# **Question:** How does the trend in rental income growth compare to the trend in sales prices? Does this same trend hold true for all the neighborhoods across San Francisco?
#
# **Answer:** # Gross Rent has steadily increased in every neighborhood through the years while price per square foot is a little more volatile. There seem to be higher prices per square foot near the water. The highest price per square foot, as well as the highest rents, tend to be on the Southern side of the city.
# **Question:** What insights can you share with your company about the potential one-click, buy-and-rent strategy that they're pursuing? Do neighborhoods exist that you would suggest for investment, and why?
#
# **Answer:** # I would tell my company that now is the time to invest if they are going to. The prices per square foot are down from previous years, and the gross rent has increased. I would recommend they look for neighborhoods where the average number of housing units is increasing while the price per square foot is decreasing. These are likely neighborhoods where there is an increasing supply that has outpaced demand. However, as the areas are developed more and new businesses move into the area, demand will increase, driving up prices and rent. South Beach and Forest Knolls would be good places to invest in. These are areas where the price per square foot is lowest. If and when these areas develop, because in a city like San Francisco, it is only a matter of time, they stand to earn the greatest return on their investment as opposed to buying property in an already developed and thriving neighborhood.
| san_francisco_housing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# dependencies
from IPython.display import Image
from matplotlib import pyplot as plt
import cv2 as cv
import argparse
import sys
import numpy as np
import os.path
# -
input_path = 'Pa140025.jpg'
is_video = False
Image(input_path)
# ## Input Receiver
# +
# winName = 'Deep learning object detection in OpenCV'
# cv.namedWindow(winName, cv.WINDOW_NORMAL)
# +
# Process inputs
# Open the input file.  cv.VideoCapture also accepts a path to a still
# image, so the same capture object serves both the video and image cases.
cap = cv.VideoCapture(input_path)
# Derive the annotated-output filename from the input path.
if is_video:
    outputFile = input_path + '_yolo_out_py.avi'
else:
    outputFile = input_path + '_yolo_out_py.jpg'
# -
# ## YOLO Plate Detection
# ### Initialize the parameters
# +
confThreshold = 0.5  #Confidence threshold
nmsThreshold = 0.4  #Non-maximum suppression threshold
inpWidth = 416  #608 #Width of network's input image
inpHeight = 416  #608 #Height of network's input image
# -
# Load names of classes (one class name per line in classes.names).
classesFile = "../conf/classes.names";
classes = None
with open(classesFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')
# ### Restore the neural network
# Give the configuration and weight files for the model and load the network using them.
# Darknet YOLOv3 configuration + pretrained license-plate weights.
modelConfiguration = "../conf/darknet-yolov3.cfg";
modelWeights = "../conf/lapi.weights";
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
# Run inference with OpenCV's own backend on CPU.
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
# ### Define the post processing functions
# Get the names of the output layers
def getOutputsNames(net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom, frame):
    """Draw a labelled detection box on `frame` in place.

    classId/conf are the predicted class index and confidence; the box is
    given by its pixel corners (left, top) and (right, bottom).  Reads the
    module-level `classes` list loaded from classes.names.
    """
    # Draw a bounding box.
    cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 3)
    # Confidence alone; prefixed with the class name below when available.
    label = '%.2f' % conf
    # Get the label for the class name and its confidence
    if classes:
        assert(classId < len(classes))
        label = '%s:%s' % (classes[classId], label)
    #Display the label at the top of the bounding box
    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    # Keep the label inside the frame when the box touches the top edge.
    top = max(top, labelSize[1])
    # Filled background behind the text so it stays readable.
    cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (0, 0, 255), cv.FILLED)
    #cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
    cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,0), 2)
# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
    """Filter raw YOLO outputs, draw surviving boxes, and crop the plate.

    frame : BGR image the detections refer to (annotated in place).
    outs  : list of YOLO output arrays; each row is
            [cx, cy, w, h, objectness, class scores...] in fractions of
            the frame size.
    Returns the crop of the last surviving detection, or None when no
    detection passes confidence + NMS filtering.
    """
    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]

    # Scan through all the bounding boxes output from the network and keep
    # only the ones with high confidence scores.  Assign the box's class
    # label as the class with the highest score.
    classIds = []
    confidences = []
    boxes = []
    for out in outs:
        print("out.shape : ", out.shape)
        for detection in out:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if detection[4] > confThreshold:
                print(detection[4], " - ", scores[classId], " - th : ", confThreshold)
                print(detection)
            if confidence > confThreshold:
                # YOLO reports centre/size as fractions of the frame;
                # convert to absolute pixel coordinates of the top-left.
                center_x = int(detection[0] * frameWidth)
                center_y = int(detection[1] * frameHeight)
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # Perform non maximum suppression to eliminate redundant overlapping
    # boxes with lower confidences.
    indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    # BUGFIX: `cropped` was referenced after the loop even when there were
    # no surviving detections (NameError); initialise it explicitly.
    cropped = None
    # BUGFIX: depending on the OpenCV version NMSBoxes returns either
    # [[i], ...] or [i, ...]; flattening handles both shapes, whereas the
    # original `i = i[0]` crashes on modern OpenCV.
    for i in np.array(indices).flatten():
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        # calculate bottom and right
        bottom = top + height
        right = left + width
        # crop the plate out (last detection wins, as in the original)
        cropped = frame[top:bottom, left:right].copy()
        # draw the labelled box on the frame
        drawPred(classIds[i], confidences[i], left, top, right, bottom, frame)
    return cropped
# ### Detection
# +
# Get the video writer initialized to save the output video
if is_video:
    vid_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M','J','P','G'), 30, (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)),round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))

# waitKey returns -1 while no key is pressed, so this loops until a
# keypress or until the capture runs out of frames (break below).  For a
# still image the capture yields one frame, then reports no frame on the
# second iteration, ending the loop.
while cv.waitKey(1) < 0:
    # get frame from the video
    hasFrame, frame = cap.read() #frame: an image object from cv2
    # Stop the program if reached end of video
    if not hasFrame:
        print("Done processing !!!")
        print("Output file is stored as ", outputFile)
        cv.waitKey(3000)
        break
    # Create a 4D blob from a frame (scaled to 1/255, resized to the
    # network input size, no mean subtraction, BGR->RGB swap enabled).
    blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)
    # Sets the input to the network
    net.setInput(blob)
    # Runs the forward pass to get output of the output layers
    outs = net.forward(getOutputsNames(net))
    # Remove the bounding boxes with low confidence
    cropped = postprocess(frame, outs)
    # Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
    t, _ = net.getPerfProfile()
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    #cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    # Write the frame with the detection boxes
    if is_video:
        vid_writer.write(frame.astype(np.uint8))
    else:
        # NOTE(review): if postprocess found no plate, `cropped` may not be
        # a valid image here — confirm inputs always contain a detection.
        plt.imshow(cropped)
        cv.imwrite('cropped_plate_final.jpg', cropped.astype(np.uint8))
    # save the annotated full frame as well
    cv.imwrite(outputFile, frame.astype(np.uint8))
# -
# ## Validation
# <NAME>
# ## Plate Segmentation
def get_contour_precedence(contour, cols):
    """Rank a contour for top-to-bottom, left-to-right ordering.

    The y coordinate is snapped to bands of 10 pixels so contours on the
    same text line compare primarily by their x coordinate.
    """
    tolerance_factor = 10
    x, y, _, _ = cv.boundingRect(contour)
    row_band = (y // tolerance_factor) * tolerance_factor
    return row_band * cols + x
def square(img):
    """
    Pad a non-square image with zeros so that height == width.

    The image is centred: for an odd size difference the extra zero
    row/column goes on the bottom/right side.

    :param img: input image as numpy array
    :return: numpy array (the original array is returned unchanged when it
             is already square)
    """
    h, w = img.shape[0], img.shape[1]
    if h == w:
        return img

    pad_total = abs(h - w)
    before = pad_total // 2
    after = pad_total - before  # gets the extra unit when pad_total is odd

    if h > w:
        # Taller than wide: pad columns on the left and right.
        left = np.zeros(shape=(h, before))
        right = np.zeros(shape=(h, after))
        return np.concatenate((left, img, right), axis=1)

    # Wider than tall: pad rows on the top and bottom.
    top = np.zeros(shape=(before, w))
    bottom = np.zeros(shape=(after, w))
    return np.concatenate((top, img, bottom), axis=0)
def sort(vector):
    """Order 2-D points in place with repeated pairwise swaps.

    Repeats full passes until a pass makes no swap.  The comparison
    deliberately reads vector[i]'s coordinates once per outer step (before
    the inner loop), exactly as the original implementation did, so the
    swap decisions within a pass use those cached values.

    :param vector: mutable sequence of (x, y) pairs
    :return: the same sequence, reordered
    """
    swapped = True
    while swapped:
        swapped = False
        n = len(vector)
        for i in range(n - 1):
            x_i, y_i = vector[i][0], vector[i][1]
            for j in range(i + 1, n):
                x_j, y_j = vector[j][0], vector[j][1]
                # Same predicate as the original if/elif pair, merged with
                # `or` since both branches performed the identical swap.
                if (x_i >= x_j and y_j >= y_i) or (x_i < x_j and y_j > y_i):
                    vector[i], vector[j] = vector[j], vector[i]
                    swapped = True
    return vector
def plate_segmentation(img_file_path):
    """Segment candidate character regions out of a plate image.

    Thresholds the grayscale image (Otsu, after Gaussian blur), finds
    contours, keeps the ones whose area and aspect ratio look
    character-like, and returns:
      * the input image annotated with accepted contours/boxes
        (also written to 'detection.png'), and
      * a list of 28x28 inverted binary crops ordered left-to-right by the
        x-coordinate of each box centre.
    """
    img = cv.imread(img_file_path)
    imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    height = img.shape[0]
    width = img.shape[1]
    area = height * width
    # Accepted contour area must fall between 1% and 10% of the image area.
    #scale1 = 0.001
    scale1 = 0.01
    scale2 = 0.1
    area_condition1 = area * scale1
    area_condition2 = area * scale2
    # global thresholding
    ret1,th1 = cv.threshold(imgray,127,255,cv.THRESH_BINARY)
    # Otsu's thresholding (th2 is what the crops are taken from)
    ret2,th2 = cv.threshold(imgray,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
    # Otsu's thresholding after Gaussian filtering (th3 drives contour search)
    blur = cv.GaussianBlur(imgray,(5,5),0)
    ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
    contours, hierarchy = cv.findContours(th3, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    # sort contours by descending area so, at a given centre, the largest
    # contour is kept and smaller nested ones are skipped below
    contours = sorted(contours, key=cv.contourArea, reverse=True)
    cropped = dict()
    #cropped = []
    for cnt in contours:
        (x,y,w,h) = cv.boundingRect(cnt)
        # x-coordinate of the box centre; used as dict key for dedup and
        # later for left-to-right ordering
        distance_center = (2*x+w)/2
        if distance_center in cropped:
            pass
        else:
            # keep boxes with plausible area and aspect ratio (both w/h
            # and h/w above 0.3, i.e. not extremely elongated)
            if (w * h > area_condition1 and w * h < area_condition2 and w/h > 0.3 and h/w > 0.3):
                cv.drawContours(img, [cnt], 0, (0, 255, 0), 1)
                cv.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 1)
                # crop from the un-blurred Otsu image, invert so glyphs are
                # bright, centre-pad to square, resize to the CNN's 28x28
                c = th2[y:y+h,x:x+w]
                c = np.array(c)
                c = cv.bitwise_not(c)
                c = square(c)
                c = cv.resize(c,(28,28), interpolation = cv.INTER_AREA)
                cropped[distance_center] = c
                #cropped.append(c)
    # order the crops left-to-right by their centre x-coordinate
    sorted_cropped = []
    for x_center in sorted(cropped):
        sorted_cropped.append(cropped[x_center])
    cv.imwrite('detection.png', img)
    return img, sorted_cropped
# Run segmentation on the plate crop produced by the YOLO stage above.
img, digits = plate_segmentation('cropped_plate_final.jpg')
plt.imshow(img)
len(digits)
# Show each segmented character in its own subplot.
# NOTE(review): plt.subplots(n) returns a single Axes (not an array) when
# n == 1, which would break len(axs) — confirm inputs always yield 2+
# segments.
fig, axs = plt.subplots(len(digits))
fig.set_size_inches(20,50)
for ax in range(len(axs)):
    axs[ax].imshow(digits[ax])
# ## Char-Num Recognition
import pandas as pd
import os
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
# Load model: the CNN character classifier trained in the section below.
# Presumably expects (1, 28, 28, 1) grayscale crops and outputs 36 class
# probabilities (0-9, A-Z) — confirm against the training section.
model = load_model('../conf/cnn_classifier.h5')
# +
# # Detect chars
# digits = plate_segmentation('demo/plates/PENNSYLVANIA.jpg')
# +
# Predict
# BUGFIX: the original loop iterated over `sorted_digits`, which is never
# defined in this notebook; the segmented characters live in `digits`
# (returned by plate_segmentation above, already left-to-right ordered).
# One-hot alphabet shared with the training notebook: digits 0-9 then A-Z.
alphabets = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
classes = []
for a in alphabets:
    classes.append([a])
# Fit the encoder once, outside the per-character loop (it is invariant).
# BUGFIX: `categorical_features` was removed from OneHotEncoder in modern
# scikit-learn; passing it raises a TypeError, so it is dropped here.
ohe = OneHotEncoder(handle_unknown='ignore')
ohe.fit(classes)
for d in digits:
    # The CNN expects a (batch, 28, 28, 1) tensor.
    d = np.reshape(d, (1, 28, 28, 1))
    out = model.predict(d)
    # Keep the arg-max class of each prediction row as a one-hot vector,
    # tracking its probability as the reported precision.
    p = []
    precision = 0
    for i in range(len(out)):
        z = np.zeros(36)
        z[np.argmax(out[i])] = 1.
        precision = max(out[i])
        p.append(z)
    prediction = np.array(p)
    # Map the one-hot vector back to its character.
    pred = ohe.inverse_transform(prediction)
    # if precision > 0.8:
    #     print('Prediction : ' + str(pred[0][0]) + ' , Precision : ' + str(precision))
    print('Prediction : ' + str(pred[0][0]) + ' , Precision : ' + str(precision))
# -
# # CNN Recognizer Training
import pandas as pd
import numpy as np
import cv2
import os
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from matplotlib import pyplot as plt
# +
# Load dataset
# -
# Create dictionary for alphabets and related numbers
# (index -> character map; 36 classes total, matching the CNN's 36-way
# softmax output)
alphabets_dic = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J',
                 10: 'K', 11: 'L', 12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q', 17: 'R', 18: 'S', 19: 'T',
                 20: 'U', 21: 'V', 22: 'W', 23: 'X', 24: 'Y', 25: 'Z', 26: '0', 27: '1', 28: '2', 29:'3',
                 30: '4', 31: '5', 32: '6', 33: '7', 34: '8', 35: '9'}
# Class list in the order used for one-hot encoding: digits first, then
# letters.  NOTE(review): this ordering differs from alphabets_dic above
# (letters first) — confirm which ordering the saved labels use.
alphabets = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
dataset_classes = []
for cls in alphabets:
    dataset_classes.append([cls])
# Load old dataset.
# Fixes to the original cell:
#   * pickle files are opened with context managers so the handles are
#     closed (they previously leaked);
#   * the name `l` was reused for both a file handle and the loop
#     variable — renamed for clarity;
#   * `categorical_features` was removed from OneHotEncoder in modern
#     scikit-learn (it raised a TypeError), so it is dropped;
#   * reshape uses -1 for the batch dimension instead of the hard-coded
#     29260/7316 sample counts, which only worked for one dataset size.
with open("data.pickle", "rb") as data_file:
    data = pickle.load(data_file)
with open("labels.pickle", "rb") as labels_file:
    labels = pickle.load(labels_file)
# Wrap each label in a list: OneHotEncoder expects 2-D input.
label_list = []
for label in labels:
    label_list.append([label])
# One hot encoding format for output
ohe = OneHotEncoder(handle_unknown='ignore')
ohe.fit(dataset_classes)
labels_ohe = ohe.transform(label_list).toarray()
data = np.array(data)
labels = np.array(labels)
# Split the data (80/20, fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(data, labels_ohe, test_size=0.20, random_state=42)
# Add the single-channel image dimension expected by the Conv2D stack.
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# +
# CNN model
# Three conv blocks (32, 64, 64 filters): each is two 3x3 conv layers,
# 2x2 max-pooling, and dropout; followed by a dense head ending in a
# 36-way softmax (10 digits + 26 letters).
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(28,28,1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Classification head with stronger dropout before the softmax.
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(36, activation='softmax'))
# -
# `model.summary()` prints the architecture itself and returns None, so
# wrapping it in print() (as the original did) emitted a stray "None" line.
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=64)
model.save('cnn_classifier.h5')
# +
# Visualization
# BUGFIX: Keras renamed the history keys from 'acc'/'val_acc' to
# 'accuracy'/'val_accuracy' in TF 2.x; look up whichever is present so the
# plots work on either version instead of raising KeyError.
acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
val_acc_key = 'val_' + acc_key
plt.figure(figsize=[8, 6])
plt.plot(history.history[acc_key])
plt.plot(history.history[val_acc_key])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.figure(figsize=[8, 6])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# -
| notebooks/CarPlateDecection-naveen-copy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Data Wrangling
#
# ## Introducción
#
# Data wrangling es el proceso de limpieza y unificación de conjuntos de datos desordenados y complejos para facilitar su acceso, exploración, análisis o modelización posterior.
#
# Las tareas que involucra son
# * Limpieza de datos
# * Eliminación de registros duplicados
# * Transformación de datos
# * Discretización de variables
# * Detección y filtro de outliers
# * Construcción de variables dummies
#
# ## Dataset
#
# En esta clase usaremos un dataset con info de películas que disponibiliza datos de movielens (https://movielens.org/).
#
# https://grouplens.org/datasets/movielens/
#
# http://files.grouplens.org/datasets/movielens/ml-latest-small.zip
#
# Este conjunto de datos está conformado por varios archivos:
# * **movies**: idPelicula, título y género;
#
# donde cada registro tiene los datos de una película
#
# * **ratings**: idUsuario, idPelicula, rating, fecha;
#
# donde cada registro tienen la calificación otorgada por un usuario a una película
#
# * **tags**: idUsuario, idPelicula, tag, fecha;
#
# donde cada registro tienen el tag que asignó un usuario a una película
#
# ## Imports
import pandas as pd
import numpy as np
# ## Ejercicio 1 - Importar
#
# Leamos los datos de movies, ratings y tags desde los archivos
# * ../Data/movies.csv
# * ../Data/ratings.csv
# * ../Data/tags.csv
#
# en las variables
# * data_movies
# * data_ratings
# * data_tags
#
# Veamos cuántos registros hay en cada DataFrame y de qué tipos son los datos de cada columna.
#
# Veamos los primeros registros de cada DataFrame para verificar que los datos fueron importados correctamente.
# ## Ejercicio 2 - Registros duplicados
#
# **2.a** Veamos si existen registros duplicados en el DataFrame data_tags considerando sólo las columnas "movieId", "tag", marcando como no duplicado la primera ocurrencia de un valor.
#
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.duplicated.html
#
#
# **2.b** Usemos el método `drop_duplicates` para obtener otro `DataFrame` sin los casos duplicados considerando sólo las columnas "movieId", "tag". Usemos el método `duplicated` para verificar que el nuevo `DataFrame` efectivamente no tiene valores duplicados.
#
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html
# ## Ejercicio 3 - Transformar datos
#
# Construyamos un diccionario que asocie un puntaje a una etiqueta.
#
# Las etiquetas son:
#
# * mala, para puntajes menores a 3;
#
# * regular, para mayor igual a 3 y menor que 4;
#
# * buena para puntaje mayor o igual que 4
#
# Usemos el método `map` para crear una nueva columna en data (`rating_label`) que tenga las etiquetas asociadas al valor del campo `rating` para cada registro
#
# Nota: esto ya sabíamos resolverlo usando máscaras booleanas
# ## Ejercicio 4 - Reemplazar valores
#
# El método `replace` ofrece varias formas de efectuar reemplazos sobre una serie de Pandas:
#
# * Un valor viejo por un valor nuevo.
#
# * Una lista de valores viejos por un valor nuevo.
#
# * Una lista de valores viejos por una lista de valores nuevos.
#
# * Un diccionario que mapee valores nuevos y viejos.
#
#
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html
#
# **4.a - Una lista de valores viejos por un valor nuevo**
#
# Veamos cuáles son los tags que están asignados a una única película.
#
# Reemplacemos ese valor por "tag_que_no_funciona" y eliminemos registros duplicados considerando los campos "userId", "movieId", "tag".
#
# Ayuda: `value_counts`
# **4.b - Una lista de valores viejos por una lista de valores nuevos**
#
# Reemplacemos cada valor de tag, por la primera palabra que lo compone.
#
# Para eso, creamos una serie con valores únicos con el valor del campo tag.
#
# Construimos otra instancia de Series donde cada elemento sea la primera palabra del objeto Series anterior. Ayuda: listas por comprensión y `split`
#
# Usamos replace para cambiar el valor de cada tag por su primera palabra.
#
# **4.c - Un diccionario que mapee valores nuevos y viejos**
#
# Reemplacemos los valores de tags
# * "Al Pacino" por "Pacino"
# * "<NAME>" por "DiCaprio"
# * "<NAME>" por "Hanks"
# * "<NAME>" por "Scorsese"
#
# Contemos cuantas veces aparecen cada uno de los valores a reemplazar, y cuántas los valores de reemplazo. Ayuda: `value_counts`
#
# Construyamos un diccionario con este mapeo y usemos el método `replace`
#
# Volvamos a contar cuántas veces aparecen cada uno de los valores a reemplazar, y cuántas los valores de reemplazo.
#
# ## Ejercicio 5 - Discretizar variables
#
# Vamos a volver a resolver el Ejercicio 3 usando el método `cut`
#
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html
#
# Defino los valores de corte:
#
# * mala, para puntajes menores a 3;
#
# * regular, para mayor igual a 3 y menor que 4;
#
# * buena para puntaje mayor o igual que 4
#
# ## Ejercicio 6 - Detectar y filtrar outliers
#
# No existe un criterio que sea válido en todos los casos para identificar los outliers. El criterio de mayor que el tercer cuartil más 1.5 veces el rango intercuartil o menor que el primer cuartil menos 1.5 veces el rango intercuartil (Q3 - Q1) surge de la distribución normal. En esa distribución el 99.7% de la población se encuentra en el rango definido por la media (poblacional) más menos 3 veces el desvío estándar (poblacional)
#
# **Queremos ver cuáles son las películas que son outliers en cantidad de calificaciones.**
#
# **6.a** Usando data_ratings eliminamos duplicados considerando las columnas "userId", "movieId". Esto lo hacemos para contar sólo una vez los votos de un usuario a una película.
#
# **6.b** Sobre el DataFrame obtenido en el paso anterior, hacemos count agrupado por película. Esto nos da como resultado una instancia de Series que asignamos a la variable movie_votes_count.
#
# **6.c** Calculemos los cuartilos de los valores de movie_votes_count y los valores que usaremos de umbral para determinar outliers.
# (Ayuda: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.quantile.html)
#
# **6.d** Filtremos los datos de movie_votes_count excluyendo los outliers.
#
# **6.e** Comparemos movie_votes_count antes y después del filtro con:
# * el método `describe`
# * boxplots de seaborn
#
# **6.f** Adicional: Miremos cuáles son los títulos de las cinco películas más votadas que son outliers de cantidad de calificaciones
# ## Ejercicio 7 - Variables categóricas y dummies
#
# **7.a** Usando el método `get_dummies` con `drop_first = True` agreguemos al DataFrame data_ratings variables dummies que representen las categorias de rating_label
#
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html
#
#
# **7.b** Comparemos las variables dummies generadas en el punto anterior con las que se generan usando `drop_first = False`. ¿Cuál es la diferencia? ¿Representan el mismo conjunto de valores posibles?
#
# **7.c** Adicional: Cambiemos las categorias que se muestran como resultado de `get_dummies` con `drop_first = True`. Ayuda: https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#categoricaldtype
#
#
# En esta segunda solución (7.b), tenemos una columna para cada categoría de los valores originales.
#
# Los valores representados son exactamente los mismos que había en la columna original (como en las solución 7.a), pero una de las columnas es redundante porque se puede determinar su valor partiendo de los valores de las otras dos.
#
# Las dos soluciones representan todas la categorias posibles de la variable original.
#
# Observemos que los valores (0,0,0), (0,1,1), (1,1,0), (1,0,1), (1,1,1) no representan una categoría en la variable original.
| clase_12_DataWrangling/4_desafio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (epi)
# language: python
# name: epi
# ---
# ## Tutorial 3: Choosing hyperparameters for augmented Lagrangian optimization.
#
# Here, we will run the same EPI optimization as in Tutorials 1 and 2, but with different choices of augmented Lagrangian (AL) optimization hyperparameters. The stochastic constrained objective of EPI is optimized using an AL methodology elaborated below. The hyperparameters of this optimization have default values, but it is useful to learn how to modify these choices for your application. This tutorial should give an idea of what hyperparameters are suitable for a given EPI distribution, and how to design a hyperparameter search.
#
# Throughout the tutorial, please refer to the API documentation at [https://epi.readthedocs.io/en/latest/API.html](https://epi.readthedocs.io/en/latest/API.html).
#
# First, we'll run the setup code from Tutorials 1 and 2 right up until we invoke the EPI method.
# +
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import numpy as np
import tensorflow as tf
# Mac OS jupyter kernel dies without.
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from epi.models import Model, Parameter
import time
matplotlib.rcParams['axes.labelsize'] = 12
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['axes.spines.right'] = False
matplotlib.rcParams['axes.spines.top'] = False
# 1. Define the model.
# Four scalar parameters, one per entry of the 2x2 dynamics matrix, each
# bounded to [-10, 10].
lb, ub = -10., 10.
a11 = Parameter("a11", 1, lb=lb, ub=ub)
a12 = Parameter("a12", 1, lb=lb, ub=ub)
a21 = Parameter("a21", 1, lb=lb, ub=ub)
a22 = Parameter("a22", 1, lb=lb, ub=ub)
name = "lds_2D"
params = [a11, a12, a21, a22]
M = Model(name, params)
# 2. Define the emergent property.
def linear2D_eig(a11, a12, a21, a22):
    """Emergent-property statistics: eigenvalue of the 2x2 system A / tau.

    Applies the quadratic formula to the characteristic polynomial and
    takes the root with '+', i.e. the eigenvalue with the larger real
    part.  Returns T(x) = [Re(lambda), Im(lambda),
    (Re(lambda) - 0)^2, (Im(lambda) - 2*pi)^2] concatenated along axis 1:
    first moments plus squared deviations from the targets (zero real
    part, imaginary part 2*pi), matching the 4-element `mu` below.
    """
    tau = 1.0
    c11 = a11 / tau
    c12 = a12 / tau
    c21 = a21 / tau
    c22 = a22 / tau
    # Quadratic formula: trace/2 +- sqrt(trace^2 - 4*det)/2.  The
    # discriminant is lifted to complex so negative values are allowed.
    real_term = 0.5 * (c11 + c22)
    complex_term = 0.5 * tf.sqrt(
        tf.complex(tf.square(c11 + c22) - 4.0 * (c11 * c22 - c12 * c21), 0.0)
    )
    real_lambda = real_term + tf.math.real(complex_term)
    imag_lambda = tf.math.imag(complex_term)
    T_x = tf.concat(
        (
            real_lambda,
            imag_lambda,
            tf.square(real_lambda - 0.),
            tf.square(imag_lambda - (2. * np.pi)),
        ),
        axis=1,
    )
    return T_x
M.set_eps(linear2D_eig)
# Emergent property targets: mean real part 0, mean imaginary part 2*pi,
# with variances 0.5^2 and (0.2*pi)^2 respectively.
mu = np.array([0.0, 2 * np.pi, 0.5**2, (0.2 * np.pi)**2])
# -
# ### EPI optimization
#
# The objective of EPI is
#
# $$\underset{q_\theta \in Q}{\mathrm{arg max}} \hspace{.2cm} H(q_\theta(z))$$
#
# $$\text{s.t.} \mathbb{E}_{z \sim q_\theta}[ \mathbb{E}_{x \sim p(x \mid z)}[T(x)]] = \mu.$$
#
# $q_\theta$ is optimized with the following augmented Lagrangian loss function:
#
# $$L(\theta; \eta, c) = -H(q_\theta) + \eta^\top R(\theta) + \frac{c}{2}||R(\theta)||^2$$
#
# where $R(\theta) = \mathbb{E}_{z \sim q_\theta}[ \mathbb{E}_{x \sim p(x \mid z)}[T(x)]] - \mu$ is the vector of constraint violation. This objective is optimized for $\eta_k$ and $c_k$, which are updated for each of the `K` augmented Lagrangian optimization epochs, which proceed for `num_iters` iterations until convergence. Convergence (stochastic constraint adherance) is evaluated using a series of hypothesis tests for each constraint. The details are elaborated below.
#
# #### Update rule for $\eta_k$ and $c_k$
# $\eta_1$ is initialized to the zero vector, and $c_1$ is initialized to `c0`. Following each augmented Lagrangian epoch, they are updated according to a criteria parameterized by hyperparameter $\gamma$ (optional parameter `gamma` of `Model.epi`) and by an amount parameterized by hyperparameter $\beta$ (optional parameter `beta` of `Model.epi`).
#
# **Criteria**:
#
# The p-value ($p$) of $\mathbb{E}[|R(\theta_{k})|] > \gamma \mathbb{E}[|R(\theta_{k-1})|]$ is computed.
#
# **Update**:
#
# Always, $\eta_{k+1} = \eta_k + c_k$.
#
# With probability $1-p$, $c_{k+1} = \beta c_k$.
#
# #### `c0` is very important!
#
# The initialization of $c_1$ to `c0` is the most critical hyperparamter choice of the augmented Lagrangian. It defines the initial tradeoff between constraint satisfaction and entropic growth of EPI, and will only increasingly weight constraint satisfaction during later epochs.
#
# A general rule of thumb for well-behaved EPI optimizations is that an initial period of stable entropic growth should cover the modes of the distribution you wish to uncover. This initial period of stable entropic growth should then be followed by iterative changes at each epoch until the constraints are satisfied.
#
# Let's take a look at the 2D LDS oscillation EPI optimization at various choices of `c0`.
# +
# Initial augmented-Lagrangian penalty coefficients to compare.
c0s = [1e-5, 1e-4, 1e-3, 1e-2]
q_thetas = []
opt_datas = []
for i in range(len(c0s)):
    c0 = c0s[i]
    # Run EPI with coupling flow.
    print('Running EPI with c0 = %.2E.' % c0)
    # K is the maximum number of augmented-Lagrangian epochs; stop_early
    # ends the run once the constraints are satisfied.
    q_theta, opt_data, _, _ = M.epi(
        mu,
        c0=c0,
        K=20,
        stop_early = True,
    )
    q_thetas.append(q_theta)
    opt_datas.append(opt_data)
# -
# NOTE(review): this cell references `epi_df`, which is only created in the
# next cell (`epi_df = M.get_epi_df()` below) — running the notebook top to
# bottom raises a NameError here.  Looks like a leftover debug cell; run it
# after the next cell, or remove it.
print(epi_df['c0'])
# Sanity check that 1e-5 compares equal to its decimal spelling.
print(1.e-5 == 0.00001)
# +
epi_df = M.get_epi_df()
# Derived per-row columns: log10 of the initial penalty c0 (used as hue)
# and the norm of the 4-element constraint-violation vector R1..R4.
epi_df['log10_c0'] = [np.log10(row['AL_hps']['c0']) for i, row in epi_df.iterrows()]
epi_df['Rnorm'] = [np.linalg.norm([row['R%d' % j] for j in range(1,5)])
                   for i, row in epi_df.iterrows()]
# Keep only the runs from the sweep above (c0 < 0.1).
_epi_df = epi_df[epi_df['log10_c0'] < -1]
# Maximum possible entropy: uniform distribution over the box [-10, 10]^4.
H_max = -np.log(1/(20.**4))
fig, axs = plt.subplots(1,2,figsize=(14,5))
# Left: entropy trajectory per c0; dashed line marks the entropy ceiling.
sns.lineplot(x='iteration', y='H', ax=axs[0], hue='log10_c0', data=_epi_df)
axs[0].set_ylabel(r'$H(\theta)$')
axs[0].set_ylim([-1, 12.5])
axs[0].plot([0, _epi_df['iteration'].max()], [H_max, H_max], 'k--')
# Right: constraint-violation norm trajectory per c0.
sns.lineplot(x='iteration', y='Rnorm', ax=axs[1], hue='log10_c0', data=_epi_df)
axs[1].set_ylabel(r'$|R(\theta)|$')
# -
# The effect of `c0` on the EPI optimization is quite evident from the plots above. Each EPI optimization was run for enough augmented Lagrangian epochs until convergence, with 2,000 iterations per epoch.
#
# We see that low values of `c0` (e.g. `1e-5`) result in fast entropic growth to the uniform distribution at the outset of optimization. (The dashed black line indicates the maximum entropy of a distribution with the given bounds.) Initial entropic growth can be desireable based on the application, however one should expect a greater number of epochs until convergence compared to a greater value of `c0=1`.
#
# However, an important drawback of selecting a high `c0` is that you may miss interesting structure in the learned distribution.
| notebooks/Augmented_Lagrangian.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/maitysuvo19/News-Articles-Classification/blob/main/Real_news_classification_with__Bert.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="D4SEyMZS_S1G"
#basic imports
import os
import pandas as pd
import numpy as np
# + [markdown] id="6ACJP1uPo_7M"
# Preparing the data
# + colab={"base_uri": "https://localhost:8080/"} id="OBSMQHO5A_Qu" outputId="9a16b939-e5ff-47a4-a875-67864f91464c"
# !unzip news.zip -d news
# + id="BF5x5IQgBD2Q"
# Step 1 - Get the file details
directory = []
file = []
title = []
text = []
label = []
datapath = '/content/news/news article'
for dirname, _ , filenames in os.walk(datapath):
#print('Directory: ', dirname)
#print('Subdir: ', dirname.split('/')[-1])
# remove the Readme.txt file
# will not find file in the second iteration so we skip the error
try:
filenames.remove('README.TXT')
except:
pass
for filename in filenames:
directory.append(dirname)
file.append(filename)
label.append(dirname.split('/')[-1])
#print(filename)
fullpathfile = os.path.join(dirname,filename)
with open(fullpathfile, 'r', encoding="utf8", errors='ignore') as infile:
intext = ''
firstline = True
for line in infile:
if firstline:
title.append(line.replace('\n',''))
firstline = False
else:
intext = intext + ' ' + line.replace('\n','')
text.append(intext)
#
# + colab={"base_uri": "https://localhost:8080/"} id="AZmsLFaEBUvl" outputId="3d156872-9a82-43b9-c9ca-6f85c8a5e153"
# Assemble one row per article; keep only text + label for modeling.
fulldf = pd.DataFrame(list(zip(directory, file, title, text, label)),
               columns =['directory', 'file', 'title', 'text', 'label'])
df = fulldf.filter(['text','label'], axis=1)
print("FullDf : ", fulldf.shape)
print("DF : ", df.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="2thS78tVBcL_" outputId="0740c49c-f241-42ae-8a79-9bfe043f0c99"
df.head(5)
# + [markdown] id="NW9J19pjpPG0"
# Bert
# + id="tTiov7aoBqHo"
#importing a few necessary packages and setting the DATA directory
DATA_DIR="."
import pickle
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
# + id="_x5CvcXbBxxa" colab={"base_uri": "https://localhost:8080/"} outputId="f656d671-c420-4311-d55e-18a9a3b2b58b"
# install BERT
# !pip install pytorch_pretrained_bert pytorch-nlp
# BERT imports
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from pytorch_pretrained_bert import BertTokenizer, BertConfig
from pytorch_pretrained_bert import BertAdam, BertForSequenceClassification
from tqdm import tqdm, trange
import pandas as pd
import io
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# specify GPU device; fall back to CPU when CUDA is unavailable
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
  n_gpu = torch.cuda.device_count()
  # cell-output display of the first GPU's name (return value otherwise unused)
  torch.cuda.get_device_name(0)
# + id="cb2S5BW0C_Y8"
from sklearn.preprocessing import LabelEncoder
#sentiment is positive and negative we need to convert it to 0,1
le = LabelEncoder()
df["label"] = le.fit_transform(df["label"])
# + colab={"base_uri": "https://localhost:8080/"} id="885QJrx6DK3D" outputId="b443c0b7-e391-4cc1-8709-6740679d0b79"
df['label'].value_counts()
# + id="TW57rT0WDT60"
#cleaning the text
from bs4 import BeautifulSoup
import re
def strip(text):
soup = BeautifulSoup(text, "html.parser")
text = re.sub('\[[^]]*\]', '', soup.get_text())
pattern=r"[^a-zA-z0-9\s,']"
text=re.sub(pattern,'',text)
return text
df['text']=df['text'].apply(strip)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="CjDtoUwSDuvm" outputId="ab2ef767-503e-4c51-86db-f7b313bf425e"
df.head()
# + id="59JgQG-RDzZx"
#BERT needs to understand 2 things:
#1) The start and end of each sentiment
# so we declare a special token CLS which tells BERT that its a classification task
sentences = df['text']
sentence = ["[CLS] "+i+" [SEP]" for i in sentences]
# + colab={"base_uri": "https://localhost:8080/", "height": 103} id="W1x2i73hEFH_" outputId="181d99d6-7051-4057-cee2-f8ed0af244aa"
sentence[0]
# + [markdown] id="4M9gZ7VlEPdz"
# We now need to tokenize our text into tokens that correspond to BERT’s vocabulary.
# + colab={"base_uri": "https://localhost:8080/"} id="8Omb5nkNEKHt" outputId="f985a803-fa8b-4bad-f714-34dc09dcebc4"
# Tokenize with BERT tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Restricting the max size of Tokens to 512(BERT doest accept any more than this)
tokenized_texts = list(map(lambda t: tokenizer.tokenize(t)[:510] , sentence))
print ("Tokenize the first sentence:")
print (tokenized_texts[0])
# + id="puDYAHq-EZY9"
labels = list(df['label'])#storing the labels
# + [markdown] id="OtXk6YDbEpFB"
# We now need to give BERT input ids,ie, a sequence of integers which uniquely identify each input token to its index number.
# + id="NxPn-qOGElcG"
# Set the maximum sequence length.
MAX_LEN = 128
# Pad our input tokens so that everything has a uniform length
input_ids = pad_sequences(list(map(tokenizer.convert_tokens_to_ids, tokenized_texts)),
maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# + id="9gPSJgHtEzd5"
# Use the BERT tokenizer to convert the tokens to their index numbers in the BERT vocabulary
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# + [markdown] id="U3tPP95BE63_"
# BERT is a MLM(Masked Language Model). We have to define its mask.
# + id="7XEt_UtmE3sn"
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
# + [markdown] id="v4oFGUWzFDFc"
# Now we need to split the data into train and validation. Convert it to tensors and then create iterator for our data
# + id="wCs3HQ3fE_6S"
# Select a batch size for training.
batch_size = 16
# Use train_test_split to split our data into train and validation sets for training
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels,
random_state=2018, test_size=0.1)
train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids,
random_state=2018, test_size=0.1)
# Convert all of our data into torch tensors, the required datatype for our model
train_inputs = torch.tensor(train_inputs)
validation_inputs = torch.tensor(validation_inputs)
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
# Create an iterator of our data with torch DataLoader
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
# + colab={"base_uri": "https://localhost:8080/"} id="l7usW3PdFJLt" outputId="642a5943-00e3-4ee2-a95d-86c7916105c7"
#Loading pre trained BERT
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=5)#binary classification
model.cuda()
# + colab={"base_uri": "https://localhost:8080/", "height": 683} id="9Kowad6vFauv" outputId="5012cd92-fd44-4462-facd-aad46d0013c4"
# BERT fine-tuning parameters
param_optimizer = list(model.named_parameters())
# apply no weight decay to biases and normalization parameters
# ('gamma'/'beta' are presumably the LayerNorm parameter names in
# pytorch_pretrained_bert -- confirm against the model's named_parameters)
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.0}
]
# BertAdam with linear warmup over the first 10% of steps
optimizer = BertAdam(optimizer_grouped_parameters,
                     lr=2e-5,
                     warmup=.1)
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    """Fraction of rows whose argmax over the logits matches the label."""
    predicted = np.argmax(preds, axis=1).flatten()
    actual = labels.flatten()
    return np.mean(predicted == actual)
# free cached GPU memory before training starts
torch.cuda.empty_cache()
# Store our loss and accuracy for plotting
train_loss_set = []
# Number of training epochs
epochs = 4
# BERT training loop
for _ in trange(epochs, desc="Epoch"):
  ## TRAINING
  # Set our model to training mode
  model.train()
  # Tracking variables
  tr_loss = 0
  nb_tr_examples, nb_tr_steps = 0, 0
  # Train the data for one epoch
  for step, batch in enumerate(train_dataloader):
    # Add batch to GPU
    batch = tuple(t.to(device) for t in batch)
    # Unpack the inputs from our dataloader
    b_input_ids, b_input_mask, b_labels = batch
    # Clear out the gradients (by default they accumulate)
    optimizer.zero_grad()
    # Forward pass; with labels supplied the model returns the loss
    loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
    train_loss_set.append(loss.item())
    # Backward pass
    loss.backward()
    # Update parameters and take a step using the computed gradient
    optimizer.step()
    # Update tracking variables
    tr_loss += loss.item()
    nb_tr_examples += b_input_ids.size(0)
    nb_tr_steps += 1
  print("Train loss: {}".format(tr_loss/nb_tr_steps))
  ## VALIDATION
  # Put model in evaluation mode
  model.eval()
  # Tracking variables
  # NOTE(review): eval_loss and nb_eval_examples are initialized but never
  # updated below -- only accuracy is accumulated
  eval_loss, eval_accuracy = 0, 0
  nb_eval_steps, nb_eval_examples = 0, 0
  # Evaluate data for one epoch
  for batch in validation_dataloader:
    # Add batch to GPU
    batch = tuple(t.to(device) for t in batch)
    # Unpack the inputs from our dataloader
    b_input_ids, b_input_mask, b_labels = batch
    # Telling the model not to compute or store gradients, saving memory and speeding up validation
    with torch.no_grad():
      # Forward pass, calculate logit predictions (no labels -> logits returned)
      logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
    # Move logits and labels to CPU
    logits = logits.detach().cpu().numpy()
    label_ids = b_labels.to('cpu').numpy()
    tmp_eval_accuracy = flat_accuracy(logits, label_ids)
    eval_accuracy += tmp_eval_accuracy
    nb_eval_steps += 1
  # mean of the per-batch accuracies
  print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
# plot training performance
plt.figure(figsize=(15,8))
plt.title("Training loss")
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.plot(train_loss_set)
plt.show()
| Real_news_classification_with__Bert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Python3-powered_hello-world.svg/1000px-Python3-powered_hello-world.svg.png" width="300px" height="100px" />
#
#
# # <font color= #8A0829> Propedéutico en programación para la Maestría de Ciencia de Datos.</font>
# #### <font color= #2E9AFE> `Lunes, Miércoles y Jueves de 19:00 a 21:00 hrs (Virtual)` </font>
# - <Strong> <NAME> </Strong>
# - <Strong> Año </Strong>: 2020
# - <Strong> Copyright: </Strong> MIT License (Excepto donde se indique lo contrario)
#
# - <Strong> Email: </Strong> <font color="blue"> `<EMAIL>, <EMAIL>` </font>
# ___
# ### `Presentación mía`
# ___
# ### `Presentación de ustedes`
# ___
# ### `Algunas reglas de juego`
#
# - Deben asistir, mínimo, al 80% de las clases para tener derecho a calificación.
# - La nota del propedéutico hace parte de las notas del curso IDI1.
# - La regla más importante de todas (regla de oro): todas las entregas se realizan a través de canvas con los plazos asignados. No se aceptan trabajos fuera de la plataforma ni fuera de plazo. No insistan en este punto, no hay negocio.
# ___
# ### `Horario de asesoría`
#
# Es posible antes de clase: **DEBEN ESCRIBIRME CORREO CON ANTICIPACIÓN PARA COORDINARNOS**
# ___
# ### `Descripción de la asignatura`
#
# La ciencia de datos es una disciplina que exige comprender elementos de diversas áreas de conocimiento que incluyen las matemáticas, probabilidad y programación, además de tener conocimientos específicos del área de donde se desprende la información.
#
# En este contexto, el estudiante que opte por iniciar una especialización en ciencia de datos debe tener bases sólidas para aprovechar al máximo las técnicas modernas del análisis de datos.
#
# El propedéutico en programación revisa estos conocimientos base para lograr que el estudiante pueda insertarse de una mejor manera en un posgrado cuyo objetivo es dar herramientas de frontera en ciencia de datos.
# #### `OBJETIVO GENERAL `
# > <p style='text-align: justify;'> Modelar y resolver problemas algorítmicos mediante el uso de programación estructurada y/u orientada a objetos.</p>
# `Semana 1.` **Conociendo python**
# > En esta semana veremos las habilidades básicas necesarias de Python para trabajar en ciencia de datos. Al final de esta semana habrás aprendido la sintaxis básica de Python, cómo usar funciones y cómo definir nuestras propias funciones, los distintos tipos de variables básicas en Python, estructuras condicionales e iterativas.
#
# 1. Conocer el entorno del lenguaje Python
# - Introducción e instalación de software
# 2. ¿Qué tipos de variables tengo a la mano en Python?
# 3. Estructuras básicas de programación
# 4. Funciones en Python
# `Semana 2.` **¿Qué es la programación orientada a objetos?**
# > En esta semana aprenderás los conceptos básicos de la POO en Python: ¿qué son las clases y cómo se definen en python?, ¿cómo se instancian objetos?, ¿cómo definir y utilizar métodos?
#
# 1. ¿Qué es una clase? ¿qué es un objeto?
# 2. Métodos y atributos
# 3. Herencia
# `Semana 3.` **Manejo de librerías en python: numpy**
# > NumPy, que quiere decir Numerical Python, es una librería que nos permite manejar arreglos multidimensionales (datos, por ejemplo). Esta semana aprenderemos a trabajar con esta librería. Al final de la semana habrás aprendido cómo manejar arreglos multidimensionales, cómo accesar dichos arreglos y modificarlos, y cómo operar y aplicar funciones sobre dichos arreglos.
#
# 1. ¿Qué son las librerías? ¿Porqué las usamos?
# 2. Librería numpy
# 3. Manejo de matrices y vectores con numpy
# 4. Aplicaciones
# `Semana 4.` **Manejo de librerías en python: pandas**
# > No podemos trabajar con datos si no tenemos los datos. En esta semana aprenderemos a utilizar la librería de análisis de datos de Python: Pandas. Al finalizar la semana habrás aprendido a leer y a escribir datos, extraer características importantes de los datos, agrupar y organizar datos, entre otros.
#
# 1. Librería pandas
# 2. ¿Cómo podemos importar datos con pandas?
# 3. Limpieza de datos con pandas
# 4. Aplicaciones
# ### `Evaluación`
#
# - Tareas 30%
# - Quices 30%
# - Examen final 40%
# ### `Bibliografía `
# > ```
# - https://www.python.org/
# - https://www.kaggle.com/learn
# - Python for Scientists by Stewart, <NAME>. ```
# Estos y muchos mas libros los pueden encontrar en la Biblioteca.
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
| Semana1/Clase0_GuiaPropedeutico.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Avaliação de Modelos - Métricas de Classificação - Parte 2
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn import linear_model
from sklearn import model_selection
from sklearn import metrics
from sklearn import preprocessing
import matplotlib.pyplot as plt
# Load the 3-class iris dataset and make a stratified 70/30 train/test split.
X, y = datasets.load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, stratify=y, random_state=42)
#model = linear_model.LogisticRegression(multi_class='auto', solver='lbfgs')
# max_iter=130 overrides the solver default (the commented line kept it)
model = linear_model.LogisticRegression(multi_class='auto', solver='lbfgs', max_iter=130)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# ### Matriz de Confusão
# rows = true class, columns = predicted class
metrics.confusion_matrix(y_test, y_pred)
# ### Classification Report
# per-class precision, recall, f1-score and support
print(metrics.classification_report(y_test, y_pred))
# ### Estimativa de probabilidade
#
# Probabilidade da amostra para cada classe do modelo.
y_probs = model.predict_proba(X_test)
# each row holds one probability per class; the row sums to 1
for i in range(5):
    linha = y_probs[i, :]
    print(linha.round(4), linha.sum(), y_pred[i])
# ## Métrica Log Loss
#
# Log Loss é o log da função de probabilidade condicional negativa dos rótulos verdadeiros, dadas as probabilidades das predições de um classificador.
#
# - Leva em conta a probabilidade da predição com base em quanto ela varia em relação ao rótulo real.
# - Usa log negativo para facilitar a comparação de resultados entre diferentes modelos.
# - É bastante usado para classificação multi-classe.
# - Penaliza classificações falsas.
# - Usa o valor negativo do log para facilitar a comparação entre o desempenho de diferentes modelos.
# - Valores próximos de zero, significam bom desempenho do modelo.
#
# 
#
# **Log Loss** e **Cross-Entropy** são ligeiramente diferentes dependendo do contexto, mas em aprendizado de máquina ao calcular as taxas de erro entre 0 e 1 elas são equivalentes (mesmos resultados).
#
# ### Regra MinMax
#
# Ao calcular o Log Loss, os valores de probabilidade previstos 0 e 1 são indefinidos. Para evitar esse problema, Log Loss ajusta as probabilidades previstas (p) usando um valor pequeno (eps / epsilon) de acordo com a fórmula a seguir:
#
# $$ max( min(p, 1−10^{−15}), 10^{-15}) $$
#
# ### Log Loss
#
# Para 2 classes (classificação binária):
#
# $$ - ( y \cdot \log{(p)} + (1 - y) \cdot \log{(1 - p)} ) $$
#
# Para mais de 2 classes:
#
# $$ - \sum_{c=1}^{M}{ y_{o,c} \log{(p_{o,c})} } $$
# confident correct predictions -> low loss (-log(0.75) averaged)
metrics.log_loss([0, 1], [0.25, 0.75])
metrics.log_loss([1, 0], [0.75, 0.25])
# confident wrong predictions -> high loss
metrics.log_loss([0, 1], [0.75, 0.25])
def logloss(y_true, y_prob, eps=1e-15):
    """Binary cross-entropy for a single prediction.

    Parameters:
        y_true: true label, 0 or 1.
        y_prob: predicted probability of the positive class.
        eps: clipping constant implementing the MinMax rule above, so
            log(0) is never evaluated.

    Returns the loss -(y*log(p) + (1-y)*log(1-p)).
    """
    p = np.clip(y_prob, eps, 1 - eps)
    # BUGFIX: the original ignored y_true and always returned -log(1-p),
    # which is only correct for y_true == 0 (the case used below).
    return -(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))
# y_true=0 cases: loss grows as the predicted probability moves away from 0
logloss(0, 0.25)
logloss(0, 0.75)
# ### Calculando para o conjunto de dados iris
metrics.log_loss(y_test, y_probs)
# ## Métrica Curva ROC - Receiver Operating Characteristic
#
# É uma forma de visualizar o desempenho de um classificador binário.
# 
# ### AUC - Area Under the Curve
# 
np.unique(y_test)
# One-vs-rest ROC for class 0: treat class 0 as positive, use its
# predicted probability as the score.
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_probs[:, 0], pos_label=0)
print('x', fpr.round(4))
print('y', tpr.round(4))
print('AUC', metrics.auc(fpr, tpr))
# diagonal = random-classifier reference line
plt.plot([0, 1], [0, 1], '--')
plt.plot(fpr, tpr)
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('Curva ROC')
# same computation for class 1 ...
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_probs[:, 1], pos_label=1)
print('x', fpr.round(4))
print('y', tpr.round(4))
print('AUC', metrics.auc(fpr, tpr))
# ... and class 2 (this one is also plotted below)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_probs[:, 2], pos_label=2)
print('x', fpr.round(4))
print('y', tpr.round(4))
print('AUC', metrics.auc(fpr, tpr))
plt.plot([0, 1], [0, 1], '--')
plt.plot(fpr, tpr)
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('Curva ROC')
# ### Outra forma de calcular AUC
# binarize the labels (class 2 vs rest) and let roc_auc_score compute AUC directly
y_test_binary_2 = np.where(y_test == 2, 1, 0)
metrics.roc_auc_score(y_test_binary_2, y_probs[:, 2])
| 2019/05-Metricas_de_Avaliacao/6-Metricas_de_Classificacao_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ed-10/Daa_2021_1/blob/master/2/12/20.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="0A7nsPiW_tQ9"
def fnRecInfinita():
print("Holi")
fnRecInfinita()
# + id="kWREgxgqAQNZ"
fnRecInfinita()
# + colab={"base_uri": "https://localhost:8080/"} id="aC2xfIaAATKh" outputId="6d4e2c2c-3ef9-4990-e0c2-fbaee8197943"
# A recursive countdown: prints x, x-1, ..., 1 and then "Stop" at the base case.
def fnRec(x):
  if x != 0:
    print(x)
    fnRec(x-1)
  else:
    print("Stop")

def main():
  print("Inicio del programa")
  fnRec(5)
  print("Fin del programa")

main()
# + id="OVeIzzVKBmyX" colab={"base_uri": "https://localhost:8080/"} outputId="2e700a9e-a576-42f1-923c-8cd43a37e490"
def printRev(x):
if x > 0:
printRev(x-1)
print(x)
printRev(3)
# + colab={"base_uri": "https://localhost:8080/"} id="63OS8MecIuup" outputId="1efa1b8c-f0b2-4c2b-9bc9-2d7c8201be32"
# Naive doubly-recursive Fibonacci (exponential time; fine for small n).
def fibonacci( n ):
  if n in (0, 1):
    return n
  return fibonacci(n-1) + fibonacci(n-2)

print(fibonacci(8))
| 2/12/20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''tectosaur2'': conda)'
# language: python
# name: python3
# ---
from tectosaur2.nb_config import setup
setup()
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
from tectosaur2 import integrate_term, refine_surfaces, gauss_rule, tensor_dot
from tectosaur2.mesh import concat_meshes, pts_grid
from tectosaur2.laplace2d import hypersingular
t = sp.var("t")
A = 0.0987
(faultA, faultB) = refine_surfaces(
[
(t, 0.5 * (t - 1), A * (t - 1)),
(t, 0.5 * (t + 1), -A * (t + 1))
],
gauss_rule(8), control_points=np.array([(0, 0, 1, 1.1)])
)
fault = concat_meshes((faultA, faultB))
plt.plot(fault.pts[:,0], fault.pts[:,1], 'k-o')
plt.plot(fault.pts[:,0], fault.pts[:,1], 'k-o')
plt.axis('scaled')
plt.ylim([-1.1, 1.1])
plt.xlim([-1.1,1.1])
plt.show()
# +
# cosine slip profile along the fault, tapering to zero at x = +/-1
fx = fault.pts[:,0]
slip = np.cos(fx * np.pi * 0.5)
plt.plot(fx, slip)
plt.show()
# -
# observation grid: nobs x nobs points covering the zoom window
nobs = 200
zoomx = [-3.2, 3.2]
zoomy = [-3.2, 3.2]
xs = np.linspace(*zoomx, nobs)
ys = np.linspace(*zoomy, nobs)
obs_pts = pts_grid(xs, ys)
obsx = obs_pts[:, 0]
obsy = obs_pts[:, 1]
# +
# singular points of the integrand: the two fault endpoints and the junction
singularities = np.array([(-1, -A), (0,0), (1, -A)])
get_stress, report = integrate_term(
    hypersingular, obs_pts, fault, singularities=singularities, return_report=True
)
# -
# apply the integral operator to the slip to get the two stress components
stress = tensor_dot(get_stress, slip)
# contour each stress component on the observation grid
for d in range(2):
    v2d = stress[:,d].reshape((nobs, nobs))
    levels = np.linspace(-1, 1, 21)
    cntf = plt.contourf(xs, ys, v2d, levels=levels, extend="both")
    plt.contour(
        xs,
        ys,
        v2d,
        colors="k",
        linestyles="-",
        linewidths=0.5,
        levels=levels,
        extend="both",
    )
    plt.colorbar(cntf)
    plt.show()
# +
# friction angle from a friction coefficient of 0.6
phi = np.arctan(0.6)
# hardening modulus (NOTE: rebound to a sympy symbol further below)
h = 0
cohesion = 0.5
stress_trace = 0.5
# Mohr-Coulomb-style yield stress from pressure, friction, and cohesion
yield_stress = (-stress_trace / 3) * np.sin(phi) + cohesion * np.cos(phi)
sxz = stress[:,0]
syz = stress[:,1]
# magnitude of the antiplane shear stress vector
devS2 = np.sqrt(sxz ** 2 + syz ** 2)
Lam = 1.0
# flow direction: normalized stress components scaled by Lam
flow_xz = Lam * sxz / devS2
flow_yz = Lam * syz / devS2
flow_xz, flow_yz
# -
yield_stress
# boolean map of where |s_yz| exceeds the yield stress
v2d = np.abs(stress[:,1].reshape((nobs, nobs))) > yield_stress
levels = np.linspace(-1, 1, 21)
cntf = plt.contourf(xs, ys, v2d, levels=levels, extend="both")
plt.contour(
    xs,
    ys,
    v2d,
    colors="k",
    linestyles="-",
    linewidths=0.5,
    levels=levels,
    extend="both",
)
plt.colorbar(cntf)
plt.show()
# Symbolic derivation of the plastic multiplier and plastic strain increments.
# NOTE: sxz, syz, and h are rebound here from numpy arrays / numbers above to
# sympy symbols.
sy = sp.var('s_y')
se_var = sp.var('\\tilde{\\tau}')
sxz, syz = sp.var('s_{xz}, s_{yz}')
exz, eyz = sp.var('e_{xz}, e_{yz}')
h = sp.var('h')
p = sp.var('p')
# hardening term r = h * p
r = h * p
# 3x3 antiplane stress and strain tensors, flattened row-major
st = [0,0,sxz,0,0,syz,sxz, syz, 0]
et = [0,0,exz,0,0,eyz,exz, eyz, 0]
trace_st = st[0] + st[4] + st[8]
I = [1,0,0,0,1,0,0,0,1]
# deviatoric stress and von Mises equivalent stress
dev_st = [st[i] - sp.Rational(1,3) * trace_st * I[i] for i in range(9)]
se = sp.sqrt(sp.Rational(3,2) * sum([dev_st[i] * dev_st[i] for i in range(9)]))
# yield function
f = se - r - sy
f
# df/d(stress), component-wise (zero where the stress component is zero)
dfds = [sp.diff(f, st[i]) if st[i] != 0 else 0 for i in range(9)]
dfds[2].subs(se, se_var)
# +
# Why are these different?!!
# dfds[2]
# dev_st[2] * sp.Rational(3,2) / se
# -
dfdp = sp.diff(f, p)
dfdp
# isotropic elastic stiffness C_ijkl = lam*delta_ij*delta_kl + 2*mu*delta_ik*delta_jl
lam, mu = sp.var('lam, mu')
C = [[0 for j in range(9)] for i in range(9)]
for i in range(3):
    for j in range(3):
        ij = i * 3 + j
        for k in range(3):
            for l in range(3):
                kl = k * 3 + l
                C[ij][kl] = lam * int(i==j) * int(k==l) + 2*mu * int(i==k) * int(j==l)
# consistency-condition pieces for the plastic multiplier
dfds_C_dfds = sum([dfds[i] * sum([C[i][j] * dfds[j] for j in range(9)]) for i in range(9)])
#dfds_C_dfds = dfds_C_dfds.simplify()
dfds_C_dfds = dfds_C_dfds.subs(se, se_var)
dfds_C_de = sum([dfds[i] * sum([C[i][j] * et[j] for j in range(9)]) for i in range(9)])
dfds_C_de = dfds_C_de.subs(se, se_var)
dfdp_dfds = dfdp * sp.sqrt(sp.Rational(2, 3) * sum([dfds[i] * dfds[i] for i in range(9)]))
dfdp_dfds = dfdp_dfds.subs(se, se_var)
# plastic multiplier increment
dlambda = (dfds_C_de / (dfds_C_dfds + dfdp_dfds)).simplify()
dlambda
# plastic strain increments d(eps_p) = dlambda * df/ds
dep = [(dlambda * dfds[i]).subs(se, se_var) for i in range(9)]
dep[2].simplify()
# reduced 2-component form using only the xz/yz derivatives
n = [2 * sp.diff(f, sxz), 2 * sp.diff(f, syz)]
n
numer = n[0] * mu * exz + n[1] * mu * eyz
denom = (mu * n[0] ** 2 + mu * n[1] ** 2 + h)
dlam = (numer / denom)
dlam
dep = [(dlam * n[0]).factor(), (dlam * n[1]).factor()]
dep[1]
# 1. Increment the slip on the fault.
# 2. Calculate the consequent elastic stress and strain in the volume.
# 3. Total strain = elastic strain + past plastic strain
# 3. Given stress, calculate if F < 0 or F >= 0.
# 4. If F < 0 --> elastic
# 5. If F >= 0 --> plastic
# 6. Calculate $d\lambda_t$
# 7. Calculate stressing rate
# 8. Calculate hardening
| experiments/plasticity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1차원 데이터의 정리
# ## 데이터 중심의 지표
# +
import numpy as np
import pandas as pd

# Limit Jupyter Notebook output to 3 decimal places
# %precision 3
# Limit DataFrame output to 3 decimal places.
# 'display.precision' is the full option name; the bare 'precision' alias
# was deprecated and removed in pandas 1.4, so the full name is used here.
pd.set_option('display.precision', 3)
# -
# load the English/math score table, indexed by student number
df = pd.read_csv('../data/ch2_scores_em.csv',
                 index_col='student number')
# display the first 5 rows of df
df.head()
# take the first 10 English scores as the working sample
scores = np.array(df['english'])[:10]
scores
scores_df = pd.DataFrame({'score':scores},
                         index=pd.Index(['A', 'B', 'C', 'D', 'E',
                                         'F', 'G', 'H', 'I', 'J'],
                                        name='student'))
scores_df
# ### 평균값
# arithmetic mean by hand, via numpy, and via pandas -- all three agree
sum(scores) / len(scores)
np.mean(scores)
scores_df.mean()
# ### 중앙값
sorted_scores = np.sort(scores)
sorted_scores
n = len(sorted_scores)
# median: mean of the two middle values when n is even,
# the single middle value when n is odd
if n % 2 == 0:
    m0 = sorted_scores[n//2 - 1]
    m1 = sorted_scores[n//2]
    median = (m0 + m1) / 2
else:
    median = sorted_scores[(n+1)//2 - 1]
median
np.median(scores)
scores_df.median()
# ### 최빈값
# mode: the most frequent value (1 here)
pd.Series([1, 1, 1, 2, 2, 3]).mode()
# with no repeats every value is a mode, so all five values are returned
pd.Series([1, 2, 3, 4, 5]).mode()
# ## 데이터의 산포도 지표
# ### 분산과 표준편차
# #### 편차
mean = np.mean(scores)
# deviation: each score minus the mean; its average is always zero
deviation = scores - mean
deviation
another_scores = [50, 60, 58, 54, 51, 56, 57, 53, 52, 59]
another_mean = np.mean(another_scores)
another_deviation = another_scores - another_mean
another_deviation
# both deviation averages come out (numerically) zero
np.mean(deviation)
np.mean(another_deviation)
summary_df = scores_df.copy()
summary_df['deviation'] = deviation
summary_df
summary_df.mean()
# #### 분산
# population variance: mean of the squared deviations
np.mean(deviation ** 2)
np.var(scores)
# NOTE(review): pandas' var() defaults to ddof=1 (sample variance), so this
# value differs from np.var above
scores_df.var()
summary_df['square of deviation'] = np.square(deviation)
summary_df
summary_df.mean()
# #### 표준편차
# standard deviation = sqrt(population variance); both forms agree
np.sqrt(np.var(scores, ddof=0))
np.std(scores, ddof=0)
# ### 범위와 4분위수 범위
# #### 범위
# range: max - min
np.max(scores) - np.min(scores)
# interquartile range: Q3 - Q1
scores_Q1 = np.percentile(scores, 25)
scores_Q3 = np.percentile(scores, 75)
scores_IQR = scores_Q3 - scores_Q1
scores_IQR
# #### 4분위수 범위
# ### 데이터의 지표 정리
pd.Series(scores).describe()
# ## 데이터의 정규화
# ### 표준화
# standardization (z-score): shift/scale to mean 0, standard deviation 1
z = (scores - np.mean(scores)) / np.std(scores)
z
np.mean(z), np.std(z, ddof=0)
# ### 편차값
# deviation score: z-score rescaled to mean 50, standard deviation 10
z = 50 + 10 * (scores - np.mean(scores)) / np.std(scores)
z
scores_df['deviation value'] = z
scores_df
# ## 데이터의 시각화
# array of all 50 students' English scores
english_scores = np.array(df['english'])
# convert to a Series to display the describe() summary
pd.Series(english_scores).describe()
# ### 도수분포표
# frequency per class: 10 bins of width 10 over [0, 100]
freq, _ = np.histogram(english_scores, bins=10, range=(0, 100))
freq
# build the list of class-interval strings "0~10", "10~20", ...
freq_class = [f'{i}~{i+10}' for i in range(0, 100, 10)]
# build a DataFrame indexed by freq_class
freq_dist_df = pd.DataFrame({'frequency':freq},
                            index=pd.Index(freq_class,
                                           name='class'))
freq_dist_df
# class value: midpoint of each interval
class_value = [(i+(i+10))//2 for i in range(0, 100, 10)]
class_value
# relative frequency: share of observations per class
rel_freq = freq / freq.sum()
rel_freq
# cumulative relative frequency: running total of rel_freq
cum_rel_freq = np.cumsum(rel_freq)
cum_rel_freq
# +
freq_dist_df['class value'] = class_value
freq_dist_df['relative frequency'] = rel_freq
freq_dist_df['cumulative relative frequency'] = cum_rel_freq
freq_dist_df = freq_dist_df[['class value', 'frequency',
                             'relative frequency', 'cumulative relative frequency']]
freq_dist_df
# -
# #### 최빈값 재검토
freq_dist_df.loc[freq_dist_df['frequency'].idxmax(), 'class value']
# ### 히스토그램
# +
# import Matplotlib's pyplot module under the name plt
import matplotlib.pyplot as plt
# render plots inline in the notebook
# %matplotlib inline
# +
# create the canvas
# figsize sets the width and height
fig = plt.figure(figsize=(10, 6))
# add a plotting area to the canvas
# the argument 111 means a 1x1 grid, drawing in its single cell
ax = fig.add_subplot(111)
# draw the histogram with 10 classes (bins)
freq, _, _ = ax.hist(english_scores, bins=10, range=(0, 100))
# label the X axis
ax.set_xlabel('score')
# label the Y axis
ax.set_ylabel('person number')
# X-axis ticks at 0, 10, 20, ..., 100
ax.set_xticks(np.linspace(0, 100, 10+1))
# Y-axis ticks at 0, 1, 2, ...
ax.set_yticks(np.arange(0, freq.max()+1))
# show the plot
plt.show()
# +
# same histogram with a finer resolution: 25 bins of width 4
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
freq, _ , _ = ax.hist(english_scores, bins=25, range=(0, 100))
ax.set_xlabel('score')
ax.set_ylabel('person number')
ax.set_xticks(np.linspace(0, 100, 25+1))
ax.set_yticks(np.arange(0, freq.max()+1))
plt.show()
# +
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(111)
# create a second Y axis sharing the same plotting area as ax1
ax2 = ax1.twinx()
# for a relative-frequency histogram the counts must be divided by the
# number of observations; hist's `weights` argument achieves this
weights = np.ones_like(english_scores) / len(english_scores)
rel_freq, _, _ = ax1.hist(english_scores, bins=25,
                          range=(0, 100), weights=weights)
cum_rel_freq = np.cumsum(rel_freq)
# bin midpoints for 25 bins of width 4
class_value = [(i+(i+4))//2 for i in range(0, 100, 4)]
# draw the cumulative curve as a line plot:
# ls='--' makes it dashed, marker='o' draws the data points,
# color='gray' sets the line color
ax2.plot(class_value, cum_rel_freq,
         ls='--', marker='o', color='gray')
# hide the gridlines of the line plot
ax2.grid(visible=False)
ax1.set_xlabel('score')
ax1.set_ylabel('relative frequency')
ax2.set_ylabel('cumulative relative frequency')
ax1.set_xticks(np.linspace(0, 100, 25+1))
plt.show()
# ### 상자 그림
# +
# box plot of the 50 English scores (median, quartiles, whiskers)
fig = plt.figure(figsize=(5, 6))
ax = fig.add_subplot(111)
ax.boxplot(english_scores, labels=['english'])
plt.show()
# -
| notebook/chap02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 그래프 그리기
# + 데이터 시각화가 필요한 이유
# ### 1. 앤스콤 데이터 집합 불러오기
# 앤스콤 데이터 집합은 seaborn 라이브러리에 포함되어 있습니다.
#
# seaborn 라이브러리의 load_dataset 메서드에 문자열 anscombe을 전달하면 앤스콤 데이터 집합을
#
# 불러올 수 있습니다. 이때 데이터프레임의 열 이름 중 dataset 열이 데이터 그룹을 구분한다는 것을
#
# 알 수 있습니다.
# (먼저 cmd 창을 열고 pip install seaborn 을 해서 다운을 받아줍시다)
# +
import seaborn as sns
# The Anscombe quartet ships with seaborn; the 'dataset' column (I..IV)
# identifies the four data groups.
anscombe = sns.load_dataset("anscombe")
print(anscombe)
print(type(anscombe))
# -
# ### 2. matplotlib 라이브러리로 그래프 그리기
# 데이터 집합이 준비되었으면 이제 그래프로 시각화를 하면 됩니다.
# 그래프를 그리기 위해 matplotlib 라이브러리를 사용합니다.
# 먼저 그래프를 그리기 위해 matplotlib 라이브러리를 불러오겠습니다.
# render interactive figures inside the notebook
# %matplotlib notebook
import matplotlib.pyplot as plt
# 3. The following extracts only the rows of the anscombe DataFrame whose
# `dataset` column equals 'I', i.e. the first data group.
dataset_1 = anscombe[anscombe['dataset'] == 'I']
# 4. A line graph is drawn with the plot method: pass the x and y data and a
#
# line graph appears. Pass the x and y columns of dataset_1 prepared above.
plt.plot(dataset_1['x'], dataset_1['y'])
# 5. The plot method draws a line graph by default.
#
# To draw points instead, pass 'o' as the third argument.
plt.plot(dataset_1['x'], dataset_1['y'], 'o')
# # 앤스콤 데이터 집합 모두 사용해 그래프 만들기
# 앤스콤 데이터 집합은 4개의 데이터 그룹으로 구성되어 있으며 각 데이터 그룹의 차이를 파악하려면 그래프로 시각화해야 한다고 했습니다. 이번에는 모든 데이터 그룹에 대하여 그래프를 그려보겠습니다.
# ### matplotlib 라이브러리로 그래프 그리기
# matplotlib 라이브러리로 그래프를 그리는 방법 입니다. 아래에 정리한 과정을 따라해 보세요.
# 1. 전체 그래프가 위치할 기본 틀을 만듭니다.
# 2. 그래프를 그려 넣을 그래프 격자를 만듭니다.
# 3. 그런 다음 격자에 그래프를 하나씩 추가합니다. 격자에 그래프가 추가되는 순서는 왼쪽에서 오른쪽 방향입니다.
# 4. 만약 격자의 첫 번째 행이 꽉 차면 두 번째 행에 그래프를 그려 넣습니다.
# ## 한 번에 4개의 그래프 그리기
# 1. 앤스콤 데이터프레임의 dataset 열의 값이 I, II, III, IV인 것을 불린 추출하여 dataset_1, 2, 3, 4에 저장합니다.
# Boolean-extract the remaining three Anscombe groups (II, III, IV).
dataset_2 = anscombe[anscombe['dataset'] == 'II']
dataset_3 = anscombe[anscombe['dataset'] == 'III']
dataset_4 = anscombe[anscombe['dataset'] == 'IV']
# 2. First create the base figure that will hold the grid of subplots.
fig = plt.figure()
# 3. add_subplot(rows, cols, index) adds one cell of a 2x2 grid to the figure.
axes1 = fig.add_subplot(2, 2, 1)
axes2 = fig.add_subplot(2, 2, 2)
axes3 = fig.add_subplot(2, 2, 3)
axes4 = fig.add_subplot(2, 2, 4)
# 4.이제 plot 메서드에 데이터를 전달하여 그래프를 그리면 됩니다. 이번에는 점으로 그래프를 표현했습니다.
# 그래프를 확인하려면 fig를 반드시 입력해야 합니다.
# +
# Plot each Anscombe group in its own grid cell, as points.
# BUG FIX: axes2/axes3/axes4 previously plotted dataset_1['y'] against the
# other groups' x values, which defeats the whole point of the quartet
# (each group must be plotted with its own y values).
axes1.plot(dataset_1['x'], dataset_1['y'], 'o')
axes2.plot(dataset_2['x'], dataset_2['y'], 'o')
axes3.plot(dataset_3['x'], dataset_3['y'], 'o')
axes4.plot(dataset_4['x'], dataset_4['y'], 'o')
fig
# -
# 5. 각각의 그래프를 쉽게 구분할 수 있도록 그래프 격자에 제목을 추가해 볼까요? set_title 메서드로 그래프 이름을 전달하면 그래프 격자에 제목이 추가됩니다.
# +
# Give each grid cell its own title so the four groups are easy to tell apart.
axes1.set_title("dataset_1")
axes2.set_title("dataset_2")
axes3.set_title("dataset_3")
axes4.set_title("dataset_4")
fig
# -
# 6. Add a title to the base figure itself with the suptitle method.
# +
fig.suptitle("Anscombe Data")
fig
# -
# 7. The titles and tick labels above overlap; tight_layout adjusts the
# layout of the subplots to resolve this.
# +
fig.tight_layout()
fig
# -
# 평균, 분산, 상관관계, 회귀선의 통계 수치가 같아도 그래프의 형태는 다를수 있습니다.
# 앤스콤 4분할 그래프는 데이터 분석 시 수치에만 의존하면 잘못된 판단을 할 수 있다는 것을 알려주는 좋은 예시 입니다.
| pandas_04_1(Draw a graph).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Question 1
# <img src="images/lec9_quiz01.png">
#
# *Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/3UQTb/neural-networks-learning)*
#
# <!--TEASER_END-->
# # Question 2
# <img src="images/lec9_quiz02.png">
#
# *Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/3UQTb/neural-networks-learning)*
#
# <!--TEASER_END-->
# # Question 3
# <img src="images/lec9_quiz03.png">
# <img src="images/lec9_quiz03-02.png">
#
# *Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/3UQTb/neural-networks-learning)*
#
# <!--TEASER_END-->
# **Answer**
#
# - $J(\theta) = 3 \theta^3 + 2$
# - $\theta=1, \epsilon = 0.01$
# - $\large \frac{J(\theta + \epsilon) - J(\theta - \epsilon)}{2\epsilon}$
# = $\large\frac{(3 \times (1 + 0.01)^3 + 2) - (3 \times (1 - 0.01)^3 + 2)}{2\times 0.01}$
# = $\large\frac{(3\times(1.030301) + 2) - (3 \times (0.970299) + 2)}{0.02}$
# = $\large\frac{5.090903 - 4.910897}{0.02}$
# = $9.0003$
#
#
# # Question 4
# <img src="images/lec9_quiz04-01.png">
# <img src="images/lec9_quiz04-02.png">
#
# *Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/3UQTb/neural-networks-learning)*
#
# <!--TEASER_END-->
# # Question 5
# <img src="images/lec9_quiz05-01.png">
# <img src="images/lec9_quiz05-02.png">
#
# *Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/3UQTb/neural-networks-learning)*
#
# <!--TEASER_END-->
| machine_learning/lecture/week_5/ix_neural_networks_learning/.ipynb_checkpoints/quiz - Neural Networks-Learning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# # CONTENT
# + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# 1. Introduction and hypotheses
#
# 2. Data
#
# 2.1. Data loading and transformation
#
# 2.2. Data outliers
#
# 2.3. Missing values
#
# 3. Features engineering
#
# 3.1. Hypotheses
#
# 3.2. Hypotheses testing
#
# 4. Modeling
#
# 5. Conclusion
# + [markdown] _uuid="4fd583f3583c8eb8312c8bbffaf76a36e2e630a4"
# ***
# + [markdown] _uuid="ce0af92f469a2382f9317cf8e2b4e40222ce147e"
# # 1. Introduction and hypotheses
# + [markdown] _uuid="50a42f45516c31a5ef02427afc8646c9bdb9a8bf"
# To begin with we load 2 essential packages for data loading, transformation, grouping, calculations etc.
# Pandas package depends on NumPy.
# + _uuid="afeeb70a91a7c1349f7f15a5040aabd6a06d5a79"
import numpy as np
import pandas as pd
# + [markdown] _uuid="97bb071818ba064f1e5de51f22d2e6fea27a560f"
# Then we load packages to carry out explorations and actual modeling
# + _uuid="38832d19944018cabe2a5fd2a3e56b8d0ac9175d"
# Logit models (Logistic Regression)
from sklearn.linear_model import LogisticRegression
# + [markdown] _uuid="cfa0fa7e00cea762d6cd04d482304523292e3342"
# ***
# + [markdown] _uuid="6d6b4f4d573c4dd386ebbc49457a9e693ad9c92e"
# # 2. Data
# ## 2.1 Data loading and transformation
#
# Load the data:
# + _uuid="3c5aefd81c129744e5d2f480520b256633e90b57"
# Load the Kaggle Titanic train/test CSVs (paths follow the Kaggle kernel layout).
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
# + _uuid="79ed76585d1030f95111c5d78e09d51afd4d32ed"
#tests2
#check the first rows of train data set (to check if the import was Ok)
train.head()
# + _uuid="cb1b1b8ecc807c8a6e426bcc17106c681c640993"
#check the first rows of test data set (to check if the import was Ok)
test.head()
# + _uuid="88659212f0fb13d2842762afd69d34c9d34ef221"
#train.loc[train['Sex'] == 'male'][:3]
#train_embarked = train['Embarked']
#train.loc[train['Embarked'].isnull()]
#train.loc[train['Cabin'].isnull()]
#if train['Embarked'].
# + [markdown] _uuid="40f5c60fb2950a4b9ae4f41da6ce6a6b4eb77333"
# Reclassify all the literal data (except Name; NaN - is not literal, it is Null-value) to numeric to use it in regression models:
# + _uuid="7fef40822a01eb023897e1cd5633a7e3f44258b8"
# Encode the literal (categorical) columns as numbers for the regression models.
# BUG FIX: S, C and Q were all mapped to 0, which collapsed Embarked into a
# single constant value and made it useless as a predictor; each port now gets
# its own code (S=0, C=1, Q=2), applied identically to train and test.
# Train data set
train.loc[train['Sex'] == 'male', 'Sex'] = 1
train.loc[train['Sex'] == 'female', 'Sex'] = 0
train.loc[train['Embarked'] == 'S', 'Embarked'] = 0
train.loc[train['Embarked'] == 'C', 'Embarked'] = 1
train.loc[train['Embarked'] == 'Q', 'Embarked'] = 2
# Test data set
test.loc[test['Sex'] == 'male', 'Sex'] = 1
test.loc[test['Sex'] == 'female', 'Sex'] = 0
test.loc[test['Embarked'] == 'S', 'Embarked'] = 0
test.loc[test['Embarked'] == 'C', 'Embarked'] = 1
test.loc[test['Embarked'] == 'Q', 'Embarked'] = 2
# + [markdown] _uuid="8f221b96969aa90b9c4b418aff0e7140131bb5ad"
# ## 2.2. Data outliers
# + [markdown] _uuid="40fa72512a8af0b2d88618959f557c1a42156a17"
# ## 2.3. Missing values
# + [markdown] _uuid="1c9aec074b3f40c6e0bfa4d4173f6777f61cb27a"
# ### Exploring empty data (missing values or NaN)
# ** - Age**
#
# Check the passengers with the empty age.
# + _uuid="c4840be301dc16321a32ce017b867801fd6e72c5"
#age_nan = train.loc[train.Age.isnull()]
#age_nan[:10]
# + [markdown] _uuid="ec49b1dc6b4622092bd8f3694b2901a3bf50e0bf"
# **Cabins**
#
# Analysis of empty cabins vs passenger class
# + _uuid="0f2929ef3d69313c593cd25f8524c172d2d45ee1"
#cabin_1 = train.loc[train.Cabin.notnull()]
#cabin_1.loc[:,['Pclass', 'Cabin']]
# + [markdown] _uuid="b9015b8627d6244e4ea4061b59eca5ced6a484d9"
# Fill the missing values with our assumptions
# + _uuid="6d2c53ce54ff06445eda285aa23396774a871f98"
# Impute missing values with the assumptions above.
# Train data set
train['Age'] = train['Age'].fillna(train['Age'].median())
#train['Embarked'] = train['Embarked'].fillna(2)
# NOTE(review): Embarked was encoded numerically in an earlier cell; confirm
# that the fill value 1 corresponds to the intended category.
train['Embarked'] = train['Embarked'].fillna(1)
train['Fare'] = train['Fare'].fillna(train['Fare'].mean())
# Test data set
# Test Age is filled with the TRAIN median (avoids using test statistics).
test['Age'] = test['Age'].fillna(train['Age'].median())
#test['Embarked'] = test['Embarked'].fillna(2)
test['Embarked'] = test['Embarked'].fillna(1)
# NOTE(review): test Fare is filled with the TEST mean, unlike Age above which
# uses train statistics — consider using train['Fare'].mean() for consistency.
test['Fare'] = test['Fare'].fillna(test['Fare'].mean())
# + [markdown] _uuid="3f8a1a5194ba057ae7bfcba1534a2df4fb754197"
# ***
# + [markdown] _uuid="bb96c927c2d58da16cd39f825037f89e9d328fc7"
# # 3. Features engineering
# + [markdown] _uuid="7c6d5fa316d8b4023ccb6a4d936073bfc1739833"
# ## 3.1. Hypotheses
#
# - Possible predictors:
# * Pclass
# * Sex
# * Age
# * SibSp
# * Parch
# * Fare
# * Embarked
# - Pclass and Fare probably correlates with each other (so we shouldn't use these variables together or we will need to substitute these two variables with one derivative variable)
# - If a passenger not Embarked (NaN for Embarked) than he could survive
# - Since the catastrophe occured in the midnight than the closer Cabin to boats / exits the higher chance to survive
#
# + [markdown] _uuid="549ca0e19444a5b45c6a09384e4c4e2c8eca863c"
# ## 3.2. Hypotheses testing
# + [markdown] _uuid="adcbd86dfabe1960866c44214c2d31e2f725c921"
# ***
# + [markdown] _uuid="81745dfdcaf20f58c68737a64006d0b24a768695"
# # 4. Modeling
# + _uuid="d87c99b87487c0b48f76d1131df603aae662b8d5"
# Predictor columns; Pclass is deliberately excluded (it likely correlates
# with Fare, see the hypotheses section above).
predictors = ["Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# + _uuid="6419c03d8f7b491a5374dc5b2de394ff01eacc44"
# Fit a logistic regression on the training set and predict survival for test.
lr = LogisticRegression(random_state=1)
lr.fit(train[predictors], train["Survived"])
predictions = lr.predict(test[predictors])
#accuracy_train = round(lr.score(train[predictors], train["Survived"]) * 100, 2)
#accuracy_test = round(lr.score(test[predictors], test["Survived"]) * 100, 2)
#print(accuracy_train)
#print(accuracy_test)
# + [markdown] _uuid="2e9a77b923b0841c809bfee32f1d6b679e430efe"
# ***
# + [markdown] _uuid="ed4f314f278490622612f1e01428d4d6622f6539"
# # 5. Conclusion
# + [markdown] _uuid="065ffffd593d0f884ed065e641c56477f006471f"
# ## Submission
# + [markdown] _uuid="1008fe3ad091e1ebdd058adb160b986a608a21c7"
# We need to put our csv-file with the results into Kaggle /input directory like so:
# + _uuid="7151936789a2541afc044411acaa7aa54e1fe87c"
# Build the Kaggle submission file: one Survived prediction per PassengerId.
submission = pd.DataFrame({
        "PassengerId": test["PassengerId"],
        "Survived": predictions
    })
# index=False keeps the row index out of the CSV, as Kaggle expects.
submission.to_csv("submit.csv", index=False)
# + [markdown] _uuid="b432306265f3ccea1d7b8ddcba5a7765d3e52de0"
#
| src/logistic-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Core and Satellite
# https://www.investopedia.com/articles/financial-theory/08/core-satellite-investing.asp
# Portfolio Construction
# Managed passively
# Actively managed
# High-yield bond
# + outputHidden=false inputHidden=false
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
import warnings
warnings.filterwarnings("ignore")
# yfinance is used to fetch data
import yfinance as yf
yf.pdr_override()
# -
# S&P 500 Index Fund
# Actively Managed High-Yield Bond Fund
# Actively Managed Biotechnology Fund
# Actively Managed Commodities Fund
# + outputHidden=false inputHidden=false
# input
# Core (SPY) plus satellite holdings (high-yield bond, biotech, commodities).
symbols = ['SPY','FIHBX','FBTAX','DBC']
start = '2014-01-01'
end = '2019-01-01'
# Read data
# NOTE(review): yf.download returns the tickers as columns, not necessarily in
# the order of `symbols` — verify alignment before applying positional weights.
dataset = yf.download(symbols,start,end)['Adj Close']
# View Columns
dataset.head()
# + outputHidden=false inputHidden=false
dataset.tail()
# + outputHidden=false inputHidden=false
from datetime import datetime
def calculate_years(start, end):
    """Return the difference in calendar years between two ISO dates.

    Both arguments are 'YYYY-MM-DD' strings; only the year components are
    compared, so months and days are ignored.
    """
    fmt = "%Y-%m-%d"
    start_year = datetime.strptime(start, fmt).year
    end_year = datetime.strptime(end, fmt).year
    return end_year - start_year
# + outputHidden=false inputHidden=false
print(calculate_years(start, end), 'years')
# + outputHidden=false inputHidden=false
# Calculate Daily Returns (percentage change of adjusted close; first row is NaN)
returns = dataset.pct_change()
returns = returns.dropna()
# + outputHidden=false inputHidden=false
# Calculate mean returns
meanDailyReturns = returns.mean()
print(meanDailyReturns)
# + outputHidden=false inputHidden=false
# Calculate std returns
stdDailyReturns = returns.std()
print(stdDailyReturns)
# + outputHidden=false inputHidden=false
# Define weights for the portfolio
# NOTE(review): these weights are applied positionally to `returns`' columns;
# confirm the column order matches the intended 50/10/20/20 allocation.
weights = np.array([0.50, 0.10, 0.20, 0.20])
# + outputHidden=false inputHidden=false
# Calculate the covariance matrix on daily returns, annualized with a
# 250-trading-day factor.
cov_matrix = (returns.cov())*250
print (cov_matrix)
# + outputHidden=false inputHidden=false
# Calculate expected portfolio performance (weighted mean of daily returns)
portReturn = np.sum(meanDailyReturns*weights)
# + outputHidden=false inputHidden=false
# Print the portfolio return
print(portReturn)
# + outputHidden=false inputHidden=false
# Create portfolio returns column: dot product of each day's asset returns
# with the weight vector.
returns['Portfolio'] = returns.dot(weights)
# + outputHidden=false inputHidden=false
returns.head()
# + outputHidden=false inputHidden=false
returns.tail()
# + outputHidden=false inputHidden=false
# Calculate cumulative returns (growth of 1 unit invested at the start)
daily_cum_ret=(1+returns).cumprod()
print(daily_cum_ret.tail())
# + outputHidden=false inputHidden=false
# Distribution of daily portfolio returns.
returns['Portfolio'].hist()
plt.show()
# + outputHidden=false inputHidden=false
import matplotlib.dates
# Plot the portfolio cumulative returns only
fig, ax = plt.subplots()
ax.plot(daily_cum_ret.index, daily_cum_ret.Portfolio, color='purple', label="portfolio")
# One x-axis tick per year.
ax.xaxis.set_major_locator(matplotlib.dates.YearLocator())
plt.legend()
plt.show()
# + outputHidden=false inputHidden=false
# Print the mean
print("mean : ", returns['Portfolio'].mean()*100)
# Print the standard deviation
print("Std. dev: ", returns['Portfolio'].std()*100)
# Print the skewness
print("skew: ", returns['Portfolio'].skew())
# Print the kurtosis
print("kurt: ", returns['Portfolio'].kurtosis())
# + outputHidden=false inputHidden=false
# Portfolio standard deviation: sqrt(w^T * Cov * w).
# FIX: this was written as w^T * (w * Cov), which only gives the same number
# because the covariance matrix is symmetric; use the conventional quadratic
# form for clarity and for consistency with the variance cell below.
port_standard_dev = np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights)))
# Print the results
print(str(np.round(port_standard_dev, 4) * 100) + '%')
# + outputHidden=false inputHidden=false
# Calculate the portfolio variance: w^T * Cov * w
port_variance = np.dot(weights.T, np.dot(cov_matrix, weights))
# Print the result
print(str(np.round(port_variance, 4) * 100) + '%')
# + outputHidden=false inputHidden=false
# Calculate total return and annualized return from price data
# NOTE(review): this divides first/last DAILY RETURNS rather than prices or
# cumulative wealth — verify this is the intended definition of total return.
total_return = (returns['Portfolio'][-1] - returns['Portfolio'][0]) / returns['Portfolio'][0]
# Annualize the total return over 5 year
annualized_return = ((1+total_return)**(1/5))-1
# + outputHidden=false inputHidden=false
# Calculate annualized volatility from the standard deviation
# (scale daily std by sqrt of 250 trading days)
vol_port = returns['Portfolio'].std() * np.sqrt(250)
# + outputHidden=false inputHidden=false
# Calculate the Sharpe ratio: (return - risk-free rate) / volatility
rf = 0.01
sharpe_ratio = ((annualized_return - rf) / vol_port)
print(sharpe_ratio)
# + outputHidden=false inputHidden=false
# Create a downside return column with the negative returns only
target = 0
downside_returns = returns.loc[returns['Portfolio'] < target]
# Calculate expected return and std dev of downside
expected_return = returns['Portfolio'].mean()
# down_stdev is a Series (std of every column of the downside rows),
# so the Sortino ratio below is also a per-column Series.
down_stdev = downside_returns.std()
# Calculate the sortino ratio
rf = 0.01
sortino_ratio = (expected_return - rf)/down_stdev
# Print the results
print("Expected return: ", expected_return*100)
print('-' * 50)
print("Downside risk:")
print(down_stdev*100)
print('-' * 50)
print("Sortino ratio:")
print(sortino_ratio)
# + outputHidden=false inputHidden=false
# Calculate the max value over a rolling 252-day (1 trading year) window.
# NOTE(review): the drawdown here is computed on daily RETURNS, not on
# cumulative wealth — confirm this is intended.
roll_max = returns['Portfolio'].rolling(center=False,min_periods=1,window=252).max()
# Calculate the daily draw-down relative to the max
daily_draw_down = returns['Portfolio']/roll_max - 1.0
# Calculate the minimum (negative) daily draw-down
max_daily_draw_down = daily_draw_down.rolling(center=False,min_periods=1,window=252).min()
# Plot the results
plt.figure(figsize=(15,15))
plt.plot(returns.index, daily_draw_down, label='Daily drawdown')
plt.plot(returns.index, max_daily_draw_down, label='Maximum daily drawdown in time-window')
plt.legend()
plt.show()
# + outputHidden=false inputHidden=false
# Correlation heatmap across all return columns (including 'Portfolio').
plt.figure(figsize=(7,7))
corr = returns.corr()
# plot the heatmap
sns.heatmap(corr,
            xticklabels=corr.columns,
            yticklabels=corr.columns,
            cmap="Blues")
# + outputHidden=false inputHidden=false
# Box plot
returns.plot(kind='box')
# + outputHidden=false inputHidden=false
# Risk (std) vs. expected return scatter, one point per column.
rets = returns.dropna()
plt.scatter(rets.mean(), rets.std(),alpha = 0.5)
plt.title('Stocks Risk & Returns')
plt.xlabel('Expected returns')
plt.ylabel('Risk')
plt.grid(which='major')
for label, x, y in zip(rets.columns, rets.mean(), rets.std()):
    plt.annotate(
        label,
        xy = (x, y), xytext = (50, 50),
        textcoords = 'offset points', ha = 'right', va = 'bottom',
        arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=-0.3'))
# + outputHidden=false inputHidden=false
# Styled version of the risk/return scatter with boxed labels.
area = np.pi*20.0
sns.set(style='darkgrid')
plt.figure(figsize=(12,8))
plt.scatter(rets.mean(), rets.std(), s=area)
plt.xlabel("Expected Return", fontsize=15)
plt.ylabel("Risk", fontsize=15)
plt.title("Return vs. Risk for Core and Satellite", fontsize=20)
for label, x, y in zip(rets.columns, rets.mean(), rets.std()) :
    plt.annotate(label, xy=(x,y), xytext=(50, 0), textcoords='offset points',
                 arrowprops=dict(arrowstyle='-', connectionstyle='bar,angle=180,fraction=-0.2'),
                 bbox=dict(boxstyle="round", fc="w"))
# + outputHidden=false inputHidden=false
print("Stock returns: ")
print(rets.mean())
print('-' * 50)
print("Stock risk:")
print(rets.std())
# + outputHidden=false inputHidden=false
# Summarize per-column mean return and risk in a small table.
table = pd.DataFrame()
table['Returns'] = rets.mean()
table['Risk'] = rets.std()
table.sort_values(by='Returns')
# + outputHidden=false inputHidden=false
table.sort_values(by='Risk')
# + outputHidden=false inputHidden=false
# Per-asset (daily) Sharpe ratio with a 1% risk-free rate.
rf = 0.01
table['Sharpe_Ratio'] = (table['Returns'] - rf) / table['Risk']
table
# + outputHidden=false inputHidden=false
# NOTE(review): 52 * 5 = 260 days per year; 252 is the usual trading-day count.
days_per_year = 52 * 5
total_days_in_simulation = dataset.shape[0]
number_of_years = total_days_in_simulation / days_per_year
# + outputHidden=false inputHidden=false
# NOTE(review): exp(cumsum(...)) treats the simple daily returns as log
# returns — verify this approximation is acceptable here.
total_relative_returns = (np.exp(returns['Portfolio'].cumsum()) - 1)
total_portfolio_return = total_relative_returns[-1]
# Average portfolio return assuming compounding of returns
average_yearly_return = (1 + total_portfolio_return)**(1 / number_of_years) - 1
# + outputHidden=false inputHidden=false
print('Total portfolio return is: ' +
      '{:5.2f}'.format(100 * total_portfolio_return) + '%')
print('Average yearly return is: ' +
      '{:5.2f}'.format(100 * average_yearly_return) + '%')
| Python_Stock/Portfolio_Strategies/Core_and_Satellite.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Deploy Model
# ## Init Model
# _Note: TensorFlow only supports numeric versions._
#
# _In addition, the version you define here must be LARGER than any other version known to the TensorFlow Serving instance to which you are deploying._
# + language="bash"
#
# pio init-model \
# --model-server-url http://prediction-tensorflow.community.pipeline.io/ \
# --model-type tensorflow \
# --model-namespace default \
# --model-name tensorflow_linear \
# --model-version 1 \
# --model-path .
# -
# ## Deploy Model (CLI)
# + language="bash"
#
# pio deploy
# -
# ## TODO: Deploy Model (REST)
| jupyterhub/notebooks/tensorflow/tensorflow_linear/03_DeployModel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Grids
#
# +
# FIX: was `import matplotlib as plt`, which binds the top-level matplotlib
# package (not pyplot) to the conventional `plt` alias — any later
# `plt.plot(...)`-style call would fail with AttributeError.
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# Built-in iris dataset: 150 flowers, 4 measurements + species column.
iris = sns.load_dataset('iris')
# -
iris.head()
# Pairwise scatter matrix of all numeric columns.
g = sns.pairplot(iris)
# Same grid built manually with PairGrid, choosing a plot type per region.
g = sns.PairGrid(iris)
# NOTE(review): sns.distplot is deprecated in recent seaborn releases;
# consider histplot/displot when upgrading.
g.map_diag(sns.distplot)
g.map_upper(sns.scatterplot)
g.map_lower(sns.kdeplot)
tips = sns.load_dataset('tips')
tips.head()
# FacetGrid: one subplot per (time, smoker) combination.
t = sns.FacetGrid(data=tips, row="time", col='smoker')
t.map(sns.distplot, 'total_bill')
| Data Visualization/seaborn/Grids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from glob import glob
from nltools.data import Brain_Data, Adjacency
from nltools.mask import expand_mask, collapse_mask
import pickle
import hypertools as hyp
import numpy as np
from scipy.spatial.distance import pdist, cdist,squareform
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import KMeans
import seaborn as sns
from scipy.stats import pearsonr, spearmanr
from nltools.plotting import plotBrain
from nltools.stats import threshold
# %matplotlib inline
# Project directory layout and input files.
base_dir = '/mnt'
data_dir = os.path.join(base_dir,'data')
roi_dir = os.path.join(data_dir,'rois')
mask_dir = os.path.join(base_dir,'masks')
mni_mask = os.path.join(mask_dir,'MNI152_T1_3mm_brain_mask.nii.gz')
# First (only expected) k50 parcellation file.
rois = glob(os.path.join(mask_dir,'k50*'))[0]
social_features = pd.read_csv(os.path.join(data_dir,'social_features_with_pca.csv'))
scene_data = pd.read_excel(os.path.join(data_dir,'Sherlock_Movie_Scenes_only.xlsx'))
#scene_data_full = pd.concat([scene_data[scene_data.columns[:-1]],social_features[social_features.columns[1:]]],axis=1)
#scene_data_full.to_csv(os.path.join(data_dir,'scene_data_complexity.csv'),index=False)
# BUG FIX: read_csv was called with index=False, which is not a valid
# read_csv keyword (it belongs to to_csv) and raises a TypeError.
scene_data_full = pd.read_csv(os.path.join(data_dir,'scene_data_complexity.csv'))
# +
#Jeremy's funcs not currently used
def io_ratio(observations, group_labels):
    """Ratio of within-group to between-group mean pairwise distance.

    observations: N x D array of points; group_labels: length-N label array.
    For each unique label, the mean pairwise distance among its members and
    the mean distance from its members to all other points are accumulated;
    the function returns (sum of within-group means) / (sum of cross-group
    means). Smaller values indicate tighter, better-separated groups.
    """
    labels = np.array(group_labels)
    within_total = 0.0
    between_total = 0.0
    for group in np.unique(labels):
        members = labels == group
        if not members.any():
            continue
        inside = np.array(observations[members, :], ndmin=2)
        outside = np.array(observations[~members, :], ndmin=2)
        within_total += np.mean(pdist(inside))
        between_total += np.mean(cdist(inside, outside))
    return within_total / between_total
def dynamic_kmeans_old(observations, minK, maxK, n, statfun=io_ratio):
    '''
    Slide a window of n timepoints over the observations and, for every K in
    [minK, maxK), cluster the transposed window and compute a clustering stat.

    observations: T by V numpy array of observations
    minK, maxK: minimum and maximum values of K to use (maxK is exclusive)
    n: number of timepoints per window
    statfun: takes in an observations matrix and cluster labels; returns a clustering stat

    Returns a (T - n + 1) by (maxK - minK) array of statistics.
    '''
    stats = np.zeros([observations.shape[0] - n + 1, maxK - minK])
    for t in np.arange(stats.shape[0]):
        next_obs = observations[t:(t+n), :]
        # Clustering is done on next_obs.T (V x n), i.e. voxels/features are
        # the items being clustered; hyp.tools.cluster comes from the
        # hypertools import at the top of the file.
        stats[t, :] = np.array(list(map(lambda k: statfun(next_obs.T, hyp.tools.cluster(next_obs.T, n_clusters=k)),
                                        np.arange(minK, maxK))))
    return stats
# -
#Load in full ROI time series (one .npy per subject for ROI 0)
sub_list = sorted(glob(os.path.join(roi_dir,'*ROI_0*')))
data = np.array([np.load(sub) for sub in sub_list])
#Compute subject-spatial similarity across time in ROI in case we need it
roi_corrs = Adjacency(np.array([pdist(data[:,i,:],metric='correlation') for i in range(data.shape[1])]))
#Compute scene-level average pattern for each subject: average the TRs of each
#scene, giving one (subjects x voxels) matrix per scene.
avg_scene_response = []
for i, row in scene_data_full.iterrows():
    avg_scene_response.append(data[:,row['Start Time (TRs, 1.5s)']:row['End Time (TRs, 1.5s)']+1,:].mean(axis=1))
# BUG FIX: this was np.array(scene_data), which silently discarded the
# per-scene averages just computed and replaced them with the raw scene table.
avg_scene_response = np.array(avg_scene_response)
def dynamic_kmeans(dat,kmin,kmax):
    '''
    Compute the within-cluster:between-cluster SS ratio for a range of cluster sizes.
    Args:
        dat: obs X features matrix (e.g. subs X voxels)
        kmin: min cluster size (exclusive lower bound)
        kmax: max cluster size
    Outputs:
        gradient: gradient of the change in wss/bss ratio over k
        ratio: array of wss/bss ratios
        labels: (kmax-kmin) X obs matrix of cluster label assignments
    Note: k is iterated in DESCENDING order (kmax down to kmin+1), so row 0 of
    every output corresponds to the largest k.
    '''
    k_range = range(kmax,kmin,-1)
    # One fitted KMeans model per k.
    k_fits = [KMeans(n_clusters=k).fit(dat) for k in k_range]
    labels = np.array([k.labels_ for k in k_fits])
    centroids = [k.cluster_centers_ for k in k_fits]
    # Distance of every observation to every centroid, then to its nearest one.
    dist_to_centroid = [cdist(dat, cent, 'euclidean') for cent in centroids]
    dist = [np.min(d,axis=1) for d in dist_to_centroid]
    #Not sure if we need this
    avg_within_ss = [sum(d)/dat.shape[0] for d in dist]
    # Total with-in sum of square
    wss = [sum(d**2) for d in dist]
    # Total sum of squares from all pairwise distances.
    tss = sum(pdist(dat)**2)/dat.shape[0]
    # tss is a numpy scalar, so subtracting the wss list broadcasts to an
    # ndarray; the subsequent division likewise relies on numpy coercion.
    bss = tss-wss
    ratio = wss/bss
    #Gradient of the change in ratio as a function of k
    gradient = np.gradient(ratio)
    return gradient, ratio, labels,
#Roi data for 1 scene
# NOTE(review): the loop below indexes scenes on axis 0 ([scene,:,:]), while
# this takes [:,0,:] — verify which axis holds scenes before trusting this cell.
dat = avg_scene_response[:,0,:]
kmin, kmax = 2,16
gradient, ratio, labels = dynamic_kmeans(dat,kmin,kmax)
plt.plot(ratio,label='Ratio');
plt.plot(gradient,label='Gradient');
plt.legend();
plt.title("Scene 1 dACC ROI");
# Variance of the gradient per scene: a scalar "clustering instability" score.
kmin, kmax = 2,8
roi_gradients = []
for scene in range(avg_scene_response.shape[0]):
    gradient, _, _ = dynamic_kmeans(avg_scene_response[scene,:,:],kmin,kmax)
    grad_var = gradient.var()
    roi_gradients.append(grad_var)
roi_gradients = np.array(roi_gradients)
# +
# Overlay the (log) gradient variance and the social-complexity score per scene.
f,axs = plt.subplots(1,1,figsize=(14,6));
axs.plot(np.log(roi_gradients),color=sns.color_palette()[0],label='Log Variance of Cluster Size Gradient');
axs.plot(scene_data_full['component'].values, color = sns.color_palette()[1], label = 'Social Complexity');
axs.set(xlabel='Scene Number',ylabel='Value',title='dACC ROI');
plt.legend()
# -
#Run it for the whole brain correlating the entire scene-wise time-series
# One DataFrame of per-scene results per ROI, concatenated at the end
# (pd.concat replaces the deprecated DataFrame.append pattern).
roi_frames = []
for roi in range(50):
    print("Analyzing ROI %s" % roi)
    #Load in full ROI time series
    sub_list = sorted(glob(os.path.join(roi_dir,'*ROI_'+str(roi)+'.npy')))
    data = np.array([np.load(sub) for sub in sub_list])
    #Compute scene-level average pattern for each subject
    avg_scene_response = []
    for i, row in scene_data_full.iterrows():
        avg_scene_response.append(data[:,row['Start Time (TRs, 1.5s)']:row['End Time (TRs, 1.5s)']+1,:].mean(axis=1))
    # BUG FIX: this was np.array(scene_data), which threw away the per-scene
    # averages just computed and clustered the raw scene table instead.
    avg_scene_response = np.array(avg_scene_response)
    #Compute all scene gradients
    kmin, kmax = 2,8
    roi_gradients = []
    for scene in range(avg_scene_response.shape[0]):
        gradient, _, _ = dynamic_kmeans(avg_scene_response[scene,:,:],kmin,kmax)
        grad_var = gradient.var()
        roi_gradients.append(grad_var)
    roi_gradients = np.array(roi_gradients)
    #Compute rank correlation with the social-complexity score
    r,p = spearmanr(np.log(roi_gradients),scene_data_full['component'].values)
    # NOTE(review): 'Scene': range(1, 51) assumes exactly 50 scenes; the
    # DataFrame constructor will raise if scene_data_full has a different count.
    df = pd.DataFrame({'ROI': roi, 'Log_Gradient':np.log(roi_gradients),
                       'Social_Complexity':scene_data_full['component'].values,
                       'P-val': p,
                       'Corr': r,
                       'Scene': list(range(1,51))
                       })
    roi_frames.append(df)
whole_brain_similarity = pd.concat(roi_frames, ignore_index=True)
whole_brain_similarity.to_csv(os.path.join(data_dir,'whole_brain_similarity.csv'),index=False)
# +
# Project the per-ROI correlations back onto the brain parcellation.
parcels = Brain_Data(rois,mask=mni_mask)
expanded_parcels = expand_mask(parcels)
corr_map = expanded_parcels.copy()
corrs = whole_brain_similarity.groupby('ROI')['Corr'].mean().values
# Scale each binary parcel mask by its ROI's correlation, then sum into one map.
corr_map.data = np.array([(x.data*y) for (x,y) in zip(expanded_parcels,corrs)])
corr_map = corr_map.sum()
# Same projection for the p-values.
p_map = expanded_parcels.copy()
ps = whole_brain_similarity.groupby('ROI')['P-val'].mean().values
p_map.data = np.array([(x.data*y) for (x,y) in zip(expanded_parcels,ps)])
p_map = p_map.sum()
#Completely arbitrary threshold
thresh_map = threshold(corr_map,p_map,thr=.2)
# -
plotBrain(thresh_map)
#Try ward clustering cause we get distances for a bunch of diff clusters
linkage_mat = linkage(roi_corrs[0].data,method='ward')
dendrogram(linkage_mat,leaf_rotation=90);
# Merge distance vs. merge step, to eyeball a natural number of clusters.
_,ax = plt.subplots(1,1);
ax.plot(linkage_mat[:,3],linkage_mat[:,2]);
ax.set(ylabel='Ward Distance',xlabel='Num in Group');
labels = hyp.tools.cluster(dat,n_clusters=5)
hyp.plot(dat,'o',group=labels);
adj = Adjacency(roi_corrs)
adj.plot()
# Subject-by-subject correlation matrix for the first scene.
sns.heatmap(squareform(1-pdist(data[:,0,:],metric='correlation')))
| notebooks/ISC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import BAC0
# Connect to the BACnet network and discover devices.
bacnet = BAC0.connect()
# whois() returns a list of (address, device-id) tuples for responding devices.
a = bacnet.whois()
# a = bacnet.devices
a
# Attach to the first discovered device (device instance 100).
mycontroller = BAC0.device(a[0][0],100,bacnet)
mycontroller.properties
s = mycontroller.points
# Write 500 to analogValue 1 presentValue at priority 8 on the given address.
bacnet.write('20:0x640000000000 analogValue 1 presentValue 500 - 8')
# +
# Define your own list of (object-type, instance) pairs to poll, instead of
# letting BAC0 read the device's full object list.
my_obj_list = [('file', 1),('analogInput', 2),('analogInput', 3),('analogInput', 5),('analogInput', 4),('analogInput', 0),('analogInput', 1)]
# Provide it as an argument
fx = BAC0.device('2:5',5,bacnet, object_list = my_obj_list)
# -
# bacnet.whois()
# bacnet.devices
# fx.properties
bacnet.registered_devices
import configparser
# Read device definitions from the [Device] section of command.ini.
config = configparser.ConfigParser()
config.read('command.ini')
# NOTE: the first assignment (the raw section proxy) is immediately
# overwritten by the dict copy on the next line.
readparam = config['Device']
readparam = dict(config['Device'])
# Number of DeviceN entries in the section.
x = len(readparam)
x
# +
# Parse up to six "DeviceN = address,id" entries from the [Device] section
# into deviceN / deviceNaddress / deviceNID globals.
# NOTE(review): the six branches are identical copy-paste; a single loop that
# stores (address, id) in a list or dict keyed by index would be much shorter,
# but the deviceN* globals may be consumed by later cells, so the structure is
# documented rather than changed here.
# read=[]
device0 = 0
device1 = 0
device2 = 0
device3 = 0
device4 = 0
device5 = 0
for i in range(x):
    if i==0 :
        device0 = config['Device']['Device'+str(i)]
        # Split "address,id" into its two fields.
        device0 = device0.split(',')
        device0address = device0[0]
        device0ID = device0[1]
    if i==1 :
        device1 = config['Device']['Device'+str(i)]
        device1 = device1.split(',')
        device1address = device1[0]
        device1ID = device1[1]
    if i==2 :
        device2 = config['Device']['Device'+str(i)]
        device2 = device2.split(',')
        device2address = device2[0]
        device2ID = device2[1]
    if i==3 :
        device3 = config['Device']['Device'+str(i)]
        device3 = device3.split(',')
        device3address = device3[0]
        device3ID = device3[1]
    if i==4 :
        device4 = config['Device']['Device'+str(i)]
        device4 = device4.split(',')
        device4address = device4[0]
        device4ID = device4[1]
    if i==5 :
        device5 = config['Device']['Device'+str(i)]
        device5 = device5.split(',')
        device5address = device5[0]
        device5ID = device5[1]
    # a = config['Device']['Device'+str(i)]
    # read.append(a)
# print(read)
# -
# Number of fields parsed for Device0 (2 when "address,id" split correctly).
len(device0)
| Python/Tweaking_Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 5 Topic 1:
# # <span style=color:blue>How to read data from different text based (and non-text based) sources</span>
# ## Libraries to be installed for this Lesson
# Because this lesson deals with reading various file formats, not surprisingly, we need to have support of additional libraries and software platforms to accomplish that.
#
# Execute following commands at the beginning to install necessary libraries,
#
# # !apt-get update<br>
# # !apt-get install -y default-jdk<br>
# # !pip install tabula-py xlrd lxml
#
# Uncomment the following codes and execute them before proceeding
# +
# #!apt-get update
# #!apt-get install -y default-jdk
# #!pip install tabula-py xlrd lxml
# -
import numpy as np
import pandas as pd
# ### Exercise 1: Read data from a CSV
# pandas infers a header row and comma delimiter by default.
df1 = pd.read_csv("CSV_EX_1.csv")
df1
# ### Exercise 2: Read data from a CSV where headers are missing
# Without header=None the first data row is wrongly consumed as the header.
df2 = pd.read_csv("CSV_EX_2.csv")
df2
# header=None numbers the columns 0..n-1 instead.
df2 = pd.read_csv("CSV_EX_2.csv",header=None)
df2
# names= supplies explicit column labels.
df2 = pd.read_csv("CSV_EX_2.csv",header=None, names=['Bedroom','Sq.ft','Locality','Price($)'])
df2
# ### Exercise 3: Read data from a CSV where delimiters/separators are not comma
# First read with the default comma separator to show the mis-parse...
df3 = pd.read_csv("CSV_EX_3.csv")
df3
# ...then pass the file's actual separator.
df3 = pd.read_csv("CSV_EX_3.csv",sep=';')
df3
# ### Exercise 4: How to bypass given headers with your own?
# With names= alone the file's own header line survives as a data row...
df4 = pd.read_csv("CSV_EX_1.csv",names=['A','B','C','D'])
df4
# ...header=0 discards the original header row before applying the new names.
df4 = pd.read_csv("CSV_EX_1.csv",header=0,names=['A','B','C','D'])
df4
# ### Exercise 5: Skip initial rows
df5 = pd.read_csv("CSV_EX_skiprows.csv")
df5
# skiprows=2 ignores the first two (non-data) lines of the file.
df5 = pd.read_csv("CSV_EX_skiprows.csv",skiprows=2)
df5
# ### Exercise 6: Skip footers
df6 = pd.read_csv("CSV_EX_skipfooter.csv")
df6
# skipfooter needs the slower pure-Python parser, hence engine='python'.
df6 = pd.read_csv("CSV_EX_skipfooter.csv",skiprows=2,skipfooter=1,engine='python')
df6
# ### Exercise 7: Read only first _n_ rows (especially useful for large files)
df7 = pd.read_csv("CSV_EX_1.csv",nrows=2)
df7
# ### Exercise 8: How to combine `skiprows` and `nrows` to read data in small chunks
# List where DataFrames will be stored
list_of_dataframe = []
# Number of rows to be read in one chunk
rows_in_a_chunk = 10
# Number of chunks to be read (this many separate DataFrames will be produced)
num_chunks = 5
# Dummy DataFrame to get the column names
df_dummy = pd.read_csv("Boston_housing.csv",nrows=2)
colnames = df_dummy.columns
# Loop over the CSV file to read only specified number of rows at a time
# Note how the iterator variable i is set up inside the range
# header=0 is applied after skiprows, so each chunk consumes the line just
# before its data as a (discarded) header — the real header for i=0, the last
# row of the previous chunk afterwards — keeping the chunks contiguous.
for i in range(0,num_chunks*rows_in_a_chunk,rows_in_a_chunk):
    df = pd.read_csv("Boston_housing.csv",header=0,skiprows=i,nrows=rows_in_a_chunk,names=colnames)
    list_of_dataframe.append(df)
# Inspect the first two chunks.
list_of_dataframe[0]
list_of_dataframe[1]
# ### Exercise 9: Setting the option `skip_blank_lines`
df9 = pd.read_csv("CSV_EX_blankline.csv")
df9
# skip_blank_lines=False keeps blank rows as NaN rows instead of dropping them.
df9 = pd.read_csv("CSV_EX_blankline.csv",skip_blank_lines=False)
df9
# ### Exercise 10: Read CSV from inside a compressed (.zip/.gz/.bz2/.xz) file
# pandas infers the compression scheme from the file extension.
df10 = pd.read_csv('CSV_EX_1.zip')
df10
# ### Exercise 11: Reading from an Excel file - how to use `sheet_name`
# One read per sheet; each returns an independent DataFrame.
df11_1 = pd.read_excel("Housing_data.xlsx",sheet_name='Data_Tab_1')
df11_2 = pd.read_excel("Housing_data.xlsx",sheet_name='Data_Tab_2')
df11_3 = pd.read_excel("Housing_data.xlsx",sheet_name='Data_Tab_3')
# Compare the dimensions of the three sheets.
df11_1.shape
df11_2.shape
df11_3.shape
# ### Exercise 12: If `sheet_name` is set to `None` then an Ordered Dictionary of DataFrame is returned if the Excel file has distinct sheets
dict_df = pd.read_excel("Housing_data.xlsx",sheet_name=None)
dict_df.keys()
# ### Exercise 13: General delimited text file can be read same as a CSV
# read_table is equivalent to read_csv with sep='\t' (tab) as the default.
df13 = pd.read_table("Table_EX_1.txt")
df13
df13 = pd.read_table("Table_EX_1.txt",sep=',')
df13
df13 = pd.read_table("Table_tab_separated.txt")
df13
# ### Exercise 14: Read HTML tables directly from an URL
# read_html returns a list of DataFrames, one per <table> found on the page.
url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
list_of_df = pd.read_html(url)
df14 = list_of_df[0]
df14.head()
# ### Exercise 15: Mostly, `read_html` returns more than one table and further wrangling is needed to get the desired data
list_of_df = pd.read_html("https://en.wikipedia.org/wiki/2016_Summer_Olympics_medal_table",header=0)
len(list_of_df)
# Inspect each table's shape to locate the medal table among them.
for t in list_of_df:
    print(t.shape)
df15=list_of_df[1]
df15.head()
# ### Exercise 16: Read in a JSON file
df16 = pd.read_json("movies.json")
df16.head()
# Boolean-mask lookup of a single film's cast list.
df16[df16['title']=="The Avengers"]['cast']
# Combine two conditions with & (each wrapped in parentheses).
cast_of_avengers=df16[(df16['title']=="The Avengers") & (df16['year']==2012)]['cast']
print(list(cast_of_avengers))
# ### Exercise 17: Read Stata file (.dta)
df17 = pd.read_stata("wu-data.dta")
df17.head()
# ### Exercise 18: Read tabular data from PDF file
from tabula import read_pdf
# Read one page at a time; header=None because the PDF table has no header row.
df18_1 = read_pdf('Housing_data.pdf',pages=[1],pandas_options={'header':None})
df18_1
df18_2 = read_pdf('Housing_data.pdf',pages=[2],pandas_options={'header':None})
df18_2
# The two pages hold different columns of the same rows: concatenate side by side.
df18=pd.concat([df18_1,df18_2],axis=1)
df18
# #### With PDF extraction, most of the time, headers will be difficult to extract automatically. You have to pass on the list of headers as the `names` argument in the `read_pdf` function as `pandas_options`,
names=['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','PRICE']
# Page 1 carries the first 10 columns, page 2 the remaining 4.
df18_1 = read_pdf('Housing_data.pdf',pages=[1],pandas_options={'header':None,'names':names[:10]})
df18_2 = read_pdf('Housing_data.pdf',pages=[2],pandas_options={'header':None,'names':names[10:]})
df18=pd.concat([df18_1,df18_2],axis=1)
df18
| Lesson05/Exercise60-68/Reading Data From Sources.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#source: https://github.com/llSourcell/linear_regression_live/blob/master/demo.py
def compute_error_for_line_given_points(b, m, coordinates):
    """
    Return the mean squared error of a line against a set of points.

    For the line y = m*x + b (m: slope, b: y-intercept) and *coordinates*,
    a list of two-element [x, y] pairs, compute the average of the squared
    vertical discrepancies between each point's y value and the line's
    prediction.

    Raises ZeroDivisionError if *coordinates* is empty (unchanged from the
    original behaviour).
    """
    totalError = 0  # running sum of squared discrepancies
    # Unpack each pair directly instead of indexing via range(len(...)).
    for x, y in coordinates:
        # Squared discrepancy between the actual y and the predicted y.
        totalError += (y - (m * x + b)) ** 2
    # Average over the number of points.
    return totalError / float(len(coordinates))
# example
b = 1
m = 2
coordinates = [[3, 6], [6, 9], [12, 18]]
compute_error_for_line_given_points(b, m, coordinates)
# +
# Visualize the example points. The original cell imported the top-level
# matplotlib package as ``plt`` and contained invalid syntax
# (``plt.[[...]]``); the plotting interface lives in matplotlib.pyplot.
import matplotlib.pyplot as plt

xs = [point[0] for point in coordinates]
ys = [point[1] for point in coordinates]
plt.plot(xs, ys, 'o')
plt.show()
# -
# Use Python's built-in help on the module (``plt.help()`` does not exist).
help(plt)
| 1 - The_method_of_least_squares.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pachterlab/MBGBLHGP_2019/blob/master/notebooks/merge/bustools_mash_merge.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="7LrGhLCQUf6W" outputId="6d35674f-211f-4150-989d-a435f295a209" colab={"base_uri": "https://localhost:8080/", "height": 453}
# install kallisto and bustools and seqkit
# !git clone https://github.com/pachterlab/MBGBLHGP_2019.git
# !cp MBGBLHGP_2019/notebooks/merge/kallisto /usr/local/bin/
# !cp MBGBLHGP_2019/notebooks/merge/bustools /usr/local/bin/
# !wget https://github.com/shenwei356/seqkit/releases/download/v0.13.2/seqkit_linux_amd64.tar.gz && tar -xvf seqkit_linux_amd64.tar.gz && cp seqkit /usr/local/bin/
# + id="nQd5MhOVcTT_"
# !mkdir -p split full ref/split ref/full
# + id="aiWXEt32cBk7" outputId="808ec6df-c494-4d80-cc07-700da706e843" colab={"base_uri": "https://localhost:8080/", "height": 277}
# download the transcriptome
# !wget ftp://ftp.ensembl.org/pub/release-101/fasta/homo_sapiens/cdna/Homo_sapiens.GRCh38.cdna.all.fa.gz
# + id="9CuzJy04d9_R" outputId="e1c61339-8e72-435f-af2b-ca1766248db7" colab={"base_uri": "https://localhost:8080/", "height": 904}
# download fastqs
# !wget --quiet -O r1.fastq.gz https://caltech.box.com/shared/static/6boju5zerptobm51fkbq5zwmchjhhk92.gz
# !wget --quiet -O r2.fastq.gz https://caltech.box.com/shared/static/adme7zu1y8nz4ng2ph5wjbei6unvy093.gz
# + [markdown] id="o4gm7TSDgHG8"
# ## Full transcriptome
# + id="nKIa8F6TdH4A" outputId="4a6deb43-1850-4e77-9782-fd6376384e88" colab={"base_uri": "https://localhost:8080/", "height": 260}
# !kallisto index -i index.idx Homo_sapiens.GRCh38.cdna.all.fa.gz
# + id="uYxLIuCQgM8i"
# !zcat Homo_sapiens.GRCh38.cdna.all.fa.gz | awk '(NR-1)%2==0{print $1, $4}' OFS="\t"| tr -d "gene:" | tr -d ">" > ref/full/t2g.txt
# + [markdown] id="BBekqijZgKSP"
# ## Split transcriptome
# + id="8zWVcJiicWkl" outputId="0f670adb-7cf5-46f7-e2df-f56526a0cb10" colab={"base_uri": "https://localhost:8080/", "height": 208}
# !seqkit split -p 8 Homo_sapiens.GRCh38.cdna.all.fa.gz
# + id="za7Hc8KCc4SG" outputId="9091503e-2e03-4c40-ed5c-76dd9c332bdc" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !cd Homo_sapiens.GRCh38.cdna.all.fa.gz.split/ && for f in *.gz; do kallisto index -i $(echo $f | cut -d"." -f5).idx $f; done
# + id="RQVY4FElenBM"
# !mv index.idx ref/full/
# !mv Homo_sapiens.GRCh38.cdna.all.fa.gz ref/full
# !mv Homo_sapiens.GRCh38.cdna.all.fa.gz.split/ ref/split
# + [markdown] id="f_L_u_Fel1OG"
# ## Full alignment
# + id="JDAl_pTnmFyH" outputId="fcc354b9-294d-4953-ad2e-763200807380" colab={"base_uri": "https://localhost:8080/", "height": 260}
# kallisto bus on full guy
# !time kallisto bus -x 10xv2 -i ref/full/index.idx -o full/o -t 2 \
# r1.fastq.gz \
# r2.fastq.gz
# + id="61y1Q_Yne0xM" outputId="1b38e5dd-a5bf-424b-99a1-ffbec232a464" colab={"base_uri": "https://localhost:8080/", "height": 191}
# !bustools sort -o full/o/s.bus full/o/output.bus
# !bustools correct -o full/o/cs.bus -w /content/MBGBLHGP_2019/notebooks/merge/10xv2_whitelist.txt full/o/s.bus
# !bustools sort -o full/o/scs.bus full/o/cs.bus
# !mkdir full/o/count
# !bustools count -o full/o/count/count -g ref/full/t2g.txt -e full/o/matrix.ec -t full/o/transcripts.txt --genecounts -m full/o/scs.bus
# + [markdown] id="Cr1ugdwzpwvs"
# ## Split alignment
# + id="4WF6F-3Ml9Nx" outputId="11753e58-5295-43b2-816c-50937de42d14" colab={"base_uri": "https://localhost:8080/", "height": 156}
# !mkdir split/o1 split/o2 split/o3 split/o4 split/o5 split/o6 split/o7 split/o8
# + id="fIp88RvYp_lj" outputId="9827cb57-e5d2-4237-f851-e0fadcbb5e1f" colab={"background_save": true, "base_uri": "https://localhost:8080/", "height": 1000}
# !for d in ./split/o*; do kallisto bus -k -n -x 10xv2 -o $d \
# -i ref/split/Homo_sapiens.GRCh38.cdna.all.fa.gz.split/part_00$(echo $d | cut -c10).idx \
# r1.fastq.gz \
# r2.fastq.gz; done
# + id="7suFTSfBtoiF" outputId="cf0619de-1e6e-401e-a780-59063bdd4491" colab={"base_uri": "https://localhost:8080/", "height": 295}
# !for d in ./split/o*; do bustools sort --flags -o $d/flagsort.bus $d/output.bus; done
# + id="TvHVAfP6ts8h"
# !for d in ./split/o*; do mv $d/flagsort.bus $d/output.bus; done
# + id="aBKKQk-luPWC" outputId="9cf21249-7418-4ff7-8206-8837152f33ba" colab={"base_uri": "https://localhost:8080/", "height": 69}
# !bustools mash -o ./split/mashed ./split/o*
# + id="WOsw39vIurVr"
# !bustools sort --flags -o ./split/mashed/smashed.bus ./split/mashed/mashed.bus
# + id="MbagnDm5uWds" outputId="883132cf-4ba8-4944-9d86-2aa02f5f6279" colab={"base_uri": "https://localhost:8080/", "height": 104}
# !bustools merge -o ./split/mashed/merged.bus -e ./split/mashed/matrix.ec -t ./split/mashed/transcripts.txt ./split/mashed/smashed.bus
# + id="tgpRfvhPuceJ"
# !mv merged.ec ./split/mashed/
# + id="ofTcVUsvxSQ6" outputId="62921c6b-a8b1-4ebd-a722-f5c708623bbb" colab={"base_uri": "https://localhost:8080/", "height": 191}
# !bustools sort -o ./split/mashed/s.bus ./split/mashed/merged.bus
# !bustools correct -o ./split/mashed/cs.bus -w /content/MBGBLHGP_2019/notebooks/merge/10xv2_whitelist.txt ./split/mashed/s.bus
# !bustools sort -o ./split/mashed/scs.bus ./split/mashed/cs.bus
# !mkdir ./split/mashed/count
# !bustools count -o ./split/mashed/count/count -g ref/full/t2g.txt -e ./split/mashed/merged.ec -t ./split/mashed/transcripts.txt --genecounts -m ./split/mashed/scs.bus
# + [markdown] id="kEUv-PXYxoia"
# # Comparisons
# + id="Cjx0yUNByT_3" outputId="230621a7-4e5e-4072-e4f0-8894e2c097c4" colab={"base_uri": "https://localhost:8080/", "height": 121}
# !pip install -q kb-python
# + id="Un1ke1xzxoOF"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import string
from scipy import stats
from scipy.io import mmread
from collections import defaultdict
from kb_python.utils import import_matrix_as_anndata # (matrix_path, barcodes_path, genes_path)
def nd(arr):
    """Return *arr* flattened to a one-dimensional numpy array."""
    as_array = np.asarray(arr)
    return as_array.reshape(-1)
def yex(ax):
    """Add a y = x guide line to *ax* and square up the axis limits."""
    # Collect the four current limit values and take their overall extremes,
    # so both axes share one lower and one upper bound.
    corners = [*ax.get_xlim(), *ax.get_ylim()]
    lims = [min(corners), max(corners)]
    # now plot both limits against eachother
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    ax.set_aspect('equal')
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    return ax
# Global plot styling: bump the default font size.
fsize=15
plt.rcParams.update({'font.size': fsize})
# %config InlineBackend.figure_format = 'retina'
# + id="Eo2i-E0cxgt4"
# load matrices
# Count matrix produced by `bustools count` for the full-transcriptome run.
base = '/content/full/o/count'
matrix_path = os.path.join(base, 'count.mtx')
barcodes_path = os.path.join(base, 'count.barcodes.txt')
genes_path = os.path.join(base, 'count.genes.txt')
full = import_matrix_as_anndata(matrix_path, barcodes_path, genes_path)
# Sort barcodes (rows) and genes (columns) so the two AnnDatas can be aligned.
full = full[full.obs.sort_index().index]
full = full[:,full.var.sort_index().index]
# + id="mGcVHFekynRi"
# load matrices
# Same, for the 8-way split-and-merged run.
base = '/content/split/mashed/count'
matrix_path = os.path.join(base, 'count.mtx')
barcodes_path = os.path.join(base, 'count.barcodes.txt')
genes_path = os.path.join(base, 'count.genes.txt')
split = import_matrix_as_anndata(matrix_path, barcodes_path, genes_path)
split = split[split.obs.sort_index().index]
split = split[:,split.var.sort_index().index]
# + id="QssyKAGlyzxT"
# match the matrices
# Restrict both AnnDatas to the genes present in each, in identical order.
common_genes = np.intersect1d(full.var.index.values, split.var.index.values)
full = full[:,common_genes]
split = split[:,common_genes]
# + id="NNHxq8KGy73r"
# Same for the cell barcodes (rows).
common_bcs = np.intersect1d(full.obs.index.values, split.obs.index.values)
full = full[common_bcs]
split = split[common_bcs]
# + id="-TIVVjlU1Sbj" outputId="bd7259fc-46c0-42e1-c8d4-2ed2e2801a86" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Sanity check that the barcode indices line up after subsetting (False means
# they match). The original cell read
# ``False in full.obs.index.values == split.obs.index.values``, which Python
# parses as the chained comparison ``(False in a) and (a == b)`` — not the
# intended membership test on the elementwise comparison — so evaluate the
# elementwise inequality explicitly instead.
bool((full.obs.index.values != split.obs.index.values).any())
# + id="1uxJmkSjzGuE" outputId="dc01bd2d-4a5c-4cdd-9992-80e10d60ab58" colab={"base_uri": "https://localhost:8080/", "height": 52}
# Show the aligned AnnData summaries side by side.
print(full)
print(split)
# + id="UeyQFvMLzSpT"
# mask out genes with zero counts
# Keep only genes detected (count > 0) in BOTH runs, so per-gene comparisons
# are meaningful.
full_gene_mask = full.X.sum(axis=0)>0
split_gene_mask = split.X.sum(axis=0)>0
common_genes_mask = nd(np.logical_and(full_gene_mask,split_gene_mask))
full = full[:,common_genes_mask]
split = split[:,common_genes_mask]
# + [markdown] id="1XCXs97fzzNA"
# ## Plot
# + id="hIY4oCeGz90P"
# each dot is a cell
# Total UMI counts per cell for the two runs, plus their Pearson correlation.
data = (nd(full.X.sum(axis=1)), nd(split.X.sum(axis=1)))
r, p = stats.pearsonr(*data)
# + id="N9tIBO_L0HvX" outputId="8f2ebcb6-240b-4b7e-da16-977949334ebb" colab={"base_uri": "https://localhost:8080/", "height": 488}
# %%time
# Scatter of per-cell counts: full run (x) vs 8-way split run (y), log-log,
# annotated with the squared Pearson correlation.
fig, ax = plt.subplots(figsize=(7,7))
x = data[0]
y = data[1]
ax.plot(x, y, 'o', alpha=0.1, color="k", label=f"r$^2$: {r**2:,.2f}")
# NOTE(review): (0.1) is a scalar, not a tuple — this sets only the lower
# limit of each axis; (0.1, None) would state that explicitly.
kwd = {
"xscale": 'log',
"yscale": "log",
"xlabel": "Full",
"ylabel": "8-way split",
"xlim": (0.1),
"ylim": (0.1)
}
ax.set(**kwd)
# Add the y = x reference line and square the axes.
yex(ax)
ax.legend()
fig.show()
# + id="cJOHCLkcW8Yv"
| notebooks/merge/bustools_mash_merge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A Comparison of Public and Private Healthcare Systems
# *May 2016* <br />
# <NAME> <br />
# **N15659338**
# ## Abstract
# This project looks at the similarities and differences between health systems. Specifically those of the **United Kingdom** (which has a universal healthcare model) and the **United States of America** (which retains a largely privatised system). It begins with an introduction on both systems as well as an overview of global healthcare spending as a percentage of GDP before focusing on the two respective systems and their implications on: *cost, staffing and mortality*.
# ## Background
#
# ### Healthcare in the United Kingdom
# The United Kingdom relies heavily on its single-payer **NHS** (National Health Service), which is the worlds largest and oldest single-payer healthcare system. Healthcare is entirely free for all UK & EU citizens, nationals from elsewhere are provided with free emergency cover. (NHS, 2016).
#
# While the NHS is a publicly funded entity it also issues contracts to private firms in order to increase cost efficiency and in an attempt to boost competitivity (NHS, 2016). While it is considered one of the UK's proudest accomplishments it is not without flaws, many complain of long waiting times and an occasionally substandard quality of care, a quick google provides plenty of examples: e.g *"NHS problems 'at their worst since 1990s'"* (BBC, 2015a). This can be seen as an explanation as to why there is also a market for private health insurance policies that are purchased by those who believe they can get a 'better' quality of care. But again even those with these policies will often blend between both systems, commencing treatment with an NHS General Practitioner and subsequently making use of whichever system will give them their desired form of care (Pemberton, 2014).
#
# ### Healthcare in the United States
# The healthcare system of the United States is largely privatised, consumers are expected to pay once they have received almost any form of care, including visits to the Emergency Room. This can be either through an insurance plan or cash. From general doctor visits, to hospitals and ambulances big brands are visible everywhere in healthcare and it is these private companies that provide the majority of care and infrastructure (Lorenzetti, 2015). Many efforts have been made in order to make the system more accesible to those who might otherwise be unable to use it. For example, there are now state funded programmes such as **Medicare** and **Medicaid** for the elderly and those on low income (Medicare.gov, 2016). More recently the *Patient Protection and Affordable Care Act (PPACA)* has introduced stricter controls on insurance companies in order to make the entire system more accessible and affordable (ObamaCare Facts, 2016).
#
# It can be argued that through this privatised system there is an allowance for greater competition which may drive prices down and boosts efficiency. It allows patients a greater choice when choosing both their healthcare providers and insurers. There is also the argument that it creates a freer system able to experiment with new treatments and unhampered by restrictive governmental budgets and interventions.
# ## Global Public Versus Private Healthcare Spending
# Through `Pandas` and `Matplotlib` the data below was sourced from the **World Bank** in order to create three graphs to demonstrate public and private healthcare spending, both over time and as an average across time. These figures focus specifically on spending as a percentage of GDP in **Argentina, Australia, Canada, Germany, the UK & the US**. 'Western' countries were chosen specifically to provide a fair comparison.
# +
#Import pandas & matplotlib Tools
# %matplotlib inline
import pandas as pd
import pandas_datareader.data as web
from pandas_datareader import wb
import matplotlib as mpl
import matplotlib.pyplot as plt
#Download necessary data from World Bank
#Public health spending as a percentage of GDP (indicator SH.XPD.PUBL.ZS)
#(the original comments had the public/private labels swapped)
df1 = wb.download(indicator='SH.XPD.PUBL.ZS', country = ['GB', 'USA', 'CAN', 'DEU', 'AUS', 'ARG'], start=1990, end=2016)
#Private health spending as a percentage of GDP (indicator SH.XPD.PRIV.ZS)
df2 = wb.download(indicator='SH.XPD.PRIV.ZS', country = ['GB', 'USA', 'CAN', 'DEU', 'AUS', 'ARG'], start=1990, end=2016)
#Combine two dataframes together (side by side: one column per indicator)
df3 = pd.concat([df1,df2], axis = 1)
#Rename column and index names
df3.columns = ['Public Spending', 'Private Spending']
df3.index.names = ['Country', 'Year']
#Drop any NA values (positional 0 is the rows axis)
df3 = df3.dropna(0)
# -
# -
# ### Public and Private Spending Over Time
#Public spending over time
#Countries to plot, defined once and reused for both plotting and the legend
countries = ['Argentina', 'Australia', 'Canada', 'Germany', 'United Kingdom', 'United States']
#Create fig and ax objects
fig, ax = plt.subplots()
#Plot each country's public-spending series on the shared axes
for country in countries:
    df3.xs(country)['Public Spending'].plot(ax = ax)
#Set the title and axis label
ax.set_title('Public Spending Over Time', loc='left')
ax.set_ylabel('Percentage of GDP')
#Invert the X-axis, because it's flipped
plt.gca().invert_xaxis()
#Set and move legend
l1 = ax.legend(loc='best', bbox_to_anchor=(0.5, 1.05),
          ncol=3, fancybox=True, shadow=True)
#Relabel the legend entries; zip pairs each legend text with its country,
#replacing the manual counter of the original version
for text, country in zip(l1.get_texts(), countries):
    text.set_text(country)
# We can see overall that for the majority of countries there has been a generally positive trend in public spending over time. There also seems to be a peak for many of them during 2009, potentially due to the end of the **global economic recession**. Argentinian public funding however seems to be faltering.
#Private spending over time
#Countries to plot, defined once and reused for both plotting and the legend
countries = ['Argentina', 'Australia', 'Canada', 'Germany', 'United Kingdom', 'United States']
#Create fig and ax objects
fig, ax = plt.subplots()
#Plot each country's private-spending series on the shared axes
for country in countries:
    df3.xs(country)['Private Spending'].plot(ax = ax)
#Set the title and axis label
ax.set_title('Private Spending Over Time', loc='left')
ax.set_ylabel('Percentage of GDP')
#Invert the X-axis, because it's flipped
plt.gca().invert_xaxis()
#Set and move legend
l2 = ax.legend(loc='best', bbox_to_anchor=(0.5, 1.05),
          ncol=3, fancybox=True, shadow=True)
#Relabel the legend entries; zip pairs each legend text with its country,
#replacing the manual counter of the original version
for text, country in zip(l2.get_texts(), countries):
    text.set_text(country)
# On the other hand private spending as a percentage of GDP has relatively constant over the past ten years apart from in Argentina, where it seems that both public and private spending are on the downturn. While there has been some gradual increase in the UK private funding, Canada, Germany and Australia have had fairly constant levels over the past 25 years. The USA on the other hand has experienced a fairly large increase in the same period. Showing that both systems have been spending more and more on healthcare irrespective of structure.
# ### Mean Public Spending (1990-2014)
# +
#Group by mean: level 0 of the index is Country, so this averages each
#country's spending across all years
mean_plot = df3.groupby(level=0).mean()
#Create fig and ax objects
fig, ax = plt.subplots()
#Plot and set titles
#NOTE(review): the bar chart shows BOTH the public and private spending
#columns, so the title "Mean Public Spending" understates what is plotted.
mean_plot.plot(ax=ax, kind='bar', alpha=0.5)
ax.set_title('Mean Public Spending', loc='left', fontsize=14)
ax.set_xlabel('Country')
ax.set_ylabel('Percentage of GDP')
# -
# In terms of a summary mean, We can see that the private spending in the USA far outranks that in any of the other countries. It does however also have the second highest percentage of GDP spent on public funding out of the others, behind Germany. The Commonwealth Fund (2015) cites a study that this is due to both the higher cost of medication and a much greater (potentially over) use of advanced medical technology in the US. The UK has relatively high public funding, but compared to the others very low private spending.
# ## Implications Of The System's Costs on Treatment, Staffing & Mortality
# Turning solely to the UK and the USA, this section looks to examine the difference(s) *if any* in quality of care between the two systems. Namely the effect of system costs on the patients, the ability to staff said systems and mortality rates.
# ### Effect of Costs on Continuation of Treatment
# When it comes to analysing the quality of care, the ability of a patient to actually take part in that care and continue with it is essential. This is where the cost of each system to each personal user comes into play. In a single-payer system patients are reassured knowing that all their care is taken care of through tax contribution (aside from a potential £8 copay for prescriptions (NHS, 2016)). Whereas a system that albeit may be potentially more lean and efficient such as that of the United States can prevent those without sufficient funds from receiving or continuing the treatment they require.
# The following data comes from the **OECD**; unfortunately the data set is too large for `pandas_datareader` to process and so a `.csv` file of the required variables was downloaded from: http://stats.oecd.org/. The data set for costs unfortunately only had information for 2013. Aside from renaming the file all manipulation is done below:
# +
#Read in file
#NOTE(review): hard-coded absolute user path — breaks on any other machine.
costsdf = pd.read_csv('/Users/ThomasWebb/Documents/University/Third Year/Second Semester/Data Bootcamp/OECD_COSTS.csv')
#Remove extra columns, that aren't needed or blank
costsdf.drop(['COU', 'PER', 'Periods', 'IND', 'GEN', 'AGE',
'Age Group', 'VAL', 'Gender', 'Value', 'Flags', 'Flag Codes'],inplace=True,axis=1)
#Rename columns
costsdf.columns = ['Country', 'Description', 'Rate Per 100 Patients']
#Set index to country
costsdf = costsdf.set_index(['Country'])
#Append an empty-string column as a second index level — purely cosmetic,
#to suppress repeated labels when the table is displayed.
costsdf[''] = ''
costsdf.set_index('', append=True, inplace=True)
#Split the Table for Easy Viewing
#Assumes the file holds exactly four rows per country (UK first, US last).
UK_Costs = costsdf.head(4)
US_Costs = costsdf.tail(4)
#resize output display
pd.set_option('display.max_colwidth', 400)
# -
#Display each country's table in its own output cell.
UK_Costs
US_Costs
# These two tables show fairly predictable data: in a state-funded system there is much less skipping of treatments, be it consultations, tests, or taking of prescribed medication. However, in terms of waiting times for a consultancy appointment, they are actually higher in the United States than the UK; this could be due to a number of factors such as a higher demand due to a larger population. However, it is surprising considering the high number of complaints about the NHS. It would be interesting to see how this data has changed in years following 2013; I'd imagine that the US figures relating to treatment skipped would be gradually declining due to *Obamacare* and further policies aimed at making insurance and care more affordable.
# ### Staffing
# Apart from its effects on a patient's ability to pay for their treatment, the amount of wealth each system creates will affect its ability to remunerate staff. This may or may not result in a greater number of nurses, doctors, support staff etc. An assumption here is that a privatised system designed to create profit will be more attractive to work for and will result in higher staffing and by extension better patient care (i.e. more doctors and nurses will result in better treatment).
# The following data was downloaded again from OECD Stats, again it was too large to read using `datareader`. As above, all manipulation is done in the notebook apart from renaming the file. The dataset is locatable under health > Total health and social employment.
#read in file
#NOTE(review): hard-coded absolute user path — breaks on any other machine.
address = '/Users/ThomasWebb/Documents/University/Third Year/Second Semester/Data Bootcamp/OECD_Employment.csv'
employmentdf = pd.read_csv(address)
#drop columns
employmentdf.drop(["Variable", "UNIT", "COU", "Flag Codes", "Flags", "YEA", '"VAR"'], inplace = True, axis = 1)
#set index
#NOTE(review): the result of set_index is discarded (no assignment, no
#inplace=True) — the line has no effect. Downstream code happens to rely on
#Country/Year remaining ordinary columns, so "fixing" it would change behaviour.
employmentdf.set_index(["Country", "Year"])
#remove the rows for 2014 for USA as N/A in UK
#These positional row indices are fragile: they assume the exact row layout
#of this particular CSV export.
employmentdf = employmentdf.drop(employmentdf.index[[104, 34]])
#same for uk years prior to 2003 as missing in USA
employmentdf = employmentdf.drop(employmentdf.index[0:12])
employmentdf = employmentdf.drop(employmentdf.index[22:34])
employmentdf = employmentdf.drop(employmentdf.index[45:57])
# ### Mean Table
#create mean table: average each (country, measure) pair across the years
employment_mean = employmentdf.groupby(["Country", "Measure"]).mean()
#drop the Year column — its mean is meaningless after grouping
employment_mean.drop("Year", inplace = True, axis = 1)
employment_mean.columns = ["Mean (2003-2013)"]
#round(2) returns a rounded copy for display; employment_mean is unchanged
employment_mean.round(2)
# ### TimeSeries Plot
#create a dataframe with just density per 1,000 population
densitydf = employmentdf[employmentdf['Measure'] == 'Density per 1 000 population (head counts)']
#import seaborn
import seaborn as sns
#plot the time series
#NOTE(review): sns.tsplot was removed in seaborn 0.9+; sns.lineplot is the
#modern replacement if this notebook is re-run on a current environment.
sns.set()
fig, ax = plt.subplots(figsize = (6.5,4.5))
sns.tsplot(data=densitydf, time="Year", unit="Measure", condition="Country", value="Value", ax = ax)
ax.set_ylabel('Density per 1,000 Population (Head Counts)')
ax.set_title('Employment Over Time')
# The table and graph show that overall there is not a large difference over time in employment between the two systems per capita. However, this might be slightly different in more recent years as the data set only reaches as far forward as 2013. We can see US employment dipping around 2014. What is interesting to note is that the number of healthcare professionals employed dropped solely, and largely, in the UK (around 2007) which might be a side effect of the **global recession**. This might suggest that the publicly funded NHS model is more affected by the economy than a private firm. News articles from that time suggest that it was hit hard: http://www.telegraph.co.uk/news/health/news/5485814/NHS-will-face-15bn-budget-shortfall-due-to-effects-of-recession-managers-warn.html
#
# What surprises me about this data is that I expected employment to be much higher in the US due to the ability of a private industry to attract staff through higher wages (which at a glance are on average $20,000 higher according to the OECD figures on their site), however it may be possible that around 13% of total civilian employment is an optimal number for healthcare systems.
# ### Mortality
# The most crucial factor to examine between two countries and their healthcare systems is their respective mortality rates. The whole purpose of a hospital is to heal (or somewhat crudely, to try and stop us dying). There are arguments for both systems being superior in this regard, a privatised system is often less restricted with treatment options unlike a single-payer scheme where treatment is often decided at a regional/national level and gaining approval for new or experimental treatment is difficult. Whereas a publicly funded operation is more readily available and a lack of treatment costs incentivises people to seek treatment which can then in turn drop mortality rates.
# The below data is taken again from the OECD. Variables were selected and then the file was downloaded as a `.CSV` and read into pandas. I decided to focus on four types: **total deaths, circulatory related, respiratory related and leukemia**.
# +
#read in data
mortality = pd.read_csv('OECD_MORTALITY.csv')
#drop extra columns (the first column name literally includes its quotes)
mortality.drop(['"VAR"', "UNIT", "COU", "YEA", "Flag Codes", "Flags"], inplace=True, axis=1)
#create datasets for total, circulatory, respiratory & leukemia deaths
#by filtering on the exact OECD 'Variable' labels
totaldeaths = mortality[mortality['Variable'] == 'All causes of death']
circulatory_deaths = mortality[mortality['Variable'] == 'Diseases of the circulatory system']
respiratory_deaths = mortality[mortality['Variable'] == 'Diseases of the respiratory system']
leukemia_deaths = mortality[mortality['Variable'] == '-- Leukemia']
# +
#Plot all four causes of death on four sub plots, with seaborn
fig, ax = plt.subplots(4, figsize=(12, 12))
#plot settings: one tsplot per cause, each on its own axes
sns.tsplot(data=totaldeaths, time="Year", unit="Measure", condition="Country", value="Value", ax = ax[0],
           color = ['lightblue', 'limegreen'])
sns.tsplot(data=circulatory_deaths, time="Year", unit="Measure", condition="Country", value="Value", ax=ax[1],
           color = ['red','orange'])
sns.tsplot(data=respiratory_deaths, time="Year", unit="Measure", condition="Country", value="Value", ax=ax[2],
           color = ['darkgreen','green'])
sns.tsplot(data=leukemia_deaths, time="Year", unit="Measure", condition="Country", value="Value", ax=ax[3],
           color = ['purple', 'darkblue'])
#Titles and axis labels: one call per subplot. (The original chained the
#set_title/set_ylabel calls with commas, which merely built throwaway tuples.)
titles = ["All Causes of Death", "Circulatory Deaths", "Respiratory Deaths", "Leukemia Deaths"]
for axis, title in zip(ax, titles):
    axis.set_title(title)
    axis.set_ylabel('Deaths per 100,000')
fig.tight_layout()
# -
# The above graphs show how in terms of *Total Deaths* over time both countries have had reducing overall mortality. Which would be expected as medical technology and treatments are shared around the world. Interestingly a very similar trend is seen for *Circulatory Deaths* as well, suggesting treatment has really improved in the last 50 years.
#
# However both countries have very different mortality rates for Leukemia and Respiratory deaths. The UK has a much lower rate of leukemia deaths, compared to the US which conversely has a lower rate of respiratory deaths.
#
# There are many extra variables that would need to be taken into account here to draw any conclusions, but what it does show is that in terms of the ultimate metric for a health care system you cannot say that one is simply better than the other. In fact if lifestyle factors were taken into account between the two countries you would likely see a narrowing of the gaps between the lines on these graphs.
# ## Conclusion
# Both systems are well entrenched in their respective countries. The purpose of this project wasn't to specifically recommend one over the other, they both have their pro's and con's.
#
# What the above analysis does show however is that when it comes to continuation of care and its affordability, there is no question it is better to have a state-funded system. It allows cheaper medical costs and in turn creates the scenario you see above in public global spending per GDP where publicly funded systems end up spending less than the USA. The example of *<NAME>*'s hike of the price of Daraprim comes to mind as a scenario that is simply not seen in other countries (BBC, 2015b). Publicly funded systems also seem to have no problem in attracting staff, despite lower wages. However the United States seems to fair slightly better in terms of overall mortality as well as patient choice. As the cost of care becomes more affordable to the general public it might even start to outperform services such as the NHS in the other metrics such as patient continuation, but this remains something to be seen, and certainly only in the far future.
# ## Bibliography
# ### Data Sources
# • **OECD** - http://stats.oecd.org/Index.aspx?DatasetCode=HEALTH_STAT *(Accessed 5.2.15)* <br />
# -- (Quality Indicators) http://stats.oecd.org/Index.aspx?DatasetCode=HEALTH_HCQI *(Accessed 5.2.15)*<br />
# -- (Mortality) - http://stats.oecd.org/restsdmx/sdmx.ashx/GetDataStructure/HEALTH_STAT *(Accessed 5.2.15)*<br />
# • **World Bank** - http://data.worldbank.org/ *(Accessed 5.2.15)*
# ### Citations
# • **BBC (2015a)** NHS problems 'at their worst since 1990s' [online] Available at: http://www.bbc.com/news/health-32057948 [Accessed 12 Apr. 2015]. <br />
# • **BBC (2015b)** Who is <NAME> - 'the most hated man in America'? [online] Available at: http://www.bbc.com/news/world-us-canada-34331761 [Accessed 30 Apr. 2015]. <br />
# • **Commonwealth Fund, The (2015)** US Spends More on Health Care Than Other High-Income Nations But Has Lower Life Expectancy, Worse Health [online] Available at: http://www.commonwealthfund.org/publications/press-releases/2015/oct/us-spends-more-on-health-care-than-other-nations [Accessed 23 Apr. 2015] <br />
# • **Lorenzetti (2015)** The Fortune 500's 10 biggest health-care companies, Fortune.com [online] Available at: http://fortune.com/2015/06/20/fortune-500-biggest-healthcare-companies/ [Accessed 12 Apr. 2015]. <br />
# • **Medicare.gov (2016) ** Medicare.gov: the official U.S. government site for Medicare [online] Available at: https://www.medicare.gov [Accessed 14 Apr. 2015]. <br />
# • **NHS (2016)** The NHS history [online] Available at: http://www.nhs.uk/NHSEngland/thenhs/nhshistory/Pages/the-nhs%20history.aspx [Accessed 11 Apr. 2016]. <br />
# • **ObamaCare Facts (2015) ** Affordable Care Act Summary [online] Available at: http://obamacarefacts.com [Accessed 14 Apr. 2015].<br />
# • **Pemberton (2014)** 'Superior' private health is a myth, The Telegraph [online] Available at: http://www.telegraph.co.uk/news/nhs/11053982/Superior-private-health-is-a-myth.html [Accessed 12 Apr. 2015]. <br />
| UG_S16/Webb-HealthcareSystems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import unittest
import numpy as np
import pandas as pd
import numpy.testing as np_testing
import pandas.testing as pd_testing
import os
import import_ipynb
class Test(unittest.TestCase):
def _dirname_if_file(self, filename):
if os.path.isdir(filename):
return filename
else:
return os.path.dirname(os.path.abspath(filename))
def setUp(self):
import Exercise1_03_codeExtension.ipynb
self.exercise = Exercise1_03_codeExtension
self.tips = pd.read_csv('tips.csv')
def test_input_frames(self):
pd_testing.assert_frame_equal(self.exercise.tips, self.tips)
if __name__ == '__main__':
    # argv[0] is consumed as the program name, and exit=False stops unittest
    # from calling sys.exit(), which would otherwise kill the Jupyter kernel.
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
| Chapter01/tests/Exercise1_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# Load the per-neighborhood percentages and draw a grouped bar chart,
# one group of bars per neighborhood.
neighbors = pd.read_csv('assets/neighborhoods.csv')
fig, ax = plt.subplots(figsize=(10,5))
neighbors = neighbors.set_index('Neighborhoods')
neighbors.plot.bar(rot = 0, ax=ax)
ax.set_title("Top Neighborhoods with Highest Percent of Hispanic vs. White Population", size=16)
ax.set_ylabel('Percent')
# Save to PNG; done before any plt.show() so the figure is still populated.
plt.savefig('assets/neighborhood_percents.png')
# -
# NOTE(review): empty path -- pd.read_csv('') raises; this checkpoint cell looks
# unfinished.  TODO: fill in the path to the stops CSV.
stops = pd.read_csv('')
| .ipynb_checkpoints/plotting-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## emacs的学习
# [emacs中文网](http://emacser.com/windows-emacs-home.htm)
# [国内镜像推荐链接](https://github.com/aborn/popkit-elpa)
#
# [推荐的内容](https://github.com/emacs-tw/awesome-emacs#javascript)
#
# [EMACS爱好者的博客](http://maskray.me/blog/2015-09-18-conversion-to-emacs)
#
# [课程书籍](http://book.emacs-china.org/#orgheadline11)
# Embed a YouTube playlist of Emacs tutorial videos inline in the notebook.
from IPython.display import HTML
HTML('<iframe width="800" height="800" src="https://www.youtube.com/embed/QKhS9EX9qd8?list=PLqQgufb9_uJeSg3ChhgcDHD9C5MbZSCal" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')
# `f1 t ` -- emacs教程:
#
# 注意! 现在centos下的`help` 都变成了`f1`!
#
#
# C - `ctrl + str` is a command
#
# M `Alt + str` is a command
#
# `ctrl b` - 往前移动光标
#
# `ctrl f` - 往后移动光标
#
# `ctrl a` - 到开头
#
# `ctrl e` - 到结尾
#
# `ctrl n` - 到下一行
#
# `ctrl p` - 到上一行
#
# `ctrl k` - 删除
#
# `ctrl x u` - 撤销修改
#
# `ctrl g` - 取消任何命令
#
# `C-x b` - switch to buffer
#
# `ctrl x ctrl f` - 打开文件
#
# `M-x` - `Alt x linum-mode` - 如果忘记了,tab键补全.
#
# `ctrl x ctrl s` - 保存文件
#
# `f1 k ` - 查询命令,比如查询保存文件的命令 `f1 k ctrl-x ctrl-s`,有`-`代表一起按住.
#
# `f1 v` - 查询变量
#
# `C-` - 取消上一步的操作
#
# [学习链接](https://learnxinyminutes.com/)
#
# [具体链接](https://learnxinyminutes.com/docs/elisp/)
# 通过buffer切换到`scratch`这个buffer.来练习`elisp`.
#
# `ctrl x ctrl e` 出计算结果
#
# 按 `f1 m`关闭 `electric indent`.
#
# `alt x electric indent-mode`永久关闭
#
# `interactive`这是一个交互函数,光标位于下一行的末尾.按`ctrl-x ctrl-e`,
#
# 然后通过`alt m my-fun`可以直接调用`my-fun`函数了.
#
# 查询一个函数.光标在函数体内,按`f1 f`,就能知道这个函数的描述了.
#
#
# 
#
# `查找命令的意思`,点击`ctrl-h k`然后输入`ctrl-x ctrl-f`就会显示出`ctrl-x ctrl-f`是做什么的.
# ## emacs的配置文件
#
# `AppData/Roaming/.emacs.d/init.el`,如果不确定,可以利用`ctrl-x ctrl-f`使用`~`来进行查看.
# ```
# ;- turn off tool-bar
# (tool-bar-mode -1)
# ;- turn off scrool-bar
# (scroll-bar-mode -1)
#
# (electric-indent-mode -1)
# ;- turn off splash screen
# (setq inhibit-splash-screen t)
# ;- show linum-mode
# (linum-mode t)
#
# ;- define a function to quickly open your config file
# (defun open-my-file-config()
# (interactive)
# (find-file "~/.emacs.d/init.el"))
# (global-set-key (kbd "<f7>") 'open-my-file-config)
#
# ```
# ## 安装package
#
# options - manage emacs package
#
# install `company`,这个包可以补全任何东西.
#
#
# 全局开启 `alt-x`,然后输入`global-company-mode`
#
# `ctrl-h m `显示所有激活的`minor mode`.在进度条上显示的是`major mode`
#
# ### 极致部分
#
# 按`tab`键可以展开.只需要你会写提纲.
#
# ```
# * xxxx
# ** xxxx
# *** xxx
# ```
#
# 加`TODO`,按`ctrl-c ctrl-t`
#
# 如果GTD(get things done),那么再按一次`ctrl-c ctrl-t`,就会由`TODO`变为`DONE`
#
# `C-c C-s` 显示日历
#
# `C-c C-x` 表格形式展示日历
#
# `C-c a` 再点击 `a` 查看自己的完成情况
# ## 复制,粘贴
# - C-x C-x -- 选中你想选中的范围
# - C-w -- 剪切
# - C-y -- 粘贴
# - M-x --复制
# ## 关于文件的操作-Dired-mode
#
# `C-x d` --进入编辑界面
# - 直接进入文件列表,按`+`号直接创建文件夹
#
# - `+` --创建目录
# - g --刷新目录
# - C --拷贝
# - D --删除
# - R --重命名
# - d --标记删除
# - u --取消标记
# - x --执行所有的标记
#
# 添加 `(require 'dired-x)`,可以使用 `C-x C-j` 替换 `C-x d` 了.
#
# aa packages `reveal-in-osx-finder` in MACOSX
#
#
#
#
#
# ## 直接跳出文件夹的目录
# `M-x reveal-in-osx-finder`
# ## 常用命令的简单记忆
# - `C-w` 往后删除一个单词
#
# - `C-a` 到开头
#
# - `C-e` 到结尾
#
# - `C-d` 往前删除一个字符
#
# - `C-k` 删除一行
#
# - `M-w` 复制
#
# - `C-y` 粘贴
#
# - `C-w C-w` 跳转窗口
| emacs/emacs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DJCordhose/ml-workshop/blob/master/notebooks/tf2/tf-keras-api.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="r2NPAI4jZZgi"
# # TensorFlow Keras API - short intro
#
# Some plotting code from: https://github.com/margaretmz/deep-learning/blob/master/fashion_mnist_keras.ipynb
#
# + id="-J6E9tQ_VjjS" colab_type="code" colab={}
import matplotlib.pyplot as plt
# %matplotlib inline
# Global plotting defaults: large figures and readable tick labels.
dpi = 96
# dpi = 300
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (15, 8)
# mpl.rcParams["figure.dpi"] = dpi
mpl.rc('xtick', labelsize=15)
mpl.rc('ytick', labelsize=15)
# + id="BvUsR5imy8nu" colab_type="code" outputId="b8ebd916-99f8-4d7d-b1f5-539e86664d09" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Gives us a well defined version of tensorflow
try:
    # # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    # FIX: the try body contained only commented-out Colab magics, which is a
    # SyntaxError in the .py (jupytext) form of this notebook; `pass` makes it valid.
    pass
except Exception:
    pass
# + id="KOqgY6jcMlqc" colab_type="code" outputId="72a9b5b2-a293-4b11-f5ad-a3dff686cc30" colab={"base_uri": "https://localhost:8080/", "height": 34}
import tensorflow as tf
# Confirm which TensorFlow version actually loaded (expected: 2.x).
print(tf.__version__)
# + id="qqIpwfaXVMFj" colab_type="code" colab={}
# Fix the global RNG seed so weight init and sampling are reproducible.
tf.random.set_seed(42)
# + id="qzAUGONqVNn5" colab_type="code" outputId="1a8702b6-453b-4289-922a-9a809edc852c" colab={"base_uri": "https://localhost:8080/", "height": 306}
# https://cloud.google.com/blog/products/gcp/introducing-nvidia-tesla-p4-gpus-accelerating-virtual-workstations-and-ml-inference-compute-engine
# !nvidia-smi
# + id="pY_fjTEALQmc" colab_type="code" colab={}
# https://github.com/AndreasMadsen/python-lrcurve
# !pip install -q lrcurve
# + id="R-m5Tu2cLRUh" colab_type="code" colab={}
from lrcurve import KerasLearningCurve
# + [markdown] id="EC2PvYtBxbTO" colab_type="text"
# ## MNIST data set
# + id="CXcAXADBw9aF" colab_type="code" outputId="f0ceb18a-9875-4140-f4b2-48e0c3f2df13" colab={"base_uri": "https://localhost:8080/", "height": 34}
import numpy as np
from tensorflow.keras.datasets import mnist, fashion_mnist
# Image geometry shared by MNIST and Fashion-MNIST.
x_res = 28
y_res = 28
image_size = x_res * y_res
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# Scale pixel values to [0, 1] and add the single channel axis Conv2D expects.
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape(-1, x_res, y_res, 1)
x_test = x_test.reshape(-1, x_res, y_res, 1)
x_train.shape, x_test.shape
# + id="BMK-lugNVaKs" colab_type="code" colab={}
def plot_samples(x, y, n_samples=15, labels=None):
    """Show *n_samples* randomly chosen images from *x* with their labels.

    x       : array of images, shape (N, H, W) or (N, H, W, 1)
    y       : integer class labels indexable by sample position
    labels  : optional list mapping a class index to a display name
    """
    figure = plt.figure()
    # The grid is sized from n_samples (5 columns) instead of a fixed 3x5,
    # so values other than 15 no longer overflow the subplot grid.
    n_cols = 5
    n_rows = (n_samples + n_cols - 1) // n_cols
    # BUG FIX: the original drew indices from the global `x_test` even when a
    # different array was passed in; sample from the `x` argument instead.
    for i, index in enumerate(np.random.choice(x.shape[0], size=n_samples, replace=False)):
        ax = figure.add_subplot(n_rows, n_cols, i + 1, xticks=[], yticks=[])
        # np.squeeze drops a trailing channel axis, so both (H, W) and (H, W, 1)
        # images display correctly (same idiom as the prediction cell below).
        ax.imshow(np.squeeze(x[index]), cmap="binary")
        if labels:
            ax.set_title("{}".format(labels[y[index]]))
        else:
            ax.set_title("{}".format(y[index]))
# + id="AgFN0CPpVfx5" colab_type="code" outputId="6567a946-76f0-464b-ec7d-349ccbe7e860" colab={"base_uri": "https://localhost:8080/", "height": 485}
# Human-readable class names for Fashion-MNIST, indexed by label value.
fashion_mnist_labels = ["T-shirt/top",  # index 0
                        "Trouser",      # index 1
                        "Pullover",     # index 2
                        "Dress",        # index 3
                        "Coat",         # index 4
                        "Sandal",       # index 5
                        "Shirt",        # index 6
                        "Sneaker",      # index 7
                        "Bag",          # index 8
                        "Ankle boot"]   # index 9
plot_samples(x_train, y_train, labels=fashion_mnist_labels)
# + [markdown] id="XxQtgCki-BHS" colab_type="text"
# ## There are a number of different Keras APIs
# * For sequential models:
# * https://www.tensorflow.org/guide/keras/overview
# * https://www.tensorflow.org/api_docs/python/tf/keras/Sequential
# * For any kind of architecture: https://www.tensorflow.org/guide/keras/functional
# + [markdown] id="u6aMopBQ-bkW" colab_type="text"
# ### Straight forward sequential model
# + id="8TTpBQiMI6lH" colab_type="code" colab={}
dropout_rate = 0.4
# + colab_type="code" id="QgTZ47SsZZg4" colab={}
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, BatchNormalization, Flatten
from tensorflow.keras import Sequential, Model
# Same architecture, declared as a single layer list rather than repeated
# .add() calls: two conv/pool/dropout stages followed by a dense classifier.
simple_sequential_model = Sequential([
    Input(shape=(x_res, y_res, 1)),
    Conv2D(filters=64, kernel_size=4, activation='relu'),
    MaxPooling2D(),
    Dropout(rate=dropout_rate),
    Conv2D(filters=32, kernel_size=4, activation='relu'),
    MaxPooling2D(),
    Dropout(rate=dropout_rate),
    Flatten(),
    Dense(256, activation='relu'),
    BatchNormalization(),
    Dropout(rate=dropout_rate),
    Dense(10, activation='softmax'),
])
# + [markdown] id="hDKd0Z6z-g1L" colab_type="text"
# ### Structured sequential model
# + id="0SbVvm1l-Zqt" colab_type="code" colab={}
# FIX: BatchNormalization is used by `classifier` below but was missing from
# this cell's import line, so the cell was not self-contained.
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, BatchNormalization, Flatten
from tensorflow.keras import Sequential, Model
# Same architecture as the simple model, but grouped into named Sequential
# sub-models so the blocks can be reused (e.g. by the functional model).
conv_block_1 = Sequential([
    Conv2D(filters=64, kernel_size=4, activation='relu'),
    MaxPooling2D(),
    Dropout(rate=dropout_rate)
], name='conv_block_1')
conv_block_2 = Sequential([
    Conv2D(filters=32, kernel_size=4, activation='relu'),
    MaxPooling2D(),
    Dropout(rate=dropout_rate)
], name='conv_block_2')
# Flattens the conv features and maps them to 10 class probabilities.
classifier = Sequential([
    Flatten(),
    Dense(256, activation='relu'),
    BatchNormalization(),
    Dropout(rate=dropout_rate),
    Dense(10, activation='softmax')
], name='classifier')
structured_sequential_model = Sequential([
    Input(shape=(x_res, y_res, 1)),
    conv_block_1,
    conv_block_2,
    classifier
])
# + [markdown] colab_type="text" id="9OWSCosaGN7A"
# ### Model using functional API (allows for any architecture)
# + colab_type="code" id="enqNZDwrGN7G" colab={}
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras import Model
# Functional API: wire the reusable blocks defined above into an explicit graph.
# FIX: renamed the local `input` -> `inputs` so the Python builtin input() is
# not shadowed.
inputs = Input(shape=(x_res, y_res, 1))
x = conv_block_1(inputs)
x = conv_block_2(x)
output = classifier(x)
functional_model = Model(inputs, output)
# + id="p_6INrOV-Uk-" colab_type="code" outputId="07e342fa-e947-49b0-f772-bc6fd7b1e43f" colab={"base_uri": "https://localhost:8080/", "height": 289}
# Pick which of the three equivalent definitions to train below.
# model = simple_sequential_model
# model = structured_sequential_model
model = functional_model
model.summary()
# + id="f1NMWdymWqOX" colab_type="code" outputId="26c82bcc-53cf-4ad8-bc93-7767f4460dc4" colab={"base_uri": "https://localhost:8080/", "height": 422}
from tensorflow.keras.utils import plot_model
# Render the model graph with layer output shapes.
plot_model(model, show_shapes=True, dpi=dpi)
# + colab_type="code" id="CQUlOa8cZZg9" outputId="2ea7d848-d973-4cc1-9872-876ef9d60e6e" colab={"base_uri": "https://localhost:8080/", "height": 545}
# %%time
# Large batches (5000) keep epochs fast on a GPU; KerasLearningCurve draws a
# live loss/accuracy plot during training, so per-epoch console output is off.
BATCH_SIZE=5000
EPOCHS = 50
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    epochs=EPOCHS,
                    batch_size=BATCH_SIZE,
                    validation_data=(x_test, y_test),
                    callbacks=[KerasLearningCurve()],
                    verbose=0)
# + id="HlTgzH0gMVIq" colab_type="code" outputId="63beecde-3690-4537-a1a6-7727d0725591" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Final accuracy on the training split.
train_loss, train_accuracy = model.evaluate(x_train, y_train, batch_size=BATCH_SIZE)
train_accuracy
# + id="Td0SPCLtMVIt" colab_type="code" outputId="499bb134-d2be-4855-af0e-c45e85a931e6" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Final accuracy on the held-out test split.
test_loss, test_accuracy = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)
test_accuracy
# + id="0K5hmkSMOFvr" colab_type="code" outputId="b582abc5-0d89-4ce8-84d9-4e1740a28a13" colab={"base_uri": "https://localhost:8080/", "height": 502}
# Training vs. validation loss per epoch (log scale).
plt.xlabel('epochs')
plt.ylabel('loss')
plt.yscale('log')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Loss', 'Validation Loss']);
# + id="Oks2LGiSOapb" colab_type="code" outputId="113af610-9e51-4a55-b0a8-41a84073ba58" colab={"base_uri": "https://localhost:8080/", "height": 502}
# Training vs. validation accuracy per epoch.
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['Accuracy', 'Validation Accuracy']);
# + id="qu5tpdEcMVI0" colab_type="code" outputId="c45e22e5-4150-4369-f5dc-2d73c4dcfeef" colab={"base_uri": "https://localhost:8080/", "height": 485}
# Predict class probabilities for the whole test set.
y_hat = model.predict(x_test)
figure = plt.figure()
# Plot a random sample of 15 test images: predicted label with the true label
# in parentheses, titled green when correct and red when wrong.
for i, index in enumerate(np.random.choice(x_test.shape[0], size=15, replace=False)):
    ax = figure.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(x_test[index]), cmap="binary")
    predict_index = np.argmax(y_hat[index])
    true_index = y_test[index]
    ax.set_title("{} ({})".format(fashion_mnist_labels[predict_index],
                                  fashion_mnist_labels[true_index]),
                 color=("green" if predict_index == true_index else "red"))
# + id="UutMFsgiMVI4" colab_type="code" colab={}
# Persist weights + architecture in the single-file HDF5 format.
model.save('mnist-classifier.h5')
# + id="5PlpMoae8lPM" colab_type="code" outputId="7e113115-1908-40bc-bb0b-34afc9615d1f" colab={"base_uri": "https://localhost:8080/", "height": 85}
# !ls -l
# + id="EDM82FpJ8mTd" colab_type="code" colab={}
| notebooks/tf2/tf-keras-api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Visualization Systems and Database Visualization
# ### Visualization System
#
# * **Information Visualization**: Plotting data often using different coordinates than what is provided.
# * **Large Data Visualization**: How to interactively manage visually overwhelming amounts of data.
# * **Visual Analytics**: Connecting the process of data visualization with the process of decision making.
# * **Dashboard**: How to organize the visual display of information to provide only the necessary details.
# ### The Information Visualization Mantra
#
# **Schneiderman's Mantra** is good one rule of thumb to create informative visualization. There are three points:
#
# 1. **Overview First**: In most cases of big data visualization, it is a good idea to provide an overview to the audience first. We can present the overall shape of the data, or patterns that are easily grasped by a broader audience.
# <br>
# 2. **Zoom and Filter**: After looking at the overview, different audiences may be interested in different details. Providing zoom and filter features can be very useful, letting the audience explore toward a specific goal. One problem is that when we zoom into a specific area, we can lose the surrounding context. To prevent this, we may use extra features that provide **focus+context**; tools such as a zoom lens or a fisheye lens can be very helpful. A very good demo of a fisheye lens can be found [here](https://bost.ocks.org/mike/fisheye/).
# 3. **Detail on Demand**: May very helpful to provide audience supllement and annotation when user need it.
# #### Database Visualization
#
# **OLAP** (OnLine Analytical Processing) is a method to summarize data from database for analytical need. This method should produce information as fast as possible and the result should can be accessed in anytime. Information generated from OLAP may be called as report. For example, bank transaction report, corporate accounting report, user activity report, etc.
#
# **Data Cube** is a concept which widely used in OLAP in order to represent summarization in compact form. Consider a document from sales database below:
#
# ```
# Database: sales database
# Document Headers: <date, product, location, amount>
# Document values: [(8/15, coffe, Seattle, $4), (8/8/15, tea, Beijing, $3), (8/5/15, espresso, Rome, $5)]
# ```
#
# A document a above has *three dimensions* (date, product, location) defined by document keys that have ordinal values and a *measure* (amount) defined by discrete values.
# **Data Aggregation** is exhaustive method to generate summarization from database or multiple databases. In this case *measure* variable used as *pivot key*. By using pivot key, we can reduce dimension of information. For example, from (date, product, location), we can reduce a summarization of each two dimensions data: (date, product), (date, location), (product, location) that depend on value in *amount*.
# **Infoviz** (Information Visualization) is higher level operation which execute OLAP operation in lower level. This concept may be useful in communication between manager and database administrator, software developer and database API, or user and Infoviz software.
#
# Below some important Infoviz and OLAP operations in data cube:
#
# Cube operation: Slicing
# Infoviz operation: Filter Value
# Description: Reduce dimensionality by selecting a single attribute value along one of the dimensions.
# 
#
# Cube operation: Dicing
# Infoviz operation: Filter range, zoom plot area
# Description: Focus on a subcube spanning a range of values across one or more dimensions of the cube.
# 
#
# Cube operation: Roll-Up
# Infoviz operation: Aggregation
# Description: Reduce dimensionality by projecting cube along one of its axes using a summary op
#
# Cube operation: Drill Down
# Infoviz operation: Zoom fields, details on demand, dissaggregation
# Description: Increases dimensionality by expanding summaries into values, or subdivides dimensions into finer details.
# 
#
# Cube operation: Pivot
# Infoviz operation: Field selection
# Description: Rotates cube to display a different face comparing different dimensions.
# 
# ### Organizing Axes
#
# It is may be very useful to reduce high dimensional data into prejection in two dimensional space such way to revel relationship between keys. There are some projection operation that widely used:
#
# 1. **Concatenation** is projection of two or more axes into compact from such as cross table. An image below show concatenation of Quarter+Product:
# <br>
# 2. **Product** is projection of two axes in cartesian coordinate which generate combination of data. This projection produce compact data shape in one dimensional space. An image below show product of Quarter+Product:
# <br>
# 3. **Nesting** is projection similar to *product projection* but remove unnecessary combination. This projection will be useful to make sense some data projection that may not sense in product projection. An image below show nesting of Quarter+Product:
# <br>
# An image below show nesting of Quarter+Month which may be not make sense in product and concatenation projection:
# 
# ## Visualization System Design
# ### User Interface
#
# User Interfaces (UIs) is data visualization systems that used Graphical User Interfaces (GUI) to provides users intuitive capabilities in order to access and manipulate data.
# There are three importants aspect in order to design a user interface.
#
# ### System Design
#
# System design is core aspect which cost lot of investment. There are seven steps in order to design system with user interface that may executed in parallel or sequential:
#
# 1. **Requirements Specification** is initial step in which we create formulation to guide system design. It may need to conduct some research in order to figuring out any problems and user needs. Some questions that may be useful to answer are:
# * What specific problems will the system solve?
# * Who will the system serve?
# * How will a user use the system to make a decision?
# 2. **Architectural Design** is the step in which we decompose the problem and the system into components, e.g. elements of a dashboard. Architecture in this context means high-level abstraction rather than technical abstraction. In this step we may brainstorm with sub-divisions, e.g. the UI/UX team, in order to decide on the best architecture.
# 3. **Component Design** is the step in which we plan how to execute the architectural design technically. We may need to discuss with the engineering team to decide on appropriate tools, possible visualizations and interaction mechanisms for each component.
# 4. **Implementation** is step in which we actually implemented component design, such as coding or setup.
# 5. **Component Testing** is step in which we do debugging.
# 6. **User Evaluation** is step in which we do user testing to get feedback from real user and fnd any part need to be fixed. But, depend only on user's feedbak is not enough since there are will be some biases. So, consider to implement scientific method while collect data from user testing method, practical statistical method such as A/B testing is one widely used approach.
# 7. **Maintenance** is step in which we do fixes based on user evaluation result and documentation.
# Most people tend to underestimate all the steps above and focus only on the implementation. This may be caused by limited resources. If that is the case, it is a good idea to adopt a methodology appropriate to the resource constraints, such as the *lean methodology*. For example, we may use a prototype to simulate how a system might work with a user. A prototype does not have to be a complete system; it may even be a system with many bugs. By letting users try the prototype, we can get fast feedback and figure out what users are actually interested in.
#
# Another good approach to boost system design is by use models of the user, task and dialog to ensure the design will work with the user. Creating a model is advance technique and we may need expertise to do that, e.g. dev ops.
# ### User Modeling
#
# This is advance method which aimed to deduce user behavior. This method extensively implemented by commercial products that tend to use user-driven development approach rather than technical-driven. In order to create user model, we need proper data abou user which includes: familiarity, role, knowledge, and motivation. One good technique to collect user data called as **GOMS** described below:
#
# * **Goal** is what the user will hope to achieve, e.g. an informed decision.
# * **Operators** is basic user action or thought process steps.
# * **Methods** is sequence of operators used to achieve goal.
# * **Selections** is about how the user decides to use one method over another.
# ### Task Analysis
#
# * There is a concept called as **synthesizability** which try to connect system tasks to user goals.
# * If we have complex tasks, the it is good idea to decompose tasks hierarchically into subtasks.
# * Create plans out of task sequences.
# * Examine triggers for conditonal tasks.
# * Also consider waiting, cycle and multitasking.
# * Use results for dashboard layot, organization of screens.
| coursera/notes/week4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import pylab as pl
import numpy as np
import scipy.optimize as opt
import statsmodels.api as sm
from sklearn import preprocessing
# %matplotlib inline
# FIX: the stray string literal 'exec(% matplotlib inline)' was a no-op artifact
# of converting the notebook magic; replaced with the commented magic form.
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sn
import os
# NOTE(review): hard-coded user-specific path -- this cell only runs on the
# original author's machine; point it at the directory containing framingham.csv.
os.chdir('C://Users//yashr//Downloads//')
# dataset
# Load the Framingham heart-study dataset and do minimal cleaning.
disease_df = pd.read_csv('framingham.csv')
# 'education' is not used as a predictor below.
disease_df.drop(['education'], inplace = True, axis = 1)
disease_df.rename(columns ={'male':'Sex_male'}, inplace = True)
# removing NaN / NULL values row-wise
disease_df.dropna(axis = 0, inplace = True)
print(disease_df.head(), disease_df.shape)
print(disease_df.TenYearCHD.value_counts())
# counting no. of patients affected with CHD (ten-year risk label 0/1)
plt.figure(figsize = (7, 5))
sn.countplot(x ='TenYearCHD', data = disease_df,
             palette ="BuGn_r" )
plt.show()
# NOTE(review): plt.show()'s first parameter is `block`, not a plot handle --
# passing the Axes here only forces blocking behaviour; confirm intent.
laste = disease_df['TenYearCHD'].plot()
plt.show(laste)
# +
# Select the six predictors and the ten-year CHD target as numpy arrays.
X = np.asarray(disease_df[['age', 'Sex_male', 'cigsPerDay',
                           'totChol', 'sysBP', 'glucose']])
y = np.asarray(disease_df['TenYearCHD'])
# normalization of the dataset (zero mean, unit variance per feature)
X = preprocessing.StandardScaler().fit(X).transform(X)
# Train-and-Test-Split: 70/30, fixed seed for reproducibility
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size = 0.3, random_state = 4)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
# -
# # Code: Modeling of the Dataset | Evaluation and Accuracy :
# +
from sklearn.linear_model import LogisticRegression

# Fit a plain logistic regression on the standardized predictors.
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)

# Evaluation and accuracy.
# FIX: sklearn.metrics.jaccard_similarity_score was deprecated in 0.21 and
# removed in 0.23.  For 1-D label vectors it computed the fraction of matching
# labels -- exactly accuracy_score -- so the reported number is unchanged.
from sklearn.metrics import accuracy_score
print('')
print('Accuracy of the model is = ',
      accuracy_score(y_test, y_pred))
# -
# # Code: Using Confusion Matrix to find the Acuuracy of the model :
# +
# Confusion matrix: raw counts of predicted vs. actual CHD labels.
from sklearn.metrics import confusion_matrix, classification_report
cm = confusion_matrix(y_test, y_pred)
# Wrap the counts in a labelled frame so the heatmap axes are readable.
conf_matrix = pd.DataFrame(data = cm,
                           columns = ['Predicted:0', 'Predicted:1'],
                           index =['Actual:0', 'Actual:1'])
plt.figure(figsize = (8, 5))
sn.heatmap(conf_matrix, annot = True, fmt = 'd', cmap = "Greens")
plt.show()
print('The details for confusion matrix is =')
print (classification_report(y_test, y_pred))
# This code is contributed by parna_28 .
# -
| Logistic Regression for Heart Disease Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Transfer Learning</h1>
# <h6>Saket Tiwari</h6>
# Date: 03 Jul 2019
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from keras.datasets import mnist
from keras.layers import Dense, Conv2D, Activation,Flatten, Dropout, MaxPooling2D
from keras.models import Sequential
# NOTE(review): keras.utils.np_utils was removed in newer Keras releases; on
# current versions use keras.utils.to_categorical directly -- confirm the
# pinned Keras version before running.
from keras.utils import np_utils
(x_train,y_train),(x_test,y_test)= mnist.load_data()
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
# +
num_examples = 20000

# Split the first `num_examples` training digits into two tasks --
# digits 0-4 (task 1) and digits 5-9 (task 2) -- scaling pixels to [0, 1].
x1_train = []
x1_test = []
y1_train = []
y1_test = []
x2_train = []
x2_test = []
y2_train = []
y2_test = []

for image, label in zip(x_train[:num_examples], y_train[:num_examples]):
    if label < 5:
        x1_train.append(image / 255)
        y1_train.append(label)
    else:
        x2_train.append(image / 255)
        y2_train.append(label)

# The full test set is split the same way.
num_of_test_examples = y_test.shape[0]
for image, label in zip(x_test[:num_of_test_examples], y_test[:num_of_test_examples]):
    if label < 5:
        x1_test.append(image / 255)
        y1_test.append(label)
    else:
        x2_test.append(image / 255)
        y2_test.append(label)
# +
# Reshape the Python lists into 4-D arrays (N, 28, 28, 1) for the conv net.
X1_train= np.asarray(x1_train).reshape(-1,28,28,1)
X1_test= np.asarray(x1_test).reshape(-1,28,28,1)
X2_train= np.asarray(x2_train).reshape(-1,28,28,1)
X2_test= np.asarray(x2_test).reshape(-1,28,28,1)
# One-hot encode: task 1 uses 5 classes (labels 0-4); task 2 keeps the raw 5-9
# label values, so its one-hot vectors must be 10 wide.
Y1_train = np_utils.to_categorical(np.asarray(y1_train), num_classes=5)
Y1_test = np_utils.to_categorical(np.asarray(y1_test), num_classes=5)
Y2_train = np_utils.to_categorical(np.asarray(y2_train), num_classes=10)
Y2_test = np_utils.to_categorical(np.asarray(y2_test), num_classes=10)
# +
print(X1_train.shape, X1_test.shape)
print(Y1_train.shape, Y1_test.shape)
print(X2_train.shape, X2_test.shape)
print(Y2_train.shape, Y2_test.shape)
# +
# Task-1 network: three conv layers then a dense head with 5 outputs
# (digits 0-4).  The conv stack is what gets transferred to task 2 later.
model=Sequential()
model.add(Conv2D(32,5, input_shape=(28,28,1), activation='relu'))
model.add(Conv2D(16,5, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(8,3,activation='relu'))
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(128))
model.add(Activation('relu'))
#output layer: 5 classes for digits 0-4
model.add(Dense(5))
model.add(Activation('softmax'))
model.summary()
# -
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# +
# Sanity-check the wall-clock timing pattern with a 5-second sleep before
# using it to time training.
import time
import datetime
start=datetime.datetime.now()
time.sleep(5)
end=datetime.datetime.now()
print(end-start)
# +
# Train the base model on task 1 (digits 0-4) and report elapsed time.
start=datetime.datetime.now()
model.fit(X1_train,Y1_train,epochs=10,shuffle=True,batch_size=100, verbose=2, validation_data=(X1_test, Y1_test))
end=datetime.datetime.now()
print(end-start)
# -
# We do not want to retrain the convolutional base, so its layers are frozen below.
# + active=""
# model.layers
# -
# The first 6 layers are the transferable feature extractor; freezing their
# weights speeds up training of the new head.
for layer in model.layers[:6]:
    layer.trainable=False
# Verify which layers remain trainable.
for layer in model.layers:
    print(layer.trainable)
# +
# Build the transfer model: reuse the frozen feature extractor (first 6
# layers) and attach a fresh dense head with a 10-class output.
trans_model= Sequential(model.layers[:6])
trans_model.add(Dense(128))
trans_model.add(Activation('relu'))
#output (10 classes so digits 5-9 keep their original label indices)
trans_model.add(Dense(10))
trans_model.add(Activation('softmax'))
trans_model.summary()
# -
trans_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# +
# Train only the new head on task 2 (digits 5-9) and report elapsed time.
start=datetime.datetime.now()
trans_model.fit(X2_train,Y2_train,epochs=10,shuffle=True,batch_size=100, verbose=2, validation_data=(X2_test, Y2_test))
end=datetime.datetime.now()
print(end-start)
| Deep Learning/Transfer Learning/Transfer Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#ループ強化学習した結果強かったパーティの表示
# 各iterationから代表100パーティずつが対戦
# -
from pokeai.ai.party_db import col_party, col_rate, pack_obj, unpack_obj
from bson import ObjectId
import numpy as np
import matplotlib.pyplot as plt
from pokeai.util import json_load, pickle_load, ROOT_DIR, DATASET_DIR
# NOTE(review): hard-coded local experiment path and MongoDB ObjectId — this
# cell only reproduces one specific 2020-08-15 RL-loop run.
file_base_dir = r"D:\dev\pokeai\pokeai\experiment\gmm\rl\rl_loop_200815"
iter_ids = json_load(file_base_dir+"/iter_ids.json")
# Mapping of player_id -> rating for the final rating battle.
rates = col_rate.find_one({"_id": ObjectId("5f48dfb7a97c3f95899b0142")})["rates"]
def load_parties(player_ids):
    """Fetch each distinct party referenced by the given player ids.

    A player id has the form "<trainer>+<party_object_id>"; the part after
    '+' is the MongoDB id of the party document.  Returns a dict mapping
    party id -> party data.
    """
    unique_party_ids = {player_id.split('+')[1] for player_id in player_ids}
    parties = {}
    for party_id in unique_party_ids:
        document = col_party.find_one({"_id": ObjectId(party_id)})
        parties[party_id] = document["party"]
    return parties
# Resolve every party that appears in the rating table, and load the
# English-name -> Japanese-name dictionary used for display.
parties = load_parties(rates.keys())
name2jp = json_load(DATASET_DIR.joinpath("name2jp.json"))["name2jp"]
def print_party(p):
    """Format a party as a single human-readable string.

    Each Pokemon is rendered as "name,LVnn,move1,move2,...".  The original
    code had `return` inside the loop, so only the first party member was
    ever shown; all members are now included, separated by " / ".
    Returns "" for an empty party (the original returned None).
    """
    member_strings = []
    for poke in p:
        member_strings.append(",".join([name2jp[poke["species"]],
                                        "LV" + str(poke["level"]),
                                        *[name2jp[move] for move in poke["moves"]]]))
    return " / ".join(member_strings)
def get_mean_by_prefix(rates, prefix):
    """Return the mean rating over players whose id starts with `prefix`.

    `rates` maps player_id -> rating.  Returns numpy's nan when no player
    id matches (np.mean of an empty list).
    """
    matching = [rating for player_id, rating in rates.items()
                if player_id.startswith(prefix)]
    return np.mean(matching)
# For each training iteration, average the ratings of that iteration's
# players and plot mean rating vs. iteration number.
xs = []
mean_rates = []
for iter_id in iter_ids:
    xs.append(iter_id["iternum"])
    mean_rates.append(get_mean_by_prefix(rates, iter_id["trainer_id"]))
mean_rates
plt.plot(xs, mean_rates, marker="*")
plt.xlabel("iteration")
plt.ylabel("mean rates")
plt.show()
# Sort players by rating (ascending) as (rating, player_id) tuples.
rate_tuples = [(rate, player_id) for player_id, rate in rates.items()]
rate_tuples.sort()
rate_tuples
# Top 50 players (descending) with their parties.
for rate, player_id in rate_tuples[:-51:-1]:
    print(rate, player_id)
    print(print_party(parties[player_id.split("+")[1]]))
# Bottom 10 players with their parties.
for rate, player_id in rate_tuples[:10]:
    print(rate, player_id)
    print(print_party(parties[player_id.split("+")[1]]))
# Same data as markdown tables (headers are Japanese: "rating" / "party").
print("|レート|パーティ|")
print("|---|---|")
for rate, player_id in rate_tuples[:-11:-1]:
    print(f"|{int(rate)}|{print_party(parties[player_id.split('+')[1]])}|")
print("|レート|パーティ|")
print("|---|---|")
for rate, player_id in rate_tuples[:10]:
    print(f"|{int(rate)}|{print_party(parties[player_id.split('+')[1]])}|")
# Collect the lead (first) Pokemon's species and moves of every party for
# the frequency tables below.
pokes = []
for rate, player_id in rate_tuples:
    pokes.append((parties[player_id.split("+")[1]])[0]["species"])
moves = []
for rate, player_id in rate_tuples:
    moves.extend((parties[player_id.split("+")[1]])[0]["moves"])
from collections import Counter
def display_hindo(items):
    """Print a markdown frequency table of `items`, most common first.

    Table headers are Japanese ("element" / "occurrence count"); names are
    rendered through the module-level `name2jp` dictionary.
    """
    frequency = Counter(items)
    print("|要素|出現回数|")
    print("|---|---|")
    for item, count in frequency.most_common():
        print(f"|{name2jp[item]}|{count}|")
# Frequency tables for the lead Pokemon species and their moves.
display_hindo(pokes)
display_hindo(moves)
| reproduce/ipynb_202009/display_high_rate_party_loop_200815_blog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from globalConstants import Const
from dataset import Dataset
from pipeline import Pipeline
from autoEncoder import AutoEncoder
from plotter import *
from data_read import *
import numpy as np
import tensorflow as tf
np.set_printoptions(precision=3, suppress=True)
# %load_ext autoreload
# %autoreload 2
# +
dataSetType = "DW" #"DW", "ZP", or "MH"
# Validate with a real exception rather than `assert`: asserts are stripped
# when Python runs with -O, which would let a typo fall through silently.
if dataSetType not in ("DW", "ZP", "MH"):
    raise ValueError("dataSetType needs to be set to 'DW', 'ZP' or 'MH'.")
# Select the dataset-building helpers matching the chosen dataset family.
if dataSetType in ("DW", "ZP"):
    train_val_test_function = make_train_val_test_from_toy
    get_paths_function = get_toy_paths
else:  # "MH"
    train_val_test_function = make_train_val_test_from_TIS_and_TPS
    get_paths_function = get_TPS_and_TIS_paths
c = Const(dataSetType)
# -
# Load cached (pickled) train/val datasets when present; otherwise rebuild
# them from the raw data and cache them for future runs.
# NOTE(review): `pickle` is not imported explicitly in this notebook —
# presumably it arrives via one of the star imports above; confirm.
try:
    trainData = pickle.load(
        open("datasets/{}_trainData_{}.p".format(
            dataSetType, c.used_dataset_fraction), "rb"))
    valData = pickle.load(
        open("datasets/{}_valData_{}.p".format(
            dataSetType, c.used_dataset_fraction), "rb"))
except Exception:
    # Broad catch is deliberate best-effort: any load failure (missing file,
    # stale pickle, ...) falls back to regenerating the datasets.
    print("Processed dataset files not found."
          +"\nGenerating datasets from raw data.")
    trainData, valData, _ = Dataset\
        .initialize_train_val_test_datasets(
            *train_val_test_function(c))
    print("Saving datasets for future use.")
    pickle.dump(
        trainData,
        open("datasets/{}_trainData_{}.p".format(
            dataSetType, c.used_dataset_fraction), "wb"))
    pickle.dump(
        valData,
        open("datasets/{}_valData_{}.p".format(
            dataSetType, c.used_dataset_fraction), "wb"))
# Build the preprocessing pipeline from the training snapshots, then prepare
# tf datasets plus the corrected representations used by the plotters.
pipeline = Pipeline(c, trainData.snapshots)
train_ds, train_corrected_1D, train_corrected_2D = \
    pipeline.prepare_prediction_plotter(trainData)
val_ds, _, _ = \
    pipeline.prepare_prediction_plotter(valData)
# The factory returns the combined autoencoder plus its sub-models
# (two decoder branches, the shared encoder, and the two decoders).
autoencoder, autoencoder_1, autoencoder_2, encoder, decoder_1, decoder_2 = \
    AutoEncoder.make_models(c)
# Train with early stopping on validation loss (patience of 3 epochs).
history = autoencoder.fit(
    x=train_ds,
    epochs=c.epochs,
    validation_data=val_ds,
    callbacks=[tf.keras.callbacks.EarlyStopping(
        monitor="val_loss", patience=3)])
# Persist the trained weights, then reload them into freshly built models
# (also demonstrates the restore path).
AutoEncoder.store_model_weights(
    f"results/{dataSetType}_model_weights_{c.model_stamp}",
    autoencoder, autoencoder_1, autoencoder_2, encoder, decoder_1, decoder_2)
autoencoder, autoencoder_1, autoencoder_2, encoder, decoder_1, decoder_2 = \
    AutoEncoder.load_model_weights(
        f"results/{dataSetType}_model_weights_{c.model_stamp}",
        *AutoEncoder.make_models(c))
# ---- Model-generated maps from the learned representations ----
make_single_map_plot(
    DimensionalPosition(pipeline.const, 0, 1),
    pipeline=pipeline,
    method=calc_represented_map_generated,
    model=autoencoder_1,
    minmax_container=pipeline,
    representations=train_corrected_2D)
# +
make_super_map_plot(
    method=calc_represented_map_generated,
    pipeline=pipeline,
    model=autoencoder_1,
    minmax_container=pipeline,
    representations=train_corrected_2D)
make_super_scatter_plot(
    method=calc_represented_scatter_generated,
    pipeline=pipeline,
    model=autoencoder_2,
    minmax_container=pipeline,
    representations=train_corrected_1D,
    max_row_len=4)
# -
# Path projections through the latent space and input-importance diagnostics.
make_projected_path_plot(pipeline=pipeline, model=encoder)
make_projected_path_plot(pipeline=pipeline, model=autoencoder_1)
make_representative_path_plot(
    const=c,
    latent_minmax=get_projected_minimum_and_maximum(pipeline, encoder),
    reconstruction_decoder=decoder_2)
make_relative_importance_plot(encoder, c)
# ---- Ground-truth maps from the (gridded) training data ----
train_grid_snapshots, train_labels, train_weights = \
    pipeline.prepare_groundTruth(
        trainData)
# +
make_single_map_plot(
    DimensionalPosition(pipeline.const, 0, 1),
    pipeline=pipeline,
    method=calc_map_given,
    grid_snapshots=train_grid_snapshots,
    labels=train_labels,
    weights=train_weights)
make_single_map_plot(
    DimensionalPosition(pipeline.const, 0, 1),
    pipeline=pipeline,
    method=calc_map_given_configurational_density,
    grid_snapshots=train_grid_snapshots,
    weights=train_weights)
# +
make_super_map_plot(
    method=calc_map_given_configurational_density,
    pipeline=pipeline,
    grid_snapshots=train_grid_snapshots,
    weights=train_weights)
make_super_map_plot(
    method=calc_map_given,
    pipeline=pipeline,
    grid_snapshots=train_grid_snapshots,
    labels=train_labels,
    weights=train_weights)
# +
# Same maps with reference slope lines overlaid (dimensions 0/6 vs 1).
make_single_map_plot(
    DimensionalPosition(pipeline.const, 0, 1),
    pipeline=pipeline,
    method=calc_map_given,
    grid_snapshots=train_grid_snapshots,
    labels=train_labels,
    weights=train_weights,
    line_formula=calculate_slope_MCG_BigCage)
make_single_map_plot(
    DimensionalPosition(pipeline.const, 6, 1),
    pipeline=pipeline,
    method=calc_map_given,
    grid_snapshots=train_grid_snapshots,
    labels=train_labels,
    weights=train_weights,
    line_formula=calculate_slope_now_BigCage)
# +
make_single_map_plot(
    DimensionalPosition(pipeline.const, 0, 1),
    pipeline=pipeline,
    method=calc_represented_map_generated,
    model=autoencoder_1,
    minmax_container=pipeline,
    representations=train_corrected_2D,
    line_formula=calculate_slope_MCG_BigCage)
make_single_map_plot(
    DimensionalPosition(pipeline.const, 6, 1),
    pipeline=pipeline,
    method=calc_represented_map_generated,
    model=autoencoder_1,
    minmax_container=pipeline,
    representations=train_corrected_2D,
    line_formula=calculate_slope_now_BigCage)
# +
# ---- Independent shooting-point data as a second ground-truth source ----
shooting_points, shooting_labels = read_shooting_points("shooting_data.txt")
shootingData = Dataset(
    shooting_points,
    shooting_labels,
    np.ones(len(shooting_labels)),
    flag="Shooting")
shoot_grid_snapshots, shoot_labels, shoot_weights = \
    pipeline.prepare_groundTruth(shootingData)
# +
make_single_map_plot(
    DimensionalPosition(pipeline.const, 0, 1),
    pipeline=pipeline,
    method=calc_map_given,
    grid_snapshots=shoot_grid_snapshots,
    labels=shoot_labels,
    weights=shoot_weights,
    line_formula=calculate_slope_MCG_BigCage)
make_single_map_plot(
    DimensionalPosition(pipeline.const, 6, 1),
    pipeline=pipeline,
    method=calc_map_given,
    grid_snapshots=shoot_grid_snapshots,
    labels=shoot_labels,
    weights=shoot_weights,
    line_formula=calculate_slope_now_BigCage)
# -
make_super_map_plot(
    method=calc_map_given,
    pipeline=pipeline,
    grid_snapshots=shoot_grid_snapshots,
    labels=shoot_labels,
    weights=shoot_weights)
# Input-space distribution diagnostics for the training data.
_, pBs, train_grid_snapshots = pipeline.prepare_dataset_pickle(trainData)
make_input_distribution_plot(train_grid_snapshots, 5, pipeline)
make_histogram_with_broken_axis_plot(pBs, 10, (0, 500), (1000, 250000))
| NucleationModel/NPA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S2: MCMTpy Notebook
#
# In this notebook, we will show some **functions** in **MCMTpy** for Focal Mechanism Inversion Result, including the **conversion of source parameters**, **plot beachball**, **MT decompose** and **plot Hudson**.
#
# Contents:
# * Conversion of source parameters
# * Plot beachball
# * MT decompose
# * Plot Hudson
#
#
#
# More details on the descriptions of data processing, parameters can be found in the online [documentations](https://github.com/OUCyf) and our paper.
#
# `MCMTpy: A Python Package for Simultaneous Inversion of Source Location, Focal Mechanism, and Rupture Directivity. In prep for Seismological Research Letter.`
#
#
#
# <NAME>
#
# School of Earth and Space Sciences
#
# University of Science and Technology of China
#
# No.96, JinZhai Road Baohe District, Hefei, Anhui, 230026, P.R.China.
#
# June 2021
# ## Building env for MCMTpy
#
# Before running this notebook, make sure that you have created and activated the conda env made for MCMTpy. If not, you can create one using command lines below ( note that `jupyter` is installed with the command lines here in order to run this notebook).
#
# ```bash
# $ conda create -n MCMTpy python=3.8 numpy=1.16 matplotlib=3.1.1 mpi4py obspy pyasdf json5 tqdm
# $ conda activate MCMTpy
# $ pip install pyfk
# $ pip install MCMTpy
# ```
#
# Then you need to activate this notebook with the newly built MCMTpy env by invoking the jupyter with the following command line.
#
# ```bash
# $ jupyter notebook
# ```
#
# Now we can begin to load the modules needed for this practise.
# - First, let's run the cell below to import packages and the last cell with [helper functions](#helper).
# - Back from Helper Function
# <a id='helper_back'></a>
import os
import sys
import glob
import obspy
import json5
import pyasdf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from obspy import Stream
from obspy.taup import TauPyModel
from obspy.imaging.mopad_wrapper import beach
from MCMTpy import MomentTensor as MTpy
# ## 1. Conversion of Source Parameters
#
# The **MCMTpy** provides a series of scripts for source parameter calculation and conversion. The MomentTensor script refers to some theories and codes of [Obspy](https://github.com/obspy/obspy)/[MoPaD](https://github.com/geophysics/MoPaD)/Introduction to Seismology (Yongge Wan), mainly including:
#
# 1) The installation
# 2) str_dip_rake to mt
# 3) The conversion between str_dip_rake and A/N vector
# 4) The conversion between A/N vector and P/T/N vector
# 5) mt to P/T/N vector
# 6) P/T/N vector to P/T/N vector's stirke and dip
# 7) Describe_fault_plane with two str_dip_rake
#
# We do not recommend using relative paths because of the possibility of errors. Absolute paths are preferred.
# ### (a). The installation
# +
# Each cell below first echoes the helper's docstring, then runs a small
# worked example with concrete numbers.
# import MomentTensor
from MCMTpy import MomentTensor as MTpy
# get __doc__
MTpy.__doc__.strip().split("\n")
# -
# ### (b). str_dip_rake to mt
# +
# function: str_dip_rake2MT
MTpy.str_dip_rake2MT.__doc__.strip().split("\n")
# +
# input: fault plane angles in degrees
strike = 50
dip = 50
rake = 100
A = MTpy.str_dip_rake2MT(strike,dip,rake)
A.mt
# -
# ### (c). The conversion between str_dip_rake and A/N vector
# +
# function: str_dip_rake2AN
MTpy.str_dip_rake2AN.__doc__.strip().split("\n")
# +
# str_dip_rake to A/N vector
# input
strike = 50
dip = 50
rake = 100
A,N = MTpy.str_dip_rake2AN(strike,dip,rake)
print('A = ', A)
print('N = ', N)
# +
# function AN2str_dip_rake
MTpy.AN2str_dip_rake.__doc__.strip().split("\n")
# +
# A/N vector to str_dip_rake (inverse of the previous cell)
# input
A = np.array([0.37330426, -0.53992106, -0.75440651])
N = np.array([-0.58682409, 0.49240388, -0.64278761])
DD = MTpy.AN2str_dip_rake(A,N)
print('strike = ', DD.strike)
print('dip = ', DD.dip)
print('rake = ', DD.rake)
# -
# ### (d). The conversion between A/N vector and P/T/N vector
# +
# function AN2TPN
MTpy.AN2TPN.__doc__.strip().split("\n")
# +
# Calculate the T-axis, P-axis and N-axis according to the slip vector (A) and fault plane direction vector (N)
# input
A = np.array([0.37330426, -0.53992106, -0.75440651])
N = np.array([-0.58682409, 0.49240388, -0.64278761])
T,P,Null = MTpy.AN2TPN(A,N)
print('T = ', T)
print('P = ', P)
print('Null = ', Null)
# +
# function TP2AN
MTpy.TP2AN.__doc__.strip().split("\n")
# +
# Calculate the slip vector (A) and fault plane direction vector (N) according to the T-axis and P-axis
# input
T = np.array([-0.15098132, -0.03359972, -0.98796544])
P = np.array([0.67891327, -0.72996397, -0.07892648])
A,N = MTpy.TP2AN(T,P)
print('A = ', A)
print('N = ', N)
# -
# ### (e). mt (in NED system) to P/T/N vector
# +
# function MT2TPN
MTpy.MT2TPN.__doc__.strip().split("\n")
# +
# input
strike = 50
dip = 50
rake = 100
A = MTpy.str_dip_rake2MT(strike,dip,rake)
T, P, Null = MTpy.MT2TPN(A)
print('T = ', T)
print('P = ', P)
print('Null = ', Null)
# -
# ### (f). P/T/N vector to P/T/N vector's strike and dip
# +
# function vector2str_dip
MTpy.vector2str_dip.__doc__.strip().split("\n")
# +
# input
A = np.array([0.37330426, -0.53992106, -0.75440651])
CC = MTpy.vector2str_dip(A)
print('A.strike = ', CC.strike)
print('A.dip = ', CC.dip)
# -
# ### (g). Describe_fault_plane with two str_dip_rake
# +
# function describe_fault_plane
MTpy.describe_fault_plane.__doc__.strip().split("\n")
# +
# input: returns both conjugate fault-plane solutions of the tensor
strike = 50
dip = 50
rake = 100
A = MTpy.str_dip_rake2MT(strike,dip,rake)
CC = MTpy.describe_fault_plane(A.mt)
print('FM_1 = ', CC[0,:])
print('FM_2 = ', CC[1,:])
# -
# ## 2. Plot beachball
#
# - The **MCMTpy** provides a series of scripts to plot results.
# +
## 0. set path
# NOTE(review): hard-coded local example path — adjust for your machine.
example_path = '/Users/yf/3.Project/8.MCMTpy/MCMTpy-master/data/example_yunnan'
FM_path=os.path.join(example_path,"YN.202105212148_Inv/dc_inv/Output_YN.202105212148_dc/rank_0_output/chain_0_FM_accept_all")
allfiles_path = os.path.join(example_path,'YN.202105212148_Inv/YN.202105212148_raw/*.SAC')
## 1. read FM
N=3 # Three parameters are required to describe the focal mechanism
FM_all = np.loadtxt(FM_path)
FM_raw = FM_all[0:,1:N+1] # Define the number of solutions you want to plot
strike_np = FM_raw[:,0]
dip_np = FM_raw[:,1]
rake_np = FM_raw[:,2]
FM = np.vstack((strike_np, dip_np, rake_np)).T
# Mean focal mechanism (column-wise mean of all accepted samples).
FM_mean=np.zeros(shape=(N))
for i in range(0,N,1):
    FM_mean[i]=np.mean(FM[0:,i])
## 2.read raw data
data = read_data(allfiles_path)
data.filter('bandpass', freqmin=0.005, freqmax=0.5, corners=4, zerophase=True)
## 3.ray trace with taup
model_path = os.path.join(example_path,"v_model/v_model.npz")
model = TauPyModel(model=model_path) # "iasp91" "prem"
# Store P/S incident angles in the free SAC user fields of each trace.
for i in range(0,len(data),1):
    depth = data[i].stats.sac['evdp']
    distance = data[i].stats.sac['dist']
    ray_p,tp,angle_p,ray_s,ts,angle_s = get_taup_tp_ts(model,depth,distance,degree=False)
    data[i].stats.sac["user1"]=angle_p
    data[i].stats.sac["user2"]=angle_s
## 4.1 plot FM_mean (filled green ball for the mean solution)
ax0 = plt.gca()
Length_Ball = 100
beach1 = beach(FM_mean, xy=(50, 50), linewidth=1,width=Length_Ball-1, alpha=1,\
               facecolor='g',bgcolor='w', edgecolor='k',mopad_basis='NED',nofill=False,zorder=1 )
ax0.add_collection(beach1)
ax0.set_aspect("equal")
## 4.2 plot FM_all (unfilled orange outlines for every accepted sample)
for i in range(0,FM.shape[0],1):
    beach1 = beach(FM[i,:], xy=(50, 50), linewidth=1,width=Length_Ball-1, alpha=1,\
                   facecolor='b',bgcolor='w', edgecolor='orange',mopad_basis='NED',nofill=True,zorder=1 )
    ax0.add_collection(beach1)
    ax0.set_aspect("equal")
## 4.3 plot background line (redraw mean-solution nodal lines on top)
beach1 = beach(FM_mean, xy=(50, 50), linewidth=1,width=Length_Ball-1, alpha=1,\
               facecolor='w',bgcolor='w', edgecolor='k',mopad_basis='NED',nofill=True,zorder=1 )
ax0.add_collection(beach1)
ax0.set_aspect("equal")
## 5.plot station and waveform (every 3rd trace to keep the plot readable)
menthod='schmidt' # 'schmidt' 'wulff'
for i in range(0,len(data),3):
    AZM = data[i].stats.sac['az']
    TKO = data[i].stats.sac['user1']
    net_sta_name = data[i].stats.network+'_'+data[i].stats.station
    X, Y = MTpy.project_beachball(AZM, TKO, R=Length_Ball/2, menthod=menthod)
    tt=np.linspace(X, X+10, num=len(data[i].data))
    ax0.plot(X, Y, "rv", ms=10,zorder=1)
    # NOTE(review): the 5/2000000 waveform scaling is ad hoc — tune per dataset.
    ax0.plot(tt, 5*data[i].data/2000000 + Y, color='black',lw=0.2,alpha=0.6,zorder=1)
    ax0.text(X, Y,net_sta_name,horizontalalignment='right', verticalalignment='center',\
             fontsize=5, color='black',bbox = dict(facecolor = "r", alpha = 0.0),zorder=1)
## 6. plot P/T/N axis of the mean solution
MT = MTpy.str_dip_rake2MT(strike=FM_mean[0],dip=FM_mean[1],rake=FM_mean[2])
T_axis, P_axis, N_axis = MTpy.MT2TPN(MT)
T = MTpy.vector2str_dip(T_axis)
P = MTpy.vector2str_dip(P_axis)
N = MTpy.vector2str_dip(N_axis)
Tx, Ty = MTpy.project_beachball(AZM=T.strike, TKO=(90-T.dip), R=Length_Ball/2, menthod=menthod)
ax0.text(Tx,Ty,'T',horizontalalignment='center', verticalalignment='center',\
         fontsize=20, color='k',alpha=0.7,zorder=1)
Px, Py = MTpy.project_beachball(AZM=P.strike, TKO=(90-P.dip), R=Length_Ball/2, menthod=menthod)
ax0.text(Px,Py,'P',horizontalalignment='center', verticalalignment='center',\
         fontsize=20, color='k',alpha=0.7,zorder=1)
Nx, Ny = MTpy.project_beachball(AZM=N.strike, TKO=(90-N.dip), R=Length_Ball/2, menthod=menthod)
ax0.text(Nx,Ny,'N',horizontalalignment='center', verticalalignment='center',\
         fontsize=20, color='k',alpha=0.7,zorder=1)
## 7. save figure
ax0.set_xlim(0,100)
ax0.set_ylim(0,100)
ax0.set_axis_off()
figurename=os.path.join('./S2_figure/beachball.pdf')
plt.savefig(figurename,dpi=800, format="pdf")
# ## 3. MT decompose
# +
# Input may be [Mxx_np, Myy_np, Mzz_np, Mxy_np, Mxz_np, Myz_np] or [strike,dip,rake]
FM=[150,50,100]
# Decompose the moment tensor into isotropic + DC + CLVD parts.
MT = MTpy.MTensor(FM)
Dec = MTpy.Decompose(MT)
Dec.decomposition_iso_DC_CLVD() # Dec.help()
print(Dec.help())
print('\n\n*********************************\n\n')
Dec.print_self()

def _mt6(M):
    """Flatten a symmetric 3x3 tensor to (Mxx, Myy, Mzz, Mxy, Mxz, Myz)."""
    return np.array((M[0, 0], M[1, 1], M[2, 2], M[0, 1], M[0, 2], M[1, 2]))

# The original repeated the six component extractions once per part; the
# helper removes the duplication without changing the resulting arrays.
FM_iso = _mt6(Dec.M_iso)
FM_DC = _mt6(Dec.M_DC)
FM_CLVD = _mt6(Dec.M_CLVD)
# -
# Render the decomposition figure (MT = DC + CLVD + ISO) and save it as a PDF.
fig = plot_decompose(FM, FM_DC, FM_CLVD, FM_iso)
figurename=os.path.join('./S2_figure/Decompose.pdf')
plt.savefig(figurename,dpi=800, format="pdf")
# ## 4. Plot Hudson
# +
## 0. set path
# NOTE(review): hard-coded local example path — adjust for your machine.
example_path = '/Users/yf/3.Project/8.MCMTpy/MCMTpy-master/data/example_yunnan'
FM_path=os.path.join(example_path,"YN.202105212148_Inv/mt_inv/Output_YN.202105212148_mt/rank_0_output/chain_0_FM_accept_all")
allfiles_path = os.path.join(example_path,'YN.202105212148_Inv/YN.202105212148_raw/*.SAC')
## 1. read FM (6 independent moment-tensor components per accepted sample)
N=6
FM_all = np.loadtxt(FM_path)
FM_raw = FM_all[0:,1:N+1]
# NOTE(review): columns are read as Mxx, Mxy, Mxz, Myy, Myz, Mzz and then
# restacked in (Mxx, Myy, Mzz, Mxy, Mxz, Myz) order — confirm this matches
# the column order written by the inversion output.
Mxx_np = FM_raw[:,0]
Mxy_np = FM_raw[:,1]
Mxz_np = FM_raw[:,2]
Myy_np = FM_raw[:,3]
Myz_np = FM_raw[:,4]
Mzz_np = FM_raw[:,5]
FM = np.vstack((Mxx_np, Myy_np, Mzz_np, Mxy_np, Mxz_np, Myz_np)).T
## 2. plot Hudson source-type diagram and save it as a PDF
fig = plot_Hudson_points(FM)
figurename=os.path.join('./S2_figure/Hudson.pdf')
plt.savefig(figurename,dpi=800, format="pdf")
# -
# ## The end.
#
# We hope you enjoy it!
#
# Most of the core steps of MCMTpy are included here.
# # Helper function
# <a id='helper'></a>
# +
#***************************************************************
#* -----------
#* Functions
#* -----------
#***************************************************************
# The notobook needs some functions, please run it firstly:
#----------------------------------------------------#
#%% 1.read raw data
def read_data(allfiles_path):
    """Read every file matching the glob `allfiles_path` into one obspy Stream.

    Files that are missing or unreadable by obspy are reported and skipped.
    Returns a copy of the assembled stream.
    """
    stream = Stream()
    for filename in sorted(glob.glob(allfiles_path)):
        try:
            stream += obspy.read(filename)
        except Exception:
            print(filename,': no such file or obspy read error')
            continue
    return stream.copy()
#----------------------------------------------------#
#%% 2.taup ray trace
def get_taup_tp_ts(model,depth,distance,degree=None):
    """Trace the first P and S arrivals with TauP for one source-receiver pair.

    Parameters
    ----------
    model : object exposing get_travel_times() (e.g. obspy.taup.TauPyModel).
    depth : source depth in km.
    distance : epicentral distance in degrees, or in km when degree=False
        (converted with 111.19 km per degree).
    degree : pass False when `distance` is given in km; any other value
        (including the default None) leaves `distance` untouched.

    Returns
    -------
    (ray_p, tp, angle_p, ray_s, ts, angle_s) : ray parameter, travel time
    and incident angle of the first P and first S arrival.
    """
    if degree==False:
        distance = distance/111.19
    p_arrivals = model.get_travel_times(source_depth_in_km=depth,
                                        distance_in_degree=distance,
                                        phase_list=["p", "P"])
    s_arrivals = model.get_travel_times(source_depth_in_km=depth,
                                        distance_in_degree=distance,
                                        phase_list=["s", "S"])
    first_p, first_s = p_arrivals[0], s_arrivals[0]
    return (first_p.ray_param, first_p.time, first_p.incident_angle,
            first_s.ray_param, first_s.time, first_s.incident_angle)
#----------------------------------------------------#
#%% 3.plot decompose mt
def plot_decompose(FM,FM_DC,FM_CLVD,FM_iso):
    """Plot a moment-tensor decomposition as four beachballs: MT = DC + CLVD + ISO.

    Parameters are the full tensor and its double-couple, CLVD and isotropic
    parts, each as [Mxx, Myy, Mzz, Mxy, Mxz, Myz] (the full tensor may also
    be [strike, dip, rake]).  Percentages of each part are annotated below
    the panels.  Returns the matplotlib figure.

    The original drew the four panels with four nearly identical code
    sections; a nested helper removes the duplication.
    """
    MT = MTpy.MTensor(FM)
    Dec = MTpy.Decompose(MT)
    Dec.decomposition_iso_DC_CLVD()
    fig2, ax2 = plt.subplots(1 ,1, dpi=800)
    Length_Ball = 100

    def _draw_ball(fm, x_center, x_offset):
        """Draw one beachball centred at (x_center, 50) and label its T/P/N axes."""
        ball = beach(fm, xy=(x_center, 50), linewidth=1, width=Length_Ball-1, alpha=1,
                     facecolor='g', bgcolor='w', edgecolor='k', mopad_basis='NED',
                     nofill=False, zorder=1)
        ax2.add_collection(ball)
        ax2.set_aspect("equal")
        menthod = 'schmidt'  # 'schmidt' # 'wulff'
        T_axis, P_axis, N_axis = MTpy.MT2TPN(MTpy.MTensor(fm))
        for axis_vector, label in ((T_axis, 'T'), (P_axis, 'P'), (N_axis, 'N')):
            sd = MTpy.vector2str_dip(axis_vector)
            x, y = MTpy.project_beachball(AZM=sd.strike, TKO=(90-sd.dip),
                                          R=Length_Ball/2, menthod=menthod)
            ax2.text(x_offset + x, y, label, horizontalalignment='center',
                     verticalalignment='center', fontsize=10, color='k',
                     alpha=0.7, zorder=1)

    # Panels share y=50; T/P/N labels are shifted right by 150 per panel to
    # follow the panel centres at x = 50, 200, 350, 500.
    _draw_ball(FM, 50, 0)
    _draw_ball(FM_DC, 200, 150*1)
    _draw_ball(FM_CLVD, 350, 150*2)
    _draw_ball(FM_iso, 500, 150*3)
    # '=' and '+' between the beachballs
    ax2.text(125, 50, '=', horizontalalignment='center', verticalalignment='center',
             fontsize=20, color='k', alpha=0.7, zorder=1)
    ax2.text(275, 50, '+', horizontalalignment='center', verticalalignment='center',
             fontsize=20, color='k', alpha=0.7, zorder=1)
    ax2.text(425, 50, '+', horizontalalignment='center', verticalalignment='center',
             fontsize=20, color='k', alpha=0.7, zorder=1)
    # component labels with the decomposition percentages
    MT_text = 'MT'
    ax2.text(50, -15, MT_text, horizontalalignment='center', verticalalignment='center',
             fontsize=8, color='k', alpha=0.7, zorder=1)
    iso_text = 'ISO: '+str(round(Dec.M_iso_percentage,2))+'%'
    ax2.text(500, -15, iso_text, horizontalalignment='center', verticalalignment='center',
             fontsize=8, color='k', alpha=0.7, zorder=1)
    DC_text = 'DC: '+str(round(Dec.M_DC_percentage,2))+'%'
    ax2.text(200, -15, DC_text, horizontalalignment='center', verticalalignment='center',
             fontsize=8, color='k', alpha=0.7, zorder=1)
    CLVD_text = 'CLVD: '+str(round(Dec.M_CLVD_percentage,2))+'%'
    ax2.text(350, -15, CLVD_text, horizontalalignment='center', verticalalignment='center',
             fontsize=8, color='k', alpha=0.7, zorder=1)
    # figure cosmetics
    plt.title("Decompose")
    ax2.set_xlim(0,550)
    ax2.set_ylim(-30,100)
    ax2.set_axis_off()
    return fig2
#----------------------------------------------------#
#%% 4. plot_Hudson_points
def plot_Hudson_points(FM):
    """Scatter moment-tensor solutions on a Hudson (U-V) source-type plot.

    FM is an (n, 6) array of tensors [Mxx, Myy, Mzz, Mxy, Mxz, Myz]; points
    are coloured by sample index ('YlGn' colormap) and a colorbar labelled
    'Sample' is attached.  Returns the matplotlib figure.
    """
    fig1, ax1 = plt.subplots(1 ,1, dpi=800)
    plt.rc('font',family='Times New Roman')
    MTpy.Hudson_plot(ax=ax1)
    # The colormap is loop-invariant: fetch it once.  (The original fetched
    # it on every iteration and raised NameError for an empty FM, since
    # `map_vir` was only defined inside the loop but used afterwards.)
    map_vir = cm.get_cmap(name='YlGn')
    for i in range(0,len(FM)):
        MT = MTpy.MTensor(FM[i,:])
        M=MT.mt
        # Project the tensor into Hudson's (k, T) then (U, V) coordinates.
        k,T = MTpy.M2kT_space(M)
        U,V = MTpy.kT2UV_space(k,T)
        colors = map_vir(i/len(FM))
        ax1.scatter(U,V, color=colors,marker='o', s=5)
    # Colorbar mapping colour to sample index.
    position=fig1.add_axes([0.85, 0.15, 0.01, 0.5])
    font_colorbar = {'family' : 'Times New Roman',
                     'color'  : 'black',
                     'weight' : 'normal',
                     'size'   : 6,
                     }
    sm = cm.ScalarMappable(cmap=map_vir)
    sm.set_array(np.arange(0,len(FM)+1))
    cb=plt.colorbar(sm,cax=position,orientation='vertical')
    cb.set_label('Sample',fontdict=font_colorbar)
    # Axis limits slightly padded around the Hudson diamond.
    ax1.set_xlim(-4/3-0.1, 4/3+0.3)
    ax1.set_ylim(-1-0.1, 1+0.1)
    return fig1
# -
# -[Turn back!](#helper_back)
| data/example_yunnan/Jupyter_notebook/S2_decompose.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Considere as matrizes abaixo: </b>
# $A = \left [ \begin{array}{rcr}
# 3 & 0 \\
# -1 & 2 \\
# 1 & 1
# \end{array} \right ],$
# $B = \left [ \begin{array}{rcr}
# 4 & -1 \\
# 0 & 2
# \end{array} \right ],$
# $C = \left [ \begin{array}{rcr}
# 1 & 4 & 2 \\
# 3 & 1 & 5
# \end{array} \right ]$
# $D = \left [ \begin{array}{rcr}
# 1 & 5 & 2 \\
# -1 & 0 & 1 \\
# 3 & 2 & 4
# \end{array} \right ]$
# $E = \left [ \begin{array}{rcr}
# 6 & 1 & 3 \\
# -1 & 1 & 2 \\
# 4 & 1 & 3
# \end{array} \right ]$
#
# <b>E) $CC^t$</b>
# <b>Determinando $C^t$</b>
# $C^t = \left [ \begin{array}{rcr}
# 1 & 3 \\
# 4 & 1 \\
# 2 & 5
# \end{array} \right ]$
# <b>Determinando $CC^t$</b>
# $CC^t = \left [ \begin{array}{rcr}
# 1 \cdot 1 + 4 \cdot 4 + 2 \cdot 2 & 1 \cdot 3 + 4 \cdot 1 + 2 \cdot 5 \\
# 3 \cdot 1 + 1 \cdot 4 + 5 \cdot 2 & 3 \cdot 3 + 1 \cdot 1 + 5 \cdot 5
# \end{array} \right ]$
# $CC^t = \left [ \begin{array}{rcr}
# 21 & 17 \\
# 17 & 35
# \end{array} \right ]$
| Lista 01/E.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Add Bayesian-and-novelty directory to the PYTHONPATH
import sys
import os
sys.path.append(os.path.realpath('../../..'))
# Autoreload changes in utils, etc.
# %load_ext autoreload
# %autoreload 2
from novelty.utils.metrics import plot_roc, plot_prc
from novelty.utils.metrics import get_summary_statistics
from novelty.utils.metrics import html_summary_table
# +
# Training settings
BATCH_SIZE = 128
EPOCHS = 200
LR = 0.1
MOMENTUM = 0.9
NO_CUDA = False          # set True to force CPU even when CUDA is available
SEED = 1                 # torch RNG seed for reproducibility
CLASSES = 10
MODEL_PATH_ROOT = './weights/wrn-28-10-mnist10'
MODEL_PATH = MODEL_PATH_ROOT + '.pth'
# MNIST mean and stdevs of training data by channel
CHANNEL_MEANS = (33.791240975260735/255,)
CHANNEL_STDS = (79.17246803641319/255,)
# Plot ROC and PR curves
PLOT_CHARTS = True
# ODIN parameters (temperature scaling and input-perturbation magnitude
# for out-of-distribution detection)
TEMP = 1000.
NOISE_MAGNITUDE = 0.0012
# -
# ## Training and Testing functions
# +
from novelty.utils import Progbar
def train(model, device, train_loader, optimizer, epoch):
    """Run one epoch of NLL training, reporting per-batch loss on a progress bar."""
    bar = Progbar(target=len(train_loader.dataset))
    model.train()
    for inputs, targets in train_loader:
        inputs = inputs.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()
        log_probs = F.log_softmax(model(inputs), dim=1)
        batch_loss = F.nll_loss(log_probs, targets)
        batch_loss.backward()
        optimizer.step()
        bar.add(len(inputs), [("loss", batch_loss.item())])
# -
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = F.log_softmax(model(data), dim=1)
# sum up batch loss
test_loss += F.nll_loss(output, target, size_average=False).item()
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_acc = 100. * correct / len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset), test_acc))
return test_loss, test_acc
# ## Initialize model and load MNIST
# +
from novelty.utils import DATA_DIR
from src.wide_resnet import Wide_ResNet
torch.manual_seed(SEED)
use_cuda = not NO_CUDA and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Dataset transformation: MNIST (1x28x28) -> normalized 3x32x32 tensors
transform = transforms.Compose([
    transforms.Resize(32), # Resize to work with WRN
    transforms.ToTensor(),
    transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS),
    transforms.Lambda(lambda x: x.expand(3, -1, -1)) # Expand to 3 channels to work with WRN model
])
# Load training and test sets
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(os.path.join(DATA_DIR, 'mnist'), train=True, transform=transform, download=True),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(os.path.join(DATA_DIR, 'mnist'), train=False, transform=transform, download=True),
    batch_size=BATCH_SIZE, shuffle=False, **kwargs)
# Create model instance
# assumes Wide_ResNet args are (depth, widen_factor, dropout_rate, num_classes) -- confirm against src.wide_resnet
model = Wide_ResNet(28, 10, 0.0, CLASSES)
model = model.to(device)
# Initialize optimizer; LR decays 10x at 50% and 75% of total epochs
optimizer = optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(EPOCHS*0.5), int(EPOCHS*0.75)], gamma=0.1)
# -
# ## Optimization loop
# +
# Train from scratch unless a finished checkpoint already exists on disk.
if os.path.exists(MODEL_PATH):
    # load previously trained model:
    model.load_state_dict(torch.load(MODEL_PATH))
else:
    best_loss = float("inf")
    # Training loop
    for epoch in range(EPOCHS):
        print("Epoch:", epoch)
        # NOTE(review): since PyTorch 1.1 scheduler.step() should be called
        # after the epoch's optimizer steps; calling it first shifts the LR
        # schedule one epoch early -- confirm intended.
        scheduler.step()
        # Print the learning rate
        for param_group in optimizer.param_groups:
            print('Learning rate:', param_group['lr'])
        train(model, device, train_loader, optimizer, epoch)
        loss, acc = test(model, device, test_loader)
        # Checkpoint the model parameters whenever test loss improves
        if loss < best_loss:
            torch.save(model.state_dict(), "{}_epoch{}.pth".format(MODEL_PATH_ROOT, epoch))
            best_loss = loss
    # save the model
    torch.save(model.state_dict(), MODEL_PATH)
# -
# ## ODIN prediction functions
# +
from torch.autograd import Variable
def predict(model, data, device):
    """Return plain softmax class probabilities for `data` (no temperature scaling)."""
    model.eval()
    logits = model(data.to(device))
    # Subtract the per-row max before softmax for numerical stability.
    stabilized = logits - logits.max(1)[0].unsqueeze(1)
    return F.softmax(stabilized, dim=1)
def predict_temp(model, data, device, temp=1000.):
    """Return temperature-scaled softmax probabilities: softmax(logits / temp)."""
    model.eval()
    logits = model(data.to(device))
    scaled = logits / temp
    # Subtract the per-row max before softmax for numerical stability.
    scaled = scaled - scaled.max(1)[0].unsqueeze(1)
    return F.softmax(scaled, dim=1)
def predict_novelty(model, data, device, temp=1000., noiseMagnitude=0.0012):
    """ODIN score: temperature-scaled softmax after a small adversarial-style
    input perturbation (Liang et al., 2018).

    temp: softmax temperature T.
    noiseMagnitude: perturbation epsilon applied against the gradient sign.
    Returns the softmax probabilities for the perturbed inputs.
    """
    model.eval()
    # Create a variable so we can get the gradients on the input
    inputs = Variable(data.to(device), requires_grad=True)
    # Get the predicted labels
    outputs = model(inputs)
    outputs = outputs / temp
    outputs = F.log_softmax(outputs, dim=1)
    # Calculate the perturbation to add to the input
    maxIndexTemp = torch.argmax(outputs, dim=1)
    labels = Variable(maxIndexTemp).to(device)
    loss = F.nll_loss(outputs, labels)
    loss.backward()
    # Normalizing the gradient to binary in {0, 1}, then to {-1, +1}
    gradient = torch.ge(inputs.grad.data, 0)
    gradient = (gradient.float() - 0.5) * 2
    # Normalize the gradient to the same space of image.
    # Fixed: apply to every image in the batch (previously `gradient[0][channel]`
    # only rescaled the first sample of each batch).
    # NOTE(review): the released ODIN code only divides by std (no mean
    # subtraction); the (g - mean) / std form is kept from the original -- confirm.
    for channel, (mean, std) in enumerate(zip(CHANNEL_MEANS, CHANNEL_STDS)):
        gradient[:, channel] = (gradient[:, channel] - mean) / std
    # Add small perturbations to image (modern keyword form of the deprecated
    # torch.add(input, scalar, tensor) signature; same arithmetic)
    # TODO, this is from the released code, but disagrees with paper I think
    tempInputs = torch.add(inputs.data, gradient, alpha=-noiseMagnitude)
    # Get new outputs after perturbations
    outputs = model(Variable(tempInputs))
    outputs = outputs / temp
    outputs = outputs - outputs.max(1)[0].unsqueeze(1) # For stability
    outputs = F.softmax(outputs, dim=1)
    return outputs
# -
# ## Evaluate method on outlier datasets
# +
def get_max_model_outputs(data_loader, device):
    """Get the max softmax output from the model in a Python array.

    data_loader: object
        A pytorch dataloader with the data you want to calculate values for.

    device: object
        The CUDA device handle.
    """
    maxima = []
    for batch, _ in data_loader:
        # Score the batch with the plain (unscaled) softmax model.
        probs = predict(model, batch, device)
        top_prob, _ = torch.max(probs, dim=1)
        # Convert torch tensors to a python list of per-sample maxima
        maxima += list(top_prob.cpu().detach().numpy())
    return maxima
def get_max_odin_outputs(data_loader, device, temp=1000., noiseMagnitude=0.0012):
    """Convenience function to get the max softmax values from the ODIN model in a Python array.

    data_loader: object
        A pytorch dataloader with the data you want to calculate values for.

    device: object
        The CUDA device handle.

    temp: float, optional (default=1000.)
        The temp the model should use to do temperature scaling on the softmax outputs.

    noiseMagnitude: float, optional (default=0.0012)
        The epsilon value used to scale the input images according to the ODIN paper.
    """
    maxima = []
    for batch, _ in data_loader:
        # Score the batch with the ODIN (perturbed + temperature-scaled) model.
        probs = predict_novelty(model, batch, device, temp=temp, noiseMagnitude=noiseMagnitude)
        top_prob, _ = torch.max(probs, dim=1)
        # Convert torch tensors to a python list of per-sample maxima
        maxima += list(top_prob.cpu().detach().numpy())
    return maxima
# +
import pandas as pd
# Build results table: one entry per detector variant; each maps an
# outlier-dataset name to its summary-statistics dict.
results = {
    "WRN-28-10": {
        "inlier_name": "MNIST10",
        "outliers": {}
    },
    "WRN-28-10-ODIN": {
        "inlier_name": "MNIST10",
        "outliers": {}
    }
}
# Pre-declared rows for the ODIN summary; extra rows (e.g. 'fashion') are
# added later via .loc enlargement.
df = pd.DataFrame(columns=['auroc', 'aupr_in', 'aupr_out', 'fpr_at_95_tpr', 'detection_error'],
                  index=['letters', 'rot90', 'gaussian', 'uniform', 'not_mnist'])
# -
# ### Process Inliers
# +
# Number of in-distribution (MNIST test) examples. len(dataset) replaces the
# deprecated `dataset.test_data.shape[0]` (`test_data` was deprecated and
# later removed from torchvision's MNIST).
num_inliers = len(test_loader.dataset)
# Get predictions on in-distribution images
mnist_model_maximums = get_max_model_outputs(test_loader, device)
mnist_odin_maximums = get_max_odin_outputs(test_loader, device, temp=TEMP, noiseMagnitude=NOISE_MAGNITUDE)
# -
# ### Fashion MNIST
# +
# Fashion-MNIST as the out-of-distribution (OOD) set.
directory = os.path.join(DATA_DIR, 'fashion_mnist')
# Dataset transformation (same preprocessing as the in-distribution MNIST data)
transform = transforms.Compose([
    transforms.Resize(32), # Resize to work with WRN
    transforms.ToTensor(),
    transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS),
    transforms.Lambda(lambda x: x.expand(3, -1, -1)) # Expand to 3 channels to work with WRN model
])
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
fashion_loader = torch.utils.data.DataLoader(
    datasets.FashionMNIST(directory, train=False, transform=transform, download=True),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_fashion = len(fashion_loader.dataset)
# Get predictions on the out-of-distribution images
fashion_model_maximums = get_max_model_outputs(fashion_loader, device)
fashion_odin_maximums = get_max_odin_outputs(fashion_loader, device, temp=TEMP, noiseMagnitude=NOISE_MAGNITUDE)
# +
# Label inliers 1 and outliers 0, then score both detectors.
labels = [1] * num_inliers + [0] * num_fashion
predictions = mnist_model_maximums + fashion_model_maximums
predictions_odin = mnist_odin_maximums + fashion_odin_maximums
results["WRN-28-10"]["outliers"]["Fashion"] = get_summary_statistics(predictions, labels)
results["WRN-28-10-ODIN"]["outliers"]["Fashion"] = get_summary_statistics(predictions_odin, labels)
# 'fashion' was not pre-declared in df's index; .loc enlarges the frame with a new row.
df.loc['fashion'] = pd.Series(results["WRN-28-10-ODIN"]["outliers"]["Fashion"])
if PLOT_CHARTS:
    plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
    plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
#     plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
#     plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# -
# ### EMNIST Letters
# +
# EMNIST letters as the out-of-distribution (OOD) set.
directory = os.path.join(DATA_DIR, 'emnist')
# Dataset transformation (same preprocessing as the in-distribution MNIST data)
transform = transforms.Compose([
    transforms.Resize(32), # Resize to work with WRN
    transforms.ToTensor(),
    transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS),
    transforms.Lambda(lambda x: x.expand(3, -1, -1)) # Expand to 3 channels to work with WRN model
])
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
emnist_loader = torch.utils.data.DataLoader(
    datasets.EMNIST(directory, "letters", train=False, transform=transform, download=True),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_emnist = len(emnist_loader.dataset)
# Get predictions on the out-of-distribution images
emnist_model_maximums = get_max_model_outputs(emnist_loader, device)
emnist_odin_maximums = get_max_odin_outputs(emnist_loader, device, temp=TEMP, noiseMagnitude=NOISE_MAGNITUDE)
# +
# Label inliers 1 and outliers 0, then score both detectors.
labels = [1] * num_inliers + [0] * num_emnist
predictions = mnist_model_maximums + emnist_model_maximums
predictions_odin = mnist_odin_maximums + emnist_odin_maximums
results["WRN-28-10"]["outliers"]["EMNIST"] = get_summary_statistics(predictions, labels)
results["WRN-28-10-ODIN"]["outliers"]["EMNIST"] = get_summary_statistics(predictions_odin, labels)
df.loc['letters'] = pd.Series(results["WRN-28-10-ODIN"]["outliers"]["EMNIST"])
if PLOT_CHARTS:
    plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
    plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
#     plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
#     plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# -
# ### Not MNIST
# +
# notMNIST (letter glyphs) as the out-of-distribution (OOD) set, loaded from
# an on-disk image folder rather than a torchvision dataset class.
directory = os.path.join(DATA_DIR, 'notmnist/notMNIST_small')
# Dataset transformation; Grayscale first since ImageFolder yields RGB images
transform = transforms.Compose([
    transforms.Grayscale(),
    transforms.Resize(32), # Resize to work with WRN
    transforms.ToTensor(),
    transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS),
    transforms.Lambda(lambda x: x.expand(3, -1, -1)) # Expand to 3 channels to work with WRN model
])
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
notmnist_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(directory, transform=transform),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_notmnist = len(notmnist_loader.dataset)
# Get predictions on the out-of-distribution images
notmnist_model_maximums = get_max_model_outputs(notmnist_loader, device)
notmnist_odin_maximums = get_max_odin_outputs(notmnist_loader, device, temp=TEMP, noiseMagnitude=NOISE_MAGNITUDE)
# +
# Label inliers 1 and outliers 0, then score both detectors.
labels = [1] * num_inliers + [0] * num_notmnist
predictions = mnist_model_maximums + notmnist_model_maximums
predictions_odin = mnist_odin_maximums + notmnist_odin_maximums
results["WRN-28-10"]["outliers"]["Not MNIST"] = get_summary_statistics(predictions, labels)
results["WRN-28-10-ODIN"]["outliers"]["Not MNIST"] = get_summary_statistics(predictions_odin, labels)
df.loc['not_mnist'] = pd.Series(results["WRN-28-10-ODIN"]["outliers"]["Not MNIST"])
if PLOT_CHARTS:
    plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
    plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
#     plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
#     plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# -
# ### Rotated 90 MNIST
# +
# MNIST rotated by 90 degrees as the out-of-distribution (OOD) set.
directory = os.path.join(DATA_DIR, 'mnist')
# Dataset transformation; PIL Image.rotate(90) is applied before tensor conversion
transform = transforms.Compose([
    transforms.Lambda(lambda image: image.rotate(90)),
    transforms.Resize(32), # Resize to work with WRN
    transforms.ToTensor(),
    transforms.Normalize(CHANNEL_MEANS, CHANNEL_STDS),
    transforms.Lambda(lambda x: x.expand(3, -1, -1)) # Expand to 3 channels to work with WRN model
])
# Load the dataset
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
rot90_loader = torch.utils.data.DataLoader(
    datasets.MNIST(directory, train=False, transform=transform, download=True),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_rot90 = len(rot90_loader.dataset)
# Get predictions on the out-of-distribution images
rot90_model_maximums = get_max_model_outputs(rot90_loader, device)
rot90_odin_maximums = get_max_odin_outputs(rot90_loader, device, temp=TEMP, noiseMagnitude=NOISE_MAGNITUDE)
# +
# Label inliers 1 and outliers 0, then score both detectors.
labels = [1] * num_inliers + [0] * num_rot90
predictions = mnist_model_maximums + rot90_model_maximums
predictions_odin = mnist_odin_maximums + rot90_odin_maximums
results["WRN-28-10"]["outliers"]["Rot 90"] = get_summary_statistics(predictions, labels)
results["WRN-28-10-ODIN"]["outliers"]["Rot 90"] = get_summary_statistics(predictions_odin, labels)
df.loc['rot90'] = pd.Series(results["WRN-28-10-ODIN"]["outliers"]["Rot 90"])
if PLOT_CHARTS:
    plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
    plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
#     plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
#     plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# -
# ### Gaussian Noise Dataset
# +
# Synthetic Gaussian noise images as the out-of-distribution (OOD) set.
from novelty.utils.datasets import GaussianNoiseDataset
gaussian_transform = transforms.Compose([
    #TODO clip to [0,1] range
    transforms.ToTensor()
])
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
gaussian_loader = torch.utils.data.DataLoader(
    GaussianNoiseDataset((10000, 32, 32, 3), mean=0., std=1., transform=gaussian_transform),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_gaussian = len(gaussian_loader.dataset)
# Get predictions on the out-of-distribution noise images
gaussian_model_maximums = get_max_model_outputs(gaussian_loader, device)
gaussian_odin_maximums = get_max_odin_outputs(
    gaussian_loader, device, temp=TEMP, noiseMagnitude=NOISE_MAGNITUDE)
# +
# Label inliers 1 and outliers 0, then score both detectors.
labels = [1] * num_inliers + [0] * num_gaussian
predictions = mnist_model_maximums + gaussian_model_maximums
predictions_odin = mnist_odin_maximums + gaussian_odin_maximums
results["WRN-28-10"]["outliers"]["Gaussian Noise"] = get_summary_statistics(predictions, labels)
results["WRN-28-10-ODIN"]["outliers"]["Gaussian Noise"] = get_summary_statistics(predictions_odin, labels)
df.loc['gaussian'] = pd.Series(results["WRN-28-10-ODIN"]["outliers"]["Gaussian Noise"])
if PLOT_CHARTS:
    plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
    plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
#     plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
#     plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# -
# ### Uniform Noise Dataset
# +
# Synthetic uniform noise images as the out-of-distribution (OOD) set.
from novelty.utils.datasets import UniformNoiseDataset
import math
# Uniform on [-sqrt(3), sqrt(3)] has zero mean and unit variance.
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
uniform_loader = torch.utils.data.DataLoader(
    UniformNoiseDataset((10000, 32, 32, 3), low=-math.sqrt(3.), high=math.sqrt(3.), transform=transforms.ToTensor()),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
num_uniform = len(uniform_loader.dataset)
# Get predictions on the out-of-distribution noise images
uniform_model_maximums = get_max_model_outputs(uniform_loader, device)
uniform_odin_maximums = get_max_odin_outputs(
    uniform_loader, device, temp=TEMP, noiseMagnitude=NOISE_MAGNITUDE)
# +
# Label inliers 1 and outliers 0, then score both detectors.
labels = [1] * num_inliers + [0] * num_uniform
predictions = mnist_model_maximums + uniform_model_maximums
predictions_odin = mnist_odin_maximums + uniform_odin_maximums
results["WRN-28-10"]["outliers"]["Uniform Noise"] = get_summary_statistics(predictions, labels)
results["WRN-28-10-ODIN"]["outliers"]["Uniform Noise"] = get_summary_statistics(predictions_odin, labels)
df.loc['uniform'] = pd.Series(results["WRN-28-10-ODIN"]["outliers"]["Uniform Noise"])
if PLOT_CHARTS:
    plot_roc(predictions, labels, title="Softmax Thresholding ROC Curve")
    plot_roc(predictions_odin, labels, title="ODIN ROC Curve")
#     plot_prc(predictions, labels, title="Softmax Thresholding PRC Curve")
#     plot_prc(predictions_odin, labels, title="ODIN PRC Curve")
# -
# Persist the ODIN summary table for later comparison across experiments.
df.to_pickle('./mnist10_wrn28_10_odin.pkl')
df
# # Show Results Table
# +
from IPython.display import HTML, display
# Render the nested `results` dict as an HTML summary table.
table = html_summary_table(results)
display(HTML(table))
# -
| notebooks/perturbation_temp_scaling_liang2018/_old_/experiments-mnist10_wrn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EDA : Distributions
import pandas as pd
import os
# +
# Load the raw Titanic training data from <project>/data/raw/train.csv.
raw_data_path = os.path.join(os.pardir,"data","raw")
train_csv = os.path.join(raw_data_path, "train.csv")
df =pd.read_csv(train_csv)
# -
df.head()
# #### Histograms
# %matplotlib inline
# NOTE(review): the plot title has a typo -- "The gae" should be "The age".
df.Age.plot(kind="hist", title="The gae of the passengers", color='c', bins=30)
# ### KDE PLOT
df.Age.plot(kind='kde', title='Kernel Density Estimation', color = 'c')
# +
### KDE plot for Fare
df.Fare.plot(kind="kde", title='Fare', color='b')
# -
# ### Skewness
# +
# NOTE(review): "skweness" in the printed strings should read "skewness".
print("Age skweness : {0:.2f}".format(df.Age.skew()))
print("Fare skweness : ", df.Fare.skew())
# -
# ## Scatter Plot - Bivariate
df.plot.scatter(x="Age", y= "Fare" ,color='c', title="Scatter Plot: Age vs Fare", alpha= 0.15)
# +
## scatter plot of Fare against passenger class
df.plot.scatter(x = 'Pclass' , y='Fare' , color ='r' , title="Scatter Plot: Avg vs Fare", alpha=0.15)
# -
# ## EDA: Grouping and Aggregations
df.groupby(["Sex"]).Age.median()
# NOTE(review): grouping Fare by Fare just echoes each unique fare; a
# different key (e.g. 'Sex' or 'Pclass') was probably intended.
df.groupby('Fare').Fare.median()
# Select multiple columns with a list -- the old tuple form
# df.groupby(...)['Fare', 'Age'] is deprecated and removed in modern pandas.
df.groupby(['Pclass'])[['Fare', 'Age']].median()
df.groupby(['Pclass','Sex']).Age.median()
# #### Aggregations
df.groupby("Pclass").agg({"Fare":'mean', 'Age':'median'})
import numpy as np
# NOTE(review): nested-dict aggregation (column -> {name: func}) is
# deprecated since pandas 0.25; named aggregation is the modern replacement.
aggregations = {
    "Fare":{
        "mean_fare":'mean',
        'median_fare':'median',
        'max_fare':max,
        'min_far':np.min
    },
    "Age": {
        "mean_age":'mean',
        'median_age':'median',
        'max_age':max,
        'range_age': lambda x: max(x) - min(x)
    }
}
df.groupby(['Pclass','Sex','Embarked']).agg(aggregations)
# ### Cross Tab
pd.crosstab(df.Pclass, df.Sex)
## Plot
pd.crosstab( df.Sex, df.Pclass).plot(kind="bar")
# ### Pivot Table
df.pivot_table(index= 'Sex', columns = 'Pclass', values= 'Age', aggfunc ='mean')
# +
## another way for the same result - unstack
# -
df.groupby(['Sex', 'Pclass']).Age.mean().unstack()
| notebooks/EDA -Distributions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 4
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the Toyota used-car dataset directly from GitHub.
path="https://raw.githubusercontent.com/Anasuya-Sahoo/DMDW-Lab/main/Toyota.csv"
data=pd.read_csv(path)
data.shape
data.head()
# Scatter plot of vehicle age (months) against price.
plt.scatter(data['Age'],data['Price'],c='crimson')
plt.xlabel('Age(months)')
# NOTE(review): label has a misplaced parenthesis -- 'Price(Euros)' was likely intended.
plt.ylabel('Price)(Euros)')
plt.show()
plt.hist(data['KM'])
plt.show()
# Coarser histogram of kilometers driven (5 bins).
plt.hist(data['KM'],
         #color ='green',
         edgecolor ='orange',
         bins = 5)
plt.title('Histograms of kilometers')
plt.xlabel('kilometer')
plt.ylabel('Frequency')
plt.show()
# Hard-coded fuel-type counts (presumably taken from the dataset) for the bar plot.
counts = [979, 120, 12]
fuelType = ('Petrol', 'Diesel', 'CNG')
index = np.arange(len(fuelType))
plt.bar(index, counts, color=['red', 'blue', 'cyan'])
plt.title('Bar plot of fuel types')
plt.xlabel('Fuel Types')
plt.ylabel('Frequency')
plt.xticks(index, fuelType, rotation = 90)
plt.show()
import seaborn as sns
sns.set(style="darkgrid")
sns.regplot(x=data['Age'],y=data['Price'])
sns.distplot(data['Age'],kde=False,bins=5)
sns.countplot(x="FuelType", data=data)
sns.countplot(x="FuelType", data=data, hue = "Automatic")
#box plot diagram
sns.boxplot(y=data["Price"])
#multiple data representations on one box plot
sns.boxplot(x=data['FuelType'], y=data["Price"])
sns.boxplot(x = "FuelType", y=data["Price"], hue = "Automatic",data=data)
# NOTE(review): axes are created here but nothing is drawn into them; the
# notebook appears to end mid-exercise.
f,(ax_box, ax_hist)=plt.subplots(2, gridspec_kw={"height_ratios":(.20,.80)})
| 18cse002-Assignment 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Insights of Exploratory Data Analysis from [2019 Stack Overflow Developer Survey](https://insights.stackoverflow.com/survey).
# There are six sections in this survey.
# * Basic Information
# * Education, Work, and Career
# * Technology and Tech Culture
# * Stack Overflow Usage + Community
# * Demographic Information
# * Survey Review
# ### Major Python resources used for this analysis
# * [MatplotLib](https://matplotlib.org/)
# * [Pandas](https://pandas.pydata.org/)
# * [Seaborn](https://seaborn.pydata.org/)
# + pycharm={"is_executing": false, "name": "#%%\n"}
import matplotlib.pyplot as plt # for plotting graphs
# import required python libraries
import pandas as pd
import seaborn as sns
# this is used to plot on Juputer notebook console, if you want to have plots as separate window,
# please use plt.show()
# %matplotlib inline
# load main data set in to Pandas dataframe
df = pd.read_csv('./data/survey_results_public-2019.csv') # main Pandas Df
# load schema data set in to Pandas DF
schema_df = pd.read_csv('./data/survey_results_schema-2019.csv')
# preview head of the dataset
df.head()
# + pycharm={"is_executing": false, "name": "#%%\n"}
# the schema describes the features available in this data set, with their definitions
schema_df.values
# -
# ### Questions that are going to be discused in this analysis
# * What is the overall job satisfaction in the IT industry?
# * What are gender and age differences in job satisfaction?
# * How does education relate to job satisfaction?
# * How does income level affect job satisfaction?
# + pycharm={"is_executing": false, "name": "#%%\n"}
"""
Pandas value_counts is an inbuilt pandas function that returns an object containing counts of
unique values in sorted order.
"""
df['JobSat'].value_counts()
# + pycharm={"is_executing": false, "name": "#%%\n"}
# setting the font scale for matplot graphs
sns.set(font_scale=1)
# Exploratory visualization for the `JobSat` column: count of respondents per unique satisfaction level
df['JobSat'].value_counts().plot.bar(figsize=(12, 6))
# matplotlib decorations for the graph
plt.title('Job Satisfaction Summary by Satisfaction Level') # title
plt.xlabel('JobSat Level') # X - axis
plt.ylabel('Number of Respondents') # Y -axis
# A total of 24207 of the employees were slightly satisfied, and another 22452 were very satisfied. Added together, these two groups formed 46659 of the samples. Slightly dissatisfied and very dissatisfied employees numbered only 10752 and 4857 respectively. It can be concluded that, among all respondents, most of the employees were satisfied with their jobs.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# reformat `Gender` into three readable groups: Male / Female / Other
df.loc[(df['Gender'] !='Man') & (df['Gender'] !='Woman'), 'Gender'] = 'Other'
df.loc[ df['Gender'] =='Man', 'Gender'] = 'Male'
df.loc[ df['Gender'] =='Woman', 'Gender'] = 'Female'
# + pycharm={"is_executing": false, "name": "#%%\n"}
# remove missing values for `JobSat` column
# NOTE(review): this line is a no-op -- dropna returns a new Series that is
# discarded; reassign (or use subset=['JobSat'] on the frame) if rows were
# actually meant to be removed.
df['JobSat'].dropna(axis=0)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# select values for Job Satisfaction
job_sat_vals = ['Slightly satisfied','Very satisfied']
# select values for Job Dissatisfaction
job_dis_sat_vals = ['Slightly dissatisfied','Very dissatisfied']
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a Pandas DF for most job satisfaction respondents
sat_job_respondents_df = df.copy()[(df['JobSat'] == job_sat_vals[0] ) | (df['JobSat'] == job_sat_vals[1])]
# create a Pandas DF for least job satisfaction respondents
dis_sat_job_respondents_df = df.copy()[(df['JobSat'] == job_dis_sat_vals[0] ) | (df['JobSat']== job_dis_sat_vals[1])]
# + pycharm={"is_executing": false, "name": "#%%\n"}
# assign a common value for `JobSat` in satisfaction respondent DF
sat_job_respondents_df.loc[:, 'JobSat'] = 'Satisfied'
# assign a common value for `JobSat` in dissatisfaction respondent DF
dis_sat_job_respondents_df.loc[:, 'JobSat'] = 'Dissatisfied'
# + pycharm={"is_executing": false, "name": "#%%\n"}
# merge sat and dis sat dfs as one df using Pandas concat function
job_sat_respondents_df = pd.concat([sat_job_respondents_df, dis_sat_job_respondents_df], axis=0)
# -
# ### Question 01
# **Question 01:** What is the overall job satisfaction in the IT industry?
# + pycharm={"is_executing": false, "name": "#%%\n"}
# setting the font scale for matplot graphs
sns.set(font_scale=1)
# draw a pie chart for `Overall Job Satisfaction` with common `JobSat` values. i.e Satisfied and Dissatisfied
job_sat_respondents_df['JobSat'].value_counts().plot(kind='pie', autopct='%1.2f%%',figsize=(12, 6))
# set the title of the pie chart
plt.title('Overall Job Satisfaction Summary')
# -
# **Overall, 74.93 percent of employees are satisfied, whereas 25.07 percent are dissatisfied**
# + pycharm={"is_executing": false, "name": "#%%\n"}
def merge_sat_and_dsat_by_cols(col):
    """Count satisfied and dissatisfied respondents per value of `col`.

    INPUT:
        col - the column name the two grouped counts are joined on.
    OUTPUT:
        A dataframe with one row per value of `col` and the columns
        'SatCount' and 'DisSatCount'.
    """
    counted = []
    # Group each respondent frame by the column and count rows per group.
    for frame, count_name in ((sat_job_respondents_df, 'SatCount'),
                              (dis_sat_job_respondents_df, 'DisSatCount')):
        sizes = frame.groupby([col]).size().sort_values(ascending=False)
        counted.append(sizes.reset_index(name=count_name))
    return pd.merge(counted[0], counted[1], on=col)
# + pycharm={"is_executing": false, "name": "#%%\n"}
def calculate_sat_and_dsat_pecentage(merged_df):
    """
    INPUT:
        merged pandas Df, that is required to append new columns `SatPercentage`,`DisSatPercentage`, and `TotalPercentage`
        i.e TotalPercentage = (SatCount+DisSatCount)/ (SatCount.Sum + DisSatCount.Sum) * 100
    OUTPUT:
        A Pandas Df, that is included with new columns with percentages, sorted
        by satisfaction rate (highest first)
    """
    sat = merged_df['SatCount']
    dis = merged_df['DisSatCount']
    # per-row and grand totals
    row_total = sat + dis
    total_sum = sat.sum() + dis.sum()
    # keep the numeric fractions for sorting; the columns store formatted strings
    sat_frac = sat / row_total
    dis_frac = dis / row_total
    total_frac = row_total / total_sum
    # add `SatPercentage` to merged DF (formatted, e.g. "74.93%")
    merged_df['SatPercentage'] = pd.Series(["{0:.2f}%".format(val * 100) for val in sat_frac],
                                           index = merged_df.index)
    # add `DisSatPercentage` to merged DF
    merged_df['DisSatPercentage'] = pd.Series(["{0:.2f}%".format(val * 100) for val in dis_frac],
                                              index = merged_df.index)
    # total column for each row
    merged_df['Total'] = row_total
    # add `TotalPercentage` to merged DF
    merged_df['TotalPercentage'] = pd.Series(["{0:.2f}%".format(val * 100) for val in total_frac],
                                             index = merged_df.index)
    # Bug fix: sorting by the formatted string ranked e.g. "90.00%" above
    # "100.00%" (lexicographic order); sort by the numeric fraction instead.
    return merged_df.assign(_sat_frac=sat_frac)\
        .sort_values(by='_sat_frac', ascending=False)\
        .drop(columns='_sat_frac')\
        .reset_index(drop=True)
# -
# ### Question 2
# **Question 2:** What are gender and age differences in job satisfaction?
# #### Gender of Employees by Job Satisfaction
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a merged DF using `Gender` column
merged_gender_df = merge_sat_and_dsat_by_cols('Gender')
# + pycharm={"is_executing": false, "name": "#%%\n"}
# calculate row percentages and all column percentages for merged_gender_df
calculate_sat_and_dsat_pecentage(merged_gender_df)
# -
# **Concerning gender, about 88.88 percent were males whereas 6.99 percent were females. The level of job satisfaction by gender did not show much difference with more than 74 percent of both males and females were satisfied with their job.**
# #### Age Group of Employees by Job Satisfaction
# + pycharm={"is_executing": false, "name": "#%%\n"}
# age group column name
age_group_col_name = 'AgeGroup(Years)'
# age groups labels
age_group_labels = ['15-24', '25-34','35-44','45-54','55-64']
# age group bin edges for pd.cut: intervals are (14,24], (24,34], ...
age_group_bins = [14, 24, 34, 44, 54, 64]
# add new column `AgeGroup(Years)` to sat_job_respondents_df
sat_job_respondents_df.loc[:,age_group_col_name] = pd.cut(sat_job_respondents_df['Age'],
                                bins=age_group_bins, labels=age_group_labels)
# add new column `AgeGroup(Years)` to dis_sat_job_respondents_df
dis_sat_job_respondents_df.loc[:,age_group_col_name] = pd.cut(dis_sat_job_respondents_df['Age'],
                                bins=age_group_bins,labels=age_group_labels)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a merged df using `AgeGroup(Years)` column
merged_age_group_df = merge_sat_and_dsat_by_cols(age_group_col_name)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# calculate row percentages and all column percentages for merged_age_group_df
calculate_sat_and_dsat_pecentage(merged_age_group_df)
# -
# **The largest age group of the employees amounting to 52.91 percent was in the age group of between 25 and 34 years. The smallest age group was 55 years or above, with about 1.90 percent of the total. The finding reflects that the employees in the IT industry were relatively young. The young group was less satisfied, as opposed to the older satisfied group. There was not much job satisfaction difference among employees who were in age groups 25-34, 35-44, and 45-54.**
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create matplotlib subplots for satisfied and dissatisfied age groups
# row count =1
# column count = 2
# with square figures
fig ,axs = plt.subplots(1,2, figsize=(15, 15))
# common configurations for subplots in matplotlib
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.05, top=0.9,hspace=0.4, wspace=0.3)
# draw a plot for satisfied respondents
sat_job_respondents_df[age_group_col_name].value_counts().plot(kind='pie', ax=axs.flatten()[0], autopct='%1.2f%%')
# set the satisfied job respondent's pie chart title
axs[0].set_title('Job Satisfaction By Age Groups')
# draw a plot for dissatisfied respondents
dis_sat_job_respondents_df[age_group_col_name].value_counts().plot(kind='pie', ax=axs.flatten()[1], autopct='%1.2f%%')
# set the dissatisfied job respondent's pie chart title
axs[1].set_title('Job Dissatisfaction By Age Groups')
# -
# ### Question 3
# **Question 3:** How does education relate to job satisfaction?
# **Employees who had a higher level of education are more satisfied**
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a merged df for `EdLevel` column
merged_edl_df = merge_sat_and_dsat_by_cols('EdLevel')
# + pycharm={"is_executing": false, "name": "#%%\n"}
# calculate row percentages and all column percentages for merged_edl_df
calculate_sat_and_dsat_pecentage(merged_edl_df)
# -
# **Concerning job satisfaction level, 80.12 percent of the employees who had a doctoral degree were satisfied with their job. This value is relatively high compared to 65.44 percent of satisfied employees who had no formal education. The finding to some extent indicates that those who obtained a higher level of education experience a high level of satisfaction.**
# **Employees who had studied in different fields are more satisfied**
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a merged df for `UndergradMajor` column
merged_unmajor_df = merge_sat_and_dsat_by_cols('UndergradMajor')
# + pycharm={"is_executing": false, "name": "#%%\n"}
# calculate row percentages and all column percentages for merged_unmajor_df
calculate_sat_and_dsat_pecentage(merged_unmajor_df)
# -
# **74.79 percent of the employees who had studied in computer science, computer engineering, or software engineering fields were satisfied with their job. This percentage is lower, as opposed to those who had studied in social science, health science or natural science.**
# ### Question 4
# **Question 4:** How does income level affect job satisfaction?
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Exploratory data analysis to understand outliers for `ConvertedComp`
# matplotlib subplots configurations
fig, axs = plt.subplots(1,2, figsize=(20,10))
# draw a boxplot for job_sat_respondents_df
job_sat_respondents_df.boxplot(column=['ConvertedComp'], ax=axs.flatten()[0])
axs[0].set_title('Income Level by Job Satisfaction')
# draw a boxplot for dis_sat_job_respondents_df
dis_sat_job_respondents_df.boxplot(column=['ConvertedComp'], ax=axs.flatten()[1])
axs[1].set_title('Income Level by Job Dissatisfaction')
# + pycharm={"is_executing": false, "name": "#%%\n"}
# matplotlib subplots configurations
fig, axs = plt.subplots(1,2, figsize=(20,10))
# ignore the outliers (income above 175K) and draw a boxplot for job_sat_respondents_df
job_sat_respondents_df[job_sat_respondents_df['ConvertedComp'] <= 175000].boxplot(column=['ConvertedComp'],
                                                                                  ax=axs.flatten()[0])
axs[0].set_title('Most Job Satisfaction')
# ignore the outliers (income above 160K) and draw a boxplot for dis_sat_job_respondents_df
dis_sat_job_respondents_df[dis_sat_job_respondents_df['ConvertedComp'] <= 160000].boxplot(column=['ConvertedComp'],
                                                                                          ax=axs.flatten()[1])
axs[1].set_title('Least Job Satisfaction')
# + pycharm={"is_executing": false, "name": "#%%\n"}
# income level group column name
income_level_group_col_name = 'IncomeGroup($)'
# income level group labels
income_level_group_labels = ['25K-50K', '50K-75K','75K-100K','100K-125K','125K-150K', '150K-175K']
# income level group bin edges (USD); values outside [25K, 175K] fall out of all bins
income_level_group_bins = [25000, 50000, 75000, 100000, 125000, 150000, 175000]
# -
# add a new column `IncomeGroup($)` to sat_job_respondents_df with the binned income values
# NOTE(review): the target frame is `sat_job_respondents_df` but the values are
# cut from `job_sat_respondents_df` -- confirm both names refer to the same
# frame / share an index, otherwise this assignment misaligns rows.
sat_job_respondents_df.loc[:,income_level_group_col_name] = pd.cut(job_sat_respondents_df['ConvertedComp'],
                                                bins=income_level_group_bins, labels=income_level_group_labels)
# add a new column `IncomeGroup($)` to dis_sat_job_respondents_df with the binned income values
dis_sat_job_respondents_df.loc[:,income_level_group_col_name] = pd.cut(dis_sat_job_respondents_df['ConvertedComp'],
                                                bins=income_level_group_bins, labels=income_level_group_labels)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a merged df for `IncomeGroup($)` column
merged_income_group_df = merge_sat_and_dsat_by_cols(income_level_group_col_name)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# calculate row percentages and all column percentages for merged_income_group_df
calculate_sat_and_dsat_pecentage(merged_income_group_df)
# -
# **Interestingly, the finding reveals that a total of 73.57 percent of those with an annual income between $25K and $50K is satisfied with their job, lower as compared to 80.17 percent and 79.66 percent respectively for those with income from $125K to $150K and $150K to $175K.**
# **Gender wise income levels of satisfied employees**
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a new pandas df with the `Gender`, `ConvertedComp`, and `JobSat` columns,
# keeping only rows whose Gender is 'Male' or 'Female'
gender_income_df = df.copy()[(df['Gender'] == 'Male') | (df['Gender'] == 'Female') ][['Gender', 'ConvertedComp', 'JobSat']]
# remove NaN values from gender_income_df
gender_income_df = gender_income_df.dropna(axis=0)
# + pycharm={"is_executing": false, "name": "#%% \n"}
# bin the incomes of gender_income_df into the income-level groups
gender_income_df.loc[:,income_level_group_col_name] = pd.cut(gender_income_df['ConvertedComp'],
                                                bins=income_level_group_bins, labels=income_level_group_labels)
# remove rows whose income fell outside every bin (NaN in `IncomeGroup($)`)
gender_income_df = gender_income_df.dropna(subset=[income_level_group_col_name],axis=0)
# create a new DF with selected columns
selected_df = gender_income_df.copy()[['Gender', income_level_group_col_name, 'JobSat']]
# + pycharm={"is_executing": false, "name": "#%%\n"}
# reassign both satisfaction levels to the single label 'Satisfied'
selected_df.loc[selected_df['JobSat'] =='Slightly satisfied', 'JobSat'] = 'Satisfied'
selected_df.loc[selected_df['JobSat'] =='Very satisfied', 'JobSat'] = 'Satisfied'
# + pycharm={"is_executing": false, "name": "#%%\n"}
# count satisfied respondents per (`IncomeGroup($)`, `Gender`) pair
job_sat_income_gender_df = selected_df[selected_df['JobSat'] =='Satisfied']\
    .groupby([income_level_group_col_name,'Gender'])\
    .count().reset_index()
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a df for males
male_df = job_sat_income_gender_df[job_sat_income_gender_df['Gender'] == 'Male'][[income_level_group_col_name,'JobSat']]
# rename `JobSat` as `MaleJobSat`
male_df.columns = [income_level_group_col_name, 'MaleJobSat']
# + pycharm={"is_executing": false, "name": "#%%\n"}
# create a df for females
female_df = job_sat_income_gender_df[job_sat_income_gender_df['Gender'] == 'Female'][[income_level_group_col_name,'JobSat']]
# rename `JobSat` as `FemaleJobSat`
female_df.columns = [income_level_group_col_name, 'FemaleJobSat']
# + pycharm={"is_executing": false, "name": "#%%\n"}
# join the male and female counts on the income group
gender_contact_df = pd.merge(female_df, male_df, on=income_level_group_col_name)
gender_contact_df
# + pycharm={"is_executing": false, "name": "#%%\n"}
# grouped bar chart of satisfied counts per income group, split by gender
gender_contact_df.index = income_level_group_labels
gender_contact_df.plot(kind='bar',figsize=(10,10) )
# -
# **There is a decreasing trend from low to high-income levels between male and female groups.**
| Stackoverflow_Job_Satisfaction_Statistical_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''.venv'': venv)'
# name: python3
# ---
import course;course.header()
# + [markdown] slideshow={"slide_type": "slide"}
# # Functions
# -
# Functions are encapsulated code blocks. Useful because:
# * code is reusable (can be used in different parts of the code or even imported from other scripts)
# * can be documented
# * can be tested
# ## Examples
import hashlib
def calculate_md5(string):
    """Return the md5 hex digest of *string*.

    Args:
        string (str | bytes): value to hash; ``str`` input is encoded
            as UTF-8 before hashing.

    Returns:
        str: md5 hex digest.

    Raises:
        TypeError: if *string* is neither ``str`` nor ``bytes``.
    """
    if isinstance(string, bytes):
        payload = string
    elif isinstance(string, str):
        payload = string.encode("utf-8")
    else:
        raise TypeError("This function supports only string input")
    digest = hashlib.md5()
    digest.update(payload)
    return digest.hexdigest()
# Demo: md5 of a str input (encoded to UTF-8 inside calculate_md5).
a = """
The path of the righteous man is beset
on all sides by the iniquities of the
selfish and the tyranny of evil men.
"""
calculate_md5(a)
# Demo: the same text as a bytes literal yields the same digest.
b = b"""
The path of the righteous man is beset
on all sides by the iniquities of the
selfish and the tyranny of evil men.
"""
calculate_md5(b)
# SideNote: Personally, I find googles docstring format the most readable. We will use this format in day 3. Example of google style python docstrings can be found [here](https://www.sphinx-doc.org/en/1.5/ext/example_google.html). If you wonder why we test for byte strings and use encode, please read [this](https://realpython.com/python-encodings-guide/) well written blog post about it.
#
# Docstring plugin in VSCode does the same thing.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dangerous mistakes using functions
# What are the outcomes of these lines
# -
def extend_list_with_three_none(input_list):
    """Extend input_list with 3 * None or
    create new list with three nones
    """
    input_list += [None, None, None]
    return input_list
# NOTE: deliberately broken teaching example -- each call below raises
# TypeError because the required `input_list` argument is missing.
extend_list_with_three_none()
extend_list_with_three_none()
extend_list_with_three_none()
# ## Fix it !
def extend_list_with_three_none():
    """Extend input_list with 3 * None
    """
    # NOTE: exercise stub -- `input_list` is never bound here, so calling
    # this raises UnboundLocalError; students are expected to fix it.
    input_list += [None, None, None]
    return input_list
# + [markdown] slideshow={"slide_type": "slide"}
# ### Setting up functions properly
# **Never** set default kwargs in functions to mutable objects as they are initialized once, exist until program is stopped and will behave strangly.
# + slideshow={"slide_type": "-"}
def extend_list_with_three_none_without_bug(input_list=None):
    """Append three None entries to *input_list* in place and return it.

    A fresh list is created when no list is given, so no mutable default
    is shared between calls (avoids the mutable-default-argument pitfall).
    """
    target = [] if input_list is None else input_list
    target.extend([None, None, None])
    return target
# -
# Each call builds a fresh list, so every call returns [None, None, None].
extend_list_with_three_none_without_bug()
extend_list_with_three_none_without_bug()
extend_list_with_three_none_without_bug()
# + [markdown] slideshow={"slide_type": "slide"}
# # Scopes: local & global
# -
counter = 0 # global
def increase_counter():
    # NOTE: the augmented assignment makes `counter` local to the
    # function, so calling this raises UnboundLocalError (teaching example).
    counter += 10 # local
    return counter
increase_counter()
counter = 0
def increase_counter(counter):
    counter += 10
    # NOTE: bare return -> the function returns None, so the assignment
    # below rebinds `counter` to None (teaching example).
    return
counter = increase_counter(counter)
counter
counter = 0
def increase_counter(counter):
    counter += 10
    return counter # note: "return counter += 10" would be a SyntaxError; += is a statement, not an expression
counter = increase_counter(counter)
counter
# + [markdown] slideshow={"slide_type": "slide"}
# If unsure avoid using global all together!
# Advantages:
# * variable can be overwritten in functions without changing code else where unexpectedly
# * code becomes very readble
#
# -
# If you need global (and please avoid using them) ...
counter = 0
def increase_counter():
    """Ugly!"""
    # `global` lets the function rebind the module-level name directly.
    global counter
    counter += 10
    return
increase_counter()
counter
# Biggest danger: a counter in the global namespace can be overwritten by any routine, hence if you really need globals (please don't!!) then use namespaces
import course
course.student_counter = 0
def increase_counter():
    """Still Ugly as not very explicit"""
    # mutating an attribute on an imported module acts as a namespaced global
    course.student_counter += 10
    return
increase_counter()
course.student_counter
# + [markdown] slideshow={"slide_type": "slide"}
# # Changing object during iteration
# this is also a common mistake using other modules e.g. pandas
# -
# ### Example
students = [
    "Anne",
    "Ben",
    "Chris",
    "Don",
    "Charles"
]
# NOTE: rebinding the loop variable does NOT modify the list items.
for student in students:
    student = student + " - 5th semster!"
students
# ### How to change the list?
# Assigning through the index does modify the list in place.
for pos, student in enumerate(students):
    students[pos] = student + " - 5th semster!"
students
students = [
    "Anne",
    "Ben",
    "Chris",
    "Don",
    "Charles"
]
students
# NOTE: teaching example -- popping while iterating shifts later elements
# left, so the element that moves into the popped slot is skipped.
for pos, student in enumerate(students):
    if student[0] == "C":
        # if student.startswith("C") is True:
        students.pop(pos)
students
# + [markdown] slideshow={"slide_type": "subslide"}
# ### How to delete all students starting with "C"?
# +
# Same flawed pattern repeated for the exercise discussion below.
for pos, student in enumerate(students):
    if student[0] == "C":
        # if student.startswith("C") is True:
        students.pop(pos)
# -
# ### Answer?
| notebooks/02.a_Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="YueulYZXyF7e" colab={"base_uri": "https://localhost:8080/"} outputId="db50c0a5-128c-4a21-dc10-c6e63ed2ee41"
# Lemmatize each whitespace-separated token of the phrase with WordNet
# and print the reassembled phrase.
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
nltk.download('wordnet', quiet=True)
kw_original = """cooking in oven"""
words = [lemmatizer.lemmatize(token) for token in kw_original.split(" ")]
print(" ".join(words))
# + colab={"base_uri": "https://localhost:8080/"} id="hKU2Lywzbesn" outputId="b174c409-4d12-4550-b034-6be7e5df1547"
# Stem each whitespace-separated token of the phrase with the Porter
# stemmer and print the reassembled phrase.
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
ps = PorterStemmer()
# choose some words to be stemmed
kw_original = """cooking in oven"""
words = [ps.stem(token) for token in kw_original.split(" ")]
print(" ".join(words))
| seo/notebooks/Phrase_Lemma.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # YOLO v3 Finetuning on AWS
#
# This series of notebooks demonstrates how to finetune pretrained YOLO v3 (aka YOLO3) using MXNet on AWS.
#
# **This notebook** walks through using the [DeepSORT](https://github.com/haandol/mxnet-deepsort-yolo3) on Localhost.
# We are going to capture mis-detected scenes in order to finetune YOLO3 object detector.
# **Follow-on** the content of the notebooks shows:
#
# * How to use MXNet YOLO3 pretrained model
# * How to use Deep SORT with MXNet YOLO3
# * How to create Ground-Truth dataset from images the model mis-detected
# * How to finetune the model using the created dataset
# * Load your finetuned model and Deploy Sagemaker-Endpoint with it using CPU instance.
# * Load your finetuned model and Deploy Sagemaker-Endpoint with it using GPU instance.
#
#
# ## Prerequisites
#
# This notebook is designed to be run in Localhost. To run it (and understand what's going on), you'll need:
#
# * Anaconda (Python 3.6+)
# * Basic familiarity with Python, Multi Object Tracking
# # What does the DeepSORT do?
#
# `DeepSORT` is one of the famous method for multiple object tracking.
#
# It is the enhanced version of `SORT`, which uses a `Kalman Filter` and the `Hungarian Algorithm` to track objects.
#
# First, an object detector such as SSD, Faster RCNN or YOLO detects the locations of the objects and sends them to DeepSORT. DeepSORT extracts an `appearance feature` from each location using a deep learning model. The algorithm uses this feature as a factor when matching a tracking-id to each location.
#
# This notebook focuses on the usage of the framework, so if you want more details please visit [DeepSORT](https://nanonets.com/blog/object-tracking-deepsort/#deep-sort) and [Computer Vision for tracking](https://towardsdatascience.com/computer-vision-for-tracking-8220759eee85).
# ## Step 0: Create Conda Environment
#
# In order to install the required packages, we need to set up a virtual environment first.
#
# ```bash
# $ conda create -n yolo-hol-env python=3.6
# ```
#
# After installing the virtual environment, enter the following command to activate it.
#
# ```bash
# $ source activate yolo-hol-env
# ```
# ## Step 1: Clone DeepSORT repository
#
# As I mentioned before, DeepSORT is a method for object tracking.
# There are many implementations out there.
# In this module we are going to use this [repository](https://github.com/haandol/mxnet-deepsort-yolo3) that is customized with MXNet YOLO3 for this workshop.
#
# Clone repository with following command,
#
# ```bash
# (yolo-hol-env) $ git clone https://github.com/haandol/mxnet-deepsort-yolo3 deepsort
#
# .
# ├── LICENSE
# ├── README.md
# ├── deep_sort
# ├── demo.py
# ├── model_data
# ├── requirements.txt
# ├── tools
# └── video.mp4
# ```
# ## Step 2: Install libraries and set configurations
#
# In order to run `demo.py`, you should install some requirements.
#
# ```bash
# (yolo-hol-env) $ pip install -r requirements.txt
# ```
# ## Step 3: Run demo.py
#
# Now you are ready to run `demo.py`.
#
# ```bash
# (yolo-hol-env) $ python demo.py --src video.mp4 --out-dir images --fps 10
# ...
# INFO:deepsort:Skipped by time_since_update
# INFO:deepsort:Skipped by time_since_update
# INFO:deepsort:Skipped by time_since_update
# INFO:deepsort:Skipped by time_since_update
# INFO:deepsort:Frame #5 - Id: 1
# INFO:deepsort:Frame #5 - Id: 2
# INFO:deepsort:Frame #7 - Id: 1
# ...
# INFO:deepsort:Missed obj: 90, Missed frame: 82
# ```
# ## Step 4: Check Results
#
# As you noticed above, there are some mis-detected frames. The demo script store the scene into your `out_dir`, it is `images` in this case.
#
# ```bash
# (yolo-hol-env) $ ls images
# 1.jpg
# 4.jpg
# ...
# missed-76-2.jpg
# missed-196-3.jpg
# missed-217-3.jpg
# missed-244-1.jpg
# missed-340-6.jpg
# missed-349-3.jpg
# ```
#
# Stored `{frame_index}.jpg` image shows how the detector see the frame(scene), and `missed-{frame_index}-{track_id}.jpg` is the region where the `track_id` disappears in the `frame_index.jpg` scene.
import mxnet as mx
import gluoncv as gcv
from matplotlib import pyplot as plt
# +
# 2x2 grid: frames 76 and 196 (left column) next to the cropped regions
# where their tracks were missed (right column).
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
ax4 = fig.add_subplot(2, 2, 4)
gcv.utils.viz.plot_image(mx.img.imread('Assets/76.jpg'), ax=ax1)
gcv.utils.viz.plot_image(mx.img.imread('Assets/missed-76-2.jpg'), ax=ax2)
gcv.utils.viz.plot_image(mx.img.imread('Assets/196.jpg'), ax=ax3)
gcv.utils.viz.plot_image(mx.img.imread('Assets/missed-196-3.jpg'), ax=ax4)
plt.show()
# -
# In a tracking workload, the worst case is a switched `track_id`, and that happened between `238.jpg` and `241.jpg`.
# In that scene, track_id `1` is switched to `3`, which was originally assigned to the guy wearing the navy vest.
# +
# Show the two frames (238 and 241) between which the track-id switch occurred.
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
gcv.utils.viz.plot_image(mx.img.imread('Assets/238.jpg'), ax=ax1)
gcv.utils.viz.plot_image(mx.img.imread('Assets/241.jpg'), ax=ax2)
plt.show()
# -
# In next notebook, we are going to use Sagemaker Ground Truth to label `missed-{frame_index}-{track_id}.jpg` images with bounding box.
| 01. Demo DeepSORT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook reads in photometry data in the format of CSV files from The Open Supernova Catalog, and CSV files output by my DOLPHOT-scraping script dolphot_retrieval_finalized.py and converts it to text files for input into Matt Nicholl's superbol.py program (code and documentation found here https://github.com/mnicholl/superbol).
#
# The code takes as input the data from both sources, and the luminosity distance lum_d in Mpc (labelled d_L on the sne.space page for each SN). It subtracts the distance modulus (5log10(lum_d(10^6)/10)) and foreground galactic extinction (which must be looked up and added into the code by the user) from each magnitude value, and converts the data into text files of the format 'MJD filter1_mag filter1_uncertainty filter2_mag filter2_uncertainty ... '. The text files are named in the format 'SNname_filters.txt', for example 'SN2006aj_ugriz.txt' or 'iPTF13bvn_JHK.txt'.
#
# It also collects the sources of each sne.space data set used for referencing purposes.
#
# The code contains a function to perform this analysis and formatting specific to each SN, so the user will need to edit the code for use with data from other SNe.
#import dependencies
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import csv
from astropy.time import Time
# +
#read in photometry CSVs from The Open Supernova Catalog (input correct filenames or paths)
iPTF13bvn = pd.read_csv('sne_space_photometry_data/iPTF13bvn_photometry.txt')
SN2006aj = pd.read_csv('sne_space_photometry_data/SN2006aj_photometry.txt')
SN2007uy = pd.read_csv('sne_space_photometry_data/SN2007uy_photometry.txt')
SN1993J = pd.read_csv('sne_space_photometry_data/SN1993J_photometry.txt')
#read in data scraped from DOLPHOT by dolphot_retrieval_finalized.py (input correct filenames or paths)
iPTF13bvn_lt = pd.read_csv('phot_plot_data_iPTF13bvn_old.csv')
SN1993J_lt = pd.read_csv('phot_plot_data_SN1993J_old.csv')
SN2007uy_lt = pd.read_csv('phot_plot_data_2007uy.csv')
SN2006aj_lt = pd.read_csv('phot_plot_data_2006aj.csv')
# -
# The next two cells define and then evaluate a function to analyze and format the data from both sources into two separate text files for one specific SN.
#define a function to format both data sets for iPTF13bvn
def format_for_superbol_iPTF13bvn(data, data_lt, lum_d):
    """Format iPTF13bvn photometry from sne.space and DOLPHOT for superbol.

    Writes two tab-delimited text files ('iPTF13bvn_UBVI.txt' for the
    sne.space data, 'iPTF13bvnlt_UBVI.txt' for the DOLPHOT data): one row
    per observation date with MJD followed by magnitude/uncertainty pairs
    for the U, B, V and I bands.  Magnitudes have the distance modulus and
    per-band foreground galactic extinction subtracted.

    Args:
        data: pandas DataFrame of sne.space photometry.
        data_lt: pandas DataFrame of DOLPHOT-scraped photometry.
        lum_d: luminosity distance in Mpc.

    Returns:
        list: unique sne.space source codes, for referencing purposes.
    """
    srcs = []
    #remove unnecessary column from sne.space data
    #(fix: DataFrame.drop returns a new frame; the original call discarded
    #the result, so the drop was a no-op)
    data = data.drop(['upperlimit'], axis=1)
    #remove data points with no associated error from sne.space data
    data = data[data.e_magnitude > 0]
    #select desired filters from sne.space data
    data = data[data.band.isin(['B', 'V', 'UVW2','I'])]
    #round datetime stamp to the day
    data.time = np.round(data.time, 0)
    #convert dates from dolphot data to MJD (rounded to the day)
    newcol = []
    for i in data_lt.date_obs:
        newcol.append(np.round(Time(i+'T00:00:00.000').mjd,0))
    data_lt['time'] = newcol
    #calculate distance modulus and print (will need as input for superbol)
    dmod = (5*np.log10(lum_d*(10**6)/10.))
    print(dmod)
    #list sources of data taken from sne.space, for referencing purposes
    for x in data['source']:
        if x not in srcs:
            srcs.append(x)
    #format sne.space data and write to a text file
    file1 = open('iPTF13bvn_UBVI.txt', 'w+')
    #group by observation date
    for k in data.time.unique():
        j = data[data.time == k]
        #select the data in this filter and average the magnitudes and magnitude uncertainties
        #for each observation date
        b = j[j.band == 'B']
        B = np.average(b['magnitude'])
        B_err= np.average(b['e_magnitude'])
        #subtract the foreground galactic extinction for this filter and host galaxy,
        #and the distance modulus for this object
        B = B - 0.184 - dmod
        #follow the same process for the other filters
        v = j[j.band == 'V']
        V = np.average(v['magnitude'])
        V_err = np.average(v['e_magnitude'])
        V = V - 0.139 - dmod
        u = j[j.band == 'UVW2']
        U = np.average(u['magnitude'])
        U_err = np.average(u['e_magnitude'])
        U = U - 0.220 - dmod
        i = j[j.band == 'I']
        I = np.average(i['magnitude'])
        I_err = np.average(i['e_magnitude'])
        #NOTE(review): unlike U/B/V above, the I-band magnitude is written
        #without subtracting extinction or the distance modulus -- confirm
        #whether this is intentional before using the output.
        #write to a tab delimited text file
        file1.writelines([str(k)+'\t', str(U)+'\t', str(U_err)+'\t', str(B)+'\t', str(B_err)+'\t', str(V)+'\t', str(V_err)+'\t', str(I)+'\t', str(I_err)+'\t \n'])
    file1.close()
    #follow the same process as above to format dolphot data and write to a separate text file
    file2 = open('iPTF13bvnlt_UBVI.txt', 'w+')
    for k in data_lt.time.unique():
        j = data_lt[data_lt.time == k]
        b = j[j.band.isin(['Extremely wide blue','WFPC2 B'])]
        B = np.average(b['m_avg'])
        B = B - 0.173 - dmod
        B_err= np.average(b['sigma_m_avg'])
        v = j[j.band == 'WFPC2 V']
        V = np.average(v['m_avg'])
        V = V - 0.145 - dmod
        V_err = np.average(v['sigma_m_avg'])
        u = j[j.band == 'UV wide']
        U = np.average(u['m_avg'])
        U = U - 0.355 - dmod
        U_err = np.average(u['sigma_m_avg'])
        i = j[j.band == 'Wfpc 2 Wide I']
        I = np.average(i['m_avg'])
        I = I - 0.078 - dmod
        I_err = np.average(i['sigma_m_avg'])
        file2.writelines([str(k)+'\t', str(U)+'\t', str(U_err)+'\t', str(B)+'\t', str(B_err)+'\t', str(V)+'\t', str(V_err)+'\t', str(I)+'\t', str(I_err)+'\t \n'])
    file2.close()
    #return references
    return srcs
#evaluate function to create text files for iPTF13bvn sne.space and dolphot photometry data
#(input names of data files read in at beginning of notebook, and luminosity distance from
#sne.space) and print reference codes
format_for_superbol_iPTF13bvn(iPTF13bvn, iPTF13bvn_lt, 19.94)
# The cells below follow a similar process for a few more SNe.
def format_for_superbol_1993J(data, data_lt, lum_d):
    """Format SN1993J photometry from sne.space and DOLPHOT for superbol.

    Writes 'SN1993J_UBVRI.txt' (sne.space) and 'SN1993Jlt_UBVRI.txt'
    (DOLPHOT): one row per observation date with MJD followed by
    magnitude/uncertainty pairs for U, B, V, R, I.  Magnitudes have the
    distance modulus and per-band foreground extinction subtracted.

    Args:
        data: pandas DataFrame of sne.space photometry.
        data_lt: pandas DataFrame of DOLPHOT-scraped photometry.
        lum_d: luminosity distance in Mpc.

    Returns:
        list: unique sne.space source codes, for referencing purposes.
    """
    srcs = []
    #fix: DataFrame.drop returns a new frame; the original call discarded
    #the result, so the drop was a no-op
    data = data.drop(['upperlimit','instrument','telescope'], axis=1)
    #instead of removing data with no associated uncertainty, assign a value of 0.1 to the uncertainty for that data
    #NOTE(review): fillna(0.1) fills NaNs in *every* column, including
    #magnitudes -- confirm this is intended rather than filling only
    #data['e_magnitude'].
    data = data.fillna(0.1)
    data = data[data.band.isin(['U','B', 'V', 'R','I'])]
    #round datetime stamp to the day
    data.time = np.round(data.time, 0)
    #convert dolphot observation dates to MJD (rounded to the day)
    newcol = []
    for i in data_lt.date_obs:
        newcol.append(np.round(Time(i+'T00:00:00.000').mjd,0))
    data_lt['time'] = newcol
    #distance modulus, printed because superbol needs it as input
    dmod = (5*np.log10(lum_d*(10**6)/10.))
    print(dmod)
    #collect unique sne.space source codes for referencing
    for x in data['source']:
        if x not in srcs:
            srcs.append(x)
    #per observation date, average each band's magnitudes, subtract
    #extinction and distance modulus, and write one tab-delimited row
    file1 = open('SN1993J_UBVRI.txt', 'w+')
    for k in data.time.unique():
        j = data[data.time == k]
        u = j[j.band == 'U']
        U = np.average(u['magnitude'])
        U = U - 0.348 - dmod
        U_err = np.average(u['e_magnitude'])
        b = j[j.band == 'B']
        B = np.average(b['magnitude'])
        B = B - 0.291 - dmod
        B_err= np.average(b['e_magnitude'])
        v = j[j.band == 'V']
        V = np.average(v['magnitude'])
        V = V - 0.220 - dmod
        V_err = np.average(v['e_magnitude'])
        r = j[j.band == 'R']
        R = np.average(r['magnitude'])
        R = R - 0.174 - dmod
        R_err = np.average(r['e_magnitude'])
        i = j[j.band == 'I']
        I = np.average(i['magnitude'])
        I = I - 0.121 - dmod
        I_err = np.average(i['e_magnitude'])
        file1.writelines([str(k)+'\t', str(U)+'\t', str(U_err)+'\t', str(B)+'\t', str(B_err)+'\t', str(V)+'\t', str(V_err)+'\t', str(R)+'\t', str(R_err)+'\t', str(I)+'\t', str(I_err)+'\t \n'])
    file1.close()
    #same process for the dolphot data, written to a separate file
    file2 = open('SN1993Jlt_UBVRI.txt', 'w+')
    for k in data_lt.time.unique():
        j = data_lt[data_lt.time == k]
        u = j[j.band == 'ISM feature']
        U = np.average(u['m_avg'])
        U = U - 0.622 - dmod
        U_err = np.average(u['sigma_m_avg'])
        b = j[j.band == 'WFPC2 B']
        B = np.average(b['m_avg'])
        B = B - 0.291 - dmod
        B_err= np.average(b['sigma_m_avg'])
        v = j[j.band == 'WFPC2 V']
        V = np.average(v['m_avg'])
        V = V - 0.229 - dmod
        V_err = np.average(v['sigma_m_avg'])
        r = j[j.band == 'SDSS r']
        R = np.average(r['m_avg'])
        R = R - 0.181 - dmod
        R_err = np.average(r['sigma_m_avg'])
        i = j[j.band == 'WFPC2 wide I']
        I = np.average(i['m_avg'])
        I = I - 0.124 - dmod
        I_err = np.average(i['sigma_m_avg'])
        file2.writelines([str(k)+'\t', str(U)+'\t', str(U_err)+'\t', str(B)+'\t', str(B_err)+'\t', str(V)+'\t', str(V_err)+'\t', str(R)+'\t', str(R_err)+'\t',str(I)+'\t', str(I_err)+'\t \n'])
    file2.close()
    return srcs
format_for_superbol_1993J(SN1993J,SN1993J_lt,2.9)
def format_for_superbol_2007uy(data, data_lt, lum_d):
    """Format SN2007uy photometry from sne.space and DOLPHOT for superbol.

    Writes 'SN2007uy_VI.txt' (sne.space) and 'SN2007uylt_VI.txt' (DOLPHOT):
    one row per observation date with MJD followed by magnitude/uncertainty
    pairs for the V and i'/I bands, corrected for distance modulus and
    per-band foreground extinction.

    Args:
        data: pandas DataFrame of sne.space photometry.
        data_lt: pandas DataFrame of DOLPHOT-scraped photometry.
        lum_d: luminosity distance in Mpc.

    Returns:
        list: unique sne.space source codes, for referencing purposes.
    """
    srcs = []
    #fix: DataFrame.drop returns a new frame; the original call discarded
    #the result, so the drop was a no-op
    data = data.drop(['upperlimit'], axis=1)
    #keep only points with an uncertainty, in the desired bands
    data = data[data.e_magnitude > 0]
    data = data[data.band.isin(['V',"i'"])]
    #round datetime stamp to the day
    data.time = np.round(data.time, 0)
    #convert dolphot observation dates to MJD (rounded to the day)
    newcol = []
    for i in data_lt.date_obs:
        newcol.append(np.round(Time(i+'T00:00:00.000').mjd,0))
    data_lt['time'] = newcol
    #distance modulus, printed because superbol needs it as input
    dmod = (5*np.log10(lum_d*(10**6)/10.))
    print(dmod)
    #collect unique sne.space source codes for referencing
    for x in data['source']:
        if x not in srcs:
            srcs.append(x)
    #per observation date, average each band, subtract extinction and
    #distance modulus, and write one tab-delimited row
    file1 = open('SN2007uy_VI.txt', 'w+')
    for k in data.time.unique():
        j = data[data.time == k]
        v = j[j.band == 'V']
        V = np.average(v['magnitude'])
        V = V - 0.062 - dmod
        V_err = np.average(v['e_magnitude'])
        i = j[j.band == "i'"]
        I = np.average(i['magnitude'])
        I = I - 0.038 - dmod
        I_err = np.average(i['e_magnitude'])
        file1.writelines([str(k)+'\t', str(V)+'\t', str(V_err)+'\t', str(I)+'\t', str(I_err)+'\t \n'])
    file1.close()
    #same process for the dolphot data, written to a separate file
    file2 = open('SN2007uylt_VI.txt', 'w+')
    for k in data_lt.time.unique():
        j = data_lt[data_lt.time == k]
        v = j[j.band == 'WFPC2 V']
        V = np.average(v['m_avg'])
        V = V - 0.064 - dmod
        V_err = np.average(v['sigma_m_avg'])
        i = j[j.band == 'WFPC2 wide I']
        I = np.average(i['m_avg'])
        I = I - 0.035 - dmod
        I_err = np.average(i['sigma_m_avg'])
        file2.writelines([str(k)+'\t', str(V)+'\t', str(V_err)+'\t', str(I)+'\t', str(I_err)+'\t \n'])
    file2.close()
    return srcs
format_for_superbol_2007uy(SN2007uy,SN2007uy_lt,26)
def format_for_superbol_2006aj(data, data_lt, lum_d):
    """Format SN2006aj photometry from sne.space and DOLPHOT for superbol.

    Writes 'SN2006aj_BVRI.txt' (sne.space) and 'SN2006ajlt_BVRI.txt'
    (DOLPHOT): one row per observation date with MJD followed by
    magnitude/uncertainty pairs for the B, V, r'/R and i'/I bands,
    corrected for distance modulus and per-band foreground extinction.

    Args:
        data: pandas DataFrame of sne.space photometry.
        data_lt: pandas DataFrame of DOLPHOT-scraped photometry.
        lum_d: luminosity distance in Mpc.

    Returns:
        list: unique sne.space source codes, for referencing purposes.
    """
    srcs = []
    #fix: DataFrame.drop returns a new frame; the original call discarded
    #the result, so the drop was a no-op
    data = data.drop(['upperlimit'], axis=1)
    #keep only points with an uncertainty, in the desired bands
    data = data[data.e_magnitude > 0]
    data = data[data.band.isin(['B', 'V', "r'","i'"])]
    #round datetime stamp to the day
    data.time = np.round(data.time, 0)
    #convert dolphot observation dates to MJD (rounded to the day)
    newcol = []
    for i in data_lt.date_obs:
        newcol.append(np.round(Time(i+'T00:00:00.000').mjd,0))
    data_lt['time'] = newcol
    #distance modulus, printed because superbol needs it as input
    dmod = (5*np.log10(lum_d*(10**6)/10.))
    print(dmod)
    #collect unique sne.space source codes for referencing
    for x in data['source']:
        if x not in srcs:
            srcs.append(x)
    #per observation date, average each band, subtract extinction and
    #distance modulus, and write one tab-delimited row
    file1 = open('SN2006aj_BVRI.txt', 'w+')
    for k in data.time.unique():
        j = data[data.time == k]
        b = j[j.band == 'B']
        B = np.average(b['magnitude'])
        B = B - 0.527 - dmod
        B_err= np.average(b['e_magnitude'])
        v = j[j.band == 'V']
        V = np.average(v['magnitude'])
        V = V - 0.399 - dmod
        V_err = np.average(v['e_magnitude'])
        r = j[j.band == "r'"]
        R = np.average(r['magnitude'])
        R = R - 0.332 - dmod
        R_err = np.average(r['e_magnitude'])
        i = j[j.band == "i'"]
        I = np.average(i['magnitude'])
        I = I - 0.247 - dmod
        I_err = np.average(i['e_magnitude'])
        file1.writelines([str(k)+'\t', str(B)+'\t', str(B_err)+'\t', str(V)+'\t', str(V_err)+'\t', str(R)+'\t', str(R_err)+'\t', str(I)+'\t', str(I_err)+'\t \n'])
    file1.close()
    #same process for the dolphot data, written to a separate file
    file2 = open('SN2006ajlt_BVRI.txt', 'w+')
    for k in data_lt.time.unique():
        j = data_lt[data_lt.time == k]
        #NOTE(review): '<NAME>' looks like an anonymization placeholder for
        #the real B-band label -- restore the original band name before use,
        #otherwise this selection matches nothing.
        b = j[j.band == '<NAME>']
        B = np.average(b['m_avg'])
        B = B - 0.527 - dmod
        B_err= np.average(b['sigma_m_avg'])
        v = j[j.band == 'WFPC2 V']
        V = np.average(v['m_avg'])
        V = V - 0.399 - dmod
        V_err = np.average(v['sigma_m_avg'])
        r = j[j.band == "SDSS r'"]
        R = np.average(r['m_avg'])
        R = R - 0.332 - dmod
        R_err = np.average(r['sigma_m_avg'])
        i = j[j.band == 'Wfpc 2 Wide I']
        I = np.average(i['m_avg'])
        I = I - 0.219 - dmod
        I_err = np.average(i['sigma_m_avg'])
        file2.writelines([str(k)+'\t', str(B)+'\t', str(B_err)+'\t', str(V)+'\t', str(V_err)+'\t', str(R)+'\t', str(R_err)+'\t', str(I)+'\t', str(I_err)+'\t \n'])
    file2.close()
    return srcs
format_for_superbol_2006aj(SN2006aj,SN2006aj_lt,149.8)
| convert_for_superbol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp softwares.fastq.minimap2
# -
# # minimap2
# +
# export
from pybiotools4p.softwares.base import Base, modify_cmd
from pybiotools4p.utils import dict_to_paras
# +
# export
class Minimap2(Base):
    """Command builder for the minimap2 long-read aligner.

    Each ``cmd_*`` method returns a shell command string; the alignment
    pipelines pipe minimap2's SAM output through samtools for BAM
    conversion, sorting and indexing.
    """
    def __init__(self, software, fd):
        """
        :param software: name of / path to the minimap2 executable
        :param fd: dict of default option strings, keyed by
            'splice_align' and 'nonsplice_align'
        """
        super(Minimap2, self).__init__(software)
        self._default = fd
    @modify_cmd
    def cmd_version(self):
        '''
        :return: shell command that echoes this wrapper's repr and prints
            the minimap2 version
        '''
        return 'echo {repr} ;{software} --version'.format(
            repr=self.__repr__(),
            software=self._software
        )
    @modify_cmd
    def cmd_splice_align(self,reference,fastq,samtools,samtools_idx,outbam,ext_paras=None):
        '''
        Spliced alignment pipeline (e.g. for RNA/cDNA reads).

        :param reference: path to the reference fasta
        :param fastq: path to the input fastq
        :param samtools: a Samtools wrapper used to build the sam->bam,
            sort and index stages
        :param samtools_idx: path to the reference index for samtools
        :param outbam: path of the sorted output bam
        :param ext_paras: optional dict of extra minimap2 parameters
        :return: shell command string
        '''
        # fix: avoid a mutable default argument ({}) -- use a None sentinel
        if ext_paras is None:
            ext_paras = {}
        return r'''
{software} {splice_align} {ext_p} -a {reference} {fastq} | {samtools_sam2bam} | {samtools_sort}
{samtools_index}
            '''.format(
            software=self._software,
            splice_align=self._default['splice_align'],
            samtools_sam2bam=samtools.cmd_sam2bam(samtools_idx, '-', bamfile=None),
            samtools_sort=samtools.cmd_sort('-', sortbam=outbam),
            samtools_index=samtools.cmd_index(outbam),
            ext_p=dict_to_paras(ext_paras),
            **locals()
        )
    @modify_cmd
    def cmd_nonsplice_align(self,reference,fastq,samtools,samtools_idx,outbam,ext_paras=None):
        '''
        Unspliced alignment pipeline (e.g. for genomic reads).

        Parameters are the same as :meth:`cmd_splice_align`.
        :return: shell command string
        '''
        # fix: avoid a mutable default argument ({}) -- use a None sentinel
        if ext_paras is None:
            ext_paras = {}
        return r'''
{software} {nonsplice_align} {ext_p} -a {reference} {fastq} | {samtools_sam2bam} | {samtools_sort}
{samtools_index}
            '''.format(
            software=self._software,
            nonsplice_align=self._default['nonsplice_align'],
            samtools_sam2bam=samtools.cmd_sam2bam(samtools_idx, '-', bamfile=None),
            samtools_sort=samtools.cmd_sort('-', sortbam=outbam),
            samtools_index=samtools.cmd_index(outbam),
            ext_p=dict_to_paras(ext_paras),
            **locals()
        )
    def __repr__(self):
        return 'minimap2:' + self._software
    def __str__(self):
        return 'A versatile pairwise aligner for genomic and spliced nucleotide sequences'
# +
import configparser
# load default option strings for each tool from the package config
config=configparser.ConfigParser()
config.read('pybiotools4p/default.ini')
from pybiotools4p.softwares.bam.samtools import Samtools
samtools=Samtools('samtools',config['samtools'])
minimap2=Minimap2('minimap2',config['minimap2'])
# -
reference='biology-test-data/fasta/Homo_sapiens.GRCh38.dna.primary_assembly.chromosome22.fa'
samtools_idx='biology-test-data/fasta/Homo_sapiens.GRCh38.dna.primary_assembly.chromosome22.fa.fai'
fastq = '/path/to/fastq'
outbam='/path/to/bam'
# NOTE(review): '--junbed' looks like a typo for minimap2's '--junc-bed'
# option -- confirm against the minimap2 manual.
print(minimap2.cmd_splice_align(reference,fastq,samtools,samtools_idx,outbam,{'--junbed':'abc.bed'}))
print(minimap2.cmd_nonsplice_align(reference,fastq,samtools,samtools_idx,outbam))
| 12_minimap2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic Usage
#
# This section is going to take you through a whirlwind through of all of [**priwo**](https://github.com/astrogewgaw/priwo)'s capabilities, so hang on tight! [**priwo**](https://github.com/astrogewgaw/priwo) comes with a couple of simple functions that give you, the user, some information about itself, such as what is the maximum chunk size that it uses for reading large amounts of data, or what are the data formats that it supports. [**priwo**](https://github.com/astrogewgaw/priwo) can report stuff like this two ways: directly via the command line(note that the exclamation mark at the beginning allows us to run shell commands in a Jupyter notebook):
# !priwo max
# !priwo available
# or, programatically, via the `max_size` and `available_formats` functions:
# +
import priwo
# report the maximum chunk size priwo uses when reading large data
priwo.max_size()
# -
# report the pulsar data formats priwo can read
priwo.available_formats()
# As you can see, [**priwo**](https://github.com/astrogewgaw/priwo) supports quite a few pulsar data formats. If you want to read in data from any of them, just import the corresponding function. All function names in [**priwo**](https://github.com/astrogewgaw/priwo) have the format: read_*extension*. For instance, to read in a `*.dat` file, you can do:
# +
from priwo import read_dat
# read a *.dat time series; read_dat returns a dict whose 'data' key
# holds the samples
ts = read_dat("data/Lband_DM62.00.dat")
ts
# -
# The data from the time series file is in the *data* key. We can take a look at what our time series looks like, using [**matplotlib**](https://matplotlib.org/):
# +
# %matplotlib inline
import matplotlib.pyplot as plt
# plot the time series samples; the source name is stored under 'object'
plt.plot(ts["data"])
plt.xlabel("Sample index, $i$")
plt.ylabel("Amplitude")
plt.title(ts["object"])
| examples/basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 6 : Data Loading and Manipulation and Visulatiozation
# ### You can use the following liberaries for your assignment:
# > Numpy, Pandas, Matplotlib, Seaborn, LASIO, Welly
# ## Kindly load the las file of Exported.csv file from the data folder
# ## Perform the below Tasks:
#
# >1. Investigate the component of the data file (number of columns , numbers of observations, Null values, normal statistics)
# 2. Plot the null values as bars
# 3. Create a copy of the data frame and drop the NAN values
# 3. Use the other copy to fill-in NAN values.
# 4. Which option do you prefer to work with regarding the relationship with PHIE and DT or PHIE and RHOB
#
#
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import lasio
import welly
from IPython.display import display
# -
# NOTE(review): absolute Windows-specific path — a relative path would make
# the notebook portable across machines.
df = pd.read_csv("C:/Users/HP/Documents/GitHub/GeoML-2.0/10DaysChallenge/Dutch_F3_Logs/Exported.csv", delimiter=',')
display(df)
# +
#1) Data Investigation
display('Col, Rows',df.shape)
# df.info() prints its own report and returns None, so display() shows 'None'.
display('info',df.info())
display('Null values',df.isna().sum())
display('Null values%',df.isna().sum()/ df.shape[0] *100)
display('normal statistics',df.describe())
# -
#2) Plot the null values as bars
df.isnull().sum().plot(kind = 'bar', title = 'Missing Values Comparison', figsize=(20,6))
#3) Create a copy of the data frame and drop the NAN values
df_dropna=df.copy()
print('original df =',df_dropna.shape)
df_dropna.dropna(inplace=True)
print('after removing nans =',df_dropna.shape)
print('=' *100)
print('This dataset has {0} rows and {1} columns'.format(df_dropna.shape[0],df_dropna.shape[1]))
#3) Use the other copy to fill-in NAN values (interpolation).
df_fillna=df.copy()
# Linear interpolation along the index; NOTE(review): limit_direction='backward'
# only propagates fills backwards — confirm that is the intent.
df_fillna=df_fillna.interpolate(method ='linear', limit_direction ='backward')
display('after filling nans',df_fillna)
# +
#4) PHIE vs. DT comparison
# Scatter PHIE against DT for both cleaning strategies so they can be compared.
plt.figure(figsize=(10,5))
plt.scatter(df_dropna.PHIE, df_dropna.DT)
plt.title('PHIE vs DT plot after removing null values ')
plt.xlabel('PHIE')
plt.ylabel('DT')
plt.grid()
plt.show();
plt.figure(figsize=(10,5))
# Bug fix: DT must come from the interpolated frame (was df.DT, the original
# frame that still contains NaNs, which mixed the two datasets in one plot).
plt.scatter(df_fillna.PHIE, df_fillna.DT)
plt.title('PHIE vs DT plot after filling null values using interpolation')
plt.xlabel('PHIE')
plt.ylabel('DT')
plt.grid()
plt.show();
# +
#4) PHIE vs. RHOB comparison
# Scatter PHIE against RHOB for both cleaning strategies so they can be compared.
plt.figure(figsize=(10,5))
plt.scatter(df_dropna.PHIE, df_dropna.RHOB)
plt.title('PHIE vs RHOB plot after removing null values ')
plt.xlabel('PHIE')
plt.ylabel('RHOB')
plt.grid()
plt.show();
plt.figure(figsize=(10,5))
# Bug fix: RHOB must come from the interpolated frame (was df.RHOB, the
# original frame that still contains NaNs, mixing the two datasets).
plt.scatter(df_fillna.PHIE, df_fillna.RHOB)
plt.title('PHIE vs RHOB plot after filling null values using interpolation')
plt.xlabel('PHIE')
plt.ylabel('RHOB')
plt.grid()
plt.show();
# -
# #Removing null values gives less odd values in the logs interrelations than filling with interpolation method
| RY Day6 of 10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np

fig = plt.figure(1, (12, 8))
# Fix: fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed
# in 3.6; add_subplot is the supported way to create a 3-D axes.
ax = fig.add_subplot(projection='3d')

# Velocity vector of the particle.
a = np.array([1, 1, 0])

# Showing the velocity vector, anchored at the particle position P = (1, 0, 0)
ax.quiver(1, 0, 0, *a, length=1, color='green', linewidth=4)

# Setting the axes properties
ax.set(xlim3d=(-2, 2), xlabel=r'$X$')
ax.set(ylim3d=(-2, 2), ylabel=r'$Y$')
ax.set(zlim3d=(-2, 2), zlabel=r'$Z$')

# Showing the position of the particle P
ax.scatter(1, 0, 0, color='b', label='P', s=25)
# Showing the position of the observer O
ax.scatter(0, 0, 0, color='r', label='O', s=9)

# Text labels next to O and P (oriented along the x axis).
ax.text(-0.1, 0, 0.1, "O", "x", size=16)
ax.text(0.9, 0, 0.1, "P", "x", size=16)

ax.grid(False)
plt.show()

# Saving to a file
fig.savefig('MovingParticle.png')
# -
| Chapter04/Section02/scripts/MovingParticle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/git-guocc/d2l-zh/blob/master/chapter_computer-vision/semantic-segmentation-and-dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-z9Od0igU1ie"
# The following additional libraries are needed to run this
# notebook. Note that running on Colab is experimental, please report a Github
# issue if you have any problem.
# + id="7VUfWHGEU1ij" outputId="ee378b9f-f12c-4cbb-963e-2f2569bd357a" colab={"base_uri": "https://localhost:8080/"}
# !pip install git+https://github.com/d2l-ai/d2l-zh@release # installing d2l
# + [markdown] origin_pos=0 id="pvcXwa9aU1il"
# # 语义分割和数据集
# :label:`sec_semantic_segmentation`
#
# 在 :numref:`sec_bbox`— :numref:`sec_rcnn`中讨论的目标检测问题中,我们一直使用方形边界框来标注和预测图像中的目标。
# 本节将探讨*语义分割*(semantic segmentation)问题,它重点关注于如何将图像分割成属于不同语义类别的区域。
# 与目标检测不同,语义分割可以识别并理解图像中每一个像素的内容:其语义区域的标注和预测是像素级的。
# :numref:`fig_segmentation`展示了语义分割中图像有关狗、猫和背景的标签。
# 与目标检测相比,语义分割标注的像素级的边框显然更加精细。
#
# 
# :label:`fig_segmentation`
#
# ## 图像分割和实例分割
#
# 计算机视觉领域还有2个与语义分割相似的重要问题,即*图像分割*(image segmentation)和*实例分割*(instance segmentation)。
# 我们在这里将它们同语义分割简单区分一下。
#
# * *图像分割*将图像划分为若干组成区域,这类问题的方法通常利用图像中像素之间的相关性。它在训练时不需要有关图像像素的标签信息,在预测时也无法保证分割出的区域具有我们希望得到的语义。以 :numref:`fig_segmentation`中的图像作为输入,图像分割可能会将狗分为两个区域:一个覆盖以黑色为主的嘴和眼睛,另一个覆盖以黄色为主的其余部分身体。
# * *实例分割*也叫*同时检测并分割*(simultaneous detection and segmentation),它研究如何识别图像中各个目标实例的像素级区域。与语义分割不同,实例分割不仅需要区分语义,还要区分不同的目标实例。例如,如果图像中有两条狗,则实例分割需要区分像素属于的两条狗中的哪一条。
#
# ## Pascal VOC2012 语义分割数据集
#
# [**最重要的语义分割数据集之一是[Pascal VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/)。**]
# 下面我们深入了解一下这个数据集。
#
# + origin_pos=2 tab=["pytorch"] id="H5zFmSPoU1im"
# %matplotlib inline
import os
import torch
import torchvision
from d2l import torch as d2l
# + [markdown] origin_pos=3 id="4tS3oGv1U1in"
# 数据集的tar文件大约为2GB,所以下载可能需要一段时间。
# 提取出的数据集位于`../data/VOCdevkit/VOC2012`。
#
# + origin_pos=4 tab=["pytorch"] id="yPaejG2rU1io" outputId="906b7771-9cdd-4b02-b1f2-f2a43861bb15" colab={"base_uri": "https://localhost:8080/"}
d2l.DATA_HUB['voc2012'] = (d2l.DATA_URL + 'VOCtrainval_11-May-2012.tar',
'4e443f8a2eca6b1dac8a6c57641b67dd40621a49')
voc_dir = d2l.download_extract('voc2012', 'VOCdevkit/VOC2012')
# + [markdown] origin_pos=5 id="iU_SIouGU1io"
# 进入路径`../data/VOCdevkit/VOC2012`之后,我们可以看到数据集的不同组件。
# `ImageSets/Segmentation`路径包含用于训练和测试样本的文本文件,而`JPEGImages`和`SegmentationClass`路径分别存储着每个示例的输入图像和标签。
# 此处的标签也采用图像格式,其尺寸和它所标注的输入图像的尺寸相同。
# 此外,标签中颜色相同的像素属于同一个语义类别。
# 下面将`read_voc_images`函数定义为[**将所有输入的图像和标签读入内存**]。
#
# + origin_pos=7 tab=["pytorch"] id="ralhgb2OU1ip"
def read_voc_images(voc_dir, is_train=True):
    """Read all VOC images and their segmentation labels into memory.

    Args:
        voc_dir: root of the extracted VOCdevkit/VOC2012 dataset.
        is_train: read the training split if True, otherwise the validation split.

    Returns:
        (features, labels): two parallel lists of image tensors; labels are
        decoded as RGB so each pixel's colour encodes its class.
    """
    txt_fname = os.path.join(voc_dir, 'ImageSets', 'Segmentation',
                             'train.txt' if is_train else 'val.txt')
    # Force RGB decoding so label colours are not collapsed to a palette index.
    mode = torchvision.io.image.ImageReadMode.RGB
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    features, labels = [], []
    for i, fname in enumerate(images):
        features.append(torchvision.io.read_image(os.path.join(
            voc_dir, 'JPEGImages', f'{fname}.jpg')))
        labels.append(torchvision.io.read_image(os.path.join(
            voc_dir, 'SegmentationClass' ,f'{fname}.png'), mode))
    return features, labels

train_features, train_labels = read_voc_images(voc_dir, True)
# + [markdown] origin_pos=8 id="E1VgNgIiU1iq"
# 下面我们[**绘制前5个输入图像及其标签**]。
# 在标签图像中,白色和黑色分别表示边框和背景,而其他颜色则对应不同的类别。
#
# + origin_pos=10 tab=["pytorch"] id="rymmEK-NU1ir" outputId="8035a182-c4de-4646-d761-8ab48504b05c" colab={"base_uri": "https://localhost:8080/", "height": 194}
n = 5
imgs = train_features[0:n] + train_labels[0:n]
imgs = [img.permute(1,2,0) for img in imgs]
d2l.show_images(imgs, 2, n);
# + [markdown] origin_pos=11 id="ZmTQqwMRU1iu"
# 接下来,我们[**列举RGB颜色值和类名**]。
#
# + origin_pos=12 tab=["pytorch"] id="aPu8WorJU1iu"
# RGB colour painted in the label images for each of the 21 VOC classes;
# VOC_COLORMAP[i] is the colour of class VOC_CLASSES[i].
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128]]

# Human-readable class names, index-aligned with VOC_COLORMAP.
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
               'diningtable', 'dog', 'horse', 'motorbike', 'person',
               'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
# + [markdown] origin_pos=13 id="O-eLJ4p6U1iv"
# 通过上面定义的两个常量,我们可以方便地[**查找标签中每个像素的类索引**]。
# 我们定义了`voc_colormap2label`函数来构建从上述RGB颜色值到类别索引的映射,而`voc_label_indices`函数将RGB值映射到在Pascal VOC2012数据集中的类别索引。
#
# + origin_pos=15 tab=["pytorch"] id="rAjrV_5VU1iw"
def voc_colormap2label():
    """Build a lookup table mapping packed RGB values to VOC class indices.

    The table has one slot per possible 24-bit colour; colours not in
    VOC_COLORMAP map to 0 (background).
    """
    lookup = torch.zeros(256 ** 3, dtype=torch.long)
    for class_idx, (r, g, b) in enumerate(VOC_COLORMAP):
        lookup[(r * 256 + g) * 256 + b] = class_idx
    return lookup
def voc_label_indices(colormap, colormap2label):
"""将VOC标签中的RGB值映射到它们的类别索引"""
colormap = colormap.permute(1, 2, 0).numpy().astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
# + [markdown] origin_pos=16 id="PY3F4-HOU1iw"
# [**例如**],在第一张样本图像中,飞机头部区域的类别索引为1,而背景索引为0。
#
# + origin_pos=17 tab=["pytorch"] id="HaULkudaU1iw" outputId="ed8afabd-6cf3-4c74-b56f-d3779e17ab50"
y = voc_label_indices(train_labels[0], voc_colormap2label())
y[105:115, 130:140], VOC_CLASSES[1]
# + [markdown] origin_pos=18 id="tBJFErJvU1ix"
# ### 预处理数据
#
# 在之前的实验,例如 :numref:`sec_alexnet`— :numref:`sec_googlenet`中,我们通过再缩放图像使其符合模型的输入形状。
# 然而在语义分割中,这样做需要将预测的像素类别重新映射回原始尺寸的输入图像。
# 这样的映射可能不够精确,尤其在不同语义的分割区域。
# 为了避免这个问题,我们将图像裁剪为固定尺寸,而不是再缩放。
# 具体来说,我们[**使用图像增广中的随机裁剪,裁剪输入图像和标签的相同区域**]。
#
# + origin_pos=20 tab=["pytorch"] id="KUIJZObfU1ix"
def voc_rand_crop(feature, label, height, width):
    """Randomly crop the same height x width region from image and label.

    A single crop rectangle is sampled and applied to both tensors so the
    pixel-level correspondence between input and segmentation label is kept.
    """
    rect = torchvision.transforms.RandomCrop.get_params(
        feature, (height, width))
    feature = torchvision.transforms.functional.crop(feature, *rect)
    label = torchvision.transforms.functional.crop(label, *rect)
    return feature, label
# + origin_pos=22 tab=["pytorch"] id="IO26HrFKU1ix" outputId="96dbb331-3819-4fea-e66a-e329117766fb"
imgs = []
for _ in range(n):
imgs += voc_rand_crop(train_features[0], train_labels[0], 200, 300)
imgs = [img.permute(1, 2, 0) for img in imgs]
d2l.show_images(imgs[::2] + imgs[1::2], 2, n);
# + [markdown] origin_pos=23 id="A5sXOFqxU1iy"
# ### [**自定义语义分割数据集类**]
#
# 我们通过继承高级API提供的`Dataset`类,自定义了一个语义分割数据集类`VOCSegDataset`。
# 通过实现`__getitem__`函数,我们可以任意访问数据集中索引为`idx`的输入图像及其每个像素的类别索引。
# 由于数据集中有些图像的尺寸可能小于随机裁剪所指定的输出尺寸,这些样本可以通过自定义的`filter`函数移除掉。
# 此外,我们还定义了`normalize_image`函数,从而对输入图像的RGB三个通道的值分别做标准化。
#
# + origin_pos=25 tab=["pytorch"] id="M1XqSa-uU1iy"
class VOCSegDataset(torch.utils.data.Dataset):
    """A custom Dataset for loading the VOC semantic-segmentation data."""
    def __init__(self, is_train, crop_size, voc_dir):
        # Per-channel ImageNet normalization applied to every input image.
        self.transform = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.crop_size = crop_size
        features, labels = read_voc_images(voc_dir, is_train=is_train)
        # Drop images smaller than the crop size, then normalize the rest.
        self.features = [self.normalize_image(feature)
                         for feature in self.filter(features)]
        self.labels = self.filter(labels)
        self.colormap2label = voc_colormap2label()
        print('read ' + str(len(self.features)) + ' examples')
    def normalize_image(self, img):
        # Scale uint8 pixels to [0, 1] before applying the normalization.
        return self.transform(img.float() / 255)
    def filter(self, imgs):
        # Keep only images at least as large as the crop in both dimensions.
        return [img for img in imgs if (
            img.shape[1] >= self.crop_size[0] and
            img.shape[2] >= self.crop_size[1])]
    def __getitem__(self, idx):
        # Crop image and label identically, then map label RGB to class indices.
        feature, label = voc_rand_crop(self.features[idx], self.labels[idx],
                                       *self.crop_size)
        return (feature, voc_label_indices(label, self.colormap2label))
    def __len__(self):
        return len(self.features)
# + [markdown] origin_pos=26 id="0QdPnmfYU1iz"
# ### [**读取数据集**]
#
# 我们通过自定义的`VOCSegDataset`类来分别创建训练集和测试集的实例。
# 假设我们指定随机裁剪的输出图像的形状为$320\times 480$,
# 下面我们可以查看训练集和测试集所保留的样本个数。
#
# + origin_pos=27 tab=["pytorch"] id="5fKKke35U1iz" outputId="03cf7dad-d0a0-4c08-c2e7-c1eae9fff543"
crop_size = (320, 480)
voc_train = VOCSegDataset(True, crop_size, voc_dir)
voc_test = VOCSegDataset(False, crop_size, voc_dir)
# + [markdown] origin_pos=28 id="s18vvJQmU1iz"
# 设批量大小为64,我们定义训练集的迭代器。
# 打印第一个小批量的形状会发现:与图像分类或目标检测不同,这里的标签是一个三维数组。
#
# + origin_pos=30 tab=["pytorch"] id="AUHhhwptU1iz" outputId="90b21239-1f31-4d7f-e55c-a7be82674ca3"
batch_size = 64
train_iter = torch.utils.data.DataLoader(voc_train, batch_size, shuffle=True,
drop_last=True,
num_workers=d2l.get_dataloader_workers())
for X, Y in train_iter:
print(X.shape)
print(Y.shape)
break
# + [markdown] origin_pos=31 id="G4qKb2CCU1i0"
# ### [**整合所有组件**]
#
# 最后,我们定义以下`load_data_voc`函数来下载并读取Pascal VOC2012语义分割数据集。
# 它返回训练集和测试集的数据迭代器。
#
# + origin_pos=33 tab=["pytorch"] id="TK_jxj_KU1i0"
#@save
def load_data_voc(batch_size, crop_size):
    """Download and load the Pascal VOC2012 semantic-segmentation dataset.

    Returns the training and test DataLoader iterators. Training batches are
    shuffled; both loaders drop incomplete final batches.
    """
    voc_dir = d2l.download_extract('voc2012', os.path.join(
        'VOCdevkit', 'VOC2012'))
    num_workers = d2l.get_dataloader_workers()
    train_iter = torch.utils.data.DataLoader(
        VOCSegDataset(True, crop_size, voc_dir), batch_size,
        shuffle=True, drop_last=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(
        VOCSegDataset(False, crop_size, voc_dir), batch_size,
        drop_last=True, num_workers=num_workers)
    return train_iter, test_iter
# + [markdown] origin_pos=34 id="kbxGrTOYU1i0"
# ## 小结
#
# * 语义分割通过将图像划分为属于不同语义类别的区域,来识别并理解图像中像素级别的内容。
# * 语义分割的一个重要的数据集叫做Pascal VOC2012。
# * 由于语义分割的输入图像和标签在像素上一一对应,输入图像会被随机裁剪为固定尺寸而不是缩放。
#
# ## 练习
#
# 1. 如何在自动驾驶和医疗图像诊断中应用语义分割?还能想到其他领域的应用吗?
# 1. 回想一下 :numref:`sec_image_augmentation`中对数据增强的描述。图像分类中使用的哪种图像增强方法是难以用于语义分割的?
#
# + [markdown] origin_pos=36 tab=["pytorch"] id="W774c3NmU1i1"
# [Discussions](https://discuss.d2l.ai/t/3295)
#
| chapter_computer-vision/semantic-segmentation-and-dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Plots the control experiment that varies the number of reference sequences for integrated gradients and DeepSHAP (Supplementary Fig. 16)
import os
import numpy as np
from six.moves import cPickle
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the reference-sequence sweep results saved by the step-1 script.
results_path = os.path.join('../../results/', 'task3')
file_path = os.path.join(results_path, 'num_background_sweep.pickle')
with open(file_path, 'rb') as f:
    results = cPickle.load(f)
# +
# Numbers of background (reference) sequences evaluated in the sweep.
num_backgrounds = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25]
# Mean over trials (axis=2) per method (DeepSHAP / integrated gradients)
# and activation (relu / exponential); columns are the metrics plotted below.
shap_vals_relu = np.mean(results['relu']['deepshap'], axis=2)
shap_vals_exp = np.mean(results['exponential']['deepshap'], axis=2)
ig_vals_relu = np.mean(results['relu']['integratedgrad'], axis=2)
ig_vals_exp = np.mean(results['exponential']['integratedgrad'], axis=2)
# Matching standard deviations, used for the shaded error bands.
shap_vals_relu_std = np.std(results['relu']['deepshap'], axis=2)
shap_vals_exp_std = np.std(results['exponential']['deepshap'], axis=2)
ig_vals_relu_std = np.std(results['relu']['integratedgrad'], axis=2)
ig_vals_exp_std = np.std(results['exponential']['integratedgrad'], axis=2)
# +
# 2x2 figure: rows = metric (AUROC top, AUPR bottom), columns = attribution
# method (DeepSHAP left, integrated gradients right). Each panel plots the
# relu and exponential models with +/- 1 std shaded bands.
fig = plt.figure(figsize=(10,7))
# Top-left: DeepSHAP AUROC (column 0 of the metric axis).
ax= plt.subplot(2,2,1)
x = num_backgrounds
y = shap_vals_relu[:,0]
ax.plot(x, y)
error = shap_vals_relu_std[:,0]
ax.fill_between(x, y-error, y+error, alpha=0.3)
#plt.plot(num_backgrounds, shap_vals_exp[:,0])
y = shap_vals_exp[:,0]
ax.plot(x, y)
error = shap_vals_exp_std[:,0]
ax.fill_between(x, y-error, y+error, alpha=0.3)
#plt.xlabel('Number of backgrounds', fontsize=12)
plt.ylabel('AUROC', fontsize=12)
plt.yticks([0.65, 0.7, 0.75, 0.8, .85], fontsize=12)
plt.xticks(num_backgrounds, fontsize=12);
# Hide x tick labels here; the bottom row carries them.
ax.tick_params(labelbottom=False)
ax.set_ybound([.647,0.865])
plt.title('DeepSHAP', fontsize=12)
# Bottom-left: DeepSHAP AUPR (column 1 of the metric axis).
ax = plt.subplot(2,2,3)
y = shap_vals_relu[:,1]
plt.plot(x, y)
error = shap_vals_relu_std[:,1]
plt.fill_between(x, y-error, y+error, alpha=0.3)
#plt.plot(num_backgrounds, shap_vals_exp[:,0])
y = shap_vals_exp[:,1]
plt.plot(x, y)
error = shap_vals_exp_std[:,1]
plt.fill_between(x, y-error, y+error, alpha=0.3)
plt.xlabel('Number of backgrounds', fontsize=12)
plt.ylabel('AUPR', fontsize=12)
plt.yticks([ 0.5, 0.6, 0.7, 0.8], fontsize=12)
plt.xticks(num_backgrounds, fontsize=12);
ax.set_ybound([.42,0.81])
# Top-right: integrated gradients AUROC.
ax = plt.subplot(2,2,2)
x = num_backgrounds
y = ig_vals_relu[:,0]
ax.plot(x, y)
error = ig_vals_relu_std[:,0]
ax.fill_between(x, y-error, y+error, alpha=0.3)
#plt.plot(num_backgrounds, shap_vals_exp[:,0])
y = ig_vals_exp[:,0]
ax.plot(x, y)
error = ig_vals_exp_std[:,0]
ax.fill_between(x, y-error, y+error, alpha=0.3)
#plt.xlabel('Number of backgrounds', fontsize=12)
#plt.ylabel('AUROC', fontsize=12)
plt.yticks([0.65, 0.7, 0.75, 0.8, .85], fontsize=12)
plt.xticks(num_backgrounds, fontsize=12);
# Right column shares the left column's y labels, so hide them here too.
ax.tick_params(labelbottom=False)
ax.tick_params(labelleft=False)
ax.set_ybound([.647,0.865])
plt.title('Integrated gradients', fontsize=12)
# Bottom-right: integrated gradients AUPR.
ax = plt.subplot(2,2,4)
y = ig_vals_relu[:,1]
plt.plot(x, y)
error = ig_vals_relu_std[:,1]
plt.fill_between(x, y-error, y+error, alpha=0.3)
#plt.plot(num_backgrounds, shap_vals_exp[:,0])
y = ig_vals_exp[:,1]
plt.plot(x, y)
error = ig_vals_exp_std[:,1]
plt.fill_between(x, y-error, y+error, alpha=0.3)
plt.xlabel('Number of backgrounds', fontsize=12)
#plt.ylabel('AUPR', fontsize=12)
ax.tick_params(labelleft=False)
plt.yticks([ 0.5, 0.6, 0.7, 0.8], fontsize=12)
ax.set_ybound([.42,0.81])
plt.xticks(num_backgrounds, fontsize=12);
plt.subplots_adjust(wspace=0.05, hspace=0.08)
# Write the figure as a PDF next to the pickle it was built from.
outfile = os.path.join(results_path, 'task3_num_background_sweep')
fig.savefig(outfile, format='pdf', dpi=200, bbox_inches='tight')
# -
| code/controls/background_analysis_step2_plot_sweep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introducción
#
# <a id='c1'></a>
# ## Sistema Operativo
#
#
# <img src="https://www.howtogeek.com/thumbcache/2/200/8b2cb8c7c5fc73604d66fd5f0c38be7a/wp-content/uploads/2018/08/img_5b68e80f77e33.png" alt="" align="center"/>
#
#
# * Personalmente recomiendo **Linux**, en particular distribuciones como Ubuntu, Mint o Fedora por su facilidad a la hora de instalar.
# * En ocasiones las implementaciones en **Windows** no están completamente integradas e inclusive en ocasiones no están disponibles.
# - Una alternativa es [**Windows Subsystem for Linux**](https://docs.microsoft.com/en-us/windows/wsl/about), pero lamentablemente no se asegura un 100% de compatibilidad.
# * En el caso que poseas un equipo con **macOS** no debería haber problema.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Interfaz de Línea de Comandos (*Command Line Interface* / CLI)
#
# * Es un método que permite a los usuarios interactuar con algún programa informático por medio de líneas de texto.
# * Típicamente se hace uso de una terminal/*shell* (ver imagen).
# * En el día a día dentro de la oficina facilita flujo de trabajo.
# * Permite moverse entre manipular directorios y ficheros, instalar/actualizar herramientas, aplicaciones, softwares, etc.
# + [markdown] slideshow={"slide_type": "-"}
# <img src="https://upload.wikimedia.org/wikipedia/commons/2/29/Linux_command-line._Bash._GNOME_Terminal._screenshot.png" alt="" align="center"/>
#
# *Screenshot of a sample bash session in GNOME Terminal 3, Fedora 15. [Wikipedia](https://en.wikipedia.org/wiki/Command-line_interface)*
# -
# <a id='c3'></a>
# ## Python
#
# <img src="https://wordsofthislife.com/wp-content/uploads/2013/11/python_icon-1.png" alt="" width="300" height="300" align="center"/>
#
# [Python](https://www.python.org/) es un lenguaje de programación interpretado cuya filosofía hace hincapié en la legibilidad de su código.
#
# Se trata de un lenguaje de programación multiparadigma, ya que soporta orientación a objetos, programación imperativa y, en menor medida, programación funcional. Es un lenguaje interpretado, dinámico y multiplataforma.
#
# Las principales librerías científicas a instalar y que ocuparemos durante el curso son:
#
# * [Numpy](http://www.numpy.org/): Computación científica.
# * [Pandas](https://pandas.pydata.org/): Análisis de datos.
# * [Matplotlib](https://matplotlib.org/): Visualización.
# * [Scikit-Learn](http://scikit-learn.org/stable/): Machine Learning
#
# Durante el curso se ocuparán más librerías a modo de complementación (ejemplo, scipy, seaborn, statsmodels ,etc.)
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='c2'></a>
# ### Entorno Virtual
#
# <img src="https://files.virgool.io/upload/users/63719/posts/bko9k535kg6q/j1xiuolo2c9c.png" alt="" align="center"/>
#
#
# __Problemas recurrentes:__
# - Dependencias de librerías (*packages*) incompatibles.
# - Dificultad a la hora de compartir y reproducir resultados, e.g. no conocer las versiones de las librerías instaladas.
# - Tener una máquina virtual para cada desarrollo es tedioso y costoso.
# - Miedo constante a instalar algo nuevo y tu script vuelva a funcionar.
# + [markdown] slideshow={"slide_type": "subslide"}
# **Solución**
#
# Aislar el desarrollo con tal de mejorar la compatibilidad y reproducibilidad de resultados.
# -
# **Para el curso (es recomendable)**
#
# 
#
# *Package, dependency and environment management for any language—Python, R, Ruby, Lua, Scala, Java, JavaScript, C/ C++, FORTRAN.* [(Link)](https://conda.io/docs/)
# **¿Por qué Conda?**
#
# * Open Source
# * Gestor de librerías __y__ entornos virtuales.
# * Compatible con Linux, Windows y macOS.
# * Es agnóstico al lenguaje de programación (inicialmente fue desarrollado para Python).
# * Es de fácil instalación y uso.
# **Otras alternativas**
#
# * `pip + virtualenv`: el primero es el gestor favorito de librerías de Python y el segundo es un gestor de entornos virtuales, el contra es que es exclusivo de Python.
# * `Pipenv` o `Poetry`: librerías enfocadas al manejo de dependencias (muy recomendables!)
#
# ## Entorno de desarrollo integrado
#
# <img src="https://cdn-images.visual-paradigm.com/features/v12/ide-image.png" alt="" width="300" height="300" align="center"/>
#
# * Un [entorno de desarrollo integrado](https://es.wikipedia.org/wiki/Entorno_de_desarrollo_integrado), en inglés **Integrated Development Environment (IDE)**, es una aplicación informática que proporciona servicios integrales para facilitarle al desarrollador o programador el desarrollo de software.
#
# * Normalmente, un IDE consiste de un editor de código fuente, herramientas de construcción automáticas y un depurador. La mayoría de los IDE tienen auto-completado inteligente de código (IntelliSense).
#
# * Algunos IDE contienen un compilador, un intérprete, o ambos, tales como NetBeans y Eclipse; otros no, tales como SharpDevelop y Lazarus.
# Existen varios IDE populares que sirven para varios lenguajes de programación. En Python, el más recomendable es [Pycharm](https://www.jetbrains.com/pycharm/?gclid=Cj0KCQjw-uH6BRDQARIsAI3I-Uc0HFf8ll0EZRnc6FCFaozMsnxkCiciIok-iRMyfN1xHB9vaArGw6IaAnV7EALw_wcB).
#
# ### Pycharm
#
# <img src="https://resources.jetbrains.com/storage/products/pycharm/img/meta/pycharm_logo_300x300.png" alt="" width="200" height="200" align="center"/>
#
# PyCharm es un IDE para desarrolladores profesionales. Fue creado por JetBrains, una empresa conocida por crear excelentes herramientas de desarrollo de software.
#
# Hay dos versiones de PyCharm:
#
# * **Community**: versión gratuita de código abierto, ligera, buena para Python y desarrollo científico
# * **Professional**: versión de pago, IDE con todas las funciones con soporte para desarrollo web también
# ### Observación
#
# Se recomienda que puedan descargar Pycharm (en su versión gratuita) para poder familiarizarse con este tipo de herramientas, aunque el curso está orientado a trabajar sobre la terminal y con jupyter notebook.
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='c4'></a>
# ## Project Jupyter
#
# [Project Jupyter](https://jupyter.org/index.html) exists to develop open-source software, open-standards, and services for interactive computing across dozens of programming languages.*
#
# <img src="https://2.bp.blogspot.com/-Q23VBETHLS0/WN_lgpxinkI/AAAAAAAAA-k/f3DJQfBre0QD5rwMWmGIGhBGjU40MTAxQCLcB/s1600/jupyter.png" alt="" width="360" height="360" align="center"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Jupyter Notebook
#
# Es una aplicación web que permite crear y compartir documentos que contienen código, ecuaciones, visualizaciones y texto. Entre sus usos se encuentra:
#
# * Limpieza de datos
# * Transformación de datos
# * Simulaciones numéricas
# * Modelamiendo Estadístico
# * Visualización de Datos
# * Machine Learning
# * Mucho más.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Jupyter Lab
#
# * Es la siguiente generación de la interfaz de usuario de *Project Jupyter*.
# * Similar a Jupyter Notebook cuenta con la facilidad de editar archivos .ipynb (notebooks) y herramientas como una terminal, editor de texto, explorador de archivos, etc.
# * Eventualmente Jupyter Lab reemplazará a Jupyter Notebook (aunque la versión estable fue liberada hace algunos meses).
# * Cuenta con una serie de extensiones que puedes instalar (y desarrollar, inclusive).
# * Más información en: https://github.com/jupyterlab/jupyterlab-demo
#
# -
# Puedes probar Jupyter Lab con solo dos clicks!
#
# 1. Ingresar a este link: https://github.com/jupyterlab/jupyterlab-demo
# 2. Hacer click en el icono de binder: 
# ### Otros Proyectos
#
# Entre los más conocidos se encuentran:
#
# * [JupyterHub](https://jupyterhub.readthedocs.io/): Distribuir Jupyter Noterbooks a múltiples usuarios.
# * [nbviewer](https://nbviewer.jupyter.org/): Compartir Jupyter Notebooks.
# * [Jupyter Book](https://jupyterbook.org/): Construir y publicar libros de tópicos computacionales.
# * [Jupyter Docker Stacks](https://jupyter-docker-stacks.readthedocs.io/): Imágenes de Jupyter para utilizar en Docker.
# <a id='c5'></a>
# ## Versionamiento de Código
#
# <img src="https://i1.wp.com/help.lieberlieber.com/download/attachments/32342017/image2019-7-2_14-53-19.png?w=1080&ssl=1" width="480" height="240" align="center"/>
#
# * Permite compartir el código fuente de nuestros desarrollos y a la vez mantener un registro de los cambios por los que va pasando.
#
# * Herramienta más importante y fundamental dentro del desarrollo.
# * Tipos de versionadores de código:
# * [Sistemas Centralizados](https://sites.google.com/site/practicadesarrollosoft/temario/sistemas-de-versionado-de-cdigo/sistemas-de-versionado-de-cdigo-centralizados): Son los más "tradicionales", por ejemplo SVN, CVS, etc.
# * [Sistemas Distribuidos](https://sites.google.com/site/practicadesarrollosoft/temario/sistemas-de-versionado-de-cdigo/sistemas-de-versionado-de-cdigo-distribuidos): son los que están en auge actualmente como: Git, Mercurial, Bazaar, etc.
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Git
#
# <img src="https://static.platzi.com/media/user_upload/Beginners_guide_setting_up-git-a4bacd39-5be0-4ae4-a956-5117c18efa94.jpg" alt="" width="240" height="240" align="center"/>
#
# _[__Git__](https://git-scm.com/) is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency._
#
#
#
# Es importante comprender que _Git_ es la herramienta que permite versionar tus proyectos, sin embargo, a la hora de querer aprovechar más funcionalidades, como compartir o sincronizar tus trabajos se hace necesario utilizar servicios externos. Los más famosos son:
#
# * GitHub
# * GitLab
# * Bitbucket
#
# Piensa lo siguiente, cualquiera podría implementar un correo electrónico entre dos computadoras conectadas entre ellas por LAN pero no conectadas a Internet. Sin embargo la gente utiliza servicios como Gmail, Outlook, etc. con tal de aprovechar de mejor manera las funcionalidades que ofrece la tecnología del correo electrónico. Esta es una analogía perfecta entre las diferencias de Git y los servicios como GitHub o GitLab.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### GitHub
#
# <img src="https://miro.medium.com/max/1125/1*wotzQboYWAfaj-7bvGNIkQ.png" alt="" width="350" height="160" align="center"/>
#
#
#
# _[GitHub](https://github.com/) is a development platform inspired by the way you work. From open source to business, you can host and review code, manage projects, and build software alongside 30 million developers._
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Gitlab
#
# <img src="https://blog.desafiolatam.com/wp-content/uploads/2017/10/gitlab-cover.png" alt="" width="350" height="160" align="center"/>
#
#
#
# _[Gitlab](https://gitlab.com/) is an open source end-to-end software development platform with built-in version control, issue tracking, code review, CI/CD, and more. Self-host GitLab on your own servers, in a container, or on a cloud provider._
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Bitbucket
#
# <img src="https://wptavern.com/wp-content/uploads/2016/10/bitbucket-logo.png" alt="" width="350" height="160" align="center"/>
#
#
#
# _[Bitbucket](https://bitbucket.org/product/) is more than just Git code management. Bitbucket gives teams one place to plan projects, collaborate on code, test, and deploy._
#
# .
# -
# ### Resumen
#
# * Sistema operativo: Cualquiera, sin embargo se recomiendan alternativas basadas en Unix.
# * Lenguaje de programación: Python
# * Entorno virtual: Conda, preferentemente a través de miniconda.
# * Entorno de trabajo: Jupyter Lab.
# * Versionamiento: Git & GitHub.
| lectures/basic_tools/lecture_000_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv_andrew_ng_2_python
# language: python
# name: venv_andrew_ng_2_python
# ---
# +
import numpy as np
from scipy import optimize
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
# %matplotlib inline
from sklearn.preprocessing import PolynomialFeatures
import csv
import time
# %load_ext autoreload
# %autoreload 2
# -
# # 1 Univariate Logistic Regression
file_path_1 = '../course_materials/ex2data1.txt'
# # 1.1 Data Visualisation
dF_1 = pd.read_csv(file_path_1, header = None, names = ['x1', 'x2', 'y'])
n_samples_1, n_variables_1 = dF_1.shape
print (f"\tn_samples # {n_samples_1}\n\tvariables # {n_variables_1}")
dF_1.head()
x1 = dF_1['x1']
x2 = dF_1['x2']
plt.figure(figsize=(8,6))
plt.scatter(x1, x2, c = dF_1['y'])
plt.xlabel("X1")
plt.ylabel("X2")
plt.show()
# ## 1.2 Data Extraction and Transformation
def get_data(file_path):
    """Load a comma-separated dataset and prepare it for logistic regression.

    Returns:
        β: initial parameter vector of zeros (bias + one entry per feature).
        X: design matrix with a leading column of ones, flattened to 1-D.
        y: target column, flattened to 1-D.
        n_samples, n_variables: dimensions of the raw data file.
    """
    raw = np.loadtxt(file_path, delimiter=',')
    n_samples, n_variables = raw.shape
    # Split off the last column as the target; everything else is a feature.
    targets = raw[:, -1:]
    design = np.insert(raw[:, :-1], 0, 1, axis=1)  # prepend the bias column
    β = np.zeros(n_variables)
    return β, design.flatten(), targets.flatten(), n_samples, n_variables
# ## 1.3 Logistic Regression
# ### 1.3.1 Logistic Regression
#
# **Sigmoid Function** ${\sigma}(z) = \frac{1}{1 + e^{-z}}$
#
#
# ### 1.3.2 Vectorisation of Logistic Regression
#
# **Hypothesis** $h_{\beta}(X) = \frac{1}{1 + e^{-X\cdot\beta}}$
#
# **Cost Function** $J = \frac{-1}{n}\sum(y^T\cdot \log h_{\beta} +(1-y)^T\cdot \log (1-h_{\beta}))$
# +
def get_sigmoid(z):
    """Element-wise logistic sigmoid, 1 / (1 + exp(-z))."""
    return 1.0 / (1.0 + np.exp(-z))

def get_hypothesis(β, X, n_samples, n_variables):
    """Return h(X) = sigmoid(X @ β) as an (n_samples, 1) column vector.

    β and X arrive flattened, so both are reshaped back to 2-D first.
    """
    weights = β.reshape(n_variables, -1)
    design = X.reshape(n_samples, -1)
    return get_sigmoid(design @ weights)

def cost_function(β, X, y, n_samples, n_variables, λ=0.):
    """Regularised logistic-regression cost J(β).

    J = -(yᵀ·log h + (1-y)ᵀ·log(1-h)) / n plus the L2 penalty
    λ·‖β[1:]‖² / (2n); the bias term β[0] is not regularised.
    """
    target = y.reshape(n_samples, -1)
    h = get_hypothesis(β, X, n_samples, n_variables)
    log_loss = -(target.T @ np.log(h) + (1 - target).T @ np.log(1 - h)) / n_samples
    penalty = λ * np.dot(β[1:].T, β[1:]) / (2 * n_samples)
    return (log_loss + penalty)[0][0]
def optimise_β_1(β, X, y, n_samples, n_variables, λ=0.):
    """Minimise cost_function with Nelder-Mead (scipy fmin); returns fmin's full output tuple."""
    extra_args = (X, y, n_samples, n_variables, λ)
    return optimize.fmin(cost_function, x0=β, args=extra_args, maxiter=1500, full_output=True)
def get_prediction(β, X, n_samples, n_variables):
    """Boolean class predictions: True where the predicted probability is at least 0.5."""
    probabilities = get_hypothesis(β, X, n_samples, n_variables)
    return probabilities >= 0.5
# -
# ## 1.4 Function Tests
β_test_1, X_1, y_1, n_samples_1, n_variables_1 = get_data(file_path_1)
y_1.shape
# ### 1.4.1 Cost-Function Test
# The outputs of the cost_function should be as follows:<br\>
# β_test (set to zeros), X, λ=0. — **J = 0.693** (Andrew Ng) <br\>
J = cost_function(β_test_1, X_1, y_1, n_samples_1, n_variables_1)
print(f"\tJ = {J:.5f}")
# ### 1.5.1 Prediction Test
# The outputs of the cost_function should be as follows:<br\>
# Exam_1: 45, Exam_2: 85 — **P = 0.776** (Andrew Ng) <br\>
β_opt_1 = optimise_β_1(β_test_1, X_1, y_1, n_samples_1, n_variables_1)[0]
xTest_1 = np.array([1, 45, 85])
n_samplesTest_1 = 1
hypothesis = get_hypothesis(β_opt_1, xTest_1, n_samplesTest_1, n_variables_1)[0][0]
print(f"\tP = {hypothesis:.5f}")
# ## 1.5 Results Visualisation & Analysis
# ### 1.5.1 Goodness-of-Fit Measures
# #### 1.5.1.1 Decision Boundary
# This comment is here thanks to this dude (https://github.com/vsevolodloik).<br />
# Decision boundary is defined as follows:<br />
# $\frac{1}{1 + e^{-X\cdot\beta}} = \frac{1}{2}$<br />
# Therefore, for the simple case of two variables, the equation of decision boundary takes the following form:<br />
# $\beta_0+\beta_1\cdot{X_1}+\beta_2 \cdot{X_2} = 0$
# #### 1.5.1.2 Types of Errors & Accuracy, Precision, Recall
#
# The rate **type I error** (false positives) is denoted by $\alpha$.<br />
# The rate **type II error** (false negatives) is denoted by $\beta$.<br /><br />
# * **Accuracy** $= \frac {TP + TN}{TP + TN + FP + FN}$
# * **Precision** $= \frac {TP}{TP + FP}$
# * **Recall** $= \frac {TP}{TP + FN}$
# +
def get_accurary_precision_recall(
        X, y, p):
    """Print and return accuracy/precision/recall for binary labels y vs predictions p.

    Works on 0/1 vectors: tp counts agreements on the positive class, while
    label-minus-prediction differences isolate false positives (-1) and
    false negatives (+1).
    """
    true_pos = np.sum(y * p)
    false_pos = np.sum(y - p == -1)
    false_neg = np.sum(y - p == 1)
    n = X.shape[0]
    # All misclassifications are either false positives or false negatives.
    accuracy = (n - false_pos - false_neg) / n
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    print(f"\taccuracy {accuracy:.3f}\n\tprecision {precision:.3f}\n\trecall {recall:.3f}")
    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall}
def goodness_of_fit(β, X, y, n_samples, n_variables):
    """Report classification metrics and plot the data with the linear decision boundary."""
    coeffs = β.reshape(n_variables, -1)
    features = X.reshape(n_samples, -1)
    labels = y.reshape(n_samples, -1)
    predictions = get_prediction(β, X, n_samples, n_variables).flatten()
    get_accurary_precision_recall(X, y, predictions)
    plt.figure(figsize=(8,6))
    first_feature = features[:, 1:2]
    second_feature = features[:, 2:]
    plt.scatter(first_feature, second_feature, c=labels[:, 0:])
    # Decision boundary: β0 + β1·x1 + β2·x2 = 0  →  x2 = -(β0 + β1·x1)/β2.
    boundary = - coeffs[0]/coeffs[2] - first_feature*coeffs[1]/coeffs[2]
    plt.plot(first_feature, boundary, '-')
    plt.xlabel("X1")
    plt.ylabel("X2")
    return plt.show()
# -
goodness_of_fit(β_opt_1, X_1, y_1, n_samples_1, n_variables_1)
# http://www.johnwittenauer.net/tag/machine-learning/
#
# http://aimotion.blogspot.se/2011/11/machine-learning-with-python-logistic.html
#
# https://beckernick.github.io/logistic-regression-from-scratch/
#
# https://github.com/kaleko/CourseraML/blob/master/ex2/ex2.ipynb
#
# http://www.scipy-lectures.org/advanced/mathematical_optimization/
# # 2 Multivariate Logistic Regression
file_path_2 = '../course_materials/ex2data2.txt'
dF_2 = pd.read_csv(file_path_2, header = None)
n_samples, n_variables = dF_2.shape
print (f"\tsamples # {n_samples}\n\tvariables # {n_variables}")
dF_2.head()
# ## 2.1 Data Visualisation
X_1s = dF_2.iloc[:, :1]
X_2s = dF_2.iloc[:, 1:2]
plt.figure(figsize=(8,6))
plt.scatter(X_1s, X_2s, c = dF_2.iloc[:, 2:])
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
# ## 2.2 Data Extraction Transformation
# Add **polynomial** and **interaction** features using **SciKitLearn Preprocessing**<br\>
# http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
def add_polynomial(file_path, polynomialDegree):
    """Load a CSV dataset and expand its features with polynomial/interaction terms.

    Returns (β, X, y, n_samples, n_variables) where X is the flattened
    polynomial design matrix and β is a matching zero vector.
    """
    raw = np.loadtxt(file_path, delimiter=',')
    features = raw[:, :-1]
    targets = raw[:, -1:]
    # X without intercept is passed to PolynomialFeatures.fit_transform;
    # the intercept column is added automatically.
    transformer = PolynomialFeatures(polynomialDegree)
    expanded = transformer.fit_transform(features)
    n_samples, n_variables = expanded.shape
    β = np.zeros((n_variables, 1))
    return β.flatten(), expanded.flatten(), targets.flatten(), n_samples, n_variables
# ## 2.3 Function Tests
βPoly6, XPoly6, yPoly6, n_samplesPoly6, n_variablesPoly6 = add_polynomial(file_path_2, 6)
# ### 2.3.1 Cost-Function Test
# The outputs of the cost_function should be as follows:<br\>
# β_test (set to zeros), X, λ=0. — **J = 0.693** (Andrew Ng) <br\>
J = cost_function(βPoly6, XPoly6, yPoly6, n_samplesPoly6, n_variablesPoly6)
print(f"J = {J:.5f}")
# +
def optimise_β_2(β, X, y, n_samples, n_variables, λ=0.):
    """Minimise cost_function with BFGS and return the optimal parameter vector."""
    # NOTE: the iteration cap materially affects the returned coefficients.
    result = optimize.minimize(
        cost_function, β,
        args=(X, y, n_samples, n_variables, λ),
        method='BFGS',
        options={'maxiter': 200, "disp": False})
    return result['x']
# +
# β_opt = optimise_β(βPoly6, XPoly6, yPoly6, λ = 0.)[0]
# -
# ## 2.4 Results Visualisation & Analysis
def decision_boundary(β, X, y, n_samples, n_variables, xMin, xMax, step, polyOrder, λ=0.):
    """Plot the data and the non-linear decision boundary h(x) = 0.5.

    Prints accuracy/precision/recall for the supplied parameters, then draws
    the 0.5-probability contour of the hypothesis over a step x step grid.

    β          -- flattened parameter vector for the polynomial features
    X          -- flattened, already polynomial-expanded design matrix
    y          -- flattened binary labels
    xMin, xMax -- (x1, x2) lower/upper corners of the plotting grid
    step       -- number of grid points per axis
    polyOrder  -- degree used to expand grid points, must match X's expansion
    λ          -- regularisation strength, shown in the contour label
    """
    p = get_prediction(β, X, n_samples, n_variables).flatten()
    get_accurary_precision_recall(X, y, p)
    xx = np.linspace(xMin[0], xMax[0], step)
    yy = np.linspace(xMin[1], xMax[1], step)
    XX, YY = np.meshgrid(xx, yy)
    XY = np.concatenate((XX.reshape(step**2, -1), YY.reshape(step**2, -1)), axis=1)
    # X without intercept is passed to PolynomialFeatures.fit_transform;
    # the intercept column is added automatically.
    poly = PolynomialFeatures(polyOrder)
    poly_XY = poly.fit_transform(XY)
    ZZ = get_hypothesis(β, poly_XY, step**2, poly_XY.shape[1]).reshape(step, -1)
    # For plt.contour, ZZ is a matrix; xx(XX) and yy(YY) can be either
    # matrices or vectors of appropriate dimensions.
    # Fix: local renamed so it no longer shadows this function's own name.
    contour_set = plt.contour(xx, yy, ZZ, [0.5])
    # Fix: raw string avoids the invalid '\l' escape-sequence warning.
    plt.clabel(contour_set, inline=1, fontsize=15, fmt=r'$\lambda $= %d' % λ)
    x_1 = X.reshape(n_samples, -1)[:, 1:2]
    x_2 = X.reshape(n_samples, -1)[:, 2:3]
    plt.scatter(x_1, x_2, c=y.reshape(n_samples, -1)[:, 0:])
    plt.title("Decision Boundary")
    plt.xlabel("X1")
    plt.ylabel("X2")
    return plt.show()
# +
λ = 0
polyOrder = 6
βPoly, XPoly, yPoly, sS_Poly, nV_Poly = add_polynomial(file_path_2, polyOrder)
β_opt_2 = optimise_β_2(βPoly, XPoly, yPoly, sS_Poly, nV_Poly, λ)
xMin = (-1., -1.)
xMax = (1.2, 1.2)
step = 50
decision_boundary(β_opt_2, XPoly, yPoly, sS_Poly, nV_Poly, xMin, xMax, step, polyOrder, λ)
# -
for i, λ in enumerate([0., 1., 10, 100 ]):
polyOrder = 6
βPoly, XPoly, yPoly, n_samplesPoly, n_variablesPoly = add_polynomial(file_path_2, polyOrder)
β_opt = optimise_β_2(βPoly, XPoly, yPoly, n_samplesPoly, n_variablesPoly, λ)
xMin = (-1., -1.)
xMax = (1.2, 1.2)
step = 50
decision_boundary(β_opt, XPoly, yPoly, n_samplesPoly, n_variablesPoly, xMin, xMax, step, polyOrder, λ)
| notebooks/2_logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''udemyPro_py388'': venv)'
# name: python3
# ---
# ### DEBUGGER
# +
from functools import wraps
def debug(fn):
    """Decorator that logs arguments, their types, and the return value of each call."""
    # wraps keeps fn's metadata (e.g. __name__, __doc__) on the wrapper.
    @wraps(fn)
    def debugger(*args, **kwargs):
        positional = [(value, type(value)) for value in args]
        named = [(key, value, type(value)) for key, value in kwargs.items()]
        print("Args: {}".format(positional))
        print("Kwargs: {}".format(named))
        print("Function {} called".format(fn.__name__))
        result = fn(*args, **kwargs)
        print("Function {} returns: {}".format(fn.__name__, result))
        return result
    return debugger
# +
@debug
def print_arguments(a, b, c=None):
    """Demo target for the @debug decorator: just prints its arguments."""
    print("a: {}, b: {}, c: {}".format(a, b, c))
print_arguments(10, 20, c=False)
# +
# Four decorated arithmetic demos.
# NOTE: each returns its result only when the keyword flag `c` is truthy;
# otherwise it short-circuits to 0 — `c` acts as an enable switch, not an operand.
@debug
def addition(a, b, c=None):
    return a+b if c else 0
@debug
def sub(a, b, c=None):
    return a-b if c else 0
@debug
def mul(a, b, c=None):
    return a*b if c else 0
@debug
def div(a, b, c=None):
    return a/b if c else 0
# All calls below pass c=False, so every call returns 0.
addition(10, 20, c=False)
sub(10, 20, c=False)
mul(10, 20, c=False)
div(10, 20, c=False)
# -
# Decorators lose the wrapped function's metadata by default!
# This is prevented by importing `wraps` from functools — see the function above.
print(addition.__name__)
print(addition.__doc__)
# ### TIMER
# +
from functools import wraps
import time
# perf_counter() is the higher-resolution timer; it also exists as perf_counter_ns.
# Wall time is reported here (how long the program takes overall).
# Counterpart: CPU time.
def timing(fn):
    """Decorator that reports the wall-clock duration of each call in nanoseconds."""
    # wraps keeps fn's metadata intact on the wrapper.
    @wraps(fn)
    def timer(*args, **kwargs):
        started = time.perf_counter_ns()
        result = fn(*args, **kwargs)
        elapsed = time.perf_counter_ns() - started
        print("Function {} took: {} ns".format(fn.__name__, elapsed))
        return result
    return timer
# -
@timing
def do_something(a, b, c=None):
    """Return a+b when `c` is truthy, else 0 (wall-clock timed by @timing)."""
    return a+b if c else 0
do_something(a=10, b=20, c=True)
| Python/zzz_training_challenge/UdemyPythonPro/Chapter5_Functions/Decorators/decorator_beispiel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="kXOTyoXbZKNw" colab_type="code" colab={}
# + [markdown] id="BmCAwR5rZMrg" colab_type="text"
# Text Generator
# + id="LROGUH6xZRQP" colab_type="code" outputId="18bd45cd-78c0-44a4-f8f7-9546d89f62e2" colab={"base_uri": "https://localhost:8080/", "height": 80}
import numpy
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
# + id="cyUyeneJd_m3" colab_type="code" outputId="b52f2934-c005-4856-d7ce-27a73b417f59" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Code to read file into Colaboratory:
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
link = 'https://drive.google.com/open?id=1w4lOacBmOtly-ozf8Ncfrl4oCL4minZ8'
fluff, id = link.split('=')
print (id) # Verify that you have everything after '='
downloaded = drive.CreateFile({'id':id})
raw_text = downloaded.GetContentString()
# + id="_Q1Wl5k8riUQ" colab_type="code" colab={}
raw_text = raw_text.lower()
# + id="RQ6PuI7nWCai" colab_type="code" colab={}
#print(raw_text)
# + id="NUfBcAsIXQjz" colab_type="code" colab={}
# create mapping of unique chars to integers, and a reverse mapping
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
# + id="NxsyOwYWXay5" colab_type="code" outputId="dbf4dc8c-01f8-4efe-f511-bbf556c64868" colab={"base_uri": "https://localhost:8080/", "height": 51}
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print ("Total Characters: ", n_chars)
print ("Total Vocab: ", n_vocab)
# + id="has4F79NXwUz" colab_type="code" outputId="a6edd672-a799-49f9-9e77-289ef8ca0a9e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# prepare the dataset of input to output pairs encoded as integers
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print ("Total Patterns: ", n_patterns)
# + id="PeSkBwa6BF5r" colab_type="code" colab={}
def perplexity(y_true, y_pred):
    """Keras metric: perplexity, i.e. exp of the categorical cross-entropy."""
    ce = K.categorical_crossentropy(y_true, y_pred)
    return K.exp(ce)
def crossentropy(y_true, y_pred):
    """Keras metric: plain categorical cross-entropy."""
    loss = K.categorical_crossentropy(y_true, y_pred)
    return loss
# + id="abxpmQHyYIw1" colab_type="code" outputId="27683bed-be5c-4fb3-90a1-2c73c31757a3" colab={"base_uri": "https://localhost:8080/", "height": 51}
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
print(X.shape)
print(y.shape)
# + id="YNAMlEpAjlUl" colab_type="code" outputId="d7d7f371-a6db-4492-818a-c6fba3b16340" colab={"base_uri": "https://localhost:8080/", "height": 292}
# define the LSTM model
model = Sequential()
model.add(LSTM(512, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(512))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[crossentropy, perplexity])
# + id="9H8PykF60xhL" colab_type="code" outputId="b8354f7e-f4a8-4a93-cb7d-192776d469f1" colab={"base_uri": "https://localhost:8080/", "height": 340}
print(model.summary())
# + id="EJEb1uY8lEOf" colab_type="code" outputId="8929e839-c343-4c08-e500-caa333661844" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# fit the model
model.fit(X, y, epochs=50, batch_size=2000)
# + id="dot4p39B8ENX" colab_type="code" colab={}
# def sample(preds, temperature=1.0):
# # helper function to sample an index from a probability array
# preds = np.asarray(preds).astype('float64')
# preds = np.log(preds) / temperature
# exp_preds = np.exp(preds)
# preds = exp_preds / np.sum(exp_preds)
# probas = np.random.multinomial(1, preds, 1)
# return np.argmax(probas)
# + id="faoCXlWKDtFY" colab_type="code" outputId="12be6146-7a18-4d73-8aa5-edb060b55fb2" colab={"base_uri": "https://localhost:8080/", "height": 680}
import sys
import numpy as np
# pick a random seed
start = np.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print ("Seed:")
print ("\"", ''.join([int_to_char[value] for value in pattern]), "\"")
# generate characters
for i in range(1000):
x = np.reshape(pattern, (1, len(pattern), 1))
x = x / float(n_vocab)
prediction = model.predict(x, verbose=0)
index = np.argmax(prediction)
result = int_to_char[index]
seq_in = [int_to_char[value] for value in pattern]
sys.stdout.write(result)
pattern.append(index)
pattern = pattern[1:len(pattern)]
print ("\nDone.")
# + id="JykP46T8z4zS" colab_type="code" colab={}
# + id="9dXnIoaKz2fK" colab_type="code" colab={}
# + id="f_q3PaoF7_zF" colab_type="code" colab={}
| SourceCode/Novel_generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Model Building
#
# This notebook trains and examines different ML classifiers on the training dataset.
#
# * K Nearest neighbours
# * SVM
# * Random forest
# * Logistic regression
# * Decision Tree
# * ADABoost
# * ANN
# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
import xgboost as xgb
import pickle
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score, precision_recall_fscore_support
from sklearn.utils.class_weight import compute_class_weight
# -
df_tr = pd.read_csv('./data/train_features_processed.csv')
y_tr = pd.read_csv('./data/train_labels_processed.csv')
y_tr = y_tr.squeeze()
df_tr.info()
# ## 1. Splitting training data
# Split training dataset into training dataset, cross validation dataset and test dataset - 55%,25% and 20% resp.
X_train_temp, X_test, y_train_temp, y_test = train_test_split(df_tr,
y_tr,
test_size=0.2,
random_state=42)
X_train, X_cv, y_train, y_cv = train_test_split(X_train_temp,
y_train_temp,
test_size=0.25)
type(y_train)
# ## 2. Model development
# +
def bestModel(model, X_train, X_cv, y_train, y_cv, grid):
    """Grid-search `grid` over `model` with 4-fold CV; print and return the best estimator."""
    search = GridSearchCV(model, grid, cv=4)
    search.fit(X_train, y_train)
    print("tuned hpyerparameters :(best parameters) ", search.best_params_)
    print("accuracy :", search.best_score_)
    return search.best_estimator_
def testModel(model, X_train, X_cv, y_train, y_cv):
    """Fit `model` and return (metrics, model) with macro precision/recall/F1
    on the training and cross-validation sets."""
    model.fit(X_train, y_train)
    pred_train = model.predict(X_train)
    pred_cv = model.predict(X_cv)
    p_tr, r_tr, f_tr, _ = precision_recall_fscore_support(
        y_train, pred_train, average='macro')
    p_cv, r_cv, f_cv, _ = precision_recall_fscore_support(
        y_cv, pred_cv, average='macro')
    metrics = np.array([p_tr, r_tr, f_tr, p_cv, r_cv, f_cv])
    return metrics, model
def testResult(model_trained,X_test):
y_pred_test=model_trained.predict(X_test)
return y_pred_test
# -
# ### 1. Logistic Regression
# Fix: class_weight='auto' was deprecated (0.17) and removed (0.19) in
# scikit-learn; 'balanced' is the supported equivalent.
logReg = LogisticRegression(class_weight='balanced', max_iter=200)
# Search regularisation strength C on a log grid across four solvers.
grid = {
    'C': np.logspace(-5, 0, 40),
    'solver': ['newton-cg', 'lbfgs', 'sag', 'saga']
}
logReg_best = bestModel(logReg, X_train, X_cv, y_train, y_cv, grid)
# ### 2. K-Nearest neighbours
knn = KNeighborsClassifier()
grid = {
'n_neighbors': np.int0(np.linspace(3, 22, 20)),
'weights': ['uniform', 'distance'],
'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute']
}
knn_best=bestModel(knn, X_train, X_cv, y_train, y_cv, grid)
# ### 3. Decision tree
decTree = DecisionTreeClassifier()
grid = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}
decTree_best=bestModel(decTree, X_train, X_cv, y_train, y_cv, grid)
# ### 4. Random forest
rnFr = RandomForestClassifier()
grid = {
'n_estimators': np.array([10,20,30]),
'criterion': ['gini', 'entropy']
}
rnFr_best=bestModel(rnFr, X_train, X_cv, y_train, y_cv, grid)
# ### 5. SVM
svm = SVC()
grid = {
'C': np.logspace(-5, 2, 10),
'kernel': ['linear', 'poly', 'rbf', 'sigmoid']
}
svm_best=bestModel(svm, X_train, X_cv, y_train, y_cv, grid)
# ### 6. ADABoost
adab = AdaBoostClassifier()
grid = {'algorithm': ['SAMME', 'SAMME.R']}
adab_best=bestModel(adab, X_train, X_cv, y_train, y_cv, grid)
# ### 7. XGBoost
xgbst = xgb.XGBClassifier(use_label_encoder=False, objective='binary:logistic')
grid = {'n_estimators': np.array([50, 70, 100])}
xgbst_best=bestModel(xgbst, X_train, X_cv, y_train, y_cv, grid)
# ### 8. Neural Network
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Activation
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout
ann_mod=Sequential()
ann_mod.add(Dense(units=9,activation='relu'))
ann_mod.add(Dropout(0.5))
ann_mod.add(Dense(units=18,activation='relu'))
ann_mod.add(Dropout(0.5))
ann_mod.add(Dense(units=1,activation='sigmoid'))
ann_mod.compile(optimizer='adam',loss='binary_crossentropy')
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
ann_mod.fit(x=X_train.values,
y=y_train.values,
epochs=600,
validation_data=(X_cv.values, y_cv.values), verbose=1,
callbacks=[early_stop])
model_loss=pd.DataFrame(ann_mod.history.history)
model_loss.plot()
# Fix: Sequential.predict_classes() was deprecated and removed (TF >= 2.6).
# For this single-unit sigmoid output the equivalent is thresholding at 0.5.
y_pred_tr = (ann_mod.predict(X_train.values) > 0.5).astype("int32")
y_pred_cv = (ann_mod.predict(X_cv.values) > 0.5).astype("int32")
precision_tr, recall_tr, fscore_tr, support_tr = precision_recall_fscore_support(
y_train.values, y_pred_tr, average='macro')
precision_cv, recall_cv, fscore_cv, support_cv = precision_recall_fscore_support(
y_cv.values, y_pred_cv, average='macro')
print(precision_tr, recall_tr, fscore_tr, support_tr)
print(precision_cv, recall_cv, fscore_cv, support_cv)
ann_mod.save('ann_titanic.h5')
# ## 3. Model testing
model_list=[logReg_best,knn_best,decTree_best,rnFr_best,svm_best,adab_best,xgbst_best]
for model in model_list:
print(str(model).split('(')[0])
report_d={}
for model in model_list:
name=str(model).split('(')[0]
filename = './models/'+name + '_titanic.model'
report,mod=testModel(model, X_train, X_cv, y_train, y_cv)
pickle.dump(model, open(filename, 'wb'))
report_d[name]=report
report_d
| 03_Model_building.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## 1 to 1 convolution
#
# This will be our filter — it tests the presence of a horizontal line:
conv_hline = [
[-1,-1,-1],
[ 2, 2, 2],
[-1,-1,-1]
]
plt.imshow(conv_hline, cmap='gray_r')
# Dot product between a horizontal line and the horizontal line filter:
# +
data = [
[0,0,0],
[1,1,1],
[0,0,0]
]
s = np.tensordot(data, conv_hline)
plt.imshow(data, cmap='gray_r')
plt.title('Dot product: {:.2f}'.format(s))
# -
# Dot product between a vertical line and the horizontal line filter:
# +
data = [
[1,0,0],
[1,0,0],
[1,0,0]
]
s = np.tensordot(data, conv_hline)
plt.imshow(data, cmap='gray_r')
plt.title('Dot product: {:.2f}'.format(s))
# -
# ## Sliding convolution
#
# Now, we want to detect parts of an image that contain certain features, such as horizontal lines.
# +
img = plt.imread('https://i.imgur.com/BeRusDc.png')
plt.imshow(img, cmap='gray')
# -
img.shape
# We will apply our 3x3 filter on 3x3 pixels of the image, sliding from top to bottom, left to right.
# +
# Slide the 3x3 horizontal-line filter over every 3x3 patch of the image
# ("valid" convolution: output is (height-2) x (width-2)).
height, width = img.shape
res = []
for i in range(height-3+1):
    line = []
    for j in range(width-3+1):
        # Dot product of the patch with the filter = filter response at (i, j).
        s = np.tensordot(img[i:i+3,j:j+3], conv_hline)
        line.append(float(s))
    res.append(line)
# -
# The dot product of all 3x3 pixels convoluted with the horizontal line filter is now an image that only contains horizontal lines.
img_h = np.array(res).reshape(height-3+1, -1)
img_h.shape
plt.imshow(img_h, cmap='gray_r')
# ## Combining convolutions
#
# We will do the same thing with vertical lines.
conv_vline = [
[-1,2,-1],
[-1,2,-1],
[-1,2,-1]
]
plt.imshow(conv_vline, cmap='gray_r')
# +
height, width = img.shape
res = []
for i in range(height-3+1):
line = []
for j in range(width-3+1):
s = np.tensordot(img[i:i+3,j:j+3], conv_vline)
line.append(float(s))
res.append(line)
# -
img_v = np.array(res).reshape(height-3+1, -1)
img_v.shape
plt.imshow(img_v, cmap='gray_r')
# If we add the two, the image now contains horizontal and vertical lines. We basically just simplified the initial image.
plt.imshow(img_v + img_h, cmap='gray_r')
| docs/!ml/notebooks/Convolutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Carlosrnes/group_work_ml/blob/main/ML_Pandas_Profiling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="hicw0S7PHhjg"
# #! pip install https://github.com/pandas-profiling/pandas-profiling/archive/master.zip
# + id="2XpeIeHLF4wm"
import pandas as pd
from pandas_profiling import ProfileReport
# + id="gBvaAoFOA9aX" colab={"base_uri": "https://localhost:8080/", "height": 305} outputId="8822fdab-1f63-4f6c-9618-073304262650"
dataset = pd.read_csv('https://raw.githubusercontent.com/Carlosrnes/group_work_ml/main/techscape-ecommerce/train.csv')
dataset = dataset.drop(['Access_ID'], axis=1)
dataset['Date'] = pd.to_datetime(dataset['Date'], format='%d-%b-%y')
dataset.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 144, "referenced_widgets": ["887fe596e77248dcab7307d931db6c7a", "4731e942e0c04df8b798b1e8a9e71ee6", "896c7573d3e84f8b9044c4ec66508c10", "e4dceda2086c419f93128421e08b0e74", "94d07153ae61490d99310d0d4787e3d3", "db9d31421aa841c7a12f8e0696e6f0ef", "<KEY>", "<KEY>", "3ece8fd4bedb43989a0560678518e816", "<KEY>", "f2246abee78a427c939a039c3173884c", "c8a183c4c9574521a0b2e01ebf464a97", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "702ee7cc67e744e08ce4f5ed6467b943", "fb95effee7234d899fa3ab143c72a35d", "f2d998095bb340279a9587e5ceb9b1eb", "cae8d12cc9764bccaf7ed190423461b0", "<KEY>", "<KEY>", "<KEY>", "fdac146611d1433aa09933ad9a2c633c", "54b3317177ce49abb64fb4e4945d55e5", "9d2d798433ce4917b748c4e1455edef0", "e5615c6023ab40b48078ae8ca862bf7a", "3b505386a0ca46d396a007690974671a", "b16efcd0bdca4bf593abe85282e66058", "<KEY>", "<KEY>", "<KEY>", "8ba5f49b539d49d59d06c063e8e4fd48", "<KEY>", "<KEY>", "<KEY>", "a7289ce3376d4815929120fae863c703", "<KEY>", "62553bc9eed9419392ab6e3dd64c3d2d", "<KEY>", "<KEY>", "<KEY>", "603eed4e108f4c2aa11d5bb560a2cd77"]} id="rU4q3qyY_Dmo" outputId="bbf643ce-d766-422a-9258-5c499bf25e66"
profile_report = ProfileReport(dataset, html={"style": {"full_width": True}})
profile_report.to_file("/content/techscape.html")
| ML_Pandas_Profiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="0MRC0e0KhQ0S"
# # Logistic Regression
# + [markdown] colab_type="text" id="LWd1UlMnhT2s"
# ## Importing the libraries
# + colab={} colab_type="code" id="YvGPUQaHhXfL"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] colab_type="text" id="K1VMqkGvhc3-"
# ## Importing the dataset
# + colab={} colab_type="code" id="M52QDmyzhh9s"
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values
Y = Y.reshape(len(Y),1)
# + [markdown] colab_type="text" id="YvxIPVyMhmKp"
# ## Splitting the dataset into the Training set and Test set
# -
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.25,random_state=0)
# + [markdown] colab_type="text" id="kW3c7UYih0hT"
# ## Feature Scaling
# -
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
# Fix: use transform (not fit_transform) so the test set is scaled with the
# statistics learned from the training data; refitting on the test set leaks
# test information and makes train/test features inconsistent.
X_test = sc.transform(X_test)
# + [markdown] colab_type="text" id="bb6jCOCQiAmP"
# ## Training the Logistic Regression model on the Training set
# -
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train,np.ravel(Y_train,'C'))
# + [markdown] colab_type="text" id="yyxW5b395mR2"
# ## Predicting a new result
# -
print(classifier.predict(sc.transform([[30,87000]])))
# + [markdown] colab_type="text" id="vKYVQH-l5NpE"
# ## Predicting the Test set results
# -
Y_pred = classifier.predict(X_test)
print(np.concatenate((Y_pred.reshape(len(Y_pred),1), Y_test.reshape(len(Y_test),1)),1))
# + [markdown] colab_type="text" id="h4Hwj34ziWQW"
# ## Making the Confusion Matrix
# -
from sklearn.metrics import confusion_matrix,accuracy_score
cm = confusion_matrix(Y_test,Y_pred)
print(cm)
print(accuracy_score(Y_test,Y_pred))
# + [markdown] colab_type="text" id="6OMC_P0diaoD"
# ## Visualising the Training set results
# -
from matplotlib.colors import ListedColormap
X_set, y_set = sc.inverse_transform(X_train), Y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25),
np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25))
plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# + [markdown] colab_type="text" id="SZ-j28aPihZx"
# ## Visualising the Test set results
# -
from matplotlib.colors import ListedColormap
X_set, y_set = sc.inverse_transform(X_test), Y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25),
np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25))
plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={"grade": false, "grade_id": "cell-86e0de040aac317a", "locked": true, "schema_version": 2, "solution": false}
# # Lab assignment №2: Gradient boosting and feature importance estimation
#
# This lab assignment consists of several parts. You are supposed to make some transformations, train some models, estimate the quality of the models and explain your results.
#
# Several comments:
# * Don't hesitate to ask questions, it's a good practice.
# * No private/public sharing, please. The copied assignments will be graded with 0 points.
# * Blocks of this lab will be graded separately.
# -
# Here we will work with the widely known Human Activity Recognition (HAR) dataset. Data is available at [UCI repository](https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones). Download it and place it in the `data/` folder in the same directory as this notebook. Both raw and preprocessed datasets are available. This time we will use the preprocessed one.
#
# There are several great frameworks (listed below). However, we recommend to stick to `LightGBM` for this task.
# * LightGBM by Microsoft. [Link to github](https://github.com/Microsoft/LightGBM). It is one of the most popular frameworks these days that shows both great quality and performance.
# * xgboost by dlmc. [Link to github](https://github.com/dmlc/xgboost). The most famous framework which got very popular on kaggle.
# * Catboost by Yandex. [Link to github](https://github.com/catboost/catboost). Novel framework by Yandex company tuned to deal well with categorical features.
#
# Some simple preprocessing is done for you.
#
# Parts 1 and 3 have the same weight equal to $1$. Part 2 has weight $0.5$.
#
# ### Part 1:
# Your __ultimate target is to get familiar with one of the frameworks above__ and achieve at least 90% accuracy on test dataset:
#
# * $\geq 90\%$ accuracy: 0.5 points for this part
# * $\geq 92\%$ accuracy: 0.7 points for this part
# * $\geq 94\%$ accuracy: 1 point for this part
# Fix: numpy is used below (np.genfromtxt) but was never imported in this notebook.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# +
# Load the preprocessed HAR feature matrices and label vectors.
X_train = np.genfromtxt('data/train/X_train.txt')
y_train = np.genfromtxt('data/train/y_train.txt')
X_test = np.genfromtxt('data/test/X_test.txt')
y_test = np.genfromtxt('data/test/y_test.txt')

# Parse "id name" lines into a {label_id: activity_name} mapping.
with open('data/activity_labels.txt', 'r') as iofile:
    activity_labels = iofile.readlines()
activity_labels = [x.replace('\n', '').split(' ') for x in activity_labels]
activity_labels = dict([(int(x[0]), x[1]) for x in activity_labels])
# -
activity_labels
# +
print(X_train.shape)
# Standardise both splits with statistics computed on the TRAINING set only,
# to avoid leaking test-set information into preprocessing.
data_mean = X_train.mean(axis=0)
data_std = X_train.std(axis=0)

X_train = (X_train - data_mean)/data_std
X_test = (X_test - data_mean)/data_std
# -
# The dataset has some duplicating features. File `unique_columns.txt` stores the indices of the unique ones.
# +
# Keep only the unique (non-duplicated) feature columns; fetch the index file
# from the course repository if it is missing locally (the `# !` line is a
# notebook shell escape).
try:
    unique_columns = np.genfromtxt('unique_columns.txt', delimiter=',').astype(int)
except FileNotFoundError:
    # ! wget https://raw.githubusercontent.com/girafe-ai/ml-mipt/basic_s20/homeworks_basic/Lab2_boosting/unique_columns.txt -nc
    unique_columns = np.genfromtxt('unique_columns.txt', delimiter=',').astype(int)

X_train_unique = X_train[:, unique_columns]
X_test_unique = X_test[:, unique_columns]
# -
# PCA could be useful in this case. E.g.
pca = PCA(0.99)  # keep enough components to explain 99% of the variance
X_train_pca = pca.fit_transform(X_train_unique)  # fit on the training split only
X_test_pca = pca.transform(X_test_unique)

X_train_pca.shape

X_test_pca.shape

# First two principal components of 1000 training samples, coloured by label.
plt.scatter(X_train_pca[:1000, 0], X_train_pca[:1000, 1], c=y_train[:1000])
plt.grid()
plt.xlabel('Principal component 1')
plt.ylabel('Principal component 2')

# Components 4 and 5 (0-based indices 3 and 4).
plt.scatter(X_train_pca[:1000, 3], X_train_pca[:1000, 4], c=y_train[:1000])
plt.grid()
plt.xlabel('Principal component 4')
plt.ylabel('Principal component 5')
# Despite optimal parameters (e.g. for xgboost) can be found on the web, we still want you to use grid/random search (or any other approach) to approximate them by yourself.
#
# Please try at least several models of different structure.
#
# Provide the following to describe your path:
#
# * Plot describing the model accuracy/precision/recall w.r.t. model complexity.
# * ROC-AUC plot for the 3 best models you acquired (for the multiclass case you might refer to the `scikit-plot` library).
# * Small report describing your experiments.
#
# [DART](https://arxiv.org/abs/1505.01866) might be useful as well in your experiments. It is available in [xgboost](https://xgboost.readthedocs.io/en/latest/tutorials/dart.html) and [LightGBM](https://lightgbm.readthedocs.io/en/latest/Parameters.html), but seems [missing in CatBoost](https://github.com/catboost/catboost/issues/1006).
#
# __Without the report and plots maximum score for this part of the lab is 0.3 of its full weight.__
# +
# Your code here.
# -
# ### Part 2. Blending the models
#
# Take three (or more) best models and try to build the blending ensemble of them. Compare the quality of the final model using the same quality measures as above.
# +
# Your code here
# -
# ### Part 3. Explaining the model and estimating the feature importances.
#
# Now your goal is to take the three best models and estimate feature importances using these models.
#
# * First, use the methods that libraries provide by default (e.g. `lightgbm.plot_importance`).
# * Next, use the [`shap`](https://github.com/slundberg/shap) library to explain the models behaviour and analyse the model performance. Compare the feature importances estimated by `shap` and by methods on the previous step.
# +
# Your code here.
| homeworks_basic/Lab2_boosting/Lab2_boosting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import tensorflow as tf
import matplotlib.pyplot as plt

# Windows-style relative paths to a small 4-class CIFAR-10 subset.
train_dir='../dataset\\cifar_10_small\\train'
test_dir='../dataset\\cifar_10_small\\test'

# Per-class image directories for the training split.
train_aeroplane_dir= os.path.join(train_dir,'aeroplane')
train_bird_dir=os.path.join(train_dir,'bird')
train_car_dir= os.path.join(train_dir,'car')
train_cat_dir=os.path.join(train_dir,'cat')

# Per-class image directories for the test split.
test_aeroplane_dir= os.path.join(test_dir,'aeroplane')
test_bird_dir=os.path.join(test_dir,'bird')
test_car_dir= os.path.join(test_dir,'car')
test_cat_dir=os.path.join(test_dir,'cat')

# Report how many images each class directory contains
# (the Korean print labels read "total number of training/test X images").
print('훈련용 aeroplane 이미지 전체 개수:', len(os.listdir(train_aeroplane_dir)))
print('훈련용 bird 이미지 전체 개수:', len(os.listdir(train_bird_dir)))
print('훈련용 car 이미지 전체 개수:', len(os.listdir(train_car_dir)))
print('훈련용 cat 이미지 전체 개수:', len(os.listdir(train_cat_dir)))

print('테스트용 aeroplane 이미지 전체 개수:', len(os.listdir(test_aeroplane_dir)))
print('테스트용 bird 이미지 전체 개수:', len(os.listdir(test_bird_dir)))
print('테스트용 car 이미지 전체 개수:', len(os.listdir(test_car_dir)))
print('테스트용 cat 이미지 전체 개수:', len(os.listdir(test_cat_dir)))
# +
batch_size=20

### data
# Training-set generator: rescale pixels to [0, 1] and apply random
# augmentation (rotation, shifts, shear, zoom, flips).
agumentation_train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,   # horizontal shift, as a fraction of width
    height_shift_range=0.2,  # vertical shift, as a fraction of height
    shear_range=0.2,         # shear intensity
    zoom_range=0.2,          # zoom range [1-0.2, 1+0.2]
    horizontal_flip=True,)   # random horizontal flips

# Test images are only rescaled, never augmented.
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)

train_generator = agumentation_train_datagen.flow_from_directory(
        directory=train_dir,       # target directory
        target_size=(300, 300),    # resize every image to 300 x 300
        batch_size=batch_size,
        interpolation='bilinear',  # interpolation used when resizing
        color_mode ='rgb',
        shuffle=True,              # was the string 'True' (truthy but not a bool)
        # one-hot labels to match the categorical_crossentropy loss used below
        class_mode='categorical')  # categorical , sparse , input

print(train_generator.class_indices)

test_generator = test_datagen.flow_from_directory(
        directory=test_dir,
        target_size=(300, 300),
        batch_size=batch_size,
        shuffle=True,              # was the string 'True'
        interpolation='bilinear',
        color_mode='rgb',
        class_mode='categorical')  #categorical

# Peek at one batch to confirm shapes: (20, 300, 300, 3) data, (20, 4) labels.
for data_batch, labels_batch in train_generator:
    print('배치 데이터 크기:', data_batch.shape)
    print('배치 레이블 크기:', labels_batch.shape)
    break
# +
## Backbone: ImageNet-pretrained ResNet50 used as a feature extractor;
## include_top=False drops the original ImageNet classification head.
conv_base=tf.keras.applications.ResNet50(weights='imagenet',include_top=False)
#conv_base.summary()

## Model: a new dense classification head on top of the backbone.
input_Layer = tf.keras.layers.Input(shape=(300,300,3))
x=conv_base(input_Layer)
x=tf.keras.layers.Flatten()(x)
x= tf.keras.layers.Dense(512, activation='relu')(x)
Out_Layer= tf.keras.layers.Dense(4, activation='softmax')(x)  # 4 classes

conv_base.trainable = False  # freeze the backbone; only the head is trained
model = tf.keras.Model(inputs=[input_Layer], outputs=[Out_Layer])
model.summary()
# +
loss_function=tf.keras.losses.categorical_crossentropy
optimize=tf.keras.optimizers.Adam(learning_rate=0.0001)
metric=tf.keras.metrics.categorical_accuracy

model.compile(loss=loss_function,
              optimizer=optimize,
              metrics=[metric])

## A generator yields input/target batches endlessly, so the model must be
## told how many batches constitute one epoch (steps_per_epoch).
## With steps_per_epoch = total_images // batch_size, one epoch consumes
## roughly every training image once; each batch of 20 is drawn randomly.
## (The original Korean note stated the ratio as batch/total, but the code
## correctly computes total // batch.)
history = model.fit(
      train_generator,
      steps_per_epoch=(len(os.listdir(train_aeroplane_dir))+len(os.listdir(train_bird_dir))+len(os.listdir(train_car_dir))+len(os.listdir(train_cat_dir)))//batch_size,
      epochs=30,
      validation_data=test_generator,
      validation_freq=1)  # validate after every epoch

model.save('multi_classification_augumentation_model.hdf5')
# +
# Learning curves: accuracy and loss for training vs validation.
acc = history.history['categorical_accuracy']
val_acc = history.history['val_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
| tensorflow/day5/answer/A_05_01_pretraind_feature_extraction_2.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# ## U shape PHP dynamics
using Plots
pyplot()
using Revise
using EnKF
using Distributions
using DocStringExtensions
using LinearAlgebra
using ProgressMeter
using DifferentialEquations
# +
# Load the project's local modules from the src/ directory.
# NOTE(review): the two cd() calls change the process working directory as a
# global side effect; re-running this cell from src/ will walk up one level.
cd(dirname(pwd()))
cd("src")
include("Systems.jl")
using ..Systems
include("Tools.jl")
using ..Tools
include("Thermomodel.jl")
using ..Thermomodel
include("Postprocessing.jl")
using ..Postprocessing
# -
# ## Initialization
# Physical parameters and model objects for the pulsating heat pipe (PHP).
ω0 = [sqrt(1.2e004)];   # natural frequency of the liquid slug
℘ = [1.2e005];          # pressure-force coefficient
Θ = 0.15;               # half-difference of the wall temperatures
θe = 1 + Θ;             # evaporator temperature
θc = 1 - Θ;             # condenser temperature
He = 3000.0;            # evaporator heat-transfer coefficient (true value)
Hc = 3000.0;            # condenser heat-transfer coefficient
γ = 1.3;#not 1.4!!!!! maybe 1.33?
L = 4.0 # total length of the pipe when stretched to a 1D pipe
L2D = 2.0 # the actual length of the bended pipe in the real world
alpha = pi/2 # inclination angle
tube = Tube(L,L2D,alpha)

# Evaporator/condenser sections given as (start, end) interval tuples.
Xe = map(tuple, [0.0; 3.0], [1.0; 4.0])
evap = Evaporator(He,θe,Xe)

Xc = map(tuple, [1.0], [3.0])
cond = Condenser(Hc,θc,Xc)

X0 = [(1.5,3.5)]        # initial liquid-slug end points
dXdt0 = [(0.0,0.0)]     # initial slug velocities

liquids=LiquidSlug(γ,ω0,℘,X0,dXdt0)
# +
P = [1.0,1.0]           # initial pressures of the two vapor plugs
vapors=VaporPlug(γ,P)
# -
sys0 = PHPSystem(tube,evap,cond,liquids,vapors)
# ## Get numerical solution
# +
Lvaporplug = XptoLvaporplug(X0,L)
# Vapor masses from the adiabatic relation P ~ (M/L)^γ.
M = P.^(1/γ).* Lvaporplug
# Augmented state: slug positions/velocities, vapor masses, and He appended
# as a constant entry so the EnKF can estimate it jointly with the dynamics.
u=vcat(XMtovec(sys0.liquidslug.Xp,sys0.liquidslug.dXdt,M), He...)
# -
# ## EnKF PropagationFunction
# +
# Right-hand side of the PHP ODE system, augmented for EnKF use: the last
# state entry carries the evaporator coefficient He as a constant parameter
# (du[end] = 0), allowing the filter to estimate it alongside the dynamics.
function zhang2002modelEnKF!(du::Array{Float64,1},uEnKF::Array{Float64,1},p::PHPSystem,t::Float64)
    sys = p

    # added He as a state vector entry
    u = uEnKF[1:end-1]
    uHe = deepcopy(uEnKF[end])

    # State layout: 2n positions, 2n velocities, n+1 masses (n = #slugs),
    # hence the (length-1)/5 relation -- TODO confirm against vectoXM.
    numofliquidslug = Integer( (length(u) - 1)/5 )
    (Xp,dXdt0,M)=vectoXM(u)

    γ = sys.liquidslug.γ
    ω0 = sys.liquidslug.ω0
    ℘ = sys.liquidslug.℘

    Lvaporplug = XptoLvaporplug(Xp,sys.tube.L)
    # Lliquidslug = XptoLliquidslug(Xp)
    height = getheight(Xp,sys.tube.L2D,sys.tube.alpha)
    Xpvapor = getXpvapor(Xp,sys.tube.L)

    # Adiabatic vapor: P = (M/L)^γ, θ = P^((γ-1)/γ).  The complex lift
    # (+0im) guards against fractional powers of slightly negative bases.
    P = real.((M./Lvaporplug .+ 0im).^(γ))
    θ = real.((P .+ 0im).^((γ-1)/γ))

    for i = 1:numofliquidslug
        du[2*i-1] = u[2*numofliquidslug+2*i-1]
        du[2*i] = du[2*i-1]
        # Slug momentum balance: friction, gravity head, pressure difference.
        du[2*numofliquidslug + 2*i-1] = -32*u[2*numofliquidslug + 2*i-1] - (ω0[i]^2)*(0.5*(height[i][end]-height[i][1])) + ℘[i]*(P[i]-P[i+1])
        du[2*numofliquidslug + 2*i] = du[2*numofliquidslug + 2*i-1]
    end

    du[4*numofliquidslug+1:5*numofliquidslug+1] .= dMdtzhang2002modelEnKF(Xpvapor,θ,sys,uHe)

    du[end]=0.0  # He is modeled as constant in time

    return du
end
# Mass-transfer rate for each vapor plug: evaporation where the plug overlaps
# the evaporator, condensation where it overlaps the condenser.  He comes
# from the augmented state (uHe), not from the system struct.
function dMdtzhang2002modelEnKF(Xpvapor::Array{Tuple{Float64,Float64},1},θ::Array{Float64,1},sys::PHPSystem,uHe)

    dMdt=zeros(length(Xpvapor))

    Xe = sys.evaporator.Xe
    He = uHe
    θe = sys.evaporator.θe

    Xc = sys.condenser.Xc
    Hc = sys.condenser.Hc
    θc = sys.condenser.θc

    Levapoverlap=XpvaportoLoverlap(Xpvapor,Xe)
    Lcondoverlap=XpvaportoLoverlap(Xpvapor,Xc)

    # May not be right for multi liquid flow
    for i = 1:length(Xpvapor)
        # Condensation takes precedence whenever there is condenser overlap.
        if Lcondoverlap[i] < 1e-8
            dMdt[i] = He * Levapoverlap[i] * (θe-θ[i])
        else
            dMdt[i] = -Hc * Lcondoverlap[i] * (θ[i]-θc)
        end
    end

    return dMdt
end
# +
# Reference ("truth") trajectory: integrate the augmented ODE with fixed-step
# RK4; `integrator` is reused by the EnKF propagation function below.
tstep=1.0e-5
p = sys0
tspan = (0.0, 0.1);
T = tspan[1]:tstep:tspan[end]
prob = ODEProblem(zhang2002modelEnKF!, u, tspan, p)
sol = solve(prob, RK4(), adaptive=false, dt=tstep);
integrator = init(prob, RK4(), adaptive =false, dt = tstep, save_everystep=false)
# -
# ## EnKF
# Deliberately wrong He guess used to initialise the filter ensemble.
newHe = 10000.0
# EnKF forecast step: advance every ensemble member one RK4 step by reusing
# the shared `integrator` (its time and state are reset for each member).
function (::PropagationFunction)(t::Float64, ENS::EnsembleState{N, TS}) where {N, TS}
    for (i,s) in enumerate(ENS.S)
        set_t!(integrator, deepcopy(t))
        set_u!(integrator, deepcopy(s))
        # for j=1:10
        step!(integrator)
        # end
        ENS.S[i] = deepcopy(integrator.u)
    end
    return ENS
end
fprop = PropagationFunction()

# Observation operator: map a state to the two vapor temperatures θ1, θ2.
function (::MeasurementFunction)(t::Float64, s::TS) where TS
    result=soltoResult(s[1:end-1],sys0);
    # return [s[3]]
    return [deepcopy(result.θ[1]),deepcopy(result.θ[2])]
end

# Linearised observation matrix (2 measurements x 7 augmented states).
# NOTE(review): its nonzero pattern should correspond to the θ observation
# above -- confirm against the EnKF package's expected convention.
function (::MeasurementFunction)(t::Float64)
    # return reshape([0.0, 0.0, 1.0, 0.0, 0.0, 0.0],(1,6))
    return reshape([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,0.0,0.0],(2,7))
end

m = MeasurementFunction()

# Synthetic "real" measurements: sample the truth trajectory `sol` at time t
# and broadcast the two temperatures to every ensemble slot.
function (::RealMeasurementFunction)(t::Float64, ENS::EnsembleState{N, TZ}) where {N, TZ}
    let s = sol(t)
        # fill!(ENS, [deepcopy(s[3])])
        result=soltoResult(s[1:end-1],sys0);
        fill!(ENS, [deepcopy(result.θ[1]),deepcopy(result.θ[2])])
    end
    return ENS
end
# A = MultiAdditiveInflation(2, 1.05, MvNormal(zeros(1), 2.0*I))
A = MultiAdditiveInflation(7, 1.001, MvNormal(zeros(7), 0.001*Diagonal([0.1,0.1,10.0,10.0,0.1,0.1,100.0])))
# A = IdentityInflation()
# this is measurement noise!!!!!!!!
# ϵ = AdditiveInflation(MvNormal(zeros(1), 1.0*I))
ϵ = AdditiveInflation(MvNormal(zeros(2), 0.1*Diagonal([0.1,0.1])))
N = 50
# NZ = 1
NZ = 2
isinflated = false
isfiltered = false
isaugmented = true
# +
x₀ = [1.0, 3.0, 0.0, 0.0, 0.5, 1.5, newHe]
ens = initialize(N, MvNormal(x₀, 0.1*Diagonal([1.0,1.0,100.0,100.0,1.0,1.0,3000.0])))
estimation_state = [deepcopy(ens.S)]
true_state = [deepcopy(x₀)]
covs = []
# -
g = FilteringFunction()
enkf = ENKF(N, NZ, fprop, A, g, m, z, ϵ, isinflated, isfiltered, isaugmented)
# +
Δt = 1e-5
Tsub = 0.0:Δt:0.1-Δt
for (n,t) in enumerate(Tsub)
global ens
t, ens, cov = enkf(t, Δt, ens)
push!(estimation_state, deepcopy(ens.S))
push!(covs, deepcopy(cov))
end
# -
# ## Show the results
# +
# Post-processing: compare the truth trajectory with the ensemble mean.
s = hcat(sol(T).u...)
ŝ = hcat(mean.(estimation_state)...)
# ŝ = hcat(solEnKF(T).u...)
results=soltoResult(sol[1:end-1,:],sys0);
resultŝ=soltoResult(ŝ[1:end-1,:],sys0);
# resultŝ=soltoResult(solEnKF[1:end-1,:],sys0);
# +
# Slug position and vapor mass: truth vs EnKF mean.
plt1 = plot(layout = (2, 1), legend = :bottomright)
plot!(plt1[1], T, results.Xp[1][1:end-1], linewidth = 3, label = "truth")
plot!(plt1[1], Tsub, resultŝ.Xp[1][1:end-1], linewidth = 3, markersize = 2, label = "EnKF mean", xlabel = "t", ylabel = "x", linestyle =:dash,title="Xp1 location")
plot!(plt1[2], T, results.M[1][1:end-1], linewidth = 3, label = "truth")
plot!(plt1[2], Tsub, resultŝ.M[1][1:end-1], linewidth = 3, markersize = 2, label = "EnKF mean", xlabel = "t", ylabel = "y", linestyle =:dash,title="M1")
# +
# Observed temperature and the estimated He parameter (7th state entry).
plt2 = plot(layout = (2, 1), legend = :bottomright)
plot!(plt2[1], T, results.θ[1][1:end-1], linewidth = 3, label = "truth")
plot!(plt2[1], Tsub, resultŝ.θ[1][1:end-1], linewidth = 3, markersize = 2, label = "EnKF mean", xlabel = "t", ylabel = "x", linestyle =:dash,title="Temperature 1")
# plot!(plt2[2], T, s[5,1:end], linewidth = 3, label = "truth")
plot!(plt2[2], Tsub, ŝ[7,1:end-1], linewidth = 3, markersize = 2, label = "EnKF mean", xlabel = "t", ylabel = "y", linestyle =:dash,title="He")
# -
# Diagonal covariance entries over time (log scale); eps() avoids log(0).
plot(Tsub, map(covs) do P
    P[1,1]+ eps() end, yscale = :log10, linewidth = 3, label = "1")
plot!(Tsub, map(covs) do P
    P[5,5]+ eps() end, yscale = :log10, linewidth = 3, xlabel = "t", label = "5")
ens.S
# NOTE(review): `solEnKF` is never defined in this notebook (only `sol` and
# `estimation_state` exist) -- the next line will error; presumably a
# leftover from an earlier revision. Confirm and remove.
solEnKF[5000]
| test/.ipynb_checkpoints/OHP He estimation from temperature measurements-no inflation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### 1. Which of the following will an application spend the longest time retrieving data from?
# - [ ] CPU L2 cache
# - [ ] RAM
# - [ ] Disk
# - [x] **The network**
#
# ### 2. Which tool can you use to verify reports of 'slowness' for web pages served by a web server you manage?
# - [ ] The top tool
# - [x] **The ab tool**
# - [ ] The nice tool
# - [ ] The pidof tool
#
# ### 3. If our computer running Microsoft Windows is running slow, what performance monitoring tools can we use to analyze our system resource usage to identify the bottleneck? (Check all that apply)
# - [x] **Performance Monitor**
# - [x] **Resource Monitor**
# - [ ] Activity Monitor
# - [ ] top
#
# ### 4. Which of the following programs is likely to run faster and more efficiently, with the least slowdown?
# - [ ] A program with a cache stored on a hard drive
# - [x] **A program small enough to fit in RAM**
# - [ ] A program that reads files from an optical disc
# - [ ] A program that retrieves most of its data from the Internet
#
# ### 5. What might cause a single application to slow down an entire system? (Check all that apply)
# - [x] **A memory leak**
# - [ ] The application relies on a slow network connection
# - [x] **Handling files that have grown too large**
# - [ ] Hardware faults
| troubleshooting-debugging-techniques/week-2/quiz-understanding-slowness.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # COMP9318 Lab4
# ## Instructions
# 1. This note book contains instructions for **COMP9318-Lab4**.
#
# * You are required to complete your implementation in a file `submission.py` provided along with this notebook.
#
# * You are not allowed to print out unnecessary stuff. We will not consider any output printed out on the screen. All results should be returned in appropriate data structures via corresponding functions.
#
# * You can submit your implementation for **Lab4** via following link: https://kg.cse.unsw.edu.au/submit/ .
#
# * For each question, we have provided you with detailed instructions along with question headings. In case of any problem, you can post your query @ Piazza.
#
# * You are allowed to add other functions and/or import modules (you may have to in this lab), but you are not allowed to define global variables. **Only functions are allowed** in `submission.py`.
#
# * You should not import unnecessary modules/libraries, failing to import such modules at test time will lead to errors.
#
# * We will provide immediate feedback on your submission. You can access your scores using the online submission portal on the same day.
#
# * For **Final Evaluation** we will be using a different dataset, so your final scores may vary.
#
# * You are allowed to submit as many times as you want before the deadline, but **ONLY the latest version will be kept and marked**.
#
# * Submission deadline for this assignment is **23:59:59 on 16th April, 2019**. We will **not** accept any late submissions.
# # Question-1: Text Classification using Multinomial Naive Bayes
#
# You are required to implement a multinomial naive bayes classifier to predict spam SMS.
#
# The training data is a set of SMS categoried into `spam` and `ham`.
# +
import pandas as pd

# Tab-separated file with one SMS per row: columns `category` ('spam'/'ham')
# and `text`.
raw_data = pd.read_csv('./asset/data.txt', sep='\t')
raw_data.head()
# -
# In order to implement a unigram model, first we tokenize the text. We use the count corresponding to each token (word) in the SMS as its feature (i.e., bag of words). We store the features and categorical information for each SMS in a `dictionary`.
# +
def tokenize(sms):
    """Split an SMS message into tokens on single spaces."""
    return sms.split(' ')


def get_freq_of_tokens(sms):
    """Return a dict mapping each token of *sms* to its occurrence count.

    These counts are the bag-of-words features used by the multinomial
    naive Bayes classifier.  Uses collections.Counter instead of the
    hand-rolled membership-test loop.
    """
    from collections import Counter
    return dict(Counter(tokenize(sms)))
# Build (bag-of-words dict, label) pairs for every SMS in the corpus.
training_data = []
for index in range(len(raw_data)):
    training_data.append((get_freq_of_tokens(raw_data.iloc[index].text), raw_data.iloc[index].category))
# -
# For this lab, you need to **implement** a multinomial naive bayes classifier (i.e., `multinomial_nb()` in the file: `submission.py`) with add-1 smoothing. The input arguments of `multinomial_nb()` are:
# * `training_data`: pre-processed data stored as a `dictionary`
# * `sms`: test-sms (i.e., a list of tokens) that you need to categorize as `spam` and/or `ham`
#
# The return value of `multinomial_nb()` should be the **ratio** of the probability of sms is spam and the probability of sms is ham. A return value larger than 1 implies the `sms` is spam and vice versa.
#
# For example, a sample output is shown in the cell given below:
# +
## How we test your implementation...
import submission_ans as submission

# A returned ratio > 1 means the classifier labels the message as spam.
sms = 'I am not spam'
print(submission.multinomial_nb(training_data, tokenize(sms)))
# -
#
#
# # Test Environment
#
# For testing, we have pre-installed the requisite modules and/or libraries in the testing environment. You are only allowed to use following libraries:
# * python: 3.6.5
# * pandas: 0.19.2
#
# NOTE: You are required to implement the classifier by yourself. You are not allowed to import **sklearn** and/or any other library in Lab4.
# +
import submission as submission
sms = 'I am not spam'
print(submission.multinomial_nb(training_data, tokenize(sms)))
# -
| Lab4/Lab4-Specs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question 1
# +
# %%writefile primechecker.py
'''another test comment'''
def check_prime(num):
    """Print (and return) whether *num* is a prime number.

    Returns True for primes and False otherwise so callers can also use
    the result programmatically.  Bug fixed: the original printed a
    verdict for EVERY trial divisor (the else was attached to the inner
    if), and printed nothing at all for num == 2; a single verdict must
    be printed only after all divisors have been ruled out.
    """
    if num > 1:
        for i in range(2, num):
            if (num % i) == 0:
                print(num, "is not a prime number")
                return False
        # loop completed without finding a divisor -> prime
        print(num, "is a prime number")
        return True
    print(num, "is not a prime number")
    return False
# -
# ! pylint "primechecker.py"
# +
#using unittesting
# +
# %%writefile prime-checkr.py
import unittest


class primecheckr(unittest.TestCase):
    """Unit-test wrapper around a prime-checking helper.

    NOTE(review): `ansckr` lacks the `test_` prefix, so unittest's default
    discovery never runs it -- confirm whether that is intentional.
    """

    def check_prime(self, num):
        """Print (and return) whether *num* is prime.

        Bug fixed: the original printed a verdict for every trial divisor.
        """
        if num > 1:
            for i in range(2, num):
                if (num % i) == 0:
                    print(num, "is not a prime number")
                    return False
            # no divisor found after the full loop -> prime
            print(num, "is a prime number")
            return True
        print(num, "is not a prime number")
        return False

    def ansckr(self):
        # Bug fixed: `check_prime(2)` raised NameError (the helper is a
        # method, so it needs self), and assertEquals is deprecated.
        result = "is a prime number" if self.check_prime(2) else "is not a prime number"
        self.assertEqual(result, "is a prime number")


unittest.main(argv=['first-arg-is-ignored'], exit=False)
# -
# ! python prime-checkr.py
# # Question 2
# +
def armstrongcheckr(num):
    """Return *num* if it is an Armstrong number, else None.

    An Armstrong number (with the cube convention used throughout this
    notebook) equals the sum of the cubes of its decimal digits,
    e.g. 153 = 1**3 + 5**3 + 3**3.
    """
    total = 0  # was named `sum`, shadowing the builtin
    temp = num
    # accumulate the cube of each digit
    while temp > 0:
        digit = temp % 10
        total += digit ** 3
        temp //= 10
    if num == total:
        return num
    return None  # explicit instead of falling off the end
# -
armstrongcheckr(153)  # -> 153 (an Armstrong number); non-Armstrong inputs yield None
lst = list(range(1, 1001))


def ArmStrongCheckrGen(lst):
    """Yield the Armstrong numbers found in the iterable *lst*, in order.

    A number qualifies when it equals the sum of the cubes of its decimal
    digits (the convention used by armstrongcheckr above).
    """
    for item in lst:
        total = 0  # was named `sum`, shadowing the builtin
        temp = item
        while temp > 0:
            digit = temp % 10
            total += digit ** 3
            temp //= 10
        if item == total:
            yield item


# Armstrong numbers in 1..1000: [1, 153, 370, 371, 407]
print(list(ArmStrongCheckrGen(lst)))
| Assignments Day 9.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="0H_U8pz-6Q2J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 140} outputId="59a93be5-1181-4c63-a507-a65eab2a61a5"
# Mount Google Drive in this Colab session and switch into the project folder.
from google.colab import drive
import os
drive.mount('/content/gdrive')
os.chdir("/content/gdrive/My Drive/animalese-generator-master")
os.getcwd()  # echo the working directory to confirm the chdir succeeded
# + id="cNBL_f4R7wqt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="2db70feb-2373-49e2-960c-9c2c031d8816"
# !pip install pydub
# !pip install ffmpeg
# + id="9cYHOfOrvR9P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="9cad1692-c855-48c3-fd45-e5da236e7649"
# !python my_animalese.py \
# --stringy ''
# + id="5qq6pRPb2Ot1" colab_type="code" colab={}
ch = "Bang"
print(ch.isalpha())  # True: every character of "Bang" is alphabetic
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SD211 TP1 Systèmes de recommandation
# *<NAME>*
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy.optimize import check_grad
from scipy import linalg
# %matplotlib inline
# # 1 Présentation du modèle
# ## Question 1.1
def load_movielens(filename, minidata=False):
    """Read a MovieLens ratings file into dense user-item matrices.

    Parameters
    ----------
    filename : path to the raw ratings file, e.g. 'ml-100k/u.data',
        whose rows are (user, item, rating, timestamp) integers.
    minidata : when True, keep only the first 100 users / 200 items.

    Returns
    -------
    R : user-item matrix of scores (0 where no rating exists)
    mask : boolean matrix, True wherever a score is present
    """
    raw = np.loadtxt(filename, dtype=int)
    users = raw[:, 0] - 1   # ids are 1-based in the file
    items = raw[:, 1] - 1
    scores = raw[:, 2]

    # Dense conversion is fine for MovieLens-100k, but not for big data.
    R = sparse.coo_matrix((scores, (users, items)), dtype=float).toarray()

    # mask encodes the indicator function 1_K of observed entries.
    mask = sparse.coo_matrix((np.ones_like(scores), (users, items)),
                             dtype=bool).toarray()

    if minidata is True:
        R = R[0:100, 0:200].copy()
        mask = mask[0:100, 0:200].copy()

    return R, mask
R, mask = load_movielens('ml-100k/u.data', minidata=False)
print R.shape, mask.shape  # Python 2 print statement: (943, 1682) users x films
# ## Reponse:
# L'option minidata est pour juste choisir un dataset de taille 100*200 donc on peut réduire le temps de calcul.
# ## Question 1.2
print np.nonzero(R)[0].shape  # number of observed ratings (non-zero entries)
# ## Reponse:
#
# Selon le taille de R et le nombre de non_zero valeurs, on peut voir que:
# 1. Le nombre d'utilisateurs: 943;
# 2. Le nombre de films: 1682;
# 3. Le nombre total de notes: 100000
# ## Question 1.3
def objSimple(P, Q, R, rho):
    """Scalar surrogate of the matrix objective: squared residual of
    R - Q*P plus a ridge penalty on P and Q.  Works elementwise on
    numpy arrays, which is how the 3D surface plot below uses it."""
    residual = R - Q * P
    penalty = (rho / 2.) * (P ** 2 + Q ** 2)
    return residual ** 2 / 2. + penalty
# +
fig = plt.figure(figsize=(12, 8))
ax = fig.gca(projection='3d')

# Make data: scalar surrogate with R=10, rho=0.2, to visualise that the
# objective is NOT jointly convex in (P, Q).
P = np.arange(-5, 5, 0.05)
Q = np.arange(-5, 5, 0.05)
P, Q = np.meshgrid(P, Q)
Z = objSimple(P, Q, 10, 0.2)

# Plot the surface.
surf = ax.plot_surface(P, Q, Z, cmap=cm.coolwarm,
                       linewidth=0, antialiased=True)

# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)

plt.show()
# -
# ## Reponse:
# On prend une situation extrême, c'est à dire qu'on utilise les nombres pour remplacer les matrices.
# Selon le graphe, on peut voir que le minimum n'est pas au milieu du surface. Donc, c'est pas convexe.
# # 2 Trouver $P$ quand $Q_0$ est fixé
# ## Question 2.1
#
# $$g(P) = \frac{1}{2}\|1_K\circ(R - Q^0P)\|^2_F+\frac{\rho}{2}\|Q^0\|^2_F+\frac{\rho}{2}\|P\|^2_F$$
# <br>
# $$\triangledown g(P) = - (Q^0)^T(1_K \circ (R - Q^0P)) + \rho P$$
# <br>
# $${\triangledown} ^2 g(P) = (Q^0)^TQ^0 + \rho I$$<br>
# Il est évident que la matrice est définie positive, donc $g(P)$ est convexe.<br>
# Son gradient est $\triangledown g(P) = (Q^0)^T((Q^0P - R)\circ 1_K) + \rho P$
# ## Question 2.2
# +
def func(P_ravel, Q0, R, mask, rho, c):
    """Objective value g(P) for fixed Q0, with P passed flattened.

    P_ravel : flattened C x I matrix P (as required by check_grad)
    c       : number of latent components C, used to un-flatten P
    """
    # // keeps the reshape dimension integral under Python 3 as well
    P = P_ravel.reshape((c, P_ravel.shape[0] // c))
    tmp = (R - Q0.dot(P)) * mask
    val = np.sum(tmp ** 2)/2. + rho/2. * (np.sum(Q0 ** 2) + np.sum(P ** 2))
    return val


def grad(P_ravel, Q0, R, mask, rho, c):
    """Flattened gradient of func with respect to P.

    Bug fixed: the reshape previously hard-coded 6 components instead of
    using the `c` argument, breaking every call with c != 6.
    """
    P = P_ravel.reshape((c, P_ravel.shape[0] // c))
    tmp = (R - Q0.dot(P)) * mask
    grad_P = -Q0.T.dot(tmp) + rho * P
    return grad_P.ravel()
# -
# Initialise Q0, P0 from a truncated SVD of R (c = 6 latent components) and
# verify the analytic gradient against finite differences.
U, s, Vh = linalg.svd(R)
c = 6
rho = 0.2
Q0 = U[:, :c]
P0 = Vh[:c, :]
check_grad(func, grad, P0.ravel(), Q0, R, mask, rho, c)  # small value => gradient OK
# ## Reponse:
# On verifie le calcul par *check_grad*. D'abord, il faut séparer la fonction *objective* à deux fonctions: *func* et *grad*. Après, il faut changer le paramètre *P* à un array(C'est à dire qu'il faut le reformer à un matrice. Selon le résultat de *check_grad*, on peut voir que les deux gradients sont presque égales.
# ## Question 2.3
# +
def objective(P, Q0, R, mask, rho):
    """Objective of the simplified problem (Q0 fixed) and its gradient.

    Inputs:
        P    : C x I variable matrix
        Q0   : U x C fixed matrix
        R    : U x I score matrix
        mask : U x I 0-1 matrix of observed entries
        rho  : non-negative regularisation weight
    Outputs:
        val    : value of the objective
        grad_P : gradient with respect to P
    """
    masked_residual = (R - Q0.dot(P)) * mask
    data_term = np.sum(masked_residual ** 2) / 2.
    ridge_term = rho / 2. * (np.sum(Q0 ** 2) + np.sum(P ** 2))
    grad_P = rho * P - Q0.T.dot(masked_residual)
    return data_term + ridge_term, grad_P
def gradient(g, P0, gamma, epsilon):
    # Gradient descent with a fixed-length step: each step has Euclidean
    # size gamma along the normalised negative gradient.
    # NOTE(review): relies on the notebook globals Q0, R, mask and rho,
    # which are forwarded verbatim to g.
    # Returns the final objective value and the per-iteration value history.
    values = []
    new_value, grad = g(P0, Q0, R, mask, rho)
    values.append(new_value)
    # stop once the gradient norm falls below epsilon
    while np.sum((grad) ** 2) ** 0.5 > epsilon:
        step = -gamma * grad / (np.sum(grad ** 2) ** 0.5)
        P0 = P0 + step
        new_value, grad = g(P0, Q0, R, mask, rho)
        values.append(new_value)
    return new_value, values
# -
# ## Question 2.4
# Fixed-step gradient descent (gamma=1) until the gradient norm drops below 1.
v, vs = gradient(objective, P0, 1, 1)
print 'The minimal value is: %.2f' %v
print 'The iteration times are: %r' %len(vs)
plt.figure()
plt.plot(vs)
plt.show()
# # 3 Raffinements algorithmiques pour le problème à $Q_0$ fixé
# ## Question 3.1
def gradientRecLin(g, P0, epsilon):
    # Gradient descent with EXACT line search: because the objective is
    # quadratic in P, the step y minimising t -> g(P0 - t*grad) has the
    # closed form temp1/temp2 computed below.
    # NOTE(review): relies on the notebook globals Q0, R, mask and rho.
    values = []
    new_value, grad = g(P0, Q0, R, mask, rho)
    values.append(new_value)
    while np.sum((grad) ** 2) ** 0.5 > epsilon:
        # numerator: <grad, ∇g(P0)>; denominator: quadratic form of grad
        temp1 = rho*np.sum(grad*P0) - np.sum((Q0.dot(grad)*(R - Q0.dot(P0))*mask))
        temp2 = rho*np.sum((grad)**2) + np.sum((Q0.dot(grad)*mask)**2)
        y = temp1/temp2
        P0 = P0 - y*grad
        new_value, grad = g(P0, Q0, R, mask, rho)
        values.append(new_value)
    return new_value, values
# Exact line search converges in far fewer iterations than the fixed step.
v_r, vs_r = gradientRecLin(objective, P0, 1)
print 'The minimal value is: %.2f' %v_r
print 'The iteration times are: %r' %len(vs_r)
plt.figure()
plt.plot(vs_r)
plt.show()
# ## Question 3.2
# #### Théorème:
# Soit $q(x) = \frac{1}{2}x^t A x + b^t x + c$ une fonction quadratique, où A est une matrice symétrique définie positive. On peut utiliser le méthode des gradients conjugués.
#
# \begin{align}
# g(P) &= \frac{1}{2}\|1_K\circ(R - Q^0P)\|^2_F+\frac{\rho}{2}\|Q^0\|^2_F+\frac{\rho}{2}\|P\|^2_F\\
# &= Tr[\frac{1}{2}(1_K \circ (R - Q_0P))^T(1_K \circ (R - Q_0P)) + \frac{\rho}{2}P^T P ] + \frac{\rho}{2}\|Q_0\|^2_F
# \end{align}
#
# Pour transformer le calcul de matrice au celui de vecteur, on définit quelques variables:
#
# $X = P.reshape(CI,1) =
# \begin{Bmatrix}
# P_{1} \\
# P_{2} \\
# \vdots \\
# P_{I-1} \\
# P_{I} \\
# \end{Bmatrix}$
#
# $R' = R.reshape(UI,1) =
# \begin{Bmatrix}
# R_{1} \\
# R_{2} \\
# \vdots \\
# R_{I-1} \\
# R_{I} \\
# \end{Bmatrix}$
#
# $Q' =
# \begin{Bmatrix}
# Q_0 & 0 & \cdots & 0 \\
# 0 & Q_0 & \cdots & 0 \\
# \vdots & \vdots & \ddots & \vdots \\
# 0 & 0 & \cdots & Q_0 \\
# \end{Bmatrix} $
# <br>
# (Q'.shape = $UI*CI$)
#
# $M' =
# \begin{Bmatrix}
# mask_{1, 1} & 0 & \cdots & 0 \\
# 0 & mask_{2, 1} & \cdots & 0 \\
# \vdots & \vdots & \ddots & \vdots \\
# 0 & 0 & \cdots & mask_{U, I} \\
# \end{Bmatrix} $
# <br>
# (M'.shape = $UI*UI$)
#
# On définit que $f(X)=g(P)$, et:
# \begin{align}
# f(x) & = g(P) \\
# & = Tr[\frac{1}{2}(1_K \circ (R - Q_0P))^T(1_K \circ (R - Q_0P)) + \frac{\rho}{2}P^T P ] + \frac{\rho}{2}\|Q_0\|^2_F \\
# & = \frac{1}{2}(M'*(R'-Q'X))^T(M'*(R'-Q'X)) + \frac{\rho}{2}X^TX + \frac{\rho}{2}\|Q_0\|^2_F\\
# & = \frac{1}{2}(X^T((M'Q')^TM'Q' + \rho I_d)X - 2R'^TM'Q'X + R'^TM'R') + \frac{\rho}{2}\|Q_0\|^2_F \\
# \end{align}
# On a que:<br>
# $A = Q'^TM'Q' + \rho I_{CI}$
#
# $b^t = -R'^TM'Q'$
#
# $c = \frac{1}{2}R'^TM''R' + \frac{\rho}{2}\|Q_0\|^2_F$
#
# et:<br>
# $f(x) = \frac{1}{2}x^t A x + b^t x + c$<br>
# Il est évident que A est définie positive, donc on peut utiliser la méthode des gradients conjugués.
# Car pour ce problème, c'est pas pratique de transformer tous les matrices aux vecteurs, on utilise un algorithme pour la fonction quelconque lors de calculer le résultat numérique.
def gradientGc(g, P0, epsilon):
    # Conjugate-gradient method (Fletcher-Reeves update) using the same
    # exact line search as gradientRecLin, applied along direction d0.
    # NOTE(review): relies on the notebook globals Q0, R, mask and rho.
    values = []
    new_value, grad = g(P0, Q0, R, mask, rho)
    d0 = grad
    values.append(new_value)
    while np.sum((grad) ** 2) ** 0.5 > epsilon:
        # exact step length along d0 (closed form for the quadratic)
        temp1 = rho*np.sum(d0*P0) - np.sum((Q0.dot(d0)*(R - Q0.dot(P0))*mask))
        temp2 = rho*np.sum((d0)**2) + np.sum((Q0.dot(d0)*mask)**2)
        y = temp1/temp2
        P0 = P0 - y*d0
        grad_old = grad
        new_value, grad = g(P0, Q0, R, mask, rho)
        values.append(new_value)
        # Fletcher-Reeves coefficient: ||grad_new||^2 / ||grad_old||^2
        b = np.sum(grad ** 2)/np.sum(grad_old ** 2)
        d0 = grad + b * d0
    return new_value, values
# Conjugate gradient: the fastest of the three methods on this problem.
v_gc, vs_gc = gradientGc(objective, P0, 1)
print 'The minimal value is: %.2f' %v_gc
print 'The iteration times are: %r' %len(vs_gc)
plt.figure()
plt.plot(vs_gc)
plt.show()
# ## Question 3.3
# Pour minimiser la fonction jusqu'à $\epsilon<=1$:
# 1. La méthode de gradient a besoins de 1128 fois d'itération avec $\gamma=1$, le valeur mnimal est 303938.38.
# 2. La méthode de recherche linéaire a besoins de 12 fois d'itération , le valeur mnimal est 303937.25 qui est plus petit que la méthode de gradient.
# 3. La méthode de gradient conjugué a besoins de juste 8 fois d'itération , le valeur mnimal est 303936.98 qui est plus petit que la méthode de gradient.
# #Résolution du problème complet
# ## Question 4.1
# +
def total_objective(P, Q, R, mask, rho):
    """
    Objective of the full (bi-variable) problem and its partial gradients.

    Inputs:
        P    : C x I variable matrix
        Q    : U x C variable matrix
        R    : U x I score matrix
        mask : U x I 0-1 matrix of observed entries
        rho  : non-negative regularisation weight
    Outputs:
        val    : value of the objective
        grad_P : gradient with respect to P
        grad_Q : gradient with respect to Q

    Bug fixed: the gradients previously referenced the notebook globals
    Q0 and P0 instead of the arguments Q and P, so they were wrong
    whenever the current iterates differed from the SVD initialisation.
    """
    tmp = (R - Q.dot(P)) * mask
    val = np.sum(tmp ** 2)/2. + rho/2. * (np.sum(Q ** 2) + np.sum(P ** 2))
    grad_P = -Q.T.dot(tmp) + rho * P
    grad_Q = -tmp.dot(P.T) + rho * Q
    return val, grad_P, grad_Q
def gradientRecLinP(g, P00, Q00, epsilon):
    """30 exact-line-search gradient steps on P with Q fixed.

    Uses the notebook globals R, mask and rho; `epsilon` is accepted for
    interface symmetry but unused (the step count is fixed at 30).
    Returns (final objective value, updated P).
    """
    value, grad_P, _ = g(P00, Q00, R, mask, rho)
    for _ in range(30):
        # Closed-form optimal step along the gradient direction.
        numer = rho * np.sum(grad_P * P00) - np.sum(Q00.dot(grad_P) * (R - Q00.dot(P00)) * mask)
        denom = rho * np.sum(grad_P ** 2) + np.sum((Q00.dot(grad_P) * mask) ** 2)
        P00 = P00 - (numer / denom) * grad_P
        value, grad_P, _ = g(P00, Q00, R, mask, rho)
    return value, P00
def gradientRecLinQ(g, P00, Q00, epsilon):
    """30 exact-line-search gradient steps on Q with P fixed.

    Mirror image of gradientRecLinP; uses the notebook globals R, mask and
    rho, ignores `epsilon`. Returns (final objective value, updated Q).
    """
    value, _, grad_Q = g(P00, Q00, R, mask, rho)
    for _ in range(30):
        # Closed-form optimal step along the gradient direction.
        numer = rho * np.sum(grad_Q * Q00) - np.sum(grad_Q.dot(P00) * (R - Q00.dot(P00)) * mask)
        denom = rho * np.sum(grad_Q ** 2) + np.sum((grad_Q.dot(P00) * mask) ** 2)
        Q00 = Q00 - (numer / denom) * grad_Q
        value, _, grad_Q = g(P00, Q00, R, mask, rho)
    return value, Q00
def gradientTotal(P00, Q00, epsilon, times):
    """Alternating minimization: `times` rounds of (minimize in Q, then in P).

    Each half-step runs 30 line-search gradient iterations on one factor while
    the other is held fixed. Returns (last value, value history, final P, final Q).
    """
    values = []
    P_cur, Q_cur = P00, Q00
    for _ in range(times):
        v, Q_cur = gradientRecLinQ(total_objective, P_cur, Q_cur, epsilon)
        values.append(v)
        v, P_cur = gradientRecLinP(total_objective, P_cur, Q_cur, epsilon)
        values.append(v)
    return v, values, P_cur, Q_cur
# -
# Start the alternating scheme from all-ones factor matrices.
P1 = np.ones(P0.shape)
Q1 = np.ones(Q0.shape)
print P1.shape, Q1.shape
# 5 outer rounds (epsilon argument is unused by the inner routines).
v_t, vs_t, P_rl, Q_rl = gradientTotal(P1, Q1, 100, 5)
print 'The minimal value is: %.2f' %v_t
print 'The iteration times are: %r' %len(vs_t)
plt.figure()
plt.plot(vs_t)
plt.show()
# ## Question 4.2
# *for k >= 1 do*
#
# $$P_k = argmin_P(\frac{1}{2}(1_K \circ ||R-Q_{k-1}P||_F^2) + \frac{\rho}{2}(||P||_F^2 + ||Q_{k-1}||_F^2)$$
#
# $$Q_k = argmin_Q(\frac{1}{2}(1_K \circ ||R-QP_k||_F^2) + \frac{\rho}{2}(||P_k||_F^2 + ||Q||_F^2)$$
#
# *end for*<br>
# Pour ce problème, la fonction est toujours convexe par rapport à Q et P. Donc à chaque étape, l'objectif décroît. Cependant, on peut seulement avoir le minimum local car la fonction n'est pas convexe lorque tous Q et P ne sont pas fixé;
# ## Question 4.3
# ## Question 4.4
# ## Question 4.5
# +
# Recommended movie for user 449: highest predicted rating among the
# items not yet rated by this user (mask == 0).
resLS = np.argmax((Q_rl[449,:].dot(P_rl))*(1 - mask[449,:]))
print(resLS)
# -
# Le meilleur film pour l'utilisateur 449 est : 951
| SD-TSIA211/TP/SD211_TP1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import pandas as pd
# Class labels and the feature order expected by the trained model.
classes = ['negative','positive']
columns = ['age','gender','polyuria','polydipsia',
           'sudden_weight_loss','weakness','polyphagia',
           'genital_thrush','visual_blurring','itching',
           'irritability','delayed_healing','partial_paresis',
           'muscle_stiffness','alopecia','obesity']
answers = ['42','male','yes','yes','yes','yes','yes','yes',
           'yes','yes','yes','yes','yes','yes','yes','yes']

# SECURITY NOTE: pickle.load executes arbitrary code from the file; only
# load model files from a trusted source.
# FIX: use a context manager so the file handle is closed deterministically
# (the original `pickle.load(open(...))` leaked the handle).
with open('ext_random_forest.pkl', 'rb') as model_file:
    model = pickle.load(model_file)

# Route 1: build a DataFrame and map categorical answers to 0/1.
data = pd.DataFrame([answers], columns=columns)
data.replace({'male':1,'female':0}, inplace=True)
data.replace({'yes':1,'no':0}, inplace=True)
data['age'] = data['age'].astype('int')
probas = model.predict_proba(data)[:,1]
predicted = model.predict(data)
classes[predicted[0]], probas[0]

# Route 2: the same prediction from the raw answer list.
answers = [1 if a == 'yes' else a for a in answers]
answers = [0 if a == 'no' else a for a in answers]
answers = [1 if a == 'male' else a for a in answers]
answers = [0 if a == 'female' else a for a in answers]
# NOTE(review): 'age' remains the string '42' here, unlike the DataFrame
# route above where it is cast to int — confirm both routes agree.
probas = model.predict_proba([answers])[:,1]
predicted = model.predict([answers])
classes[predicted[0]], probas[0]
| diabetes/bot/ext_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="PcAvS4amzDrZ"
# # Mapping model training
# + [markdown] id="w0fVn8yUJl_v"
# ## Setup Google Drive
# + id="m33xuTjEKazJ" colab={"base_uri": "https://localhost:8080/"} outputId="99b85c0f-5cd0-4bfc-9383-e151e7fbd973"
# Mount Google Drive so the dataset and checkpoints persist across Colab sessions.
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="Vn7CQ4GQizHy"
# ## Install Dependencies
#
# First we install the required dependencies with `pip`.
# + id="mjhdKFJbvRVU" colab={"base_uri": "https://localhost:8080/"} outputId="47ebd18a-45d2-49e8-a9a8-d92828dafd3d"
# %tensorflow_version 2.x
# !pip install -qU ddsp[data_preparation]==1.0.1
# + [markdown] id="6LVV4Dc61HHY"
# ## Make directories to save model and data
# + id="9XJcymGj1IwY" colab={"base_uri": "https://localhost:8080/"} outputId="77609dba-214d-4f03-a576-b4678dda3947"
import os

# All artifacts live under this Drive folder; fail fast if Drive is not mounted.
drive_dir = '/content/drive/My Drive/nsynth_guitar'
checkpoint_dir = os.path.join(drive_dir, 'mapping/checkpoint')
assert os.path.exists(drive_dir)
print('Drive Directory Exists:', drive_dir)

# Create the checkpoint directory if it does not exist yet (Colab shell magic).
# !mkdir -p "$checkpoint_dir"
# + [markdown] id="5fgGZzyMGyA4"
# ## Clear existing checkpoints
# + id="WYaZoeNeGrvo"
# import shutil
# try:
# shutil.rmtree(checkpoint_dir)
# except OSError as e:
# print("Error: %s : %s" % (checkpoint_dir, e.strerror))
# + [markdown] id="SxRUhnmKsUj9"
# ### Download Complete NSynth Guitar Subset
# + id="tTVOibF9sb3y" colab={"base_uri": "https://localhost:8080/"} outputId="b71333b1-73a2-430c-d17c-5a1454acf39e"
'''This one download the folder recursively'''
def folder_download(folder_id):
    # Recursively download a Google Drive folder (by id) into the current
    # working directory using the third-party download.py helper.
    # Colab-only: requires interactive authentication.
    # authenticate
    from google.colab import auth
    auth.authenticate_user()
    # get folder_name
    from googleapiclient.discovery import build
    service = build('drive', 'v3')
    folder_name = service.files().get(fileId=folder_id).execute()['name']
    # import library and download
    # !wget -qnc https://github.com/segnolin/google-drive-folder-downloader/raw/master/download.py
    from download import download_folder
    download_folder(service, folder_id, './', folder_name)
    return folder_name

# Fetch the dataset once; skip the download if it is already present.
dataset_dir = '/content/complete_dataset'
if not os.path.exists(dataset_dir):
    folder_name = folder_download('1-lJfBAVswi8JXR_kKbOkfvNHRNvAZ1TB')

# Paths to the train/valid/test TFRecord files.
train_dataset_dir = os.path.join(dataset_dir, 'train')
valid_dataset_dir = os.path.join(dataset_dir, 'valid')
test_dataset_dir = os.path.join(dataset_dir, 'test')
train_tfrecord_file = os.path.join(train_dataset_dir, 'complete.tfrecord')
valid_tfrecord_file = os.path.join(valid_dataset_dir, 'complete.tfrecord')
test_tfrecord_file = os.path.join(test_dataset_dir, 'complete.tfrecord')
# + [markdown] id="s9AWf8NpBiB4"
# ## Define DataProvider class
# + id="_6180WP6AkkJ" colab={"base_uri": "https://localhost:8080/"} outputId="70c373e4-733c-425c-a11f-2774220f2f20"
import tensorflow as tf
import ddsp.training.data as data
class CompleteTFRecordProvider(data.RecordProvider):
    """DataProvider for the 'complete' NSynth guitar TFRecords.

    Extends ddsp's RecordProvider with the extra per-example features stored
    in this dataset (note metadata, scaled f0/loudness, encoder latents z)
    and optionally applies `map_func` to every parsed example.
    """

    def __init__(self,
                 file_pattern=None,
                 example_secs=4,
                 sample_rate=16000,
                 frame_rate=250,
                 map_func=None):
        # The parent class derives self._audio_length / self._feature_length
        # from example_secs, sample_rate and frame_rate.
        super().__init__(file_pattern, example_secs, sample_rate,
                         frame_rate, tf.data.TFRecordDataset)
        self._map_func = map_func

    def get_dataset(self, shuffle=True):
        """Read, parse and (optionally) transform the matching TFRecord files."""
        def parse_tfexample(record):
            features = tf.io.parse_single_example(record, self.features_dict)
            if self._map_func is not None:
                return self._map_func(features)
            else:
                return features

        filenames = tf.data.Dataset.list_files(self._file_pattern, shuffle=shuffle)
        # Interleave reads across files for throughput; parse in parallel.
        dataset = filenames.interleave(
            map_func=self._data_format_map_fn,
            cycle_length=40,
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(parse_tfexample,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        return dataset

    @property
    def features_dict(self):
        """Fixed-length feature schema of one serialized example."""
        return {
            'sample_name':
                tf.io.FixedLenFeature([1], dtype=tf.string),
            'note_number':
                tf.io.FixedLenFeature([1], dtype=tf.int64),
            'velocity':
                tf.io.FixedLenFeature([1], dtype=tf.int64),
            'instrument_source':
                tf.io.FixedLenFeature([1], dtype=tf.int64),
            'qualities':
                tf.io.FixedLenFeature([10], dtype=tf.int64),
            'audio':
                tf.io.FixedLenFeature([self._audio_length], dtype=tf.float32),
            'f0_hz':
                tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),
            'f0_confidence':
                tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),
            'loudness_db':
                tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),
            'f0_scaled':
                tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),
            'ld_scaled':
                tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),
            'z':
                # 16 latent dimensions per frame, stored flattened.
                tf.io.FixedLenFeature([self._feature_length * 16], dtype=tf.float32),
        }
# + [markdown] id="nbUpwtyRB8wV"
# ## Define features map function
# + id="NbXhqrZaB5rw"
def features_map(features):
    """Turn one parsed TFRecord example into a (inputs, targets) pair.

    Targets are per-frame [f0 deviation from the nominal note, scaled
    loudness]; the conditioning vector concatenates one-hot note/velocity/
    source, the quality flags and the time-averaged latent z. The decoder
    input is the target sequence shifted right by one frame.
    """
    raw_note = features['note_number']
    seq_len = features['f0_scaled'].shape[0]

    # f0 deviation (in semitones) from the nominal MIDI note number.
    deviation = features['f0_scaled'] * 127.0 - tf.cast(raw_note, dtype=tf.float32)
    # Zero out deviations larger than one semitone (outlier pitch estimates).
    in_range = tf.math.less_equal(tf.math.abs(deviation), 1.0)
    deviation = deviation * tf.cast(in_range, dtype=tf.float32)
    deviation = tf.expand_dims(deviation, axis=-1)

    loudness = tf.expand_dims(features['ld_scaled'], axis=-1)
    targets = tf.concat([deviation, loudness], axis=-1)

    # Conditioning vector (constant over time, hence the mean over frames of z).
    z_frames = tf.reshape(features['z'], shape=(seq_len, 16))
    conditioning = tf.concat(
        [tf.squeeze(tf.one_hot(raw_note, 128)),
         tf.squeeze(tf.one_hot(features['velocity'], 128)),
         tf.squeeze(tf.one_hot(features['instrument_source'], 3)),
         tf.cast(features['qualities'], dtype=tf.float32),
         tf.math.reduce_mean(z_frames, axis=0)],
        axis=-1)
    conditioning = tf.expand_dims(conditioning, axis=0)

    inputs = {
        'inputs': conditioning,
        # Teacher-forcing input: targets shifted right, zero-padded at t=0.
        'targets': tf.pad(targets[0:-1, :], [[1, 0], [0, 0]])
    }
    return inputs, targets
# + [markdown] id="d7dYOU811Ni4"
# ## Create datasets
# + id="rBa055Xy1MIL"
# Shared dataset parameters (must match how the TFRecords were written).
batch_size = 16
example_secs = 4
sample_rate = 16000
frame_rate = 250

# Create train dataset
train_data_provider = CompleteTFRecordProvider(
    file_pattern=train_tfrecord_file + '*',
    example_secs=example_secs,
    sample_rate=sample_rate,
    frame_rate=frame_rate,
    map_func=features_map)

# repeats=-1 makes the dataset loop forever (epochs are bounded by
# steps_per_epoch in model.fit below).
train_dataset = train_data_provider.get_batch(
    batch_size,
    shuffle=True,
    repeats=-1)

# Create valid dataset
valid_data_provider = CompleteTFRecordProvider(
    file_pattern=valid_tfrecord_file + '*',
    example_secs=example_secs,
    sample_rate=sample_rate,
    frame_rate=frame_rate,
    map_func=features_map)

valid_dataset = valid_data_provider.get_batch(
    batch_size,
    shuffle=True,
    repeats=-1)

# Create test dataset
test_data_provider = CompleteTFRecordProvider(
    file_pattern=test_tfrecord_file + '*',
    example_secs=example_secs,
    sample_rate=sample_rate,
    frame_rate=frame_rate,
    map_func=features_map)

test_dataset = test_data_provider.get_batch(
    batch_size,
    shuffle=True,
    repeats=-1)
# + [markdown] id="PVxCGOXOY4Ab"
# # Transformer
# + id="sha2F2FdZDOJ"
import numpy as np
def create_look_ahead_mask(size):
    """Causal attention mask (1 = blocked): position i cannot attend to j > i."""
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle  # (seq_len, seq_len)
def get_angles(pos, i, d_model):
    """Angle matrix for sinusoidal positional encodings.

    pos and i broadcast against each other; channel pairs (2k, 2k+1) share
    the frequency 1 / 10000^(2k / d_model).
    """
    exponent = (2 * (i // 2)) / np.float32(d_model)
    inverse_frequency = 1.0 / np.power(10000, exponent)
    return pos * inverse_frequency
def positional_encoding(position, d_model):
    """Sinusoidal positional encodings of shape (1, position, d_model)."""
    angles = get_angles(np.arange(position)[:, np.newaxis],
                        np.arange(d_model)[np.newaxis, :],
                        d_model)
    angles[:, 0::2] = np.sin(angles[:, 0::2])  # even channels: sine
    angles[:, 1::2] = np.cos(angles[:, 1::2])  # odd channels: cosine
    # Leading batch axis so the encoding broadcasts over the batch.
    return tf.cast(angles[np.newaxis, ...], dtype=tf.float32)
def scaled_dot_product_attention(q, k, v, mask):
    """Scaled dot-product attention.

    q, k, v must have matching leading dimensions and k/v a matching
    penultimate dimension (seq_len_k == seq_len_v). `mask` (padding or
    look-ahead) must broadcast against (..., seq_len_q, seq_len_k); masked
    logits are pushed to -1e9 so they vanish after the softmax.

    Args:
        q: query, shape (..., seq_len_q, depth)
        k: key, shape (..., seq_len_k, depth)
        v: value, shape (..., seq_len_v, depth_v)
        mask: float tensor broadcastable to (..., seq_len_q, seq_len_k), or None

    Returns:
        (output, attention_weights)
    """
    logits = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

    # Scale by sqrt(depth) to keep logits in a softmax-friendly range.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = logits / tf.math.sqrt(depth)

    if mask is not None:
        logits += (mask * -1e9)

    # Normalize over seq_len_k so each query's weights sum to 1.
    weights = tf.nn.softmax(logits, axis=-1)  # (..., seq_len_q, seq_len_k)
    output = tf.matmul(weights, v)            # (..., seq_len_q, depth_v)
    return output, weights
class MultiHeadAttention(tf.keras.layers.Layer):
    """Standard multi-head attention: project q/k/v, attend per head, re-project."""

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        # d_model is split evenly across heads.
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads

        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)

        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth).
        Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        """Returns (output, attention_weights). Note the (v, k, q) argument order."""
        batch_size = tf.shape(q)[0]

        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)

        # (batch_size, num_heads, seq_len_q, depth)
        q = self.split_heads(q, batch_size)
        # (batch_size, num_heads, seq_len_k, depth)
        k = self.split_heads(k, batch_size)
        # (batch_size, num_heads, seq_len_v, depth)
        v = self.split_heads(v, batch_size)

        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape ==
        # (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)

        # (batch_size, seq_len_q, num_heads, depth)
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])

        # Merge the heads back: (batch_size, seq_len_q, d_model)
        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))

        # Final linear projection: (batch_size, seq_len_q, d_model)
        output = self.dense(concat_attention)

        return output, attention_weights
def point_wise_feed_forward_network(d_model, dff):
    """Position-wise MLP: d_model -> dff (ReLU) -> d_model."""
    hidden = tf.keras.layers.Dense(dff, activation='relu')  # (batch, seq_len, dff)
    projection = tf.keras.layers.Dense(d_model)             # (batch, seq_len, d_model)
    return tf.keras.Sequential([hidden, projection])
class EncoderLayer(tf.keras.layers.Layer):
    """One transformer encoder layer: self-attention + FFN, each with
    dropout and a post-norm residual connection.

    NOTE(review): this layer is defined but the Encoder below is built from
    the `ffn` helper instead — confirm whether this class is still needed.
    """

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()

        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        # (batch_size, input_seq_len, d_model)
        attn_output, _ = self.mha(x, x, x, mask)
        attn_output = self.dropout1(attn_output, training=training)
        # Residual + layer norm: (batch_size, input_seq_len, d_model)
        out1 = self.layernorm1(x + attn_output)

        # (batch_size, input_seq_len, d_model)
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        # Residual + layer norm: (batch_size, input_seq_len, d_model)
        out2 = self.layernorm2(out1 + ffn_output)

        return out2
class DecoderLayer(tf.keras.layers.Layer):
    """One transformer decoder layer: masked self-attention, cross-attention
    over the encoder output, then an FFN — each with dropout and a post-norm
    residual connection.
    """

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()

        self.mha1 = MultiHeadAttention(d_model, num_heads)  # masked self-attention
        self.mha2 = MultiHeadAttention(d_model, num_heads)  # encoder-decoder attention

        self.ffn = point_wise_feed_forward_network(d_model, dff)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        # enc_output.shape == (batch_size, input_seq_len, d_model)

        # Masked self-attention: (batch_size, target_seq_len, d_model)
        attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)
        attn1 = self.dropout1(attn1, training=training)
        out1 = self.layernorm1(attn1 + x)

        # Cross-attention: queries from the decoder, keys/values from the encoder.
        attn2, attn_weights_block2 = self.mha2(
            enc_output, enc_output, out1,
            padding_mask)  # (batch_size, target_seq_len, d_model)
        attn2 = self.dropout2(attn2, training=training)
        out2 = self.layernorm2(
            attn2 + out1)  # (batch_size, target_seq_len, d_model)

        ffn_output = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_output = self.dropout3(ffn_output, training=training)
        out3 = self.layernorm3(
            ffn_output + out2)  # (batch_size, target_seq_len, d_model)

        return out3, attn_weights_block1, attn_weights_block2
# + id="WQaueb_AbcOK"
def ffn(input_shape, num_layers, d_model, dff, rate=0.1):
    """Residual MLP built with the Keras functional API.

    Projects to `dff`, applies `num_layers` residual blocks of
    Dense(relu) -> Dropout -> Add -> LayerNorm, then projects to `d_model`.
    """
    layers = tf.keras.layers
    net_input = layers.Input(shape=input_shape)
    hidden = layers.LayerNormalization()(layers.Dense(dff)(net_input))
    for _ in range(num_layers):
        branch = layers.Dense(dff, activation='relu')(hidden)
        branch = layers.Dropout(rate)(branch)
        hidden = layers.LayerNormalization()(layers.Add()([hidden, branch]))
    net_output = layers.Dense(d_model)(hidden)
    return tf.keras.Model(net_input, net_output)
class Encoder(tf.keras.layers.Layer):
    """Encoder for the (time-constant) conditioning vector.

    Unlike a standard transformer encoder, this is just the residual MLP
    from `ffn` — no attention, since the conditioning input has length 1.
    """

    def __init__(self, num_inputs, num_layers, d_model, dff, rate=0.1):
        super(Encoder, self).__init__()
        self.ffn = ffn(
            input_shape=(None, num_inputs),
            num_layers=num_layers,
            d_model=d_model,
            dff=dff,
            rate=rate)

    def call(self, x, training):
        return self.ffn(x, training=training)
class Decoder(tf.keras.layers.Layer):
    """Transformer decoder: MLP embedding + positional encoding + a stack of
    DecoderLayers attending to the encoder output."""

    def __init__(self, num_outputs, num_layers, d_model, num_heads, dff,
                 max_seq_len, rate=0.1):
        super(Decoder, self).__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        # Continuous targets are embedded with a small residual MLP rather
        # than a token-embedding lookup.
        self.embedding = ffn(
            input_shape=(None, num_outputs),
            num_layers=4,
            d_model=d_model,
            dff=dff,
            rate=rate)
        self.pos_encoding = positional_encoding(max_seq_len, d_model)

        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask):
        seq_len = tf.shape(x)[1]
        attention_weights = {}

        # (batch_size, target_seq_len, d_model)
        x = self.embedding(x, training=training)
        # Scale embeddings before adding the positional encoding (as in the
        # original transformer paper).
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]

        x = self.dropout(x, training=training)

        for i in range(self.num_layers):
            # No padding mask (None): all positions are valid.
            x, block1, block2 = self.dec_layers[i](x, enc_output, training,
                                                   look_ahead_mask,
                                                   None)

            attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1
            attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2

        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights
class Transformer(tf.keras.Model):
    """Encoder-decoder model mapping a conditioning vector to a target
    sequence (f0 deviation + loudness per frame)."""

    def __init__(self, num_layers, d_model, num_heads, dff, max_seq_len,
                 num_inputs, num_outputs, rate=0.1):
        super(Transformer, self).__init__()
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.max_seq_len = max_seq_len

        self.encoder = Encoder(num_inputs, num_layers, d_model, dff, rate)
        self.decoder = Decoder(num_outputs, num_layers, d_model, num_heads, dff,
                               max_seq_len, rate)
        self.final_layer = tf.keras.layers.Dense(num_outputs)

    def call(self, inputs, training=None, mask=None):
        """Teacher-forced forward pass.

        `inputs` is a dict with 'inputs' (conditioning) and 'targets'
        (shifted target sequence); returns the predicted target sequence.
        """
        inp = inputs['inputs']
        tar = inputs['targets']

        # (batch_size, inp_seq_len, d_model)
        enc_output = self.encoder(inp, training)

        seq_len = tf.shape(tar)[1]
        look_ahead_mask = create_look_ahead_mask(seq_len)

        # dec_output.shape == (batch_size, tar_seq_len, d_model)
        dec_output, attention_weights = self.decoder(
            tar, enc_output, training, look_ahead_mask)

        # (batch_size, tar_seq_len, target_vocab_size)
        final_output = self.final_layer(dec_output)

        return final_output

    def compute_outputs(self, inputs, num_targets=0):
        """Autoregressive generation of max_seq_len frames.

        The first `num_targets` steps are teacher-forced from
        inputs['targets']; after that the model feeds back its own
        predictions. O(max_seq_len) full forward passes — slow by design.
        """
        batch_size = tf.shape(inputs['inputs'])[0]
        # Seed the output sequence with a zero frame (removed before return).
        outputs = tf.zeros(shape=(batch_size, 1, self.num_outputs),
                           dtype=tf.float32)

        for i in range(self.max_seq_len):
            if i < num_targets:
                input_dict = {
                    'inputs': inputs['inputs'],
                    'targets': inputs['targets'][:, 0:i, :]
                }
            else:
                input_dict = {
                    'inputs': inputs['inputs'],
                    'targets': outputs
                }

            _outputs = self(input_dict, training=False)
            # Append only the newest predicted frame.
            outputs = tf.concat([outputs, _outputs[:, -1:, :]], axis=1)

        return outputs[:, 1:, :]
# + id="RlGxnlpUervM"
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Transformer learning-rate schedule from "Attention Is All You Need":
    linear warmup for `warmup_steps`, then inverse-square-root decay,
    both scaled by 1/sqrt(d_model)."""

    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        # Stored as a float32 tensor so the rsqrt below works on it directly.
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        decay = tf.math.rsqrt(step)
        warmup = step * (self.warmup_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay, warmup)
# + [markdown] id="pTiP6aA82Uay"
# # Create and compile mapping model
# + id="26aSTwuy2ZKy" colab={"base_uri": "https://localhost:8080/"} outputId="d9232cc4-ef24-434e-f36e-5eca10f53c64"
# Infer input/output sizes from one training batch.
x_train, y_train = next(iter(train_dataset))
num_inputs = x_train['inputs'].shape[-1]
num_outputs = y_train.shape[-1]
max_seq_len = y_train.shape[-2]
print(num_inputs)
print(num_outputs)
print(max_seq_len)

# Model hyperparameters.
num_layers = 4
d_model = 128
num_heads = 4
dff = 128

mapping_model = Transformer(
    num_layers=num_layers,
    d_model=d_model,
    num_heads=num_heads,
    dff=dff,
    max_seq_len=max_seq_len,
    num_inputs=num_inputs,
    num_outputs=num_outputs,
    rate=0.1)

# MAE loss with the warmup/decay schedule and Adam settings from the
# original transformer paper; MSE is tracked as an extra metric.
loss = tf.keras.losses.MeanAbsoluteError()
learning_rate = CustomSchedule(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
                                     epsilon=1e-9)

mapping_model.compile(
    optimizer=optimizer,  # tf.keras.optimizers.Adam(learning_rate=0.01),
    loss=loss,
    metrics=[tf.keras.losses.MeanSquaredError()])
# + id="zOBxsJYCGf-2" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="ff71419d-0681-4a86-9d33-7b674df9887b"
import matplotlib.pyplot as plt

# Visualize the warmup/decay learning-rate schedule over the first 40k steps.
temp_learning_rate_schedule = CustomSchedule(d_model)

plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
# + [markdown] id="zceUmkJI35zb"
# ## Build model
# + id="qoczioW23-Da" colab={"base_uri": "https://localhost:8080/"} outputId="065f8ee1-ab01-4154-f01e-a4396161d13d"
# Run one forward pass to build the variables, then print the layer summary.
_ = mapping_model(x_train)
print(mapping_model.summary())
# + [markdown] id="SunnK0BY2utQ"
# # Load model checkpoint
# + id="gi46si8f2bOi"
checkpoint_file = os.path.join(checkpoint_dir, 'cp.ckpt')
# Resume from the saved weights if a checkpoint already exists on Drive.
if os.path.isdir(checkpoint_dir) and os.listdir(checkpoint_dir):
    mapping_model.load_weights(checkpoint_file)
# + [markdown] id="gkCyoCxp3l7r"
# ## Create training callbacks
# + id="23w7DNkh2ytf"
# Save a checkpoint every epoch.
# FIX: save_weights_only=True so the file written at `checkpoint_file` is a
# TF weights checkpoint — this is what `mapping_model.load_weights(checkpoint_file)`
# above expects to restore from. With False, Keras would try to save the whole
# (subclassed) model at the '.ckpt' path, which load_weights cannot read back.
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_file,
    save_weights_only=True,
    verbose=0,
    save_freq='epoch')

# Stop training once validation loss has not improved for 5 epochs.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)

# def scheduler(epoch, lr):
#     if epoch < 10:
#         return lr
#     else:
#         return lr * 0.9

# lr_scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler)
# + [markdown] id="mCkWXZD-5XsD"
# ## Train the model
# + id="xl4D5c_u5a1x" colab={"base_uri": "https://localhost:8080/"} outputId="7908f072-f939-4859-bd3c-439167fa0fd5"
# Train with an infinite (repeats=-1) dataset, so epochs are defined by
# steps_per_epoch; early stopping usually ends training before 500 epochs.
epochs = 500
steps_per_epoch = 100
validation_steps = 10

mapping_model.fit(train_dataset,
                  epochs=epochs,
                  steps_per_epoch=steps_per_epoch,
                  validation_data=valid_dataset,
                  validation_steps=validation_steps,
                  callbacks=[checkpoint, early_stop])
# + [markdown] id="hNyLeJcN6wS8"
# ## Evaluate model on test dataset
# + id="hX4SXagL6vwB" colab={"base_uri": "https://localhost:8080/"} outputId="dc066a82-1984-4ced-8b5c-eb4e360cccee"
# Final held-out evaluation over 500 batches of the test split.
mapping_model.evaluate(test_dataset, steps=500)
# + [markdown] id="6KlP07NBK98a"
# ### Load Data and Model Checkpoints
# + id="1Te94ZhJK9Gq"
import ddsp.training
from ddsp.training.preprocessing import F0LoudnessPreprocessor
import gin
import matplotlib.pyplot as plt
from IPython.display import Audio

sr = 16000
ddsp_dir = os.path.normpath('/content/drive/My Drive/nsynth_guitar/train_30000')

# Parse the gin config.
gin_file = os.path.join(ddsp_dir, 'operative_config-30000.gin')
gin.parse_config_file(gin_file)

# Load model
ddsp_model = ddsp.training.models.Autoencoder()
ddsp_model.restore(ddsp_dir)

def map_func(features):
    # Keep the raw features alongside (inputs, targets) so the DDSP
    # synthesizer can be driven with them later.
    inputs, targets = features_map(features)
    return inputs, targets, features

# Create test dataset (batch size 1, shuffled, infinite).
data_provider = CompleteTFRecordProvider(
    file_pattern=test_tfrecord_file + '*',
    example_secs=example_secs,
    sample_rate=sample_rate,
    frame_rate=frame_rate,
    map_func=map_func)

dataset = data_provider.get_batch(
    1,
    shuffle=True,
    repeats=-1)

datagen = iter(dataset)
# + [markdown] id="S5yqSI3e8i4U"
# ### Get Predictions
# + id="_A2hgXwivy8I"
def get_preds(inputs, targets, features, num_targets):
    """Predict f0/loudness with the mapping model, plot them against ground
    truth, then resynthesize audio with the DDSP autoencoder.

    The first `num_targets` frames are teacher-forced from the ground-truth
    targets; the rest are generated autoregressively. Mutates `features`
    in place with the predicted f0/loudness. Returns (original audio,
    predicted audio).
    """
    outputs = mapping_model.compute_outputs(inputs, num_targets=num_targets)
    # outputs = mapping_model(inputs)

    # Channel 0: f0 deviation; channel 1: scaled loudness.
    f0_variation_pred = outputs[:, :, 0]
    ld_scaled_pred = outputs[:, :, 1]
    # z_pred = outputs[:, :, 2:]

    f0_variation_pred = tf.squeeze(f0_variation_pred)
    ld_scaled_pred = tf.squeeze(ld_scaled_pred)
    # z_pred = tf.squeeze(z_pred)

    f0_scaled = np.squeeze(features['f0_scaled'])
    ld_scaled = np.squeeze(features['ld_scaled'])
    # z = tf.reshape(features['z'], shape=z_pred.shape)

    note_number = features['note_number']
    note_number = tf.squeeze(tf.cast(note_number, dtype=tf.float32))

    # Ground-truth f0 deviation, clipped to one semitone exactly as in
    # features_map, for a like-for-like comparison plot.
    f0_variation = f0_scaled * 127.0 - note_number
    f0_variation *= tf.cast(tf.math.less_equal(tf.math.abs(f0_variation), 1.0),
                            dtype=tf.float32)

    plt.figure(figsize=(12, 6))
    plt.subplot(2, 2, 1)
    plt.plot(f0_variation, label='f0_variation')
    plt.plot(f0_variation_pred, label='f0_variation prediction')
    plt.xlabel('time')
    plt.legend()
    plt.subplot(2, 2, 2)
    plt.plot(ld_scaled, label='ld_scaled')
    plt.plot(ld_scaled_pred, label='ld_scaled prediction')
    plt.xlabel('time')
    plt.legend()
    # plt.subplot(2, 2, 3)
    # plt.xlabel('time')
    # plt.ylabel('z')
    # plt.plot(z)
    # plt.ylim([-7., 7.])
    # plt.subplot(2, 2, 4)
    # plt.xlabel('time')
    # plt.ylabel('z prediction')
    # plt.plot(z_pred)
    # plt.ylim([-7., 7.])
    plt.show()

    # Invert the deviation back to scaled f0 and restore the batch axis.
    f0_scaled_pred = (f0_variation_pred + note_number) / 127.0
    f0_scaled_pred = tf.expand_dims(f0_scaled_pred, axis=0)
    ld_scaled_pred = tf.expand_dims(ld_scaled_pred, axis=0)

    # Update the features with predicted f0 and ld
    features['f0_scaled'] = tf.convert_to_tensor(f0_scaled_pred)
    features['ld_scaled'] = tf.convert_to_tensor(ld_scaled_pred)

    f0_hz, loudness_db = F0LoudnessPreprocessor.invert_scaling(
        f0_scaled_pred, ld_scaled_pred)

    features['f0_hz'] = f0_hz
    features['loudness_db'] = loudness_db
    # features['z'] = z_pred

    # Resynthesize audio from the predicted controls with the DDSP model.
    ddsp_outputs = ddsp_model(features, training=False)
    audio_pred = ddsp_model.get_audio_from_outputs(ddsp_outputs)

    return features['audio'], audio_pred
# + colab={"base_uri": "https://localhost:8080/"} id="wqrLZxrFR3io" outputId="f6bc1cd3-f1ec-45ae-d1c7-a9c610a3bf26"
# Pull one test example and show which NSynth sample it is.
inputs, targets, features = next(datagen)
print(features['sample_name'])
# + id="lvN2zKAM7cdr" colab={"base_uri": "https://localhost:8080/", "height": 213} outputId="98a56632-783f-4f9c-bfa4-ef21e7361aa9"
# 100 teacher-forced frames, then autoregressive generation.
audio, audio_pred = get_preds(inputs, targets, features, num_targets=100)
# + colab={"base_uri": "https://localhost:8080/", "height": 213} id="kI8ZWV9zRhg4" outputId="a6cd5a81-e061-4766-e87d-96b2043bf678"
# Mostly teacher-forced (1000 frames = the whole 4 s example at 250 Hz).
audio, audio_pred = get_preds(inputs, targets, features, num_targets=1000)
# + id="X_I5pQV2vzvv" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="3264e583-3652-4ae8-9154-a485aac0310e"
# Listen to the ground-truth recording.
print('Original Audio')
Audio(audio, rate=sr)
# + id="Ur9hQi6c7jxK" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="15e233fc-0eb2-4ed5-d94a-5d3e7b367214"
# Listen to the DDSP resynthesis driven by the predicted controls.
print('Predicted Audio')
Audio(audio_pred, rate=sr)
| members/fabio/transformer_mapping_model_training.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %%
from utils import get_binary_mnist_dataloaders
from fastprogress.fastprogress import master_bar, progress_bar
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from jupyterthemes import jtplot
jtplot.style(context="talk")
# %% [markdown]
# # Explore Pixel Importance
#
# - knockout
# - highest variance
# %%
data_path = "../data"
classA = 1
classB = 8

# NOTE(review): batch sizes of 0 presumably mean "whole split in one batch" —
# confirm against get_binary_mnist_dataloaders in utils.
full_train_loader, _, _, _ = get_binary_mnist_dataloaders(
    data_path, classA, classB, 0, 0
)

# Per-pixel standard deviation across all training images (28x28 map),
# then the image row whose pixels vary the most in total.
deviations = torch.std(full_train_loader.dataset.data, dim=0)
max_dev_row = deviations.sum(dim=1).argmax()
max_dev_row
# %%
# Show the deviation map and outline the highest-variance row in red.
_, ax = plt.subplots()
ax.imshow(deviations)
xmin, xmax = ax.get_xlim()
rect = patches.Rectangle(
    (xmin, max_dev_row), 28, 1, linewidth=1, edgecolor="r", facecolor="none"
)
ax.add_patch(rect)
ax.grid(None)
ax.set_xticklabels([])
ax.set_yticklabels([]);
# %%
class CustomNeuron(nn.Module):
    """Logistic regression over a single 28-pixel row of an MNIST image.

    `layer_sizes` is accepted for interface compatibility with the other
    notebook models but is not used — the input is always one 28-pixel row.
    """

    def __init__(self, layer_sizes, row=None):
        super(CustomNeuron, self).__init__()
        # Which image row to slice out. Defaults to the notebook-global
        # max_dev_row (the highest-variance row). Capturing it here instead
        # of at forward time makes the model self-contained; in this
        # notebook the global never changes after model creation, so the
        # behavior is identical.
        self.row = max_dev_row if row is None else row
        self.layers = nn.Sequential(nn.Linear(28, 1), nn.Sigmoid())

    def forward(self, X):
        # Flatten to (batch, 784) and keep only the chosen 28-pixel row.
        row = self.row
        X = X.view(-1, 784)[:, row * 28 : row * 28 + 28]
        return self.layers(X)
# %%
def train_one_epoch(
    dataloader, model, criterion, learning_rate, weight_decay, momentum, device, mb
):
    """Train `model` for one epoch with hand-rolled SGD + momentum.

    Momentum buffers are stored on the model itself (model.momentum_grads)
    so their state persists across epochs. `mb` is the fastprogress master
    bar used as the parent of the per-batch progress bar.
    """
    if not hasattr(model, "momentum_grads"):
        model.momentum_grads = [torch.zeros_like(p) for p in model.parameters()]

    model.train()

    # BUG FIX: the original measured the epoch length with the global
    # `train_loader` instead of the `dataloader` argument, which breaks as
    # soon as any other loader is passed in.
    num_batches = len(dataloader)
    dataiter = iter(dataloader)
    for batch in progress_bar(range(num_batches), parent=mb):
        X, Y = next(dataiter)
        X, Y = X.to(device), Y.to(device)

        output = model(X)
        loss = criterion(output, Y)

        model.zero_grad()
        loss.backward()

        with torch.no_grad():
            for param, grad in zip(model.parameters(), model.momentum_grads):
                # Exponential moving average of the gradient.
                grad.set_(momentum * grad + (1 - momentum) * param.grad)
                # NOTE: weight decay is applied with its own coefficient,
                # not scaled by the learning rate (decoupled style).
                param -= learning_rate * grad + weight_decay * param
# %%
def compute_validation_accuracy(dataloader, model, criterion, device, mb, epoch):
    """Evaluate `model` on `dataloader`; print and return (mean loss, accuracy).

    Accuracy counts predictions that round to the label exactly.
    NOTE(review): `predictions.round().eq(Y)` assumes the model output and Y
    have broadcast-compatible shapes, e.g. both (batch, 1) — confirm the
    loader's label shape.
    """
    model.eval()
    total_examples = len(dataloader.dataset)
    total_loss = 0
    correct = 0
    with torch.no_grad():
        for X, Y in dataloader:
            X, Y = X.to(device), Y.to(device)
            predictions = model(X)
            total_loss += criterion(predictions, Y).item()
            correct += (predictions.round().eq(Y)).type(torch.float).sum().item()
    mean_loss = total_loss / len(dataloader)
    accuracy = correct / total_examples
    mb.write(
        f"{epoch:>3}: validation accuracy={(100*accuracy):5.2f}% and loss={mean_loss:.3f}"
    )
    return mean_loss, accuracy
# %%
# Configuration parameters
seed = 0
torch.manual_seed(seed)

# Hyperparameters (weight decay and momentum are deliberately disabled here;
# the commented values are the ones to re-enable for regularized training).
num_epochs = 4
batch_size = 128
valid_batch_size = 0
learning_rate = 1e-2
weight_decay = 0#1e-3
momentum = 0#0.9

# Training device
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using '{device}' device.")
# %%
# Get data loaders
train_loader, _, valid_loader, _ = get_binary_mnist_dataloaders(
data_path, classA, classB, batch_size, valid_batch_size
)
# %%
# Create neural network model
nx = train_loader.dataset.data.shape[1:].numel()
ny = 1
layer_sizes = (nx, ny)
model = CustomNeuron(layer_sizes).to(device)
print(model)
# Training utilities
criterion = nn.L1Loss()
# %%
# Training loop
mb = master_bar(range(num_epochs))
compute_validation_accuracy(valid_loader, model, criterion, device, mb, 0)
for epoch in mb:
train_one_epoch(
train_loader,
model,
criterion,
learning_rate,
weight_decay,
momentum,
device,
mb,
)
tloss, taccuracy = compute_validation_accuracy(
valid_loader, model, criterion, device, mb, epoch + 1
)
# %%
# Sweep one weight over +/-15 around its trained value and record the
# validation loss/accuracy at each setting.
w_index = 0
w_orig = model.layers[0].weight[0][w_index].item()
l_orig = tloss
a_orig = taccuracy
w_values = torch.linspace(w_orig - 15, w_orig + 15, 100)
losses = []
accuracies = []

# Evaluate on CPU so the in-place weight edits below are simple.
device = "cpu"
model.to(device)

with torch.no_grad():
    for w in progress_bar(w_values):
        model.layers[0].weight[0][w_index] = w
        loss, accuracy = compute_validation_accuracy(
            valid_loader, model, criterion, device, mb, 0
        )
        losses.append(loss)
        accuracies.append(accuracy)

    # Fix the changed parameter (restore the trained value)
    model.layers[0].weight[0][w_index] = w_orig
# %%
# Plot accuracy and loss as a function of the swept weight; the pink dot
# marks the trained value.
_, axes = plt.subplots(2, 1)
axes[0].plot(w_values, accuracies)
axes[0].plot(w_orig, a_orig, "o", color="pink")
axes[0].set_title("Parameter vs Accuracy")
axes[1].plot(w_values, losses)
axes[1].plot(w_orig, l_orig, "o", color="pink")
axes[1].set_title("Parameter vs Loss")
# %%
| lectures/l18-PlottingParameters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# # Dubins car navigating in a cluttered environment
# This script shows an example of trajectory obtained with SCP.
# The problem consists of computing a trajectory avoiding obstacles and minimizing control effort.
# The final time is a free optimization variable.
# +
# If first time running this code on a personal computer
# using Pkg
# Pkg.instantiate()
# -
# ### Script / SCP Initialization
# +
# Initializing script
using LinearAlgebra
using Ipopt
using JuMP
# Project-local files: vehicle model, SCP subproblem machinery, plotting helpers.
include("./Models/dubins_free.jl")
include("./SCP/scp_problem.jl")
include("./utils/dubins_plotting.jl")
# Number of time-discretization points and maximum number of SCP iterations
N, max_it = 41, 10
# Defining the model, the initial strategy and each convex subproblem
model = DubinsFree()
Xp,Up = initialize_trajectory(model,N)
problem = SCPProblem(model,N,Xp,Up)
# Problem data: initial guess for the (free) final time, model gain, and the
# boundary states [x; y; heading].
model.tf_guess = 6.0
model.k = 5.
model.x_init = [0;0;pi/3]
model.x_final = [3;3;pi/2.5]
model.B_free_final_time = true
model.B_free_final_angle = true
# Circular obstacles, each encoded as [center, radius].
model.obstacles = []
obs = [[0.7,0.9],0.3]
push!(model.obstacles, obs)
obs = [[0.7,1.5],0.4]
push!(model.obstacles, obs)
obs = [[1.5,1.2],0.3]
push!(model.obstacles, obs)
obs = [[1.8,0.65],0.45]
push!(model.obstacles, obs)
obs = [[2.0,2.85],0.45]
push!(model.obstacles, obs)
obs = [[2.1,2.2],0.3]
push!(model.obstacles, obs)
# Defining SCP parameters (trust-region radius, penalty weights, tolerances)
(Delta0, omega0, omegamax, epsilon,
    convergence_threshold) = get_initial_scp_parameters(model)
# plt_solutions(problem, model, X_all, U_all)
# -
# ## SCP
# +
# Defining penalization weights, trust-region radius and the list of solutions
include("SCP/scp_problem.jl")
include("./Models/dubins_free.jl")
Xp,Up = initialize_trajectory(model,N)
X, U = copy(Xp), copy(Up)
omega, Delta = omega0, Delta0
# Keep every intermediate trajectory so convergence can be plotted afterwards.
X_all, U_all = [], []
push!(X_all, copy(X))
push!(U_all, copy(U))
# SCP loop: stop on iteration budget, on convergence of the trust-region
# metric, or if the penalty weight omega grows past omegamax.
success, it = false, 1
while it < max_it &&
      !(success && convergence_metric(model,X,U,Xp,Up) < convergence_threshold) &&
      omega < omegamax
    println("-----------\nIteration $it\n-----------")
    # Storing the solution at the previous step and the linearized dynamics
    Xp, Up = copy(X), copy(U)
    model.f, model.A, model.B = compute_dynamics(model,Xp,Up)
    # Defining the convex subproblem (linearized dynamics + penalized constraints)
    reset_problem(problem, model)
    set_parameters(problem, model, Xp, Up, omega, Delta)
    define_nonconvex_cost(problem, model)
    define_constraints(problem, model)
    # Solving the convex subproblem
    JuMP.optimize!(problem.solver_model)
    X_sol, U_sol = JuMP.value.(problem.X), JuMP.value.(problem.U)
    # The free final time is stored in the last entry of the state trajectory.
    println("tf=",X_sol[end,end])
    X, U = copy(X_sol), copy(U_sol)
    # Shrink the trust region slightly each iteration.
    Delta = 0.99 * Delta
    success = true
    # Collecting the solution at each iteration
    push!(X_all,copy(X))
    push!(U_all,copy(U))
    it += 1
    println("Parameters:")
    println("omega = $omega; Delta = $Delta")
    println("metric = $(convergence_metric(model,X,U,Xp,Up))")
end
print(">>> Finished <<<")
# -
include("./utils/dubins_plotting.jl")
plt_solutions(problem, model, X_all, U_all)
plt.xlim([-0.2, 3.3])
plt.ylim([-0.2, 3.3])
print("Done Plotting")
# Final-time value of every SCP iterate (last state entry of each trajectory).
tf_all = [X_all[iter_i][end,end] for iter_i in 1:length(X_all)]
| dubins_free.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np

# Special kinds of matrices: identity, inverse, singular.

# Identity: np.eye returns floats (1.0/0.0). It is the neutral element of
# matrix multiplication — multiplying by it does not transform the space.
identidad = np.eye(4)
print(identidad)

# Inverse: for an invertible A, A @ inv(A) == I.
A = np.array([[-3, 1], [-2, 1]])
inversaA = np.linalg.inv(A)
print(f'Soy una matriz inversa \n {inversaA}')

# BUG FIX: this print was labeled "Soy una matriz singular" although it shows
# A times its inverse, i.e. the identity (up to floating-point error).
print(f'A por su inversa (identidad) \n {A.dot(inversaA)}')
# (Removed an unused matrix `I` that was defined here and never referenced;
# the same matrix is built below as `A`.)

# Key takeaway: for a linear system A x = b, the unknowns are x = A^-1 b.

identidad = np.eye(4)
print(identidad)
vector = np.array([[2], [3], [5], [7]])
# The identity maps the vector to itself.
print(identidad.dot(vector))
print(vector)

A = np.array([[1, 0, 1], [0, 1, 1], [-1, 1, 1]])
print(A)
inversa_A = np.linalg.inv(A)
print(inversa_A)
print(A.dot(inversa_A))

# A singular matrix has no inverse; np.linalg.inv raises LinAlgError.
# BUG FIX: the original crashed here — catch the error to demonstrate it.
singular = np.array([[1, 1], [1, 1]])
print(singular)
try:
    print(np.linalg.inv(singular))
except np.linalg.LinAlgError as err:
    print(f'Matriz singular, no tiene inversa: {err}')
| NoteBooks/Curso de Algebra lineal con Python/fundamentos-algebra-lineal-master/04 - Matrices identidad inversa singular/01 - Matrices identidad inversa singular.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import nltk
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import matplotlib.pyplot as plt
# +
# c={1:'come.................................................................................... coming came comes Improving the quality of end-of-life care for hospitalized patients is a priority for healthcare organizations. Studies have shown that physicians tend to over-estimate prognoses, which in combination with treatment inertia results in a mismatch between patients wishes and actual care at the end of life',
# 2:'We describe a method to address this problem using Deep Learning and Electronic Health Record (EHR) data, which is currently being piloted, with Institutional Review Board approval, at an academic medical center. The EHR data of admitted patients are automatically evaluated by an algorithm, which brings patients who are likely to benefit from palliative care services to the attention of the Palliative Care team.',
# 3:'The algorithm is a Deep Neural Network trained on the EHR data from previous years, to predict all-cause 3-12 month mortality of patients as a proxy for patients that could benefit from palliative care. Our predictions enable the Palliative Care team to take a proactive approach in reaching out to such patients,',
# 4:'rather than relying on referrals from treating physicians, or conduct time consuming chart reviews of all patients. We also present a novel interpretation technique which we use to provide explanations of the model\'s predictions.'}
# +
# data=pd.DataFrame.from_dict(c,orient='index')
# data.rename(columns={0:'SentimentText'},inplace=True)
# -
# Load the labeled training set and the unlabeled test set.
# NOTE(review): 'train.csv'/'test.csv' must exist in the working directory;
# ISO-8859-1 avoids decode errors on non-UTF-8 tweet text.
X_train = pd.read_csv('train.csv',encoding = 'ISO-8859-1')
X_test = pd.read_csv('test.csv',encoding = 'ISO-8859-1')
data = X_train[['ItemID','SentimentText']]
target = X_train['Sentiment']
test = X_test[['ItemID','SentimentText']]
# +
# Text preprocessing applied identically to train and test.
# NOTE(review): `data`/`test` are column slices of the original frames, so
# these assignments may emit pandas SettingWithCopyWarning.
# lowercase all data
data['SentimentText']=data['SentimentText'].str.lower()
test['SentimentText']=test['SentimentText'].str.lower()
# remove punctuation (single C-level pass via str.translate)
table = str.maketrans('','',string.punctuation)
data['SentimentText'] = data['SentimentText'].apply(lambda x: x.translate(table))
test['SentimentText'] = test['SentimentText'].apply(lambda x: x.translate(table))
# tokenize words (requires the NLTK 'punkt' tokenizer data to be downloaded)
data['SentimentText'] = data['SentimentText'].apply(nltk.word_tokenize)
test['SentimentText'] = test['SentimentText'].apply(nltk.word_tokenize)
# remove stopwords
# stopword = stopwords.words('english')
# for word in ['ain','aren','couldn','didn','doesn','hadn','hasn','haven','isn','mightn','mustn',
# 'needn','shan','shouldn','wasn','weren','wouldn','don','no','nor','not','won']:
# stopword.remove(word)
# data['SentimentText']=data['SentimentText'].apply(lambda x: [word for word in x if word not in stopword])
# test['SentimentText']=test['SentimentText'].apply(lambda x: [word for word in x if word not in stopword])
# +
# data
# -
# stemming: reduce each token to its Porter stem, then re-join into a string
# so the TF-IDF vectorizer can consume it.
ps=PorterStemmer()
data['SentimentText']=data['SentimentText'].apply(lambda x: ' '.join([ps.stem(word) for word in x]))
test['SentimentText']=test['SentimentText'].apply(lambda x: ' '.join([ps.stem(word) for word in x]))
from sklearn.feature_extraction.text import TfidfVectorizer
# min_df drops terms appearing in fewer than 0.01% of documents.
tfidf = TfidfVectorizer(min_df=0.0001)
# Fit the vocabulary on train only; transform test with the same vocabulary.
X = tfidf.fit_transform(data['SentimentText']).toarray()
X_test = tfidf.transform(test['SentimentText']).toarray()
X_test.shape
# +
#tfidf.get_feature_names()
# -
y=target.values
# +
# from sklearn.linear_model import LogisticRegression
# clf = LogisticRegression()
# clf.fit(X,y)
# clf.score(X,y)
# -
from sklearn.ensemble import RandomForestClassifier
# NOTE(review): no random_state is set, so results vary between runs.
clf = RandomForestClassifier()
clf.fit(X,y)
# +
# from sklearn.metrics import confusion_matrix
# confusion_matrix(y,clf.predict(X))
# -
# Predict test-set sentiment and write the Kaggle-style submission file.
predictions = clf.predict(X_test)
# Build the submission frame ONCE (the original constructed the identical
# concat/set_index/rename pipeline twice: once to write the CSV, once for `d`).
d = pd.concat([test['ItemID'], pd.Series(predictions)], axis=1) \
      .set_index('ItemID').rename(columns={0: 'Sentiment'})
d.to_csv('solution_1.csv')
# Bar chart of predicted sentiment class counts.
d.groupby('Sentiment').size().plot(kind='bar',figsize=[6,5],fontsize=20)
plt.xlabel('Sentiment',fontsize=16)
plt.ylabel('No. of tweets',fontsize=16)
plt.tight_layout()
# BUG FIX: the original passed c='blue', which is not a valid savefig()
# argument (ignored on old matplotlib, an error on recent versions).
plt.savefig('2')
# Per-tweet predicted sentiment for the first 20 test tweets.
d['Sentiment'].head(20).plot(kind='bar',figsize=[20,10],fontsize=20)
plt.xlabel('Tweet_number',fontsize=16)
plt.ylabel('Sentiment',fontsize=16)
plt.savefig('1')
| Sentiment Analysis using Random Forest and tfidf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dyjdlopez/jru-ai-workshop-2021/blob/main/day-3/notebooks/jru_ai_05.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="AK43JeITdao9"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="lILSwdy7dgXH"
# # Deploying AI Models: TensorFlow Lite
# Copyright D.Lopez 2021 | All Rights reserved
#
#
# Developing AI models is one thing, but deploying them is another. Deployment of AI apps make them shine as consumer products or displays their applicability as applied research. In this module, we will discuss how to deploy an AI model.
# + colab={"base_uri": "https://localhost:8080/"} id="IH02y0SRg3Om" outputId="5a1ca388-a114-4da3-debd-71f660c50cc8"
import numpy as np
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
from tqdm import tqdm
# Report library versions and whether a GPU is visible.
# NOTE(review): tf.test.is_gpu_available() is deprecated in favour of
# tf.config.list_physical_devices('GPU') — confirm the TF version in use.
print("\u2022 Using TensorFlow Version:", tf.__version__)
print("\u2022 Using TensorFlow Hub Version: ", hub.__version__)
print('\u2022 GPU Device Found.' if tf.test.is_gpu_available() else '\u2022 GPU Device Not Found. Running on CPU')
# + [markdown] id="qOc1RmtPdjqm"
# ## TensorFlow Lite
# TensorFlow Lite is a set of tools to help developers run TensorFlow models on mobile, embedded, and IoT devices. It enables on-device machine learning inference with low latency and a small binary size.
# TensorFlow Lite is designed to make it easy to perform machine learning on devices, "at the edge" of the network, instead of sending data back and forth from a server. For developers, performing machine learning on-device can help improve:
#
# * Latency: there's no round-trip to a server
# * Privacy: no data needs to leave the device
# * Connectivity: an Internet connection isn't required
# * Power consumption: network connections are power hungry
#
# TensorFlow Lite works with a huge range of devices, from tiny microcontrollers to powerful mobile phones.
#
# See the TensorFlow Lite [docs](https://www.tensorflow.org/lite/guide) for more info.
#
#
#
# + [markdown] id="pJIOA-e3d8le"
# ## Model Training
#
# Usually, the first step in converting an existing model to a TFLite model is train or retrieve it first.
#
# Use [TensorFlow Datasets](http://tensorflow.org/datasets) to load the cats and dogs dataset.
#
# This `tfds` package is the easiest way to load pre-defined data. If you have your own data, and are interested in importing using it with TensorFlow see [loading image data](../load_data/images.ipynb)
#
# The `tfds.load` method downloads and caches the data, and returns a `tf.data.Dataset` object. These objects provide powerful, efficient methods for manipulating data and piping it into your model.
#
# Since `"cats_vs_dog"` doesn't define standard splits, use the subsplit feature to divide it into (train, validation, test) with 80%, 10%, 10% of the data respectively.
#
# + colab={"base_uri": "https://localhost:8080/"} id="sxXwcLpAg8fu" outputId="cf793ef8-35cc-488e-fade-d27ebbb06e6b"
# Backbone choice as (hub module name, input pixel size, feature-vector size).
module_selection = ("mobilenet_v2", 224, 1280) #@param ["(\"mobilenet_v2\", 224, 1280)", "(\"inception_v3\", 299, 2048)"] {type:"raw", allow-input: true}
handle_base, pixels, FV_SIZE = module_selection
MODULE_HANDLE ="https://tfhub.dev/google/tf2-preview/{}/feature_vector/4".format(handle_base)
IMAGE_SIZE = (pixels, pixels)
print("Using {} with input size {} and output dimension {}".format(MODULE_HANDLE, IMAGE_SIZE, FV_SIZE))
# + id="cCtPZNRwg-02"
# cats_vs_dogs ships only a 'train' split; carve it into 80/10/10
# train/validation/test via subsplit slicing.
splits, info = tfds.load('cats_vs_dogs', with_info=True, as_supervised=True,
                         split =["train[:80%]", "train[80%:-10%]", "train[-10%:]"])
(train_examples, validation_examples, test_examples) = splits
num_examples = info.splits['train'].num_examples
num_classes = info.features['label'].num_classes
# + [markdown] id="8YTRme8vFEp8"
# ### Format the Data
#
# Use the `tf.image` module to format the images for the task.
#
# Resize the images to a fixes input size, and rescale the input channels
# + id="MYrPPooshApF"
def format_image(image, label):
    """Resize `image` to the module's expected IMAGE_SIZE and scale pixels to [0, 1]."""
    scaled = tf.image.resize(image, IMAGE_SIZE) / 255.0
    return scaled, label
# + id="o_n4rX5ghNri"
## Shuffling and batching the data
BATCH_SIZE = 32 #@param {type:"integer"}
# + id="gHXm7wdphQwe"
# Shuffle buffer is a quarter of the dataset; prefetch(1) overlaps input
# pipeline work with training. Test uses batch size 1 for per-image inference.
train_batches = train_examples.shuffle(num_examples // 4).map(format_image).batch(BATCH_SIZE).prefetch(1)
validation_batches = validation_examples.map(format_image).batch(BATCH_SIZE).prefetch(1)
test_batches = test_examples.map(format_image).batch(1)
# + id="eFXNv3wfhUqN"
# Frozen hub feature extractor (trainable=False → transfer learning only).
do_fine_tuning = False
feature_extractor = hub.KerasLayer(MODULE_HANDLE,
                                   input_shape=IMAGE_SIZE + (3,),
                                   output_shape=[FV_SIZE],
                                   trainable=False)
# + [markdown] id="ftfVKpfnHefl"
# ## Defining the Model
#
# All it takes is to put a linear classifier on top of the `feature_extractor_layer` with the Hub module.
#
# For speed, we start out with a non-trainable `feature_extractor_layer`, but you can also enable fine-tuning for greater accuracy.
# + colab={"base_uri": "https://localhost:8080/"} id="TmnqdQ1ShYso" outputId="6d3d1c8e-30f8-4167-b614-0ff7e16e534d"
print("Building model with", MODULE_HANDLE)
# Linear softmax classifier on top of the frozen feature extractor.
model = tf.keras.Sequential([
    feature_extractor,
    tf.keras.layers.Dense(num_classes, activation='softmax')
])
model.summary()
# + id="AEeFqA20ha0O"
#@title (Optional) Unfreeze some layers
NUM_LAYERS = 10 #@param {type:"slider", min:1, max:50, step:1}
# When fine-tuning, unfreeze the extractor plus the last NUM_LAYERS layers.
if do_fine_tuning:
    feature_extractor.trainable = True
    for layer in model.layers[-NUM_LAYERS:]:
        layer.trainable = True
else:
    feature_extractor.trainable = False
# + id="3dW0kGh7hcbP"
# Fine-tuning uses a small-LR SGD with momentum; plain transfer learning uses Adam.
if do_fine_tuning:
    model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.002, momentum=0.9),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=['accuracy'])
else:
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="FeQJ6eYFheKg" outputId="df6cf0c9-1c38-461d-e4f4-35e32093dcde"
## Model Training
EPOCHS = 5
hist = model.fit(train_batches,
                 epochs=EPOCHS,
                 validation_data=validation_batches)
# + id="YZLM5et6hffJ"
## Export Model — directory for the SavedModel used by the TFLite converter.
CATS_VS_DOGS_SAVED_MODEL = "exp_saved_model"
# + [markdown] id="BK6s3vrEeApu"
# ## Model Conversion
# + colab={"base_uri": "https://localhost:8080/"} id="Zo-4AjJDhidu" outputId="566a7356-c414-4a2f-ae0f-4ebcc5757c64"
tf.saved_model.save(model, CATS_VS_DOGS_SAVED_MODEL)
# + colab={"base_uri": "https://localhost:8080/"} id="1ctNNqEVhjyp" outputId="47bbdc8d-6a5d-4030-9478-f0df8aa6a0d3" magic_args="-s $CATS_VS_DOGS_SAVED_MODEL" language="bash"
# saved_model_cli show --dir $1 --tag_set serve --signature_def serving_default
# + id="362mIq7urkxk"
loaded = tf.saved_model.load(CATS_VS_DOGS_SAVED_MODEL)
# + colab={"base_uri": "https://localhost:8080/"} id="dMeEAc0phlYe" outputId="79be320c-dd2f-47a3-f3ee-b91b6a02ee4f"
# Print the available signatures and the default one's input/output specs.
print(list(loaded.signatures.keys()))
infer = loaded.signatures["serving_default"]
print(infer.structured_input_signature)
print(infer.structured_outputs)
# + id="oujsQRm4hmpF"
converter = tf.lite.TFLiteConverter.from_saved_model(CATS_VS_DOGS_SAVED_MODEL)
# + [markdown] id="V2AljUsjH53T"
# ### Post-Training Quantization
# The simplest form of post-training quantization quantizes weights from floating point to 8-bits of precision. This technique is enabled as an option in the TensorFlow Lite converter. At inference, weights are converted from 8-bits of precision to floating point and computed using floating-point kernels. This conversion is done once and cached to reduce latency.
#
# To further improve latency, hybrid operators dynamically quantize activations to 8-bits and perform computations with 8-bit weights and activations. This optimization provides latencies close to fully fixed-point inference. However, the outputs are still stored using floating point, so that the speedup with hybrid ops is less than a full fixed-point computation.
# + id="jAYold33hpF6"
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# + [markdown] id="iW_ThMOmIJ00"
# ### Post-Training Integer Quantization
# We can get further latency improvements, reductions in peak memory usage, and access to integer only hardware accelerators by making sure all model math is quantized. To do this, we need to measure the dynamic range of activations and inputs with a representative data set. You can simply create an input data generator and provide it to our converter.
# + id="zEushlhPhvgP"
def representative_data_gen():
    """Yield single-element input batches (100 test samples) for quantization calibration."""
    yield from ([sample] for sample, _ in test_batches.take(100))
converter.representative_dataset = representative_data_gen
# + [markdown] id="8O2GVWnfIOos"
# ### Full Integer Quantization
#
# To require the converter to only output integer operations, one can specify:
# + id="_jM4z8erh4nt"
# Restrict the converted model to int8 TFLite builtin ops only.
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# + id="73K2jB2mh6yd"
# Run the conversion and write the flatbuffer to disk.
tflite_model = converter.convert()
tflite_model_file = 'converted_model.tflite'
with open(tflite_model_file, "wb") as f:
    f.write(tflite_model)
# + [markdown] id="SPLCyiH6eHt3"
# ## Saving the Quantized Model
# + id="kI8dH99EiDNF"
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=tflite_model_file)
interpreter.allocate_tensors()
# Tensor indices of the model's (single) input and output.
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# + colab={"base_uri": "https://localhost:8080/"} id="z1YDE8ZziDm6" outputId="ff8afbfb-d6d7-49e0-ef75-9f37e1705dc6"
# Gather results for the randomly sampled test images
# Run 10 single-image batches through the TFLite interpreter, keeping the
# raw predictions, the ground-truth labels and the images for plotting.
predictions = []
test_labels, test_imgs = [], []
for img, label in tqdm(test_batches.take(10)):
    interpreter.set_tensor(input_index, img)
    interpreter.invoke()
    predictions.append(interpreter.get_tensor(output_index))
    test_labels.append(label.numpy()[0])
    test_imgs.append(img)
# + id="AfgXBRkosxOX"
#@title Utility functions for plotting
# Utilities for plotting
# Class index 0 = cat, 1 = dog (matches the tfds 'cats_vs_dogs' label order).
class_names = ['cat', 'dog']
def plot_image(i, predictions_array, true_label, img):
    """Show test image i with its predicted class, confidence, and true class.

    The xlabel is green when the prediction matches the true label, red otherwise.
    """
    preds, actual, picture = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(np.squeeze(picture), cmap=plt.cm.binary)
    guess = np.argmax(preds)
    label_color = 'green' if guess == actual else 'red'
    caption = "{} {:2.0f}% ({})".format(class_names[guess],
                                        100 * np.max(preds),
                                        class_names[actual])
    plt.xlabel(caption, color=label_color)
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="tr24d3XwiHFI" outputId="9fad3750-e472-4560-946f-e83ad998cc8d"
#@title Visualize the outputs { run: "auto" }
# Pick one of the 10 sampled test images and plot its TFLite prediction.
index = 1 #@param {type:"slider", min:0, max:9, step:1}
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(index, predictions, test_labels, test_imgs)
plt.show()
# + [markdown] id="2mfa_tT4xtOU"
# # TF Serving
#
# TensorFlow Serving is a flexible, high-performance serving system for machine learning models, designed for production environments. TensorFlow Serving makes it easy to deploy new algorithms and experiments, while keeping the same server architecture and APIs. TensorFlow Serving provides out-of-the-box integration with TensorFlow models, but can be easily extended to serve other types of models and data.
#
# Check out more at the TF Serving [docs](https://www.tensorflow.org/tfx/guide/serving).
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="WqunJfkIxujT" outputId="eec747d5-cc87-4f3d-fc70-9ee1f7c603bc"
import sys
import os
# Colab-only dependency pinning for the TF Serving demo below.
print("Installing dependencies for Colab environment")
# !pip install -Uq grpcio==1.26.0
# + colab={"base_uri": "https://localhost:8080/"} id="dbBKTqKpx37e" outputId="68cfb223-c1ab-4f68-a5ee-8757a1ccec3c"
import tempfile
# TF Serving expects <base>/<version>/ — export version 1 under the temp dir.
MODEL_DIR = tempfile.gettempdir()
version = 1
export_path = os.path.join(MODEL_DIR, str(version))
print('export_path = {}\n'.format(export_path))
# Save in SavedModel format (save_format=None lets Keras infer it from the path).
tf.keras.models.save_model(
    model,
    export_path,
    overwrite=True,
    include_optimizer=True,
    save_format=None,
    signatures=None,
    options=None
)
print('\nSaved model:')
# !ls -l {export_path}
# + [markdown] id="dMjpK3W-JFQD"
# ## Examine your saved model
#
# We'll use the command line utility `saved_model_cli` to look at the [MetaGraphDefs](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/MetaGraphDef) (the models) and [SignatureDefs](../signature_defs) (the methods you can call) in our SavedModel. See [this discussion of the SavedModel CLI](https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/saved_model.md#cli-to-inspect-and-execute-savedmodel) in the TensorFlow Guide.
# + colab={"base_uri": "https://localhost:8080/"} id="tkKoP7x8x7E7" outputId="86c60c12-0e2e-4510-bb63-01a627769488"
# !saved_model_cli show --dir {export_path} --all
# + [markdown] id="XqNE53bNJLuj"
# ## Serve your model with TensorFlow Serving
#
# ### Add TensorFlow Serving distribution URI as a package source:
#
# We're preparing to install TensorFlow Serving using [Aptitude](https://wiki.debian.org/Aptitude) since this Colab runs in a Debian environment. We'll add the `tensorflow-model-server` package to the list of packages that Aptitude knows about. Note that we're running as root.
#
# Note: This example is running TensorFlow Serving natively, but [you can also run it in a Docker container](https://www.tensorflow.org/tfx/serving/docker), which is one of the easiest ways to get started using TensorFlow Serving.
# + colab={"base_uri": "https://localhost:8080/"} id="LeNVyRKSyCjj" outputId="b736b1d3-68d1-46b8-c25f-aab24797bb2f"
# !echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \
# curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -
# !apt update
# + colab={"base_uri": "https://localhost:8080/"} id="RJ1PEnV-yHsj" outputId="3a211ddc-0276-4765-b877-4832fe9cc461"
# !apt-get install tensorflow-model-server
# + [markdown] id="ZUcdC8gZJUC8"
# ### Start running TensorFlow Serving
#
# This is where we start running TensorFlow Serving and load our model. After it loads we can start making inference requests using REST. There are some important parameters:
#
# * `rest_api_port`: The port that you'll use for REST requests.
# * `model_name`: You'll use this in the URL of REST requests. It can be anything.
# * `model_base_path`: This is the path to the directory where you've saved your model.
# + id="BcqcBBw5yKcq"
os.environ["MODEL_DIR"] = MODEL_DIR
# + colab={"base_uri": "https://localhost:8080/"} id="nyVpmNWNyNYD" outputId="ca1e4c80-8a7d-4a31-b42a-dde98a4eed0c" magic_args="--bg " language="bash"
# nohup tensorflow_model_server \
# --rest_api_port=8501 \
# --model_name=catvdog_model \
# --model_base_path="${MODEL_DIR}" >server.log 2>&1
# + id="SMqwd3UryOyD"
# !tail server.log
# + colab={"base_uri": "https://localhost:8080/"} id="UsdL7ozx6gSN" outputId="814c194d-4e13-4cae-fe65-eacbd7c56384"
# Collect 5 test images (as nested Python lists, JSON-serializable) and their labels.
test_images, test_labels = [], []
for img, labels in tqdm(test_batches.take(5)):
    test_images.append(img.numpy().tolist())
    test_labels.append(labels)
test_images = np.array(test_images)
# + colab={"base_uri": "https://localhost:8080/"} id="RH0cjP6sELq-" outputId="18cf9846-027f-4049-a911-9c787955e02a"
test_labels
# + [markdown] id="kYuLaA6TJbv0"
# ### Make REST requests
# We'll send a predict request as a POST to our server's REST endpoint, and pass it three examples. We'll ask our server to give us the latest version of our servable by not specifying a particular version.
# + colab={"base_uri": "https://localhost:8080/"} id="jjvU4R-UyQMD" outputId="ebcca4d5-f042-4d53-ef4b-4c8032bb3ca0"
import json
# !pip install -q requests
import requests

# Pick one collected test image and POST it to the TF Serving REST endpoint.
index = 1
# BUG FIX: the original had a dangling `inp_img =` with no right-hand side
# (a syntax error); the variable was never used, so the line is removed.
data = json.dumps({"signature_name": "serving_default", "instances": test_images[index].tolist()})
print('Data: {} ... {}'.format(data[:50], data[len(data)-52:]))
headers = {"content-type": "application/json"}
# Query the latest version of 'catvdog_model' served on port 8501.
json_response = requests.post('http://localhost:8501/v1/models/catvdog_model:predict', data=data, headers=headers)
print(json.loads(json_response.text))
predictions = json.loads(json_response.text)['predictions']
# + colab={"base_uri": "https://localhost:8080/", "height": 208} id="1R-rna6B5byK" outputId="371897f1-0c34-46b6-d22b-f0da5838a82c"
#@title Visualize the outputs { run: "auto" }
# Plot the REST prediction for the image posted above.
# NOTE(review): test_labels[index]/test_imgs[index] mix entries from two
# different sampling cells — confirm the indices line up as intended.
plt.figure(figsize=(6,3))
plot_image(0, predictions, test_labels[index], test_imgs[index])
plt.show()
| day-3/notebooks/jru_ai_05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="HaA1pmS05YvO"
# ##**Installing the transformers library**
#
#
# + id="H8NKlQOUwdC1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616885051146, "user_tz": -60, "elapsed": 9474, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="10a8d6f1-d386-4453-b546-014359f8df2a"
# !pip install transformers
# + [markdown] id="2RTRSs0dH-V-"
# ##**Importing the tools**
# + id="p7WXSnQowxsP"
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import torch
import transformers as ppb
from sklearn.preprocessing import LabelEncoder
# NOTE(review): duplicate import — train_test_split is already imported above.
from sklearn.model_selection import train_test_split
import warnings
import re
# Silence library warnings for cleaner notebook output.
warnings.filterwarnings('ignore')
# + [markdown] id="YcCtVBHw6OyL"
# ##**Importing the dataset from Drive**
# + id="dDgA4lpHw24H" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886181089, "user_tz": -60, "elapsed": 47046, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="903b177f-bcdf-4929-91f0-9463c96279b7"
# Mount Google Drive so the bug-report CSVs below can be read (Colab-only).
from google.colab import drive
drive.mount('/content/gdrive')
# + id="P4GSW5B5wMKV"
#Mozilla
df1=pd.read_csv('gdrive/My Drive/cln_copie_dup_total1.csv',delimiter=';')
df2=pd.read_csv('gdrive/My Drive/cln_copie_nndup_total1.csv',delimiter=';')
# + id="N8mxwPmXwQLw" executionInfo={"status": "ok", "timestamp": 1616886189601, "user_tz": -60, "elapsed": 2571, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
#Thunderbird
df1=pd.read_csv('gdrive/My Drive/test_dup_TB.csv',delimiter=';')
df2=pd.read_csv('gdrive/My Drive/test_Nondup_TB.csv',delimiter=';')
# + id="gmmQx4iNXmp2"
#Eclipse
df1=pd.read_csv('gdrive/My Drive/EP_test_dup.csv',delimiter=';')
df2=pd.read_csv('gdrive/My Drive/EP_test_nondup.csv',delimiter=';')
# + id="IxNxIhawCSgc" executionInfo={"status": "ok", "timestamp": 1616886191045, "user_tz": -60, "elapsed": 1177, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
df1['Label'] = 1
df2['Label'] = 0
# + [markdown] id="oXDpKzHH68eY"
# ##**Loading the Pre-trained BERT model**
# + id="NQu-ZrK0zIpZ" colab={"base_uri": "https://localhost:8080/", "height": 262, "referenced_widgets": ["5aecb7f76a354132bcdcb61066590f25", "<KEY>", "39434ead356c40f5896af5057fd169f9", "<KEY>", "<KEY>", "<KEY>", "e1fea3ef249f4686af242747c1385a98", "44ddf467fa77463797f49ff5c68c14c2", "<KEY>", "b1e2329a82784b5c8a16ee005367c65c", "<KEY>", "<KEY>", "3daba6ee3d9f4f2db9440e241ded6d9a", "f6573b45e923476b832a1be137c2e863", "528bfea329ec4ab080976473ff4ea9df", "ef2449e1bec4427398d15a8b0077db00", "c808696460a94f439a947cee357ffb20", "e778ac6005904b33b7a7447e87c5e489", "3ac6cced9ade4919b8f60fb53a8c06b7", "<KEY>", "<KEY>", "41e0eecb72b3427a8a2147d6c413c16a", "46f53cb9d8084fb58e7ea29cebe4eae9", "cf2a68718f694adfa4fda9fd9acd8d35", "<KEY>", "9e8a64bb823b4ecd82117f0145cd0ce8", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "0e2be8165cca4f36a31dd89cd7f4e17f", "<KEY>", "feabe7b269ec4f4eabaaed3ffb5d45fb", "7f8044d3f6224052a5118927f9a666c3", "52361a524b4c494386911ace0a3fca02", "53891c14aaa043658cddf6f4a34b70a1", "cac4408648ad42b6b0faf55d95750fd3", "<KEY>", "f92b22aa3b2c4b6d9b412686ed910b2d", "<KEY>"]} executionInfo={"status": "ok", "timestamp": 1616886249124, "user_tz": -60, "elapsed": 25957, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="5a511007-d016-480b-f438-6fe6b9cf7555"
import time
start = time.time()
model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)
# + [markdown] id="yXePBDNnYoaC"
# # **Remove stop words**
# + id="JVNkRAxRluFC" executionInfo={"status": "ok", "timestamp": 1616886198573, "user_tz": -60, "elapsed": 774, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
df1['Title1']= df1['Title1'].str.replace(r'i' 'me' 'my' 'myself' 'we'
'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she' 'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that'
'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an' 'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through'
'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once' 'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own' 'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
df1['Title2']= df1['Title2'].str.replace(r'i' 'me' 'my' 'myself' 'we'
'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she' 'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that'
'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an'
'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through' 'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once' 'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own'
'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
df2['Title1']= df2['Title1'].str.replace(r'i' 'me' 'my' 'myself' 'we' 'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she'
'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that' 'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an'
'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through'
'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once' 'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own' 'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
df2['Title2']= df2['Title2'].str.replace(r'i' 'me' 'my' 'myself' 'we' 'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she'
'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that' 'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an'
'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through'
'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once' 'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own'
'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
df1['Description1']= df1['Description1'].str.replace(r'i' 'me' 'my' 'myself' 'we' 'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she'
'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that' 'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an' 'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through'
'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once'
'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own' 'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
df1['Description2']= df1['Description2'].str.replace(r'i' 'me' 'my' 'myself' 'we' 'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she'
'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that' 'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an'
'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through'
'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once' 'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own'
'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
df2['Description1']= df2['Description1'].str.replace(r'i' 'me' 'my' 'myself' 'we' 'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she'
'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that' 'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an'
'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through' 'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once'
'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own' 'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
df2['Description2']= df2['Description2'].str.replace(r'i' 'me' 'my' 'myself' 'we' 'our' 'ours' 'ourselves' 'you' 'your' 'yours' 'yourself''yourselves' 'they' 'we' 'him' 'he' 'him' 'his' 'himself' 'she'
'her' 'hers' 'herself' 'it''its' 'itself' 'they' 'them' 'their' 'theirs' 'themselves' 'what' 'which' 'who' 'whom' 'this' 'that' 'these' 'those' 'am' 'is' 'are' 'was' 'were' 'be' 'been' 'being' 'have' 'has' 'had' 'having' 'do' 'does' 'did' 'doing' 'a' 'an'
'the' 'and' 'but' 'if' 'or' 'because' 'as' 'until' 'while' 'of' 'at' 'by' 'for' 'with' 'about' 'against' 'between' 'into' 'through' 'during' 'before' 'after' 'above' 'below' 'to' 'from' 'up' 'down' 'in' 'out' 'on' 'off' 'over' 'under' 'again' 'further' 'then' 'once'
'here' 'there' 'when' 'where' 'why' 'how' 'all' 'any' 'both' 'each' 'few' 'more' 'most' 'other' 'some' 'such' 'no' 'nor' 'not' 'only' 'own' 'same' 'so' 'than' 'too' 'very' 's' 't' 'can' 'will' 'just' 'don' 'should' 'now'
'java' 'com' 'org' ,'')
# + id="PJjz50OskWuz"
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
#stop = stopwords.words('english')
stop_words = stopwords.words('english')
newStopWords = ['java','com','org']
stop_words.extend(newStopWords)
df1['Title1'] = np.array(df1['Title1'].apply(lambda x: [item for item in x if item not in stop_words]))
df1['Title2'] = np.array(df1['Title2'].apply(lambda x: [item for item in x if item not in stop_words]))
df1['Description1'] = np.array(df1['Description1'].apply(lambda x: [item for item in x if item not in stop_words]))
df1['Description2'] = np.array(df1['Description2'].apply(lambda x: [item for item in x if item not in stop_words]))
df1['Title1'] = df1['Title1'].apply((lambda x: item for item in x if item not in stop_words))
df2['Title1'] = np.array(df2['Title1'].apply(lambda x: [item for item in x if item not in stop_words]))
df2['Title2'] = np.array(df2['Title2'].apply(lambda x: [item for item in x if item not in stop_words]))
df2['Description1'] = np.array(df2['Description1'].apply(lambda x: [item for item in x if item not in stop_words]))
df2['Description2'] = np.array(df2['Description2'].apply(lambda x: [item for item in x if item not in stop_words]))
# + [markdown] id="xJq9MbD5Lii8"
# ##**Lower case**
# + id="NMGQBwa0iSEg" colab={"base_uri": "https://localhost:8080/", "height": 231} executionInfo={"status": "error", "timestamp": 1614714586054, "user_tz": -60, "elapsed": 881, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="a3e0137f-f2b8-4e91-8c6f-603fd8b0b9c0"
df[0]= df[0].str.lower()
df[1]= df[1].str.lower()
df[2]= df[2].str.lower()
df[3]= df[3].str.lower()
df[4]= df[4].str.lower()
df[5]= df[5].str.lower()
# + [markdown] id="qlH7UyBSmAH7"
# ## **Remove Digits**
# + id="M9wYzDSbmSlV"
df[3] = df[3].str.replace(r'0', '')
df[3] = df[3].str.replace(r'1', '')
df[3] = df[3].str.replace(r'2', '')
df[3] = df[3].str.replace(r'3', '')
df[3] = df[3].str.replace(r'4', '')
df[3] = df[3].str.replace(r'5', '')
df[3] = df[3].str.replace(r'6', '')
df[3] = df[3].str.replace(r'7', '')
df[3] = df[3].str.replace(r'8', '')
df[3] = df[3].str.replace(r'9', '')
# + [markdown] id="lDL7VxbX7NS_"
# ##**Remove special characters**
# + id="X3wEYfHU6EjL"
df[3] = df[3].str.replace(r'/', '')
df[3] = df[3].str.replace(r'@ ?', '')
df[3] = df[3].str.replace(r'!', '')
df[3] = df[3].str.replace(r'+', '')
df[3] = df[3].str.replace(r'-', '')
df[3] = df[3].str.replace(r'/', '')
df[3] = df[3].str.replace(r':', '')
df[3] = df[3].str.replace(r';', '')
df[3] = df[3].str.replace(r'>', '')
df[3] = df[3].str.replace(r'=', '')
df[3] = df[3].str.replace(r'<', '')
df[3] = df[3].str.replace(r'(', '')
df[3] = df[3].str.replace(r')', '')
df[3] = df[3].str.replace(r'#', '')
df[3] = df[3].str.replace(r'$', '')
df[3] = df[3].str.replace(r'&', '')
df[3] = df[3].str.replace(r'*', '')
df[3] = df[3].str.replace(r'%', '')
df[3] = df[3].str.replace(r'_', '')
# + id="BB2p17rbF-6n" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1601739185372, "user_tz": -60, "elapsed": 637, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="1d799acd-1db8-4e0c-92a5-7ece85baa32c"
df2.shape
# + id="GApjihzgRMjm"
df[3] = pd.Series(df[3], dtype="string") # Pblm tokenize : " Input is not valid ,Should be a string, a list/tuple of strings or a list/tuple of integers"
df[2] = pd.Series(df[2], dtype="string")
df[2] = df[2].astype("|S")
df[2].str.decode("utf-8")
df[3] = df[3].astype("|S")
df[3].str.decode("utf-8")
# + id="A3RHS3QohXKz"
df[3].str.len()
# + [markdown] id="pFq2bWlB8ptu"
# ##**Tokenization**
# + id="UegEdeD-JdRQ"
batch_31=df1[:3000]
batch_32=df2[:3000]
df3 = pd.concat([batch_31,batch_32], ignore_index=True)
batch_41=df1[3000:6000]
batch_42=df2[3000:6000]
df4 = pd.concat([batch_41,batch_42], ignore_index=True)
batch_51=df1[6000:9000]
batch_52=df2[6000:9000]
df5 = pd.concat([batch_51,batch_52], ignore_index=True)
batch_61=df1[9000:12000]
batch_62=df2[9000:12000]
df6 = pd.concat([batch_61,batch_62], ignore_index=True)
batch_71=df1[12000:15000]
batch_72=df2[12000:15000]
df7 = pd.concat([batch_71,batch_72], ignore_index=True)
batch_81=df1[15000:18000]
batch_82=df2[15000:18000]
df8 = pd.concat([batch_81,batch_82], ignore_index=True)
batch_91=df1[18000:21000]
batch_92=df2[18000:21000]
df9 = pd.concat([batch_91,batch_92], ignore_index=True)
batch_101=df1[21000:]
batch_102=df2[21000:]
df10 = pd.concat([batch_101,batch_102], ignore_index=True)
# + [markdown] id="f-GEFOyziCKS"
# #**Batch ThunderBird**
# + id="HzsZ95IlEJKQ" executionInfo={"status": "ok", "timestamp": 1616886249127, "user_tz": -60, "elapsed": 19457, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
batch_31=df1[:500]
batch_32=df2[:500]
df3 = pd.concat([batch_31,batch_32], ignore_index=True)
batch_41=df1[500:1000]
batch_42=df2[500:1000]
df4 = pd.concat([batch_41,batch_42], ignore_index=True)
batch_51=df1[1000:1500]
batch_52=df2[1000:1500]
df5 = pd.concat([batch_51,batch_52], ignore_index=True)
batch_61=df1[1500:2000]
batch_62=df2[1500:2000]
df6 = pd.concat([batch_61,batch_62], ignore_index=True)
batch_71=df1[2000:2500]
batch_72=df2[2000:2500]
df7 = pd.concat([batch_71,batch_72], ignore_index=True)
batch_81=df1[2500:3000]
batch_82=df2[2500:3000]
df8 = pd.concat([batch_81,batch_82], ignore_index=True)
batch_91=df1[3000:3486]
batch_92=df2[3000:3486]
df9 = pd.concat([batch_91,batch_92], ignore_index=True)
#Testing
batch_101=df1[3486:3900]
batch_102=df2[3486:3900]
df10 = pd.concat([batch_101,batch_102], ignore_index=True)
batch_111=df1[3900:4338]
batch_112=df2[3900:4374]
df11 = pd.concat([batch_111,batch_112], ignore_index=True)
#df12 = df2[4500:5500]
#df13=df2[5500:6000]
#df14=df2[6000:6500]
#df15=df2[6500:7000]
#df16=df2[7000:7500]
#df17=df2[7500:8000]
#df18=df2[8000:8500]
#df19=df2[8500:9000]
#df20=df2[9000:9500]
#df21=df2[9500:]
# + id="U-w8E1PFLR2n"
batch_121=df1[4500:]
batch_122=df2[4500:5500]
df12 = pd.concat([batch_121,batch_122], ignore_index=True)
# + [markdown] id="cDqEIaitiQpP"
# #**Batch EP**
# + id="CmoBT_kR-qEz"
#training
batch_31=df1[:500]
batch_32=df2[:500]
df3 = pd.concat([batch_31,batch_32], ignore_index=True)
batch_41=df1[500:1000]
batch_42=df2[500:1000]
df4 = pd.concat([batch_41,batch_42], ignore_index=True)
batch_51=df1[1000:1500]
batch_52=df2[1000:1500]
df5 = pd.concat([batch_51,batch_52], ignore_index=True)
batch_61=df1[1500:2000]
batch_62=df2[1500:2000]
df6 = pd.concat([batch_61,batch_62], ignore_index=True)
batch_71=df1[2000:2500]
batch_72=df2[2000:2500]
df7 = pd.concat([batch_71,batch_72], ignore_index=True)
batch_81=df1[2500:3000]
batch_82=df2[2500:3000]
df8 = pd.concat([batch_81,batch_82], ignore_index=True)
batch_91=df1[3000:3500]
batch_92=df2[3000:3500]
df9 = pd.concat([batch_91,batch_92], ignore_index=True)
batch_101=df1[3500:4000]
batch_102=df2[3500:4000]
df10 = pd.concat([batch_101,batch_102], ignore_index=True)
batch_111=df1[4000:4500]
batch_112=df2[4000:4500]
df11 = pd.concat([batch_111,batch_112], ignore_index=True)
batch_121=df1[4500:5000]
batch_122=df2[4500:5000]
df12 = pd.concat([batch_121,batch_122], ignore_index=True)
batch_131=df1[5000:5250]
batch_132=df2[5000:5250]
df13 = pd.concat([batch_131,batch_132], ignore_index=True)
batch_141=df1[5250:5500]
batch_142=df2[5250:5500]
df14 = pd.concat([batch_141,batch_142], ignore_index=True)
batch_151=df1[5500:5750]
batch_152=df2[5500:5750]
df15 = pd.concat([batch_151,batch_152], ignore_index=True)
batch_161=df1[5750:6000]
batch_162=df2[5750:6000]
df16 = pd.concat([batch_161,batch_162], ignore_index=True)
batch_171=df1[6000:6250]
batch_172=df2[6000:6250]
df17 = pd.concat([batch_171,batch_172], ignore_index=True)
batch_181=df1[6250:6500]
batch_182=df2[6250:6500]
df18 = pd.concat([batch_181,batch_182], ignore_index=True)
batch_191=df1[6500:6750]
batch_192=df2[6500:6750]
df19 = pd.concat([batch_191,batch_192], ignore_index=True)
batch_201=df1[6750:7000]
batch_202=df2[6750:7000]
df20 = pd.concat([batch_201,batch_202], ignore_index=True)
batch_211=df1[7000:7250]
batch_212=df2[7000:7250]
df21 = pd.concat([batch_211,batch_212], ignore_index=True)
batch_221=df1[7250:7400]
batch_222=df2[7250:7400]
df22 = pd.concat([batch_221,batch_222], ignore_index=True)
#testing : df1: D: 8103 / df2: Non: 10397
batch_231=df1[7400:7750]
batch_232=df2[7400:7750]
df23 = pd.concat([batch_231,batch_232], ignore_index=True)
batch_241=df1[7750:8000]
batch_242=df2[7750:8000]
df24 = pd.concat([batch_241,batch_242], ignore_index=True)
batch_251=df1[8000:8103]
batch_252=df2[8000:8250]
df25 = pd.concat([batch_251,batch_252], ignore_index=True)
#batch_261=df[8250:8500]
batch_262=df2[8250:8500]
df26=batch_262
#df26 = pd.concat([batch_261,batch_262], ignore_index=True)
#batch_271=df1[8500:8750]
batch_272=df2[8500:9000]
df27= batch_272
#df27 = pd.concat([batch_271,batch_272], ignore_index=True)
#batch_281=df1[8750:9000]
#batch_282=df2[8750:9000]
#df28= batch_282
#df28 = pd.concat([batch_281,batch_282], ignore_index=True)
#batch_291=df1[9000:9500]
batch_292=df2[9000:9500]
df29= batch_292
#df29 = pd.concat([batch_291,batch_292], ignore_index=True)
#batch_301=df1[9250:9500]
batch_302=df2[9500:9900]
df30 = batch_302
#df30 = pd.concat([batch_301,batch_302], ignore_index=True)
#batch_311=df1[9500:9750]
batch_312=df2[9900:10103]
df31= batch_312
#df31 = pd.concat([batch_311,batch_312], ignore_index=True)
#atch_321=df1[9750:10000]
#batch_322=df2[9750:10000]
#df32 = pd.concat([batch_321,batch_322], ignore_index=True)
# + [markdown] id="-e0njZ3G85WM"
# #**Mozilla**
# + id="hcQ0Nxd75rg8"
#training
batch_31=df1[:500]
batch_32=df2[:500]
df3 = pd.concat([batch_31,batch_32], ignore_index=True)
batch_41=df1[500:1000]
batch_42=df2[500:1000]
df4 = pd.concat([batch_41,batch_42], ignore_index=True)
batch_51=df1[1000:1500]
batch_52=df2[1000:1500]
df5 = pd.concat([batch_51,batch_52], ignore_index=True)
batch_61=df1[1500:2000]
batch_62=df2[1500:2000]
df6 = pd.concat([batch_61,batch_62], ignore_index=True)
batch_71=df1[2000:2500]
batch_72=df2[2000:2500]
df7 = pd.concat([batch_71,batch_72], ignore_index=True)
batch_81=df1[2500:3000]
batch_82=df2[2500:3000]
df8 = pd.concat([batch_81,batch_82], ignore_index=True)
batch_91=df1[3000:3500]
batch_92=df2[3000:3500]
df9 = pd.concat([batch_91,batch_92], ignore_index=True)
batch_101=df1[3500:4000]
batch_102=df2[3500:4000]
df10 = pd.concat([batch_101,batch_102], ignore_index=True)
batch_111=df1[4000:4500]
batch_112=df2[4000:4500]
df11 = pd.concat([batch_111,batch_112], ignore_index=True)
batch_121=df1[4500:5000]
batch_122=df2[4500:5000]
df12 = pd.concat([batch_121,batch_122], ignore_index=True)
batch_131=df1[5000:5250]
batch_132=df2[5000:5250]
df13 = pd.concat([batch_131,batch_132], ignore_index=True)
batch_141=df1[5250:5500]
batch_142=df2[5250:5500]
df14 = pd.concat([batch_141,batch_142], ignore_index=True)
batch_151=df1[5500:5750]
batch_152=df2[5500:5750]
df15 = pd.concat([batch_151,batch_152], ignore_index=True)
batch_161=df1[5750:6000]
batch_162=df2[5750:6000]
df16 = pd.concat([batch_161,batch_162], ignore_index=True)
batch_171=df1[6000:6250]
batch_172=df2[6000:6250]
df17 = pd.concat([batch_171,batch_172], ignore_index=True)
batch_181=df1[6250:6500]
batch_182=df2[6250:6500]
df18 = pd.concat([batch_181,batch_182], ignore_index=True)
batch_191=df1[6500:6750]
batch_192=df2[6500:6750]
df19 = pd.concat([batch_191,batch_192], ignore_index=True)
batch_201=df1[6750:7000]
batch_202=df2[6750:7000]
df20 = pd.concat([batch_201,batch_202], ignore_index=True)
batch_211=df1[7000:7250]
batch_212=df2[7000:7250]
df21 = pd.concat([batch_211,batch_212], ignore_index=True)
batch_221=df1[7250:7400]
batch_222=df2[7250:7400]
df22 = pd.concat([batch_221,batch_222], ignore_index=True)
batch_231=df1[7400:7600]
batch_232=df2[7400:7600]
df23 = pd.concat([batch_231,batch_232], ignore_index=True)
#testing : df1: D: 9044 / df2: Non: 9956
batch_241=df1[7600:7900]
batch_242=df2[7600:7900]
df24 = pd.concat([batch_241,batch_242], ignore_index=True)
batch_251=df1[7900:8250]
batch_252=df2[7900:8250]
df25 = pd.concat([batch_251,batch_252], ignore_index=True)
batch_261=df1[8250:8500]
batch_262=df2[8250:8500]
#df26=batch_262
df26 = pd.concat([batch_261,batch_262], ignore_index=True)
batch_271=df1[8500:8750]
batch_272=df2[8500:8750]
#df27= batch_272
df27 = pd.concat([batch_271,batch_272], ignore_index=True)
batch_281=df1[8750:9044]
batch_282=df2[8750:9000]
#df28= batch_282
df28 = pd.concat([batch_281,batch_282], ignore_index=True)
#batch_291=df1[9000:9500]
batch_292=df2[9000:9500]
df29= batch_292
#df29 = pd.concat([batch_291,batch_292], ignore_index=True)
#batch_301=df1[9250:9500]
batch_302=df2[9500:9956]
df30 = batch_302
#df30 = pd.concat([batch_301,batch_302], ignore_index=True)
#batch_311=df1[9500:9750]
#batch_312=df2[9900:10103]
#df31= batch_312
#df31 = pd.concat([batch_311,batch_312], ignore_index=True)
#atch_321=df1[9750:10000]
#batch_322=df2[9750:10000]
#df32 = pd.concat([batch_321,batch_322], ignore_index=True)
# + [markdown] id="9CicZVkjA6bK"
# #**Batch CrossProject**
# + id="sIP44sDnA-nX" colab={"base_uri": "https://localhost:8080/", "height": 231} executionInfo={"status": "error", "timestamp": 1616604422990, "user_tz": -60, "elapsed": 910, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="74e1ba73-9467-463e-9667-e083d090484e"
batch_31=df1[:500]
batch_32=df2[:500]
df3 = pd.concat([batch_31,batch_32], ignore_index=True)
batch_41=df1[500:1000]
batch_42=df2[500:1000]
df4 = pd.concat([batch_41,batch_42], ignore_index=True)
batch_51=df1[1000:1500]
batch_52=df2[1000:1500]
df5 = pd.concat([batch_51,batch_52], ignore_index=True)
batch_61=df1[1500:2000]
batch_62=df2[1500:2000]
df6 = pd.concat([batch_61,batch_62], ignore_index=True)
batch_71=df1[2000:2500]
batch_72=df2[2000:2500]
df7 = pd.concat([batch_71,batch_72], ignore_index=True)
batch_81=df1[2500:3000]
batch_82=df2[2500:3000]
df8 = pd.concat([batch_81,batch_82], ignore_index=True)
batch_91=df1[3000:3500]
batch_92=df2[3000:3500]
df9 = pd.concat([batch_91,batch_92], ignore_index=True)
batch_101=df1[3500:4000]
batch_102=df2[3500:4000]
df10 = pd.concat([batch_101,batch_102], ignore_index=True)
batch_111=df1[4000:4500]
batch_112=df2[4000:4500]
df11 = pd.concat([batch_111,batch_112], ignore_index=True)
batch_121=df1[4500:5000]
batch_122=df2[4500:5000]
df12 = pd.concat([batch_121,batch_122], ignore_index=True)
batch_311=df1[5000:5250]
batch_312=df2[5000:5250]
df31 = pd.concat([batch_311,batch_312], ignore_index=True)
batch_321=df1[5250:5500]
batch_322=df2[5250:5500]
df32 = pd.concat([batch_321,batch_322], ignore_index=True)
batch_131=Tdf1[:250]
batch_132=Tdf2[:250]
df13 = pd.concat([batch_131,batch_132], ignore_index=True)
batch_141=Tdf1[250:500]
batch_142=Tdf2[250:500]
df14 = pd.concat([batch_141,batch_142], ignore_index=True)
batch_151=Tdf1[500:750]
batch_152=Tdf2[500:750]
df15 = pd.concat([batch_151,batch_152], ignore_index=True)
batch_161=Tdf1[750:1000]
batch_162=Tdf2[750:1000]
df16 = pd.concat([batch_161,batch_162], ignore_index=True)
batch_171=Tdf1[1000:1250]
batch_172=Tdf2[1000:1250]
df17 = pd.concat([batch_171,batch_172], ignore_index=True)
batch_181=Tdf1[1250:1500]
batch_182=Tdf2[1250:1500]
df18 = pd.concat([batch_181,batch_182], ignore_index=True)
batch_191=Tdf1[1500:1750]
batch_192=Tdf2[1500:1750]
df19 = pd.concat([batch_191,batch_192], ignore_index=True)
batch_201=Tdf1[1750:2000]
batch_202=Tdf2[1750:2000]
df20 = pd.concat([batch_201,batch_202], ignore_index=True)
batch_211=Tdf1[2000:2250]
batch_212=Tdf2[2000:2250]
df21 = pd.concat([batch_211,batch_212], ignore_index=True)
batch_221=Tdf1[2250:2500]
batch_222=Tdf2[2250:2500]
df22 = pd.concat([batch_221,batch_222], ignore_index=True)
batch_231=Tdf1[2500:2750]
batch_232=Tdf2[2500:2750]
df23 = pd.concat([batch_231,batch_232], ignore_index=True)
batch_241=Tdf1[2750:3000]
batch_242=Tdf2[2750:3000]
df24 = pd.concat([batch_241,batch_242], ignore_index=True)
batch_251=Tdf1[3000:3250]
batch_252=Tdf2[3000:3250]
df25 = pd.concat([batch_251,batch_252], ignore_index=True)
batch_261=Tdf1[3250:3500]
batch_262=Tdf2[3250:3500]
df26 = pd.concat([batch_261,batch_262], ignore_index=True)
batch_271=Tdf1[3500:3750]
batch_272=Tdf2[3500:3750]
df27 = pd.concat([batch_271,batch_272], ignore_index=True)
batch_281=Tdf1[3750:4000]
batch_282=Tdf2[3750:4000]
df28 = pd.concat([batch_281,batch_282], ignore_index=True)
batch_291=Tdf1[4000:4250]
batch_292=Tdf2[4000:4250]
df29 = pd.concat([batch_291,batch_292], ignore_index=True)
batch_301=Tdf1[4250:4500]
batch_302=Tdf2[4250:4500]
df30 = pd.concat([batch_301,batch_302], ignore_index=True)
# + [markdown] id="aTcVvZLxicB_"
# ### **_get_segments3**
# + id="XCoiTguWgxeg" executionInfo={"status": "ok", "timestamp": 1616886249128, "user_tz": -60, "elapsed": 11043, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
def _get_segments3(tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
if len(tokens)>max_seq_length:
raise IndexError("Token length more than max seq length!")
segments = []
first_sep = False
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
#print(token)
if token == 102:
#if first_sep:
#first_sep = False
#else:
current_segment_id = 1
return segments + [0] * (max_seq_length - len(tokens))
# + [markdown] id="gOFGJdemfgnG"
# #**df3**
# + id="JtiWncHWC6nI" executionInfo={"status": "ok", "timestamp": 1616886259161, "user_tz": -60, "elapsed": 8084, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# df3 pipeline: build "Title1+Desc1 [SEP] Title2+Desc2" strings, tokenize,
# pad/mask to a fixed 300 tokens, run BERT, and keep the [CLS] vectors.
# NOTE(review): the [" [SEP] "] literal is a 1-element list broadcast
# element-wise by pandas Series `+` — confirm it inserts the separator
# into every row as intended.
pair3= df3['Title1'] + df3['Description1']+ [" [SEP] "] + df3['Title2'] + df3['Description2']
# Tokenize each pair; truncation caps every row at 300 ids.
tokenized3 = pair3.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
# + id="YOy32MU0gZ6c" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886259162, "user_tz": -60, "elapsed": 6485, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="ed31ba82-534c-44eb-b60b-868a9008a1ba"
max_len3 = 0 # padding all lists to the same size
for i in tokenized3.values:
    if len(i) > max_len3:
        max_len3 = len(i)
# The scanned maximum above is immediately discarded: a fixed width of 300
# is forced so every batch produces tensors of the same shape.
max_len3 =300
# Right-pad every token list with 0 (BERT's [PAD] id) to max_len3.
padded3 = np.array([i + [0]*(max_len3-len(i)) for i in tokenized3.values])
np.array(padded3).shape
# + id="aZSPfF7rglgG" executionInfo={"status": "ok", "timestamp": 1616886259163, "user_tz": -60, "elapsed": 4420, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# Attention mask: 1 on real tokens, 0 on padding.
attention_mask3 = np.where(padded3 != 0, 1, 0)
attention_mask3.shape
input_ids3 = torch.tensor(padded3)
attention_mask3 = torch.tensor(attention_mask3)
# Segment ids from _get_segments3; token_type_ids3 and input_segments3 are
# two tensors built from the same numpy array (only input_segments3 is fed
# to the model below).
input_segments3= np.array([_get_segments3(token, max_len3)for token in tokenized3.values])
token_type_ids3 = torch.tensor(input_segments3)
input_segments3 = torch.tensor(input_segments3)
# + id="1Nulz336hBcB" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886329549, "user_tz": -60, "elapsed": 73117, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="1e52a3af-27d1-4dfa-c5ea-0a24be420456"
# Inference only — no gradients needed.
with torch.no_grad():
    last_hidden_states3 = model(input_ids3, attention_mask=attention_mask3, token_type_ids=input_segments3) # <<< 600 rows only !!!
# [CLS] embedding (position 0 of the last hidden state) for every row.
features3 = last_hidden_states3[0][:,0,:].numpy()
features3
# + [markdown] id="11le9CiUVDCO"
# #**df4**
# + id="-VsXGOj8IaZy" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886402179, "user_tz": -60, "elapsed": 143462, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="dedda18f-175e-43f5-b81d-b8806da30fea"
# df4: same tokenize -> pad/mask -> BERT -> [CLS]-features pipeline as df3.
pair4=df4['Title1'] + df4['Description1']+ [" [SEP] "] + df4['Title2'] + df4['Description2']
tokenized4 = pair4.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len4 = 0 # padding all lists to the same size
for i in tokenized4.values:
    if len(i) > max_len4:
        max_len4 = len(i)
# Computed maximum is discarded; fixed width of 300 keeps shapes uniform.
max_len4 =300
padded4 = np.array([i + [0]*(max_len4-len(i)) for i in tokenized4.values])
np.array(padded4).shape
attention_mask4 = np.where(padded4 != 0, 1, 0)
attention_mask4.shape
input_ids4 = torch.tensor(padded4)
attention_mask4 = torch.tensor(attention_mask4)
input_segments4= np.array([_get_segments3(token, max_len4)for token in tokenized4.values])
token_type_ids4 = torch.tensor(input_segments4)
input_segments4 = torch.tensor(input_segments4)
with torch.no_grad():
    last_hidden_states4 = model(input_ids4, attention_mask=attention_mask4, token_type_ids=input_segments4)
features4 = last_hidden_states4[0][:,0,:].numpy()
features4
# + [markdown] id="udmOsWumVKOa"
# #**df5**
# + id="glQ0YXKhzyXZ" executionInfo={"status": "ok", "timestamp": 1616886408082, "user_tz": -60, "elapsed": 144905, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# df5: same pipeline as df3/df4.
pair5=df5['Title1'] + df5['Description1']+ [" [SEP] "] + df5['Title2'] + df5['Description2']
tokenized5 = pair5.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
# + [markdown] id="QvzhBBP081nL"
# ##**Padding**
# + id="lR8b3SBtz6HA" executionInfo={"status": "ok", "timestamp": 1616886408085, "user_tz": -60, "elapsed": 141771, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
max_len5 = 0 # padding all lists to the same size
for i in tokenized5.values:
    if len(i) > max_len5:
        max_len5 = len(i)
# + id="OppURTFyz-pp" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886408086, "user_tz": -60, "elapsed": 139761, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="e70e405c-d3fa-402b-a6e2-964e806ee8ce"
# Computed maximum is discarded; fixed width of 300 keeps shapes uniform.
max_len5 =300
padded5 = np.array([i + [0]*(max_len5-len(i)) for i in tokenized5.values])
np.array(padded5).shape # Dimensions of the padded variable
# + [markdown] id="sOZ4VGE99GuU"
# ##**Masking**
# + id="c9j3ZYcZ0BGt" executionInfo={"status": "ok", "timestamp": 1616886408087, "user_tz": -60, "elapsed": 137747, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
attention_mask5 = np.where(padded5 != 0, 1, 0)
attention_mask5.shape
input_ids5 = torch.tensor(padded5)
attention_mask5 = torch.tensor(attention_mask5)
# + [markdown] id="zqZuEvAPElvT"
# ##**Running the `model()` function through BERT**
# + id="BijBFjIs581u" executionInfo={"status": "ok", "timestamp": 1616886408087, "user_tz": -60, "elapsed": 135875, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
input_segments5= np.array([_get_segments3(token, max_len5)for token in tokenized5.values])
token_type_ids5 = torch.tensor(input_segments5)
input_segments5 = torch.tensor(input_segments5)
# + id="pd5IRqMXiMGm" executionInfo={"status": "ok", "timestamp": 1616886473194, "user_tz": -60, "elapsed": 199300, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
with torch.no_grad():
    last_hidden_states5 = model(input_ids5, attention_mask=attention_mask5, token_type_ids=input_segments5) # <<< 600 rows only !!!
# + [markdown] id="mlP88s-1FPZW"
# ##**Slicing the part of the output of BERT : [cls]**
# + id="iag0CM1N0Xhk" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886473196, "user_tz": -60, "elapsed": 196278, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="b7f68876-cc79-4b1b-e7d2-b95079324509"
features5 = last_hidden_states5[0][:,0,:].numpy()
features5
# + [markdown] id="vCmfgIy9frZR"
# #**df6**
# + id="CRnTQdzMFJ6I" executionInfo={"status": "ok", "timestamp": 1616886478863, "user_tz": -60, "elapsed": 199650, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# df6: same pipeline as df3.
pair6=df6['Title1'] + df6['Description1']+ [" [SEP] "] + df6['Title2'] + df6['Description2']
tokenized6 = pair6.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len6 = 0 # padding all lists to the same size
for i in tokenized6.values:
    if len(i) > max_len6:
        max_len6 = len(i)
max_len6=300
padded6 = np.array([i + [0]*(max_len6-len(i)) for i in tokenized6.values])
np.array(padded6).shape # Dimensions of the padded variable
attention_mask6 = np.where(padded6 != 0, 1, 0)
attention_mask6.shape
input_ids6 = torch.tensor(padded6)
attention_mask6 = torch.tensor(attention_mask6)
input_segments6= np.array([_get_segments3(token, max_len6)for token in tokenized6.values])
token_type_ids6 = torch.tensor(input_segments6)
input_segments6 = torch.tensor(input_segments6)
# + id="ggFLgBpQjCn2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886541937, "user_tz": -60, "elapsed": 262711, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="bb521a01-1e9e-415d-ceef-3da9cd05e2d3"
with torch.no_grad():
    last_hidden_states6 = model(input_ids6, attention_mask=attention_mask6, token_type_ids=input_segments6)
features6 = last_hidden_states6[0][:,0,:].numpy()
features6
# + [markdown] id="Mj5OSBQMf4-d"
# #**df7**
# + id="VWIWi1oAGaAO" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886628872, "user_tz": -60, "elapsed": 347202, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="c0739507-c730-49b7-d02d-5e431264a389"
# df7: same tokenize -> pad/mask -> BERT -> [CLS]-features pipeline as df3.
pair7=df7['Title1'] + df7['Description1']+ [" [SEP] "] + df7['Title2'] + df7['Description2']
tokenized7 = pair7.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len7 = 0 # padding all lists to the same size
for i in tokenized7.values:
    if len(i) > max_len7:
        max_len7 = len(i)
max_len7=300
padded7 = np.array([i + [0]*(max_len7-len(i)) for i in tokenized7.values])
np.array(padded7).shape # Dimensions of the padded variable
attention_mask7 = np.where(padded7 != 0, 1, 0)
attention_mask7.shape
input_ids7 = torch.tensor(padded7)
attention_mask7 = torch.tensor(attention_mask7)
input_segments7= np.array([_get_segments3(token, max_len7)for token in tokenized7.values])
token_type_ids7 = torch.tensor(input_segments7)
input_segments7 = torch.tensor(input_segments7)
with torch.no_grad():
    last_hidden_states7 = model(input_ids7, attention_mask=attention_mask7, token_type_ids=input_segments7)
features7 = last_hidden_states7[0][:,0,:].numpy()
features7
# + [markdown] id="dJ3RvBPQf7fT"
# #**df8**
# + id="5VoQeXl4HEjN" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886694336, "user_tz": -60, "elapsed": 410617, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="88132907-265d-46e3-e570-f9fa6d468c0b"
# df8: same pipeline as df3.
pair8=df8['Title1'] + df8['Description1']+ [" [SEP] "] + df8['Title2'] + df8['Description2']
tokenized8 = pair8.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len8 = 0 # padding all lists to the same size
for i in tokenized8.values:
    if len(i) > max_len8:
        max_len8 = len(i)
max_len8=300
padded8 = np.array([i + [0]*(max_len8-len(i)) for i in tokenized8.values])
np.array(padded8).shape # Dimensions of the padded variable
attention_mask8 = np.where(padded8 != 0, 1, 0)
attention_mask8.shape
input_ids8 = torch.tensor(padded8)
attention_mask8 = torch.tensor(attention_mask8)
input_segments8= np.array([_get_segments3(token, max_len8)for token in tokenized8.values])
token_type_ids8 = torch.tensor(input_segments8)
input_segments8 = torch.tensor(input_segments8)
with torch.no_grad():
    last_hidden_states8 = model(input_ids8, attention_mask=attention_mask8, token_type_ids=input_segments8)
features8 = last_hidden_states8[0][:,0,:].numpy()
features8
# + [markdown] id="rNXTxV1l0pIJ"
# #**df9**
# + id="sk-H7hcUH7cg" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886759047, "user_tz": -60, "elapsed": 472517, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="0c9a2e41-80b3-4859-a77c-4a06d9927fb5"
# df9: same pipeline as df3.
pair9=df9['Title1'] + df9['Description1']+ [" [SEP] "] + df9['Title2'] + df9['Description2']
tokenized9 = pair9.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len9 = 0 # padding all lists to the same size
for i in tokenized9.values:
    if len(i) > max_len9:
        max_len9 = len(i)
max_len9=300
padded9 = np.array([i + [0]*(max_len9-len(i)) for i in tokenized9.values])
np.array(padded9).shape # Dimensions of the padded variable
attention_mask9 = np.where(padded9 != 0, 1, 0)
attention_mask9.shape
input_ids9 = torch.tensor(padded9)
attention_mask9 = torch.tensor(attention_mask9)
input_segments9= np.array([_get_segments3(token, max_len9)for token in tokenized9.values])
token_type_ids9 = torch.tensor(input_segments9)
input_segments9 = torch.tensor(input_segments9)
with torch.no_grad():
    last_hidden_states9 = model(input_ids9, attention_mask=attention_mask9, token_type_ids=input_segments9)
features9 = last_hidden_states9[0][:,0,:].numpy()
features9
# + [markdown] id="0ok-IVZl0xID"
# #**df10**
# + id="bfnW68FxIlN1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886814946, "user_tz": -60, "elapsed": 524798, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="68538607-31aa-4ef9-8594-1c5e272a4314"
# df10: same pipeline as df3.
pair10=df10['Title1'] + df10['Description1']+ [" [SEP] "] + df10['Title2'] + df10['Description2']
tokenized10 = pair10.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len10 = 0 # padding all lists to the same size
for i in tokenized10.values:
    if len(i) > max_len10:
        max_len10 = len(i)
max_len10=300
padded10 = np.array([i + [0]*(max_len10-len(i)) for i in tokenized10.values])
np.array(padded10).shape # Dimensions of the padded variable
attention_mask10 = np.where(padded10 != 0, 1, 0)
attention_mask10.shape
input_ids10 = torch.tensor(padded10)
attention_mask10 = torch.tensor(attention_mask10)
input_segments10= np.array([_get_segments3(token, max_len10)for token in tokenized10.values])
token_type_ids10 = torch.tensor(input_segments10)
input_segments10 = torch.tensor(input_segments10)
with torch.no_grad():
    last_hidden_states10 = model(input_ids10, attention_mask=attention_mask10, token_type_ids=input_segments10)
features10 = last_hidden_states10[0][:,0,:].numpy()
features10
# + [markdown] id="dJ_ER9DhCQGj"
# #**df11**
# + id="kmG3KS5TJffr" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886877255, "user_tz": -60, "elapsed": 139661, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="d5e1c64c-1159-4eff-998e-4c4a33399caa"
# df11: same tokenize -> pad/mask -> BERT -> [CLS]-features pipeline as df3.
pair11=df11['Title1'] + df11['Description1']+ [" [SEP] "] + df11['Title2'] + df11['Description2']
tokenized11 = pair11.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len11 = 0 # padding all lists to the same size
for i in tokenized11.values:
    if len(i) > max_len11:
        max_len11 = len(i)
max_len11=300
padded11 = np.array([i + [0]*(max_len11-len(i)) for i in tokenized11.values])
np.array(padded11).shape # Dimensions of the padded variable
attention_mask11 = np.where(padded11 != 0, 1, 0)
attention_mask11.shape
input_ids11 = torch.tensor(padded11)
attention_mask11 = torch.tensor(attention_mask11)
input_segments11= np.array([_get_segments3(token, max_len11)for token in tokenized11.values])
token_type_ids11 = torch.tensor(input_segments11)
input_segments11 = torch.tensor(input_segments11)
with torch.no_grad():
    last_hidden_states11 = model(input_ids11, attention_mask=attention_mask11, token_type_ids=input_segments11)
features11 = last_hidden_states11[0][:,0,:].numpy()
features11
# + [markdown] id="Akm6P3kv5_8j"
# #**df12**
# + id="2pyD5tXKKU0s" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712509162, "user_tz": -60, "elapsed": 658765, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="c6f2e68c-b30c-41b9-e678-753acafb37c1"
# df12: same pipeline as df3.
pair12=df12['Title1'] + df12['Description1']+ [" [SEP] "] + df12['Title2'] + df12['Description2']
tokenized12 = pair12.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len12 = 0 # padding all lists to the same size
for i in tokenized12.values:
    if len(i) > max_len12:
        max_len12 = len(i)
max_len12=300
padded12 = np.array([i + [0]*(max_len12-len(i)) for i in tokenized12.values])
np.array(padded12).shape # Dimensions of the padded variable
attention_mask12 = np.where(padded12 != 0, 1, 0)
attention_mask12.shape
input_ids12 = torch.tensor(padded12)
attention_mask12 = torch.tensor(attention_mask12)
input_segments12= np.array([_get_segments3(token, max_len12)for token in tokenized12.values])
token_type_ids12 = torch.tensor(input_segments12)
input_segments12 = torch.tensor(input_segments12)
with torch.no_grad():
    last_hidden_states12 = model(input_ids12, attention_mask=attention_mask12, token_type_ids=input_segments12)
features12 = last_hidden_states12[0][:,0,:].numpy()
features12
# + [markdown] id="PIb7HOBxEyVz"
# #**df13**
# + id="9tc3z0e4RWpi"
#df13=df2[5500:6000]
# + id="kMl3VKD7Fhaq" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712544999, "user_tz": -60, "elapsed": 692145, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="9ff4e74e-5fd1-4ee0-8226-5dae22bc65dd"
#pair13= df13['Title1'] + [" [SEP] "] + df13['Title2']
# df13: same pipeline as df3. NOTE(review): df13 itself is defined outside
# this chunk (the commented slice above hints at df2[5500:6000]) — verify.
pair13=df13['Title1'] + df13['Description1']+ [" [SEP] "] + df13['Title2'] + df13['Description2']
tokenized13 = pair13.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len13 = 0 # padding all lists to the same size
for i in tokenized13.values:
    if len(i) > max_len13:
        max_len13 = len(i)
max_len13=300
padded13 = np.array([i + [0]*(max_len13-len(i)) for i in tokenized13.values])
np.array(padded13).shape # Dimensions of the padded variable
attention_mask13 = np.where(padded13 != 0, 1, 0)
attention_mask13.shape
input_ids13 = torch.tensor(padded13)
attention_mask13 = torch.tensor(attention_mask13)
input_segments13= np.array([_get_segments3(token, max_len13)for token in tokenized13.values])
token_type_ids13 = torch.tensor(input_segments13)
input_segments13 = torch.tensor(input_segments13)
with torch.no_grad():
    last_hidden_states13 = model(input_ids13, attention_mask=attention_mask13, token_type_ids=input_segments13)
features13 = last_hidden_states13[0][:,0,:].numpy()
features13
# + [markdown] id="rrURrafXSSHX"
# #**df14**
# + id="cqNWaeC7SVOW" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712582454, "user_tz": -60, "elapsed": 727259, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="7e0c28cb-e5dd-4f55-e066-21a49071ab19"
#pair14= df14['Title1'] + [" [SEP] "] + df14['Title2']
# df14: same tokenize -> pad/mask -> BERT -> [CLS]-features pipeline as df3.
pair14=df14['Title1'] + df14['Description1']+ [" [SEP] "] + df14['Title2'] + df14['Description2']
tokenized14 = pair14.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len14 = 0 # padding all lists to the same size
for i in tokenized14.values:
    if len(i) > max_len14:
        max_len14 = len(i)
max_len14=300
padded14 = np.array([i + [0]*(max_len14-len(i)) for i in tokenized14.values])
np.array(padded14).shape
attention_mask14 = np.where(padded14 != 0, 1, 0)
attention_mask14.shape
input_ids14 = torch.tensor(padded14)
attention_mask14 = torch.tensor(attention_mask14)
input_segments14= np.array([_get_segments3(token, max_len14)for token in tokenized14.values])
token_type_ids14 = torch.tensor(input_segments14)
input_segments14 = torch.tensor(input_segments14)
with torch.no_grad():
    last_hidden_states14 = model(input_ids14, attention_mask=attention_mask14, token_type_ids=input_segments14)
features14 = last_hidden_states14[0][:,0,:].numpy()
features14
# + [markdown] id="IXx11g-u69uK"
# #**df15**
# + id="RJWqIhRR6_LJ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712619536, "user_tz": -60, "elapsed": 759590, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="42725f03-0d14-4dc8-d9ff-9205809de368"
#pair15= df15['Title1'] + [" [SEP] "] + df15['Title2']
# df15: same pipeline as df3.
pair15=df15['Title1'] + df15['Description1']+ [" [SEP] "] + df15['Title2'] + df15['Description2']
tokenized15 = pair15.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len15 = 0 # padding all lists to the same size
for i in tokenized15.values:
    if len(i) > max_len15:
        max_len15 = len(i)
max_len15=300
padded15 = np.array([i + [0]*(max_len15-len(i)) for i in tokenized15.values])
np.array(padded15).shape
attention_mask15 = np.where(padded15 != 0, 1, 0)
attention_mask15.shape
input_ids15 = torch.tensor(padded15)
attention_mask15 = torch.tensor(attention_mask15)
input_segments15= np.array([_get_segments3(token, max_len15)for token in tokenized15.values])
token_type_ids15 = torch.tensor(input_segments15)
input_segments15 = torch.tensor(input_segments15)
with torch.no_grad():
    last_hidden_states15 = model(input_ids15, attention_mask=attention_mask15, token_type_ids=input_segments15)
features15 = last_hidden_states15[0][:,0,:].numpy()
features15
# + [markdown] id="00u5IAxw_sev"
# #**df16**
# + id="uayZ5uUx_vIP" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712660724, "user_tz": -60, "elapsed": 422206, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="159f57c0-4b65-4c51-eaf4-45168f46ee56"
#pair16= df16['Title1'] + [" [SEP] "] + df16['Title2']
#df16=df2[7000:7500]
# df16: same pipeline as df3.
pair16=df16['Title1'] + df16['Description1']+ [" [SEP] "] + df16['Title2'] + df16['Description2']
tokenized16 = pair16.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len16 = 0 # padding all lists to the same size
for i in tokenized16.values:
    if len(i) > max_len16:
        max_len16 = len(i)
max_len16=300
padded16 = np.array([i + [0]*(max_len16-len(i)) for i in tokenized16.values])
np.array(padded16).shape
attention_mask16 = np.where(padded16 != 0, 1, 0)
attention_mask16.shape
input_ids16 = torch.tensor(padded16)
attention_mask16 = torch.tensor(attention_mask16)
input_segments16= np.array([_get_segments3(token, max_len16)for token in tokenized16.values])
token_type_ids16 = torch.tensor(input_segments16)
input_segments16 = torch.tensor(input_segments16)
with torch.no_grad():
    last_hidden_states16 = model(input_ids16, attention_mask=attention_mask16, token_type_ids=input_segments16)
features16 = last_hidden_states16[0][:,0,:].numpy()
features16
# + [markdown] id="jfq3WVjwLee9"
# #**df17**
# + id="BwAuoQlaLion" colab={"base_uri": "https://localhost:8080/", "height": 231} executionInfo={"status": "error", "timestamp": 1616773397089, "user_tz": -60, "elapsed": 45802, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="f3392aa1-d189-49bb-d51b-dcabda2341ed"
#df17=df2[7500:8000]
#pair17= df17['Title1'] + [" [SEP] "] + df17['Title2']
# df17: same tokenize -> pad/mask -> BERT -> [CLS]-features pipeline as df3.
# NOTE(review): this cell's recorded executionInfo status is "error" —
# confirm df17 is defined before running.
pair17=df17['Title1'] + df17['Description1']+ [" [SEP] "] + df17['Title2'] + df17['Description2']
tokenized17 = pair17.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len17 = 0 # padding all lists to the same size
for i in tokenized17.values:
    if len(i) > max_len17:
        max_len17 = len(i)
max_len17=300
padded17 = np.array([i + [0]*(max_len17-len(i)) for i in tokenized17.values])
np.array(padded17).shape
attention_mask17 = np.where(padded17 != 0, 1, 0)
attention_mask17.shape
input_ids17 = torch.tensor(padded17)
attention_mask17 = torch.tensor(attention_mask17)
input_segments17= np.array([_get_segments3(token, max_len17)for token in tokenized17.values])
token_type_ids17 = torch.tensor(input_segments17)
input_segments17 = torch.tensor(input_segments17)
with torch.no_grad():
    last_hidden_states17 = model(input_ids17, attention_mask=attention_mask17, token_type_ids=input_segments17)
features17 = last_hidden_states17[0][:,0,:].numpy()
features17
# + [markdown] id="2Fp-Cbw_e2TW"
# #**df18**
# + id="RG2dmIavgBVQ"
#pair18= df18['Title1'] + [" [SEP] "] + df18['Title2']
# df18: same pipeline as df3.
pair18=df18['Title1'] + df18['Description1']+ [" [SEP] "] + df18['Title2'] + df18['Description2']
tokenized18 = pair18.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len18 = 0 # padding all lists to the same size
for i in tokenized18.values:
    if len(i) > max_len18:
        max_len18 = len(i)
max_len18=300
padded18 = np.array([i + [0]*(max_len18-len(i)) for i in tokenized18.values])
np.array(padded18).shape
attention_mask18 = np.where(padded18 != 0, 1, 0)
attention_mask18.shape
input_ids18 = torch.tensor(padded18)
attention_mask18 = torch.tensor(attention_mask18)
input_segments18= np.array([_get_segments3(token, max_len18)for token in tokenized18.values])
token_type_ids18 = torch.tensor(input_segments18)
input_segments18 = torch.tensor(input_segments18)
with torch.no_grad():
    last_hidden_states18 = model(input_ids18, attention_mask=attention_mask18, token_type_ids=input_segments18)
features18 = last_hidden_states18[0][:,0,:].numpy()
features18
# + [markdown] id="iCIVAvMVe5eO"
# #**df19**
# + id="kIv9C1kygDC9"
#pair19= df19['Title1'] + [" [SEP] "] + df19['Title2']
# df19: same pipeline as df3.
pair19=df19['Title1'] + df19['Description1']+ [" [SEP] "] + df19['Title2'] + df19['Description2']
tokenized19 = pair19.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len19 = 0 # padding all lists to the same size
for i in tokenized19.values:
    if len(i) > max_len19:
        max_len19 = len(i)
max_len19=300
padded19 = np.array([i + [0]*(max_len19-len(i)) for i in tokenized19.values])
np.array(padded19).shape
attention_mask19 = np.where(padded19 != 0, 1, 0)
attention_mask19.shape
input_ids19 = torch.tensor(padded19)
attention_mask19 = torch.tensor(attention_mask19)
input_segments19= np.array([_get_segments3(token, max_len19)for token in tokenized19.values])
token_type_ids19 = torch.tensor(input_segments19)
input_segments19 = torch.tensor(input_segments19)
with torch.no_grad():
    last_hidden_states19 = model(input_ids19, attention_mask=attention_mask19, token_type_ids=input_segments19)
features19 = last_hidden_states19[0][:,0,:].numpy()
features19
# + [markdown] id="abr35-RAe8dV"
# #**df20**
# + id="aLeD6Gyuf8KD"
# df20: same tokenize -> pad/mask -> BERT -> [CLS]-features pipeline as df3.
pair20=df20['Title1'] + df20['Description1']+ [" [SEP] "] + df20['Title2'] + df20['Description2']
tokenized20 = pair20.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len20 = 0 # padding all lists to the same size
for i in tokenized20.values:
    if len(i) > max_len20:
        max_len20 = len(i)
max_len20=300
padded20 = np.array([i + [0]*(max_len20-len(i)) for i in tokenized20.values])
np.array(padded20).shape
attention_mask20 = np.where(padded20 != 0, 1, 0)
attention_mask20.shape
input_ids20 = torch.tensor(padded20)
attention_mask20 = torch.tensor(attention_mask20)
input_segments20= np.array([_get_segments3(token, max_len20)for token in tokenized20.values])
token_type_ids20 = torch.tensor(input_segments20)
input_segments20 = torch.tensor(input_segments20)
with torch.no_grad():
    last_hidden_states20 = model(input_ids20, attention_mask=attention_mask20, token_type_ids=input_segments20)
features20 = last_hidden_states20[0][:,0,:].numpy()
features20
# + [markdown] id="OfUSgZhjfI-A"
# #**df21**
# + id="w0_IhvLof9c5" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712899976, "user_tz": -60, "elapsed": 119368, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="d5495a2d-81ca-4ee9-bc0b-965a4c86c863"
# df21: same pipeline as df3.
pair21=df21['Title1'] + df21['Description1']+ [" [SEP] "] + df21['Title2'] + df21['Description2']
tokenized21 = pair21.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len21 = 0 # padding all lists to the same size
for i in tokenized21.values:
    if len(i) > max_len21:
        max_len21 = len(i)
max_len21=300
padded21 = np.array([i + [0]*(max_len21-len(i)) for i in tokenized21.values])
np.array(padded21).shape
attention_mask21 = np.where(padded21 != 0, 1, 0)
attention_mask21.shape
input_ids21 = torch.tensor(padded21)
attention_mask21 = torch.tensor(attention_mask21)
input_segments21= np.array([_get_segments3(token, max_len21)for token in tokenized21.values])
token_type_ids21 = torch.tensor(input_segments21)
input_segments21 = torch.tensor(input_segments21)
with torch.no_grad():
    last_hidden_states21 = model(input_ids21, attention_mask=attention_mask21, token_type_ids=input_segments21)
features21 = last_hidden_states21[0][:,0,:].numpy()
features21
# + [markdown] id="zr_YIvreub21"
# #**df22**
# + id="IYWTfJkpvYhH" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712924732, "user_tz": -60, "elapsed": 141128, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="379ce00e-0b7b-4246-c1d7-6527a35b621b"
# df22: same pipeline as df3.
pair22=df22['Title1'] + df22['Description1']+ [" [SEP] "] + df22['Title2'] + df22['Description2']
tokenized22 = pair22.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len22 = 0 # padding all lists to the same size
for i in tokenized22.values:
    if len(i) > max_len22:
        max_len22 = len(i)
max_len22=300
padded22 = np.array([i + [0]*(max_len22-len(i)) for i in tokenized22.values])
np.array(padded22).shape
attention_mask22 = np.where(padded22 != 0, 1, 0)
attention_mask22.shape
input_ids22 = torch.tensor(padded22)
attention_mask22 = torch.tensor(attention_mask22)
input_segments22= np.array([_get_segments3(token, max_len22)for token in tokenized22.values])
token_type_ids22 = torch.tensor(input_segments22)
input_segments22 = torch.tensor(input_segments22)
with torch.no_grad():
    last_hidden_states22 = model(input_ids22, attention_mask=attention_mask22, token_type_ids=input_segments22)
features22 = last_hidden_states22[0][:,0,:].numpy()
features22
# + [markdown] id="WsasMBu0ud9R"
# #**df23**
# + id="ciD4eUC0vZ0d" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616712958604, "user_tz": -60, "elapsed": 172993, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="f07610a4-159d-430a-ea1e-8add6c71d07f"
pair23=df23['Title1'] + df23['Description1']+ [" [SEP] "] + df23['Title2'] + df23['Description2']
tokenized23 = pair23.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len23 = 0 # padding all lists to the same size
for i in tokenized23.values:
if len(i) > max_len23:
max_len23 = len(i)
max_len23=300
padded23 = np.array([i + [0]*(max_len23-len(i)) for i in tokenized23.values])
np.array(padded23).shape
attention_mask23 = np.where(padded23 != 0, 1, 0)
attention_mask23.shape
input_ids23 = torch.tensor(padded23)
attention_mask23 = torch.tensor(attention_mask23)
input_segments23= np.array([_get_segments3(token, max_len23)for token in tokenized23.values])
token_type_ids23 = torch.tensor(input_segments23)
input_segments23 = torch.tensor(input_segments23)
with torch.no_grad():
last_hidden_states23 = model(input_ids23, attention_mask=attention_mask23, token_type_ids=input_segments23)
features23 = last_hidden_states23[0][:,0,:].numpy()
features23
# + [markdown] id="Wh3F7eOvugt3"
# #**df24**
# + id="GzL-3zj2vbCz" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713003313, "user_tz": -60, "elapsed": 215166, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="18791a11-5dfe-412d-add1-ff744a6d6207"
pair24=df24['Title1'] + df24['Description1']+ [" [SEP] "] + df24['Title2'] + df24['Description2']
tokenized24 = pair24.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len24 = 0 # padding all lists to the same size
for i in tokenized24.values:
if len(i) > max_len24:
max_len24 = len(i)
max_len24=300
padded24 = np.array([i + [0]*(max_len24-len(i)) for i in tokenized24.values])
np.array(padded24).shape
attention_mask24 = np.where(padded24 != 0, 1, 0)
attention_mask24.shape
input_ids24 = torch.tensor(padded24)
attention_mask24 = torch.tensor(attention_mask24)
input_segments24= np.array([_get_segments3(token, max_len24)for token in tokenized24.values])
token_type_ids24 = torch.tensor(input_segments24)
input_segments24 = torch.tensor(input_segments24)
with torch.no_grad():
last_hidden_states24 = model(input_ids24, attention_mask=attention_mask24, token_type_ids=input_segments24)
features24 = last_hidden_states24[0][:,0,:].numpy()
features24
# + [markdown] id="yv0oIj3fujmJ"
# #**df25**
# + id="JDXWdzfOvFgq" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713061177, "user_tz": -60, "elapsed": 270893, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="7df40997-a460-43ea-c85f-7cc88c9efbcc"
# Encode df25's record pairs with BERT: "<Title1><Desc1> [SEP] <Title2><Desc2>".
pair25=df25['Title1'] + df25['Description1']+ [" [SEP] "] + df25['Title2'] + df25['Description2']
tokenized25 = pair25.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len25 = 0 # padding all lists to the same size
for i in tokenized25.values:
    if len(i) > max_len25:
        max_len25 = len(i)
# NOTE(review): dead loop -- max_len25 is forced to the tokenizer max_length here.
max_len25=300
padded25 = np.array([i + [0]*(max_len25-len(i)) for i in tokenized25.values])
np.array(padded25).shape
attention_mask25 = np.where(padded25 != 0, 1, 0)  # 1 on real tokens, 0 on padding
attention_mask25.shape
input_ids25 = torch.tensor(padded25)
attention_mask25 = torch.tensor(attention_mask25)
input_segments25= np.array([_get_segments3(token, max_len25)for token in tokenized25.values])
token_type_ids25 = torch.tensor(input_segments25)  # computed but not used below
input_segments25 = torch.tensor(input_segments25)
with torch.no_grad():
  last_hidden_states25 = model(input_ids25, attention_mask=attention_mask25, token_type_ids=input_segments25)
features25 = last_hidden_states25[0][:,0,:].numpy()  # [CLS] embedding per row
features25
# + [markdown] id="bdt_480Cul5Y"
# #**df26**
# + id="Y8nMJ1KQvHt3" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713096504, "user_tz": -60, "elapsed": 303767, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="3ca0c488-fefe-47c9-c1a2-58ee5670e819"
# Encode df26's record pairs with BERT: "<Title1><Desc1> [SEP] <Title2><Desc2>".
pair26=df26['Title1'] + df26['Description1']+ [" [SEP] "] + df26['Title2'] + df26['Description2']
tokenized26 = pair26.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len26 = 0 # padding all lists to the same size
for i in tokenized26.values:
    if len(i) > max_len26:
        max_len26 = len(i)
# NOTE(review): dead loop -- max_len26 is forced to the tokenizer max_length here.
max_len26=300
padded26 = np.array([i + [0]*(max_len26-len(i)) for i in tokenized26.values])
np.array(padded26).shape
attention_mask26 = np.where(padded26 != 0, 1, 0)  # 1 on real tokens, 0 on padding
attention_mask26.shape
input_ids26 = torch.tensor(padded26)
attention_mask26 = torch.tensor(attention_mask26)
input_segments26= np.array([_get_segments3(token, max_len26)for token in tokenized26.values])
token_type_ids26 = torch.tensor(input_segments26)  # computed but not used below
input_segments26 = torch.tensor(input_segments26)
with torch.no_grad():
  last_hidden_states26 = model(input_ids26, attention_mask=attention_mask26, token_type_ids=input_segments26)
features26 = last_hidden_states26[0][:,0,:].numpy()  # [CLS] embedding per row
features26
# + [markdown] id="Us8kmV9suo7q"
# #**df27**
# + id="QkgDrZddvelN" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713133798, "user_tz": -60, "elapsed": 338709, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="03566932-ac20-4b72-e91c-dc1ab60a543a"
# Encode df27's record pairs with BERT: "<Title1><Desc1> [SEP] <Title2><Desc2>".
pair27=df27['Title1'] + df27['Description1']+ [" [SEP] "] + df27['Title2'] + df27['Description2']
tokenized27 = pair27.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len27 = 0 # padding all lists to the same size
for i in tokenized27.values:
    if len(i) > max_len27:
        max_len27 = len(i)
# NOTE(review): dead loop -- max_len27 is forced to the tokenizer max_length here.
max_len27=300
padded27 = np.array([i + [0]*(max_len27-len(i)) for i in tokenized27.values])
np.array(padded27).shape
attention_mask27 = np.where(padded27 != 0, 1, 0)  # 1 on real tokens, 0 on padding
attention_mask27.shape
input_ids27 = torch.tensor(padded27)
attention_mask27 = torch.tensor(attention_mask27)
input_segments27= np.array([_get_segments3(token, max_len27)for token in tokenized27.values])
token_type_ids27 = torch.tensor(input_segments27)  # computed but not used below
input_segments27 = torch.tensor(input_segments27)
with torch.no_grad():
  last_hidden_states27 = model(input_ids27, attention_mask=attention_mask27, token_type_ids=input_segments27)
features27 = last_hidden_states27[0][:,0,:].numpy()  # [CLS] embedding per row
features27
# + [markdown] id="Uph3ij1pusnG"
# #**df28**
# + id="QRvWDtfevKkR" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713179224, "user_tz": -60, "elapsed": 45391, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="b262e750-de65-48dd-f909-606d5785042a"
# Encode df28's record pairs with BERT: "<Title1><Desc1> [SEP] <Title2><Desc2>".
pair28=df28['Title1'] + df28['Description1']+ [" [SEP] "] + df28['Title2'] + df28['Description2']
tokenized28 = pair28.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len28 = 0 # padding all lists to the same size
for i in tokenized28.values:
    if len(i) > max_len28:
        max_len28 = len(i)
# NOTE(review): dead loop -- max_len28 is forced to the tokenizer max_length here.
max_len28=300
padded28 = np.array([i + [0]*(max_len28-len(i)) for i in tokenized28.values])
np.array(padded28).shape
attention_mask28 = np.where(padded28 != 0, 1, 0)  # 1 on real tokens, 0 on padding
attention_mask28.shape
input_ids28 = torch.tensor(padded28)
attention_mask28 = torch.tensor(attention_mask28)
input_segments28= np.array([_get_segments3(token, max_len28)for token in tokenized28.values])
token_type_ids28 = torch.tensor(input_segments28)  # computed but not used below
input_segments28 = torch.tensor(input_segments28)
with torch.no_grad():
  last_hidden_states28 = model(input_ids28, attention_mask=attention_mask28, token_type_ids=input_segments28)
features28 = last_hidden_states28[0][:,0,:].numpy()  # [CLS] embedding per row
features28
# + [markdown] id="4Wd2GPEhuuxN"
# #**df29**
# + id="VOahLiBNmqCz" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713222793, "user_tz": -60, "elapsed": 39309, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="9a7d8324-159d-4fe0-e58c-5e9325b2c483"
# Encode df29's record pairs with BERT: "<Title1><Desc1> [SEP] <Title2><Desc2>".
pair29=df29['Title1'] + df29['Description1']+ [" [SEP] "] + df29['Title2'] + df29['Description2']
tokenized29 = pair29.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len29 = 0 # padding all lists to the same size
for i in tokenized29.values:
    if len(i) > max_len29:
        max_len29 = len(i)
# NOTE(review): dead loop -- max_len29 is forced to the tokenizer max_length here.
max_len29=300
padded29 = np.array([i + [0]*(max_len29-len(i)) for i in tokenized29.values])
np.array(padded29).shape
attention_mask29 = np.where(padded29 != 0, 1, 0)  # 1 on real tokens, 0 on padding
attention_mask29.shape
input_ids29 = torch.tensor(padded29)
attention_mask29 = torch.tensor(attention_mask29)
input_segments29= np.array([_get_segments3(token, max_len29)for token in tokenized29.values])
token_type_ids29 = torch.tensor(input_segments29)  # computed but not used below
input_segments29 = torch.tensor(input_segments29)
with torch.no_grad():
  last_hidden_states29 = model(input_ids29, attention_mask=attention_mask29, token_type_ids=input_segments29)
features29 = last_hidden_states29[0][:,0,:].numpy()  # [CLS] embedding per row
features29
# + [markdown] id="VhtNowb-uw75"
# #**df30**
# + id="5K2h0TYgvjDd" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713274625, "user_tz": -60, "elapsed": 87156, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="6657bd51-1bb8-415c-c17a-80b31bf6b5de"
# Encode df30's record pairs with BERT and keep the first-token ([CLS]) embedding.
# Each input is "<Title1><Description1> [SEP] <Title2><Description2>".
pair30=df30['Title1'] + df30['Description1']+ [" [SEP] "] + df30['Title2'] + df30['Description2']
tokenized30 = pair30.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
# BUG FIX: this cell padded to 400 while the tokenizer truncates at 300 and
# every other df* cell pads to 300. The extra 100 positions were always pad
# tokens (masked out by attention_mask), so they only wasted memory/compute
# and made this frame inconsistent with its siblings. Pad to 300 like the rest.
max_len30=300
padded30 = np.array([i + [0]*(max_len30-len(i)) for i in tokenized30.values])
attention_mask30 = np.where(padded30 != 0, 1, 0)  # 1 on real tokens, 0 on padding
input_ids30 = torch.tensor(padded30)
attention_mask30 = torch.tensor(attention_mask30)
# Segment ids via _get_segments3 (presumably 0 before [SEP], 1 after -- TODO confirm).
input_segments30= np.array([_get_segments3(token, max_len30)for token in tokenized30.values])
token_type_ids30 = torch.tensor(input_segments30)  # kept for parity with sibling cells
input_segments30 = torch.tensor(input_segments30)
with torch.no_grad():
  last_hidden_states30 = model(input_ids30, attention_mask=attention_mask30, token_type_ids=input_segments30)
features30 = last_hidden_states30[0][:,0,:].numpy()  # [CLS] embedding per row
features30
# + [markdown] id="2q_qQwvxuy--"
# #**df31**
# + id="9A5DutfSvO3r"
#pair31= df31['Title1'] + [" [SEP] "] + df31['Title2']
# + id="J-b3kFUXvk08" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616670524672, "user_tz": -60, "elapsed": 18415, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="529a6b27-f57a-4bdd-ac14-d83307bca180"
# Encode df31's record pairs with BERT: "<Title1><Desc1> [SEP] <Title2><Desc2>".
pair31=df31['Title1'] + df31['Description1']+ [" [SEP] "] + df31['Title2'] + df31['Description2']
tokenized31 = pair31.apply((lambda x: tokenizer.encode(x, add_special_tokens=True,truncation=True, max_length=300)))
max_len31 = 0 # padding all lists to the same size
for i in tokenized31.values:
    if len(i) > max_len31:
        max_len31 = len(i)
# NOTE(review): dead loop -- max_len31 is forced to the tokenizer max_length here.
max_len31=300
padded31 = np.array([i + [0]*(max_len31-len(i)) for i in tokenized31.values])
np.array(padded31).shape
attention_mask31 = np.where(padded31 != 0, 1, 0)  # 1 on real tokens, 0 on padding
attention_mask31.shape
input_ids31 = torch.tensor(padded31)
attention_mask31 = torch.tensor(attention_mask31)
input_segments31= np.array([_get_segments3(token, max_len31)for token in tokenized31.values])
token_type_ids31 = torch.tensor(input_segments31)  # computed but not used below
input_segments31 = torch.tensor(input_segments31)
with torch.no_grad():
  last_hidden_states31 = model(input_ids31, attention_mask=attention_mask31, token_type_ids=input_segments31)
features31 = last_hidden_states31[0][:,0,:].numpy()  # [CLS] embedding per row
features31
# + [markdown] id="T-y98h_O17Es"
#
# + [markdown] id="z6m1jSq0fvSK"
# #**Classification**
# + id="n_ehQvqlzpKb"
# Stack the per-frame [CLS] feature matrices into one design matrix.
# NOTE(review): features28/29/30 are appended out of numeric order; the row
# order here must match the pd.concat order of `Total` below -- it does.
features=np.concatenate([features3,features4,features5,features6,features7,features8,features9,features10,features11,features12,features13,features14,features15,features16,features17,features18,features19,features20,features21,features22,features23,features24,features25,features26,features27,features29, features30, features28])
# + id="_fQOGOczXPSr" executionInfo={"status": "ok", "timestamp": 1616886877256, "user_tz": -60, "elapsed": 120516, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# NOTE(review): this cell overwrites `features` above -- when both cells run
# top-to-bottom, only the df3-df11 subset is actually used.
features=np.concatenate([features3,features4,features5,features6,features7,features8,features9,features10,features11])
# + id="eWLJczI21W7N" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1614726838964, "user_tz": -60, "elapsed": 1755, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="287d8392-4457-4e6e-b7d6-05522f88bed3"
features.shape
# + id="eoP6quJLy7xr"
# Concatenate the raw frames in the SAME order as the feature matrices above.
Total = pd.concat([df3,df4,df5,df6,df7,df8,df9,df10,df11,df12,df13,df14,df15,df16,df17,df18,df19,df20,df21,df22,df23,df24,df25,df26,df27,df29,df30, df28], ignore_index=True)
# + id="anfoc7F0g8Qw" executionInfo={"status": "ok", "timestamp": 1616886878818, "user_tz": -60, "elapsed": 1560, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# NOTE(review): overwrites `Total` above -- kept consistent with the features subset.
Total = pd.concat([df3,df4,df5,df6,df7,df8,df9,df10,df11], ignore_index=True)
# + id="axi2Jeq5zfpG" colab={"base_uri": "https://localhost:8080/", "height": 606} executionInfo={"status": "ok", "timestamp": 1616778622835, "user_tz": -60, "elapsed": 757, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="3dd8056a-fdbb-4ae1-e6c9-21226e6a45f2"
Total
# + id="N_15I5LWzLoQ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886878819, "user_tz": -60, "elapsed": 1543, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="2a8b1b5c-411a-4c48-817a-83796e69ef0a"
# Target vector, row-aligned with `features` via the concat order above.
labels =Total['Label']
labels
# + [markdown] id="qs5eiuYdG2pW"
# hold out
# + id="fAhI7LZdhC3c" executionInfo={"status": "ok", "timestamp": 1616886900814, "user_tz": -60, "elapsed": 839, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# Three ALTERNATIVE train/test splits; whichever cell runs last wins.
# Split 1: fixed cut at row 3486 (presumably matches the df3-df11 subset size
# -- TODO confirm against Total.shape).
train_features = features[0:3486]
train_labels = labels[0:3486]
test_features = features[3486:]
test_labels = labels[3486:]
# + id="j_hjZHRuiiD6"
# Split 2: fixed cut at row 7600 (for the full df3-df30 concatenation).
train_features = features[0:7600]
train_labels = labels[0:7600]
test_features = features[7600:]
test_labels = labels[7600:]
# + id="UqHZPrNpGsfQ"
# Split 3: random 80/20 split with a fixed seed; overrides both cuts above.
train_features, test_features, train_labels, test_labels = train_test_split(features, labels,test_size=0.2,random_state=42)
# + [markdown] id="nG_ymUrw3ozc"
# #**LogisticRegression Optimized**
#
# ---
#
#
# + id="DKr7gDRz5S2L" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887109706, "user_tz": -60, "elapsed": 13328, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="db15cfa7-4160-4518-fddd-b65876f007a9"
# Grid-search the inverse regularization strength C for LogisticRegression
# over 20 evenly spaced values in [0.0001, 100], with 5-fold cross-validation
# on the training split, then report the winning configuration.
parameters = {'C': np.linspace(0.0001, 100, 20)}
grid_search = GridSearchCV(LogisticRegression(), parameters, cv=5)
grid_search.fit(train_features, train_labels)
print('best parameters: ', grid_search.best_params_)
print('best scores: ', grid_search.best_score_)  # fixed typo: "scrores" -> "scores"
# + id="7rm32RA-HgBd" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887109707, "user_tz": -60, "elapsed": 11133, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="30960e83-e1fb-4861-c00c-c7d9b6728428"
# Final Logistic Regression with the C found by the grid search above
# (10.52 ~= the second point of np.linspace(0.0001, 100, 20)).
lr_clf = LogisticRegression(C=10.52)
lr_clf.fit(train_features, train_labels)
# + id="8sm8PZlaHo-b" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887109708, "user_tz": -60, "elapsed": 9731, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="4bfcc856-6b6d-4924-d46d-804a60192890"
# Mean accuracy on the held-out split.
lr_clf.score(test_features, test_labels)
# + id="0ICHUVuhRs3X" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887109710, "user_tz": -60, "elapsed": 7662, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="67bba838-e63f-42cd-eacf-6c66a5a35b3f"
y_predLr = lr_clf.predict(test_features)
np.set_printoptions(threshold=np.inf)  # print the full prediction array, no ellipsis
y_predLr
# + id="vKbU0wQtSN7M" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887117311, "user_tz": -60, "elapsed": 1355, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="b74336cd-1747-4e6f-b02b-d324208c14b8"
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,y_predLr))
print(confusion_matrix(test_labels, y_predLr))
# + id="W6uV_wgzvP53" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887119506, "user_tz": -60, "elapsed": 851, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="7e03cd41-96f6-42c8-b6f5-0e6610be018b"
from sklearn.metrics import accuracy_score
print(accuracy_score(test_labels, y_predLr))
# + id="dZ6fZqC6Wo7v" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887124643, "user_tz": -60, "elapsed": 2454, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="1ada8d56-9374-4ad0-eaa4-33dd1b793826"
# 5-fold CV accuracy over the WHOLE dataset (refits lr_clf on each fold).
scores = cross_val_score(lr_clf, features, labels,cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                            scores.std()),
                                            end="\n\n" )
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="OwB70CQCpWsQ" executionInfo={"status": "ok", "timestamp": 1616887128492, "user_tz": -60, "elapsed": 942, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="52504724-3ed8-4d3a-8bcd-89285a517e95"
# Confusion matrix of the Logistic Regression predictions as a heatmap.
# FIX: dropped the duplicate `import matplotlib.pyplot as plt` that appeared
# twice in this cell; the remaining (possibly unused) imports are kept because
# later cells may rely on them being in the module namespace.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import os
import seaborn as sns
cm=confusion_matrix(test_labels,y_predLr)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + [markdown] id="GklbE2mcyFKb"
# #**Decision tree**
# + id="9tW7vrR9yQaZ" executionInfo={"status": "ok", "timestamp": 1616887133320, "user_tz": -60, "elapsed": 918, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
from sklearn.tree import DecisionTreeClassifier
# + id="2AphF0d1yX37"
# NOTE(review): this definition is immediately overwritten by the next cell;
# behavior is identical anyway since 'gini' is the DecisionTreeClassifier default.
clf = DecisionTreeClassifier(max_depth = 500, random_state = 0,criterion='gini')
# + id="cLU-AQqIxzJI" executionInfo={"status": "ok", "timestamp": 1616887135163, "user_tz": -60, "elapsed": 1333, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
clf = DecisionTreeClassifier(max_depth = 500, random_state = 0)
# + id="ygtC-1A9ycDW" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887140589, "user_tz": -60, "elapsed": 5463, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="23468cdb-4e82-41a9-f557-5a1d5fd7cfeb"
clf.fit(train_features, train_labels)
# + id="JvKjL68bSqi2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887140590, "user_tz": -60, "elapsed": 3098, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="eb52f7bd-9958-4b7a-ff61-786922d601fc"
# Decision-tree predictions on the held-out split, then per-class metrics.
y_preddt = clf.predict(test_features)
np.set_printoptions(threshold=np.inf)  # print the full prediction array
y_preddt
# + id="kerN3aUaSpfh" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616887144167, "user_tz": -60, "elapsed": 791, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="64f8820a-2428-444c-8ae1-d34bd2c6b509"
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,y_preddt))
print(confusion_matrix(test_labels, y_preddt))
# + colab={"base_uri": "https://localhost:8080/", "height": 523} id="ehJDtCspyJLW" executionInfo={"status": "ok", "timestamp": 1616887150650, "user_tz": -60, "elapsed": 1430, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="f735502b-94f8-46d3-d162-10f27c37420f"
# Confusion matrix of the decision-tree predictions as a heatmap.
# FIX: dropped the duplicate `import matplotlib.pyplot as plt` from this cell;
# the remaining (possibly unused) imports are kept for later cells.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import os
import seaborn as sns
cm=confusion_matrix(test_labels,y_preddt)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + id="7eIUnRYivoEP" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616790769285, "user_tz": -60, "elapsed": 956, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="23058967-be6a-4997-9a3f-08bf63a3be17"
from sklearn.metrics import accuracy_score
print(accuracy_score(test_labels, y_preddt))
# + id="WrVSkqR0y71G" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616715666073, "user_tz": -60, "elapsed": 4306, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="22e12ca0-fcb0-4485-db10-5b351fe07107"
# The score method returns the accuracy of the model
score = clf.score(test_features, test_labels)
print(score)
# + id="9tR2H-yHmxqJ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616790836383, "user_tz": -60, "elapsed": 63715, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="536c77d5-63ec-4d36-ee15-03c5e23e86fe"
# 5-fold CV accuracy of the tree over the whole dataset.
scores = cross_val_score(clf, features, labels, cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                            scores.std()),
                                            end="\n\n" )
# + [markdown] id="ZkQR7uLE22Y0"
# #**SVM**
# + id="T16t0BN2thID"
from sklearn.svm import SVC
# + id="Q6CgvC0n21oc" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616790840912, "user_tz": -60, "elapsed": 55544, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="e74d281f-dc0d-4135-9193-aab31a516d37"
# Linear-kernel SVM on the BERT features (kernel choice justified below).
svclassifier = SVC(kernel='linear')
svclassifier.fit(train_features, train_labels)
# + id="DLyTKn6K3yqj" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616790845728, "user_tz": -60, "elapsed": 58002, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="6475d74d-18a0-4281-d582-4901e9fce0f7"
y_pred = svclassifier.predict(test_features)
np.set_printoptions(threshold=np.inf)  # print the full prediction array
y_pred
# + [markdown] id="WrkMmKZUkQPE"
# Kernel : Sigmoid: 0.88 / Linear: 0.92/ rbf:0.9 / poly : 0.9
# + id="qmHE07TI4HCh" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616790845730, "user_tz": -60, "elapsed": 54539, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="10d06542-a4bd-4a04-e5cc-aff4ebb53b9f"
# Per-class precision/recall/F1 and confusion matrix for the linear SVM.
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(test_labels,y_pred))
print(classification_report(test_labels,y_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="RyhSG9fIyGym" executionInfo={"status": "ok", "timestamp": 1616790845732, "user_tz": -60, "elapsed": 52106, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="28632070-426a-4673-afd8-0b3ec1ed0eef"
# Confusion matrix of the SVM predictions as a heatmap.
# FIX: dropped the duplicate `import matplotlib.pyplot as plt` from this cell;
# the remaining (possibly unused) imports are kept for later cells.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import os
import seaborn as sns
cm=confusion_matrix(test_labels,y_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + id="rQXTzIAKvxpe" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616790845733, "user_tz": -60, "elapsed": 49134, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="11f5581e-fc44-4a5e-cf24-6ede50aeb253"
from sklearn.metrics import accuracy_score
print(accuracy_score(test_labels, y_pred))
# + id="1Cg9zFCCZRDD"
# Grid over C, gamma (ignored by the linear kernel) and kernel type.
param_grid = {'C':[1,10,100,1000],'gamma':[1,0.1,0.001,0.0001], 'kernel':['linear','rbf']}
# + id="0aIAteOTZVFI"
# refit=True retrains the best estimator on the full training split.
grid = GridSearchCV(SVC(),param_grid,refit = True, verbose=2)
# + id="SvsdclfXZYtf" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="380850f1-551d-4911-e3a7-9dd4655586b2"
grid.fit(train_features,train_labels)
# + id="Aq--IG7WZo7W"
grid.best_params_
# + id="9g5eOxMYZvlh"
predic = grid.predict(test_features)
# + id="1bp4AxfJZ4SL"
print(classification_report(test_labels,predic))
print(confusion_matrix(test_labels, predic))
# + [markdown] id="ry43MpkWaTix"
# #Cross_Val
# + id="NYWTVLNknWXg"
from sklearn.model_selection import cross_val_score
# + id="B6LUaMOyrMPF"
from sklearn import svm
# + id="JOd-QdsqnXPE"
# NOTE(review): this rebinds `clf` (previously the decision tree / MLP grid)
# but the fresh SVC is never used -- cross_val_score below scores
# `svclassifier` instead. Looks like leftover scaffolding; verify intent.
clf = svm.SVC(kernel='linear')
# + id="ArvTazd6qpr9" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616790981579, "user_tz": -60, "elapsed": 181547, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="605bfc34-6262-4453-83a0-8a9e4a2cc8ac"
# 5-fold CV accuracy of the linear SVM over the whole dataset.
scores = cross_val_score(svclassifier,features, labels, cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                            scores.std()),
                                            end="\n\n" )
# + [markdown] id="-GfWBgit7kKc"
# #**MLP Best params**
# + id="2jo__QQE7omY" executionInfo={"status": "ok", "timestamp": 1616886915723, "user_tz": -60, "elapsed": 1681, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
# Base MLP (capped at 100 iterations) plus the hyper-parameter grid searched below.
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(max_iter=100)
from sklearn.datasets import make_classification
# + id="fS44TAenh4zR" executionInfo={"status": "ok", "timestamp": 1616886917896, "user_tz": -60, "elapsed": 1457, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}}
parameter_space = {
    # BUG FIX: (50,100,50) was listed twice, so GridSearchCV fitted the exact
    # same configuration twice per combination of the other parameters.
    'hidden_layer_sizes': [(50,100,50), (100,)],
    'activation': ['tanh', 'relu'],
    'solver': ['sgd', 'adam'],
    'alpha': [0.0001, 0.05],
    'learning_rate': ['constant','adaptive'],
}
# + id="dnwD6IhehjFW" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886999598, "user_tz": -60, "elapsed": 81275, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="34b15295-29a9-4c11-ecf9-a968348849c1"
from sklearn.model_selection import GridSearchCV
# NOTE(review): rebinds `clf` again (it was the SVC above); all MLP results
# below refer to this grid-search object.
clf = GridSearchCV(mlp, parameter_space, n_jobs=-1, cv=3)
clf.fit(train_features, train_labels)
# + id="Q3R7Hhe-KUWC" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886999599, "user_tz": -60, "elapsed": 78847, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="2827babb-2c24-4eb7-e712-f06a17553399"
# Best parameter set
print('Best parameters found:\n', clf.best_params_)
# + id="D4iS97yMiCXi" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886999600, "user_tz": -60, "elapsed": 76976, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="74e545d7-68b8-466f-c65b-e63ac7215eb1"
# All results: mean CV score +/- 2 std for every grid configuration.
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
# + id="eXkRctmi9IHD" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886999601, "user_tz": -60, "elapsed": 72779, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="177d2c83-a3e1-4de3-c27f-eebfcd345cdf"
# NOTE(review): these timing cells are out of order in the script -- `end` is
# read here before `start` is (re)assigned below, so in a straight top-to-bottom
# run this uses a `start` from some earlier execution. Verify before trusting
# the printed runtime.
end = time.time()
print(f"Runtime of the program is {end - start}")
# + id="ur092BrFxETo" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886999601, "user_tz": -60, "elapsed": 70445, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="186888bb-1a46-4174-f23a-f79875f0ba50"
import time
start = time.time()
# Predict with the best MLP found by the grid search.
y_true, y_pred = test_labels , clf.predict(test_features)
np.set_printoptions(threshold=np.inf)  # print the full prediction array
y_pred
# + id="hCVtqVsfizFy" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886999602, "user_tz": -60, "elapsed": 66895, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="88a379a4-21fe-42d1-abad-dc5af0c71ce6"
# Per-class metrics for the best MLP on the held-out split.
from sklearn.metrics import classification_report, confusion_matrix
print('Results on the test set:')
print(classification_report(y_true, y_pred))
print(confusion_matrix(y_true, y_pred))
# + id="7FQKuu8b9Nps" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616886999603, "user_tz": -60, "elapsed": 64346, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="703cfb57-1bb4-40df-da05-1aa72437e753"
# Elapsed time since `start` was set in the prediction cell above.
end = time.time()
print(f"Runtime of the program is {end - start}")
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="xm3_8wnnxafJ" executionInfo={"status": "ok", "timestamp": 1616779257369, "user_tz": -60, "elapsed": 1180, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="c7740556-5fb3-4052-a2e4-f3c56088eee6"
# Confusion matrix of the MLP predictions as a heatmap.
# FIX: dropped the duplicate `import matplotlib.pyplot as plt` from this cell;
# the remaining (possibly unused) imports are kept for later cells.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import os
import seaborn as sns
cm=confusion_matrix(y_true,y_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + id="HV96Dn8xwINN" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616779261517, "user_tz": -60, "elapsed": 1025, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="9d45c160-eee0-4723-bb17-1ee1e9ceda4b"
from sklearn.metrics import accuracy_score
print(accuracy_score(y_true, y_pred))
# + id="W7XXMqpz8kI5" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616713834181, "user_tz": -60, "elapsed": 516, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="61949842-4876-44e8-cf2b-934ae21013d3"
# Same number via the estimator's own score method (mean accuracy).
clf.score(test_features, test_labels)
# + id="qJbrz3Cb9mfo"
from sklearn.model_selection import cross_val_score
# + id="jhUP__Kx94rC"
# BUG FIX: this cell originally called cross_val_score(clf, test_labels, y_pred,
# cv=5), passing the 1-D label vector as the design matrix X and the predictions
# as y -- not a valid call. Cross-validate on the held-out split instead.
scores = cross_val_score(clf, test_features, test_labels, cv=5)
# + id="m49YtVHa99I9" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616779918088, "user_tz": -60, "elapsed": 654547, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="bc395270-2c74-4f89-e37f-e5ddb2db3e0b"
# 5-fold CV over the whole dataset (re-runs the full MLP grid search per fold).
scores = cross_val_score(clf, features, labels, cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                            scores.std()),
                                            end="\n\n" )
# + [markdown] id="rkStE77lVPjB"
# #**Random Forest**
# + id="-Cvq-n5HVUH2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791007864, "user_tz": -60, "elapsed": 2676, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="14327ddd-9076-4547-fe3a-c8198aacaff1"
# Random forest (20 trees, fixed seed) on the BERT features.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=20, random_state=0)
rf.fit(train_features, train_labels)
y_pred1 = rf.predict(test_features)
y_pred1
# + id="ORiXr8DQI5E5" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791010086, "user_tz": -60, "elapsed": 650, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="e66a11df-9d30-4f58-d50c-6824f4c02c3d"
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,y_pred1))
print(confusion_matrix(test_labels, y_pred1))
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="vRIz1HBfx5Xv" executionInfo={"status": "ok", "timestamp": 1616791016017, "user_tz": -60, "elapsed": 1081, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="5597cb04-40fd-4732-8302-6b80791efe83"
# Confusion matrix of the random-forest predictions as a heatmap.
# FIX: dropped the duplicate `import matplotlib.pyplot as plt` from this cell;
# the remaining (possibly unused) imports are kept for later cells.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import os
import seaborn as sns
cm=confusion_matrix(test_labels,y_pred1)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + id="3AOvVQIjwSQ_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791019441, "user_tz": -60, "elapsed": 821, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="88b1d15a-af38-40b8-d33f-800e6343c968"
from sklearn.metrics import accuracy_score
print(accuracy_score(test_labels, y_pred1))
# + id="76vce9ZUVdLB"
# NOTE(review): this CV run is duplicated by the cell two below; the result
# computed here is printed by the next cell.
scores = cross_val_score(rf, features, labels, cv=5)
# + id="DQ-gblPatPEi" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616716954841, "user_tz": -60, "elapsed": 1189, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="e3e9bb9a-13f1-404a-86ad-0ab019cb9f52"
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                            scores.std()),
                                            end="\n\n" )
# + id="yLS7LQUBVlgo" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791042314, "user_tz": -60, "elapsed": 21149, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="bed87078-ee4f-40ec-b6fd-d25e60d4d7c5"
# Duplicate of the CV above, compute + print in one cell.
scores = cross_val_score(rf, features, labels, cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                            scores.std()),
                                            end="\n\n" )
# + [markdown] id="UK1v4gA6-ssL"
# #**Naive Bayes**
# + [markdown] id="fEsrF7jnB8ck"
# #Gaussian
# + id="x4-7126r-110" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791042318, "user_tz": -60, "elapsed": 12454, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="ed7859a4-b740-443a-f396-7e70e1f74ae9"
# Fit a Gaussian Naive Bayes classifier on the training split.
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(train_features, train_labels)
# + id="lD6bQGaX_Ch8" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791042320, "user_tz": -60, "elapsed": 10427, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="33cc8983-7751-4a12-a277-b0de2b4f9322"
# Predict hold-out labels; the bare name echoes the array in the notebook.
y_pred = gnb.predict(test_features)
y_pred
# + id="gNCBuGYI_HeA" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791042321, "user_tz": -60, "elapsed": 7093, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="677dc7ea-4e9a-4d57-c5df-93736f2d8471"
# Hold-out accuracy.
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(test_labels, y_pred))
# + id="ZNq7MJ3eG4r1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791042324, "user_tz": -60, "elapsed": 5189, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="4c4ffb16-a97f-461c-9af6-feb0eb168ae8"
# Per-class precision/recall/F1 plus the raw confusion matrix.
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,y_pred))
print(confusion_matrix(test_labels, y_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 526} id="B5rFujwdx6_O" executionInfo={"status": "ok", "timestamp": 1616791042325, "user_tz": -60, "elapsed": 2011, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="1e67ace7-9051-47e9-d12c-41834b5dd313"
# Confusion-matrix heatmap for the Naive Bayes predictions.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import matplotlib.pyplot as plt
import os
import seaborn as sns
cm=confusion_matrix(test_labels,y_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + [markdown] id="sfE0-AH4DltJ"
# *Cross Validation*
# + id="-mpqZDu9SJA-"
# Fixed: cross_val_score expects (estimator, X, y); the original passed the
# test labels as X and the predictions as y, which is meaningless and fails
# because the label vector is 1-D. Score the model on the real data instead.
scores = cross_val_score(gnb, features, labels, cv=5)
# + id="k9n7Rtxd7avI" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791044880, "user_tz": -60, "elapsed": 914, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="e2daa1e0-4ce5-47bc-e6f0-950eb766af4a"
# 5-fold cross-validation of Gaussian NB on the full data set.
scores = cross_val_score(gnb, features, labels, cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                          scores.std()),
      end="\n\n" )
# + [markdown] id="KMtKlGiKkglz"
# #**Optimized SVC**
# + id="iy2vI2U3klp-"
from sklearn.svm import SVC
# + id="BHXJjiAjkzGv"
# Baseline SVC with default hyperparameters (RBF kernel, C=1).
model = SVC()
# + id="QVaipTgdk1yG" colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"status": "ok", "timestamp": 1603311275655, "user_tz": -60, "elapsed": 43449, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="0741843a-230a-4ba0-f5d9-58eb48a34526"
model.fit(train_features, train_labels)
# + id="3pykStcPk-3y"
prediction = model.predict(test_features)
# + id="8fIiNAjklJKh" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1603311428421, "user_tz": -60, "elapsed": 963, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="af081b60-7fac-4897-dadd-6f840f4d7f5d"
# Baseline hold-out metrics before tuning.
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,prediction))
print(confusion_matrix(test_labels, prediction))
# + id="oS0abKaklbv9"
# Small grid over regularisation strength, kernel width and kernel type.
param_grid = {'C':[1,10],'gamma':[1,0.1], 'kernel':['linear','rbf']}
# + id="jhHpD4Gtleqr"
# refit=True retrains the best configuration on the whole training split.
grid = GridSearchCV(SVC(),param_grid,refit = True, verbose=2)
# + id="aC1ULpsvlh4J" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1603315653807, "user_tz": -60, "elapsed": 1351373, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="8b18abcf-e346-40e8-de7b-b4c399a9e211"
grid.fit(train_features,train_labels)
# + id="VcgVNkPnlr1r" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1603315653815, "user_tz": -60, "elapsed": 1172724, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="a280ecab-79c5-47da-ae1b-6851a9e64a23"
# Echo the grid that was searched (not the best parameters).
grid.param_grid
# + id="YClW2tzulvYg"
# Predictions from the refitted best estimator.
predic = grid.predict(test_features)
# + id="esuC0EeWl2Pi" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1603316838448, "user_tz": -60, "elapsed": 943, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="76dcae1a-8e99-47c7-f10d-4758131b1b0c"
print(classification_report(test_labels,predic))
print(confusion_matrix(test_labels, predic))
# + [markdown] id="MsvXLipBnrum"
# #**Random Forest Optimized**
# + id="9-L23_YdnxQ-"
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(random_state=42)
# + id="KQYpYWOin8Zu"
# Hyperparameter grid for the random forest.
# NOTE(review): max_features='auto' was removed in scikit-learn >= 1.3
# (for classifiers it was equivalent to 'sqrt') -- confirm the pinned
# sklearn version before re-running this grid.
param_grid = {
    'n_estimators': [200, 500],
    'max_features': ['auto', 'sqrt', 'log2'],
    'max_depth' : [4,5,6,7,8],
    'criterion' :['gini', 'entropy']
}
# + id="rTixtlAbn_Jp"
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)
CV_rfc.fit(train_features, train_labels)
# + id="ihYv5YuPoH9c" colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"status": "ok", "timestamp": 1603231151664, "user_tz": -60, "elapsed": 1900, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="31ed64d9-3e5e-4fc0-db97-c0198ccf640a"
CV_rfc.best_params_
# + id="1VvRqQsGoLon"
# Retrain a fresh forest with the parameters reported as best above.
rfc1=RandomForestClassifier(random_state=42, max_features='auto', n_estimators= 200, max_depth=8, criterion='gini')
# + id="Kh0Kg6VaoPX7" colab={"base_uri": "https://localhost:8080/", "height": 153} executionInfo={"status": "ok", "timestamp": 1603231221862, "user_tz": -60, "elapsed": 26575, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="dbdb652b-1d9c-4276-f669-725100723322"
rfc1.fit(train_features, train_labels)
# + id="11qWbyQ6oWKc"
pred=rfc1.predict(test_features)
# + id="L_kXzkJVJCzB" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1603231795794, "user_tz": -60, "elapsed": 827, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="6fa137aa-cde4-4d0e-ab6b-5a9a0501de12"
# Hold-out metrics for the tuned forest.
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,pred))
print(confusion_matrix(test_labels, pred))
# + id="5oIbVhGFocDD" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1603231185119, "user_tz": -60, "elapsed": 1065, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="c95f03fe-242e-4672-a793-39d19f114e1c"
from sklearn.metrics import accuracy_score
print("Accuracy for Random Forest on CV data: ",accuracy_score(test_labels,pred))
# + [markdown] id="XAXuL31Rqv2B"
# #**XGBoost**
# + id="rS6XD1Wl5aCJ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791066829, "user_tz": -60, "elapsed": 17947, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="d1513ea5-e2e0-45d0-fd4d-deb2351d457d"
# Gradient-boosted trees with a small learning rate and fixed seed.
import xgboost as xgb
modelxgb=xgb.XGBClassifier(random_state=1,learning_rate=0.01)
modelxgb.fit(train_features, train_labels)
# + id="VERUlLMfVcff" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791066833, "user_tz": -60, "elapsed": 16228, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="cad294ef-7908-461f-819b-8c016d5a3bd6"
# Predict hold-out labels; the bare name echoes the array in the notebook.
predxgb = modelxgb.predict(test_features)
predxgb
# + id="3VDVkMJ8VxEs" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791066840, "user_tz": -60, "elapsed": 12571, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="3cc1444e-247a-4886-e447-8c131d284687"
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,predxgb))
print(confusion_matrix(test_labels, predxgb))
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="hJOb8S9ax9zi" executionInfo={"status": "ok", "timestamp": 1616791066845, "user_tz": -60, "elapsed": 9634, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="4bc6d5e3-5cbf-4dcc-93b0-8194a31c3c0d"
# Confusion-matrix heatmap for the XGBoost predictions.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import matplotlib.pyplot as plt
import os
import seaborn as sns
cm=confusion_matrix(test_labels,predxgb)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + id="EZvEqHDkh5T9" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791095614, "user_tz": -60, "elapsed": 568, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="92ac3088-1148-4c05-f4a9-6b519da95baa"
# Hold-out accuracy.
from sklearn.metrics import accuracy_score
print(accuracy_score(test_labels, predxgb))
# + id="wHThKNgWELHA" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791098335, "user_tz": -60, "elapsed": 1078, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="a8fb793d-17f0-4e11-f3d6-d4843c29e9df"
# Same accuracy via the estimator's own score() method.
modelxgb.score(test_features,test_labels)
# + id="8ww8gvggEbQ5"
# Fixed: the original called cross_val_score(model, test_labels, test_features);
# `model` is the SVC from a previous section (not the XGBoost model) and X/y
# were swapped, so the 1-D label vector was passed as the feature matrix.
scores = cross_val_score(modelxgb, test_features, test_labels, cv=5)
# + id="hUPXhje3Em-x" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791268689, "user_tz": -60, "elapsed": 169495, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="cf1590ac-a53c-456e-e380-68241e4ef94e"
# 5-fold cross-validation of XGBoost on the full data set.
scores = cross_val_score(modelxgb, features, labels, cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                          scores.std()),
      end="\n\n" )
# + [markdown] id="FJcrg46fq96M"
# #**Optimized DT**
# + id="tGXKLf88OWWB"
from sklearn.tree import DecisionTreeClassifier
# + id="vV3F64KqNrvU"
# Same 3-step process
# 1. Instantiate
#    default criterion=gini
#    you can swap to criterion=entropy
dtc = DecisionTreeClassifier(random_state=0)
# 2. Fit
dtc.fit(train_features, train_labels)
# 3. Predict on the hold-out features
# (the "4 features in the iris dataset" remark was inherited from a tutorial)
y_pred_class = dtc.predict(test_features)
# + id="CX1K_T-kO7e5"
from sklearn import metrics
# + id="eaYZDO_LPDtR" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1601506916593, "user_tz": -60, "elapsed": 590, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="63421e7b-f9f0-4b68-ccec-fb211fbc3200"
# Accuracy
metrics.accuracy_score(test_labels, y_pred_class)
# + id="68Q9-FlUPOUd" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1601506960945, "user_tz": -60, "elapsed": 737, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="8a48eca0-69d6-4bce-d30c-05c3b2a2d585"
# Per-class metrics and raw confusion matrix for the untuned tree.
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_labels,y_pred_class))
print(confusion_matrix(test_labels, y_pred_class))
# + id="gXuobfEYQck2" colab={"base_uri": "https://localhost:8080/", "height": 340} executionInfo={"status": "ok", "timestamp": 1602187561716, "user_tz": -60, "elapsed": 361126, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="50e30c2f-f04b-422f-f017-e72fcde576da"
# Import
from sklearn.model_selection import GridSearchCV
# Define the parameter values that should be searched.
# Fixed: the grid started at 1, but sklearn requires min_samples_split >= 2
# (an integer value of 1 raises ValueError), so the search crashed on the
# very first candidate. Start the range at 2.
sample_split_range = list(range(2, 50))
# Create a parameter grid: map the parameter names to the values that should be searched
# Simply a python dictionary
# Key: parameter name
# Value: list of values that should be searched for that parameter
# Single key-value pair for param_grid
param_grid = dict(min_samples_split=sample_split_range)
# instantiate the grid
dtc = DecisionTreeClassifier(random_state=0)
grid = GridSearchCV(dtc, param_grid, cv=10, scoring='accuracy')
# fit the grid with data
grid.fit(train_features, train_labels)
# + id="VPx7qq5MVHuX"
# Single best score achieved across all params (min_samples_split)
print(grid.best_score_)
# Dictionary containing the parameters (min_samples_split) used to generate that score
print(grid.best_params_)
# Actual model object fit with those best parameters
# Shows default parameters that we did not specify
print(grid.best_estimator_)
# + id="L_dRDZrNVRg5"
# Entropy of child 1 = 0 (a pure split).
# Entropy of child 2: two of three samples in one class, one in the other
# -> H = -(2/3)log2(2/3) - (1/3)log2(1/3) ~= 0.918
entropy_child2 = -(2/3)*np.log2(2/3) - (1/3)*np.log2(1/3)
entropy_child2
# + id="t7aCcTzpVS0s"
# Weighted average of entropy(children): 3/4 of the samples fall in child 2.
weighted_child_entropy = (3/4)*(0.9184) + (1/4)*0
weighted_child_entropy
# + id="qacxtswJVf9J"
# Information gain = parent entropy - weighted child entropy.
# Fixed: the original computed 1 - (3/4)*0.9184 + (1/4)*0, which *adds* the
# second child's term instead of including it in the subtracted average;
# the whole weighted sum must be parenthesised.
info_gain = 1 - ((3/4)*(0.9184) + (1/4)*0)
info_gain
# + [markdown] id="PtBlFouhFeDi"
# #**KNN**
# + id="zXg9cPXey1su"
# Sweep the number of neighbours k from 1 to 8 and record the accuracy of
# each k-NN model on both the training and the test split.
from sklearn.neighbors import KNeighborsClassifier

neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

for idx, k in enumerate(neighbors):
    # Fit a classifier for this k and score it on both splits.
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(train_features, train_labels)
    train_accuracy[idx] = clf.score(train_features, train_labels)
    test_accuracy[idx] = clf.score(test_features, test_labels)
# + id="PvGnHUIV6KQd"
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# + id="Uu5db0IA6AMY"
#Generate plot
# Train/test accuracy as a function of k, to pick the best neighbour count.
plt.title('k-NN Varying number of neighbors')
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training accuracy')
plt.legend()
plt.xlabel('Number of neighbors')
plt.ylabel('Accuracy')
plt.show()
# + id="ka82d7XU6XMu"
# k=7 chosen from the sweep above.
knn = KNeighborsClassifier(n_neighbors=7)
# + id="57Q8uMC66b2n" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791601701, "user_tz": -60, "elapsed": 489018, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="fcbcde07-07a2-49ea-b0f7-2b66e6081fd9"
#Fit the model
knn.fit(train_features,train_labels)
# + id="jeJ8XtHW6kb5" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791628047, "user_tz": -60, "elapsed": 513516, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="d1b5af09-5942-4eac-b861-1474855859df"
# Hold-out accuracy of the chosen model.
knn.score(test_features,test_labels)
# + id="Da6HuDZLG6ou" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791655978, "user_tz": -60, "elapsed": 540027, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="e5a4877f-d132-43d8-d993-1fb4c2ce5ae9"
# Predictions on the hold-out set; bare name echoes the array.
y_pred = knn.predict(test_features)
y_pred
# + id="D4f66fCe7DI2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791655981, "user_tz": -60, "elapsed": 536079, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="73989824-3892-4fa0-e91e-c7a94fc3efac"
from sklearn.metrics import confusion_matrix
print(confusion_matrix(test_labels,y_pred))
from sklearn.metrics import classification_report
print(classification_report(test_labels,y_pred))
# + id="Rm9D-fJ7x_zB" colab={"base_uri": "https://localhost:8080/", "height": 520} executionInfo={"status": "ok", "timestamp": 1616791656777, "user_tz": -60, "elapsed": 534793, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="37a90b75-a92d-4c2c-fb15-07e403a1f524"
# Confusion-matrix heatmap for the k-NN predictions.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sklearn
from scipy import stats
import matplotlib.pyplot as plt
import os
import seaborn as sns
cm=confusion_matrix(test_labels,y_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# + id="OyNTSlJyh830" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791656780, "user_tz": -60, "elapsed": 531688, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="10571789-b098-4559-dc94-558e22829a70"
from sklearn.metrics import accuracy_score
print(accuracy_score(test_labels, y_pred))
# + id="BIyj4Oah7s-J" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616791695870, "user_tz": -60, "elapsed": 569386, "user": {"displayName": "SSBSE Admin", "photoUrl": "", "userId": "11167642343971864398"}} outputId="c39f3c8f-552c-4774-854c-8d6698acd57f"
# NOTE(review): unlike the other sections, this cross-validates on the test
# split only (not on the full features/labels) -- confirm that is intended.
scores = cross_val_score(knn, test_features, test_labels, cv=5)
print("mean: {:.3f} (std: {:.3f})".format(scores.mean(),
                                               scores.std()),
                                               end="\n\n" )
#88%
| Copie de Modified_ Modification_de test_tokens(3).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import json
import numpy as np
import pandas as pd
import re
import chart_studio.plotly as py
import plotly.graph_objects as go
# -
os.makedirs("figures", exist_ok=True)

def _load_sheet(sheet):
    """Read one gas sheet of the GHG workbook (4 header rows skipped)."""
    return pd.read_excel("data/GHG_EmissionsData.xlsx", engine="openpyxl",
                         skiprows=4, index_col=0, sheet_name=sheet)

# equivalent = _load_sheet("CO2e")
carbon = _load_sheet("CO2")
methane = _load_sheet("CH4")
nitrous = _load_sheet("N2O")
fgases = _load_sheet("f-gases")

year = "2018"

# The inventory marks non-occurring / not-estimated values with textual
# flags; treat those (and blanks) as zero emissions for the selected year.
for gas in (carbon, nitrous, methane):
    for missing in ["NO", "NE", "NO,NA"]:
        gas.loc[gas[year] == missing, year] = 0
    gas.fillna(0.0, inplace=True)

with open("data/structure_pretty.json", "r") as f:
    structure = json.load(f)
with open("data/labels.json", "r") as f:
    label_dict = json.load(f)

# Global-warming potentials used to convert each gas to CO2-equivalents.
carbon_scaling = 1
nitrous_scaling = 298
methane_scaling = 25
# +
# Accumulators for the Sankey diagram: parallel lists of link endpoints
# (as indices into `labels`) and, filled elsewhere, the link values.
sources = []
targets = []
value = []
labels = []

def add_items(source, target):
    """Record a link from *source* to *target*, registering unseen labels."""
    for name in (source, target):
        if name not in labels:
            labels.append(name)
    sources.append(labels.index(source))
    targets.append(labels.index(target))
# Walk the emission-category tree and emit one Sankey link per edge, plus a
# final fan-out from every leaf into the three gas nodes. The order of the
# add_items() calls must stay aligned with the value.append() calls: the
# n-th link gets the n-th value.
for start in list(structure.keys()):
    s = structure[start]
    if type(s) is dict:
        # This is the case for Energy as it has an extra level
        for key, val in s.items():
            # Link top-level category -> sub-category, weighted by the
            # CO2-equivalent sum of all three gases.
            add_items(start, key)
            value.append(
                carbon.loc[key, year] * carbon_scaling + nitrous.loc[key, year] * nitrous_scaling + methane.loc[key, year] * methane_scaling
            )
            if not val:
                # Sub-category has no children: fan out into the gas nodes.
                for t in ["CO2", "N2O", "CH4"]:
                    add_items(key, t)
                    if t == "CO2":
                        value.append(carbon.loc[key, year] * carbon_scaling)
                    elif t == "N2O":
                        value.append(nitrous.loc[key, year] * nitrous_scaling)
                    elif t == "CH4":
                        value.append(methane.loc[key, year] * methane_scaling)
            for v in val:
                # Link sub-category -> leaf category.
                value.append(
                    carbon.loc[v, year] * carbon_scaling + nitrous.loc[v, year] * nitrous_scaling + methane.loc[v, year] * methane_scaling
                )
                add_items(key, v)
                # Final level
                for t in ["CO2", "N2O", "CH4"]:
                    add_items(v, t)
                    if t == "CO2":
                        value.append(carbon.loc[v, year] * carbon_scaling)
                    elif t == "N2O":
                        value.append(nitrous.loc[v, year] * nitrous_scaling)
                    elif t == "CH4":
                        value.append(methane.loc[v, year] * methane_scaling)
        # Add other fugitive emissions
        # NOTE(review): the HFC/PFC/SF6 total is routed through the
        # "fugitive1" node into CO2 (already in CO2-equivalents) -- confirm.
        value.append(fgases.loc[["Emissions of HFCs and PFCs - (kt CO2 equivalent) ", "Emissions of SF6 - (kt CO2 equivalent)"], year].sum())
        add_items("fugitive1", "CO2")
    else:
        # Flat category list (no extra level); abs() guards against negative
        # CO2 entries (e.g. sinks) that would break the Sankey rendering.
        for val in s:
            add_items(start, val)
            value.append(
                abs(carbon.loc[val, year]) * carbon_scaling + nitrous.loc[val, year] * nitrous_scaling + methane.loc[val, year] * methane_scaling
            )
            # Final level
            for t in ["CO2", "N2O", "CH4"]:
                add_items(val, t)
                if t == "CO2":
                    value.append(abs(carbon.loc[val, year]) * carbon_scaling)
                elif t == "N2O":
                    value.append(nitrous.loc[val, year] * nitrous_scaling)
                elif t == "CH4":
                    value.append(methane.loc[val, year] * methane_scaling)
# -
# Tabular view of the collected links (indices, readable names, weights).
flows = pd.DataFrame(data={
    'source_id': sources,
    'target_id': targets,
    'source': [labels[s] for s in sources],
    'target': [labels[t] for t in targets],
    'value': value
})
# Styling look-ups keyed by internal label name.
with open("data/link_color.json", "r") as f:
    link_colors = json.load(f)
with open("data/node_color.json", "r") as f:
    node_colors= json.load(f)
with open("data/node_position.json", "r") as f:
    node_positions= json.load(f)
# Each link inherits the colour of its source node.
flows["link_color"] = [link_colors[s] for s in flows.source]
# +
# Build and render the Sankey diagram; label_dict maps internal names to
# display names.
fig = go.Figure(data=[go.Sankey(
    valueformat = ".1f",
    arrangement="snap",
    orientation="h",
    node=dict(
        pad=8,
        thickness=15,
        line=dict(color="black", width=0.5),
        label=[label_dict[l] for l in labels],
        color=[node_colors[l] for l in labels],
    ),
    link=dict(
        source=flows["source_id"],
        target=flows["target_id"],
        value=flows["value"],
        color=flows["link_color"],
    ))])
fig.update_layout(title_text=f"Udledning af drivhusgasser i Danmark {year}", font_size=10)
fig.show(config={"responsive": True, "displayModeBar": False})
# -
# Export the figure as interactive HTML and static SVG/PNG.
fig.write_html("figures/sankey-fgases.html", config={"responsive": True, "displayModeBar": False})
fig.write_image("figures/sankey-fgases.svg", width=1920, height=1080, scale=1)
fig.write_image("figures/sankey-fgases.png", width=1920, height=1080, scale=1)
| Sankey - fgases.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Now You Code 1: Vote or Retire?
#
# Write a program to ask for your age as input, then output whether or not you can vote and whether or not you can retire. Let's assume the voting age is 18 or higher, and the retirement age is 65 or higher.
#
# **NOTE:** This program is making two separate decisions, and thus should have two separate if else statements.
#
# Example Run:
#
# ```
# Enter your age: 45
# You can vote.
# You cannot retire.
# ```
#
# After you get that working, surround your code with `try... except` so that the program handles bad input:
#
# Example run:
#
# ```
# Enter your age: threve
# That's not an age!
# ```
#
# Start out the program by writing your initial todo list (without the `try...except`)
##TODO List
## read the age as an integer
## decide voting eligibility (18+)
## decide retirement eligibility (65+), as an independent check
## handle non-numeric input gracefully
try:
    # int() raises ValueError on non-numeric input, which the except
    # clause below turns into a friendly message.
    age = int(input("Please enter your age: "))
    print(("You are %d years old.") % (age))
    # Voting decision: eligible at 18 or older.
    if age >= 18:
        print("You can vote!")
    else:
        print("You are not old enough to vote.")
    # Retirement decision: a separate, independent check at 65 or older.
    if age >= 65:
        print("You can Retire!")
    else:
        print("You are not old enough to retire yet.")
except ValueError:
    # Reached when the entered value cannot be parsed as an integer.
    print("Please enter a valid input for age!")
| content/lessons/04/Now-You-Code/NYC1-Vote-Or-Retire.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Let's load up the Yelp review dataset,
# an array of JSON structures
# Grab the data and progress bar
# We only need to do this once.
# !pip install bokeh
import codecs
from io import open
# !wget https://storage.googleapis.com/aai17/yelp_dataset.tar
# !tar xfvz yelp_dataset.tar
# !mv dataset/review.json yelp_reviews.json
# -
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
# Render bokeh figures inline in the notebook.
output_notebook()
# +
# Load the raw Yelp reviews (one JSON object per line) and parse them,
# timing how long the whole load takes.
from tqdm import tqdm
from collections import Counter
from datetime import datetime
import json

load_start = datetime.now()
print("Loading...")
with open("yelp_reviews.json", "r", encoding="utf-8") as f:
    reviews = f.read().strip().split("\n")
reviews = [json.loads(review) for review in tqdm(reviews)]
print("Loaded ", len(reviews), "reviews in ", datetime.now() - load_start)
# Show one sample review with its star rating.
print(reviews[0]['text'], "\n\nRating: ", reviews[0]['stars'],"stars")
# +
# Draw a balanced random sample: up to `count` positive (4-5 star) and
# `count` negative (1-2 star) reviews. 3-star reviews are skipped.
count = 100000
import numpy as np
import re

np.random.seed(1)
positive = []
negative = []
all_reviews = np.array(reviews)
np.random.shuffle(all_reviews)

# Normalisation: keep letters and spaces only, lower-cased and trimmed.
notalpha = re.compile('[^a-zA-Z ]')

def tokenize(text):
    return notalpha.sub('', text).lower().strip()

for review in tqdm(all_reviews):
    text = tokenize(review['text'])
    if review['stars'] < 3 and len(negative) < count:
        negative.append(text)
    elif review['stars'] > 3 and len(positive) < count:
        positive.append(text)
    # Stop early once both buckets are full.
    if len(negative) >= count and len(positive) >= count:
        break

print("Selected ",len(positive),"positive and",len(negative),"negative reviews")
# -
# Release the large raw-review structures now that the balanced sample
# (positive/negative) has been extracted.
print("Please stand by...")
reviews, all_reviews = [], []
print("Memory cleared")
# +
from collections import Counter

# Word frequencies per sentiment class, plus the derived log pos/neg ratio
# that measures how discriminating each word is.
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
pos_neg_ratios = Counter()

# Words need |log-ratio| >= polarity_cutoff and at least min_count uses
# to make it into the training vocabulary.
polarity_cutoff = 0.2
min_count = 50

print("Gathering positive words")
for review in tqdm(positive):
    for word in review.split(" "):
        positive_counts[word] += 1
        total_counts[word] += 1

print("Gathering negative words")
for review in tqdm(negative):
    for word in review.split(" "):
        negative_counts[word] += 1
        total_counts[word] += 1

print("Creating influence ratios of frequent words")
# +1 in the denominator avoids division by zero for purely positive words.
for term,cnt in list(total_counts.most_common()):
    if(cnt >= min_count):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = pos_neg_ratio

# Convert ratios to a symmetric log scale (positive words > 0, negative < 0).
# most_common() snapshots the items, so mutating the Counter inside the loop
# is safe; the +0.01 guards against log of ~zero ratios.
for word,ratio in pos_neg_ratios.most_common():
    if(ratio > 1):
        pos_neg_ratios[word] = np.log(ratio)
    else:
        pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))

print("Creating training vocabulary")
review_vocab = set()
for word,p in pos_neg_ratios.most_common():
    if (p >= polarity_cutoff or p <= -polarity_cutoff):
        review_vocab.add(word)
print("Found",len(review_vocab),"words")
# +
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, output_file

def _hist_figure(title, measured, xlabel):
    """Build one 50-bin density histogram in the shared style.

    The four plots below were four near-identical copies of the same
    figure-construction code; factoring it out keeps them in sync.
    """
    fig = figure(title=title, tools="save",
                 background_fill_color="#E8DDCB")
    hist, edges = np.histogram(np.array(measured), density=True, bins=50)
    fig.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
             fill_color="#036564", line_color="#033649")
    fig.legend.location = "center_right"
    fig.legend.background_fill_color = "darkgrey"
    fig.xaxis.axis_label = xlabel
    fig.yaxis.axis_label = 'relative count'
    return fig

# Log pos/neg ratios of the words kept in the training vocabulary.
p1 = _hist_figure("Discriminating Word Distribution",
                  [pos_neg_ratios[word] for word in review_vocab],
                  'log(pos/neg ratio)')

# Raw frequency of every observed word.
p2 = _hist_figure("Raw Word Distribution",
                  [total_counts[word] for word in total_counts],
                  'word frequency')

# Frequencies restricted to words strictly above the min_count cutoff.
p3 = _hist_figure("Min Count Word Distribution",
                  [c for c in total_counts.values() if c > min_count],
                  'word frequency (above cutoff)')

# Raw (unlogged) pos/neg ratios of the frequent words.
p4 = _hist_figure("Pos/Neg Distribution",
                  [positive_counts[term] / float(negative_counts[term]+1)
                   for term, cnt in total_counts.most_common() if cnt >= min_count],
                  'pos/neg ratio')

show(gridplot(p2,p3,p4,p1, ncols=2, plot_width=400, plot_height=400, toolbar_location=None))
# +
# Map every vocabulary word to a column index (and back).
word2index = {}
index2word = {}
for i, word in enumerate(review_vocab):
    word2index[word] = i
    index2word[i] = word

n = len(review_vocab)
samples = len(positive)+len(negative)
all_words = word2index.keys()

# 1-hot bag-of-words features (x) and 2-class 1-hot targets (y):
# column 0 = positive, column 1 = negative.
x = np.zeros((samples, n))
y = np.zeros((samples,2))

print("Creating 1-hot positive encodings")
for idx, review in enumerate(tqdm(positive)):
    for word in review.split(" "):
        if word in all_words:
            x[idx, word2index[word]] = 1
    y[idx, 0] = 1

print("Creating 1-hot negative encodings")
offset = len(positive)
for j, review in enumerate(tqdm(negative)):
    row = offset + j
    for word in review.split(" "):
        if word in all_words:
            x[row, word2index[word]] = 1
    y[row, 1] = 1
# +
from sklearn.model_selection import train_test_split
# 90/10 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)
# +
import tensorflow as tf

# We'll bundle groups of examples during training for efficiency.
# This defines the size of the batch.
BATCH_SIZE = 100
VOCAB_SIZE = len(review_vocab)
EMBEDDING_SIZE = 64
NUM_LABELS = 2
NUM_GPUS = 2
LEARNING_RATE = 0.0005
DISPLAY_STEP = 100
NUM_STEPS = 2000

# The random seed that defines initialization.
SEED = 42

def model(x, prefix='model', reuse=True, is_training=True):
    """Two dense sigmoid layers: vocab-sized input -> embedding -> 2 logits.

    reuse controls TF1 variable sharing so each GPU tower (and the test
    graph) uses the same weights; the first caller passes reuse=False.
    """
    # Define a scope for reusing the variables
    with tf.variable_scope('Model', reuse=reuse):
        nn = tf.layers.dense(x, EMBEDDING_SIZE, activation=tf.nn.sigmoid, name=prefix+'_embedding')
        nn = tf.layers.dense(nn, NUM_LABELS, activation=tf.nn.sigmoid, name=prefix+'_logits')
        # Softmax is applied only to the testing graph; the training loss
        # (softmax_cross_entropy_with_logits) expects raw logits.
        out = tf.nn.softmax(nn) if not is_training else nn
    return out

print('Done')
# +
# Build the function to average the gradients
def average_gradients(tower_grads):
    """Average per-variable gradients across all GPU towers.

    tower_grads is a list (one entry per tower) of (gradient, variable)
    lists as returned by Optimizer.compute_gradients.
    """
    average_grads = []
    # print(tower_grads)
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)

            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension.
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads

print('Done')
# +
# wire everything up
# By default, all variables will be placed on '/gpu:0'
# So we need a custom device function, to assign all variables to '/cpu:0'
# Note: If GPUs are peered, '/gpu:0' can be a faster option
PS_OPS = ['Variable', 'VariableV2', 'AutoReloadVariable']

def assign_to_device(device, ps_device='/cpu:0'):
    """Return a tf.device placement function: parameter-server ops (variables)
    go to ps_device, everything else to device."""
    def _assign(op):
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op in PS_OPS:
            # NOTE(review): callers pass ps_device='/cpu:0', so this yields
            # '//cpu:0'; TF appears to tolerate the doubled slash -- confirm.
            return "/" + ps_device
        else:
            return device
    return _assign

print('Done')
# +
import time
tf.reset_default_graph()
NUM_STEPS=1000
# Place all ops on CPU by default
with tf.device('/cpu:0'):
    tower_grads = []    # one (gradient, variable) list per GPU tower
    reuse_vars = False  # first tower creates the variables, later ones reuse them
    #reuse_vars = True
    num_samples = X_train.shape[0]
    # tf Graph input: the feed holds BATCH_SIZE*NUM_GPUS examples, sliced per tower below
    X = tf.placeholder(tf.float32, [None, VOCAB_SIZE])
    Y = tf.placeholder(tf.float32, [None, NUM_LABELS])
    # Loop over all GPUs and construct their own computation graph
    for i in range(NUM_GPUS):
        # Variables stay on the CPU (parameter server); compute ops go to this GPU
        with tf.device(assign_to_device('/gpu:{}'.format(i), ps_device='/cpu:0')):
            # Split data between GPUs
            _x = X[i * BATCH_SIZE: (i+1) * BATCH_SIZE]
            _y = Y[i * BATCH_SIZE: (i+1) * BATCH_SIZE]
            # Because Dropout have different behavior at training and prediction time, we
            # need to create 2 distinct computation graphs that share the same weights.
            # Create a graph for training
            logits_train = model(_x, reuse=reuse_vars, prefix="yelp")
            # Create another graph for testing that reuses the same weights
            logits_test = model(_x, reuse=True, prefix="yelp", is_training=False)
            # Define loss and optimizer (with train logits, for dropout to take effect)
            loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
                logits=logits_train, labels=_y))
            optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
            grads = optimizer.compute_gradients(loss_op)
            print("GPU",i,"configured")
            # Only the first GPU computes accuracy (on its own slice of the feed)
            if i == 0:
                # Evaluate model (with test logits, for dropout to be disabled)
                correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(_y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            reuse_vars = True
            tower_grads.append(grads)
    # Average the per-tower gradients and apply them once to the shared variables
    tower_grads = average_gradients(tower_grads)
    train_op = optimizer.apply_gradients(tower_grads)
    # Initializing the variables
    init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Keep training until reaching the maximum number of iterations.
    # (The for-loop variable drives the schedule; the original manual
    # `step = 1` / `step += 1` statements were dead code and are removed.)
    for step in range(1, NUM_STEPS + 1):
        # Sample one batch per GPU; each tower consumes its own slice of the feed.
        indices = np.random.choice(num_samples, BATCH_SIZE*NUM_GPUS)
        batch_x = X_train[indices]
        batch_y = y_train[indices]
        # Run optimization op (backprop), timing it for the throughput readout.
        ts = time.time()
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        te = time.time() - ts
        if step % DISPLAY_STEP == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ": Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", Training Accuracy= " + \
                "{:.3f}".format(acc) + ", %i Examples/sec" % int(len(batch_x)/te))
    print("Optimization Finished!")
    graph = tf.get_default_graph()
    # Pull the learned input-to-hidden weight matrix out of the graph; its
    # rows serve as word embeddings for the similarity/visualization cells below.
    t1 = graph.get_tensor_by_name('Model/yelp_embedding/kernel:0')
    embeddings = np.array(sess.run(t1))
    # Evaluate accuracy over the held-out review test set in BATCH_SIZE chunks
    # (the original comment mentioning "mnist test images" was a copy-paste slip).
    print("Testing Accuracy:", \
        np.mean([sess.run(accuracy, feed_dict={X: X_test[i:i+BATCH_SIZE],
        Y: y_test[i:i+BATCH_SIZE]}) for i in range(0, X_test.shape[0], BATCH_SIZE)]))
# +
def get_most_similar_words(focus="love", vocab=None, vectors=None):
    """Rank vocabulary words by embedding similarity to *focus*.

    Args:
        focus: query word.
        vocab: mapping word -> embedding row index; defaults to the
            notebook-global word2index.
        vectors: 2-D array of embeddings, one row per word; defaults to the
            notebook-global embeddings extracted from the trained model.

    Returns:
        The 10 (word, dot-product score) pairs most similar to *focus*, or
        None (after printing a message) when *focus* is not in the vocabulary.
    """
    if vocab is None:
        vocab = word2index
    if vectors is None:
        vectors = embeddings
    if focus not in vocab:
        print("Sorry, word not found")
        return
    # Hoist the focus vector out of the loop instead of re-indexing it per word.
    focus_vec = vectors[vocab[focus]]
    most_similar = Counter()
    for word in vocab:
        most_similar[word] = np.dot(vectors[vocab[word]], focus_vec)
    return most_similar.most_common()[0:10]
get_most_similar_words('yummy')
# +
import matplotlib.colors as colors
# Collect the 500 most positive and 500 most negative words (by sentiment
# ratio) that actually appear in the embedding vocabulary.
words_to_visualize = []
for word, ratio in pos_neg_ratios.most_common(500):
    # Membership test directly on the dict -- `in d.keys()` is redundant.
    if word in word2index:
        words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
    if word in word2index:
        words_to_visualize.append(word)
# +
# Assign each selected word a plot color (green = positive sentiment,
# black = negative) and gather its embedding vector for the t-SNE below.
pos = 0
neg = 0
colors_list = []
vectors_list = []
for word in words_to_visualize:
    # Membership test directly on the dict -- `in d.keys()` is redundant.
    if word in pos_neg_ratios:
        vectors_list.append(embeddings[word2index[word]])
        if pos_neg_ratios[word] > 0:
            pos += 1
            colors_list.append("#00ff00")
        else:
            neg += 1
            colors_list.append("#000000")
print("Pos",pos,"neg",neg)
# -
from sklearn.manifold import TSNE
# Project the high-dimensional embedding vectors down to 2-D for plotting;
# fixed random_state keeps the layout reproducible between runs.
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
print("TSNE visualization ready")
# +
# Interactive scatter plot of the t-SNE layout; each point is one polarized
# word, colored by sentiment. (figure/ColumnDataSource/LabelSet/show are
# presumably imported from bokeh earlier in the notebook -- not shown here.)
p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="vector T-SNE for most polarized words")
source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
                                    x2=words_top_ted_tsne[:,1],
                                    names=words_to_visualize,
                                    color=colors_list))
p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color")
# Label every point with its word, centered slightly above the marker.
word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
                       text_font_size="8pt", text_color="#555555",
                       source=source, text_align='center')
p.add_layout(word_labels)
show(p)
# -
| demos/yelp_demo/notebooks/yelp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # MNIST Multi-Layer Perceptron
#
# In this lecture we will build out a Multi Layer Perceptron model to try to classify hand written digits using TensorFlow (a very famous example).
#
# Keep in mind that no single lecture (or course!) can cover the vastness that is Deep Learning, I would highly suggest reading MIT's [Deep Learning](http://www.deeplearningbook.org/) textbook for more information on these topics!
# ## Get the Data
#
# We will be using the famous MNIST data set of [handwritten digits](http://yann.lecun.com/exdb/mnist/).
#
# The images which we will be working with are black and white images of size 28 x 28 pixels, or 784 pixels total. Our features will be the pixel values for each pixel. Either the pixel is "white" (blank with a 0), or there is some pixel value.
#
# We will try to correctly predict what number is written down based solely on the image data in the form of an array. This type of problem (Image Recognition) is a great use case for Deep Learning Methods!
#
# This data is to Deep Learning what the iris data set is to typical machine learning algorithms.
#
# Let's get the data:
# +
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
# Download (on first run) and load MNIST; one_hot=True makes each label a
# 10-element binary vector rather than a digit.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# -
# ### Data Format
#
# The data is stored in a vector format, although the original data was a 2-dimensional matrix with values representing how much pigment was at a certain location. Let's explore this:
type(mnist)  # inspect the container object returned by read_data_sets
type(mnist.train.images)  # the images themselves are a numpy array
#mnist.train.images[0]
mnist.train.images[2].shape  # each image is stored flat: 784 = 28*28 values
sample = mnist.train.images[2].reshape(28,28)  # restore the 2-D layout for display
import matplotlib.pyplot as plt
# %matplotlib inline
plt.imshow(sample)  # render the sample digit
# ## Parameters
#
# We'll need to define 4 parameters, it is really (really) hard to know what good parameter values are on a data set for which you have no experience with, however since MNIST is pretty famous, we have some reasonable values for our data below. The parameters here are:
#
# * Learning Rate - How quickly to adjust the cost function.
# * Training Epochs - How many training cycles to go through
# * Batch Size - Size of the 'batches' of training data
# Parameters
learning_rate = 0.001  # step size for the Adam optimizer
training_epochs = 15   # full passes over the training set
batch_size = 100       # examples per gradient update
# ### Network Parameters
#
# Here we have parameters which will directly define our Neural Network, these would be adjusted depending on what your data looked like and what kind of a net you would want to build. Basically just some numbers we will eventually use to define some variables later on in our model:
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
n_samples = mnist.train.num_examples  # number of training examples, used to size epochs
# ### TensorFlow Graph Input
x = tf.placeholder("float", [None, n_input])    # flattened 28x28 input images
y = tf.placeholder("float", [None, n_classes])  # one-hot digit labels
# ## MultiLayer Model
#
# It is time to create our model, let's review what we want to create here.
#
# First we receive the input data array and then to send it to the first hidden layer. Then the data will begin to have a weight attached to it between layers (remember this is initially a random value) and then sent to a node to undergo an activation function (along with a Bias as mentioned in the lecture). Then it will continue on to the next hidden layer, and so on until the final output layer. In our case, we will just use two hidden layers, the more you use the longer the model will take to run (but it has more of an opportunity to possibly be more accurate on the training data).
#
# Once the transformed "data" has reached the output layer we need to evaluate it. Here we will use a loss function (also called a cost function) to evaluate how far off we are from the desired result. In this case, how many of the classes we got correct.
#
# Then we will apply an optimization function to minimize the cost (lower the error). This is done by adjusting weight values accordingly across the network. In out example, we will use the [Adam Optimizer](http://arxiv.org/pdf/1412.6980v8.pdf), which keep in mind, relative to other mathematical concepts, is an extremely recent development.
#
# We can adjust how quickly to apply this optimization by changing our earlier learning rate parameter. The lower the rate the higher the possibility for accurate training results, but that comes at the cost of having to wait (physical time wise) for the results. Of course, after a certain point there is no benefit to lower the learning rate.
#
# Now we will create our model, we'll start with 2 hidden layers, which use the [RELU](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) activation function, which is a very simple rectifier function which essentially either returns x or zero. For our final output layer we will use a linear activation with matrix multiplication:
def multilayer_perceptron(x, weights, biases):
    """Forward pass of a two-hidden-layer perceptron.

    Args:
        x: placeholder for the input batch.
        weights: dictionary of weight Variables keyed 'h1', 'h2', 'out'.
        biases: dictionary of bias Variables keyed 'b1', 'b2', 'out'.

    Returns:
        The output-layer activations (linear -- softmax is applied later
        by the loss function).
    """
    # Each hidden layer is an affine transform followed by RELU.
    hidden_one = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    hidden_two = tf.nn.relu(tf.add(tf.matmul(hidden_one, weights['h2']), biases['b2']))
    # Final layer stays linear.
    return tf.matmul(hidden_two, weights['out']) + biases['out']
# ### Weights and Bias
#
# In order for our tensorflow model to work we need to create two dictionaries containing our weight and bias objects for the model. We can use the **tf.variable** object type. This is different from a constant because TensorFlow's Graph Object becomes aware of the states of all the variables. A Variable is a modifiable tensor that lives in TensorFlow's graph of interacting operations. It can be used and even modified by the computation. We will generally have the model parameters be Variables. From the documentation string:
#
# A variable maintains state in the graph across calls to `run()`. You add a variable to the graph by constructing an instance of the class `Variable`.
#
# The `Variable()` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. The initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods.
#
# We'll use tf's built-in random_normal method to create the random values for our weights and biases (you could also just pass ones as the initial biases).
# Randomly initialized parameters for the two hidden layers and the output layer.
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# ## Cost and Optimization Functions
#
# We'll use Tensorflow's built-in functions for this part (check out the documentation for a lot more options and discussion on this):
# Define loss and optimizer
# NOTE: TF >= 1.0 requires keyword arguments here -- the original positional
# call softmax_cross_entropy_with_logits(pred, y) raises
# "Only call `softmax_cross_entropy_with_logits` with named arguments".
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# #### Initialization of Variables
#
# Now initialize all those tf.Variable objects we created earlier. This will be the first thing we run when training our model:
# Initializing the variables
# (tf.initialize_all_variables() is deprecated; use its modern replacement.)
init = tf.global_variables_initializer()
# # Training the Model
#
# ### next_batch()
#
# Before we get started I want to cover one more convenience function in our mnist data object called next_batch. This returns a tuple in the form (X,y) with an array of the data and a y array indicating the class in the form of a binary array. For example:
# next_batch(1) returns a single (image, one-hot label) pair.
Xsamp,ysamp = mnist.train.next_batch(1)
plt.imshow(Xsamp.reshape(28,28))  # un-flatten the image to view it
# Remember indexing starts at zero!
print(ysamp)
# ## Running the Session
# Now it is time to run our session! Pay attention to how we have two loops, the outer loop which runs the epochs, and the inner loop which runs the batches for each epoch of training. Let's breakdown each step!
# +
# Launch the session
sess = tf.InteractiveSession()
# Initialize all the variables
sess.run(init)
# Training Epochs
# Essentially the max amount of loops possible before we stop
# May stop earlier if cost/loss limit was set
for epoch in range(training_epochs):
    # Start with cost = 0.0
    avg_cost = 0.0
    # Convert total number of batches to integer
    total_batch = int(n_samples/batch_size)
    # Loop over all batches
    for i in range(total_batch):
        # Grab the next batch of training data and labels
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Feed dictionary for optimization and loss value
        # Returns a tuple, but we only need 'c' the cost
        # So we set an underscore as a "throwaway"
        _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
        # Compute average loss
        avg_cost += c / total_batch
    print("Epoch: {} cost={:.4f}".format(epoch+1,avg_cost))
print("Model has completed {} Epochs of Training".format(training_epochs))
# -
# ## Model Evaluations
#
# Tensorflow comes with some built-in functions to help evaluate our model, including tf.equal and tf.cast with tf.reduce_mean.
#
# **tf.equal()**
#
# This is essentially just a check of predictions == y_test. In our case since we know the format of the labels is a 1 in an array of zeroes, we can compare argmax() location of that 1. Remember that **y** here is still that placeholder we created at the very beginning, we will perform a series of operations to get a Tensor that we can eventually fill in the test data for with an evaluation method. What we are currently running will still be empty of test data:
# Test model: compare the predicted digit (argmax of logits) against the
# one-hot label's argmax, element-wise over the batch.
correct_predictions = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
print(correct_predictions[0])
# In order to get a numerical value for our predictions we will need to use tf.cast to cast the Tensor of booleans back into a Tensor of Floating point values in order to take the mean of it.
correct_predictions = tf.cast(correct_predictions, "float")
print(correct_predictions[0])
# Now we use the tf.reduce_mean function in order to grab the mean of the elements across the tensor.
accuracy = tf.reduce_mean(correct_predictions)
type(accuracy)
# This may seem a little strange, but this accuracy is still a Tensor object. Remember that we still need to pass in our actual test data! Now we can call the MNIST test labels and images and evaluate our accuracy!
mnist.test.labels
mnist.test.images
# The eval() method allows you to directly evaluate this tensor in a `Session` without passing it explicitly:
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
# 94% not too shabby! But this actually isn't anywhere near as good as it could be. Running for more training epochs with this data (around 20,000) can produce accuracy around 99%. But we won't do that here because that will take a very long time to run!
# # Great Job!
#
# ### Extra Credit: See what happens if you try to make this model again with more layers!
| udemy_ml_bootcamp/Deep Learning/MNIST with Multi-Layer Perceptron.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + tags=[]
raw_data = """byr:1985
eyr:2021 iyr:2011 hgt:175cm pid:163069444 hcl:#18171d
eyr:2023
hcl:#cfa07d ecl:blu hgt:169cm pid:494407412 byr:1936
ecl:zzz
eyr:2036 hgt:109 hcl:#623a2f iyr:1997 byr:2029
cid:169 pid:170290956
hcl:#18171d ecl:oth
pid:266824158 hgt:168cm byr:1992 eyr:2021
byr:1932 ecl:hzl pid:284313291 iyr:2017 hcl:#efcc98
eyr:2024 hgt:184cm
iyr:2017 pid:359621042
cid:239 eyr:2025 ecl:blu byr:1986 hgt:188cm
eyr:2027 hgt:185cm hcl:#373b34 pid:807766874 iyr:2015 byr:1955
ecl:hzl
iyr:2017 hcl:#7d3b0c hgt:174cm
byr:1942 eyr:2025 ecl:blu pid:424955675
eyr:2026 byr:1950 hcl:#ceb3a1
hgt:182cm
iyr:2016 pid:440353084 ecl:amb
hcl:a4c546
iyr:1932 pid:156cm eyr:2034 hgt:193 ecl:zzz byr:2025
hcl:#ceb3a1 eyr:2020 pid:348696077 hgt:163cm
ecl:hzl
byr:1921 iyr:2016
ecl:gmt eyr:2031 iyr:2018 byr:1971 hgt:152in pid:454492414
hcl:z
hcl:#341e13 byr:1921 iyr:2020
pid:072379782 eyr:2022 hgt:166cm cid:253 ecl:brn
ecl:blu hgt:75in byr:1954 eyr:2026 iyr:2012 hcl:#623a2f pid:328598886
byr:2004 eyr:2035 hcl:#7d3b0c pid:359128744 iyr:2020 hgt:65cm
ecl:#70f23f
eyr:1988
pid:171cm byr:2003
iyr:1984
cid:50
hcl:z hgt:66cm ecl:#7a4c6e
pid:9632440323 eyr:1964 hgt:63cm
ecl:#fab0c5 hcl:z iyr:1945 byr:1986
pid:936403762 ecl:#337357 byr:1997
cid:196 iyr:2020
eyr:2030 hgt:165cm
hcl:#7d3b0c
byr:1931 pid:488791624 hgt:169cm ecl:blu
eyr:2029 hcl:#fffffd iyr:2013
hcl:#733820 hgt:76in pid:517689823
eyr:2028 byr:1988
ecl:brn iyr:2016
eyr:2023 hcl:#fffffd hgt:190cm iyr:2015 ecl:brn pid:739536900 byr:1951
ecl:brn
byr:1986 cid:262 hcl:#efcc98 pid:880203213 hgt:185cm iyr:2018 eyr:2029
pid:181cm hgt:113 hcl:z ecl:#2c2d2c iyr:1961 byr:2021 eyr:2031
hcl:#ceb3a1 iyr:2020
byr:1977
hgt:192cm
pid:338237458 eyr:2030 ecl:amb
iyr:1953 byr:2025 hgt:66cm eyr:1932
pid:181cm
ecl:#6f0b15 hcl:f79cb7
cid:109
hcl:#6b5442 pid:164cm ecl:blu
hgt:176cm byr:2015
iyr:2010 eyr:2029
eyr:2035
pid:085002665 ecl:#f88074 iyr:2018 hcl:#602927
hgt:169cm
byr:1958
hcl:z
pid:0468194841 iyr:2016 eyr:2007
hgt:152cm
ecl:#1c7a89 cid:124
hcl:z pid:233430735 byr:2021 eyr:2026
iyr:1953 ecl:#64769d hgt:184
hgt:70cm pid:156397147
iyr:2014 ecl:#d6ada0
byr:2030
hcl:#cfa07d
ecl:amb
byr:1990
iyr:2017 hgt:164cm hcl:10f33a
cid:293 eyr:2020 pid:332276985
pid:163252726 eyr:2026
hgt:163cm
iyr:2011 hcl:#efcc98
ecl:hzl byr:1936
hgt:157cm iyr:2019 pid:078770050 hcl:#efcc98 byr:1967 eyr:2030
ecl:gry cid:190
hgt:184cm ecl:amb pid:851379559 hcl:#ceb3a1 byr:1946 eyr:2022
iyr:2017 cid:280
hgt:171cm byr:1942 pid:830156471 hcl:#cfa07d ecl:gry eyr:2032
iyr:2022
byr:2013 ecl:#67cbe8 eyr:2024
pid:242908367
hgt:76cm
iyr:2025
hcl:796bda
ecl:amb iyr:2019
byr:1945 eyr:2021 hcl:#602927 pid:550065206
hgt:72in ecl:brn byr:1956 pid:253685193 iyr:2017 eyr:2023
hcl:#6b5442
eyr:2032 iyr:2019
hgt:176cm
ecl:oth pid:800237895 hcl:#888785 byr:1979
eyr:2026 iyr:2020 cid:226 pid:882830512
hcl:#866857 byr:1929 ecl:amb
hgt:60in
hcl:#cfa07d ecl:oth
iyr:2015 pid:807837948 byr:1966 eyr:2030 hgt:191in
byr:1969 iyr:2012 eyr:2024
cid:244 ecl:hzl hcl:#18171d pid:344160556
eyr:2020 pid:718422803
hcl:#18171d
hgt:181cm
byr:1925 ecl:amb
iyr:2019
byr:1943 pid:740807220 hgt:72in ecl:amb
iyr:2013 eyr:2022
hcl:#cfa07d
hcl:#733820
byr:1986 iyr:2016 hgt:184cm cid:333
pid:768188726 ecl:oth eyr:2030
eyr:2022 byr:1996 hcl:#341e13 ecl:hzl iyr:2015 hgt:160cm
pid:516401532
hgt:182cm ecl:grn pid:336742028 iyr:2014 hcl:#34f021 byr:1967
eyr:2029
byr:2030
hgt:142 iyr:2029 eyr:2040 hcl:426fc5
cid:312
pid:169cm
ecl:#069ff7
hgt:169cm ecl:gry hcl:#6b5442 iyr:2012 byr:1949 pid:131835020 eyr:2022
hgt:70cm iyr:2012
eyr:2037
hcl:64fd76
cid:175 pid:4880649770 ecl:grn byr:2029
iyr:2013 hcl:#7d3b0c eyr:2024 hgt:190cm pid:659772377 cid:226 ecl:oth byr:1958
ecl:lzr hgt:163cm pid:013605217
byr:2000
eyr:2020
hcl:z iyr:2024
cid:131 pid:896076106
hcl:#c0946f byr:1930
hgt:162cm eyr:2023 ecl:oth iyr:2017
byr:1935 iyr:2012
pid:942509879
ecl:amb
hgt:185cm cid:152 eyr:2024 hcl:#866857
ecl:#e490a3 hcl:4813a2 hgt:176cm pid:778369210 iyr:2020
eyr:2035 byr:2020
byr:2006 ecl:amb pid:148409219
hgt:189cm
eyr:2021 hcl:z iyr:2028
hgt:188in hcl:#9ed525
iyr:2018 ecl:grn eyr:2021
pid:065515632 byr:2012
cid:109 hgt:167cm
pid:545112664 ecl:grn hcl:#a62fea eyr:2026
iyr:2012
byr:1921
pid:174997024
iyr:2012
eyr:2030
ecl:grn
hgt:150cm
byr:1997
hcl:#866857
pid:451921339
hgt:181cm
hcl:#888785 iyr:2017 eyr:2026 byr:1936
ecl:hzl
hgt:187in
hcl:#866857 ecl:grn pid:623919686 eyr:2028 iyr:2011
byr:2016
byr:2001
ecl:gry eyr:2023 pid:324948416
hcl:ef16f8 cid:139 hgt:184in iyr:2026
byr:1954 hcl:#341e13 eyr:2023 pid:129321944 iyr:2012
hgt:183cm
ecl:amb
hgt:164cm pid:596870080
ecl:hzl eyr:2021 iyr:2017 hcl:#a97842
byr:1951
iyr:2013 byr:1944 hcl:#cfa07d
hgt:168cm cid:72 pid:160531632
ecl:grn
iyr:2012 pid:900043442 hcl:#ceb3a1 cid:124 byr:1941
ecl:blu hgt:156cm
eyr:2025
eyr:2021 hgt:61in iyr:2020 ecl:grn byr:1933
byr:1971 cid:175
eyr:2028 hcl:#efcc98 iyr:2013 hgt:170cm
pid:225213589
pid:147112660 hcl:#ceb3a1 eyr:2029 hgt:159cm ecl:grn iyr:2014
byr:1967
iyr:2015 pid:502975636 hgt:71in byr:1994
hcl:#18171d ecl:amb eyr:2029
byr:1948 hcl:#b6652a hgt:171in pid:181cm iyr:2019 ecl:grt cid:87
pid:859849571 ecl:amb hcl:#6b5442
hgt:193cm byr:1980
iyr:2017
eyr:2020
cid:125 pid:508147848
hcl:06ea75 iyr:1997 byr:2010 ecl:#c707f7 eyr:1970 hgt:161
eyr:2020 cid:326 byr:1989 ecl:gry hgt:160cm hcl:#cc080c pid:319135853 iyr:2010
ecl:utc
pid:531595917 hgt:180cm byr:1987
eyr:2024 hcl:#cfa07d iyr:2025
ecl:gry byr:2007
eyr:2028
iyr:2025
pid:6072964414 hgt:59cm hcl:#888785
pid:791025828 ecl:hzl hgt:178cm
iyr:2017
hcl:#733820
byr:1960 eyr:2021 cid:66
byr:1991 iyr:1934
cid:304 hgt:183cm ecl:grn
pid:408294229
eyr:2027 hcl:#623a2f
ecl:blu hgt:181cm eyr:2024 iyr:2010
pid:633234602 hcl:#2ce009
byr:1985
hcl:#c0946f hgt:192cm
iyr:2012 pid:120684397 ecl:grn eyr:2027
byr:1974
eyr:2026
pid:068304960 hgt:190cm byr:1925 iyr:2020 ecl:oth
hcl:#733820
hgt:168cm cid:307 iyr:2014 byr:1981 ecl:hzl pid:898831724 eyr:2026
hgt:73cm
eyr:2038
byr:1980 ecl:gry iyr:2027 pid:678846912 hcl:z
hgt:150cm cid:261 eyr:2021
hcl:z pid:159cm iyr:2014 ecl:hzl
byr:1955
pid:#172650 ecl:gry eyr:2040 hcl:z iyr:2013 hgt:169cm byr:2008 cid:290
iyr:2017 byr:1998
hcl:#ceb3a1 pid:274178898 eyr:2027 ecl:brn
hgt:183cm
eyr:2024 cid:183 ecl:grn
byr:1946
hgt:63in hcl:#6b5442 iyr:2017
hgt:97 byr:1990
iyr:2019
ecl:grn
pid:587580330
hcl:#341e13 eyr:2022
ecl:oth
pid:441517075 hcl:#c0946f iyr:2015 hgt:188cm eyr:2024 byr:1920
hgt:191in pid:185cm iyr:1993
hcl:93033d
eyr:2034 ecl:dne
pid:591478424 ecl:grn hcl:#888785
byr:1929 eyr:2023 hgt:173cm iyr:2017
iyr:1954
hgt:63cm
hcl:bdf2e0 ecl:amb pid:#912f46
byr:1956 iyr:2012 hgt:73in pid:986643426
ecl:blu
cid:235 eyr:2025
hcl:#cfa07d
cid:320 byr:1930
hgt:172cm
ecl:oth eyr:2024 iyr:2019
byr:1935 hgt:182cm pid:22794407 hcl:1b96fb eyr:1961 iyr:1941 ecl:#5e80cd cid:70
iyr:2020 eyr:2021
ecl:amb
hgt:59in pid:594829025 hcl:#93092e
byr:1976
hcl:#a97842 eyr:2030
byr:1937 iyr:2018 cid:295 ecl:oth
hgt:166cm pid:282634012
hgt:171cm hcl:#623a2f byr:1956
pid:068178613 cid:214
iyr:2012 eyr:2026 ecl:brn
byr:1921
hgt:161cm hcl:#888785
ecl:brn pid:010348794
eyr:2023 iyr:2011
hcl:#a97842 iyr:2010
byr:1955 eyr:2024
pid:473791166
ecl:brn
hgt:175cm
eyr:2028 ecl:grn pid:186196675 byr:1945 hgt:155cm cid:349
iyr:2011 hcl:#6b5442
hgt:161cm eyr:2030 cid:221
pid:994494879 hcl:#733820 iyr:2012 ecl:blu
byr:1957
eyr:1993 iyr:2022 hcl:z byr:2020 pid:013428192 hgt:62cm
ecl:dne
hgt:178cm eyr:2029 hcl:#733820 byr:1962 iyr:2017 ecl:blu pid:567713232
hcl:#fffffd
byr:1928 pid:390162554
eyr:2030 cid:79 hgt:150cm ecl:amb iyr:2019
eyr:2030 cid:320 hgt:171cm hcl:#888785 pid:540720799 ecl:amb iyr:2012 byr:1979
byr:1921
ecl:oth pid:204986110 eyr:2023 hgt:154cm iyr:2017 hcl:#341e13 cid:126
eyr:2020 cid:175 ecl:dne byr:1983 iyr:2016 hcl:#c0946f hgt:65cm
hgt:191cm
iyr:2010 cid:295 byr:1984 eyr:2025 hcl:#cfa07d pid:799775698
ecl:amb
iyr:2020 cid:278 hcl:#c0946f byr:1970 pid:773144393 eyr:2024 hgt:180cm
hgt:176cm
byr:1963
pid:252396293 iyr:2012 ecl:brn hcl:#ceb3a1
eyr:2030
pid:545130492
byr:2030 iyr:2020
hgt:190cm eyr:2034 ecl:blu hcl:#fffffd
hcl:#a97842 pid:032201787 hgt:190cm ecl:gry
eyr:2028 iyr:2012 byr:1994
hcl:#a97842 pid:064591809
ecl:hzl byr:1927 hgt:165cm
iyr:2011
eyr:2028
cid:77
byr:2005
hgt:125 iyr:1923 ecl:#605d73
eyr:2022 pid:90184674 hcl:z
cid:301 pid:106820988
iyr:2018
hcl:#cfa07d eyr:2029
byr:1993
hgt:193cm ecl:grn
hcl:#623a2f
cid:118
ecl:oth pid:75827285
hgt:189cm iyr:2010
eyr:2030 byr:1976
ecl:blu iyr:2023 eyr:1996
hgt:66cm cid:251 byr:1972 hcl:z
pid:557774244
byr:2002
hgt:169cm pid:629420566 eyr:2026 ecl:grn hcl:#341e13
cid:166 iyr:2019
iyr:2026 hcl:9b83a1 eyr:1979
ecl:dne hgt:111 pid:176cm
pid:#89718c byr:2026
hcl:2ca5c7 hgt:142 eyr:2040
ecl:lzr iyr:2029
ecl:grn
byr:2022 eyr:2020
pid:7024869 hgt:123 iyr:2019 hcl:z
hcl:#733820 hgt:155cm ecl:grn iyr:2020 byr:1955 eyr:2028
pid:217362007
hcl:#18171d ecl:gry
byr:1971 hgt:193cm
eyr:2020
pid:352009857 iyr:2013
byr:2018
hgt:175in ecl:xry iyr:2015
eyr:2036
cid:171 pid:6132398 hcl:#efcc98
pid:839955293
byr:1928 hcl:#fffffd ecl:hzl iyr:2011
hgt:162cm eyr:2023
hgt:175cm pid:482827478 eyr:2028
hcl:#6b5442 ecl:blu byr:1932 iyr:2010
iyr:2020 hcl:#866857
ecl:brn byr:1933 cid:269 pid:003931873 hgt:188cm
eyr:2022
byr:1981 hcl:#fffffd hgt:160cm cid:311 ecl:brn eyr:2025
pid:930857758 iyr:2014
hcl:#cfa07d hgt:73in
ecl:gry
pid:383281251
iyr:2013 byr:1934 eyr:2026
byr:1988 eyr:2026 pid:458002476
iyr:2017
hgt:175cm ecl:amb
eyr:1987
byr:2020 pid:299341304
hcl:#341e13 iyr:1935 cid:125
hgt:168cm
ecl:gry
iyr:2014 hcl:#b6652a pid:445799347
hgt:188cm byr:1960
eyr:2030 cid:290 ecl:amb
eyr:2023
hgt:75cm hcl:#733820 cid:195 byr:1933
ecl:amb pid:062770586 iyr:2019
hgt:168cm
eyr:2021
pid:725299968 ecl:grn byr:1932
iyr:2016 hcl:#888785
hgt:161cm hcl:#ceb3a1 byr:1962 eyr:2026 iyr:2013 ecl:amb pid:695426469 cid:227
ecl:dne hcl:#ceb3a1 iyr:2013 eyr:2022
pid:434786988 byr:1956 hgt:183cm
pid:697500517
byr:1968 hgt:169cm hcl:#fffffd ecl:grn cid:143
iyr:2010
eyr:2027
byr:2029 ecl:amb hgt:175in iyr:2015 hcl:#ceb3a1
pid:39839448
eyr:2021 cid:105
pid:0985607981 ecl:hzl iyr:2012
eyr:2021 byr:2024 hcl:5cad22
hgt:190cm
hcl:#b6652a hgt:178cm cid:222 byr:1992 ecl:grn
iyr:2011 pid:419544742
iyr:2019 byr:1960 ecl:hzl eyr:2021 hgt:184cm cid:66 hcl:#866857 pid:412920622
eyr:2025 hcl:#888785 iyr:2018 byr:1956 pid:698098389 ecl:grn hgt:173cm
ecl:blu byr:1935
pid:354892542 hgt:161cm
iyr:2018
eyr:2021 hcl:#b6652a
ecl:oth cid:287 iyr:2028 byr:1953 eyr:2027 hcl:#7d3b0c hgt:151cm
pid:211411839
iyr:2018 byr:1934 hcl:#a97842
pid:859748861
ecl:oth hgt:175cm eyr:2025
byr:1930 iyr:2018 eyr:2022
hgt:175cm
hcl:#292092
ecl:brn pid:987163365
hgt:167in hcl:#888785 eyr:2040 pid:4646402867 byr:2013 iyr:1941 ecl:#389aec
ecl:hzl hcl:#602927
hgt:168cm eyr:2026
cid:235 iyr:2016
byr:1942
iyr:1975 pid:11337832 ecl:#a25273 hgt:151 byr:2017
eyr:1979
hgt:71cm
byr:2003 hcl:7e7da7 pid:151cm ecl:#a8afb3 iyr:1937
eyr:2021 hgt:74in hcl:#cfa07d iyr:2014 byr:1932
pid:641867677 ecl:grn
ecl:gry hgt:185cm pid:556229206 iyr:2013
byr:1984
hcl:#fffffd eyr:2028
eyr:2020 byr:1989
ecl:grn pid:618876158 hcl:z
hgt:176cm iyr:2025
eyr:2025 byr:2001 hcl:#cdb7f9
pid:377402126 ecl:hzl hgt:184cm iyr:2019
byr:1939 hgt:180cm eyr:2029 ecl:oth hcl:#733820 iyr:2016
pid:733456875
pid:883743276
hcl:#7d3b0c eyr:2022 ecl:blu
byr:1928 hgt:150cm cid:150 iyr:2013
hgt:60cm ecl:#43f03d eyr:1994 byr:1975
iyr:1980 pid:169cm
hgt:104 byr:2029 eyr:2040 hcl:64a9b2
pid:83898860
iyr:1990
ecl:#938bbe
pid:284399238 ecl:gry hcl:#888785 iyr:2019 hgt:168cm byr:1944
eyr:2022
hcl:#733820 pid:486515752 ecl:grn hgt:188in byr:1941 iyr:2017 eyr:2005
iyr:2010
byr:1978 hgt:160cm eyr:2003
ecl:oth
hcl:#efcc98 pid:584668011
byr:1944 ecl:gry pid:962700562 iyr:2011 hcl:#866857 eyr:2022
hgt:191cm
hcl:z pid:758583213 iyr:1941 ecl:gry eyr:2007
hgt:67 byr:2022
cid:215
byr:1988
ecl:#ae2a9b hcl:#fe9d14 iyr:2012
pid:411550516 hgt:169cm eyr:2038
pid:400034647 byr:1927 hgt:165cm
iyr:2017 ecl:brn eyr:2024 cid:144 hcl:#341e13
hcl:#733820 hgt:153cm eyr:2027
byr:1935 pid:217121064 cid:120 iyr:2012 ecl:grn
hgt:168cm hcl:#866857 iyr:2012 pid:1527348755
byr:1946 eyr:2028 cid:184 ecl:amb
hcl:#a97842
byr:1967
hgt:152cm eyr:2030
ecl:blu
pid:929661915 iyr:2018
pid:671485026
hgt:188cm byr:1974 iyr:2015 ecl:grn cid:268 eyr:2021 hcl:#c0946f
pid:789877199 iyr:2011 cid:219 eyr:2029
ecl:oth byr:1991
hcl:#866857 hgt:154cm
cid:137 pid:059579902
eyr:2020 byr:1952
hcl:#18171d iyr:2020
hgt:172cm ecl:oth
pid:182cm iyr:1997 byr:2012
eyr:2034
hgt:161in ecl:#528abf hcl:b7d2fe
hgt:192cm ecl:oth iyr:2017 pid:264538307 byr:1994 cid:285
hcl:#18171d eyr:2030
hcl:#efcc98
pid:38036608
eyr:2010
iyr:2026
byr:2027
cid:239 ecl:zzz hgt:74
iyr:2012
eyr:2022 hgt:178cm
hcl:#888785
ecl:hzl
byr:1998 pid:000080585
pid:719620152 hcl:#b6652a cid:133
ecl:hzl
byr:1983 iyr:2012 hgt:175cm
eyr:2024
cid:155 eyr:1977 iyr:2019 ecl:#28de8b byr:1941 hcl:#602927 hgt:173cm pid:493773064
iyr:2010
pid:842124616 ecl:hzl eyr:2025 cid:146 hcl:#733820 hgt:166cm byr:1987
hcl:fd4dcf byr:2006 iyr:2011 pid:820797708 eyr:2020 hgt:189cm
ecl:gry
iyr:1971 pid:22107293 hcl:#5b3f01 cid:257
ecl:hzl
hgt:60cm eyr:2000 byr:1965
byr:1932 eyr:2028
hcl:#6b5442 ecl:amb pid:947149686
iyr:2015 hgt:187cm
hcl:#a97842
cid:260
hgt:167cm eyr:2027 byr:1973 ecl:oth pid:741678753 iyr:2016
pid:334234443 ecl:gry hcl:#18171d eyr:2020
iyr:2016 hgt:159cm byr:1926
hgt:118 eyr:1929 iyr:2013
pid:987139064
cid:196
hcl:#cfa07d ecl:#f72601 byr:1929
byr:1924
pid:623185744 iyr:2012 cid:341 hcl:#602927 hgt:192cm eyr:2022
iyr:2012 byr:1971 hgt:168cm cid:146 pid:673038025 hcl:#866857 eyr:2020 ecl:hzl
eyr:2023 iyr:2017
pid:205596613 cid:298 hcl:#341e13
hgt:169cm ecl:oth
byr:1996
ecl:blu pid:775831730
eyr:2029 iyr:1924 hgt:168cm hcl:z
byr:2023 hgt:181cm
pid:4365105095 iyr:2021
ecl:lzr eyr:2024 hcl:z
hgt:184cm byr:1987 pid:175cm ecl:#83a5fa eyr:2023
eyr:2021 pid:422371422 ecl:oth iyr:2015 hcl:#866857
byr:1963 hgt:174cm
pid:006970943
hcl:#2f22ef iyr:2020
ecl:gry
byr:1922
eyr:2024 hgt:163cm
cid:160 byr:2015
eyr:2038 hcl:z ecl:grt hgt:166 iyr:2026
pid:#14978f
hgt:178cm eyr:2021 iyr:2016 pid:471529794
hcl:#b6652a cid:192
ecl:grn byr:1970
iyr:2015 ecl:brn hcl:#602927 hgt:187cm
pid:729284172
eyr:2024 byr:1932
cid:153
ecl:dne eyr:2005
pid:178cm iyr:2028
byr:2029 hgt:160in hcl:482a92
byr:1995 iyr:2012 hcl:#866857 hgt:159cm
eyr:1950 ecl:gry pid:183cm
pid:875885919
hgt:159cm
iyr:2011
ecl:gry byr:1988 hcl:#341e13 eyr:2028
pid:2390267705 hcl:#7d3b0c byr:2009
eyr:2017 ecl:grn hgt:183cm iyr:2015
ecl:brn eyr:2029 hcl:#866857 iyr:2020 hgt:180cm byr:2001
pid:668021168
hcl:#c0946f
eyr:2024 ecl:amb pid:013487714 byr:1965 hgt:172cm cid:320 iyr:2020
eyr:2025 pid:115479767 hcl:#866857 ecl:oth
hgt:163cm iyr:2010 byr:1999
byr:1967 iyr:2011 cid:112 hcl:#733820
eyr:2040 ecl:grt
hgt:66 pid:804536366
hgt:163 pid:1764836278 eyr:2035
iyr:2021
hcl:z ecl:#f1bb27
hcl:#efcc98 hgt:176cm byr:1994 pid:590539278 ecl:grn iyr:2011 eyr:2021
iyr:2017 eyr:2024 hgt:167cm hcl:#b62e29 pid:495674801
byr:1970 ecl:brn
hgt:168cm pid:993244641
byr:1968
eyr:1926
hcl:#b6652a ecl:brn
iyr:2023
hgt:63in hcl:z pid:594070517
eyr:2021 ecl:oth
iyr:2017
byr:2000
eyr:2030 pid:272955042 cid:319 iyr:2011 ecl:amb byr:1999 hcl:#888785 hgt:158cm
eyr:2025
pid:814305816 byr:1945 ecl:brn hgt:162cm iyr:2018
hcl:#a97842
cid:229
byr:1996 eyr:2026 pid:582584802 hcl:#c0946f iyr:2020 ecl:grn
hgt:162cm
eyr:2027
hgt:155cm byr:1925
hcl:#888785
cid:182
iyr:2014 ecl:brn
pid:250884352
hgt:173cm cid:135
iyr:2017 pid:661330507 byr:1950 eyr:2020 ecl:gry hcl:#18171d
pid:208932950
eyr:2030 hgt:179cm
iyr:2013
ecl:oth
byr:1981
cid:58 hcl:#6b5442
hcl:#f183e7 iyr:2014
hgt:159cm pid:614579850 ecl:gry eyr:2029
cid:186 byr:1962
eyr:2027 hcl:#db3405 byr:1938 pid:194516631 cid:167 hgt:177cm ecl:oth
hgt:68in hcl:#733820 pid:228644594 eyr:2030 ecl:gry iyr:2010 cid:334 byr:1951
iyr:2017 hcl:#341e13
pid:#6a28c9 hgt:154cm ecl:gry
byr:1966 eyr:2023
pid:250155574 cid:84
hgt:157cm ecl:grn byr:1937 iyr:2017 eyr:2024 hcl:#b6652a
pid:831823039 eyr:2028 iyr:2015 ecl:gry
hgt:192cm cid:137 byr:1922
hcl:#6b5442
hgt:193cm byr:1941 eyr:2024 cid:56
hcl:#623a2f ecl:amb
pid:351293754 iyr:2016
byr:1947 iyr:2012 ecl:hzl hcl:#602927 eyr:2028 pid:252010138 hgt:152cm
hcl:#a97842 pid:801192586 ecl:hzl iyr:2018 hgt:193cm byr:1928 cid:323
eyr:2028
hgt:151cm
pid:756347561 ecl:hzl
eyr:2024 cid:161
iyr:2016 hcl:#623a2f
byr:2002
pid:648012871 iyr:2015 ecl:blu
eyr:2025 hcl:#623a2f byr:1973 hgt:177cm
byr:1999 hcl:#ceb3a1 cid:345 eyr:2025 ecl:#b29a96 pid:093304949
iyr:2017 hgt:93
hcl:#b6652a
iyr:2018 ecl:grn
byr:1951 pid:077278028 eyr:2024 hgt:62in
hgt:164cm pid:410770618 byr:1958
iyr:2019
eyr:2030
ecl:gry hcl:#fffffd cid:293
ecl:grt
eyr:2039
hcl:z pid:188cm byr:2022
iyr:2027 hgt:76cm
ecl:grn iyr:2012 hgt:150cm eyr:2024
byr:1926 pid:954310029 cid:64
hcl:#fffffd
ecl:oth eyr:2027 pid:091152959 hgt:180cm hcl:#ceb3a1 iyr:2015 cid:350
byr:1924
iyr:2017 hcl:#49a793 eyr:2021 cid:144 byr:1966
pid:717543257
hgt:161cm
ecl:hzl
eyr:2025 ecl:brn hgt:60in pid:391973520 byr:1928 cid:77
iyr:2012
hcl:#602927
iyr:2013 hgt:161cm pid:784483994 byr:1991
hcl:#cfa07d
eyr:2024 ecl:grn
ecl:hzl iyr:1967 byr:2009 cid:265 hgt:180in pid:168cm
eyr:1966
eyr:2024 iyr:2019 pid:534453983
byr:2028 ecl:oth hcl:#341e13 hgt:193cm
eyr:2029 iyr:2010 hcl:#623a2f ecl:gry hgt:152cm pid:572128647
byr:1996
iyr:2014 byr:1981 cid:176
ecl:grn hgt:183cm pid:974469723 eyr:2027
eyr:2029 pid:233353682 byr:1968
ecl:gry hgt:181cm iyr:2011
hcl:#efcc98
hgt:61 iyr:2005 cid:203 ecl:gmt pid:157cm hcl:z
byr:2013
iyr:2020
byr:1923 ecl:blu eyr:2026 pid:069770502 hgt:69cm
hcl:z
byr:1997 hgt:160cm
hcl:z iyr:2021 eyr:1920 pid:9374226872
ecl:hzl eyr:2024 pid:537492791 hgt:186cm byr:1952
hcl:#cfa07d
iyr:2020
hgt:73cm byr:1974
ecl:xry iyr:2016 cid:133
hcl:e741f5 pid:186cm
pid:161cm
byr:1950
eyr:2028 ecl:hzl hcl:#7d3b0c
iyr:2014 hgt:158cm
ecl:#2c491e
hcl:f8fe13 byr:2022
hgt:137 iyr:1948
eyr:2040 pid:#959a0f
byr:1923 hgt:70in
pid:904825661 hcl:#b6652a iyr:2010 eyr:2020
ecl:oth
iyr:2013
ecl:blu pid:858020233 byr:1950 hgt:61in
hcl:#18171d
iyr:2016
ecl:amb pid:613754206 byr:1975 hgt:164cm eyr:2025
byr:1938
iyr:2017 hcl:#623a2f cid:191 eyr:2027 hgt:174cm pid:287108745 ecl:amb
iyr:2025 hcl:#623a2f byr:2019 hgt:170cm
cid:233 pid:55323151 ecl:amb eyr:2037
ecl:amb
hgt:177cm hcl:#b6a3ce eyr:2025 byr:1967 pid:506927066
iyr:2018 cid:93
byr:1964 hgt:173cm eyr:2030 cid:106 pid:587635596 iyr:2012
hcl:#fb5993
ecl:hzl
ecl:lzr pid:190cm hcl:44746d eyr:1955 hgt:66cm iyr:1990 byr:2003
ecl:brn byr:1968 cid:216 hgt:181in hcl:#b6652a iyr:2016 eyr:2020 pid:0208311541
ecl:hzl hgt:181cm
eyr:1977 byr:2018 pid:527754216 hcl:#c0946f
ecl:grn hcl:#efcc98
byr:1935 eyr:2025 iyr:2018 hgt:65in pid:396444938 cid:293
hgt:64in ecl:oth
hcl:#18171d
pid:105602506 byr:1973
eyr:2022
iyr:2014
eyr:2039 hgt:64
ecl:#ab45a8 byr:2009
iyr:2025 pid:182cm hcl:d1614a cid:103"""
# Passports are separated by blank lines; fields within one passport may span
# several lines, so flatten each record onto a single space-separated line.
data = raw_data.split("\n\n")
data = list(map(lambda passport: passport.replace("\n", " "), data))
def line_to_dict(line: str):
    """Parse one space-separated record of key:value pairs into a dict.

    Values made up purely of decimal digits are converted to int; every
    other value is kept as a string.
    """
    pairs = (field.split(":") for field in line.split(" "))
    return {key: int(value) if value.isdecimal() else value
            for key, value in pairs}
data = list(map(line_to_dict, data))
class Passport:
    """A passport record built from keyword fields.

    All eight known fields are stored as attributes; fields absent from
    the input are None. `cid` is optional; the other seven are required
    for the record to be considered valid.
    """

    def __init__(self, **kwargs):
        self.byr = kwargs.get("byr")  # birth year
        self.iyr = kwargs.get("iyr")  # issue year
        self.eyr = kwargs.get("eyr")  # expiration year
        self.hgt = kwargs.get("hgt")  # height
        self.hcl = kwargs.get("hcl")  # hair color
        self.ecl = kwargs.get("ecl")  # eye color
        self.pid = kwargs.get("pid")  # passport id
        self.cid = kwargs.get("cid")  # country id (optional)

    def __repr__(self):
        # Idiom fix: iterate __dict__.items() instead of zipping the dict's
        # keys with its values (same pairs, clearer intent).
        fields = " ".join(f"{k}={v}" for k, v in self.__dict__.items())
        return "<Passport {}>".format(fields)

    def is_valid(self):
        """Return True when every required field (all but cid) is present."""
        return all(value is not None
                   for key, value in self.__dict__.items() if key != "cid")
# Materialise Passport objects, then keep only the fully-populated ones
# and report how many passed validation.
passports = (Passport(**fields) for fields in data)
data = [p for p in passports if p.is_valid()]
print(len(data))
| 2020/Day 4/part1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # QGIS Plugin: NRW DTM Download and Translate
#
# The HSRW student <NAME> from the Environment & Energy Program (B.Sc.) developed a QGIS plugin to simplify download and translation of the NRW DTM tiles.
#
# Look at his repo: https://github.com/khaymanr/qgis_tile_downloader
#
| gi0380_QGIS_plugin_DTM_download_translate/gi381_QGIS_plugin_DTM_download_translate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: "\u201CMachineLearning\u201D"
# language: python
# name: machginelearning
# ---
# # 文本预处理
#
# ##### 文本是一类序列数据,一篇文章可以看作是字符或单词的序列,本节将介绍文本数据的常见预处理步骤,预处理通常包括四个步骤:
#
# ##### 1.读入文本
# ##### 2.分词
# ##### 3.建立字典,将每个词映射到一个唯一的索引(index)
# ##### 4.将文本从词的序列转换为索引的序列,方便输入模型
# # 1.1 读入文本
# +
import re
def read_time_machine(path='/Users/CYJ/Desktop/timemachine2.txt'):
    """Read a text file and normalise each line for tokenisation.

    Each line is stripped, lower-cased, and every run of non-letter
    characters is collapsed to a single space.

    Parameters:
        path: file to read; defaults to the notebook author's local copy
            (parameterised so other corpora can be loaded the same way).

    Returns:
        list of normalised line strings.
    """
    with open(path, 'r') as f:
        return [re.sub('[^a-z]+', ' ', line.strip().lower()) for line in f]
# -
# Read the corpus and report how many line-level "sentences" it contains.
lines = read_time_machine()
print(lines)
print('# sentences %d' % len(lines))
# ## 1.2 分词
def tokenize(sentences, token='word'):
    """Split each sentence into word or character tokens.

    Parameters:
        sentences: list of strings.
        token: 'word' to split on single spaces, 'char' for character lists.

    Returns:
        list of token lists; for an unknown token type, prints an error and
        implicitly returns None (original behaviour kept).
    """
    if token == 'word':
        return [sentence.split(' ') for sentence in sentences]
    elif token == 'char':
        return [list(sentence) for sentence in sentences]
    else:
        # Message fix: original printed 'unknow token type' with no space
        # before the offending type name.
        print('Error: unknown token type ' + token)
# Tokenise every normalised line into words (default token='word').
tokens = tokenize(lines)
print(tokens)
# ## 1.3 建立词典
#
# #### 先构建一个字典(vocabulary),将每个词映射到一个唯一的索引编号
# +
import collections
def count_corpus(sentences):
    """Flatten a nested list of token lists and count each token's frequency."""
    return collections.Counter(token for sentence in sentences for token in sentence)
class Vocab(object):
    """Vocabulary mapping corpus tokens to integer indices and back.

    Built from a nested list of token lists. Index 0 is reserved for the
    unknown token (an empty-string placeholder in this notebook).

    Fixes vs. the original cell:
      * `min_freq` was accepted but ignored (the filter hard-coded
        `freq > 0`); the filter now uses `freq >= min_freq` as intended.
      * The special/unknown placeholders were overwritten rather than
        prepended (`idx_to_token` was reassigned, not extended), so index 0
        -- the unk index -- pointed at a real corpus token. They are kept.
    """

    def __init__(self, tokens, min_freq=0, use_special_tokems=False):
        # NOTE: parameter name `use_special_tokems` (sic) kept as-is so
        # existing keyword callers are not broken.
        counter = count_corpus(tokens)  # token -> frequency
        self.token_freqs = list(counter.items())
        self.idx_to_token = []
        if use_special_tokems:
            # padding, begin of sentence, end of sentence, unknown
            # NOTE(review): all four placeholders are the same empty string,
            # so token_to_idx cannot distinguish them -- confirm intended.
            self.pad, self.bos, self.eos, self.unk = (0, 1, 2, 3)
            self.idx_to_token += ['', '', '', '']
        else:
            self.unk = 0
            self.idx_to_token += ['']
        # Append corpus tokens, skipping those below min_freq and any token
        # colliding with a special placeholder. Counter keys are already
        # unique, so no further de-duplication is needed.
        self.idx_to_token += [token for token, freq in self.token_freqs
                              if freq >= min_freq and token not in self.idx_to_token]
        # Inverse mapping: token -> index.
        self.token_to_idx = dict()
        for idx, token in enumerate(self.idx_to_token):
            self.token_to_idx[token] = idx

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        # Single token -> its index (unk for OOV); list/tuple -> list of indices.
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        # Single index -> token; list/tuple -> list of tokens.
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]
# -
# Build the vocabulary from the tokenised corpus and show its token->index map.
vocab = Vocab(tokens)
print(list(vocab.token_to_idx.items())[0:])  # [0:] just copies the whole list
# ## 1.4 Convert words to indices
# Show a few sentences both as token lists and as their vocabulary indices.
for i in range(1,10):
    print('words: ', tokens[i])
    print('indices:', vocab[tokens[i]])
# # 2.语言模型数据集
# NOTE(review): path is relative to the notebook location -- confirm it exists.
with open('../../datasets/jaychou_lyrics.txt') as f:
    corpus_chars = f.read()
print(len(corpus_chars))
print(corpus_chars[100: 120])
# Collapse line breaks to spaces and truncate the corpus to 10,000 characters.
corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
corpus_chars = corpus_chars[: 10000]
# ## 2.1 建立字符索引
# +
idx_to_char = list(set(corpus_chars)) #去重
char_to_idx = {char: i for i, char in enumerate(idx_to_char)} # 字符到索引的映射
vocab_size = len(char_to_idx)
print(vocab_size)
# -
| Code/task02/.ipynb_checkpoints/word_handle_RNN-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/iesous-kurios/AB-Demo/blob/master/HHAA_Notebook_txt.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="z1Ngl2Ol2NJh" colab_type="code" colab={}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# + id="Hm24cNjR2NJl" colab_type="code" colab={}
# load excel file containing HMIS data
# NOTE(review): the /content path suggests this ran on Google Colab with the
# file uploaded per-session -- confirm before re-running locally.
df = pd.read_excel('/content/july2019.xlsx')
#df_old = pd.read_excel('jan_bridge_served.xlsx')
# + id="LB1gptNn2NJn" colab_type="code" colab={}
# create new dataframe that only has guests in feb that were not served in jan
#df = df_new[~df_new.isin(df_old)].dropna(how='all')
# + id="34wI4lCA2NJq" colab_type="code" colab={}
#df = df.dropna(subset=['3.6 Gender'], how='all')
# + id="QxKYolVa2NJt" colab_type="code" colab={}
# + id="UktTsFBr2NJw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5a6a2e40-b0a7-48de-fca1-97be67dde6f7"
# count of guests served in feb that were not served in jan
# (bare expression: its value is displayed by the notebook, not printed)
df['5.8 Personal ID'].value_counts().sum()
# + id="hBi2JjSN2NJz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="e11f82f2-5de7-47e0-e7d9-c7a734585640"
uniqueValues = df['5.8 Personal ID'].nunique()
print('Unique elements in column "5.8 Personal ID" ')
print(uniqueValues)
# + id="DWAKBQVM2NJ2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a2c00a78-d1b4-4d86-b2b1-3790914cdbc9"
# display how many total IDs are found in original file
df['5.8 Personal ID'].value_counts().sum()
# + id="XrmiTn802NJ5" colab_type="code" colab={}
# remove the duplicates
df = df.drop_duplicates(subset='5.8 Personal ID', keep='first')
# + id="JMpWinXD2NJ8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3fbe3314-f162-4082-8d98-6aa066d55380"
# get new number of total IDs to make sure the code worked
df['5.8 Personal ID'].value_counts().sum()
# + id="JSGz-OFS2NJ-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2cf4865a-f551-4f17-85b3-46e1c18c2601"
# make sure total unduplicated guests in dataframe is equal to count of unique values
assert(df['5.8 Personal ID'].value_counts().sum() == uniqueValues)
# print success message if above code proves that only unduplicated guests are in our dataframe
print('Your data set only contains unduplicated guests')
# the print statement will fail to print if the number of guests in your dataframe is different from number of unduplicated guests
# + id="VsLnFVRQ2NKB" colab_type="code" colab={}
counties = pd.read_excel('/content/uszips.xlsx')
# + id="1hT3BXB82NKE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="cc298d3e-3f8a-41d5-e859-e9548245def0"
counties.head()
# + id="yE3sVHAX2NKG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="42d21192-73de-4d3d-cc0f-4d97c1c7b2e8"
counties.tail()
# + id="e1Vh_yUr2NKI" colab_type="code" colab={}
# Keep only the prior-address columns for the geography breakdown below.
v5 = df[['V5 Prior Address', 'V5 State', 'V5 Zip']].copy()
# + id="NjnyQsS_2NKM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="03b22e34-2072-4641-dabd-eeb637cd5f9c"
v5.sample(5)
# + id="rZx1gDv82NKO" colab_type="code" colab={}
# Default inner join: rows whose ZIP is missing from `counties` are dropped.
v5 = v5.merge(counties, on='V5 Zip')
# + id="p7UwPgDj2NKQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="93e760d6-ed09-41bd-bb45-0539ad61c113"
v5.head()
# + id="txmpOSvK8HLb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 554} outputId="e8fbebe4-3870-41aa-cb61-4ac3ce625b76"
v5.county_name.value_counts()
# + id="jAkqUXQ12NKU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 706} outputId="dea07232-22ae-4a83-a340-5d680eb2a988"
v5['V5 City'].value_counts()
# + id="JZLgDEeJ4M34" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8b9fc730-bb58-4949-a123-022abc390df1"
v5['V5 City'].value_counts().sum()
# + id="hQ3LTeWd4Olk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c0d71607-7ee9-4ab5-f637-87521593357a"
# Scratch arithmetic -- presumably 130 total minus 80 in one bucket; confirm.
130-80
# + id="Z0hb8gCX4RPg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d46e8e27-1463-4f4b-e3f0-29d2ae1c5537"
50/130
# + id="fj6Nsnyj2NKW" colab_type="code" colab={}
def get_count(city):
    """Count how many rows of v5['V5 City'] equal `city` exactly.

    NOTE(review): relies on the module-level `v5` DataFrame; equivalent to a
    value_counts() lookup but kept as an explicit scan.
    """
    return sum(1 for entry in v5['V5 City'] if entry == city)
# + id="QoUKdCQe2NKY" colab_type="code" colab={}
# Cities of interest for the per-city counts below.
city_list = ['Cheney', 'Spokane Valley', 'Veradale/Spokane Valley', 'Deer Park', 'Medical Lake', 'Liberty Lake', 'Waverly', 'Latah', 'Fairfield', 'Rockford', 'Spangle', 'Airway Heights']
# + id="FgUFnf182NKb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} outputId="3a5e32a1-ca88-4b66-a9dc-e5db387a8840"
for item in city_list:
    print("Number served in", item, "is", get_count(item))
# + id="hz6dUEIK2NKd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="2886eaf9-89ea-484f-fb6e-8176db39b4c1"
# race info
df['3.4 Race'].value_counts()
# + id="DA-xqsAc2NKf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="7095fdb8-b566-42fe-f551-fb98e5302b55"
# ethnicity
df['3.5 Ethnicity'].value_counts()
# + id="8BGGNNyv2NKh" colab_type="code" colab={}
# print total number of families served (unduplicated)
unique_households = df['5.9 Household ID'].nunique()
# + id="psjXKYeO2NKk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="331f0477-0db1-43d4-a00b-8b5952ddca5a"
# print total of unique household IDs
unique_households
# + id="wfKdH4__2NKn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0e589288-0ca4-4aa4-b162-081aa56ff5f9"
# double check unique households with another method
uniquehouseholds = df['5.9 Household ID'].unique()
len(uniquehouseholds)
# + id="vD8S-o3g2NKr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} outputId="fca289ad-fc7a-4587-bf11-d25f67027cb4"
df['4.2 Income Total at Entry'].value_counts()
# + id="Xn6zBXs02NKu" colab_type="code" colab={}
# NOTE(review): the two MFI flags below both include 1700 (>= and <=), so a
# row with income exactly 1700 is True in both buckets -- confirm intended.
df['30-50 MFI'] = df['4.2 Income Total at Entry'] >= 1700
# + id="BdIVBYAf2NKw" colab_type="code" colab={}
df['30 MFI'] = df['4.2 Income Total at Entry'] <= 1700
# + id="mVnHxo162NKy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="58c24fc9-699a-48b7-b90c-498713872abe"
df['30 MFI'].value_counts()
# + id="_xO-FHQF2NK0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="97e44bcf-694f-4c75-ce3e-622191a6ff88"
df['30-50 MFI'].value_counts()
# + id="RwbH670f2NK2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="c96c422b-ff74-4c02-b718-67a921869f26"
df['3.6 Gender'].value_counts(normalize=False)
# + id="UMBAtHIN2NK5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="7d9c0495-7a90-4dbc-8c8e-3968d8f501e0"
# guests that stayed 60 days or less
df['60 or less'] = df['Count of Bed Nights (Housing Check-ins)'] <= 60
df['60 or less'].value_counts()
# + id="S_gH5UE82NK7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="4efac4e7-ed41-4cc8-9ce1-7979eb49967e"
df['90 or less'] = df['Count of Bed Nights (Housing Check-ins)'] <= 90
df['90 or less'].value_counts()
# + id="bRR9Eemv2NK-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="0e9b5327-e423-4308-d042-bb9a7af28a83"
df['3.12 Exit Destination'].value_counts()
# + id="CK92dEH72NLA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 645} outputId="556c349f-afed-49a1-9193-5ed938815266"
pd.pivot_table(df, index='5.9 Household ID', columns=['4.2 Income Total at Entry', '4.2 Income Total at Exit'], aggfunc='sum')
# + id="F_EE7gol2NLC" colab_type="code" colab={}
# Flag rows whose income rose between program entry and exit.
df['increased'] = df['4.2 Income Total at Exit'] > df['4.2 Income Total at Entry']
# + id="WHmt-lCH2NLE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="ebba8342-c084-430a-f1c9-778761987f2e"
df['increased'].value_counts()
| HHAA_Notebook_txt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%writefile ../pipelines/esg_trending_topics/extract.py
import pandas as pd
from pytrends.request import TrendReq
# ~------------------ EXTRACT ------------------~
def get_queries(kw_list):
    """Call pytrends' related_queries with a list of keywords.

    Parameters:
        kw_list: list of strings passed to TrendReq().build_payload().
            (The original docstring also listed a `pytrend` input, but the
            session is created inside this function.)

    Returns:
        The related_queries() result on success (a dict keyed by keyword --
        TODO confirm against the pytrends documentation); an empty DataFrame
        when the request raises.
    """
    df_related_queries = pd.DataFrame()
    try:
        pytrend = TrendReq()
        pytrend.build_payload(kw_list)
        df_related_queries = pytrend.related_queries()
        # Dropped the pointless f-prefix on a literal with no placeholders.
        print("Query succeeded for", *kw_list, sep='\n\t')
    except Exception as e:
        # Message fix: original printed "Query not unsuccessful" (a double
        # negative) on the failure path.
        print(e, "\nQuery unsuccessful\n", '='*42)
    return df_related_queries
| notebooks/1_extract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Softmax
from datastore.data.kuzushiji import KuzushijiMNIST
# -
# Download the training split of Kuzushiji-MNIST to a local data directory.
data = KuzushijiMNIST('/home/ygx/data', 'train', download=True)
print(data)
# #### Pytorch Model
# Single conv layer: 1 input channel, 20 filters, 5x5 kernel, stride 1.
pytorch_model = nn.Conv2d(1, 20, 5, 1)
dataloader = DataLoader(data, batch_size=1)
# Push a few single-image batches through the conv layer as a smoke test.
for idx, (x, y) in enumerate(dataloader):
    x = x.expand(1,1,28,28)
    x = x.float()
    out = pytorch_model(x)
    print(f'output shape: {out.shape}')
    if idx == 2:
        break
# #### Keras Model
train_images, train_labels = data.load_data()
# +
sample_size = 1
np.random.seed(2018)
# NOTE(review): np.random.choice(1, ...) can only ever pick index 0 --
# probably meant choice(len(train_images), ...); confirm.
idx = np.random.choice(1, size=sample_size, replace=False)
train_sample = train_images.reshape(1, -1)[idx, :]
label_sample = train_labels[idx]
# +
# NOTE(review): hard-codes 6000 samples, and X_train_flat is unused below.
X_train_flat = train_images.reshape(6000, -1)
# Keras inputs
x_train = np.expand_dims(train_images, axis=-1)
y_train = keras.utils.to_categorical(train_labels)
# -
# Minimal one-layer Keras model mirroring the PyTorch smoke test above.
keras_model = Sequential()
keras_model.add(Conv2D(1, kernel_size=5, input_shape=(28,28,1)))
# +
img_rows = img_cols = 28
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(img_rows, img_cols, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# +
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=1,
          epochs=1,
          verbose=1)
# -
| examples/notebooks/kuzushiji.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# ## Here we go briefly through the basics of a class
# Here we get a brief introduction to classes and how we can create them once and use anywhere!
# Simply put: classes have functions, and they encapsulate and collect related bits of useful information. And you can put related classes and functions in a file and give it a name, and we call that a module, which you can then import for your own work.
#
# So actually, creating classes, creating functions, creating modules, it's all really easy. It's all just Python, and it's all just reusing a couple of concepts that we've seen a bunch of times in this series. So functions for encapsulating and naming useful bits of work, classes for encapsulating and naming related bits of functionality, and then modules to store a bunch of classes and functions. With all this you'll get a feel for when it makes sense to write functions, when it makes sense to write classes, and when it makes sense to create your own modules.
class greeting(object):
    """Tiny demo class bundling two related behaviours (hello/goodbye)."""

    def hello(self):
        # Same output as before; body restructured for illustration.
        message = 'Hi there!'
        print(message)

    def goodbye(self):
        message = 'Goodbye!'
        print(message)
# Two independent instances share behaviour but not identity.
g = greeting()
g.hello()
g.goodbye()
g2 = greeting()
g2.hello()
g2.goodbye()
# NOTE(review): this re-binds g2 to a third instance and repeats the calls.
g2 = greeting()
g2.hello()
g2.goodbye()
# +
class greeting(object):
    """Greeting bound to a person's name supplied at construction time."""

    def __init__(self, name): # runs automatically for every new instance
        self.name = name

    def _say(self, prefix):
        # Shared printing helper used by both public methods.
        print(prefix + self.name)

    def hello(self):
        self._say('Hi there! ')

    def goodbye(self):
        self._say('Goodbye! ')
# Each instance keeps its own name, so the printed output differs per object.
g = greeting("Tarry")
g.hello()
g.goodbye()
g2 = greeting("Jessica")
g2.hello()
g2.goodbye()
# +
import random
class Die(object):
    """A fixed six-sided die."""

    def roll(self):
        """Return a uniformly random face value between 1 and 6 inclusive."""
        low, high = 1, 6
        return random.randint(low, high)
# Three independent rolls of the fixed six-sided die.
d = Die()
print(d.roll())
print(d.roll())
print(d.roll())
# +
import random
class Die(object):
    """A die with a configurable number of sides."""

    def __init__(self, sides):
        # Number of faces; assumed a positive int (upper bound for randint).
        self.sides = sides

    def roll(self):
        """Return a uniformly random face value in [1, self.sides]."""
        return random.randint(1, self.sides)
# A six-sided and a twenty-sided die, three rolls each.
print("D1 rolls: ")
d = Die(6)
print(d.roll())
print(d.roll())
print(d.roll())
print("D2 rolls: ")
d2 = Die(20)
print(d2.roll())
print(d2.roll())
print(d2.roll())
# +
import random
class Deck(object):
def shuffle(self):
suits = ['Spades', 'Hearts', 'Clubs', 'Diamonds']
ranks = ['1','2','3','4','5','6','7','8','9','10', 'Jack', 'Queen', 'King', 'Ace']
self.cards = [] #Giving it self exposes is to the rest of the class for usage
for suit in suits:
for rank in ranks:
self.cards.append(rank + ' of ' + suit)
random.shuffle(self.cards)
def deal(self):
return self.cards.pop()
# Build and shuffle a deck, then deal three cards off the top.
d = Deck()
d.shuffle()
print(d.deal())
print(d.deal())
print(d.deal())
| python-tuts/beginner/Super basic concepts of classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_crowd_detection_mayub)
# language: python
# name: conda_crowd_detection_mayub
# ---
import tensorflow as tf
from keras.models import model_from_json
# ### Export Classification Model
# +
# The export path contains the name and the version of the model
# NOTE(review): set_learning_phase / get_session / simple_save are TF1-era
# APIs; this cell assumes a TensorFlow 1.x runtime.
tf.keras.backend.set_learning_phase(0) # Ignore dropout at inference
classify_model = tf.keras.models.load_model('/home/ubuntu/mayub/Github/learnopencv/Keras-Fine-Tuning/da_last4_layers.h5')
export_path = '../my_image_classifier/1'
# Fetch the Keras session and save the model
# The signature definition is defined by the input and output tensors
# And stored with the default serving key
with tf.keras.backend.get_session() as sess:
    tf.saved_model.simple_save(
        sess,
        export_path,
        inputs={'input_image': classify_model.input},
        outputs={t.name: t for t in classify_model.outputs})
# ### Export Crowd Detection Models
# #### If you get "Attempted to use a closed session" Restart Jupyter kernal and try again or try using `tf.keras.backend.clear_session()`
## Load the MODEL Architecture File
json_file = open('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/models/Model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
## Load the model for dense images and Save in H5 format
dense_model = model_from_json(loaded_model_json)
dense_model.load_weights("/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/weights/CSRNet_MAE67.984_RMSE103.25_SFN0.838_MAPE0.173_epoch127-150.0.hdf5")
dense_model.save('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/weights/dense_model.h5')
# +
# Re-load the combined H5 model and export it as SavedModel version 1.
tf.keras.backend.set_learning_phase(0)
dense_model = tf.keras.models.load_model('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/weights/dense_model.h5')
export_path = '../crowd_detector/1'
with tf.keras.backend.get_session() as sess2:
    tf.saved_model.simple_save(sess2,export_path,inputs={'input_image': dense_model.input},outputs={t.name: t for t in dense_model.outputs})
# -
# #### Model 2
## Load the MODEL Architecture File
json_file = open('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/models/Model.json', 'r')
loaded_model_json2 = json_file.read()
json_file.close()
## Load model for Sparse images
sparse_model = model_from_json(loaded_model_json2)
sparse_model.load_weights("/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/weights/CSRNet_MAE8.31_MSE14.361_SFN0.0_MAPE0.066_epoch135-400.0.hdf5")
sparse_model.save('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/weights/sparse_model.h5')
# +
# Export the sparse-crowd model as version 2 under the same export root.
tf.keras.backend.set_learning_phase(0)
sparse = tf.keras.models.load_model('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/weights/sparse_model.h5')
export_path = '../crowd_detector/2'
## Run this to avoid `FailedPreconditionError: Attempting to use uninitialized value` error
# tf.keras.backend.get_session().run(tf.global_variables_initializer())
with tf.keras.backend.get_session() as sess3:
    tf.saved_model.simple_save(sess3,export_path,inputs={'input_image': sparse.input},outputs={t.name: t for t in sparse.outputs})
# -
from PIL import Image
import numpy as np
import io
import requests
import json
def prepare_image(img, im_type=None):
    """Normalise an image with ImageNet channel statistics.

    img: PIL Image (or array-like convertible via np.array) with 3 channels;
         resized to 224x224 only when im_type == "classify".
    Returns a float array of shape (1, H, W, 3); the shape is also printed.
    """
    if im_type == "classify":
        img = img.resize((224, 224))
    arr = np.array(img) / 255.0
    # Per-channel ImageNet mean/std normalisation.
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    for channel in range(3):
        arr[:, :, channel] = (arr[:, :, channel] - means[channel]) / stds[channel]
    arr = np.expand_dims(arr, axis=0)
    print(str(arr.shape))
    return arr
# Load and preprocess a sparse-crowd sample (no resize: im_type defaults to None).
image = Image.open('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/Classify_all/sparse/IMG_7.jpg')
# image = Image.open('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/Classify_all/sparse/IMG_15.jpg')
image = prepare_image(image)
# image = image.astype('float')
# payload = {
#     "instances": [{'input_image': image.tolist()}]
# }
#
# #sending post request to TensorFlow Serving server
# r = requests.post('http://localhost:8500/v1/models/classify:predict', json=payload)
# pred = json.loads(r.content.decode('utf-8'))
# ### Test Crowd Count Models
import grpc
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow.core.framework import types_pb2
import tensorflow as tf
# Open an insecure gRPC channel to the local TensorFlow Serving instance.
channel = grpc.insecure_channel("localhost:8500")
# channel = implementations.insecure_channel("localhost",8500)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
# stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = "dense_crowd"
request.inputs["input_image"].CopyFrom(tf.make_tensor_proto(image, dtype=types_pb2.DT_FLOAT))
response = stub.Predict(request, timeout=60.0)
# bare expression: displayed by the notebook; no effect in a plain script
response
# 'y_out/Relu:0' is the model's output tensor; summing it yields the
# predicted count (see the print below).
predictions = tf.make_ndarray(response.outputs['y_out/Relu:0'])
print(predictions)
summ = np.sum(predictions)
print("Predicted Count: " + str(round(summ)))
# ### Test Classification Model
# image = Image.open('/home/ubuntu/mayub/Github/Crowd_Detection/image_examples/sparse_crowd_01.jpg')
# Classify a dense-crowd sample: resize to 224x224 via im_type="classify".
image = Image.open('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/Classify_all/dense/IMG_26.jpg')
image = prepare_image(image, im_type="classify")
# Using stub from above
request.model_spec.name = "classify"
request.inputs["input_image"].CopyFrom(tf.make_tensor_proto(image, dtype=types_pb2.DT_FLOAT))
response = stub.Predict(request, timeout=60.0)
# bare expression: displayed by the notebook; no effect in a plain script
response
# Softmax output -> argmax gives the predicted class index.
predictions = tf.make_ndarray(response.outputs['dense_11/Softmax:0'])
predicted_classes = np.argmax(predictions,axis=1)
# Typo fix: original printed "Predcited Class".
print("Predicted Class (0-dense, 1-sparse): "+str(predicted_classes[0]))
# Take a sample picture
img_path1 = '/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/Classify_all/sparse/IMG_7.jpg'
img_path2 = '/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/Classify_all/dense/20.jpg'
# image1 = Image.open(img_path1).convert('RGB')
# image2 = Image.open(img_path2).convert('RGB')
# Read as binary data
# NOTE(review): these file handles are never closed; prefer a with-block.
data1 = open(img_path1,'rb').read()
data2 = open(img_path2,'rb').read()
def prepare_image(img, im_type=None):
    """Second variant of the preprocessing helper (debug prints differ).

    Resizes to 224x224 only for im_type == "classify", prints the raw array
    shape, then applies ImageNet mean/std normalisation per channel and adds
    a leading batch axis. Returns a float array of shape (1, H, W, 3).
    """
    if im_type == "classify":
        img = img.resize((224, 224))
    arr = np.array(img)
    print(arr.shape)
    # print(arr)
    arr = arr / 255.0
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    for channel in range(3):
        arr[:, :, channel] = (arr[:, :, channel] - means[channel]) / stds[channel]
    return np.expand_dims(arr, axis=0)
def create_tf_prediction_request():
    """Open an insecure gRPC channel to local TF Serving on port 8500 and
    return a (stub, empty PredictRequest) pair ready to be populated."""
    stub = prediction_service_pb2_grpc.PredictionServiceStub(
        grpc.insecure_channel("127.0.0.1:8500"))
    return stub, predict_pb2.PredictRequest()
# +
# Compare both served models on a sparse and a dense sample image.
st, req = create_tf_prediction_request()
image = Image.open('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/Classify_all/sparse/IMG_7.jpg')
image = prepare_image(image)
req.model_spec.name = "sparse_crowd"
req.inputs["input_image"].CopyFrom(tf.make_tensor_proto(image, dtype=types_pb2.DT_FLOAT))
# NOTE(review): `data` is initialised but never used in this cell.
data = {"success": False}
response = st.Predict(req, timeout=60.0)
p_hmap = tf.make_ndarray(response.outputs['y_out/Relu:0'])
count = np.sum(p_hmap)
print(str(round(count)))
# -
# Same sparse image through the dense-crowd model for comparison.
req.model_spec.name = "dense_crowd"
response = st.Predict(req, timeout=60.0)
p_hmap = tf.make_ndarray(response.outputs['y_out/Relu:0'])
count = np.sum(p_hmap)
print(str(round(count)))
# Repeat both models on a dense example image (converted to RGB first).
image2 = Image.open('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/Classify_all/dense/20.jpg')
image2 = image2.convert("RGB")
image2 = prepare_image(image2)
req.model_spec.name = "sparse_crowd"
req.inputs["input_image"].CopyFrom(tf.make_tensor_proto(image2, dtype=types_pb2.DT_FLOAT))
response = st.Predict(req, timeout=60.0)
p_hmap = tf.make_ndarray(response.outputs['y_out/Relu:0'])
count = np.sum(p_hmap)
print(str(round(count)))
req.model_spec.name = "dense_crowd"
response = st.Predict(req, timeout=60.0)
p_hmap = tf.make_ndarray(response.outputs['y_out/Relu:0'])
count = np.sum(p_hmap)
print(str(round(count)))
| utils/Generate_tf_serving_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table> <tr>
# <td style="background-color:#ffffff;">
# <a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="25%" align="left"> </a></td>
# <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
# prepared by <NAME>, <NAME>, <NAME>, <NAME> and <NAME> (<a href="http://qworld.lu.lv/index.php/qturkey/" target="_blank">QTurkey</a>)
# </td>
# </tr></table>
# <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# <h1> Bernstein-Vazirani Algorithm </h1>
# Let $x \cdot s$ represent the inner product of the bit strings modulo 2.
#
# For instance if $x = 1000$ and $s = 1010$, then $x\cdot s = 1.1 + 0.0 + 0.1 + 0.0 = 1 \pmod 2 = 1$
# ## Problem
#
# Given an oracle function $f:\{0,1\}^n \rightarrow \{0,1\}$, which is defined as $ f(\mathbf{x}) = x \cdot s $, find the secret string (sequence of bits) $s$.
#
#
# This might come across as a bit of an artificially created problem, because it is. It was specifically designed to be solved using a quantum computer in order to show there can be advantages in using quantum algorithms over probabilistic algorithms.
# Let's start by giving an example of such an $f$.
#
# \begin{align*}
# f(00)&=0\\
# f(01)&=1\\
# f(10)&=0\\
# f(11)&=1\\
# \end{align*}
#
# In this example, $s$ is $01$, as $f(00)=00\cdot 01=0$, $f(01)=01\cdot 01 = 1$, $f(10)=10\cdot01=0$ and $f(11)=11\cdot01=1$.
#
#
# Note that now the unitary operator $U_f$ takes the following form: $ U_f: \ket{x}\ket{y} \mapsto \ket{x}\ket{y \oplus ( x \cdot s)} $.
#
#
# ### Task 1
#
# - Using how many queries can you solve the problem clasically? How many queries if you use a probabilistic algorithm?
# - How many queries do you think we need to make if we are to solve the problem with a quantum computer?
# <a href="A05_Bernstein_Vazirani_Algorithm_Solutions.ipynb#task1">click for our solution</a>
# ### Task 2
#
# What can we say about the $f:\{0,1\}^n \rightarrow \{0,1\}$ function if $s = 0^n$?
# <a href="A05_Bernstein_Vazirani_Algorithm_Solutions.ipynb#task2">click for our solution</a>
# ## Algorithm
#
# We use exactly the same algorithm as Deutsch-Jozsa.
#
# <img src="../images/deutschjozsa.png" width="60%" align="center">
#
# We construct a circuit with $n+1$ qubits.
#
# - Set the $n+1$'st qubit to state $\ket{-}$ by applying $X$ and $H$ gates.
# - Apply $H$ to first $n$ qubits.
# - Apply $U_f$.
# - Apply $H$ to first $n$ qubits.
# - Measure the first $n$ qubits to obtain $s$.
# ## Analysis
#
# <img src="../images/deutschjozsa2.png" width="60%" align="center">
#
# As we have the same circuit as Deustch-Jozsa, the initialization is the same.
#
# $$ \ket{\psi_2} = \frac{1}{\sqrt{2^n}}\sum_{x=0}^{2^n-1} \ket{x} \otimes \ket{-} $$
# From now on we can ignore the output qubit and focus on our input qubits. After applying $U_f$ we then get the state:
#
# $$ \ket{\psi_{3,0}}= \frac{1}{\sqrt{2^n}} \sum_{x=0}^{2^n-1} (-1)^{f(x)} \ket{x} $$
# Let's replace $f(x) = x \cdot s$, and rewrite our state as follows:
#
# $$ \ket{\psi_{3,0}} = \frac{1}{\sqrt{2^n}} \sum_{x=0}^{2^n-1} (-1)^{x \cdot s} \ket{x} $$
# From the previous notebook we know the following expression: $$ H^{\otimes n} \ket{x} = \frac{1}{\sqrt{2^n}} \sum_{x=0}^{2^n-1} (-1)^{x \cdot z} \ket{z}. $$
# We also know that the $H^{\otimes n}$ operator is its own inverse. Thus, we can say that $H^{\otimes n}\ket{a} = \ket{b} \Longleftrightarrow H^{\otimes n}\ket{b} = \ket{a}$. So in fact, $\ket{\psi_3}$ is the state obtained after applying $H^{\otimes n}$ to $\ket{s}$.
#
# Hence after applying $H^{\otimes n}$ to the input qubits, we get the final state as
#
# $$\ket{\psi_{4,0}} = \ket{s}.$$
#
# We measure the first $n$ qubits and we observe the string $s$ with probability 1.
#
# Note that this is also an exact algorithm without any errors like the other algorithms we have seen so far.
# ### Task 3
#
# Given an oracle function `bv_oracle()` that constructs a 6 qubit oracle circuit ($s$ has length 5) for $f$, construct a circuit that implements the algorithm described above to find out $s$.
#
# Note that qubit 5 is the output qubit.
#
# Run the following cell to load function `bv_oracle()`.
# %run ../include/oracle.py
# +
from qiskit import QuantumCircuit, execute, Aer
n=5  # length of the hidden bitstring s; qubit n (index 5) is the output qubit
#Create quantum circuit with n+1 qubits and n classical bits (only the inputs are measured)
bv_circuit = QuantumCircuit(n+1, n)
#Your code here
bv_circuit.draw(output="mpl")
# -
# Run the circuit on the QASM simulator.  The Bernstein-Vazirani algorithm is
# exact, so (once implemented) every shot should yield the same string s.
job = execute(bv_circuit, Aer.get_backend('qasm_simulator'),shots=10000)
counts = job.result().get_counts()
print(counts)
# <a href="A05_Bernstein_Vazirani_Algorithm_Solutions.ipynb#task3">click for our solution</a>
# ### Task 4
#
# Given $\textbf{s} = 0110$, implement a function that returns an oracle for the function $ f(\mathbf{x}) = \mathbf{x} \cdot \mathbf{s} $. Note that $n=4$ and you will need a circuit with 5 qubits where qubit 4 is the output qubit.
# +
from qiskit import QuantumCircuit
def oracle():
    """Return an oracle circuit for f(x) = x . s with s = 0110.

    Expected implementation (task for the reader): a 5-qubit circuit where
    qubit 4 is the output qubit, with a CNOT from each input qubit i for
    which s_i = 1 onto qubit 4.
    """
    #Your code here
    # BUG FIX: a function body consisting only of a comment is a syntax
    # error, so running this template cell as-is crashed before the student
    # could fill it in.  `pass` keeps the placeholder runnable.
    pass
# -
# <a href="A05_Bernstein_Vazirani_Algorithm_Solutions.ipynb#task4">click for our solution</a>
# Recall from Task 1 that we need at least $n$ queries both using the deterministic and probabilistic approaches. Now finally, we have an algorithm which provides speedup in the query model also against probabilistic algorithms, although not exponential.
| nickel/A05_Bernstein_Vazirani_Algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pylab as plt
# - [Scipy FFT tutorial](https://docs.scipy.org/doc/scipy/reference/tutorial/fft.html)
# ### Correlate2d vs convolve2d
#
# def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
# out = sigtools._convolve2d(in1, in2.conj(), ...
# # https://github.com/scipy/scipy/blob/adc4f4f7bab120ccfab9383aba272954a0a12fb0/scipy/signal/signaltools.py#L1552-L1644
#
# def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
# out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
#
#
# both calls `sigtools._convolve2d`
# +
## =================
## 1D convolution
## =================
from scipy.signal import fftconvolve
# Two rectangular pulses on a common grid: y1 centred at 0 (half-width 0.5),
# y2 centred at 0.2 (half-width 0.6).
x = np.linspace(-2, 2, 65)
y1 = np.zeros_like(x)
y1[ np.abs(x)<0.5 ] = 1
y2 = np.zeros_like(x)
y2[ np.abs(x-0.2)<0.6 ] = 1
# FFT-based convolution cropped to the input length.
# NOTE(review): despite the variable name, this is a convolution, not a correlation.
corr = fftconvolve(y1, y2, mode='same')
plt.plot(x, y1, label='y1');
plt.plot(x, y2, label='y2');
plt.plot(x, corr, label='corr');
plt.legend(); plt.xlabel('x');
# -
# Guizar-Sicairos, Manuel, Samuel T. Thurman, and James R. Fienup. « Efficient Subpixel Image Registration Algorithms ». Optics Letters 33, nᵒ 2 (15 janvier 2008): 156. https://doi.org/10.1364/OL.33.000156.
#
# The usual FFT approach to finding the cross-correlation peak to within a
# fraction, 1/κ, of a pixel is
# (i) compute F(u, v) and G(u, v), (ii) embed the product
# F(u, v) G*(u, v) in a larger array of zeros of dimensions (κM, κN),
# (iii) compute an inverse FFT to obtain an upsampled cross correlation,
# and (iv) locate its peak. The computational complexity of the inverse
# FFT in this case is O{κM κN [log2(κM) + log2(κN)]} for
# N ≤ M.
from scipy.fft import fft, ifft, fftshift, ifftshift
# +
x = np.arange(-20, 20, 1)
# Two Gaussians, the second shifted by a sub-sample amount (+2.345678).
y1 = np.exp( -(x)**2 / 30 )
y2 = np.exp( -(x - 2.345678)**2 / 10 ) #+ 0.1*np.random.randn(len(x))
y1_TF = (fft(y1))
y2_TF = (fft(y2))
# Cross-correlation theorem: corr(y1, y2) = IFFT( FFT(y1) * conj(FFT(y2)) ).
corr_TF = y1_TF * y2_TF.conj()
corr = ifft(corr_TF)
corr = fftshift(corr)  # move the zero-lag term to the centre of the array
corr = np.abs(corr)
plt.plot(x, y1, 'x-', label='y1');
plt.plot(x, y2, 'x-', label='y2');
plt.plot(x, corr, '|-', label='corr');
plt.legend(); plt.xlabel('x');
# The peak location is only resolved to the nearest integer sample here.
print('argmax=', x[np.argmax(corr)])
# +
up_sample_factor = 100
# Zero-pad the (centred) cross-power spectrum: appending zeros in the
# frequency domain interpolates the correlation in the spatial domain,
# giving sub-sample resolution for the peak location.
shifted_corr_TF = fftshift(corr_TF)
padded_corr_TF = np.pad(shifted_corr_TF, (0, 1+len(shifted_corr_TF)*(up_sample_factor-1)))
upsampled_corr = ifft(padded_corr_TF)
upsampled_corr = fftshift(upsampled_corr)
upsampled_corr = np.abs(upsampled_corr)
# Finer x axis matching the up-sampled correlation length.
x_up = np.linspace(x.min(), x.max(), len(upsampled_corr))
# +
plt.plot(x, y1, 'x-', label='y1');
plt.plot(x, y2, 'x-', label='y2');
plt.plot(x, corr, '|-', label='corr');
plt.plot(x_up, upsampled_corr, '|-', label='upsampled corr');
plt.legend(); plt.xlabel('x');
print('argmax=', x[np.argmax(corr)])
print('up-sampled argmax=', x_up[np.argmax(upsampled_corr)])
# -
# Reference result from scikit-image, which implements the Guizar-Sicairos
# sub-pixel registration algorithm cited above (inputs reshaped to 2D columns).
from skimage.registration import phase_cross_correlation
phase_cross_correlation(y1.reshape(len(y1), 1), y2.reshape(len(y2), 1),
                        upsample_factor=10000)
len(padded_corr_TF)
# +
up_sample_factor = 500
x = np.arange(-20, 20, 1)
y1 = np.exp( -(x)**2 / 30 )
# Same pulse with a shift of ~3.5e-17 -- far below double-precision resolution
# of the grid values, so the applied shift is effectively zero.
y2 = np.exp( -(x - 0.0000000000000000345678)**2 / 30 )
y1_TF = fft(y1)
y2_TF = fft(y2)
corr_TF = y1_TF * y2_TF.conj()
corr_TF = ifftshift(corr_TF)
# Passing n > len(corr_TF) to ifft zero-pads the spectrum, i.e. up-samples
# the correlation by `up_sample_factor`.
corr = ifft(corr_TF, n=up_sample_factor*len(corr_TF))
corr = ifftshift(corr)
corr = np.abs(corr)
x_up_sampled = np.linspace(x.min(), x.max(), len(corr))
plt.plot(x, y1, 'x-', label='y1');
plt.plot(x, y2, 'x-', label='y2');
plt.plot(x_up_sampled, corr, '-', label='corr');
plt.legend(); plt.xlabel('x');
print('argmax=', x_up_sampled[np.argmax(corr)])
# +
# Baseline: plain (non-up-sampled) cross-correlation for comparison.
corr_TF = y1_TF * y2_TF.conj()
corr = ifft(corr_TF)
corr = fftshift(corr)
corr = np.abs(corr)
plt.plot(x, y1, 'x-', label='y1');
plt.plot(x, y2, 'x-', label='y2');
plt.plot(x, corr, '|-', label='corr');
plt.legend(); plt.xlabel('x');
print('argmax=', x[np.argmax(corr)])
# -
# zero-padding
# http://greg-ashton.physics.monash.edu/example-of-zero-padding-using-scipy.html
# +
up_sample_factor = 100
y1_TF = fft(y1)#, n=up_sample_factor*len(y1))
y2_TF = fft(y2)#, n=up_sample_factor*len(y2))
corr_TF = y1_TF * y2_TF.conj()
# Normalising the cross-power spectrum to unit magnitude would give *phase*
# correlation; computed here but deliberately left disabled below.
corr_TF_normed = corr_TF / np.abs(corr_TF)
#corr_TF = corr_TF_normed
corr = ifft(corr_TF)
corr = fftshift(corr)
corr = np.abs(corr)
x_fine = np.linspace(np.min(x), np.max(x), len(corr))
plt.plot(x, y1, 'x-', label='y1');
plt.plot(x, y2, 'x-', label='y2');
plt.plot(x_fine, corr, '|-', label='corr');
plt.legend(); plt.xlabel('x');
print(x_fine[np.argmax(corr)])
# +
# BUG FIX: `corr_TF_centered` was used below while its definition was
# commented out (it is only defined in a *later* cell), so running the
# notebook cells in order raised a NameError here.  Define it before use.
corr_TF_centered = fftshift(corr_TF)
# Place the centred spectrum at the start of a zero buffer up_sample_factor
# times as long; the inverse FFT of this is the up-sampled correlation.
corr_TF_pad = np.zeros(up_sample_factor*len(corr_TF), dtype='complex128')
corr_TF_pad[:len(corr_TF)] = corr_TF_centered
plt.plot(np.abs(corr_TF_pad))
# -
corr_TF_centered = fftshift(corr_TF)
# np.pad with a scalar pad width pads *both* ends, so the padded spectrum is
# (2*up_sample_factor + 1) times the original length, zeros on each side.
corr_TF_pad = np.pad(corr_TF_centered, up_sample_factor*len(corr_TF_centered))
plt.plot(np.abs(corr_TF_pad))
# +
# Inverse-transform the padded spectrum to get the up-sampled correlation.
corr = ifft(corr_TF_pad)
corr = fftshift(corr)
corr = np.abs(corr)
x_fine = np.linspace(np.min(x), np.max(x), len(corr))
plt.plot(x, y1, 'x-', label='y1');
plt.plot(x, y2, 'x-', label='y2');
plt.plot(x_fine, corr, '|-', label='corr');
plt.legend(); plt.xlabel('x');
print(x_fine[np.argmax(corr)])
| toy_models/01_fft.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Tools - NumPy**
#
# *NumPy is the fundamental library for scientific computing with Python. NumPy is centered around a powerful N-dimensional array object, and it also contains useful linear algebra, Fourier transform, and random number functions.*
#
# # Creating arrays
# Now let's import `numpy`. Most people import it as `np`:
import numpy as np
# ## `np.zeros`
# The `zeros` function creates an array containing any number of zeros:
np.zeros(5)
# It's just as easy to create a 2D array (ie. a matrix) by providing a tuple with the desired number of rows and columns. For example, here's a 3x4 matrix:
np.zeros((3,4))
# ## Some vocabulary
#
# * In NumPy, each dimension is called an **axis**.
# * The number of axes is called the **rank**.
# * For example, the above 3x4 matrix is an array of rank 2 (it is 2-dimensional).
# * The first axis has length 3, the second has length 4.
# * An array's list of axis lengths is called the **shape** of the array.
# * For example, the above matrix's shape is `(3, 4)`.
# * The rank is equal to the shape's length.
# * The **size** of an array is the total number of elements, which is the product of all axis lengths (eg. 3*4=12)
a = np.zeros((3,4))
a
a.shape
a.ndim # equal to len(a.shape)
a.size
# ## N-dimensional arrays
# You can also create an N-dimensional array of arbitrary rank. For example, here's a 3D array (rank=3), with shape `(2,3,4)`:
np.zeros((2,3,4))
# ## Array type
# NumPy arrays have the type `ndarray`s:
type(np.zeros((3,4)))
# ## `np.ones`
# Many other NumPy functions create `ndarrays`.
#
# Here's a 3x4 matrix full of ones:
np.ones((3,4))
# ## `np.full`
# Creates an array of the given shape initialized with the given value. Here's a 3x4 matrix full of `π`.
np.full((3,4), np.pi)
# ## `np.empty`
# An uninitialized 2x3 array (its content is not predictable, as it is whatever is in memory at that point):
np.empty((2,3))
# ## np.array
# Of course you can initialize an `ndarray` using a regular python array. Just call the `array` function:
np.array([[1,2,3,4], [10, 20, 30, 40]])
# ## `np.arange`
# You can create an `ndarray` using NumPy's `range` function, which is similar to python's built-in `range` function:
np.arange(1, 5)
# It also works with floats:
np.arange(1.0, 5.0)
# Of course you can provide a step parameter:
np.arange(1, 5, 0.5)
# However, when dealing with floats, the exact number of elements in the array is not always predictable. For example, consider this:
print(np.arange(0, 5/3, 1/3)) # depending on floating point errors, the max value is 4/3 or 5/3.
print(np.arange(0, 5/3, 0.333333333))
print(np.arange(0, 5/3, 0.333333334))
# ## `np.linspace`
# For this reason, it is generally preferable to use the `linspace` function instead of `arange` when working with floats. The `linspace` function returns an array containing a specific number of points evenly distributed between two values (note that the maximum value is *included*, contrary to `arange`):
print(np.linspace(0, 5/3, 6))
# ## `np.rand` and `np.randn`
# A number of functions are available in NumPy's `random` module to create `ndarray`s initialized with random values.
# For example, here is a 3x4 matrix initialized with random floats between 0 and 1 (uniform distribution):
np.random.rand(3,4)
# Here's a 3x4 matrix containing random floats sampled from a univariate [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) (Gaussian distribution) of mean 0 and variance 1:
np.random.randn(3,4)
# To give you a feel of what these distributions look like, let's use matplotlib (see the [matplotlib tutorial](tools_matplotlib.ipynb) for more details):
# %matplotlib inline
import matplotlib.pyplot as plt
# BUG FIX: the `normed` keyword was removed from plt.hist in Matplotlib 3.1;
# `density=True` is the supported way to normalize to a probability density.
plt.hist(np.random.rand(100000), density=True, bins=100, histtype="step", color="blue", label="rand")
plt.hist(np.random.randn(100000), density=True, bins=100, histtype="step", color="red", label="randn")
plt.axis([-2.5, 2.5, 0, 1.1])
plt.legend(loc = "upper left")
plt.title("Random distributions")
plt.xlabel("Value")
plt.ylabel("Density")
plt.show()
# ## np.fromfunction
# You can also initialize an `ndarray` using a function:
# +
def my_function(z, y, x):
    """Combine coordinate arrays: the element at (z, y, x) gets x * y + z."""
    product = x * y
    return product + z

# Build a (3, 2, 10) array whose entries are computed from their coordinates.
np.fromfunction(my_function, (3, 2, 10))
# -
# NumPy first creates three `ndarrays` (one per dimension), each of shape `(2, 10)`. Each array has values equal to the coordinate along a specific axis. For example, all elements in the `z` array are equal to their z-coordinate:
#
# [[[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
#
# [[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
# [ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]]
#
# [[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
# [ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]]
#
# So the terms x, y and z in the expression `x * y + z` above are in fact `ndarray`s (we will discuss arithmetic operations on arrays below). The point is that the function `my_function` is only called *once*, instead of once per element. This makes initialization very efficient.
# # Array data
# ## `dtype`
# NumPy's `ndarray`s are also efficient in part because all their elements must have the same type (usually numbers).
# You can check what the data type is by looking at the `dtype` attribute:
c = np.arange(1, 5)
print(c.dtype, c)
c = np.arange(1.0, 5.0)
print(c.dtype, c)
# Instead of letting NumPy guess what data type to use, you can set it explicitly when creating an array by setting the `dtype` parameter:
d = np.arange(1, 5, dtype=np.complex64)
print(d.dtype, d)
# Available data types include `int8`, `int16`, `int32`, `int64`, `uint8`|`16`|`32`|`64`, `float16`|`32`|`64` and `complex64`|`128`. Check out [the documentation](http://docs.scipy.org/doc/numpy-1.10.1/user/basics.types.html) for the full list.
#
# ## `itemsize`
# The `itemsize` attribute returns the size (in bytes) of each item:
e = np.arange(1, 5, dtype=np.complex64)
e.itemsize
# ## `data` buffer
# An array's data is actually stored in memory as a flat (one dimensional) byte buffer. It is available *via* the `data` attribute (you will rarely need it, though).
f = np.array([[1,2],[1000, 2000]], dtype=np.int32)
f.data
# In python 2, `f.data` is a buffer. In python 3, it is a memoryview.
# +
# Extract the raw data buffer as bytes; the attribute type differs between
# Python versions (memoryview in Python 3, buffer in Python 2), hence the probe.
if (hasattr(f.data, "tobytes")):
    data_bytes = f.data.tobytes() # python 3
else:
    data_bytes = memoryview(f.data).tobytes() # python 2
data_bytes
# -
# Several `ndarrays` can share the same data buffer, meaning that modifying one will also modify the others. We will see an example in a minute.
# # Reshaping an array
# ## In place
# Changing the shape of an `ndarray` is as simple as setting its `shape` attribute. However, the array's size must remain the same.
g = np.arange(24)
print(g)
print("Rank:", g.ndim)
g.shape = (6, 4)
print(g)
print("Rank:", g.ndim)
g.shape = (2, 3, 4)
print(g)
print("Rank:", g.ndim)
# ## `reshape`
# The `reshape` function returns a new `ndarray` object pointing at the *same* data. This means that modifying one array will also modify the other.
g2 = g.reshape(4,6)
print(g2)
print("Rank:", g2.ndim)
# Set item at row 1, col 2 to 999 (more about indexing below).
g2[1, 2] = 999
g2
# The corresponding element in `g` has been modified.
g
# ## `ravel`
# Finally, the `ravel` function returns a new one-dimensional `ndarray` that also points to the same data:
g.ravel()
# # Arithmetic operations
# All the usual arithmetic operators (`+`, `-`, `*`, `/`, `//`, `**`, etc.) can be used with `ndarray`s. They apply *elementwise*:
a = np.array([14, 23, 32, 41])
b = np.array([5, 4, 3, 2])
print("a + b =", a + b)
print("a - b =", a - b)
print("a * b =", a * b)
print("a / b =", a / b)
print("a // b =", a // b)
print("a % b =", a % b)
print("a ** b =", a ** b)
# Note that the multiplication is *not* a matrix multiplication. We will discuss matrix operations below.
#
# The arrays must have the same shape. If they do not, NumPy will apply the *broadcasting rules*.
# # Broadcasting
# In general, when NumPy expects arrays of the same shape but finds that this is not the case, it applies the so-called *broadcasting* rules:
#
# ## First rule
# *If the arrays do not have the same rank, then a 1 will be prepended to the smaller ranking arrays until their ranks match.*
h = np.arange(5).reshape(1, 1, 5)
h
# Now let's try to add a 1D array of shape `(5,)` to this 3D array of shape `(1,1,5)`. Applying the first rule of broadcasting!
h + [10, 20, 30, 40, 50] # same as: h + [[[10, 20, 30, 40, 50]]]
# ## Second rule
# *Arrays with a 1 along a particular dimension act as if they had the size of the array with the largest shape along that dimension. The value of the array element is repeated along that dimension.*
k = np.arange(6).reshape(2, 3)
k
# Let's try to add a 2D array of shape `(2,1)` to this 2D `ndarray` of shape `(2, 3)`. NumPy will apply the second rule of broadcasting:
k + [[100], [200]] # same as: k + [[100, 100, 100], [200, 200, 200]]
# Combining rules 1 & 2, we can do this:
k + [100, 200, 300] # after rule 1: [[100, 200, 300]], and after rule 2: [[100, 200, 300], [100, 200, 300]]
# And also, very simply:
k + 1000 # same as: k + [[1000, 1000, 1000], [1000, 1000, 1000]]
# ## Third rule
# *After rules 1 & 2, the sizes of all arrays must match.*
# Demonstrates the third broadcasting rule: shapes (2, 3) and (2,) cannot be
# broadcast together, so NumPy raises a ValueError (printed below).
try:
    k + [33, 44]
except ValueError as e:
    print(e)
# Broadcasting rules are used in many NumPy operations, not just arithmetic operations, as we will see below.
# For more details about broadcasting, check out [the documentation](https://docs.scipy.org/doc/numpy-dev/user/basics.broadcasting.html).
# ## Upcasting
# When trying to combine arrays with different `dtype`s, NumPy will *upcast* to a type capable of handling all possible values (regardless of what the *actual* values are).
k1 = np.arange(0, 5, dtype=np.uint8)
print(k1.dtype, k1)
k2 = k1 + np.array([5, 6, 7, 8, 9], dtype=np.int8)
print(k2.dtype, k2)
# Note that `int16` is required to represent all *possible* `int8` and `uint8` values (from -128 to 255), even though in this case a uint8 would have sufficed.
k3 = k1 + 1.5
print(k3.dtype, k3)
# # Conditional operators
# The conditional operators also apply elementwise:
m = np.array([20, -5, 30, 40])
m < [15, 16, 35, 36]
# And using broadcasting:
m < 25 # equivalent to m < [25, 25, 25, 25]
# This is most useful in conjunction with boolean indexing (discussed below).
m[m < 25]
# # Mathematical and statistical functions
# Many mathematical and statistical functions are available for `ndarray`s.
#
# ## `ndarray` methods
# Some functions are simply `ndarray` methods, for example:
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
print(a)
print("mean =", a.mean())
# Note that this computes the mean of all elements in the `ndarray`, regardless of its shape.
#
# Here are a few more useful `ndarray` methods:
for func in (a.min, a.max, a.sum, a.prod, a.std, a.var):
print(func.__name__, "=", func())
# These functions accept an optional argument `axis` which lets you ask for the operation to be performed on elements along the given axis. For example:
c=np.arange(24).reshape(2,3,4)
c
c.sum(axis=0) # sum across matrices
c.sum(axis=1) # sum across rows
# You can also sum over multiple axes:
c.sum(axis=(0,2)) # sum across matrices and columns
0+1+2+3 + 12+13+14+15, 4+5+6+7 + 16+17+18+19, 8+9+10+11 + 20+21+22+23
# ## Universal functions
# NumPy also provides fast elementwise functions called *universal functions*, or **ufunc**. They are vectorized wrappers of simple functions. For example `square` returns a new `ndarray` which is a copy of the original `ndarray` except that each element is squared:
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
np.square(a)
# Here are a few more useful unary ufuncs:
print("Original ndarray")
print(a)
for func in (np.abs, np.sqrt, np.exp, np.log, np.sign, np.ceil, np.modf, np.isnan, np.cos):
print("\n", func.__name__)
print(func(a))
# ## Binary ufuncs
# There are also many binary ufuncs, that apply elementwise on two `ndarray`s. Broadcasting rules are applied if the arrays do not have the same shape:
a = np.array([1, -2, 3, 4])
b = np.array([2, 8, -1, 7])
np.add(a, b) # equivalent to a + b
np.greater(a, b) # equivalent to a > b
np.maximum(a, b)
np.copysign(a, b)
# # Array indexing
# ## One-dimensional arrays
# One-dimensional NumPy arrays can be accessed more or less like regular python arrays:
a = np.array([1, 5, 3, 19, 13, 7, 3])
a[3]
a[2:5]
a[2:-1]
a[:2]
a[2::2]
a[::-1]
# Of course, you can modify elements:
a[3]=999
a
# You can also modify an `ndarray` slice:
a[2:5] = [997, 998, 999]
a
# ## Differences with regular python arrays
# Contrary to regular python arrays, if you assign a single value to an `ndarray` slice, it is copied across the whole slice, thanks to broadcasting rules discussed above.
a[2:5] = -1
a
# Also, you cannot grow or shrink `ndarray`s this way:
try:
a[2:5] = [1,2,3,4,5,6] # too long
except ValueError as e:
print(e)
# You cannot delete elements either:
try:
del a[2:5]
except ValueError as e:
print(e)
# Last but not least, `ndarray` **slices are actually *views*** on the same data buffer. This means that if you create a slice and modify it, you are actually going to modify the original `ndarray` as well!
a_slice = a[2:6]
a_slice[1] = 1000
a # the original array was modified!
a[3] = 2000
a_slice # similarly, modifying the original array modifies the slice!
# If you want a copy of the data, you need to use the `copy` method:
another_slice = a[2:6].copy()
another_slice[1] = 3000
a # the original array is untouched
a[3] = 4000
another_slice # similary, modifying the original array does not affect the slice copy
# ## Multi-dimensional arrays
# Multi-dimensional arrays can be accessed in a similar way by providing an index or slice for each axis, separated by commas:
b = np.arange(48).reshape(4, 12)
b
b[1, 2] # row 1, col 2
b[1, :] # row 1, all columns
b[:, 1] # all rows, column 1
# **Caution**: note the subtle difference between these two expressions:
b[1, :]
b[1:2, :]
# The first expression returns row 1 as a 1D array of shape `(12,)`, while the second returns that same row as a 2D array of shape `(1, 12)`.
# ## Fancy indexing
# You may also specify a list of indices that you are interested in. This is referred to as *fancy indexing*.
b[(0,2), 2:5] # rows 0 and 2, columns 2 to 4 (5-1)
b[:, (-1, 2, -1)] # all rows, columns -1 (last), 2 and -1 (again, and in this order)
# If you provide multiple index arrays, you get a 1D `ndarray` containing the values of the elements at the specified coordinates.
b[(-1, 2, -1, 2), (5, 9, 1, 9)] # returns a 1D array with b[-1, 5], b[2, 9], b[-1, 1] and b[2, 9] (again)
# ## Higher dimensions
# Everything works just as well with higher dimensional arrays, but it's useful to look at a few examples:
c = b.reshape(4,2,6)
c
c[2, 1, 4] # matrix 2, row 1, col 4
c[2, :, 3] # matrix 2, all rows, col 3
# If you omit coordinates for some axes, then all elements in these axes are returned:
c[2, 1] # Return matrix 2, row 1, all columns. This is equivalent to c[2, 1, :]
# ## Ellipsis (`...`)
# You may also write an ellipsis (`...`) to ask that all non-specified axes be entirely included.
c[2, ...] # matrix 2, all rows, all columns. This is equivalent to c[2, :, :]
c[2, 1, ...] # matrix 2, row 1, all columns. This is equivalent to c[2, 1, :]
c[2, ..., 3] # matrix 2, all rows, column 3. This is equivalent to c[2, :, 3]
c[..., 3] # all matrices, all rows, column 3. This is equivalent to c[:, :, 3]
# ## Boolean indexing
# You can also provide an `ndarray` of boolean values on one axis to specify the indices that you want to access.
b = np.arange(48).reshape(4, 12)
b
rows_on = np.array([True, False, True, False])
b[rows_on, :] # Rows 0 and 2, all columns. Equivalent to b[(0, 2), :]
cols_on = np.array([False, True, False] * 4)
b[:, cols_on] # All rows, columns 1, 4, 7 and 10
# ## `np.ix_`
# You cannot use boolean indexing this way on multiple axes, but you can work around this by using the `ix_` function:
b[np.ix_(rows_on, cols_on)]
np.ix_(rows_on, cols_on)
# If you use a boolean array that has the same shape as the `ndarray`, then you get in return a 1D array containing all the values that have `True` at their coordinate. This is generally used along with conditional operators:
b[b % 3 == 1]
# # Iterating
# Iterating over `ndarray`s is very similar to iterating over regular python arrays. Note that iterating over multidimensional arrays is done with respect to the first axis.
c = np.arange(24).reshape(2, 3, 4) # A 3D array (composed of two 3x4 matrices)
c
for m in c:
print("Item:")
print(m)
for i in range(len(c)): # Note that len(c) == c.shape[0]
print("Item:")
print(c[i])
# If you want to iterate on *all* elements in the `ndarray`, simply iterate over the `flat` attribute:
for i in c.flat:
print("Item:", i)
# # Stacking arrays
# It is often useful to stack together different arrays. NumPy offers several functions to do just that. Let's start by creating a few arrays.
q1 = np.full((3,4), 1.0)
q1
q2 = np.full((4,4), 2.0)
q2
q3 = np.full((3,4), 3.0)
q3
# ## `vstack`
# Now let's stack them vertically using `vstack`:
q4 = np.vstack((q1, q2, q3))
q4
q4.shape
# This was possible because q1, q2 and q3 all have the same shape (except for the vertical axis, but that's ok since we are stacking on that axis).
#
# ## `hstack`
# We can also stack arrays horizontally using `hstack`:
q5 = np.hstack((q1, q3))
q5
q5.shape
# This is possible because q1 and q3 both have 3 rows. But since q2 has 4 rows, it cannot be stacked horizontally with q1 and q3:
try:
q5 = np.hstack((q1, q2, q3))
except ValueError as e:
print(e)
# ## `concatenate`
# The `concatenate` function stacks arrays along any given existing axis.
q7 = np.concatenate((q1, q2, q3), axis=0) # Equivalent to vstack
q7
q7.shape
# As you might guess, `hstack` is equivalent to calling `concatenate` with `axis=1`.
# ## `stack`
# The `stack` function stacks arrays along a new axis. All arrays have to have the same shape.
q8 = np.stack((q1, q3))
q8
q8.shape
# # Splitting arrays
# Splitting is the opposite of stacking. For example, let's use the `vsplit` function to split a matrix vertically.
#
# First let's create a 6x4 matrix:
r = np.arange(24).reshape(6,4)
r
# Now let's split it in three equal parts, vertically:
r1, r2, r3 = np.vsplit(r, 3)
r1
r2
r3
# There is also a `split` function which splits an array along any given axis. Calling `vsplit` is equivalent to calling `split` with `axis=0`. There is also an `hsplit` function, equivalent to calling `split` with `axis=1`:
r4, r5 = np.hsplit(r, 2)
r4
r5
# # Transposing arrays
# The `transpose` method creates a new view on an `ndarray`'s data, with axes permuted in the given order.
#
# For example, let's create a 3D array:
t = np.arange(24).reshape(4,2,3)
t
# Now let's create an `ndarray` such that the axes `0, 1, 2` (depth, height, width) are re-ordered to `1, 2, 0` (depth→width, height→depth, width→height):
t1 = t.transpose((1,2,0))
t1
t1.shape
# By default, `transpose` reverses the order of the dimensions:
t2 = t.transpose() # equivalent to t.transpose((2, 1, 0))
t2
t2.shape
# NumPy provides a convenience function `swapaxes` to swap two axes. For example, let's create a new view of `t` with depth and height swapped:
t3 = t.swapaxes(0,1) # equivalent to t.transpose((1, 0, 2))
t3
t3.shape
# # Linear algebra
# NumPy 2D arrays can be used to represent matrices efficiently in python. We will just quickly go through some of the main matrix operations available. For more details about Linear Algebra, vectors and matrics, go through the [Linear Algebra tutorial](math_linear_algebra.ipynb).
#
# ## Matrix transpose
# The `T` attribute is equivalent to calling `transpose()` when the rank is ≥2:
m1 = np.arange(10).reshape(2,5)
m1
m1.T
# The `T` attribute has no effect on rank 0 (empty) or rank 1 arrays:
m2 = np.arange(5)
m2
m2.T
# We can get the desired transposition by first reshaping the 1D array to a single-row matrix (2D):
m2r = m2.reshape(1,5)
m2r
m2r.T
# ## Matrix dot product
# Let's create two matrices and execute a matrix [dot product](https://en.wikipedia.org/wiki/Dot_product) using the `dot` method.
n1 = np.arange(10).reshape(2, 5)
n1
n2 = np.arange(15).reshape(5,3)
n2
n1.dot(n2)
# **Caution**: as mentioned previously, `n1*n2` is *not* a dot product, it is an elementwise product.
# ## Matrix inverse and pseudo-inverse
# Many of the linear algebra functions are available in the `numpy.linalg` module, in particular the `inv` function to compute a square matrix's inverse:
# +
import numpy.linalg as linalg
m3 = np.array([[1,2,3],[5,7,11],[21,29,31]])
m3
# -
linalg.inv(m3)
# You can also compute the [pseudoinverse](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse) using `pinv`:
linalg.pinv(m3)
# ## Identity matrix
# The product of a matrix by its inverse returns the identity matrix (with small floating point errors):
m3.dot(linalg.inv(m3))
# You can create an identity matrix of size NxN by calling `eye`:
np.eye(3)
# ## QR decomposition
# The `qr` function computes the [QR decomposition](https://en.wikipedia.org/wiki/QR_decomposition) of a matrix:
q, r = linalg.qr(m3)
q
r
q.dot(r) # q.r equals m3
# ## Determinant
# The `det` function computes the [matrix determinant](https://en.wikipedia.org/wiki/Determinant):
linalg.det(m3) # Computes the matrix determinant
# ## Eigenvalues and eigenvectors
# The `eig` function computes the [eigenvalues and eigenvectors](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors) of a square matrix:
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues # λ
eigenvectors # v
m3.dot(eigenvectors) - eigenvalues * eigenvectors # m3.v - λ*v = 0
# ## Singular Value Decomposition
# The `svd` function takes a matrix and returns its [singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition):
m4 = np.array([[1,0,0,0,2], [0,0,3,0,0], [0,0,0,0,0], [0,2,0,0,0]])
m4
U, S_diag, V = linalg.svd(m4)
U
S_diag
# The `svd` function just returns the values in the diagonal of Σ, but we want the full Σ matrix, so let's create it:
S = np.zeros((4, 5))
S[np.diag_indices(4)] = S_diag
S # Σ
V
U.dot(S).dot(V) # U.Σ.V == m4
# ## Diagonal and trace
np.diag(m3) # the values in the diagonal of m3 (top left to bottom right)
np.trace(m3) # equivalent to np.diag(m3).sum()
# ## Solving a system of linear scalar equations
# The `solve` function solves a system of linear scalar equations, such as:
#
# * $2x + 6y = 6$
# * $5x + 3y = -9$
coeffs = np.array([[2, 6], [5, 3]])
depvars = np.array([6, -9])
solution = linalg.solve(coeffs, depvars)
solution
# Let's check the solution:
coeffs.dot(solution), depvars # yep, it's the same
# Looks good! Another way to check the solution:
np.allclose(coeffs.dot(solution), depvars)
# # Vectorization
# Instead of executing operations on individual array items, one at a time, your code is much more efficient if you try to stick to array operations. This is called *vectorization*. This way, you can benefit from NumPy's many optimizations.
#
# For example, let's say we want to generate a 768x1024 array based on the formula $sin(xy/40.5)$. A **bad** option would be to do the math in python using nested loops:
import math
# Intentionally slow pure-Python reference implementation, kept as the "bad"
# baseline to compare against the vectorized version below.
data = np.empty((768, 1024))
for y in range(768):
    for x in range(1024):
        data[y, x] = math.sin(x*y/40.5) # BAD! Very inefficient.
# Sure, this works, but it's terribly inefficient since the loops are taking place in pure python. Let's vectorize this algorithm. First, we will use NumPy's `meshgrid` function which generates coordinate matrices from coordinate vectors.
x_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]
y_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]
X, Y = np.meshgrid(x_coords, y_coords)
X
Y
# As you can see, both `X` and `Y` are 768x1024 arrays, and all values in `X` correspond to the horizontal coordinate, while all values in `Y` correspond to the vertical coordinate.
#
# Now we can simply compute the result using array operations:
data = np.sin(X*Y/40.5)
# Now we can plot this data using matplotlib's `imshow` function (see the [matplotlib tutorial](tools_matplotlib.ipynb)).
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(1, figsize=(7, 6))
plt.imshow(data, cmap=cm.hot, interpolation="bicubic")
plt.show()
# # Saving and loading
# NumPy makes it easy to save and load `ndarray`s in binary or text format.
#
# ## Binary `.npy` format
# Let's create a random array and save it.
a = np.random.rand(2,3)
a
np.save("my_array", a)
# Done! Since no file extension was provided in the file name, NumPy automatically added `.npy`. Let's take a peek at the file content:
# +
with open("my_array.npy", "rb") as f:
content = f.read()
content
# -
# To load this file into a NumPy array, simply call `load`:
a_loaded = np.load("my_array.npy")
a_loaded
# ## Text format
# Let's try saving the array in text format:
np.savetxt("my_array.csv", a)
# Now let's look at the file content:
with open("my_array.csv", "rt") as f:
print(f.read())
# This is a CSV file with tabs as delimiters. You can set a different delimiter:
np.savetxt("my_array.csv", a, delimiter=",")
# To load this file, just use `loadtxt`:
a_loaded = np.loadtxt("my_array.csv", delimiter=",")
a_loaded
# ## Zipped `.npz` format
# It is also possible to save multiple arrays in one zipped file:
b = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)
b
np.savez("my_arrays", my_a=a, my_b=b)
# Again, let's take a peek at the file content. Note that the `.npz` file extension was automatically added.
# +
with open("my_arrays.npz", "rb") as f:
content = f.read()
repr(content)[:180] + "[...]"
# -
# You then load this file like so:
my_arrays = np.load("my_arrays.npz")
my_arrays
# This is a dict-like object which loads the arrays lazily:
my_arrays.keys()
my_arrays["my_a"]
# # What next?
# Now you know all the fundamentals of NumPy, but there are many more options available. The best way to learn more is to experiment with NumPy, and go through the excellent [reference documentation](http://docs.scipy.org/doc/numpy/reference/index.html) to find more functions and features you may be interested in.
| tools_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="-W_y34U0GGDB"
# ## Connect to Google Drive
#
# + colab={"base_uri": "https://localhost:8080/", "height": 128} colab_type="code" id="vCi2UJ3qFoiR" outputId="16b6fc4f-f820-476b-9770-7168ecc8c9b5"
# Mount Google Drive so the project folder is reachable from this runtime.
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
# + colab={} colab_type="code" id="e24YW6BYF8QI"
# Work from the project directory on Drive.
import os
os.chdir("drive/My Drive/Nanibot_ZaloAIChallenge2019_VietnameseWikiQA/QASystem/")
# + colab={} colab_type="code" id="in2NsBJZGCfk"
# Silence TensorFlow's C++ logging ('3' = show only FATAL messages).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# + [markdown] colab_type="text" id="IpQXRJGyGbMN"
# ## Hyperparameters
# + colab={} colab_type="code" id="r4XAXPDwGdLb"
MAX_SEQ_LEN = 200        # maximum BERT input length, in tokens
LEARNING_RATE = 2e-5     # fine-tuning learning rate
BATCH_SIZE = 16
EPOCHS = 2
DROPOUT_RATE = 0.1
WARMUP_PROPORTION = 0.1  # fraction of training steps used for LR warmup
BERT_MODEL_PATH = "./model/multi_cased/"   # pretrained multilingual cased BERT checkpoint
DATASET_PATH = "./dataset/"
MODEL_PATH = "./finetuned/classifier/"     # output directory for the fine-tuned classifier
# -
# ## Train/Evaluation/Predict
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="upDNpwYGG2-p" outputId="47ce52e1-39b4-4966-e23f-30e7b3ba76ce"
# !python run_zalo.py \
# --mode train \
# --dataset_path $DATASET_PATH \
# --bert_model_path $BERT_MODEL_PATH \
# --model_path $MODEL_PATH \
# --max_sequence_len $MAX_SEQ_LEN \
# --model_learning_rate $LEARNING_RATE \
# --model_batch_size $BATCH_SIZE \
# --train_epochs $EPOCHS \
# --train_dropout_rate $DROPOUT_RATE \
# --bert_warmup_proportion $WARMUP_PROPORTION \
# --force_data_balance True
# + colab={} colab_type="code" id="7q-PEUTpHxzY"
# !python run_zalo.py \
# --mode predict_test \
# --dataset_path $DATASET_PATH \
# --bert_model_path $BERT_MODEL_PATH \
# --model_path $MODEL_PATH \
# --max_sequence_len $MAX_SEQ_LEN \
# + colab={} colab_type="code" id="6I8HzI8SN7Y1"
# !python run_zalo.py \
# --mode eval \
# --dataset_path $DATASET_PATH \
# --bert_model_path $BERT_MODEL_PATH \
# --model_path $MODEL_PATH \
# --max_sequence_len $MAX_SEQ_LEN \
| QASystem/sample_run.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .download-project
# language: python
# name: .download-project
# ---
# +
import numpy as np
import pandas as pa
import pickle
import librosa.display
from src.utils import get_framed_label, train_test_split, from_boolean_array_to_intervals, get_annotated_intervals
from src.data import load_annotation
from src.data import load_radar, load_water_distance, load_weight_sensor, load_audio
from src import make_dataset
from matplotlib import pyplot as plt
# +
# Experiment configuration. USE_IDS is intentionally left empty here; it is
# filled in per train/test split further down.
config = {
    'USE_IDS': [],
    'DATAFRAME_PATH': "C:/Users/Jiajun/Desktop/download-project/data/raw/data_frames",
    'ANNOTATION_PATH': "C:/Users/Jiajun/Desktop/download-project/data/processed/Annotation.csv",
    'WINDOW_SECONDS': 3,  # presumably the framing window length in seconds — confirm in src.utils
    'HOP_SECONDS': 1,     # presumably the hop between consecutive windows — confirm
    'CATEGORY': 'Defecation',
}
# IDs of sessions that have complete annotations for the chosen category.
complete_ids = load_annotation.get_complete_ids(
    annotation_filename = config['ANNOTATION_PATH'],
    category = config['CATEGORY']
)
annotations = load_annotation.get_annotation(config['ANNOTATION_PATH'])
# +
# Keep only session IDs in the (1000, 1900) range, then split into train/test.
selected_ids = complete_ids[(complete_ids < 1900) & (complete_ids > 1000)]
TRAIN_IDS, TEST_IDS = train_test_split(selected_ids)
# Previously used fixed splits, kept for reference:
#TRAIN_IDS = [987, 960, 954, 964, 968, 979, 976, 993, 953, 982, 984, 995, 985, 958]
#TEST_IDS = [989, 970, 971, 986, 978, 992]
print(f"Category: {config['CATEGORY']}")
print(f"Training {len(TRAIN_IDS)} use_ids: {TRAIN_IDS[:5]}...")
print(f"Testing {len(TEST_IDS)} use_ids: {TEST_IDS[:5]}...")
# +
# Build one Seq2Seq dataset per split; each split gets its own copy of the
# config with only USE_IDS differing.
train_config = config.copy()
test_config = config.copy()
train_config['USE_IDS'] = TRAIN_IDS
test_config['USE_IDS'] = TEST_IDS
dataset = {}
dataset['train'] = make_dataset.Seq2SeqDatasetDefecate(train_config)
dataset['test'] = make_dataset.Seq2SeqDatasetDefecate(test_config)
# -
def _stack_split(split):
    """Flatten one dataset split into two numpy arrays.

    Iterates every (feature, label) tensor pair in *split*, converts each to
    numpy and stacks them along axis 0. Returns (features, labels).
    """
    xs, ys = [], []
    for idx in range(len(split)):
        x, y = split[idx]
        xs.append(x.numpy())
        ys.append(y.numpy())
    return np.concatenate(xs), np.concatenate(ys)

# The original train/test extraction loops were copy-pasted; reuse one helper.
train_x, train_y = _stack_split(dataset['train'])
# +
test_x, test_y = _stack_split(dataset['test'])
# -
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

# Baseline: a small random forest on the flattened frame features.
rf = RandomForestClassifier(n_estimators=5)
rf.fit(train_x, train_y)
# Print one classification report per positive-class decision threshold.
for decision_threshold in (0.3, 0.4):
    print(classification_report(
        y_true=test_y,
        y_pred=np.array(rf.predict_proba(test_x)[:, 1] > decision_threshold, dtype=int))
    )
# # Model
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from src.seq2seq.train import train_test_split, train, test
from src.seq2seq.model import LSTMClassifier
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# +
# model
NUM_EPOCHS = 5
# input_dim=60 must match the per-frame feature size the dataset produces
# (TODO confirm); two stacked LSTM layers, two output classes (binary task).
model = LSTMClassifier(input_dim = 60, hidden_dim = 64, output_dim = 2, num_layers = 2)
optimizer = optim.SGD(model.parameters(), lr = 0.3)
criterion = nn.CrossEntropyLoss()
# -
# training
# Run NUM_EPOCHS passes over the training split, then evaluate on the test
# split after each epoch via the project's train()/test() helpers.
for epoch in range(NUM_EPOCHS):
    print ("Epoch : {}".format(epoch))
    train(
        dataset = dataset['train'],
        batch_size = 5,
        model = model,
        optimizer = optimizer,
        criterion = criterion
    )
    test(
        dataset = dataset['test'],
        model = model,
        criterion = criterion
    )
# +
# eval: per-session inference on the held-out IDs, thresholding the
# positive-class probability per frame.
THRESHOLD = 0.3
predictions = np.array([])
labels = np.array([])
m = nn.Softmax(dim=1)  # hoisted out of the loop: the module is stateless
for use_i in TEST_IDS:
    eval_config = config.copy()
    eval_config['USE_IDS'] = [use_i]
    feature, label = make_dataset.Seq2SeqDatasetDefecate(eval_config)[0]
    shape = feature.shape
    with torch.no_grad():  # inference only; no autograd graph needed
        ypred = model(feature.view(shape[0], 1, shape[1])).squeeze(dim=0)
        # BUG FIX: the original `m(ypred)[:, 1].long()` truncated every
        # probability (< 1.0) to 0, so all predictions were 0 and THRESHOLD
        # was never used. Apply the threshold explicitly instead.
        prediction = (m(ypred)[:, 1] > THRESHOLD).long()
    predictions = np.concatenate([predictions, prediction.numpy()])
    labels = np.concatenate([labels, label.numpy()])
# -
from sklearn.metrics import classification_report
print (classification_report(
y_true=labels,
y_pred=predictions))
predictions
| notebooks/0128-defecate-radar-seq2seq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pynlp
# language: python
# name: pynlp
# ---
# + [markdown] id="YuZSyQVvl_d-"
# # SpaCy Lab
# Written in Python, SpaCy performs advanced NLP tasks
# + id="i48CEEw6l_eA" executionInfo={"status": "ok", "timestamp": 1603863804546, "user_tz": 420, "elapsed": 9621, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="f0831a64-2739-4915-e498-17b53dfc5531" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Detect whether we are running inside Google Colab: the %tensorflow_version
# magic only exists on Colab and raises elsewhere, flipping the flag.
try:
    # %tensorflow_version 2.x
    is_colab = True
    # !pip install spacy
    # !python -m spacy download en_core_web_md
    # !pip install spacy-transformers
except Exception:
    is_colab = False
# BUG FIX: the original printed `COLAB`, which is never defined (the flag set
# above is `is_colab`), so this cell always raised NameError.
print(f'\033[00mUsing Google CoLab = \033[93m{is_colab}')
if (is_colab): print("Dependencies installed")
# + [markdown] id="vAx9PeMQl_eK"
# # Spacy: Getting started
#
# As discussed in the lecture portion, Python has two main libraries to help with NLP tasks:
#
# * [NLTK](https://www.nltk.org/)
# * [Spacy](https://spacy.io/)
#
# SpaCy launched in 2015 and has rapidly become an industry standard, and is a focus of our training. SpaCy provides an industrial grade project that is both open-source and contains community driven integrations (see SpaCy Universe).
#
# SpaCy requires you to download language resources (such as models). For the english language, you can use `python -m spacy download en_core_web_sm`. The suffix `_sm` indicates "small" model, while `_md` and `_lg` indicate medium and large, respectively and provide more advanced features (we won't need in this tutorial).
#
# + id="NsNmfdbWmkxZ" executionInfo={"status": "ok", "timestamp": 1603861712457, "user_tz": 420, "elapsed": 8233, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="7ad799c2-7888-4f57-bcbc-c6d712dfd8d7" colab={"base_uri": "https://localhost:8080/", "height": 674}
# !python -m spacy download en_core_web_sm
# + id="Nv3udxHCnV9i" executionInfo={"status": "ok", "timestamp": 1603862624643, "user_tz": 420, "elapsed": 3421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="232eb764-3da7-47cd-ad3c-35132e013bff" colab={"base_uri": "https://localhost:8080/", "height": 270}
# !pip install urllib3==1.25.10
# + id="-SYMMX6bnYtY"
# !pip show urllib3 | grep version
# + id="dDn901chl_eL"
import spacy
nlp = spacy.load('en_core_web_sm')
# Use if needed:
#spacy.util.get_data_path()
# + [markdown] id="cQjRh0yil_eS"
# # Tokenization
#
# For each word in that sentence _spaCy_ generates a [token](https://spacy.io/api/token) for each word in the sentence. The token fields show the raw text, the root of the word (lemma), the Part of Speech (POS), whether or not its a stop word, and many other things.
# + id="ZbUht2qXl_eU" executionInfo={"status": "ok", "timestamp": 1603863732252, "user_tz": 420, "elapsed": 487, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="cfddc2d6-35a3-4622-c46b-835c0f3b6eb7" colab={"base_uri": "https://localhost:8080/", "height": 108}
# (spaCy was already imported above; this re-import is harmless but redundant.)
import spacy
text = "this is a beautiful day"
doc = nlp(text)
# Print the main lexical attributes spaCy computed for every token.
for token in doc:
    print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
          token.shape_, token.is_alpha, token.is_stop)
# + [markdown] id="6Y5IJx92l_eZ"
# # Numeric representation
#
# Let's print the last token and see its _numeric_ representation:
# + id="zgFHH4fpl_ea" executionInfo={"status": "ok", "timestamp": 1603863734944, "user_tz": 420, "elapsed": 448, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="bc86efec-4305-43ee-acad-64baf421b3fe" colab={"base_uri": "https://localhost:8080/", "height": 399}
print(f'The token is from the raw text: \033[92m{token.text}\033[0m\nNumeric representation:\n')
print(token.vector)
print(f'\nThe length of the vector is {token.vector.shape}') # 96 length vector
# + [markdown] id="tNIc9BOml_ed"
# # Display
#
# Note: Run the following as `display.serve` outside of Jupyter
# + id="Rdoj3URjl_ee" executionInfo={"status": "ok", "timestamp": 1603863736627, "user_tz": 420, "elapsed": 526, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="a603bd1a-bf57-43a7-92ba-25620b0dc8ec" colab={"base_uri": "https://localhost:8080/", "height": 475}
from spacy import displacy
displacy.render(doc, style="dep", jupyter=True)
displacy.render(doc, style="ent")
# day is shown as a recognized "DATE"
# + [markdown] id="RWi7haAJl_ej"
# ### Exercise:
#
# Explore different parts of speech & sentence structures.
# * Show PERSON
# * Show location
#
# Some examples:
# * "They met at a cafe in London last year"
# * "Peter went to see his uncle in Brooklyn"
# * "The chicken crossed the road because it was hungry"
# * "The chicken crossed the road because it was narrow"
# + [markdown] id="ZZsB9LKBl_ek"
# # Similarity of two sentences
#
# Let's do the same as above, but mix with two similar sentences
# + id="qwismScsl_em"
sentence_list = ["this is a beautiful day", "today is bright and sunny"]
# + id="9RCD3AlPl_es"
doc_list = list(map(nlp, sentence_list))
# + id="3_dh1_Xil_ey" executionInfo={"status": "ok", "timestamp": 1603863740690, "user_tz": 420, "elapsed": 559, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="135125c0-9ad9-406b-d09d-d7e311f09f50" colab={"base_uri": "https://localhost:8080/", "height": 435}
## Print the token attributes of all sentences as one table using tabulate.
from tabulate import tabulate
import pandas as pd

column_names = ['text', 'lemma', 'pos', 'tag', 'dep', 'shape', 'is_alpha', 'is_stop']
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
# Accumulate plain row lists and build the frame once instead; the final
# tabulate printout (all tokens from all sentences) is unchanged.
rows = []
for doc in doc_list:
    print(f'\n\033[92mPrinting tokens for \033[91m"{doc}"\033[0m')
    for token in doc:
        rows.append([token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
                     token.shape_, token.is_alpha, token.is_stop])
df = pd.DataFrame(rows, columns=column_names)
print(tabulate(df, headers=column_names))
# + [markdown] id="GXlEceRll_e4"
# # Showing similarity between two sentences
#
# 1. "this is a beautiful day"
# 2. "this day is bright and sunny"
#
# Note: If you have loaded the small (sm) dataset, you will get the following warning:
# > UserWarning: [W007] The model you're using has no word vectors loaded, so the result of the Token.similarity method will be based on the tagger, parser and NER, which may not give useful similarity judgements. This may happen if you're using one of the small models, e.g. `en_core_web_sm`, which don't ship with word vectors and only use context-sensitive tensors. You can always add your own word vectors, or use one of the larger models instead if available.
#
# Try:
# * `python -m spacy download en_core_web_md`
# * or: `python -m spacy download en_core_web_lg`
# + id="xbS1Jh-9l_e5"
import warnings
# choose action = 'ignore' to ignore the small dataset warning
warnings.filterwarnings(action = "ignore") # "default"
# + id="couIc9KHl_e-" executionInfo={"status": "ok", "timestamp": 1603863743950, "user_tz": 420, "elapsed": 536, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="1c805378-05ec-4b2c-9f49-35bd3737f7ef" colab={"base_uri": "https://localhost:8080/", "height": 35}
doc_list[0].similarity(doc_list[1])
# + id="LUThTsETl_fC" executionInfo={"status": "error", "timestamp": 1603913356218, "user_tz": 420, "elapsed": 706, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="051a4229-6ac7-4300-b567-8a987c014d31" colab={"base_uri": "https://localhost:8080/", "height": 171}
nlp_md = spacy.load("en_core_web_md")
# + id="6ToOp6jFl_fG" executionInfo={"status": "error", "timestamp": 1603863745872, "user_tz": 420, "elapsed": 539, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="178f54cf-a220-4124-c0ed-2ce0a9b53aeb" colab={"base_uri": "https://localhost:8080/", "height": 207}
# try again
doc_md_list = list(map(nlp_md, sentence_list))
doc_md_list[0].similarity(doc_md_list[1])
# + [markdown] id="t5fzgShQl_fL"
# # Paragraph
#
# How do you deal with multiple sentences?
# + id="wqmoLSqNl_fM" outputId="c81ecfd4-d840-4950-d4ac-f8f58b83bf36"
text = """When we went out for ice-cream last summer, the place was
packed. This year, however, things are eerily different. You can see that
the stores are nearly desserted and roads empty like never before. It's a
reality that we are all getting used to, albeit very slowly and reluctantly.
"""
doc = nlp(text)
for sent in doc.sents:
print(">", sent)
# + [markdown] id="5Hj4_fC4l_fa"
# # Scattertext
#
# The following is nice demonstration of the power of SpaCy with text from the Democratic and Republican conventions over the years. This demo is created by
# derwen.ai using the `scattertext` library.
# + id="J6VGNpWzl_fa" executionInfo={"status": "ok", "timestamp": 1603864636685, "user_tz": 420, "elapsed": 3506, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggu3bTBcysUxxtmnLI9n_OEGc92nfpZXGRAhDZ5vQ=s64", "userId": "12984101785953050215"}} outputId="3c2c343b-fd1b-4c60-b9bb-f51314facfd8" colab={"base_uri": "https://localhost:8080/", "height": 274}
# First, install scattertext
# !pip install scattertext
# + id="HeNkDM0YzO6s"
# ?nlp.create_pipe
# + id="gmOu3OkRl_ff"
import scattertext as st
# By default, the nlp English pipeline comes with `tagger`, `parser`, and `NER`;
# add the merge components so multi-token entities/noun chunks become one token.
# NOTE(review): nlp.create_pipe(...) passed to add_pipe is the spaCy 2.x API;
# spaCy 3 expects nlp.add_pipe("merge_entities") directly — confirm the
# installed spaCy version before running.
if "merge_entities" not in nlp.pipe_names:
    nlp.add_pipe(nlp.create_pipe("merge_entities"))
if "merge_noun_chunks" not in nlp.pipe_names:
    nlp.add_pipe(nlp.create_pipe("merge_noun_chunks"))
convention_df = st.SampleCorpora.ConventionData2012.get_data()
corpus = st.CorpusFromPandas(convention_df,
                             category_col="party",
                             text_col="text",
                             nlp=nlp).build()
# + [markdown] id="eO0PpBGYl_fl"
# Generate interactive visualization once the corpus is ready:
# + id="rAW5yL2Gl_fm"
html = st.produce_scattertext_explorer(
corpus,
category="democrat",
category_name="Democratic",
not_category_name="Republican",
width_in_pixels=1000,
metadata=convention_df["speaker"]
)
# + [markdown] id="wvF2hn3ml_ft"
# Render the visualization:
# + id="_ZLqCy9tl_fu" outputId="6f08dfdb-2a54-4ab7-b9c2-5d8fa0713f15"
from IPython.display import IFrame
from IPython.core.display import display, HTML
import sys
IN_COLAB = "google.colab" in sys.modules
print(IN_COLAB)
# + [markdown] id="MM3jJ0pkl_f0"
# **Use in Google Colab**
# + id="s83lFE99l_f1"
if IN_COLAB:
display(HTML("<style>.container { width:98% !important; }</style>"))
display(HTML(html))
# + [markdown] id="QsLosgmLl_f5"
# **Use in Jupyter**
# + id="BgYaZNjUl_f5" outputId="6293435e-14d5-4f23-953a-bce809c42a5c"
# Write the generated HTML to disk and embed it in the notebook via an IFrame.
file_name = "foo.html"
with open(file_name, "wb") as f:
    f.write(html.encode("utf-8"))
IFrame(src=file_name, width = 1200, height=700)
# + [markdown] id="w6AIHKvNvwUT"
# # The SpaCy universe
#
# That's the end of our intro to SpaCy journey. However, as discussed, SpaCy is an open, collaborative project that has a universe of plugins and datasets that make working with it very helpful for a number of use cases. The following is a sampling of the [SpaCy Universe](https://spacy.io/universe):
# - [Legal: Blackstone](https://spacy.io/universe/project/blackstone)
# - [Biomedical: Kindred](https://spacy.io/universe/project/kindred)
# - [Geographic: mordecai](https://spacy.io/universe/project/mordecai)
# - [Label: Prodigy](https://spacy.io/universe/project/prodigy)
# - [Edge: spacy-raspberry](https://spacy.io/universe/project/spacy-raspberry)
# - [Voice: Rasa NLU](https://spacy.io/universe/project/rasa)
# - [Transformers: spacy-transformers](https://explosion.ai/blog/spacy-pytorch-transformers)
# - [Conference: spaCy IRL 2019](https://irl.spacy.io/2019/)
#
# _Credit: Derwen.ai_
# + id="qs5XELqOvyPY"
# + [markdown] id="VzkFb1HqsDVP"
# ### Exercise: Creating a custom NER system using SpaCy
# Process:
# 1. Start with a good annotated dataset
# 2. Model specific pre-processing
# - list of dict containing lower case
# - Output is a numeric vector - list of tuples (token, POS, IOB, NER)
# + id="nU9IleHNsFEy"
| part1_labs/02_SpaCy.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xeus-cling-cpp14
// ---
#include "xwidgets/xvideo.hpp"
#include "xwebrtc/xvideo_stream.hpp"
// # Create a video_stream directly from a file
auto video_stream1 = xwebrtc::video_stream_from_file("Big.Buck.Bunny.mp4")
    .finalize();
// A bare expression in an xeus-cling cell displays the widget; the stream is
// shown twice here (two output cells). NOTE(review): the duplicated line may
// be accidental — confirm both displays are intended.
video_stream1
video_stream1
// Pause playback on the first stream.
video_stream1.playing = false;
// # Create a video_stream from a video widget
auto video = xw::video_from_file("Big.Buck.Bunny.mp4").finalize();
auto video_stream2 = xwebrtc::video_stream(video);
video_stream2
| notebooks/VideoStream.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="wSIzsO8le_Ha"
# # Twitter Trends Monitor v.1.0
# A simple Python app which connects to the Twitter API and fetches the top 50 trending topics for a list of locations. Optionally, it will send the trends to an Azure Event Hub.
# + [markdown] id="_HLKEWV9opJj"
# ## Install and import libraries
# + colab={"base_uri": "https://localhost:8080/"} id="-rwisz600dwl" outputId="4154c83e-005a-4d8d-d0a7-ef9cc98ed320" gather={"logged": 1613181116252}
# install libraries
# !pip install geocoder
# !pip install tweepy
# !pip install azure-eventhub
# !pip install opencensus-ext-azure
# + id="UkmraQET1c-Z" gather={"logged": 1613297624456}
# import libraries
import tweepy
import os
import json
import sys
import geocoder
import time
import codecs
from azure.eventhub import EventHubProducerClient, EventData
from azure.eventhub.exceptions import EventHubError
import logging
from opencensus.ext.azure.log_exporter import AzureLogHandler
# + [markdown] id="FpJv064Zotr_"
# ## Configure connection strings to Twitter and Azure Event Hub
# + id="_szqB7Kd1hR2" gather={"logged": 1613295792619}
# Twitter API credentials.
# SECURITY NOTE(review): never commit real keys — load these from environment
# variables or a secrets store instead of hard-coding them in the notebook.
consumer_key = "<KEY>"
consumer_secret = "<KEY>"
access_key = "<KEY>"
access_secret = "<KEY>"
# Azure Event Hub connection string and event hub name
connection_string = "xxxxxxxxxxxxxxxxxxxxxxx"
event_hub_name = "xxxxxxxxxxxxxxxxxxxxxxx"
# + [markdown] id="W73IZ1rtoyX7"
# ## Set variables
# Set the list of locations to look up trends, how frequently the twitter API endpoint will be accessed and for how long will the application execute.
# + id="H8HDGPzA1yya" gather={"logged": 1613298724561}
# Set the list of locations (i.e. cities, countries) to monitor
locations = ["Greece", "New York", "Paris"]
# Polling interval: how often (in seconds) the Twitter API is queried
queryIntervalSeconds = 30
# Total run time of the monitor, in seconds (here: 2 minutes)
runtime = 2*60
# initialize tweepy client with the OAuth credentials defined above
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Main Code
# + id="e1KYjReglIH8" gather={"logged": 1613296870658}
def send_event_data_batch(message):
    """Send one trends JSON payload to the Azure Event Hub as a single-event batch.

    A fresh producer is created per call; the `finally` guarantees the AMQP
    connection is closed even if batching or sending raises (the original
    leaked the producer on failure).
    """
    producer = EventHubProducerClient.from_connection_string(conn_str=connection_string,eventhub_name=event_hub_name)
    try:
        event_data_batch = producer.create_batch()
        event_data_batch.add(EventData(message))
        producer.send_batch(event_data_batch)
    finally:
        producer.close()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1613298404436}
def saveTrendsToJson(output):
    """Append one trends response (for a single location) as a JSON line."""
    print("saving output to json")
    with open('trends-output.json', 'a', encoding='utf8') as sink:
        json.dump(output, sink, ensure_ascii=False)
        sink.write("\n")
    return None
# + id="ST9skaDPPrQy" gather={"logged": 1613298714550}
def getTrendingTopics(locations):
    """Fetch Twitter trends for the place closest to each location name.

    For each location: geocode it via OSM, look up the nearest trend place
    through the Twitter API, persist the raw response to the JSON-lines file
    and forward it to the Azure Event Hub. A failure for one location is
    logged and never aborts the remaining ones.

    NOTE(review): trends_closest/trends_place are tweepy 3.x method names;
    tweepy 4 renamed them (closest_trends/get_place_trends) — confirm the
    pinned tweepy version.
    """
    for i in locations:
        g = None  # geocode result; stays None if geocoder.osm itself raises
        try:
            g = geocoder.osm(i)
            print("Fetcing trending topics for location:", g)
            closest_loc = api.trends_closest(g.lat, g.lng)
            trendingTopics = api.trends_place(closest_loc[0]['woeid'])
            trendingTopicsJson = json.dumps(trendingTopics)
            saveTrendsToJson(trendingTopics)
            # comment out below line to skip sending the topics list to the event hub
            send_event_data_batch(trendingTopicsJson)
            print("Successfully processed trending topics for location:", g)
        except KeyError:
            print("reached keyerror")
            continue
        except Exception:
            print("error:", sys.exc_info()[0])
            # BUG FIX: the original logged `g`, which is unbound whenever
            # geocoder.osm was the call that raised (NameError inside the
            # handler). Fall back to the raw location string.
            logger.warning("Unable to find location %s", g if g is not None else i)
            continue
# + [markdown] id="gNA3jLnwfQAh"
# ## Start fetching trends!
# Execute this cell to initiate the Trends Monitor.
# + id="8jjVJueeYods"
start_time = time.time()
logger = logging.getLogger(__name__)
# SECURITY NOTE(review): the Application Insights instrumentation key is
# hard-coded here — move it to an environment variable / config before sharing.
logger.addHandler(AzureLogHandler(connection_string='InstrumentationKey=d25e2379-7318-47c4-a9ad-178c869e13d8'))
print("Started tracking trending topics for",runtime,"seconds at UTC time",time.strftime("%H:%M:%S", time.localtime()), "and for",len(locations),"locations:",locations)
while True:
    if (time.time() - start_time) < runtime:
        trendingTopics = getTrendingTopics(locations)
        # Sleep until the next multiple of the polling interval so fetches
        # stay aligned to a fixed grid regardless of how long a fetch took.
        time.sleep(queryIntervalSeconds - ((time.time() - start_time) % queryIntervalSeconds))
    else:
        t = time.localtime()
        current_time = time.strftime("%H:%M:%S", t)
        # NOTE(review): the message says "UTC" but time.localtime() returns
        # local time — confirm which is intended.
        print('Runtime limit of', runtime, ' seconds reached, stopping connection at UTC time.',current_time)
        sys.exit()
| twitterTrendMonitor_v_1_0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Bond Distortion Method applied to CdTe vacancies
# ### Table of contents
# * [Generate defects with doped](#generate_defects)
# * [Apply rattle+BDM to defects](#apply_BDM)
# * [Plot energy of final structures obtained with BDM](#plot_BDM_energy)
# * [Analyse defect distortions](#analyse_defect_distortions)
# +
import sys
import os
import BDM
from BDM.BDM import apply_RBDM_defect_dict # module where Bond Distortion and Rattle functions are
# -
import doped
import pymatgen
import ase
from importlib.metadata import version
# check versions
print("Doped version:" , version('doped') )
print("Pymatgen version:" , version('pymatgen') )
print("Ase version:" , version('ase') )
# <a id='generate_defects'></a>
# #### 1. Generate defects with doped
# +
from doped.pycdt.core.defectsmaker import ChargedDefectsStructures
from pymatgen.core.structure import Structure
# Specify perfect (bulk) supercell structure used as the defect host
bulk_supercell = Structure.from_file("./CdTe_bulk_supercell_POSCAR")
# Generate the charged vacancy defects; antisites and interstitials are
# explicitly excluded. cellmax is set to the bulk cell's site count
# (presumably to keep the defect supercell the same size — confirm in doped docs).
def_structs = ChargedDefectsStructures(
    bulk_supercell,
    cellmax=bulk_supercell.num_sites,
    antisites_flag=False,
    include_interstitials=False,
)
# -
# Keep only the vacancy entries for the distortion workflow below.
defect_dictionary = {'vacancies': def_structs.defects['vacancies']}
# ##### Rationale for BDM
# Deep defect relaxations tend to follow the change in electron count, so for the neutral Cd vacancy, any localised distortions are likely to involve two neighbouring Te moving closer/further apart (as we're going from a -2 oxidation state to 0). For the singly-charged vacancy, likely to have just one neighbouring Te moving, etc.
#
# So, the Bond Distortion Method involves distorting the initial bond lengths around the vacancy for a set of trial distortion amounts, performing a rough `vasp_gam` relaxation for each and then comparing the energies, to see if we find any lower energy structures.
# <a id='apply_BDM'></a>
# #### 2. Apply the Bond Distortion Method to your defects
# In order to determine the number of the defect nearest neighbours to distort you can set
# the chemical valences and oxidation states of the atoms in your material:
# Note: (Update in BDM: now the number of extra/missing e- is determined based on the oxidation states)
valences_oxi_states_CdTe = {"valences": {"Cd": 2, "Te": 6},
"oxi_states": {"Cd": +2, "Te": -2}
}
# +
# And input them, along the defect dictionary and INCAR settings to the function:
# apply_RBDM_defect_dict?
# -
#
# ##### Some comments about the default values in the previous function:
# * The default `INCAR` settings have been tested, and performed better than others (a wider interval of BDM distortions leads to the defect ground-state).
#
# * The Bond Distortion Increment (`bdm_increment`) determines the increment used to generate the grid for distorting the defect nearest neighbours. Recommended values: 0.1-0.3
#
# * The user can also input a list of distortions using `bdm_distortions` (e.g. [-0.4, 0.4])
#
# * If the defect is surrounded by chemically different neighbours, the user *can* select the element to distort with `distorted_elements`. This is a dictionary mapping defect name (without charge state) to element symbol (e.g. {'vac_1_Cd': 'Te'} ). If not specified, the nearest neighbours will be selected.
#
# * The standard deviation (`std_dev`) of the rattle function controls the magnitude of the random distortion applied to your structure. For materials with bond distances ~2.5-3.5 A, `std_dev`=2.5 A performed the best. However, for some systems (highly packed structures/smaller bond lengths) it may be too much. In these cases, it may be better to use `std_dev`=0.15 or 0.05 A.
#
# * Further information on the distorted atoms and their initial and final distances can be obtained by setting the optional keyword `verbose` to True.
# Using these distorted structures, we can run `vasp_gam` defect relaxations with these `INCAR` settings:
# `IBRION = 2`, `POTIM = 0.2`, `LREAL = Auto`, `ROPT = 1e-3 1e-3`, `ADDGRID = False`
# to quickly test if these distortions result in a lower energy structure.
# Note: For `ROPT`, set as many `1e-3` as species in your `POTCAR` (see [VASP ROPT](https://www.vasp.at/wiki/index.php/ROPT) )
# Incar settings we'd use
incar_settings = {'ADDGRID': False, 'ALGO': 'Normal', 'EDIFFG': -0.01, 'IBRION': 2, 'ISPIN': 2, 'POTIM': 0.2,
'LVHAR': False, 'LSUBROT': False, 'LREAL': 'Auto', 'ROPT': '1e-3 1e-3', 'LWAVE': False, 'LCHARG': False}
# + jupyter={"outputs_hidden": true}
defect_dictionary
# -
# Generate the rattled + bond-distorted structures for every defect in the
# dictionary (presumably also writing the VASP input files by default, since
# the later call disables that with write_files=False — confirm).
bdm_defect_dict = apply_RBDM_defect_dict(
    defect_dictionary,
    valences_oxi_states = valences_oxi_states_CdTe,
    incar_settings = incar_settings,
    bdm_increment = 0.1,  # 10% bond-length distortion steps
)
# If you prefer a different BDM distortion grid, you can apply it with `bdm_distortions`:
# +
## Can also choose your preferred BDM distortions
bdm_distortions = [-0.4, -0.2, 0.2, 0.4]
bdm_defect_dict = apply_RBDM_defect_dict(
defect_dictionary,
valences_oxi_states = valences_oxi_states_CdTe,
incar_settings = incar_settings,
bdm_distortions = bdm_distortions,
write_files = False,
verbose = True)
# -
# check applied distortions
print(f"Applied distortions: {bdm_defect_dict['vac_1_Cd']['vac_1_Cd_0'].keys()}")
# #### 3. Send to HPCs and run
# Then parse the energies by running the `BDM_parsing_script.sh` from the `defect-finder` folder, in the top-level folder containing your defect folders (e.g. `vac_2_Te_0` etc. (with subfolders: `vac_2_Te_0/BDM/vac_2_Te_0_10.0%_BDM_Distortion/vasp_gam` etc.). This will parse the energies and store them in a `{defect_name}.txt` file (in the `{defect_name}/BDM` folders) to allow easy plotting and analysis.
#
# Also recommended to parse the final structures (CONTCARs) obtained with each BDM distortion to your local computer to ease the structural analysis. Can go to the local folder where BDM input is and use scp/rsync...
# ```bash
# rsync --exclude-from {path_to_BDM_folder}/excluded_files_BDM_rsync.txt remote_machine:{path_to_folder_with_all_defects}/${defect_name} {local_destination}
# ```
# <a id='plot_BDM_energy'></a>
# #### 4. Plot energies vs distortion
# To see if BDM found any deep relaxation, you can quickly plot the results with some of the plotting functions from `plot_BDM.py`.
# Easiest to use `plot_all_defects`. Just input a dictionary with the defect names and charges you want to analyse (e.g {"vac_1_Cd":[0,-1,-2]}, "vac_2_Te": [0,1,2]).
# It will determine if an energy-lowering distortion was found, and, if so, will show a plot of relative energy versus BDM distortion.
# +
from BDM.plot_BDM import plot_all_defects
# plot_all_defects?
# +
# Let's do the CdTe vacancies
output_path = os.getcwd()+"/output_V_Cd/" # where the output is
print("Directory where your ouput is? \n", output_path)
print("\nLet's see if BDM found any energy lowering distortion...")
print("Will show a plot for the defects where BDM did find it\n")
plot_all_defects({"vac_1_Cd":[0,-1,-2]
},
output_path,
add_colorbar=False)
# -
# ##### Can also add a colorbar
# More informative if you add a colorbar showing the structural similarity between the final structures.
# For this you need the CONTCAR's obtained with each BDM distortion.
# You can either use:
# * root mean squared displacement (normalized by (Vol / nsites) ** (1/3)) (`rms` = 0) or
# * maximum distance between paired sites (`rms` = 1).
#
# A more detailed description of these functions can be found: [Pymatgen Structure Matcher](https://pymatgen.org/pymatgen.analysis.structure_matcher.html) and look for `get_rms_dist`
# +
plot_all_defects({"vac_1_Cd": [0,-1,-2] },
output_path,
add_colorbar = True, # Set add_colorbar to True
rms = 1, # maximum distance between paired sites
)
print("BDM only found a significant energy lowering distortion for V_{Cd}^0")
# -
# <a id='analyse_defect_distortions'></a>
# #### 5. Analyse the defect distortions found with BDM
# Can quickly analyse the defect distortion with some of the functions from `BDM.analyse_defects`:
output_path = os.getcwd() + "/output_V_Cd/"  # where the BDM output lives
print("Directory where your output is? \n", output_path)  # fixed typo: "ouput" -> "output"
# +
from BDM.analyse_defects import get_structures, analyse_structure, compare_structures
# Grab all structures obtained with BDM and Unperturbed relaxation.
# This gives a dictionary mapping each BDM distortion to its final structure.
vac_1_Cd_0 = get_structures("vac_1_Cd_0", output_path)
# Analyse the local environment of selected final structures:
analyse_structure("vac_1_Cd_0", vac_1_Cd_0["Unperturbed"], output_path)
analyse_structure("vac_1_Cd_0", vac_1_Cd_0[-0.4], output_path)  # the -40% distortion
# -
# Can also compare the structural similarity between all the BDM structures with `compare_structures`. It prints the root mean squared displacement,
# maximum distance between paired sites, and energy (relative to unperturbed structure) of all BDM final structures relative to the Unperturbed
# +
from BDM.analyse_defects import get_energies, get_structures, analyse_structure, compare_structures

# Compare every BDM final structure against the Unperturbed one: prints RMS
# displacement, max distance between paired sites, and relative energy.
output_path = os.getcwd() + "/output_V_Cd/"
vac_1_Cd_0 = get_structures("vac_1_Cd_0", output_path)
defect_energies = get_energies("vac_1_Cd_0", output_path)
structure_comparison = compare_structures(
    vac_1_Cd_0,
    defect_energies,
    compare_to="Unperturbed",
)
# -
| Example_notebook/Defect_Finding_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''base'': conda)'
# language: python
# name: python37464bitbasecondad572fd0cb7ab471fbfff7fdb64f304d8
# ---
# ## Artists Exploratory Data Analysis
#
# The `artist.csv` file contains artists data fetched from the playlists from `playlist_filtered.csv` data.
#
# This notebook includes only:
# - Removal of collaborators and other non-bolivian artists.
# - Exploration of genres.
#
# The **Complete Exploratory Analysis** can be found in `Capstone-Project.ipynb` notebook in the root folder of this project. Where a full analysis is provided.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the raw artists data and reset the index in one chained call.
df = pd.read_csv('artists_raw.csv').reset_index(drop=True)
df.shape
df.head(10)
# ### Remove duplicates
# Inspect artists whose name appears more than once, then keep only the first occurrence.
name_is_dup = df.duplicated('name')
duplicates = df[name_is_dup].sort_values('name')
duplicates
df = df.drop_duplicates(subset='name', keep='first')
df.shape
# ### Remove non-Bolivian artists
# All artists whose genre string contains a substring of another country's name.
# Flag artists whose genres mention another Latin-American country, then drop them.
country_pattern = 'argentin|chile|colombia|mexic|peru|uruguay'
is_international = df.genres.str.contains(country_pattern)
internationals = df[is_international]
df = df[~df.artist_id.isin(internationals.artist_id)]
print("Internationals", internationals.shape)
internationals[['name', 'genres']]
df.head(5)
# ## Manually excluded
# The excluded file contains artists from other countries, near-duplicates (names that are not exactly equal), and other irrelevant entries.
# Drop the manually curated exclusion list.
excluded_artists = pd.read_csv('artists_excluded.csv')
excluded_artists.shape
excluded_ids = excluded_artists.artist_id
df = df[~df.artist_id.isin(excluded_ids)]
df.shape
# Peek at the first 50 remaining rows.
df.iloc[range(50)]
# ## Exploratory Data Analysis
# Zoomed Popularity vs Followers
# Scatter of popularity vs followers, zoomed to artists with at most 8000 followers.
followers_limit = 8000
within_limit = df.followers <= followers_limit
ax = df[within_limit].plot.scatter(x='popularity', y='followers', figsize=(16, 8))
plt.xticks(range(51), range(51));  # one tick per popularity value (0-50)
# ## Genres
df.genres.value_counts()
from ast import literal_eval
# The genres column is stored as stringified lists; parse it into real Python lists.
df['genres'] = df['genres'].map(literal_eval)
# Artists whose parsed genre list is empty.
no_genre = df[df['genres'].map(len) == 0]
no_genre
# +
# Manual genre assignments for the artists that have no genre on Spotify.
assignments = [{
    'genre': 'folklore boliviano',
    'names': ['Andesur', 'Canto Popular', 'Antares', 'Bolivia', 'Banda Intercontinental Poopó']
}, {
    'genre': 'hip hop boliviano',
    'names': ['<NAME>', 'Zckrap', '<NAME>']
}, {
    'genre': 'latin pop',
    'names': ['Bolivia Band']
}, {
    'genre': 'reggae en espanol',
    'names': ['<NAME>']
}]
df_copy = df.copy()
for assignment in assignments:
    genre = assignment['genre']
    names = assignment['names']
    to_modify = df_copy.name.isin(names)
    # Assign fresh lists via .loc instead of the original chained-indexing
    # `df_copy[to_modify].genres.apply(lambda g: g.append(genre))`, which
    # mutated the list objects in place. Because DataFrame.copy() copies
    # object cells by reference, that in-place append also altered the lists
    # inside the original `df`, defeating the purpose of the copy (and it
    # relied on chained indexing, which pandas warns against).
    df_copy.loc[to_modify, 'genres'] = df_copy.loc[to_modify, 'genres'].apply(
        lambda genres, genre=genre: genres + [genre]
    )
df_copy.iloc[:-20]  # NOTE(review): `[:-20]` drops the last 20 rows; `[-20:]` (show tail) may have been intended -- confirm
# -
| data/artists_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/02_using_basemaps.ipynb)
# [](https://gishub.org/leafmap-pangeo)
#
# **Using basemaps in leafmap**
#
# Uncomment the following line to install [leafmap](https://leafmap.org) if needed.
import leafmap
# Create an interactive map.
# Create a map with leafmap's defaults; the bare `m` on its own line renders
# the widget as the notebook cell's output.
m = leafmap.Map()
m
# Specify a Google basemap to use, can be one of ["ROADMAP", "TERRAIN", "SATELLITE", "HYBRID"].
m = leafmap.Map(google_map="HYBRID")
m
m = leafmap.Map(google_map="TERRAIN")
m
# Add a basemap using the `add_basemap()` function.
# Stack two extra basemaps on a fresh map (order determines layer stacking).
m = leafmap.Map()
for layer_name in ("HYBRID", "Esri.NatGeoWorldMap"):
    m.add_basemap(layer_name)
m
# Add an XYZ tile layer.
# Add a custom XYZ tile layer (Google Satellite) to a fresh map.
m = leafmap.Map()
google_satellite_url = "https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}"
m.add_tile_layer(
    url=google_satellite_url,
    name="Google Satellite",
    attribution="Google",
)
m
# Add a WMS tile layer.
# Add a WMS layer (USGS NAIP imagery) to a fresh map.
m = leafmap.Map()
naip_url = 'https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?'
m.add_wms_layer(
    url=naip_url,
    layers='0',
    name='NAIP Imagery',
    format='image/png',
    shown=True,
)
m
# Add a legend to the map.
# +
# Overlay the NWI wetlands WMS layers and attach the built-in NWI legend.
m = leafmap.Map(google_map="HYBRID")
wetlands_layers = [
    ("https://www.fws.gov/wetlands/arcgis/services/Wetlands/MapServer/WMSServer?", "1", "NWI Wetlands Vector"),
    ("https://www.fws.gov/wetlands/arcgis/services/Wetlands_Raster/ImageServer/WMSServer?", "0", "NWI Wetlands Raster"),
]
for wms_url, wms_layer, layer_name in wetlands_layers:
    m.add_wms_layer(wms_url, layers=wms_layer, format='image/png', transparent=True, name=layer_name)
m.add_legend(builtin_legend="NWI")
m
# -
# Add a layer from [xyzservices](https://github.com/geopandas/xyzservices) provider object
import os
import xyzservices.providers as xyz

# Grab the HERE v3 basic-map provider object and display it.
here_basemap = xyz.HEREv3.basicMap
here_basemap
# HERE tiles require an API key; read it from the environment
# (raises KeyError if HEREMAPS_API_KEY is not set).
here_basemap['apiKey'] = os.environ["HEREMAPS_API_KEY"]
# Add the provider object as a basemap layer.
m = leafmap.Map()
m.add_basemap(here_basemap)
m
| examples/notebooks/02_using_basemaps.ipynb |